/* lwp_arch.c */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2020-11-18 Jesven first version
  9. * 2021-02-03 lizhirui port to riscv64
  10. * 2021-02-06 lizhirui add thread filter
  11. * 2021-02-19 lizhirui port to new version of rt-smart
  12. * 2021-03-02 lizhirui add a auxillary function for interrupt
  13. * 2021-03-04 lizhirui delete thread filter
  14. * 2021-03-04 lizhirui modify for new version of rt-smart
  15. * 2021-11-22 JasonHu add lwp_set_thread_context
  16. * 2021-11-30 JasonHu add clone/fork support
  17. * 2023-07-16 Shell Move part of the codes to C from asm in signal handling
  18. */
  19. #include <rthw.h>
  20. #include <rtthread.h>
  21. #include <stddef.h>
  22. #ifdef ARCH_MM_MMU
  23. #define DBG_TAG "lwp.arch"
  24. #define DBG_LVL DBG_INFO
  25. #include <rtdbg.h>
  26. #include <lwp.h>
  27. #include <lwp_arch.h>
  28. #include <lwp_user_mm.h>
  29. #include <page.h>
  30. #include <cpuport.h>
  31. #include <encoding.h>
  32. #include <stack.h>
  33. #include <cache.h>
  34. extern rt_ubase_t MMUTable[];
  35. void *lwp_copy_return_code_to_user_stack()
  36. {
  37. void lwp_thread_return();
  38. void lwp_thread_return_end();
  39. rt_thread_t tid = rt_thread_self();
  40. if (tid->user_stack != RT_NULL)
  41. {
  42. rt_size_t size = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;
  43. rt_size_t userstack = (rt_size_t)tid->user_stack + tid->user_stack_size - size;
  44. rt_memcpy((void *)userstack, lwp_thread_return, size);
  45. return (void *)userstack;
  46. }
  47. return RT_NULL;
  48. }
  49. rt_ubase_t lwp_fix_sp(rt_ubase_t cursp)
  50. {
  51. void lwp_thread_return();
  52. void lwp_thread_return_end();
  53. if (cursp == 0)
  54. {
  55. return 0;
  56. }
  57. return cursp - ((rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return);
  58. }
  59. rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
  60. {
  61. return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
  62. }
  63. void *get_thread_kernel_stack_top(rt_thread_t thread)
  64. {
  65. return (void *)(((rt_size_t)thread->stack_addr) + ((rt_size_t)thread->stack_size));
  66. }
  67. void *arch_get_user_sp(void)
  68. {
  69. /* user sp saved in interrupt context */
  70. rt_thread_t self = rt_thread_self();
  71. rt_uint8_t *stack_top = (rt_uint8_t *)self->stack_addr + self->stack_size;
  72. struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)(stack_top - sizeof(struct rt_hw_stack_frame));
  73. return (void *)frame->user_sp_exc_stack;
  74. }
  75. int arch_user_space_init(struct rt_lwp *lwp)
  76. {
  77. rt_ubase_t *mmu_table;
  78. mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
  79. if (!mmu_table)
  80. {
  81. return -RT_ENOMEM;
  82. }
  83. lwp->end_heap = USER_HEAP_VADDR;
  84. rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
  85. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
  86. lwp->aspace = rt_aspace_create(
  87. (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
  88. if (!lwp->aspace)
  89. {
  90. return -RT_ERROR;
  91. }
  92. return 0;
  93. }
  94. void *arch_kernel_mmu_table_get(void)
  95. {
  96. return (void *)((char *)MMUTable);
  97. }
  98. void arch_user_space_free(struct rt_lwp *lwp)
  99. {
  100. if (lwp)
  101. {
  102. RT_ASSERT(lwp->aspace);
  103. void *pgtbl = lwp->aspace->page_table;
  104. rt_aspace_delete(lwp->aspace);
  105. /* must be freed after aspace delete, pgtbl is required for unmap */
  106. rt_pages_free(pgtbl, 0);
  107. lwp->aspace = RT_NULL;
  108. }
  109. else
  110. {
  111. LOG_W("%s: NULL lwp as parameter", __func__);
  112. RT_ASSERT(0);
  113. }
  114. }
  115. long _sys_clone(void *arg[]);
  116. long sys_clone(void *arg[])
  117. {
  118. return _sys_clone(arg);
  119. }
  120. long _sys_fork(void);
  121. long sys_fork(void)
  122. {
  123. return _sys_fork();
  124. }
  125. long _sys_vfork(void);
  126. long sys_vfork(void)
  127. {
  128. return _sys_fork();
  129. }
  130. /**
  131. * set exec context for fork/clone.
  132. */
  133. int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
  134. void *user_stack, void **thread_sp)
  135. {
  136. RT_ASSERT(exit != RT_NULL);
  137. RT_ASSERT(user_stack != RT_NULL);
  138. RT_ASSERT(new_thread_stack != RT_NULL);
  139. RT_ASSERT(thread_sp != RT_NULL);
  140. struct rt_hw_stack_frame *syscall_frame;
  141. struct rt_hw_stack_frame *thread_frame;
  142. rt_uint8_t *stk;
  143. rt_uint8_t *syscall_stk;
  144. stk = (rt_uint8_t *)new_thread_stack;
  145. /* reserve syscall context, all the registers are copyed from parent */
  146. stk -= CTX_REG_NR * REGBYTES;
  147. syscall_stk = stk;
  148. syscall_frame = (struct rt_hw_stack_frame *)stk;
  149. /* modify user sp */
  150. syscall_frame->user_sp_exc_stack = (rt_ubase_t)user_stack;
  151. /* skip ecall */
  152. syscall_frame->epc += 4;
  153. /* child return value is 0 */
  154. syscall_frame->a0 = 0;
  155. syscall_frame->a1 = 0;
  156. /* reset thread area */
  157. rt_thread_t thread = rt_container_of((unsigned long)thread_sp, struct rt_thread, sp);
  158. syscall_frame->tp = (rt_ubase_t)thread->thread_idr;
  159. #ifdef ARCH_USING_NEW_CTX_SWITCH
  160. extern void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus);
  161. rt_ubase_t sstatus = read_csr(sstatus) | SSTATUS_SPP;
  162. sstatus &= ~SSTATUS_SIE;
  163. /* compatible to RESTORE_CONTEXT */
  164. stk = (void *)_rt_hw_stack_init((rt_ubase_t *)stk, (rt_ubase_t)exit, sstatus);
  165. #else
  166. /* build temp thread context */
  167. stk -= sizeof(struct rt_hw_stack_frame);
  168. thread_frame = (struct rt_hw_stack_frame *)stk;
  169. int i;
  170. for (i = 0; i < sizeof(struct rt_hw_stack_frame) / sizeof(rt_ubase_t); i++)
  171. {
  172. ((rt_ubase_t *)thread_frame)[i] = 0xdeadbeaf;
  173. }
  174. /* set pc for thread */
  175. thread_frame->epc = (rt_ubase_t)exit;
  176. /* set old exception mode as supervisor, because in kernel */
  177. thread_frame->sstatus = read_csr(sstatus) | SSTATUS_SPP;
  178. thread_frame->sstatus &= ~SSTATUS_SIE; /* must disable interrupt */
  179. /* set stack as syscall stack */
  180. thread_frame->user_sp_exc_stack = (rt_ubase_t)syscall_stk;
  181. #endif /* ARCH_USING_NEW_CTX_SWITCH */
  182. /* save new stack top */
  183. *thread_sp = (void *)stk;
  184. /**
  185. * The stack for child thread:
  186. *
  187. * +------------------------+ --> kernel stack top
  188. * | syscall stack |
  189. * | |
  190. * | @sp | --> `user_stack`
  191. * | @epc | --> user ecall addr + 4 (skip ecall)
  192. * | @a0&a1 | --> 0 (for child return 0)
  193. * | |
  194. * +------------------------+ --> temp thread stack top
  195. * | temp thread stack | ^
  196. * | | |
  197. * | @sp | ---------/
  198. * | @epc | --> `exit` (arch_clone_exit/arch_fork_exit)
  199. * | |
  200. * +------------------------+ --> thread sp
  201. */
  202. }
/* NOTE(review): name looks like a typo for ALIGN_BYTES; not referenced in
 * this chunk — confirm no external users before renaming. */
#define ALGIN_BYTES (16)

/**
 * Signal frame pushed onto the user stack before a handler runs.
 * Popped/consumed again by arch_signal_ucontext_restore() on sigreturn.
 */
struct signal_ucontext
{
    rt_int64_t sigreturn;      /* copy of the lwp_sigreturn trampoline code
                                * (lowest address of the frame) */
    lwp_sigset_t save_sigmask; /* signal mask restored on sigreturn */
    siginfo_t si;              /* siginfo delivered to the handler */

    rt_align(16)               /* keep the saved frame 16-byte aligned */
    struct rt_hw_stack_frame frame; /* exception frame of the interrupted context */
};
  212. void *arch_signal_ucontext_restore(rt_base_t user_sp)
  213. {
  214. struct signal_ucontext *new_sp;
  215. new_sp = (void *)user_sp;
  216. if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
  217. {
  218. lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
  219. }
  220. else
  221. {
  222. LOG_I("User frame corrupted during signal handling\nexiting...");
  223. sys_exit(EXIT_FAILURE);
  224. }
  225. return (void *)&new_sp->frame;
  226. }
  227. void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
  228. struct rt_hw_stack_frame *exp_frame, rt_base_t user_sp,
  229. lwp_sigset_t *save_sig_mask)
  230. {
  231. struct signal_ucontext *new_sp;
  232. new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));
  233. if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
  234. {
  235. /* push psiginfo */
  236. if (psiginfo)
  237. {
  238. memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
  239. }
  240. memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));
  241. /* copy the save_sig_mask */
  242. memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));
  243. /* copy lwp_sigreturn */
  244. const size_t lwp_sigreturn_bytes = 8;
  245. extern void lwp_sigreturn(void);
  246. /* -> ensure that the sigreturn start at the outer most boundary */
  247. memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
  248. /**
  249. * synchronize dcache & icache if target is
  250. * a Harvard Architecture machine, otherwise
  251. * do nothing
  252. */
  253. rt_hw_sync_cache_local(&new_sp->sigreturn, 8);
  254. }
  255. else
  256. {
  257. LOG_I("%s: User stack overflow", __func__);
  258. sys_exit(EXIT_FAILURE);
  259. }
  260. return new_sp;
  261. }
  262. /**
  263. * void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
  264. */
  265. void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
  266. {
  267. arch_start_umode(args, user_entry, (void *)USER_STACK_VEND, kernel_stack);
  268. }
  269. #endif /* ARCH_MM_MMU */