/* lwp_arch.c */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2020-11-18 Jesven first version
  9. * 2021-02-03 lizhirui port to riscv64
  10. * 2021-02-06 lizhirui add thread filter
  11. * 2021-02-19 lizhirui port to new version of rt-smart
* 2021-03-02     lizhirui     add an auxiliary function for interrupt
  13. * 2021-03-04 lizhirui delete thread filter
  14. * 2021-03-04 lizhirui modify for new version of rt-smart
  15. * 2021-11-22 JasonHu add lwp_set_thread_context
  16. * 2021-11-30 JasonHu add clone/fork support
  17. */
  18. #include <rthw.h>
  19. #include <rtthread.h>
  20. #include <stddef.h>
  21. #ifdef ARCH_MM_MMU
  22. #include <lwp.h>
  23. #include <lwp_arch.h>
  24. #include <lwp_user_mm.h>
  25. #include <page.h>
  26. #include <cpuport.h>
  27. #include <encoding.h>
  28. #include <stack.h>
  29. extern rt_ubase_t MMUTable[];
  30. void *lwp_copy_return_code_to_user_stack()
  31. {
  32. void lwp_thread_return();
  33. void lwp_thread_return_end();
  34. rt_thread_t tid = rt_thread_self();
  35. if (tid->user_stack != RT_NULL)
  36. {
  37. rt_size_t size = (rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return;
  38. rt_size_t userstack = (rt_size_t)tid->user_stack + tid->user_stack_size - size;
  39. rt_memcpy((void *)userstack, lwp_thread_return, size);
  40. return (void *)userstack;
  41. }
  42. return RT_NULL;
  43. }
  44. rt_ubase_t lwp_fix_sp(rt_ubase_t cursp)
  45. {
  46. void lwp_thread_return();
  47. void lwp_thread_return_end();
  48. if (cursp == 0)
  49. {
  50. return 0;
  51. }
  52. return cursp - ((rt_size_t)lwp_thread_return_end - (rt_size_t)lwp_thread_return);
  53. }
  54. rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
  55. {
  56. return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
  57. }
  58. void *get_thread_kernel_stack_top(rt_thread_t thread)
  59. {
  60. return (void *)(((rt_size_t)thread->stack_addr) + ((rt_size_t)thread->stack_size));
  61. }
  62. void *arch_get_user_sp(void)
  63. {
  64. /* user sp saved in interrupt context */
  65. rt_thread_t self = rt_thread_self();
  66. rt_uint8_t *stack_top = (rt_uint8_t *)self->stack_addr + self->stack_size;
  67. struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)(stack_top - sizeof(struct rt_hw_stack_frame));
  68. return (void *)frame->user_sp_exc_stack;
  69. }
  70. int arch_user_space_init(struct rt_lwp *lwp)
  71. {
  72. rt_ubase_t *mmu_table;
  73. mmu_table = (rt_ubase_t *)rt_pages_alloc(0);
  74. if (!mmu_table)
  75. {
  76. return -1;
  77. }
  78. lwp->end_heap = USER_HEAP_VADDR;
  79. rt_memcpy(mmu_table, MMUTable, ARCH_PAGE_SIZE);
  80. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
  81. lwp->aspace = rt_aspace_create(
  82. (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
  83. if (!lwp->aspace)
  84. {
  85. return -1;
  86. }
  87. return 0;
  88. }
  89. void *arch_kernel_mmu_table_get(void)
  90. {
  91. return (void *)((char *)MMUTable);
  92. }
  93. void arch_user_space_vtable_free(struct rt_lwp *lwp)
  94. {
  95. if (lwp && lwp->aspace->page_table)
  96. {
  97. rt_pages_free(lwp->aspace->page_table, 0);
  98. lwp->aspace->page_table = NULL;
  99. }
  100. }
  101. long _sys_clone(void *arg[]);
  102. long sys_clone(void *arg[])
  103. {
  104. return _sys_clone(arg);
  105. }
  106. long _sys_fork(void);
  107. long sys_fork(void)
  108. {
  109. return _sys_fork();
  110. }
  111. long _sys_vfork(void);
  112. long sys_vfork(void)
  113. {
  114. return _sys_fork();
  115. }
  116. /**
  117. * set exec context for fork/clone.
  118. */
  119. int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
  120. void *user_stack, void **thread_sp)
  121. {
  122. RT_ASSERT(exit != RT_NULL);
  123. RT_ASSERT(user_stack != RT_NULL);
  124. RT_ASSERT(new_thread_stack != RT_NULL);
  125. RT_ASSERT(thread_sp != RT_NULL);
  126. struct rt_hw_stack_frame *syscall_frame;
  127. struct rt_hw_stack_frame *thread_frame;
  128. rt_uint8_t *stk;
  129. rt_uint8_t *syscall_stk;
  130. stk = (rt_uint8_t *)new_thread_stack;
  131. /* reserve syscall context, all the registers are copyed from parent */
  132. stk -= CTX_REG_NR * REGBYTES;
  133. syscall_stk = stk;
  134. syscall_frame = (struct rt_hw_stack_frame *)stk;
  135. /* modify user sp */
  136. syscall_frame->user_sp_exc_stack = (rt_ubase_t)user_stack;
  137. /* skip ecall */
  138. syscall_frame->epc += 4;
  139. /* child return value is 0 */
  140. syscall_frame->a0 = 0;
  141. syscall_frame->a1 = 0;
  142. /* reset thread area */
  143. rt_thread_t thread = rt_container_of((unsigned long)thread_sp, struct rt_thread, sp);
  144. syscall_frame->tp = (rt_ubase_t)thread->thread_idr;
  145. #ifdef ARCH_USING_NEW_CTX_SWITCH
  146. extern void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus);
  147. rt_ubase_t sstatus = read_csr(sstatus) | SSTATUS_SPP;
  148. sstatus &= ~SSTATUS_SIE;
  149. /* compatible to RESTORE_CONTEXT */
  150. stk = (void *)_rt_hw_stack_init((rt_ubase_t *)stk, (rt_ubase_t)exit, sstatus);
  151. #else
  152. /* build temp thread context */
  153. stk -= sizeof(struct rt_hw_stack_frame);
  154. thread_frame = (struct rt_hw_stack_frame *)stk;
  155. int i;
  156. for (i = 0; i < sizeof(struct rt_hw_stack_frame) / sizeof(rt_ubase_t); i++)
  157. {
  158. ((rt_ubase_t *)thread_frame)[i] = 0xdeadbeaf;
  159. }
  160. /* set pc for thread */
  161. thread_frame->epc = (rt_ubase_t)exit;
  162. /* set old exception mode as supervisor, because in kernel */
  163. thread_frame->sstatus = read_csr(sstatus) | SSTATUS_SPP;
  164. thread_frame->sstatus &= ~SSTATUS_SIE; /* must disable interrupt */
  165. /* set stack as syscall stack */
  166. thread_frame->user_sp_exc_stack = (rt_ubase_t)syscall_stk;
  167. #endif /* ARCH_USING_NEW_CTX_SWITCH */
  168. /* save new stack top */
  169. *thread_sp = (void *)stk;
  170. /**
  171. * The stack for child thread:
  172. *
  173. * +------------------------+ --> kernel stack top
  174. * | syscall stack |
  175. * | |
  176. * | @sp | --> `user_stack`
  177. * | @epc | --> user ecall addr + 4 (skip ecall)
  178. * | @a0&a1 | --> 0 (for child return 0)
  179. * | |
  180. * +------------------------+ --> temp thread stack top
  181. * | temp thread stack | ^
  182. * | | |
  183. * | @sp | ---------/
  184. * | @epc | --> `exit` (arch_clone_exit/arch_fork_exit)
  185. * | |
  186. * +------------------------+ --> thread sp
  187. */
  188. }
  189. /**
  190. * void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
  191. */
  192. void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
  193. {
  194. arch_start_umode(args, user_entry, (void *)USER_STACK_VEND, kernel_stack);
  195. }
  196. void *arch_get_usp_from_uctx(struct rt_user_context *uctx)
  197. {
  198. return uctx->sp;
  199. }
  200. #endif /* ARCH_MM_MMU */