lwp_arch.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-7-14      JasonHu      first version
 */
#include <rthw.h>
#include <stddef.h>
#include <rtconfig.h>
#include <rtdbg.h>

#ifdef ARCH_MM_MMU

#include <stackframe.h>
#include <interrupt.h>
#include <segment.h>
#include <mmu.h>
#include <page.h>
#include <lwp_mm_area.h>
#include <lwp_user_mm.h>
#include <lwp_arch.h>

#ifdef RT_USING_SIGNALS
#include <lwp_signal.h>
#endif /* RT_USING_SIGNALS */

extern size_t g_mmu_table[];

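/**
 * Handle a fault on the user stack region: if the faulting address falls in
 * [USER_STACK_VSTART, USER_STACK_VEND), map one page on demand; otherwise
 * (or if the mapping fails) raise SIGSEGV on the current thread.
 * Returns 1 when the fault was handled or a signal was delivered, 0 otherwise.
 */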
int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    stack_addr &= ~PAGE_OFFSET_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map = lwp_map_user(lwp_self(), (void *)stack_addr, PAGE_SIZE, RT_FALSE);

        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1; /* map success */
        }
        else /* map failed, send SIGSEGV */
        {
#ifdef RT_USING_SIGNALS
            dbg_log(DBG_ERROR, "[fault] thread %s mapped addr %p failed!\n", rt_thread_self()->parent.name, addr);
            lwp_thread_kill(rt_thread_self(), SIGSEGV);
            ret = 1; /* return 1 so we go back to the interrupt exit, where the pending exit is checked */
#endif
        }
    }
    else /* not a stack address, send SIGSEGV */
    {
#ifdef RT_USING_SIGNALS
        dbg_log(DBG_ERROR, "[fault] thread %s access unmapped addr %p!\n", rt_thread_self()->parent.name, addr);
        lwp_thread_kill(rt_thread_self(), SIGSEGV);
        ret = 1; /* return 1 so we go back to the interrupt exit, where the pending exit is checked */
#endif
    }
    return ret;
}

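/* not used on i386, always returns RT_NULL */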
void *get_thread_kernel_stack_top(rt_thread_t thread)
{
    return RT_NULL;
}

/**
 * Not supported on i386; returning RT_NULL here is fine.
 */
void *arch_get_user_sp(void)
{
    return RT_NULL;
}

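/**
 * Allocate the per-process page directory for an lwp: copy the first quarter
 * of the kernel page directory (g_mmu_table), clear the rest for user space,
 * then initialize the lwp's mmu_info for the user address range.
 */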
int arch_user_space_init(struct rt_lwp *lwp)
{
    rt_size_t *mmu_table;

    mmu_table = (rt_size_t *)rt_pages_alloc(0);
    if (!mmu_table)
    {
        return -1;
    }
    rt_memset(mmu_table, 0, ARCH_PAGE_SIZE);

    lwp->end_heap = USER_HEAP_VADDR;

    memcpy(mmu_table, g_mmu_table, ARCH_PAGE_SIZE / 4);
    memset((rt_uint8_t *)mmu_table + ARCH_PAGE_SIZE / 4, 0, ARCH_PAGE_SIZE / 4 * 3);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);

    if (rt_hw_mmu_map_init(&lwp->mmu_info, (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table, PV_OFFSET) < 0)
    {
        rt_pages_free(mmu_table, 0);
        return -1;
    }
    return 0;
}

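/* the kernel's own page directory is g_mmu_table */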
void *arch_kernel_mmu_table_get(void)
{
    return (void *)g_mmu_table;
}

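/* free the page directory allocated by arch_user_space_init */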
void arch_user_space_vtable_free(struct rt_lwp *lwp)
{
    if (lwp && lwp->mmu_info.vtable)
    {
        rt_pages_free(lwp->mmu_info.vtable, 0);
        lwp->mmu_info.vtable = NULL;
    }
}

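/* set the TLS segment base for the current thread and remember it as the thread idr */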
void arch_set_thread_area(void *p)
{
    rt_hw_seg_tls_set((rt_ubase_t)p);
    rt_thread_t cur = rt_thread_self();
    cur->thread_idr = p; /* update thread idr after the first set */
}

void *arch_get_tidr(void)
{
    rt_thread_t cur = rt_thread_self();
    if (!cur->lwp) /* no lwp, don't read the thread idr from the TLS segment */
        return NULL;
    return (void *)rt_hw_seg_tls_get(); /* read the thread idr from the TLS segment */
}

void arch_set_tidr(void *p)
{
    rt_thread_t cur = rt_thread_self();
    if (!cur->lwp) /* no lwp, don't write the thread idr to the TLS segment */
        return;
    rt_hw_seg_tls_set((rt_ubase_t)p); /* use the TLS segment base as the thread idr */
}

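/**
 * Fill a stack frame with the user-mode segment selectors, zeroed
 * general-purpose registers and an eflags value for running in ring 3
 * with interrupts enabled (IF = 1, IOPL = 3).
 */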
static void lwp_user_stack_init(rt_hw_stack_frame_t *frame)
{
    frame->ds = frame->es = USER_DATA_SEL;
    frame->cs = USER_CODE_SEL;
    frame->ss = USER_STACK_SEL;
    frame->gs = USER_TLS_SEL;
    frame->fs = 0; /* unused */

    frame->edi = frame->esi = \
        frame->ebp = frame->esp_dummy = 0;
    frame->eax = frame->ebx = \
        frame->ecx = frame->edx = 0;

    frame->error_code = 0;
    frame->vec_no = 0;

    frame->eflags = (EFLAGS_MBS | EFLAGS_IF_1 | EFLAGS_IOPL_3);
}

extern void lwp_switch_to_user(void *frame);

/**
 * User entry: build the initial stack frame.
 * At the end of exec we need to enter user mode. On x86 we can put the user
 * stack, argument and text entry into a stack frame, pop them into the
 * registers, and finally use iret to switch from kernel mode to user mode.
 */
void arch_start_umode(void *args, const void *text, void *ustack, void *k_stack)
{
    rt_uint8_t *stk = k_stack;
    stk -= sizeof(struct rt_hw_stack_frame);

    struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)stk;
    lwp_user_stack_init(frame);

    frame->esp = (rt_uint32_t)ustack - 32;
    frame->ebx = (rt_uint32_t)args;
    frame->eip = (rt_uint32_t)text;

    lwp_switch_to_user(frame);
    /* should never return here */
}

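/* exec entry: run user_entry in user mode with the stack at USER_STACK_VEND */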
void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
{
    arch_start_umode(args, (const void *)user_entry, (void *)USER_STACK_VEND, kernel_stack);
}

extern void lwp_thread_return();
extern void lwp_thread_return_end();

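/**
 * Copy the thread return stub (lwp_thread_return .. lwp_thread_return_end)
 * onto the top of the user stack and return its address on that stack.
 */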
static void *lwp_copy_return_code_to_user_stack(void *ustack)
{
    size_t size = (size_t)lwp_thread_return_end - (size_t)lwp_thread_return;
    void *retcode = (void *)((size_t)ustack - size);
    memcpy(retcode, (void *)lwp_thread_return, size);
    return retcode;
}

/**
 * sys_thread_create creates a thread; after that thread is started it ends up
 * here. As in arch_start_umode we enter user mode, but we must also install a
 * thread exit path. The thread body looks like:
 *
 * void func(void *arg)
 * {
 *     ...
 * }
 *
 * When func returns, exit code must run to terminate the thread, otherwise the
 * program would run off into invalid code. So we copy the exit code onto the
 * user stack and make it the return address of func.
 */
void arch_crt_start_umode(void *args, const void *text, void *ustack, void *k_stack)
{
    RT_ASSERT(ustack != NULL);

    rt_uint8_t *stk;
    stk = (rt_uint8_t *)((rt_uint8_t *)k_stack + sizeof(rt_ubase_t));
    stk = (rt_uint8_t *)RT_ALIGN_DOWN(((rt_ubase_t)stk), sizeof(rt_ubase_t));
    stk -= sizeof(struct rt_hw_stack_frame);

    struct rt_hw_stack_frame *frame = (struct rt_hw_stack_frame *)stk;
    lwp_user_stack_init(frame);

    /* make the user thread stack */
    unsigned long *retcode = lwp_copy_return_code_to_user_stack(ustack); /* copy the return stub */
    unsigned long *retstack = (unsigned long *)RT_ALIGN_DOWN(((rt_ubase_t)retcode), sizeof(rt_ubase_t));

    /**
     * x86 call stack:
     *
     * retcode here
     *
     * arg n
     * arg n - 1
     * ...
     * arg 2
     * arg 1
     * arg 0
     * eip (caller return addr, points to retcode)
     * esp
     */
    *(--retstack) = (unsigned long)args;    /* arg */
    *(--retstack) = (unsigned long)retcode; /* return eip */

    frame->esp = (rt_uint32_t)retstack;
    frame->eip = (rt_uint32_t)text;

    lwp_switch_to_user(frame);
    /* should never return here */
}

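/* recover the rt_thread object from the address of its sp member */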
rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
{
    return (rt_thread_t)(((rt_ubase_t)spmember_addr) - (offsetof(struct rt_thread, sp)));
}

/**
 * Set the exec context for fork/clone.
 * user_stack is unused here.
 */
void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp)
{
    /**
     * The thread kernel stack top is loaded into tss.esp0; when an interrupt or
     * syscall occurs, the stack frame is stored at the top of the kernel stack,
     * so we can recover the stack frame from the kernel stack top.
     */
    rt_hw_stack_frame_t *frame = (rt_hw_stack_frame_t *)((rt_ubase_t)new_thread_stack - sizeof(rt_hw_stack_frame_t));
    frame->eax = 0; /* the child returns 0 */

    rt_hw_context_t *context = (rt_hw_context_t *)(((rt_uint32_t *)frame) - HW_CONTEXT_MEMBER_NR);
    context->eip = (void *)exit_addr; /* when the thread starts, jump to the interrupt exit to enter user mode */
    context->ebp = context->ebx = context->esi = context->edi = 0;

    /**
     * Set sp to the address of the first member of rt_hw_context; when the
     * scheduler switches to this thread, the registers are popped from here.
     */
    *thread_sp = (void *)&context->ebp;

    /**
     * After the context is set, the stack looks like this:
     *
     * -----------
     * stack frame  | eax = 0
     * -----------
     * context (only HW_CONTEXT_MEMBER_NR members) | eip = rt_hw_intr_exit
     * -----------
     * thread sp    | to <- rt_hw_context_switch(from, to)
     * -----------
     */
}

#ifdef RT_USING_SIGNALS

#define SIGNAL_RET_CODE_SIZE 16

struct rt_signal_frame
{
    char *ret_addr;     /* return address used when the handler returns */
    int signo;          /* signal number passed to the user handler */
    rt_hw_stack_frame_t frame;              /* stack frame saved when the signal was taken */
    char ret_code[SIGNAL_RET_CODE_SIZE];    /* copy of the signal return stub */
};
typedef struct rt_signal_frame rt_signal_frame_t;

extern void lwp_signal_return();
extern void lwp_signal_return_end();

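/**
 * Check for a pending signal. If one is pending and a handler is installed,
 * save the interrupted frame into a signal frame on the user stack and rewrite
 * the trap frame so that execution resumes in the user signal handler,
 * returning through the copied signal return stub.
 */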
void lwp_try_do_signal(rt_hw_stack_frame_t *frame)
{
    if (!lwp_signal_check())
        return;

    /* 1. back up the signal mask */
    int signal = lwp_signal_backup((void *)frame->esp, (void *)frame->eip, (void *)frame->eflags);

    /* 2. get the signal handler */
    lwp_sighandler_t handler = lwp_sighandler_get(signal);
    if (handler == RT_NULL) /* no handler, ignore */
    {
        lwp_signal_restore();
        return;
    }

    rt_base_t level = rt_hw_interrupt_disable();

    /* 3. back up the frame */
    rt_signal_frame_t *sig_frame = (rt_signal_frame_t *)((frame->esp - sizeof(rt_signal_frame_t)) & -8UL);
    memcpy(&sig_frame->frame, frame, sizeof(rt_hw_stack_frame_t));
    sig_frame->signo = signal;

    /**
     * 4. copy the user return code onto the user stack
     *
     * The current frame is saved on the user stack; the user stack looks like:
     *
     * ----------
     * user code stack
     * ----------+ -> esp before entering the kernel
     * signal frame
     * ----------+ -> esp while the signal handler runs
     * signal handler stack
     * ----------
     */
    size_t ret_code_size = (size_t)lwp_signal_return_end - (size_t)lwp_signal_return;
    memcpy(sig_frame->ret_code, (void *)lwp_signal_return, ret_code_size);
    sig_frame->ret_addr = sig_frame->ret_code;

    /* 5. jump to the user handler: update the frame's register info */
    lwp_user_stack_init(frame);
    frame->eip = (rt_uint32_t)handler;
    frame->esp = (rt_uint32_t)sig_frame;

    rt_hw_interrupt_enable(level);
}

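/**
 * Undo lwp_try_do_signal when the user signal handler returns through the
 * return stub: recover the signal frame from the current esp and restore
 * the stack frame that was saved when the signal was taken.
 */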
void lwp_signal_do_return(rt_hw_stack_frame_t *frame)
{
    /**
     * ASSUME: on x86 every stack push/pop moves 4 bytes, so STACK_ELEM_SIZE = sizeof(int) = 4.
     * When the signal handler returns, esp sits at the bottom of the signal frame,
     * but the return itself pops eip, leaving {esp = (signal frame) + STACK_ELEM_SIZE}.
     * Therefore {(signal frame) = esp - STACK_ELEM_SIZE}.
     */
    rt_signal_frame_t *sig_frame = (rt_signal_frame_t *)(frame->esp - sizeof(rt_uint32_t));
    memcpy(frame, &sig_frame->frame, sizeof(rt_hw_stack_frame_t));

    /**
     * Restore the signal info. We don't use rt_user_context here;
     * sig_frame is used to restore the stack frame instead.
     */
    lwp_signal_restore();
}

#endif /* RT_USING_SIGNALS */
#endif /* ARCH_MM_MMU */