lwp_arch.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-18     Jesven       first version
 * 2023-07-16     Shell        Move part of the codes to C from asm in signal handling
 * 2023-10-16     Shell        Support a new backtrace framework
 */
#include <armv8.h>
#include <rthw.h>
#include <rtthread.h>
#include <stdlib.h>
#include <string.h>

#include <lwp_internal.h>

#ifdef ARCH_MM_MMU

#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp_arch.h>
#include <lwp_user_mm.h>

extern size_t MMUTable[];
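
/*
 * Create the per-process user address space: allocate a fresh page table and
 * hand it to rt_aspace_create(), which manages the user virtual range
 * [USER_VADDR_START, USER_VADDR_TOP). A minimal caller sketch (hypothetical,
 * for illustration only; the real call sites live in the lwp setup paths):
 *
 *     struct rt_lwp *lwp = ...;            // freshly allocated lwp
 *     if (arch_user_space_init(lwp) != 0)  // -RT_ENOMEM / -RT_ERROR on failure
 *         goto cleanup;
 */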
int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

    mmu_table = rt_hw_mmu_pgtbl_create();
    if (mmu_table)
    {
        lwp->end_heap = USER_HEAP_VADDR;
        lwp->aspace = rt_aspace_create(
            (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
        if (!lwp->aspace)
        {
            return -RT_ERROR;
        }
    }
    else
    {
        return -RT_ENOMEM;
    }

    return 0;
}
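
/*
 * On AArch64 the kernel mappings live in their own translation regime
 * (presumably TTBR1_EL1, with TTBR0_EL1 reserved for user mappings), so a
 * user page table needs no copy of kernel entries and there is no shared
 * kernel MMU table to hand out here.
 */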
void *arch_kernel_mmu_table_get(void)
{
    return (void *)NULL;
}
void arch_user_space_free(struct rt_lwp *lwp)
{
    if (lwp)
    {
        RT_ASSERT(lwp->aspace);

        void *pgtbl = lwp->aspace->page_table;
        rt_aspace_delete(lwp->aspace);

        /* must be freed after aspace delete, pgtbl is required for unmap */
        rt_pages_free(pgtbl, 0);
        lwp->aspace = NULL;
    }
    else
    {
        LOG_W("%s: NULL lwp as parameter", __func__);
        RT_ASSERT(0);
    }
}
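
/*
 * Grow the user stack on demand. Given a faulting address inside the user
 * stack window, map one page there and report success (1) or failure (0).
 * A sketch of the intended caller, assuming the page-fault path passes the
 * faulting virtual address through (names are illustrative, not the actual
 * trap-handler API):
 *
 *     if (!arch_expand_user_stack(fault_addr))
 *         deliver_sigsegv(...);    // hypothetical: stack cannot grow further
 */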
int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    stack_addr &= ~ARCH_PAGE_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) &&
        (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map =
            lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);

        /* the map call may fail although the page is accessible,
         * e.g. when another thread has already mapped it */
        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1;
        }
    }

    return ret;
}
#endif /* ARCH_MM_MMU */
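
/*
 * Prepare the initial context for a thread created by fork/clone. Two
 * exception frames are carved from the top of the child's kernel stack:
 *
 *     new_thread_stack ->  +----------------+  high address
 *                          | thread_frame   |  pc = exit, EL1h, IRQ/FIQ masked
 *                          +----------------+
 *     *thread_sp       ->  | syscall_frame  |  copy of the parent's syscall
 *                          +----------------+  frame, x0 = 0, sp_el0 = user_stack
 *
 * Presumably the exit trampoline is the fork/clone return path that finally
 * consumes the copied syscall frame, so the child resumes in user mode right
 * after the syscall with 0 as its return value.
 */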
int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
                            void *user_stack, void **thread_sp)
{
    struct rt_hw_exp_stack *syscall_frame;
    struct rt_hw_exp_stack *thread_frame;
    struct rt_hw_exp_stack *ori_syscall = rt_thread_self()->user_ctx.ctx;
    RT_ASSERT(ori_syscall != RT_NULL);

    thread_frame = (void *)((long)new_thread_stack - sizeof(struct rt_hw_exp_stack));
    syscall_frame = (void *)((long)new_thread_stack - 2 * sizeof(struct rt_hw_exp_stack));

    memcpy(syscall_frame, ori_syscall, sizeof(*syscall_frame));
    syscall_frame->sp_el0 = (long)user_stack;
    syscall_frame->x0 = 0;

    /* SPSR: EL1h (M[3:0] = 0b0101) with IRQ and FIQ masked */
    thread_frame->cpsr = ((3 << 6) | 0x4 | 0x1);
    thread_frame->pc = (long)exit;
    thread_frame->x0 = 0;

    *thread_sp = syscall_frame;

    return 0;
}
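
/*
 * Layout of the signal frame pushed onto the user stack while a handler
 * runs. The members, from low to high address on the stack:
 *
 *     sigreturn     8-byte trampoline copied from lwp_sigreturn
 *     save_sigmask  signal mask to restore on sigreturn
 *     si            siginfo pushed for the handler
 *     frame         saved exception context, 16-byte aligned per AAPCS64
 */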
#define ALIGN_BYTES (16)

struct signal_ucontext
{
    rt_int64_t sigreturn;
    lwp_sigset_t save_sigmask;

    siginfo_t si;

    rt_align(ALIGN_BYTES)
    struct rt_hw_exp_stack frame;
};
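
/*
 * Undo arch_signal_ucontext_save() when the handler returns through the
 * sigreturn trampoline: validate the frame, restore the saved signal mask,
 * and return the address one past the saved exception frame (presumably the
 * asm restore path walks the saved context from that boundary).
 */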
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)user_sp;

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
    }
    else
    {
        LOG_I("User frame corrupted during signal handling\nexiting...");
        sys_exit_group(EXIT_FAILURE);
    }

    return (char *)&new_sp->frame + sizeof(struct rt_hw_exp_stack);
}
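
/*
 * Build the signal frame below the current user stack pointer before the
 * handler is entered: push siginfo, the trapped exception context, the
 * signal mask to restore, and the 8-byte sigreturn trampoline. The returned
 * pointer (also the address of the trampoline, since sigreturn is the lowest
 * member) presumably becomes the handler's stack pointer.
 */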
void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
                                struct rt_hw_exp_stack *exp_frame,
                                lwp_sigset_t *save_sig_mask)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        /* push psiginfo */
        if (psiginfo)
        {
            lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
        }

        /* exp frame is already aligned as AAPCS64 requires */
        lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));

        /* copy the save_sig_mask */
        lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));

        /* copy lwp_sigreturn, ensuring the trampoline starts at the
         * outermost (lowest) boundary of the frame */
        const size_t lwp_sigreturn_bytes = 8;
        extern void lwp_sigreturn(void);
        lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
    }
    else
    {
        LOG_I("%s: User stack overflow", __func__);
        sys_exit_group(EXIT_FAILURE);
    }

    return new_sp;
}
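
/*
 * Seed a user-mode backtrace from the thread's saved exception context. The
 * context pointer is only trusted when it lies inside the thread's kernel
 * stack; pc and the AAPCS64 frame pointer (x29) then start the unwind.
 */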
int arch_backtrace_uthread(rt_thread_t thread)
{
    struct rt_hw_backtrace_frame frame;
    struct rt_hw_exp_stack *stack;

    if (thread && thread->lwp)
    {
        stack = thread->user_ctx.ctx;
        if ((unsigned long)stack > (unsigned long)thread->stack_addr
            && (unsigned long)stack < (unsigned long)thread->stack_addr + thread->stack_size)
        {
            frame.pc = stack->pc;
            frame.fp = stack->x29;
            lwp_backtrace_frame(thread, &frame);
            return 0;
        }
        else
        {
            return -1;
        }
    }
    return -1;
}