lwp_arch.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2023-07-16     Shell        Move part of the codes to C from asm in signal handling
 */

#include <rthw.h>
#include <rtthread.h>

#include <stddef.h>
#include <stdlib.h>

#ifdef ARCH_MM_MMU

#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp_arch.h>
#include <lwp_user_mm.h>

#define KPTE_START (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT)
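
/*
 * Create the per-process user address space.
 *
 * rt_pages_alloc(2) returns 2^2 = 4 contiguous pages, i.e. the 16 KB
 * first-level translation table (4096 word-sized entries). The kernel
 * section entries from KPTE_START upward are copied from the global
 * kernel page table, the user portion (the first 3 pages) is cleared,
 * and the whole table is flushed from the D-cache so the MMU table
 * walker sees consistent data. Note that the 3-page/1-page split
 * assumes the usual 3 GB/1 GB user/kernel layout
 * (KERNEL_VADDR_START == 0xC0000000).
 */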
int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

    mmu_table = (size_t *)rt_pages_alloc(2);
    if (!mmu_table)
    {
        return -RT_ENOMEM;
    }

    lwp->end_heap = USER_HEAP_VADDR;

    rt_memcpy(mmu_table + KPTE_START, (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
    rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);

    lwp->aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        return -RT_ERROR;
    }

    return 0;
}
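
/*
 * Map the kuser helper page into the process at the vectors address.
 *
 * The helpers assembled between __kuser_helper_start/__kuser_helper_end
 * are copied to the top of the read-only vectors page, mirroring the
 * Linux ARM kuser helper convention; the in-line comment below records
 * the fixed offsets (e.g. __kuser_get_tls at vectors + 0xfe0). Both
 * caches are maintained afterwards because user space executes from
 * this page.
 */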
static struct rt_varea kuser_varea;

void arch_kuser_init(rt_aspace_t aspace, void *vectors)
{
    int err;
    const size_t kuser_size = 0x1000;
    extern char __kuser_helper_start[];
    extern char __kuser_helper_end[];
    rt_base_t start = (rt_base_t)__kuser_helper_start;
    rt_base_t end = (rt_base_t)__kuser_helper_end;
    int kuser_sz = end - start;

    err = rt_aspace_map_static(aspace, &kuser_varea, &vectors, kuser_size,
                               MMU_MAP_U_RO, MMF_MAP_FIXED | MMF_PREFETCH,
                               &rt_mm_dummy_mapper, 0);
    if (err != 0)
        while (1)
            ; /* early failed */

    lwp_memcpy((void *)((char *)vectors + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz);

    /*
     * vectors + 0xfe0 = __kuser_get_tls
     * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
     */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
    rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
}
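
/*
 * Tear down a user address space: delete the aspace first (unmapping
 * still needs the page table), then release the 4-page L1 table that
 * arch_user_space_init allocated.
 */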
void arch_user_space_free(struct rt_lwp *lwp)
{
    if (lwp)
    {
        RT_ASSERT(lwp->aspace);

        void *pgtbl = lwp->aspace->page_table;
        rt_aspace_delete(lwp->aspace);

        /* must be freed after aspace delete, pgtbl is required for unmap */
        rt_pages_free(pgtbl, 2);
        lwp->aspace = RT_NULL;
    }
    else
    {
        LOG_W("%s: NULL lwp as parameter", __func__);
        RT_ASSERT(0);
    }
}
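
/*
 * Grow the user stack on demand (typically driven by a fault on a
 * not-yet-mapped stack page): the address is rounded down to a page
 * boundary, checked against the stack window, and a single page is
 * mapped. Returns 1 if the address is (now) accessible, 0 otherwise.
 */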
int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    stack_addr &= ~ARCH_PAGE_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map = lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);

        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1;
        }
    }
    return ret;
}
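
/*
 * Layout of the signal delivery frame pushed onto the user stack:
 * the lwp_sigreturn trampoline at the lowest address, the signal mask
 * that was in effect, the siginfo, and the saved register frame
 * (lr, spsr, r0-r12, ip) that is restored on sigreturn.
 */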
#define ALGIN_BYTES 8
#define lwp_sigreturn_bytes 8

struct signal_regs {
    rt_base_t lr;
    rt_base_t spsr;
    rt_base_t r0_to_r12[13];
    rt_base_t ip;
};

struct signal_ucontext
{
    rt_base_t sigreturn[lwp_sigreturn_bytes / sizeof(rt_base_t)];
    lwp_sigset_t save_sigmask;

    siginfo_t si;

    rt_align(8)
    struct signal_regs frame;
};
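
/*
 * Called on sigreturn with the user stack pointer that addresses the
 * signal_ucontext frame. It re-installs the saved signal mask and swaps
 * frame.ip and frame.lr: the exception return path picks the resume
 * address from frame.lr, while user code restores its original lr from
 * frame.ip. If the frame fails the access check the process is killed.
 * Returns a pointer to the saved register frame.
 */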
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
    struct signal_ucontext *new_sp;
    rt_base_t ip;
    new_sp = (void *)user_sp;

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
        ip = new_sp->frame.ip;
        /* let user restore its lr from frame.ip */
        new_sp->frame.ip = new_sp->frame.lr;
        /* kernel will pick eip from frame.lr */
        new_sp->frame.lr = ip;
    }
    else
    {
        LOG_I("User frame corrupted during signal handling\nexiting...");
        sys_exit_group(EXIT_FAILURE);
    }

    return (void *)&new_sp->frame;
}
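
/*
 * Build the signal_ucontext frame just below the current user stack
 * pointer before the handler runs: the siginfo (if any), r0-r12 and ip
 * copied from exp_frame, the handler return lr, the current spsr, the
 * previous signal mask, and the lwp_sigreturn trampoline code. Kills
 * the process if the frame would not fit in accessible user memory.
 */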
void *arch_signal_ucontext_save(rt_base_t lr, siginfo_t *psiginfo,
                                struct signal_regs *exp_frame, rt_base_t user_sp,
                                lwp_sigset_t *save_sig_mask)
{
    rt_base_t spsr;
    struct signal_ucontext *new_sp;
    new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        /* push psiginfo */
        if (psiginfo)
        {
            lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
        }

        lwp_memcpy(&new_sp->frame.r0_to_r12, exp_frame, sizeof(new_sp->frame.r0_to_r12) + sizeof(rt_base_t));
        new_sp->frame.lr = lr;

        __asm__ volatile("mrs %0, spsr":"=r"(spsr));
        new_sp->frame.spsr = spsr;

        /* copy the save_sig_mask */
        lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));

        /* copy lwp_sigreturn */
        extern void lwp_sigreturn(void);
        /* -> ensure that the sigreturn start at the outer most boundary */
        lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
    }
    else
    {
        LOG_I("%s: User stack overflow", __func__);
        sys_exit_group(EXIT_FAILURE);
    }

    return new_sp;
}
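
/*
 * Simple generation-based ASID allocator (8-bit ASIDs, values 1..255;
 * ASID 0 is reserved for the kernel). When every ASID in the current
 * generation is taken, the generation counter is bumped, the bitmap is
 * reset, and the whole unified TLB is invalidated (TLBIALL via
 * "mcr p15, 0, r0, c8, c7, 0") before handing out ASID 1 again.
 */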
#ifdef LWP_ENABLE_ASID
#define MAX_ASID_BITS 8
#define MAX_ASID (1 << MAX_ASID_BITS)
static uint64_t global_generation = 1;
static char asid_valid_bitmap[MAX_ASID];

unsigned int arch_get_asid(struct rt_lwp *lwp)
{
    if (lwp == RT_NULL)
    {
        /* kernel */
        return 0;
    }

    if (lwp->generation == global_generation)
    {
        return lwp->asid;
    }

    if (lwp->asid && !asid_valid_bitmap[lwp->asid])
    {
        asid_valid_bitmap[lwp->asid] = 1;
        return lwp->asid;
    }

    for (unsigned i = 1; i < MAX_ASID; i++)
    {
        if (asid_valid_bitmap[i] == 0)
        {
            asid_valid_bitmap[i] = 1;
            lwp->generation = global_generation;
            lwp->asid = i;
            return lwp->asid;
        }
    }

    global_generation++;
    memset(asid_valid_bitmap, 0, MAX_ASID * sizeof(char));

    asid_valid_bitmap[1] = 1;
    lwp->generation = global_generation;
    lwp->asid = 1;

    asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");

    return lwp->asid;
}
#endif

#endif