lwp_arch.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-18     Jesven       first version
 * 2023-07-16     Shell        Move part of the codes to C from asm in signal handling
 */

#include <armv8.h>
#include <rthw.h>
#include <rtthread.h>
#include <stdlib.h>
#include <string.h>

#include <lwp_signal.h>

#ifdef ARCH_MM_MMU

#define DBG_TAG "lwp.arch"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp_arch.h>
#include <lwp_user_mm.h>

extern size_t MMUTable[];
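
/*
 * Allocate one page as the root page table of the user address space and
 * create an aspace covering [USER_VADDR_START, USER_VADDR_TOP). Returns 0
 * on success or a negative error code on failure.
 */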
int arch_user_space_init(struct rt_lwp *lwp)
{
    size_t *mmu_table;

    mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!mmu_table)
    {
        return -RT_ENOMEM;
    }

    lwp->end_heap = USER_HEAP_VADDR;

    memset(mmu_table, 0, ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);

    lwp->aspace = rt_aspace_create(
        (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        /* avoid leaking the page table page when aspace creation fails */
        rt_pages_free(mmu_table, 0);
        return -RT_ERROR;
    }

    return 0;
}

void *arch_kernel_mmu_table_get(void)
{
    return (void *)NULL;
}

void arch_user_space_free(struct rt_lwp *lwp)
{
    if (lwp)
    {
        RT_ASSERT(lwp->aspace);
        void *pgtbl = lwp->aspace->page_table;
        rt_aspace_delete(lwp->aspace);

        /* must be freed after aspace delete, pgtbl is required for unmap */
        rt_pages_free(pgtbl, 0);
        lwp->aspace = NULL;
    }
    else
    {
        LOG_W("%s: NULL lwp as parameter", __func__);
        RT_ASSERT(0);
    }
}
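
/*
 * Grow the user stack on demand: if the faulting address falls inside
 * [USER_STACK_VSTART, USER_STACK_VEND), map one page at the page-aligned
 * address. Returns 1 when the address is (now) accessible, 0 otherwise.
 */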
int arch_expand_user_stack(void *addr)
{
    int ret = 0;
    size_t stack_addr = (size_t)addr;

    stack_addr &= ~ARCH_PAGE_MASK;
    if ((stack_addr >= (size_t)USER_STACK_VSTART) &&
        (stack_addr < (size_t)USER_STACK_VEND))
    {
        void *map =
            lwp_map_user(lwp_self(), (void *)stack_addr, ARCH_PAGE_SIZE, 0);

        if (map || lwp_user_accessable(addr, 1))
        {
            ret = 1;
        }
    }
    return ret;
}

#endif

#define ALGIN_BYTES (16)

struct signal_ucontext
{
    rt_int64_t sigreturn;
    lwp_sigset_t save_sigmask;
    siginfo_t si;

    rt_align(16)
    struct rt_hw_exp_stack frame;
};
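
/*
 * Note (documentation added here, not part of the original source): a
 * struct signal_ucontext is pushed onto the user stack by
 * arch_signal_ucontext_save(). Growing downwards from the pre-signal
 * stack pointer, the exception frame sits at the top, followed by the
 * saved signal mask, the siginfo and, at the lowest address, a copy of
 * the lwp_sigreturn stub. arch_signal_ucontext_restore() undoes this:
 * assuming sizeof(struct rt_hw_exp_stack) is a multiple of 16 (so the
 * struct has no tail padding), its return value
 *     (char *)&new_sp->frame + sizeof(struct rt_hw_exp_stack)
 * is exactly the user_sp that was originally passed to the save routine.
 */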
void *arch_signal_ucontext_restore(rt_base_t user_sp)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)user_sp;

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
    }
    else
    {
        LOG_I("User frame corrupted during signal handling\nexiting...");
        sys_exit_group(EXIT_FAILURE);
    }

    return (char *)&new_sp->frame + sizeof(struct rt_hw_exp_stack);
}

void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
                                struct rt_hw_exp_stack *exp_frame,
                                lwp_sigset_t *save_sig_mask)
{
    struct signal_ucontext *new_sp;
    new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));

    if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
    {
        /* push psiginfo */
        if (psiginfo)
        {
            lwp_memcpy(&new_sp->si, psiginfo, sizeof(*psiginfo));
        }

        /* exp frame is already aligned as AAPCS64 required */
        lwp_memcpy(&new_sp->frame, exp_frame, sizeof(*exp_frame));

        /* copy the save_sig_mask */
        lwp_memcpy(&new_sp->save_sigmask, save_sig_mask, sizeof(lwp_sigset_t));

        /* copy lwp_sigreturn */
        const size_t lwp_sigreturn_bytes = 8;
        extern void lwp_sigreturn(void);
        /* -> ensure that the sigreturn start at the outer most boundary */
        lwp_memcpy(&new_sp->sigreturn, &lwp_sigreturn, lwp_sigreturn_bytes);
    }
    else
    {
        LOG_I("%s: User stack overflow", __func__);
        sys_exit_group(EXIT_FAILURE);
    }

    return new_sp;
}
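
/*
 * Sketch of the intended flow (an assumption based on this file alone; the
 * user-mode side and the dispatch code live elsewhere): on signal delivery
 * the kernel calls arch_signal_ucontext_save() to build the context above on
 * the user stack and hands new_sp to the handler as its stack pointer. When
 * the handler returns, it runs the copied lwp_sigreturn stub, which traps
 * back into the kernel; arch_signal_ucontext_restore(new_sp) then re-installs
 * the saved signal mask, and its return value, pointing just past the saved
 * exception frame, recovers the pre-signal user stack pointer.
 */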