mm_fault.c
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-06     WangXiaoyao  the first version
 * 2023-08-19     Shell        Support PRIVATE mapping and COW
 */

#include <rtthread.h>

#ifdef RT_USING_SMART
#define DBG_TAG "mm.fault"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp.h>
#include <lwp_syscall.h>

#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_private.h"
#include <mmu.h>
#include <tlb.h>

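/* Ask the varea's backing memory object to supply the missing page via its
 * on_page_fault callback, then map the result with the populated message.
 * Returns MM_FAULT_FIXABLE_TRUE only when rt_varea_map_with_msg() reports
 * RT_EOK. */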
static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
    int err = MM_FAULT_FIXABLE_FALSE;
    if (varea->mem_obj && varea->mem_obj->on_page_fault)
    {
        varea->mem_obj->on_page_fault(varea, msg);
        err = rt_varea_map_with_msg(varea, msg);
        err = (err == RT_EOK ? MM_FAULT_FIXABLE_TRUE : MM_FAULT_FIXABLE_FALSE);
    }
    return err;
}

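/* Handle a read access fault. Only a genuine page miss (no physical mapping
 * yet) is fixable here, by demand-paging the area; a read permission fault
 * is left unfixed for the caller to report. */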
static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    int err = MM_FAULT_FIXABLE_FALSE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
    }
    else
    {
        /* signal a fault to user? */
    }
    return err;
}

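/* Handle a write access fault. For PRIVATE (copy-on-write) mappings, a write
 * to a writable varea is resolved under the aspace write lock via
 * rt_varea_fix_private_locked(); for other mappings only a plain page miss
 * is fixable, by demand paging. */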
static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    rt_aspace_t aspace = varea->aspace;
    int err = MM_FAULT_FIXABLE_FALSE;

    if (rt_varea_is_private_locked(varea))
    {
        if (VAREA_IS_WRITABLE(varea) && (
            msg->fault_type == MM_FAULT_TYPE_RWX_PERM ||
            msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT))
        {
            RDWR_LOCK(aspace);
            err = rt_varea_fix_private_locked(varea, pa, msg, RT_FALSE);
            RDWR_UNLOCK(aspace);
            if (err == MM_FAULT_FIXABLE_FALSE)
                LOG_I("%s: fix private failure", __func__);
        }
        else
        {
            LOG_I("%s: No permission on %s(attr=0x%lx)", __func__, VAREA_NAME(varea), varea->attr);
        }
    }
    else if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
        if (err == MM_FAULT_FIXABLE_FALSE)
            LOG_I("%s: page fault failure", __func__);
    }
    else
    {
        LOG_D("%s: can not fix", __func__);
        /* signal a fault to user? */
    }
    return err;
}

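/* Handle an instruction-fetch fault. Only a page miss is fixable here; an
 * execute-permission violation is not fixed. */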
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    int err = MM_FAULT_FIXABLE_FALSE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
    }
    return err;
}

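/* Refine a generic MMU fault into either a page fault (no physical page
 * mapped yet) or an RWX permission fault, based on the requested access and
 * the varea's attributes. If the attributes do not grant the requested
 * access at all, the fault type is left untouched. */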
static void _determine_precise_fault_type(struct rt_aspace_fault_msg *msg, rt_ubase_t pa, rt_varea_t varea)
{
    if (msg->fault_type == MM_FAULT_TYPE_GENERIC_MMU)
    {
        rt_base_t requesting_perm;
        switch (msg->fault_op)
        {
        case MM_FAULT_OP_READ:
            requesting_perm = RT_HW_MMU_PROT_READ | RT_HW_MMU_PROT_USER;
            break;
        case MM_FAULT_OP_WRITE:
            requesting_perm = RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER;
            break;
        case MM_FAULT_OP_EXECUTE:
            requesting_perm = RT_HW_MMU_PROT_EXECUTE | RT_HW_MMU_PROT_USER;
            break;
        }

        /**
         * Always check against the user privilege, since dynamic permission
         * is not supported in kernel space: kernel faults of this kind are
         * never fixable, so adding the user bit to the test never changes
         * the outcome. In other words, { 0 && (expr) } is always false.
         */
        if (rt_hw_mmu_attr_test_perm(varea->attr, requesting_perm))
        {
            if (pa == (rt_ubase_t)ARCH_MAP_FAILED)
            {
                msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
            }
            else
            {
                msg->fault_type = MM_FAULT_TYPE_RWX_PERM;
            }
        }
    }
}

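/* Entry point of the lazy-fixing path, typically invoked from the
 * architecture-level fault handler. The fault address is aligned down to a
 * page boundary, the covering varea is looked up under the aspace read lock,
 * and the fault is dispatched by operation (read/write/execute). Returns
 * MM_FAULT_FIXABLE_TRUE when the fault was resolved, or was already fixed
 * concurrently, so the faulting access can be retried. */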
int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
{
    int err = MM_FAULT_FIXABLE_FALSE;
    uintptr_t va = (uintptr_t)msg->fault_vaddr;
    va &= ~ARCH_PAGE_MASK;
    msg->fault_vaddr = (void *)va;
    rt_mm_fault_res_init(&msg->response);

    RT_DEBUG_SCHEDULER_AVAILABLE(1);

    if (aspace)
    {
        rt_varea_t varea;

        RD_LOCK(aspace);
        varea = _aspace_bst_search(aspace, msg->fault_vaddr);
        if (varea)
        {
            void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
            _determine_precise_fault_type(msg, (rt_ubase_t)pa, varea);

            if (pa != ARCH_MAP_FAILED && msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
            {
                LOG_D("%s(fault=%p) has already been fixed", __func__, msg->fault_vaddr);
                err = MM_FAULT_FIXABLE_TRUE;
            }
            else
            {
                LOG_D("%s(varea=%s,fault=%p,fault_op=%d,phy=%p)", __func__, VAREA_NAME(varea), msg->fault_vaddr, msg->fault_op, pa);
                msg->off = varea->offset + ((long)msg->fault_vaddr - (long)varea->start) / ARCH_PAGE_SIZE;

                /* permission checked by fault op */
                switch (msg->fault_op)
                {
                case MM_FAULT_OP_READ:
                    err = _read_fault(varea, pa, msg);
                    break;
                case MM_FAULT_OP_WRITE:
                    err = _write_fault(varea, pa, msg);
                    break;
                case MM_FAULT_OP_EXECUTE:
                    err = _exec_fault(varea, pa, msg);
                    break;
                }
            }
        }
        else
        {
            LOG_I("%s: varea not found at 0x%lx", __func__, msg->fault_vaddr);
        }
        RD_UNLOCK(aspace);
    }

    return err;
}

#endif /* RT_USING_SMART */