mm_fault.c
  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-12-06 WangXiaoyao the first version
  9. * 2023-08-19 Shell Support PRIVATE mapping and COW
  10. */
  11. #include <rtthread.h>
  12. #ifdef RT_USING_SMART
  13. #define DBG_TAG "mm.fault"
  14. #define DBG_LVL DBG_INFO
  15. #include <rtdbg.h>
  16. #include <lwp.h>
  17. #include <lwp_syscall.h>
  18. #include "mm_aspace.h"
  19. #include "mm_fault.h"
  20. #include "mm_flag.h"
  21. #include "mm_private.h"
  22. #include <mmu.h>
  23. #include <tlb.h>
  24. static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
  25. {
  26. int err = MM_FAULT_FIXABLE_FALSE;
  27. if (varea->mem_obj && varea->mem_obj->on_page_fault)
  28. {
  29. varea->mem_obj->on_page_fault(varea, msg);
  30. err = rt_varea_map_with_msg(varea, msg);
  31. err = (err == RT_EOK ? MM_FAULT_FIXABLE_TRUE : MM_FAULT_FIXABLE_FALSE);
  32. }
  33. return err;
  34. }
  35. static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
  36. {
  37. int err = MM_FAULT_FIXABLE_FALSE;
  38. if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
  39. {
  40. RT_ASSERT(pa == ARCH_MAP_FAILED);
  41. RT_ASSERT(!(varea->flag & MMF_PREFETCH));
  42. err = _fetch_page(varea, msg);
  43. }
  44. else
  45. {
  46. /* signal a fault to user? */
  47. }
  48. return err;
  49. }
  50. static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
  51. {
  52. rt_aspace_t aspace = varea->aspace;
  53. int err = MM_FAULT_FIXABLE_FALSE;
  54. if (rt_varea_is_private_locked(varea))
  55. {
  56. if (VAREA_IS_WRITABLE(varea) && (
  57. msg->fault_type == MM_FAULT_TYPE_RWX_PERM ||
  58. msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT))
  59. {
  60. RDWR_LOCK(aspace);
  61. err = rt_varea_fix_private_locked(varea, pa, msg, RT_FALSE);
  62. RDWR_UNLOCK(aspace);
  63. if (err == MM_FAULT_FIXABLE_FALSE)
  64. LOG_I("%s: fix private failure", __func__);
  65. }
  66. else
  67. {
  68. LOG_I("%s: No permission on %s(attr=0x%lx,writable=%s,fault_type=%d)",
  69. __func__, VAREA_NAME(varea), varea->attr,
  70. VAREA_IS_WRITABLE(varea) ? "True" : "False", msg->fault_type);
  71. }
  72. }
  73. else if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
  74. {
  75. RT_ASSERT(pa == ARCH_MAP_FAILED);
  76. RT_ASSERT(!(varea->flag & MMF_PREFETCH));
  77. err = _fetch_page(varea, msg);
  78. if (err == MM_FAULT_FIXABLE_FALSE)
  79. LOG_I("%s: page fault failure", __func__);
  80. }
  81. else
  82. {
  83. LOG_D("%s: can not fix", __func__);
  84. /* signal a fault to user? */
  85. }
  86. return err;
  87. }
  88. static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
  89. {
  90. int err = MM_FAULT_FIXABLE_FALSE;
  91. if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
  92. {
  93. RT_ASSERT(pa == ARCH_MAP_FAILED);
  94. RT_ASSERT(!(varea->flag & MMF_PREFETCH));
  95. err = _fetch_page(varea, msg);
  96. }
  97. return err;
  98. }
  99. static void _determine_precise_fault_type(struct rt_aspace_fault_msg *msg, rt_ubase_t pa, rt_varea_t varea)
  100. {
  101. if (msg->fault_type == MM_FAULT_TYPE_GENERIC_MMU)
  102. {
  103. rt_base_t requesting_perm;
  104. switch (msg->fault_op)
  105. {
  106. case MM_FAULT_OP_READ:
  107. requesting_perm = RT_HW_MMU_PROT_READ | RT_HW_MMU_PROT_USER;
  108. break;
  109. case MM_FAULT_OP_WRITE:
  110. requesting_perm = RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER;
  111. break;
  112. case MM_FAULT_OP_EXECUTE:
  113. requesting_perm = RT_HW_MMU_PROT_EXECUTE | RT_HW_MMU_PROT_USER;
  114. break;
  115. }
  116. /**
  117. * always checking the user privileges since dynamic permission is not
  118. * supported in kernel. So those faults are never fixable. Hence, adding
  119. * permission check never changes the result of checking. In other
  120. * words, { 0 && (expr) } is always false.
  121. */
  122. if (rt_hw_mmu_attr_test_perm(varea->attr, requesting_perm))
  123. {
  124. if (pa == (rt_ubase_t)ARCH_MAP_FAILED)
  125. {
  126. msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
  127. }
  128. else
  129. {
  130. msg->fault_type = MM_FAULT_TYPE_RWX_PERM;
  131. }
  132. }
  133. }
  134. }
  135. int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
  136. {
  137. int err = MM_FAULT_FIXABLE_FALSE;
  138. uintptr_t va = (uintptr_t)msg->fault_vaddr;
  139. va &= ~ARCH_PAGE_MASK;
  140. msg->fault_vaddr = (void *)va;
  141. rt_mm_fault_res_init(&msg->response);
  142. RT_DEBUG_SCHEDULER_AVAILABLE(1);
  143. if (aspace)
  144. {
  145. rt_varea_t varea;
  146. RD_LOCK(aspace);
  147. varea = _aspace_bst_search(aspace, msg->fault_vaddr);
  148. if (varea)
  149. {
  150. void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
  151. _determine_precise_fault_type(msg, (rt_ubase_t)pa, varea);
  152. if (pa != ARCH_MAP_FAILED && msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
  153. {
  154. LOG_D("%s(fault=%p) has already fixed", __func__, msg->fault_vaddr);
  155. err = MM_FAULT_FIXABLE_TRUE;
  156. }
  157. else
  158. {
  159. LOG_D("%s(varea=%s,fault=%p,fault_op=%d,phy=%p)", __func__, VAREA_NAME(varea), msg->fault_vaddr, msg->fault_op, pa);
  160. msg->off = varea->offset + ((long)msg->fault_vaddr - (long)varea->start) / ARCH_PAGE_SIZE;
  161. /* permission checked by fault op */
  162. switch (msg->fault_op)
  163. {
  164. case MM_FAULT_OP_READ:
  165. err = _read_fault(varea, pa, msg);
  166. break;
  167. case MM_FAULT_OP_WRITE:
  168. err = _write_fault(varea, pa, msg);
  169. break;
  170. case MM_FAULT_OP_EXECUTE:
  171. err = _exec_fault(varea, pa, msg);
  172. break;
  173. }
  174. }
  175. }
  176. else
  177. {
  178. LOG_I("%s: varea not found at 0x%lx", __func__, msg->fault_vaddr);
  179. }
  180. RD_UNLOCK(aspace);
  181. }
  182. return err;
  183. }
  184. #endif /* RT_USING_SMART */