/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-06     WangXiaoyao  the first version
 * 2023-08-19     Shell        Support PRIVATE mapping and COW
 */
#include <rtthread.h>

#ifdef RT_USING_SMART

#define DBG_TAG "mm.fault"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp.h>
#include <lwp_syscall.h>

#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_private.h"

#include <mmu.h>
#include <tlb.h>
  24. static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
  25. {
  26. int err = MM_FAULT_FIXABLE_FALSE;
  27. if (varea->mem_obj && varea->mem_obj->on_page_fault)
  28. {
  29. varea->mem_obj->on_page_fault(varea, msg);
  30. err = rt_varea_map_with_msg(varea, msg);
  31. err = (err == RT_EOK ? MM_FAULT_FIXABLE_TRUE : MM_FAULT_FIXABLE_FALSE);
  32. }
  33. return err;
  34. }
  35. static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
  36. {
  37. int err = MM_FAULT_FIXABLE_FALSE;
  38. if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
  39. {
  40. RT_ASSERT(pa == ARCH_MAP_FAILED);
  41. RT_ASSERT(!(varea->flag & MMF_PREFETCH));
  42. err = _fetch_page(varea, msg);
  43. }
  44. else
  45. {
  46. /* signal a fault to user? */
  47. }
  48. return err;
  49. }
  50. static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
  51. {
  52. rt_aspace_t aspace = varea->aspace;
  53. int err = MM_FAULT_FIXABLE_FALSE;
  54. if (rt_varea_is_private_locked(varea))
  55. {
  56. if (VAREA_IS_WRITABLE(varea) && (
  57. msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT ||
  58. msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT))
  59. {
  60. RDWR_LOCK(aspace);
  61. err = rt_varea_fix_private_locked(varea, pa, msg, RT_FALSE);
  62. RDWR_UNLOCK(aspace);
  63. if (err == MM_FAULT_FIXABLE_FALSE)
  64. LOG_I("%s: fix private failure", __func__);
  65. }
  66. else
  67. {
  68. LOG_I("%s: No permission on %s(attr=0x%lx)", __func__, VAREA_NAME(varea), varea->attr);
  69. }
  70. }
  71. else if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
  72. {
  73. RT_ASSERT(pa == ARCH_MAP_FAILED);
  74. RT_ASSERT(!(varea->flag & MMF_PREFETCH));
  75. err = _fetch_page(varea, msg);
  76. if (err == MM_FAULT_FIXABLE_FALSE)
  77. LOG_I("%s: page fault failure", __func__);
  78. }
  79. else
  80. {
  81. LOG_D("%s: can not fix", __func__);
  82. /* signal a fault to user? */
  83. }
  84. return err;
  85. }
  86. static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
  87. {
  88. int err = MM_FAULT_FIXABLE_FALSE;
  89. if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
  90. {
  91. RT_ASSERT(pa == ARCH_MAP_FAILED);
  92. RT_ASSERT(!(varea->flag & MMF_PREFETCH));
  93. err = _fetch_page(varea, msg);
  94. }
  95. return err;
  96. }
  97. int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
  98. {
  99. int err = MM_FAULT_FIXABLE_FALSE;
  100. uintptr_t va = (uintptr_t)msg->fault_vaddr;
  101. va &= ~ARCH_PAGE_MASK;
  102. msg->fault_vaddr = (void *)va;
  103. rt_mm_fault_res_init(&msg->response);
  104. RT_DEBUG_SCHEDULER_AVAILABLE(1);
  105. if (aspace)
  106. {
  107. rt_varea_t varea;
  108. RD_LOCK(aspace);
  109. varea = _aspace_bst_search(aspace, msg->fault_vaddr);
  110. if (varea)
  111. {
  112. void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
  113. if (pa != ARCH_MAP_FAILED && msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
  114. {
  115. LOG_D("%s(fault=%p) has already fixed", __func__, msg->fault_vaddr);
  116. err = MM_FAULT_FIXABLE_TRUE;
  117. }
  118. else
  119. {
  120. LOG_D("%s(varea=%s,fault=%p,fault_op=%d,phy=%p)", __func__, VAREA_NAME(varea), msg->fault_vaddr, msg->fault_op, pa);
  121. msg->off = varea->offset + ((long)msg->fault_vaddr - (long)varea->start) / ARCH_PAGE_SIZE;
  122. /* permission checked by fault op */
  123. switch (msg->fault_op)
  124. {
  125. case MM_FAULT_OP_READ:
  126. err = _read_fault(varea, pa, msg);
  127. break;
  128. case MM_FAULT_OP_WRITE:
  129. err = _write_fault(varea, pa, msg);
  130. break;
  131. case MM_FAULT_OP_EXECUTE:
  132. err = _exec_fault(varea, pa, msg);
  133. break;
  134. }
  135. }
  136. }
  137. else
  138. {
  139. LOG_I("%s: varea not found at 0x%lx", __func__, msg->fault_vaddr);
  140. }
  141. RD_UNLOCK(aspace);
  142. }
  143. return err;
  144. }
#endif /* RT_USING_SMART */