/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-06     WangXiaoyao  the first version
 */
#include <rtthread.h>

#ifdef RT_USING_SMART
#define DBG_TAG "mm.fault"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp.h>
#include <lwp_syscall.h>

#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_private.h"

#include <mmu.h>
#include <tlb.h>

#define UNRECOVERABLE 0
#define RECOVERABLE   1

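/*
 * _fetch_page() - ask the varea's backing memory object to supply the page
 * for a faulting address.  The object's on_page_fault callback fills in the
 * response; if it reports MM_FAULT_STATUS_OK, the returned buffer is mapped
 * at the faulting address and the stale TLB entries are invalidated.
 * Returns RECOVERABLE on success, UNRECOVERABLE otherwise.
 */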
static int _fetch_page(rt_varea_t varea, struct rt_mm_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (varea->mem_obj && varea->mem_obj->on_page_fault)
    {
        varea->mem_obj->on_page_fault(varea, msg);
        if (msg->response.status == MM_FAULT_STATUS_OK)
        {
            void *store = msg->response.vaddr;
            rt_size_t store_sz = msg->response.size;
            if (msg->vaddr + store_sz > varea->start + varea->size)
            {
                LOG_W("%s: fault handler provided a larger buffer than the varea can hold", __func__);
            }
            else
            {
                /* map the backing store at the faulting address and flush stale TLB entries */
                rt_hw_mmu_map(varea->aspace, msg->vaddr, store + PV_OFFSET,
                              store_sz, varea->attr);
                rt_hw_tlb_invalidate_range(varea->aspace, msg->vaddr, store_sz,
                                           ARCH_PAGE_SIZE);
                err = RECOVERABLE;
            }
        }
    }
    return err;
}

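/*
 * _read_fault() - handle a fault raised by a read access.  Only a missing
 * page (MM_FAULT_TYPE_PAGE_FAULT) can be repaired here, by fetching it from
 * the varea's backing memory object; any other read fault is left
 * unrecovered.
 */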
static int _read_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        err = _fetch_page(varea, msg);
    }
    else
    {
        /* signal a fault to user? */
    }
    return err;
}

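/*
 * _write_fault() - handle a fault raised by a write access.  A missing page
 * is fetched from the backing memory object.  The MM_FAULT_TYPE_ACCESS_FAULT
 * branch for a copy-on-write (MMF_COW) varea is currently an empty
 * placeholder, so such faults still return UNRECOVERABLE.
 */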
static int _write_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        err = _fetch_page(varea, msg);
    }
    else if (msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT &&
             varea->flag & MMF_COW)
    {
    }
    else
    {
        /* signal a fault to user? */
    }
    return err;
}

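/*
 * _exec_fault() - handle a fault raised by an instruction fetch.  As with
 * reads, only a missing page can be repaired by fetching it from the backing
 * memory object.
 */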
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        err = _fetch_page(varea, msg);
    }
    return err;
}

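/*
 * rt_mm_fault_try_fix() - entry point for repairing a user-space memory
 * fault.  The faulting address is aligned down to its page boundary, the
 * varea covering it is looked up in the current lwp's address space, the
 * page offset inside the varea is stored in msg->off, and the fault is then
 * dispatched by operation (read/write/execute).  Returns RECOVERABLE (1) if
 * the mapping was repaired and the access can be retried, UNRECOVERABLE (0)
 * otherwise.
 *
 * Illustrative call site (a sketch only: the surrounding architecture-level
 * fault handler and the variable name fault_vaddr are assumptions, not code
 * from this file):
 *
 *     struct rt_mm_fault_msg msg;
 *     msg.fault_op   = MM_FAULT_OP_WRITE;
 *     msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
 *     msg.vaddr      = fault_vaddr;
 *     if (rt_mm_fault_try_fix(&msg))
 *         return;
 */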
int rt_mm_fault_try_fix(struct rt_mm_fault_msg *msg)
{
    struct rt_lwp *lwp = lwp_self();
    int err = UNRECOVERABLE;
    uintptr_t va = (uintptr_t)msg->vaddr;
    va &= ~ARCH_PAGE_MASK;
    msg->vaddr = (void *)va;

    if (lwp)
    {
        rt_aspace_t aspace = lwp->aspace;
        rt_varea_t varea = _aspace_bst_search(aspace, msg->vaddr);
        if (varea)
        {
            void *pa = rt_hw_mmu_v2p(aspace, msg->vaddr);
            msg->off = (msg->vaddr - varea->start) >> ARCH_PAGE_SHIFT;

            /* permission checked by fault op */
            switch (msg->fault_op)
            {
            case MM_FAULT_OP_READ:
                err = _read_fault(varea, pa, msg);
                break;
            case MM_FAULT_OP_WRITE:
                err = _write_fault(varea, pa, msg);
                break;
            case MM_FAULT_OP_EXECUTE:
                err = _exec_fault(varea, pa, msg);
                break;
            }
        }
    }
    return err;
}

#endif /* RT_USING_SMART */