mm_fault.c

/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-06     WangXiaoyao  the first version
 */

#include <rtthread.h>

#ifdef RT_USING_SMART
#include <lwp.h>
#include <lwp_syscall.h>

#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_private.h"

#include <mmu.h>
#include <tlb.h>

#define DBG_TAG "mm.fault"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#define UNRECOVERABLE 0
#define RECOVERABLE   1
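
/*
 * Ask the varea's memory object to provide backing memory for the faulting
 * page via on_page_fault(); on success, map the returned buffer at the
 * faulting address and invalidate the stale TLB entries.
 * Returns RECOVERABLE only when the new mapping was installed.
 */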
static int _fetch_page(rt_varea_t varea, struct rt_mm_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    varea->mem_obj->on_page_fault(varea, msg);
    if (msg->response.status == MM_FAULT_STATUS_OK)
    {
        void *store = msg->response.vaddr;
        rt_size_t store_sz = msg->response.size;
        if (msg->vaddr + store_sz > varea->start + varea->size)
        {
            LOG_W("%s: provided buffer exceeds the varea bounds", __func__);
        }
        else
        {
            /* store + PV_OFFSET: translate the buffer's kernel virtual
             * address to the physical address expected by the MMU driver */
            rt_hw_mmu_map(varea->aspace, msg->vaddr, store + PV_OFFSET,
                          store_sz, varea->attr);
            rt_hw_tlb_invalidate_range(varea->aspace, msg->vaddr, store_sz,
                                       ARCH_PAGE_SIZE);
            err = RECOVERABLE;
        }
    }
    return err;
}
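
/* A read fault is recoverable only when it is a page (translation) fault
 * that can be fixed by populating the missing page. */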
static int _read_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        err = _fetch_page(varea, msg);
    }
    else
    {
        /* signal a fault to user? */
    }
    return err;
}
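
/* A write fault is recoverable when the backing page is simply missing; an
 * access (permission) fault on a copy-on-write varea is recognized
 * separately. */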
static int _write_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        err = _fetch_page(varea, msg);
    }
    else if (msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT &&
             varea->flag & MMF_COW)
    {
        /* write to a copy-on-write mapping: breaking COW is not handled
         * here yet, so the fault stays unrecoverable */
    }
    else
    {
        /* signal a fault to user? */
    }
    return err;
}
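
/* An execute fault is recoverable only when the backing page is missing;
 * permission violations are left unrecoverable. */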
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        err = _fetch_page(varea, msg);
    }
    return err;
}
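
/*
 * Entry point for the architecture fault handler: align the faulting
 * address down to a page boundary, look up the containing varea in the
 * current lwp's address space and dispatch on the faulting operation.
 * Returns RECOVERABLE (1) when the fault was fixed, UNRECOVERABLE (0)
 * otherwise.
 */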
int rt_mm_fault_try_fix(struct rt_mm_fault_msg *msg)
{
    struct rt_lwp *lwp = lwp_self();
    int err = UNRECOVERABLE;
    uintptr_t va = (uintptr_t)msg->vaddr;
    va &= ~ARCH_PAGE_MASK;
    msg->vaddr = (void *)va;

    if (lwp)
    {
        rt_aspace_t aspace = lwp->aspace;
        rt_varea_t varea = _aspace_bst_search(aspace, msg->vaddr);
        if (varea)
        {
            void *pa = rt_hw_mmu_v2p(aspace, msg->vaddr);
            msg->off = (msg->vaddr - varea->start) >> ARCH_PAGE_SHIFT;

            /* permission checked by fault op */
            switch (msg->fault_op)
            {
            case MM_FAULT_OP_READ:
                err = _read_fault(varea, pa, msg);
                break;
            case MM_FAULT_OP_WRITE:
                err = _write_fault(varea, pa, msg);
                break;
            case MM_FAULT_OP_EXECUTE:
                err = _exec_fault(varea, pa, msg);
                break;
            }
        }
    }
    return err;
}
  122. #endif /* RT_USING_SMART */
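
For context, a minimal sketch of how an architecture-level abort handler might hand a user-space fault to rt_mm_fault_try_fix(). The handler name, its arguments, and the way the faulting address and access type are obtained are hypothetical; only struct rt_mm_fault_msg, the MM_FAULT_* constants and the RECOVERABLE return semantics come from the code above.

/* Hypothetical caller sketch -- not part of mm_fault.c. */
#include <rtthread.h>
#include "mm_fault.h"

/* Called from an (assumed) architecture data-abort path with the faulting
 * virtual address and whether the access was a write. */
void arch_user_fault_entry(void *fault_addr, rt_bool_t is_write)
{
    struct rt_mm_fault_msg msg = {0};

    msg.fault_op   = is_write ? MM_FAULT_OP_WRITE : MM_FAULT_OP_READ;
    msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT; /* assume a translation fault */
    msg.vaddr      = fault_addr;

    if (rt_mm_fault_try_fix(&msg))
    {
        /* RECOVERABLE: a page was mapped in; return and retry the access */
        return;
    }

    /* UNRECOVERABLE: signal or terminate the faulting user process here */
}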