mm_fault.c

/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-06     WangXiaoyao  the first version
 */

#include <rtthread.h>

#ifdef RT_USING_SMART
#define DBG_TAG "mm.fault"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp.h>
#include <lwp_syscall.h>

#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_private.h"
#include <mmu.h>
#include <tlb.h>

#define UNRECOVERABLE 0
#define RECOVERABLE   1
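
/* Let the varea's backing memory object service the fault described by `msg`,
 * then establish the mapping it returned via _varea_map_with_msg(). Returns
 * RECOVERABLE only when that mapping succeeds (RT_EOK), UNRECOVERABLE otherwise. */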
static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
    /* default to failure in case the varea has no pager to service the fault */
    int err = UNRECOVERABLE;

    msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
    msg->response.vaddr = 0;
    msg->response.size = 0;
    if (varea->mem_obj && varea->mem_obj->on_page_fault)
    {
        varea->mem_obj->on_page_fault(varea, msg);
        err = _varea_map_with_msg(varea, msg);
        err = (err == RT_EOK ? RECOVERABLE : UNRECOVERABLE);
    }
    return err;
}
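
/* A read access faulted. Only a page-not-present fault on a varea that is not
 * prefetch-mapped (MMF_PREFETCH) can be fixed here, by fetching the page from
 * the backing object. */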
static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
    }
    else
    {
        /* signal a fault to user? */
    }
    return err;
}
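
/* A write access faulted. A missing page is populated just like a read fault;
 * an access (permission) fault on a copy-on-write varea (MMF_COW) is recognized
 * but not resolved in this version. */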
static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
    }
    else if (msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT &&
             varea->flag & MMF_COW)
    {
        /* copy-on-write break is not handled here yet; err stays UNRECOVERABLE */
    }
    else
    {
        /* signal a fault to user? */
    }
    return err;
}
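
/* An instruction fetch faulted. Only a missing page can be fixed, by fetching
 * it from the backing object. */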
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
    }
    return err;
}
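
/* Try to fix a fault taken by the current user process (lwp). The faulting
 * address is aligned down to a page boundary, the covering varea is looked up
 * in the lwp's address space, and the fault is dispatched by msg->fault_op.
 * Returns RECOVERABLE (1) if the access can be retried, UNRECOVERABLE (0) otherwise. */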
int rt_aspace_fault_try_fix(struct rt_aspace_fault_msg *msg)
{
    struct rt_lwp *lwp = lwp_self();
    int err = UNRECOVERABLE;
    uintptr_t va = (uintptr_t)msg->fault_vaddr;

    /* align the faulting address down to its page boundary */
    va &= ~ARCH_PAGE_MASK;
    msg->fault_vaddr = (void *)va;

    if (lwp)
    {
        rt_aspace_t aspace = lwp->aspace;
        rt_varea_t varea = _aspace_bst_search(aspace, msg->fault_vaddr);
        if (varea)
        {
            void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
            msg->off = (msg->fault_vaddr - varea->start) >> ARCH_PAGE_SHIFT;

            /* permission checked by fault op */
            switch (msg->fault_op)
            {
            case MM_FAULT_OP_READ:
                err = _read_fault(varea, pa, msg);
                break;
            case MM_FAULT_OP_WRITE:
                err = _write_fault(varea, pa, msg);
                break;
            case MM_FAULT_OP_EXECUTE:
                err = _exec_fault(varea, pa, msg);
                break;
            }
        }
    }
    return err;
}
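
/*
 * Illustrative sketch (not part of the upstream file): how an architecture
 * fault handler might fill in a fault message and call rt_aspace_fault_try_fix().
 * The handler name and the way the address, operation and type are obtained are
 * hypothetical and depend on the port; a real handler would decode them from
 * the trap frame and the fault status registers.
 *
 *   void arch_user_fault_entry(void *fault_addr, rt_bool_t is_write)
 *   {
 *       struct rt_aspace_fault_msg msg;
 *
 *       msg.fault_vaddr = fault_addr;
 *       msg.fault_op    = is_write ? MM_FAULT_OP_WRITE : MM_FAULT_OP_READ;
 *       msg.fault_type  = MM_FAULT_TYPE_PAGE_FAULT;
 *
 *       if (rt_aspace_fault_try_fix(&msg))
 *           return;  // fault fixed; resume and retry the faulting access
 *
 *       // otherwise the fault is unrecoverable: signal the offending thread
 *   }
 */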
#endif /* RT_USING_SMART */