mm_object.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-30     WangXiaoyao  the first version
 * 2023-08-19     Shell        Support varea modification handler
 * 2023-10-13     Shell        Replace the page management algorithm of pgmgr
 */

#define DBG_TAG "mm.object"
#define DBG_LVL DBG_INFO
#include "rtdbg.h"

#include <rtthread.h>

#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_page.h"

#include <mmu.h>
#include <string.h>
#include <stdlib.h>

/** varea based dummy memory object whose data comes directly from page frame */

static const char *get_name(rt_varea_t varea)
{
    return "dummy-mapper";
}

void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
{
    /* each mapping of page frame in the varea is binding with a reference */
    rt_page_ref_inc(page_addr, 0);
}

/* resource recycling of page frames */
void rt_varea_pgmgr_pop_all(rt_varea_t varea)
{
    rt_aspace_t aspace = varea->aspace;
    char *end_addr = varea->start + varea->size;
    RT_ASSERT(!((long)end_addr & ARCH_PAGE_MASK));

    for (char *iter = varea->start; iter != end_addr; iter += ARCH_PAGE_SIZE)
    {
        void *page_pa = rt_hw_mmu_v2p(aspace, iter);
        char *page_va = rt_kmem_p2v(page_pa);
        if (page_pa != ARCH_MAP_FAILED && page_va)
        {
            rt_hw_mmu_unmap(aspace, iter, ARCH_PAGE_SIZE);
            rt_pages_free(page_va, 0);
        }
    }
}

static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    void *page;
    page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);

    if (!page)
    {
        LOG_W("%s: page alloc failed", __func__);
        return;
    }

    msg->response.status = MM_FAULT_STATUS_OK;
    msg->response.size = ARCH_PAGE_SIZE;
    msg->response.vaddr = page;
}

static void on_varea_open(struct rt_varea *varea)
{
    varea->data = NULL;
}

static void on_varea_close(struct rt_varea *varea)
{
    /* unmap and dereference page frames in the varea region */
    rt_varea_pgmgr_pop_all(varea);
}

static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}

static void _remove_pages(rt_varea_t varea, void *rm_start, void *rm_end)
{
    void *page_va;

    RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
    RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
    while (rm_start != rm_end)
    {
        page_va = rt_hw_mmu_v2p(varea->aspace, rm_start);
        if (page_va != ARCH_MAP_FAILED)
        {
            page_va -= PV_OFFSET;
            LOG_D("%s: free page %p", __func__, page_va);
            rt_varea_unmap_page(varea, rm_start);
            rt_pages_free(page_va, 0);
        }
        rm_start += ARCH_PAGE_SIZE;
    }
}

static rt_err_t on_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    char *varea_start = varea->start;
    void *rm_start;
    void *rm_end;

    if (varea_start == (char *)new_start)
    {
        rm_start = varea_start + size;
        rm_end = varea_start + varea->size;
    }
    else /* if (varea_start < (char *)new_start) */
    {
        RT_ASSERT(varea_start < (char *)new_start);
        rm_start = varea_start;
        rm_end = new_start;
    }

    _remove_pages(varea, rm_start, rm_end);
    return RT_EOK;
}

static rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    /* remove the resource in the unmap region, and do nothing for the subset */
    _remove_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
    return RT_EOK;
}

static rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    /* do nothing for the migration */
    return RT_EOK;
}

static void page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    char *dst_k;
    rt_aspace_t aspace = varea->aspace;
    dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
    if (dst_k != ARCH_MAP_FAILED)
    {
        RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
        dst_k = (void *)((char *)dst_k - PV_OFFSET);
        memcpy(msg->buffer_vaddr, dst_k, ARCH_PAGE_SIZE);
        msg->response.status = MM_FAULT_STATUS_OK;
    }
}

static void page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    void *dst_k;
    rt_aspace_t aspace = varea->aspace;
    dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
    if (dst_k != ARCH_MAP_FAILED)
    {
        RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
        dst_k = (void *)((char *)dst_k - PV_OFFSET);
        memcpy(dst_k, msg->buffer_vaddr, ARCH_PAGE_SIZE);
        msg->response.status = MM_FAULT_STATUS_OK;
    }
}

struct rt_mem_obj rt_mm_dummy_mapper = {
    .get_name = get_name,
    .on_page_fault = on_page_fault,
    .hint_free = NULL,
    .on_varea_open = on_varea_open,
    .on_varea_close = on_varea_close,
    .on_varea_shrink = on_varea_shrink,
    .on_varea_split = on_varea_split,
    .on_varea_expand = on_varea_expand,
    .on_varea_merge = on_varea_merge,
    .page_write = page_write,
    .page_read = page_read,
};
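
/*
 * Usage sketch (not part of the original file): rt_mm_dummy_mapper backs
 * anonymous, page-frame based mappings through the aspace API, with pages
 * allocated lazily by on_page_fault(). The snippet below is a minimal,
 * hedged example assuming the rt_aspace_map() interface declared in
 * mm_aspace.h; the aspace handle rt_kernel_space, the attribute
 * MMU_MAP_K_RWCB, and the flag MMF_PREFETCH are illustrative assumptions
 * and may differ per architecture/port. Guarded out so the file builds
 * unchanged.
 */
#if 0 /* example only */
static void *map_anonymous_region(rt_size_t length)
{
    void *vaddr = RT_NULL;

    /* map `length` bytes backed by the dummy mapper; with MMF_PREFETCH the
     * pages are populated immediately via on_page_fault(), otherwise they
     * would be filled in on first access (names assumed, see note above) */
    int err = rt_aspace_map(&rt_kernel_space, &vaddr, length,
                            MMU_MAP_K_RWCB, MMF_PREFETCH,
                            &rt_mm_dummy_mapper, 0);

    return err == RT_EOK ? vaddr : RT_NULL;
}
#endif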