/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-30     WangXiaoyao  the first version
 * 2023-08-19     Shell        Support varea modification handler
 */

#define DBG_TAG "mm.object"
#define DBG_LVL DBG_INFO
#include "rtdbg.h"

#include <rtthread.h>

#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_page.h"
#include <mmu.h>

#include <string.h>
#include <stdlib.h>

/** varea-based dummy memory object whose data comes directly from page frames */

static const char *get_name(rt_varea_t varea)
{
    return "dummy-mapper";
}
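
/* Check whether a page frame is on the varea's private frame list */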
static rt_bool_t _varea_pgmgr_frame_is_member(rt_varea_t varea, rt_page_t frame)
{
    rt_page_t iter;
    rt_bool_t rc = RT_FALSE;

    if (varea->frames)
    {
        iter = varea->frames;
        do
        {
            if (iter == frame)
            {
                rc = RT_TRUE;
                break;
            }
            iter = iter->next;
        } while (iter);
    }
    return rc;
}
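
/* Track a committed page by pushing its frame onto the head of the
 * varea's doubly linked frame list */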
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
{
    rt_page_t page = rt_page_addr2page(page_addr);

    if (varea->frames == NULL)
    {
        varea->frames = page;
        page->pre = RT_NULL;
        page->next = RT_NULL;
    }
    else
    {
        page->pre = RT_NULL;
        varea->frames->pre = page;
        page->next = varea->frames;
        varea->frames = page;
    }
}
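
/* Release every page frame tracked by the varea and clear the list */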
void rt_varea_pgmgr_pop_all(rt_varea_t varea)
{
    rt_page_t page = varea->frames;

    while (page)
    {
        rt_page_t next = page->next;
        void *pg_va = rt_page_page2addr(page);
        rt_pages_free(pg_va, 0);
        page = next;
    }
    varea->frames = RT_NULL;
}
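
/*
 * Remove and free the frames backing [vaddr, vaddr + size) from the
 * varea, skipping addresses whose frames it does not track.
 */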
void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size)
{
    void *vend = (char *)vaddr + size;

    while (vaddr != vend)
    {
        rt_page_t page = rt_page_addr2page(vaddr);
        if (_varea_pgmgr_frame_is_member(varea, page))
        {
            /* unlink the frame from the list before freeing it */
            if (page->pre)
                page->pre->next = page->next;
            if (page->next)
                page->next->pre = page->pre;
            if (varea->frames == page)
                varea->frames = page->next;
            rt_pages_free(vaddr, 0);
        }
        vaddr = (char *)vaddr + ARCH_PAGE_SIZE;
    }
}
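
/*
 * Page-fault handler: satisfy the fault with a freshly allocated page
 * frame and record it in the varea's frame list.
 */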
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    void *page;
    page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!page)
    {
        LOG_W("%s: page alloc failed", __func__);
        return;
    }

    msg->response.status = MM_FAULT_STATUS_OK;
    msg->response.size = ARCH_PAGE_SIZE;
    msg->response.vaddr = page;

    rt_varea_pgmgr_insert(varea, page);
}
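
/*
 * Lifecycle callbacks: the dummy mapper keeps no per-varea private data,
 * so open just clears varea->data while close and expand need no work.
 */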
static void on_varea_open(struct rt_varea *varea)
{
    varea->data = NULL;
}

static void on_varea_close(struct rt_varea *varea)
{
}

static rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}
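
/*
 * Unmap and free all committed pages in [rm_start, rm_end); both bounds
 * must be page-aligned. The physical address returned by rt_hw_mmu_v2p
 * is converted back to a kernel virtual address via PV_OFFSET.
 */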
static void _remove_pages(rt_varea_t varea, void *rm_start, void *rm_end)
{
    void *page_va;

    RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
    RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
    while (rm_start != rm_end)
    {
        page_va = rt_hw_mmu_v2p(varea->aspace, rm_start);

        if (page_va != ARCH_MAP_FAILED)
        {
            page_va = (char *)page_va - PV_OFFSET;
            LOG_D("%s: free page %p", __func__, page_va);
            rt_varea_unmap_page(varea, rm_start);
            rt_varea_pgmgr_pop(varea, page_va, ARCH_PAGE_SIZE);
        }
        rm_start = (char *)rm_start + ARCH_PAGE_SIZE;
    }
}
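
/*
 * Shrink callback: free the pages that fall outside the new
 * [new_start, new_start + size) range.
 */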
static rt_err_t on_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    char *varea_start = varea->start;
    void *rm_start;
    void *rm_end;

    if (varea_start == (char *)new_start)
    {
        /* shrinking from the end: remove the tail of the varea */
        rm_start = varea_start + size;
        rm_end = varea_start + varea->size;
    }
    else /* if (varea_start < (char *)new_start) */
    {
        RT_ASSERT(varea_start < (char *)new_start);
        /* shrinking from the start: remove the head of the varea */
        rm_start = varea_start;
        rm_end = new_start;
    }

    _remove_pages(varea, rm_start, rm_end);
    return RT_EOK;
}
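
/*
 * Split callback: free the pages inside the unmapped gap, then transfer
 * ownership of the pages that now belong to the subset varea from the
 * original varea's frame list to the subset's.
 */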
static rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    void *sub_start = subset->start;
    void *sub_end = (char *)sub_start + subset->size;
    void *page_va;

    _remove_pages(existed, unmap_start, (char *)unmap_start + unmap_len);

    RT_ASSERT(!((rt_ubase_t)sub_start & ARCH_PAGE_MASK));
    RT_ASSERT(!((rt_ubase_t)sub_end & ARCH_PAGE_MASK));
    while (sub_start != sub_end)
    {
        page_va = rt_hw_mmu_v2p(existed->aspace, sub_start);

        if (page_va != ARCH_MAP_FAILED)
        {
            rt_page_t frame;
            page_va = rt_kmem_p2v(page_va);
            if (page_va)
            {
                frame = rt_page_addr2page(page_va);
                if (frame && _varea_pgmgr_frame_is_member(existed, frame))
                {
                    LOG_D("%s: free page %p", __func__, page_va);
                    /* hold an extra reference so the pop below cannot free the frame */
                    rt_page_ref_inc(page_va, 0);
                    rt_varea_pgmgr_pop(existed, page_va, ARCH_PAGE_SIZE);
                    rt_varea_pgmgr_insert(subset, page_va);
                }
            }
        }
        sub_start = (char *)sub_start + ARCH_PAGE_SIZE;
    }

    return RT_EOK;
}
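
/*
 * Merge callback: move every committed page tracked by merge_from onto
 * merge_to's frame list so the merged varea owns all backing frames.
 */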
static rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    /* transport pages from merge_from to merge_to */
    void *mr_start = merge_from->start;
    void *mr_end = (char *)mr_start + merge_from->size;
    void *page_va;

    RT_ASSERT(!((rt_ubase_t)mr_start & ARCH_PAGE_MASK));
    RT_ASSERT(!((rt_ubase_t)mr_end & ARCH_PAGE_MASK));
    while (mr_start != mr_end)
    {
        page_va = rt_hw_mmu_v2p(merge_from->aspace, mr_start);
        if (page_va != ARCH_MAP_FAILED)
        {
            rt_page_t frame;
            page_va = rt_kmem_p2v(page_va);
            if (page_va)
            {
                frame = rt_page_addr2page(page_va);
                if (frame && _varea_pgmgr_frame_is_member(merge_from, frame))
                {
                    LOG_D("%s: free page %p", __func__, page_va);
                    /* hold an extra reference so the pop below cannot free the frame */
                    rt_page_ref_inc(page_va, 0);
                    rt_varea_pgmgr_pop(merge_from, page_va, ARCH_PAGE_SIZE);
                    rt_varea_pgmgr_insert(merge_to, page_va);
                }
            }
        }
        mr_start = (char *)mr_start + ARCH_PAGE_SIZE;
    }

    return RT_EOK;
}
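
/*
 * Kernel-side I/O on a committed page: translate the faulting address to
 * its kernel alias, then copy one full page out to the message buffer.
 */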
static void page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    char *dst_k;
    rt_aspace_t aspace = varea->aspace;
    dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
    if (dst_k != ARCH_MAP_FAILED)
    {
        RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
        dst_k = (void *)((char *)dst_k - PV_OFFSET);
        memcpy(msg->buffer_vaddr, dst_k, ARCH_PAGE_SIZE);
        msg->response.status = MM_FAULT_STATUS_OK;
    }
}
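
/* Mirror of page_read: copy one full page from the message buffer into
 * the committed page */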
static void page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    void *dst_k;
    rt_aspace_t aspace = varea->aspace;
    dst_k = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
    if (dst_k != ARCH_MAP_FAILED)
    {
        RT_ASSERT(!((long)dst_k & ARCH_PAGE_MASK));
        dst_k = (void *)((char *)dst_k - PV_OFFSET);
        memcpy(dst_k, msg->buffer_vaddr, ARCH_PAGE_SIZE);
        msg->response.status = MM_FAULT_STATUS_OK;
    }
}

struct rt_mem_obj rt_mm_dummy_mapper = {
    .get_name = get_name,
    .on_page_fault = on_page_fault,
    .hint_free = NULL,
    .on_varea_open = on_varea_open,
    .on_varea_close = on_varea_close,
    .on_varea_shrink = on_varea_shrink,
    .on_varea_split = on_varea_split,
    .on_varea_expand = on_varea_expand,
    .on_varea_merge = on_varea_merge,
    .page_write = page_write,
    .page_read = page_read,
};
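
/*
 * Usage sketch (illustrative, not part of this file): rt_mm_dummy_mapper
 * backs anonymous mappings whose pages are allocated lazily by
 * on_page_fault. The aspace, attribute, and flag values below are
 * assumptions for the sake of the example; exact values depend on the
 * target architecture and configuration.
 *
 *     void *vaddr = RT_NULL;
 *     rt_err_t err = rt_aspace_map(&rt_kernel_space, &vaddr, 0x4000,
 *                                  MMU_MAP_K_RWCB, MMF_PREFETCH,
 *                                  &rt_mm_dummy_mapper, 0);
 */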