mm_aspace.c

/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-14     WangXiaoyao  the first version
 */

/**
 * @brief Virtual Address Space
 */

#include <rtdef.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>

#define DBG_TAG "mm.aspace"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "avl_adpt.h"
#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_page.h"
#include "mm_private.h"

#include <mmu.h>
#include <tlb.h>

#ifndef RT_USING_SMART
#define PV_OFFSET 0
#endif

static void _aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length);

static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
                        void *limit_start, rt_size_t limit_size,
                        mm_flag_t flags);

struct rt_aspace rt_kernel_space;

rt_varea_t _varea_create(void *start, rt_size_t size)
{
    rt_varea_t varea;
    varea = (rt_varea_t)rt_malloc(sizeof(struct rt_varea));
    if (varea)
    {
        varea->start = start;
        varea->size = size;
    }
    return varea;
}

static inline void _varea_post_install(rt_varea_t varea, rt_aspace_t aspace,
                                       rt_size_t attr, rt_size_t flags,
                                       rt_mem_obj_t mem_obj, rt_size_t offset)
{
    varea->aspace = aspace;
    varea->attr = attr;
    varea->mem_obj = mem_obj;
    varea->flag = flags;
    varea->offset = offset;
    varea->frames = NULL;

    if (varea->mem_obj && varea->mem_obj->on_varea_open)
        varea->mem_obj->on_varea_open(varea);
}

int _init_lock(rt_aspace_t aspace)
{
    MM_PGTBL_LOCK_INIT(aspace);
    rt_mutex_init(&aspace->bst_lock, "", RT_IPC_FLAG_FIFO);
    return RT_EOK;
}

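/**
 * Allocate and initialize an address space object bound to the page table
 * @pgtbl. Returns NULL if @pgtbl is NULL, if the allocation fails, or if
 * the lock/lookup-tree initialization fails.
 */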
rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl)
{
    rt_aspace_t aspace = NULL;
    void *page_table = pgtbl;

    if (page_table)
    {
        aspace = (rt_aspace_t)rt_malloc(sizeof(*aspace));
        if (aspace)
        {
            aspace->page_table = page_table;
            aspace->start = start;
            aspace->size = length;
            if (_init_lock(aspace) != RT_EOK ||
                _aspace_bst_init(aspace) != RT_EOK)
            {
                rt_free(aspace);
                aspace = NULL;
            }
        }
    }
    return aspace;
}

rt_aspace_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length,
                           void *pgtbl)
{
    void *page_table = pgtbl;
    LOG_D("%s", __func__);
    if (page_table)
    {
        aspace->page_table = page_table;
        aspace->start = start;
        aspace->size = length;
        if (_init_lock(aspace) != RT_EOK || _aspace_bst_init(aspace) != RT_EOK)
        {
            aspace = NULL;
        }
    }
    return aspace;
}

void rt_aspace_detach(rt_aspace_t aspace)
{
    _aspace_unmap(aspace, aspace->start, aspace->size);
    rt_mutex_detach(&aspace->bst_lock);
}

void rt_aspace_delete(rt_aspace_t aspace)
{
    if (aspace)
    {
        rt_aspace_detach(aspace);
        rt_free(aspace);
    }
}

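/**
 * Map the linear physical range starting at page frame @offset to the
 * virtual range [@vaddr, @vaddr + @length) page by page, then invalidate
 * the affected TLB entries.
 */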
static int _do_named_map(rt_aspace_t aspace, void *vaddr, rt_size_t length,
                         rt_size_t offset, rt_size_t attr)
{
    LOG_D("%s: va %p length %p", __func__, vaddr, length);
    int err = RT_EOK;

    /* it's ensured by caller that (void*)end will not overflow */
    void *end = vaddr + length;
    void *phyaddr = (void *)(offset << MM_PAGE_SHIFT);

    while (vaddr != end)
    {
        /* TODO try to map with huge TLB, when flag & HUGEPAGE */
        rt_size_t pgsz = ARCH_PAGE_SIZE;
        rt_hw_mmu_map(aspace, vaddr, phyaddr, pgsz, attr);
        vaddr += pgsz;
        phyaddr += pgsz;
    }

    rt_hw_tlb_invalidate_range(aspace, vaddr, length, ARCH_PAGE_SIZE);
    return err;
}

rt_inline void _do_page_fault(struct rt_mm_fault_msg *msg, rt_size_t off,
                              void *vaddr, rt_mem_obj_t mem_obj,
                              rt_varea_t varea)
{
    msg->off = off;
    msg->vaddr = vaddr;
    msg->fault_op = MM_FAULT_OP_READ;
    msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
    msg->response.status = -1;

    mem_obj->on_page_fault(varea, msg);
}

/* allocate memory page for mapping range */
static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
                        rt_size_t size)
{
    int err = RT_EOK;

    /* it's ensured by caller that start & size are page-aligned */
    void *end = start + size;
    void *vaddr = start;
    rt_size_t off = varea->offset + ((start - varea->start) >> ARCH_PAGE_SHIFT);

    while (vaddr != end)
    {
        /* TODO try to map with huge TLB, when flag & HUGEPAGE */
        struct rt_mm_fault_msg msg;
        _do_page_fault(&msg, off, vaddr, varea->mem_obj, varea);

        if (msg.response.status == MM_FAULT_STATUS_OK)
        {
            void *store = msg.response.vaddr;
            rt_size_t store_sz = msg.response.size;
            if (store_sz + vaddr > end)
            {
                LOG_W("%s: too much (0x%lx) of buffer at vaddr %p is provided",
                      __func__, store_sz, vaddr);
                break;
            }

            void *map = rt_hw_mmu_map(aspace, vaddr, store + PV_OFFSET,
                                      store_sz, varea->attr);
            if (!map)
            {
                LOG_W("%s: MMU mapping failed for va %p to %p of %lx", __func__,
                      vaddr, store + PV_OFFSET, store_sz);
            }

            vaddr += store_sz;
            off += store_sz >> ARCH_PAGE_SHIFT;
            rt_hw_tlb_invalidate_range(aspace, vaddr, store_sz, ARCH_PAGE_SIZE);
        }
        else
        {
            err = -RT_ENOMEM;
            LOG_W("%s failed because no memory is provided", __func__);
            break;
        }
    }

    return err;
}

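/**
 * Reserve a free virtual range described by @hint and insert @varea into
 * the lookup tree of @aspace. Callers hold the writer lock of @aspace
 * around this call.
 */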
int _varea_install(rt_aspace_t aspace, rt_varea_t varea, rt_mm_va_hint_t hint)
{
    void *alloc_va;
    int err = RT_EOK;

    /**
     * find a suitable va range.
     * even though this is sleepable, it's still ok for startup routine
     */
    alloc_va =
        _find_free(aspace, hint->prefer, hint->map_size, hint->limit_start,
                   hint->limit_range_size, hint->flags);

    /* TODO try merge surrounding regions to optimize memory footprint */
    if (alloc_va != ARCH_MAP_FAILED)
    {
        varea->start = alloc_va;
        _aspace_bst_insert(aspace, varea);
    }
    else
    {
        err = -RT_ENOSPC;
    }

    return err;
}

static int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea, rt_size_t attr,
                          mm_flag_t flags, rt_mem_obj_t mem_obj,
                          rt_size_t offset)
{
    int err = RT_EOK;

    WR_LOCK(aspace);
    struct rt_mm_va_hint hint = {.prefer = varea->start,
                                 .map_size = varea->size,
                                 .limit_start = aspace->start,
                                 .limit_range_size = aspace->size,
                                 .flags = flags};

    if (mem_obj->hint_free)
    {
        mem_obj->hint_free(&hint);
    }

    err = _varea_install(aspace, varea, &hint);
    WR_UNLOCK(aspace);

    if (err == RT_EOK)
    {
        _varea_post_install(varea, aspace, attr, flags, mem_obj, offset);
        if (MMF_TEST_CNTL(flags, MMF_PREFETCH))
        {
            err = _do_prefetch(aspace, varea, varea->start, varea->size);
        }
    }
    return err;
}

static inline int _not_in_range(void *start, rt_size_t length,
                                void *limit_start, rt_size_t limit_size)
{
    LOG_D("%s: [%p : %p] [%p : %p]", __func__, start, length, limit_start,
          limit_size);
    /* assuming (base + length) will not overflow except (0) */
    return start != ARCH_MAP_FAILED
               ? ((length > (0ul - (uintptr_t)start)) || start < limit_start ||
                  (length + (rt_size_t)(start - limit_start)) > limit_size)
               : length > limit_size;
}

static inline int _not_align(void *start, rt_size_t length, rt_size_t mask)
{
    return (start != ARCH_MAP_FAILED) &&
           (((uintptr_t)start & mask) || (length & mask));
}

static inline int _not_support(rt_size_t flags)
{
    rt_size_t support_ops = (MMF_PREFETCH | MMF_MAP_FIXED | MMF_TEXT);
    return flags & ~(support_ops | _MMF_ALIGN_MASK);
}

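/**
 * Map a varea of @length bytes into @aspace, backed by @mem_obj. On input
 * *@addr carries the preferred virtual address (or ARCH_MAP_FAILED for no
 * preference); on success it is updated with the chosen start address, on
 * failure it is set to NULL.
 *
 * A rough usage sketch (my_mem_obj and the attribute value are
 * illustrative placeholders, not defined in this file):
 *
 *     void *va = ARCH_MAP_FAILED;  // let the allocator pick a range
 *     int err = rt_aspace_map(&rt_kernel_space, &va, 0x1000, attr,
 *                             MMF_PREFETCH, &my_mem_obj, 0);
 */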
int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
                  rt_size_t attr, mm_flag_t flags, rt_mem_obj_t mem_obj,
                  rt_size_t offset)
{
    /* TODO check not in atomic context: irq, spinlock, local intr disable... */
    int err;
    rt_varea_t varea;

    if (!aspace || !addr || !mem_obj || length == 0 ||
        _not_in_range(*addr, length, aspace->start, aspace->size))
    {
        err = -RT_EINVAL;
        LOG_I("%s: Invalid input", __func__);
    }
    else if (_not_support(flags))
    {
        LOG_I("%s: no support flags 0x%p", __func__, flags);
        err = -RT_ENOSYS;
    }
    else
    {
        varea = _varea_create(*addr, length);

        if (varea)
        {
            err = _mm_aspace_map(aspace, varea, attr, flags, mem_obj, offset);
        }
        else
        {
            LOG_W("%s: mm aspace map failed", __func__);
            err = -RT_ENOMEM;
        }
    }

    if (err != RT_EOK)
    {
        *addr = NULL;
    }
    else
    {
        *addr = varea->start;
    }
    return err;
}

int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
                         rt_size_t length, rt_size_t attr, mm_flag_t flags,
                         rt_mem_obj_t mem_obj, rt_size_t offset)
{
    int err;

    if (!aspace || !varea || !addr || !mem_obj || length == 0 ||
        _not_in_range(*addr, length, aspace->start, aspace->size))
    {
        err = -RT_EINVAL;
        LOG_W("%s: Invalid input", __func__);
    }
    else if (_not_support(flags))
    {
        LOG_W("%s: no support flags", __func__);
        err = -RT_ENOSYS;
    }
    else
    {
        varea->size = length;
        varea->start = *addr;
        err = _mm_aspace_map(aspace, varea, attr, flags, mem_obj, offset);
    }

    if (err != RT_EOK)
    {
        *addr = NULL;
    }
    else
    {
        *addr = varea->start;
    }
    return err;
}

int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
                       rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
                       void **ret_va)
{
    int err;
    void *vaddr;

    if (!aspace || !hint || !hint->limit_range_size || !hint->map_size)
    {
        LOG_W("%s: Invalid input", __func__);
        err = -RT_EINVAL;
    }
    else if (_not_align(hint->prefer, hint->map_size, ARCH_PAGE_MASK))
    {
        LOG_W("%s: not aligned", __func__);
        err = -RT_EINVAL;
    }
    else if (_not_in_range(hint->limit_start, hint->limit_range_size,
                           aspace->start, aspace->size) ||
             _not_in_range(hint->prefer, hint->map_size, aspace->start,
                           aspace->size))
    {
        LOG_W("%s: not in range", __func__);
        err = -RT_EINVAL;
    }
    else
    {
        WR_LOCK(aspace);
        err = _varea_install(aspace, varea, hint);
        WR_UNLOCK(aspace);

        if (err == RT_EOK)
        {
            _varea_post_install(varea, aspace, attr, 0, NULL, pa_off);

            vaddr = varea->start;
            err = _do_named_map(aspace, varea->start, varea->size,
                                (rt_size_t)pa_off, attr);
            if (err != RT_EOK)
            {
                _aspace_unmap(aspace, varea->start, varea->size);
                rt_free(varea);
            }
        }
    }

    if (ret_va)
    {
        if (err == RT_EOK)
            *ret_va = vaddr;
        else
            *ret_va = ARCH_MAP_FAILED;
    }

    return err;
}

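/**
 * Map the physical range starting at page frame @pa_off into @aspace at a
 * virtual range chosen from @hint. On success *@ret_va (if given) receives
 * the mapped virtual address, otherwise ARCH_MAP_FAILED.
 */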
int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
                      rt_size_t pa_off, void **ret_va)
{
    int err;

    if (hint)
    {
        rt_varea_t varea = _varea_create(hint->prefer, hint->map_size);
        if (varea)
        {
            err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
            if (err != RT_EOK)
            {
                rt_free(varea);
            }
        }
        else
        {
            err = -RT_ENOMEM;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }
    return err;
}

int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
                             rt_mm_va_hint_t hint, rt_size_t attr,
                             rt_size_t pa_off, void **ret_va)
{
    int err;

    if (varea && hint)
    {
        varea->start = hint->prefer;
        varea->size = hint->map_size;
        hint->flags |= MMF_MAP_FIXED;
        err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
    }
    else
    {
        err = -RT_EINVAL;
    }
    return err;
}

void _aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length)
{
    struct _mm_range range = {addr, addr + length - 1};
    rt_varea_t varea = _aspace_bst_search_overlap(aspace, range);

    while (varea)
    {
        if (varea->mem_obj && varea->mem_obj->on_varea_close)
            varea->mem_obj->on_varea_close(varea);

        rt_varea_free_pages(varea);

        WR_LOCK(aspace);
        _aspace_bst_remove(aspace, varea);
        WR_UNLOCK(aspace);

        rt_hw_mmu_unmap(aspace, varea->start, varea->size);
        rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size,
                                   ARCH_PAGE_SIZE);

        rt_free(varea);
        varea = _aspace_bst_search_overlap(aspace, range);
    }
}

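/**
 * Unmap every varea that overlaps [@addr, @addr + @length): pages are
 * released, page-table and TLB entries are torn down, and the varea
 * objects themselves are freed. Returns -RT_EINVAL if the range lies
 * outside the address space.
 */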
int rt_aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length)
{
    if (!aspace)
    {
        LOG_I("%s: Invalid input", __func__);
        return -RT_EINVAL;
    }

    if (_not_in_range(addr, length, aspace->start, aspace->size))
    {
        LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
              aspace->start, aspace->start + aspace->size);
        return -RT_EINVAL;
    }

    _aspace_unmap(aspace, addr, length);
    return RT_EOK;
}

static inline void *_lower(void *a, void *b)
{
    return a < b ? a : b;
}

/* round va up to the boundary described by align_mask */
static inline void *_align(void *va, rt_ubase_t align_mask)
{
    return (void *)((rt_ubase_t)(va + ~align_mask) & align_mask);
}

static void *_ascending_search(rt_varea_t varea, rt_size_t req_size,
                               rt_ubase_t align_mask, struct _mm_range limit)
{
    void *ret = ARCH_MAP_FAILED;

    while (varea && varea->start < limit.end)
    {
        void *candidate = varea->start + varea->size;
        candidate = _align(candidate, align_mask);

        if (candidate > limit.end || limit.end - candidate + 1 < req_size)
            break;

        rt_varea_t nx_va = ASPACE_VAREA_NEXT(varea);
        if (nx_va)
        {
            rt_size_t gap_size =
                _lower(limit.end, nx_va->start - 1) - candidate + 1;

            if (gap_size >= req_size)
            {
                ret = candidate;
                break;
            }
        }
        else
        {
            ret = candidate;
        }
        varea = nx_va;
    }

    return ret;
}

/** find suitable place in [limit_start, limit_end] */
static void *_find_head_and_asc_search(rt_aspace_t aspace, rt_size_t req_size,
                                       rt_ubase_t align_mask,
                                       struct _mm_range limit)
{
    void *va = ARCH_MAP_FAILED;

    rt_varea_t varea = _aspace_bst_search_exceed(aspace, limit.start);
    if (varea)
    {
        void *candidate = _align(limit.start, align_mask);
        rt_size_t gap_size = varea->start - candidate;
        if (gap_size >= req_size)
        {
            rt_varea_t former = _aspace_bst_search(aspace, limit.start);
            if (former)
            {
                candidate = _align(former->start + former->size, align_mask);
                gap_size = varea->start - candidate;

                if (gap_size >= req_size)
                    va = candidate;
                else
                    va = _ascending_search(varea, req_size, align_mask, limit);
            }
            else
            {
                va = candidate;
            }
        }
        else
        {
            va = _ascending_search(varea, req_size, align_mask, limit);
        }
    }
    else
    {
        void *candidate;
        rt_size_t gap_size;

        rt_varea_t former = _aspace_bst_search(aspace, limit.start);
        candidate = former ? former->start + former->size : limit.start;
        candidate = _align(candidate, align_mask);
        gap_size = limit.end - candidate + 1;

        if (gap_size >= req_size)
            va = candidate;
    }

    return va;
}

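/**
 * Choose a free virtual range of @req_size bytes inside
 * [@limit_start, @limit_start + @limit_size). The preferred address is
 * tried first; if it overlaps an existing varea and MMF_MAP_FIXED is not
 * set, the search continues upward from the conflicting varea and, if that
 * fails, restarts from the beginning of the limit window below the
 * conflict. Returns ARCH_MAP_FAILED when no gap is large enough.
 */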
static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
                        void *limit_start, rt_size_t limit_size,
                        mm_flag_t flags)
{
    rt_varea_t varea = NULL;
    void *va = ARCH_MAP_FAILED;
    struct _mm_range limit = {limit_start, limit_start + limit_size - 1};

    rt_ubase_t align_mask = ~0ul;
    if (flags & MMF_REQUEST_ALIGN)
    {
        align_mask = ~((1 << MMF_GET_ALIGN(flags)) - 1);
    }

    if (prefer != ARCH_MAP_FAILED)
    {
        prefer = _align(prefer, align_mask);
        struct _mm_range range = {prefer, prefer + req_size - 1};
        varea = _aspace_bst_search_overlap(aspace, range);

        if (!varea)
        {
            va = prefer;
        }
        else if (flags & MMF_MAP_FIXED)
        {
            /* a fixed mapping conflicts with an existing varea: keep
             * ARCH_MAP_FAILED so the caller sees the failure */
        }
        else
        {
            va = _ascending_search(varea, req_size, align_mask, limit);
            if (va == ARCH_MAP_FAILED)
            {
                limit.end = varea->start - 1;
                va = _find_head_and_asc_search(aspace, req_size, align_mask,
                                               limit);
            }
        }
    }
    else
    {
        va = _find_head_and_asc_search(aspace, req_size, align_mask, limit);
    }

    return va;
}

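/**
 * Eagerly populate @npage pages starting at @addr inside an existing varea
 * by triggering its page-fault handler, instead of waiting for demand
 * paging. @addr must be page aligned and fully covered by the varea.
 */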
int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
{
    int err = RT_EOK;
    rt_varea_t varea = _aspace_bst_search(aspace, addr);
    void *end = addr + (npage << ARCH_PAGE_SHIFT);

    if (!varea)
    {
        LOG_W("%s: varea not exist", __func__);
        err = -RT_ENOENT;
    }
    else if (addr >= end || (rt_size_t)addr & ARCH_PAGE_MASK ||
             _not_in_range(addr, npage << ARCH_PAGE_SHIFT, varea->start,
                           varea->size))
    {
        LOG_W("%s: Unaligned parameter or out of range", __func__);
        err = -RT_EINVAL;
    }
    else
    {
        err = _do_prefetch(aspace, varea, addr, npage << ARCH_PAGE_SHIFT);
    }
    return err;
}

int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
{
    return -RT_ENOSYS;
}

int mm_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd)
{
    int err;
    rt_varea_t varea = _aspace_bst_search(aspace, addr);

    if (varea)
    {
        err = rt_hw_mmu_control(aspace, varea->start, varea->size, cmd);
    }
    else
    {
        err = -RT_ENOENT;
    }

    return err;
}

int rt_aspace_traversal(rt_aspace_t aspace,
                        int (*fn)(rt_varea_t varea, void *arg), void *arg)
{
    rt_varea_t varea = ASPACE_VAREA_FIRST(aspace);
    while (varea)
    {
        fn(varea, arg);
        varea = ASPACE_VAREA_NEXT(varea);
    }
    return 0;
}

static int _dump(rt_varea_t varea, void *arg)
{
    rt_kprintf("%s[%p - %p]\n", varea->mem_obj->get_name(varea), varea->start,
               varea->start + varea->size);
    return 0;
}

void rt_aspace_print_all(rt_aspace_t aspace)
{
    rt_aspace_traversal(aspace, _dump, NULL);
}