mm_aspace.c

/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-14     WangXiaoyao  the first version
 */

/**
 * @brief Virtual Address Space
 */

#include <rtdef.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>

#define DBG_TAG "mm.aspace"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "avl_adpt.h"
#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_page.h"
#include "mm_private.h"

#include <mmu.h>
#include <tlb.h>

static void _aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length);
static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
                        void *limit_start, rt_size_t limit_size,
                        mm_flag_t flags);

struct rt_aspace rt_kernel_space;
rt_varea_t _varea_create(void *start, rt_size_t size)
{
    rt_varea_t varea;
    varea = (rt_varea_t)rt_malloc(sizeof(struct rt_varea));
    if (varea)
    {
        varea->start = start;
        varea->size = size;
    }
    return varea;
}
static inline void _varea_post_install(rt_varea_t varea, rt_aspace_t aspace,
                                       rt_size_t attr, rt_size_t flags,
                                       rt_mem_obj_t mem_obj, rt_size_t offset)
{
    varea->aspace = aspace;
    varea->attr = attr;
    varea->mem_obj = mem_obj;
    varea->flag = flags;
    varea->offset = offset;
    varea->frames = NULL;

    if (varea->mem_obj && varea->mem_obj->on_varea_open)
        varea->mem_obj->on_varea_open(varea);
}
/* restore context modified by varea install */
static inline void _varea_uninstall(rt_varea_t varea)
{
    rt_aspace_t aspace = varea->aspace;

    if (varea->mem_obj && varea->mem_obj->on_varea_close)
        varea->mem_obj->on_varea_close(varea);

    rt_varea_free_pages(varea);

    rt_hw_mmu_unmap(aspace, varea->start, varea->size);
    rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);

    WR_LOCK(aspace);
    _aspace_bst_remove(aspace, varea);
    WR_UNLOCK(aspace);
}
int _init_lock(rt_aspace_t aspace)
{
    MM_PGTBL_LOCK_INIT(aspace);
    rt_mutex_init(&aspace->bst_lock, "", RT_IPC_FLAG_FIFO);
    return RT_EOK;
}
rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl)
{
    rt_aspace_t aspace = NULL;
    void *page_table = pgtbl;

    if (page_table)
    {
        aspace = (rt_aspace_t)rt_malloc(sizeof(*aspace));
        if (aspace)
        {
            aspace->page_table = page_table;
            aspace->start = start;
            aspace->size = length;
            if (_init_lock(aspace) != RT_EOK ||
                _aspace_bst_init(aspace) != RT_EOK)
            {
                rt_free(aspace);
                aspace = NULL;
            }
        }
    }
    return aspace;
}
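
/*
 * Illustrative usage sketch (comment only, not part of the build): creating
 * and destroying a detached address space. The address range and the
 * page-table helper below are assumptions for the example, not fixtures of
 * this file; a real caller obtains pgtbl from the architecture layer.
 *
 *     void *pgtbl = arch_alloc_page_table();   // hypothetical helper
 *     rt_aspace_t uspace =
 *         rt_aspace_create((void *)0x100000, 0x10000000, pgtbl);
 *     if (uspace)
 *     {
 *         // ... rt_aspace_map()/rt_aspace_unmap() against uspace ...
 *         rt_aspace_delete(uspace); // unmaps all varea, then frees the aspace
 *     }
 */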
rt_aspace_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length,
                           void *pgtbl)
{
    void *page_table = pgtbl;
    LOG_D("%s", __func__);
    if (page_table)
    {
        aspace->page_table = page_table;
        aspace->start = start;
        aspace->size = length;
        if (_init_lock(aspace) != RT_EOK || _aspace_bst_init(aspace) != RT_EOK)
        {
            aspace = NULL;
        }
    }
    return aspace;
}

void rt_aspace_detach(rt_aspace_t aspace)
{
    _aspace_unmap(aspace, aspace->start, aspace->size);
    rt_mutex_detach(&aspace->bst_lock);
}

void rt_aspace_delete(rt_aspace_t aspace)
{
    if (aspace)
    {
        rt_aspace_detach(aspace);
        rt_free(aspace);
    }
}
static int _do_named_map(rt_aspace_t aspace, void *vaddr, rt_size_t length,
                         rt_size_t offset, rt_size_t attr)
{
    LOG_D("%s: va %p length %p", __func__, vaddr, length);
    int err = RT_EOK;

    /* it's ensured by caller that (void *)end will not overflow */
    void *end = vaddr + length;
    void *phyaddr = (void *)(offset << MM_PAGE_SHIFT);

    while (vaddr != end)
    {
        /* TODO try to map with huge TLB, when flag & HUGEPAGE */
        rt_size_t pgsz = ARCH_PAGE_SIZE;
        void *ret = rt_hw_mmu_map(aspace, vaddr, phyaddr, pgsz, attr);
        if (ret == RT_NULL)
        {
            err = -RT_ERROR;
            break;
        }
        vaddr += pgsz;
        phyaddr += pgsz;
    }

    if (err == RT_EOK)
        rt_hw_tlb_invalidate_range(aspace, end - length, length, ARCH_PAGE_SIZE);

    return err;
}
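
/*
 * Note on the offset convention above: for a named (physical) mapping,
 * `offset` is a page frame number, so the physical base is
 * offset << MM_PAGE_SHIFT. For example, with 4 KiB pages
 * (MM_PAGE_SHIFT == 12) an offset of 0x80200 maps physical address
 * 0x80200000 onward, one page per iteration of the loop.
 */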
rt_inline void _do_page_fault(struct rt_mm_fault_msg *msg, rt_size_t off,
                              void *vaddr, rt_mem_obj_t mem_obj,
                              rt_varea_t varea)
{
    msg->off = off;
    msg->vaddr = vaddr;
    msg->fault_op = MM_FAULT_OP_READ;
    msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
    msg->response.status = -1;

    mem_obj->on_page_fault(varea, msg);
}
/* allocate memory pages for the mapping range */
static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
                        rt_size_t size)
{
    int err = RT_EOK;

    /* it's ensured by caller that start & size are page-aligned */
    void *end = start + size;
    void *vaddr = start;
    rt_size_t off = varea->offset + ((start - varea->start) >> ARCH_PAGE_SHIFT);

    while (vaddr != end)
    {
        /* TODO try to map with huge TLB, when flag & HUGEPAGE */
        struct rt_mm_fault_msg msg;
        _do_page_fault(&msg, off, vaddr, varea->mem_obj, varea);

        if (msg.response.status == MM_FAULT_STATUS_OK)
        {
            void *store = msg.response.vaddr;
            rt_size_t store_sz = msg.response.size;
            if (store_sz + vaddr > end)
            {
                LOG_W("%s: too much (0x%lx) of buffer at vaddr %p is provided",
                      __func__, store_sz, vaddr);
                break;
            }

            void *map = rt_hw_mmu_map(aspace, vaddr, store + PV_OFFSET,
                                      store_sz, varea->attr);
            if (!map)
            {
                LOG_W("%s: MMU mapping failed for va %p to %p of %lx", __func__,
                      vaddr, store + PV_OFFSET, store_sz);
            }
            else
            {
                rt_hw_tlb_invalidate_range(aspace, vaddr, store_sz, ARCH_PAGE_SIZE);
            }

            vaddr += store_sz;
            off += store_sz >> ARCH_PAGE_SHIFT;
        }
        else
        {
            err = -RT_ENOMEM;
            LOG_W("%s failed because no memory is provided", __func__);
            break;
        }
    }
    return err;
}
int _varea_install(rt_aspace_t aspace, rt_varea_t varea, rt_mm_va_hint_t hint)
{
    void *alloc_va;
    int err = RT_EOK;

    /**
     * find a suitable va range.
     * even though this is sleepable, it's still ok for startup routine
     */
    alloc_va =
        _find_free(aspace, hint->prefer, hint->map_size, hint->limit_start,
                   hint->limit_range_size, hint->flags);

    /* TODO try merge surrounding regions to optimize memory footprint */
    if (alloc_va != RT_NULL)
    {
        varea->start = alloc_va;
        _aspace_bst_insert(aspace, varea);
    }
    else
    {
        err = -RT_ENOSPC;
    }
    return err;
}
static int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea, rt_size_t attr,
                          mm_flag_t flags, rt_mem_obj_t mem_obj,
                          rt_size_t offset)
{
    int err = RT_EOK;

    WR_LOCK(aspace);
    struct rt_mm_va_hint hint = {.prefer = varea->start,
                                 .map_size = varea->size,
                                 .limit_start = aspace->start,
                                 .limit_range_size = aspace->size,
                                 .flags = flags};

    if (mem_obj->hint_free)
    {
        /* the mem object can constrain the mapping range by modifying the hint */
        mem_obj->hint_free(&hint);
    }

    err = _varea_install(aspace, varea, &hint);
    WR_UNLOCK(aspace);

    if (err == RT_EOK)
    {
        /* fill in varea data */
        _varea_post_install(varea, aspace, attr, flags, mem_obj, offset);

        if (MMF_TEST_CNTL(flags, MMF_PREFETCH))
        {
            /* do the MMU & TLB business */
            err = _do_prefetch(aspace, varea, varea->start, varea->size);
            if (err)
            {
                /* restore data structure and MMU */
                _varea_uninstall(varea);
            }
        }
    }
    return err;
}
#define _IS_OVERFLOW(start, length) ((length) > (0ul - (uintptr_t)(start)))
#define _IS_OVERSIZE(start, length, limit_start, limit_size)                   \
    (((length) + (rt_size_t)((start) - (limit_start))) > (limit_size))

static inline int _not_in_range(void *start, rt_size_t length,
                                void *limit_start, rt_size_t limit_size)
{
    if (start != RT_NULL)
        LOG_D("%s: [%p : %p] [%p : %p]", __func__, start, length, limit_start, limit_size);
    /* assuming (base + length) will not overflow except (0) */
    return start != RT_NULL
               ? (_IS_OVERFLOW(start, length) || start < limit_start ||
                  _IS_OVERSIZE(start, length, limit_start, limit_size))
               : length > limit_size;
}

static inline int _not_align(void *start, rt_size_t length, rt_size_t mask)
{
    return (start != RT_NULL) &&
           (((uintptr_t)start & mask) || (length & mask));
}

static inline int _not_support(rt_size_t flags)
{
    rt_size_t support_ops = (MMF_PREFETCH | MMF_MAP_FIXED | MMF_TEXT);
    return flags & ~(support_ops | _MMF_ALIGN_MASK);
}
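
/*
 * A worked example of the overflow guard above, assuming a 64-bit build:
 * _IS_OVERFLOW(start, length) tests length > 0ul - (uintptr_t)start, i.e.
 * whether start + length would wrap past the top of the address space.
 * With start = 0xFFFFFFFFFFFFF000 and length = 0x2000, the right-hand side
 * is 0x1000, so the check fires and the range is rejected before any
 * wrapping arithmetic can occur.
 */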
int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
                  rt_size_t attr, mm_flag_t flags, rt_mem_obj_t mem_obj,
                  rt_size_t offset)
{
    /* TODO check not in atomic context: irq, spinlock, local intr disable... */
    int err;
    rt_varea_t varea;

    if (!aspace || !addr || !mem_obj || length == 0 ||
        _not_in_range(*addr, length, aspace->start, aspace->size))
    {
        err = -RT_EINVAL;
        LOG_I("%s: Invalid input", __func__);
    }
    else if (_not_support(flags))
    {
        LOG_I("%s: no support flags 0x%p", __func__, flags);
        err = -RT_ENOSYS;
    }
    else
    {
        varea = _varea_create(*addr, length);

        if (varea)
        {
            err = _mm_aspace_map(aspace, varea, attr, flags, mem_obj, offset);
            if (err != RT_EOK)
            {
                rt_free(varea);
            }
        }
        else
        {
            LOG_W("%s: mm aspace map failed", __func__);
            err = -RT_ENOMEM;
        }
    }

    if (err != RT_EOK)
    {
        *addr = NULL;
    }
    else
    {
        *addr = varea->start;
    }
    return err;
}
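
/*
 * Illustrative sketch of an anonymous mapping (comment only; the memory
 * object and attribute names are assumptions borrowed from other mm/mmu
 * headers, not defined in this file):
 *
 *     void *va = RT_NULL; // no preferred address, let _find_free() choose
 *     int rc = rt_aspace_map(&rt_kernel_space, &va, 4 * ARCH_PAGE_SIZE,
 *                            MMU_MAP_K_RWCB, MMF_PREFETCH,
 *                            &rt_mm_dummy_mapper, 0);
 *     if (rc == RT_EOK)
 *     {
 *         // pages are committed up front because of MMF_PREFETCH
 *         rt_aspace_unmap(&rt_kernel_space, va, 4 * ARCH_PAGE_SIZE);
 *     }
 */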
int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
                         rt_size_t length, rt_size_t attr, mm_flag_t flags,
                         rt_mem_obj_t mem_obj, rt_size_t offset)
{
    int err;

    if (!aspace || !varea || !addr || !mem_obj || length == 0 ||
        _not_in_range(*addr, length, aspace->start, aspace->size))
    {
        err = -RT_EINVAL;
        LOG_W("%s: Invalid input", __func__);
    }
    else if (_not_support(flags))
    {
        LOG_W("%s: no support flags", __func__);
        err = -RT_ENOSYS;
    }
    else
    {
        varea->size = length;
        varea->start = *addr;
        flags |= MMF_STATIC_ALLOC;
        err = _mm_aspace_map(aspace, varea, attr, flags, mem_obj, offset);
    }

    if (err != RT_EOK)
    {
        *addr = NULL;
    }
    else
    {
        *addr = varea->start;
    }
    return err;
}
int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
                       rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
                       void **ret_va)
{
    int err;
    void *vaddr;

    if (!aspace || !hint || !hint->limit_range_size || !hint->map_size)
    {
        LOG_W("%s: Invalid input", __func__);
        err = -RT_EINVAL;
    }
    else if (_not_align(hint->prefer, hint->map_size, ARCH_PAGE_MASK))
    {
        LOG_W("%s: not aligned", __func__);
        err = -RT_EINVAL;
    }
    else if (_not_in_range(hint->limit_start, hint->limit_range_size, aspace->start,
                           aspace->size) ||
             _not_in_range(hint->prefer, hint->map_size, aspace->start,
                           aspace->size))
    {
        LOG_W("%s: not in range", __func__);
        err = -RT_EINVAL;
    }
    else
    {
        WR_LOCK(aspace);
        err = _varea_install(aspace, varea, hint);
        WR_UNLOCK(aspace);

        if (err == RT_EOK)
        {
            _varea_post_install(varea, aspace, attr, 0, NULL, pa_off);

            vaddr = varea->start;

            err = _do_named_map(aspace, varea->start, varea->size,
                                (rt_size_t)pa_off, attr);
            if (err != RT_EOK)
            {
                _varea_uninstall(varea);
            }
        }
    }

    if (ret_va)
    {
        if (err == RT_EOK)
            *ret_va = vaddr;
        else
            *ret_va = RT_NULL;
    }
    return err;
}
int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
                      rt_size_t pa_off, void **ret_va)
{
    int err;

    if (hint)
    {
        rt_varea_t varea = _varea_create(hint->prefer, hint->map_size);
        if (varea)
        {
            err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
            if (err != RT_EOK)
            {
                rt_free(varea);
            }
        }
        else
        {
            err = -RT_ENOMEM;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }
    return err;
}
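
/*
 * Illustrative sketch of a physical (e.g. device MMIO) mapping; the physical
 * base and the attribute name are assumptions for the example. Note that
 * pa_off is a page frame number, matching the convention of _do_named_map():
 *
 *     struct rt_mm_va_hint hint = {
 *         .prefer = RT_NULL,
 *         .map_size = ARCH_PAGE_SIZE,
 *         .limit_start = rt_kernel_space.start,
 *         .limit_range_size = rt_kernel_space.size,
 *         .flags = 0,
 *     };
 *     void *uart_va;
 *     int rc = rt_aspace_map_phy(&rt_kernel_space, &hint, MMU_MAP_K_DEVICE,
 *                                0x10000000UL >> MM_PAGE_SHIFT, &uart_va);
 */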
int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
                             rt_mm_va_hint_t hint, rt_size_t attr,
                             rt_size_t pa_off, void **ret_va)
{
    int err;

    if (varea && hint)
    {
        varea->start = hint->prefer;
        varea->size = hint->map_size;
        hint->flags |= (MMF_MAP_FIXED | MMF_STATIC_ALLOC);
        LOG_D("%s: start %p size %p phy at %p", __func__, varea->start, varea->size, pa_off << MM_PAGE_SHIFT);
        err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
    }
    else
    {
        err = -RT_EINVAL;
    }
    return err;
}
void _aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length)
{
    struct _mm_range range = {addr, addr + length - 1};
    rt_varea_t varea = _aspace_bst_search_overlap(aspace, range);

    if (varea == RT_NULL)
    {
        LOG_I("%s: No such entry found at %p with %lx bytes\n", __func__, addr, length);
    }

    while (varea)
    {
        _varea_uninstall(varea);
        if (!(varea->flag & MMF_STATIC_ALLOC))
        {
            rt_free(varea);
        }
        varea = _aspace_bst_search_overlap(aspace, range);
    }
}

int rt_aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length)
{
    if (!aspace)
    {
        LOG_I("%s: Invalid input", __func__);
        return -RT_EINVAL;
    }

    if (_not_in_range(addr, length, aspace->start, aspace->size))
    {
        LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
              aspace->start, aspace->start + aspace->size);
        return -RT_EINVAL;
    }

    _aspace_unmap(aspace, addr, length);
    return RT_EOK;
}
static inline void *_lower(void *a, void *b)
{
    return a < b ? a : b;
}

static inline void *_align(void *va, rt_ubase_t align_mask)
{
    return (void *)((rt_ubase_t)(va + ~align_mask) & align_mask);
}
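
/*
 * _align() rounds va up to the boundary encoded in align_mask, where the
 * mask keeps the high bits. A worked example with 4 KiB alignment:
 * align_mask = ~0xFFFul, so ~align_mask = 0xFFF and
 * _align((void *)0x1234, ~0xFFFul) == (0x1234 + 0xFFF) & ~0xFFF == 0x2000;
 * an already-aligned va passes through unchanged.
 */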
static void *_ascending_search(rt_varea_t varea, rt_size_t req_size,
                               rt_ubase_t align_mask, struct _mm_range limit)
{
    void *ret = RT_NULL;

    while (varea && varea->start < limit.end)
    {
        void *candidate = varea->start + varea->size;
        candidate = _align(candidate, align_mask);

        if (candidate > limit.end || limit.end - candidate + 1 < req_size)
            break;

        rt_varea_t nx_va = ASPACE_VAREA_NEXT(varea);
        if (nx_va)
        {
            rt_size_t gap_size =
                _lower(limit.end, nx_va->start - 1) - candidate + 1;

            if (gap_size >= req_size)
            {
                ret = candidate;
                break;
            }
        }
        else
        {
            ret = candidate;
        }
        varea = nx_va;
    }
    return ret;
}
/** find a suitable place in [limit_start, limit_end] */
static void *_find_head_and_asc_search(rt_aspace_t aspace, rt_size_t req_size,
                                       rt_ubase_t align_mask,
                                       struct _mm_range limit)
{
    void *va = RT_NULL;

    rt_varea_t varea = _aspace_bst_search_exceed(aspace, limit.start);
    if (varea)
    {
        void *candidate = _align(limit.start, align_mask);
        rt_size_t gap_size = varea->start - candidate;
        if (gap_size >= req_size)
        {
            rt_varea_t former = _aspace_bst_search(aspace, limit.start);
            if (former)
            {
                candidate = _align(former->start + former->size, align_mask);
                gap_size = varea->start - candidate;

                if (gap_size >= req_size)
                    va = candidate;
                else
                    va = _ascending_search(varea, req_size, align_mask, limit);
            }
            else
            {
                va = candidate;
            }
        }
        else
        {
            va = _ascending_search(varea, req_size, align_mask, limit);
        }
    }
    else
    {
        void *candidate;
        rt_size_t gap_size;

        rt_varea_t former = _aspace_bst_search(aspace, limit.start);
        candidate = former ? former->start + former->size : limit.start;
        candidate = _align(candidate, align_mask);
        gap_size = limit.end - candidate + 1;

        if (gap_size >= req_size)
            va = candidate;
    }
    return va;
}
static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
                        void *limit_start, rt_size_t limit_size,
                        mm_flag_t flags)
{
    rt_varea_t varea = NULL;
    void *va = RT_NULL;
    struct _mm_range limit = {limit_start, limit_start + limit_size - 1};

    rt_ubase_t align_mask = ~0ul;
    if (flags & MMF_REQUEST_ALIGN)
    {
        align_mask = ~((1 << MMF_GET_ALIGN(flags)) - 1);
    }

    if (prefer != RT_NULL)
    {
        prefer = _align(prefer, align_mask);
        struct _mm_range range = {prefer, prefer + req_size - 1};
        varea = _aspace_bst_search_overlap(aspace, range);

        if (!varea)
        {
            va = prefer;
        }
        else if (flags & MMF_MAP_FIXED)
        {
            /* OVERLAP */
        }
        else
        {
            va = _ascending_search(varea, req_size, align_mask, limit);
            if (va == RT_NULL)
            {
                limit.end = varea->start - 1;
                va = _find_head_and_asc_search(aspace, req_size, align_mask,
                                               limit);
            }
        }
    }
    else
    {
        va = _find_head_and_asc_search(aspace, req_size, align_mask, limit);
    }
    return va;
}
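
/*
 * Alignment-request example for the mask computed above: with
 * MMF_REQUEST_ALIGN set and MMF_GET_ALIGN(flags) == 21, align_mask becomes
 * ~((1 << 21) - 1), so every candidate from _align() lands on a 2 MiB
 * boundary; handy when the caller wants the region eligible for a future
 * huge-page mapping (see the huge TLB TODOs earlier in this file).
 */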
int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
{
    int err = RT_EOK;
    rt_varea_t varea = _aspace_bst_search(aspace, addr);
    void *end = addr + (npage << ARCH_PAGE_SHIFT);

    if (!varea)
    {
        LOG_W("%s: varea does not exist", __func__);
        err = -RT_ENOENT;
    }
    else if (addr >= end || (rt_size_t)addr & ARCH_PAGE_MASK ||
             _not_in_range(addr, npage << ARCH_PAGE_SHIFT, varea->start,
                           varea->size))
    {
        LOG_W("%s: Unaligned parameter or out of range", __func__);
        err = -RT_EINVAL;
    }
    else
    {
        err = _do_prefetch(aspace, varea, addr, npage << ARCH_PAGE_SHIFT);
    }
    return err;
}

int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
{
    return -RT_ENOSYS;
}
int mm_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd)
{
    int err;
    rt_varea_t varea = _aspace_bst_search(aspace, addr);

    if (varea)
    {
        err = rt_hw_mmu_control(aspace, varea->start, varea->size, cmd);
    }
    else
    {
        err = -RT_ENOENT;
    }
    return err;
}
int rt_aspace_traversal(rt_aspace_t aspace,
                        int (*fn)(rt_varea_t varea, void *arg), void *arg)
{
    rt_varea_t varea = ASPACE_VAREA_FIRST(aspace);
    while (varea)
    {
        fn(varea, arg);
        varea = ASPACE_VAREA_NEXT(varea);
    }
    return 0;
}
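
/*
 * Illustrative traversal sketch (the callback and counter are hypothetical),
 * in the same style as the _dump() callback below:
 *
 *     static int _count_cb(rt_varea_t varea, void *arg)
 *     {
 *         *(rt_size_t *)arg += 1;
 *         return 0;
 *     }
 *
 *     rt_size_t nr = 0;
 *     rt_aspace_traversal(&rt_kernel_space, _count_cb, &nr);
 *     rt_kprintf("%d varea(s)\n", (int)nr);
 */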
static int _dump(rt_varea_t varea, void *arg)
{
    if (varea->mem_obj && varea->mem_obj->get_name)
    {
        rt_kprintf("[%p - %p] %s\n", varea->start, varea->start + varea->size,
                   varea->mem_obj->get_name(varea));
    }
    else
    {
        rt_kprintf("[%p - %p] phy-map\n", varea->start, varea->start + varea->size);
        rt_kprintf("\t\\_ paddr = %p\n", varea->offset << MM_PAGE_SHIFT);
    }
    return 0;
}

void rt_aspace_print_all(rt_aspace_t aspace)
{
    rt_aspace_traversal(aspace, _dump, NULL);
}