/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-14     WangXiaoyao  the first version
 * 2023-08-17     Shell        Add unmap_range for MAP_PRIVATE,
 *                             support MAP_FIXED in aspace_map(), and
 *                             add better support of permission in mmap
 */

/**
 * @brief Virtual Address Space
 */

#include <rtthread.h>

#define DBG_TAG "mm.aspace"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "avl_adpt.h"
#include "mm_private.h"

#include <mmu.h>
#include <tlb.h>

#include <stddef.h>
#include <stdint.h>

#define ALIGNED(addr) (!((rt_size_t)(addr) & ARCH_PAGE_MASK))

static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
                        void *limit_start, rt_size_t limit_size,
                        mm_flag_t flags);
static int _unmap_range_locked(rt_aspace_t aspace, void *addr, size_t length);

struct rt_aspace rt_kernel_space;

static int _init_lock(rt_aspace_t aspace)
{
    int err;

    MM_PGTBL_LOCK_INIT(aspace);
    err = rt_mutex_init(&aspace->bst_lock, "aspace", RT_IPC_FLAG_FIFO);

    return err;
}

rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl)
{
    int err = RT_EOK;

    if (pgtbl)
    {
        aspace->page_table = pgtbl;
        aspace->start = start;
        aspace->size = length;
        aspace->private_object = RT_NULL;

        err = _aspace_bst_init(aspace);
        if (err == RT_EOK)
        {
            /**
             * It has the side effect that the lock will be added to the
             * object system management. So it must be paired with a detach
             * once the initialization returns successfully.
             */
            err = _init_lock(aspace);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl)
{
    rt_aspace_t aspace = NULL;
    int err;

    RT_ASSERT(length <= 0 - (rt_size_t)start);
    aspace = (rt_aspace_t)rt_malloc(sizeof(*aspace));
    if (aspace)
    {
        rt_memset(aspace, 0, sizeof(*aspace));

        err = rt_aspace_init(aspace, start, length, pgtbl);
        if (err != RT_EOK)
        {
            LOG_W("%s(%p, %lx, %p): failed with code %d\n", __func__,
                  start, length, pgtbl, err);
            rt_free(aspace);
            aspace = RT_NULL;
        }
    }

    return aspace;
}

void rt_aspace_detach(rt_aspace_t aspace)
{
    rt_varea_t varea;

    WR_LOCK(aspace);
    varea = ASPACE_VAREA_FIRST(aspace);
    while (varea)
    {
        rt_varea_t prev = varea;
        varea = ASPACE_VAREA_NEXT(varea);
        _varea_uninstall_locked(prev);
        if (VAREA_NOT_STATIC(prev))
        {
            rt_free(prev);
        }
    }
    WR_UNLOCK(aspace);

    rt_aspace_anon_ref_dec(aspace->private_object);

    rt_mutex_detach(&aspace->bst_lock);
}

void rt_aspace_delete(rt_aspace_t aspace)
{
    RT_ASSERT(aspace);
    rt_aspace_detach(aspace);
    rt_free(aspace);
}

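/**
 * Illustrative usage sketch (not part of the original file): create a
 * standalone address space over a platform-provided page table and release
 * it again. `my_pgtbl_va` and the base/size values are placeholders.
 *
 * @code
 *     void *my_pgtbl_va = platform_alloc_pgtbl();   // hypothetical helper
 *     rt_aspace_t as = rt_aspace_create((void *)0x1000000, 0x10000000,
 *                                       my_pgtbl_va);
 *     if (as)
 *     {
 *         // ... rt_aspace_map()/rt_aspace_unmap() against `as` ...
 *         rt_aspace_delete(as);  // uninstalls every varea, then frees `as`
 *     }
 * @endcode
 */
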
rt_inline rt_size_t _get_effect_attr(rt_aspace_t aspace, rt_varea_t varea)
{
    rt_size_t attr = varea->attr;

    /* no write permission for user on a private mapping */
    if (rt_varea_is_private_locked(varea))
        attr = rt_hw_mmu_attr_rm_perm(attr, RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE);

    return attr;
}

static int _do_named_map(rt_aspace_t aspace, rt_varea_t varea, void *vaddr,
                         rt_size_t length, rt_size_t offset, rt_size_t attr)
{
    LOG_D("%s: va %p length %p", __func__, vaddr, length);
    int err = RT_EOK;

    /* it's ensured by caller that (void *)end will not overflow */
    void *phyaddr = (void *)(offset << MM_PAGE_SHIFT);
    void *ret;

    attr = _get_effect_attr(aspace, varea);
    ret = rt_hw_mmu_map(aspace, vaddr, phyaddr, length, attr);
    if (ret == RT_NULL)
    {
        err = -RT_ERROR;
    }

    if (err == RT_EOK)
        rt_hw_tlb_invalidate_range(aspace, vaddr, length, ARCH_PAGE_SIZE);

    return err;
}

rt_inline void _do_page_fault(struct rt_aspace_fault_msg *msg, rt_size_t off,
                              void *vaddr, rt_mem_obj_t mem_obj,
                              rt_varea_t varea)
{
    msg->off = off;
    msg->fault_vaddr = vaddr;
    msg->fault_op = MM_FAULT_OP_READ;
    msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
    msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
    msg->response.vaddr = 0;
    msg->response.size = 0;

    RT_ASSERT(mem_obj->on_page_fault);
    mem_obj->on_page_fault(varea, msg);
}

int rt_varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
    rt_aspace_t aspace;
    int err = -RT_ERROR;
    if (msg->response.status == MM_FAULT_STATUS_OK)
    {
        /**
         * the page returned by the handler is not checked,
         * since not much can be assumed about it
         */
        char *store = msg->response.vaddr;
        rt_size_t store_sz = msg->response.size;
        if ((char *)msg->fault_vaddr + store_sz > (char *)varea->start + varea->size)
        {
            LOG_W("%s: too much (0x%lx) of buffer on vaddr %p is provided",
                  __func__, store_sz, msg->fault_vaddr);
        }
        else
        {
            void *map;
            rt_size_t attr;
            void *v_addr = msg->fault_vaddr;
            void *p_addr = store + PV_OFFSET;

            aspace = varea->aspace;
            RT_ASSERT(aspace);

            attr = _get_effect_attr(aspace, varea);
            map = rt_hw_mmu_map(aspace, v_addr, p_addr, store_sz, attr);

            if (!map)
            {
                LOG_W("%s: MMU mapping failed for va %p to %p of %lx", __func__,
                      msg->fault_vaddr, store + PV_OFFSET, store_sz);
            }
            else
            {
                rt_hw_tlb_invalidate_range(varea->aspace, v_addr, store_sz, ARCH_PAGE_SIZE);
                err = RT_EOK;
            }
        }
    }
    else if (msg->response.status == MM_FAULT_STATUS_OK_MAPPED)
    {
        if (rt_hw_mmu_v2p(varea->aspace, msg->fault_vaddr) == ARCH_MAP_FAILED)
        {
            LOG_D("%s: no page is mapped on %p", __func__, msg->fault_vaddr);
        }
        err = RT_EOK;
    }
    else
    {
        LOG_W("%s: failed on va %p inside varea %p(%s)", __func__, msg->fault_vaddr, varea,
              varea->mem_obj->get_name ? varea->mem_obj->get_name(varea) : "unknown");
    }
    return err;
}

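/**
 * Illustrative sketch of a pager-side on_page_fault handler cooperating with
 * rt_varea_map_with_msg() above. Only the msg->response fields come from this
 * file; the handler itself and its allocation policy are hypothetical.
 *
 * @code
 *     static void my_on_page_fault(struct rt_varea *varea,
 *                                  struct rt_aspace_fault_msg *msg)
 *     {
 *         void *page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
 *         if (page)
 *         {
 *             msg->response.status = MM_FAULT_STATUS_OK;
 *             msg->response.vaddr = page;           // kernel VA of the store
 *             msg->response.size = ARCH_PAGE_SIZE;  // bytes provided
 *         }
 *         // on failure, status stays MM_FAULT_STATUS_UNRECOVERABLE
 *     }
 * @endcode
 */
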
/* allocate memory pages for the mapping range */
static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
                        rt_size_t size)
{
    int err = RT_EOK;

    /* it's ensured by the caller that start & size are page-aligned */
    char *end = (char *)start + size;
    char *vaddr = start;
    rt_size_t off = varea->offset + ((vaddr - (char *)varea->start) >> ARCH_PAGE_SHIFT);

    while (vaddr != end)
    {
        /* TODO try to map with huge TLB, when flag & HUGEPAGE */
        struct rt_aspace_fault_msg msg;
        _do_page_fault(&msg, off, vaddr, varea->mem_obj, varea);

        if (rt_varea_map_with_msg(varea, &msg))
        {
            err = -RT_ENOMEM;
            break;
        }
        /**
         * It's hard to identify the mapping pattern of a customized handler,
         * so we terminate the prefetch process in that case
         */
        if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
            break;

        vaddr += msg.response.size;
        off += msg.response.size >> ARCH_PAGE_SHIFT;
    }

    return err;
}

static rt_err_t _expand_varea(rt_varea_t varea, void *new_va, rt_size_t size)
{
    rt_err_t error;
    rt_aspace_t aspace;
    void *old_va;

    if (varea->mem_obj && varea->mem_obj->on_varea_expand)
        error = varea->mem_obj->on_varea_expand(varea, new_va, size);
    else
        error = -RT_EPERM;

    if (error == RT_EOK)
    {
        aspace = varea->aspace;
        old_va = varea->start;
        varea->size = size;

        if (old_va != new_va)
        {
            varea->start = new_va;
            varea->offset += ((long)new_va - (long)old_va) >> MM_PAGE_SHIFT;
            _aspace_bst_remove(aspace, varea);
            _aspace_bst_insert(aspace, varea);
        }
    }
    return error;
}

struct _mapping_property {
    rt_size_t attr;
    rt_size_t flags;
    rt_size_t offset;
    struct rt_mem_obj *mem_obj;
};

#define INIT_PROP(obj,off,fl,attr) \
    {.mem_obj = (obj),             \
     .offset = (off),              \
     .flags = (fl),                \
     .attr = (attr),}

static rt_bool_t _contiguous_offset(rt_varea_t neighbour, rt_size_t map_size,
                                    struct _mapping_property *prop)
{
    rt_size_t n_off = neighbour->offset;
    rt_size_t map_off = prop->offset;
    return n_off < map_off ?
           n_off + (neighbour->size >> MM_PAGE_SHIFT) == map_off :
           map_off + (map_size >> MM_PAGE_SHIFT) == n_off;
}

static rt_bool_t _compatible(rt_varea_t neighbour, rt_size_t map_size,
                             struct _mapping_property *prop)
{
    return (prop->attr == neighbour->attr && prop->flags == neighbour->flag &&
            prop->mem_obj == neighbour->mem_obj &&
            _contiguous_offset(neighbour, map_size, prop));
}

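/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages, a neighbour at page offset 0x10 covering 0x4000 bytes (4 pages) is
 * contiguous with a new mapping at page offset 0x14, since 0x10 + 4 == 0x14.
 * Only when attr, flags and mem_obj also match does _compatible() let the
 * two regions merge.
 */
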
rt_inline rt_err_t _migrate_and_release_varea(rt_aspace_t aspace, rt_varea_t to, rt_varea_t from,
                                              rt_err_t (*on_varea_merge)(struct rt_varea *to, struct rt_varea *from))
{
    rt_err_t error;
    error = on_varea_merge(to, from);
    if (error == RT_EOK)
    {
        /* uninstall operand & release the varea */
        _aspace_bst_remove(aspace, from);
        to->size += from->size;

        if (VAREA_NOT_STATIC(from))
            rt_free(from);
    }
    return error;
}

static rt_varea_t _merge_surrounding(rt_aspace_t aspace, rt_varea_t operand,
                                     struct _mapping_property *prop)
{
    int again;
    rt_err_t error;
    int can_merge_fw;
    int can_merge_bw;
    rt_varea_t neighbour;
    char *operand_start;
    size_t operand_size;
    rt_err_t (*on_varea_merge)(struct rt_varea *to, struct rt_varea *from);

    if (operand->mem_obj && operand->mem_obj->on_varea_merge)
    {
        on_varea_merge = operand->mem_obj->on_varea_merge;
        do {
            /* stop iterating unless another merge actually happens */
            again = 0;
            operand_start = operand->start;
            operand_size = operand->size;
            LOG_D("search op_start=%p,op_size=0x%lx", operand_start, operand_size);

            /* find a compatible neighbour if any and set up the direction */
            can_merge_fw = can_merge_bw = 0;
            neighbour = _aspace_bst_search(aspace, operand_start - 1);
            if (!neighbour || !_compatible(neighbour, operand_size, prop))
            {
                neighbour = _aspace_bst_search(aspace, operand_start + operand_size);
                if (neighbour && _compatible(neighbour, operand_size, prop))
                    can_merge_bw = 1;
            }
            else
                can_merge_fw = 1;

            if (can_merge_fw || can_merge_bw)
            {
                /* merge operand with its predecessor or successor */
                if (can_merge_fw)
                {
                    error = _migrate_and_release_varea(aspace, neighbour, operand, on_varea_merge);
                    operand = neighbour;
                }
                else
                    error = _migrate_and_release_varea(aspace, operand, neighbour, on_varea_merge);

                if (error == RT_EOK)
                    again = 1;
            }
        } while (again);
    }
    return operand;
}

/**
 * Brief: expand and merge surrounding vareas until no longer possible, and
 * set up pvarea if a new virtual address region is installed
 */
static rt_bool_t _try_expand_and_merge_okay(rt_aspace_t aspace, rt_varea_t *pvarea,
                                            void *alloc_va, rt_mm_va_hint_t hint,
                                            struct _mapping_property *prop)
{
    int can_expand_fw;
    int can_expand_bw;
    rt_varea_t neighbour;
    rt_varea_t new_region_at = RT_NULL;
    rt_bool_t install_ok = RT_FALSE;
    char *operand_start = alloc_va;
    size_t operand_size = hint->map_size;

    /* find a compatible neighbour if any and set up the direction */
    LOG_D("search op_start=%p,op_size=0x%lx", operand_start, operand_size);
    can_expand_fw = can_expand_bw = 0;
    neighbour = _aspace_bst_search(aspace, operand_start - 1);
    if (!neighbour || !_compatible(neighbour, operand_size, prop))
    {
        neighbour = _aspace_bst_search(aspace, operand_start + operand_size);
        if (neighbour && _compatible(neighbour, operand_size, prop))
            can_expand_bw = 1;
    }
    else
        can_expand_fw = 1;

    if (can_expand_fw || can_expand_bw)
    {
        /* expand the varea at its head or tail */
        if (can_expand_fw)
            operand_start = neighbour->start;
        operand_size += neighbour->size;

        LOG_D("expand op_start=%p,op_size=0x%lx", operand_start, operand_size);
        if (_expand_varea(neighbour, operand_start, operand_size) == RT_EOK)
        {
            new_region_at = _merge_surrounding(aspace, neighbour, prop);
            *pvarea = new_region_at;
            install_ok = RT_TRUE;
        }
    }

    return install_ok;
}

static rt_varea_t _varea_create(void *start, rt_size_t size);

static int _insert_new_varea(rt_aspace_t aspace, rt_varea_t *pvarea,
                             void *alloc_va, rt_mm_va_hint_t hint)
{
    int err;
    rt_varea_t varea = *pvarea;
    if (varea == RT_NULL)
    {
        /* no preallocated buffer is provided, so create one */
        varea = _varea_create(hint->prefer, hint->map_size);
        hint->flags &= ~MMF_STATIC_ALLOC;
        *pvarea = varea;
    }

    if (varea)
    {
        varea->start = alloc_va;
        _aspace_bst_insert(aspace, varea);
        err = RT_EOK;
    }
    else
    {
        LOG_W("%s: Out of memory", __func__);
        err = -RT_ENOMEM;
    }

    return err;
}

static inline void _varea_post_install(rt_varea_t varea, rt_aspace_t aspace,
                                       rt_size_t attr, rt_size_t flags,
                                       rt_mem_obj_t mem_obj, rt_size_t offset)
{
    varea->aspace = aspace;
    varea->attr = attr;
    varea->mem_obj = mem_obj;
    varea->flag = flags;
    varea->offset = offset;

    if (varea->mem_obj && varea->mem_obj->on_varea_open)
        varea->mem_obj->on_varea_open(varea);
}

/**
 * Install a new virtual address region into the address space.
 * pvarea will be set to the varea where the new virtual memory is installed,
 * which can be a newly created or an existing one.
 *
 * Note: caller must hold the aspace lock
 */
static int _varea_install(rt_aspace_t aspace, rt_varea_t *pvarea,
                          rt_mm_va_hint_t hint, struct _mapping_property *prop,
                          void **pva)
{
    void *alloc_va;
    int err = RT_EOK;

    if (hint->flags & MMF_MAP_FIXED)
    {
        alloc_va = hint->prefer;
        err = _unmap_range_locked(aspace, alloc_va, hint->map_size);
        if (err != RT_EOK)
        {
            /* Note: MAP_FIXED must fail if unable to unmap the existing mapping */
            LOG_I("%s: unmap range failed in %p with size 0x%lx, error=%d", __func__, alloc_va, hint->map_size, err);
        }
    }
    else
    {
        alloc_va =
            _find_free(aspace, hint->prefer, hint->map_size, hint->limit_start,
                       hint->limit_range_size, hint->flags);
        if (alloc_va == RT_NULL)
            err = -RT_ENOSPC;
    }

    if (alloc_va != RT_NULL)
    {
        /* TODO: fix to private mapping directly */
        if (!_try_expand_and_merge_okay(aspace, pvarea, alloc_va, hint, prop))
        {
            err = _insert_new_varea(aspace, pvarea, alloc_va, hint);
            if (err == RT_EOK)
                _varea_post_install(*pvarea, aspace, prop->attr, prop->flags,
                                    prop->mem_obj, prop->offset);
        }

        if (err == RT_EOK)
        {
            RT_ASSERT(*pvarea);
            *pva = alloc_va;
        }
    }

    return err;
}

/**
 * restore the context modified by varea install
 */
void _varea_uninstall_locked(rt_varea_t varea)
{
    rt_aspace_t aspace = varea->aspace;

    if (varea->mem_obj && varea->mem_obj->on_varea_close)
        varea->mem_obj->on_varea_close(varea);
    else
    {
        rt_hw_mmu_unmap(aspace, varea->start, varea->size);
        rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
    }

    _aspace_bst_remove(aspace, varea);
}

int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t *pvarea, void **addr,
                   rt_size_t length, rt_size_t attr, mm_flag_t flags,
                   rt_mem_obj_t mem_obj, rt_size_t offset)
{
    int err = RT_EOK;
    rt_varea_t varea;
    struct _mapping_property prop = INIT_PROP(mem_obj, offset, flags, attr);

    WR_LOCK(aspace);

    /**
     * @brief .prefer and .map_size are taken from the varea set up by the caller;
     * .limit_start and .limit_range_size default to the whole range of the aspace;
     * .flags comes from the parameter and is stored in the varea if the
     * install succeeds
     */
    struct rt_mm_va_hint hint = {.prefer = *addr,
                                 .map_size = length,
                                 .limit_start = aspace->start,
                                 .limit_range_size = aspace->size,
                                 .flags = flags};

    if (mem_obj->hint_free)
    {
        /* the mem object can constrain the mapping range by modifying the hint */
        mem_obj->hint_free(&hint);
    }

    /* try to allocate a virtual address region for the varea */
    err = _varea_install(aspace, pvarea, &hint, &prop, addr);

    if (err == RT_EOK)
    {
        varea = *pvarea;
        if (MMF_TEST_CNTL(flags, MMF_PREFETCH))
        {
            /* do the MMU & TLB business */
            err = _do_prefetch(aspace, varea, varea->start, varea->size);
            if (err)
            {
                LOG_I("%s: failed to prefetch page into varea(%s)",
                      __func__, VAREA_NAME(varea));

                /* restore data structure and MMU */
                _varea_uninstall_locked(varea);
                if (!(varea->flag & MMF_STATIC_ALLOC))
                    rt_free(varea);
            }
        }
    }

    WR_UNLOCK(aspace);
    return err;
}

static rt_varea_t _varea_create(void *start, rt_size_t size)
{
    rt_varea_t varea;
    varea = (rt_varea_t)rt_malloc(sizeof(struct rt_varea));
    if (varea)
    {
        varea->start = start;
        varea->size = size;
    }
    return varea;
}

#define _IS_OVERFLOW(start, length) ((length) > (0ul - (uintptr_t)(start)))
#define _IS_OVERSIZE(start, length, limit_start, limit_size) \
    (((length) + (rt_size_t)((char *)(start) - (char *)(limit_start))) > (limit_size))

static inline int _not_in_range(void *start, rt_size_t length,
                                void *limit_start, rt_size_t limit_size)
{
    /* assuming (base + length) will not overflow except (0) */
    int rc = start != RT_NULL
                 ? (_IS_OVERFLOW(start, length) || start < limit_start ||
                    _IS_OVERSIZE(start, length, limit_start, limit_size))
                 : length > limit_size;
    if (rc)
        LOG_D("%s: [%p : %p] [%p : %p]", __func__, start, length, limit_start, limit_size);
    return rc;
}

static inline int _not_align(void *start, rt_size_t length, rt_size_t mask)
{
    return (start != RT_NULL) &&
           (((uintptr_t)start & mask) || (length & mask));
}

/** check whether the flags are currently supported */
static inline int _not_support(rt_size_t flags)
{
    rt_size_t support_ops = MMF_CREATE(((__MMF_INVALID - 1) << 1) - 1, 1);
    return flags & ~(support_ops);
}

int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
                  rt_size_t attr, mm_flag_t flags, rt_mem_obj_t mem_obj,
                  rt_size_t offset)
{
    int err;
    rt_varea_t varea = RT_NULL;

    RT_DEBUG_SCHEDULER_AVAILABLE(1);

    if (!aspace || !addr || !mem_obj || length == 0)
    {
        err = -RT_EINVAL;
        LOG_I("%s(%p, %p, %lx, %lx, %lx, %p, %lx): Invalid input",
              __func__, aspace, addr, length, attr, flags, mem_obj, offset);
    }
    else if (_not_in_range(*addr, length, aspace->start, aspace->size))
    {
        err = -RT_EINVAL;
        LOG_I("%s(addr:%p, len:%lx): out of range", __func__, *addr, length);
    }
    else if (_not_support(flags))
    {
        LOG_I("%s: unsupported flags 0x%lx", __func__, flags);
        err = -RT_ENOSYS;
    }
    else
    {
        RT_ASSERT((length & ARCH_PAGE_MASK) == 0);
        RT_ASSERT(((long)*addr & ARCH_PAGE_MASK) == 0);
        err = _mm_aspace_map(aspace, &varea, addr, length, attr, flags, mem_obj, offset);
    }

    if (err != RT_EOK)
    {
        *addr = NULL;
    }
    return err;
}

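/**
 * Minimal usage sketch (illustrative): map four prefetched pages into the
 * kernel space and unmap them again. The attribute MMU_MAP_K_RWCB and the
 * backing object rt_mm_dummy_mapper are assumptions standing in for whatever
 * the platform and caller actually provide.
 *
 * @code
 *     void *va = RT_NULL;  // no preferred address; _find_free() chooses one
 *     int rc = rt_aspace_map(&rt_kernel_space, &va, 4 * ARCH_PAGE_SIZE,
 *                            MMU_MAP_K_RWCB, MMF_PREFETCH,
 *                            &rt_mm_dummy_mapper, 0);
 *     if (rc == RT_EOK)
 *         rt_aspace_unmap(&rt_kernel_space, va);
 * @endcode
 */
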
int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
                         rt_size_t length, rt_size_t attr, mm_flag_t flags,
                         rt_mem_obj_t mem_obj, rt_size_t offset)
{
    int err;

    if (!aspace || !varea || !addr || !mem_obj || length == 0 ||
        _not_in_range(*addr, length, aspace->start, aspace->size))
    {
        err = -RT_EINVAL;
        LOG_W("%s: Invalid input", __func__);
    }
    else if (_not_support(flags))
    {
        LOG_W("%s: unsupported flags", __func__);
        err = -RT_ENOSYS;
    }
    else
    {
        varea->size = length;
        varea->start = *addr;
        flags |= MMF_STATIC_ALLOC;

        /**
         * TODO: if the mapping is expanded into a neighbour, the static varea
         * is not used at all
         */
        err = _mm_aspace_map(aspace, &varea, addr, length, attr, flags, mem_obj, offset);
    }

    if (err != RT_EOK)
    {
        *addr = NULL;
    }
    else
    {
        *addr = varea->start;
    }
    return err;
}

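/**
 * Illustrative sketch: embed the varea in static storage so the mapping path
 * performs no heap allocation. All names except the API itself are
 * placeholders (MMU_MAP_K_RWCB and rt_mm_dummy_mapper are assumptions).
 *
 * @code
 *     static struct rt_varea my_varea;          // hypothetical static storage
 *     void *va = my_preferred_page_aligned_va;  // hypothetical address
 *     int rc = rt_aspace_map_static(&rt_kernel_space, &my_varea, &va,
 *                                   ARCH_PAGE_SIZE, MMU_MAP_K_RWCB, 0,
 *                                   &rt_mm_dummy_mapper, 0);
 * @endcode
 */
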
int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
                       rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
                       void **ret_va)
{
    int err;
    void *vaddr;

    if (!aspace || !hint || !hint->limit_range_size || !hint->map_size)
    {
        LOG_W("%s: Invalid input", __func__);
        err = -RT_EINVAL;
    }
    else if (_not_align(hint->prefer, hint->map_size, ARCH_PAGE_MASK))
    {
        LOG_W("%s: not aligned", __func__);
        err = -RT_EINVAL;
    }
    else if (_not_in_range(hint->limit_start, hint->limit_range_size, aspace->start,
                           aspace->size) ||
             _not_in_range(hint->prefer, hint->map_size, aspace->start,
                           aspace->size))
    {
        LOG_W("%s: not in range", __func__);
        err = -RT_EINVAL;
    }
    else
    {
        struct _mapping_property prop = INIT_PROP(0, pa_off, hint->flags, attr);

        WR_LOCK(aspace);
        err = _varea_install(aspace, &varea, hint, &prop, &vaddr);
        if (err == RT_EOK)
        {
            err = _do_named_map(aspace, varea, varea->start, varea->size,
                                (rt_size_t)pa_off, attr);
            if (err != RT_EOK)
            {
                _varea_uninstall_locked(varea);
            }
        }
        WR_UNLOCK(aspace);
    }

    if (ret_va)
    {
        if (err == RT_EOK)
            *ret_va = vaddr;
        else
            *ret_va = RT_NULL;
    }

    return err;
}

int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
                      rt_size_t pa_off, void **ret_va)
{
    int err;

    if (hint)
    {
        rt_varea_t varea = _varea_create(hint->prefer, hint->map_size);
        if (varea)
        {
            hint->flags &= ~MMF_STATIC_ALLOC;
            err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
            if (err != RT_EOK)
            {
                rt_free(varea);
            }
        }
        else
        {
            err = -RT_ENOMEM;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
                             rt_mm_va_hint_t hint, rt_size_t attr,
                             rt_size_t pa_off, void **ret_va)
{
    int err;

    if (varea && hint)
    {
        varea->start = hint->prefer;
        varea->size = hint->map_size;
        hint->flags |= (MMF_STATIC_ALLOC);
        LOG_D("%s: start %p size %p phy at %p", __func__, varea->start, varea->size, pa_off << MM_PAGE_SHIFT);
        err = _mm_aspace_map_phy(aspace, varea, hint, attr, pa_off, ret_va);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

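/**
 * Illustrative sketch: map one page of device MMIO into the kernel space.
 * The physical address 0x50000000 and the MMU_MAP_K_DEVICE attribute are
 * placeholders for the platform's real values.
 *
 * @code
 *     struct rt_mm_va_hint hint = {
 *         .prefer = RT_NULL,                    // no fixed virtual address
 *         .map_size = ARCH_PAGE_SIZE,
 *         .limit_start = rt_kernel_space.start,
 *         .limit_range_size = rt_kernel_space.size,
 *         .flags = 0,
 *     };
 *     void *va;
 *     int rc = rt_aspace_map_phy(&rt_kernel_space, &hint, MMU_MAP_K_DEVICE,
 *                                MM_PA_TO_OFF(0x50000000), &va);
 * @endcode
 */
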
int _aspace_unmap(rt_aspace_t aspace, void *addr)
{
    int error;
    rt_varea_t varea;

    WR_LOCK(aspace);
    varea = _aspace_bst_search(aspace, addr);
    if (varea == RT_NULL)
    {
        LOG_D("%s: No such entry found at %p\n", __func__, addr);
        error = -RT_ENOENT;
    }
    else
    {
        _varea_uninstall_locked(varea);
        if (!(varea->flag & MMF_STATIC_ALLOC))
        {
            rt_free(varea);
        }
        error = RT_EOK;
    }
    WR_UNLOCK(aspace);
    return error;
}

int rt_aspace_unmap(rt_aspace_t aspace, void *addr)
{
    int error;

    if (!aspace)
    {
        LOG_I("%s: Invalid input", __func__);
        error = -RT_EINVAL;
    }
    else if (_not_in_range(addr, 1, aspace->start, aspace->size))
    {
        LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
              aspace->start, (char *)aspace->start + aspace->size);
        error = -RT_EINVAL;
    }
    else
    {
        error = _aspace_unmap(aspace, addr);
    }

    return error;
}

/**
 * Modify the property of an existing varea by shrinking its size. The mem_obj
 * is notified to release the resource.
 */
static rt_err_t _shrink_varea(rt_varea_t varea, void *new_va, rt_size_t size)
{
    rt_err_t error;
    rt_aspace_t aspace;
    void *old_va;

    if (varea->mem_obj && varea->mem_obj->on_varea_shrink)
        error = varea->mem_obj->on_varea_shrink(varea, new_va, size);
    else
        error = -RT_EPERM;

    if (error == RT_EOK)
    {
        aspace = varea->aspace;
        old_va = varea->start;
        varea->size = size;

        if (old_va != new_va)
        {
            varea->start = new_va;
            varea->offset += ((long)new_va - (long)old_va) >> MM_PAGE_SHIFT;
            _aspace_bst_remove(aspace, varea);
            _aspace_bst_insert(aspace, varea);
        }
    }
    return error;
}

static rt_err_t _split_varea(rt_varea_t existed, char *ex_end, char *unmap_start, char *unmap_end, rt_size_t unmap_len)
{
    int error;
    size_t rela_offset;
    rt_varea_t subset;
    char *subset_start;
    size_t subset_size;

    if (existed->mem_obj && existed->mem_obj->on_varea_split)
    {
        subset_start = unmap_end;
        subset_size = ex_end - subset_start;
        subset = _varea_create(subset_start, subset_size);
        if (subset)
        {
            rela_offset = MM_PA_TO_OFF(subset_start - (char *)existed->start);
            subset->aspace = existed->aspace;
            subset->attr = existed->attr;
            subset->mem_obj = existed->mem_obj;
            subset->flag = existed->flag & ~MMF_STATIC_ALLOC;
            subset->offset = existed->offset + rela_offset;

            error = existed->mem_obj->on_varea_split(existed, unmap_start, unmap_len, subset);
            if (error == RT_EOK)
            {
                existed->size = unmap_start - (char *)existed->start;
                _aspace_bst_insert(existed->aspace, subset);
            }

            if (error != RT_EOK)
                rt_free(subset);
        }
        else
            error = -RT_ENOMEM;
    }
    else
        error = -RT_EPERM;

    return error;
}

/* remove overlapped pages from the varea */
static int _remove_overlapped_varea(rt_varea_t existed, char *unmap_start, rt_size_t unmap_len)
{
    int error;
    char *ex_start = existed->start;
    char *ex_end = ex_start + existed->size;
    char *unmap_end = unmap_start + unmap_len;

    if (ex_start < unmap_start)
    {
        if (ex_end > unmap_end)
            error = _split_varea(existed, ex_end, unmap_start, unmap_end, unmap_len);
        else
            error = _shrink_varea(existed, ex_start, unmap_start - ex_start);
    }
    else if (ex_end > unmap_end)
        error = _shrink_varea(existed, unmap_end, ex_end - unmap_end);
    else
    {
        _varea_uninstall_locked(existed);
        if (VAREA_NOT_STATIC(existed))
        {
            rt_free(existed);
        }
        error = RT_EOK;
    }
    return error;
}

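/*
 * Illustrative overview of the cases handled above (example addresses only):
 * given an existing varea [0x1000, 0x5000) and an unmap request of
 *   [0x2000, 0x3000) strictly inside   -> _split_varea() leaves two pieces
 *   [0x3000, 0x6000) clipping the tail -> _shrink_varea() keeps [0x1000, 0x3000)
 *   [0x0000, 0x3000) clipping the head -> _shrink_varea() keeps [0x3000, 0x5000)
 *   [0x1000, 0x5000) covering it all   -> uninstall and free the varea
 */
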
static int _unmap_range_locked(rt_aspace_t aspace, void *addr, size_t length)
{
    int error = RT_EOK;
    rt_varea_t existed;
    struct _mm_range unmap_range;

    unmap_range.start = addr;
    unmap_range.end = addr + length - 1;

    existed = _aspace_bst_search_overlap(aspace, unmap_range);
    while (existed)
    {
        error = _remove_overlapped_varea(existed, addr, length);
        if (error == RT_EOK)
            existed = _aspace_bst_search_overlap(aspace, unmap_range);
        else
            break;
    }

    return error;
}

int rt_aspace_unmap_range(rt_aspace_t aspace, void *addr, size_t length)
{
    int error;

    if (!aspace)
    {
        LOG_I("%s: Invalid input", __func__);
        error = -RT_EINVAL;
    }
    else if (_not_in_range(addr, length, aspace->start, aspace->size))
    {
        LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
              aspace->start, (char *)aspace->start + aspace->size);
        error = -RT_EINVAL;
    }
    else if (!ALIGNED(addr))
    {
        LOG_I("%s(addr=%p): Unaligned address", __func__, addr);
        error = -RT_EINVAL;
    }
    else
    {
        /**
         * Brief: rearrange the address space to remove the existing pages
         * mapped in [unmap_start, unmap_start + unmap_len)
         */
        length = RT_ALIGN(length, ARCH_PAGE_SIZE);
        WR_LOCK(aspace);
        error = _unmap_range_locked(aspace, addr, length);
        WR_UNLOCK(aspace);
    }

    return error;
}

static inline void *_lower(void *a, void *b)
{
    return a < b ? a : b;
}

static inline void *_align(void *va, rt_ubase_t align_mask)
{
    return (void *)((rt_ubase_t)((char *)va + ~align_mask) & align_mask);
}

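/*
 * Worked example (illustrative): for a 16-byte alignment request,
 * align_mask == ~0xf, so _align((void *)0x1001, ~0xf) computes
 * (0x1001 + 0xf) & ~0xf == 0x1010, rounding the address up to the next
 * boundary; already-aligned addresses are returned unchanged.
 */
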
static void *_ascending_search(rt_varea_t varea, rt_size_t req_size,
                               rt_ubase_t align_mask, struct _mm_range limit)
{
    void *ret = RT_NULL;
    while (varea && varea->start < limit.end)
    {
        char *candidate = (char *)varea->start + varea->size;
        candidate = _align(candidate, align_mask);

        if (candidate > (char *)limit.end || (char *)limit.end - candidate + 1 < req_size)
            break;

        rt_varea_t nx_va = ASPACE_VAREA_NEXT(varea);
        if (nx_va)
        {
            rt_size_t gap_size =
                (char *)_lower(limit.end, (char *)nx_va->start - 1) - candidate + 1;
            if (gap_size >= req_size)
            {
                ret = candidate;
                break;
            }
        }
        else
        {
            ret = candidate;
        }
        varea = nx_va;
    }
    return ret;
}

/** find a suitable place in [limit_start, limit_end] */
static void *_find_head_and_asc_search(rt_aspace_t aspace, rt_size_t req_size,
                                       rt_ubase_t align_mask,
                                       struct _mm_range limit)
{
    void *va = RT_NULL;

    rt_varea_t varea = _aspace_bst_search_exceed(aspace, limit.start);
    if (varea)
    {
        char *candidate = _align(limit.start, align_mask);
        rt_size_t gap_size = (char *)varea->start - candidate;
        if (gap_size >= req_size)
        {
            rt_varea_t former = _aspace_bst_search(aspace, limit.start);
            if (former)
            {
                candidate = _align((char *)former->start + former->size, align_mask);
                gap_size = (char *)varea->start - candidate;

                if (gap_size >= req_size)
                    va = candidate;
                else
                    va = _ascending_search(varea, req_size, align_mask, limit);
            }
            else
            {
                va = candidate;
            }
        }
        else
        {
            va = _ascending_search(varea, req_size, align_mask, limit);
        }
    }
    else
    {
        char *candidate;
        rt_size_t gap_size;

        candidate = limit.start;
        candidate = _align(candidate, align_mask);
        gap_size = (char *)limit.end - candidate + 1;

        if (gap_size >= req_size)
            va = candidate;
    }

    return va;
}

static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
                        void *limit_start, rt_size_t limit_size,
                        mm_flag_t flags)
{
    rt_varea_t varea = NULL;
    void *va = RT_NULL;
    struct _mm_range limit = {limit_start, (char *)limit_start + limit_size - 1};

    rt_ubase_t align_mask = ~0ul;
    if (flags & MMF_REQUEST_ALIGN)
    {
        align_mask = ~((1 << MMF_GET_ALIGN(flags)) - 1);
    }

    if (prefer != RT_NULL)
    {
        /* if the preferred region is free, just return it */
        prefer = _align(prefer, align_mask);
        struct _mm_range range = {prefer, (char *)prefer + req_size - 1};
        varea = _aspace_bst_search_overlap(aspace, range);

        if (!varea)
        {
            va = prefer;
        }
        else if (flags & MMF_MAP_FIXED)
        {
            /* OVERLAP; MAP_FIXED cannot fall back to another address */
        }
        else
        {
            /* search from `varea` in ascending order */
            va = _ascending_search(varea, req_size, align_mask, limit);
            if (va == RT_NULL)
            {
                /* rewind to the first range */
                limit.end = (char *)varea->start - 1;
                va = _find_head_and_asc_search(aspace, req_size, align_mask,
                                               limit);
            }
        }
    }
    else
    {
        va = _find_head_and_asc_search(aspace, req_size, align_mask, limit);
    }

    return va;
}

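/*
 * Worked example (illustrative): with MMF_REQUEST_ALIGN set and
 * MMF_GET_ALIGN(flags) == 20, align_mask becomes ~((1 << 20) - 1), so the
 * preferred address and every candidate produced by the searches above are
 * rounded up to a 1 MiB boundary before the gap-size test.
 */
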
int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
{
    int err = RT_EOK;
    rt_varea_t varea;
    char *end = (char *)addr + (npage << ARCH_PAGE_SHIFT);

    WR_LOCK(aspace);
    varea = _aspace_bst_search(aspace, addr);
    WR_UNLOCK(aspace);

    if (!varea)
    {
        LOG_W("%s: varea does not exist", __func__);
        err = -RT_ENOENT;
    }
    else if ((char *)addr >= end || (rt_size_t)addr & ARCH_PAGE_MASK ||
             _not_in_range(addr, npage << ARCH_PAGE_SHIFT, varea->start,
                           varea->size))
    {
        LOG_W("%s: Unaligned parameter or out of range", __func__);
        err = -RT_EINVAL;
    }
    else
    {
        err = _do_prefetch(aspace, varea, addr, npage << ARCH_PAGE_SHIFT);
    }
    return err;
}

int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page)
{
    int err = RT_EOK;
    void *page_pa = rt_kmem_v2p(page);

    if (!varea || !vaddr || !page)
    {
        LOG_W("%s(%p,%p,%p): invalid input", __func__, varea, vaddr, page);
        err = -RT_EINVAL;
    }
    else if (page_pa == ARCH_MAP_FAILED)
    {
        LOG_W("%s: page is not in kernel space", __func__);
        err = -RT_ERROR;
    }
    else if (_not_in_range(vaddr, ARCH_PAGE_SIZE, varea->start, varea->size))
    {
        LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
              vaddr, ARCH_PAGE_SIZE, varea->start, varea->size);
        err = -RT_EINVAL;
    }
    else
    {
        err = _do_named_map(
            varea->aspace,
            varea,
            vaddr,
            ARCH_PAGE_SIZE,
            MM_PA_TO_OFF(page_pa),
            varea->attr
        );
    }

    return err;
}

int rt_varea_unmap_page(rt_varea_t varea, void *vaddr)
{
    void *va_aligned = (void *)RT_ALIGN_DOWN((rt_base_t)vaddr, ARCH_PAGE_SIZE);
    return rt_varea_unmap_range(varea, va_aligned, ARCH_PAGE_SIZE);
}

/**
 * @note Caller should take care of synchronization of its varea among all the map/unmap operations
 */
int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length)
{
    int err;

    if (!varea || !vaddr || !paddr || !length ||
        !ALIGNED(vaddr) || !ALIGNED(paddr) || !(ALIGNED(length)))
    {
        LOG_W("%s(%p,%p,%p,%lx): invalid input", __func__, varea, vaddr, paddr, length);
        err = -RT_EINVAL;
    }
    else if (_not_in_range(vaddr, length, varea->start, varea->size))
    {
        LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
              vaddr, length, varea->start, varea->size);
        err = -RT_EINVAL;
    }
    else
    {
        err = _do_named_map(
            varea->aspace,
            varea,
            vaddr,
            length,
            MM_PA_TO_OFF(paddr),
            varea->attr
        );
    }
    return err;
}

/**
 * @note Caller should take care of synchronization of its varea among all the map/unmap operations
 */
int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length)
{
    int err;
    rt_base_t va_align;

    if (!varea || !vaddr || !length)
    {
        LOG_W("%s(%p,%p,%lx): invalid input", __func__, varea, vaddr, length);
        err = -RT_EINVAL;
    }
    else if (_not_in_range(vaddr, length, varea->start, varea->size))
    {
        LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
              vaddr, length, varea->start, varea->size);
        err = -RT_EINVAL;
    }
    else
    {
        va_align = RT_ALIGN_DOWN((rt_base_t)vaddr, ARCH_PAGE_SIZE);
        rt_hw_mmu_unmap(varea->aspace, (void *)va_align, length);
        rt_hw_tlb_invalidate_range(varea->aspace, (void *)va_align, length, ARCH_PAGE_SIZE);
        err = RT_EOK;
    }
    return err;
}

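/**
 * Illustrative sketch: from inside a mem-object callback that owns the varea,
 * wire a physical frame into it and tear the mapping down again. `frame_pa`
 * is a placeholder for a page-aligned physical address.
 *
 * @code
 *     rt_varea_map_range(varea, varea->start, frame_pa, ARCH_PAGE_SIZE);
 *     // ... access the page through varea->start ...
 *     rt_varea_unmap_range(varea, varea->start, ARCH_PAGE_SIZE);
 * @endcode
 */
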
int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
{
    return -RT_ENOSYS;
}

int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd)
{
    int err;
    rt_varea_t varea;

    WR_LOCK(aspace);
    varea = _aspace_bst_search(aspace, addr);
    WR_UNLOCK(aspace);

    if (varea)
    {
        err = rt_hw_mmu_control(aspace, varea->start, varea->size, cmd);
        if (err == RT_EOK)
        {
            rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
        }
    }
    else
    {
        err = -RT_ENOENT;
    }

    return err;
}

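/**
 * Illustrative sketch: switch the varea containing `va` to non-cacheable,
 * e.g. for a DMA buffer. MMU_CNTL_NONCACHE is assumed to be provided by the
 * platform's enum rt_mmu_cntl.
 *
 * @code
 *     int rc = rt_aspace_control(&rt_kernel_space, va, MMU_CNTL_NONCACHE);
 * @endcode
 */
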
int rt_aspace_traversal(rt_aspace_t aspace,
                        int (*fn)(rt_varea_t varea, void *arg), void *arg)
{
    rt_varea_t varea;
    rt_varea_t next;

    WR_LOCK(aspace);
    varea = ASPACE_VAREA_FIRST(aspace);
    while (varea)
    {
        next = ASPACE_VAREA_NEXT(varea);
        fn(varea, arg);
        varea = next;
    }
    WR_UNLOCK(aspace);

    return 0;
}

static int _dump(rt_varea_t varea, void *arg)
{
    if (varea->mem_obj && varea->mem_obj->get_name)
    {
        rt_kprintf("[%p - %p] %s\n", varea->start, (char *)varea->start + varea->size,
                   varea->mem_obj->get_name(varea));
    }
    else
    {
        rt_kprintf("[%p - %p] phy-map\n", varea->start, (char *)varea->start + varea->size);
        rt_kprintf("\t\\_ paddr = %p\n", varea->offset << MM_PAGE_SHIFT);
    }
    return 0;
}

void rt_aspace_print_all(rt_aspace_t aspace)
{
    rt_aspace_traversal(aspace, _dump, NULL);
}

static int _count_vsz(rt_varea_t varea, void *arg)
{
    rt_base_t *pvsz = arg;
    RT_ASSERT(varea);
    *pvsz = *pvsz + varea->size;
    return 0;
}

rt_base_t rt_aspace_count_vsz(rt_aspace_t aspace)
{
    rt_base_t vsz = 0;
    rt_aspace_traversal(aspace, _count_vsz, &vsz);
    return vsz;
}

static int _dup_varea(rt_varea_t src_varea, void *arg)
{
    int err;
    rt_aspace_t dst = arg;
    rt_aspace_t src = src_varea->aspace;

    void *pa = RT_NULL;
    void *va = RT_NULL;
    rt_mem_obj_t mem_obj = src_varea->mem_obj;

    if (!mem_obj)
    {
        /* duplicate a physical mapping */
        pa = rt_hw_mmu_v2p(src, (void *)src_varea->start);
        RT_ASSERT(pa != ARCH_MAP_FAILED);
        struct rt_mm_va_hint hint = {.flags = src_varea->flag,
                                     .limit_range_size = dst->size,
                                     .limit_start = dst->start,
                                     .prefer = src_varea->start,
                                     .map_size = src_varea->size};
        err = rt_aspace_map_phy(dst, &hint, src_varea->attr,
                                MM_PA_TO_OFF(pa), &va);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size %p", __func__,
                  src_varea->start, src_varea->size);
        }
    }
    else
    {
        /* duplicate a mem_obj-backed mapping at the same virtual address */
        rt_base_t flags = src_varea->flag | MMF_MAP_FIXED;

        /* the duplicate owns its own heap-allocated varea, without prefetch */
        flags &= ~MMF_STATIC_ALLOC;
        flags &= ~MMF_PREFETCH;
        va = src_varea->start;

        err = rt_aspace_map(dst, &va, src_varea->size, src_varea->attr,
                            flags, mem_obj, src_varea->offset);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size %p", __func__,
                  src_varea->start, src_varea->size);
        }
    }

    if (va != (void *)src_varea->start)
    {
        return -1;
    }
    return 0;
}

struct _compare_param {
    rt_aspace_t dst;
    int rc;
};

rt_err_t rt_aspace_duplicate_locked(rt_aspace_t src, rt_aspace_t dst)
{
    return rt_aspace_traversal(src, _dup_varea, dst);
}

rt_inline int _varea_same(rt_varea_t a, rt_varea_t b)
{
    return a->attr == b->attr && a->flag == b->flag && a->mem_obj == b->mem_obj;
}

rt_inline void _dump_varea(rt_varea_t varea)
{
    LOG_W("%s(attr=0x%lx, flags=0x%lx, start=0x%lx, size=0x%lx, mem_obj=%p)", VAREA_NAME(varea), varea->attr, varea->flag, varea->start, varea->size, varea->mem_obj);
}

static int _compare_varea(rt_varea_t src_varea, void *arg)
{
    struct _compare_param *param = arg;
    rt_varea_t dst_varea;
    rt_aspace_t dst = param->dst;
    rt_aspace_t src = src_varea->aspace;

    dst_varea = _aspace_bst_search(dst, src_varea->start);
    if (dst_varea)
    {
        char *buf1 = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
        char *buf2 = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
        char *vend = (char *)src_varea->start + src_varea->size;
        for (char *i = src_varea->start; i < vend; i += ARCH_PAGE_SIZE)
        {
            int rc;
            rt_aspace_page_get(src, i, buf1);
            rt_aspace_page_get(dst, i, buf2);
            rc = memcmp(buf1, buf2, ARCH_PAGE_SIZE);
            if (rc)
            {
                if (param->rc == 0)
                    param->rc = rc;
                LOG_E("%s(a_varea=%s, b_varea=%s)", __func__, VAREA_NAME(src_varea), VAREA_NAME(dst_varea));
                _dump_varea(src_varea);
                _dump_varea(dst_varea);
                RT_ASSERT(0);
            }
        }

        rt_pages_free(buf1, 0);
        rt_pages_free(buf2, 0);
    }
    else
    {
        param->rc = -RT_ENOENT;
    }

    return 0;
}

rt_err_t rt_aspace_compare(rt_aspace_t src, rt_aspace_t dst)
{
    struct _compare_param param = {.rc = 0, .dst = dst};
    rt_aspace_traversal(src, _compare_varea, &param);
    return param.rc;
}

/* dst is page-aligned */
rt_inline rt_err_t _page_put(rt_varea_t varea, void *page_va, void *buffer)
{
    struct rt_aspace_io_msg iomsg;
    rt_err_t rc;

    rt_mm_io_msg_init(&iomsg, VAREA_VA_TO_OFFSET(varea, page_va), page_va, buffer);
    varea->mem_obj->page_write(varea, &iomsg);

    if (iomsg.response.status == MM_FAULT_STATUS_UNRECOVERABLE)
        rc = -RT_ERROR;
    else
        rc = RT_EOK;
    return rc;
}

/* dst is page-aligned */
rt_inline rt_err_t _page_get(rt_varea_t varea, void *page_va, void *buffer)
{
    struct rt_aspace_io_msg iomsg;
    rt_err_t rc;

    rt_mm_io_msg_init(&iomsg, VAREA_VA_TO_OFFSET(varea, page_va), page_va, buffer);
    varea->mem_obj->page_read(varea, &iomsg);

    if (iomsg.response.status == MM_FAULT_STATUS_UNRECOVERABLE)
        rc = -RT_ERROR;
    else
        rc = RT_EOK;
    return rc;
}

#ifdef RT_USING_SMART
#include "lwp.h"

rt_inline rt_aspace_t _current_uspace(void)
{
    rt_lwp_t this_proc = lwp_self();
    return this_proc ? this_proc->aspace : RT_NULL;
}
#else
rt_inline rt_aspace_t _current_uspace(void)
{
    return RT_NULL;
}
#endif

rt_err_t rt_aspace_page_get_phy(rt_aspace_t aspace, void *page_va, void *buffer)
{
    rt_err_t rc = -RT_ERROR;

    char *frame_ka = rt_hw_mmu_v2p(aspace, page_va);
    if (frame_ka != ARCH_MAP_FAILED)
    {
        frame_ka = rt_kmem_p2v(frame_ka);
        if (frame_ka)
        {
            rt_memcpy(buffer, frame_ka, ARCH_PAGE_SIZE);
            rc = RT_EOK;
        }
        else if (aspace == _current_uspace() || aspace == &rt_kernel_space)
        {
            /* direct IO */
            rt_memcpy(buffer, page_va, ARCH_PAGE_SIZE);
            rc = RT_EOK;
        }
        else
        {
            /* user memory region remap ? */
            LOG_W("%s(aspace=0x%lx,va=%p): Operation not supported",
                  __func__, aspace, page_va);
            rc = -RT_ENOSYS;
        }
    }
    else
    {
        LOG_W("%s(aspace=0x%lx,va=%p): PTE does not exist",
              __func__, aspace, page_va);
        rc = -RT_ENOENT;
    }
    return rc;
}

rt_err_t rt_aspace_page_put_phy(rt_aspace_t aspace, void *page_va, void *buffer)
{
    rt_err_t rc = -RT_ERROR;

    char *frame_ka = rt_hw_mmu_v2p(aspace, page_va);
    if (frame_ka != ARCH_MAP_FAILED)
    {
        frame_ka = rt_kmem_p2v(frame_ka);
        if (frame_ka)
        {
            rt_memcpy(frame_ka, buffer, ARCH_PAGE_SIZE);
            rc = RT_EOK;
        }
        else if (aspace == _current_uspace() || aspace == &rt_kernel_space)
        {
            /* direct IO */
            rt_memcpy(page_va, buffer, ARCH_PAGE_SIZE);
            rc = RT_EOK;
        }
        else
        {
            /* user memory region remap ? */
            LOG_W("%s(aspace=0x%lx,va=%p): Operation not supported",
                  __func__, aspace, page_va);
            rc = -RT_ENOSYS;
        }
    }
    else
    {
        LOG_W("%s(aspace=0x%lx,va=%p): PTE does not exist",
              __func__, aspace, page_va);
        rc = -RT_ENOENT;
    }
    return rc;
}

rt_err_t rt_aspace_page_put(rt_aspace_t aspace, void *page_va, void *buffer)
{
    rt_err_t rc = -RT_ERROR;
    rt_varea_t varea;

    RT_ASSERT(aspace);
    RD_LOCK(aspace);
    varea = _aspace_bst_search(aspace, page_va);
    if (varea && ALIGNED(page_va))
    {
        if (varea->mem_obj)
        {
            if (varea->mem_obj->page_write)
            {
                if (rt_varea_is_private_locked(varea))
                {
                    RDWR_LOCK(aspace);
                    struct rt_aspace_fault_msg msg;
                    msg.fault_op = MM_FAULT_OP_WRITE;
                    msg.fault_type = MM_FAULT_TYPE_ACCESS_FAULT;
                    msg.fault_vaddr = page_va;
                    rc = rt_varea_fix_private_locked(varea, rt_hw_mmu_v2p(aspace, page_va),
                                                     &msg, RT_TRUE);
                    RDWR_UNLOCK(aspace);
                    if (rc == MM_FAULT_FIXABLE_TRUE)
                    {
                        varea = _aspace_bst_search(aspace, page_va);
                        rc = _page_put(varea, page_va, buffer);
                    }
                    else
                        rc = -RT_ERROR;
                }
                else
                    rc = _page_put(varea, page_va, buffer);
            }
            else
            {
                rc = -RT_EINVAL;
                LOG_I("%s: Operation not allowed", __func__);
            }
        }
        else
        {
            rc = rt_aspace_page_put_phy(aspace, page_va, buffer);
        }
    }
    else
        rc = -RT_EINVAL;
    RD_UNLOCK(aspace);

    return rc;
}

rt_err_t rt_aspace_page_get(rt_aspace_t aspace, void *page_va, void *buffer)
{
    rt_err_t rc = -RT_ERROR;
    rt_varea_t varea;

    /* TODO: cache the last search item */
    RT_ASSERT(aspace);
    RD_LOCK(aspace);
    varea = _aspace_bst_search(aspace, page_va);
    if (varea && ALIGNED(page_va))
    {
        if (varea->mem_obj)
        {
            if (varea->mem_obj->page_read)
            {
                rc = _page_get(varea, page_va, buffer);
            }
            else
            {
                LOG_I("%s: Operation not allowed", __func__);
            }
        }
        else
        {
            rc = rt_aspace_page_get_phy(aspace, page_va, buffer);
        }
    }
    else
    {
        rc = -RT_EINVAL;
        LOG_D("%s(va=%p,varea=0x%lx): Invalid address",
              __func__, page_va, varea);
    }
    RD_UNLOCK(aspace);

    return rc;
}

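/**
 * Illustrative sketch: copy a page out of an address space, patch one byte,
 * and write it back through the mem-object-aware accessors above. The buffer
 * is a hypothetical page-sized scratch area.
 *
 * @code
 *     static char page_buf[ARCH_PAGE_SIZE];
 *     if (rt_aspace_page_get(aspace, va, page_buf) == RT_EOK)
 *     {
 *         page_buf[0] = 0x42;
 *         // triggers the CoW fix-up path on private mappings
 *         rt_aspace_page_put(aspace, va, page_buf);
 *     }
 * @endcode
 */
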
rt_varea_t rt_aspace_query(rt_aspace_t aspace, void *vaddr)
{
    return _aspace_bst_search(aspace, vaddr);
}