/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-08-19     Shell        Support PRIVATE mapping and COW
 */

#define DBG_TAG "mm.anon"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <string.h>
#include "mm_private.h"
#include <mmu.h>

/**
 * Anonymous objects directly represent mappings that have no backing file in
 * the aspace. Their only backing store is the aspace->pgtbl.
 */

typedef struct rt_private_ctx {
    struct rt_mem_obj mem_obj;
    rt_aspace_t backup_aspace;
    /* both the varea and the aspace can hold a reference */
    rt_atomic_t reference;
    /* a read-only `private` object is a shared object */
    long readonly;
} *rt_private_ctx_t;
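
/*
 * Reference-counting model, summarized from the code below: every varea bound
 * to this object and the backup aspace itself each hold one count in
 * `reference`. While `readonly` is zero the object is a plain private mapping
 * owned by `backup_aspace`; after a fork converts it (see _convert_readonly()),
 * `readonly` records the base reference count, and the backup aspace plus its
 * page table are torn down once the count drops back to that base in
 * rt_aspace_anon_ref_dec().
 */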
rt_inline rt_aspace_t _anon_obj_get_backup(rt_mem_obj_t mobj)
{
    rt_private_ctx_t pctx;
    rt_aspace_t backup;
    pctx = rt_container_of(mobj, struct rt_private_ctx, mem_obj);
    backup = pctx->backup_aspace;
    return backup;
}

rt_inline rt_atomic_t *_anon_obj_get_reference(rt_mem_obj_t mobj)
{
    rt_private_ctx_t pctx;
    pctx = rt_container_of(mobj, struct rt_private_ctx, mem_obj);
    return &pctx->reference;
}

rt_inline rt_private_ctx_t _anon_mobj_to_pctx(rt_mem_obj_t mobj)
{
    return rt_container_of(mobj, struct rt_private_ctx, mem_obj);
}

static long rt_aspace_anon_ref_inc(rt_mem_obj_t aobj)
{
    long rc;
    if (aobj)
    {
        rc = rt_atomic_add(_anon_obj_get_reference(aobj), 1);
        LOG_D("%s(aobj=%p) Cur %ld", __func__, aobj, rc + 1);
    }
    else
        rc = -1;
    return rc;
}

rt_err_t rt_aspace_anon_ref_dec(rt_mem_obj_t aobj)
{
    rt_err_t rc;
    rt_aspace_t aspace;
    rt_private_ctx_t pctx;
    long former_reference;
    if (aobj)
    {
        pctx = _anon_mobj_to_pctx(aobj);
        RT_ASSERT(pctx);

        former_reference = rt_atomic_add(_anon_obj_get_reference(aobj), -1);
        LOG_D("%s(aobj=%p) Cur %ld", __func__, aobj, former_reference - 1);
        if (pctx->readonly)
        {
            if (former_reference - 1 <= pctx->readonly)
            {
                void *pgtbl;
                RT_ASSERT(former_reference - 1 == pctx->readonly);
                aspace = _anon_obj_get_backup(aobj);

                pctx->readonly = 0;
                pgtbl = aspace->page_table;
                rt_aspace_delete(aspace);
                rt_hw_mmu_pgtbl_delete(pgtbl);
            }
        }
        else if (former_reference < 2)
        {
            aspace = _anon_obj_get_backup(aobj);
            aspace->private_object = RT_NULL;

            rt_free(pctx);
        }
        rc = RT_EOK;
    }
    else
    {
        rc = -RT_EINVAL;
    }

    return rc;
}
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
{
    /* each mapping of a page frame in the varea is bound to a reference */
    rt_page_ref_inc(page_addr, 0);
}

/**
 * Private unmapping of address space
 */
static void _pgmgr_pop_all(rt_varea_t varea)
{
    rt_aspace_t aspace = varea->aspace;
    char *iter = varea->start;
    char *end_addr = iter + varea->size;

    RT_ASSERT(iter < end_addr);
    RT_ASSERT(!((long)iter & ARCH_PAGE_MASK));
    RT_ASSERT(!((long)end_addr & ARCH_PAGE_MASK));

    for (; iter != end_addr; iter += ARCH_PAGE_SIZE)
    {
        void *page_pa = rt_hw_mmu_v2p(aspace, iter);
        char *page_va = rt_kmem_p2v(page_pa);
        if (page_pa != ARCH_MAP_FAILED && page_va)
        {
            rt_varea_unmap_page(varea, iter);
            rt_pages_free(page_va, 0);
        }
    }
}

static void _pgmgr_pop_range(rt_varea_t varea, void *rm_start, void *rm_end)
{
    void *page_va;

    RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
    RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
    while (rm_start != rm_end)
    {
        page_va = rt_hw_mmu_v2p(varea->aspace, rm_start);

        if (page_va != ARCH_MAP_FAILED)
        {
            page_va -= PV_OFFSET;
            LOG_D("%s: free page %p", __func__, page_va);
            rt_varea_unmap_page(varea, rm_start);
            rt_pages_free(page_va, 0);
        }
        rm_start += ARCH_PAGE_SIZE;
    }
}

static const char *_anon_get_name(rt_varea_t varea)
{
    return varea->aspace == _anon_obj_get_backup(varea->mem_obj) ? "anonymous" : "reference";
}

/**
 * Migration handler on varea re-construction
 */
static void _anon_varea_open(struct rt_varea *varea)
{
    rt_aspace_anon_ref_inc(varea->mem_obj);

    if (varea->aspace == _anon_obj_get_backup(varea->mem_obj))
        varea->offset = MM_PA_TO_OFF(varea->start);

    varea->data = NULL;
}

static void _anon_varea_close(struct rt_varea *varea)
{
    rt_aspace_anon_ref_dec(varea->mem_obj);

    /* unmap and dereference page frames in the varea region */
    _pgmgr_pop_all(varea);
}

static rt_err_t _anon_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}

static rt_err_t _anon_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    char *varea_start = varea->start;
    void *rm_start;
    void *rm_end;

    if (varea_start == (char *)new_start)
    {
        rm_start = varea_start + size;
        rm_end = varea_start + varea->size;
    }
    else /* if (varea_start < (char *)new_start) */
    {
        RT_ASSERT(varea_start < (char *)new_start);
        rm_start = varea_start;
        rm_end = new_start;
    }

    _pgmgr_pop_range(varea, rm_start, rm_end);
    return RT_EOK;
}

static rt_err_t _anon_varea_split(struct rt_varea *existed, void *unmap_start,
                                  rt_size_t unmap_len, struct rt_varea *subset)
{
    /* remove the resource in the unmap region, and do nothing for the subset */
    _pgmgr_pop_range(existed, unmap_start, (char *)unmap_start + unmap_len);

    _anon_varea_open(subset);
    return RT_EOK;
}

static rt_err_t _anon_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    /* do nothing for the varea merge */
    return RT_EOK;
}

/**
 * Private mapping of address space
 */
rt_inline void _map_page_in_varea(rt_aspace_t aspace, rt_varea_t varea,
                                  struct rt_aspace_fault_msg *msg, char *fault_addr)
{
    char *page_va = msg->response.vaddr;
    if (rt_varea_map_page(varea, fault_addr, page_va) == RT_EOK)
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
        rt_varea_pgmgr_insert(varea, page_va);
    }
    else
    {
        msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
        LOG_W("%s: failed to map page into varea", __func__);
    }
}
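
/*
 * Note (summarized from the call sites below): rt_varea_pgmgr_insert() takes an
 * extra reference on the page frame for the newly installed PTE, so a caller
 * that obtained the frame from the dummy mapper still owns its own reference
 * and drops it with rt_pages_free(..., 0) after the mapping is in place.
 */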
/* page frame inquiry or allocation in backup address space */
static void *_get_page_from_backup(rt_aspace_t backup, rt_base_t offset_in_mobj)
{
    void *frame_pa;
    char *backup_addr;
    rt_varea_t backup_varea;
    void *rc = RT_NULL;

    backup_addr = (char *)(offset_in_mobj << MM_PAGE_SHIFT);
    backup_varea = rt_aspace_query(backup, backup_addr);

    if (backup_varea)
    {
        /* synchronize multiple requests through the aspace lock of backup */
        WR_LOCK(backup);
        frame_pa = rt_hw_mmu_v2p(backup, backup_addr);
        if (frame_pa == ARCH_MAP_FAILED)
        {
            /* provide the page in the backup varea */
            struct rt_aspace_fault_msg msg;
            msg.fault_op = MM_FAULT_OP_WRITE;
            msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
            msg.fault_vaddr = backup_addr;
            msg.off = offset_in_mobj;
            rt_mm_fault_res_init(&msg.response);

            rt_mm_dummy_mapper.on_page_fault(backup_varea, &msg);
            if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
            {
                _map_page_in_varea(backup, backup_varea, &msg, backup_addr);
                if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
                {
                    rc = msg.response.vaddr;
                }
                rt_pages_free(msg.response.vaddr, 0);
            }
        }
        else
        {
            rc = rt_kmem_p2v(frame_pa);
            if (!rc)
                RT_ASSERT(0 && "No kernel address of target page frame");
        }
        WR_UNLOCK(backup);
    }
    else
    {
        /* out of range error */
        LOG_E("(backup_addr=%p): Page request out of range", backup_addr);
    }

    return rc;
}

/* get the backup page in kernel for the address in user space */
static void _fetch_page_for_varea(struct rt_varea *varea, struct rt_aspace_fault_msg *msg, rt_bool_t need_map)
{
    void *paddr;
    char *frame_ka;
    rt_aspace_t curr_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    RDWR_LOCK(curr_aspace);

    /**
     * if the page is already mapped (this can happen when another thread won
     * the race, took the lock and mapped the page first), return okay
     */
    paddr = rt_hw_mmu_v2p(curr_aspace, msg->fault_vaddr);
    if (paddr == ARCH_MAP_FAILED)
    {
        if (backup == curr_aspace)
        {
            rt_mm_dummy_mapper.on_page_fault(varea, msg);
            if (msg->response.status != MM_FAULT_STATUS_UNRECOVERABLE)
            {
                /* if backup == curr_aspace, a page fetch is always paired with a PTE fill */
                _map_page_in_varea(backup, varea, msg, msg->fault_vaddr);
                if (msg->response.status != MM_FAULT_STATUS_UNRECOVERABLE)
                {
                    rt_pages_free(msg->response.vaddr, 0);
                }
            }
        }
        else
        {
            frame_ka = _get_page_from_backup(backup, msg->off);
            if (frame_ka)
            {
                msg->response.vaddr = frame_ka;
                msg->response.size = ARCH_PAGE_SIZE;
                if (!need_map)
                {
                    msg->response.status = MM_FAULT_STATUS_OK;
                }
                else
                {
                    _map_page_in_varea(curr_aspace, varea, msg, msg->fault_vaddr);
                }
            }
        }
    }
    else
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
    }

    RDWR_UNLOCK(curr_aspace);
}
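
/*
 * Note on `need_map` (summarized from the callers): the page-fault and
 * page-write paths pass RT_TRUE so the fetched frame is also installed into
 * the faulting aspace, while the page-read path passes RT_FALSE because it
 * only needs the kernel-visible frame in msg->response.vaddr and leaves the
 * user PTE untouched.
 */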
static void _anon_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    _fetch_page_for_varea(varea, msg, RT_TRUE);
}

static void read_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)
{
    if (rt_aspace_page_get_phy(aspace, iomsg->fault_vaddr, iomsg->buffer_vaddr) == RT_EOK)
        iomsg->response.status = MM_FAULT_STATUS_OK;
}

static void _anon_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
    rt_aspace_t curr_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    if (rt_hw_mmu_v2p(curr_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    {
        struct rt_aspace_fault_msg msg;
        msg.fault_op = MM_FAULT_OP_READ;
        msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
        msg.fault_vaddr = iomsg->fault_vaddr;
        msg.off = iomsg->off;
        rt_mm_fault_res_init(&msg.response);

        _fetch_page_for_varea(varea, &msg, RT_FALSE);
        if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
        {
            void *saved_fault_va = iomsg->fault_vaddr;
            iomsg->fault_vaddr = (void *)(iomsg->off << MM_PAGE_SHIFT);
            read_by_mte(backup, iomsg);
            iomsg->fault_vaddr = saved_fault_va;
        }
    }
    else
    {
        read_by_mte(curr_aspace, iomsg);
    }
}

static void write_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)
{
    if (rt_aspace_page_put_phy(aspace, iomsg->fault_vaddr, iomsg->buffer_vaddr) == RT_EOK)
        iomsg->response.status = MM_FAULT_STATUS_OK;
}

static void _anon_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
    rt_aspace_t from_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    if (from_aspace != backup)
    {
        /* a varea in a guest aspace cannot modify the page */
        iomsg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
    }
    else if (rt_hw_mmu_v2p(from_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    {
        struct rt_aspace_fault_msg msg;
        msg.fault_op = MM_FAULT_OP_WRITE;
        msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
        msg.fault_vaddr = iomsg->fault_vaddr;
        msg.off = iomsg->off;
        rt_mm_fault_res_init(&msg.response);

        _fetch_page_for_varea(varea, &msg, RT_TRUE);
        if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
        {
            write_by_mte(backup, iomsg);
        }
        else
        {
            /* mapping failed, report an error */
            iomsg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
        }
    }
    else
    {
        write_by_mte(backup, iomsg);
    }
}

static struct rt_private_ctx _priv_obj = {
    .mem_obj.get_name = _anon_get_name,
    .mem_obj.on_page_fault = _anon_page_fault,
    .mem_obj.hint_free = NULL,
    .mem_obj.on_varea_open = _anon_varea_open,
    .mem_obj.on_varea_close = _anon_varea_close,
    .mem_obj.on_varea_shrink = _anon_varea_shrink,
    .mem_obj.on_varea_split = _anon_varea_split,
    .mem_obj.on_varea_expand = _anon_varea_expand,
    .mem_obj.on_varea_merge = _anon_varea_merge,
    .mem_obj.page_read = _anon_page_read,
    .mem_obj.page_write = _anon_page_write,
};
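
/*
 * _priv_obj acts only as a template: rt_private_obj_create_n_bind() below
 * copies its mem_obj into each per-aspace rt_private_ctx, so every aspace gets
 * its own object instance sharing these handler functions.
 */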
rt_inline rt_private_ctx_t rt_private_obj_create_n_bind(rt_aspace_t aspace)
{
    rt_private_ctx_t private_object;
    private_object = rt_malloc(sizeof(struct rt_private_ctx));
    if (private_object)
    {
        memcpy(&private_object->mem_obj, &_priv_obj, sizeof(_priv_obj));

        /* hold an initial reference from the backup aspace */
        rt_atomic_store(&private_object->reference, 1);
        private_object->readonly = RT_FALSE;
        private_object->backup_aspace = aspace;
        aspace->private_object = &private_object->mem_obj;
    }

    return private_object;
}

rt_inline rt_mem_obj_t _get_private_obj(rt_aspace_t aspace)
{
    rt_private_ctx_t priv;
    rt_mem_obj_t rc;
    rc = aspace->private_object;
    if (!aspace->private_object)
    {
        priv = rt_private_obj_create_n_bind(aspace);
        if (priv)
        {
            rc = &priv->mem_obj;
            aspace->private_object = rc;
        }
    }
    return rc;
}

static int _override_map(rt_varea_t varea, rt_aspace_t aspace, void *fault_vaddr,
                         struct rt_aspace_fault_msg *msg, void *page)
{
    int rc = MM_FAULT_FIXABLE_FALSE;
    rt_mem_obj_t private_object;
    rt_varea_t map_varea = RT_NULL;
    rt_err_t error;
    rt_size_t flags;
    rt_size_t attr;

    LOG_D("%s", __func__);

    private_object = _get_private_obj(aspace);
    if (private_object)
    {
        flags = varea->flag | MMF_MAP_FIXED;
        /* don't prefetch; do it later */
        flags &= ~MMF_PREFETCH;
        attr = rt_hw_mmu_attr_add_perm(varea->attr, RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE);

        /* override the existing mapping at fault_vaddr */
        error = _mm_aspace_map(
            aspace, &map_varea, &fault_vaddr, ARCH_PAGE_SIZE, attr,
            flags, private_object, MM_PA_TO_OFF(fault_vaddr));

        if (error == RT_EOK)
        {
            msg->response.status = MM_FAULT_STATUS_OK;
            msg->response.vaddr = page;
            msg->response.size = ARCH_PAGE_SIZE;
            if (rt_varea_map_with_msg(map_varea, msg) != RT_EOK)
            {
                LOG_E("%s: fault_va=%p,(priv_va=%p,priv_sz=0x%lx) at %s", __func__,
                      msg->fault_vaddr, map_varea->start, map_varea->size,
                      VAREA_NAME(map_varea));
                RT_ASSERT(0 && "should never fail");
            }
            RT_ASSERT(rt_hw_mmu_v2p(aspace, msg->fault_vaddr) == (page + PV_OFFSET));

            rc = MM_FAULT_FIXABLE_TRUE;
            rt_varea_pgmgr_insert(map_varea, page);
            rt_pages_free(page, 0);
        }
        else
        {
            /* the private object will be released on destruction of the aspace */
            rt_free(map_varea);
        }
    }
    else
    {
        LOG_I("%s: out of memory", __func__);
        rc = MM_FAULT_FIXABLE_FALSE;
    }

    return rc;
}
/**
 * Replace an existing mapping with a private one. This is equivalent to:
 * => aspace_unmap(ex_varea, )
 * => aspace_map()
 */
int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
                                struct rt_aspace_fault_msg *msg,
                                rt_bool_t dont_copy)
{
    /**
     * todo: READ -> WRITE lock here
     */
    void *page;
    void *fault_vaddr;
    rt_aspace_t aspace;
    rt_mem_obj_t ex_obj;
    int rc = MM_FAULT_FIXABLE_FALSE;
    ex_obj = ex_varea->mem_obj;

    if (ex_obj)
    {
        fault_vaddr = msg->fault_vaddr;
        aspace = ex_varea->aspace;
        RT_ASSERT(!!aspace);

        /**
         * todo: what if multiple pages are required?
         */
        if (aspace->private_object == ex_obj)
        {
            RT_ASSERT(0 && "recursion");
        }
        else if (ex_obj->page_read)
        {
            page = rt_pages_alloc_tagged(0, RT_PAGE_PICK_AFFID(fault_vaddr), PAGE_ANY_AVAILABLE);
            if (page)
            {
                /** set up the message & fetch the data from the source object */
                if (!dont_copy)
                {
                    struct rt_aspace_io_msg io_msg;
                    rt_mm_io_msg_init(&io_msg, msg->off, msg->fault_vaddr, page);
                    ex_obj->page_read(ex_varea, &io_msg);
                    /**
                     * Note: even if ex_obj is already mapped into the varea, it
                     * is still okay since we will override it later
                     */
                    if (io_msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
                    {
                        rc = _override_map(ex_varea, aspace, fault_vaddr, msg, page);
                    }
                    else
                    {
                        rt_pages_free(page, 0);
                        LOG_I("%s: page read(va=%p) fault from %s(start=%p,size=%p)", __func__,
                              msg->fault_vaddr, VAREA_NAME(ex_varea), ex_varea->start, ex_varea->size);
                    }
                }
                else
                {
                    rc = _override_map(ex_varea, aspace, fault_vaddr, msg, page);
                }
            }
            else
            {
                LOG_I("%s: pages allocation failed", __func__);
            }
        }
        else
        {
            LOG_I("%s: no page read method provided from %s", __func__, VAREA_NAME(ex_varea));
        }
    }
    else
    {
        LOG_I("%s: unavailable memory object", __func__);
    }

    return rc;
}
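
/*
 * Illustrative COW write-fault flow (a sketch of how a fault handler might
 * drive this function; the surrounding handler logic is an assumption, not
 * part of this file):
 *
 *   // on a write fault inside a MMF_MAP_PRIVATE varea backed by a shared
 *   // object, copy the old page and remap the faulting page privately:
 *   //   rc = rt_varea_fix_private_locked(varea, pa, msg, RT_FALSE);
 *   //   if (rc == MM_FAULT_FIXABLE_TRUE) -> retry the faulting access
 *   //
 *   // pass dont_copy = RT_TRUE when the old contents are irrelevant
 *   // (e.g. the page is about to be fully overwritten).
 */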
int rt_aspace_map_private(rt_aspace_t aspace, void **addr, rt_size_t length,
                          rt_size_t attr, mm_flag_t flags)
{
    int rc;
    rt_mem_obj_t priv_obj;

    if (flags & MMF_STATIC_ALLOC)
    {
        rc = -RT_EINVAL;
    }
    else
    {
        priv_obj = _get_private_obj(aspace);
        if (priv_obj)
        {
            flags |= MMF_MAP_PRIVATE;
            flags &= ~MMF_PREFETCH;
            rc = rt_aspace_map(aspace, addr, length, attr, flags, priv_obj, 0);
        }
        else
        {
            rc = -RT_ENOMEM;
        }
    }
    return rc;
}
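
/*
 * Illustrative usage (a sketch; the attribute and flag values below are
 * assumptions for demonstration and depend on the target architecture):
 *
 *   void *vaddr = RT_NULL;
 *   // ask for one page of anonymous, private (copy-on-write capable) memory
 *   int err = rt_aspace_map_private(aspace, &vaddr, ARCH_PAGE_SIZE,
 *                                   MMU_MAP_U_RWCB, MMF_CREATE(0, 0));
 *   if (err != RT_EOK)
 *       LOG_W("private mapping failed: %d", err);
 */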
static int _release_shared(rt_varea_t varea, void *arg)
{
    rt_aspace_t src = varea->aspace;
    rt_mem_obj_t mem_obj = varea->mem_obj;

    if (mem_obj != _get_private_obj(src))
    {
        _varea_uninstall_locked(varea);
        if (VAREA_NOT_STATIC(varea))
        {
            rt_free(varea);
        }
    }

    return 0;
}

static rt_err_t _convert_readonly(rt_aspace_t aspace, long base_reference)
{
    rt_mem_obj_t aobj;
    rt_private_ctx_t pctx;
    aobj = _get_private_obj(aspace);
    pctx = _anon_mobj_to_pctx(aobj);

    LOG_D("Ref(cur=%d,base=%d)", pctx->reference, base_reference);
    rt_aspace_traversal(aspace, _release_shared, 0);
    pctx->readonly = base_reference;
    return 0;
}

rt_inline void _switch_aspace(rt_aspace_t *pa, rt_aspace_t *pb)
{
    rt_aspace_t temp;
    temp = *pa;
    *pa = *pb;
    *pb = temp;
}

rt_err_t rt_aspace_fork(rt_aspace_t *psrc, rt_aspace_t *pdst)
{
    rt_err_t rc;
    void *pgtbl;
    rt_aspace_t backup;
    rt_aspace_t src = *psrc;
    rt_aspace_t dst = *pdst;
    long base_reference;

    pgtbl = rt_hw_mmu_pgtbl_create();
    if (pgtbl)
    {
        backup = rt_aspace_create(src->start, src->size, pgtbl);
        if (backup)
        {
            WR_LOCK(src);
            base_reference = rt_atomic_load(_anon_obj_get_reference(src->private_object));
            rc = rt_aspace_duplicate_locked(src, dst);
            WR_UNLOCK(src);

            if (!rc)
            {
                /* WR_LOCK(dst) is not necessary since dst is not available currently */
                rc = rt_aspace_duplicate_locked(dst, backup);
                if (!rc)
                {
                    _switch_aspace(psrc, &backup);
                    _convert_readonly(backup, base_reference);
                }
            }
        }
        else
        {
            rc = -RT_ENOMEM;
        }
    }
    else
    {
        rc = -RT_ENOMEM;
    }

    return rc;
}
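
/*
 * Semantics, summarized from the code above: on success the original *psrc
 * aspace is demoted to the shared, read-only backup (its shared vareas are
 * released and its private object marked read-only with the recorded base
 * reference), while *psrc is switched to a freshly created duplicate and
 * *pdst holds another duplicate; the anonymous vareas of both now reference
 * that backup through the private object for copy-on-write.
 */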