mm_anon.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-08-19     Shell        Support PRIVATE mapping and COW
 */

#define DBG_TAG "mm.anon"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <string.h>
#include "mm_private.h"
#include <mmu.h>

/**
 * Anonymous objects directly represent mappings that have no backing file in
 * the aspace. Their only backup is in the aspace->pgtbl.
 */

typedef struct rt_private_ctx {
    struct rt_mem_obj mem_obj;
    rt_aspace_t backup_aspace;
    /* both varea and aspace can hold a reference */
    rt_atomic_t reference;
    /* a readonly `private` is a shared object */
    long readonly;
} *rt_private_ctx_t;

rt_inline rt_aspace_t _anon_obj_get_backup(rt_mem_obj_t mobj)
{
    rt_private_ctx_t pctx;
    rt_aspace_t backup;
    pctx = rt_container_of(mobj, struct rt_private_ctx, mem_obj);
    backup = pctx->backup_aspace;
    return backup;
}

rt_inline rt_atomic_t *_anon_obj_get_reference(rt_mem_obj_t mobj)
{
    rt_private_ctx_t pctx;
    pctx = rt_container_of(mobj, struct rt_private_ctx, mem_obj);
    return &pctx->reference;
}

rt_inline rt_private_ctx_t _anon_mobj_to_pctx(rt_mem_obj_t mobj)
{
    return rt_container_of(mobj, struct rt_private_ctx, mem_obj);
}

static long rt_aspace_anon_ref_inc(rt_mem_obj_t aobj)
{
    long rc;
    if (aobj)
    {
        rc = rt_atomic_add(_anon_obj_get_reference(aobj), 1);
        LOG_D("%s(aobj=%p) Cur %ld", __func__, aobj, rc + 1);
    }
    else
        rc = -1;
    return rc;
}

rt_err_t rt_aspace_anon_ref_dec(rt_mem_obj_t aobj)
{
    rt_err_t rc;
    rt_aspace_t aspace;
    rt_private_ctx_t pctx;
    long former_reference;

    if (aobj)
    {
        pctx = _anon_mobj_to_pctx(aobj);
        RT_ASSERT(pctx);

        former_reference = rt_atomic_add(_anon_obj_get_reference(aobj), -1);
        LOG_D("%s(aobj=%p) Cur %ld", __func__, aobj, former_reference - 1);
        if (pctx->readonly)
        {
            if (former_reference - 1 <= pctx->readonly)
            {
                void *pgtbl;
                RT_ASSERT(former_reference - 1 == pctx->readonly);
                aspace = _anon_obj_get_backup(aobj);

                pctx->readonly = 0;
                pgtbl = aspace->page_table;
                rt_aspace_delete(aspace);
                rt_hw_mmu_pgtbl_delete(pgtbl);
            }
        }
        else if (former_reference < 2)
        {
            aspace = _anon_obj_get_backup(aobj);
            aspace->private_object = RT_NULL;

            rt_free(pctx);
        }
        rc = RT_EOK;
    }
    else
    {
        rc = -RT_EINVAL;
    }

    return rc;
}
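
/*
 * Illustrative sketch (added, not part of the original source): within this
 * file the reference on an anonymous object is taken in _anon_varea_open()
 * and dropped in _anon_varea_close(), so the object lives as long as any
 * varea maps it. Conceptually the pairing looks like this, with `aobj` being
 * a hypothetical handle to aspace->private_object:
 *
 *   rt_aspace_anon_ref_inc(aobj);   // a varea starts referencing the object
 *   ...                             // the mapping is in use
 *   rt_aspace_anon_ref_dec(aobj);   // last put frees pctx, or deletes the
 *                                   // readonly backup aspace after a fork
 */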

static const char *_anon_get_name(rt_varea_t varea)
{
    return varea->aspace == _anon_obj_get_backup(varea->mem_obj) ? "anonymous" : "reference";
}

static void _anon_varea_open(struct rt_varea *varea)
{
    rt_aspace_anon_ref_inc(varea->mem_obj);

    if (varea->aspace == _anon_obj_get_backup(varea->mem_obj))
        varea->offset = MM_PA_TO_OFF(varea->start);

    varea->data = NULL;
}

static void _anon_varea_close(struct rt_varea *varea)
{
    rt_aspace_anon_ref_dec(varea->mem_obj);

    rt_mm_dummy_mapper.on_varea_close(varea);
}

static rt_err_t _anon_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}

static rt_err_t _anon_varea_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    return rt_mm_dummy_mapper.on_varea_shrink(varea, new_start, size);
}

static rt_err_t _anon_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    _anon_varea_open(subset);
    return rt_mm_dummy_mapper.on_varea_split(existed, unmap_start, unmap_len, subset);
}

static rt_err_t _anon_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    _anon_varea_close(merge_from);
    return rt_mm_dummy_mapper.on_varea_merge(merge_to, merge_from);
}

rt_inline void _map_page_in_varea(rt_aspace_t aspace, rt_varea_t varea,
                                  struct rt_aspace_fault_msg *msg, char *fault_addr)
{
    char *page_va = msg->response.vaddr;
    if (rt_varea_map_page(varea, fault_addr, page_va) == RT_EOK)
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
        rt_varea_pgmgr_insert(varea, page_va);
    }
    else
    {
        msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
        LOG_W("%s: failed to map page into varea", __func__);
    }
}

static void *_get_page_from_backup(rt_aspace_t backup, rt_base_t offset_in_mobj)
{
    void *frame_pa;
    char *backup_addr;
    rt_varea_t backup_varea;
    void *rc = RT_NULL;

    backup_addr = (char *)(offset_in_mobj << MM_PAGE_SHIFT);
    backup_varea = rt_aspace_query(backup, backup_addr);

    if (backup_varea)
    {
        /* synchronize between multiple requests by the aspace lock of backup */
        WR_LOCK(backup);
        frame_pa = rt_hw_mmu_v2p(backup, backup_addr);
        if (frame_pa == ARCH_MAP_FAILED)
        {
            /* provide the page in backup varea */
            struct rt_aspace_fault_msg msg;
            msg.fault_op = MM_FAULT_OP_WRITE;
            msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
            msg.fault_vaddr = backup_addr;
            msg.off = offset_in_mobj;
            rt_mm_fault_res_init(&msg.response);

            rt_mm_dummy_mapper.on_page_fault(backup_varea, &msg);
            if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
            {
                _map_page_in_varea(backup, backup_varea, &msg, backup_addr);
                if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
                {
                    rc = msg.response.vaddr;
                }
                rt_pages_free(msg.response.vaddr, 0);
            }
        }
        else
        {
            rc = rt_kmem_p2v(frame_pa);
            if (!rc)
                RT_ASSERT(0 && "No kernel address of target page frame");
        }
        WR_UNLOCK(backup);
    }
    else
    {
        /* out of range error */
        LOG_E("(backup_addr=%p): Page request out of range", backup_addr);
    }

    return rc;
}
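
/*
 * Illustrative note (added, not part of the original source): for anonymous
 * vareas the object offset is derived from the virtual address in the backup
 * aspace (see _anon_varea_open(), which sets varea->offset from the start
 * address), and _get_page_from_backup() simply reverses that relation. With a
 * hypothetical faulting address in the backup:
 *
 *   rt_base_t off   = MM_PA_TO_OFF(fault_vaddr);        // vaddr -> object offset
 *   char     *va    = (char *)(off << MM_PAGE_SHIFT);   // object offset -> backup vaddr
 *   void     *kaddr = _get_page_from_backup(backup, off);
 */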

/* get the backup page in kernel for the address in user space */
static void _fetch_page_for_varea(struct rt_varea *varea, struct rt_aspace_fault_msg *msg, rt_bool_t need_map)
{
    void *paddr;
    char *frame_ka;
    rt_aspace_t curr_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    RDWR_LOCK(curr_aspace);

    /**
     * if the page is already mapped (this can be caused by a data race where
     * another thread took the lock and mapped the page before us), return okay
     */
    paddr = rt_hw_mmu_v2p(curr_aspace, msg->fault_vaddr);
    if (paddr == ARCH_MAP_FAILED)
    {
        if (backup == curr_aspace)
        {
            rt_mm_dummy_mapper.on_page_fault(varea, msg);
            if (msg->response.status != MM_FAULT_STATUS_UNRECOVERABLE)
            {
                /* if backup == curr_aspace, a page fetch is always paired with a pte filling */
                _map_page_in_varea(backup, varea, msg, msg->fault_vaddr);
                if (msg->response.status != MM_FAULT_STATUS_UNRECOVERABLE)
                {
                    rt_pages_free(msg->response.vaddr, 0);
                }
            }
        }
        else
        {
            frame_ka = _get_page_from_backup(backup, msg->off);
            if (frame_ka)
            {
                msg->response.vaddr = frame_ka;
                msg->response.size = ARCH_PAGE_SIZE;
                if (!need_map)
                {
                    msg->response.status = MM_FAULT_STATUS_OK;
                }
                else
                {
                    _map_page_in_varea(curr_aspace, varea, msg, msg->fault_vaddr);
                }
            }
        }
    }
    else
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
    }

    RDWR_UNLOCK(curr_aspace);
}
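
/*
 * Illustrative sketch (added, not part of the original source): callers build
 * an rt_aspace_fault_msg and inspect the response afterwards, as
 * _anon_page_read()/_anon_page_write() below do. With need_map == RT_TRUE the
 * page is also installed in the faulting aspace (status OK_MAPPED); with
 * RT_FALSE only the kernel address of the frame is returned (status OK).
 * `fault_vaddr` and `offset_in_object` are hypothetical names:
 *
 *   struct rt_aspace_fault_msg msg;
 *   msg.fault_op    = MM_FAULT_OP_READ;
 *   msg.fault_type  = MM_FAULT_TYPE_PAGE_FAULT;
 *   msg.fault_vaddr = fault_vaddr;
 *   msg.off         = offset_in_object;
 *   rt_mm_fault_res_init(&msg.response);
 *   _fetch_page_for_varea(varea, &msg, RT_FALSE);
 *   if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE) { ... }
 */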

static void _anon_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    _fetch_page_for_varea(varea, msg, RT_TRUE);
}

static void read_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)
{
    if (rt_aspace_page_get_phy(aspace, iomsg->fault_vaddr, iomsg->buffer_vaddr) == RT_EOK)
        iomsg->response.status = MM_FAULT_STATUS_OK;
}

static void _anon_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
    rt_aspace_t curr_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    if (rt_hw_mmu_v2p(curr_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    {
        struct rt_aspace_fault_msg msg;
        msg.fault_op = MM_FAULT_OP_READ;
        msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
        msg.fault_vaddr = iomsg->fault_vaddr;
        msg.off = iomsg->off;
        rt_mm_fault_res_init(&msg.response);

        _fetch_page_for_varea(varea, &msg, RT_FALSE);
        if (msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
        {
            void *saved_fault_va = iomsg->fault_vaddr;
            iomsg->fault_vaddr = (void *)(iomsg->off << MM_PAGE_SHIFT);
            read_by_mte(backup, iomsg);
            iomsg->fault_vaddr = saved_fault_va;
        }
    }
    else
    {
        read_by_mte(curr_aspace, iomsg);
    }
}

static void write_by_mte(rt_aspace_t aspace, struct rt_aspace_io_msg *iomsg)
{
    if (rt_aspace_page_put_phy(aspace, iomsg->fault_vaddr, iomsg->buffer_vaddr) == RT_EOK)
        iomsg->response.status = MM_FAULT_STATUS_OK;
}

static void _anon_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *iomsg)
{
    rt_aspace_t from_aspace = varea->aspace;
    rt_aspace_t backup = _anon_obj_get_backup(varea->mem_obj);

    if (from_aspace != backup)
    {
        /* varea in guest aspace cannot modify the page */
        iomsg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
    }
    else if (rt_hw_mmu_v2p(from_aspace, iomsg->fault_vaddr) == ARCH_MAP_FAILED)
    {
        struct rt_aspace_fault_msg msg;
        msg.fault_op = MM_FAULT_OP_WRITE;
        msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;
        msg.fault_vaddr = iomsg->fault_vaddr;
        msg.off = iomsg->off;
        rt_mm_fault_res_init(&msg.response);

        _fetch_page_for_varea(varea, &msg, RT_TRUE);
        if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
        {
            write_by_mte(backup, iomsg);
        }
        else
        {
            /* mapping failed, report an error */
            iomsg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
        }
    }
    else
    {
        write_by_mte(backup, iomsg);
    }
}
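
/*
 * Illustrative note (added, not part of the original source): page reads may
 * be served from either the faulting aspace or the backup, but page writes
 * are only accepted on the backup aspace, which holds the canonical copy for
 * the copy-on-write scheme. A kernel-side copy into an anonymous page would
 * therefore go through the io message helpers used above; `off`, `vaddr` and
 * `kernel_buf` are hypothetical:
 *
 *   struct rt_aspace_io_msg iomsg;
 *   rt_mm_io_msg_init(&iomsg, off, vaddr, kernel_buf);
 *   _anon_page_write(varea, &iomsg);        // varea must belong to the backup
 *   if (iomsg.response.status == MM_FAULT_STATUS_OK) { ... }
 */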

static struct rt_private_ctx _priv_obj = {
    .mem_obj.get_name = _anon_get_name,
    .mem_obj.on_page_fault = _anon_page_fault,
    .mem_obj.hint_free = NULL,
    .mem_obj.on_varea_open = _anon_varea_open,
    .mem_obj.on_varea_close = _anon_varea_close,
    .mem_obj.on_varea_shrink = _anon_varea_shrink,
    .mem_obj.on_varea_split = _anon_varea_split,
    .mem_obj.on_varea_expand = _anon_varea_expand,
    .mem_obj.on_varea_merge = _anon_varea_merge,
    .mem_obj.page_read = _anon_page_read,
    .mem_obj.page_write = _anon_page_write,
};
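
/*
 * Illustrative note (added, not part of the original source): _priv_obj is
 * only a template; rt_private_obj_create_n_bind() below copies it into a
 * per-aspace rt_private_ctx. The mm core then drives the object solely
 * through these callbacks, roughly:
 *
 *   rt_mem_obj_t mobj = aspace->private_object;
 *   mobj->on_varea_open(varea);          // when a varea is installed
 *   mobj->on_page_fault(varea, &msg);    // on a page fault inside the varea
 *   mobj->on_varea_close(varea);         // when the varea is removed
 */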

rt_inline rt_private_ctx_t rt_private_obj_create_n_bind(rt_aspace_t aspace)
{
    rt_private_ctx_t private_object;
    private_object = rt_malloc(sizeof(struct rt_private_ctx));
    if (private_object)
    {
        memcpy(&private_object->mem_obj, &_priv_obj, sizeof(_priv_obj));

        /* hold an initial ref from the backup aspace */
        rt_atomic_store(&private_object->reference, 1);
        private_object->readonly = RT_FALSE;
        private_object->backup_aspace = aspace;
        aspace->private_object = &private_object->mem_obj;
    }

    return private_object;
}

rt_inline rt_mem_obj_t _get_private_obj(rt_aspace_t aspace)
{
    rt_private_ctx_t priv;
    rt_mem_obj_t rc;
    rc = aspace->private_object;
    if (!aspace->private_object)
    {
        priv = rt_private_obj_create_n_bind(aspace);
        if (priv)
        {
            rc = &priv->mem_obj;
            aspace->private_object = rc;
        }
    }
    return rc;
}

static int _override_map(rt_varea_t varea, rt_aspace_t aspace, void *fault_vaddr, struct rt_aspace_fault_msg *msg, void *page)
{
    int rc = MM_FAULT_FIXABLE_FALSE;
    rt_mem_obj_t private_object;
    rt_varea_t map_varea = RT_NULL;
    rt_err_t error;
    rt_size_t flags;
    rt_size_t attr;

    LOG_D("%s", __func__);

    private_object = _get_private_obj(aspace);
    if (private_object)
    {
        flags = varea->flag | MMF_MAP_FIXED;
        /* don't prefetch; do it later */
        flags &= ~MMF_PREFETCH;
        attr = rt_hw_mmu_attr_add_perm(varea->attr, RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE);

        /* override existing mapping at fault_vaddr */
        error = _mm_aspace_map(
            aspace, &map_varea, &fault_vaddr, ARCH_PAGE_SIZE, attr,
            flags, private_object, MM_PA_TO_OFF(fault_vaddr));

        if (error == RT_EOK)
        {
            msg->response.status = MM_FAULT_STATUS_OK;
            msg->response.vaddr = page;
            msg->response.size = ARCH_PAGE_SIZE;
            if (rt_varea_map_with_msg(map_varea, msg) != RT_EOK)
            {
                LOG_E("%s: fault_va=%p,(priv_va=%p,priv_sz=0x%lx) at %s", __func__, msg->fault_vaddr, map_varea->start, map_varea->size, VAREA_NAME(map_varea));
                RT_ASSERT(0 && "should never fail");
            }
            RT_ASSERT(rt_hw_mmu_v2p(aspace, msg->fault_vaddr) == (page + PV_OFFSET));

            rc = MM_FAULT_FIXABLE_TRUE;
            rt_varea_pgmgr_insert(map_varea, page);
            rt_pages_free(page, 0);
        }
        else
        {
            /* the private object will be released on destruction of the aspace */
            rt_free(map_varea);
        }
    }
    else
    {
        LOG_I("%s: out of memory", __func__);
        rc = MM_FAULT_FIXABLE_FALSE;
    }

    return rc;
}

/**
 * Replace an existing mapping with a private one. This is identical to:
 * => aspace_unmap(ex_varea, )
 * => aspace_map()
 */
int rt_varea_fix_private_locked(rt_varea_t ex_varea, void *pa,
                                struct rt_aspace_fault_msg *msg,
                                rt_bool_t dont_copy)
{
    /**
     * todo: READ -> WRITE lock here
     */
    void *page;
    void *fault_vaddr;
    rt_aspace_t aspace;
    rt_mem_obj_t ex_obj;
    int rc = MM_FAULT_FIXABLE_FALSE;
    ex_obj = ex_varea->mem_obj;

    if (ex_obj)
    {
        fault_vaddr = msg->fault_vaddr;
        aspace = ex_varea->aspace;
        RT_ASSERT(!!aspace);

        /**
         * todo: what if multiple pages are required?
         */
        if (aspace->private_object == ex_obj)
        {
            RT_ASSERT(0 && "recursion");
        }
        else if (ex_obj->page_read)
        {
            page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (page)
            {
                /** setup the message & fetch the data from the source object */
                if (!dont_copy)
                {
                    struct rt_aspace_io_msg io_msg;
                    rt_mm_io_msg_init(&io_msg, msg->off, msg->fault_vaddr, page);
                    ex_obj->page_read(ex_varea, &io_msg);
                    /**
                     * Note: if ex_obj has mapped into the varea, it's still okay
                     * since we will override it later
                     */
                    if (io_msg.response.status != MM_FAULT_STATUS_UNRECOVERABLE)
                    {
                        rc = _override_map(ex_varea, aspace, fault_vaddr, msg, page);
                    }
                    else
                    {
                        rt_pages_free(page, 0);
                        LOG_I("%s: page read(va=%p) fault from %s(start=%p,size=%p)", __func__,
                              msg->fault_vaddr, VAREA_NAME(ex_varea), ex_varea->start, ex_varea->size);
                    }
                }
                else
                {
                    rc = _override_map(ex_varea, aspace, fault_vaddr, msg, page);
                }
            }
            else
            {
                LOG_I("%s: pages allocation failed", __func__);
            }
        }
        else
        {
            LOG_I("%s: no page read method provided from %s", __func__, VAREA_NAME(ex_varea));
        }
    }
    else
    {
        LOG_I("%s: unavailable memory object", __func__);
    }

    return rc;
}
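
/*
 * Illustrative sketch (added, not part of the original source): the generic
 * fault path is expected to call rt_varea_fix_private_locked() when a write
 * hits a private varea backed by another object, copying the old page
 * content unless `dont_copy` is set. The shape of such a call site, with
 * hypothetical local names:
 *
 *   struct rt_aspace_fault_msg msg;      // filled in by the fault handler
 *   rt_bool_t dont_copy = RT_FALSE;      // keep the original page content
 *   if (rt_varea_fix_private_locked(ex_varea, pa, &msg, dont_copy)
 *           == MM_FAULT_FIXABLE_TRUE)
 *   {
 *       // the faulting address is now mapped to a private, writable copy
 *   }
 */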

int rt_aspace_map_private(rt_aspace_t aspace, void **addr, rt_size_t length,
                          rt_size_t attr, mm_flag_t flags)
{
    int rc;
    rt_mem_obj_t priv_obj;

    if (flags & MMF_STATIC_ALLOC)
    {
        rc = -RT_EINVAL;
    }
    else
    {
        priv_obj = _get_private_obj(aspace);
        if (priv_obj)
        {
            flags |= MMF_MAP_PRIVATE;
            flags &= ~MMF_PREFETCH;
            rc = rt_aspace_map(aspace, addr, length, attr, flags, priv_obj, 0);
        }
        else
        {
            rc = -RT_ENOMEM;
        }
    }

    return rc;
}
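
/*
 * Usage sketch (added, not part of the original source): reserving an
 * anonymous private region in a user aspace. `user_aspace` and `attr` are
 * hypothetical and would come from the caller and the platform MMU attribute
 * helpers; error handling is abbreviated.
 *
 *   void *vaddr = RT_NULL;                   // let the allocator choose
 *   rt_size_t length = 4 * ARCH_PAGE_SIZE;
 *   int err = rt_aspace_map_private(user_aspace, &vaddr, length, attr, 0);
 *   if (err == RT_EOK)
 *   {
 *       // pages are populated lazily by _anon_page_fault() on first access
 *   }
 */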

static int _release_shared(rt_varea_t varea, void *arg)
{
    rt_aspace_t src = varea->aspace;
    rt_mem_obj_t mem_obj = varea->mem_obj;

    if (mem_obj != _get_private_obj(src))
    {
        _varea_uninstall_locked(varea);
        if (VAREA_NOT_STATIC(varea))
        {
            rt_free(varea);
        }
    }

    return 0;
}

static rt_err_t _convert_readonly(rt_aspace_t aspace, long base_reference)
{
    rt_mem_obj_t aobj;
    rt_private_ctx_t pctx;
    aobj = _get_private_obj(aspace);
    pctx = _anon_mobj_to_pctx(aobj);

    LOG_D("Ref(cur=%d,base=%d)", pctx->reference, base_reference);
    rt_aspace_traversal(aspace, _release_shared, 0);
    pctx->readonly = base_reference;
    return 0;
}

rt_inline void _switch_aspace(rt_aspace_t *pa, rt_aspace_t *pb)
{
    rt_aspace_t temp;
    temp = *pa;
    *pa = *pb;
    *pb = temp;
}

rt_err_t rt_aspace_fork(rt_aspace_t *psrc, rt_aspace_t *pdst)
{
    rt_err_t rc;
    void *pgtbl;
    rt_aspace_t backup;
    rt_aspace_t src = *psrc;
    rt_aspace_t dst = *pdst;
    long base_reference;

    pgtbl = rt_hw_mmu_pgtbl_create();
    if (pgtbl)
    {
        backup = rt_aspace_create(src->start, src->size, pgtbl);
        if (backup)
        {
            WR_LOCK(src);
            base_reference = rt_atomic_load(_anon_obj_get_reference(src->private_object));
            rc = rt_aspace_duplicate_locked(src, dst);
            WR_UNLOCK(src);

            if (!rc)
            {
                /* WR_LOCK(dst) is not necessary since dst is not available currently */
                rc = rt_aspace_duplicate_locked(dst, backup);
                if (!rc)
                {
                    _switch_aspace(psrc, &backup);
                    _convert_readonly(backup, base_reference);
                }
            }
        }
        else
        {
            rc = -RT_ENOMEM;
        }
    }
    else
    {
        rc = -RT_ENOMEM;
    }

    return rc;
}
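
/*
 * Usage sketch (added, not part of the original source): a fork-style
 * duplication. On success *psrc is switched to a fresh duplicate, while the
 * original aspace becomes the read-only backup shared by parent and child.
 * `parent` and `child` are hypothetical handles; the child aspace is assumed
 * to be created by the caller beforehand.
 *
 *   rt_aspace_t parent = ...;    // current process aspace
 *   rt_aspace_t child  = ...;    // freshly created, empty aspace
 *   rt_err_t err = rt_aspace_fork(&parent, &child);
 *   if (err == RT_EOK)
 *   {
 *       // writes in either aspace now fault and are fixed to private pages
 *   }
 */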