/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2021-02-06     lizhirui     fixed vtable size problem
 * 2021-02-12     lizhirui     add 64-bit support for lwp_brk
 * 2021-02-19     lizhirui     add riscv64 support for lwp_user_accessable and lwp_get_from_user
 * 2021-06-07     lizhirui     modify user space bound check
 * 2022-12-25     wangxiaoyao  adapt to new mm
 */

#include <rtthread.h>
#include <rthw.h>
#include <string.h>

#ifdef ARCH_MM_MMU

#include <lwp.h>
#include <lwp_arch.h>
#include <lwp_mm.h>
#include <lwp_user_mm.h>
#include <mm_aspace.h>
#include <mm_fault.h>
#include <mm_flag.h>
#include <mm_page.h>
#include <mmu.h>
#include <page.h>

#define DBG_TAG "LwP"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace);

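/**
 * @brief Initialize the user address space of an lwp: allocate its lwp_objs,
 * set up the arch-specific page table, and (for a fresh process, not a fork)
 * create the mapping that reserves the whole user stack region.
 */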
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
{
    int err = -RT_ENOMEM;

    lwp->lwp_obj = rt_malloc(sizeof(struct rt_lwp_objs));
    if (lwp->lwp_obj)
    {
        _init_lwp_objs(lwp->lwp_obj, lwp->aspace);

        err = arch_user_space_init(lwp);
        if (!is_fork && err == RT_EOK)
        {
            void *addr = (void *)USER_STACK_VSTART;
            err = rt_aspace_map(lwp->aspace, &addr,
                                USER_STACK_VEND - USER_STACK_VSTART,
                                MMU_MAP_U_RWCB, 0, &lwp->lwp_obj->mem_obj, 0);
        }
    }

    return err;
}

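/**
 * @brief Switch the active address space to that of the incoming thread.
 * Kernel threads (no lwp) run in rt_kernel_space; the hardware page table is
 * only reloaded when it actually differs from the current one.
 */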
void lwp_aspace_switch(struct rt_thread *thread)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_aspace_t aspace;
    void *from_tbl;

    if (thread->lwp)
    {
        lwp = (struct rt_lwp *)thread->lwp;
        aspace = lwp->aspace;
    }
    else
    {
        aspace = &rt_kernel_space;
    }

    from_tbl = rt_hw_mmu_tbl_get();
    if (aspace->page_table != from_tbl)
    {
        rt_hw_aspace_switch(aspace);
    }
}

void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    arch_user_space_free(lwp);
    rt_free(lwp->lwp_obj);
}

static const char *user_get_name(rt_varea_t varea)
{
    char *name;

    if (varea->flag & MMF_TEXT)
    {
        name = "user.text";
    }
    else
    {
        if (varea->start == (void *)USER_STACK_VSTART)
        {
            name = "user.stack";
        }
        else if (varea->start >= (void *)USER_HEAP_VADDR &&
                 varea->start < (void *)USER_HEAP_VEND)
        {
            name = "user.heap";
        }
        else
        {
            name = "user.data";
        }
    }

    return name;
}

#define NO_AUTO_FETCH               0x1
#define VAREA_CAN_AUTO_FETCH(varea) (!((rt_ubase_t)((varea)->data) & NO_AUTO_FETCH))

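/**
 * @brief Page-fault handler for user vareas. When the lwp has a source
 * address space (duplication), the faulting page is resolved from it: data
 * pages are copied into a fresh page frame, while text pages are shared by
 * taking an extra reference on the existing frame. Without a source, the
 * fault falls back to the anonymous dummy mapper, unless the varea has
 * opted out of auto fetch.
 */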
static void _user_do_page_fault(struct rt_varea *varea,
                                struct rt_aspace_fault_msg *msg)
{
    struct rt_lwp_objs *lwp_objs;
    lwp_objs = rt_container_of(varea->mem_obj, struct rt_lwp_objs, mem_obj);

    if (lwp_objs->source)
    {
        char *paddr = rt_hw_mmu_v2p(lwp_objs->source, msg->fault_vaddr);
        if (paddr != ARCH_MAP_FAILED)
        {
            void *vaddr;
            vaddr = paddr - PV_OFFSET;

            if (!(varea->flag & MMF_TEXT))
            {
                void *cp = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
                if (cp)
                {
                    memcpy(cp, vaddr, ARCH_PAGE_SIZE);
                    rt_varea_pgmgr_insert(varea, cp);
                    msg->response.status = MM_FAULT_STATUS_OK;
                    msg->response.vaddr = cp;
                    msg->response.size = ARCH_PAGE_SIZE;
                }
                else
                {
                    LOG_W("%s: page alloc failed at %p", __func__,
                          varea->start);
                }
            }
            else
            {
                rt_page_t page = rt_page_addr2page(vaddr);
                page->ref_cnt += 1;
                rt_varea_pgmgr_insert(varea, vaddr);
                msg->response.status = MM_FAULT_STATUS_OK;
                msg->response.vaddr = vaddr;
                msg->response.size = ARCH_PAGE_SIZE;
            }
        }
        else if (!(varea->flag & MMF_TEXT))
        {
            /* if the data segment does not exist in the source, fall back */
            rt_mm_dummy_mapper.on_page_fault(varea, msg);
        }
    }
    else if (VAREA_CAN_AUTO_FETCH(varea))
    {
        /* if (!lwp_objs->source), no aspace serves as source data */
        rt_mm_dummy_mapper.on_page_fault(varea, msg);
    }
}

static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace)
{
    if (lwp_objs)
    {
        /**
         * @brief An lwp_objs represents the base layout of page-based memory
         * in user space. This is useful for duplication, where an (lwp_objs,
         * offset) pair is all that is needed to provide identical memory;
         * this is implemented through lwp_objs->source.
         */
        lwp_objs->source = NULL;
        lwp_objs->mem_obj.get_name = user_get_name;
        lwp_objs->mem_obj.hint_free = NULL;
        lwp_objs->mem_obj.on_page_fault = _user_do_page_fault;
        lwp_objs->mem_obj.on_page_offload = rt_mm_dummy_mapper.on_page_offload;
        lwp_objs->mem_obj.on_varea_open = rt_mm_dummy_mapper.on_varea_open;
        lwp_objs->mem_obj.on_varea_close = rt_mm_dummy_mapper.on_varea_close;
    }
}

static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
                           int text)
{
    void *va = map_va;
    int ret = 0;
    size_t flags = MMF_PREFETCH;

    if (text)
        flags |= MMF_TEXT;

    rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
    ret = rt_aspace_map(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags,
                        mem_obj, 0);
    if (ret != RT_EOK)
    {
        va = RT_NULL;
        LOG_I("lwp_map_user: failed to map %p with size %lx, errno %d",
              map_va, map_size, ret);
    }

    return va;
}

int lwp_unmap_user(struct rt_lwp *lwp, void *va)
{
    int err = rt_aspace_unmap(lwp->aspace, va);
    return err;
}

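/**
 * @brief Populate the pages of a duplicated varea in the destination aspace.
 * Regular regions are walked from low to high addresses; the stack region is
 * walked from the top downward and stops at the first unmapped page,
 * presumably because only the used (highest) part of a lazily-grown stack is
 * backed by page frames.
 */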
static void _dup_varea(rt_varea_t varea, struct rt_lwp *src_lwp,
                       rt_aspace_t dst)
{
    char *vaddr = varea->start;
    char *vend = vaddr + varea->size;

    if (vaddr < (char *)USER_STACK_VSTART || vaddr >= (char *)USER_STACK_VEND)
    {
        while (vaddr != vend)
        {
            void *paddr;
            paddr = lwp_v2p(src_lwp, vaddr);
            if (paddr != ARCH_MAP_FAILED)
            {
                rt_aspace_load_page(dst, vaddr, 1);
            }
            vaddr += ARCH_PAGE_SIZE;
        }
    }
    else
    {
        while (vaddr != vend)
        {
            vend -= ARCH_PAGE_SIZE;
            void *paddr;
            paddr = lwp_v2p(src_lwp, vend);
            if (paddr != ARCH_MAP_FAILED)
            {
                rt_aspace_load_page(dst, vend, 1);
            }
            else
            {
                break;
            }
        }
    }
}

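/**
 * @brief Aspace-iteration callback used on fork: duplicate one varea of the
 * current lwp into the child. Physical (device) mappings are re-created at
 * the same virtual address and attributes; mem_obj-backed mappings are
 * re-mapped against the child's own lwp_obj and, when not MMF_PREFETCH,
 * their committed pages are loaded eagerly. Returns 0 on success, -1 when
 * the mapping did not land at the expected address.
 */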
int lwp_dup_user(rt_varea_t varea, void *arg)
{
    int err;
    struct rt_lwp *self_lwp = lwp_self();
    struct rt_lwp *new_lwp = (struct rt_lwp *)arg;

    void *pa = RT_NULL;
    void *va = RT_NULL;
    rt_mem_obj_t mem_obj = varea->mem_obj;

    if (!mem_obj)
    {
        /* duplicate a physical mapping */
        pa = lwp_v2p(self_lwp, (void *)varea->start);
        RT_ASSERT(pa != ARCH_MAP_FAILED);
        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_range_size = new_lwp->aspace->size,
                                     .limit_start = new_lwp->aspace->start,
                                     .prefer = varea->start,
                                     .map_size = varea->size};
        err = rt_aspace_map_phy(new_lwp->aspace, &hint, varea->attr,
                                MM_PA_TO_OFF(pa), &va);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size %lx", __func__,
                  varea->start, varea->size);
        }
    }
    else
    {
        /* duplicate a mem_obj-backed mapping */
        va = varea->start;
        err = rt_aspace_map(new_lwp->aspace, &va, varea->size, varea->attr,
                            varea->flag, &new_lwp->lwp_obj->mem_obj,
                            varea->offset);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size %lx", __func__,
                  varea->start, varea->size);
        }
        else
        {
            /* load page frames for a !MMF_PREFETCH varea */
            if (!(varea->flag & MMF_PREFETCH))
            {
                _dup_varea(varea, self_lwp, new_lwp->aspace);
            }
        }
    }

    if (va != (void *)varea->start)
    {
        return -1;
    }
    return 0;
}

int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}

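/**
 * @brief Map an anonymous, pageable region into the user space of lwp. The
 * request is expanded to page granularity: map_va is rounded down to a page
 * boundary, map_size is rounded up so the original byte range stays covered,
 * and the returned pointer carries the original in-page offset. For example,
 * with 4 KiB pages, a request for 0x100 bytes at 0x1234 maps one page at
 * 0x1000 and returns base + 0x234.
 */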
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    ret = _lwp_map_user(lwp, map_va, map_size, text);
    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}

static inline size_t _flags_to_attr(size_t flags)
{
    size_t attr;

    if (flags & LWP_MAP_FLAG_NOCACHE)
    {
        attr = MMU_MAP_U_RW;
    }
    else
    {
        attr = MMU_MAP_U_RWCB;
    }

    return attr;
}

/* reserved: no lwp mapping flag translates to an aspace flag yet */
static inline mm_flag_t _flags_to_aspace_flag(size_t flags)
{
    mm_flag_t mm_flag = 0;
    return mm_flag;
}

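/**
 * @brief Map a user region and hand back the varea describing it. The varea
 * is heap-allocated here, but its MMF_STATIC_ALLOC flag is cleared so the
 * aspace takes over freeing it; auto fetch is disabled so faults inside the
 * region are not satisfied automatically by the page-fault handler.
 */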
static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    void *va = map_va;
    int ret = 0;
    rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
    rt_varea_t varea;
    mm_flag_t mm_flags;
    size_t attr;

    varea = rt_malloc(sizeof(*varea));
    if (varea)
    {
        attr = _flags_to_attr(flags);
        mm_flags = _flags_to_aspace_flag(flags);
        ret = rt_aspace_map_static(lwp->aspace, varea, &va, map_size,
                                   attr, mm_flags, mem_obj, 0);
        /* let the aspace handle the free of varea */
        varea->flag &= ~MMF_STATIC_ALLOC;
        /* don't apply auto fetch on this */
        varea->data = (void *)NO_AUTO_FETCH;
    }
    else
    {
        ret = -RT_ENOMEM;
    }

    if (ret != RT_EOK)
    {
        LOG_I("lwp_map_user: failed to map %p with size %lx, errno %d",
              map_va, map_size, ret);
        if (varea)
        {
            /* the varea was never installed into the aspace; release it
             * here instead of returning an unusable descriptor */
            rt_free(varea);
            varea = RT_NULL;
        }
    }

    return varea;
}

static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    rt_varea_t varea = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    varea = _lwp_map_user_varea(lwp, map_va, map_size, flags);

    return varea;
}

rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    return _map_user_varea_ext(lwp, map_va, map_size, flags);
}

rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size)
{
    return _map_user_varea_ext(lwp, map_va, map_size, LWP_MAP_FLAG_NONE);
}

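/**
 * @brief Map a contiguous physical range into the user space of lwp, either
 * cached (MMU_MAP_U_RWCB) or uncached (MMU_MAP_U_RW). When map_va is given,
 * its in-page offset must agree with map_pa's. A typical use is exposing
 * device memory to a user process, e.g. (hypothetical register window):
 *
 *     void *uva = lwp_map_user_phy(lwp, RT_NULL, (void *)0x50000000,
 *                                  0x1000, 0);
 */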
void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
                       size_t map_size, int cached)
{
    int err;
    char *va;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        if (((size_t)map_va & ARCH_PAGE_MASK) !=
            ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            return 0;
        }
    }
    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);

    struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                 .limit_range_size = lwp->aspace->size,
                                 .limit_start = lwp->aspace->start,
                                 .prefer = map_va,
                                 .map_size = map_size};
    rt_size_t attr = cached ? MMU_MAP_U_RWCB : MMU_MAP_U_RW;

    err = rt_aspace_map_phy(lwp->aspace, &hint, attr, MM_PA_TO_OFF(map_pa),
                            (void **)&va);
    if (err != RT_EOK)
    {
        va = RT_NULL;
        LOG_W("%s: failed with code %d", __func__, err);
    }
    else
    {
        va += offset;
    }

    return va;
}

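/**
 * @brief brk(2)-style heap adjustment for the current lwp. Requests at or
 * below the current heap end are no-ops that report the existing break;
 * growing requests are rounded up to page granularity, mapped through
 * lwp_map_user(), and move end_heap forward. The whole operation runs under
 * the mm lock.
 */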
rt_base_t lwp_brk(void *addr)
{
    rt_base_t ret = -1;
    struct rt_lwp *lwp = RT_NULL;

    rt_mm_lock();
    lwp = rt_thread_self()->lwp;

    if ((size_t)addr <= lwp->end_heap)
    {
        ret = (rt_base_t)lwp->end_heap;
    }
    else
    {
        size_t size = 0;
        void *va = RT_NULL;

        if ((size_t)addr <= USER_HEAP_VEND)
        {
            size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) &
                   ~ARCH_PAGE_MASK;
            va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
        }
        if (va)
        {
            lwp->end_heap += size;
            ret = lwp->end_heap;
        }
    }
    rt_mm_unlock();

    return ret;
}

#define MAP_ANONYMOUS 0x20

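/**
 * @brief mmap2() backend for user processes. With fd == -1 this creates an
 * anonymous mapping (zero-filled when MAP_ANONYMOUS is set); otherwise the
 * request is forwarded to the backing device file through dfs_file_mmap2().
 * On failure it returns (void *)-1, mirroring MAP_FAILED.
 */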
void *lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd,
                off_t pgoffset)
{
    void *ret = (void *)-1;

    if (fd == -1)
    {
        ret = lwp_map_user(lwp_self(), addr, length, 0);
        if (ret)
        {
            if ((flags & MAP_ANONYMOUS) != 0)
            {
                rt_memset(ret, 0, length);
            }
        }
        else
        {
            ret = (void *)-1;
        }
    }
    else
    {
        struct dfs_file *d;

        d = fd_get(fd);
        if (d && d->vnode->type == FT_DEVICE)
        {
            struct dfs_mmap2_args mmap2;

            mmap2.addr = addr;
            mmap2.length = length;
            mmap2.prot = prot;
            mmap2.flags = flags;
            mmap2.pgoffset = pgoffset;
            mmap2.ret = (void *)-1;
            if (dfs_file_mmap2(d, &mmap2) == 0)
            {
                ret = mmap2.ret;
            }
        }
    }

    return ret;
}

int lwp_munmap(void *addr)
{
    int ret = 0;

    rt_mm_lock();
    ret = lwp_unmap_user(lwp_self(), addr);
    rt_mm_unlock();

    return ret;
}

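/**
 * @brief Safe copy helpers between the kernel and the current process's user
 * space. Both first reject ranges falling outside
 * [USER_VADDR_START, USER_VADDR_TOP) and then delegate to
 * lwp_data_get()/lwp_data_put(). They return the number of bytes copied, or
 * 0 if the range is invalid or no lwp is current.
 */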
size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check src */
    if (src < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (src >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_get(lwp, dst, src, size);
}

size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check dst */
    if (dst < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)dst + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_put(lwp, dst, src, size);
}

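/**
 * @brief Check whether the user range [addr, addr + size) is accessible to
 * the current process. The range is validated against the user address
 * bounds, then probed page by page through lwp_v2p(). Unmapped pages inside
 * the stack region are touched so the fault handler can commit them on
 * demand. Returns 1 when the whole range is accessible, 0 otherwise.
 */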
int lwp_user_accessable(void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;
    struct rt_lwp *lwp = lwp_self();

    if (!lwp)
    {
        return 0;
    }
    if (!size || !addr)
    {
        return 0;
    }
    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    if (addr_start < (void *)USER_VADDR_START)
    {
        return 0;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
#endif

    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;
        if (size < len)
        {
            len = size;
        }
        tmp_addr = lwp_v2p(lwp, addr_start);
        if (tmp_addr == ARCH_MAP_FAILED)
        {
            if ((rt_ubase_t)addr_start >= USER_STACK_VSTART &&
                (rt_ubase_t)addr_start < USER_STACK_VEND)
            {
                /* a stack page may be lazily committed: touch it so the
                 * page-fault handler maps it in */
                tmp_addr = *(void **)addr_start;
            }
            else
            {
                return 0;
            }
        }
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);

    return 1;
}

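/**
 * @brief The two copy loops below walk the range in page-sized chunks: each
 * iteration translates the current user page with lwp_v2p(), converts the
 * physical address back to a kernel-visible virtual address via PV_OFFSET,
 * and copies up to the next page boundary. Copying stops at the first
 * unmapped page, so the returned byte count may be short.
 */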
/* src is in the address space of lwp; dst is in the current thread's space */
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_dst = dst;
    addr_start = src;
    addr_end = (void *)((char *)src + size);
    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;
        if (size < len)
        {
            len = size;
        }
        tmp_src = lwp_v2p(lwp, addr_start);
        if (tmp_src == ARCH_MAP_FAILED)
        {
            break;
        }
        tmp_src = (void *)((char *)tmp_src - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_dst = (void *)((char *)tmp_dst + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);

    return copy_len;
}

/* dst is in the address space of lwp; src is in the current thread's space */
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_src = src;
    addr_start = dst;
    addr_end = (void *)((char *)dst + size);
    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;
        if (size < len)
        {
            len = size;
        }
        tmp_dst = lwp_v2p(lwp, addr_start);
        if (tmp_dst == ARCH_MAP_FAILED)
        {
            break;
        }
        tmp_dst = (void *)((char *)tmp_dst - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_src = (void *)((char *)tmp_src + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);

    return copy_len;
}

#endif /* ARCH_MM_MMU */