lwp_user_mm.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2021-02-06     lizhirui     fixed vtable size problem
 * 2021-02-12     lizhirui     add 64-bit support for lwp_brk
 * 2021-02-19     lizhirui     add riscv64 support for lwp_user_accessable and lwp_get_from_user
 * 2021-06-07     lizhirui     modify user space bound check
 */
#include <rtthread.h>
#include <rthw.h>
#include <string.h>

#ifdef ARCH_MM_MMU

#include <lwp.h>
#include <lwp_arch.h>
#include <lwp_mm.h>
#include <lwp_user_mm.h>

#include <mm_aspace.h>
#include <mm_fault.h>
#include <mm_flag.h>
#include <mm_page.h>
#include <mmu.h>
#include <page.h>

#define DBG_TAG "LwP"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace);
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
{
    int err = -RT_ENOMEM;

    lwp->lwp_obj = rt_malloc(sizeof(struct rt_lwp_objs));
    if (lwp->lwp_obj)
    {
        /* initialize only after the allocation is known to have succeeded */
        _init_lwp_objs(lwp->lwp_obj, lwp->aspace);

        err = arch_user_space_init(lwp);
        if (!is_fork && err == RT_EOK)
        {
            void *addr = (void *)USER_STACK_VSTART;
            err = rt_aspace_map(lwp->aspace, &addr,
                                USER_STACK_VEND - USER_STACK_VSTART,
                                MMU_MAP_U_RWCB, 0, &lwp->lwp_obj->mem_obj, 0);
        }
    }
    return err;
}
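
/*
 * Usage sketch (illustrative, not part of this file): a process-creation
 * path would allocate the lwp, build its aspace, and then call
 * lwp_user_space_init() before loading the program image. The surrounding
 * helper names are assumptions for illustration.
 *
 *     struct rt_lwp *lwp = alloc_lwp_somehow();
 *     if (lwp && lwp_user_space_init(lwp, RT_FALSE) == RT_EOK)
 *     {
 *         // the user stack region is now mapped; a loader could start
 *         // placing text/data segments with lwp_map_user()
 *     }
 */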
void lwp_aspace_switch(struct rt_thread *thread)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_aspace_t aspace;
    void *from_tbl;

    if (thread->lwp)
    {
        lwp = (struct rt_lwp *)thread->lwp;
        aspace = lwp->aspace;
    }
    else
    {
        aspace = &rt_kernel_space;
    }

    from_tbl = rt_hw_mmu_tbl_get();
    if (aspace->page_table != from_tbl)
    {
        rt_hw_aspace_switch(aspace);
    }
}
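
/*
 * Note: this is meant to run on every context switch; the page table is
 * swapped only when the incoming thread belongs to a different address
 * space (kernel threads share rt_kernel_space). A minimal sketch of a
 * scheduler hook, with a hypothetical hook name:
 *
 *     static void on_scheduler_switch(struct rt_thread *from,
 *                                     struct rt_thread *to)
 *     {
 *         lwp_aspace_switch(to);  // no-op when the page table is unchanged
 *     }
 */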
void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    arch_user_space_free(lwp);
    rt_free(lwp->lwp_obj);
}
static const char *user_get_name(rt_varea_t varea)
{
    char *name;

    if (varea->flag & MMF_TEXT)
    {
        name = "user.text";
    }
    else
    {
        if (varea->start == (void *)USER_STACK_VSTART)
        {
            name = "user.stack";
        }
        else if (varea->start >= (void *)USER_HEAP_VADDR &&
                 varea->start < (void *)USER_HEAP_VEND)
        {
            name = "user.heap";
        }
        else
        {
            name = "user.data";
        }
    }
    return name;
}

#define NO_AUTO_FETCH               0x1
#define VAREA_CAN_AUTO_FETCH(varea) (!((rt_ubase_t)((varea)->data) & NO_AUTO_FETCH))
static void _user_do_page_fault(struct rt_varea *varea,
                                struct rt_aspace_fault_msg *msg)
{
    struct rt_lwp_objs *lwp_objs;
    lwp_objs = rt_container_of(varea->mem_obj, struct rt_lwp_objs, mem_obj);

    if (lwp_objs->source)
    {
        void *paddr = rt_hw_mmu_v2p(lwp_objs->source, msg->fault_vaddr);
        if (paddr != ARCH_MAP_FAILED)
        {
            void *vaddr;
            vaddr = paddr - PV_OFFSET;

            if (!(varea->flag & MMF_TEXT))
            {
                /* data page: give the faulting lwp a private copy */
                void *cp = rt_pages_alloc(0);
                if (cp)
                {
                    memcpy(cp, vaddr, ARCH_PAGE_SIZE);
                    rt_varea_pgmgr_insert(varea, cp);
                    msg->response.status = MM_FAULT_STATUS_OK;
                    msg->response.vaddr = cp;
                    msg->response.size = ARCH_PAGE_SIZE;
                }
                else
                {
                    LOG_W("%s: page alloc failed at %p", __func__,
                          varea->start);
                }
            }
            else
            {
                /* text page: read-only, so share the source page and
                 * take a reference instead of copying */
                rt_page_t page = rt_page_addr2page(vaddr);
                page->ref_cnt += 1;
                rt_varea_pgmgr_insert(varea, vaddr);
                msg->response.status = MM_FAULT_STATUS_OK;
                msg->response.vaddr = vaddr;
                msg->response.size = ARCH_PAGE_SIZE;
            }
        }
        else if (!(varea->flag & MMF_TEXT))
        {
            /* if the data segment does not exist in the source, fall back
             * to the dummy mapper */
            rt_mm_dummy_mapper.on_page_fault(varea, msg);
        }
    }
    else if (VAREA_CAN_AUTO_FETCH(varea))
    {
        /* if (!lwp_objs->source), there is no aspace acting as source data */
        rt_mm_dummy_mapper.on_page_fault(varea, msg);
    }
}
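
/*
 * The handler above implements demand paging against an optional source
 * aspace: writable data pages are copied, read-only text pages are shared
 * by reference, and everything else falls back to the dummy mapper. A
 * rough sketch of the dispatch, assuming the generic fault path fills the
 * message before calling into the mem_obj:
 *
 *     struct rt_aspace_fault_msg msg;
 *     msg.fault_vaddr = fault_addr;           // page-aligned fault address
 *     varea->mem_obj->on_page_fault(varea, &msg);
 *     if (msg.response.status == MM_FAULT_STATUS_OK)
 *     {
 *         // map msg.response.vaddr at fault_addr and resume the thread
 *     }
 */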
static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace)
{
    /**
     * @brief An lwp_objs represents the base layout of page-based memory in
     * user space. This is useful for duplication, where an (lwp_objs, offset)
     * pair is enough to provide identical memory. That is implemented through
     * lwp_objs->source.
     */
    lwp_objs->source = NULL;
    lwp_objs->mem_obj.get_name = user_get_name;
    lwp_objs->mem_obj.hint_free = NULL;
    lwp_objs->mem_obj.on_page_fault = _user_do_page_fault;
    lwp_objs->mem_obj.on_page_offload = rt_mm_dummy_mapper.on_page_offload;
    lwp_objs->mem_obj.on_varea_open = rt_mm_dummy_mapper.on_varea_open;
    lwp_objs->mem_obj.on_varea_close = rt_mm_dummy_mapper.on_varea_close;
}
static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
                           int text)
{
    void *va = map_va;
    int ret = 0;
    size_t flags = MMF_PREFETCH;

    if (text)
        flags |= MMF_TEXT;

    rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
    ret = rt_aspace_map(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags,
                        mem_obj, 0);
    if (ret != RT_EOK)
    {
        va = RT_NULL;
        LOG_I("lwp_map_user: failed to map %p with size %lx with errno %d",
              map_va, map_size, ret);
    }
    return va;
}
int lwp_unmap_user(struct rt_lwp *lwp, void *va)
{
    int err;
    err = rt_aspace_unmap(lwp->aspace, va);
    return err;
}
static void _dup_varea(rt_varea_t varea, struct rt_lwp *src_lwp,
                       rt_aspace_t dst)
{
    void *vaddr = varea->start;
    void *vend = vaddr + varea->size;

    if (vaddr < (void *)USER_STACK_VSTART || vaddr >= (void *)USER_STACK_VEND)
    {
        /* non-stack region: load every page that is present in the source */
        while (vaddr != vend)
        {
            void *paddr;
            paddr = lwp_v2p(src_lwp, vaddr);
            if (paddr != ARCH_MAP_FAILED)
            {
                rt_aspace_load_page(dst, vaddr, 1);
            }
            vaddr += ARCH_PAGE_SIZE;
        }
    }
    else
    {
        /* stack region: walk downward from the top and stop at the first
         * page that was never faulted in */
        while (vaddr != vend)
        {
            vend -= ARCH_PAGE_SIZE;
            void *paddr;
            paddr = lwp_v2p(src_lwp, vend);
            if (paddr != ARCH_MAP_FAILED)
            {
                rt_aspace_load_page(dst, vend, 1);
            }
            else
            {
                break;
            }
        }
    }
}
int lwp_dup_user(rt_varea_t varea, void *arg)
{
    int err;
    struct rt_lwp *self_lwp = lwp_self();
    struct rt_lwp *new_lwp = (struct rt_lwp *)arg;
    void *pa = RT_NULL;
    void *va = RT_NULL;
    rt_mem_obj_t mem_obj = varea->mem_obj;

    if (!mem_obj)
    {
        /* duplicate a physical mapping */
        pa = lwp_v2p(self_lwp, (void *)varea->start);
        RT_ASSERT(pa != ARCH_MAP_FAILED);
        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_range_size = new_lwp->aspace->size,
                                     .limit_start = new_lwp->aspace->start,
                                     .prefer = varea->start,
                                     .map_size = varea->size};
        err = rt_aspace_map_phy(new_lwp->aspace, &hint, varea->attr,
                                MM_PA_TO_OFF(pa), &va);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size 0x%lx", __func__,
                  varea->start, varea->size);
        }
    }
    else
    {
        /* duplicate a mem_obj-backed mapping */
        va = varea->start;
        err = rt_aspace_map(new_lwp->aspace, &va, varea->size, varea->attr,
                            varea->flag, &new_lwp->lwp_obj->mem_obj,
                            varea->offset);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size 0x%lx", __func__,
                  varea->start, varea->size);
        }
        else
        {
            /* load page frames for a !MMF_PREFETCH varea */
            if (!(varea->flag & MMF_PREFETCH))
            {
                _dup_varea(varea, self_lwp, new_lwp->aspace);
            }
        }
    }

    if (va != (void *)varea->start)
    {
        return -1;
    }
    return 0;
}
int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }

    /* round the range out to whole pages, remembering the sub-page offset */
    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    ret = _lwp_map_user(lwp, map_va, map_size, text);
    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}
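
/*
 * Usage sketch: mapping a page-unaligned user range. The offset handling
 * above means that on success the caller gets back an address carrying the
 * same sub-page offset it asked for. The addresses are illustrative only.
 *
 *     void *va = lwp_map_user(lwp, (void *)0x100400, 0x2000, 0);
 *     // on success, va keeps the 0x400 offset and the pages covering the
 *     // page-rounded range [0x100000, 0x103000) are mapped
 */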
static inline size_t _flags_to_attr(size_t flags)
{
    size_t attr;

    if (flags & LWP_MAP_FLAG_NOCACHE)
    {
        attr = MMU_MAP_U_RW;
    }
    else
    {
        attr = MMU_MAP_U_RWCB;
    }
    return attr;
}

static inline mm_flag_t _flags_to_aspace_flag(size_t flags)
{
    /* no lwp mapping flag translates to an aspace flag yet */
    mm_flag_t mm_flag = 0;
    return mm_flag;
}
static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    void *va = map_va;
    int ret = 0;
    rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
    rt_varea_t varea;
    mm_flag_t mm_flags;
    size_t attr;

    varea = rt_malloc(sizeof(*varea));
    if (varea)
    {
        attr = _flags_to_attr(flags);
        mm_flags = _flags_to_aspace_flag(flags);
        ret = rt_aspace_map_static(lwp->aspace, varea, &va, map_size,
                                   attr, mm_flags, mem_obj, 0);
        if (ret == RT_EOK)
        {
            /* let aspace handle the free of varea */
            varea->flag &= ~MMF_STATIC_ALLOC;
            /* don't apply auto fetch on this */
            varea->data = (void *)NO_AUTO_FETCH;
        }
        else
        {
            /* the varea was never inserted into the aspace, so free it here */
            rt_free(varea);
            varea = RT_NULL;
        }
    }
    else
    {
        ret = -RT_ENOMEM;
    }

    if (ret != RT_EOK)
    {
        LOG_I("%s: failed to map %p with size %lx with errno %d", __func__,
              map_va, map_size, ret);
    }
    return varea;
}
static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    rt_varea_t varea = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }

    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    varea = _lwp_map_user_varea(lwp, map_va, map_size, flags);
    return varea;
}

rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    return _map_user_varea_ext(lwp, map_va, map_size, flags);
}

rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size)
{
    return _map_user_varea_ext(lwp, map_va, map_size, LWP_MAP_FLAG_NONE);
}
void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
                       size_t map_size, int cached)
{
    int err;
    void *va;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        /* a fixed va is only usable if it shares the pa's sub-page offset */
        if (((size_t)map_va & ARCH_PAGE_MASK) !=
            ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            return 0;
        }
    }

    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);

    struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                 .limit_range_size = lwp->aspace->size,
                                 .limit_start = lwp->aspace->start,
                                 .prefer = map_va,
                                 .map_size = map_size};
    rt_size_t attr = cached ? MMU_MAP_U_RWCB : MMU_MAP_U_RW;

    err = rt_aspace_map_phy(lwp->aspace, &hint, attr, MM_PA_TO_OFF(map_pa), &va);
    if (err != RT_EOK)
    {
        va = RT_NULL;
        LOG_W("%s: rt_aspace_map_phy failed with code %d", __func__, err);
    }
    else
    {
        va = (void *)((char *)va + offset);
    }
    return va;
}
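
/*
 * Usage sketch: exposing a device's MMIO registers to a user process,
 * uncached. UART0_PHY_BASE is a hypothetical platform constant.
 *
 *     void *uva = lwp_map_user_phy(lwp, RT_NULL, (void *)UART0_PHY_BASE,
 *                                  0x1000, 0);  // cached == 0 -> MMU_MAP_U_RW
 *     if (uva == RT_NULL)
 *     {
 *         // mapping failed
 *     }
 */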
rt_base_t lwp_brk(void *addr)
{
    rt_base_t ret = -1;
    struct rt_lwp *lwp = RT_NULL;

    rt_mm_lock();
    lwp = rt_thread_self()->lwp;

    if ((size_t)addr <= lwp->end_heap)
    {
        /* shrinking is not supported; report the current break */
        ret = (rt_base_t)lwp->end_heap;
    }
    else
    {
        size_t size = 0;
        void *va = RT_NULL;

        if ((size_t)addr <= USER_HEAP_VEND)
        {
            size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) &
                   ~ARCH_PAGE_MASK;
            va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
        }
        if (va)
        {
            lwp->end_heap += size;
            ret = lwp->end_heap;
        }
    }
    rt_mm_unlock();
    return ret;
}
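
/*
 * Usage sketch: a user-side allocator would grow the heap through a brk
 * syscall that lands here. The sbrk-style wrapper below is hypothetical;
 * note that the returned break may exceed the request because the growth
 * is rounded up to whole pages.
 *
 *     static void *sys_sbrk(intptr_t increment)
 *     {
 *         rt_base_t cur = lwp_brk((void *)0);        // query current break
 *         if (increment <= 0)
 *             return (void *)cur;
 *         if (lwp_brk((char *)cur + increment) >= cur + increment)
 *             return (void *)cur;                    // old break, POSIX style
 *         return (void *)-1;
 *     }
 */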
#define MAP_ANONYMOUS 0x20

void *lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd,
                off_t pgoffset)
{
    void *ret = (void *)-1;

    if (fd == -1)
    {
        /* anonymous mapping: back it with ordinary user pages */
        ret = lwp_map_user(lwp_self(), addr, length, 0);
        if (ret)
        {
            if ((flags & MAP_ANONYMOUS) != 0)
            {
                rt_memset(ret, 0, length);
            }
        }
        else
        {
            ret = (void *)-1;
        }
    }
    else
    {
        /* file mapping: only device-backed vnodes are supported, and the
         * device driver performs the actual mapping */
        struct dfs_fd *d;

        d = fd_get(fd);
        if (d && d->vnode->type == FT_DEVICE)
        {
            struct dfs_mmap2_args mmap2;

            mmap2.addr = addr;
            mmap2.length = length;
            mmap2.prot = prot;
            mmap2.flags = flags;
            mmap2.pgoffset = pgoffset;
            mmap2.ret = (void *)-1;
            if (dfs_file_mmap2(d, &mmap2) == 0)
            {
                ret = mmap2.ret;
            }
        }
    }
    return ret;
}
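
/*
 * Usage sketch: an anonymous private mapping through the mmap2 path, as a
 * libc would issue for large allocations. PROT_READ/PROT_WRITE stand for
 * the usual POSIX constants; MAP_ANONYMOUS matches the 0x20 value defined
 * above.
 *
 *     void *p = lwp_mmap2(RT_NULL, 4096 * 4, PROT_READ | PROT_WRITE,
 *                         MAP_ANONYMOUS, -1, 0);
 *     if (p != (void *)-1)
 *     {
 *         // 16 KB of zero-filled user memory
 *     }
 */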
int lwp_munmap(void *addr)
{
    int ret = 0;

    rt_mm_lock();
    ret = lwp_unmap_user(lwp_self(), addr);
    rt_mm_unlock();
    return ret;
}
size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check src */
    if (src < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (src >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_get(lwp, dst, src, size);
}

size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check dst */
    if (dst < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)dst + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_put(lwp, dst, src, size);
}
int lwp_user_accessable(void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;
    struct rt_lwp *lwp = lwp_self();

    if (!lwp)
    {
        return 0;
    }
    if (!size || !addr)
    {
        return 0;
    }
    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    if (addr_start < (void *)USER_VADDR_START)
    {
        return 0;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
#endif

    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;
        if (size < len)
        {
            len = size;
        }
        tmp_addr = lwp_v2p(lwp, addr_start);
        if (tmp_addr == ARCH_MAP_FAILED)
        {
            if ((rt_ubase_t)addr_start >= USER_STACK_VSTART &&
                (rt_ubase_t)addr_start < USER_STACK_VEND)
            {
                /* a not-yet-faulted stack page: touch it so the fault
                 * handler can map it in on demand */
                tmp_addr = *(void **)addr_start;
            }
            else
            {
                return 0;
            }
        }
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);
    return 1;
}
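
/*
 * Usage sketch: a syscall handler validating a user buffer before writing
 * a result back. The syscall and EFAULT value are hypothetical here.
 *
 *     long sys_gettime(void *ubuf, size_t len)
 *     {
 *         uint64_t now = 0;  // fill from a clock source
 *         if (len < sizeof(now) || !lwp_user_accessable(ubuf, sizeof(now)))
 *             return -EFAULT;
 *         return lwp_put_to_user(ubuf, &now, sizeof(now)) == sizeof(now)
 *                    ? 0 : -EFAULT;
 *     }
 */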
/* src is in the lwp's user address space, dst is in kernel space */
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_dst = dst;
    addr_start = src;
    addr_end = (void *)((char *)src + size);
    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;
        if (size < len)
        {
            len = size;
        }
        tmp_src = lwp_v2p(lwp, addr_start);
        if (tmp_src == ARCH_MAP_FAILED)
        {
            break;
        }
        /* translate the physical address into the kernel's linear mapping */
        tmp_src = (void *)((char *)tmp_src - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_dst = (void *)((char *)tmp_dst + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}
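
/*
 * Unlike lwp_get_from_user(), lwp_data_get() takes an explicit lwp, so it
 * also works across processes (e.g. a debugger-style read). A sketch, with
 * target_lwp and user_addr obtained elsewhere:
 *
 *     char buf[64];
 *     size_t n = lwp_data_get(target_lwp, buf, (void *)user_addr,
 *                             sizeof(buf));
 *     // n < sizeof(buf) means an unmapped page cut the copy short
 */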
/* dst is in the lwp's user address space, src is in kernel space */
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_src = src;
    addr_start = dst;
    addr_end = (void *)((char *)dst + size);
    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;
        if (size < len)
        {
            len = size;
        }
        tmp_dst = lwp_v2p(lwp, addr_start);
        if (tmp_dst == ARCH_MAP_FAILED)
        {
            break;
        }
        /* translate the physical address into the kernel's linear mapping */
        tmp_dst = (void *)((char *)tmp_dst - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_src = (void *)((char *)tmp_src + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}
#endif /* ARCH_MM_MMU */