/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2021-02-06     lizhirui     fixed vtable size problem
 * 2021-02-12     lizhirui     add 64-bit support for lwp_brk
 * 2021-02-19     lizhirui     add riscv64 support for lwp_user_accessable and lwp_get_from_user
 * 2021-06-07     lizhirui     modify user space bound check
 */
#include <rtthread.h>
#include <rthw.h>
#include <string.h>

#ifdef ARCH_MM_MMU

#include <lwp.h>
#include <lwp_arch.h>
#include <lwp_mm.h>
#include <lwp_user_mm.h>
#include <mm_aspace.h>
#include <mm_fault.h>
#include <mm_flag.h>
#include <mm_page.h>
#include <mmu.h>
#include <page.h>
#define DBG_TAG "LwP"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace);
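
/**
 * Initialize the user address space of an lwp. For a fresh (non-fork)
 * process, the whole user stack region [USER_STACK_VSTART, USER_STACK_VEND)
 * is reserved in the aspace up front; since no prefetch flag is passed,
 * physical pages are expected to be populated on demand by the page-fault
 * handler.
 */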
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
{
    int err = -RT_ENOMEM;

    lwp->lwp_obj = rt_malloc(sizeof(struct rt_lwp_objs));
    if (lwp->lwp_obj)
    {
        _init_lwp_objs(lwp->lwp_obj, lwp->aspace);

        err = arch_user_space_init(lwp);
        if (!is_fork && err == RT_EOK)
        {
            void *addr = (void *)USER_STACK_VSTART;
            err = rt_aspace_map(lwp->aspace, &addr,
                                USER_STACK_VEND - USER_STACK_VSTART,
                                MMU_MAP_U_RWCB, 0, &lwp->lwp_obj->mem_obj, 0);
        }
    }

    return err;
}
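
/**
 * Switch the MMU to the address space of the given thread: the lwp's aspace
 * for a user thread, or rt_kernel_space for a pure kernel thread. The
 * hardware switch is skipped when the target page table is already active.
 */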
void lwp_aspace_switch(struct rt_thread *thread)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_aspace_t aspace;
    void *from_tbl;

    if (thread->lwp)
    {
        lwp = (struct rt_lwp *)thread->lwp;
        aspace = lwp->aspace;
    }
    else
    {
        aspace = &rt_kernel_space;
    }

    from_tbl = rt_hw_mmu_tbl_get();
    if (aspace->page_table != from_tbl)
    {
        rt_hw_aspace_switch(aspace);
    }
}
void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    arch_user_space_free(lwp);
    rt_free(lwp->lwp_obj);
}

static const char *user_get_name(rt_varea_t varea)
{
    char *name;

    if (varea->flag & MMF_TEXT)
    {
        name = "user.text";
    }
    else
    {
        if (varea->start == (void *)USER_STACK_VSTART)
        {
            name = "user.stack";
        }
        else if (varea->start >= (void *)USER_HEAP_VADDR &&
                 varea->start < (void *)USER_HEAP_VEND)
        {
            name = "user.heap";
        }
        else
        {
            name = "user.data";
        }
    }

    return name;
}
#define NO_AUTO_FETCH               0x1
#define VAREA_CAN_AUTO_FETCH(varea) (!((rt_ubase_t)((varea)->data) & NO_AUTO_FETCH))
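
/**
 * Page-fault handler for user vareas. When the lwp has a source aspace
 * (set when the lwp is duplicated from another), the fault is resolved
 * from the source: data pages are copied into a fresh private page, while
 * text pages are shared by taking an extra reference on the existing page
 * frame. Without a source, the fault falls through to rt_mm_dummy_mapper,
 * which supplies anonymous page frames.
 */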
static void _user_do_page_fault(struct rt_varea *varea,
                                struct rt_aspace_fault_msg *msg)
{
    struct rt_lwp_objs *lwp_objs;
    lwp_objs = rt_container_of(varea->mem_obj, struct rt_lwp_objs, mem_obj);

    if (lwp_objs->source)
    {
        void *paddr = rt_hw_mmu_v2p(lwp_objs->source, msg->fault_vaddr);
        if (paddr != ARCH_MAP_FAILED)
        {
            void *vaddr;
            vaddr = paddr - PV_OFFSET;

            if (!(varea->flag & MMF_TEXT))
            {
                /* data segment: copy the source page into a private page */
                void *cp = rt_pages_alloc(0);
                if (cp)
                {
                    memcpy(cp, vaddr, ARCH_PAGE_SIZE);
                    rt_varea_pgmgr_insert(varea, cp);
                    msg->response.status = MM_FAULT_STATUS_OK;
                    msg->response.vaddr = cp;
                    msg->response.size = ARCH_PAGE_SIZE;
                }
                else
                {
                    LOG_W("%s: page alloc failed at %p", __func__,
                          varea->start);
                }
            }
            else
            {
                /* text segment: share the source page, bump its refcount */
                rt_page_t page = rt_page_addr2page(vaddr);
                page->ref_cnt += 1;
                rt_varea_pgmgr_insert(varea, vaddr);
                msg->response.status = MM_FAULT_STATUS_OK;
                msg->response.vaddr = vaddr;
                msg->response.size = ARCH_PAGE_SIZE;
            }
        }
        else if (!(varea->flag & MMF_TEXT))
        {
            /* the data segment does not exist in the source; fall back */
            rt_mm_dummy_mapper.on_page_fault(varea, msg);
        }
    }
    else if (VAREA_CAN_AUTO_FETCH(varea))
    {
        /* !lwp_objs->source: there is no aspace to take source data from */
        rt_mm_dummy_mapper.on_page_fault(varea, msg);
    }
}
static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace)
{
    if (lwp_objs)
    {
        /**
         * @brief An lwp_objs represents the base layout of page-based
         * memory in user space. This is useful for duplication, where an
         * (lwp_objs, offset) pair is enough to provide identical memory.
         * This is implemented through lwp_objs->source.
         */
        lwp_objs->source = NULL;
        lwp_objs->mem_obj.get_name = user_get_name;
        lwp_objs->mem_obj.hint_free = NULL;
        lwp_objs->mem_obj.on_page_fault = _user_do_page_fault;
        lwp_objs->mem_obj.on_page_offload = rt_mm_dummy_mapper.on_page_offload;
        lwp_objs->mem_obj.on_varea_open = rt_mm_dummy_mapper.on_varea_open;
        lwp_objs->mem_obj.on_varea_close = rt_mm_dummy_mapper.on_varea_close;
    }
}
static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
                           int text)
{
    void *va = map_va;
    int ret = 0;
    size_t flags = MMF_PREFETCH;

    if (text)
        flags |= MMF_TEXT;

    rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
    ret = rt_aspace_map(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags,
                        mem_obj, 0);
    if (ret != RT_EOK)
    {
        va = RT_NULL;
        LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d",
              map_va, map_size, ret);
    }

    return va;
}

int lwp_unmap_user(struct rt_lwp *lwp, void *va)
{
    int err = rt_aspace_unmap(lwp->aspace, va);
    return err;
}
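
/**
 * Populate the destination aspace with the pages of one varea. Non-stack
 * regions are walked from the start; the stack region is walked from the
 * top downward and the walk stops at the first unmapped page, so only the
 * part of the stack that was actually used gets duplicated.
 */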
static void _dup_varea(rt_varea_t varea, struct rt_lwp *src_lwp,
                       rt_aspace_t dst)
{
    void *vaddr = varea->start;
    void *vend = vaddr + varea->size;

    if (vaddr < (void *)USER_STACK_VSTART || vaddr >= (void *)USER_STACK_VEND)
    {
        while (vaddr != vend)
        {
            void *paddr;
            paddr = lwp_v2p(src_lwp, vaddr);
            if (paddr != ARCH_MAP_FAILED)
            {
                rt_aspace_load_page(dst, vaddr, 1);
            }
            vaddr += ARCH_PAGE_SIZE;
        }
    }
    else
    {
        while (vaddr != vend)
        {
            vend -= ARCH_PAGE_SIZE;
            void *paddr;
            paddr = lwp_v2p(src_lwp, vend);
            if (paddr != ARCH_MAP_FAILED)
            {
                rt_aspace_load_page(dst, vend, 1);
            }
            else
            {
                break;
            }
        }
    }
}
int lwp_dup_user(rt_varea_t varea, void *arg)
{
    int err;
    struct rt_lwp *self_lwp = lwp_self();
    struct rt_lwp *new_lwp = (struct rt_lwp *)arg;

    void *pa = RT_NULL;
    void *va = RT_NULL;
    rt_mem_obj_t mem_obj = varea->mem_obj;

    if (!mem_obj)
    {
        /* duplicate a physical mapping */
        pa = lwp_v2p(self_lwp, (void *)varea->start);
        RT_ASSERT(pa != ARCH_MAP_FAILED);
        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_range_size = new_lwp->aspace->size,
                                     .limit_start = new_lwp->aspace->start,
                                     .prefer = varea->start,
                                     .map_size = varea->size};
        err = rt_aspace_map_phy(new_lwp->aspace, &hint, varea->attr,
                                MM_PA_TO_OFF(pa), &va);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size %p", __func__,
                  varea->start, varea->size);
        }
    }
    else
    {
        /* duplicate a mem_obj-backed mapping */
        va = varea->start;
        err = rt_aspace_map(new_lwp->aspace, &va, varea->size, varea->attr,
                            varea->flag, &new_lwp->lwp_obj->mem_obj,
                            varea->offset);
        if (err != RT_EOK)
        {
            LOG_W("%s: aspace map failed at %p with size %p", __func__,
                  varea->start, varea->size);
        }
        else
        {
            /* load page frames for a varea without MMF_PREFETCH */
            if (!(varea->flag & MMF_PREFETCH))
            {
                _dup_varea(varea, self_lwp, new_lwp->aspace);
            }
        }
    }

    if (va != (void *)varea->start)
    {
        return -1;
    }
    return 0;
}
int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}
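
/**
 * Map a region of user memory. map_va and map_size may be unaligned: the
 * request is expanded to whole pages, and the in-page offset is added back
 * to the returned address, so the caller gets a pointer equivalent to the
 * one it asked for.
 */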
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    ret = _lwp_map_user(lwp, map_va, map_size, text);
    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}
static inline size_t _flags_to_attr(size_t flags)
{
    size_t attr;

    if (flags & LWP_MAP_FLAG_NOCACHE)
    {
        attr = MMU_MAP_U_RW;
    }
    else
    {
        attr = MMU_MAP_U_RWCB;
    }

    return attr;
}

static inline mm_flag_t _flags_to_aspace_flag(size_t flags)
{
    mm_flag_t mm_flag = 0;
    return mm_flag;
}
static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    void *va = map_va;
    int ret = 0;
    rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
    rt_varea_t varea;
    mm_flag_t mm_flags;
    size_t attr;

    varea = rt_malloc(sizeof(*varea));
    if (varea)
    {
        attr = _flags_to_attr(flags);
        mm_flags = _flags_to_aspace_flag(flags);
        ret = rt_aspace_map_static(lwp->aspace, varea, &va, map_size,
                                   attr, mm_flags, mem_obj, 0);
        if (ret == RT_EOK)
        {
            /* let the aspace handle freeing of the varea */
            varea->flag &= ~MMF_STATIC_ALLOC;
            /* don't apply auto fetch on this */
            varea->data = (void *)NO_AUTO_FETCH;
        }
    }
    else
    {
        ret = -RT_ENOMEM;
    }

    if (ret != RT_EOK)
    {
        LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d",
              map_va, map_size, ret);
        /* the mapping failed: the varea was never inserted into the aspace,
         * so release it here and report failure to the caller */
        if (varea)
        {
            rt_free(varea);
            varea = RT_NULL;
        }
    }

    return varea;
}
static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    rt_varea_t varea = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    varea = _lwp_map_user_varea(lwp, map_va, map_size, flags);

    return varea;
}

rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    return _map_user_varea_ext(lwp, map_va, map_size, flags);
}

rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size)
{
    return _map_user_varea_ext(lwp, map_va, map_size, LWP_MAP_FLAG_NONE);
}
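
/**
 * Map a range of physical memory into the lwp's user space. If map_va is
 * given, it must share the same in-page offset as map_pa. The mapping is
 * cacheable write-back when cached is set, uncached otherwise.
 */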
void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
                       size_t map_size, int cached)
{
    int err;
    void *va;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        if (((size_t)map_va & ARCH_PAGE_MASK) !=
            ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            /* va and pa must share the same offset inside a page */
            return 0;
        }
    }
    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);

    struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                 .limit_range_size = lwp->aspace->size,
                                 .limit_start = lwp->aspace->start,
                                 .prefer = map_va,
                                 .map_size = map_size};
    rt_size_t attr = cached ? MMU_MAP_U_RWCB : MMU_MAP_U_RW;

    err = rt_aspace_map_phy(lwp->aspace, &hint, attr, MM_PA_TO_OFF(map_pa), &va);
    if (err != RT_EOK)
    {
        va = RT_NULL;
        LOG_W("%s: map physical memory failed", __func__);
    }
    else
    {
        va += offset;
    }

    return va;
}
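
/**
 * brk(2)-style program break. Requests at or below the current break are
 * ignored and the current break is returned; growing requests map the
 * extra pages onto the heap and advance end_heap. Returns -1 on failure.
 */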
rt_base_t lwp_brk(void *addr)
{
    rt_base_t ret = -1;
    struct rt_lwp *lwp = RT_NULL;

    rt_mm_lock();
    lwp = rt_thread_self()->lwp;

    if ((size_t)addr <= lwp->end_heap)
    {
        ret = (rt_base_t)lwp->end_heap;
    }
    else
    {
        size_t size = 0;
        void *va = RT_NULL;

        if ((size_t)addr <= USER_HEAP_VEND)
        {
            size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) &
                   ~ARCH_PAGE_MASK;
            va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
        }
        if (va)
        {
            lwp->end_heap += size;
            ret = lwp->end_heap;
        }
    }
    rt_mm_unlock();

    return ret;
}
#define MAP_ANONYMOUS 0x20
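
/**
 * mmap2() backend for user processes. fd == -1 yields an anonymous user
 * mapping (zero-filled when MAP_ANONYMOUS is set); otherwise the request
 * is forwarded to the device behind the file descriptor through
 * dfs_file_mmap2(). Returns (void *)-1 on failure, as mmap does.
 */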
void *lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd,
                off_t pgoffset)
{
    void *ret = (void *)-1;

    if (fd == -1)
    {
        ret = lwp_map_user(lwp_self(), addr, length, 0);
        if (ret)
        {
            if ((flags & MAP_ANONYMOUS) != 0)
            {
                rt_memset(ret, 0, length);
            }
        }
        else
        {
            ret = (void *)-1;
        }
    }
    else
    {
        struct dfs_file *d;

        d = fd_get(fd);
        if (d && d->vnode->type == FT_DEVICE)
        {
            struct dfs_mmap2_args mmap2;

            mmap2.addr = addr;
            mmap2.length = length;
            mmap2.prot = prot;
            mmap2.flags = flags;
            mmap2.pgoffset = pgoffset;
            mmap2.ret = (void *)-1;
            if (dfs_file_mmap2(d, &mmap2) == 0)
            {
                ret = mmap2.ret;
            }
        }
    }

    return ret;
}

int lwp_munmap(void *addr)
{
    int ret = 0;

    rt_mm_lock();
    ret = lwp_unmap_user(lwp_self(), addr);
    rt_mm_unlock();

    return ret;
}
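
/**
 * Copy data from user space into a kernel buffer (lwp_get_from_user) or
 * from a kernel buffer into user space (lwp_put_to_user). Both validate
 * that the user range lies inside [USER_VADDR_START, USER_VADDR_TOP)
 * before delegating to lwp_data_get()/lwp_data_put(), and return the
 * number of bytes copied (0 on a rejected range).
 */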
size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check src */
    if (src < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (src >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_get(lwp, dst, src, size);
}

size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check dst */
    if (dst < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)dst + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_put(lwp, dst, src, size);
}
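
/**
 * Check whether [addr, addr + size) is accessible from the current
 * process, walking the range page by page through lwp_v2p(). An unmapped
 * page inside the stack region is probed with a read so the page-fault
 * handler can populate it on demand; an unmapped page anywhere else fails
 * the check. Returns 1 when the whole range is accessible, 0 otherwise.
 */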
int lwp_user_accessable(void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;
    struct rt_lwp *lwp = lwp_self();

    if (!lwp)
    {
        return 0;
    }
    if (!size || !addr)
    {
        return 0;
    }
    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    if (addr_start < (void *)USER_VADDR_START)
    {
        return 0;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
#endif

    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;
        if (size < len)
        {
            len = size;
        }
        tmp_addr = lwp_v2p(lwp, addr_start);
        if (tmp_addr == ARCH_MAP_FAILED)
        {
            if ((rt_ubase_t)addr_start >= USER_STACK_VSTART &&
                (rt_ubase_t)addr_start < USER_STACK_VEND)
            {
                /* a stack page may simply not be populated yet: touch it
                 * so the page-fault handler maps it in */
                tmp_addr = *(void **)addr_start;
            }
            else
            {
                return 0;
            }
        }
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);

    return 1;
}
/* src is in lwp address space, dst is in current thread space */
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_dst = dst;
    addr_start = src;
    addr_end = (void *)((char *)src + size);
    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;
        if (size < len)
        {
            len = size;
        }
        tmp_src = lwp_v2p(lwp, addr_start);
        if (tmp_src == ARCH_MAP_FAILED)
        {
            break;
        }
        tmp_src = (void *)((char *)tmp_src - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_dst = (void *)((char *)tmp_dst + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);

    return copy_len;
}
/* dst is in lwp address space, src is in current thread space */
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_src = src;
    addr_start = dst;
    addr_end = (void *)((char *)dst + size);
    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;
        if (size < len)
        {
            len = size;
        }
        tmp_dst = lwp_v2p(lwp, addr_start);
        if (tmp_dst == ARCH_MAP_FAILED)
        {
            break;
        }
        tmp_dst = (void *)((char *)tmp_dst - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_src = (void *)((char *)tmp_src + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);

    return copy_len;
}
#endif /* ARCH_MM_MMU */