/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2021-02-06     lizhirui     fixed vtable size problem
 * 2021-02-12     lizhirui     add 64-bit support for lwp_brk
 * 2021-02-19     lizhirui     add riscv64 support for lwp_user_accessable and lwp_get_from_user
 * 2021-06-07     lizhirui     modify user space bound check
 * 2022-12-25     wangxiaoyao  adapt to new mm
 * 2023-08-12     Shell        Fix parameter passing of lwp_mmap()/lwp_munmap()
 * 2023-08-29     Shell        Add API accessible()/data_get()/data_set()/data_put()
 * 2023-09-13     Shell        Add lwp_memcpy and support run-time choice of memcpy based on memory attr
 * 2023-09-19     Shell        add lwp_user_memory_remap_to_kernel
 */
#include <rtthread.h>
#include <rthw.h>
#include <string.h>

#ifdef ARCH_MM_MMU

#include <lwp.h>
#include <lwp_arch.h>
#include <lwp_mm.h>
#include <lwp_user_mm.h>
#include <mm_aspace.h>
#include <mm_fault.h>
#include <mm_flag.h>
#include <mm_page.h>
#include <mmu.h>
#include <page.h>

#ifdef RT_USING_MUSLLIBC
#include "libc_musl.h"
#endif

#define DBG_TAG "LwP.mman"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <stdlib.h>

#define STACK_OBJ _null_object
static const char *_null_get_name(rt_varea_t varea)
{
    return "null";
}

static void _null_page_fault(struct rt_varea *varea,
                             struct rt_aspace_fault_msg *msg)
{
    static void *null_page;

    if (!null_page)
    {
        null_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
        if (null_page)
            memset(null_page, 0, ARCH_PAGE_SIZE);
        else
            return;
    }

    msg->response.status = MM_FAULT_STATUS_OK;
    msg->response.size = ARCH_PAGE_SIZE;
    msg->response.vaddr = null_page;
}

static rt_err_t _null_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    return RT_EOK;
}

static rt_err_t _null_split(struct rt_varea *existed, void *unmap_start,
                            rt_size_t unmap_len, struct rt_varea *subset)
{
    return RT_EOK;
}

static rt_err_t _null_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}

static void _null_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    void *dest = msg->buffer_vaddr;
    memset(dest, 0, ARCH_PAGE_SIZE);
    msg->response.status = MM_FAULT_STATUS_OK;
}

static void _null_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    /* write operation is not allowed */
    msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
}

static struct rt_mem_obj _null_object = {
    .get_name = _null_get_name,
    .hint_free = RT_NULL,
    .on_page_fault = _null_page_fault,
    .page_read = _null_page_read,
    .page_write = _null_page_write,
    .on_varea_expand = _null_expand,
    .on_varea_shrink = _null_shrink,
    .on_varea_split = _null_split,
};
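
/*
 * Note: _null_object is the anonymous, zero-filled backing object used for
 * the user stack region (via STACK_OBJ) and for anonymous mmap regions (via
 * _get_mmap_obj() below). Page faults on such a region are satisfied with a
 * single, shared zero page; page_read reports zeros and page_write is
 * rejected, since there is no backing store to update.
 */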
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
{
    void *stk_addr;
    int err = -RT_ENOMEM;
    const size_t flags = MMF_MAP_PRIVATE;

    err = arch_user_space_init(lwp);
    if (err == RT_EOK)
    {
        if (!is_fork)
        {
            stk_addr = (void *)USER_STACK_VSTART;
            err = rt_aspace_map(lwp->aspace, &stk_addr,
                                USER_STACK_VEND - USER_STACK_VSTART,
                                MMU_MAP_U_RWCB, flags, &STACK_OBJ, 0);
        }
    }

    return err;
}

void lwp_aspace_switch(struct rt_thread *thread)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_aspace_t aspace;
    void *from_tbl;

    if (thread->lwp)
    {
        lwp = (struct rt_lwp *)thread->lwp;
        aspace = lwp->aspace;
    }
    else
    {
        aspace = &rt_kernel_space;
    }

    from_tbl = rt_hw_mmu_tbl_get();
    if (aspace->page_table != from_tbl)
    {
        rt_hw_aspace_switch(aspace);
    }
}
void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    arch_user_space_free(lwp);
}

static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
                           int text)
{
    void *va = map_va;
    int ret = 0;
    rt_size_t flags = MMF_PREFETCH;

    if (text)
        flags |= MMF_TEXT;
    if (va != RT_NULL)
        flags |= MMF_MAP_FIXED;

    ret = rt_aspace_map_private(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags);
    if (ret != RT_EOK)
    {
        va = RT_NULL;
        LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d",
              map_va, map_size, ret);
    }

    return va;
}

int lwp_unmap_user(struct rt_lwp *lwp, void *va)
{
    int err = rt_aspace_unmap(lwp->aspace, va);
    return err;
}

/** fork the src_lwp->aspace in current */
int lwp_fork_aspace(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp)
{
    int err;
    err = rt_aspace_fork(&src_lwp->aspace, &dest_lwp->aspace);
    if (!err)
    {
        /* do an explicit aspace switch if the page table is changed */
        lwp_aspace_switch(rt_thread_self());
    }
    return err;
}

int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}

void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }

    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    ret = _lwp_map_user(lwp, map_va, map_size, text);
    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }

    return ret;
}
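
/*
 * Worked example of the alignment in lwp_map_user() above (assuming 4 KiB
 * pages, i.e. ARCH_PAGE_MASK == 0xfff): a request with map_va = 0x10001234
 * and map_size = 0x100 keeps offset = 0x234, maps the rounded range
 * [0x10001000, 0x10002000), and on success returns 0x10001000 + 0x234 so
 * the caller sees its original, unaligned address.
 */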
static inline size_t _flags_to_attr(size_t flags)
{
    size_t attr;

    if (flags & LWP_MAP_FLAG_NOCACHE)
    {
        attr = MMU_MAP_U_RW;
    }
    else
    {
        attr = MMU_MAP_U_RWCB;
    }

    return attr;
}

static inline mm_flag_t _flags_to_aspace_flag(size_t flags)
{
    mm_flag_t mm_flag = 0;

    if (flags & LWP_MAP_FLAG_MAP_FIXED)
        mm_flag |= MMF_MAP_FIXED;
    if (flags & LWP_MAP_FLAG_PREFETCH)
        mm_flag |= MMF_PREFETCH;

    return mm_flag;
}

static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    void *va = map_va;
    int ret = 0;
    rt_varea_t varea = RT_NULL;
    mm_flag_t mm_flags;
    size_t attr;

    attr = _flags_to_attr(flags);
    mm_flags = _flags_to_aspace_flag(flags);
    ret = rt_aspace_map_private(lwp->aspace, &va, map_size, attr, mm_flags);
    if (ret == RT_EOK)
    {
        varea = rt_aspace_query(lwp->aspace, va);
    }
    else
    {
        LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d",
              map_va, map_size, ret);
    }

    return varea;
}

static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }

    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    return _lwp_map_user_varea(lwp, map_va, map_size, flags);
}

rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    return _map_user_varea_ext(lwp, map_va, map_size, flags);
}

rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size)
{
    return _map_user_varea_ext(lwp, map_va, map_size, LWP_MAP_FLAG_NONE);
}
void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
                       size_t map_size, int cached)
{
    int err;
    char *va;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        if (((size_t)map_va & ARCH_PAGE_MASK) !=
            ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            return 0;
        }
    }

    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);

    struct rt_mm_va_hint hint = {.flags = 0,
                                 .limit_range_size = lwp->aspace->size,
                                 .limit_start = lwp->aspace->start,
                                 .prefer = map_va,
                                 .map_size = map_size};
    if (map_va != RT_NULL)
        hint.flags |= MMF_MAP_FIXED;

    rt_size_t attr = cached ? MMU_MAP_U_RWCB : MMU_MAP_U_RW;
    err = rt_aspace_map_phy(lwp->aspace, &hint, attr, MM_PA_TO_OFF(map_pa),
                            (void **)&va);
    if (err != RT_EOK)
    {
        va = RT_NULL;
        LOG_W("%s", __func__);
    }
    else
    {
        va += offset;
    }

    return va;
}
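
/*
 * Usage sketch for lwp_map_user_phy() (illustrative, not part of this
 * file): exposing a device register block to a user process as uncached
 * memory. `my_lwp` and the physical address 0x40001000 are hypothetical.
 *
 *   void *uva = lwp_map_user_phy(my_lwp, RT_NULL, (void *)0x40001000,
 *                                0x100, 0);
 *   if (uva == RT_NULL)
 *       ...   (mapping failed)
 *
 * The final argument 0 requests the uncached attribute (MMU_MAP_U_RW).
 */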
rt_base_t lwp_brk(void *addr)
{
    rt_base_t ret = -1;
    rt_varea_t varea = RT_NULL;
    struct rt_lwp *lwp = RT_NULL;
    size_t size = 0;

    lwp = lwp_self();

    if ((size_t)addr == RT_NULL)
    {
        addr = (char *)lwp->end_heap + 1;
    }

    if ((size_t)addr <= lwp->end_heap && (size_t)addr > USER_HEAP_VADDR)
    {
        ret = (size_t)addr;
    }
    else if ((size_t)addr <= USER_HEAP_VEND)
    {
        size = RT_ALIGN((size_t)addr - lwp->end_heap, ARCH_PAGE_SIZE);
        varea = lwp_map_user_varea_ext(lwp, (void *)lwp->end_heap, size,
                                       LWP_MAP_FLAG_PREFETCH);
        if (varea)
        {
            lwp->end_heap = (long)((char *)varea->start + varea->size);
            ret = lwp->end_heap;
        }
    }

    return ret;
}
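
/*
 * lwp_brk() semantics, in brief: a request at or below the current
 * end_heap (but above USER_HEAP_VADDR) just reports the address back and
 * releases nothing; a request up to USER_HEAP_VEND maps new prefetched
 * pages and advances end_heap to the page-aligned target; anything higher
 * fails with -1.
 */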
rt_inline rt_mem_obj_t _get_mmap_obj(struct rt_lwp *lwp)
{
    return &_null_object;
}

rt_inline rt_bool_t _memory_threshold_ok(void)
{
#define GUARDIAN_BITS (10)
    size_t total, free;

    rt_page_get_info(&total, &free);

    /* reject the request when fewer than 0x100 free pages (1 MiB with
     * 4 KiB pages) remain in the system */
    if (free * (0x1000) < 0x100000)
    {
        LOG_I("%s: low on system memory", __func__);
        return RT_FALSE;
    }

    return RT_TRUE;
}
rt_inline long _uflag_to_kernel(long flag)
{
    flag &= ~MMF_MAP_FIXED;
    flag &= ~MMF_MAP_PRIVATE;
    flag &= ~MMF_MAP_PRIVATE_DONT_SYNC;
    return flag;
}

rt_inline long _uattr_to_kernel(long attr)
{
    /* Warning: be careful with the case where the user attribute is unwritable */
    return attr;
}

static void _prefetch_mmap(rt_aspace_t aspace, void *addr, long size)
{
    struct rt_aspace_fault_msg msg;

    msg.fault_op = MM_FAULT_OP_WRITE;
    msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;

    for (char *base = addr; size > 0; base += ARCH_PAGE_SIZE, size -= ARCH_PAGE_SIZE)
    {
        msg.fault_vaddr = base;
        msg.off = (long)base >> MM_PAGE_SHIFT;
        rt_aspace_fault_try_fix(aspace, &msg);
    }
}
void *lwp_user_memory_remap_to_kernel(rt_lwp_t lwp, void *uaddr, size_t length)
{
    long kattr;
    long kflag;
    long offset_in_mobj;
    long offset_in_page;
    rt_err_t error;
    rt_varea_t uarea;
    rt_mem_obj_t mobj;
    void *kaddr = 0;

    uarea = rt_aspace_query(lwp->aspace, uaddr);
    if (uarea)
    {
        /* setup the identical mapping, and align up for address & length */
        kattr = _uattr_to_kernel(uarea->attr);
        kflag = _uflag_to_kernel(uarea->flag);
        offset_in_mobj = uarea->offset +
                         ((long)uaddr - (long)uarea->start) / ARCH_PAGE_SIZE;
        mobj = uarea->mem_obj;
        offset_in_page = (long)uaddr & ARCH_PAGE_MASK;
        length = RT_ALIGN(length + offset_in_page, ARCH_PAGE_SIZE);
        error = rt_aspace_map(&rt_kernel_space, &kaddr, length, kattr, kflag,
                              mobj, offset_in_mobj);
        if (error)
        {
            LOG_I("%s(length=0x%lx,attr=0x%lx,flags=0x%lx): do map failed",
                  __func__, length, kattr, kflag);
            kaddr = 0;
        }
        else
        {
            /* TODO: {make a memory lock?} */
            LOG_D("%s(length=0x%lx,attr=0x%lx,flags=0x%lx,offset=0x%lx) => %p",
                  __func__, length, kattr, kflag, offset_in_mobj, kaddr);
            _prefetch_mmap(&rt_kernel_space, kaddr, length);
            kaddr = (char *)kaddr + offset_in_page;
        }
    }

    return kaddr;
}
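
/*
 * Usage sketch for lwp_user_memory_remap_to_kernel() (illustrative, not
 * part of this file): making a user buffer visible in kernel space for the
 * duration of a transfer. `my_lwp` and `u_buf` are hypothetical; tearing
 * the alias mapping down again in rt_kernel_space is the caller's job.
 *
 *   void *k_buf = lwp_user_memory_remap_to_kernel(my_lwp, u_buf, len);
 *   if (k_buf)
 *   {
 *       ...   (access the user memory through k_buf)
 *   }
 */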
void *lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot,
                int flags, int fd, off_t pgoffset)
{
    rt_err_t rc;
    rt_size_t k_attr;
    rt_size_t k_flags;
    rt_size_t k_offset;
    rt_aspace_t uspace;
    rt_mem_obj_t mem_obj;
    void *ret = 0;

    LOG_D("%s(addr=0x%lx,length=%ld,fd=%d)", __func__, addr, length, fd);

    if (fd == -1)
    {
        /* anonymous mapping: back it by the null (zero-fill) object */
        if (!_memory_threshold_ok())
            return (void *)-ENOMEM;

        k_offset = MM_PA_TO_OFF(addr);
        k_flags = lwp_user_mm_flag_to_kernel(flags) | MMF_MAP_PRIVATE;
        k_attr = lwp_user_mm_attr_to_kernel(prot);
        uspace = lwp->aspace;
        length = RT_ALIGN(length, ARCH_PAGE_SIZE);
        mem_obj = _get_mmap_obj(lwp);

        rc = rt_aspace_map(uspace, &addr, length, k_attr, k_flags, mem_obj,
                           k_offset);
        if (rc == RT_EOK)
        {
            ret = addr;
        }
        else
        {
            ret = (void *)lwp_errno_to_posix(rc);
        }
    }
    else
    {
        /* file-backed mapping: delegate to the filesystem driver */
        struct dfs_file *d;

        d = fd_get(fd);
        if (d)
        {
            struct dfs_mmap2_args mmap2;

            mmap2.addr = addr;
            mmap2.length = length;
            mmap2.prot = prot;
            mmap2.flags = flags;
            mmap2.pgoffset = pgoffset;
            mmap2.ret = (void *)-1;
            mmap2.lwp = lwp;

            rc = dfs_file_mmap2(d, &mmap2);
            if (rc == RT_EOK)
            {
                ret = mmap2.ret;
            }
            else
            {
                ret = (void *)lwp_errno_to_posix(rc);
            }
        }
    }

    if ((long)ret <= 0)
        LOG_D("%s() => %ld", __func__, ret);

    return ret;
}
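
/*
 * Usage sketch for lwp_mmap2() (illustrative, not part of this file): an
 * anonymous two-page user allocation, as the mmap() syscall path would
 * request it. `my_lwp` is hypothetical; on failure the return value is a
 * negative POSIX errno cast to a pointer.
 *
 *   void *uva = lwp_mmap2(my_lwp, RT_NULL, 0x2000, PROT_READ | PROT_WRITE,
 *                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *   if ((long)uva <= 0)
 *       ...   (failed; -(long)uva is the errno)
 */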
int lwp_munmap(struct rt_lwp *lwp, void *addr, size_t length)
{
    int ret;

    RT_ASSERT(lwp);

    ret = rt_aspace_unmap_range(lwp->aspace, addr, length);
    return lwp_errno_to_posix(ret);
}

size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check src */
    if (src < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (src >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_get(lwp, dst, src, size);
}

size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check dst */
    if (dst < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)dst + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_put(lwp, dst, src, size);
}
rt_inline rt_bool_t _in_user_space(const char *addr)
{
    return (addr >= (char *)USER_VADDR_START && addr < (char *)USER_VADDR_TOP);
}

rt_inline rt_bool_t _can_unaligned_access(const char *addr)
{
    /* true when addr lies in the kernel's direct (linear) mapping,
     * i.e. normal RAM rather than device memory */
    return (char *)rt_kmem_v2p((char *)addr) - PV_OFFSET == addr;
}

void *lwp_memcpy(void * __restrict dst, const void * __restrict src, size_t size)
{
    void *rc = dst;
    long len;

    if (_in_user_space(dst))
    {
        if (!_in_user_space(src))
        {
            len = lwp_put_to_user(dst, (void *)src, size);
            if (!len)
            {
                LOG_E("lwp_put_to_user(lwp=%p, dst=%p,src=%p,size=0x%lx) failed",
                      lwp_self(), dst, src, size);
            }
        }
        else
        {
            /* user-to-user copy is not supported yet */
            LOG_W("%s(dst=%p,src=%p,size=0x%lx): operation not supported",
                  __func__, dst, src, size);
        }
    }
    else
    {
        if (_in_user_space(src))
        {
            len = lwp_get_from_user(dst, (void *)src, size);
            if (!len)
            {
                LOG_E("lwp_get_from_user(lwp=%p, dst=%p,src=%p,size=0x%lx) failed",
                      lwp_self(), dst, src, size);
            }
        }
        else
        {
            /* kernel-to-kernel: use the libc memcpy only when both sides
             * tolerate unaligned access */
            if (_can_unaligned_access(dst) && _can_unaligned_access(src))
            {
                rc = memcpy(dst, src, size);
            }
            else
            {
                rt_memcpy(dst, src, size);
            }
        }
    }

    return rc;
}
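
/*
 * lwp_memcpy() routing summary (derived from the branches above):
 *
 *   dst space | src space | action
 *   ----------+-----------+------------------------------------------
 *   user      | kernel    | lwp_put_to_user()
 *   user      | user      | rejected (not supported yet)
 *   kernel    | user      | lwp_get_from_user()
 *   kernel    | kernel    | memcpy() if both sides allow unaligned
 *             |           | access, rt_memcpy() otherwise
 */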
int lwp_user_accessible_ext(struct rt_lwp *lwp, void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;

    if (!lwp)
    {
        return RT_FALSE;
    }
    if (!size || !addr)
    {
        return RT_FALSE;
    }

    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    if (addr_start < (void *)USER_VADDR_START)
    {
        return RT_FALSE;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return RT_FALSE;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return RT_FALSE;
    }
#endif

    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }

        /* a page is accessible if it is already mapped or is at least
         * tracked by a varea, so a fault on it can be fixed up */
        tmp_addr = lwp_v2p(lwp, addr_start);
        if (tmp_addr == ARCH_MAP_FAILED &&
            !rt_aspace_query(lwp->aspace, addr_start))
        {
            return RT_FALSE;
        }

        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);

    return RT_TRUE;
}

int lwp_user_accessable(void *addr, size_t size)
{
    return lwp_user_accessible_ext(lwp_self(), addr, size);
}
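
/*
 * Usage sketch for lwp_user_accessable() (illustrative, not part of this
 * file): validating a user pointer at the top of a syscall handler before
 * touching it. `ubuf`, `kbuf` and `len` are hypothetical.
 *
 *   if (!lwp_user_accessable(ubuf, len))
 *       return -EFAULT;
 *   lwp_get_from_user(kbuf, ubuf, len);
 */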
#define ALIGNED(addr) (!((rt_size_t)(addr) & ARCH_PAGE_MASK))

/* src is in lwp address space, dst is in current thread space */
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    char *temp_page = 0;
    char *dst_iter, *dst_next_page;
    char *src_copy_end, *src_iter, *src_iter_aligned;

    if (!size || !dst)
    {
        return 0;
    }

    dst_iter = dst;
    src_iter = src;
    src_copy_end = (char *)src + size;
    /* the walk is driven by page boundaries of the source */
    dst_next_page =
        (char *)(((size_t)src_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t bytes_to_copy = (char *)dst_next_page - (char *)src_iter;
        if (bytes_to_copy > size)
        {
            bytes_to_copy = size;
        }

        if (ALIGNED(src_iter) && bytes_to_copy == ARCH_PAGE_SIZE)
        {
            /* whole page: get it to the kernel buffer directly */
            if (rt_aspace_page_get(lwp->aspace, src_iter, dst_iter))
                break;
        }
        else
        {
            /* partial page: bounce it through a temporary page */
            if (!temp_page)
                temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!temp_page)
                break;

            src_iter_aligned = (char *)((long)src_iter & ~ARCH_PAGE_MASK);
            if (rt_aspace_page_get(lwp->aspace, src_iter_aligned, temp_page))
                break;
            memcpy(dst_iter, temp_page + (src_iter - src_iter_aligned),
                   bytes_to_copy);
        }

        dst_iter = dst_iter + bytes_to_copy;
        src_iter = src_iter + bytes_to_copy;
        size -= bytes_to_copy;
        dst_next_page = dst_next_page + ARCH_PAGE_SIZE;
        copy_len += bytes_to_copy;
    } while (src_iter < src_copy_end);

    if (temp_page)
        rt_pages_free(temp_page, 0);

    return copy_len;
}
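
/*
 * Worked example of the page walk in lwp_data_get() above (assuming 4 KiB
 * pages): copying 0x1800 bytes from user address 0x10000e00 proceeds in
 * three chunks: 0x200 bytes up to the 0x10001000 boundary (via the bounce
 * page), 0x1000 bytes as a full aligned page (via rt_aspace_page_get()
 * directly), and the remaining 0x600 bytes (via the bounce page again).
 */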
/* dst is in lwp address space, src is in current thread space */
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    char *temp_page = 0;
    char *dst_iter, *dst_iter_aligned, *dst_next_page;
    char *src_put_end, *src_iter;

    if (!size || !dst)
    {
        return 0;
    }

    src_iter = src;
    dst_iter = dst;
    src_put_end = (char *)dst + size;
    dst_next_page =
        (char *)(((size_t)dst_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t bytes_to_put = (char *)dst_next_page - (char *)dst_iter;
        if (bytes_to_put > size)
        {
            bytes_to_put = size;
        }

        if (ALIGNED(dst_iter) && bytes_to_put == ARCH_PAGE_SIZE)
        {
            /* whole page: write to the page in the kernel directly */
            if (rt_aspace_page_put(lwp->aspace, dst_iter, src_iter))
                break;
        }
        else
        {
            /* partial page: read-modify-write through a temporary page */
            if (!temp_page)
                temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!temp_page)
                break;

            dst_iter_aligned = (void *)((long)dst_iter & ~ARCH_PAGE_MASK);
            if (rt_aspace_page_get(lwp->aspace, dst_iter_aligned, temp_page))
                break;
            memcpy(temp_page + (dst_iter - dst_iter_aligned), src_iter,
                   bytes_to_put);
            if (rt_aspace_page_put(lwp->aspace, dst_iter_aligned, temp_page))
                break;
        }

        src_iter = src_iter + bytes_to_put;
        dst_iter = dst_iter + bytes_to_put;
        size -= bytes_to_put;
        dst_next_page = dst_next_page + ARCH_PAGE_SIZE;
        copy_len += bytes_to_put;
    } while (dst_iter < src_put_end);

    if (temp_page)
        rt_pages_free(temp_page, 0);

    return copy_len;
}
/* Set N bytes of S to C */
size_t lwp_data_set(struct rt_lwp *lwp, void *dst, int byte, size_t size)
{
    size_t copy_len = 0;
    char *temp_page = 0;
    char *dst_iter, *dst_iter_aligned, *dst_next_page;
    char *dst_put_end;

    if (!size || !dst)
    {
        return 0;
    }

    dst_iter = dst;
    dst_put_end = (char *)dst + size;
    dst_next_page =
        (char *)(((size_t)dst_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (temp_page)
    {
        do
        {
            size_t bytes_to_put = (char *)dst_next_page - (char *)dst_iter;
            if (bytes_to_put > size)
            {
                bytes_to_put = size;
            }

            dst_iter_aligned = (void *)((long)dst_iter & ~ARCH_PAGE_MASK);
            /* for a partial page, fetch the old contents first */
            if (!ALIGNED(dst_iter) || bytes_to_put != ARCH_PAGE_SIZE)
            {
                if (rt_aspace_page_get(lwp->aspace, dst_iter_aligned, temp_page))
                    break;
            }

            memset(temp_page + (dst_iter - dst_iter_aligned), byte, bytes_to_put);
            if (rt_aspace_page_put(lwp->aspace, dst_iter_aligned, temp_page))
                break;

            dst_iter = dst_iter + bytes_to_put;
            size -= bytes_to_put;
            dst_next_page = dst_next_page + ARCH_PAGE_SIZE;
            copy_len += bytes_to_put;
        } while (dst_iter < dst_put_end);

        rt_pages_free(temp_page, 0);
    }

    return copy_len;
}
size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s)
{
    int len = 0;
    char *temp_buf = RT_NULL;
    void *addr_start = RT_NULL;
    int get_bytes = 0;
    int index = 0;

    if (s == RT_NULL)
        return 0;

    if (lwp == RT_NULL)
    {
        LOG_W("%s: lwp is NULL", __func__);
        return -1;
    }

    addr_start = (void *)s;
    temp_buf = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!temp_buf)
    {
        LOG_W("%s: No memory", __func__);
        return -1;
    }

    get_bytes = lwp_data_get(lwp, temp_buf, addr_start, ARCH_PAGE_SIZE);
    if (get_bytes == 0)
    {
        LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000) failed",
              lwp, temp_buf, addr_start);
        rt_pages_free(temp_buf, 0);
        return -1;
    }

    while (temp_buf[index] != '\0')
    {
        len++;
        index++;
        if (index == get_bytes)
        {
            if (get_bytes == ARCH_PAGE_SIZE)
            {
                /* terminator not found yet: fetch the next page */
                get_bytes = lwp_data_get(lwp, temp_buf,
                                         (char *)addr_start + len, ARCH_PAGE_SIZE);
                if (get_bytes == 0)
                {
                    LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000): user data unaccessible",
                          lwp, temp_buf, addr_start);
                    len = -1;
                    break;
                }
                index = 0;
            }
            else
            {
                LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000): user data unaccessible",
                      lwp, temp_buf, addr_start);
                len = -1;
                break;
            }
        }
    }

    rt_pages_free(temp_buf, 0);
    return len;
}
size_t lwp_user_strlen(const char *s)
{
    struct rt_lwp *lwp = RT_NULL;

    lwp = lwp_self();
    RT_ASSERT(lwp != RT_NULL);

    return lwp_user_strlen_ext(lwp, s);
}
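
/*
 * Usage sketch for lwp_user_strlen() (illustrative, not part of this
 * file): measuring and copying a user-supplied string in a syscall.
 * `upath` is a hypothetical user pointer; the error return -1 comes back
 * as (size_t)-1, hence the signed check.
 *
 *   size_t len = lwp_user_strlen(upath);
 *   if ((int)len < 0)
 *       return -EFAULT;
 *   char *kpath = rt_malloc(len + 1);
 *   if (kpath)
 *       lwp_get_from_user(kpath, (void *)upath, len + 1);
 */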
char **lwp_get_command_line_args(struct rt_lwp *lwp)
{
    size_t argc = 0;
    char **argv = NULL;
    int ret;
    size_t i;
    size_t len;

    if (lwp)
    {
        /* lwp->args starts with argc, followed by the argv pointer array */
        ret = lwp_data_get(lwp, &argc, lwp->args, sizeof(argc));
        if (ret == 0)
        {
            return RT_NULL;
        }

        /* zero-initialized so lwp_free_command_line_args() stops at the
         * first unused slot if we bail out early */
        argv = (char **)rt_calloc(argc + 1, sizeof(char *));
        if (argv)
        {
            for (i = 0; i < argc; i++)
            {
                char *argvp = NULL;
                ret = lwp_data_get(lwp, &argvp, &((char **)lwp->args)[1 + i],
                                   sizeof(argvp));
                if (ret == 0)
                {
                    lwp_free_command_line_args(argv);
                    return RT_NULL;
                }

                len = lwp_user_strlen_ext(lwp, argvp);
                if (len > 0)
                {
                    argv[i] = (char *)rt_malloc(len + 1);
                    ret = lwp_data_get(lwp, argv[i], argvp, len);
                    if (ret == 0)
                    {
                        lwp_free_command_line_args(argv);
                        return RT_NULL;
                    }
                    argv[i][len] = '\0';
                }
                else
                {
                    argv[i] = NULL;
                }
            }
            argv[argc] = NULL;
        }
    }

    return argv;
}
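
/*
 * Usage sketch pairing lwp_get_command_line_args() with its release helper
 * below (illustrative, not part of this file). `target` is a hypothetical
 * struct rt_lwp pointer.
 *
 *   char **args = lwp_get_command_line_args(target);
 *   if (args)
 *   {
 *       for (size_t i = 0; args[i]; i++)
 *           rt_kprintf("argv[%d]: %s\n", (int)i, args[i]);
 *       lwp_free_command_line_args(args);
 *   }
 */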
void lwp_free_command_line_args(char **argv)
{
    size_t i;

    if (argv)
    {
        for (i = 0; argv[i]; i++)
        {
            rt_free(argv[i]);
        }
        rt_free(argv);
    }
}

#endif /* ARCH_MM_MMU */