lwp_user_mm.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2021-02-06     lizhirui     fixed vtable size problem
 * 2021-02-12     lizhirui     add 64-bit support for lwp_brk
 * 2021-02-19     lizhirui     add riscv64 support for lwp_user_accessable and lwp_get_from_user
 * 2021-06-07     lizhirui     modify user space bound check
 * 2022-12-25     wangxiaoyao  adapt to new mm
 * 2023-08-12     Shell        Fix parameter passing of lwp_mmap()/lwp_munmap()
 * 2023-08-29     Shell        Add API accessible()/data_get()/data_set()/data_put()
 * 2023-09-13     Shell        Add lwp_memcpy and support run-time choice of memcpy based on memory attr
 * 2023-09-19     Shell        add lwp_user_memory_remap_to_kernel
 */
#include <rtthread.h>
#include <rthw.h>
#include <string.h>

#ifdef ARCH_MM_MMU

#include "lwp_internal.h"

#include <mm_aspace.h>
#include <mm_fault.h>
#include <mm_flag.h>
#include <mm_page.h>
#include <mmu.h>
#include <page.h>

#ifdef RT_USING_MUSLLIBC
#include "libc_musl.h"
#endif

#define DBG_TAG "LwP.mman"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <stdlib.h>

#define STACK_OBJ _null_object

static const char *_null_get_name(rt_varea_t varea)
{
    return "null";
}
static void _null_page_fault(struct rt_varea *varea,
                             struct rt_aspace_fault_msg *msg)
{
    static void *null_page;

    if (!null_page)
    {
        null_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
        if (null_page)
            memset(null_page, 0, ARCH_PAGE_SIZE);
        else
            return;
    }

    msg->response.status = MM_FAULT_STATUS_OK;
    msg->response.size = ARCH_PAGE_SIZE;
    msg->response.vaddr = null_page;
}
static rt_err_t _null_shrink(rt_varea_t varea, void *new_start, rt_size_t size)
{
    char *varea_start = varea->start;
    char *rm_start;
    char *rm_end;

    if (varea_start == (char *)new_start)
    {
        rm_start = varea_start + size;
        rm_end = varea_start + varea->size;
    }
    else /* if (varea_start < (char *)new_start) */
    {
        RT_ASSERT(varea_start < (char *)new_start);
        rm_start = varea_start;
        rm_end = new_start;
    }

    rt_varea_unmap_range(varea, rm_start, rm_end - rm_start);
    return RT_EOK;
}
static rt_err_t _null_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    rt_varea_unmap_range(existed, unmap_start, unmap_len);
    return RT_EOK;
}

static rt_err_t _null_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    return RT_EOK;
}

static void _null_page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    void *dest = msg->buffer_vaddr;
    memset(dest, 0, ARCH_PAGE_SIZE);
    msg->response.status = MM_FAULT_STATUS_OK;
}

static void _null_page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
{
    /* write operation is not allowed */
    msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
}
static struct rt_mem_obj _null_object = {
    .get_name = _null_get_name,
    .hint_free = RT_NULL,
    .on_page_fault = _null_page_fault,
    .page_read = _null_page_read,
    .page_write = _null_page_write,
    .on_varea_expand = _null_expand,
    .on_varea_shrink = _null_shrink,
    .on_varea_split = _null_split,
};
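
/*
 * The "null" memory object above is the backend for anonymous mappings:
 * every read fault is satisfied with one shared, lazily allocated
 * zero-filled page, page_read reports zeroes, and page_write is refused
 * (MM_FAULT_STATUS_UNRECOVERABLE). Usage sketch (illustrative; this is
 * how STACK_OBJ is consumed in lwp_user_space_init() below):
 *
 *   void *va = (void *)USER_STACK_VSTART;
 *   rt_aspace_map(lwp->aspace, &va, USER_STACK_VEND - USER_STACK_VSTART,
 *                 MMU_MAP_U_RWCB, MMF_MAP_PRIVATE, &STACK_OBJ, 0);
 */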
int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork)
{
    void *stk_addr;
    int err = -RT_ENOMEM;
    const size_t flags = MMF_MAP_PRIVATE;

    err = arch_user_space_init(lwp);
    if (err == RT_EOK)
    {
        if (!is_fork)
        {
            stk_addr = (void *)USER_STACK_VSTART;
            err = rt_aspace_map(lwp->aspace, &stk_addr,
                                USER_STACK_VEND - USER_STACK_VSTART,
                                MMU_MAP_U_RWCB, flags, &STACK_OBJ, 0);
        }
    }
    return err;
}
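
/*
 * Note: on fork the stack region is deliberately not mapped here; the
 * whole parent address space, stack included, is duplicated later by
 * lwp_fork_aspace() below.
 */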
void lwp_aspace_switch(struct rt_thread *thread)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_aspace_t to_aspace;
    void *from_tbl, *to_table;

    if (thread->lwp)
    {
        lwp = (struct rt_lwp *)thread->lwp;
        to_aspace = lwp->aspace;
        to_table = to_aspace->page_table;
    }
    else
    {
        to_aspace = &rt_kernel_space;
        /* the kernel page table is arch dependent, not aspace->page_table */
        to_table = arch_kernel_mmu_table_get();
    }

    /* must fetch the page table currently in effect to avoid a hot update */
    from_tbl = rt_hw_mmu_tbl_get();
    if (to_table != from_tbl)
    {
        rt_hw_aspace_switch(to_aspace);
    }
}
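
/*
 * The switch is skipped when the target page table is already active,
 * which avoids a redundant hardware aspace switch (and the TLB/cache
 * maintenance that typically comes with it) on every reschedule.
 */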
void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    if (lwp->aspace)
        arch_user_space_free(lwp);
}

static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
                           int text)
{
    void *va = map_va;
    int ret = 0;
    rt_size_t flags = MMF_PREFETCH;

    if (text)
        flags |= MMF_TEXT;
    if (va != RT_NULL)
        flags |= MMF_MAP_FIXED;

    ret = rt_aspace_map_private(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags);
    if (ret != RT_EOK)
    {
        va = RT_NULL;
        LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d", map_va,
              map_size, ret);
    }

    return va;
}

int lwp_unmap_user(struct rt_lwp *lwp, void *va)
{
    int err = rt_aspace_unmap(lwp->aspace, va);
    return err;
}

/** fork src_lwp->aspace into dest_lwp in the current context */
int lwp_fork_aspace(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp)
{
    int err;
    err = rt_aspace_fork(&src_lwp->aspace, &dest_lwp->aspace);
    if (!err)
    {
        /* do an explicit aspace switch if the page table is changed */
        lwp_aspace_switch(rt_thread_self());
    }
    return err;
}

int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }

    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    ret = _lwp_map_user(lwp, map_va, map_size, text);
    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}
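
/*
 * Usage sketch (illustrative addresses): requests are rounded out to
 * page granularity and the in-page offset is re-applied afterwards:
 *
 *   void *va = lwp_map_user(lwp, (void *)0x100001, 3, 0);
 *   -> maps one page at 0x100000 (MMF_MAP_FIXED, since map_va != NULL)
 *      and returns 0x100001 on success
 */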
static inline size_t _flags_to_attr(size_t flags)
{
    size_t attr;

    if (flags & LWP_MAP_FLAG_NOCACHE)
    {
        attr = MMU_MAP_U_RW;
    }
    else
    {
        attr = MMU_MAP_U_RWCB;
    }

    return attr;
}

static inline mm_flag_t _flags_to_aspace_flag(size_t flags)
{
    mm_flag_t mm_flag = 0;

    if (flags & LWP_MAP_FLAG_MAP_FIXED)
        mm_flag |= MMF_MAP_FIXED;
    if (flags & LWP_MAP_FLAG_PREFETCH)
        mm_flag |= MMF_PREFETCH;

    return mm_flag;
}

static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    void *va = map_va;
    int ret = 0;
    rt_varea_t varea = RT_NULL;
    mm_flag_t mm_flags;
    size_t attr;

    attr = _flags_to_attr(flags);
    mm_flags = _flags_to_aspace_flag(flags);
    ret = rt_aspace_map_private(lwp->aspace, &va, map_size,
                                attr, mm_flags);
    if (ret == RT_EOK)
    {
        varea = rt_aspace_query(lwp->aspace, va);
    }
    else
    {
        LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d", map_va,
              map_size, ret);
    }

    return varea;
}

static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }

    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    return _lwp_map_user_varea(lwp, map_va, map_size, flags);
}

rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
{
    return _map_user_varea_ext(lwp, map_va, map_size, flags);
}

rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size)
{
    return _map_user_varea_ext(lwp, map_va, map_size, LWP_MAP_FLAG_NONE);
}
void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
                       size_t map_size, int cached)
{
    int err;
    char *va;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        /* va and pa must share the same in-page offset */
        if (((size_t)map_va & ARCH_PAGE_MASK) !=
            ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            return 0;
        }
    }

    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);

    struct rt_mm_va_hint hint = {.flags = 0,
                                 .limit_range_size = lwp->aspace->size,
                                 .limit_start = lwp->aspace->start,
                                 .prefer = map_va,
                                 .map_size = map_size};
    if (map_va != RT_NULL)
        hint.flags |= MMF_MAP_FIXED;

    rt_size_t attr = cached ? MMU_MAP_U_RWCB : MMU_MAP_U_RW;

    err =
        rt_aspace_map_phy(lwp->aspace, &hint, attr, MM_PA_TO_OFF(map_pa), (void **)&va);
    if (err != RT_EOK)
    {
        va = RT_NULL;
        LOG_W("%s", __func__);
    }
    else
    {
        va += offset;
    }

    return va;
}
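
/*
 * Usage sketch (illustrative device address): expose a small MMIO block
 * to user space without caching (cached == 0 selects MMU_MAP_U_RW):
 *
 *   void *uva = lwp_map_user_phy(lwp, RT_NULL, (void *)0x40001000, 0x100, 0);
 */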
rt_base_t lwp_brk(void *addr)
{
    rt_base_t ret = -1;
    rt_varea_t varea = RT_NULL;
    struct rt_lwp *lwp = RT_NULL;
    size_t size = 0;

    lwp = lwp_self();

    if (addr == RT_NULL)
    {
        addr = (char *)lwp->end_heap + 1;
    }

    if ((size_t)addr <= lwp->end_heap && (size_t)addr > USER_HEAP_VADDR)
    {
        ret = (size_t)addr;
    }
    else if ((size_t)addr <= USER_HEAP_VEND)
    {
        size = RT_ALIGN((size_t)addr - lwp->end_heap, ARCH_PAGE_SIZE);
        varea = lwp_map_user_varea_ext(lwp, (void *)lwp->end_heap, size, LWP_MAP_FLAG_PREFETCH);
        if (varea)
        {
            lwp->end_heap = (long)((char *)varea->start + varea->size);
            ret = lwp->end_heap;
        }
    }

    return ret;
}
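
/*
 * brk semantics here: a request at or below the current break (but above
 * USER_HEAP_VADDR) is acknowledged without unmapping anything, i.e. the
 * heap never shrinks; growth maps fresh anonymous pages with
 * LWP_MAP_FLAG_PREFETCH so they are populated immediately.
 */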
rt_inline rt_mem_obj_t _get_mmap_obj(struct rt_lwp *lwp)
{
    return &_null_object;
}

rt_inline rt_bool_t _memory_threshold_ok(void)
{
#define GUARDIAN_BITS (10)
    size_t total, free;

    rt_page_get_info(&total, &free);
    /* refuse when less than 1 MiB is free (free pages * 0x1000 bytes) */
    if (free * (0x1000) < 0x100000)
    {
        LOG_I("%s: low on system memory", __func__);
        return RT_FALSE;
    }

    return RT_TRUE;
}

rt_inline long _uflag_to_kernel(long flag)
{
    flag &= ~MMF_MAP_FIXED;
    flag &= ~MMF_MAP_PRIVATE;
    flag &= ~MMF_MAP_PRIVATE_DONT_SYNC;
    return flag;
}

rt_inline long _uattr_to_kernel(long attr)
{
    /* Warning: be careful with the case where the user attribute is unwritable */
    return attr;
}

static void _prefetch_mmap(rt_aspace_t aspace, void *addr, long size)
{
    struct rt_aspace_fault_msg msg;

    msg.fault_op = MM_FAULT_OP_WRITE;
    msg.fault_type = MM_FAULT_TYPE_PAGE_FAULT;

    for (char *base = addr; size > 0; base += ARCH_PAGE_SIZE, size -= ARCH_PAGE_SIZE)
    {
        msg.fault_vaddr = base;
        msg.off = (long)base >> MM_PAGE_SHIFT;
        rt_aspace_fault_try_fix(aspace, &msg);
    }
}
void *lwp_user_memory_remap_to_kernel(rt_lwp_t lwp, void *uaddr, size_t length)
{
    long kattr;
    long kflag;
    long offset_in_mobj;
    long offset_in_page;
    rt_err_t error;
    rt_varea_t uarea;
    rt_mem_obj_t mobj;
    void *kaddr = 0;

    uarea = rt_aspace_query(lwp->aspace, uaddr);
    if (uarea)
    {
        /* setup the identical mapping, and align up for address & length */
        kattr = _uattr_to_kernel(uarea->attr);
        kflag = _uflag_to_kernel(uarea->flag);
        offset_in_mobj = uarea->offset + ((long)uaddr - (long)uarea->start) / ARCH_PAGE_SIZE;
        mobj = uarea->mem_obj;
        offset_in_page = (long)uaddr & ARCH_PAGE_MASK;
        length = RT_ALIGN(length + offset_in_page, ARCH_PAGE_SIZE);
        error = rt_aspace_map(&rt_kernel_space, &kaddr, length, kattr, kflag, mobj, offset_in_mobj);
        if (error)
        {
            LOG_I("%s(length=0x%lx,attr=0x%lx,flags=0x%lx): do map failed", __func__, length, kattr, kflag);
            kaddr = 0;
        }
        else
        {
            /* TODO: {make a memory lock?} */
            LOG_D("%s(length=0x%lx,attr=0x%lx,flags=0x%lx,offset=0x%lx) => %p", __func__, length, kattr, kflag, offset_in_mobj, kaddr);
            _prefetch_mmap(&rt_kernel_space, kaddr, length);
            kaddr = (char *)kaddr + offset_in_page;
        }
    }

    return kaddr;
}
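
/*
 * The kernel alias maps the same mem_obj at the same object offset as
 * the user varea, so both mappings resolve to the same frames;
 * _prefetch_mmap() then faults every page in up front, presumably so the
 * alias can be used from contexts where taking a page fault is not an
 * option.
 */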
#include <dfs_dentry.h>

#define _AFFBLK_PGOFFSET (RT_PAGE_AFFINITY_BLOCK_SIZE >> MM_PAGE_SHIFT)
static rt_base_t _aligned_for_weak_mapping(off_t *ppgoff, rt_size_t *plen, rt_size_t *palign)
{
    off_t aligned_pgoffset, pgoffset = *ppgoff;
    rt_size_t length = *plen;
    rt_size_t min_align_size = *palign;
    rt_base_t aligned_size = 0;

    if (pgoffset >= 0)
    {
        /* force an alignment */
        aligned_pgoffset = RT_ALIGN_DOWN(pgoffset, _AFFBLK_PGOFFSET);
        aligned_size = (pgoffset - aligned_pgoffset) << MM_PAGE_SHIFT;

        if (aligned_pgoffset != pgoffset)
        {
            /**
             * If the requested pgoffset does not sit on an aligned page
             * offset, expand the requested mapping to force an alignment.
             */
            length += aligned_size;
            pgoffset = aligned_pgoffset;
        }

        /**
         * As this is a weak mapping, we can pick any reasonable address
         * for our requirement.
         */
        min_align_size = RT_PAGE_AFFINITY_BLOCK_SIZE;
    }
    else
    {
        RT_ASSERT(0 && "Unexpected input");
    }

    *ppgoff = pgoffset;
    *plen = length;
    *palign = min_align_size;
    return aligned_size;
}
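
/*
 * Worked example (illustrative; assumes a 64 KiB affinity block and
 * 4 KiB pages, so _AFFBLK_PGOFFSET == 16): pgoffset=19, length=0x2000
 * becomes pgoffset=16, length=0x5000, aligned_size=0x3000; the caller
 * (lwp_mmap2() below) later adds aligned_size back to the address it
 * returns, so the user still sees a mapping for the offset requested.
 */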
void *lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot,
                int flags, int fd, off_t pgoffset)
{
    rt_err_t rc;
    rt_size_t k_attr, k_flags, k_offset, aligned_size = 0;
    rt_size_t min_align_size = 1 << MM_PAGE_SHIFT;
    rt_aspace_t uspace;
    rt_mem_obj_t mem_obj;
    void *ret = 0;

    LOG_D("%s(addr=0x%lx,length=0x%lx,fd=%d,pgoff=0x%lx)", __func__, addr, length, fd, pgoffset);

    /* alignment for affinity page block */
    if (flags & MAP_FIXED)
    {
        if (fd != -1)
        {
            /* requested mapping address */
            rt_base_t va_affid = RT_PAGE_PICK_AFFID(addr);
            rt_base_t pgoff_affid = RT_PAGE_PICK_AFFID(pgoffset << MM_PAGE_SHIFT);

            /* filter illegally aligned addresses */
            if (va_affid != pgoff_affid)
            {
                LOG_W("Unaligned mapping address %p(pgoff=0x%lx) from fd=%d",
                      addr, pgoffset, fd);
            }
        }
        else
        {
            /* an anonymous mapping can always be aligned */
        }
    }
    else
    {
        /* weak address selection */
        aligned_size = _aligned_for_weak_mapping(&pgoffset, &length, &min_align_size);
    }

    if (fd == -1)
    {
#ifdef RT_DEBUGGING_PAGE_THRESHOLD
        if (!_memory_threshold_ok())
            return (void *)-ENOMEM;
#endif /* RT_DEBUGGING_PAGE_THRESHOLD */

        k_offset = MM_PA_TO_OFF(addr);
        k_flags = MMF_CREATE(lwp_user_mm_flag_to_kernel(flags) | MMF_MAP_PRIVATE,
                             min_align_size);
        k_attr = lwp_user_mm_attr_to_kernel(prot);

        uspace = lwp->aspace;
        length = RT_ALIGN(length, ARCH_PAGE_SIZE);
        mem_obj = _get_mmap_obj(lwp);

        rc = rt_aspace_map(uspace, &addr, length, k_attr, k_flags, mem_obj, k_offset);
        if (rc == RT_EOK)
        {
            ret = addr;
        }
        else
        {
            ret = (void *)lwp_errno_to_posix(rc);
        }
    }
    else
    {
        struct dfs_file *d;

        d = fd_get(fd);
        if (d)
        {
            struct dfs_mmap2_args mmap2;

            mmap2.addr = addr;
            mmap2.length = length;
            mmap2.min_align_size = min_align_size;
            mmap2.prot = prot;
            mmap2.flags = flags;
            mmap2.pgoffset = pgoffset;
            mmap2.ret = (void *)-1;
            mmap2.lwp = lwp;

            rc = dfs_file_mmap2(d, &mmap2);
            if (rc == RT_EOK)
            {
                ret = mmap2.ret;
            }
            else
            {
                ret = (void *)lwp_errno_to_posix(rc);
            }
        }
    }

    if ((long)ret <= 0)
    {
        LOG_D("%s() => %ld", __func__, ret);
    }
    else
    {
        ret = (char *)ret + aligned_size;
        LOG_D("%s() => 0x%lx", __func__, ret);
    }

    return ret;
}
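
/*
 * Usage sketch (illustrative; PROT_xxx / MAP_xxx come from the user-side
 * headers): an anonymous private mapping at a kernel-chosen address.
 * Errors come back POSIX-encoded as small negative values cast to a
 * pointer:
 *
 *   void *p = lwp_mmap2(lwp, RT_NULL, 0x1000, PROT_READ | PROT_WRITE,
 *                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *   if ((long)p <= 0)
 *       ... map failed; the negated value is the POSIX errno ...
 */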
int lwp_munmap(struct rt_lwp *lwp, void *addr, size_t length)
{
    int ret;

    RT_ASSERT(lwp);
    ret = rt_aspace_unmap_range(lwp->aspace, addr, length);

    return lwp_errno_to_posix(ret);
}

void *lwp_mremap(struct rt_lwp *lwp, void *old_address, size_t old_size,
                 size_t new_size, int flags, void *new_address)
{
    RT_ASSERT(lwp);

    return rt_aspace_mremap_range(lwp->aspace, old_address, old_size, new_size, flags, new_address);
}
size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check src */
    if (src < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (src >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_get(lwp, dst, src, size);
}

size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;

    /* check dst */
    if (dst < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)dst + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    return lwp_data_put(lwp, dst, src, size);
}

rt_inline rt_bool_t _can_unaligned_access(const char *addr)
{
    /* true iff addr lies in the kernel's direct (identity-offset) mapping */
    return (char *)rt_kmem_v2p((char *)addr) - PV_OFFSET == addr;
}
void *lwp_memcpy(void *__restrict dst, const void *__restrict src, size_t size)
{
    void *rc = dst;
    long len;

    if (lwp_in_user_space(dst))
    {
        if (!lwp_in_user_space(src))
        {
            len = lwp_put_to_user(dst, (void *)src, size);
            if (!len)
            {
                LOG_E("lwp_put_to_user(lwp=%p, dst=%p,src=%p,size=0x%lx) failed", lwp_self(), dst, src, size);
            }
        }
        else
        {
            /* user-to-user copy is not supported yet */
            LOG_W("%s(dst=%p,src=%p,size=0x%lx): operation not supported", __func__, dst, src, size);
        }
    }
    else
    {
        if (lwp_in_user_space(src))
        {
            len = lwp_get_from_user(dst, (void *)src, size);
            if (!len)
            {
                LOG_E("lwp_get_from_user(lwp=%p, dst=%p,src=%p,size=0x%lx) failed", lwp_self(), dst, src, size);
            }
        }
        else
        {
            if (_can_unaligned_access(dst) && _can_unaligned_access(src))
            {
                rc = memcpy(dst, src, size);
            }
            else
            {
                rt_memcpy(dst, src, size);
            }
        }
    }

    return rc;
}
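
/*
 * Dispatch summary for lwp_memcpy():
 *   dst user,   src kernel -> lwp_put_to_user()
 *   dst kernel, src user   -> lwp_get_from_user()
 *   both user              -> unsupported (logged)
 *   both kernel            -> libc memcpy() when both buffers are in the
 *                             direct mapping, otherwise rt_memcpy();
 *                             presumably because the libc routine may use
 *                             wide/unaligned accesses that are only safe
 *                             on normal cacheable memory.
 */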
int lwp_user_accessible_ext(struct rt_lwp *lwp, void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;

    if (!lwp)
    {
        return RT_FALSE;
    }
    if (!size || !addr)
    {
        return RT_FALSE;
    }

    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    if (addr_start < (void *)USER_VADDR_START)
    {
        return RT_FALSE;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return RT_FALSE;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return RT_FALSE;
    }
#endif

    next_page =
        (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_addr = lwp_v2p(lwp, addr_start);
        if (tmp_addr == ARCH_MAP_FAILED &&
            !rt_aspace_query(lwp->aspace, addr_start))
        {
            return RT_FALSE;
        }
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);

    return RT_TRUE;
}
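
/*
 * The check above walks the range page by page: a page passes if it is
 * already mapped (lwp_v2p() succeeds) or if a varea covers it, in which
 * case a later access can still be satisfied by the page-fault path.
 */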
int lwp_user_accessable(void *addr, size_t size)
{
    return lwp_user_accessible_ext(lwp_self(), addr, size);
}
#define ALIGNED(addr) (!((rt_size_t)(addr) & ARCH_PAGE_MASK))

/* src is in lwp address space, dst is in current thread space */
size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    char *temp_page = 0;
    char *dst_iter;
    char *src_copy_end, *src_iter, *src_iter_aligned, *src_next_page;

    if (!size || !dst)
    {
        return 0;
    }

    dst_iter = dst;
    src_iter = src;
    src_copy_end = (char *)src + size;
    src_next_page =
        (char *)(((size_t)src_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t bytes_to_copy = (char *)src_next_page - (char *)src_iter;
        if (bytes_to_copy > size)
        {
            bytes_to_copy = size;
        }

        if (ALIGNED(src_iter) && bytes_to_copy == ARCH_PAGE_SIZE)
        {
            /* get the whole page into the kernel buffer directly */
            if (rt_aspace_page_get(lwp->aspace, src_iter, dst_iter))
                break;
        }
        else
        {
            /* stage a partial page through the bounce buffer */
            if (!temp_page)
                temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!temp_page)
                break;

            src_iter_aligned = (char *)((long)src_iter & ~ARCH_PAGE_MASK);
            if (rt_aspace_page_get(lwp->aspace, src_iter_aligned, temp_page))
                break;
            memcpy(dst_iter, temp_page + (src_iter - src_iter_aligned), bytes_to_copy);
        }

        dst_iter = dst_iter + bytes_to_copy;
        src_iter = src_iter + bytes_to_copy;
        size -= bytes_to_copy;
        src_next_page = src_next_page + ARCH_PAGE_SIZE;
        copy_len += bytes_to_copy;
    } while (src_iter < src_copy_end);

    if (temp_page)
        rt_pages_free(temp_page, 0);

    return copy_len;
}
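
/*
 * Copy strategy: page-aligned, page-sized chunks are pulled straight
 * into the destination with rt_aspace_page_get(); odd head/tail pieces
 * go through a single lazily allocated bounce page. The return value is
 * the byte count actually copied, which falls short of 'size' if a user
 * page could not be fetched.
 */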
/* dst is in lwp address space, src is in current thread space */
size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    char *temp_page = 0;
    char *dst_iter, *dst_iter_aligned, *dst_next_page, *dst_put_end;
    char *src_iter;

    if (!size || !dst)
    {
        return 0;
    }

    src_iter = src;
    dst_iter = dst;
    dst_put_end = (char *)dst + size;
    dst_next_page =
        (char *)(((size_t)dst_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t bytes_to_put = (char *)dst_next_page - (char *)dst_iter;
        if (bytes_to_put > size)
        {
            bytes_to_put = size;
        }

        if (ALIGNED(dst_iter) && bytes_to_put == ARCH_PAGE_SIZE)
        {
            /* write a whole page into the lwp address space */
            if (rt_aspace_page_put(lwp->aspace, dst_iter, src_iter))
                break;
        }
        else
        {
            /* read-modify-write a partial page through the bounce buffer */
            if (!temp_page)
                temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!temp_page)
                break;

            dst_iter_aligned = (char *)((long)dst_iter & ~ARCH_PAGE_MASK);
            if (rt_aspace_page_get(lwp->aspace, dst_iter_aligned, temp_page))
                break;
            memcpy(temp_page + (dst_iter - dst_iter_aligned), src_iter, bytes_to_put);
            if (rt_aspace_page_put(lwp->aspace, dst_iter_aligned, temp_page))
                break;
        }

        src_iter = src_iter + bytes_to_put;
        dst_iter = dst_iter + bytes_to_put;
        size -= bytes_to_put;
        dst_next_page = dst_next_page + ARCH_PAGE_SIZE;
        copy_len += bytes_to_put;
    } while (dst_iter < dst_put_end);

    if (temp_page)
        rt_pages_free(temp_page, 0);

    return copy_len;
}
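
/*
 * Note the read-modify-write in the partial-page path above: the
 * destination page is fetched first so the bytes around the written
 * range survive the page_put.
 */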
/* set 'size' bytes starting at 'dst' in the lwp address space to 'byte' */
size_t lwp_data_set(struct rt_lwp *lwp, void *dst, int byte, size_t size)
{
    size_t copy_len = 0;
    char *temp_page = 0;
    char *dst_iter, *dst_iter_aligned, *dst_next_page;
    char *dst_put_end;

    if (!size || !dst)
    {
        return 0;
    }

    dst_iter = dst;
    dst_put_end = (char *)dst + size;
    dst_next_page =
        (char *)(((size_t)dst_iter + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    temp_page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (temp_page)
    {
        do
        {
            size_t bytes_to_put = (char *)dst_next_page - (char *)dst_iter;
            if (bytes_to_put > size)
            {
                bytes_to_put = size;
            }

            dst_iter_aligned = (char *)((long)dst_iter & ~ARCH_PAGE_MASK);
            /* a partial page must be fetched first to preserve its other bytes */
            if (!ALIGNED(dst_iter) || bytes_to_put != ARCH_PAGE_SIZE)
            {
                if (rt_aspace_page_get(lwp->aspace, dst_iter_aligned, temp_page))
                    break;
            }

            memset(temp_page + (dst_iter - dst_iter_aligned), byte, bytes_to_put);
            if (rt_aspace_page_put(lwp->aspace, dst_iter_aligned, temp_page))
                break;

            dst_iter = dst_iter + bytes_to_put;
            size -= bytes_to_put;
            dst_next_page = dst_next_page + ARCH_PAGE_SIZE;
            copy_len += bytes_to_put;
        } while (dst_iter < dst_put_end);

        rt_pages_free(temp_page, 0);
    }

    return copy_len;
}
size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s)
{
    int len = 0;
    char *temp_buf = RT_NULL;
    void *addr_start = RT_NULL;
    int get_bytes = 0;
    int index = 0;

    if (s == RT_NULL)
        return 0;

    if (lwp == RT_NULL)
    {
        LOG_W("%s: lwp is NULL", __func__);
        return -1;
    }

    addr_start = (void *)s;
    temp_buf = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!temp_buf)
    {
        LOG_W("%s: No memory", __func__);
        return -1;
    }

    get_bytes = lwp_data_get(lwp, temp_buf, addr_start, ARCH_PAGE_SIZE);
    if (get_bytes == 0)
    {
        LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000) failed", lwp, temp_buf, addr_start);
        rt_pages_free(temp_buf, 0);
        return -1;
    }

    while (temp_buf[index] != '\0')
    {
        len++;
        index++;
        if (index == get_bytes)
        {
            if (get_bytes == ARCH_PAGE_SIZE)
            {
                get_bytes = lwp_data_get(lwp, temp_buf, (char *)addr_start + len, ARCH_PAGE_SIZE);
                if (get_bytes == 0)
                {
                    LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000): user data inaccessible",
                          lwp, temp_buf, addr_start);
                    len = -1;
                    break;
                }
                index = 0;
            }
            else
            {
                LOG_I("lwp_data_get(lwp=%p,dst=0x%lx,src=0x%lx,size=0x1000): user data inaccessible",
                      lwp, temp_buf, addr_start);
                len = -1;
                break;
            }
        }
    }

    rt_pages_free(temp_buf, 0);
    return len;
}
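
/*
 * The string is scanned a page at a time through a bounce page, so the
 * kernel never dereferences the user pointer directly; -1 means some
 * part of the string was not accessible.
 */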
size_t lwp_user_strlen(const char *s)
{
    struct rt_lwp *lwp = RT_NULL;

    lwp = lwp_self();
    RT_ASSERT(lwp != RT_NULL);

    return lwp_user_strlen_ext(lwp, s);
}

size_t lwp_strlen(struct rt_lwp *lwp, const char *s)
{
    if (lwp_in_user_space(s))
        return lwp_user_strlen_ext(lwp, s);
    else
        return strlen(s);
}

#endif /* ARCH_MM_MMU */