lwp_user_mm.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2021-02-06     lizhirui     fixed vtable size problem
 * 2021-02-12     lizhirui     add 64-bit support for lwp_brk
 * 2021-02-19     lizhirui     add riscv64 support for lwp_user_accessable and lwp_get_from_user
 * 2021-06-07     lizhirui     modify user space bound check
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef RT_USING_USERSPACE
#include <mmu.h>
#include <page.h>
#include <lwp_mm_area.h>
#include <lwp_user_mm.h>
#include <lwp_arch.h>

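/* Create the user address space for an lwp; the page table setup is delegated
 * to the architecture layer. Returns the result of arch_user_space_init(). */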
int lwp_user_space_init(struct rt_lwp *lwp)
{
    return arch_user_space_init(lwp);
}

void switch_mmu(void *mtable);
void *mmu_table_get(void);

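/*
 * Switch the active MMU translation table when a thread is scheduled in:
 * a user thread gets its lwp's page table, a kernel thread gets the kernel
 * table. The switch is skipped if that table is already active.
 */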
void lwp_mmu_switch(struct rt_thread *thread)
{
    struct rt_lwp *l = RT_NULL;
    void *pre_mmu_table = RT_NULL, *new_mmu_table = RT_NULL;

    if (thread->lwp)
    {
        l = (struct rt_lwp *)thread->lwp;
        new_mmu_table = (void *)((char *)l->mmu_info.vtable + l->mmu_info.pv_off);
    }
    else
    {
        new_mmu_table = arch_kernel_mmu_table_get();
    }

    pre_mmu_table = mmu_table_get();
    if (pre_mmu_table != new_mmu_table)
    {
        switch_mmu(new_mmu_table);
    }
}

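/*
 * Unmap [addr, addr + size) from the lwp page by page. When pa_need_free is
 * set, the backing physical pages are also returned to the page allocator.
 */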
static void unmap_range(struct rt_lwp *lwp, void *addr, size_t size, int pa_need_free)
{
    void *va = RT_NULL, *pa = RT_NULL;
    int i = 0;

    for (va = addr, i = 0; i < size; va = (void *)((char *)va + ARCH_PAGE_SIZE), i += ARCH_PAGE_SIZE)
    {
        pa = rt_hw_mmu_v2p(&lwp->mmu_info, va);
        if (pa)
        {
            rt_hw_mmu_unmap(&lwp->mmu_info, va, ARCH_PAGE_SIZE);
            if (pa_need_free)
            {
                rt_pages_free((void *)((char *)pa - PV_OFFSET), 0);
            }
        }
    }
}

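/*
 * Tear down an lwp's whole user address space: walk the map_area tree,
 * release each area according to its type (freeing pages for TEXT/DATA,
 * dropping the reference for SHM), then free the page table itself.
 */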
void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node = RT_NULL;

    while ((node = lwp_map_find_first(lwp->map_area)) != 0)
    {
        struct rt_mm_area_struct *ma = (struct rt_mm_area_struct *)node->data;
        int pa_need_free = 0;

        RT_ASSERT(ma->type < MM_AREA_TYPE_UNKNOW);
        switch (ma->type)
        {
        case MM_AREA_TYPE_DATA:
        case MM_AREA_TYPE_TEXT:
            pa_need_free = 1;
            break;
        case MM_AREA_TYPE_SHM:
            lwp_shm_ref_dec(lwp, (void *)ma->addr);
            break;
        }
        unmap_range(lwp, (void *)ma->addr, ma->size, pa_need_free);
        lwp_map_area_remove(&lwp->map_area, ma->addr);
    }
    arch_user_space_vtable_free(lwp);
}

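/*
 * Map an anonymous user region: let the MMU layer allocate and map pages for
 * [map_va, map_va + map_size), then record the area as TEXT or DATA in the
 * lwp's map_area tree. Returns the mapped virtual address or 0 on failure.
 */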
static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *va = RT_NULL;
    int ret = 0;
    rt_mmu_info *m_info = &lwp->mmu_info;
    int area_type;

    va = rt_hw_mmu_map_auto(m_info, map_va, map_size, MMU_MAP_U_RWCB);
    if (!va)
    {
        return 0;
    }

    area_type = text ? MM_AREA_TYPE_TEXT : MM_AREA_TYPE_DATA;
    ret = lwp_map_area_insert(&lwp->map_area, (size_t)va, map_size, area_type);
    if (ret != 0)
    {
        unmap_range(lwp, va, map_size, 1);
        return 0;
    }
    return va;
}

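/*
 * Unmap the user area that starts at va (rounded down to a page boundary).
 * Returns 0 on success, -1 if no matching area is registered.
 */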
int lwp_unmap_user(struct rt_lwp *lwp, void *va)
{
    struct lwp_avl_struct *ma_avl_node = RT_NULL;
    struct rt_mm_area_struct *ma = RT_NULL;
    int pa_need_free = 0;

    rt_mm_lock();
    va = (void *)((size_t)va & ~ARCH_PAGE_MASK);
    ma_avl_node = lwp_map_find(lwp->map_area, (size_t)va);
    if (!ma_avl_node)
    {
        rt_mm_unlock();
        return -1;
    }

    ma = (struct rt_mm_area_struct *)ma_avl_node->data;
    RT_ASSERT(ma->type < MM_AREA_TYPE_UNKNOW);
    if ((ma->type == MM_AREA_TYPE_DATA) || (ma->type == MM_AREA_TYPE_TEXT))
    {
        pa_need_free = 1;
    }
    unmap_range(lwp, (void *)ma->addr, ma->size, pa_need_free);
    lwp_map_area_remove(&lwp->map_area, (size_t)va);
    rt_mm_unlock();
    return 0;
}

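/*
 * map_area tree visitor used when duplicating (forking) an lwp: copy one
 * mapped area from the current lwp into the new lwp passed via arg.
 * PHY/PHY_CACHED areas are re-mapped to the same physical pages, SHM areas
 * are shared with their reference count bumped, DATA areas are copied, and
 * TEXT areas are shared page by page via rt_page_ref_inc(). Returns 0 on
 * success, -1 on failure.
 */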
int lwp_dup_user(struct lwp_avl_struct *ptree, void *arg)
{
    struct rt_lwp *self_lwp = lwp_self();
    struct rt_lwp *new_lwp = (struct rt_lwp *)arg;
    struct rt_mm_area_struct *ma = (struct rt_mm_area_struct *)ptree->data;
    void *pa = RT_NULL;
    void *va = RT_NULL;

    switch (ma->type)
    {
    case MM_AREA_TYPE_PHY:
        pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, (void *)ma->addr);
        va = lwp_map_user_type(new_lwp, (void *)ma->addr, pa, ma->size, 0, MM_AREA_TYPE_PHY);
        break;
    case MM_AREA_TYPE_PHY_CACHED:
        pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, (void *)ma->addr);
        va = lwp_map_user_type(new_lwp, (void *)ma->addr, pa, ma->size, 0, MM_AREA_TYPE_PHY_CACHED);
        break;
    case MM_AREA_TYPE_SHM:
        va = (void *)ma->addr;
        if (lwp_shm_ref_inc(self_lwp, va) > 0)
        {
            pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, va);
            va = lwp_map_user_type(new_lwp, va, pa, ma->size, 1, MM_AREA_TYPE_SHM);
        }
        break;
    case MM_AREA_TYPE_DATA:
        va = lwp_map_user(new_lwp, (void *)ma->addr, ma->size, 0);
        if (va == (void *)ma->addr)
        {
            lwp_data_put(&new_lwp->mmu_info, va, va, ma->size);
        }
        break;
    case MM_AREA_TYPE_TEXT:
        {
            char *addr = (char *)ma->addr;
            size_t size = ma->size;

            while (size)
            {
                pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, (void *)addr);
                rt_page_ref_inc((char *)pa - self_lwp->mmu_info.pv_off, 0);
                va = lwp_map_user_type(new_lwp, addr, pa, ARCH_PAGE_SIZE, 1, MM_AREA_TYPE_TEXT);
                if (va != addr)
                {
                    return -1;
                }
                addr += ARCH_PAGE_SIZE;
                size -= ARCH_PAGE_SIZE;
            }
            va = (void *)ma->addr;
        }
        break;
    default:
        RT_ASSERT(0);
        break;
    }

    if (va != (void *)ma->addr)
    {
        return -1;
    }
    return 0;
}

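/* Type-specific unmap entry points; both currently forward to lwp_unmap_user(). */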
int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}

int lwp_unmap_user_type(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}

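/*
 * Public anonymous-mapping entry: page-align the request, perform the mapping
 * under the mm lock via _lwp_map_user(), and return the result adjusted back
 * by the caller's original in-page offset (RT_NULL on failure).
 */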
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }

    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    rt_mm_lock();
    ret = _lwp_map_user(lwp, map_va, map_size, text);
    rt_mm_unlock();

    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}

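/*
 * Map a caller-supplied physical range into the lwp's user space with the
 * requested cacheability and record it in the map_area tree under the given
 * area type. A cached PHY request is recorded as PHY_CACHED.
 */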
static void *_lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached, int type)
{
    void *va = RT_NULL;
    rt_mmu_info *m_info = &lwp->mmu_info;
    size_t attr = 0;
    int ret = 0;

    if (cached)
    {
        attr = MMU_MAP_U_RWCB;
        if (type == MM_AREA_TYPE_PHY)
        {
            type = MM_AREA_TYPE_PHY_CACHED;
        }
    }
    else
    {
        attr = MMU_MAP_U_RW;
    }

    va = rt_hw_mmu_map(m_info, map_va, map_pa, map_size, attr);
    if (va)
    {
        ret = lwp_map_area_insert(&lwp->map_area, (size_t)va, map_size, type);
        if (ret != 0)
        {
            unmap_range(lwp, va, map_size, 0);
            return 0;
        }
    }
    return va;
}

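/*
 * Public typed-mapping entry: reject va/pa pairs whose in-page offsets differ,
 * page-align the request, map it under the mm lock, and re-apply the original
 * offset to the returned address.
 */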
void *lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached, int type)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        if (((size_t)map_va & ARCH_PAGE_MASK) != ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            return 0;
        }
    }

    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);

    rt_mm_lock();
    ret = _lwp_map_user_type(lwp, map_va, map_pa, map_size, cached, type);
    rt_mm_unlock();

    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}

void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached)
{
    return lwp_map_user_type(lwp, map_va, map_pa, map_size, cached, MM_AREA_TYPE_PHY);
}

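/*
 * Grow the process heap up to addr (bounded by USER_HEAP_VEND). Requests at
 * or below the current break leave the heap unchanged. Returns the resulting
 * end of heap, or -1 if the heap could not be extended.
 */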
rt_base_t lwp_brk(void *addr)
{
    rt_base_t ret = -1;
    struct rt_lwp *lwp = RT_NULL;

    rt_mm_lock();
    lwp = rt_thread_self()->lwp;

    if ((size_t)addr <= lwp->end_heap)
    {
        ret = (rt_base_t)lwp->end_heap;
    }
    else
    {
        size_t size = 0;
        void *va = RT_NULL;

        if ((size_t)addr <= USER_HEAP_VEND)
        {
            size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) & ~ARCH_PAGE_MASK;
            va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
        }
        if (va)
        {
            lwp->end_heap += size;
            ret = lwp->end_heap;
        }
    }
    rt_mm_unlock();
    return ret;
}

#define MAP_ANONYMOUS  0x20

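/*
 * mmap2() backend for user processes. Anonymous mappings (fd == -1) are
 * created with lwp_map_user() and zeroed when MAP_ANONYMOUS is set;
 * file-backed requests are forwarded to device drivers through
 * dfs_file_mmap2(). Returns (void *)-1 on failure.
 */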
void *lwp_mmap2(void *addr, size_t length, int prot,
        int flags, int fd, off_t pgoffset)
{
    void *ret = (void *)-1;

    if (fd == -1)
    {
        rt_mm_lock();
        ret = lwp_map_user(lwp_self(), addr, length, 0);
        rt_mm_unlock();

        if (ret)
        {
            if ((flags & MAP_ANONYMOUS) != 0)
            {
                rt_memset(ret, 0, length);
            }
        }
        else
        {
            ret = (void *)-1;
        }
    }
    else
    {
        struct dfs_fd *d;

        d = fd_get(fd);
        if (d && d->fnode->type == FT_DEVICE)
        {
            struct dfs_mmap2_args mmap2;

            mmap2.addr = addr;
            mmap2.length = length;
            mmap2.prot = prot;
            mmap2.flags = flags;
            mmap2.pgoffset = pgoffset;
            mmap2.ret = (void *)-1;
            if (dfs_file_mmap2(d, &mmap2) == 0)
            {
                ret = mmap2.ret;
            }
        }
    }
    return ret;
}

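/* munmap() backend: unmap the user area containing addr under the mm lock. */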
int lwp_munmap(void *addr)
{
    int ret = 0;

    rt_mm_lock();
    ret = lwp_unmap_user(lwp_self(), addr);
    rt_mm_unlock();
    return ret;
}

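/*
 * Copy size bytes from a user-space source into a kernel buffer, after
 * checking that the source range lies inside the user address window.
 * Returns the number of bytes copied, or 0 if the range is invalid.
 */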
size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_mmu_info *m_info = RT_NULL;

    /* check src */
    if (src < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (src >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    m_info = &lwp->mmu_info;
    return lwp_data_get(m_info, dst, src, size);
}

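/*
 * Copy size bytes from a kernel buffer to a user-space destination, after
 * checking that the destination range lies inside the user address window.
 * Returns the number of bytes copied, or 0 if the range is invalid.
 */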
size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_mmu_info *m_info = RT_NULL;

    /* check dst */
    if (dst < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)dst + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }

    m_info = &lwp->mmu_info;
    return lwp_data_put(m_info, dst, src, size);
}

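/*
 * Check whether [addr, addr + size) is fully mapped in the current process's
 * user address space by translating it page by page. Returns 1 if every page
 * has a physical mapping, 0 otherwise.
 */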
int lwp_user_accessable(void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;
    struct rt_lwp *lwp = lwp_self();
    rt_mmu_info *mmu_info = RT_NULL;

    if (!lwp)
    {
        return 0;
    }
    if (!size || !addr)
    {
        return 0;
    }

    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    if (addr_start < (void *)USER_VADDR_START)
    {
        return 0;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
#endif

    mmu_info = &lwp->mmu_info;
    next_page = (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_addr = rt_hw_mmu_v2p(mmu_info, addr_start);
        if (!tmp_addr)
        {
            return 0;
        }
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);
    return 1;
}

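/*
 * Cross-address-space copy helpers. Both walk the range in the target mmu_info
 * one page at a time, translate each user page to its kernel-visible alias
 * (physical address minus PV_OFFSET) and memcpy through it. They return the
 * number of bytes copied, which is less than size if an unmapped page is hit.
 */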
/* src is in mmu_info space, dst is in current thread space */
size_t lwp_data_get(rt_mmu_info *mmu_info, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }

    tmp_dst = dst;
    addr_start = src;
    addr_end = (void *)((char *)src + size);
    next_page = (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_src = rt_hw_mmu_v2p(mmu_info, addr_start);
        if (!tmp_src)
        {
            break;
        }
        tmp_src = (void *)((char *)tmp_src - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_dst = (void *)((char *)tmp_dst + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}

/* dst is in mmu_info space, src is in current thread space */
size_t lwp_data_put(rt_mmu_info *mmu_info, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }

    tmp_src = src;
    addr_start = dst;
    addr_end = (void *)((char *)dst + size);
    next_page = (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_dst = rt_hw_mmu_v2p(mmu_info, addr_start);
        if (!tmp_dst)
        {
            break;
        }
        tmp_dst = (void *)((char *)tmp_dst - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_src = (void *)((char *)tmp_src + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}

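/*
 * Flush the data cache for a user virtual range: translate vaddr to its
 * kernel-visible alias and flush size bytes. Note that a failed translation
 * (NULL from rt_hw_mmu_v2p()) is not checked here.
 */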
void lwp_data_cache_flush(rt_mmu_info *mmu_info, void *vaddr, size_t size)
{
    void *paddr = RT_NULL;

    paddr = rt_hw_mmu_v2p(mmu_info, vaddr);
    paddr = (void *)((char *)paddr - PV_OFFSET);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, paddr, size);
}

#endif /* RT_USING_USERSPACE */