lwp_user_mm.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2021-02-06     lizhirui     fixed vtable size problem
 * 2021-02-12     lizhirui     add 64-bit support for lwp_brk
 * 2021-02-19     lizhirui     add riscv64 support for lwp_user_accessable and lwp_get_from_user
 * 2021-06-07     lizhirui     modify user space bound check
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef RT_USING_USERSPACE
#include <mmu.h>
#include <page.h>
#include <lwp_mm_area.h>
#include <lwp_user_mm.h>
#include <lwp_arch.h>
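
/* Create the user address space (page tables) for a new process; the
 * real work is architecture specific. */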
int lwp_user_space_init(struct rt_lwp *lwp)
{
    return arch_user_space_init(lwp);
}

void switch_mmu(void *mtable);
void *mmu_table_get(void);
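
/* Activate the MMU table that belongs to the incoming thread: the
 * process's own table for user threads, the kernel table otherwise.
 * The (physical) table address is compared with the active one so a
 * redundant switch is skipped. */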
void lwp_mmu_switch(struct rt_thread *thread)
{
    struct rt_lwp *l = RT_NULL;
    void *pre_mmu_table = RT_NULL, *new_mmu_table = RT_NULL;

    if (thread->lwp)
    {
        l = (struct rt_lwp *)thread->lwp;
        new_mmu_table = (void *)((char *)l->mmu_info.vtable + l->mmu_info.pv_off);
    }
    else
    {
        new_mmu_table = arch_kernel_mmu_table_get();
    }
    pre_mmu_table = mmu_table_get();
    if (pre_mmu_table != new_mmu_table)
    {
        switch_mmu(new_mmu_table);
    }
}
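
/* Unmap [addr, addr + size) one page at a time; when pa_need_free is
 * set, the backing page frames are returned to the page allocator. */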
static void unmap_range(struct rt_lwp *lwp, void *addr, size_t size, int pa_need_free)
{
    void *va = RT_NULL, *pa = RT_NULL;
    size_t i = 0;

    for (va = addr, i = 0; i < size; va = (void *)((char *)va + ARCH_PAGE_SIZE), i += ARCH_PAGE_SIZE)
    {
        pa = rt_hw_mmu_v2p(&lwp->mmu_info, va);
        if (pa)
        {
            rt_hw_mmu_unmap(&lwp->mmu_info, va, ARCH_PAGE_SIZE);
            if (pa_need_free)
            {
                rt_pages_free((void *)((char *)pa - PV_OFFSET), 0);
            }
        }
    }
}
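
/* Tear down the whole user address space: walk the map-area tree,
 * unmap each area (freeing frames for private text/data, dropping a
 * reference for shared memory), then free the page table itself. */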
void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node = RT_NULL;

    while ((node = lwp_map_find_first(lwp->map_area)) != 0)
    {
        struct rt_mm_area_struct *ma = (struct rt_mm_area_struct *)node->data;
        int pa_need_free = 0;

        RT_ASSERT(ma->type < MM_AREA_TYPE_UNKNOW);
        switch (ma->type)
        {
        case MM_AREA_TYPE_DATA:
        case MM_AREA_TYPE_TEXT:
            pa_need_free = 1;
            break;
        case MM_AREA_TYPE_SHM:
            lwp_shm_ref_dec(lwp, (void *)ma->addr);
            break;
        }
        unmap_range(lwp, (void *)ma->addr, ma->size, pa_need_free);
        lwp_map_area_remove(&lwp->map_area, ma->addr);
    }
    arch_user_space_vtable_free(lwp);
}
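
/* Allocate and map map_size bytes at map_va (the MMU layer presumably
 * picks an address when map_va is RT_NULL) and register the area as
 * TEXT or DATA. Runs under the mm lock taken by lwp_map_user(). */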
static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *va = RT_NULL;
    int ret = 0;
    rt_mmu_info *m_info = &lwp->mmu_info;
    int area_type;

    va = rt_hw_mmu_map_auto(m_info, map_va, map_size, MMU_MAP_U_RWCB);
    if (!va)
    {
        return 0;
    }
    area_type = text ? MM_AREA_TYPE_TEXT : MM_AREA_TYPE_DATA;
    ret = lwp_map_area_insert(&lwp->map_area, (size_t)va, map_size, area_type);
    if (ret != 0)
    {
        unmap_range(lwp, va, map_size, 1);
        return 0;
    }
    return va;
}
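
/* Remove the user area registered at va. Returns 0 on success or -1
 * when no area starts at that address. */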
int lwp_unmap_user(struct rt_lwp *lwp, void *va)
{
    struct lwp_avl_struct *ma_avl_node = RT_NULL;
    struct rt_mm_area_struct *ma = RT_NULL;
    int pa_need_free = 0;

    rt_mm_lock();
    ma_avl_node = lwp_map_find(lwp->map_area, (size_t)va);
    if (!ma_avl_node)
    {
        rt_mm_unlock();
        return -1;
    }
    ma = (struct rt_mm_area_struct *)ma_avl_node->data;
    RT_ASSERT(ma->type < MM_AREA_TYPE_UNKNOW);
    if ((ma->type == MM_AREA_TYPE_DATA) || (ma->type == MM_AREA_TYPE_TEXT))
    {
        pa_need_free = 1;
    }
    unmap_range(lwp, (void *)ma->addr, ma->size, pa_need_free);
    lwp_map_area_remove(&lwp->map_area, (size_t)va);
    rt_mm_unlock();
    return 0;
}
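
/* AVL iteration callback, apparently used when duplicating a process:
 * copy one mapped area of the current process into new_lwp (passed via
 * arg). PHY/SHM areas are remapped onto the same frames, DATA areas
 * are deep-copied, and TEXT pages are shared by taking an extra
 * reference on each page frame. Returns 0 on success, -1 on failure. */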
int lwp_dup_user(struct lwp_avl_struct *ptree, void *arg)
{
    struct rt_lwp *self_lwp = lwp_self();
    struct rt_lwp *new_lwp = (struct rt_lwp *)arg;
    struct rt_mm_area_struct *ma = (struct rt_mm_area_struct *)ptree->data;
    void *pa = RT_NULL;
    void *va = RT_NULL;

    switch (ma->type)
    {
    case MM_AREA_TYPE_PHY:
        pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, (void *)ma->addr);
        va = lwp_map_user_type(new_lwp, (void *)ma->addr, pa, ma->size, 0, MM_AREA_TYPE_PHY);
        break;
    case MM_AREA_TYPE_PHY_CACHED:
        pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, (void *)ma->addr);
        /* the cached flag must match the PHY_CACHED type so the
         * mapping attributes survive duplication */
        va = lwp_map_user_type(new_lwp, (void *)ma->addr, pa, ma->size, 1, MM_AREA_TYPE_PHY_CACHED);
        break;
    case MM_AREA_TYPE_SHM:
        va = (void *)ma->addr;
        if (lwp_shm_ref_inc(self_lwp, va) > 0)
        {
            pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, va);
            va = lwp_map_user_type(new_lwp, va, pa, ma->size, 1, MM_AREA_TYPE_SHM);
        }
        break;
    case MM_AREA_TYPE_DATA:
        va = lwp_map_user(new_lwp, (void *)ma->addr, ma->size, 0);
        if (va == (void *)ma->addr)
        {
            lwp_data_put(&new_lwp->mmu_info, va, va, ma->size);
        }
        break;
    case MM_AREA_TYPE_TEXT:
        {
            char *addr = (char *)ma->addr;
            size_t size = ma->size;

            while (size)
            {
                pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, (void *)addr);
                rt_page_ref_inc((char *)pa - self_lwp->mmu_info.pv_off, 0);
                va = lwp_map_user_type(new_lwp, addr, pa, ARCH_PAGE_SIZE, 1, MM_AREA_TYPE_TEXT);
                if (va != addr)
                {
                    return -1;
                }
                addr += ARCH_PAGE_SIZE;
                size -= ARCH_PAGE_SIZE;
            }
            va = (void *)ma->addr;
        }
        break;
    default:
        RT_ASSERT(0);
        break;
    }
    if (va != (void *)ma->addr)
    {
        return -1;
    }
    return 0;
}
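
/* Unmapping is type-independent, so these wrappers just delegate. */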
int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}

int lwp_unmap_user_type(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}
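
/* Public mapping entry: page-align the request, map under the mm lock,
 * and return the mapped address re-adjusted by the original in-page
 * offset (RT_NULL on failure). */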
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);
    rt_mm_lock();
    ret = _lwp_map_user(lwp, map_va, map_size, text);
    rt_mm_unlock();
    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}
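
/* Map an existing physical range into user space. cached selects
 * write-back (MMU_MAP_U_RWCB) versus uncached (MMU_MAP_U_RW)
 * attributes; a cached PHY area is recorded as PHY_CACHED so the
 * attribute can be reproduced later (e.g. by lwp_dup_user). */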
static void *_lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached, int type)
{
    void *va = RT_NULL;
    rt_mmu_info *m_info = &lwp->mmu_info;
    size_t attr = 0;
    int ret = 0;

    if (cached)
    {
        attr = MMU_MAP_U_RWCB;
        if (type == MM_AREA_TYPE_PHY)
        {
            type = MM_AREA_TYPE_PHY_CACHED;
        }
    }
    else
    {
        attr = MMU_MAP_U_RW;
    }
    va = rt_hw_mmu_map(m_info, map_va, map_pa, map_size, attr);
    if (va)
    {
        ret = lwp_map_area_insert(&lwp->map_area, (size_t)va, map_size, type);
        if (ret != 0)
        {
            unmap_range(lwp, va, map_size, 0);
            return 0;
        }
    }
    return va;
}
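
/* Locking front end of _lwp_map_user_type: rejects requests where va
 * and pa disagree on the in-page offset, then aligns both to page
 * boundaries before mapping. */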
void *lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached, int type)
{
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        if (((size_t)map_va & ARCH_PAGE_MASK) != ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            return 0;
        }
    }
    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);
    rt_mm_lock();
    ret = _lwp_map_user_type(lwp, map_va, map_pa, map_size, cached, type);
    rt_mm_unlock();
    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}

void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached)
{
    return lwp_map_user_type(lwp, map_va, map_pa, map_size, cached, MM_AREA_TYPE_PHY);
}
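
/* brk backend: requests at or below the current heap end report the
 * current end; otherwise the heap grows page-wise up to USER_HEAP_VEND.
 * Returns the (possibly unchanged) heap end, or -1 on failure. */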
rt_base_t lwp_brk(void *addr)
{
    rt_base_t ret = -1;
    struct rt_lwp *lwp = RT_NULL;

    rt_mm_lock();
    lwp = rt_thread_self()->lwp;
    if ((size_t)addr <= lwp->end_heap)
    {
        ret = (rt_base_t)lwp->end_heap;
    }
    else
    {
        size_t size = 0;
        void *va = RT_NULL;

        if ((size_t)addr <= USER_HEAP_VEND)
        {
            size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) & ~ARCH_PAGE_MASK;
            va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
        }
        if (va)
        {
            lwp->end_heap += size;
            ret = lwp->end_heap;
        }
    }
    rt_mm_unlock();
    return ret;
}
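
/* Minimal mmap2: only anonymous, fd-less mappings are supported.
 * MAP_ANONYMOUS memory is zero-filled; errors return (void *)-1,
 * i.e. MAP_FAILED. prot, flags other than MAP_ANONYMOUS, and
 * pgoffset are ignored. */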
#define MAP_ANONYMOUS 0x20

void *lwp_mmap2(void *addr, size_t length, int prot,
                int flags, int fd, off_t pgoffset)
{
    void *ret = (void *)-1;
    struct rt_lwp *lwp = RT_NULL;

    if (fd == -1)
    {
        lwp = rt_thread_self()->lwp;
        rt_mm_lock();
        ret = lwp_map_user(lwp, addr, length, 0);
        rt_mm_unlock();
        if (ret)
        {
            if ((flags & MAP_ANONYMOUS) != 0)
            {
                rt_memset(ret, 0, length);
            }
        }
        else
        {
            ret = (void *)-1;
        }
    }
    return ret;
}

int lwp_munmap(void *addr)
{
    int ret = 0;
    struct rt_lwp *lwp = RT_NULL;

    rt_mm_lock();
    lwp = rt_thread_self()->lwp;
    ret = lwp_unmap_user(lwp, addr);
    rt_mm_unlock();
    return ret;
}
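
/* Copy size bytes from user space (src) of the current process into a
 * kernel buffer (dst), after bounds-checking src against the user
 * address window. Returns the number of bytes copied, 0 on failure. */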
size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_mmu_info *m_info = RT_NULL;

    /* check src */
    if (src < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (src >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }
    m_info = &lwp->mmu_info;
    return lwp_data_get(m_info, dst, src, size);
}
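
/* Mirror of lwp_get_from_user: copy from a kernel buffer (src) to a
 * bounds-checked user-space destination (dst). */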
size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_mmu_info *m_info = RT_NULL;

    /* check dst */
    if (dst < (void *)USER_VADDR_START)
    {
        return 0;
    }
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)dst + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }
    m_info = &lwp->mmu_info;
    return lwp_data_put(m_info, dst, src, size);
}
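
/* Check that every page of [addr, addr + size) is mapped in the current
 * process. On riscv64 only the lower bound is range-checked here; the
 * upper bound is presumably caught by the page-table walk. */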
int lwp_user_accessable(void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;
    struct rt_lwp *lwp = lwp_self();
    rt_mmu_info *mmu_info = RT_NULL;

    if (!lwp)
    {
        return 0;
    }
    if (!size || !addr)
    {
        return 0;
    }
    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    if (addr_start < (void *)USER_VADDR_START)
    {
        return 0;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
#endif

    mmu_info = &lwp->mmu_info;
    next_page = (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_addr = rt_hw_mmu_v2p(mmu_info, addr_start);
        if (!tmp_addr)
        {
            return 0;
        }
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);
    return 1;
}

/* src is in mmu_info space, dst is in current thread space;
 * copies page by page through the page table and returns the number of
 * bytes actually copied */
size_t lwp_data_get(rt_mmu_info *mmu_info, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_dst = dst;
    addr_start = src;
    addr_end = (void *)((char *)src + size);
    next_page = (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_src = rt_hw_mmu_v2p(mmu_info, addr_start);
        if (!tmp_src)
        {
            break;
        }
        /* convert the physical address to its kernel virtual alias */
        tmp_src = (void *)((char *)tmp_src - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_dst = (void *)((char *)tmp_dst + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}

/* dst is in mmu_info space, src is in current thread space */
size_t lwp_data_put(rt_mmu_info *mmu_info, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_src = src;
    addr_start = dst;
    addr_end = (void *)((char *)dst + size);
    next_page = (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_dst = rt_hw_mmu_v2p(mmu_info, addr_start);
        if (!tmp_dst)
        {
            break;
        }
        tmp_dst = (void *)((char *)tmp_dst - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_src = (void *)((char *)tmp_src + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}
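
/* Flush the data cache for a user buffer via its kernel alias. Only
 * the first page is translated, so the range is assumed to be
 * physically contiguous (typically a single page). */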
void lwp_data_cache_flush(rt_mmu_info *mmu_info, void *vaddr, size_t size)
{
    void *paddr = RT_NULL;

    paddr = rt_hw_mmu_v2p(mmu_info, vaddr);
    paddr = (void *)((char *)paddr - PV_OFFSET);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, paddr, size);
}

#endif /* RT_USING_USERSPACE */