/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-28     Jesven       first version
 * 2021-02-06     lizhirui     fixed vtable size problem
 * 2021-02-12     lizhirui     add 64-bit support for lwp_brk
 * 2021-02-19     lizhirui     add riscv64 support for lwp_user_accessable and lwp_get_from_user
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef RT_USING_USERSPACE

#include <mmu.h>
#include <page.h>
#include <lwp_mm_area.h>
#include <lwp_user_mm.h>
#include <lwp_arch.h>

#ifdef RT_USING_GDBSERVER
#include <lwp_gdbserver.h>
#include <hw_breakpoint.h>
#endif
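
/* set up the user address space (page tables) for a new lwp via the arch layer */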
int lwp_user_space_init(struct rt_lwp *lwp)
{
    return arch_user_space_init(lwp);
}
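
/* low-level MMU primitives, presumably provided by the architecture port */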
void switch_mmu(void *mtable);
void *mmu_table_get(void);
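
/*
 * Switch the active MMU translation table for the incoming thread: a user
 * thread uses its lwp's page table, a kernel thread falls back to the
 * kernel table. The switch is skipped when the target table is already
 * active.
 */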
void lwp_mmu_switch(struct rt_thread *thread)
{
    struct rt_lwp *l = RT_NULL;
    void *pre_mmu_table = RT_NULL, *new_mmu_table = RT_NULL;

    if (thread->lwp)
    {
        l = (struct rt_lwp *)thread->lwp;
        new_mmu_table = (void *)((char *)l->mmu_info.vtable + l->mmu_info.pv_off);
    }
    else
    {
        new_mmu_table = arch_kernel_mmu_table_get();
    }

    pre_mmu_table = mmu_table_get();
    if (pre_mmu_table != new_mmu_table)
    {
        switch_mmu(new_mmu_table);
    }
}
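
/*
 * Unmap [addr, addr + size) page by page; when pa_need_free is set, the
 * backing physical pages are released as well.
 */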
static void unmap_range(struct rt_lwp *lwp, void *addr, size_t size, int pa_need_free)
{
    void *va = RT_NULL, *pa = RT_NULL;
    int i = 0;

    for (va = addr, i = 0; i < size; va = (void *)((char *)va + ARCH_PAGE_SIZE), i += ARCH_PAGE_SIZE)
    {
        pa = rt_hw_mmu_v2p(&lwp->mmu_info, va);
        if (pa)
        {
            rt_hw_mmu_unmap(&lwp->mmu_info, va, ARCH_PAGE_SIZE);
            if (pa_need_free)
            {
                rt_pages_free((void *)((char *)pa - PV_OFFSET), 0);
            }
        }
    }
}
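
/*
 * Tear down the whole user address space of an lwp: walk the map-area
 * tree, release every area according to its type, then free the vtable.
 */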
void lwp_unmap_user_space(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node = RT_NULL;

    while ((node = lwp_map_find_first(lwp->map_area)) != 0)
    {
        struct rt_mm_area_struct *ma = (struct rt_mm_area_struct *)node->data;
        int pa_need_free = 0;

        RT_ASSERT(ma->type < MM_AREA_TYPE_UNKNOW);
        switch (ma->type)
        {
        case MM_AREA_TYPE_DATA:
        case MM_AREA_TYPE_TEXT:
            pa_need_free = 1;
            break;
        case MM_AREA_TYPE_SHM:
            lwp_shm_ref_dec(lwp, (void *)ma->addr);
            break;
        }
        unmap_range(lwp, (void *)ma->addr, ma->size, pa_need_free);
        lwp_map_area_remove(&lwp->map_area, ma->addr);
    }
    arch_user_space_vtable_free(lwp);
}
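
/* map an anonymous user range and record it in the lwp's map-area tree */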
static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    void *va = RT_NULL;
    int ret = 0;
    rt_mmu_info *m_info = &lwp->mmu_info;
    int area_type;

    va = rt_hw_mmu_map_auto(m_info, map_va, map_size, MMU_MAP_U_RWCB);
    if (!va)
    {
        return 0;
    }
    area_type = text ? MM_AREA_TYPE_TEXT : MM_AREA_TYPE_DATA;
    ret = lwp_map_area_insert(&lwp->map_area, (size_t)va, map_size, area_type);
    if (ret != 0)
    {
        unmap_range(lwp, va, map_size, 1);
        return 0;
    }
    return va;
}
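
/* unmap the whole user area containing va and drop it from the map-area tree */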
int lwp_unmap_user(struct rt_lwp *lwp, void *va)
{
    rt_base_t level = 0;
    struct lwp_avl_struct *ma_avl_node = RT_NULL;
    struct rt_mm_area_struct *ma = RT_NULL;
    int pa_need_free = 0;

    level = rt_hw_interrupt_disable();
    ma_avl_node = lwp_map_find(lwp->map_area, (size_t)va);
    if (!ma_avl_node)
    {
        rt_hw_interrupt_enable(level);
        return -1;
    }
    ma = (struct rt_mm_area_struct *)ma_avl_node->data;
    RT_ASSERT(ma->type < MM_AREA_TYPE_UNKNOW);
    if ((ma->type == MM_AREA_TYPE_DATA) || (ma->type == MM_AREA_TYPE_TEXT))
    {
        pa_need_free = 1;
    }
    unmap_range(lwp, (void *)ma->addr, ma->size, pa_need_free);
    lwp_map_area_remove(&lwp->map_area, (size_t)va);
    rt_hw_interrupt_enable(level);
    return 0;
}
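
/*
 * AVL-tree visitor that duplicates one mapped area of the current lwp
 * into new_lwp (arg): physical and shared areas are re-mapped onto the
 * same physical pages, data areas are deep-copied, and text pages are
 * shared with an extra page reference.
 */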
int lwp_dup_user(struct lwp_avl_struct *ptree, void *arg)
{
    struct rt_lwp *self_lwp = lwp_self();
    struct rt_lwp *new_lwp = (struct rt_lwp *)arg;
    struct rt_mm_area_struct *ma = (struct rt_mm_area_struct *)ptree->data;
    void *pa = RT_NULL;
    void *va = RT_NULL;

    switch (ma->type)
    {
    case MM_AREA_TYPE_PHY:
        pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, (void *)ma->addr);
        va = lwp_map_user_type(new_lwp, (void *)ma->addr, pa, ma->size, 0, MM_AREA_TYPE_PHY);
        break;
    case MM_AREA_TYPE_PHY_CACHED:
        pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, (void *)ma->addr);
        va = lwp_map_user_type(new_lwp, (void *)ma->addr, pa, ma->size, 0, MM_AREA_TYPE_PHY_CACHED);
        break;
    case MM_AREA_TYPE_SHM:
        va = (void *)ma->addr;
        if (lwp_shm_ref_inc(self_lwp, va) > 0)
        {
            pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, va);
            va = lwp_map_user_type(new_lwp, va, pa, ma->size, 1, MM_AREA_TYPE_SHM);
        }
        break;
    case MM_AREA_TYPE_DATA:
        va = lwp_map_user(new_lwp, (void *)ma->addr, ma->size, 0);
        if (va == (void *)ma->addr)
        {
            lwp_data_put(&new_lwp->mmu_info, va, va, ma->size);
        }
        break;
    case MM_AREA_TYPE_TEXT:
        {
            char *addr = (char *)ma->addr;
            size_t size = ma->size;

            while (size)
            {
                pa = rt_hw_mmu_v2p(&self_lwp->mmu_info, (void *)addr);
                rt_page_ref_inc((char *)pa - self_lwp->mmu_info.pv_off, 0);
                va = lwp_map_user_type(new_lwp, addr, pa, ARCH_PAGE_SIZE, 1, MM_AREA_TYPE_TEXT);
                if (va != addr)
                {
                    return -1;
                }
                addr += ARCH_PAGE_SIZE;
                size -= ARCH_PAGE_SIZE;
            }
            va = (void *)ma->addr;
        }
        break;
    default:
        RT_ASSERT(0);
        break;
    }
    if (va != (void *)ma->addr)
    {
        return -1;
    }
    return 0;
}
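
/* physical and typed mappings are released the same way as ordinary ones */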
int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}

int lwp_unmap_user_type(struct rt_lwp *lwp, void *va)
{
    return lwp_unmap_user(lwp, va);
}
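
/*
 * Public mapping entry: page-align the requested range, map it with
 * interrupts disabled, and return the address adjusted by the original
 * in-page offset.
 */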
void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
{
    rt_base_t level = 0;
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    offset = (size_t)map_va & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);

    level = rt_hw_interrupt_disable();
    ret = _lwp_map_user(lwp, map_va, map_size, text);
    rt_hw_interrupt_enable(level);

    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}
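
/*
 * Map a user range onto the given physical pages; a cached PHY mapping is
 * recorded as MM_AREA_TYPE_PHY_CACHED in the area tree.
 */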
static void *_lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached, int type)
{
    void *va = RT_NULL;
    rt_mmu_info *m_info = &lwp->mmu_info;
    size_t attr = 0;
    int ret = 0;

    if (cached)
    {
        attr = MMU_MAP_U_RWCB;
        if (type == MM_AREA_TYPE_PHY)
        {
            type = MM_AREA_TYPE_PHY_CACHED;
        }
    }
    else
    {
        attr = MMU_MAP_U_RW;
    }
    va = rt_hw_mmu_map(m_info, map_va, map_pa, map_size, attr);
    if (va)
    {
        ret = lwp_map_area_insert(&lwp->map_area, (size_t)va, map_size, type);
        if (ret != 0)
        {
            unmap_range(lwp, va, map_size, 0);
            return 0;
        }
    }
    return va;
}
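
/*
 * Typed mapping entry: reject va/pa pairs whose in-page offsets differ,
 * page-align the range, then map it with interrupts disabled.
 */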
void *lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached, int type)
{
    rt_base_t level = 0;
    void *ret = RT_NULL;
    size_t offset = 0;

    if (!map_size)
    {
        return 0;
    }
    if (map_va)
    {
        if (((size_t)map_va & ARCH_PAGE_MASK) != ((size_t)map_pa & ARCH_PAGE_MASK))
        {
            return 0;
        }
    }
    offset = (size_t)map_pa & ARCH_PAGE_MASK;
    map_size += (offset + ARCH_PAGE_SIZE - 1);
    map_size &= ~ARCH_PAGE_MASK;
    map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);

    level = rt_hw_interrupt_disable();
    ret = _lwp_map_user_type(lwp, map_va, map_pa, map_size, cached, type);
    rt_hw_interrupt_enable(level);

    if (ret)
    {
        ret = (void *)((char *)ret + offset);
    }
    return ret;
}
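
/* convenience wrapper: map physical memory into user space as MM_AREA_TYPE_PHY */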
void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached)
{
    return lwp_map_user_type(lwp, map_va, map_pa, map_size, cached, MM_AREA_TYPE_PHY);
}
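
/*
 * brk() backend: grow the process heap up to USER_HEAP_VEND by mapping new
 * pages; requests at or below the current heap end leave it unchanged.
 */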
rt_base_t lwp_brk(void *addr)
{
    rt_base_t level = 0;
    rt_base_t ret = -1;
    struct rt_lwp *lwp = RT_NULL;

    level = rt_hw_interrupt_disable();
    lwp = rt_thread_self()->lwp;

    if ((size_t)addr <= lwp->end_heap)
    {
        ret = (rt_base_t)lwp->end_heap;
    }
    else
    {
        size_t size = 0;
        void *va = RT_NULL;

        if ((size_t)addr <= USER_HEAP_VEND)
        {
            size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) & ~ARCH_PAGE_MASK;
            va = lwp_map_user(lwp, (void *)lwp->end_heap, size, 0);
        }
        if (va)
        {
            lwp->end_heap += size;
            ret = lwp->end_heap;
        }
    }
    rt_hw_interrupt_enable(level);
    return ret;
}
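
/*
 * Minimal mmap2() backend: only anonymous, fd-less mappings are handled,
 * and the range is zeroed when MAP_ANONYMOUS (0x20, as in the Linux ABI)
 * is set; returns (void *)-1 on failure.
 */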
#define MAP_ANONYMOUS 0x20

void *lwp_mmap2(void *addr, size_t length, int prot,
                int flags, int fd, off_t pgoffset)
{
    rt_base_t level = 0;
    void *ret = (void *)-1;
    struct rt_lwp *lwp = RT_NULL;

    if (fd == -1)
    {
        lwp = rt_thread_self()->lwp;
        level = rt_hw_interrupt_disable();
        ret = lwp_map_user(lwp, addr, length, 0);
        rt_hw_interrupt_enable(level);

        if (ret)
        {
            if ((flags & MAP_ANONYMOUS) != 0)
            {
                rt_memset(ret, 0, length);
            }
        }
        else
        {
            ret = (void *)-1;
        }
    }
    return ret;
}
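
/* munmap() backend: release the user mapping that contains addr */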
int lwp_munmap(void *addr)
{
    rt_base_t level = 0;
    int ret = 0;
    struct rt_lwp *lwp = RT_NULL;

    level = rt_hw_interrupt_disable();
    lwp = rt_thread_self()->lwp;
    ret = lwp_unmap_user(lwp, addr);
    rt_hw_interrupt_enable(level);

    return ret;
}
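
/*
 * Copy size bytes from user space (src) into the kernel buffer dst after
 * a bounds check on the user address range; returns the number of bytes
 * actually copied.
 */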
size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_mmu_info *m_info = RT_NULL;

    /* check src */
#ifdef ARCH_RISCV64
    if (src < (void *)USER_VADDR_START)
    {
        return 0;
    }
#else
    if (src >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)src + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
#endif

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }
    m_info = &lwp->mmu_info;
    return lwp_data_get(m_info, dst, src, size);
}
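
/*
 * Copy size bytes from the kernel buffer src into user space (dst) after
 * a bounds check on the user address range; returns the number of bytes
 * actually copied.
 */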
size_t lwp_put_to_user(void *dst, void *src, size_t size)
{
    struct rt_lwp *lwp = RT_NULL;
    rt_mmu_info *m_info = RT_NULL;

    /* check dst */
    if (dst >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if ((void *)((char *)dst + size) > (void *)USER_VADDR_TOP)
    {
        return 0;
    }

    lwp = lwp_self();
    if (!lwp)
    {
        return 0;
    }
    m_info = &lwp->mmu_info;
    return lwp_data_put(m_info, dst, src, size);
}
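
/*
 * Return 1 when every page of [addr, addr + size) is mapped in the
 * current lwp's address space, 0 otherwise.
 */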
int lwp_user_accessable(void *addr, size_t size)
{
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_addr = RT_NULL;
    struct rt_lwp *lwp = lwp_self();
    rt_mmu_info *mmu_info = RT_NULL;

    if (!lwp)
    {
        return 0;
    }
    if (!size || !addr)
    {
        return 0;
    }
    addr_start = addr;
    addr_end = (void *)((char *)addr + size);

#ifdef ARCH_RISCV64
    if (addr_start < (void *)USER_VADDR_START)
    {
        return 0;
    }
#else
    if (addr_start >= (void *)USER_VADDR_TOP)
    {
        return 0;
    }
    if (addr_end > (void *)USER_VADDR_TOP)
    {
        return 0;
    }
#endif

    mmu_info = &lwp->mmu_info;
    next_page = (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_addr = rt_hw_mmu_v2p(mmu_info, addr_start);
        if (!tmp_addr)
        {
            return 0;
        }
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
    } while (addr_start < addr_end);
    return 1;
}

/* src is in mmu_info space, dst is in current thread space */
size_t lwp_data_get(rt_mmu_info *mmu_info, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_dst = dst;
    addr_start = src;
    addr_end = (void *)((char *)src + size);
    next_page = (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_src = rt_hw_mmu_v2p(mmu_info, addr_start);
        if (!tmp_src)
        {
            break;
        }
        tmp_src = (void *)((char *)tmp_src - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_dst = (void *)((char *)tmp_dst + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}

/* dst is in mmu_info space, src is in current thread space */
size_t lwp_data_put(rt_mmu_info *mmu_info, void *dst, void *src, size_t size)
{
    size_t copy_len = 0;
    void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
    void *tmp_dst = RT_NULL, *tmp_src = RT_NULL;

    if (!size || !dst)
    {
        return 0;
    }
    tmp_src = src;
    addr_start = dst;
    addr_end = (void *)((char *)dst + size);
    next_page = (void *)(((size_t)addr_start + ARCH_PAGE_SIZE) & ~(ARCH_PAGE_SIZE - 1));
    do
    {
        size_t len = (char *)next_page - (char *)addr_start;

        if (size < len)
        {
            len = size;
        }
        tmp_dst = rt_hw_mmu_v2p(mmu_info, addr_start);
        if (!tmp_dst)
        {
            break;
        }
        tmp_dst = (void *)((char *)tmp_dst - PV_OFFSET);
        rt_memcpy(tmp_dst, tmp_src, len);
        tmp_src = (void *)((char *)tmp_src + len);
        addr_start = (void *)((char *)addr_start + len);
        size -= len;
        next_page = (void *)((char *)next_page + ARCH_PAGE_SIZE);
        copy_len += len;
    } while (addr_start < addr_end);
    return copy_len;
}
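
/* flush the data cache for a user range through its kernel-side alias */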
void lwp_data_cache_flush(rt_mmu_info *mmu_info, void *vaddr, size_t size)
{
    void *paddr = RT_NULL;

    paddr = rt_hw_mmu_v2p(mmu_info, vaddr);
    paddr = (void *)((char *)paddr - PV_OFFSET);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, paddr, size);
}

#endif