/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-01-30     lizhirui     first version
 */

#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include "page.h"
#include <stdlib.h>
#include <string.h>
#include "riscv.h"
#include "riscv_mmu.h"
#include "mmu.h"

void *current_mmu_table = RT_NULL;

void rt_hw_cpu_icache_invalidate_all(void);
void rt_hw_cpu_dcache_flush_all(void);
void rt_hw_cpu_dcache_clean(void *addr, rt_size_t size);
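
//force a full TLB flush by rewriting satp and issuing the flush provided
//by mmu_flush_tlb()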
static void rt_hw_cpu_tlb_invalidate(void)
{
    rt_size_t satpv = read_csr(satp);
    write_csr(satp, satpv);
    mmu_flush_tlb();
}

void *mmu_table_get(void)
{
    return current_mmu_table;
}
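
//install a new root page table: remember it, load it into the MMU, and
//flush the data/instruction caches so the new mappings are observed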
void switch_mmu(void *mmu_table)
{
    current_mmu_table = mmu_table;
    RT_ASSERT(__CHECKALIGN(mmu_table, PAGE_OFFSET_BIT));
    mmu_set_pagetable((rt_ubase_t)mmu_table);
    rt_hw_cpu_dcache_flush_all();
    rt_hw_cpu_icache_invalidate_all();
}
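
//record the virtual range managed by mmu_info (stored as L1 indices)
//together with its root table and virtual-to-physical offset; if any L1
//entry in the range is already populated, the descriptor is left untouched
//and 0 is returned; -1 signals invalid arguments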
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, rt_size_t *vtable, rt_size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;

    if ((!mmu_info) || (!vtable))
    {
        return -1;
    }

    va_s = (rt_size_t)v_address;
    va_e = ((rt_size_t)v_address) + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    //convert address to level 1 page frame id
    va_s = GET_L1(va_s);
    va_e = GET_L1(va_e);

    if (va_s == 0)
    {
        return -1;
    }

    level = rt_hw_interrupt_disable();

    //vtable initialization check
    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v)
        {
            rt_hw_interrupt_enable(level);
            return 0;
        }
    }

    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;

    rt_hw_interrupt_enable(level);
    return 0;
}
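
//direct-map a kernel virtual range onto physical memory using one L1
//superpage (L1_PAGE_SIZE bytes) per entry, marked global and RWX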
void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info, rt_size_t vaddr_start, rt_size_t size)
{
    rt_size_t paddr_start = __UMASKVALUE(VPN_TO_PPN(vaddr_start, mmu_info->pv_off), PAGE_OFFSET_MASK);
    rt_size_t va_s = GET_L1(vaddr_start);
    rt_size_t va_e = GET_L1(vaddr_start + size - 1);
    rt_size_t i;

    for (i = va_s; i <= va_e; i++)
    {
        mmu_info->vtable[i] = COMBINEPTE(paddr_start, PAGE_ATTR_RWX | PTE_G | PTE_V);
        paddr_start += L1_PAGE_SIZE;
    }

    rt_hw_cpu_tlb_invalidate();
}

//find a range of free virtual addresses big enough to hold the requested
//number of pages
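//first-fit linear scan of the three-level table; returns 0 when no run is
//found, which is why rt_hw_mmu_map_init rejects ranges whose first L1 index
//is 0.  an unused L1 entry is counted as only __SIZE(VPN1_BIT) pages, which
//undercounts its true capacity but never over-allocates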
static rt_size_t find_vaddr(rt_mmu_info *mmu_info, rt_size_t pages)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_size_t find_l1 = 0, find_l2 = 0, find_l3 = 0;
    rt_size_t n = 0;

    if (!pages)
    {
        return 0;
    }

    if (!mmu_info)
    {
        return 0;
    }

    for (l1_off = mmu_info->vstart; l1_off <= mmu_info->vend; l1_off++)
    {
        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

        if (PTE_USED(*mmu_l1))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);

            for (l2_off = 0; l2_off < __SIZE(VPN1_BIT); l2_off++)
            {
                if (PTE_USED(*(mmu_l2 + l2_off)))
                {
                    RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
                    mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);

                    for (l3_off = 0; l3_off < __SIZE(VPN0_BIT); l3_off++)
                    {
                        if (PTE_USED(*(mmu_l3 + l3_off)))
                        {
                            RT_ASSERT(PAGE_IS_LEAF(*(mmu_l3 + l3_off)));
                            n = 0;//in use, restart the run
                        }
                        else
                        {
                            if (!n)
                            {
                                find_l1 = l1_off;
                                find_l2 = l2_off;
                                find_l3 = l3_off;
                            }

                            n++;

                            if (n >= pages)
                            {
                                return COMBINEVADDR(find_l1, find_l2, find_l3);
                            }
                        }
                    }
                }
                else
                {
                    if (!n)
                    {
                        find_l1 = l1_off;
                        find_l2 = l2_off;
                        find_l3 = 0;
                    }

                    n += __SIZE(VPN0_BIT);

                    if (n >= pages)
                    {
                        return COMBINEVADDR(find_l1, find_l2, find_l3);
                    }
                }
            }
        }
        else
        {
            if (!n)
            {
                find_l1 = l1_off;
                find_l2 = 0;
                find_l3 = 0;
            }

            n += __SIZE(VPN1_BIT);

            if (n >= pages)
            {
                return COMBINEVADDR(find_l1, find_l2, find_l3);
            }
        }
    }

    return 0;
}

//check whether a range of virtual addresses is free
static int check_vaddr(rt_mmu_info *mmu_info, void *va, rt_size_t pages)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)va, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    if (!pages)
    {
        return -1;
    }

    if (!mmu_info)
    {
        return -1;
    }

    while (pages--)
    {
        l1_off = GET_L1(loop_va);
        l2_off = GET_L2(loop_va);
        l3_off = GET_L3(loop_va);
        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

        if (PTE_USED(*mmu_l1))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off) + l2_off;

            if (PTE_USED(*mmu_l2))
            {
                RT_ASSERT(!PAGE_IS_LEAF(*mmu_l2));
                mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2), mmu_info->pv_off) + l3_off;

                if (PTE_USED(*mmu_l3))
                {
                    RT_ASSERT(PAGE_IS_LEAF(*mmu_l3));
                    return -1;
                }
            }
        }

        loop_va += PAGE_SIZE;
    }

    return 0;
}
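
//table pages below the root are allocated in pairs (rt_pages_alloc(1)): the
//first page holds the PTEs and the first word of the second page holds a
//count of live entries (ref_cnt, at index __SIZE(VPN0_BIT)/__SIZE(VPN1_BIT)),
//so a table page can be released as soon as its last entry is cleared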
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_size_t *ref_cnt;

    RT_ASSERT(mmu_info);

    while (npages--)
    {
        l1_off = (rt_size_t)GET_L1(loop_va);
        RT_ASSERT((l1_off >= mmu_info->vstart) && (l1_off <= mmu_info->vend));
        l2_off = (rt_size_t)GET_L2(loop_va);
        l3_off = (rt_size_t)GET_L3(loop_va);

        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;
        RT_ASSERT(PTE_USED(*mmu_l1));
        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
        mmu_l2 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off)) + l2_off;
        RT_ASSERT(PTE_USED(*mmu_l2));
        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l2));
        mmu_l3 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2), mmu_info->pv_off)) + l3_off;
        RT_ASSERT(PTE_USED(*mmu_l3));
        RT_ASSERT(PAGE_IS_LEAF(*mmu_l3));

        *mmu_l3 = 0;
        rt_hw_cpu_dcache_clean(mmu_l3, sizeof(*mmu_l3));
        mmu_l3 -= l3_off;
        ref_cnt = mmu_l3 + __SIZE(VPN0_BIT);
        (*ref_cnt)--;

        if (!*ref_cnt)
        {
            //release the level 3 page (entry page and ref_cnt page)
            rt_pages_free(mmu_l3, 1);
            *mmu_l2 = 0;
            rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
            mmu_l2 -= l2_off;
            ref_cnt = mmu_l2 + __SIZE(VPN1_BIT);
            (*ref_cnt)--;

            if (!*ref_cnt)
            {
                //release the level 2 page (entry page and ref_cnt page)
                rt_pages_free(mmu_l2, 1);
                *mmu_l1 = 0;
                rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
            }
        }

        loop_va += PAGE_SIZE;
    }
}
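
//map npages of contiguous physical memory at v_addr, allocating the
//intermediate table pages on demand; on allocation failure the pages mapped
//so far are rolled back.  note that the attr argument is currently not
//applied: leaf PTEs are always written with PAGE_DEFAULT_ATTR_LEAF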
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t npages, rt_size_t attr)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t loop_pa = __UMASKVALUE((rt_size_t)p_addr, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_size_t *ref_cnt;
    rt_size_t mapped = 0;//pages successfully mapped, used for rollback

    //rt_kprintf("v_addr = 0x%p,p_addr = 0x%p,npages = %lu\n",v_addr,p_addr,npages);

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        l1_off = GET_L1(loop_va);
        l2_off = GET_L2(loop_va);
        l3_off = GET_L3(loop_va);
        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

        if (PTE_USED(*mmu_l1))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);
        }
        else
        {
            mmu_l2 = (rt_size_t *)rt_pages_alloc(1);

            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, PAGE_SIZE * 2);
                rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE * 2);
                *mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2, mmu_info->pv_off), PAGE_DEFAULT_ATTR_NEXT);
                rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
            }
            else
            {
                //roll back only the pages actually mapped so far
                __rt_hw_mmu_unmap(mmu_info, v_addr, mapped);
                return -1;
            }
        }

        if (PTE_USED(*(mmu_l2 + l2_off)))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);
        }
        else
        {
            mmu_l3 = (rt_size_t *)rt_pages_alloc(1);

            if (mmu_l3)
            {
                rt_memset(mmu_l3, 0, PAGE_SIZE * 2);
                rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE * 2);
                *(mmu_l2 + l2_off) = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3, mmu_info->pv_off), PAGE_DEFAULT_ATTR_NEXT);
                rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
                ref_cnt = mmu_l2 + __SIZE(VPN1_BIT);
                (*ref_cnt)++;
            }
            else
            {
                //roll back only the pages actually mapped so far
                __rt_hw_mmu_unmap(mmu_info, v_addr, mapped);
                return -1;
            }
        }

        RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
        ref_cnt = mmu_l3 + __SIZE(VPN0_BIT);
        (*ref_cnt)++;
        *(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)loop_pa, PAGE_DEFAULT_ATTR_LEAF);
        rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
        mapped++;
        loop_va += PAGE_SIZE;
        loop_pa += PAGE_SIZE;
    }

    return 0;
}
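
//map a physical range of `size` bytes; if v_addr is RT_NULL a free virtual
//range is picked automatically, otherwise v_addr must share the physical
//address's in-page offset and the requested range must be unmapped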
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
{
    rt_size_t pa_s, pa_e;
    rt_size_t vaddr;
    rt_size_t pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    pa_s = (rt_size_t)p_addr;
    pa_e = ((rt_size_t)p_addr) + size - 1;
    pa_s = GET_PF_ID(pa_s);
    pa_e = GET_PF_ID(pa_e);
    pages = pa_e - pa_s + 1;

    if (v_addr)
    {
        vaddr = (rt_size_t)v_addr;
        pa_s = (rt_size_t)p_addr;

        if (GET_PF_OFFSET(vaddr) != GET_PF_OFFSET(pa_s))
        {
            return 0;
        }

        vaddr = __UMASKVALUE(vaddr, PAGE_OFFSET_MASK);

        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);

        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr | GET_PF_OFFSET((rt_size_t)p_addr));
        }
    }

    return 0;
}
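
//back npages at v_addr with freshly allocated physical frames; on failure
//every frame and mapping created so far is released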
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages, rt_size_t attr)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t loop_pa;
    rt_size_t mapped = 0;//pages successfully mapped, used for rollback
    rt_size_t i;
    void *va, *pa;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        loop_pa = (rt_size_t)rt_pages_alloc(0);

        if (!loop_pa)
        {
            goto err;
        }

        if (__rt_hw_mmu_map(mmu_info, (void *)loop_va, (void *)loop_pa, 1, attr) < 0)
        {
            //the frame just allocated was never mapped, so release it
            //directly before unwinding the rest
            rt_pages_free((void *)loop_pa, 0);
            goto err;
        }

        mapped++;
        loop_va += PAGE_SIZE;
    }

    return 0;

err:
    //free the backing frames and mappings of the pages completed so far
    va = (void *)__UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);

    for (i = 0; i < mapped; i++)
    {
        pa = rt_hw_mmu_v2p(mmu_info, va);

        if (pa)
        {
            rt_pages_free((void *)PPN_TO_VPN(pa, mmu_info->pv_off), 0);
        }

        va = (void *)((rt_uint8_t *)va + PAGE_SIZE);
    }

    if (mapped)
    {
        __rt_hw_mmu_unmap(mmu_info, v_addr, mapped);
    }

    return -1;
}
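
//allocate and map `size` bytes of anonymous memory; if v_addr is RT_NULL a
//free virtual range is chosen, otherwise the requested range must be free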
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
{
    rt_size_t vaddr;
    rt_size_t offset;
    rt_size_t pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    offset = GET_PF_OFFSET((rt_size_t)v_addr);
    size += (offset + PAGE_SIZE - 1);
    pages = size >> PAGE_OFFSET_BIT;

    if (v_addr)
    {
        vaddr = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);

        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map_auto(mmu_info, (void *)vaddr, pages, attr);

        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr | offset);
        }
    }

    return 0;
}
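
//unmap every page touched by [v_addr, v_addr + size), then flush the TLB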
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
{
    rt_size_t va_s, va_e;
    rt_size_t pages;

    va_s = ((rt_size_t)v_addr) >> PAGE_OFFSET_BIT;
    va_e = (((rt_size_t)v_addr) + size - 1) >> PAGE_OFFSET_BIT;
    pages = va_e - va_s + 1;
    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
}
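
//the public entry points below wrap the workers in an interrupt-disabled
//critical section so the page tables are never walked while being modified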
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return ret;
}

void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return ret;
}

void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_hw_interrupt_enable(level);
}
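
//software walk of the page table: translate a virtual address to the
//physical address it is currently mapped to, or RT_NULL if unmapped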
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    if (!mmu_info)
    {
        return RT_NULL;
    }

    l1_off = GET_L1((rt_size_t)v_addr);
    l2_off = GET_L2((rt_size_t)v_addr);
    l3_off = GET_L3((rt_size_t)v_addr);
    mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

    if (PTE_USED(*mmu_l1))
    {
        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);

        if (PTE_USED(*(mmu_l2 + l2_off)))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);

            if (PTE_USED(*(mmu_l3 + l3_off)))
            {
                RT_ASSERT(PAGE_IS_LEAF(*(mmu_l3 + l3_off)));
                return (void *)(GET_PADDR(*(mmu_l3 + l3_off)) | GET_PF_OFFSET((rt_size_t)v_addr));
            }
        }
    }

    return RT_NULL;
}

void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_hw_interrupt_enable(level);
    return ret;
}
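
/*
 * Usage sketch (illustrative only, not part of the driver): how the pieces
 * above fit together.  KERNEL_VADDR_START and PV_OFFSET are hypothetical
 * board-specific constants, and the 512-entry root table assumes an Sv39
 * 9-bit top-level index; adjust both for a real target.
 */
#if 0
static rt_size_t example_vtable[512] __attribute__((aligned(4 * 1024)));
static rt_mmu_info example_mmu_info;

static void mmu_usage_example(void)
{
    /* register the managed virtual window with the (zeroed) root table */
    if (rt_hw_mmu_map_init(&example_mmu_info, (void *)KERNEL_VADDR_START,
                           0x10000000, example_vtable, PV_OFFSET) != 0)
    {
        return;
    }

    /* cover the kernel image with L1 superpage mappings */
    rt_hw_mmu_kernel_map_init(&example_mmu_info, KERNEL_VADDR_START, 0x10000000);

    /* point satp at the root table and flush caches/TLB */
    switch_mmu((void *)example_vtable);

    /* anonymous mapping: frames are allocated and mapped automatically */
    void *buf = rt_hw_mmu_map_auto(&example_mmu_info, RT_NULL, 4096, 0);

    if (buf)
    {
        /* the software walker reports the backing physical address */
        rt_kprintf("va %p -> pa %p\n", buf, rt_hw_mmu_v2p(&example_mmu_info, buf));

        /* clears the PTEs only; the frames remain owned by the caller */
        rt_hw_mmu_unmap(&example_mmu_info, buf, 4096);
    }
}
#endif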