mmu.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-01-30     lizhirui     first version
 */
#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include "page.h"
#include <stdlib.h>
#include <string.h>
#include <cache.h>
#include "riscv.h"
#include "riscv_mmu.h"
#include "mmu.h"

void *current_mmu_table = RT_NULL;
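
/* Rewrite satp with its current value and flush the TLB so the hardware
 * walker picks up page table updates. */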
static void rt_hw_cpu_tlb_invalidate(void)
{
    rt_size_t satpv = read_csr(satp);

    write_csr(satp, satpv);
    mmu_flush_tlb();
}
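
/* Return the page table most recently installed by rt_hw_mmu_switch(). */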
void *rt_hw_mmu_tbl_get(void)
{
    return current_mmu_table;
}
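
/* Install a page-aligned root page table: remember it, load it into satp,
 * then clean the D-cache and invalidate the I-cache so no stale lines
 * survive the switch. */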
void rt_hw_mmu_switch(void *mmu_table)
{
    RT_ASSERT(__CHECKALIGN(mmu_table, PAGE_OFFSET_BIT));
    current_mmu_table = mmu_table;
    mmu_set_pagetable((rt_ubase_t)mmu_table);
    rt_hw_cpu_dcache_clean_all();
    rt_hw_cpu_icache_invalidate_all();
}
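
/* Bind a level-1 page table and the virtual range it manages to mmu_info.
 * Returns -1 on invalid arguments or an invalid range; returns 0 without
 * rebinding if the range already contains live level-1 entries. */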
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, rt_size_t *vtable, rt_size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;

    if ((!mmu_info) || (!vtable))
    {
        return -1;
    }

    va_s = (rt_size_t)v_address;
    va_e = ((rt_size_t)v_address) + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    //convert address to level 1 page frame id
    va_s = GET_L1(va_s);
    va_e = GET_L1(va_e);

    if (va_s == 0)
    {
        return -1;
    }

    level = rt_hw_interrupt_disable();

    //vtable initialization check
    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v)
        {
            rt_hw_interrupt_enable(level);
            return 0;
        }
    }

    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;
    rt_hw_interrupt_enable(level);
    return 0;
}
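
/* Map the kernel range [vaddr_start, vaddr_start + size) with level-1
 * leaf entries: each entry covers L1_PAGE_SIZE bytes of physical memory
 * derived via pv_off and is marked global RWX, then the TLB is flushed. */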
void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info, rt_size_t vaddr_start, rt_size_t size)
{
    rt_size_t paddr_start = __UMASKVALUE(VPN_TO_PPN(vaddr_start, mmu_info->pv_off), PAGE_OFFSET_MASK);
    rt_size_t va_s = GET_L1(vaddr_start);
    rt_size_t va_e = GET_L1(vaddr_start + size - 1);
    rt_size_t i;

    for (i = va_s; i <= va_e; i++)
    {
        mmu_info->vtable[i] = COMBINEPTE(paddr_start, PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE | PTE_SHARE | PTE_BUF | PTE_A | PTE_D);
        paddr_start += L1_PAGE_SIZE;
    }

    rt_hw_cpu_tlb_invalidate();
}

//find a free virtual address range large enough for the requested number of pages
static rt_size_t find_vaddr(rt_mmu_info *mmu_info, rt_size_t pages)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_size_t find_l1 = 0, find_l2 = 0, find_l3 = 0;
    rt_size_t n = 0;

    if (!pages)
    {
        return 0;
    }

    if (!mmu_info)
    {
        return 0;
    }

    for (l1_off = mmu_info->vstart; l1_off <= mmu_info->vend; l1_off++)
    {
        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

        if (PTE_USED(*mmu_l1))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);

            for (l2_off = 0; l2_off < __SIZE(VPN1_BIT); l2_off++)
            {
                if (PTE_USED(*(mmu_l2 + l2_off)))
                {
                    RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
                    mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);

                    for (l3_off = 0; l3_off < __SIZE(VPN0_BIT); l3_off++)
                    {
                        if (PTE_USED(*(mmu_l3 + l3_off)))
                        {
                            RT_ASSERT(PAGE_IS_LEAF(*(mmu_l3 + l3_off)));
                            n = 0;//in use, restart the search window
                        }
                        else
                        {
                            if (!n)
                            {
                                //remember where the free run begins
                                find_l1 = l1_off;
                                find_l2 = l2_off;
                                find_l3 = l3_off;
                            }

                            n++;

                            if (n >= pages)
                            {
                                return COMBINEVADDR(find_l1, find_l2, find_l3);
                            }
                        }
                    }
                }
                else
                {
                    //an unused L2 entry contributes __SIZE(VPN0_BIT) free pages
                    if (!n)
                    {
                        find_l1 = l1_off;
                        find_l2 = l2_off;
                        find_l3 = 0;
                    }

                    n += __SIZE(VPN0_BIT);

                    if (n >= pages)
                    {
                        return COMBINEVADDR(find_l1, find_l2, find_l3);
                    }
                }
            }
        }
        else
        {
            //an unused L1 entry covers a full L2 table: __SIZE(VPN1_BIT) * __SIZE(VPN0_BIT) pages
            if (!n)
            {
                find_l1 = l1_off;
                find_l2 = 0;
                find_l3 = 0;
            }

            n += __SIZE(VPN1_BIT) * __SIZE(VPN0_BIT);

            if (n >= pages)
            {
                return COMBINEVADDR(find_l1, find_l2, find_l3);
            }
        }
    }

    return 0;
}

//check whether the given range of virtual addresses is free
static int check_vaddr(rt_mmu_info *mmu_info, void *va, rt_size_t pages)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)va, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    if (!pages)
    {
        return -1;
    }

    if (!mmu_info)
    {
        return -1;
    }

    while (pages--)
    {
        l1_off = GET_L1(loop_va);
        l2_off = GET_L2(loop_va);
        l3_off = GET_L3(loop_va);
        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

        if (PTE_USED(*mmu_l1))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off) + l2_off;

            if (PTE_USED(*mmu_l2))
            {
                RT_ASSERT(!PAGE_IS_LEAF(*mmu_l2));
                mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2), mmu_info->pv_off) + l3_off;

                if (PTE_USED(*mmu_l3))
                {
                    RT_ASSERT(PAGE_IS_LEAF(*mmu_l3));
                    return -1;
                }
            }
        }

        loop_va += PAGE_SIZE;
    }

    return 0;
}
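
/* Tear down npages 4K mappings starting at v_addr. Each level-3/level-2
 * table page keeps a reference count in the page that follows it (the two
 * pages are allocated together by rt_pages_alloc(1)); when a count drops
 * to zero the table page is freed and the parent entry cleared. */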
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_size_t *ref_cnt;

    RT_ASSERT(mmu_info);

    while (npages--)
    {
        l1_off = (rt_size_t)GET_L1(loop_va);
        RT_ASSERT((l1_off >= mmu_info->vstart) && (l1_off <= mmu_info->vend));
        l2_off = (rt_size_t)GET_L2(loop_va);
        l3_off = (rt_size_t)GET_L3(loop_va);

        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;
        RT_ASSERT(PTE_USED(*mmu_l1));
        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
        mmu_l2 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off)) + l2_off;
        RT_ASSERT(PTE_USED(*mmu_l2));
        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l2));
        mmu_l3 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2), mmu_info->pv_off)) + l3_off;
        RT_ASSERT(PTE_USED(*mmu_l3));
        RT_ASSERT(PAGE_IS_LEAF(*mmu_l3));

        *mmu_l3 = 0;
        rt_hw_cpu_dcache_clean(mmu_l3, sizeof(*mmu_l3));
        mmu_l3 -= l3_off;
        ref_cnt = mmu_l3 + __SIZE(VPN0_BIT);
        (*ref_cnt)--;

        if (!*ref_cnt)
        {
            //release the level 3 table (entry page and ref_cnt page)
            rt_pages_free(mmu_l3, 1);
            *mmu_l2 = 0;
            rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
            mmu_l2 -= l2_off;
            ref_cnt = mmu_l2 + __SIZE(VPN1_BIT);
            (*ref_cnt)--;

            if (!*ref_cnt)
            {
                //release the level 2 table (entry page and ref_cnt page)
                rt_pages_free(mmu_l2, 1);
                *mmu_l1 = 0;
                rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
            }
        }

        loop_va += PAGE_SIZE;
    }
}
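
/* Map npages 4K pages from p_addr to v_addr, allocating intermediate
 * level-2/level-3 tables on demand, each with a trailing ref_cnt page.
 * On allocation failure the pages already mapped are rolled back. Note
 * that leaf entries are written with PAGE_DEFAULT_ATTR_LEAF, so the attr
 * argument is currently ignored at this level. */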
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t npages, rt_size_t attr)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t loop_pa = __UMASKVALUE((rt_size_t)p_addr, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_size_t *ref_cnt;
    rt_size_t mapped = 0;//pages mapped so far, used for rollback on failure

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        l1_off = GET_L1(loop_va);
        l2_off = GET_L2(loop_va);
        l3_off = GET_L3(loop_va);
        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

        if (PTE_USED(*mmu_l1))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);
        }
        else
        {
            //allocate a level 2 table plus its ref_cnt page
            mmu_l2 = (rt_size_t *)rt_pages_alloc(1);

            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, PAGE_SIZE * 2);
                rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE * 2);
                *mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2, mmu_info->pv_off), PAGE_DEFAULT_ATTR_NEXT);
                rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
            }
            else
            {
                //roll back only the pages that were actually mapped
                __rt_hw_mmu_unmap(mmu_info, v_addr, mapped);
                return -1;
            }
        }

        if (PTE_USED(*(mmu_l2 + l2_off)))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);
        }
        else
        {
            //allocate a level 3 table plus its ref_cnt page
            mmu_l3 = (rt_size_t *)rt_pages_alloc(1);

            if (mmu_l3)
            {
                rt_memset(mmu_l3, 0, PAGE_SIZE * 2);
                rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE * 2);
                *(mmu_l2 + l2_off) = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3, mmu_info->pv_off), PAGE_DEFAULT_ATTR_NEXT);
                rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
                ref_cnt = mmu_l2 + __SIZE(VPN1_BIT);
                (*ref_cnt)++;
            }
            else
            {
                //roll back only the pages that were actually mapped
                __rt_hw_mmu_unmap(mmu_info, v_addr, mapped);
                return -1;
            }
        }

        RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
        ref_cnt = mmu_l3 + __SIZE(VPN0_BIT);
        (*ref_cnt)++;
        *(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)loop_pa, PAGE_DEFAULT_ATTR_LEAF);
        rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
        mapped++;
        loop_va += PAGE_SIZE;
        loop_pa += PAGE_SIZE;
    }

    return 0;
}
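
/* Map a physical range of 'size' bytes. If v_addr is given it must share
 * the page offset of p_addr and the target range must be free; otherwise
 * a free virtual range is searched for. Returns the virtual address (with
 * the original page offset) or 0 on failure. */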
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
{
    rt_size_t pa_s, pa_e;
    rt_size_t vaddr;
    rt_size_t pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    pa_s = (rt_size_t)p_addr;
    pa_e = ((rt_size_t)p_addr) + size - 1;
    pa_s = GET_PF_ID(pa_s);
    pa_e = GET_PF_ID(pa_e);
    pages = pa_e - pa_s + 1;

    if (v_addr)
    {
        vaddr = (rt_size_t)v_addr;
        pa_s = (rt_size_t)p_addr;

        if (GET_PF_OFFSET(vaddr) != GET_PF_OFFSET(pa_s))
        {
            return 0;
        }

        vaddr = __UMASKVALUE(vaddr, PAGE_OFFSET_MASK);

        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);

        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr | GET_PF_OFFSET((rt_size_t)p_addr));
        }
    }

    return 0;
}
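
/* Back npages of virtual address space with freshly allocated page frames,
 * mapping them one page at a time. On failure every page that was already
 * mapped is freed and unmapped before returning -1. */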
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages, rt_size_t attr)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t loop_pa;
    rt_size_t mapped = 0;//pages mapped so far, used for rollback on failure
    rt_size_t i;
    void *va, *pa;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        loop_pa = (rt_size_t)rt_pages_alloc(0);

        if (!loop_pa)
        {
            goto err;
        }

        if (__rt_hw_mmu_map(mmu_info, (void *)loop_va, (void *)loop_pa, 1, attr) < 0)
        {
            //the freshly allocated frame was never mapped, free it directly
            rt_pages_free((void *)loop_pa, 0);
            goto err;
        }

        mapped++;
        loop_va += PAGE_SIZE;
    }

    return 0;

err:
    //free the page frames behind the mappings that did succeed, then unmap them
    va = (void *)__UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);

    for (i = 0; i < mapped; i++)
    {
        pa = rt_hw_mmu_v2p(mmu_info, va);

        if (pa)
        {
            rt_pages_free((void *)PPN_TO_VPN(pa, mmu_info->pv_off), 0);
        }

        va = (void *)((rt_uint8_t *)va + PAGE_SIZE);
    }

    __rt_hw_mmu_unmap(mmu_info, v_addr, mapped);
    return -1;
}
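
/* Allocate and map 'size' bytes of memory. If v_addr is given the target
 * range must be free, otherwise a free range is searched for. Returns the
 * virtual address (keeping v_addr's page offset) or 0 on failure. */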
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
{
    rt_size_t vaddr;
    rt_size_t offset;
    rt_size_t pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    offset = GET_PF_OFFSET((rt_size_t)v_addr);
    size += (offset + PAGE_SIZE - 1);
    pages = size >> PAGE_OFFSET_BIT;

    if (v_addr)
    {
        vaddr = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);

        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map_auto(mmu_info, (void *)vaddr, pages, attr);

        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr | offset);
        }
    }

    return 0;
}
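
/* Unmap every page touched by [v_addr, v_addr + size) and invalidate the TLB. */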
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
{
    rt_size_t va_s, va_e;
    rt_size_t pages;

    va_s = ((rt_size_t)v_addr) >> PAGE_OFFSET_BIT;
    va_e = (((rt_size_t)v_addr) + size - 1) >> PAGE_OFFSET_BIT;
    pages = va_e - va_s + 1;
    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
}
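
/* Public wrappers: identical to the _rt_hw_mmu_* helpers above, but run
 * with interrupts disabled so the page tables are not modified concurrently. */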
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return ret;
}

void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return ret;
}

void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_hw_interrupt_enable(level);
}
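
/* Walk the three-level page table and translate a virtual address to the
 * physical address it maps to, or RT_NULL if no leaf entry exists. */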
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    if (!mmu_info)
    {
        return RT_NULL;
    }

    l1_off = GET_L1((rt_size_t)v_addr);
    l2_off = GET_L2((rt_size_t)v_addr);
    l3_off = GET_L3((rt_size_t)v_addr);
    mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

    if (PTE_USED(*mmu_l1))
    {
        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);

        if (PTE_USED(*(mmu_l2 + l2_off)))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);

            if (PTE_USED(*(mmu_l3 + l3_off)))
            {
                RT_ASSERT(PAGE_IS_LEAF(*(mmu_l3 + l3_off)));
                return (void *)(GET_PADDR(*(mmu_l3 + l3_off)) | GET_PF_OFFSET((rt_size_t)v_addr));
            }
        }
    }

    return RT_NULL;
}
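
/* Interrupt-safe wrapper around _rt_hw_mmu_v2p(). */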
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_hw_interrupt_enable(level);
    return ret;
}