/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-07-14     JasonHu      first version
 */
#include <rtthread.h>
#include <rthw.h>
#include <stdlib.h>
#include <string.h>

#include <rtdbg.h>

#include "mmu.h"
#include "cache.h"
#include "i386.h"

#ifdef RT_USING_USERSPACE
#include "page.h"
#endif /* RT_USING_USERSPACE */

// #define RT_DEBUG_MMU_X86

static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages);

#ifdef RT_USING_USERSPACE
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr);
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr);
#else
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *p_addr, size_t size, size_t attr);
#endif
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size);
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);

void *current_mmu_table = RT_NULL;
static void rt_hw_cpu_tlb_invalidate(void)
{
    mmu_flush_tlb();
}

void *rt_hw_mmu_tbl_get(void)
{
    return current_mmu_table;
}
void rt_hw_mmu_switch(void *mmu_table)
{
    current_mmu_table = mmu_table;
    if (mmu_table == RT_NULL)
    {
        dbg_log(DBG_ERROR, "rt_hw_mmu_switch: NULL mmu table!\n");
    }
    else
    {
        RT_ASSERT(__CHECKALIGN(mmu_table, PAGE_OFFSET_BIT));
        mmu_set_pagetable((rt_ubase_t)mmu_table);
    }
}
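
/*
 * Usage sketch (illustrative, not part of the port): installing a different
 * page directory, for example when switching to another task's address
 * space.  `next_pgdir` is a hypothetical pointer to that task's page
 * directory.
 *
 *     void example_switch_space(void *next_pgdir)
 *     {
 *         // rt_hw_mmu_switch() records the table and reloads CR3 via
 *         // mmu_set_pagetable(), which also flushes the TLB on x86.
 *         rt_hw_mmu_switch(next_pgdir);
 *     }
 */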
/**
 * Initialize the page-table info: record the virtual address range managed
 * by this rt_mmu_info and check that the range is not already used in vtable.
 */
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, rt_size_t *vtable, rt_size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;

    if ((!mmu_info) || (!vtable))
    {
        return -1;
    }

    va_s = (rt_size_t)v_address;
    va_e = ((rt_size_t)v_address) + size - 1;
    if (va_e < va_s)
    {
        dbg_log(DBG_ERROR, "end=%p lower than start=%p\n", va_e, va_s);
        return -1;
    }

    /* convert the address range to level-1 (page directory) indexes */
    va_s = GET_L1(va_s);
    va_e = GET_L1(va_e);
    if (va_s == 0)
    {
        return -1;
    }

    level = rt_hw_interrupt_disable();
    /* every level-1 entry covering the range must be unused */
    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];
        if (PTE_USED(v))
        {
            rt_hw_interrupt_enable(level);
            return -1;
        }
    }

    va_s = (rt_size_t)v_address;
    va_e = ((rt_size_t)v_address) + size;

    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend   = va_e;
    mmu_info->pv_off = pv_off;

    rt_hw_interrupt_enable(level);
    return 0;
}
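
/*
 * Usage sketch (illustrative): setting up an rt_mmu_info for a user address
 * space.  The start address, size and the page used as page directory are
 * assumptions for the example.
 *
 *     rt_mmu_info info;
 *     rt_size_t *pgdir = (rt_size_t *)rt_pages_alloc(0);   // one page for the page directory
 *
 *     rt_memset(pgdir, 0, ARCH_PAGE_SIZE);
 *     if (rt_hw_mmu_map_init(&info, (void *)0x40000000, 0x10000000, pgdir, 0) != 0)
 *     {
 *         // the range overlaps entries already present in pgdir
 *     }
 */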
void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info, rt_size_t vaddr_start, rt_size_t size)
{
    vaddr_start = vaddr_start & PAGE_ADDR_MASK;
    rt_size_t paddr_start = vaddr_start;
    rt_size_t vaddr_end = vaddr_start + __ALIGNUP(size, PAGE_OFFSET_BIT);
    rt_kprintf("kernel: map on [%p~%p]\n", vaddr_start, vaddr_end);

    pde_t *pdt = (pde_t *)mmu_info->vtable;
    /* number of completely filled page tables, and the PTEs left over */
    rt_size_t pde_nr = (vaddr_end - vaddr_start) / (PTE_PER_PAGE * PAGE_SIZE);
    rt_size_t pte_nr = ((vaddr_end - vaddr_start) / PAGE_SIZE) % PTE_PER_PAGE;
    rt_size_t *pte_addr = (rt_size_t *)PAGE_TABLE_VADDR;
    rt_size_t pde_off = GET_L1(vaddr_start);
    int i, j;

    for (i = 0; i < pde_nr; i++)
    {
        pdt[pde_off + i] = MAKE_PTE(pte_addr, KERNEL_PAGE_ATTR);
        for (j = 0; j < PTE_PER_PAGE; j++)
        {
            pte_addr[j] = MAKE_PTE(paddr_start, KERNEL_PAGE_ATTR);
            paddr_start += PAGE_SIZE;
        }
        pte_addr += PTE_PER_PAGE; /* advance to the next page table: PTE_PER_PAGE entries, i.e. PAGE_SIZE bytes */
    }

    if (pte_nr > 0)
    {
        pdt[pde_off + i] = MAKE_PTE(pte_addr, KERNEL_PAGE_ATTR);
        for (j = 0; j < pte_nr; j++)
        {
            pte_addr[j] = MAKE_PTE(paddr_start, KERNEL_PAGE_ATTR);
            paddr_start += PAGE_SIZE;
        }
    }
}
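
/*
 * Worked example for the pde_nr/pte_nr split above (4 KB pages, 1024 PTEs
 * per page table, i.e. 4 MB per page directory entry): mapping 9 MB of
 * kernel space gives
 *
 *     pde_nr = 9 MB / 4 MB              = 2 full page tables
 *     pte_nr = (9 MB / 4 KB) % 1024     = 2304 % 1024 = 256 remaining PTEs
 *
 * so two PDEs are filled completely and a third PDE gets a partially
 * filled page table of 256 entries.
 */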
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t npages, rt_size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        l1_off = GET_L1(loop_va);
        l2_off = GET_L2(loop_va);
        mmu_l1 = (size_t *)mmu_info->vtable + l1_off;
        if (PTE_USED(*mmu_l1))
        {
            mmu_l2 = (size_t *)GET_PADDR(*mmu_l1);
            rt_page_ref_inc(mmu_l2, 0); /* mmu l2 ref inc when map */
            mmu_l2 += l2_off;
        }
        else
        {
            mmu_l2 = (size_t *)rt_pages_alloc(0);
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_SIZE);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_SIZE);
                *mmu_l1 = MAKE_PTE((size_t)mmu_l2, attr | PTE_P);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
                mmu_l2 += l2_off;
            }
            else
            {
                /* error, unmap and quit */
                __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
                return -1;
            }
        }

        *mmu_l2 = MAKE_PTE(loop_pa, attr | PTE_P);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));

        loop_va += ARCH_PAGE_SIZE;
        loop_pa += ARCH_PAGE_SIZE;
    }
    return 0;
}
#ifdef RT_USING_USERSPACE
// check whether the given range of virtual addresses is free
static int check_vaddr(rt_mmu_info *mmu_info, void *va, rt_size_t pages)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)va, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off;
    rt_size_t *mmu_l1, *mmu_l2;

    if (!pages)
    {
        dbg_log(DBG_ERROR, "%s: check vaddr=%p pages=zero!\n", __func__, va);
        return -1;
    }
    if (!mmu_info)
    {
        dbg_log(DBG_ERROR, "%s: check vaddr=%p pages=%d mmu NULL!\n", __func__, va, pages);
        return -1;
    }

    while (pages--)
    {
        l1_off = GET_L1(loop_va);
        l2_off = GET_L2(loop_va);
        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;
        if (PTE_USED(*mmu_l1))
        {
            mmu_l2 = ((rt_size_t *)GET_PADDR(*mmu_l1)) + l2_off;
            if (PTE_USED(*mmu_l2))
            {
                dbg_log(DBG_ERROR, "%s: check vaddr=%p pages=%d mmu l2 used %p->%x!\n", __func__, va, pages, mmu_l2, *mmu_l2);
                return -1;
            }
        }
        loop_va += PAGE_SIZE;
    }
    return 0;
}
#endif /* RT_USING_USERSPACE */
// find a free range of virtual addresses large enough for the requested number of pages
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t va;
    size_t find_va = 0;
    int n = 0;
    size_t start, end;

    if (!pages)
    {
        return 0;
    }
    if (!mmu_info)
    {
        return 0;
    }

    start = mmu_info->vstart;
    end = mmu_info->vend;
    va = mmu_info->vstart;
    for (; start < end; start += ARCH_PAGE_SIZE, va += ARCH_PAGE_SIZE)
    {
        if (_rt_hw_mmu_v2p(mmu_info, (void *)va))
        {
            n = 0;
            find_va = 0;
            continue;
        }
        if (!find_va)
        {
            find_va = va;
        }
        n++;
        if (n >= pages)
        {
            return find_va;
        }
    }
    return 0;
}
#ifdef RT_USING_USERSPACE
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
{
    rt_size_t pa_s, pa_e;
    rt_size_t vaddr;
    rt_size_t pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    pa_s = (rt_size_t)p_addr;
    pa_e = ((rt_size_t)p_addr) + size - 1;
    pa_s = GET_PF_ID(pa_s);
    pa_e = GET_PF_ID(pa_e);
    pages = pa_e - pa_s + 1;

    if (v_addr)
    {
        vaddr = (rt_size_t)v_addr;
        pa_s = (rt_size_t)p_addr;
        if (GET_PF_OFFSET(vaddr) != GET_PF_OFFSET(pa_s))
        {
            return 0;
        }
        vaddr = __UMASKVALUE(vaddr, PAGE_OFFSET_MASK);
        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            dbg_log(DBG_ERROR, "%s: check vaddr=%p pages=%d failed!\n", __func__, vaddr, pages);
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr | GET_PF_OFFSET((rt_size_t)p_addr));
        }
    }
    return 0;
}
#else
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;

    vaddr = find_vaddr(mmu_info, pages);
    if (vaddr)
    {
        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
    }
    return 0;
}
#endif /* RT_USING_USERSPACE */
#ifdef RT_USING_USERSPACE
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages, rt_size_t attr)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t loop_pa;
    rt_size_t i;
    rt_size_t left_npages = npages;
    rt_size_t used_npages;
    void *va, *pa;

    if (!mmu_info)
    {
        return -1;
    }

    while (left_npages)
    {
        loop_pa = (rt_size_t)rt_pages_alloc(0);
        if (!loop_pa)
        {
            goto err;
        }
        rt_memset((void *)loop_pa, 0, ARCH_PAGE_SIZE);
        if (__rt_hw_mmu_map(mmu_info, (void *)loop_va, (void *)loop_pa, 1, attr) < 0)
        {
            rt_pages_free((void *)loop_pa, 0); /* free the unmapped physical page first */
            goto err;
        }
        --left_npages;
        loop_va += PAGE_SIZE;
    }
    return 0;

err:
    /* roll back: free the physical pages already mapped, then unmap them */
    va = (void *)__UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    used_npages = npages - left_npages;
    for (i = 0; i < used_npages; i++)
    {
        pa = rt_hw_mmu_v2p(mmu_info, va);
        if (pa)
        {
            rt_pages_free(pa, 0);
        }
        va = (void *)((rt_uint8_t *)va + PAGE_SIZE);
    }
    __rt_hw_mmu_unmap(mmu_info, v_addr, used_npages);
    return -1;
}
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
{
    rt_size_t vaddr;
    rt_size_t offset;
    rt_size_t pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    offset = GET_PF_OFFSET((rt_size_t)v_addr);
    size += (offset + ARCH_PAGE_SIZE - 1);
    pages = size >> PAGE_OFFSET_BIT;

    if (v_addr)
    {
        vaddr = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            dbg_log(DBG_ERROR, "_rt_hw_mmu_map_auto: check vaddr %p on pages %d failed!\n", vaddr, pages);
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map_auto(mmu_info, (void *)vaddr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr | offset);
        }
        dbg_log(DBG_ERROR, "_rt_hw_mmu_map_auto: do __rt_hw_mmu_map_auto failed!\n");
    }
    else
    {
        dbg_log(DBG_ERROR, "_rt_hw_mmu_map_auto: get vaddr failed!\n");
    }
    return 0;
}
#endif /* RT_USING_USERSPACE */
/**
 * Unmap npages starting at v_addr: clear each page-table entry, and when the
 * reference count of a page table drops to zero, free the page table and
 * clear its page-directory entry as well.
 */
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off;
    rt_size_t *mmu_l1, *mmu_l2;

    RT_ASSERT(mmu_info);
    if ((rt_size_t)v_addr < mmu_info->vstart || (rt_size_t)v_addr >= mmu_info->vend)
    {
        dbg_log(DBG_ERROR, "unmap vaddr %p out of range [%p~%p)\n", v_addr, mmu_info->vstart, mmu_info->vend);
        return;
    }

    while (npages--)
    {
        l1_off = (rt_size_t)GET_L1(loop_va);
        l2_off = (rt_size_t)GET_L2(loop_va);
        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;
        if (!PTE_USED(*mmu_l1))
        {
            dbg_log(DBG_ERROR, "unmap vaddr %p mmu l1 unused %p->%x\n", v_addr, mmu_l1, *mmu_l1);
        }
        RT_ASSERT(PTE_USED(*mmu_l1));

        mmu_l2 = (rt_size_t *)(GET_PADDR(*mmu_l1)) + l2_off;
        if (!PTE_USED(*mmu_l2))
        {
            dbg_log(DBG_ERROR, "unmap vaddr %p mmu l2 unused %p->%x\n", v_addr, mmu_l2, *mmu_l2);
        }
        RT_ASSERT(PTE_USED(*mmu_l2));

        *mmu_l2 = 0; /* clear page table entry */
        rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));

        mmu_l2 -= l2_off; /* back to the page-aligned base of the page table */
        if (rt_pages_free(mmu_l2, 0)) /* last reference gone: the page table is empty */
        {
            *mmu_l1 = 0; /* clear page directory entry */
            rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
        }
        loop_va += PAGE_SIZE;
    }
}
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
{
    rt_size_t va_s, va_e;
    rt_size_t pages;

    va_s = ((rt_size_t)v_addr) >> PAGE_OFFSET_BIT;
    va_e = (((rt_size_t)v_addr) + size - 1) >> PAGE_OFFSET_BIT;
    pages = va_e - va_s + 1;
    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
}
#ifdef RT_USING_USERSPACE
/**
 * Map size bytes at physical address p_addr into vtable with attribute attr.
 *
 * If v_addr == RT_NULL, a free virtual address range is chosen automatically.
 *
 * Returns the start virtual address on success, RT_NULL on failure.
 */
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return ret;
}
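
/*
 * Usage sketch (illustrative): mapping a device's MMIO region and releasing
 * it again.  It assumes an rt_mmu_info called `mmu_info` already set up via
 * rt_hw_mmu_map_init(); the physical address and length are made up for the
 * example, and the attribute reuses KERNEL_PAGE_ATTR from mmu.h.
 *
 *     void *regs = rt_hw_mmu_map(&mmu_info, RT_NULL,           // let the MMU pick a vaddr
 *                                (void *)0xFEC00000, 0x1000,    // 4 KB of device registers
 *                                KERNEL_PAGE_ATTR);
 *     if (regs != RT_NULL)
 *     {
 *         // ... access the registers through `regs` ...
 *         rt_hw_mmu_unmap(&mmu_info, regs, 0x1000);
 *     }
 */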
/**
 * Map size bytes into vtable with attribute attr, allocating the backing
 * physical pages automatically.
 *
 * If v_addr == RT_NULL, a free virtual address range is chosen automatically.
 *
 * Returns the start virtual address on success, RT_NULL on failure.
 */
void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return ret;
}
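
/*
 * Usage sketch (illustrative): allocating an anonymous, zero-filled region
 * backed by freshly allocated physical pages.  The size and attribute are
 * assumptions for the example; `mmu_info` is as in the sketch above.
 *
 *     void *buf = rt_hw_mmu_map_auto(&mmu_info, RT_NULL, 16 * 1024,
 *                                    KERNEL_PAGE_ATTR);
 *     if (buf == RT_NULL)
 *     {
 *         // either no free virtual range or a physical page allocation failed
 *     }
 */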
#else
/**
 * Map size bytes at physical address p_addr into vtable with attribute attr.
 *
 * Returns the start virtual address on success, RT_NULL on failure.
 */
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *p_addr, size_t size, size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map(mmu_info, p_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return ret;
}
#endif
/**
 * Unmap v_addr from vtable, freeing page tables that become empty.
 */
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_hw_interrupt_enable(level);
}
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    size_t l1 = GET_L1((size_t)v_addr);
    pde_t *pde = &mmu_info->vtable[l1];

    if (*pde & PTE_P)
    {
        size_t *pte_addr = (size_t *)GET_PADDR(*pde);
        size_t l2 = GET_L2((size_t)v_addr);
        pte_t *pte = (pte_t *)&pte_addr[l2];
        if (*pte & PTE_P)
        {
            return (void *)(GET_PADDR(*pte) | GET_PF_OFFSET((rt_size_t)v_addr));
        }
    }
    return RT_NULL;
}
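
/*
 * Worked example of the two-level walk above, for 32-bit x86 with 4 KB
 * pages: the virtual address splits into a 10-bit page-directory index, a
 * 10-bit page-table index and a 12-bit offset.  For v_addr = 0x08048123:
 *
 *     l1     = 0x08048123 >> 22           = 0x20  (page directory index)
 *     l2     = (0x08048123 >> 12) & 0x3FF = 0x48  (page table index)
 *     offset = 0x08048123 & 0xFFF         = 0x123
 *
 * The returned physical address is GET_PADDR(*pte) | 0x123.
 */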
#ifdef RT_DEBUG_MMU_X86
void *_rt_hw_mmu_v2p_with_dbg(rt_mmu_info *mmu_info, void *v_addr)
{
    rt_kprintf("v2p: mmu vtable=%p, vaddr=%p\n", mmu_info->vtable, v_addr);
    size_t l1 = GET_L1((size_t)v_addr);
    rt_kprintf("=>L1=%d ", l1);
    pde_t *pde = &mmu_info->vtable[l1];
    rt_kprintf("pde=>%p:%x (%x|%x)\n", pde, *pde, GET_PADDR(*pde), GET_PATTR(*pde));
    if (*pde & PTE_P)
    {
        size_t *pte_addr = (size_t *)GET_PADDR(*pde);
        size_t l2 = GET_L2((size_t)v_addr);
        rt_kprintf(" =>L2=%d ", l2);
        pte_t *pte = (pte_t *)&pte_addr[l2];
        rt_kprintf("pte=>%p:%x (%x|%x)\n", pte, *pte, GET_PADDR(*pte), GET_PATTR(*pte));
        if (*pte & PTE_P)
        {
            rt_kprintf(" =>paddr:%p\n", GET_PADDR(*pte));
            return (void *)GET_PADDR(*pte);
        }
    }
    rt_kprintf("v2p: mmu v2p %p failed!\n", v_addr);
    return RT_NULL;
}
#endif
/**
 * Translate a virtual address to its physical address.
 *
 * Returns the physical address on success, RT_NULL on failure.
 */
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
#ifdef RT_DEBUG_MMU_X86
    ret = _rt_hw_mmu_v2p_with_dbg(mmu_info, v_addr);
#else
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
#endif
    rt_hw_interrupt_enable(level);
    return ret;
}
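
/*
 * Usage sketch (illustrative): checking whether a user-supplied pointer is
 * actually backed by a physical page before touching it.  `user_ptr` and
 * `mmu_info` are assumptions for the example.
 *
 *     void *paddr = rt_hw_mmu_v2p(&mmu_info, user_ptr);
 *     if (paddr == RT_NULL)
 *     {
 *         // not mapped: reject the request instead of faulting
 *     }
 */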
void mmu_set_pagetable(rt_ubase_t addr)
{
    /* loading a new page directory into CR3 also flushes the TLB */
    write_cr3(addr);
}

void mmu_enable_user_page_access(void)
{
    /* nothing to do on this x86 port */
}

void mmu_disable_user_page_access(void)
{
    /* nothing to do on this x86 port */
}

void mmu_enable(void)
{
    /* turn on paging by setting the PG bit in CR0 */
    write_cr0(read_cr0() | CR0_PG);
}