mmu.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 * 2020-07-26     lizhirui     porting to ls2k
 */

#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include <stddef.h>

#include "cache.h"
#include "mips_mmu.h"
#include "mmu.h"
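
/*
 * current_mmu_table records the page table that is currently active.
 * switch_mmu() installs the new table pointer and flushes both the data and
 * instruction TLBs so no stale translations survive the switch.
 */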
void *current_mmu_table = RT_NULL;

void *mmu_table_get()
{
    return current_mmu_table;
}

void switch_mmu(void *mmu_table)
{
    current_mmu_table = mmu_table;

    mmu_clear_tlb();
    mmu_clear_itlb();
}
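
/*
 * Page-table dump helpers. The bit fields decoded here (AP, XN, TEX/C/B,
 * domain, section/super-section, LPAE) follow the ARM short-descriptor
 * format; they appear to have been carried over from the ARM port of this
 * file and are only used for debugging output.
 */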
/* dump 2nd level page table */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;

    for (i = 0; i < 256; i++)
    {
        rt_uint32_t pte2 = ptb[i];
        if ((pte2 & 0x3) == 0)
        {
            if (fcnt == 0)
                rt_kprintf(" ");
            rt_kprintf("%04x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }

        rt_kprintf(" %04x: %x: ", i, pte2);
        if ((pte2 & 0x3) == 0x1)
        {
            rt_kprintf("L,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4)) & 0xf,
                       (pte2 >> 15) & 0x1,
                       ((pte2 >> 10) | (pte2 >> 2)) & 0x1f);
        }
        else
        {
            rt_kprintf("S,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4)) & 0xf, pte2 & 0x1,
                       ((pte2 >> 4) | (pte2 >> 2)) & 0x1f);
        }
    }
}
void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;

    rt_kprintf("page table@%p\n", ptb);
    for (i = 0; i < 1024 * 4; i++)
    {
        rt_uint32_t pte1 = ptb[i];
        if ((pte1 & 0x3) == 0)
        {
            rt_kprintf("%03x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }

        rt_kprintf("%03x: %08x: ", i, pte1);
        if ((pte1 & 0x3) == 0x3)
        {
            rt_kprintf("LPAE\n");
        }
        else if ((pte1 & 0x3) == 0x1)
        {
            rt_kprintf("pte,ns:%d,domain:%d\n",
                       (pte1 >> 3) & 0x1, (pte1 >> 5) & 0xf);
            /*
             *rt_hw_cpu_dump_page_table_2nd((void*)((pte1 & 0xfffffc000)
             * - 0x80000000 + 0xC0000000));
             */
        }
        else if (pte1 & (1 << 18))
        {
            rt_kprintf("super section,ns:%d,ap:%x,xn:%d,texcb:%02x\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10)) & 0xf,
                       (pte1 >> 4) & 0x1,
                       ((pte1 >> 10) | (pte1 >> 2)) & 0x1f);
        }
        else
        {
            rt_kprintf("section,ns:%d,ap:%x,"
                       "xn:%d,texcb:%02x,domain:%d\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10)) & 0xf,
                       (pte1 >> 4) & 0x1,
                       (((pte1 & (0x7 << 12)) >> 10) |
                        ((pte1 & 0x0c) >> 2)) & 0x1f,
                       (pte1 >> 5) & 0xf);
        }
    }
}
/* level1 page table, each entry for 1MB memory. */
volatile unsigned long MMUTable[4 * 1024] __attribute__((aligned(16 * 1024)));
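
/*
 * rt_hw_mmu_setmtt(): fill level-1 section entries so that the virtual range
 * [vaddrStart, vaddrEnd] maps onto physical memory starting at paddrStart,
 * one 1MB section per entry, with the given attribute bits.
 */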
void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart,
                      rt_uint32_t vaddrEnd,
                      rt_uint32_t paddrStart,
                      rt_uint32_t attr)
{
    volatile rt_uint32_t *pTT;
    volatile int i, nSec;

    pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
    nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
    for (i = 0; i <= nSec; i++)
    {
        *pTT = attr | (((paddrStart >> 20) + i) << 20);
        pTT++;
    }
}
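
/*
 * On ARM this would read and write the cp15 domain access control register;
 * those accesses are commented out in this MIPS port, so the function is
 * effectively a stub and simply returns 0.
 */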
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long old_domain = 0;

    //asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    //asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");

    return old_domain;
}
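
/*
 * rt_hw_init_mmu_table(): build the static MMUTable from an array of
 * mem_desc regions, then clean it from the data cache so the hardware
 * table walk sees the new entries.
 */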
void rt_hw_cpu_dcache_clean(void *addr, int size);

void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
    /* set page table */
    for (; size > 0; size--)
    {
        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                         mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }

    rt_hw_cpu_dcache_clean((void *)MMUTable, sizeof MMUTable);
}
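
/*
 * rt_hw_mmu_init(): flush and disable the caches, disable the MMU, install
 * MMUTable as the translation base, then re-enable the MMU and caches.
 */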
void rt_hw_mmu_init(void)
{
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();

    /*rt_hw_cpu_dump_page_table(MMUTable);*/
    rt_hw_set_domain_register(0x55555555);

    rt_cpu_tlb_set(MMUTable);

    rt_cpu_mmu_enable();

    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}
/*
 * mem map
 */

void rt_hw_cpu_dcache_clean(void *addr, int size);
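
/*
 * rt_hw_mmu_map_init(): initialize an rt_mmu_info descriptor with the
 * level-1 table, the managed virtual range (in 1MB sections) and the
 * physical/virtual offset, after checking that the requested range is not
 * already in use.
 */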
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, size_t size, size_t *vtable, size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;

    if (!mmu_info || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

    level = rt_hw_interrupt_disable();
    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v & ARCH_MMU_USED_MASK)
        {
            rt_kprintf("Error:vtable[%d] = 0x%p(is not zero),va_s = 0x%p,va_e = 0x%p!\n", l1_off, v, va_s, va_e);
            rt_hw_interrupt_enable(level);
            return -1;
        }
    }

    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;

    rt_hw_interrupt_enable(level);

    return 0;
}
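
/*
 * find_vaddr(): scan the managed virtual range for 'pages' consecutive
 * unmapped 4KB pages and return the first suitable virtual address, or 0
 * if no free run is found.
 */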
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t find_off = 0;
    size_t find_va = 0;
    int n = 0;

    if (!pages)
    {
        return 0;
    }

    if (!mmu_info)
    {
        return 0;
    }

    for (l1_off = mmu_info->vstart; l1_off <= mmu_info->vend; l1_off++)
    {
        mmu_l1 = (size_t *)mmu_info->vtable + l1_off;
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            for (l2_off = 0; l2_off < (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE); l2_off++)
            {
                if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
                {
                    /* in use */
                    n = 0;
                }
                else
                {
                    if (!n)
                    {
                        find_va = l1_off;
                        find_off = l2_off;
                    }
                    n++;
                    if (n >= pages)
                    {
                        return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
                    }
                }
            }
        }
        else
        {
            if (!n)
            {
                find_va = l1_off;
                find_off = 0;
            }
            n += (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
            if (n >= pages)
            {
                return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
            }
        }
    }

    return 0;
}
#ifdef RT_USING_SMART
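/*
 * check_vaddr(): verify that 'pages' consecutive 4KB pages starting at va
 * are not already mapped; returns 0 if the range is free, -1 otherwise.
 */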
static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
{
    size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    if (!pages)
    {
        return -1;
    }

    if (!mmu_info)
    {
        return -1;
    }

    while (pages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t *)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
            {
                return -1;
            }
        }

        loop_va += ARCH_PAGE_SIZE;
    }

    return 0;
}
#endif
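
/*
 * __rt_hw_mmu_unmap(): clear the level-2 entries for npages pages starting
 * at v_addr. Each level-2 table keeps a reference count in the word
 * immediately after its page-table entries; when it drops to zero the table
 * itself is freed and the level-1 entry is cleared.
 */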
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t npages)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;

    if (!mmu_info)
    {
        return;
    }

    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend)
        {
            return;
        }

        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t *)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            return;
        }

        if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
        {
            *(mmu_l2 + l2_off) = 0;
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

            ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
            (*ref_cnt)--;
            if (!*ref_cnt)
            {
#ifdef RT_USING_SMART
                rt_pages_free(mmu_l2, 0);
#else
                rt_free_align(mmu_l2);
#endif
                *mmu_l1 = 0;
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
        }

        loop_va += ARCH_PAGE_SIZE;
    }
}
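
/*
 * __rt_hw_mmu_map(): create level-2 entries mapping npages pages at v_addr
 * to p_addr with the given attributes, allocating (and zeroing) a level-2
 * table on demand and bumping its reference count for every new entry.
 * On allocation failure __rt_hw_mmu_unmap() is called to undo the partial
 * mapping before returning -1.
 */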
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t *)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
#ifdef RT_USING_SMART
            mmu_l2 = (size_t *)rt_pages_alloc(0);
#else
            mmu_l2 = (size_t *)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
#endif
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);

                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
            {
                /* error, unmap and quit */
                __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
                return -1;
            }
        }

        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
        (*ref_cnt)++;

        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

        loop_va += ARCH_PAGE_SIZE;
        loop_pa += ARCH_PAGE_SIZE;
    }

    return 0;
}
static void rt_hw_cpu_tlb_invalidate(void)
{
    mmu_clear_tlb();
    mmu_clear_itlb();
}
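
/*
 * _rt_hw_mmu_map(): map a physical range into virtual address space. With
 * RT_USING_SMART a caller-supplied virtual address can be validated with
 * check_vaddr() and used directly; otherwise a free virtual range is always
 * searched for with find_vaddr().
 */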
#ifdef RT_USING_SMART
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;

    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        pa_s = (size_t)p_addr;
        if ((vaddr & ARCH_PAGE_MASK) != (pa_s & ARCH_PAGE_MASK))
        {
            return 0;
        }
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
    }

    return 0;
}
#else
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;

    vaddr = find_vaddr(mmu_info, pages);
    if (vaddr)
    {
        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
    }

    return 0;
}
#endif
#ifdef RT_USING_SMART
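/*
 * __rt_hw_mmu_map_auto(): like __rt_hw_mmu_map(), but the backing physical
 * pages are allocated here (one per 4KB mapping) with rt_pages_alloc(); on
 * failure the routine attempts to roll back the partially created mapping
 * and free the pages again.
 */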
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        loop_pa = (size_t)rt_pages_alloc(0) + mmu_info->pv_off;
        if (!loop_pa)
            goto err;

        //rt_kprintf("vaddr = %08x is mapped to paddr = %08x\n",v_addr,loop_pa);
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t *)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            //mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
            mmu_l2 = (size_t *)rt_pages_alloc(0);
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);

                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
                goto err;
        }

        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
        (*ref_cnt)++;

        //loop_pa += mmu_info->pv_off;
        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

        loop_va += ARCH_PAGE_SIZE;
    }

    return 0;

err:
    {
        /* error, unmap and quit */
        int i;
        void *va, *pa;

        va = (void *)((size_t)v_addr & ~ARCH_PAGE_MASK);
        for (i = 0; i < npages; i++)
        {
            pa = rt_hw_mmu_v2p(mmu_info, va);
            pa -= mmu_info->pv_off;
            rt_pages_free(pa, 0);
            va += ARCH_PAGE_SIZE;
        }

        __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
        return -1;
    }
}
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    size_t vaddr;
    size_t offset;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    offset = (size_t)v_addr & ARCH_PAGE_MASK;
    size += (offset + ARCH_PAGE_SIZE - 1);
    pages = (size >> ARCH_PAGE_SHIFT);

    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map_auto(mmu_info, (void *)vaddr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)vaddr + offset;
        }
    }

    return 0;
}
#endif
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
{
    size_t va_s, va_e;
    int pages;

    va_s = (size_t)v_addr;
    va_e = (size_t)v_addr + size - 1;
    va_s >>= ARCH_PAGE_SHIFT;
    va_e >>= ARCH_PAGE_SHIFT;
    pages = va_e - va_s + 1;

    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
}
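
/*
 * Kernel address translation helpers. Note that rt_hw_kernel_virt_to_phys()
 * is currently a stub and always returns 0; only the physical-to-virtual
 * direction is implemented (via rt_hw_mmu_map() under RT_USING_SMART,
 * otherwise as an identity mapping).
 */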
//va --> pa
void *rt_hw_kernel_virt_to_phys(void *v_addr, size_t size)
{
    void *p_addr = 0;

    return p_addr;
}

//pa --> va
void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
{
    void *v_addr = 0;

#ifdef RT_USING_SMART
    extern rt_mmu_info mmu_info;

    v_addr = rt_hw_mmu_map(&mmu_info, 0, p_addr, size, MMU_MAP_K_RW);
#else
    v_addr = p_addr;
#endif

    return v_addr;
}
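
/*
 * Public wrappers: each rt_hw_mmu_* entry point simply serializes the
 * corresponding _rt_hw_mmu_* helper by disabling interrupts around it.
 */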
#ifdef RT_USING_SMART
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_hw_interrupt_enable(level);

    return ret;
}

void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_hw_interrupt_enable(level);

    return ret;
}
#endif
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_hw_interrupt_enable(level);
}
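
/*
 * _rt_hw_mmu_v2p(): walk the two-level page table and translate a virtual
 * address to its physical address; returns 0 if the address is unmapped or
 * uses an unsupported entry type (large page or super section).
 */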
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t tmp;
    size_t pa;

    l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;

    if (!mmu_info)
    {
        return (void *)0;
    }

    mmu_l1 = (size_t *)mmu_info->vtable + l1_off;
    tmp = *mmu_l1;

    switch (tmp & ARCH_MMU_USED_MASK)
    {
    case 0: /* not used */
        break;

    case 1: /* page table */
        mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        pa = *(mmu_l2 + l2_off);
        if (pa & ARCH_MMU_USED_MASK)
        {
            if ((pa & ARCH_MMU_USED_MASK) == 1)
            {
                /* large page, not supported */
                break;
            }
            pa &= ~(ARCH_PAGE_MASK);
            pa += ((size_t)v_addr & ARCH_PAGE_MASK);
            return (void *)pa;
        }
        break;

    case 2:
    case 3:
        /* section */
        if (tmp & ARCH_TYPE_SUPERSECTION)
        {
            /* super section, not supported */
            break;
        }
        pa = (tmp & ~ARCH_SECTION_MASK);
        pa += ((size_t)v_addr & ARCH_SECTION_MASK);
        return (void *)pa;
    }

    return (void *)0;
}
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_hw_interrupt_enable(level);

    return ret;
}
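
/*
 * init_mm_setup(): build the early boot page table of 1MB sections. Each
 * section in the kernel's virtual range is mapped to its physical location
 * (virtual address plus pv_off), the corresponding physical alias range is
 * identity-mapped, and every other entry is left as a fault.
 */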
#ifdef RT_USING_SMART
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
{
    unsigned int va;

    for (va = 0; va < 0x1000; va++) {
        unsigned int vaddr = (va << 20);

        if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size) {
            mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
        } else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size) {
            mtbl[va] = (va << 20) | NORMAL_MEM;
        } else {
            mtbl[va] = 0;
        }
    }
}
#endif