mmu.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879
  1. /*
  2. * Copyright (c) 2006-2018, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2012-01-10 bernard porting to AM1808
  9. */
  10. #include <rtthread.h>
  11. #include <rthw.h>
  12. #include <board.h>
  13. #include "cp15.h"
  14. #include "mmu.h"
  15. #ifdef RT_USING_USERSPACE
  16. #include "page.h"
  17. #endif
/* dump 2nd level page table */
/*
 * Print a human-readable dump of one ARMv7 second-level (L2) page table
 * via rt_kprintf.
 *
 * ptb: pointer to the L2 table (256 word-sized descriptors, 4 KB each).
 *
 * Fault entries (low two bits == 0) are batched: their indices are
 * printed up to 16 per row, the row terminated by the word "fault".
 * Mapped entries print the raw descriptor plus decoded fields
 * ("L" = type 0b01 large page, "S" = other small-page types).
 *
 * NOTE(review): the field extractions OR two shifted copies of the
 * descriptor together (e.g. ((pte2 >> 7) | (pte2 >> 4)) & 0xf); whether
 * they decode AP/XN/TEX:C:B exactly as intended should be verified
 * against the ARMv7-A short-descriptor format -- output is diagnostic
 * only.
 * NOTE(review): if the table ends in fewer than 16 consecutive fault
 * entries, the trailing "fault" label is never printed.
 */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;   /* consecutive fault entries on the current output row */

    for (i = 0; i < 256; i++)
    {
        rt_uint32_t pte2 = ptb[i];

        if ((pte2 & 0x3) == 0)
        {
            /* fault entry: indent only at the start of a new row */
            if (fcnt == 0)
                rt_kprintf(" ");
            rt_kprintf("%04x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }

        /* flush a partially filled fault row before a mapped entry */
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }

        rt_kprintf(" %04x: %x: ", i, pte2);
        if ((pte2 & 0x3) == 0x1)
        {
            /* type 0b01: large page descriptor */
            rt_kprintf("L,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4))& 0xf,
                       (pte2 >> 15) & 0x1,
                       ((pte2 >> 10) | (pte2 >> 2)) & 0x1f);
        }
        else
        {
            /* type 0b1x: small page descriptor */
            rt_kprintf("S,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4))& 0xf, pte2 & 0x1,
                       ((pte2 >> 4) | (pte2 >> 2)) & 0x1f);
        }
    }
}
/*
 * Print all 4096 first-level descriptors of an ARMv7 L1 translation
 * table via rt_kprintf.
 *
 * ptb: pointer to the 4096-entry (16 KB) L1 table.
 *
 * Fault entries are batched 16 per row; other entries are decoded as
 * page-table pointer, section or supersection descriptors.
 *
 * NOTE(review): as in the L2 dump, a trailing partial fault row never
 * gets its "fault" label; and the AP/TEX extractions OR shifted copies
 * of the descriptor together -- verify against the ARMv7-A
 * short-descriptor format before relying on the decoded values.
 */
void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;   /* consecutive fault entries on the current output row */

    rt_kprintf("page table@%p\n", ptb);
    for (i = 0; i < 1024*4; i++)
    {
        rt_uint32_t pte1 = ptb[i];

        if ((pte1 & 0x3) == 0)
        {
            /* fault entry */
            rt_kprintf("%03x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }

        /* flush a partially filled fault row before a mapped entry */
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }

        rt_kprintf("%03x: %08x: ", i, pte1);
        if ((pte1 & 0x3) == 0x3)
        {
            /* type 0b11: flagged here as LPAE (not decoded further) */
            rt_kprintf("LPAE\n");
        }
        else if ((pte1 & 0x3) == 0x1)
        {
            /* type 0b01: pointer to a second-level page table */
            rt_kprintf("pte,ns:%d,domain:%d\n",
                       (pte1 >> 3) & 0x1, (pte1 >> 5) & 0xf);
            /*
             *rt_hw_cpu_dump_page_table_2nd((void*)((pte1 & 0xfffffc000)
             * - 0x80000000 + 0xC0000000));
             */
        }
        else if (pte1 & (1 << 18))
        {
            /* type 0b10 with bit 18 set: 16 MB supersection */
            rt_kprintf("super section,ns:%d,ap:%x,xn:%d,texcb:%02x\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10))& 0xf,
                       (pte1 >> 4) & 0x1,
                       ((pte1 >> 10) | (pte1 >> 2)) & 0x1f);
        }
        else
        {
            /* type 0b10: 1 MB section */
            rt_kprintf("section,ns:%d,ap:%x,"
                       "xn:%d,texcb:%02x,domain:%d\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10))& 0xf,
                       (pte1 >> 4) & 0x1,
                       (((pte1 & (0x7 << 12)) >> 10) |
                        ((pte1 & 0x0c) >> 2)) & 0x1f,
                       (pte1 >> 5) & 0xf);
        }
    }
}
/* level1 page table, each entry for 1MB memory. */
/* 4096 word entries cover the whole 4 GB virtual address space; the
 * 16 KB alignment is required for the L1 translation table base. */
volatile unsigned long MMUTable[4*1024] __attribute__((aligned(16*1024)));
  121. void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart,
  122. rt_uint32_t vaddrEnd,
  123. rt_uint32_t paddrStart,
  124. rt_uint32_t attr)
  125. {
  126. volatile rt_uint32_t *pTT;
  127. volatile int i, nSec;
  128. pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
  129. nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
  130. for(i = 0; i <= nSec; i++)
  131. {
  132. *pTT = attr | (((paddrStart >> 20) + i) << 20);
  133. pTT++;
  134. }
  135. }
/*
 * Replace the contents of the Domain Access Control Register
 * (CP15 c3, DACR) with domain_val and return the previous value.
 */
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long old_domain;

    /* read the current DACR */
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    /* write the new DACR; "memory" keeps the compiler from reordering
     * memory accesses around the permission change */
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");

    return old_domain;
}
  143. void rt_hw_cpu_dcache_clean(void *addr, int size);
  144. void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
  145. {
  146. /* set page table */
  147. for(; size > 0; size--)
  148. {
  149. rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
  150. mdesc->paddr_start, mdesc->attr);
  151. mdesc++;
  152. }
  153. rt_hw_cpu_dcache_clean((void*)MMUTable, sizeof MMUTable);
  154. }
/*
 * Switch on the MMU using the static MMUTable.
 *
 * The sequence is order-critical:
 *   1. clean/flush the D-cache and flush the I-cache while still valid,
 *   2. disable both caches and the MMU,
 *   3. program the domain register (0x55555555: every domain set to
 *      "client", so the access permissions in the entries are enforced),
 *   4. load the translation table base and enable the MMU,
 *   5. re-enable the caches.
 */
void rt_hw_mmu_init(void)
{
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();

    /*rt_hw_cpu_dump_page_table(MMUTable);*/
    rt_hw_set_domain_register(0x55555555);
    rt_cpu_tlb_set(MMUTable);
    rt_cpu_mmu_enable();

    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}
  169. /*
  170. mem map
  171. */
  172. void rt_hw_cpu_dcache_clean(void *addr, int size);
  173. int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off)
  174. {
  175. size_t l1_off, va_s, va_e;
  176. rt_base_t level;
  177. if (!mmu_info || !vtable)
  178. {
  179. return -1;
  180. }
  181. va_s = (size_t)v_address;
  182. va_e = (size_t)v_address + size - 1;
  183. if ( va_e < va_s)
  184. {
  185. return -1;
  186. }
  187. va_s >>= ARCH_SECTION_SHIFT;
  188. va_e >>= ARCH_SECTION_SHIFT;
  189. if (va_s == 0)
  190. {
  191. return -1;
  192. }
  193. level = rt_hw_interrupt_disable();
  194. for (l1_off = va_s; l1_off <= va_e; l1_off++)
  195. {
  196. size_t v = vtable[l1_off];
  197. if (v & ARCH_MMU_USED_MASK)
  198. {
  199. rt_hw_interrupt_enable(level);
  200. return -1;
  201. }
  202. }
  203. mmu_info->vtable = vtable;
  204. mmu_info->vstart = va_s;
  205. mmu_info->vend = va_e;
  206. mmu_info->pv_off = pv_off;
  207. rt_hw_interrupt_enable(level);
  208. return 0;
  209. }
/*
 * Pre-install second-level page tables for the kernel ioremap window
 * [v_address, v_address + size).  Compiled to a no-op returning 0
 * unless RT_IOREMAP_LATE is defined.
 *
 * v_address must lie at or above KERNEL_VADDR_START, and both
 * v_address and size must be 1 MB section aligned.
 * Returns 0 on success, -1 on bad arguments or allocation failure.
 *
 * NOTE(review): on allocation failure this returns -1 immediately,
 * leaving L2 tables for the sections already processed installed.
 */
int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
{
#ifdef RT_IOREMAP_LATE
    size_t loop_va;
    size_t l1_off;
    size_t *mmu_l1, *mmu_l2;
    size_t sections;
    size_t *ref_cnt;

    /* for kernel ioremap */
    if ((size_t)v_address < KERNEL_VADDR_START)
    {
        return -1;
    }
    /* must align to section */
    if ((size_t)v_address & ARCH_SECTION_MASK)
    {
        return -1;
    }
    /* must align to section */
    if (size & ARCH_SECTION_MASK)
    {
        return -1;
    }

    loop_va = (size_t)v_address;
    sections = (size >> ARCH_SECTION_SHIFT);
    while (sections--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        /* the window was validated as free at map_init time */
        RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);

#ifdef RT_USING_USERSPACE
        mmu_l2 = (size_t*)rt_pages_alloc(0);
#else
        mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
#endif
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain: the walker reads the table from memory */
            rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
            /* L1 entry stores the table's PHYSICAL address, type 0b01 */
            *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l1, 4);
        }
        else
        {
            /* error */
            return -1;
        }

        /* reference counter lives just past the 256 entries, in the
         * spare half of the table allocation; pinned to 1 so the table
         * is never auto-freed by __rt_hw_mmu_unmap */
        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
        *ref_cnt = 1;

        loop_va += ARCH_SECTION_SIZE;
    }
#endif
    return 0;
}
/*
 * Search the managed window for `pages` contiguous unmapped 4 KB pages
 * and return the virtual address of the first one, or 0 when no such
 * run exists (0 is also returned for bad arguments).
 *
 * The scan walks every L1 entry in [vstart, vend]: a section with no L2
 * table contributes 256 free pages at once, otherwise each L2 entry is
 * checked individually.  `n` counts the current run of free pages and is
 * reset on every used entry, so runs may span section boundaries.
 */
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t find_off = 0;    /* page index within its section where the run starts */
    size_t find_va = 0;     /* section index where the current run starts */
    int n = 0;              /* length of the current free run, in pages */

    if (!pages)
    {
        return 0;
    }
    if (!mmu_info)
    {
        return 0;
    }
    for (l1_off = mmu_info->vstart; l1_off <= mmu_info->vend; l1_off++)
    {
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            /* L1 entry stores the L2 table's physical address; pv_off
             * converts it back to a dereferenceable pointer */
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            for (l2_off = 0; l2_off < (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE); l2_off++)
            {
                if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
                {
                    /* in use */
                    n = 0;
                }
                else
                {
                    if (!n)
                    {
                        /* first free page of a new run */
                        find_va = l1_off;
                        find_off = l2_off;
                    }
                    n++;
                    if (n >= pages)
                    {
                        return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
                    }
                }
            }
        }
        else
        {
            /* whole section unmapped: 256 consecutive free pages */
            if (!n)
            {
                find_va = l1_off;
                find_off = 0;
            }
            n += (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
            if (n >= pages)
            {
                return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
            }
        }
    }
    return 0;
}
  325. #ifdef RT_USING_USERSPACE
  326. static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
  327. {
  328. size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
  329. size_t l1_off, l2_off;
  330. size_t *mmu_l1, *mmu_l2;
  331. if (!pages)
  332. {
  333. return -1;
  334. }
  335. if (!mmu_info)
  336. {
  337. return -1;
  338. }
  339. while (pages--)
  340. {
  341. l1_off = (loop_va >> ARCH_SECTION_SHIFT);
  342. l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
  343. mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
  344. if (*mmu_l1 & ARCH_MMU_USED_MASK)
  345. {
  346. mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
  347. if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
  348. {
  349. return -1;
  350. }
  351. }
  352. loop_va += ARCH_PAGE_SIZE;
  353. }
  354. return 0;
  355. }
  356. #endif
/*
 * Clear the second-level mappings of `npages` pages starting at v_addr
 * (rounded down to a page boundary).
 *
 * Each cleared L2 entry is cleaned from the D-cache so the hardware
 * table walker observes the change.  The per-table reference counter
 * (stored just past the 256 entries, in the spare half of the
 * 2 * ARCH_PAGE_TBL_SIZE allocation) is decremented; when it reaches
 * zero the L2 table itself is freed and its L1 entry cleared.
 *
 * Returns early (silently) when the range leaves the managed
 * [vstart, vend] window or hits a section with no L2 table installed.
 */
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;

    if (!mmu_info)
    {
        return;
    }
    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend)
        {
            return;
        }
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            /* stored value is physical; pv_off converts back to a pointer */
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            return;
        }

        if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
        {
            *(mmu_l2 + l2_off) = 0;
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

            ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
            (*ref_cnt)--;
            if (!*ref_cnt)
            {
                /* last live entry gone: release the table and its L1 slot */
#ifdef RT_USING_USERSPACE
                rt_pages_free(mmu_l2, 0);
#else
                rt_free_align(mmu_l2);
#endif
                *mmu_l1 = 0;
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
        }
        loop_va += ARCH_PAGE_SIZE;
    }
}
  406. static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, size_t npages, size_t attr)
  407. {
  408. size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
  409. size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
  410. size_t l1_off, l2_off;
  411. size_t *mmu_l1, *mmu_l2;
  412. size_t *ref_cnt;
  413. if (!mmu_info)
  414. {
  415. return -1;
  416. }
  417. while (npages--)
  418. {
  419. l1_off = (loop_va >> ARCH_SECTION_SHIFT);
  420. l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
  421. mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
  422. if (*mmu_l1 & ARCH_MMU_USED_MASK)
  423. {
  424. mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
  425. }
  426. else
  427. {
  428. #ifdef RT_USING_USERSPACE
  429. mmu_l2 = (size_t*)rt_pages_alloc(0);
  430. #else
  431. mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
  432. #endif
  433. if (mmu_l2)
  434. {
  435. rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
  436. /* cache maintain */
  437. rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
  438. *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
  439. /* cache maintain */
  440. rt_hw_cpu_dcache_clean(mmu_l1, 4);
  441. }
  442. else
  443. {
  444. /* error, unmap and quit */
  445. __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
  446. return -1;
  447. }
  448. }
  449. ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
  450. (*ref_cnt)++;
  451. *(mmu_l2 + l2_off) = (loop_pa | attr);
  452. /* cache maintain */
  453. rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
  454. loop_va += ARCH_PAGE_SIZE;
  455. loop_pa += ARCH_PAGE_SIZE;
  456. }
  457. return 0;
  458. }
/* Invalidate the entire unified TLB (CP15 c8, c7), then dsb/isb so the
 * updated translations take effect before any subsequent access. */
static void rt_hw_cpu_tlb_invalidate(void)
{
    asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");
}
  463. #ifdef RT_USING_USERSPACE
  464. void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
  465. {
  466. size_t pa_s, pa_e;
  467. size_t vaddr;
  468. int pages;
  469. int ret;
  470. if (!size)
  471. {
  472. return 0;
  473. }
  474. pa_s = (size_t)p_addr;
  475. pa_e = (size_t)p_addr + size - 1;
  476. pa_s >>= ARCH_PAGE_SHIFT;
  477. pa_e >>= ARCH_PAGE_SHIFT;
  478. pages = pa_e - pa_s + 1;
  479. if (v_addr)
  480. {
  481. vaddr = (size_t)v_addr;
  482. pa_s = (size_t)p_addr;
  483. if ((vaddr & ARCH_PAGE_MASK) != (pa_s & ARCH_PAGE_MASK))
  484. {
  485. return 0;
  486. }
  487. vaddr &= ~ARCH_PAGE_MASK;
  488. if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
  489. {
  490. return 0;
  491. }
  492. }
  493. else
  494. {
  495. vaddr = find_vaddr(mmu_info, pages);
  496. }
  497. if (vaddr) {
  498. ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
  499. if (ret == 0)
  500. {
  501. rt_hw_cpu_tlb_invalidate();
  502. return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
  503. }
  504. }
  505. return 0;
  506. }
  507. #else
  508. void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr)
  509. {
  510. size_t pa_s, pa_e;
  511. size_t vaddr;
  512. int pages;
  513. int ret;
  514. pa_s = (size_t)p_addr;
  515. pa_e = (size_t)p_addr + size - 1;
  516. pa_s >>= ARCH_PAGE_SHIFT;
  517. pa_e >>= ARCH_PAGE_SHIFT;
  518. pages = pa_e - pa_s + 1;
  519. vaddr = find_vaddr(mmu_info, pages);
  520. if (vaddr) {
  521. ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
  522. if (ret == 0)
  523. {
  524. rt_hw_cpu_tlb_invalidate();
  525. return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
  526. }
  527. }
  528. return 0;
  529. }
  530. #endif
  531. #ifdef RT_USING_USERSPACE
  532. static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
  533. {
  534. size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
  535. size_t loop_pa;
  536. size_t l1_off, l2_off;
  537. size_t *mmu_l1, *mmu_l2;
  538. size_t *ref_cnt;
  539. if (!mmu_info)
  540. {
  541. return -1;
  542. }
  543. while (npages--)
  544. {
  545. loop_pa = (size_t)rt_pages_alloc(0);
  546. if (!loop_pa)
  547. goto err;
  548. l1_off = (loop_va >> ARCH_SECTION_SHIFT);
  549. l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
  550. mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
  551. if (*mmu_l1 & ARCH_MMU_USED_MASK)
  552. {
  553. mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
  554. }
  555. else
  556. {
  557. //mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
  558. mmu_l2 = (size_t*)rt_pages_alloc(0);
  559. if (mmu_l2)
  560. {
  561. rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
  562. /* cache maintain */
  563. rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
  564. *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
  565. /* cache maintain */
  566. rt_hw_cpu_dcache_clean(mmu_l1, 4);
  567. }
  568. else
  569. goto err;
  570. }
  571. ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
  572. (*ref_cnt)++;
  573. loop_pa += mmu_info->pv_off;
  574. *(mmu_l2 + l2_off) = (loop_pa | attr);
  575. /* cache maintain */
  576. rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
  577. loop_va += ARCH_PAGE_SIZE;
  578. }
  579. return 0;
  580. err:
  581. {
  582. /* error, unmap and quit */
  583. int i;
  584. void *va, *pa;
  585. va = (void*)((size_t)v_addr & ~ARCH_PAGE_MASK);
  586. for (i = 0; i < npages; i++)
  587. {
  588. pa = rt_hw_mmu_v2p(mmu_info, va);
  589. pa -= mmu_info->pv_off;
  590. rt_pages_free(pa, 0);
  591. va += ARCH_PAGE_SIZE;
  592. }
  593. __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
  594. return -1;
  595. }
  596. }
  597. void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
  598. {
  599. size_t vaddr;
  600. size_t offset;
  601. int pages;
  602. int ret;
  603. if (!size)
  604. {
  605. return 0;
  606. }
  607. offset = (size_t)v_addr & ARCH_PAGE_MASK;
  608. size += (offset + ARCH_PAGE_SIZE - 1);
  609. pages = (size >> ARCH_PAGE_SHIFT);
  610. if (v_addr)
  611. {
  612. vaddr = (size_t)v_addr;
  613. vaddr &= ~ARCH_PAGE_MASK;
  614. if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
  615. {
  616. return 0;
  617. }
  618. }
  619. else
  620. {
  621. vaddr = find_vaddr(mmu_info, pages);
  622. }
  623. if (vaddr) {
  624. ret = __rt_hw_mmu_map_auto(mmu_info, (void*)vaddr, pages, attr);
  625. if (ret == 0)
  626. {
  627. rt_hw_cpu_tlb_invalidate();
  628. return (void*)vaddr + offset;
  629. }
  630. }
  631. return 0;
  632. }
  633. #endif
  634. void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
  635. {
  636. size_t va_s, va_e;
  637. int pages;
  638. va_s = (size_t)v_addr;
  639. va_e = (size_t)v_addr + size - 1;
  640. va_s >>= ARCH_PAGE_SHIFT;
  641. va_e >>= ARCH_PAGE_SHIFT;
  642. pages = va_e - va_s + 1;
  643. __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
  644. rt_hw_cpu_tlb_invalidate();
  645. }
extern void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr);

//va --> pa
// void *rt_hw_kernel_virt_to_phys(void *v_addr, size_t size)
/*
 * Translate a kernel virtual address to its physical address.
 * With RT_USING_USERSPACE the kernel page tables are walked (with
 * interrupts disabled around the walk); otherwise the kernel runs
 * identity-mapped and v_addr is returned unchanged.
 */
void *rt_hw_kernel_virt_to_phys(void *v_addr)
{
    void *p_addr = 0;

#ifdef RT_USING_USERSPACE
    rt_base_t level;
    extern rt_mmu_info mmu_info;

    level = rt_hw_interrupt_disable();
    p_addr = _rt_hw_mmu_v2p(&mmu_info, v_addr);
    rt_hw_interrupt_enable(level);
#else
    p_addr = v_addr;
#endif

    return p_addr;
}
//pa --> va
/*
 * Obtain a kernel virtual address for a physical range.
 * With RT_USING_USERSPACE this creates a NEW read/write mapping of
 * `size` bytes -- the caller owns it and is responsible for unmapping.
 * Otherwise the identity mapping is returned and `size` is unused.
 */
void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
{
    void *v_addr = 0;

#ifdef RT_USING_USERSPACE
    extern rt_mmu_info mmu_info;

    v_addr = rt_hw_mmu_map(&mmu_info, 0, p_addr, size, MMU_MAP_K_RW);
#else
    v_addr = p_addr;
#endif

    return v_addr;
}
  675. #ifdef RT_USING_USERSPACE
  676. void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
  677. {
  678. void *ret;
  679. rt_base_t level;
  680. level = rt_hw_interrupt_disable();
  681. ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
  682. rt_hw_interrupt_enable(level);
  683. return ret;
  684. }
  685. void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
  686. {
  687. void *ret;
  688. rt_base_t level;
  689. level = rt_hw_interrupt_disable();
  690. ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
  691. rt_hw_interrupt_enable(level);
  692. return ret;
  693. }
  694. #endif
  695. void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
  696. {
  697. rt_base_t level;
  698. level = rt_hw_interrupt_disable();
  699. _rt_hw_mmu_unmap(mmu_info, v_addr, size);
  700. rt_hw_interrupt_enable(level);
  701. }
/*
 * Walk the software page tables and translate v_addr to a physical
 * address.  Returns the physical address including the offset within
 * the page or section, or NULL when the address is unmapped or uses a
 * descriptor type this walker does not handle (large page,
 * supersection).
 */
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t tmp;
    size_t pa;

    l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;
    if (!mmu_info)
    {
        return (void*)0;
    }

    mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
    tmp = *mmu_l1;

    /* dispatch on the descriptor type (low two bits) */
    switch (tmp & ARCH_MMU_USED_MASK)
    {
        case 0: /* not used */
            break;
        case 1: /* page table */
            /* entry holds the L2 table's physical address; pv_off
             * converts it back to a dereferenceable pointer */
            mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
            pa = *(mmu_l2 + l2_off);
            if (pa & ARCH_MMU_USED_MASK)
            {
                if ((pa & ARCH_MMU_USED_MASK) == 1)
                {
                    /* large page, not support */
                    break;
                }
                /* small page: frame base plus offset within the page */
                pa &= ~(ARCH_PAGE_MASK);
                pa += ((size_t)v_addr & ARCH_PAGE_MASK);
                return (void*)pa;
            }
            break;
        case 2: /* grouped with 3: both are section-type descriptors */
        case 3:
            /* section */
            if (tmp & ARCH_TYPE_SUPERSECTION)
            {
                /* super section, not support */
                break;
            }
            /* 1 MB section: base plus offset within the section */
            pa = (tmp & ~ARCH_SECTION_MASK);
            pa += ((size_t)v_addr & ARCH_SECTION_MASK);
            return (void*)pa;
    }
    return (void*)0;
}
  749. void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
  750. {
  751. void *ret;
  752. rt_base_t level;
  753. level = rt_hw_interrupt_disable();
  754. ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
  755. rt_hw_interrupt_enable(level);
  756. return ret;
  757. }
  758. #ifdef RT_USING_USERSPACE
  759. void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off) {
  760. unsigned int va;
  761. for (va = 0; va < 0x1000; va++) {
  762. unsigned int vaddr = (va << 20);
  763. if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size) {
  764. mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
  765. } else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size) {
  766. mtbl[va] = (va << 20) | NORMAL_MEM;
  767. } else {
  768. mtbl[va] = 0;
  769. }
  770. }
  771. }
  772. #endif