/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 */

#include <rtthread.h>
#include <rthw.h>
#include <board.h>

#include "cp15.h"
#include "mmu.h"

#ifdef RT_USING_USERSPACE
#include "page.h"
#endif

/* dump 2nd level page table */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;

    for (i = 0; i < 256; i++)
    {
        rt_uint32_t pte2 = ptb[i];
        if ((pte2 & 0x3) == 0)
        {
            if (fcnt == 0)
                rt_kprintf("    ");
            rt_kprintf("%04x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }

        rt_kprintf("    %04x: %x: ", i, pte2);
        if ((pte2 & 0x3) == 0x1)
        {
            /* large page descriptor */
            rt_kprintf("L,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4)) & 0xf,
                       (pte2 >> 15) & 0x1,
                       ((pte2 >> 10) | (pte2 >> 2)) & 0x1f);
        }
        else
        {
            /* small page descriptor */
            rt_kprintf("S,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4)) & 0xf, pte2 & 0x1,
                       ((pte2 >> 4) | (pte2 >> 2)) & 0x1f);
        }
    }
}
void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;

    rt_kprintf("page table@%p\n", ptb);
    for (i = 0; i < 1024 * 4; i++)
    {
        rt_uint32_t pte1 = ptb[i];
        if ((pte1 & 0x3) == 0)
        {
            rt_kprintf("%03x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }

        rt_kprintf("%03x: %08x: ", i, pte1);
        if ((pte1 & 0x3) == 0x3)
        {
            rt_kprintf("LPAE\n");
        }
        else if ((pte1 & 0x3) == 0x1)
        {
            rt_kprintf("pte,ns:%d,domain:%d\n",
                       (pte1 >> 3) & 0x1, (pte1 >> 5) & 0xf);
            /*
             *rt_hw_cpu_dump_page_table_2nd((void*)((pte1 & 0xfffffc00)
             * - 0x80000000 + 0xC0000000));
             */
        }
        else if (pte1 & (1 << 18))
        {
            rt_kprintf("super section,ns:%d,ap:%x,xn:%d,texcb:%02x\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10)) & 0xf,
                       (pte1 >> 4) & 0x1,
                       ((pte1 >> 10) | (pte1 >> 2)) & 0x1f);
        }
        else
        {
            rt_kprintf("section,ns:%d,ap:%x,"
                       "xn:%d,texcb:%02x,domain:%d\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10)) & 0xf,
                       (pte1 >> 4) & 0x1,
                       (((pte1 & (0x7 << 12)) >> 10) |
                        ((pte1 & 0x0c) >> 2)) & 0x1f,
                       (pte1 >> 5) & 0xf);
        }
    }
}
/* level1 page table, each entry for 1MB memory. */
volatile unsigned long MMUTable[4 * 1024] __attribute__((aligned(16 * 1024)));

void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart,
                      rt_uint32_t vaddrEnd,
                      rt_uint32_t paddrStart,
                      rt_uint32_t attr)
{
    volatile rt_uint32_t *pTT;
    volatile int i, nSec;

    pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
    nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
    for (i = 0; i <= nSec; i++)
    {
        /* section entry: attribute bits plus the 1MB-aligned physical base */
        *pTT = attr | (((paddrStart >> 20) + i) << 20);
        pTT++;
    }
}
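
/*
 * Usage sketch (illustrative only, not part of this BSP): identity-map
 * 16MB of RAM as one run of section entries. The NORMAL_MEM attribute
 * word is assumed to come from mmu.h.
 *
 *   rt_hw_mmu_setmtt(0xC0000000, 0xC1000000 - 1, 0xC0000000, NORMAL_MEM);
 */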
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long old_domain;

    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : : "r" (domain_val) : "memory");

    return old_domain;
}
void rt_hw_cpu_dcache_clean(void *addr, int size);

void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
    /* set page table */
    for (; size > 0; size--)
    {
        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                         mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }
    rt_hw_cpu_dcache_clean((void*)MMUTable, sizeof MMUTable);
}
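
/*
 * Usage sketch (illustrative; the descriptor table is board-specific and
 * the DEVICE_MEM attribute name is an assumption, not from this file).
 * Each entry is vaddr_start, vaddr_end, paddr_start, attr:
 *
 *   static struct mem_desc platform_mem_desc[] = {
 *       { 0xC0000000, 0xC2000000 - 1, 0xC0000000, NORMAL_MEM },
 *       { 0x01C00000, 0x01D00000 - 1, 0x01C00000, DEVICE_MEM },
 *   };
 *
 *   rt_hw_init_mmu_table(platform_mem_desc,
 *                        sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
 */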
void rt_hw_mmu_init(void)
{
    /* write back and invalidate caches before turning them off */
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();

    /* caches and MMU must be off while the translation table is switched */
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();

    /*rt_hw_cpu_dump_page_table(MMUTable);*/
    /* 0x55555555: set every domain to client, so AP bits are checked */
    rt_hw_set_domain_register(0x55555555);

    rt_cpu_tlb_set(MMUTable);

    rt_cpu_mmu_enable();

    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}
/*
 * mem map
 */
void rt_hw_cpu_dcache_clean(void *addr, int size);

int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;

    if (!mmu_info || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

    level = rt_hw_interrupt_disable();
    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v & ARCH_MMU_USED_MASK)
        {
            rt_hw_interrupt_enable(level);
            return -1;
        }
    }

    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;

    rt_hw_interrupt_enable(level);

    return 0;
}
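
/*
 * Usage sketch (illustrative; the window below is an assumption, not a
 * value from this file): hand a 64MB range of kernel virtual space to
 * the mapper. The range must not overlap entries already used in vtable.
 *
 *   static rt_mmu_info kernel_mmu_info;
 *
 *   rt_hw_mmu_map_init(&kernel_mmu_info, (void *)0xF0000000, 0x04000000,
 *                      (size_t *)MMUTable, 0);
 */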
int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
{
#ifdef RT_IOREMAP_LATE
    size_t loop_va;
    size_t l1_off;
    size_t *mmu_l1, *mmu_l2;
    size_t sections;
#ifndef RT_USING_USERSPACE
    size_t *ref_cnt;
#endif

    /* for kernel ioremap */
    if ((size_t)v_address < KERNEL_VADDR_START)
    {
        return -1;
    }
    /* must align to section */
    if ((size_t)v_address & ARCH_SECTION_MASK)
    {
        return -1;
    }
    /* must align to section */
    if (size & ARCH_SECTION_MASK)
    {
        return -1;
    }

    loop_va = (size_t)v_address;
    sections = (size >> ARCH_SECTION_SHIFT);
    while (sections--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
        RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);

#ifdef RT_USING_USERSPACE
        mmu_l2 = (size_t*)rt_pages_alloc(0);
#else
        mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
#endif
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);

            *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l1, 4);
        }
        else
        {
            /* error */
            return -1;
        }

#ifndef RT_USING_USERSPACE
        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
        *ref_cnt = 1;
#endif

        loop_va += ARCH_SECTION_SIZE;
    }
#endif

    return 0;
}
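
/*
 * Scan the managed window for `pages` consecutive free 4KB slots
 * (first fit). A used second-level entry resets the current run; a
 * first-level entry with no page table counts as 256 free slots at
 * once. Returns the virtual address of the run, or 0 if nothing fits.
 */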
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t find_off = 0;
    size_t find_va = 0;
    int n = 0;

    if (!pages)
    {
        return 0;
    }

    if (!mmu_info)
    {
        return 0;
    }

    for (l1_off = mmu_info->vstart; l1_off <= mmu_info->vend; l1_off++)
    {
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            for (l2_off = 0; l2_off < (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE); l2_off++)
            {
                if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
                {
                    /* in use */
                    n = 0;
                }
                else
                {
                    if (!n)
                    {
                        find_va = l1_off;
                        find_off = l2_off;
                    }
                    n++;
                    if (n >= pages)
                    {
                        return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
                    }
                }
            }
        }
        else
        {
            if (!n)
            {
                find_va = l1_off;
                find_off = 0;
            }
            n += (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
            if (n >= pages)
            {
                return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
            }
        }
    }

    return 0;
}
#ifdef RT_USING_USERSPACE
/* check that `pages` slots starting at va are all unmapped */
static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
{
    size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    if (!pages)
    {
        return -1;
    }

    if (!mmu_info)
    {
        return -1;
    }

    while (pages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
            {
                return -1;
            }
        }
        loop_va += ARCH_PAGE_SIZE;
    }

    return 0;
}
#endif
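
/*
 * Clear `npages` second-level entries starting at v_addr. When the last
 * mapping in a second-level table goes away (its page refcount, or the
 * inline ref_cnt in non-userspace builds, drops to zero), the table is
 * freed and the first-level entry is cleared as well.
 */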
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
#ifndef RT_USING_USERSPACE
    size_t *ref_cnt;
#endif

    if (!mmu_info)
    {
        return;
    }

    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend)
        {
            return;
        }

        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            return;
        }

        if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
        {
            *(mmu_l2 + l2_off) = 0;
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

#ifdef RT_USING_USERSPACE
            if (rt_pages_free(mmu_l2, 0))
            {
                *mmu_l1 = 0;
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
#else
            ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
            (*ref_cnt)--;
            if (!*ref_cnt)
            {
                rt_free_align(mmu_l2);
                *mmu_l1 = 0;
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
#endif
        }
        loop_va += ARCH_PAGE_SIZE;
    }
}
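
/*
 * Install `npages` second-level entries mapping v_addr to p_addr with
 * the given attribute bits, allocating second-level tables on demand.
 * On allocation failure the partial mapping is torn down and -1 is
 * returned.
 */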
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
#ifndef RT_USING_USERSPACE
    size_t *ref_cnt;
#endif

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            rt_page_ref_inc(mmu_l2, 0);
        }
        else
        {
#ifdef RT_USING_USERSPACE
            mmu_l2 = (size_t*)rt_pages_alloc(0);
#else
            mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
#endif
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);

                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
            {
                /* error, unmap and quit */
                __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
                return -1;
            }
        }

#ifndef RT_USING_USERSPACE
        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
        (*ref_cnt)++;
#endif

        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

        loop_va += ARCH_PAGE_SIZE;
        loop_pa += ARCH_PAGE_SIZE;
    }
    return 0;
}
static void rt_hw_cpu_tlb_invalidate(void)
{
    /* invalidate the entire unified TLB, then synchronize */
    asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");
}
#ifdef RT_USING_USERSPACE
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;

    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        pa_s = (size_t)p_addr;
        if ((vaddr & ARCH_PAGE_MASK) != (pa_s & ARCH_PAGE_MASK))
        {
            return 0;
        }
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
    }

    return 0;
}
#else
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;

    vaddr = find_vaddr(mmu_info, pages);
    if (vaddr)
    {
        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
    }

    return 0;
}
#endif
#ifdef RT_USING_USERSPACE
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        /* allocate a backing page for every virtual page */
        loop_pa = (size_t)rt_pages_alloc(0);
        if (!loop_pa)
            goto err;

        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            rt_page_ref_inc(mmu_l2, 0);
        }
        else
        {
            /*mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);*/
            mmu_l2 = (size_t*)rt_pages_alloc(0);
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);

                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
                goto err;
        }

        /* convert the allocated kernel virtual page to its physical address */
        loop_pa += mmu_info->pv_off;
        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;

err:
    {
        /* error, unmap and quit */
        int i;
        void *va, *pa;

        va = (void*)((size_t)v_addr & ~ARCH_PAGE_MASK);
        for (i = 0; i < npages; i++)
        {
            pa = rt_hw_mmu_v2p(mmu_info, va);
            pa = (void*)((char*)pa - mmu_info->pv_off);
            rt_pages_free(pa, 0);
            va = (void*)((char*)va + ARCH_PAGE_SIZE);
        }

        __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
        return -1;
    }
}
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    size_t vaddr;
    size_t offset;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    offset = (size_t)v_addr & ARCH_PAGE_MASK;
    size += (offset + ARCH_PAGE_SIZE - 1);
    pages = (size >> ARCH_PAGE_SHIFT);

    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map_auto(mmu_info, (void*)vaddr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void*)((char*)vaddr + offset);
        }
    }

    return 0;
}
#endif
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
{
    size_t va_s, va_e;
    int pages;

    va_s = (size_t)v_addr;
    va_e = (size_t)v_addr + size - 1;
    va_s >>= ARCH_PAGE_SHIFT;
    va_e >>= ARCH_PAGE_SHIFT;
    pages = va_e - va_s + 1;

    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
}
extern void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr);

/* va --> pa */
/* void *rt_hw_kernel_virt_to_phys(void *v_addr, size_t size) */
void *rt_hw_kernel_virt_to_phys(void *v_addr)
{
    void *p_addr = 0;

#ifdef RT_USING_USERSPACE
    rt_base_t level;
    extern rt_mmu_info mmu_info;

    level = rt_hw_interrupt_disable();
    p_addr = _rt_hw_mmu_v2p(&mmu_info, v_addr);
    rt_hw_interrupt_enable(level);
#else
    p_addr = v_addr;
#endif

    return p_addr;
}
/* pa --> va */
void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
{
    void *v_addr = 0;

#ifdef RT_USING_USERSPACE
    extern rt_mmu_info mmu_info;

    v_addr = rt_hw_mmu_map(&mmu_info, 0, p_addr, size, MMU_MAP_K_RW);
#else
    v_addr = p_addr;
#endif

    return v_addr;
}
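
/*
 * Usage sketch (illustrative; the register base below is an assumption):
 * map a device register block before touching it when userspace is
 * enabled, since physical addresses are then no longer mapped 1:1.
 *
 *   void *regs = rt_hw_kernel_phys_to_virt((void *)0x01C00000, 0x1000);
 *   if (regs)
 *   {
 *       ... access the device through regs ...
 *   }
 */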
#ifdef RT_USING_USERSPACE
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_hw_interrupt_enable(level);

    return ret;
}

void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_hw_interrupt_enable(level);

    return ret;
}
#endif
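
/*
 * Usage sketch (illustrative; the addresses and the MMU_MAP_K_RW
 * attribute choice are assumptions): map one page of MMIO at a fixed
 * kernel virtual address, or let find_vaddr() pick one by passing
 * v_addr = 0.
 *
 *   void *fixed  = rt_hw_mmu_map(&mmu_info, (void *)0xF0100000,
 *                                (void *)0x01C20000, 0x1000, MMU_MAP_K_RW);
 *   void *chosen = rt_hw_mmu_map(&mmu_info, 0,
 *                                (void *)0x01C20000, 0x1000, MMU_MAP_K_RW);
 */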
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_hw_interrupt_enable(level);
}
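
/*
 * Software page-table walk: decode the first-level descriptor, then
 * either translate through the second-level table (4KB small pages) or
 * return the section translation directly. Large pages (64KB) and
 * supersections are not supported and fall through to return 0.
 */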
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t tmp;
    size_t pa;

    l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;

    if (!mmu_info)
    {
        return (void*)0;
    }

    mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
    tmp = *mmu_l1;

    switch (tmp & ARCH_MMU_USED_MASK)
    {
    case 0: /* not used */
        break;

    case 1: /* page table */
        mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        pa = *(mmu_l2 + l2_off);
        if (pa & ARCH_MMU_USED_MASK)
        {
            if ((pa & ARCH_MMU_USED_MASK) == 1)
            {
                /* large page, not support */
                break;
            }
            pa &= ~(ARCH_PAGE_MASK);
            pa += ((size_t)v_addr & ARCH_PAGE_MASK);
            return (void*)pa;
        }
        break;

    case 2:
    case 3:
        /* section */
        if (tmp & ARCH_TYPE_SUPERSECTION)
        {
            /* super section, not support */
            break;
        }
        pa = (tmp & ~ARCH_SECTION_MASK);
        pa += ((size_t)v_addr & ARCH_SECTION_MASK);
        return (void*)pa;
    }

    return (void*)0;
}
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_hw_interrupt_enable(level);

    return ret;
}
#ifdef RT_USING_USERSPACE
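/*
 * Build the early 1MB-section table used before the kernel switches to
 * its final page table: the kernel image is mapped at KERNEL_VADDR_START
 * (virtual-to-physical offset pv_off), and the same physical range is
 * also identity-mapped so execution can continue across the MMU switch.
 * Every other entry is left as a fault.
 */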
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
{
    unsigned int va;

    for (va = 0; va < 0x1000; va++)
    {
        unsigned int vaddr = (va << 20);

        if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size)
        {
            /* kernel virtual window -> physical */
            mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
        }
        else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size)
        {
            /* identity map of the kernel's physical range */
            mtbl[va] = (va << 20) | NORMAL_MEM;
        }
        else
        {
            /* fault */
            mtbl[va] = 0;
        }
    }
}
#endif