mmu.c

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 */

#include <rtthread.h>
#include <rthw.h>
#include <board.h>

#include "cp15.h"
#include "mmu.h"

#ifdef RT_USING_USERSPACE
#include "page.h"
#endif

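/*
 * mm_lock serializes updates to the MMU mapping tables below. It is created
 * lazily on first use and is only taken when the scheduler is running
 * (rt_thread_self() returns a thread), so both helpers degrade to no-ops
 * before the scheduler starts.
 */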
static rt_mutex_t mm_lock;

void rt_mm_lock(void)
{
    if (rt_thread_self())
    {
        if (!mm_lock)
        {
            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
        }
        if (mm_lock)
        {
            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
        }
    }
}

void rt_mm_unlock(void)
{
    if (rt_thread_self())
    {
        if (mm_lock)
        {
            rt_mutex_release(mm_lock);
        }
    }
}

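/*
 * Debug helpers: the two dump routines below walk an ARM short-descriptor
 * page table and print the type, access permission (ap), execute-never (xn),
 * TEX/C/B (texcb) and domain fields of each entry; unmapped (fault) entries
 * are summarized in groups of 16.
 */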
/* dump 2nd level page table */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;

    for (i = 0; i < 256; i++)
    {
        rt_uint32_t pte2 = ptb[i];
        if ((pte2 & 0x3) == 0)
        {
            if (fcnt == 0)
                rt_kprintf(" ");
            rt_kprintf("%04x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }
        rt_kprintf(" %04x: %x: ", i, pte2);
        if ((pte2 & 0x3) == 0x1)
        {
            rt_kprintf("L,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4)) & 0xf,
                       (pte2 >> 15) & 0x1,
                       ((pte2 >> 10) | (pte2 >> 2)) & 0x1f);
        }
        else
        {
            rt_kprintf("S,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4)) & 0xf, pte2 & 0x1,
                       ((pte2 >> 4) | (pte2 >> 2)) & 0x1f);
        }
    }
}

void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;

    rt_kprintf("page table@%p\n", ptb);
    for (i = 0; i < 1024 * 4; i++)
    {
        rt_uint32_t pte1 = ptb[i];
        if ((pte1 & 0x3) == 0)
        {
            rt_kprintf("%03x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }
        rt_kprintf("%03x: %08x: ", i, pte1);
        if ((pte1 & 0x3) == 0x3)
        {
            rt_kprintf("LPAE\n");
        }
        else if ((pte1 & 0x3) == 0x1)
        {
            rt_kprintf("pte,ns:%d,domain:%d\n",
                       (pte1 >> 3) & 0x1, (pte1 >> 5) & 0xf);
            /*
             *rt_hw_cpu_dump_page_table_2nd((void*)((pte1 & 0xfffffc000)
             * - 0x80000000 + 0xC0000000));
             */
        }
        else if (pte1 & (1 << 18))
        {
            rt_kprintf("super section,ns:%d,ap:%x,xn:%d,texcb:%02x\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10)) & 0xf,
                       (pte1 >> 4) & 0x1,
                       ((pte1 >> 10) | (pte1 >> 2)) & 0x1f);
        }
        else
        {
            rt_kprintf("section,ns:%d,ap:%x,"
                       "xn:%d,texcb:%02x,domain:%d\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10)) & 0xf,
                       (pte1 >> 4) & 0x1,
                       (((pte1 & (0x7 << 12)) >> 10) |
                        ((pte1 & 0x0c) >> 2)) & 0x1f,
                       (pte1 >> 5) & 0xf);
        }
    }
}

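/*
 * Boot-time level-1 table: 4096 entries, one per 1MB section, 16KB aligned
 * as the ARM TTBR requires. rt_hw_mmu_setmtt() fills the section entries
 * covering [vaddrStart, vaddrEnd] with the given physical base and attributes.
 */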
/* level1 page table, each entry for 1MB memory. */
volatile unsigned long MMUTable[4 * 1024] __attribute__((aligned(16 * 1024)));

void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart,
                      rt_uint32_t vaddrEnd,
                      rt_uint32_t paddrStart,
                      rt_uint32_t attr)
{
    volatile rt_uint32_t *pTT;
    volatile int i, nSec;

    pTT  = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
    nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
    for (i = 0; i <= nSec; i++)
    {
        *pTT = attr | (((paddrStart >> 20) + i) << 20);
        pTT++;
    }
}

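/*
 * Reads and then rewrites the CP15 Domain Access Control Register (c3),
 * returning the previous setting. rt_hw_mmu_init() below sets every domain
 * to "client" (0x55555555) so the page-table permission bits are enforced.
 */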
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long old_domain;

    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : : "r" (domain_val) : "memory");

    return old_domain;
}

void rt_hw_cpu_dcache_clean(void *addr, int size);

void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
    /* set page table */
    for (; size > 0; size--)
    {
        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                         mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }
    rt_hw_cpu_dcache_clean((void*)MMUTable, sizeof MMUTable);
}

void rt_hw_mmu_init(void)
{
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();

    /*rt_hw_cpu_dump_page_table(MMUTable);*/
    rt_hw_set_domain_register(0x55555555);

    rt_cpu_tlb_set(MMUTable);

    rt_cpu_mmu_enable();

    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}

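/*
 * rt_hw_mmu_map_init() records the software view of an address space:
 * the level-1 table (vtable), the managed virtual range in 1MB sections
 * (vstart/vend) and the offset between kernel virtual and physical
 * addresses (pv_off). The range must not begin in section 0 and must be
 * completely unused in vtable.
 */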
/*
mem map
*/
void rt_hw_cpu_dcache_clean(void *addr, int size);

int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, size_t size, size_t *vtable, size_t pv_off)
{
    size_t l1_off, va_s, va_e;

    if (!mmu_info || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v & ARCH_MMU_USED_MASK)
        {
            return -1;
        }
    }

    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;

    return 0;
}

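/*
 * With RT_IOREMAP_LATE enabled this pre-allocates and installs the level-2
 * tables for the kernel ioremap window (section aligned, above
 * KERNEL_VADDR_START), presumably so that later ioremap calls never need to
 * allocate table memory; without RT_IOREMAP_LATE it is a no-op.
 */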
int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void *v_address, size_t size)
{
#ifdef RT_IOREMAP_LATE
    size_t loop_va;
    size_t l1_off;
    size_t *mmu_l1, *mmu_l2;
    size_t sections;
#ifndef RT_USING_USERSPACE
    size_t *ref_cnt;
#endif

    /* for kernel ioremap */
    if ((size_t)v_address < KERNEL_VADDR_START)
    {
        return -1;
    }
    /* must align to section */
    if ((size_t)v_address & ARCH_SECTION_MASK)
    {
        return -1;
    }
    /* must align to section */
    if (size & ARCH_SECTION_MASK)
    {
        return -1;
    }

    loop_va = (size_t)v_address;
    sections = (size >> ARCH_SECTION_SHIFT);
    while (sections--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);
#ifdef RT_USING_USERSPACE
        mmu_l2 = (size_t*)rt_pages_alloc(0);
#else
        mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
#endif
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
            *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l1, 4);
        }
        else
        {
            /* error */
            return -1;
        }
#ifndef RT_USING_USERSPACE
        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
        *ref_cnt = 1;
#endif

        loop_va += ARCH_SECTION_SIZE;
    }
#endif
    return 0;
}

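/*
 * find_vaddr() scans the managed range for a run of 'pages' consecutive
 * unmapped 4KB slots and returns the matching virtual address, or 0 if no
 * large-enough hole exists. Sections without a level-2 table count as
 * fully free.
 */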
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t find_off = 0;
    size_t find_va = 0;
    int n = 0;

    if (!pages)
    {
        return 0;
    }

    if (!mmu_info)
    {
        return 0;
    }

    for (l1_off = mmu_info->vstart; l1_off <= mmu_info->vend; l1_off++)
    {
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            for (l2_off = 0; l2_off < (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE); l2_off++)
            {
                if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
                {
                    /* in use */
                    n = 0;
                }
                else
                {
                    if (!n)
                    {
                        find_va = l1_off;
                        find_off = l2_off;
                    }
                    n++;
                    if (n >= pages)
                    {
                        return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
                    }
                }
            }
        }
        else
        {
            if (!n)
            {
                find_va = l1_off;
                find_off = 0;
            }
            n += (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
            if (n >= pages)
            {
                return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
            }
        }
    }
    return 0;
}

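/*
 * check_vaddr() verifies that a caller-supplied virtual range of 'pages'
 * 4KB pages lies inside the managed window and that none of the pages is
 * already mapped; it returns 0 on success and -1 otherwise.
 */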
#ifdef RT_USING_USERSPACE
static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
{
    size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    if (!pages)
    {
        return -1;
    }

    if (!mmu_info)
    {
        return -1;
    }

    l1_off = ((size_t)va >> ARCH_SECTION_SHIFT);
    if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend)
    {
        return -1;
    }

    l1_off += ((pages << ARCH_PAGE_SHIFT) >> ARCH_SECTION_SHIFT);
    if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend + 1)
    {
        return -1;
    }

    while (pages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
            {
                return -1;
            }
        }
        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;
}
#endif

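/*
 * __rt_hw_mmu_unmap() clears the level-2 entries for 'npages' pages starting
 * at v_addr, dropping one reference on the level-2 table per cleared entry
 * (via rt_pages_free, or the trailing ref_cnt word in the non-userspace
 * build) and tearing down the level-1 entry when the table is finally freed.
 * Modified entries are cleaned from the D-cache so the table walker sees them.
 */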
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t npages)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
#ifndef RT_USING_USERSPACE
    size_t *ref_cnt;
#endif

    if (!mmu_info)
    {
        return;
    }

    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend)
        {
            return;
        }

        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            return;
        }

        if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
        {
            *(mmu_l2 + l2_off) = 0;
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
#ifdef RT_USING_USERSPACE
            if (rt_pages_free(mmu_l2, 0))
            {
                *mmu_l1 = 0;
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
#else
            ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
            (*ref_cnt)--;
            if (!*ref_cnt)
            {
                rt_free_align(mmu_l2);
                *mmu_l1 = 0;
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
#endif
        }
        loop_va += ARCH_PAGE_SIZE;
    }
}

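/*
 * __rt_hw_mmu_map() installs 'npages' 4KB mappings from v_addr to p_addr with
 * the given attribute bits, allocating a level-2 table on demand for each 1MB
 * section it touches. On allocation failure it unwinds via __rt_hw_mmu_unmap()
 * and returns -1.
 */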
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
#ifndef RT_USING_USERSPACE
    size_t *ref_cnt;
#endif

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
#ifdef RT_USING_USERSPACE
            rt_page_ref_inc(mmu_l2, 0);
#endif
        }
        else
        {
#ifdef RT_USING_USERSPACE
            mmu_l2 = (size_t*)rt_pages_alloc(0);
#else
            mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
#endif
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
            {
                /* error, unmap and quit */
                __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
                return -1;
            }
        }

#ifndef RT_USING_USERSPACE
        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
        (*ref_cnt)++;
#endif

        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

        loop_va += ARCH_PAGE_SIZE;
        loop_pa += ARCH_PAGE_SIZE;
    }
    return 0;
}

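/* Invalidate the entire unified TLB (CP15 c8, c7) and synchronize with dsb/isb. */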
static void rt_hw_cpu_tlb_invalidate(void)
{
    asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");
}

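/*
 * _rt_hw_mmu_map(): maps a physical range without taking mm_lock (the
 * rt_hw_mmu_map wrapper below adds the lock). With RT_USING_USERSPACE the
 * caller may pass a fixed v_addr, which must share the in-page offset of
 * p_addr and pass check_vaddr(), or RT_NULL to let find_vaddr() choose one;
 * the non-userspace variant always picks the virtual address itself.
 * Returns the mapped virtual address, or 0 on failure.
 */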
#ifdef RT_USING_USERSPACE
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;

    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        pa_s = (size_t)p_addr;
        if ((vaddr & ARCH_PAGE_MASK) != (pa_s & ARCH_PAGE_MASK))
        {
            return 0;
        }
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        rt_enter_critical();
        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            rt_exit_critical();
            return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
        rt_exit_critical();
    }
    return 0;
}
#else
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;

    vaddr = find_vaddr(mmu_info, pages);
    if (vaddr)
    {
        rt_enter_critical();
        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            rt_exit_critical();
            return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
        rt_exit_critical();
    }
    return 0;
}
#endif

#ifdef RT_USING_USERSPACE
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        loop_pa = (size_t)rt_pages_alloc(0);
        if (!loop_pa)
            goto err;

        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 = (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            rt_page_ref_inc(mmu_l2, 0);
        }
        else
        {
            //mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
            mmu_l2 = (size_t*)rt_pages_alloc(0);
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);
                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
                goto err;
        }

        loop_pa += mmu_info->pv_off;
        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;

err:
    {
        /* error, unmap and quit */
        int i;
        void *va, *pa;

        va = (void*)((size_t)v_addr & ~ARCH_PAGE_MASK);
        for (i = 0; i < npages; i++)
        {
            pa = rt_hw_mmu_v2p(mmu_info, va);
            pa = (void*)((char*)pa - mmu_info->pv_off);
            rt_pages_free(pa, 0);
            va = (void*)((char*)va + ARCH_PAGE_SIZE);
        }
        __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
        return -1;
    }
}

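/*
 * _rt_hw_mmu_map_auto(): like _rt_hw_mmu_map(), but the backing physical
 * pages are allocated from the page allocator (rt_pages_alloc) as the
 * mapping is built, so only the virtual address, size and attributes are
 * supplied.
 */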
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    size_t vaddr;
    size_t offset;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    offset = (size_t)v_addr & ARCH_PAGE_MASK;
    size += (offset + ARCH_PAGE_SIZE - 1);
    pages = (size >> ARCH_PAGE_SHIFT);

    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        rt_enter_critical();
        ret = __rt_hw_mmu_map_auto(mmu_info, (void*)vaddr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            rt_exit_critical();
            return (void*)((char*)vaddr + offset);
        }
        rt_exit_critical();
    }
    return 0;
}
#endif

void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
{
    size_t va_s, va_e;
    int pages;

    va_s = (size_t)v_addr;
    va_e = (size_t)v_addr + size - 1;
    va_s >>= ARCH_PAGE_SHIFT;
    va_e >>= ARCH_PAGE_SHIFT;
    pages = va_e - va_s + 1;

    rt_enter_critical();
    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
    rt_exit_critical();
}

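/*
 * Public wrappers: same as the _rt_hw_mmu_* helpers above, but taken under
 * mm_lock. A minimal usage sketch of the userspace build (the mmu_info
 * setup, the physical address and the attribute are illustrative
 * assumptions, not taken from this file):
 *
 *     void *va = rt_hw_mmu_map(&mmu_info, RT_NULL, (void *)0x20000000,
 *                              4096, NORMAL_MEM);
 *     if (va)
 *     {
 *         // ... access the memory through va ...
 *         rt_hw_mmu_unmap(&mmu_info, va, 4096);
 *     }
 */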
#ifdef RT_USING_USERSPACE
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
{
    void *ret;

    rt_mm_lock();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_mm_unlock();

    return ret;
}

void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    void *ret;

    rt_mm_lock();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_mm_unlock();

    return ret;
}
#endif

void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
{
    rt_mm_lock();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_mm_unlock();
}

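/*
 * Virtual-to-physical translation by walking the two-level table in software.
 * Returns the physical address for page and section mappings, and NULL for
 * unmapped addresses; 64KB large pages and supersections are not handled.
 */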
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t tmp;
    size_t pa;

    l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;

    if (!mmu_info)
    {
        return (void*)0;
    }

    mmu_l1 = (size_t*)mmu_info->vtable + l1_off;
    tmp = *mmu_l1;

    switch (tmp & ARCH_MMU_USED_MASK)
    {
    case 0: /* not used */
        break;

    case 1: /* page table */
        mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        pa = *(mmu_l2 + l2_off);
        if (pa & ARCH_MMU_USED_MASK)
        {
            if ((pa & ARCH_MMU_USED_MASK) == 1)
            {
                /* large page, not support */
                break;
            }
            pa &= ~(ARCH_PAGE_MASK);
            pa += ((size_t)v_addr & ARCH_PAGE_MASK);
            return (void*)pa;
        }
        break;

    case 2:
    case 3:
        /* section */
        if (tmp & ARCH_TYPE_SUPERSECTION)
        {
            /* super section, not support */
            break;
        }
        pa = (tmp & ~ARCH_SECTION_MASK);
        pa += ((size_t)v_addr & ARCH_SECTION_MASK);
        return (void*)pa;
    }
    return (void*)0;
}

void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    void *ret;

    rt_mm_lock();
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_mm_unlock();

    return ret;
}

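/*
 * init_mm_setup() builds the early boot level-1 table: the 1MB sections
 * covering the kernel are mapped twice, once at KERNEL_VADDR_START (virtual,
 * with pv_off added to reach the physical section) and once identity-mapped
 * at the physical location, presumably so execution can continue across the
 * switch to the MMU; every other entry is left as a fault.
 */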
#ifdef RT_USING_USERSPACE
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
{
    unsigned int va;

    for (va = 0; va < 0x1000; va++)
    {
        unsigned int vaddr = (va << 20);

        if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size)
        {
            mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
        }
        else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size)
        {
            mtbl[va] = (va << 20) | NORMAL_MEM;
        }
        else
        {
            mtbl[va] = 0;
        }
    }
}
#endif