/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 */

#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include <lwp_mm.h>

#include "mmu.h"

#ifdef RT_USING_LWP
#include <page.h>
#endif

#define MMU_LEVEL_MASK         0x1ffUL
#define MMU_LEVEL_SHIFT        9
#define MMU_ADDRESS_BITS       39
#define MMU_ADDRESS_MASK       0x0000fffffffff000UL
#define MMU_ATTRIB_MASK        0xfff0000000000ffcUL

#define MMU_TYPE_MASK          3UL
#define MMU_TYPE_USED          1UL
#define MMU_TYPE_BLOCK         1UL
#define MMU_TYPE_TABLE         3UL
#define MMU_TYPE_PAGE          3UL

#define MMU_TBL_BLOCK_2M_LEVEL 2
#define MMU_TBL_PAGE_4k_LEVEL  3
#define MMU_TBL_LEVEL_NR       4

void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);

struct page_table
{
    unsigned long page[512];
};

#ifndef RT_USING_LWP
#define MMU_TBL_PAGE_NR_MAX 32

#undef PV_OFFSET
#define PV_OFFSET 0

static volatile struct page_table MMUPage[MMU_TBL_PAGE_NR_MAX] __attribute__((aligned(4096)));

#define rt_page_ref_inc(...)

unsigned long rt_pages_alloc(rt_size_t size_bits)
{
    static unsigned long i = 0;

    if (i >= MMU_TBL_PAGE_NR_MAX)
    {
        return RT_NULL;
    }
    ++i;
    return (unsigned long)&MMUPage[i - 1].page;
}
#endif

static struct page_table *__init_page_array;
static unsigned long __page_off = 0UL;
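
/*
 * Early page-table allocator: hands out successive 4 KB page-table pages
 * from the section that contains the current stack pointer, with the first
 * two entries reserved for the ttbr0/ttbr1 level-0 tables.
 */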
unsigned long get_free_page(void)
{
    if (!__init_page_array)
    {
        unsigned long temp_page_start;

        asm volatile("mov %0, sp":"=r"(temp_page_start));
        __init_page_array = (struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK));
        __page_off = 2; /* 0, 1 for ttbr0, ttbr1 */
    }
    __page_off++;
    return (unsigned long)(__init_page_array[__page_off - 1].page);
}

void mmu_memset(char *dst, char v, size_t len)
{
    while (len--)
    {
        *dst++ = v;
    }
}
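
/*
 * Map a single 2 MB block, allocating any missing intermediate tables with
 * get_free_page(). Both the virtual and the physical address must be
 * section (2 MB) aligned. No cache maintenance is performed here, so this
 * is used by the early mapping path (armv8_init_map_2M).
 */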
static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            mmu_memset((char *)page, 0, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    return 0;
}

int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
{
    unsigned long i;
    int ret;

    if (va & ARCH_SECTION_MASK)
    {
        return -1;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return -1;
    }
    for (i = 0; i < count; i++)
    {
        ret = _map_single_page_2M(lv0_tbl, va, pa, attr);
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
        if (ret != 0)
        {
            return ret;
        }
    }
    return 0;
}
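
/*
 * Runtime variant of the 2 MB block mapper: intermediate tables come from
 * rt_pages_alloc(), table entries are written with PV_OFFSET applied, and
 * every modified descriptor is flushed from the data cache so the table
 * walker sees it.
 */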
static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc(0);
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return 0;
}

struct mmu_level_info
{
    unsigned long *pos;
    void *page;
};

#ifdef RT_USING_LWP
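
/*
 * Remove the 4 KB mapping for v_addr from the given level-0 table: walk the
 * four levels, clear the leaf entry, then drop the reference taken on each
 * intermediate table page (rt_pages_free), clearing its parent entry when
 * this walk held the last reference, and finally invalidate the TLB entry
 * for the address.
 */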
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    struct mmu_level_info level_info[4];
    int ref;
    int level_shift = MMU_ADDRESS_BITS;
    unsigned long *pos;

    rt_memset(level_info, 0, sizeof level_info);
    for (level = 0; level < MMU_TBL_LEVEL_NR; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        page = cur_lv_tbl[off];
        if (!(page & MMU_TYPE_USED))
        {
            break;
        }
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            break;
        }
        level_info[level].pos = cur_lv_tbl + off;
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_info[level].page = cur_lv_tbl;
        level_shift -= MMU_LEVEL_SHIFT;
    }

    level = MMU_TBL_PAGE_4k_LEVEL;
    pos = level_info[level].pos;
    if (pos)
    {
        *pos = (unsigned long)RT_NULL;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
    }
    level--;

    while (level >= 0)
    {
        pos = level_info[level].pos;
        if (pos)
        {
            void *cur_page = level_info[level].page;

            ref = rt_page_ref_get(cur_page, 0);
            if (ref == 1)
            {
                *pos = (unsigned long)RT_NULL;
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
            }
            rt_pages_free(cur_page, 0);
        }
        level--;
    }

    asm volatile("tlbi vae1, %0\ndsb sy"::"r"(v_addr):"memory");

    return;
}
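
/*
 * Install a 4 KB page mapping va -> pa with the given attributes, allocating
 * (or taking an extra reference on) intermediate tables as needed. On
 * failure the partially built path is torn down with _kenrel_unmap_4K().
 */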
static int _kenrel_map_4K(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc(0);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at the 4K page table level */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_PAGE); /* page */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa; /* page */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;

err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
#endif
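
/*
 * Map 'count' 2 MB sections for the fixed kernel mapping, combining the
 * caller-supplied memory attributes with the MMU_AP_KAUN access-permission
 * field via MMU_MAP_CUSTOM.
 */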
static int _kernel_map_fixed(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
{
    unsigned long i;
    int ret;
    unsigned long _attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, attr);

    if (va & ARCH_SECTION_MASK)
    {
        return -1;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return -1;
    }
    for (i = 0; i < count; i++)
    {
        ret = _kenrel_map_2M(lv0_tbl, va, pa, _attr);
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
        if (ret != 0)
        {
            return ret;
        }
    }
    return 0;
}

/************* setting EL1 MMU registers *************
 * MAIR_EL1
 * index 0 : normal memory, outer write-back, write/read allocate
 * index 1 : normal memory, non-cacheable
 * index 2 : device nGnRnE
 *****************************************************/
void mmu_tcr_init(void)
{
    unsigned long val64;

    /* MAIR_EL1 */
    val64 = 0x00447fUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n"::"r"(val64));

    /* TCR_EL1 */
    val64 = (16UL << 0)     /* t0sz 48bit */
            | (0x0UL << 6)  /* reserved */
            | (0x0UL << 7)  /* epd0 */
            | (0x3UL << 8)  /* t0 inner wb cacheable */
            | (0x3UL << 10) /* t0 outer wb cacheable */
            | (0x2UL << 12) /* t0 outer shareable */
            | (0x0UL << 14) /* t0 4K granule */
            | (16UL << 16)  /* t1sz 48bit */
            | (0x0UL << 22) /* define asid use ttbr0.asid */
            | (0x0UL << 23) /* epd1 */
            | (0x3UL << 24) /* t1 inner wb cacheable */
            | (0x3UL << 26) /* t1 outer wb cacheable */
            | (0x2UL << 28) /* t1 outer shareable */
            | (0x2UL << 30) /* t1 4K granule */
            | (0x1UL << 32) /* ips: 001b, 64GB PA */
            | (0x0UL << 35) /* reserved */
            | (0x1UL << 36) /* as: 0:8bit 1:16bit */
            | (0x0UL << 37) /* tbi0 */
            | (0x0UL << 38); /* tbi1 */
    __asm__ volatile("msr TCR_EL1, %0\n"::"r"(val64));
}

/* dump 2nd level page table */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
}

void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
}

volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));
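
/*
 * Map the section-aligned range [vaddrStart, vaddrEnd] (end inclusive) onto
 * paddrStart in the static kernel table MMUTable, one 2 MB section at a
 * time. Misaligned or inverted ranges are silently ignored.
 */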
void rt_hw_mmu_setmtt(unsigned long vaddrStart,
                      unsigned long vaddrEnd,
                      unsigned long paddrStart,
                      unsigned long attr)
{
    unsigned long count;

    if (vaddrStart & ARCH_SECTION_MASK)
    {
        return;
    }
    if (paddrStart & ARCH_SECTION_MASK)
    {
        return;
    }
    if (vaddrStart > vaddrEnd)
    {
        return;
    }
    count = vaddrEnd + 1;
    if (count & ARCH_SECTION_MASK)
    {
        return;
    }
    count -= vaddrStart;
    if (count == 0)
    {
        return;
    }
    count >>= ARCH_SECTION_SHIFT;
    _kernel_map_fixed((unsigned long *)MMUTable, vaddrStart, paddrStart, count, attr);
}

void rt_hw_mmu_ktbl_set(unsigned long tbl)
{
#ifdef RT_USING_LWP
    tbl += PV_OFFSET;
    __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory");
#else
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory");
#endif
    __asm__ volatile("tlbi vmalle1\n dsb sy\nisb":::"memory");
    __asm__ volatile("ic ialluis\n dsb sy\nisb":::"memory");
}
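
/*
 * Build the kernel page table from the board's mem_desc array, clean it to
 * memory, and switch the active translation table to it via
 * rt_hw_mmu_ktbl_set().
 */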
void rt_hw_mmu_setup(struct mem_desc *mdesc, int desc_nr)
{
    /* set page table */
    for (; desc_nr > 0; desc_nr--)
    {
        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                         mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)MMUTable, sizeof MMUTable);
    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);
}

/**
 * This function will initialize the rt_mmu_info structure.
 *
 * @param mmu_info   rt_mmu_info structure
 * @param v_address  virtual address
 * @param size       map size
 * @param vtable     mmu table
 * @param pv_off     pv offset in kernel space
 *
 * @return 0 on success, -1 on failure
 */
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, size_t size, size_t *vtable, size_t pv_off)
{
    size_t va_s, va_e;

    if (!mmu_info || !vtable)
    {
        return -1;
    }
    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;
    if (va_e < va_s)
    {
        return -1;
    }
    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;
    if (va_s == 0)
    {
        return -1;
    }
    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;
    return 0;
}

int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void *v_address, size_t size)
{
    return 0;
}
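
/*
 * Search the managed virtual range of mmu_info for 'pages' consecutive
 * unmapped 4 KB pages and return the first virtual address of such a run,
 * or 0 if none is found.
 */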
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t loop_pages;
    size_t va;
    size_t find_va = 0;
    int n = 0;
    size_t i;

    if (!pages)
    {
        return 0;
    }
    if (!mmu_info)
    {
        return 0;
    }
    loop_pages = mmu_info->vend - mmu_info->vstart + 1;
    loop_pages <<= (ARCH_SECTION_SHIFT - ARCH_PAGE_SHIFT);
    va = mmu_info->vstart;
    va <<= ARCH_SECTION_SHIFT;
    for (i = 0; i < loop_pages; i++, va += ARCH_PAGE_SIZE)
    {
        if (_rt_hw_mmu_v2p(mmu_info, (void *)va))
        {
            n = 0;
            find_va = 0;
            continue;
        }
        if (!find_va)
        {
            find_va = va;
        }
        n++;
        if (n >= pages)
        {
            return find_va;
        }
    }
    return 0;
}

#ifdef RT_USING_LWP
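
/*
 * Check that a caller-chosen virtual range of 'pages' pages lies inside the
 * managed window of mmu_info and is currently unmapped. Returns 0 if the
 * range is usable, -1 otherwise.
 */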
static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
{
    size_t loop_va;

    if (!pages)
    {
        return -1;
    }
    if (!mmu_info)
    {
        return -1;
    }
    loop_va = ((size_t)va >> ARCH_SECTION_SHIFT);
    if (loop_va < mmu_info->vstart || loop_va > mmu_info->vend)
    {
        return -1;
    }
    loop_va += ((pages << ARCH_PAGE_SHIFT) >> ARCH_SECTION_SHIFT);
    if (loop_va < mmu_info->vstart || loop_va > mmu_info->vend + 1)
    {
        return -1;
    }
    loop_va = (size_t)va & ~ARCH_PAGE_MASK;
    while (pages--)
    {
        if (_rt_hw_mmu_v2p(mmu_info, (void *)loop_va))
        {
            return -1;
        }
        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;
}

static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t npages)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;

    if (!mmu_info || !mmu_info->vtable)
    {
        return;
    }
    while (npages--)
    {
        _kenrel_unmap_4K(mmu_info->vtable, (void *)loop_va);
        loop_va += ARCH_PAGE_SIZE;
    }
}
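
/*
 * Map npages contiguous 4 KB pages starting at v_addr onto p_addr. If any
 * page fails to map, the pages mapped so far are unmapped again before the
 * error is returned.
 */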
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t npages, size_t attr)
{
    int ret = -1;
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t unmap_va = loop_va;

    if (mmu_info)
    {
        while (npages--)
        {
            ret = _kenrel_map_4K(mmu_info->vtable, loop_va, loop_pa, attr);
            if (ret != 0)
            {
                /* error, undo map */
                while (unmap_va != loop_va)
                {
                    _kenrel_unmap_4K(mmu_info->vtable, (void *)unmap_va);
                    unmap_va += ARCH_PAGE_SIZE;
                }
                break;
            }
            loop_va += ARCH_PAGE_SIZE;
            loop_pa += ARCH_PAGE_SIZE;
        }
    }
    return ret;
}
#endif

static void rt_hw_cpu_tlb_invalidate(void)
{
    __asm__ volatile("tlbi vmalle1\n dsb sy\n isb sy\n");
}

#ifdef RT_USING_LWP
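
/*
 * Map a physical range into the address space described by mmu_info. When
 * v_addr is RT_NULL a free virtual range is picked with find_vaddr();
 * otherwise the requested range is validated with check_vaddr(). Returns
 * the virtual address of the mapping (with the page offset of p_addr
 * preserved) or 0 on failure.
 */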
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }
    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;
    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        pa_s = (size_t)p_addr;
        if ((vaddr & ARCH_PAGE_MASK) != (pa_s & ARCH_PAGE_MASK))
        {
            return 0;
        }
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }
    if (vaddr)
    {
        rt_enter_critical();
        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            rt_exit_critical();
            return (void *)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
        rt_exit_critical();
    }
    return 0;
}
#else
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *p_addr, size_t size, size_t attr)
{
    return p_addr;
}
#endif

#ifdef RT_USING_LWP
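
/*
 * Back npages of the virtual range at v_addr with freshly allocated pages
 * from rt_pages_alloc(). On failure the pages already mapped are freed and
 * the range is unmapped again.
 */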
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa;

    if (!mmu_info)
    {
        return -1;
    }
    while (npages--)
    {
        loop_pa = (size_t)rt_pages_alloc(0);
        if (!loop_pa)
        {
            goto err;
        }
        loop_pa += mmu_info->pv_off;
        _kenrel_map_4K(mmu_info->vtable, loop_va, loop_pa, attr);
        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;

err:
    {
        /* error, unmap and quit */
        int i;
        void *va, *pa;

        va = (void *)((size_t)v_addr & ~ARCH_PAGE_MASK);
        for (i = 0; i < npages; i++)
        {
            pa = rt_hw_mmu_v2p(mmu_info, va);
            pa = (void *)((char *)pa - mmu_info->pv_off);
            rt_pages_free(pa, 0);
            va = (void *)((char *)va + ARCH_PAGE_SIZE);
        }
        __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
        return -1;
    }
}

void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    size_t vaddr;
    size_t offset;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }
    offset = (size_t)v_addr & ARCH_PAGE_MASK;
    size += (offset + ARCH_PAGE_SIZE - 1);
    pages = (size >> ARCH_PAGE_SHIFT);
    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }
    if (vaddr)
    {
        rt_enter_critical();
        ret = __rt_hw_mmu_map_auto(mmu_info, (void *)vaddr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            rt_exit_critical();
            return (void *)((char *)vaddr + offset);
        }
        rt_exit_critical();
    }
    return 0;
}

void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
{
    size_t va_s, va_e;
    int pages;

    va_s = (size_t)v_addr;
    va_e = (size_t)v_addr + size - 1;
    va_s >>= ARCH_PAGE_SHIFT;
    va_e >>= ARCH_PAGE_SHIFT;
    pages = va_e - va_s + 1;
    rt_enter_critical();
    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
    rt_exit_critical();
}
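
/*
 * Public entry points: the same operations as the _rt_hw_* helpers above,
 * but serialized with rt_mm_lock()/rt_mm_unlock().
 */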
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
{
    void *ret;

    rt_mm_lock();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_mm_unlock();
    return ret;
}

void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    void *ret;

    rt_mm_lock();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_mm_unlock();
    return ret;
}

void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
{
    rt_mm_lock();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_mm_unlock();
}
#endif
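
/*
 * Software walk of the page table in mmu_info: translate a virtual address
 * to its physical address, handling both block and page mappings, or return
 * 0 if the address is not mapped.
 */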
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long pa;
    unsigned long *cur_lv_tbl;
    unsigned long page;
    unsigned long off;
    unsigned long off_addr;
    int level_shift = MMU_ADDRESS_BITS;

    if (!mmu_info)
    {
        return (void *)0;
    }
    cur_lv_tbl = mmu_info->vtable;
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            return (void *)0;
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            off_addr = va & ((1UL << level_shift) - 1);
            pa = (page & MMU_ADDRESS_MASK);
            pa += off_addr;
            return (void *)pa;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level MMU_TBL_PAGE_4k_LEVEL */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    page = cur_lv_tbl[off];
    if (!(page & MMU_TYPE_USED))
    {
        return (void *)0;
    }
    pa = (page & MMU_ADDRESS_MASK);
    pa += (va & ARCH_PAGE_MASK);
    return (void *)pa;
}

void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    void *ret;

    rt_mm_lock();
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_mm_unlock();
    return ret;
}
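
/*
 * Intended to be called from the early boot path before the MMU is enabled:
 * clear the two initial translation tables and create 2 MB block mappings
 * for the kernel image, identity-mapped in tbl0 and at KERNEL_VADDR_START
 * in tbl1. Hangs in a loop if the mapping fails.
 */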
void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off)
{
    int ret;
    unsigned long va = KERNEL_VADDR_START;
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);

    /* clean the first two pages */
    mmu_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
    mmu_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);

    ret = armv8_init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
    ret = armv8_init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
}