/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 */

#include <rtthread.h>
#include <rthw.h>
#include <board.h>

#include "mmu.h"

#ifdef RT_USING_SMART
#include <lwp_mm.h>
#include <page.h>
#endif

#define MMU_LEVEL_MASK   0x1ffUL
#define MMU_LEVEL_SHIFT  9
#define MMU_ADDRESS_BITS 39
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
#define MMU_ATTRIB_MASK  0xfff0000000000ffcUL

#define MMU_TYPE_MASK  3UL
#define MMU_TYPE_USED  1UL
#define MMU_TYPE_BLOCK 1UL
#define MMU_TYPE_TABLE 3UL
#define MMU_TYPE_PAGE  3UL

#define MMU_TBL_BLOCK_2M_LEVEL 2
#define MMU_TBL_PAGE_4k_LEVEL  3
#define MMU_TBL_LEVEL_NR       4
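
/*
 * The translation scheme below assumes a 4 KB granule with up to four levels
 * of lookup: each table holds 512 eight-byte entries (9 index bits,
 * MMU_LEVEL_MASK), the level-0 index is taken from VA bit 39 downwards
 * (MMU_ADDRESS_BITS), 2 MB block descriptors are installed at level 2 and
 * 4 KB page descriptors at level 3.
 */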
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);

struct page_table
{
    unsigned long page[512];
};
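
/*
 * Without RT_USING_SMART there is no page allocator, so translation tables
 * are carved out of the fixed, 4 KB-aligned MMUPage[] pool below:
 * rt_pages_alloc() hands out slots sequentially and never reclaims them,
 * rt_page_ref_inc() degenerates to a no-op, and rt_mm_lock()/rt_mm_unlock()
 * guard the map operations with a lazily created mutex.
 */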
#ifndef RT_USING_SMART
#define MMU_TBL_PAGE_NR_MAX 32

static rt_mutex_t mm_lock = RT_NULL;

void rt_mm_lock(void)
{
    if (rt_thread_self())
    {
        if (!mm_lock)
        {
            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
        }
        if (mm_lock)
        {
            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
        }
    }
}

void rt_mm_unlock(void)
{
    if (rt_thread_self())
    {
        if (mm_lock)
        {
            rt_mutex_release(mm_lock);
        }
    }
}

static volatile struct page_table MMUPage[MMU_TBL_PAGE_NR_MAX] __attribute__((aligned(4096)));

#define rt_page_ref_inc(...)

unsigned long rt_pages_alloc(rt_size_t size_bits)
{
    static unsigned long i = 0;

    if (i >= MMU_TBL_PAGE_NR_MAX)
    {
        return RT_NULL;
    }
    ++i;
    return (unsigned long)&MMUPage[i - 1].page;
}
#endif

static struct page_table *__init_page_array;
static unsigned long __page_off = 0UL;
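
/*
 * Early-boot table allocator used on the pre-MMU mapping path
 * (_map_single_page_2M / armv8_init_map_2M): on first use it anchors an
 * array of 4 KB tables at the current stack pointer rounded down to a
 * section boundary, reserving slots 0 and 1 for the TTBR0/TTBR1 tables.
 */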
unsigned long get_free_page(void)
{
    if (!__init_page_array)
    {
        unsigned long temp_page_start;
        asm volatile("mov %0, sp":"=r"(temp_page_start));
        __init_page_array = (struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK));
        __page_off = 2; /* 0, 1 for ttbr0, ttbr1 */
    }
    __page_off++;
    return (unsigned long)(__init_page_array[__page_off - 1].page);
}

void mmu_memset(char *dst, char v, size_t len)
{
    while (len--)
    {
        *dst++ = v;
    }
}
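
/*
 * Install a single 2 MB block mapping for va -> pa. This is the early,
 * MMU-off variant: intermediate tables come from get_free_page() and no
 * cache maintenance is performed. _kenrel_map_2M() below is the run-time
 * counterpart that allocates tables with rt_pages_alloc() and flushes the
 * touched descriptors.
 */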
static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            mmu_memset((char *)page, 0, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    return 0;
}

int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
{
    unsigned long i;
    int ret;

    if (va & ARCH_SECTION_MASK)
    {
        return -1;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return -1;
    }
    for (i = 0; i < count; i++)
    {
        ret = _map_single_page_2M(lv0_tbl, va, pa, attr);
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
        if (ret != 0)
        {
            return ret;
        }
    }
    return 0;
}
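
/*
 * Usage sketch (illustrative only): this mirrors how rt_hw_mmu_setup_early()
 * at the bottom of this file uses armv8_init_map_2M() to cover a physically
 * contiguous region with 2 MB blocks. The table pointer (tbl1), region_size
 * and pv_off are placeholders supplied by the boot code.
 *
 *     unsigned long attr  = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);
 *     unsigned long count = (region_size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
 *
 *     if (armv8_init_map_2M(tbl1, KERNEL_VADDR_START,
 *                           KERNEL_VADDR_START + pv_off, count, attr) != 0)
 *     {
 *         while (1); // mapping failed before the MMU is on; nothing to do but spin
 *     }
 */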
static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc(0);
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return 0;
}

struct mmu_level_info
{
    unsigned long *pos;
    void *page;
};

#ifdef RT_USING_LWP
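/*
 * Tear down the 4 KB mapping for v_addr: walk the four levels recording the
 * descriptor position and the next-level table at each step, clear the final
 * page entry, then walk back up dropping a reference on each recorded table
 * (clearing its parent descriptor once the count reaches one), and finally
 * invalidate the TLB entry for the address.
 */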
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    struct mmu_level_info level_info[4];
    int ref;
    int level_shift = MMU_ADDRESS_BITS;
    unsigned long *pos;

    rt_memset(level_info, 0, sizeof level_info);
    for (level = 0; level < MMU_TBL_LEVEL_NR; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        page = cur_lv_tbl[off];
        if (!(page & MMU_TYPE_USED))
        {
            break;
        }
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            break;
        }
        level_info[level].pos = cur_lv_tbl + off;
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_info[level].page = cur_lv_tbl;
        level_shift -= MMU_LEVEL_SHIFT;
    }

    level = MMU_TBL_PAGE_4k_LEVEL;
    pos = level_info[level].pos;
    if (pos)
    {
        *pos = (unsigned long)RT_NULL;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
    }
    level--;

    while (level >= 0)
    {
        pos = level_info[level].pos;
        if (pos)
        {
            void *cur_page = level_info[level].page;
            ref = rt_page_ref_get(cur_page, 0);
            if (ref == 1)
            {
                *pos = (unsigned long)RT_NULL;
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
            }
            rt_pages_free(cur_page, 0);
        }
        level--;
    }
    asm volatile("tlbi vae1, %0\ndsb sy"::"r"(v_addr):"memory");
    return;
}
static int _kenrel_map_4K(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc(0);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at the page level */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_PAGE); /* page */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa; /* page */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;

err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
#endif
static int _kernel_map_fixed(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
{
    unsigned long i;
    int ret;
    unsigned long _attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, attr);

    if (va & ARCH_SECTION_MASK)
    {
        return -1;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return -1;
    }
    for (i = 0; i < count; i++)
    {
        ret = _kenrel_map_2M(lv0_tbl, va, pa, _attr);
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
        if (ret != 0)
        {
            return ret;
        }
    }
    return 0;
}
/************ setting EL1 MMU registers **************
 * MAIR_EL1
 *   index 0 : normal memory, write-back, write/read allocate
 *   index 1 : normal memory, non-cacheable
 *   index 2 : device nGnRnE
 *****************************************************/
void mmu_tcr_init(void)
{
    unsigned long val64;

    val64 = 0x00447fUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n"::"r"(val64));

    /* TCR_EL1 */
    val64 = (16UL << 0)    /* t0sz 48bit */
          | (0x0UL << 6)   /* reserved */
          | (0x0UL << 7)   /* epd0 */
          | (0x3UL << 8)   /* t0 inner wb cacheable */
          | (0x3UL << 10)  /* t0 outer wb cacheable */
          | (0x2UL << 12)  /* t0 outer shareable */
          | (0x0UL << 14)  /* t0 4K */
          | (16UL << 16)   /* t1sz 48bit */
          | (0x0UL << 22)  /* define asid use ttbr0.asid */
          | (0x0UL << 23)  /* epd1 */
          | (0x3UL << 24)  /* t1 inner wb cacheable */
          | (0x3UL << 26)  /* t1 outer wb cacheable */
          | (0x2UL << 28)  /* t1 outer shareable */
          | (0x2UL << 30)  /* t1 4k */
          | (0x1UL << 32)  /* 001b 64GB PA */
          | (0x0UL << 35)  /* reserved */
          | (0x1UL << 36)  /* as: 0:8bit 1:16bit */
          | (0x0UL << 37)  /* tbi0 */
          | (0x0UL << 38); /* tbi1 */
    __asm__ volatile("msr TCR_EL1, %0\n"::"r"(val64));
}
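
/*
 * Note: the MAIR_EL1 value 0x00447f written above encodes the three indexes
 * listed in the banner comment, one byte per index starting from bit 0:
 * attr0 = 0x7f (normal, write-back, read/write allocate), attr1 = 0x44
 * (normal, non-cacheable), attr2 = 0x00 (device-nGnRnE).
 */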
/* dump 2nd level page table */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
}

void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
}

volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));

void rt_hw_mmu_setmtt(unsigned long vaddrStart,
                      unsigned long vaddrEnd,
                      unsigned long paddrStart,
                      unsigned long attr)
{
    unsigned long count;

    if (vaddrStart & ARCH_SECTION_MASK)
    {
        while (1);
    }
    if (paddrStart & ARCH_SECTION_MASK)
    {
        while (1);
    }
    if (vaddrStart > vaddrEnd)
    {
        while (1);
    }
    count = vaddrEnd + 1;
    if (count & ARCH_SECTION_MASK)
    {
        while (1);
    }
    count -= vaddrStart;
    if (count == 0)
    {
        while (1);
    }
    count >>= ARCH_SECTION_SHIFT;
    _kernel_map_fixed((unsigned long *)MMUTable, vaddrStart, paddrStart, count, attr);
}

void rt_hw_mmu_ktbl_set(unsigned long tbl)
{
#ifdef RT_USING_SMART
    tbl += PV_OFFSET;
    __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory");
#else
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory");
#endif
    __asm__ volatile("tlbi vmalle1\n dsb sy\nisb":::"memory");
    __asm__ volatile("ic ialluis\n dsb sy\nisb":::"memory");
}

void rt_hw_mmu_setup(struct mem_desc *mdesc, int desc_nr)
{
    /* set page table */
    for (; desc_nr > 0; desc_nr--)
    {
        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                         mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)MMUTable, sizeof MMUTable);
    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);
}
/**
 * This function will initialize an rt_mmu_info structure.
 *
 * @param mmu_info  rt_mmu_info structure
 * @param v_address virtual address
 * @param size      map size
 * @param vtable    mmu table
 * @param pv_off    pv offset in kernel space
 *
 * @return 0 on success, -1 on failure
 */
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, size_t size, size_t *vtable, size_t pv_off)
{
    size_t va_s, va_e;

    if (!mmu_info || !vtable)
    {
        return -1;
    }
    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;
    if (va_e < va_s)
    {
        return -1;
    }
    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;
    if (va_s == 0)
    {
        return -1;
    }
    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;
    return 0;
}
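
/*
 * Usage sketch (illustrative only, addresses and sizes are placeholders):
 * the structure records the managed window in section-sized units, which is
 * the range later searched by find_vaddr() and validated by check_vaddr();
 * vtable must point at a 4 KB-aligned level-0 table.
 *
 *     static size_t lv0_table[512] __attribute__((aligned(4096)));
 *     static rt_mmu_info example_mmu_info;
 *
 *     if (rt_hw_mmu_map_init(&example_mmu_info, (void *)0x100000000UL,
 *                            0x10000000UL, lv0_table, PV_OFFSET) != 0)
 *     {
 *         // handle the error
 *     }
 */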
int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void *v_address, size_t size)
{
    return 0;
}

#ifdef RT_USING_SMART
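/*
 * find_vaddr() performs a first-fit linear scan for the requested number of
 * consecutive unmapped 4 KB pages inside the window [vstart, vend] recorded
 * in mmu_info; check_vaddr() verifies that a caller-supplied range lies
 * inside that window and is currently unmapped.
 */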
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t loop_pages;
    size_t va;
    size_t find_va = 0;
    int n = 0;
    size_t i;

    if (!pages)
    {
        return 0;
    }
    if (!mmu_info)
    {
        return 0;
    }
    loop_pages = mmu_info->vend - mmu_info->vstart + 1;
    loop_pages <<= (ARCH_SECTION_SHIFT - ARCH_PAGE_SHIFT);
    va = mmu_info->vstart;
    va <<= ARCH_SECTION_SHIFT;
    for (i = 0; i < loop_pages; i++, va += ARCH_PAGE_SIZE)
    {
        if (_rt_hw_mmu_v2p(mmu_info, (void *)va))
        {
            n = 0;
            find_va = 0;
            continue;
        }
        if (!find_va)
        {
            find_va = va;
        }
        n++;
        if (n >= pages)
        {
            return find_va;
        }
    }
    return 0;
}

static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
{
    size_t loop_va;

    if (!pages)
    {
        return -1;
    }
    if (!mmu_info)
    {
        return -1;
    }
    loop_va = ((size_t)va >> ARCH_SECTION_SHIFT);
    if (loop_va < mmu_info->vstart || loop_va > mmu_info->vend)
    {
        return -1;
    }
    loop_va += ((pages << ARCH_PAGE_SHIFT) >> ARCH_SECTION_SHIFT);
    if (loop_va < mmu_info->vstart || loop_va > mmu_info->vend + 1)
    {
        return -1;
    }
    loop_va = (size_t)va & ~ARCH_PAGE_MASK;
    while (pages--)
    {
        if (_rt_hw_mmu_v2p(mmu_info, (void *)loop_va))
        {
            return -1;
        }
        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;
}

static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t npages)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;

    if (!mmu_info || !mmu_info->vtable)
    {
        return;
    }
    while (npages--)
    {
        _kenrel_unmap_4K(mmu_info->vtable, (void *)loop_va);
        loop_va += ARCH_PAGE_SIZE;
    }
}

static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t npages, size_t attr)
{
    int ret = -1;
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t unmap_va = loop_va;

    if (mmu_info)
    {
        while (npages--)
        {
            ret = _kenrel_map_4K(mmu_info->vtable, loop_va, loop_pa, attr);
            if (ret != 0)
            {
                /* error, undo map */
                while (unmap_va != loop_va)
                {
                    _kenrel_unmap_4K(mmu_info->vtable, (void *)unmap_va);
                    unmap_va += ARCH_PAGE_SIZE;
                }
                break;
            }
            loop_va += ARCH_PAGE_SIZE;
            loop_pa += ARCH_PAGE_SIZE;
        }
    }
    return ret;
}
#endif

static void rt_hw_cpu_tlb_invalidate(void)
{
    __asm__ volatile("tlbi vmalle1\n dsb sy\n isb sy\n");
}

#ifdef RT_USING_SMART
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }
    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;
    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        pa_s = (size_t)p_addr;
        if ((vaddr & ARCH_PAGE_MASK) != (pa_s & ARCH_PAGE_MASK))
        {
            return 0;
        }
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }
    if (vaddr)
    {
        rt_enter_critical();
        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            rt_exit_critical();
            return (void *)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
        rt_exit_critical();
    }
    return 0;
}
#else
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *p_addr, size_t size, size_t attr)
{
    return p_addr;
}
#endif

#ifdef RT_USING_SMART
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa;

    if (!mmu_info)
    {
        return -1;
    }
    while (npages--)
    {
        loop_pa = (size_t)rt_pages_alloc(0);
        if (!loop_pa)
        {
            goto err;
        }
        loop_pa += mmu_info->pv_off;
        _kenrel_map_4K(mmu_info->vtable, loop_va, loop_pa, attr);
        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;

err:
    {
        /* error, unmap and quit */
        int i;
        void *va, *pa;

        va = (void *)((size_t)v_addr & ~ARCH_PAGE_MASK);
        for (i = 0; i < npages; i++)
        {
            pa = rt_hw_mmu_v2p(mmu_info, va);
            pa = (void *)((char *)pa - mmu_info->pv_off);
            rt_pages_free(pa, 0);
            va = (void *)((char *)va + ARCH_PAGE_SIZE);
        }
        __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
        return -1;
    }
}

void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    size_t vaddr;
    size_t offset;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }
    offset = (size_t)v_addr & ARCH_PAGE_MASK;
    size += (offset + ARCH_PAGE_SIZE - 1);
    pages = (size >> ARCH_PAGE_SHIFT);
    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }
    if (vaddr)
    {
        rt_enter_critical();
        ret = __rt_hw_mmu_map_auto(mmu_info, (void *)vaddr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            rt_exit_critical();
            return (void *)((char *)vaddr + offset);
        }
        rt_exit_critical();
    }
    return 0;
}

void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
{
    size_t va_s, va_e;
    int pages;

    va_s = (size_t)v_addr;
    va_e = (size_t)v_addr + size - 1;
    va_s >>= ARCH_PAGE_SHIFT;
    va_e >>= ARCH_PAGE_SHIFT;
    pages = va_e - va_s + 1;
    rt_enter_critical();
    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
    rt_exit_critical();
}

void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
{
    void *ret;

    rt_mm_lock();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_mm_unlock();
    return ret;
}

void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    void *ret;

    rt_mm_lock();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_mm_unlock();
    return ret;
}

void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
{
    rt_mm_lock();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_mm_unlock();
}
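
/*
 * Usage sketch (illustrative only): map a physical region into the window
 * registered with rt_hw_mmu_map_init() and release it again. Passing RT_NULL
 * as v_addr lets find_vaddr() pick a free virtual range. The physical
 * address, size and example_mmu_info are placeholders, and DEVICE_MEM is
 * assumed to be an attribute provided by this port's mmu.h; the attribute is
 * normally built with MMU_MAP_CUSTOM().
 *
 *     void *va = rt_hw_mmu_map(&example_mmu_info, RT_NULL,
 *                              (void *)0x40000000UL, 0x2000,
 *                              MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM));
 *     if (va)
 *     {
 *         // ... access the region through va ...
 *         rt_hw_mmu_unmap(&example_mmu_info, va, 0x2000);
 *     }
 */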
#else
#include <cache.h>

void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_size_t desc_nr)
{
    rt_memset((void *)MMUTable, 0, sizeof(MMUTable));
    rt_memset((void *)MMUPage, 0, sizeof(MMUPage));
    /* set page table */
    for (; desc_nr > 0; --desc_nr)
    {
        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end, mdesc->paddr_start, mdesc->attr);
        ++mdesc;
    }
    rt_hw_cpu_dcache_clean((void *)MMUTable, sizeof(MMUTable));
}

void rt_hw_mmu_init(void)
{
    unsigned long reg_val;

    reg_val = 0x00447fUL;
    __asm__ volatile("msr mair_el1, %0"::"r"(reg_val));
    rt_hw_isb();

    reg_val = (16UL << 0)  /* t0sz 48bit */
            | (0UL << 6)   /* reserved */
            | (0UL << 7)   /* epd0 */
            | (3UL << 8)   /* t0 inner wb cacheable */
            | (3UL << 10)  /* t0 outer wb cacheable */
            | (2UL << 12)  /* t0 outer shareable */
            | (0UL << 14)  /* t0 4K */
            | (16UL << 16) /* t1sz 48bit */
            | (0UL << 22)  /* define asid use ttbr0.asid */
            | (0UL << 23)  /* epd1 */
            | (3UL << 24)  /* t1 inner wb cacheable */
            | (3UL << 26)  /* t1 outer wb cacheable */
            | (2UL << 28)  /* t1 outer shareable */
            | (2UL << 30)  /* t1 4k */
            | (1UL << 32)  /* 001b 64GB PA */
            | (0UL << 35)  /* reserved */
            | (1UL << 36)  /* as: 0:8bit 1:16bit */
            | (0UL << 37)  /* tbi0 */
            | (0UL << 38); /* tbi1 */
    __asm__ volatile("msr tcr_el1, %0"::"r"(reg_val));
    rt_hw_isb();

    __asm__ volatile ("mrs %0, sctlr_el1":"=r"(reg_val));
    reg_val |= 1 << 2; /* enable dcache */
    reg_val |= 1 << 0; /* enable mmu */
    __asm__ volatile (
        "msr ttbr0_el1, %0\n\r"
        "msr sctlr_el1, %1\n\r"
        "dsb sy\n\r"
        "isb sy\n\r"
        ::"r"(MMUTable), "r"(reg_val) :"memory");
    rt_hw_cpu_tlb_invalidate();
}
#endif
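
/*
 * Translate a virtual address to its physical address by walking the table
 * pointed to by mmu_info->vtable: the walk returns 0 as soon as an unmapped
 * level is found, stops early when it meets a block descriptor (adding the
 * offset within the block), and otherwise resolves the final 4 KB page entry
 * at level 3.
 */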
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long pa;
    unsigned long *cur_lv_tbl;
    unsigned long page;
    unsigned long off;
    unsigned long off_addr;
    int level_shift = MMU_ADDRESS_BITS;

    if (!mmu_info)
    {
        return (void *)0;
    }
    cur_lv_tbl = mmu_info->vtable;
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            return (void *)0;
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            off_addr = va & ((1UL << level_shift) - 1);
            pa = (page & MMU_ADDRESS_MASK);
            pa += off_addr;
            return (void *)pa;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level MMU_TBL_PAGE_4k_LEVEL */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    page = cur_lv_tbl[off];
    if (!(page & MMU_TYPE_USED))
    {
        return (void *)0;
    }
    pa = (page & MMU_ADDRESS_MASK);
    pa += (va & ARCH_PAGE_MASK);
    return (void *)pa;
}

void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    void *ret;

    rt_mm_lock();
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_mm_unlock();
    return ret;
}
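
/*
 * Build the two early translation tables used while turning the MMU on:
 * tbl1 maps the kernel's virtual window (KERNEL_VADDR_START, size bytes
 * rounded up to 2 MB sections) onto the physical region at
 * KERNEL_VADDR_START + pv_off, while tbl0 identity-maps that same physical
 * region so execution can continue across the switch. Both mappings use
 * 2 MB blocks with normal-memory attributes.
 */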
void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off)
{
    int ret;
    unsigned long va = KERNEL_VADDR_START;
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);

    /* clear the first two pages */
    mmu_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
    mmu_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);

    ret = armv8_init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
    ret = armv8_init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
}