mmu.c
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 * 2023-10-10     Shell        Add permission control API
 */

#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

#include <board.h>
#include "cp15.h"
#include "mm_page.h"
#include "mmu.h"
#include <mm_aspace.h>
#include <tlb.h>

#ifdef RT_USING_SMART
#include <lwp_mm.h>
#include <lwp_arch.h>
#include "ioremap.h"
#else
#define KERNEL_VADDR_START 0
#endif

/* level-1 page table; each entry maps 1 MB of memory. */
volatile unsigned long MMUTable[4 * 1024] __attribute__((aligned(16 * 1024)));
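
/*
 * Program the domain access control register (CP15 c3) and return the
 * previous value so the caller can restore it later.
 */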
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long old_domain;

    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : : "r" (domain_val) : "memory");
    return old_domain;
}
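
/*
 * Fill level-1 section entries so that [vaddrStart, vaddrEnd] maps to
 * paddrStart with the given section attributes. Addresses are truncated to
 * 1 MB sections and the end address is inclusive.
 */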
void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd,
                      rt_uint32_t paddrStart, rt_uint32_t attr)
{
    volatile rt_uint32_t *pTT;
    volatile int i, nSec;

    pTT  = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
    nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
    for (i = 0; i <= nSec; i++)
    {
        *pTT = attr | (((paddrStart >> 20) + i) << 20);
        pTT++;
    }
}
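
/*
 * Build the early boot page table. The kernel image is mapped twice: once at
 * its link-time (virtual) address and once, identity-mapped, at its load
 * (physical) address, so that boot code keeps running across the MMU-enable
 * transition. pv_off is the physical-minus-virtual offset; every other entry
 * is left invalid.
 */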
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
{
    unsigned int va;

    for (va = 0; va < 0x1000; va++)
    {
        unsigned int vaddr = (va << 20);

        if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size)
        {
            /* kernel virtual address -> physical address */
            mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
        }
        else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size)
        {
            /* identity mapping of the kernel's physical region */
            mtbl[va] = (va << 20) | NORMAL_MEM;
        }
        else
        {
            mtbl[va] = 0;
        }
    }
}

#ifndef RT_USING_SMART
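/*
 * Map a contiguous range with 1 MB section entries in the given level-1
 * table; early-boot helper for the non-Smart configuration.
 */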
static void _init_map_section(rt_uint32_t *mmu_table, rt_uint32_t va,
                              rt_uint32_t size, rt_uint32_t pa, rt_uint32_t attr)
{
    volatile rt_uint32_t *ptt;
    volatile int i, num_section;

    ptt = (rt_uint32_t *)mmu_table + (va >> ARCH_SECTION_SHIFT);
    num_section = size >> ARCH_SECTION_SHIFT;
    for (i = 0; i <= num_section; i++)
    {
        *ptt = attr | (((pa >> ARCH_SECTION_SHIFT) + i) << ARCH_SECTION_SHIFT);
        ptt++;
    }
}
#endif
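
/*
 * Build the early page table before the MMU is enabled. The size of the
 * mapping is derived from the end of .bss, rounded up to a 1 MB section.
 */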
void rt_hw_mem_setup_early(rt_uint32_t *early_mmu_table, rt_uint32_t pv_off)
{
    rt_uint32_t size = 0;

    /* round the end of the image up to the next 1 MB section boundary */
    size = 0x100000 + (rt_uint32_t)&__bss_end;
    size &= ~(0x100000 - 1);
#ifdef RT_USING_SMART
    size -= KERNEL_VADDR_START;
    init_mm_setup(early_mmu_table, size, pv_off);
#else
    rt_uint32_t normal_attr = NORMAL_MEM;
    extern unsigned char _reset;
    rt_uint32_t va = (rt_uint32_t)&_reset;

    /* The link base is assumed to be 16 MB aligned; keep only the offset of
     * _reset within that window. */
    va &= (0x1000000 - 1);
    size -= va;
    _init_map_section(early_mmu_table, va, size, va + pv_off, normal_attr);
#endif
}
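
/*
 * Register each memory descriptor with the kernel address space and install
 * the matching level-1 section mappings, then flush the table from the
 * D-cache so the hardware table walker sees it.
 */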
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
    void *vaddr;
    size_t length;

    /* init kernel space */
#ifdef RT_USING_SMART
    rt_aspace_init(&rt_kernel_space, (void *)USER_VADDR_TOP,
                   -USER_VADDR_TOP, (void *)MMUTable);
#else
    rt_aspace_init(&rt_kernel_space, (void *)0x1000, 0 - 0x1000, (void *)MMUTable);
#endif /* RT_USING_SMART */

    /* set page table */
    for (; size > 0; size--)
    {
        if (mdesc->paddr_start == (rt_uint32_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;

        vaddr  = (void *)mdesc->vaddr_start;
        length = mdesc->vaddr_end - mdesc->vaddr_start;
        rt_aspace_map_static(&rt_kernel_space, &mdesc->varea, &vaddr, length,
                             mdesc->attr, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0);

        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                         mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }

    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)MMUTable, sizeof MMUTable);
}
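
/*
 * Bring the MMU up on MMUTable: flush and disable the caches, set every
 * domain to "client" (0x55555555, so per-entry access permissions are
 * checked), load the translation table base, then re-enable the MMU and
 * caches.
 */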
void rt_hw_mmu_init(void)
{
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();

    /*rt_hw_cpu_dump_page_table(MMUTable);*/
    rt_hw_set_domain_register(0x55555555);

    rt_cpu_tlb_set(MMUTable);

    rt_cpu_mmu_enable();

    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}
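
/*
 * Validate that [v_address, v_address + size) is a usable, currently unmapped
 * window in vtable, then record it as the ioremap range (Smart) and place the
 * memory-protection region (rt_mpr_start) just below it, or at the top of the
 * address space when RT_USING_SMART is off.
 */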
int rt_hw_mmu_map_init(struct rt_aspace *aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off)
{
    size_t l1_off, va_s, va_e;

    if (!aspace || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;
    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;
    if (va_s == 0)
    {
        return -1;
    }

    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v & ARCH_MMU_USED_MASK)
        {
            return -1;
        }
    }

#ifdef RT_USING_SMART
    rt_ioremap_start = v_address;
    rt_ioremap_size = size;
    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
#else
    rt_mpr_start = (void *)((rt_size_t)0 - rt_mpr_size);
#endif

    return 0;
}
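
/*
 * With RT_IOREMAP_LATE, pre-allocate a level-2 page table for every section
 * of the ioremap window so that later ioremap mappings can be installed
 * without allocating a table on demand. Otherwise this is a no-op.
 */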
int rt_hw_mmu_ioremap_init(rt_aspace_t aspace, void *v_address, size_t size)
{
#ifdef RT_IOREMAP_LATE
    size_t loop_va;
    size_t l1_off;
    size_t *mmu_l1, *mmu_l2;
    size_t sections;

    /* for kernel ioremap */
    if ((size_t)v_address < KERNEL_VADDR_START)
    {
        return -1;
    }

    /* must align to section */
    if ((size_t)v_address & ARCH_SECTION_MASK)
    {
        return -1;
    }

    /* must align to section */
    if (size & ARCH_SECTION_MASK)
    {
        return -1;
    }

    loop_va  = (size_t)v_address;
    sections = (size >> ARCH_SECTION_SHIFT);
    while (sections--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        mmu_l1 = (size_t *)aspace->page_table + l1_off;

        RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);
        mmu_l2 = (size_t *)rt_pages_alloc(0);
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2, ARCH_PAGE_TBL_SIZE);
            *mmu_l1 = (((size_t)mmu_l2 + PV_OFFSET) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
        else
        {
            /* error */
            return -1;
        }

        loop_va += ARCH_SECTION_SIZE;
    }
#endif

    return 0;
}
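
/*
 * Remove the 4 KB mapping for v_addr from lv0_tbl: clear the level-2 entry,
 * and if that drops the level-2 table's last reference, release the table and
 * clear the level-1 entry as well. The cache lines covering the modified
 * entries are flushed so the hardware walker observes the change.
 */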
static void _kernel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    l1_off = (loop_va >> ARCH_SECTION_SHIFT);
    l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
    mmu_l1 = (size_t *)lv0_tbl + l1_off;

    if (*mmu_l1 & ARCH_MMU_USED_MASK)
    {
        mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
    }
    else
    {
        return;
    }

    if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
    {
        *(mmu_l2 + l2_off) = 0;
        /* cache maintain */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2 + l2_off, 4);

        if (rt_pages_free(mmu_l2, 0))
        {
            *mmu_l1 = 0;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
    }
}
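
/*
 * Install a single 4 KB mapping v_addr -> p_addr with the given attributes.
 * A level-2 table is allocated on first use of the section, and its page
 * reference count tracks how many mappings share it. Returns 0 on success,
 * -1 if the table allocation fails.
 */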
static int _kernel_map_4K(unsigned long *lv0_tbl, void *v_addr, void *p_addr,
                          size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    l1_off = (loop_va >> ARCH_SECTION_SHIFT);
    l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
    mmu_l1 = (size_t *)lv0_tbl + l1_off;

    if (*mmu_l1 & ARCH_MMU_USED_MASK)
    {
        mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
        rt_page_ref_inc(mmu_l2, 0);
    }
    else
    {
        mmu_l2 = (size_t *)rt_pages_alloc(0);
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2, ARCH_PAGE_TBL_SIZE);
            *mmu_l1 = (((size_t)mmu_l2 + PV_OFFSET) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
        else
        {
            /* error, quit */
            return -1;
        }
    }

    *(mmu_l2 + l2_off) = (loop_pa | attr);
    /* cache maintain */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2 + l2_off, 4);

    return 0;
}
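
/*
 * Map size bytes (page-granular) from p_addr to v_addr, one 4 KB page at a
 * time. On the first failure every page mapped so far is rolled back and
 * NULL is returned; on success the base of the mapped range is returned.
 */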
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;
    void *unmap_va = v_addr;
    size_t npages = size >> ARCH_PAGE_SHIFT;

    // TODO trying with HUGEPAGE here
    while (npages--)
    {
        ret = _kernel_map_4K(aspace->page_table, v_addr, p_addr, attr);
        if (ret != 0)
        {
            /* error, undo the pages mapped so far */
            while (unmap_va != v_addr)
            {
                rt_enter_critical();
                _kernel_unmap_4K(aspace->page_table, (void *)unmap_va);
                rt_exit_critical();
                unmap_va += ARCH_PAGE_SIZE;
            }
            break;
        }
        v_addr += ARCH_PAGE_SIZE;
        p_addr += ARCH_PAGE_SIZE;
    }

    if (ret == 0)
    {
        /* return the base of the mapped range, not the advanced cursor */
        return unmap_va;
    }

    return NULL;
}
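
/*
 * Unmap size bytes starting at v_addr, one 4 KB page at a time. The caller
 * guarantees that v_addr and size are page aligned.
 */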
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    size_t npages = size >> ARCH_PAGE_SHIFT;

    if (!aspace->page_table)
    {
        return;
    }

    while (npages--)
    {
        rt_enter_critical();
        _kernel_unmap_4K(aspace->page_table, v_addr);
        rt_exit_critical();
        v_addr += ARCH_PAGE_SIZE;
    }
}
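
/*
 * Activate aspace's page table on the current CPU and invalidate the local
 * TLB. Switching to the kernel space itself is a no-op, since every page
 * table created by rt_hw_mmu_pgtbl_create() shares the kernel entries.
 */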
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        void *pgtbl = aspace->page_table;

        pgtbl = rt_kmem_v2p(pgtbl);
        rt_hw_mmu_switch(pgtbl);
        rt_hw_tlb_invalidate_all_local();
    }
}
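
/*
 * Walk the page table by hand to translate v_addr to a physical address.
 * Level-1 descriptor bits [1:0] select the type: 0 = fault, 1 = level-2 page
 * table, 2/3 = section. Large pages and supersections are not supported;
 * ARCH_MAP_FAILED is returned when no translation exists.
 */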
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t tmp;
    size_t pa;

    l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;

    RT_ASSERT(aspace);

    mmu_l1 = (size_t *)aspace->page_table + l1_off;
    tmp = *mmu_l1;

    switch (tmp & ARCH_MMU_USED_MASK)
    {
    case 0: /* not used */
        break;

    case 1: /* page table */
        mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
        l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        pa = *(mmu_l2 + l2_off);
        if (pa & ARCH_MMU_USED_MASK)
        {
            if ((pa & ARCH_MMU_USED_MASK) == 1)
            {
                /* large page, not supported */
                break;
            }
            pa &= ~(ARCH_PAGE_MASK);
            pa += ((size_t)v_addr & ARCH_PAGE_MASK);
            return (void *)pa;
        }
        break;

    case 2:
    case 3:
        /* section */
        if (tmp & ARCH_TYPE_SUPERSECTION)
        {
            /* supersection, not supported */
            break;
        }
        pa = (tmp & ~ARCH_SECTION_MASK);
        pa += ((size_t)v_addr & ARCH_SECTION_MASK);
        return (void *)pa;
    }

    return ARCH_MAP_FAILED;
}
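
/* Page-attribute control is not implemented for this architecture yet. */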
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    return -RT_ENOSYS;
}

#define KPTE_START (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT)
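
/*
 * Allocate a 16 KB (four-page) level-1 table for a new address space: copy
 * the kernel entries from KPTE_START upward, zero the user portion below
 * them, and flush the whole table so the hardware walker sees it.
 */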
void *rt_hw_mmu_pgtbl_create(void)
{
    size_t *mmu_table;

    mmu_table = (size_t *)rt_pages_alloc_ext(2, PAGE_ANY_AVAILABLE);
    if (!mmu_table)
    {
        return RT_NULL;
    }

    rt_memcpy(mmu_table + KPTE_START,
              (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
    rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);

    return mmu_table;
}
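
/* Release a level-1 table allocated by rt_hw_mmu_pgtbl_create(). */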
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
    rt_pages_free(pgtbl, 2);
}