/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 */
#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

#include <board.h>
#include "cp15.h"
#include "mm_page.h"
#include "mmu.h"
#include <mm_aspace.h>
#include <tlb.h>

#ifdef RT_USING_SMART
#include <lwp_mm.h>
#include <lwp_arch.h>
#include "ioremap.h"
#else
#define KERNEL_VADDR_START 0
#endif
/* Level-1 page table: 4096 entries, each mapping 1MB of the 4GB address
 * space. The ARMv7 translation table base must be 16KB aligned. */
volatile unsigned long MMUTable[4 * 1024] __attribute__((aligned(16 * 1024)));
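
/* Program the Domain Access Control Register (CP15 c3) and return its
 * previous value. rt_hw_mmu_init() writes 0x55555555, i.e. every domain
 * is set to "client", so accesses are checked against the page-table
 * permission bits. */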
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long old_domain;

    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : : "r" (domain_val) : "memory");

    return old_domain;
}
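
/* Fill level-1 section descriptors so that virtual [vaddrStart, vaddrEnd]
 * maps the physical range starting at paddrStart with attributes attr.
 * Each entry covers 1MB; the loop bound is inclusive, so vaddrEnd is the
 * last address of the range, not one past it. */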
void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd,
                      rt_uint32_t paddrStart, rt_uint32_t attr)
{
    volatile rt_uint32_t *pTT;
    volatile int i, nSec;

    pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
    nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
    for (i = 0; i <= nSec; i++)
    {
        *pTT = attr | (((paddrStart >> 20) + i) << 20);
        pTT++;
    }
}
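
/* Build the initial kernel page table from the board's memory descriptor
 * array: register each region with the kernel address space, emit the
 * matching 1MB section entries, and finally flush the table from the
 * D-cache so the hardware table walk sees consistent memory. A
 * paddr_start of ARCH_MAP_FAILED means "derive the physical address from
 * the virtual one via PV_OFFSET". */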
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
    void *vaddr;
    size_t length;

    /* init kernel space */
#ifdef RT_USING_SMART
    rt_aspace_init(&rt_kernel_space, (void *)USER_VADDR_TOP, -USER_VADDR_TOP,
                   (void *)MMUTable);
#else
    rt_aspace_init(&rt_kernel_space, (void *)0x1000, 0 - 0x1000, (void *)MMUTable);
#endif /* RT_USING_SMART */

    /* set page table */
    for (; size > 0; size--)
    {
        if (mdesc->paddr_start == (rt_uint32_t)ARCH_MAP_FAILED)
        {
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
        }

        vaddr = (void *)mdesc->vaddr_start;
        length = mdesc->vaddr_end - mdesc->vaddr_start;
        rt_aspace_map_static(&rt_kernel_space, &mdesc->varea, &vaddr, length,
                             mdesc->attr, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0);

        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                         mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }

    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)MMUTable, sizeof MMUTable);
}
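
/* Enable the MMU with MMUTable as the translation table base. Caches are
 * cleaned and then disabled around the switch so no stale lines survive
 * it; all sixteen domains are put in client mode before translation is
 * re-enabled. */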
void rt_hw_mmu_init(void)
{
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();

    /* rt_hw_cpu_dump_page_table(MMUTable); */
    rt_hw_set_domain_register(0x55555555);

    rt_cpu_tlb_set(MMUTable);

    rt_cpu_mmu_enable();

    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}
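
/* Validate and record the virtual window [v_address, v_address + size)
 * reserved for ioremap. The range must not wrap, must not begin in the
 * first 1MB section, and every level-1 entry it covers must still be
 * unused. Under RT_USING_SMART the rt_mpr region is placed directly
 * below the ioremap window. Returns 0 on success, -1 otherwise. */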
int rt_hw_mmu_map_init(struct rt_aspace *aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off)
{
    size_t l1_off, va_s, va_e;

    if (!aspace || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v & ARCH_MMU_USED_MASK)
        {
            return -1;
        }
    }

#ifdef RT_USING_SMART
    rt_ioremap_start = v_address;
    rt_ioremap_size = size;
    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
#else
    rt_mpr_start = (void *)0 - rt_mpr_size;
#endif

    return 0;
}
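
/* When RT_IOREMAP_LATE is enabled, pre-allocate a zeroed level-2 table
 * for every 1MB section of the ioremap window so that later mapping
 * calls cannot fail on table allocation. Both v_address and size must be
 * section aligned. Without RT_IOREMAP_LATE this is a no-op that reports
 * success. */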
int rt_hw_mmu_ioremap_init(rt_aspace_t aspace, void *v_address, size_t size)
{
#ifdef RT_IOREMAP_LATE
    size_t loop_va;
    size_t l1_off;
    size_t *mmu_l1, *mmu_l2;
    size_t sections;

    /* for kernel ioremap */
    if ((size_t)v_address < KERNEL_VADDR_START)
    {
        return -1;
    }

    /* must align to section */
    if ((size_t)v_address & ARCH_SECTION_MASK)
    {
        return -1;
    }

    /* must align to section */
    if (size & ARCH_SECTION_MASK)
    {
        return -1;
    }

    loop_va = (size_t)v_address;
    sections = (size >> ARCH_SECTION_SHIFT);
    while (sections--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        mmu_l1 = (size_t *)aspace->page_table + l1_off;
        RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);

        mmu_l2 = (size_t *)rt_pages_alloc(0);
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2, ARCH_PAGE_TBL_SIZE);

            *mmu_l1 = (((size_t)mmu_l2 + PV_OFFSET) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
        else
        {
            /* error */
            return -1;
        }

        loop_va += ARCH_SECTION_SIZE;
    }
#endif

    return 0;
}
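
/* Clear the level-2 entry that maps the 4KB page at v_addr. The level-1
 * entry stores a physical pointer, so PV_OFFSET is subtracted to reach
 * the level-2 table through the kernel's virtual mapping. If
 * rt_pages_free() drops the last reference to the level-2 table, the
 * level-1 entry is cleared as well. Every modified entry is flushed from
 * the D-cache so the table walker observes the change. */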
static void _kernel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    l1_off = (loop_va >> ARCH_SECTION_SHIFT);
    l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
    mmu_l1 = (size_t *)lv0_tbl + l1_off;

    if (*mmu_l1 & ARCH_MMU_USED_MASK)
    {
        mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
    }
    else
    {
        return;
    }

    if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
    {
        *(mmu_l2 + l2_off) = 0;
        /* cache maintain */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2 + l2_off, 4);

        if (rt_pages_free(mmu_l2, 0))
        {
            *mmu_l1 = 0;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
    }
}
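
/* Install a single 4KB mapping from v_addr to p_addr. If the enclosing
 * section already has a level-2 table, take another reference on it;
 * otherwise allocate and zero a fresh one and hook it into the level-1
 * entry (descriptor type 0b01 = page table). Returns 0 on success, -1 if
 * the allocation fails. */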
static int _kernel_map_4K(unsigned long *lv0_tbl, void *v_addr, void *p_addr,
                          size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    l1_off = (loop_va >> ARCH_SECTION_SHIFT);
    l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
    mmu_l1 = (size_t *)lv0_tbl + l1_off;

    if (*mmu_l1 & ARCH_MMU_USED_MASK)
    {
        mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
        rt_page_ref_inc(mmu_l2, 0);
    }
    else
    {
        mmu_l2 = (size_t *)rt_pages_alloc(0);
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2, ARCH_PAGE_TBL_SIZE);

            *mmu_l1 = (((size_t)mmu_l2 + PV_OFFSET) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
        else
        {
            /* error, quit */
            return -1;
        }
    }

    *(mmu_l2 + l2_off) = (loop_pa | attr);
    /* cache maintain */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2 + l2_off, 4);

    return 0;
}
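
/* Map size bytes (a whole number of 4KB pages) from p_addr to v_addr,
 * one page at a time. On the first failure, every page mapped so far is
 * undone, so the call either maps the full range or nothing; it returns
 * the base of the mapped range on success and NULL on failure.
 *
 * Illustrative call only; the addresses and the DEVICE_MEM attribute are
 * hypothetical, not defined in this file:
 *
 *     void *va = rt_hw_mmu_map(&rt_kernel_space, (void *)0xe0000000,
 *                              (void *)0x44000000, 2 * ARCH_PAGE_SIZE,
 *                              DEVICE_MEM);
 */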
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;
    void *unmap_va = v_addr;
    size_t npages = size >> ARCH_PAGE_SHIFT;

    // TODO trying with HUGEPAGE here
    while (npages--)
    {
        ret = _kernel_map_4K(aspace->page_table, v_addr, p_addr, attr);
        if (ret != 0)
        {
            /* error, undo map */
            while (unmap_va != v_addr)
            {
                rt_enter_critical();
                _kernel_unmap_4K(aspace->page_table, (void *)unmap_va);
                rt_exit_critical();
                unmap_va += ARCH_PAGE_SIZE;
            }
            break;
        }
        v_addr += ARCH_PAGE_SIZE;
        p_addr += ARCH_PAGE_SIZE;
    }

    if (ret == 0)
    {
        /* unmap_va still holds the base of the freshly mapped range */
        return unmap_va;
    }

    return NULL;
}
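
/* Remove the 4KB mappings covering [v_addr, v_addr + size), releasing
 * any level-2 table that becomes empty along the way. */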
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    /* caller guarantees that v_addr and size are page aligned */
    size_t npages = size >> ARCH_PAGE_SHIFT;

    if (!aspace->page_table)
    {
        return;
    }

    while (npages--)
    {
        rt_enter_critical();
        _kernel_unmap_4K(aspace->page_table, v_addr);
        rt_exit_critical();
        v_addr += ARCH_PAGE_SIZE;
    }
}
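
/* Switch to a user address space: resolve its page table to a physical
 * address, install that as the translation table base, and invalidate
 * the local TLB. Nothing is done for rt_kernel_space, whose mappings are
 * present in every page table. */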
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        void *pgtbl = aspace->page_table;
        pgtbl = rt_kmem_v2p(pgtbl);

        rt_hw_mmu_switch(pgtbl);
        rt_hw_tlb_invalidate_all_local();
    }
}
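
/* Early boot helper, typically run before the MMU is enabled: build a
 * temporary section table in mtbl. Sections inside the kernel's virtual
 * window map to physical memory shifted by pv_off; the physical alias of
 * that window is identity mapped so the currently running code survives
 * the switch; everything else is left unmapped. */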
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
{
    unsigned int va;

    for (va = 0; va < 0x1000; va++)
    {
        unsigned int vaddr = (va << 20);
        if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size)
        {
            /* kernel window: virtual to physical, shifted by pv_off */
            mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
        }
        else if (vaddr >= (KERNEL_VADDR_START + pv_off) &&
                 vaddr - (KERNEL_VADDR_START + pv_off) < size)
        {
            /* identity map the physical alias of the kernel window */
            mtbl[va] = (va << 20) | NORMAL_MEM;
        }
        else
        {
            /* no access */
            mtbl[va] = 0;
        }
    }
}
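
/* Walk the page tables in software to translate a virtual address into a
 * physical one. Small-page and section descriptors are handled; large
 * pages and supersections are not and yield ARCH_MAP_FAILED, as does an
 * unmapped address. E.g. (hypothetical address):
 *
 *     void *pa = rt_hw_mmu_v2p(&rt_kernel_space, (void *)0xc0001234);
 */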
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t tmp;
    size_t pa;

    l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;

    RT_ASSERT(aspace);

    mmu_l1 = (size_t *)aspace->page_table + l1_off;

    tmp = *mmu_l1;

    switch (tmp & ARCH_MMU_USED_MASK)
    {
    case 0: /* not used */
        break;
    case 1: /* page table */
        mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
        l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        pa = *(mmu_l2 + l2_off);
        if (pa & ARCH_MMU_USED_MASK)
        {
            if ((pa & ARCH_MMU_USED_MASK) == 1)
            {
                /* large page, not supported */
                break;
            }
            pa &= ~(ARCH_PAGE_MASK);
            pa += ((size_t)v_addr & ARCH_PAGE_MASK);
            return (void *)pa;
        }
        break;
    case 2:
    case 3:
        /* section */
        if (tmp & ARCH_TYPE_SUPERSECTION)
        {
            /* supersection, not supported */
            break;
        }
        pa = (tmp & ~ARCH_SECTION_MASK);
        pa += ((size_t)v_addr & ARCH_SECTION_MASK);
        return (void *)pa;
    }

    return ARCH_MAP_FAILED;
}
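
/* Changing the attributes of an existing mapping is not implemented on
 * this port; callers get -RT_ENOSYS. */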
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    return -RT_ENOSYS;
}