@@ -14,7 +14,7 @@

 #include "mmu.h"

-#ifdef RT_USING_USERSPACE
+#ifdef RT_USING_LWP
 #include <page.h>
 #endif

@@ -34,20 +34,24 @@
 #define MMU_TBL_PAGE_4k_LEVEL 3
 #define MMU_TBL_LEVEL_NR 4

-#define MMU_TBL_PAGE_NR_MAX 32
-
 void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);

 struct page_table
 {
-    unsigned long entry[512];
+    unsigned long page[512];
 };

-/* only map 4G io/memory */
-volatile unsigned long MMUTable[512] __attribute__((aligned(4096)));
+#ifndef RT_USING_LWP
+#define MMU_TBL_PAGE_NR_MAX 32
+
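+/* without lwp the early kernel runs identity mapped, so the
+ * physical-to-virtual offset collapses to zero */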
+#undef PV_OFFSET
+#define PV_OFFSET 0
+
 static volatile struct page_table MMUPage[MMU_TBL_PAGE_NR_MAX] __attribute__((aligned(4096)));

-static unsigned long _kernel_free_page(void)
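+/* no lwp page subsystem in this configuration: emulate rt_pages_alloc()
+ * with the static MMUPage[] pool and stub out the page refcount */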
+#define rt_page_ref_inc(...)
+
+unsigned long rt_pages_alloc(rt_size_t size_bits)
 {
     static unsigned long i = 0;

@@ -58,10 +62,26 @@ static unsigned long _kernel_free_page(void)

     ++i;

-    return (unsigned long)&MMUPage[i - 1].entry;
+    return (unsigned long)&MMUPage[i - 1].page;
 }
+#endif /* !RT_USING_LWP */

-static void mmu_memset(char *dst, char v, size_t len)
+static struct page_table *__init_page_array;
+static unsigned long __page_off = 0UL;
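+/* early page allocator used while building the initial tables: pages come
+ * from the section-aligned region the boot stack pointer falls in, with
+ * slots 0 and 1 reserved for the TTBR0/TTBR1 tables */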
+unsigned long get_free_page(void)
+{
+    if (!__init_page_array)
+    {
+        unsigned long temp_page_start;
+        asm volatile("mov %0, sp":"=r"(temp_page_start));
+        __init_page_array = (struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK));
+        __page_off = 2; /* 0, 1 for ttbr0, ttbr1 */
+    }
+    __page_off++;
+    return (unsigned long)(__init_page_array[__page_off - 1].page);
+}
+
+void mmu_memset(char *dst, char v, size_t len)
 {
     while (len--)
     {
@@ -69,8 +89,7 @@ static void mmu_memset(char *dst, char v, size_t len)
     }
 }

-#ifdef RT_USING_LWP
-static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
+static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
 {
     int level;
     unsigned long *cur_lv_tbl = lv0_tbl;
@@ -92,23 +111,13 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon
         off &= MMU_LEVEL_MASK;
         if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
         {
-            page = (unsigned long)rt_pages_alloc(0);
+            page = get_free_page();
             if (!page)
             {
                 return MMU_MAP_ERROR_NOPAGE;
             }
-            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
-            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
-            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
-            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
-        }
-        else
-        {
-            page = cur_lv_tbl[off];
-            page &= MMU_ADDRESS_MASK;
-            /* page to va */
-            page -= PV_OFFSET;
-            rt_page_ref_inc((void *)page, 0);
+            mmu_memset((char *)page, 0, ARCH_PAGE_SIZE);
+            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
         }
         page = cur_lv_tbl[off];
         if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
@@ -117,7 +126,6 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon
             return MMU_MAP_ERROR_CONFLICT;
         }
         cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
-        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
         level_shift -= MMU_LEVEL_SHIFT;
     }
     attr &= MMU_ATTRIB_MASK;
@@ -125,15 +133,39 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon
     off = (va >> ARCH_SECTION_SHIFT);
     off &= MMU_LEVEL_MASK;
     cur_lv_tbl[off] = pa;
-    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
+    return 0;
+}

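+/* map count contiguous 2 MiB sections starting at va -> pa; both addresses
+ * must be section aligned */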
+int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
+{
+    unsigned long i;
+    int ret;
+
+    if (va & ARCH_SECTION_MASK)
+    {
+        return -1;
+    }
+    if (pa & ARCH_SECTION_MASK)
+    {
+        return -1;
+    }
+    for (i = 0; i < count; i++)
+    {
+        ret = _map_single_page_2M(lv0_tbl, va, pa, attr);
+        va += ARCH_SECTION_SIZE;
+        pa += ARCH_SECTION_SIZE;
+        if (ret != 0)
+        {
+            return ret;
+        }
+    }
     return 0;
 }
-#else
-static int _kenrel_map_2M(unsigned long *tbl, unsigned long va, unsigned long pa, unsigned long attr)
+
+static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
 {
     int level;
-    unsigned long *cur_lv_tbl = tbl;
+    unsigned long *cur_lv_tbl = lv0_tbl;
     unsigned long page;
     unsigned long off;
     int level_shift = MMU_ADDRESS_BITS;
@@ -146,46 +178,42 @@ static int _kenrel_map_2M(unsigned long *tbl, unsigned long va, unsigned long pa
     {
         return MMU_MAP_ERROR_PANOTALIGN;
     }
-
-    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; ++level)
+    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
     {
         off = (va >> level_shift);
         off &= MMU_LEVEL_MASK;
-
         if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
         {
-            page = _kernel_free_page();
-
+            page = (unsigned long)rt_pages_alloc(0);
             if (!page)
             {
                 return MMU_MAP_ERROR_NOPAGE;
             }
-
             rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
             rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
-            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
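+            /* table entries hold physical addresses, so translate with PV_OFFSET */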
+            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
             rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
         }
         else
         {
             page = cur_lv_tbl[off];
             page &= MMU_ADDRESS_MASK;
+            /* page to va */
+            page -= PV_OFFSET;
+            rt_page_ref_inc((void *)page, 0);
         }
-
         page = cur_lv_tbl[off];
         if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
         {
             /* is block! error! */
             return MMU_MAP_ERROR_CONFLICT;
         }
-
-        /* next level */
         cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
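+        /* the entry is a physical pointer; move it back to the virtual
+         * address space before walking the next level */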
+        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
         level_shift -= MMU_LEVEL_SHIFT;
     }
-
     attr &= MMU_ATTRIB_MASK;
-    pa |= (attr | MMU_TYPE_BLOCK);
+    pa |= (attr | MMU_TYPE_BLOCK); /* block */
     off = (va >> ARCH_SECTION_SHIFT);
     off &= MMU_LEVEL_MASK;
     cur_lv_tbl[off] = pa;
@@ -193,7 +221,6 @@ static int _kenrel_map_2M(unsigned long *tbl, unsigned long va, unsigned long pa

     return 0;
 }
-#endif

 struct mmu_level_info
 {
@@ -329,17 +356,6 @@ err:
     _kenrel_unmap_4K(lv0_tbl, (void *)va);
     return ret;
 }
-#else
-static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
-{
-
-}
-
-static int _kenrel_map_4K(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
-{
-
-    return 0;
-}
 #endif

 int kernel_map_fixed(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
@@ -450,7 +466,7 @@ void rt_hw_mmu_setmtt(unsigned long vaddrStart,

 void kernel_mmu_switch(unsigned long tbl)
 {
-#ifdef RT_USING_USERSPACE
+#ifdef RT_USING_LWP
     tbl += PV_OFFSET;
     __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory");
 #else
@@ -570,7 +586,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }

-#ifdef RT_USING_USERSPACE
+#ifdef RT_USING_LWP
 static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
 {
     size_t loop_va;
@@ -607,7 +623,6 @@ static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
     }
     return 0;
 }
-#endif

 static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t npages)
 {
@@ -653,13 +668,14 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, si
     }
     return ret;
 }
+#endif /* RT_USING_LWP */

 static void rt_hw_cpu_tlb_invalidate(void)
 {
     __asm__ volatile("tlbi vmalle1\n dsb sy\n isb sy\n");
 }

-#ifdef RT_USING_USERSPACE
+#ifdef RT_USING_LWP
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
 {
     size_t pa_s, pa_e;
@@ -720,7 +736,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
     pages = pa_e - pa_s + 1;
     vaddr = find_vaddr(mmu_info, pages);
     if (vaddr) {
-        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
+        ret = 0; //TODO ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
         if (ret == 0)
         {
             rt_hw_cpu_tlb_invalidate();
@@ -731,7 +747,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
 }
 #endif

-#ifdef RT_USING_USERSPACE
+#ifdef RT_USING_LWP
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t npages, size_t attr)
 {
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
@@ -812,7 +828,6 @@ void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size
     }
     return 0;
 }
-#endif

 void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
 {
@@ -828,7 +843,6 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
     rt_hw_cpu_tlb_invalidate();
 }

-#ifdef RT_USING_USERSPACE
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
 {
     void *ret;
@@ -850,7 +864,6 @@ void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_
     rt_hw_interrupt_enable(level);
     return ret;
 }
-#endif

 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
 {
@@ -860,6 +873,7 @@ void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
     _rt_hw_mmu_unmap(mmu_info, v_addr, size);
     rt_hw_interrupt_enable(level);
 }
+#endif /* RT_USING_LWP */

 void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
 {
@@ -922,97 +936,6 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
 }


-///////////////////////////////////////////////////////
-static struct page_table *__init_page_array;
-static unsigned long __page_off = 0UL;
-
-static unsigned long get_free_page(void)
-{
-    if (!__init_page_array)
-    {
-        unsigned long temp_page_start;
-        asm volatile("mov %0, sp":"=r"(temp_page_start));
-        __init_page_array = (struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK));
-        __page_off = 2; /* 0, 1 for ttbr0, ttrb1 */
-    }
-    __page_off++;
-
-    return (unsigned long)(__init_page_array[__page_off - 1].entry);
-}
-
-static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
-{
-    int level;
-    unsigned long *cur_lv_tbl = lv0_tbl;
-    unsigned long page;
-    unsigned long off;
-    int level_shift = MMU_ADDRESS_BITS;
-
-    if (va & ARCH_SECTION_MASK)
-    {
-        return MMU_MAP_ERROR_VANOTALIGN;
-    }
-    if (pa & ARCH_SECTION_MASK)
-    {
-        return MMU_MAP_ERROR_PANOTALIGN;
-    }
-    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
-    {
-        off = (va >> level_shift);
-        off &= MMU_LEVEL_MASK;
-        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
-        {
-            page = get_free_page();
-            if (!page)
-            {
-                return MMU_MAP_ERROR_NOPAGE;
-            }
-            mmu_memset((char *)page, 0, ARCH_PAGE_SIZE);
-            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
-        }
-        page = cur_lv_tbl[off];
-        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
-        {
-            /* is block! error! */
-            return MMU_MAP_ERROR_CONFLICT;
-        }
-        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
-        level_shift -= MMU_LEVEL_SHIFT;
-    }
-    attr &= MMU_ATTRIB_MASK;
-    pa |= (attr | MMU_TYPE_BLOCK); /* block */
-    off = (va >> ARCH_SECTION_SHIFT);
-    off &= MMU_LEVEL_MASK;
-    cur_lv_tbl[off] = pa;
-    return 0;
-}
-
-int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
-{
-    unsigned long i;
-    int ret;
-
-    if (va & ARCH_SECTION_MASK)
-    {
-        return -1;
-    }
-    if (pa & ARCH_SECTION_MASK)
-    {
-        return -1;
-    }
-    for (i = 0; i < count; i++)
-    {
-        ret = _map_single_page_2M(lv0_tbl, va, pa, attr);
-        va += ARCH_SECTION_SIZE;
-        pa += ARCH_SECTION_SIZE;
-        if (ret != 0)
-        {
-            return ret;
-        }
-    }
-    return 0;
-}
-
 void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off)
 {
     int ret;
@@ -1022,7 +945,6 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo

     /* clean the first two pages */
     mmu_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
-#ifdef RT_USING_USERSPACE
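+    /* the high (TTBR1) tables are now built unconditionally */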
     mmu_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);

     ret = armv8_init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
@@ -1030,7 +952,6 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo
     {
         while (1);
     }
-#endif
     ret = armv8_init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
     if (ret != 0)
     {