@@ -61,28 +61,28 @@ void *rt_hw_mmu_tbl_get()
 static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
                          size_t attr)
 {
-    rt_size_t l1_off, l2_off, l3_off;
-    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
+    rt_ubase_t l1_off, l2_off, l3_off;
+    rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
 
     l1_off = GET_L1((size_t)va);
     l2_off = GET_L2((size_t)va);
     l3_off = GET_L3((size_t)va);
 
-    mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;
+    mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
 
     if (PTE_USED(*mmu_l1))
     {
-        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
+        mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
     }
     else
    {
-        mmu_l2 = (rt_size_t *)rt_pages_alloc(0);
+        mmu_l2 = (rt_ubase_t *)rt_pages_alloc(0);
 
         if (mmu_l2)
         {
             rt_memset(mmu_l2, 0, PAGE_SIZE);
             rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE);
-            *mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
+            *mmu_l1 = COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
                                  PAGE_DEFAULT_ATTR_NEXT);
             rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
         }
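
Note on the type change: rt_size_t tracks the C size_t, which is only guaranteed to hold an object size, while a page-table entry, a level index, and a physical address all want the full unsigned register width. A minimal sketch of the assumed relationship between the types (the real typedefs live in rtdef.h; these stand-ins are illustrative only):

    /* Illustration only; see rtdef.h for the real definitions. */
    typedef unsigned long rt_ubase_t; /* unsigned, CPU register width */
    typedef long          rt_base_t;  /* signed counterpart           */

    /* On RV64 a PTE occupies a full 64-bit machine word, so PTE
     * slots and address arithmetic are typed rt_ubase_t here. */
    _Static_assert(sizeof(rt_ubase_t) == sizeof(void *),
                   "PTE storage must span the register width");
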
@@ -96,18 +96,18 @@ static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
     {
         RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
         mmu_l3 =
-            (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
+            (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
     }
     else
     {
-        mmu_l3 = (rt_size_t *)rt_pages_alloc(0);
+        mmu_l3 = (rt_ubase_t *)rt_pages_alloc(0);
 
         if (mmu_l3)
         {
             rt_memset(mmu_l3, 0, PAGE_SIZE);
             rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE);
             *(mmu_l2 + l2_off) =
-                COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
+                COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
                            PAGE_DEFAULT_ATTR_NEXT);
             rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
             // declares a reference to parent page table
@@ -122,7 +122,7 @@ static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
     RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
     // declares a reference to parent page table
     rt_page_ref_inc((void *)mmu_l3, 0);
-    *(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)pa, attr);
+    *(mmu_l3 + l3_off) = COMBINEPTE((rt_ubase_t)pa, attr);
     rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
     return 0;
 }
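
For context, a hypothetical sketch of the PTE helper macros used above, assuming the standard RISC-V PTE layout (physical page number starting at bit 10, flag bits below it); the real definitions are in the port's riscv_mmu.h and may differ:

    /* Hypothetical stand-ins for the port's PTE helpers. */
    #define COMBINEPTE(paddr, attr) \
        (((((rt_ubase_t)(paddr)) >> ARCH_PAGE_SHIFT) << 10) | (attr))
    #define GET_PADDR(pte) ((((rt_ubase_t)(pte)) >> 10) << ARCH_PAGE_SHIFT)
    /* PPN_TO_VPN/VPN_TO_PPN translate between the kernel's linear
     * mapping and physical addresses via the constant PV_OFFSET. */
    #define PPN_TO_VPN(pa, pv_off) ((void *)((rt_ubase_t)(pa) - (pv_off)))
    #define VPN_TO_PPN(va, pv_off) ((rt_ubase_t)(va) + (pv_off))
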
@@ -165,7 +165,7 @@ void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
     return NULL;
 }
 
-static void _unmap_pte(rt_size_t *pentry, rt_size_t *lvl_entry[], int level)
+static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
 {
     int loop_flag = 1;
     while (loop_flag)
@@ -195,26 +195,26 @@ static void _unmap_pte(rt_size_t *pentry, rt_size_t *lvl_entry[], int level)
 
 static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
 {
-    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
+    rt_ubase_t loop_va = __UMASKVALUE((rt_ubase_t)v_addr, PAGE_OFFSET_MASK);
     size_t unmapped = 0;
 
     int i = 0;
-    rt_size_t lvl_off[3];
-    rt_size_t *lvl_entry[3];
-    lvl_off[0] = (rt_size_t)GET_L1(loop_va);
-    lvl_off[1] = (rt_size_t)GET_L2(loop_va);
-    lvl_off[2] = (rt_size_t)GET_L3(loop_va);
+    rt_ubase_t lvl_off[3];
+    rt_ubase_t *lvl_entry[3];
+    lvl_off[0] = (rt_ubase_t)GET_L1(loop_va);
+    lvl_off[1] = (rt_ubase_t)GET_L2(loop_va);
+    lvl_off[2] = (rt_ubase_t)GET_L3(loop_va);
     unmapped = 1 << (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2ul);
 
-    rt_size_t *pentry;
-    lvl_entry[i] = ((rt_size_t *)aspace->page_table + lvl_off[i]);
+    rt_ubase_t *pentry;
+    lvl_entry[i] = ((rt_ubase_t *)aspace->page_table + lvl_off[i]);
     pentry = lvl_entry[i];
 
     // find leaf page table entry
     while (PTE_USED(*pentry) && !PAGE_IS_LEAF(*pentry))
     {
         i += 1;
-        lvl_entry[i] = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
+        lvl_entry[i] = ((rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
                         lvl_off[i]);
         pentry = lvl_entry[i];
         unmapped >>= ARCH_INDEX_WIDTH;
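
The three lvl_off values are the 9-bit VPN fields of an Sv39 virtual address, and unmapped starts at the number of bytes one L1 entry covers, shrinking by a factor of 2^ARCH_INDEX_WIDTH for each level descended. A worked sketch, assuming ARCH_PAGE_SHIFT == 12 and ARCH_INDEX_WIDTH == 9:

    /* Sketch: bytes covered per level in Sv39. */
    rt_ubase_t covered = 1ul << (12 + 9 * 2); /* L1 entry: 1 GiB */
    covered >>= 9;                            /* L2 entry: 2 MiB */
    covered >>= 9;                            /* L3 entry: 4 KiB */
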
@@ -277,8 +277,8 @@ static inline void _init_region(void *vaddr, size_t size)
 #define KERN_SPACE_SIZE ((size_t)USER_VADDR_START - 0x1000)
 #endif
 
-int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
-                       rt_size_t *vtable, rt_size_t pv_off)
+int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
+                       rt_ubase_t *vtable, rt_ubase_t pv_off)
 {
     size_t l1_off, va_s, va_e;
     rt_base_t level;
@@ -288,8 +288,8 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
         return -1;
     }
 
-    va_s = (rt_size_t)v_address;
-    va_e = ((rt_size_t)v_address) + size - 1;
+    va_s = (rt_ubase_t)v_address;
+    va_e = ((rt_ubase_t)v_address) + size - 1;
 
     if (va_e < va_s)
     {
@@ -330,15 +330,15 @@ static inline uintptr_t _get_level_size(int level)
     return 1ul << (ARCH_PAGE_SHIFT + (max_level - level) * ARCH_INDEX_WIDTH);
 }
 
-static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
+static rt_ubase_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
 {
-    rt_size_t l1_off, l2_off, l3_off;
-    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
-    rt_size_t pa;
+    rt_ubase_t l1_off, l2_off, l3_off;
+    rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
+    rt_ubase_t pa;
 
-    l1_off = GET_L1((rt_size_t)vaddr);
-    l2_off = GET_L2((rt_size_t)vaddr);
-    l3_off = GET_L3((rt_size_t)vaddr);
+    l1_off = GET_L1((rt_uintptr_t)vaddr);
+    l2_off = GET_L2((rt_uintptr_t)vaddr);
+    l3_off = GET_L3((rt_uintptr_t)vaddr);
 
     if (!aspace)
     {
@@ -346,7 +346,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
         return RT_NULL;
     }
 
-    mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;
+    mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
 
     if (PTE_USED(*mmu_l1))
     {
@@ -356,7 +356,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
             return mmu_l1;
         }
 
-        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
+        mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
 
         if (PTE_USED(*(mmu_l2 + l2_off)))
         {
@@ -366,7 +366,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
                 return mmu_l2 + l2_off;
             }
 
-            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
+            mmu_l3 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
                                              PV_OFFSET);
 
             if (PTE_USED(*(mmu_l3 + l3_off)))
@@ -383,7 +383,7 @@ static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
 void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
 {
     int level;
-    uintptr_t *pte = _query(aspace, vaddr, &level);
+    rt_ubase_t *pte = _query(aspace, vaddr, &level);
     uintptr_t paddr;
 
     if (pte)
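
A hedged usage sketch of the translation path after the retyping; rt_kernel_space and the ARCH_MAP_FAILED sentinel are assumed here from the rt_smart memory layer:

    /* Sketch: translate a kernel VA to a PA through the table walk. */
    void *pa = rt_hw_mmu_v2p(&rt_kernel_space, vaddr);
    if (pa == ARCH_MAP_FAILED)
    {
        /* no leaf PTE maps vaddr */
    }
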
@@ -398,17 +398,17 @@ void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
     return (void *)paddr;
 }
 
-static int _noncache(uintptr_t *pte)
+static int _noncache(rt_base_t *pte)
 {
     return 0;
 }
 
-static int _cache(uintptr_t *pte)
+static int _cache(rt_base_t *pte)
 {
     return 0;
 }
 
-static int (*control_handler[MMU_CNTL_DUMMY_END])(uintptr_t *pte) = {
+static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_base_t *pte) = {
     [MMU_CNTL_CACHE] = _cache,
     [MMU_CNTL_NONCACHE] = _noncache,
 };
@@ -420,14 +420,14 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
     int err = -RT_EINVAL;
     void *vend = vaddr + size;
 
-    int (*handler)(uintptr_t * pte);
+    int (*handler)(rt_base_t *pte);
     if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
     {
         handler = control_handler[cmd];
 
         while (vaddr < vend)
         {
-            uintptr_t *pte = _query(aspace, vaddr, &level);
+            rt_base_t *pte = _query(aspace, vaddr, &level);
             void *range_end = vaddr + _get_level_size(level);
             RT_ASSERT(range_end <= vend);
 
@@ -487,7 +487,7 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
                                  mdesc->vaddr_start + 1,
                              .prefer = (void *)mdesc->vaddr_start};
 
-        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
+        if (mdesc->paddr_start == (rt_uintptr_t)ARCH_MAP_FAILED)
             mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
 
         rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
@@ -499,13 +499,13 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
     rt_page_cleanup();
 }
 
-#define SATP_BASE ((size_t)SATP_MODE << SATP_MODE_OFFSET)
+#define SATP_BASE ((rt_ubase_t)SATP_MODE << SATP_MODE_OFFSET)
 void rt_hw_mem_setup_early(void)
 {
-    rt_size_t pv_off;
-    rt_size_t ps = 0x0;
-    rt_size_t vs = 0x0;
-    rt_size_t *early_pgtbl = (size_t *)(((size_t)&__bss_end + 4095) & ~0xfff);
+    rt_ubase_t pv_off;
+    rt_ubase_t ps = 0x0;
+    rt_ubase_t vs = 0x0;
+    rt_ubase_t *early_pgtbl = (rt_ubase_t *)(((size_t)&__bss_end + 4095) & ~0xfff);
 
     /* calculate pv_offset */
     void *symb_pc;
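
Widening SATP_MODE with rt_ubase_t before the shift matters because SATP_MODE_OFFSET places the mode field at bit 60; the cast must yield a 64-bit value, and the unsigned register type states that intent more directly than size_t. A minimal sketch of how the final satp value is assumed to be composed for Sv39 (MODE in bits 63:60, root-table PPN in the low bits):

    /* Sketch: composing satp; identifiers are from the port headers. */
    rt_ubase_t satp = SATP_BASE /* mode field */
                    | (((rt_ubase_t)early_pgtbl + pv_off) >> ARCH_PAGE_SHIFT);
    /* then: csrw satp, <value>; sfence.vma to drop stale translations */
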
@@ -539,8 +539,8 @@ void rt_hw_mem_setup_early(void)
     vs = ps - pv_off;
 
     /* relocate region */
-    rt_size_t vs_idx = GET_L1(vs);
-    rt_size_t ve_idx = GET_L1(vs + 0x80000000);
+    rt_ubase_t vs_idx = GET_L1(vs);
+    rt_ubase_t ve_idx = GET_L1(vs + 0x80000000);
     for (size_t i = vs_idx; i < ve_idx; i++)
     {
         early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
@@ -557,7 +557,7 @@ void rt_hw_mem_setup_early(void)
 
 void *rt_hw_mmu_pgtbl_create(void)
 {
-    size_t *mmu_table;
+    rt_ubase_t *mmu_table;
     mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
     if (!mmu_table)
     {
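
With mmu_table declared rt_ubase_t *, the declaration finally matches the cast already applied to the rt_pages_alloc_ext() result on the next line. A hedged usage sketch; rt_hw_mmu_pgtbl_delete is assumed to be the counterpart API:

    /* Sketch: create a page table for a new address space, then free it. */
    void *pgtbl = rt_hw_mmu_pgtbl_create();
    if (pgtbl)
    {
        /* ... install into an aspace and use ... */
        rt_hw_mmu_pgtbl_delete(pgtbl);
    }
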