/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 * 2021-11-28     GuEe-GUI     first version
 * 2022-12-10     WangXiaoyao  porting to MM
 * 2024-07-08     Shell        added support for ASID
 */
#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define __MMU_INTERNAL

#include "mm_aspace.h"
#include "mm_page.h"
#include "mmu.h"
#include "tlb.h"
#include "ioremap.h"

#ifdef RT_USING_SMART
#include <lwp_mm.h>
#endif
#define TCR_CONFIG_TBI0 rt_hw_mmu_config_tbi(0)
#define TCR_CONFIG_TBI1 rt_hw_mmu_config_tbi(1)

#define MMU_LEVEL_MASK   0x1ffUL
#define MMU_LEVEL_SHIFT  9
#define MMU_ADDRESS_BITS 39
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
#define MMU_ATTRIB_MASK  0xfff0000000000ffcUL

#define MMU_TYPE_MASK  3UL
#define MMU_TYPE_USED  1UL
#define MMU_TYPE_BLOCK 1UL
#define MMU_TYPE_TABLE 3UL
#define MMU_TYPE_PAGE  3UL

#define MMU_TBL_BLOCK_2M_LEVEL 2
#define MMU_TBL_PAGE_4k_LEVEL  3
#define MMU_TBL_LEVEL_NR       4
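
/*
 * Illustrative index arithmetic (a sketch for reference, not used by the
 * code): with a 4 KiB granule and 48-bit VAs, each level of the walk
 * consumes MMU_LEVEL_SHIFT (9) bits of the VA, starting at
 * MMU_ADDRESS_BITS (39):
 *
 *   lv0 index = (va >> 39) & MMU_LEVEL_MASK
 *   lv1 index = (va >> 30) & MMU_LEVEL_MASK
 *   lv2 index = (va >> 21) & MMU_LEVEL_MASK  (2 MiB blocks stop here)
 *   lv3 index = (va >> 12) & MMU_LEVEL_MASK  (4 KiB pages stop here)
 */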
/* keep the lowest virtual addresses unmapped so dereferencing RT_NULL faults */
#ifndef KERNEL_VADDR_START
#define KERNEL_VADDR_START 0x1000
#endif

volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));

struct mmu_level_info
{
    unsigned long *pos;
    void *page;
};
static void _kernel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    struct mmu_level_info level_info[4];
    int ref;
    int level_shift = MMU_ADDRESS_BITS;
    unsigned long *pos;

    rt_memset(level_info, 0, sizeof level_info);
    for (level = 0; level < MMU_TBL_LEVEL_NR; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        page = cur_lv_tbl[off];
        if (!(page & MMU_TYPE_USED))
        {
            break;
        }
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            break;
        }
        /* next level table referenced by the current entry */
        level_info[level].pos = cur_lv_tbl + off;
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_info[level].page = cur_lv_tbl;
        level_shift -= MMU_LEVEL_SHIFT;
    }

    level = MMU_TBL_PAGE_4k_LEVEL;
    pos = level_info[level].pos;
    if (pos)
    {
        *pos = (unsigned long)RT_NULL;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
    }
    level--;

    while (level >= 0)
    {
        pos = level_info[level].pos;
        if (pos)
        {
            void *cur_page = level_info[level].page;
            ref = rt_page_ref_get(cur_page, 0);
            if (ref == 1)
            {
                *pos = (unsigned long)RT_NULL;
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
            }
            rt_pages_free(cur_page, 0);
        }
        else
        {
            break;
        }
        level--;
    }

    return;
}
static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    rt_ubase_t va = (rt_ubase_t)vaddr;
    rt_ubase_t pa = (rt_ubase_t)paddr;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }

    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* convert the table's physical address back to virtual */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* the entry already holds a block mapping: conflict */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at the page level */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_PAGE); /* page */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa; /* page */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;

err:
    _kernel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
static int _kernel_map_2M(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    unsigned long va = (unsigned long)vaddr;
    unsigned long pa = (unsigned long)paddr;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }

    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* convert the table's physical address back to virtual */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* the entry already holds a block mapping: conflict */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at the block level */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;

err:
    _kernel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;
    void *unmap_va = v_addr;
    size_t npages;
    size_t stride;
    int (*mapper)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);

    if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (size & ARCH_SECTION_MASK))
    {
        /* legacy 4K mapping */
        npages = size >> ARCH_PAGE_SHIFT;
        stride = ARCH_PAGE_SIZE;
        mapper = _kernel_map_4K;
    }
    else
    {
        /* 2M huge page */
        npages = size >> ARCH_SECTION_SHIFT;
        stride = ARCH_SECTION_SIZE;
        mapper = _kernel_map_2M;
    }

    while (npages--)
    {
        MM_PGTBL_LOCK(aspace);
        ret = mapper(aspace->page_table, v_addr, p_addr, attr);
        MM_PGTBL_UNLOCK(aspace);
        if (ret != 0)
        {
            /* any other return value is taken as a programming error */
            RT_ASSERT(ret == MMU_MAP_ERROR_NOPAGE);
            /* error: undo the mappings done so far */
            while (unmap_va != v_addr)
            {
                MM_PGTBL_LOCK(aspace);
                _kernel_unmap_4K(aspace->page_table, (void *)unmap_va);
                MM_PGTBL_UNLOCK(aspace);
                unmap_va = (char *)unmap_va + stride;
            }
            break;
        }
        v_addr = (char *)v_addr + stride;
        p_addr = (char *)p_addr + stride;
    }

    if (ret == 0)
    {
        return unmap_va;
    }

    return NULL;
}
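
/*
 * Usage sketch (illustrative only; both addresses below are made up): map a
 * single 2 MiB-aligned device window into kernel space and tear it down.
 */
#if 0 /* example, not compiled */
static void _map_example(void)
{
    void *va = (void *)0xffffd00000000000UL; /* hypothetical, 2 MiB aligned */
    void *pa = (void *)0x40000000UL;         /* hypothetical device base */

    if (rt_hw_mmu_map(&rt_kernel_space, va, pa, ARCH_SECTION_SIZE,
                      MMU_MAP_K_DEVICE) != RT_NULL)
    {
        /* ... access the device ... */
        rt_hw_mmu_unmap(&rt_kernel_space, va, ARCH_SECTION_SIZE);
    }
}
#endif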
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    /* the caller guarantees that v_addr and size are page aligned */
    size_t npages = size >> ARCH_PAGE_SHIFT;

    if (!aspace->page_table)
    {
        return;
    }

    while (npages--)
    {
        MM_PGTBL_LOCK(aspace);
        if (rt_hw_mmu_v2p(aspace, v_addr) != ARCH_MAP_FAILED)
            _kernel_unmap_4K(aspace->page_table, v_addr);
        MM_PGTBL_UNLOCK(aspace);
        v_addr = (char *)v_addr + ARCH_PAGE_SIZE;
    }
}
#ifdef ARCH_USING_ASID
/**
 * The ASID identifies an address space in the TLB. In the best case, each
 * address space has its own exclusive ASID. However, ARM only guarantees
 * 8 bits of ID space, which gives us only 254 usable IDs (excluding the
 * one reserved for the kernel).
 */
static rt_spinlock_t _asid_lock = RT_SPINLOCK_INIT;

rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
    static rt_uint16_t _asid_pool = 0;
    rt_uint16_t asid_to, asid_from;
    rt_ubase_t ttbr0_from;

    asid_to = aspace->asid;
    if (asid_to == 0)
    {
        rt_spin_lock(&_asid_lock);
#define MAX_ASID (1ul << MMU_SUPPORTED_ASID_BITS)
        if (_asid_pool && _asid_pool < MAX_ASID)
        {
            asid_to = ++_asid_pool;
            LOG_D("Allocated ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
        }
        else
        {
            asid_to = _asid_pool = 1;
            LOG_D("Overflowed ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
        }
        rt_spin_unlock(&_asid_lock);
        aspace->asid = asid_to;
        rt_hw_tlb_invalidate_aspace(aspace);
    }

    __asm__ volatile("mrs %0, ttbr0_el1" : "=r"(ttbr0_from));
    asid_from = ttbr0_from >> MMU_ASID_SHIFT;
    if (asid_from == asid_to)
    {
        LOG_D("Conflict ASID. from %d, to %d", asid_from, asid_to);
        rt_hw_tlb_invalidate_aspace(aspace);
    }
    else
    {
        LOG_D("ASID switched. from %d, to %d", asid_from, asid_to);
    }

    return asid_to;
}

#else

rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
    rt_hw_tlb_invalidate_all();
    return 0;
}
#endif /* ARCH_USING_ASID */
#define CREATE_TTBR0(pgtbl, asid) ((rt_ubase_t)(pgtbl) | (rt_ubase_t)(asid) << MMU_ASID_SHIFT)

void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        rt_ubase_t ttbr0;
        void *pgtbl = aspace->page_table;
        pgtbl = rt_kmem_v2p(pgtbl);

        ttbr0 = CREATE_TTBR0(pgtbl, _aspace_get_asid(aspace));
        __asm__ volatile("msr ttbr0_el1, %0" ::"r"(ttbr0));
        __asm__ volatile("isb" ::: "memory");
    }
}

void rt_hw_mmu_ktbl_set(unsigned long tbl)
{
#ifdef RT_USING_SMART
    tbl += PV_OFFSET;
    __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#else
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#endif
    __asm__ volatile("tlbi vmalle1\n dsb sy\nisb" ::: "memory");
    __asm__ volatile("ic ialluis\n dsb sy\nisb" ::: "memory");
}
/**
 * @brief Set up the page table for kernel space. The mapping is fixed and
 * cannot be changed after initialization.
 *
 * Memory regions in struct mem_desc must be page aligned;
 * otherwise the mapping fails and no error is reported.
 *
 * @param aspace  the kernel address space
 * @param mdesc   array of memory region descriptors
 * @param desc_nr number of entries in mdesc
 */
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;
    for (int i = 0; i < desc_nr; i++)
    {
        size_t attr;
        switch (mdesc->attr)
        {
        case NORMAL_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case NORMAL_NOCACHE_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case DEVICE_MEM:
            attr = MMU_MAP_K_DEVICE;
            break;
        default:
            attr = MMU_MAP_K_DEVICE;
        }

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_start = aspace->start,
                                     .limit_range_size = aspace->size,
                                     .map_size = mdesc->vaddr_end -
                                                 mdesc->vaddr_start + 1,
                                     .prefer = (void *)mdesc->vaddr_start};

        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;

        int retval;
        retval = rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                          mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        if (retval)
        {
            LOG_E("%s: map failed with code %d", __FUNCTION__, retval);
            RT_ASSERT(0);
        }
        mdesc++;
    }

    rt_hw_mmu_ktbl_set((unsigned long)rt_kernel_space.page_table);
    rt_page_cleanup();
}
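
/*
 * Usage sketch (illustrative; the region below is made up): a board support
 * package typically describes RAM once and lets the PV offset supply the
 * physical base:
 *
 *   static struct mem_desc platform_mem_desc[] = {
 *       {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x0fffffff,
 *        (rt_size_t)ARCH_MAP_FAILED, NORMAL_MEM},
 *   };
 *   rt_hw_mmu_setup(&rt_kernel_space, platform_mem_desc, 1);
 */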
static void _init_region(void *vaddr, size_t size)
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
    rt_mpr_start = (char *)rt_ioremap_start - rt_mpr_size;
}
/**
 * This function initializes the kernel address space.
 *
 * @param aspace    the kernel address space
 * @param v_address virtual address
 * @param size      map size
 * @param vtable    MMU table
 * @param pv_off    PV offset of the kernel space
 *
 * @return 0 on success, -1 on failure
 */
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off)
{
    size_t va_s, va_e;

    if (!aspace || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

    rt_aspace_init(aspace, (void *)KERNEL_VADDR_START, 0 - KERNEL_VADDR_START,
                   vtable);

    _init_region(v_address, size);
    return 0;
}

rt_weak long rt_hw_mmu_config_tbi(int tbi_index)
{
    return 0;
}
/************ setting EL1 MMU registers **************
  MAIR_EL1
  index 0 : memory, outer write-back, write/read allocate
  index 1 : memory, non-cacheable
  index 2 : device nGnRnE
 *****************************************************/
void mmu_tcr_init(void)
{
    unsigned long val64;
    unsigned long pa_range;

    val64 = 0x00447fUL;
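    /*
     * For reference, decoding the value above per the ARM MAIR_EL1
     * encoding: Attr0 = 0x7f (Normal, write-back, read/write-allocate),
     * Attr1 = 0x44 (Normal, non-cacheable), Attr2 = 0x00 (Device-nGnRnE),
     * matching the three indexes documented above.
     */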
- __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n" ::"r"(val64));
- __asm__ volatile ("mrs %0, ID_AA64MMFR0_EL1":"=r"(val64));
- pa_range = val64 & 0xf; /* PARange */
- /* TCR_EL1 */
- val64 = (16UL << 0) /* t0sz 48bit */
- | (0x0UL << 6) /* reserved */
- | (0x0UL << 7) /* epd0 */
- | (0x3UL << 8) /* t0 wb cacheable */
- | (0x3UL << 10) /* inner shareable */
- | (0x2UL << 12) /* t0 outer shareable */
- | (0x0UL << 14) /* t0 4K */
- | (16UL << 16) /* t1sz 48bit */
- | (0x0UL << 22) /* define asid use ttbr0.asid */
- | (0x0UL << 23) /* epd1 */
- | (0x3UL << 24) /* t1 inner wb cacheable */
- | (0x3UL << 26) /* t1 outer wb cacheable */
- | (0x2UL << 28) /* t1 outer shareable */
- | (0x2UL << 30) /* t1 4k */
- | (pa_range << 32) /* PA range */
- | (0x0UL << 35) /* reserved */
- | (0x1UL << 36) /* as: 0:8bit 1:16bit */
- | (TCR_CONFIG_TBI0 << 37) /* tbi0 */
- | (TCR_CONFIG_TBI1 << 38); /* tbi1 */
- __asm__ volatile("msr TCR_EL1, %0\n" ::"r"(val64));
- }
struct page_table
{
    unsigned long page[512];
};

static struct page_table *__init_page_array;
static unsigned long __page_off = 0UL;

unsigned long get_ttbrn_base(void)
{
    return (unsigned long)__init_page_array;
}

void set_free_page(void *page_array)
{
    __init_page_array = page_array;
}

unsigned long get_free_page(void)
{
    return (unsigned long)(__init_page_array[__page_off++].page);
}
static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
                               unsigned long pa, unsigned long attr,
                               rt_bool_t flush)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }

    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
            if (flush)
            {
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
            }
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* the entry already holds a block mapping: conflict */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        level_shift -= MMU_LEVEL_SHIFT;
    }

    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    if (flush)
    {
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    }
    return 0;
}
void *rt_hw_mmu_tbl_get(void)
{
    uintptr_t tbl;
    __asm__ volatile("MRS %0, TTBR0_EL1" : "=r"(tbl));
    return rt_kmem_p2v((void *)(tbl & ((1ul << 48) - 2)));
}
void *rt_ioremap_early(void *paddr, size_t size)
{
    volatile size_t count;
    rt_ubase_t base;
    static void *tbl = RT_NULL;

    if (!size)
    {
        return RT_NULL;
    }

    if (!tbl)
    {
        tbl = rt_hw_mmu_tbl_get();
    }

    /* total number of sections required, including alignment overhead */
    count = (size + ((rt_ubase_t)paddr & ARCH_SECTION_MASK)
             + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    base = (rt_ubase_t)paddr & (~ARCH_SECTION_MASK);

    while (count-- > 0)
    {
        if (_map_single_page_2M(tbl, base, base, MMU_MAP_K_DEVICE, RT_TRUE))
        {
            return RT_NULL;
        }
        base += ARCH_SECTION_SIZE;
    }
    return paddr;
}
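
/*
 * Usage sketch (illustrative; the physical base is made up): identity-map an
 * early console before the page allocator is available.
 *
 *   void *uart = rt_ioremap_early((void *)0x09000000UL, 0x1000);
 *   if (uart) { ... poll the device through the identity mapping ... }
 */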
static int _init_map_2M(unsigned long *lv0_tbl, unsigned long va,
                        unsigned long pa, unsigned long count,
                        unsigned long attr)
{
    unsigned long i;
    int ret;

    if (va & ARCH_SECTION_MASK)
    {
        return -1;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return -1;
    }

    for (i = 0; i < count; i++)
    {
        ret = _map_single_page_2M(lv0_tbl, va, pa, attr, RT_FALSE);
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
        if (ret != 0)
        {
            return ret;
        }
    }
    return 0;
}
static unsigned long *_query(rt_aspace_t aspace, void *vaddr, int *plvl_shf)
{
    int level;
    unsigned long va = (unsigned long)vaddr;
    unsigned long *cur_lv_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    cur_lv_tbl = aspace->page_table;
    RT_ASSERT(cur_lv_tbl);

    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;

        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            *plvl_shf = level_shift;
            return (void *)0;
        }

        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            *plvl_shf = level_shift;
            return &cur_lv_tbl[off];
        }

        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at level MMU_TBL_PAGE_4k_LEVEL */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    page = cur_lv_tbl[off];

    *plvl_shf = level_shift;
    if (!(page & MMU_TYPE_USED))
    {
        return (void *)0;
    }
    return &cur_lv_tbl[off];
}
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    int level_shift;
    unsigned long paddr;

    if (aspace == &rt_kernel_space)
    {
        paddr = (unsigned long)rt_hw_mmu_kernel_v2p(v_addr);
    }
    else
    {
        unsigned long *pte = _query(aspace, v_addr, &level_shift);

        if (pte)
        {
            paddr = *pte & MMU_ADDRESS_MASK;
            paddr |= (rt_ubase_t)v_addr & ((1ul << level_shift) - 1);
        }
        else
        {
            paddr = (unsigned long)ARCH_MAP_FAILED;
        }
    }
    return (void *)paddr;
}
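
/*
 * Worked example (for reference): if _query() stops at a 2 MiB block entry,
 * level_shift is 21 and the translation above evaluates to
 *
 *   paddr = (*pte & MMU_ADDRESS_MASK) | ((rt_ubase_t)v_addr & ((1ul << 21) - 1));
 *
 * i.e. the block base plus the 21-bit offset of v_addr within the block.
 */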
static int _noncache(rt_ubase_t *pte)
{
    int err = 0;
    const rt_ubase_t idx_shift = 2;
    const rt_ubase_t idx_mask = 0x7 << idx_shift;
    rt_ubase_t entry = *pte;
    if ((entry & idx_mask) == (NORMAL_MEM << idx_shift))
    {
        *pte = (entry & ~idx_mask) | (NORMAL_NOCACHE_MEM << idx_shift);
    }
    else
    {
        /* only Normal cached memory may be switched to non-cacheable */
        err = -RT_ENOSYS;
    }
    return err;
}

static int _cache(rt_ubase_t *pte)
{
    int err = 0;
    const rt_ubase_t idx_shift = 2;
    const rt_ubase_t idx_mask = 0x7 << idx_shift;
    rt_ubase_t entry = *pte;
    if ((entry & idx_mask) == (NORMAL_NOCACHE_MEM << idx_shift))
    {
        *pte = (entry & ~idx_mask) | (NORMAL_MEM << idx_shift);
    }
    else
    {
        /* only non-cacheable memory may be switched back to cached */
        err = -RT_ENOSYS;
    }
    return err;
}

static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_ubase_t *pte) = {
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    int level_shift;
    int err = -RT_EINVAL;
    rt_ubase_t vstart = (rt_ubase_t)vaddr;
    rt_ubase_t vend = vstart + size;

    int (*handler)(rt_ubase_t *pte);
    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
    {
        handler = control_handler[cmd];

        while (vstart < vend)
        {
            rt_ubase_t *pte = _query(aspace, (void *)vstart, &level_shift);
            rt_ubase_t range_end = vstart + (1ul << level_shift);
            RT_ASSERT(range_end <= vend);

            if (pte)
            {
                err = handler(pte);
                RT_ASSERT(err == RT_EOK);
            }
            vstart = range_end;
        }
    }
    else
    {
        err = -RT_ENOSYS;
    }

    return err;
}
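
/*
 * Usage sketch (illustrative; buf is assumed page aligned and already mapped
 * as NORMAL_MEM): temporarily switch a buffer to non-cacheable, e.g. for a
 * polled DMA descriptor ring, and restore it afterwards.
 */
#if 0 /* example, not compiled */
static void _noncache_example(void *buf)
{
    rt_hw_mmu_control(&rt_kernel_space, buf, ARCH_PAGE_SIZE, MMU_CNTL_NONCACHE);
    /* ... hand buf to the device ... */
    rt_hw_mmu_control(&rt_kernel_space, buf, ARCH_PAGE_SIZE, MMU_CNTL_CACHE);
}
#endif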
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                           unsigned long size, unsigned long pv_off)
{
    int ret;
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_K_RWCB;
    extern unsigned char _start;
    unsigned long va = (unsigned long)&_start - pv_off;
    va = RT_ALIGN_DOWN(va, 0x200000);

    /* set up the PV offset */
    rt_kmem_pvoff_set(pv_off);

    /* clear the first two pages */
    rt_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
    rt_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);

    ret = _init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
    ret = _init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
}
void *rt_hw_mmu_pgtbl_create(void)
{
    size_t *mmu_table;
    mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!mmu_table)
    {
        return RT_NULL;
    }

    memset(mmu_table, 0, ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
    return mmu_table;
}

void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
    rt_pages_free(pgtbl, 0);
}
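
/*
 * Lifecycle sketch (illustrative): user address spaces pair the two calls
 * above, e.g.
 *
 *   void *pgtbl = rt_hw_mmu_pgtbl_create();
 *   if (pgtbl) { ... install it into an aspace, run, then ... }
 *   rt_hw_mmu_pgtbl_delete(pgtbl);
 */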