
[fixup] add cache maintenance ops;
fix bugs in cache maintenance when starting a user app

wangxiaoyao · 2 years ago
commit 484a0d602e

+ 1 - 0
bsp/allwinner/d1s/.config

@@ -272,6 +272,7 @@ CONFIG_RT_USING_POSIX_PIPE_SIZE=512
 # CONFIG_RT_USING_ULOG is not set
 # CONFIG_RT_USING_UTEST is not set
 # CONFIG_RT_USING_VAR_EXPORT is not set
+CONFIG_RT_USING_ADT=y
 # CONFIG_RT_USING_RT_LINK is not set
 # CONFIG_RT_USING_VBUS is not set
 

+ 1 - 0
bsp/allwinner/d1s/rtconfig.h

@@ -160,6 +160,7 @@
 
 /* Utilities */
 
+#define RT_USING_ADT
 
 /* RT-Thread Utestcases */
 

+ 2 - 0
components/lwp/lwp.c

@@ -1097,6 +1097,8 @@ static void _lwp_thread_entry(void *parameter)
         icache_invalid_all();
     }
 
+    rt_hw_icache_invalidate_all();
+
 #ifdef ARCH_MM_MMU
     arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
 #else

+ 4 - 0
components/lwp/lwp_pid.c

@@ -467,6 +467,10 @@ void lwp_free(struct rt_lwp* lwp)
             }
         }
     }
+    else
+    {
+        level = rt_hw_interrupt_disable();
+    }
 
     /* for parent */
     {
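Note: judging from the surrounding hunk, the new else branch guarantees that `level` is assigned and interrupts are disabled on both paths before the parent/child bookkeeping that follows, which later re-enables interrupts via `rt_hw_interrupt_enable(level)`. Previously the path that skipped the if-block reached that section with interrupts still enabled and `level` uninitialized.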

+ 48 - 0
components/mm/mm_kmem.c

@@ -8,8 +8,14 @@
  * 2022-11-14     WangXiaoyao  the first version
  */
 
+#include <rtthread.h>
+#define DBG_TAG "mm.kmem"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
 #include "mm_aspace.h"
 #include "mm_private.h"
+#include <mmu.h>
 
 static void list_kernel_space(void)
 {
@@ -30,3 +36,45 @@ void rt_kmem_pvoff_set(rt_ubase_t pvoff)
 {
     rt_pv_offset = pvoff;
 }
+
+#define _KMEM_LO_OFF(addr) ((rt_ubase_t)(addr) & ARCH_PAGE_MASK)
+
+int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr)
+{
+    int err;
+    size_t lo_off;
+    lo_off = _KMEM_LO_OFF(pa);
+
+    if (va == RT_NULL)
+    {
+        LOG_E("%s: va NULL is not a valid input", __func__);
+        err = -RT_EINVAL;
+    }
+    else if (_KMEM_LO_OFF(pa) != _KMEM_LO_OFF(va))
+    {
+        LOG_E("%s: misaligned PA(%p) to VA(%p)", __func__, pa, va);
+        err = -RT_EINVAL;
+    }
+    else
+    {
+        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
+                                    .limit_range_size = rt_kernel_space.size,
+                                    .limit_start = rt_kernel_space.start,
+                                    .prefer = va,
+                                    .map_size = RT_ALIGN(length + lo_off, ARCH_PAGE_SIZE)};
+
+        err = rt_aspace_map_phy(&rt_kernel_space, &hint, attr, MM_PA_TO_OFF(pa), &va);
+
+        if (err)
+        {
+            LOG_W("%s: map %p to %p (%p bytes) failed(err %d)", __func__, pa, va, length, err);
+        }
+    }
+
+    return err;
+}
+
+void *rt_kmem_v2p(void *vaddr)
+{
+    return rt_hw_mmu_v2p(&rt_kernel_space, vaddr);
+}
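For orientation, a minimal usage sketch of the new helper (the fixed virtual address, the physical address, and the MMU_MAP_K_DEVICE attribute below are hypothetical; the attribute macro is assumed to be provided by the architecture's mmu.h):

    #include <rtthread.h>
    #include <mmu.h>    /* MMU_MAP_K_DEVICE assumed from the arch port */

    /* hypothetical: map one device page at a fixed kernel VA */
    static int map_device_window(void)
    {
        void *va = (void *)0xffffd0000000UL; /* must share the in-page offset with pa */
        void *pa = (void *)0x02500000UL;
        int err = rt_kmem_map_phy(va, pa, 0x1000, MMU_MAP_K_DEVICE);

        if (err == RT_EOK)
        {
            /* mapping is live; rt_kmem_v2p(va) resolves back to pa */
        }
        return err;
    }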

+ 7 - 2
components/mm/mm_page.c

@@ -402,13 +402,18 @@ static rt_page_t (*pages_alloc_handler)(rt_uint32_t size_bits);
 
 void *rt_pages_alloc(rt_uint32_t size_bits)
 {
+    void *alloc_buf = RT_NULL;
     struct rt_page *p;
     rt_base_t level;
 
     level = rt_hw_interrupt_disable();
     p = pages_alloc_handler(size_bits);
     rt_hw_interrupt_enable(level);
-    return page_to_addr(p);
+    if (p)
+    {
+        alloc_buf = page_to_addr(p);
+    }
+    return alloc_buf;
 }
 
 int rt_pages_free(void *addr, rt_uint32_t size_bits)
@@ -452,7 +457,7 @@ void list_page(void)
         rt_kprintf("\n");
     }
     rt_hw_interrupt_enable(level);
-    rt_kprintf("free pages is 0x%08x\n", total);
+    rt_kprintf("free pages is 0x%08lx (%ld KB)\n", total, total * ARCH_PAGE_SIZE / 1024);
     rt_kprintf("-------------------------------\n");
 }
 MSH_CMD_EXPORT(list_page, show page info);
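The added NULL check matters to callers: before this change, `page_to_addr()` was applied even when the allocator returned no page, turning a failed allocation into a bogus non-NULL pointer. A minimal caller sketch:

    static void demo_page_alloc(void)
    {
        void *buf = rt_pages_alloc(0);  /* 2^0 = one ARCH_PAGE_SIZE page */

        if (buf != RT_NULL)             /* now reliably NULL on failure */
        {
            /* ... use the page ... */
            rt_pages_free(buf, 0);
        }
    }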

+ 1 - 1
include/rthw.h

@@ -55,7 +55,7 @@ enum RT_HW_CACHE_OPS
  */
 #ifdef RT_USING_CACHE
 
-#ifdef ARCH_RISCV64
+#ifdef RT_USING_SMART
 #include <cache.h>
 #endif
 

+ 3 - 2
libcpu/aarch64/common/cache.S

@@ -193,8 +193,9 @@ __asm_invalidate_icache_range:
  */
 .globl __asm_invalidate_icache_all
 __asm_invalidate_icache_all:
-    ic    ialluis
-    isb    sy
+    dsb     sy
+    ic      ialluis
+    isb     sy
     ret
 
 .globl __asm_flush_l3_cache
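The added `dsb sy` ensures that preceding data-cache maintenance (e.g. cleaning freshly written code pages) has completed before the instruction caches are invalidated; the trailing `isb` then resynchronizes the pipeline so subsequent fetches see the invalidated icache.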

+ 9 - 3
libcpu/aarch64/common/cache.h

@@ -11,13 +11,19 @@
 #ifndef __CACHE_H__
 #define __CACHE_H__
 
+void __asm_invalidate_icache_all(void);
+
 void rt_hw_dcache_flush_all(void);
 void rt_hw_dcache_invalidate_all(void);
 void rt_hw_dcache_flush_range(unsigned long start_addr, unsigned long size);
-void rt_hw_cpu_dcache_clean(void *addr, int size);
-void rt_hw_cpu_dcache_invalidate(unsigned long start_addr,unsigned long size);
+void rt_hw_cpu_dcache_clean(void *addr, unsigned long size);
+void rt_hw_cpu_dcache_invalidate(void *start_addr, unsigned long size);
+
+static inline void rt_hw_icache_invalidate_all(void)
+{
+    __asm_invalidate_icache_all();
+}
 
-void rt_hw_icache_invalidate_all();
 void rt_hw_icache_invalidate_range(unsigned long start_addr, int size);
 
 #endif /* __CACHE_H__ */
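The old header declared `rt_hw_icache_invalidate_all()` with an empty, unprototyped parameter list and no definition; replacing it with a static inline that calls `__asm_invalidate_icache_all()` gives callers such as `_lwp_thread_entry()` (see components/lwp/lwp.c above) a proper prototype and a ready definition. The dcache helpers are also widened from `int` to `unsigned long` sizes, matching the 64-bit ranges they operate on.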

+ 11 - 11
libcpu/aarch64/common/cache_ops.c

@@ -12,9 +12,9 @@
 
 void __asm_invalidate_icache_all(void);
 void __asm_flush_dcache_all(void);
-void __asm_flush_dcache_range(unsigned long start, unsigned long end);
-void __asm_invalidate_dcache_range(unsigned long start, unsigned long end);
-void __asm_invalidate_icache_range(unsigned long start, unsigned long end);
+void __asm_flush_dcache_range(rt_size_t start, rt_size_t end);
+void __asm_invalidate_dcache_range(rt_size_t start, rt_size_t end);
+void __asm_invalidate_icache_range(rt_size_t start, rt_size_t end);
 void __asm_invalidate_dcache_all(void);
 void __asm_invalidate_icache_all(void);
 
@@ -28,24 +28,24 @@ rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
     return 0;
 }
 
-void rt_hw_cpu_icache_invalidate(void *addr, int size)
+void rt_hw_cpu_icache_invalidate(void *addr, rt_size_t size)
 {
-    __asm_invalidate_icache_range((unsigned long)addr, (unsigned long)addr + size);
+    __asm_invalidate_icache_range((rt_size_t)addr, (rt_size_t)addr + size);
 }
 
-void rt_hw_cpu_dcache_invalidate(void *addr, int size)
+void rt_hw_cpu_dcache_invalidate(void *addr, rt_size_t size)
 {
-    __asm_invalidate_dcache_range((unsigned long)addr, (unsigned long)addr + size);
+    __asm_invalidate_dcache_range((rt_size_t)addr, (rt_size_t)addr + size);
 }
 
-void rt_hw_cpu_dcache_clean(void *addr, int size)
+void rt_hw_cpu_dcache_clean(void *addr, rt_size_t size)
 {
-    __asm_flush_dcache_range((unsigned long)addr, (unsigned long)addr + size);
+    __asm_flush_dcache_range((rt_size_t)addr, (rt_size_t)addr + size);
 }
 
-void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, int size)
+void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, rt_size_t size)
 {
-    __asm_flush_dcache_range((unsigned long)addr, (unsigned long)addr + size);
+    __asm_flush_dcache_range((rt_size_t)addr, (rt_size_t)addr + size);
 }
 
 void rt_hw_cpu_icache_ops(int ops, void *addr, int size)

+ 5 - 5
libcpu/aarch64/common/cpuport.h

@@ -25,27 +25,27 @@ typedef union {
 
 rt_inline void rt_hw_isb(void)
 {
-    asm volatile ("isb":::"memory");
+    __asm__ volatile ("isb":::"memory");
 }
 
 rt_inline void rt_hw_dmb(void)
 {
-    asm volatile ("dmb ish":::"memory");
+    __asm__ volatile ("dmb ish":::"memory");
 }
 
 rt_inline void rt_hw_wmb(void)
 {
-    asm volatile ("dmb ishst":::"memory");
+    __asm__ volatile ("dmb ishst":::"memory");
 }
 
 rt_inline void rt_hw_rmb(void)
 {
-    asm volatile ("dmb ishld":::"memory");
+    __asm__ volatile ("dmb ishld":::"memory");
 }
 
 rt_inline void rt_hw_dsb(void)
 {
-    asm volatile ("dsb ish":::"memory");
+    __asm__ volatile ("dsb ish":::"memory");
 }
 
 #endif  /*CPUPORT_H__*/
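Under strict ISO modes such as `-std=c99` the `asm` keyword is unavailable (it is a GNU extension), whereas `__asm__` is accepted regardless of dialect, so these barrier helpers now compile in either mode.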

+ 9 - 0
libcpu/aarch64/common/exception.c

@@ -1,3 +1,12 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-02-08     RT-Thread    the first version
+ */
 #include "rtthread.h"
 
 static void data_abort(unsigned long far, unsigned long iss)

+ 17 - 8
libcpu/aarch64/common/mmu.c

@@ -148,11 +148,9 @@ static int _kenrel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr,
                 goto err;
             }
             rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
-            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page,
-                                 ARCH_PAGE_SIZE);
+            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
             cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
-            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off,
-                                 sizeof(void *));
+            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
         }
         else
         {
@@ -197,14 +195,18 @@ void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
     // TODO trying with HUGEPAGE here
     while (npages--)
     {
+        MM_PGTBL_LOCK(aspace);
         ret = _kenrel_map_4K(aspace->page_table, v_addr, p_addr, attr);
+        MM_PGTBL_UNLOCK(aspace);
+
         if (ret != 0)
         {
             /* error, undo map */
             while (unmap_va != v_addr)
             {
+                MM_PGTBL_LOCK(aspace);
                 _kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
+                MM_PGTBL_UNLOCK(aspace);
                 unmap_va += ARCH_PAGE_SIZE;
             }
             break;
         }
@@ -232,7 +234,9 @@ void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
 
     while (npages--)
     {
+        MM_PGTBL_LOCK(aspace);
         _kenrel_unmap_4K(aspace->page_table, v_addr);
+        MM_PGTBL_UNLOCK(aspace);
         v_addr += ARCH_PAGE_SIZE;
     }
 }
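The page-table lock is taken per 4 KiB operation rather than around the whole map/unmap loop, keeping each critical section short at the cost of re-acquiring the lock for every page.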
@@ -242,7 +246,7 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
     if (aspace != &rt_kernel_space)
     {
         void *pgtbl = aspace->page_table;
-        pgtbl = _rt_kmem_v2p(pgtbl);
+        pgtbl = rt_kmem_v2p(pgtbl);
         uintptr_t tcr;
 
         __asm__ volatile("msr ttbr0_el1, %0" ::"r"(pgtbl) : "memory");
@@ -311,9 +315,14 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
 
         if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
             mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
-
-        rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
+        int retval;
+        retval = rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                  mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
+        if (retval)
+        {
+            LOG_E("%s: map failed with code %d", __func__, retval);
+            RT_ASSERT(0);
+        }
         mdesc++;
     }
 

+ 0 - 13
libcpu/aarch64/common/mmu.h

@@ -97,19 +97,6 @@ static inline void *rt_hw_mmu_tbl_get()
     return (void *)(tbl & ((1ul << 48) - 2));
 }
 
-static inline void *_rt_kmem_v2p(void *vaddr)
-{
-    return rt_hw_mmu_v2p(&rt_kernel_space, vaddr);
-}
-
-static inline void *rt_kmem_v2p(void *vaddr)
-{
-    MM_PGTBL_LOCK(&rt_kernel_space);
-    void *paddr = _rt_kmem_v2p(vaddr);
-    MM_PGTBL_UNLOCK(&rt_kernel_space);
-    return paddr;
-}
-
 int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                       enum rt_mmu_cntl cmd);
 

+ 1 - 1
libcpu/aarch64/common/tlb.h

@@ -67,7 +67,7 @@ static inline void rt_hw_tlb_invalidate_page(rt_aspace_t aspace, void *start)
 static inline void rt_hw_tlb_invalidate_range(rt_aspace_t aspace, void *start,
                                               size_t size, size_t stride)
 {
-    if (size < ARCH_PAGE_SIZE)
+    if (size <= ARCH_PAGE_SIZE)
     {
         rt_hw_tlb_invalidate_page(aspace, start);
     }
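With `<`, a range of exactly `ARCH_PAGE_SIZE` fell through to the heavier range/ASID path even though a single-page invalidate suffices; `<=` now routes that common case to `rt_hw_tlb_invalidate_page()`.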

+ 5 - 0
libcpu/aarch64/cortex-a/entry_point.S

@@ -89,6 +89,9 @@ __start:
     /* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid trap from SIMD or float point instruction */
     mov     x1, #0x00300000         /* Don't trap any SIMD/FP instructions in both EL0 and EL1 */
     msr     cpacr_el1, x1
+    /* applying context change */
+    dsb     ish
+    isb
 
     /* clear bss */
     GET_PHY x1, __bss_start
@@ -270,11 +273,13 @@ _secondary_cpu_entry:
     ret
 
 after_mmu_enable_cpux:
+#ifdef RT_USING_SMART
     mrs x0, tcr_el1          /* disable ttbr0, only using kernel space */
     orr x0, x0, #(1 << 7)
     msr tcr_el1, x0
     msr ttbr0_el1, xzr
     dsb sy
+#endif
 
     mov     x0, #1
     msr     spsel, x0
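Disabling TTBR0 translation walks (setting TCR_EL1.EPD0, bit 7) is only meaningful when user address spaces exist, i.e. under RT-Thread Smart; guarding the sequence with RT_USING_SMART leaves secondary cores on plain RT-Thread untouched.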

+ 19 - 0
libcpu/arm/cortex-a/cache.h

@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2019-03-29     quanzhao     the first version
+ */
+
+#ifndef __CACHE_H__
+#define __CACHE_H__
+
+static inline void rt_hw_icache_invalidate_all(void)
+{
+    __asm__ volatile("mcr p15, 0, %0, c7, c5, 0"::"r"(0ul));
+}
+
+#endif /* __CACHE_H__ */
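The `mcr p15, 0, <Rt>, c7, c5, 0` encoding is ICIALLU (invalidate all instruction caches to PoU); the source register value is ignored by the operation, so writing zero is conventional. Any required ISB afterwards is left to the caller.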

+ 1 - 1
libcpu/arm/cortex-a/mmu.c

@@ -337,7 +337,7 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
     if (aspace != &rt_kernel_space)
     {
         void *pgtbl = aspace->page_table;
-        pgtbl = _rt_kmem_v2p(pgtbl);
+        pgtbl = rt_kmem_v2p(pgtbl);
 
         rt_hw_mmu_switch(pgtbl);
 

+ 0 - 13
libcpu/arm/cortex-a/mmu.h

@@ -112,19 +112,6 @@ void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr);
 void rt_hw_mmu_kernel_map_init(struct rt_aspace *aspace, size_t vaddr_start, size_t size);
 void *rt_hw_mmu_tbl_get();
 
-static inline void *_rt_kmem_v2p(void *vaddr)
-{
-    return rt_hw_mmu_v2p(&rt_kernel_space, vaddr);
-}
-
-static inline void *rt_kmem_v2p(void *vaddr)
-{
-    MM_PGTBL_LOCK(&rt_kernel_space);
-    void *paddr = _rt_kmem_v2p(vaddr);
-    MM_PGTBL_UNLOCK(&rt_kernel_space);
-    return paddr;
-}
-
 int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size, enum rt_mmu_cntl cmd);
 
 #endif

+ 2 - 2
libcpu/risc-v/t-head/c906/backtrace.c

@@ -59,13 +59,13 @@ void rt_hw_backtrace(rt_uint32_t *ffp, rt_ubase_t sepc)
         }
 
         ra = fp - 1;
-        if (!_rt_kmem_v2p(ra) || *ra < vas || *ra > vae)
+        if (!rt_kmem_v2p(ra) || *ra < vas || *ra > vae)
             break;
 
         rt_kprintf(" %p", *ra - 0x04);
 
         fp = fp - 2;
-        if (!_rt_kmem_v2p(fp))
+        if (!rt_kmem_v2p(fp))
             break;
         fp = (rt_ubase_t *)(*fp);
         if (!fp)

+ 2 - 0
libcpu/risc-v/t-head/c906/cache.h

@@ -63,6 +63,8 @@ ALWAYS_INLINE void rt_hw_cpu_icache_invalidate_all_local(void)
     rt_hw_cpu_sync_i();
 }
 
+#define rt_hw_icache_invalidate_all rt_hw_cpu_icache_invalidate_all
+
 /**
  * ========================================
  * Multi-core cache maintainence operations
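Together with the identical define added to virt64's cache.h below and the aarch64 inline above, every port now exposes a common `rt_hw_icache_invalidate_all()` entry point, which is exactly what the new call in `_lwp_thread_entry()` relies on.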

+ 5 - 2
libcpu/risc-v/t-head/c906/mmu.c

@@ -25,6 +25,7 @@
 #include <tlb.h>
 
 #ifdef RT_USING_SMART
+#include <board.h>
 #include <ioremap.h>
 #include <lwp_user_mm.h>
 #endif
@@ -96,7 +97,7 @@ static rt_uint64_t _asid_check_switch(rt_aspace_t aspace)
 
 void rt_hw_aspace_switch(rt_aspace_t aspace)
 {
-    uintptr_t page_table = (uintptr_t)_rt_kmem_v2p(aspace->page_table);
+    uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
     current_mmu_table = aspace->page_table;
 
     rt_uint64_t asid = _asid_check_switch(aspace);
@@ -191,7 +192,9 @@ void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
     // TODO trying with HUGEPAGE here
     while (npages--)
     {
+        MM_PGTBL_LOCK(aspace);
         ret = _map_one_page(aspace, v_addr, p_addr, attr);
+        MM_PGTBL_UNLOCK(aspace);
         if (ret != 0)
         {
             /* error, undo map */
@@ -500,7 +503,7 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
  * otherwise is a failure and no report will be
  * returned.
  *
- * @param mmu_info
+ * @param aspace
  * @param mdesc
  * @param desc_nr
  */

+ 0 - 13
libcpu/risc-v/t-head/c906/mmu.h

@@ -67,19 +67,6 @@ void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size);
 void rt_hw_aspace_switch(rt_aspace_t aspace);
 void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *vaddr);
 
-static inline void *_rt_kmem_v2p(void *vaddr)
-{
-    return rt_hw_mmu_v2p(&rt_kernel_space, vaddr);
-}
-
-static inline void *rt_kmem_v2p(void *vaddr)
-{
-    MM_PGTBL_LOCK(&rt_kernel_space);
-    void *paddr = _rt_kmem_v2p(vaddr);
-    MM_PGTBL_UNLOCK(&rt_kernel_space);
-    return paddr;
-}
-
 int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                       enum rt_mmu_cntl cmd);
 

+ 2 - 2
libcpu/risc-v/virt64/backtrace.c

@@ -59,13 +59,13 @@ void rt_hw_backtrace(rt_uint32_t *ffp, rt_ubase_t sepc)
         }
 
         ra = fp - 1;
-        if (!_rt_kmem_v2p(ra) || *ra < vas || *ra > vae)
+        if (!rt_kmem_v2p(ra) || *ra < vas || *ra > vae)
             break;
 
         rt_kprintf(" %p", *ra - 0x04);
 
         fp = fp - 2;
-        if (!_rt_kmem_v2p(fp))
+        if (!rt_kmem_v2p(fp))
             break;
         fp = (rt_ubase_t *)(*fp);
         if (!fp)

+ 2 - 0
libcpu/risc-v/virt64/cache.h

@@ -45,6 +45,8 @@ ALWAYS_INLINE void rt_hw_cpu_icache_invalidate_all_local() {}
 #define rt_hw_cpu_icache_invalidate rt_hw_cpu_icache_invalidate_local
 #define rt_hw_cpu_icache_invalidate_all rt_hw_cpu_icache_invalidate_all_local
 
+#define rt_hw_icache_invalidate_all rt_hw_cpu_icache_invalidate_all
+
 /** instruction barrier */
 void rt_hw_cpu_sync(void);
 

+ 5 - 2
libcpu/risc-v/virt64/mmu.c

@@ -17,6 +17,7 @@
 #define DBG_LVL DBG_INFO
 #include <rtdbg.h>
 
+#include <board.h>
 #include <cache.h>
 #include <mm_aspace.h>
 #include <mm_page.h>
@@ -43,7 +44,7 @@ rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];
 
 void rt_hw_aspace_switch(rt_aspace_t aspace)
 {
-    uintptr_t page_table = (uintptr_t)_rt_kmem_v2p(aspace->page_table);
+    uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
     current_mmu_table = aspace->page_table;
 
     write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
@@ -136,7 +137,9 @@ void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
     // TODO trying with HUGEPAGE here
     while (npages--)
     {
+        MM_PGTBL_LOCK(aspace);
         ret = _map_one_page(aspace, v_addr, p_addr, attr);
+        MM_PGTBL_UNLOCK(aspace);
         if (ret != 0)
         {
             /* error, undo map */
@@ -444,7 +447,7 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
  * otherwise is a failure and no report will be
  * returned.
  *
- * @param mmu_info
+ * @param aspace
  * @param mdesc
  * @param desc_nr
  */

+ 0 - 13
libcpu/risc-v/virt64/mmu.h

@@ -67,19 +67,6 @@ void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size);
 void rt_hw_aspace_switch(rt_aspace_t aspace);
 void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *vaddr);
 
-static inline void *_rt_kmem_v2p(void *vaddr)
-{
-    return rt_hw_mmu_v2p(&rt_kernel_space, vaddr);
-}
-
-static inline void *rt_kmem_v2p(void *vaddr)
-{
-    MM_PGTBL_LOCK(&rt_kernel_space);
-    void *paddr = _rt_kmem_v2p(vaddr);
-    MM_PGTBL_UNLOCK(&rt_kernel_space);
-    return paddr;
-}
-
 int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                       enum rt_mmu_cntl cmd);