[riscv64-virt] add VirtIO support;
fix virt64 libcpu MMU issues in v2p, ioremap, etc.;
migrate the bsp to the new API;

wangxiaoyao 2 years ago
parent
commit
2020ebc76e

+ 47 - 8
bsp/qemu-virt64-riscv/.config

@@ -62,7 +62,7 @@ CONFIG_RT_USING_HEAP=y
 # Kernel Device Object
 #
 CONFIG_RT_USING_DEVICE=y
-# CONFIG_RT_USING_DEVICE_OPS is not set
+CONFIG_RT_USING_DEVICE_OPS=y
 # CONFIG_RT_USING_DM is not set
 # CONFIG_RT_USING_INTERRUPT_INFO is not set
 CONFIG_RT_USING_CONSOLE=y
@@ -76,7 +76,7 @@ CONFIG_RT_USING_CACHE=y
 # CONFIG_ARCH_CPU_STACK_GROWS_UPWARD is not set
 CONFIG_ARCH_MM_MMU=y
 CONFIG_RT_USING_USERSPACE=y
-CONFIG_KERNEL_VADDR_START=0x150000000
+CONFIG_KERNEL_VADDR_START=0x80000000
 CONFIG_PV_OFFSET=0
 CONFIG_ARCH_RISCV=y
 CONFIG_ARCH_RISCV64=y
@@ -117,11 +117,27 @@ CONFIG_FINSH_ARG_MAX=10
 #
 CONFIG_RT_USING_DFS=y
 CONFIG_DFS_USING_WORKDIR=y
-CONFIG_DFS_FILESYSTEMS_MAX=2
-CONFIG_DFS_FILESYSTEM_TYPES_MAX=2
+CONFIG_DFS_FILESYSTEMS_MAX=4
+CONFIG_DFS_FILESYSTEM_TYPES_MAX=4
 CONFIG_DFS_FD_MAX=32
 # CONFIG_RT_USING_DFS_MNTTABLE is not set
-# CONFIG_RT_USING_DFS_ELMFAT is not set
+CONFIG_RT_USING_DFS_ELMFAT=y
+
+#
+# elm-chan's FatFs, Generic FAT Filesystem Module
+#
+CONFIG_RT_DFS_ELM_CODE_PAGE=437
+CONFIG_RT_DFS_ELM_WORD_ACCESS=y
+# CONFIG_RT_DFS_ELM_USE_LFN_0 is not set
+# CONFIG_RT_DFS_ELM_USE_LFN_1 is not set
+# CONFIG_RT_DFS_ELM_USE_LFN_2 is not set
+CONFIG_RT_DFS_ELM_USE_LFN_3=y
+CONFIG_RT_DFS_ELM_USE_LFN=3
+CONFIG_RT_DFS_ELM_MAX_LFN=255
+CONFIG_RT_DFS_ELM_DRIVES=2
+CONFIG_RT_DFS_ELM_MAX_SECTOR_SIZE=512
+# CONFIG_RT_DFS_ELM_USE_ERASE is not set
+CONFIG_RT_DFS_ELM_REENTRANT=y
 CONFIG_RT_USING_DFS_DEVFS=y
 CONFIG_RT_USING_DFS_ROMFS=y
 # CONFIG_RT_USING_DFS_CROMFS is not set
@@ -171,7 +187,14 @@ CONFIG_RT_USING_RTC=y
 # CONFIG_RT_USING_PULSE_ENCODER is not set
 # CONFIG_RT_USING_INPUT_CAPTURE is not set
 # CONFIG_RT_USING_WIFI is not set
-# CONFIG_RT_USING_VIRTIO is not set
+CONFIG_RT_USING_VIRTIO=y
+CONFIG_RT_USING_VIRTIO10=y
+# CONFIG_RT_USING_VIRTIO_MMIO_ALIGN is not set
+CONFIG_RT_USING_VIRTIO_BLK=y
+# CONFIG_RT_USING_VIRTIO_NET is not set
+# CONFIG_RT_USING_VIRTIO_CONSOLE is not set
+# CONFIG_RT_USING_VIRTIO_GPU is not set
+# CONFIG_RT_USING_VIRTIO_INPUT is not set
 
 #
 # Using USB
@@ -228,7 +251,9 @@ CONFIG_RT_USING_POSIX_CLOCKTIME=y
 #
 # CONFIG_RT_USING_RYM is not set
 # CONFIG_RT_USING_ULOG is not set
-# CONFIG_RT_USING_UTEST is not set
+CONFIG_RT_USING_UTEST=y
+CONFIG_UTEST_THR_STACK_SIZE=4096
+CONFIG_UTEST_THR_PRIORITY=20
 # CONFIG_RT_USING_RT_LINK is not set
 CONFIG_RT_USING_LWP=y
 CONFIG_RT_LWP_MAX_NR=30
@@ -494,6 +519,7 @@ CONFIG_RT_LWP_SHM_MAX_NR=64
 # CONFIG_PKG_USING_UC_COMMON is not set
 # CONFIG_PKG_USING_UC_MODBUS is not set
 # CONFIG_PKG_USING_RTDUINO is not set
+# CONFIG_PKG_USING_FREERTOS_WRAPPER is not set
 # CONFIG_PKG_USING_CAIRO is not set
 # CONFIG_PKG_USING_PIXMAN is not set
 # CONFIG_PKG_USING_PARTITION is not set
@@ -550,6 +576,11 @@ CONFIG_RT_LWP_SHM_MAX_NR=64
 # CONFIG_PKG_USING_NRF5X_SDK is not set
 # CONFIG_PKG_USING_NRFX is not set
 # CONFIG_PKG_USING_WM_LIBRARIES is not set
+
+#
+# Kendryte SDK
+#
+# CONFIG_PKG_USING_K210_SDK is not set
 # CONFIG_PKG_USING_KENDRYTE_SDK is not set
 # CONFIG_PKG_USING_INFRARED is not set
 # CONFIG_PKG_USING_MULTI_INFRARED is not set
@@ -611,6 +642,8 @@ CONFIG_RT_LWP_SHM_MAX_NR=64
 # CONFIG_PKG_USING_MB85RS16 is not set
 # CONFIG_PKG_USING_CW2015 is not set
 # CONFIG_PKG_USING_RFM300 is not set
+# CONFIG_PKG_USING_IO_INPUT_FILTER is not set
+# CONFIG_PKG_USING_RASPBERRYPI_PICO_SDK is not set
 
 #
 # AI packages
@@ -691,7 +724,13 @@ CONFIG_ENABLE_FPU=y
 # CONFIG_RT_USING_USERSPACE_32BIT_LIMIT is not set
 
 #
-# General Purpose UARTs
+# RISC-V QEMU virt64 configs
 #
+CONFIG_RISCV_S_MODE=y
+CONFIG_BSP_USING_VIRTIO_BLK=y
+# CONFIG_BSP_USING_VIRTIO_NET is not set
+# CONFIG_BSP_USING_VIRTIO_CONSOLE is not set
+# CONFIG_BSP_USING_VIRTIO_GPU is not set
+# CONFIG_BSP_USING_VIRTIO_INPUT is not set
 # CONFIG_BSP_USING_UART1 is not set
 CONFIG___STACKSIZE__=16384

+ 4 - 0
bsp/qemu-virt64-riscv/Kconfig

@@ -39,6 +39,10 @@ config RT_USING_USERSPACE_32BIT_LIMIT
     bool "Enable userspace 32bit limit"
     default n
 
+config RT_USING_VIRTIO_MMIO_ALIGN
+    bool "Open packed attribution, this may caused an error on virtio"
+    default n
+
 source "driver/Kconfig"
 
 config __STACKSIZE__

+ 9 - 1
bsp/qemu-virt64-riscv/applications/SConscript

@@ -6,4 +6,12 @@ CPPPATH = [cwd]
 
 group = DefineGroup('Applications', src, depend = [''], CPPPATH = CPPPATH)
 
-Return('group')
+objs = [group]
+
+list = os.listdir(cwd)
+
+for item in list:
+    if os.path.isfile(os.path.join(cwd, item, 'SConscript')):
+        objs = objs + SConscript(os.path.join(item, 'SConscript'))
+
+Return('objs')

+ 17 - 0
bsp/qemu-virt64-riscv/applications/test/SConscript

@@ -0,0 +1,17 @@
+from building import *
+
+cwd     = GetCurrentDir()
+src	= Glob('*.c') + Glob('*.cpp')
+CPPPATH = [cwd]
+
+group = DefineGroup('Testcase', src, depend = [''], CPPPATH = CPPPATH)
+
+list = os.listdir(cwd)
+
+objs = [group]
+
+for item in list:
+    if os.path.isfile(os.path.join(cwd, item, 'SConscript')):
+        objs = objs + SConscript(os.path.join(item, 'SConscript'))
+
+Return('objs')

+ 9 - 0
bsp/qemu-virt64-riscv/applications/test/test_mm/SConscript

@@ -0,0 +1,9 @@
+from building import *
+
+cwd     = GetCurrentDir()
+src	= Glob('*.c') + Glob('*.cpp')
+CPPPATH = [cwd]
+
+group = DefineGroup('Mmu', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')

+ 118 - 0
bsp/qemu-virt64-riscv/applications/test/test_mm/test_mm.c

@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ */
+#include <rtthread.h>
+#include <rthw.h>
+
+#ifdef RT_USING_UTEST
+
+#include "riscv_mmu.h"
+#include "mmu.h"
+#include "utest.h"
+
+#define MAPPED 0
+#define UNMAPPED 1
+
+/**
+ * @brief Metadata used to test the V2P API; each entry has the form:
+ *
+ * {va_start, va_end (inclusive), pa_start, [mapped/unmapped]}
+ */
+struct mem_desc test_mem_desc[] = {
+    // mapped region
+    {0x80000000, 0x80000000 + 0x10000000 - 1, 0x80000000, MAPPED},  // kernel ram region
+    {0x0, 0x80000000 - 1, 0x0, MAPPED},                             // MMIO region 1
+
+    // unmapped region
+    {0x100000000, 0x110000000 - 1, 0x100000000, UNMAPPED},          // region for IOREMAP
+};
+
+#define NUM_MEM_DESC (sizeof(test_mem_desc) / sizeof(test_mem_desc[0]))
+
+extern rt_mmu_info mmu_info;
+
+#define TEST_GRANULE_POWER 20
+#define TEST_GRANULE_SIZE (1 << TEST_GRANULE_POWER)
+
+// test board mem region
+static void test_v2p(void)
+{
+    struct mem_desc *desc = test_mem_desc;
+
+    // test on mapped region
+    for (size_t i = 0; i < NUM_MEM_DESC; i++, desc += 1)
+    {
+        size_t count = (desc->vaddr_end - desc->vaddr_start + 1) >> TEST_GRANULE_POWER;
+        void *vstart = (void *)desc->vaddr_start;
+        void *pstart = (void *)desc->paddr_start;
+        LOG_I("v2p test on VA region [%016lx-%016lx]", vstart, desc->vaddr_end + 1);
+
+        int err_flag = 0;
+        for (size_t j = 0; j < count; j++, vstart += TEST_GRANULE_SIZE, pstart += TEST_GRANULE_SIZE)
+        {
+            void *pa = rt_hw_mmu_v2p(&mmu_info, vstart);
+            if ((desc->attr == MAPPED && pa != pstart) ||
+                (desc->attr == UNMAPPED && pa != 0))
+                err_flag = 1;
+        }
+        uassert_true(err_flag == 0);
+    }
+}
+
+// TODO: use an array for the test regions
+#define MAP_PA ((void *)0xe0001000)
+
+// test that a VA is reclaimed and reused after unmap
+static void test_find_vaddr_recol(void)
+{
+    void *map;
+    map = rt_hw_mmu_map(&mmu_info, RT_NULL, MAP_PA, 4096, MMU_MAP_K_RWCB);
+    rt_hw_mmu_unmap(&mmu_info, map, 4096);
+    void *remap;
+    remap = rt_hw_mmu_map(&mmu_info, RT_NULL, MAP_PA, 4096, MMU_MAP_K_RWCB);
+    rt_hw_mmu_unmap(&mmu_info, map, 4096);
+
+    uassert_true(map == remap);
+}
+
+// find vaddr should return a valid VA for rt_hw_mmu_map
+static void test_find_vaddr_valid(void)
+{
+    size_t map;
+    map = (size_t)rt_hw_mmu_map(&mmu_info, RT_NULL, MAP_PA, 4096, MMU_MAP_K_RWCB);
+    rt_hw_mmu_unmap(&mmu_info, (void*)map, 4096);
+    LOG_I("Mapped pa %p to %p, %p", MAP_PA, map, mmu_info.vend);
+    uassert_true((map >= (mmu_info.vstart << 30)) && (map <= (((mmu_info.vend - mmu_info.vstart) ? mmu_info.vend : mmu_info.vend + 1) << 30)));
+}
+
+// ioremap functionality
+static void test_ioremap(void)
+{
+
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(test_v2p);
+    UTEST_UNIT_RUN(test_find_vaddr_recol);
+    UTEST_UNIT_RUN(test_find_vaddr_valid);
+    UTEST_UNIT_RUN(test_ioremap);
+}
+
+UTEST_TC_EXPORT(testcase, "testcases.libcpu.mmu", utest_tc_init, utest_tc_cleanup, 10);
+#endif /* RT_USING_UTEST */
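
Note: with RT_USING_UTEST enabled, the testcase exported above can be run from the msh shell by its exported name. A minimal usage sketch (the shell prompt and output will vary with the build):

    msh /> utest_run testcases.libcpu.mmu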

+ 33 - 3
bsp/qemu-virt64-riscv/driver/Kconfig

@@ -1,6 +1,38 @@
+menu "RISC-V QEMU virt64 configs"
 
+config RISCV_S_MODE
+    bool "RT-Thread run in RISC-V S-Mode(supervisor mode)"
+    default y
 
-menu "General Purpose UARTs"
+config RT_USING_VIRTIO
+    bool "Using VirtIO"
+    default y
+    depends on RT_USING_DEVICE_OPS
+
+config BSP_USING_VIRTIO_BLK
+    bool "Using VirtIO BLK"
+    default y
+    depends on RT_USING_VIRTIO
+
+config BSP_USING_VIRTIO_NET
+    bool "Using VirtIO NET"
+    default y
+    depends on RT_USING_VIRTIO
+
+config BSP_USING_VIRTIO_CONSOLE
+    bool "Using VirtIO Console"
+    default y
+    depends on RT_USING_VIRTIO
+
+config BSP_USING_VIRTIO_GPU
+    bool "Using VirtIO GPU"
+    default y
+    depends on RT_USING_VIRTIO
+
+config BSP_USING_VIRTIO_INPUT
+    bool "Using VirtIO Input"
+    default y
+    depends on RT_USING_VIRTIO
 
 menuconfig BSP_USING_UART1
     bool "Enable UART1"
@@ -13,6 +45,4 @@ menuconfig BSP_USING_UART1
             int "uart1 RXD pin number"
             default 21
     endif
-
 endmenu
-

+ 145 - 137
bsp/qemu-virt64-riscv/driver/board.c

@@ -24,19 +24,22 @@
 #include "stack.h"
 
 #ifdef RT_USING_USERSPACE
-    #include "riscv_mmu.h"
-    #include "mmu.h"
-    #include "page.h"
-    #include "lwp_arch.h"
+#include "riscv_mmu.h"
+#include "mmu.h"
+#include "page.h"
+#include "lwp_arch.h"
 
-    rt_region_t init_page_region =
-    {
-        (rt_size_t)RT_HW_PAGE_START,
-        (rt_size_t)RT_HW_PAGE_END
+rt_region_t init_page_region = {(rt_size_t)RT_HW_PAGE_START, (rt_size_t)RT_HW_PAGE_END};
+
+rt_mmu_info mmu_info;
+
+extern size_t MMUTable[];
+
+struct mem_desc platform_mem_desc[] = {
+    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x10000000 - 1, KERNEL_VADDR_START + PV_OFFSET, NORMAL_MEM},
     };
 
-    volatile rt_size_t MMUTable[__SIZE(VPN2_BIT)] __attribute__((aligned(4 * 1024)));
-    rt_mmu_info mmu_info;
+#define NUM_MEM_DESC (sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]))
 
 #endif
 
@@ -61,42 +64,45 @@ void primary_cpu_entry(void)
     entry();
 }
 
-void rt_hw_interrupt_init()
-{
-    /* Enable machine external interrupts. */
-    set_csr(sie, SIP_SEIP);
-}
+#define IOREMAP_SIZE (1ul << 30)
 
 void rt_hw_board_init(void)
 {
-    /* initalize interrupt */
-    rt_hw_interrupt_init();
-    /* initialize hardware interrupt */
+#ifdef RT_USING_USERSPACE
+    rt_page_init(init_page_region);
+    /* init mmu_info structure */
+    rt_hw_mmu_map_init(&mmu_info, (void *)(USER_VADDR_START - IOREMAP_SIZE), IOREMAP_SIZE, (rt_size_t *)MMUTable, 0);
+    // this API is reserved for now since the PLIC etc. have not yet been fully ported to the MMU version
+    rt_hw_mmu_kernel_map_init(&mmu_info, 0x00000000UL, 0x80000000);
+    /* setup region, and enable MMU */
+    rt_hw_mmu_setup(&mmu_info, platform_mem_desc, NUM_MEM_DESC);
+
+#endif
+
+#ifdef RT_USING_HEAP
+    /* initialize memory system */
+    rt_system_heap_init(RT_HW_HEAP_BEGIN, RT_HW_HEAP_END);
+#endif
+
     rt_hw_uart_init();
 
+#ifdef RT_USING_CONSOLE
+    /* set console device */
+    rt_console_set_device("uart");
+
+#ifdef RT_USING_HEAP
+    rt_kprintf("heap: [0x%08x - 0x%08x]\n", (rt_ubase_t)RT_HW_HEAP_BEGIN, (rt_ubase_t)RT_HW_HEAP_END);
+#endif
+
+    rt_hw_interrupt_init();
+
     rt_hw_tick_init();
 
-    #ifdef RT_USING_HEAP
-        rt_kprintf("heap: [0x%08x - 0x%08x]\n", (rt_ubase_t) RT_HW_HEAP_BEGIN, (rt_ubase_t) RT_HW_HEAP_END);
-        /* initialize memory system */
-        rt_system_heap_init(RT_HW_HEAP_BEGIN, RT_HW_HEAP_END);
-    #endif
-
-    #ifdef RT_USING_CONSOLE
-        /* set console device */
-        rt_console_set_device("uart");
-    #endif /* RT_USING_CONSOLE */
- 
-    #ifdef RT_USING_COMPONENTS_INIT
-        rt_components_board_init();
-    #endif
-
-    #ifdef RT_USING_USERSPACE
-        rt_page_init(init_page_region);
-        rt_hw_mmu_map_init(&mmu_info,(void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, (rt_size_t *)MMUTable, 0);
-        rt_hw_mmu_kernel_map_init(&mmu_info, 0x00000000UL, USER_VADDR_START - 1);
-        switch_mmu((void *)MMUTable);
-    #endif
+#endif /* RT_USING_CONSOLE */
+
+#ifdef RT_USING_COMPONENTS_INIT
+    rt_components_board_init();
+#endif
 
     plic_init();
 }
@@ -105,7 +111,8 @@ void rt_hw_cpu_reset(void)
 {
     sbi_shutdown();
 
-    while(1);
+    while (1)
+        ;
 }
 MSH_CMD_EXPORT_ALIAS(rt_hw_cpu_reset, reboot, reset machine);
 
@@ -114,116 +121,116 @@ void dump_regs(struct rt_hw_stack_frame *regs)
     rt_kprintf("--------------Dump Registers-----------------\n");
 
     rt_kprintf("Function Registers:\n");
-    rt_kprintf("\tra(x1) = 0x%p\tuser_sp = 0x%p\n",regs -> ra,regs -> user_sp_exc_stack);
-    rt_kprintf("\tgp(x3) = 0x%p\ttp(x4) = 0x%p\n",regs -> gp,regs -> tp);
+    rt_kprintf("\tra(x1) = 0x%p\tuser_sp = 0x%p\n", regs->ra, regs->user_sp_exc_stack);
+    rt_kprintf("\tgp(x3) = 0x%p\ttp(x4) = 0x%p\n", regs->gp, regs->tp);
     rt_kprintf("Temporary Registers:\n");
-    rt_kprintf("\tt0(x5) = 0x%p\tt1(x6) = 0x%p\n",regs -> t0,regs -> t1);
-    rt_kprintf("\tt2(x7) = 0x%p\n",regs -> t2);
-    rt_kprintf("\tt3(x28) = 0x%p\tt4(x29) = 0x%p\n",regs -> t3,regs -> t4);
-    rt_kprintf("\tt5(x30) = 0x%p\tt6(x31) = 0x%p\n",regs -> t5,regs -> t6);
+    rt_kprintf("\tt0(x5) = 0x%p\tt1(x6) = 0x%p\n", regs->t0, regs->t1);
+    rt_kprintf("\tt2(x7) = 0x%p\n", regs->t2);
+    rt_kprintf("\tt3(x28) = 0x%p\tt4(x29) = 0x%p\n", regs->t3, regs->t4);
+    rt_kprintf("\tt5(x30) = 0x%p\tt6(x31) = 0x%p\n", regs->t5, regs->t6);
     rt_kprintf("Saved Registers:\n");
-    rt_kprintf("\ts0/fp(x8) = 0x%p\ts1(x9) = 0x%p\n",regs -> s0_fp,regs -> s1);
-    rt_kprintf("\ts2(x18) = 0x%p\ts3(x19) = 0x%p\n",regs -> s2,regs -> s3);
-    rt_kprintf("\ts4(x20) = 0x%p\ts5(x21) = 0x%p\n",regs -> s4,regs -> s5);
-    rt_kprintf("\ts6(x22) = 0x%p\ts7(x23) = 0x%p\n",regs -> s6,regs -> s7);
-    rt_kprintf("\ts8(x24) = 0x%p\ts9(x25) = 0x%p\n",regs -> s8,regs -> s9);
-    rt_kprintf("\ts10(x26) = 0x%p\ts11(x27) = 0x%p\n",regs -> s10,regs -> s11);
+    rt_kprintf("\ts0/fp(x8) = 0x%p\ts1(x9) = 0x%p\n", regs->s0_fp, regs->s1);
+    rt_kprintf("\ts2(x18) = 0x%p\ts3(x19) = 0x%p\n", regs->s2, regs->s3);
+    rt_kprintf("\ts4(x20) = 0x%p\ts5(x21) = 0x%p\n", regs->s4, regs->s5);
+    rt_kprintf("\ts6(x22) = 0x%p\ts7(x23) = 0x%p\n", regs->s6, regs->s7);
+    rt_kprintf("\ts8(x24) = 0x%p\ts9(x25) = 0x%p\n", regs->s8, regs->s9);
+    rt_kprintf("\ts10(x26) = 0x%p\ts11(x27) = 0x%p\n", regs->s10, regs->s11);
     rt_kprintf("Function Arguments Registers:\n");
-    rt_kprintf("\ta0(x10) = 0x%p\ta1(x11) = 0x%p\n",regs -> a0,regs -> a1);
-    rt_kprintf("\ta2(x12) = 0x%p\ta3(x13) = 0x%p\n",regs -> a2,regs -> a3);
-    rt_kprintf("\ta4(x14) = 0x%p\ta5(x15) = 0x%p\n",regs -> a4,regs -> a5);
-    rt_kprintf("\ta6(x16) = 0x%p\ta7(x17) = 0x%p\n",regs -> a6,regs -> a7);
-    rt_kprintf("sstatus = 0x%p\n",regs -> sstatus);
-    rt_kprintf("\t%s\n",(regs -> sstatus & SSTATUS_SIE) ? "Supervisor Interrupt Enabled" : "Supervisor Interrupt Disabled");
-    rt_kprintf("\t%s\n",(regs -> sstatus & SSTATUS_SPIE) ? "Last Time Supervisor Interrupt Enabled" : "Last Time Supervisor Interrupt Disabled");
-    rt_kprintf("\t%s\n",(regs -> sstatus & SSTATUS_SPP) ? "Last Privilege is Supervisor Mode" : "Last Privilege is User Mode");
-    rt_kprintf("\t%s\n",(regs -> sstatus & SSTATUS_PUM) ? "Permit to Access User Page" : "Not Permit to Access User Page");
-    rt_kprintf("\t%s\n",(regs -> sstatus & (1 << 19)) ? "Permit to Read Executable-only Page" : "Not Permit to Read Executable-only Page");
+    rt_kprintf("\ta0(x10) = 0x%p\ta1(x11) = 0x%p\n", regs->a0, regs->a1);
+    rt_kprintf("\ta2(x12) = 0x%p\ta3(x13) = 0x%p\n", regs->a2, regs->a3);
+    rt_kprintf("\ta4(x14) = 0x%p\ta5(x15) = 0x%p\n", regs->a4, regs->a5);
+    rt_kprintf("\ta6(x16) = 0x%p\ta7(x17) = 0x%p\n", regs->a6, regs->a7);
+    rt_kprintf("sstatus = 0x%p\n", regs->sstatus);
+    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SIE) ? "Supervisor Interrupt Enabled" : "Supervisor Interrupt Disabled");
+    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SPIE) ? "Last Time Supervisor Interrupt Enabled" : "Last Time Supervisor Interrupt Disabled");
+    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SPP) ? "Last Privilege is Supervisor Mode" : "Last Privilege is User Mode");
+    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_PUM) ? "Permit to Access User Page" : "Not Permit to Access User Page");
+    rt_kprintf("\t%s\n", (regs->sstatus & (1 << 19)) ? "Permit to Read Executable-only Page" : "Not Permit to Read Executable-only Page");
     rt_size_t satp_v = read_csr(satp);
-    rt_kprintf("satp = 0x%p\n",satp_v);
-    rt_kprintf("\tCurrent Page Table(Physical) = 0x%p\n",__MASKVALUE(satp_v,__MASK(44)) << PAGE_OFFSET_BIT);
-    rt_kprintf("\tCurrent ASID = 0x%p\n",__MASKVALUE(satp_v >> 44,__MASK(16)) << PAGE_OFFSET_BIT);
+    rt_kprintf("satp = 0x%p\n", satp_v);
+    rt_kprintf("\tCurrent Page Table(Physical) = 0x%p\n", __MASKVALUE(satp_v, __MASK(44)) << PAGE_OFFSET_BIT);
+    rt_kprintf("\tCurrent ASID = 0x%p\n", __MASKVALUE(satp_v >> 44, __MASK(16)) << PAGE_OFFSET_BIT);
     const char *mode_str = "Unknown Address Translation/Protection Mode";
-    
-    switch(__MASKVALUE(satp_v >> 60,__MASK(4)))
+
+    switch (__MASKVALUE(satp_v >> 60, __MASK(4)))
     {
-        case 0:
-            mode_str = "No Address Translation/Protection Mode";
-            break;
+    case 0:
+        mode_str = "No Address Translation/Protection Mode";
+        break;
 
-        case 8:
-            mode_str = "Page-based 39-bit Virtual Addressing Mode";
-            break;
+    case 8:
+        mode_str = "Page-based 39-bit Virtual Addressing Mode";
+        break;
 
-        case 9:
-            mode_str = "Page-based 48-bit Virtual Addressing Mode";
-            break;
+    case 9:
+        mode_str = "Page-based 48-bit Virtual Addressing Mode";
+        break;
     }
 
-    rt_kprintf("\tMode = %s\n",mode_str);
+    rt_kprintf("\tMode = %s\n", mode_str);
     rt_kprintf("-----------------Dump OK---------------------\n");
 }
 
-static const char *Exception_Name[] = 
-                                {
-                                    "Instruction Address Misaligned",
-                                    "Instruction Access Fault",
-                                    "Illegal Instruction",
-                                    "Breakpoint",
-                                    "Load Address Misaligned",
-                                    "Load Access Fault",
-                                    "Store/AMO Address Misaligned",
-                                    "Store/AMO Access Fault",
-                                    "Environment call from U-mode",
-                                    "Environment call from S-mode",
-                                    "Reserved-10",
-                                    "Reserved-11",
-                                    "Instruction Page Fault",
-                                    "Load Page Fault",
-                                    "Reserved-14",
-                                    "Store/AMO Page Fault"
-                                };
-
-static const char *Interrupt_Name[] = 
-                                {
-                                    "User Software Interrupt",
-                                    "Supervisor Software Interrupt",
-                                    "Reversed-2",
-                                    "Reversed-3",
-                                    "User Timer Interrupt",
-                                    "Supervisor Timer Interrupt",
-                                    "Reversed-6",
-                                    "Reversed-7",
-                                    "User External Interrupt",
-                                    "Supervisor External Interrupt",
-                                    "Reserved-10",
-                                    "Reserved-11",
-                                };
-
-void handle_trap(rt_size_t scause,rt_size_t stval,rt_size_t sepc,struct rt_hw_stack_frame *sp)
+static const char *Exception_Name[] =
+    {
+        "Instruction Address Misaligned",
+        "Instruction Access Fault",
+        "Illegal Instruction",
+        "Breakpoint",
+        "Load Address Misaligned",
+        "Load Access Fault",
+        "Store/AMO Address Misaligned",
+        "Store/AMO Access Fault",
+        "Environment call from U-mode",
+        "Environment call from S-mode",
+        "Reserved-10",
+        "Reserved-11",
+        "Instruction Page Fault",
+        "Load Page Fault",
+        "Reserved-14",
+        "Store/AMO Page Fault"};
+
+static const char *Interrupt_Name[] =
+    {
+        "User Software Interrupt",
+        "Supervisor Software Interrupt",
+        "Reversed-2",
+        "Reversed-3",
+        "User Timer Interrupt",
+        "Supervisor Timer Interrupt",
+        "Reversed-6",
+        "Reversed-7",
+        "User External Interrupt",
+        "Supervisor External Interrupt",
+        "Reserved-10",
+        "Reserved-11",
+};
+
+extern struct rt_irq_desc irq_desc[];
+
+void handle_trap(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw_stack_frame *sp)
 {
     // rt_kprintf(".");
-    if(scause == (uint64_t)(0x8000000000000005))
+    if (scause == (uint64_t)(0x8000000000000005))
     {
         rt_interrupt_enter();
         tick_isr();
         rt_interrupt_leave();
     }
-    /*else if(scause == (uint64_t)(0x8000000000000009))
+    else if (scause == (uint64_t)(0x8000000000000009))
     {
-        rt_kprintf("a\n");
-        while(1);
-        extern struct rt_serial_device  serial1;
-        rt_hw_serial_isr(&serial1,RT_SERIAL_EVENT_RX_IND);
-    }*/
+        int plic_irq = plic_claim();
+        plic_complete(plic_irq);
+        irq_desc[plic_irq].handler(plic_irq, irq_desc[plic_irq].param);
+    }
     else
     {
-        rt_size_t id = __MASKVALUE(scause,__MASK(63UL));
+        rt_size_t id = __MASKVALUE(scause, __MASK(63UL));
         const char *msg;
 
-        if(scause >> 63)
+        if (scause >> 63)
         {
-            if(id < sizeof(Interrupt_Name) / sizeof(const char *))
+            if (id < sizeof(Interrupt_Name) / sizeof(const char *))
             {
                 msg = Interrupt_Name[id];
             }
@@ -232,19 +239,19 @@ void handle_trap(rt_size_t scause,rt_size_t stval,rt_size_t sepc,struct rt_hw_st
                 msg = "Unknown Interrupt";
             }
 
-            rt_kprintf("Unhandled Interrupt %ld:%s\n",id,msg);
+            rt_kprintf("Unhandled Interrupt %ld:%s\n", id, msg);
         }
         else
         {
-            #ifdef RT_USING_USERSPACE
-                if(id == 15)
-                {
-                    arch_expand_user_stack((void *)stval);
-                    return;
-                }
-            #endif
-
-            if(id < sizeof(Exception_Name) / sizeof(const char *))
+#ifdef RT_USING_USERSPACE
+            if (id == 15)
+            {
+                arch_expand_user_stack((void *)stval);
+                return;
+            }
+#endif
+
+            if (id < sizeof(Exception_Name) / sizeof(const char *))
             {
                 msg = Exception_Name[id];
             }
@@ -253,11 +260,12 @@ void handle_trap(rt_size_t scause,rt_size_t stval,rt_size_t sepc,struct rt_hw_st
                 msg = "Unknown Exception";
             }
 
-            rt_kprintf("Unhandled Exception %ld:%s\n",id,msg);
+            rt_kprintf("Unhandled Exception %ld:%s\n", id, msg);
         }
 
-        rt_kprintf("scause:0x%p,stval:0x%p,sepc:0x%p\n",scause,stval,sepc);
+        rt_kprintf("scause:0x%p,stval:0x%p,sepc:0x%p\n", scause, stval, sepc);
         dump_regs(sp);
-        while(1);
+        while (1)
+            ;
     }
 }

+ 2 - 1
bsp/qemu-virt64-riscv/driver/drv_uart.c

@@ -12,6 +12,7 @@
 #include "drv_uart.h"
 
 #include <stdio.h>
+#include <ioremap.h>
 #include "sbi.h"
 
 #define UART_DEFAULT_BAUDRATE               115200
@@ -165,7 +166,7 @@ int rt_hw_uart_init(void)
         serial->config           = config;
         serial->config.baud_rate = UART_DEFAULT_BAUDRATE;
 
-        uart->hw_base   = 0x10000000;
+        uart->hw_base   = (rt_size_t)rt_ioremap((void*)0x10000000, 4096);
         uart->irqno     = 0xa;
 
         rt_hw_serial_register(serial,

+ 101 - 0
bsp/qemu-virt64-riscv/driver/drv_virtio.c

@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2021-11-11     GuEe-GUI     the first version
+ */
+
+#include <rtthread.h>
+#include <virt.h>
+
+#ifdef RT_USING_VIRTIO
+
+#include <virtio.h>
+#ifdef BSP_USING_VIRTIO_BLK
+#include <virtio_blk.h>
+#endif
+#ifdef BSP_USING_VIRTIO_NET
+#include <virtio_net.h>
+#endif
+#ifdef BSP_USING_VIRTIO_CONSOLE
+#include <virtio_console.h>
+#endif
+#ifdef BSP_USING_VIRTIO_GPU
+#include <virtio_gpu.h>
+#endif
+#ifdef BSP_USING_VIRTIO_INPUT
+#include <virtio_input.h>
+#endif
+
+#include <board.h>
+
+static virtio_device_init_handler virtio_device_init_handlers[] =
+{
+#ifdef BSP_USING_VIRTIO_BLK
+    [VIRTIO_DEVICE_ID_BLOCK]    = rt_virtio_blk_init,
+#endif
+#ifdef BSP_USING_VIRTIO_NET
+    [VIRTIO_DEVICE_ID_NET]      = rt_virtio_net_init,
+#endif
+#ifdef BSP_USING_VIRTIO_CONSOLE
+    [VIRTIO_DEVICE_ID_CONSOLE]  = rt_virtio_console_init,
+#endif
+#ifdef BSP_USING_VIRTIO_GPU
+    [VIRTIO_DEVICE_ID_GPU]      = rt_virtio_gpu_init,
+#endif
+#ifdef BSP_USING_VIRTIO_INPUT
+    [VIRTIO_DEVICE_ID_INPUT]    = rt_virtio_input_init,
+#endif
+    [VIRTIO_DEVICE_TYPE_SIZE]   = RT_NULL
+};
+
+int rt_virtio_devices_init(void)
+{
+    int i;
+    rt_uint32_t irq = VIRTIO_IRQ_BASE;
+    rt_ubase_t mmio_base = VIRTIO_MMIO_BASE;
+    struct virtio_mmio_config *mmio_config;
+    virtio_device_init_handler init_handler;
+
+    if (sizeof(virtio_device_init_handlers) == 0)
+    {
+        /* The compiler will optimize the codes after here. */
+        return 0;
+    }
+
+#ifdef RT_USING_LWP
+    mmio_base = (rt_ubase_t)rt_ioremap((void *)mmio_base, VIRTIO_MMIO_SIZE * VIRTIO_MAX_NR);
+
+    if (mmio_base == RT_NULL)
+    {
+        return -RT_ERROR;
+    }
+#endif
+
+    for (i = 0; i < VIRTIO_MAX_NR; ++i, ++irq, mmio_base += VIRTIO_MMIO_SIZE)
+    {
+        mmio_config = (struct virtio_mmio_config *)mmio_base;
+
+        if (mmio_config->magic != VIRTIO_MAGIC_VALUE ||
+            mmio_config->version != RT_USING_VIRTIO_VERSION ||
+            mmio_config->vendor_id != VIRTIO_VENDOR_ID)
+        {
+            continue;
+        }
+
+        init_handler = virtio_device_init_handlers[mmio_config->device_id];
+
+        if (init_handler != RT_NULL)
+        {
+            init_handler((rt_ubase_t *)mmio_base, irq);
+        }
+    }
+    rt_kprintf("rt_virtio_devices_init done!\n");
+
+    return 0;
+}
+INIT_DEVICE_EXPORT(rt_virtio_devices_init);
+#endif  /* RT_USING_VIRTIO */

+ 16 - 0
bsp/qemu-virt64-riscv/driver/drv_virtio.h

@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2021-11-11     GuEe-GUI     the first version
+ */
+
+#ifndef __DRV_VIRTIO_H__
+#define __DRV_VIRTIO_H__
+
+int rt_virtio_devices_init(void);
+
+#endif /* __DRV_VIRTIO_H__ */

+ 31 - 0
bsp/qemu-virt64-riscv/driver/virt.h

@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2021-02-17     GuEe-GUI     the first version
+ */
+
+#ifndef VIRT_H__
+#define VIRT_H__
+
+#include <rtdef.h>
+
+#ifdef RT_USING_LWP
+#include <mmu.h>
+#include <ioremap.h>
+
+extern rt_mmu_info mmu_info;
+#endif
+
+/* VirtIO */
+#define VIRTIO_MMIO_BASE    0x10001000
+#define VIRTIO_MMIO_SIZE    0x00001000
+#define VIRTIO_MAX_NR       8
+#define VIRTIO_IRQ_BASE     1
+#define VIRTIO_VENDOR_ID    0x554d4551  /* "QEMU" */
+
+#define MAX_HANDLERS        128
+#endif

+ 7 - 1
bsp/qemu-virt64-riscv/qemu-nographic.sh

@@ -1 +1,7 @@
-qemu-system-riscv64 -nographic -machine virt -m 256M -kernel rtthread.bin -bios default 
+if [ ! -f "sd.bin" ]; then
+dd if=/dev/zero of=sd.bin bs=1024 count=65536
+fi
+
+qemu-system-riscv64 -nographic -machine virt -m 256M -kernel rtthread.bin \
+-drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 \
+-device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0
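
The script above creates a 64 MB sd.bin image to back the virtio-blk drive and exposes the VirtIO console through a telnet chardev on port 4321. A usage sketch, assuming the block device registers under a name such as virtio-blk0 (verify the actual name with list_device in msh):

    telnet 127.0.0.1 4321            # attach to the VirtIO console from the host
    msh /> mkfs -t elm virtio-blk0   # format the backing image with elm FatFs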

+ 28 - 4
bsp/qemu-virt64-riscv/rtconfig.h

@@ -39,6 +39,7 @@
 /* Kernel Device Object */
 
 #define RT_USING_DEVICE
+#define RT_USING_DEVICE_OPS
 #define RT_USING_CONSOLE
 #define RT_CONSOLEBUF_SIZE 256
 #define RT_CONSOLE_DEVICE_NAME "uart"
@@ -47,7 +48,7 @@
 #define RT_USING_CACHE
 #define ARCH_MM_MMU
 #define RT_USING_USERSPACE
-#define KERNEL_VADDR_START 0x150000000
+#define KERNEL_VADDR_START 0x80000000
 #define PV_OFFSET 0
 #define ARCH_RISCV
 #define ARCH_RISCV64
@@ -81,9 +82,21 @@
 
 #define RT_USING_DFS
 #define DFS_USING_WORKDIR
-#define DFS_FILESYSTEMS_MAX 2
-#define DFS_FILESYSTEM_TYPES_MAX 2
+#define DFS_FILESYSTEMS_MAX 4
+#define DFS_FILESYSTEM_TYPES_MAX 4
 #define DFS_FD_MAX 32
+#define RT_USING_DFS_ELMFAT
+
+/* elm-chan's FatFs, Generic FAT Filesystem Module */
+
+#define RT_DFS_ELM_CODE_PAGE 437
+#define RT_DFS_ELM_WORD_ACCESS
+#define RT_DFS_ELM_USE_LFN_3
+#define RT_DFS_ELM_USE_LFN 3
+#define RT_DFS_ELM_MAX_LFN 255
+#define RT_DFS_ELM_DRIVES 2
+#define RT_DFS_ELM_MAX_SECTOR_SIZE 512
+#define RT_DFS_ELM_REENTRANT
 #define RT_USING_DFS_DEVFS
 #define RT_USING_DFS_ROMFS
 
@@ -101,6 +114,9 @@
 #define RT_USING_ZERO
 #define RT_USING_RANDOM
 #define RT_USING_RTC
+#define RT_USING_VIRTIO
+#define RT_USING_VIRTIO10
+#define RT_USING_VIRTIO_BLK
 
 /* Using USB */
 
@@ -131,6 +147,9 @@
 
 /* Utilities */
 
+#define RT_USING_UTEST
+#define UTEST_THR_STACK_SIZE 4096
+#define UTEST_THR_PRIORITY 20
 #define RT_USING_LWP
 #define RT_LWP_MAX_NR 30
 #define LWP_TASK_STACK_SIZE 16384
@@ -197,6 +216,9 @@
 /* peripheral libraries and drivers */
 
 
+/* Kendryte SDK */
+
+
 /* AI packages */
 
 
@@ -212,8 +234,10 @@
 #define BOARD_virt
 #define ENABLE_FPU
 
-/* General Purpose UARTs */
+/* RISC-V QEMU virt64 configs */
 
+#define RISCV_S_MODE
+#define BSP_USING_VIRTIO_BLK
 #define __STACKSIZE__ 16384
 
 #endif

+ 4 - 0
components/drivers/virtio/virtio_net.h

@@ -11,6 +11,8 @@
 #ifndef __VIRTIO_NET_H__
 #define __VIRTIO_NET_H__
 
+#ifdef RT_USING_VIRTIO_NET
+
 #include <rtdef.h>
 #include <netif/ethernetif.h>
 
@@ -112,4 +114,6 @@ struct virtio_net_device
 
 rt_err_t rt_virtio_net_init(rt_ubase_t *mmio_base, rt_uint32_t irq);
 
+#endif /* RT_USING_VIRTIO_NET */
+
 #endif /* __VIRTIO_NET_H__ */

+ 7 - 0
components/lwp/arch/risc-v/rv64/lwp_arch.h

@@ -11,6 +11,7 @@
 #define  LWP_ARCH_H__
 
 #include <lwp.h>
+#include <riscv_mmu.h>
 
 #ifdef RT_USING_USERSPACE
 
@@ -34,8 +35,14 @@
 #define LDSO_LOAD_VADDR     0x200000000
 #endif
 
+/* this attribution is cpu specified, and it should be defined in riscv_mmu.h */
+#ifndef MMU_MAP_U_RWCB
 #define MMU_MAP_U_RWCB 0
+#endif
+
+#ifndef MMU_MAP_U_RW
 #define MMU_MAP_U_RW 0
+#endif
 
 #ifdef __cplusplus
 extern "C" {

+ 95 - 0
libcpu/risc-v/virt64/interrupt.c

@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2018/10/01     Bernard      The first version
+ * 2018/12/27     Jesven       Change irq enable/disable to cpu0
+ */
+#include <plic.h>
+#include <mmu.h>
+#include <lwp_arch.h>
+#include "tick.h"
+#include "encoding.h"
+#include "riscv.h"
+#include "interrupt.h"
+
+struct rt_irq_desc irq_desc[MAX_HANDLERS];
+
+static rt_isr_handler_t rt_hw_interrupt_handle(rt_uint32_t vector, void *param)
+{
+    rt_kprintf("UN-handled interrupt %d occurred!!!\n", vector);
+    return RT_NULL;
+}
+
+int rt_hw_plic_irq_enable(int irq_number)
+{
+    plic_irq_enable(irq_number);
+    return 0;
+}
+
+int rt_hw_plic_irq_disable(int irq_number)
+{
+    plic_irq_disable(irq_number);
+    return 0;
+}
+
+/**
+ * This function will un-mask an interrupt.
+ * @param vector the interrupt number
+ */
+void rt_hw_interrupt_umask(int vector)
+{
+    plic_set_priority(vector, 1);
+
+    rt_hw_plic_irq_enable(vector);
+}
+
+/**
+ * This function will install an interrupt service routine for an interrupt.
+ * @param vector the interrupt number
+ * @param handler the interrupt service routine to be installed
+ * @param param the parameter passed to the handler when it is called
+ * @param name the name of the interrupt
+ * @return the interrupt service routine that was previously installed
+ */
+rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
+        void *param, const char *name)
+{
+    rt_isr_handler_t old_handler = RT_NULL;
+
+    if(vector < MAX_HANDLERS)
+    {
+        old_handler = irq_desc[vector].handler;
+        if (handler != RT_NULL)
+        {
+            irq_desc[vector].handler = (rt_isr_handler_t)handler;
+            irq_desc[vector].param = param;
+#ifdef RT_USING_INTERRUPT_INFO
+            rt_snprintf(irq_desc[vector].name, RT_NAME_MAX - 1, "%s", name);
+            irq_desc[vector].counter = 0;
+#endif
+        }
+    }
+
+    return old_handler;
+}
+
+void rt_hw_interrupt_init()
+{
+    /* Enable machine external interrupts. */
+    // set_csr(sie, SIP_SEIP);
+    int idx = 0;
+    /* init exceptions table */
+    for (idx = 0; idx < MAX_HANDLERS; idx++)
+    {
+        irq_desc[idx].handler = (rt_isr_handler_t)rt_hw_interrupt_handle;
+        irq_desc[idx].param = RT_NULL;
+#ifdef RT_USING_INTERRUPT_INFO
+        rt_snprintf(irq_desc[idx].name, RT_NAME_MAX - 1, "default");
+        irq_desc[idx].counter = 0;
+#endif
+    }
+
+    plic_set_threshold(0);
+}

+ 29 - 0
libcpu/risc-v/virt64/interrupt.h

@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2021-05-20     bigmagic      The first version
+ */
+
+#ifndef INTERRUPT_H__
+#define INTERRUPT_H__
+
+#define MAX_HANDLERS    128
+
+#include <rthw.h>
+#include "stack.h"
+
+// int rt_hw_clint_ipi_enable(void);
+// int rt_hw_clint_ipi_disable(void);
+int rt_hw_plic_irq_enable(int irq_number);
+int rt_hw_plic_irq_disable(int irq_number);
+void rt_hw_interrupt_init(void);
+void rt_hw_interrupt_mask(int vector);
+rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
+        void *param, const char *name);
+void handle_trap(rt_size_t xcause,rt_size_t xtval,rt_size_t xepc,struct rt_hw_stack_frame *sp);
+
+#endif

+ 226 - 227
libcpu/risc-v/virt64/mmu.c

@@ -11,21 +11,29 @@
 #include <rtthread.h>
 #include <rthw.h>
 #include <board.h>
-#include "page.h"
+#include <page.h>
 #include <stdlib.h>
 #include <string.h>
 
+#define DBG_TAG     "mmu"
+#define DBG_LVL     DBG_INFO
+#include <rtdbg.h>
+
+#include <string.h>
 #include "riscv.h"
 #include "riscv_mmu.h"
 #include "mmu.h"
+#include <string.h>
 
 void *current_mmu_table = RT_NULL;
 void rt_hw_cpu_icache_invalidate_all();
 void rt_hw_cpu_dcache_flush_all();
-void rt_hw_cpu_dcache_clean(void *addr,rt_size_t size);
+void rt_hw_cpu_dcache_clean(void *addr, rt_size_t size);
 
 static rt_mutex_t mm_lock;
 
+volatile rt_ubase_t MMUTable[__SIZE(VPN2_BIT)] __attribute__((aligned(4 * 1024)));
+
 void rt_mm_lock(void)
 {
     if (rt_thread_self())
@@ -55,7 +63,7 @@ void rt_mm_unlock(void)
 static void rt_hw_cpu_tlb_invalidate()
 {
     rt_size_t satpv = read_csr(satp);
-    write_csr(satp,satpv);
+    write_csr(satp, satpv);
     mmu_flush_tlb();
 }
 
@@ -67,18 +75,18 @@ void *mmu_table_get()
 void switch_mmu(void *mmu_table)
 {
     current_mmu_table = mmu_table;
-    RT_ASSERT(__CHECKALIGN(mmu_table,PAGE_OFFSET_BIT));
+    RT_ASSERT(__CHECKALIGN(mmu_table, PAGE_OFFSET_BIT));
     mmu_set_pagetable((rt_ubase_t)mmu_table);
     rt_hw_cpu_dcache_flush_all();
     rt_hw_cpu_icache_invalidate_all();
 }
 
-int rt_hw_mmu_map_init(rt_mmu_info *mmu_info,void *v_address,rt_size_t size,rt_size_t *vtable,rt_size_t pv_off)
+int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, rt_size_t *vtable, rt_size_t pv_off)
 {
-    size_t l1_off,va_s,va_e;
+    size_t l1_off, va_s, va_e;
     rt_base_t level;
 
-    if((!mmu_info) || (!vtable))
+    if ((!mmu_info) || (!vtable))
     {
         return -1;
     }
@@ -86,192 +94,136 @@ int rt_hw_mmu_map_init(rt_mmu_info *mmu_info,void *v_address,rt_size_t size,rt_s
     va_s = (rt_size_t)v_address;
     va_e = ((rt_size_t)v_address) + size - 1;
 
-    if(va_e < va_s)
+    if (va_e < va_s)
     {
         return -1;
     }
 
-    //convert address to level 1 page frame id
+    // convert address to its top-level page-table index (VPN[2])
     va_s = GET_L1(va_s);
     va_e = GET_L1(va_e);
 
-    if(va_s == 0)
+    if (va_s == 0)
     {
         return -1;
     }
 
     level = rt_hw_interrupt_disable();
 
-    //vtable initialization check
-    for(l1_off = va_s;l1_off <= va_e;l1_off++)
+    // vtable initialization check
+    for (l1_off = va_s; l1_off <= va_e; l1_off++)
     {
         size_t v = vtable[l1_off];
 
-        if(v)
+        if (v)
         {
             rt_hw_interrupt_enable(level);
             return 0;
         }
     }
 
-    mmu_info -> vtable = vtable;
-    mmu_info -> vstart = va_s;
-    mmu_info -> vend = va_e;
-    mmu_info -> pv_off = pv_off;
+    mmu_info->vtable = vtable;
+    mmu_info->vstart = va_s;
+    mmu_info->vend = va_e;
+    mmu_info->pv_off = pv_off;
 
     rt_hw_interrupt_enable(level);
     return 0;
 }
 
-void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info,rt_size_t vaddr_start,rt_size_t size)
+void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info, rt_size_t vaddr_start, rt_size_t size)
 {
-    rt_size_t paddr_start = __UMASKVALUE(VPN_TO_PPN(vaddr_start,mmu_info -> pv_off),PAGE_OFFSET_MASK);
+    rt_size_t paddr_start = __UMASKVALUE(VPN_TO_PPN(vaddr_start, mmu_info->pv_off), PAGE_OFFSET_MASK);
     rt_size_t va_s = GET_L1(vaddr_start);
     rt_size_t va_e = GET_L1(vaddr_start + size - 1);
     rt_size_t i;
 
     for(i = va_s;i <= va_e;i++)
     {
-        mmu_info -> vtable[i] = COMBINEPTE(paddr_start,PAGE_ATTR_RWX | PTE_G | PTE_V);
+        mmu_info->vtable[i] = COMBINEPTE(paddr_start, PAGE_ATTR_RWX | PTE_G | PTE_V);
         paddr_start += L1_PAGE_SIZE;
     }
 
     rt_hw_cpu_tlb_invalidate();
 }
 
-//find a range of free virtual address specified by pages
-static rt_size_t find_vaddr(rt_mmu_info *mmu_info,rt_size_t pages)
+// find a range of free virtual addresses large enough for the requested number of pages
+
+
+static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
 {
-    rt_size_t l1_off,l2_off,l3_off;
-    rt_size_t *mmu_l1,*mmu_l2,*mmu_l3;
-    rt_size_t find_l1 = 0,find_l2 = 0,find_l3 = 0;
-    rt_size_t n = 0;
+    size_t loop_pages;
+    size_t va;
+    size_t find_va = 0;
+    int n = 0;
+    size_t i;
 
-    if(!pages)
+    if (!pages || !mmu_info)
     {
         return 0;
     }
 
-    if(!mmu_info)
-    {
-        return 0;
-    }
+    loop_pages = mmu_info->vend - mmu_info->vstart + 1;
+    loop_pages <<= (ARCH_INDEX_WIDTH * 2);
+    va = mmu_info->vstart;
+    va <<= (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2);
 
-    for(l1_off = mmu_info -> vstart;l1_off <= mmu_info -> vend;l1_off++)
+    for (i = 0; i < loop_pages; i++, va += ARCH_PAGE_SIZE)
     {
-        mmu_l1 = ((rt_size_t *)mmu_info -> vtable) + l1_off;
-
-        if(PTE_USED(*mmu_l1))
+        if (_rt_hw_mmu_v2p(mmu_info, (void *)va))
         {
-            RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
-            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1),mmu_info -> pv_off);
-
-            for(l2_off = 0;l2_off < __SIZE(VPN1_BIT);l2_off++)
-            {
-                if(PTE_USED(*(mmu_l2 + l2_off)))
-                {
-                    RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
-                    mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),mmu_info -> pv_off);
-
-                    for(l3_off = 0;l3_off < __SIZE(VPN0_BIT);l3_off++)
-                    {
-                        if(PTE_USED(*(mmu_l3 + l3_off)))
-                        {
-                            RT_ASSERT(PAGE_IS_LEAF(*(mmu_l3 + l3_off)));
-                            n = 0;//in use
-                        }
-                        else
-                        {
-                            if(!n)
-                            {
-                                find_l1 = l1_off;
-                                find_l2 = l2_off;
-                                find_l3 = l3_off;
-                            }
-
-                            n++;
-
-                            if(n >= pages)
-                            {
-                                return COMBINEVADDR(find_l1,find_l2,find_l3);
-                            }
-                        }
-                    }
-                }
-                else
-                {
-                    if(!n)
-                    {
-                        find_l1 = l1_off;
-                        find_l2 = l2_off;
-                        find_l3 = 0;
-                    }
-
-                    n += __SIZE(VPN0_BIT);
-
-                    if(n >= pages)
-                    {
-                        return COMBINEVADDR(find_l1,find_l2,find_l3);
-                    }
-                }
-            }
+            n = 0;
+            find_va = 0;
+            continue;
         }
-        else
+        if (!find_va)
         {
-            if(!n)
-            {
-                find_l1 = l1_off;
-                find_l2 = 0;
-                find_l3 = 0;
-            }
-
-            n += __SIZE(VPN1_BIT);
-
-            if(n >= pages)
-            {
-                return COMBINEVADDR(find_l1,find_l2,find_l3);
-            }
+            find_va = va;
+        }
+        n++;
+        if (n >= pages)
+        {
+            return find_va;
         }
     }
-
     return 0;
 }
 
-//check whether the range of virtual address are free
-static int check_vaddr(rt_mmu_info *mmu_info,void *va,rt_size_t pages)
+// check whether the range of virtual addresses is free
+static int check_vaddr(rt_mmu_info *mmu_info, void *va, rt_size_t pages)
 {
-    rt_size_t loop_va = __UMASKVALUE((rt_size_t)va,PAGE_OFFSET_MASK);
-    rt_size_t l1_off,l2_off,l3_off;
-    rt_size_t *mmu_l1,*mmu_l2,*mmu_l3;
+    rt_size_t loop_va = __UMASKVALUE((rt_size_t)va, PAGE_OFFSET_MASK);
+    rt_size_t l1_off, l2_off, l3_off;
+    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
 
-    if(!pages)
+    if (!pages)
     {
         return -1;
     }
 
-    if(!mmu_info)
+    if (!mmu_info)
     {
         return -1;
     }
 
-    while(pages--)
+    while (pages--)
     {
         l1_off = GET_L1(loop_va);
         l2_off = GET_L2(loop_va);
         l3_off = GET_L3(loop_va);
-        mmu_l1 = ((rt_size_t *)mmu_info -> vtable) + l1_off;
+        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;
 
-        if(PTE_USED(*mmu_l1))
+        if (PTE_USED(*mmu_l1))
         {
             RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
-            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1),mmu_info -> pv_off) + l2_off;
+            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off) + l2_off;
 
-            if(PTE_USED(*mmu_l2))
+            if (PTE_USED(*mmu_l2))
             {
                 RT_ASSERT(!PAGE_IS_LEAF(*mmu_l2));
-                mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2),mmu_info -> pv_off) + l3_off;
+                mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2), mmu_info->pv_off) + l3_off;
 
-                if(PTE_USED(*mmu_l3))
+                if (PTE_USED(*mmu_l3))
                 {
                     RT_ASSERT(PAGE_IS_LEAF(*mmu_l3));
                     return -1;
@@ -285,53 +237,58 @@ static int check_vaddr(rt_mmu_info *mmu_info,void *va,rt_size_t pages)
     return 0;
 }
 
-static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t npages)
+// TODO pages ref_cnt problem
+static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages)
 {
-    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr,PAGE_OFFSET_MASK);
-    rt_size_t l1_off,l2_off,l3_off;
-    rt_size_t *mmu_l1,*mmu_l2,*mmu_l3;
+    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
+    rt_size_t l1_off, l2_off, l3_off;
+    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
     rt_size_t *ref_cnt;
 
     RT_ASSERT(mmu_info);
 
-    while(npages--)
+    while (npages--)
     {
         l1_off = (rt_size_t)GET_L1(loop_va);
-        RT_ASSERT((l1_off >= mmu_info -> vstart) && (l1_off <= mmu_info -> vend));
+        RT_ASSERT((l1_off >= mmu_info->vstart) && (l1_off <= mmu_info->vend));
         l2_off = (rt_size_t)GET_L2(loop_va);
         l3_off = (rt_size_t)GET_L3(loop_va);
-        mmu_l1 = ((rt_size_t *)mmu_info -> vtable) + l1_off;
+
+        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;
         RT_ASSERT(PTE_USED(*mmu_l1))
         RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
-        mmu_l2 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1),mmu_info -> pv_off)) + l2_off;
+        mmu_l2 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off)) + l2_off;
         RT_ASSERT(PTE_USED(*mmu_l2));
         RT_ASSERT(!PAGE_IS_LEAF(*mmu_l2));
-        mmu_l3 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2),mmu_info -> pv_off)) + l3_off;
+        mmu_l3 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2), mmu_info->pv_off)) + l3_off;
         RT_ASSERT(PTE_USED(*mmu_l3));
         RT_ASSERT(PAGE_IS_LEAF(*(mmu_l3)));
+
         *mmu_l3 = 0;
-        rt_hw_cpu_dcache_clean(mmu_l3,sizeof(*mmu_l3));
+        rt_hw_cpu_dcache_clean(mmu_l3, sizeof(*mmu_l3));
+
+        // update ref_cnt: each page-table level occupies 8KB (entry page plus ref_cnt page)
         mmu_l3 -= l3_off;
         ref_cnt = mmu_l3 + __SIZE(VPN0_BIT);
         (*ref_cnt)--;
 
-        if(!*ref_cnt)
+        if (!*ref_cnt)
         {
-            //release level 3 page
-            rt_pages_free(mmu_l3,1);//entry page and ref_cnt page
+            // release level 3 page
+            rt_pages_free(mmu_l3, 1); // entry page and ref_cnt page
             *mmu_l2 = 0;
-            rt_hw_cpu_dcache_clean(mmu_l2,sizeof(*mmu_l2));
+            rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
             mmu_l2 -= l2_off;
 
             ref_cnt = mmu_l2 + __SIZE(VPN1_BIT);
             (*ref_cnt)--;
 
-            if(!*ref_cnt)
+            if (!*ref_cnt)
             {
-                //release level 2 page
-                rt_pages_free(mmu_l2,1);//entry page and ref_cnt page
+                // release level 2 page
+                rt_pages_free(mmu_l2, 1); // entry page and ref_cnt page
                 *mmu_l1 = 0;
-                rt_hw_cpu_dcache_clean(mmu_l1,sizeof(*mmu_l1));
+                rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
             }
         }
 
@@ -339,71 +296,70 @@ static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t npage
     }
 }
 
-static int __rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t npages,rt_size_t attr)
+static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t npages, rt_size_t attr)
 {
-    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr,PAGE_OFFSET_MASK);
-    rt_size_t loop_pa = __UMASKVALUE((rt_size_t)p_addr,PAGE_OFFSET_MASK);
-    rt_size_t l1_off,l2_off,l3_off;
-    rt_size_t *mmu_l1,*mmu_l2,*mmu_l3;
+    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
+    rt_size_t loop_pa = __UMASKVALUE((rt_size_t)p_addr, PAGE_OFFSET_MASK);
+    rt_size_t l1_off, l2_off, l3_off;
+    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
     rt_size_t *ref_cnt;
-    //rt_kprintf("v_addr = 0x%p,p_addr = 0x%p,npages = %lu\n",v_addr,p_addr,npages);
 
-    if(!mmu_info)
+    if (!mmu_info)
     {
         return -1;
     }
 
-    while(npages--)
+    while (npages--)
     {
         l1_off = GET_L1(loop_va);
         l2_off = GET_L2(loop_va);
         l3_off = GET_L3(loop_va);
-        mmu_l1 = ((rt_size_t *)mmu_info -> vtable) + l1_off;
+        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;
 
-        if(PTE_USED(*mmu_l1))
+        if (PTE_USED(*mmu_l1))
         {
             RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
-            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1),mmu_info -> pv_off);
+            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);
         }
         else
         {
             mmu_l2 = (rt_size_t *)rt_pages_alloc(1);
 
-            if(mmu_l2)
+            if (mmu_l2)
             {
-                rt_memset(mmu_l2,0,PAGE_SIZE * 2);
-                rt_hw_cpu_dcache_clean(mmu_l2,PAGE_SIZE * 2);
-                *mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2,mmu_info -> pv_off),PAGE_DEFAULT_ATTR_NEXT);
-                rt_hw_cpu_dcache_clean(mmu_l1,sizeof(*mmu_l1));
+                rt_memset(mmu_l2, 0, PAGE_SIZE * 2);
+                rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE * 2);
+                *mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2, mmu_info->pv_off), PAGE_DEFAULT_ATTR_NEXT);
+                rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
             }
             else
             {
-                __rt_hw_mmu_unmap(mmu_info,v_addr,npages);
+                __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
                 return -1;
             }
         }
 
-        if(PTE_USED(*(mmu_l2 + l2_off)))
+        if (PTE_USED(*(mmu_l2 + l2_off)))
         {
             RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
-            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),mmu_info -> pv_off);
+            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);
         }
         else
         {
             mmu_l3 = (rt_size_t *)rt_pages_alloc(1);
 
-            if(mmu_l3)
+            if (mmu_l3)
             {
-                rt_memset(mmu_l3,0,PAGE_SIZE * 2);
-                rt_hw_cpu_dcache_clean(mmu_l3,PAGE_SIZE * 2);
-                *(mmu_l2 + l2_off) = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3,mmu_info -> pv_off),PAGE_DEFAULT_ATTR_NEXT);
-                rt_hw_cpu_dcache_clean(mmu_l2,sizeof(*mmu_l2));
+                rt_memset(mmu_l3, 0, PAGE_SIZE * 2);
+                rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE * 2);
+                *(mmu_l2 + l2_off) = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3, mmu_info->pv_off), PAGE_DEFAULT_ATTR_NEXT);
+                rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
                 ref_cnt = mmu_l2 + __SIZE(VPN1_BIT);
                 (*ref_cnt)++;
             }
             else
             {
-                __rt_hw_mmu_unmap(mmu_info,v_addr,npages);
+                __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
                 return -1;
             }
         }
@@ -411,8 +367,8 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_si
         RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
         ref_cnt = mmu_l3 + __SIZE(VPN0_BIT);
         (*ref_cnt)++;
-        *(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)loop_pa,PAGE_DEFAULT_ATTR_LEAF);
-        rt_hw_cpu_dcache_clean(mmu_l3 + l3_off,sizeof(*(mmu_l3 + l3_off)));
+        *(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)loop_pa, attr);
+        rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
 
         loop_va += PAGE_SIZE;
         loop_pa += PAGE_SIZE;
@@ -421,14 +377,14 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_si
     return 0;
 }
 
-void *_rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr)
+void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
 {
-    rt_size_t pa_s,pa_e;
+    rt_size_t pa_s, pa_e;
     rt_size_t vaddr;
     rt_size_t pages;
     int ret;
 
-    if(!size)
+    if (!size)
     {
         return 0;
     }
@@ -439,33 +395,33 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t s
     pa_e = GET_PF_ID(pa_e);
     pages = pa_e - pa_s + 1;
 
-    if(v_addr)
+    if (v_addr)
     {
         vaddr = (rt_size_t)v_addr;
         pa_s = (rt_size_t)p_addr;
 
-        if(GET_PF_OFFSET(vaddr) != GET_PF_OFFSET(pa_s))
+        if (GET_PF_OFFSET(vaddr) != GET_PF_OFFSET(pa_s))
         {
             return 0;
         }
 
-        vaddr = __UMASKVALUE(vaddr,PAGE_OFFSET_MASK);
+        vaddr = __UMASKVALUE(vaddr, PAGE_OFFSET_MASK);
 
-        if(check_vaddr(mmu_info,(void *)vaddr,pages) != 0)
+        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
         {
             return 0;
         }
     }
     else
     {
-        vaddr = find_vaddr(mmu_info,pages);
+        vaddr = find_vaddr(mmu_info, pages);
     }
 
-    if(vaddr)
+    if (vaddr)
     {
-        ret = __rt_hw_mmu_map(mmu_info,(void *)vaddr,p_addr,pages,attr);
+        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);
 
-        if(ret == 0)
+        if (ret == 0)
         {
             rt_hw_cpu_tlb_invalidate();
             return (void *)(vaddr | GET_PF_OFFSET((rt_size_t)p_addr));
@@ -475,31 +431,31 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t s
     return 0;
 }
 
-static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t npages,rt_size_t attr)
+static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages, rt_size_t attr)
 {
-    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr,PAGE_OFFSET_MASK);
+    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
     rt_size_t loop_pa;
-    rt_size_t l1_off,l2_off,l3_off;
-    rt_size_t *mmu_l1,*mmu_l2,*mmu_l3;
+    rt_size_t l1_off, l2_off, l3_off;
+    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
     rt_size_t *ref_cnt;
     rt_size_t i;
-    void *va,*pa;
+    void *va, *pa;
 
-    if(!mmu_info)
+    if (!mmu_info)
     {
         return -1;
     }
 
-    while(npages--)
+    while (npages--)
     {
         loop_pa = (rt_size_t)rt_pages_alloc(0);
 
-        if(!loop_pa)
+        if (!loop_pa)
         {
             goto err;
         }
 
-        if(__rt_hw_mmu_map(mmu_info,(void *)loop_va,(void *)loop_pa,1,attr) < 0)
+        if (__rt_hw_mmu_map(mmu_info, (void *)loop_va, (void *)loop_pa, 1, attr) < 0)
         {
             goto err;
         }
@@ -509,33 +465,33 @@ static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t npa
 
     return 0;
 
-    err:
-        va = (void *)__UMASKVALUE((rt_size_t)v_addr,PAGE_OFFSET_MASK);
+err:
+    va = (void *)__UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
 
-        for(i = 0;i < npages;i++)
-        {
-            pa = rt_hw_mmu_v2p(mmu_info,va);
-
-            if(pa)
-            {
-                rt_pages_free((void *)PPN_TO_VPN(pa,mmu_info -> pv_off),0);
-            }
+    for (i = 0; i < npages; i++)
+    {
+        pa = rt_hw_mmu_v2p(mmu_info, va);
 
-            va = (void *)((rt_uint8_t *)va + PAGE_SIZE);
+        if (pa)
+        {
+            rt_pages_free((void *)PPN_TO_VPN(pa, mmu_info->pv_off), 0);
         }
 
-        __rt_hw_mmu_unmap(mmu_info,v_addr,npages);
-        return -1;
+        va = (void *)((rt_uint8_t *)va + PAGE_SIZE);
+    }
+
+    __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
+    return -1;
 }
 
-void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_size_t attr)
+void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
 {
     rt_size_t vaddr;
     rt_size_t offset;
     rt_size_t pages;
     int ret;
 
-    if(!size)
+    if (!size)
     {
         return 0;
     }
@@ -544,25 +500,25 @@ void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_s
     size += (offset + PAGE_SIZE - 1);
     pages = size >> PAGE_OFFSET_BIT;
 
-    if(v_addr)
+    if (v_addr)
     {
-        vaddr = __UMASKVALUE((rt_size_t)v_addr,PAGE_OFFSET_MASK);
+        vaddr = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
 
-        if(check_vaddr(mmu_info,(void *)vaddr,pages) != 0)
+        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
         {
             return 0;
         }
     }
     else
     {
-        vaddr = find_vaddr(mmu_info,pages);
+        vaddr = find_vaddr(mmu_info, pages);
     }
 
-    if(vaddr)
+    if (vaddr)
     {
-        ret = __rt_hw_mmu_map_auto(mmu_info,(void *)vaddr,pages,attr);
+        ret = __rt_hw_mmu_map_auto(mmu_info, (void *)vaddr, pages, attr);
 
-        if(ret == 0)
+        if (ret == 0)
         {
             rt_hw_cpu_tlb_invalidate();
             return (void *)(vaddr | offset);
@@ -572,79 +528,82 @@ void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_s
     return 0;
 }
 
-void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size)
+void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
 {
-    rt_size_t va_s,va_e;
+    rt_size_t va_s, va_e;
     rt_size_t pages;
 
     va_s = ((rt_size_t)v_addr) >> PAGE_OFFSET_BIT;
     va_e = (((rt_size_t)v_addr) + size - 1) >> PAGE_OFFSET_BIT;
     pages = va_e - va_s + 1;
-    __rt_hw_mmu_unmap(mmu_info,v_addr,pages);
+    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
     rt_hw_cpu_tlb_invalidate();
 }
 
-void *rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr)
+void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
 {
     void *ret;
     rt_base_t level;
 
     level = rt_hw_interrupt_disable();
-    ret = _rt_hw_mmu_map(mmu_info,v_addr,p_addr,size,attr);
+    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
     rt_hw_interrupt_enable(level);
     return ret;
 }
 
-void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_size_t attr)
+void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
 {
     void *ret;
     rt_base_t level;
 
     level = rt_hw_interrupt_disable();
-    ret = _rt_hw_mmu_map_auto(mmu_info,v_addr,size,attr);
+    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
     rt_hw_interrupt_enable(level);
     return ret;
 }
 
-void rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size)
+void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
 {
     rt_base_t level;
 
     level = rt_hw_interrupt_disable();
-    _rt_hw_mmu_unmap(mmu_info,v_addr,size);
+    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
     rt_hw_interrupt_enable(level);
 }
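
A minimal usage sketch of the locked wrappers above (not part of this patch): mapping a placeholder MMIO window into kernel space, ioremap-style, and tearing it down again. The device address and size are illustrative, and readl is the accessor from riscv_io.h.

/* Sketch only: map 4 KiB of a hypothetical device (e.g. a virtio-mmio slot
 * on qemu-virt) and unmap it again; 0x10008000 is a placeholder address. */
void example_ioremap(rt_mmu_info *mmu_info)
{
    void *va = rt_hw_mmu_map(mmu_info, RT_NULL, (void *)0x10008000,
                             0x1000, MMU_MAP_K_DEVICE);
    if (va)
    {
        rt_uint32_t magic = readl(va);          /* access a device register */
        (void)magic;
        rt_hw_mmu_unmap(mmu_info, va, 0x1000);  /* release the mapping */
    }
}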
 
-void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info,void *v_addr)
+void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
 {
-    rt_size_t l1_off,l2_off,l3_off;
-    rt_size_t *mmu_l1,*mmu_l2,*mmu_l3;
+    rt_size_t l1_off, l2_off, l3_off;
+    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
     rt_size_t pa;
 
     l1_off = GET_L1((rt_size_t)v_addr);
     l2_off = GET_L2((rt_size_t)v_addr);
     l3_off = GET_L3((rt_size_t)v_addr);
 
-    if(!mmu_info)
+    if (!mmu_info)
     {
         return RT_NULL;
     }
 
-    mmu_l1 = ((rt_size_t *)mmu_info -> vtable) + l1_off;
+    mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;
 
-    if(PTE_USED(*mmu_l1))
+    if (PTE_USED(*mmu_l1))
     {
-        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
-        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1),mmu_info -> pv_off);
+        if (*mmu_l1 & PTE_XWR_MASK)
+            return (void *)(GET_PADDR(*mmu_l1) | ((rt_size_t)v_addr & ((1 << 30) - 1)));
+
+        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);
 
-        if(PTE_USED(*(mmu_l2 + l2_off)))
+        if (PTE_USED(*(mmu_l2 + l2_off)))
         {
-            RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
-            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),mmu_info -> pv_off);
+            if (*(mmu_l2 + l2_off) & PTE_XWR_MASK)
+                return (void *)(GET_PADDR(*(mmu_l2 + l2_off)) | ((rt_size_t)v_addr & ((1 << 21) - 1)));
+
+            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);
 
-            if(PTE_USED(*(mmu_l3 + l3_off)))
+            if (PTE_USED(*(mmu_l3 + l3_off)))
             {
-                RT_ASSERT(PAGE_IS_LEAF(*(mmu_l3 + l3_off)));
                 return (void *)(GET_PADDR(*(mmu_l3 + l3_off)) | GET_PF_OFFSET((rt_size_t)v_addr));
             }
         }
@@ -653,13 +612,53 @@ void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info,void *v_addr)
     return RT_NULL;
 }
 
-void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info,void *v_addr)
+void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
 {
     void *ret;
     rt_base_t level;
 
     level = rt_hw_interrupt_disable();
-    ret = _rt_hw_mmu_v2p(mmu_info,v_addr);
+    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
     rt_hw_interrupt_enable(level);
     return ret;
 }
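
The two new early returns in _rt_hw_mmu_v2p handle superpage leaves during translation: under Sv39 a leaf PTE at level 1 covers 1 GiB and a leaf at level 2 covers 2 MiB, so only the low 30 or 21 bits of the virtual address are kept as the offset. A worked sketch of that arithmetic; the macro names are illustrative, not part of this patch.

/* Sketch: offset masks for Sv39 superpage leaves. */
#define SV39_L1_OFFSET(va)  ((rt_size_t)(va) & ((1UL << 30) - 1))   /* 1 GiB leaf */
#define SV39_L2_OFFSET(va)  ((rt_size_t)(va) & ((1UL << 21) - 1))   /* 2 MiB leaf */

/* Example: a 2 MiB leaf with physical base 0x80200000 translating
 * v_addr = 0x80234567 keeps offset 0x34567, giving pa = 0x80234567. */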
+
+/**
+ * @brief Set up the page table for kernel space. The mapping is fixed
+ * and cannot be changed after initialization.
+ *
+ * Each memory region in struct mem_desc must be page aligned;
+ * otherwise the mapping fails and no error is reported.
+ *
+ * @param mmu_info
+ * @param mdesc
+ * @param desc_nr
+ */
+void rt_hw_mmu_setup(rt_mmu_info *mmu_info, struct mem_desc *mdesc, int desc_nr)
+{
+    void *err;
+    for (size_t i = 0; i < desc_nr; i++)
+    {
+        size_t attr;
+        switch (mdesc->attr)
+        {
+            case NORMAL_MEM:
+                attr = MMU_MAP_K_RWCB;
+                break;
+            case NORMAL_NOCACHE_MEM:
+                attr = MMU_MAP_K_RWCB;
+                break;
+            case DEVICE_MEM:
+                attr = MMU_MAP_K_DEVICE;
+                break;
+            default:
+                attr = MMU_MAP_K_DEVICE;
+        }
+        err = _rt_hw_mmu_map(mmu_info, (void *)mdesc->vaddr_start, (void *)mdesc->paddr_start,
+            mdesc->vaddr_end - mdesc->vaddr_start + 1, attr);
+        mdesc++;
+    }
+
+    switch_mmu((void *)MMUTable);
+}
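
A hedged sketch of how a BSP might drive rt_hw_mmu_setup(); the region layout and the platform_mem_desc name are illustrative, not taken from this commit. rt_hw_mmu_setup() switches to MMUTable itself, so the caller only supplies the descriptor table.

/* Sketch only: identity-map 256 MiB of RAM as cacheable memory and a small
 * MMIO window as device memory; all addresses are placeholders. */
static struct mem_desc platform_mem_desc[] =
{
    { .vaddr_start = 0x80000000, .vaddr_end = 0x8FFFFFFF,
      .paddr_start = 0x80000000, .attr = NORMAL_MEM },
    { .vaddr_start = 0x10000000, .vaddr_end = 0x1000FFFF,
      .paddr_start = 0x10000000, .attr = DEVICE_MEM },
};

void example_mmu_setup(rt_mmu_info *mmu_info)
{
    rt_hw_mmu_setup(mmu_info, platform_mem_desc,
                    sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
}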

+ 37 - 20
libcpu/risc-v/virt64/mmu.h

@@ -13,6 +13,14 @@
 
 #include "riscv.h"
 #include "riscv_mmu.h"
+
+/* RAM, Flash, or ROM */
+#define NORMAL_MEM           0
+/* normal nocache memory mapping type */
+#define NORMAL_NOCACHE_MEM   1
+/* MMIO region */
+#define DEVICE_MEM           2
+
 struct mem_desc
 {
     rt_size_t vaddr_start;
@@ -22,16 +30,16 @@ struct mem_desc
 };
 
 #define GET_PF_ID(addr) ((addr) >> PAGE_OFFSET_BIT)
-#define GET_PF_OFFSET(addr) __MASKVALUE(addr,PAGE_OFFSET_MASK)
-#define GET_L1(addr) __PARTBIT(addr,VPN2_SHIFT,VPN2_BIT)
-#define GET_L2(addr) __PARTBIT(addr,VPN1_SHIFT,VPN1_BIT)
-#define GET_L3(addr) __PARTBIT(addr,VPN0_SHIFT,VPN0_BIT)
-#define GET_PPN(pte) (__PARTBIT(pte,PTE_PPN_SHIFT,PHYSICAL_ADDRESS_WIDTH_BITS - PTE_PPN_SHIFT))
+#define GET_PF_OFFSET(addr) __MASKVALUE(addr, PAGE_OFFSET_MASK)
+#define GET_L1(addr) __PARTBIT(addr, VPN2_SHIFT, VPN2_BIT)
+#define GET_L2(addr) __PARTBIT(addr, VPN1_SHIFT, VPN1_BIT)
+#define GET_L3(addr) __PARTBIT(addr, VPN0_SHIFT, VPN0_BIT)
+#define GET_PPN(pte) (__PARTBIT(pte, PTE_PPN_SHIFT, PHYSICAL_ADDRESS_WIDTH_BITS - PTE_PPN_SHIFT))
 #define GET_PADDR(pte) (GET_PPN(pte) << PAGE_OFFSET_BIT)
-#define VPN_TO_PPN(vaddr,pv_off) (((rt_size_t)(vaddr)) + (pv_off))
-#define PPN_TO_VPN(paddr,pv_off) (((rt_size_t)(paddr)) - (pv_off))
-#define COMBINEVADDR(l1_off,l2_off,l3_off) (((l1_off) << VPN2_SHIFT) | ((l2_off) << VPN1_SHIFT) | ((l3_off) << VPN0_SHIFT))
-#define COMBINEPTE(paddr,attr) ((((paddr) >> PAGE_OFFSET_BIT) << PTE_PPN_SHIFT) | (attr))
+#define VPN_TO_PPN(vaddr, pv_off) (((rt_size_t)(vaddr)) + (pv_off))
+#define PPN_TO_VPN(paddr, pv_off) (((rt_size_t)(paddr)) - (pv_off))
+#define COMBINEVADDR(l1_off, l2_off, l3_off) (((l1_off) << VPN2_SHIFT) | ((l2_off) << VPN1_SHIFT) | ((l3_off) << VPN0_SHIFT))
+#define COMBINEPTE(paddr, attr) ((((paddr) >> PAGE_OFFSET_BIT) << PTE_PPN_SHIFT) | (attr))
 
 typedef struct
 {
@@ -39,22 +47,31 @@ typedef struct
     size_t vstart;
     size_t vend;
     size_t pv_off;
-}rt_mmu_info;
+} rt_mmu_info;
 
 void *mmu_table_get();
 void switch_mmu(void *mmu_table);
-int rt_hw_mmu_map_init(rt_mmu_info *mmu_info,void *v_address,rt_size_t size,rt_size_t *vtable,rt_size_t pv_off);
-void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info,rt_size_t vaddr_start,rt_size_t size);
-void *_rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr);
-void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_size_t attr);
-void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size);
-void *rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr);
-void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_size_t attr);
-void rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size);
-void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info,void *v_addr);
-void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info,void *v_addr);
+int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, rt_size_t *vtable, rt_size_t pv_off);
+void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info, rt_size_t vaddr_start, rt_size_t size);
+void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr);
+void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr);
+void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size);
+void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr);
+void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr);
+void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size);
+void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);
+void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);
+
+void rt_hw_mmu_setup(rt_mmu_info *mmu_info, struct mem_desc *mdesc, int desc_nr);
 
 void rt_mm_lock(void);
 void rt_mm_unlock(void);
 
+#define ARCH_ADDRESS_WIDTH_BITS 64
+
+#define MMU_MAP_ERROR_VANOTALIGN  -1
+#define MMU_MAP_ERROR_PANOTALIGN  -2
+#define MMU_MAP_ERROR_NOPAGE      -3
+#define MMU_MAP_ERROR_CONFLICT    -4
+
 #endif

+ 86 - 15
libcpu/risc-v/virt64/plic.c

@@ -5,44 +5,115 @@
  *
  * Change Logs:
  * Date           Author       Notes
- * 2021-01-31     lizhirui     first version
+ * 2021-05-20     bigmagic     first version
  */
-
-
 #include <rthw.h>
 #include <rtthread.h>
-
 #include <stdint.h>
+#include "plic.h"
+#include <riscv_io.h>
+#include "encoding.h"
+
 #include <riscv.h>
 #include <string.h>
 #include <stdlib.h>
-#include <riscv_io.h>
 
-#include "plic.h"
+/*
+ * Each PLIC interrupt source can be assigned a priority by writing
+ * to its 32-bit memory-mapped priority register.
+ * The QEMU-virt (the same as FU540-C000) supports 7 levels of priority.
+ * A priority value of 0 is reserved to mean "never interrupt" and
+ * effectively disables the interrupt.
+ * Priority 1 is the lowest active priority, and priority 7 is the highest.
+ * Ties between global interrupts of the same priority are broken by
+ * the Interrupt ID; interrupts with the lowest ID have the highest
+ * effective priority.
+ */
+void plic_set_priority(int irq, int priority)
+{
+    *(uint32_t*)PLIC_PRIORITY(irq) = priority;
+}
+
+/*
+* Each global interrupt can be enabled by setting the corresponding
+* bit in the enables registers.
+*/
+void plic_irq_enable(int irq)
+{
+    int hart = __raw_hartid();
+    *(uint32_t*)PLIC_ENABLE(hart) = ((*(uint32_t*)PLIC_ENABLE(hart)) | (1 << irq));
+#ifdef  RISCV_S_MODE
+    set_csr(sie, read_csr(sie) | MIP_SEIP);
+#else
+    set_csr(mie, read_csr(mie) | MIP_MEIP);
+#endif
+}
 
-void plic_set_priority(rt_uint32_t source,rt_uint32_t val)
+void plic_irq_disable(int irq)
 {
-    volatile void *plic_priority = (void *)(rt_size_t)(PLIC_BASE_ADDR + PLIC_PRIORITY_BASE + 4 * source);
-    writel(val,plic_priority);
+    int hart = __raw_hartid();
+    *(uint32_t*)PLIC_ENABLE(hart) = (((*(uint32_t*)PLIC_ENABLE(hart)) & (~(1 << irq))));
 }
 
-void plic_set_thresh(rt_uint32_t val)
+/*
+ * The PLIC masks all interrupts with a priority less than or equal to
+ * the threshold. The maximum threshold is 7.
+ * For example, a threshold value of zero permits all interrupts with
+ * non-zero priority, whereas a value of 7 masks all interrupts.
+ * Note that the threshold is global to the PLIC, not per interrupt source.
+ */
+void plic_set_threshold(int threshold)
+{
+    int hart = __raw_hartid();
+    *(uint32_t*)PLIC_THRESHOLD(hart) = threshold;
+}
+
+/*
+ * DESCRIPTION:
+ *    Query the PLIC for the interrupt we should serve.
+ *    Perform an interrupt claim by reading the claim register, which
+ *    returns the ID of the highest-priority pending interrupt or zero if there
+ *    is no pending interrupt.
+ *    A successful claim also atomically clears the corresponding pending bit
+ *    on the interrupt source.
+ * RETURN VALUE:
+ *    the ID of the highest-priority pending interrupt or zero if there
+ *    is no pending interrupt.
+ */
+int plic_claim(void)
+{
+    int hart = __raw_hartid();
+    int irq = *(uint32_t*)PLIC_CLAIM(hart);
+    return irq;
+}
+
+/*
+ * DESCRIPTION:
+ *    Writing the interrupt ID received from the claim (irq) to the
+ *    complete register signals the PLIC that this IRQ has been served.
+ *    The PLIC does not check whether the completion ID is the same as the
+ *    last claim ID for that target. If the completion ID does not match an
+ *    interrupt source that is currently enabled for the target, the completion
+ *    is silently ignored.
+ * RETURN VALUE: none
+ */
+void plic_complete(int irq)
 {
-    volatile void *plic_thresh = (void *)(rt_size_t)(PLIC_BASE_ADDR + PLIC_CONTEXT_BASE);
-    writel(val,plic_thresh);
+    int hart = __raw_hartid();
+    *(uint32_t*)PLIC_COMPLETE(hart) = irq;
 }
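
The claim/complete pair is meant to be driven from the external-interrupt trap path. A hedged sketch of such a dispatch loop; the actual ISR lookup is left as a comment because the handler table is BSP-specific and not defined by this patch.

/* Sketch only: serve every pending external interrupt, then return. */
void example_plic_dispatch(void)
{
    int irq;

    while ((irq = plic_claim()) > 0)
    {
        /* look up and call the driver ISR registered for this irq here */

        plic_complete(irq);     /* tell the PLIC this IRQ has been served */
    }
}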
 
-void plic_set_ie(rt_uint32_t word_index,rt_uint32_t val)
+void plic_set_ie(rt_uint32_t word_index, rt_uint32_t val)
 {
     volatile void *plic_ie = (void *)(rt_size_t)(PLIC_BASE_ADDR + PLIC_ENABLE_BASE + word_index * 4);
-    writel(val,plic_ie);
+    writel(val, plic_ie);
 }
 
 void plic_init()
 {
     int i;
 
-    plic_set_thresh(0);
+    plic_set_threshold(0);
 
     for(i = 0;i < 128;i++)
     {

+ 52 - 4
libcpu/risc-v/virt64/plic.h

@@ -5,22 +5,70 @@
  *
  * Change Logs:
  * Date           Author       Notes
- * 2021-01-31     lizhirui     first version
+ * 2021-05-20     bigmagic     first version
+ * 2021-10-20     bernard      fix s-mode issue
  */
 
 #ifndef __PLIC_H__
 #define __PLIC_H__
 
+#include <rtconfig.h>
+#include <rthw.h>
+
+/*
+ * The qemu-virt machine places its platform-level interrupt controller
+ * (PLIC) here. Both Machine-mode and Supervisor-mode register offsets
+ * are listed below, selected by RISCV_S_MODE.
+ */
+
 #define PLIC_PRIORITY_BASE 0x0
 #define PLIC_PENDING_BASE 0x1000
 #define PLIC_ENABLE_BASE 0x2000
-#define PLIC_ENABLE_STRIDE 0x80
 #define PLIC_CONTEXT_BASE 0x200000
-#define PLIC_CONTEXT_STRIDE 0x1000
 
 #define PLIC_BASE_ADDR 0xC000000
 
-void plic_set_priority(rt_uint32_t source,rt_uint32_t val);
+#define VIRT_PLIC_BASE                  0x0c000000L
+
+#define PLIC_PRIORITY_OFFSET            (0x0)
+#define PLIC_PENDING_OFFSET             (0x1000)
+
+#define PLIC_ENABLE_STRIDE              0x80
+#define PLIC_CONTEXT_STRIDE             0x1000
+
+#ifndef RISCV_S_MODE
+#define PLIC_MENABLE_OFFSET             (0x2000)
+#define PLIC_MTHRESHOLD_OFFSET          (0x200000)
+#define PLIC_MCLAIM_OFFSET              (0x200004)
+#define PLIC_MCOMPLETE_OFFSET           (0x200004)
+
+#define PLIC_ENABLE(hart)               (VIRT_PLIC_BASE + PLIC_MENABLE_OFFSET +     (hart * 2) * PLIC_ENABLE_STRIDE)
+#define PLIC_THRESHOLD(hart)            (VIRT_PLIC_BASE + PLIC_MTHRESHOLD_OFFSET +  (hart * 2) * PLIC_CONTEXT_STRIDE)
+#define PLIC_CLAIM(hart)                (VIRT_PLIC_BASE + PLIC_MCLAIM_OFFSET +      (hart * 2) * PLIC_CONTEXT_STRIDE)
+#define PLIC_COMPLETE(hart)             (VIRT_PLIC_BASE + PLIC_MCOMPLETE_OFFSET +   (hart * 2) * PLIC_CONTEXT_STRIDE)
+
+#else
+#define PLIC_SENABLE_OFFSET             (0x2000   + PLIC_ENABLE_STRIDE)
+#define PLIC_STHRESHOLD_OFFSET          (0x200000 + PLIC_CONTEXT_STRIDE)
+#define PLIC_SCLAIM_OFFSET              (0x200004 + PLIC_CONTEXT_STRIDE)
+#define PLIC_SCOMPLETE_OFFSET           (0x200004 + PLIC_CONTEXT_STRIDE)
+
+#define PLIC_ENABLE(hart)               (VIRT_PLIC_BASE + PLIC_SENABLE_OFFSET +     (hart * 2) * PLIC_ENABLE_STRIDE)
+#define PLIC_THRESHOLD(hart)            (VIRT_PLIC_BASE + PLIC_STHRESHOLD_OFFSET +  (hart * 2) * PLIC_CONTEXT_STRIDE)
+#define PLIC_CLAIM(hart)                (VIRT_PLIC_BASE + PLIC_SCLAIM_OFFSET +      (hart * 2) * PLIC_CONTEXT_STRIDE)
+#define PLIC_COMPLETE(hart)             (VIRT_PLIC_BASE + PLIC_SCOMPLETE_OFFSET +   (hart * 2) * PLIC_CONTEXT_STRIDE)
+#endif
+
+#define PLIC_PRIORITY(id)               (VIRT_PLIC_BASE + PLIC_PRIORITY_OFFSET + (id) * 4)
+#define PLIC_PENDING(id)                (VIRT_PLIC_BASE + PLIC_PENDING_OFFSET + ((id) / 32))
+
+void plic_set_priority(int irq, int priority);
+void plic_irq_enable(int irq);
+void plic_irq_disable(int irq);
+void plic_set_threshold(int mthreshold);
+int  plic_claim(void);
+void plic_complete(int irq);
+
 void plic_set_thresh(rt_uint32_t val);
 void plic_set_ie(rt_uint32_t word_index,rt_uint32_t val);
 void plic_init();

+ 117 - 95
libcpu/risc-v/virt64/riscv_io.h

@@ -10,100 +10,122 @@
 #ifndef __RISCV_IO_H__
 #define __RISCV_IO_H__
 
-    static inline void __raw_writeb(rt_uint8_t val, volatile void *addr)
-    {
-        asm volatile("sb %0, 0(%1)" : : "r"(val), "r"(addr));
-    }
-
-    static inline void __raw_writew(rt_uint16_t val, volatile void *addr)
-    {
-        asm volatile("sh %0, 0(%1)" : : "r"(val), "r"(addr));
-    }
-
-    static inline void __raw_writel(rt_uint32_t val, volatile void *addr)
-    {
-        asm volatile("sw %0, 0(%1)" : : "r"(val), "r"(addr));
-    }
-
-    #if __riscv_xlen != 32
-    static inline void __raw_writeq(rt_uint64_t val, volatile void *addr)
-    {
-        asm volatile("sd %0, 0(%1)" : : "r"(val), "r"(addr));
-    }
-    #endif
-
-    static inline rt_uint8_t __raw_readb(const volatile void *addr)
-    {
-        rt_uint8_t val;
-
-        asm volatile("lb %0, 0(%1)" : "=r"(val) : "r"(addr));
-        return val;
-    }
-
-    static inline rt_uint16_t __raw_readw(const volatile void *addr)
-    {
-        rt_uint16_t val;
-
-        asm volatile("lh %0, 0(%1)" : "=r"(val) : "r"(addr));
-        return val;
-    }
-
-    static inline rt_uint32_t __raw_readl(const volatile void *addr)
-    {
-        rt_uint32_t val;
-
-        asm volatile("lw %0, 0(%1)" : "=r"(val) : "r"(addr));
-        return val;
-    }
-
-    #if __riscv_xlen != 32
-    static inline rt_uint64_t __raw_readq(const volatile void *addr)
-    {
-        rt_uint64_t val;
-
-        asm volatile("ld %0, 0(%1)" : "=r"(val) : "r"(addr));
-        return val;
-    }
-    #endif
-
-    /* FIXME: These are now the same as asm-generic */
-
-    /* clang-format off */
-
-    #define __io_rbr()      do {} while (0)
-    #define __io_rar()      do {} while (0)
-    #define __io_rbw()      do {} while (0)
-    #define __io_raw()      do {} while (0)
-
-    #define readb_relaxed(c)    ({ rt_uint8_t  __v; __io_rbr(); __v = __raw_readb(c); __io_rar(); __v; })
-    #define readw_relaxed(c)    ({ rt_uint16_t __v; __io_rbr(); __v = __raw_readw(c); __io_rar(); __v; })
-    #define readl_relaxed(c)    ({ rt_uint32_t __v; __io_rbr(); __v = __raw_readl(c); __io_rar(); __v; })
-
-    #define writeb_relaxed(v,c) ({ __io_rbw(); __raw_writeb((v),(c)); __io_raw(); })
-    #define writew_relaxed(v,c) ({ __io_rbw(); __raw_writew((v),(c)); __io_raw(); })
-    #define writel_relaxed(v,c) ({ __io_rbw(); __raw_writel((v),(c)); __io_raw(); })
-
-    #if __riscv_xlen != 32
-    #define readq_relaxed(c)    ({ rt_uint64_t __v; __io_rbr(); __v = __raw_readq(c); __io_rar(); __v; })
-    #define writeq_relaxed(v,c) ({ __io_rbw(); __raw_writeq((v),(c)); __io_raw(); })
-    #endif
-
-    #define __io_br()   do {} while (0)
-    #define __io_ar()   __asm__ __volatile__ ("fence i,r" : : : "memory");
-    #define __io_bw()   __asm__ __volatile__ ("fence w,o" : : : "memory");
-    #define __io_aw()   do {} while (0)
-
-    #define readb(c)    ({ rt_uint8_t  __v; __io_br(); __v = __raw_readb(c); __io_ar(); __v; })
-    #define readw(c)    ({ rt_uint16_t __v; __io_br(); __v = __raw_readw(c); __io_ar(); __v; })
-    #define readl(c)    ({ rt_uint32_t __v; __io_br(); __v = __raw_readl(c); __io_ar(); __v; })
-
-    #define writeb(v,c) ({ __io_bw(); __raw_writeb((v),(c)); __io_aw(); })
-    #define writew(v,c) ({ __io_bw(); __raw_writew((v),(c)); __io_aw(); })
-    #define writel(v,c) ({ __io_bw(); __raw_writel((v),(c)); __io_aw(); })
-
-    #if __riscv_xlen != 32
-    #define readq(c)    ({ rt_uint64_t __v; __io_br(); __v = __raw_readq(c); __io_ar(); __v; })
-    #define writeq(v,c) ({ __io_bw(); __raw_writeq((v),(c)); __io_aw(); })
-    #endif
+static inline uint32_t __raw_hartid(void)
+{
+    extern int boot_hartid;
+    return boot_hartid;
+}
+
+static inline void __raw_writeb(rt_uint8_t val, volatile void *addr)
+{
+    asm volatile("sb %0, 0(%1)"
+                 :
+                 : "r"(val), "r"(addr));
+}
+
+static inline void __raw_writew(rt_uint16_t val, volatile void *addr)
+{
+    asm volatile("sh %0, 0(%1)"
+                 :
+                 : "r"(val), "r"(addr));
+}
+
+static inline void __raw_writel(rt_uint32_t val, volatile void *addr)
+{
+    asm volatile("sw %0, 0(%1)"
+                 :
+                 : "r"(val), "r"(addr));
+}
+
+#if __riscv_xlen != 32
+static inline void __raw_writeq(rt_uint64_t val, volatile void *addr)
+{
+    asm volatile("sd %0, 0(%1)"
+                 :
+                 : "r"(val), "r"(addr));
+}
+#endif
+
+static inline rt_uint8_t __raw_readb(const volatile void *addr)
+{
+    rt_uint8_t val;
+
+    asm volatile("lb %0, 0(%1)"
+                 : "=r"(val)
+                 : "r"(addr));
+    return val;
+}
+
+static inline rt_uint16_t __raw_readw(const volatile void *addr)
+{
+    rt_uint16_t val;
+
+    asm volatile("lh %0, 0(%1)"
+                 : "=r"(val)
+                 : "r"(addr));
+    return val;
+}
+
+static inline rt_uint32_t __raw_readl(const volatile void *addr)
+{
+    rt_uint32_t val;
+
+    asm volatile("lw %0, 0(%1)"
+                 : "=r"(val)
+                 : "r"(addr));
+    return val;
+}
+
+#if __riscv_xlen != 32
+static inline rt_uint64_t __raw_readq(const volatile void *addr)
+{
+    rt_uint64_t val;
+
+    asm volatile("ld %0, 0(%1)"
+                 : "=r"(val)
+                 : "r"(addr));
+    return val;
+}
+#endif
+
+/* FIXME: These are now the same as asm-generic */
+
+/* clang-format off */
+
+#define __io_rbr()      do {} while (0)
+#define __io_rar()      do {} while (0)
+#define __io_rbw()      do {} while (0)
+#define __io_raw()      do {} while (0)
+
+#define readb_relaxed(c)    ({ rt_uint8_t  __v; __io_rbr(); __v = __raw_readb(c); __io_rar(); __v; })
+#define readw_relaxed(c)    ({ rt_uint16_t __v; __io_rbr(); __v = __raw_readw(c); __io_rar(); __v; })
+#define readl_relaxed(c)    ({ rt_uint32_t __v; __io_rbr(); __v = __raw_readl(c); __io_rar(); __v; })
+
+#define writeb_relaxed(v,c) ({ __io_rbw(); __raw_writeb((v),(c)); __io_raw(); })
+#define writew_relaxed(v,c) ({ __io_rbw(); __raw_writew((v),(c)); __io_raw(); })
+#define writel_relaxed(v,c) ({ __io_rbw(); __raw_writel((v),(c)); __io_raw(); })
+
+#if __riscv_xlen != 32
+#define readq_relaxed(c)    ({ rt_uint64_t __v; __io_rbr(); __v = __raw_readq(c); __io_rar(); __v; })
+#define writeq_relaxed(v,c) ({ __io_rbw(); __raw_writeq((v),(c)); __io_raw(); })
+#endif
+
+#define __io_br()   do {} while (0)
+#define __io_ar()   __asm__ __volatile__ ("fence i,r" : : : "memory");
+#define __io_bw()   __asm__ __volatile__ ("fence w,o" : : : "memory");
+#define __io_aw()   do {} while (0)
+
+#define readb(c)    ({ rt_uint8_t  __v; __io_br(); __v = __raw_readb(c); __io_ar(); __v; })
+#define readw(c)    ({ rt_uint16_t __v; __io_br(); __v = __raw_readw(c); __io_ar(); __v; })
+#define readl(c)    ({ rt_uint32_t __v; __io_br(); __v = __raw_readl(c); __io_ar(); __v; })
+
+#define writeb(v,c) ({ __io_bw(); __raw_writeb((v),(c)); __io_aw(); })
+#define writew(v,c) ({ __io_bw(); __raw_writew((v),(c)); __io_aw(); })
+#define writel(v,c) ({ __io_bw(); __raw_writel((v),(c)); __io_aw(); })
+
+#if __riscv_xlen != 32
+#define readq(c)    ({ rt_uint64_t __v; __io_br(); __v = __raw_readq(c); __io_ar(); __v; })
+#define writeq(v,c) ({ __io_bw(); __raw_writeq((v),(c)); __io_aw(); })
+#endif
 
 #endif
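
Drivers are expected to go through the fence-ordered readl/writel accessors above for MMIO. A small hedged example of polling a hypothetical 32-bit status register and then writing a control register; the offsets and the ready bit are placeholders, not any real device.

#define EXAMPLE_REG_STATUS   0x00    /* placeholder offsets, not a real device */
#define EXAMPLE_REG_CTRL     0x04
#define EXAMPLE_READY_BIT    0x1

static void example_mmio_poll(void *base)
{
    /* wait until the device reports ready, then start it */
    while ((readl((rt_uint8_t *)base + EXAMPLE_REG_STATUS) & EXAMPLE_READY_BIT) == 0)
        ;
    writel(0x1, (rt_uint8_t *)base + EXAMPLE_REG_CTRL);
}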

+ 7 - 5
libcpu/risc-v/virt64/riscv_mmu.c

@@ -20,18 +20,20 @@
 
 void mmu_set_pagetable(rt_ubase_t addr)
 {
-    RT_ASSERT(__CHECKALIGN(addr,PAGE_OFFSET_BIT));
-    RT_ASSERT(__CHECKUPBOUND(addr,PHYSICAL_ADDRESS_WIDTH_BITS));
-    write_csr(satp,(((size_t)8) << 60) | (addr >> PAGE_OFFSET_BIT));
+    RT_ASSERT(__CHECKALIGN(addr, PAGE_OFFSET_BIT));
+    RT_ASSERT(__CHECKUPBOUND(addr, PHYSICAL_ADDRESS_WIDTH_BITS));
+
+    mmu_flush_tlb();
+    write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) | (addr >> PAGE_OFFSET_BIT));
     mmu_flush_tlb();
 }
 
 void mmu_enable_user_page_access()
 {
-    set_csr(sstatus,SSTATUS_PUM);
+    set_csr(sstatus, SSTATUS_PUM);
 }
 
 void mmu_disable_user_page_access()
 {
-    clear_csr(sstatus,SSTATUS_PUM);
+    clear_csr(sstatus, SSTATUS_PUM);
 }
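
With the SATP_MODE constants from riscv_mmu.h, the value mmu_set_pagetable() writes to satp is just the translation mode in the top four bits plus the physical page number of the root table. A worked sketch, assuming a 4 KiB-aligned root table at 0x80200000:

/* Sketch: composing the satp value for an Sv39 root table at 0x80200000. */
rt_ubase_t example_satp_value(void)
{
    rt_ubase_t root = 0x80200000UL;                       /* root page table PA */
    return (((rt_ubase_t)SATP_MODE_SV39) << SATP_MODE_OFFSET)
           | (root >> PAGE_OFFSET_BIT);                   /* = 0x8000000000080200 */
}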

+ 37 - 10
libcpu/risc-v/virt64/riscv_mmu.h

@@ -11,6 +11,8 @@
 #ifndef __RISCV_MMU_H__
 #define __RISCV_MMU_H__
 
+#include <rtthread.h>
+#include <rthw.h>
 #include "riscv.h"
 
 #undef PAGE_SIZE
@@ -52,18 +54,43 @@
 #define PAGE_DEFAULT_ATTR_LEAF (PAGE_ATTR_RWX | PAGE_ATTR_USER | PTE_V | PTE_G)
 #define PAGE_DEFAULT_ATTR_NEXT (PAGE_ATTR_NEXT_LEVEL | PTE_V | PTE_G)
 
-#define PAGE_IS_LEAF(pte) __MASKVALUE(pte,PAGE_ATTR_RWX)
+#define PAGE_IS_LEAF(pte) __MASKVALUE(pte, PAGE_ATTR_RWX)
 
-#define PTE_USED(pte) __MASKVALUE(pte,PTE_V)
+#define PTE_USED(pte) __MASKVALUE(pte, PTE_V)
 
-#define mmu_flush_tlb() do{asm volatile("sfence.vma x0,x0");}while(0)
-
-//compatible to rt-smart new version
-#define MMU_MAP_K_DEVICE (PAGE_ATTR_RWX | PTE_V | PTE_G)
-#define MMU_MAP_K_RWCB (PAGE_ATTR_RWX | PTE_V | PTE_G)
-#define ARCH_PAGE_SIZE PAGE_SIZE
-#define ARCH_PAGE_MASK (ARCH_PAGE_SIZE - 1)
-#define ARCH_PAGE_SHIFT PAGE_OFFSET_BIT
+/**
+ * Encoding of SATP (the Supervisor Address Translation and Protection register).
+ */
+#define SATP_MODE_OFFSET    60
+#define SATP_MODE_BARE      0
+#define SATP_MODE_SV39      8
+#define SATP_MODE_SV48      9
+#define SATP_MODE_SV57      10
+#define SATP_MODE_SV64      11
+
+#define ARCH_VA_WIDTH           39
+#define SATP_MODE               SATP_MODE_SV39
+
+#define mmu_flush_tlb()                   \
+    do                                    \
+    {                                     \
+        asm volatile("sfence.vma x0,x0"); \
+    } while (0)
+
+
+#define MMU_MAP_K_DEVICE        (PTE_G | PTE_W | PTE_R | PTE_V)
+#define MMU_MAP_K_RWCB          (PTE_G | PTE_X | PTE_W | PTE_R | PTE_V)
+#define MMU_MAP_U_RWCB          (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
+#define MMU_MAP_U_RW            (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
+
+#define PTE_XWR_MASK            0xe
+
+#define ARCH_PAGE_SIZE          PAGE_SIZE
+#define ARCH_PAGE_MASK          (ARCH_PAGE_SIZE - 1)
+#define ARCH_PAGE_SHIFT         PAGE_OFFSET_BIT
+#define ARCH_INDEX_WIDTH        9
+#define ARCH_INDEX_SIZE         (1ul << ARCH_INDEX_WIDTH)
+#define ARCH_INDEX_MASK         (ARCH_INDEX_SIZE - 1)
 
 void mmu_set_pagetable(rt_ubase_t addr);
 void mmu_enable_user_page_access();
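
For reference, the Sv39 geometry implied by the ARCH_INDEX_* and ARCH_PAGE_* constants above, as a short worked note (arithmetic only, nothing from the patch is changed):

/* entries per table : 1 << ARCH_INDEX_WIDTH                  = 512
 * bytes per table   : 512 * sizeof(rt_size_t)                = 4096 (one page)
 * virtual address   : 3 * ARCH_INDEX_WIDTH + ARCH_PAGE_SHIFT = 39 bits (ARCH_VA_WIDTH)
 */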

+ 10 - 0
libcpu/risc-v/virt64/startup_gcc.S

@@ -14,6 +14,9 @@
 #define __ASSEMBLY__
 #include <cpuport.h>
 
+boot_hartid: .int 0
+  .global      boot_hartid
+
   .global	_start
   .section ".start", "ax"
 _start:
@@ -25,8 +28,15 @@ _start:
       .dword 1
       .dword 0
 1:
+  # save hartid
+  la t0, boot_hartid                # address of the global variable boot_hartid
+  mv t1, a0                         # hartid is passed in a0 when entering S-mode
+  sw t1, (t0)                       # store the low 32 bits of t1 at the address in t0
+
+  # clear Interrupt Registers
   csrw sie, 0
   csrw sip, 0
+  # set Trap Vector Base Address Register
   la t0, trap_entry
   csrw stvec, t0