
!604 [RISC-V] cleanup lwp/arch/rv64 codes
Merge pull request !604 from PolarLush/riscv-posix-fixup

bernard 3 years ago
parent
commit
188917bab5

+ 1 - 1
bsp/allwinner/libraries/libos/src/os.c

@@ -142,7 +142,7 @@ void awos_arch_mems_clean_dcache_region(unsigned long start, unsigned long len)
 
 void awos_arch_mems_clean_flush_dcache_region(unsigned long start, unsigned long len)
 {
-    rt_hw_cpu_dcache_clean_flush((void *)start, len);
+    rt_hw_cpu_dcache_clean_invalidate((void *)start, len);
 }
 
 void awos_arch_mems_flush_dcache_region(unsigned long start, unsigned long len)

+ 12 - 9
bsp/qemu-virt64-riscv/.config

@@ -93,22 +93,19 @@ CONFIG_RT_MAIN_THREAD_PRIORITY=10
 # C++ features
 #
 # CONFIG_RT_USING_CPLUSPLUS is not set
-
-#
-# Command shell
-#
-CONFIG_RT_USING_FINSH=y
 CONFIG_RT_USING_MSH=y
+CONFIG_RT_USING_FINSH=y
 CONFIG_FINSH_USING_MSH=y
 CONFIG_FINSH_THREAD_NAME="tshell"
+CONFIG_FINSH_THREAD_PRIORITY=20
+CONFIG_FINSH_THREAD_STACK_SIZE=16384
 CONFIG_FINSH_USING_HISTORY=y
 CONFIG_FINSH_HISTORY_LINES=10
 CONFIG_FINSH_USING_SYMTAB=y
+CONFIG_FINSH_CMD_SIZE=80
+CONFIG_MSH_USING_BUILT_IN_COMMANDS=y
 CONFIG_FINSH_USING_DESCRIPTION=y
 # CONFIG_FINSH_ECHO_DISABLE_DEFAULT is not set
-CONFIG_FINSH_THREAD_PRIORITY=20
-CONFIG_FINSH_THREAD_STACK_SIZE=16384
-CONFIG_FINSH_CMD_SIZE=80
 # CONFIG_FINSH_USING_AUTH is not set
 CONFIG_FINSH_ARG_MAX=10
 
@@ -157,6 +154,8 @@ CONFIG_RT_USING_SYSTEM_WORKQUEUE=y
 CONFIG_RT_SYSTEM_WORKQUEUE_STACKSIZE=8192
 CONFIG_RT_SYSTEM_WORKQUEUE_PRIORITY=23
 CONFIG_RT_USING_SERIAL=y
+CONFIG_RT_USING_SERIAL_V1=y
+# CONFIG_RT_USING_SERIAL_V2 is not set
 CONFIG_RT_SERIAL_USING_DMA=y
 CONFIG_RT_SERIAL_RB_BUFSZ=64
 CONFIG_RT_USING_TTY=y
@@ -190,6 +189,7 @@ CONFIG_RT_USING_RTC=y
 # CONFIG_RT_USING_HWCRYPTO is not set
 # CONFIG_RT_USING_PULSE_ENCODER is not set
 # CONFIG_RT_USING_INPUT_CAPTURE is not set
+# CONFIG_RT_USING_DEV_BUS is not set
 # CONFIG_RT_USING_WIFI is not set
 CONFIG_RT_USING_VIRTIO=y
 CONFIG_RT_USING_VIRTIO10=y
@@ -205,6 +205,7 @@ CONFIG_RT_USING_VIRTIO_NET=y
 #
 # CONFIG_RT_USING_USB_HOST is not set
 # CONFIG_RT_USING_USB_DEVICE is not set
+# CONFIG_RT_USING_FAL is not set
 
 #
 # POSIX layer and C standard library
@@ -983,7 +984,9 @@ CONFIG_BSP_USING_VIRTIO_NET=y
 # CONFIG_BSP_USING_UART1 is not set
 CONFIG_BOARD_QEMU_VIRT_RV64=y
 CONFIG_ENABLE_FPU=y
-# CONFIG_ENABLE_VECTOR is not set
+CONFIG_ENABLE_VECTOR=y
+CONFIG_ARCH_VECTOR_VLEN_128=y
+# CONFIG_ARCH_VECTOR_VLEN_256 is not set
 # CONFIG_RT_USING_USERSPACE_32BIT_LIMIT is not set
 CONFIG_ARCH_USING_NEW_CTX_SWITCH=y
 CONFIG___STACKSIZE__=16384

+ 45 - 0
bsp/qemu-virt64-riscv/README.md

@@ -0,0 +1,45 @@
+# RT-Smart QEMU SYSTEM RISC-V RV64 BSP
+
+## 1. Introduction
+
+QEMU can emulate both 32-bit and 64-bit RISC-V CPUs. Use the qemu-system-riscv64 executable to simulate a 64-bit RISC-V machine, and the qemu-system-riscv32 executable to simulate a 32-bit RISC-V machine.
+
+QEMU has generally good support for RISC-V guests. It has support for several different machines. The reason we support so many is that RISC-V hardware is much more widely varying than x86 hardware. RISC-V CPUs are generally built into “system-on-chip” (SoC) designs created by many different companies with different devices, and these SoCs are then built into machines which can vary still further even if they use the same SoC.
+
+For most boards the CPU type is fixed (matching what the hardware has), so typically you don’t need to specify the CPU type by hand, except for special cases like the virt board.
+
+## 2. Building
+
+Building the kernel properly can be tedious because each RISC-V toolchain targets one specific RISC-V ISA, so you have to use a different toolchain for each ISA.
+Here we focus on two ISAs: `rv64imafdcv` and `rv64imac`.
+
+If you are not sure which ISA you need, `rv64imac` should cover most cases. Given a RISC-V toolchain, you can check the ISA it targets like this:
+
+```bash
+root@a9025fd90fd4:/home/rtthread-smart# riscv64-unknown-linux-musl-gcc -v
+Using built-in specs.
+COLLECT_GCC=riscv64-unknown-linux-musl-gcc
+COLLECT_LTO_WRAPPER=/home/rtthread-smart/tools/gnu_gcc/riscv64-linux-musleabi_for_x86_64-pc-linux-gnu/bin/../libexec/gcc/riscv64-unknown-linux-musl/10.1.0/lto-wrapper
+Target: riscv64-unknown-linux-musl
+Configured with: /builds/alliance/risc-v-toolchain/riscv-gcc/configure --target=riscv64-unknown-linux-musl --prefix=/builds/alliance/risc-v-toolchain/install-native/ --with-sysroot=/builds/alliance/risc-v-toolchain/install-native//riscv64-unknown-linux-musl --with-system-zlib --enable-shared --enable-tls --enable-languages=c,c++ --disable-libmudflap --disable-libssp --disable-libquadmath --disable-libsanitizer --disable-nls --disable-bootstrap --src=/builds/alliance/risc-v-toolchain/riscv-gcc --disable-multilib --with-abi=lp64 --with-arch=rv64imac --with-tune=rocket 'CFLAGS_FOR_TARGET=-O2   -mcmodel=medany -march=rv64imac -mabi=lp64 -D __riscv_soft_float' 'CXXFLAGS_FOR_TARGET=-O2   -mcmodel=medany -march=rv64imac -mabi=lp64 -D __riscv_soft_float'
+Thread model: posix
+Supported LTO compression algorithms: zlib
+gcc version 10.1.0 (GCC) 
+```
+
+The `-march=***` value is what you are looking for, and the `-mabi=***` value is also needed to configure the build script.
+
+Steps to build the kernel (a short worked example follows the list):
+
+1. In `$RTT_ROOT/bsp/qemu-virt64-riscv/rtconfig.py:40`, make sure `-march=***` and `-mabi=***` match your toolchain
+1. If your `-march` contains the letters v/d/f, configure the kernel with `scons --menuconfig` and select `Using RISC-V Vector Extension` / `Enable FPU`
+1. Run `scons`
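+
+For example, with an `rv64imafdcv` toolchain the whole flow looks roughly like this (a sketch; adjust paths and options to your own setup):
+
+```bash
+cd $RTT_ROOT/bsp/qemu-virt64-riscv
+# keep -march/-mabi in rtconfig.py in sync with what your toolchain reports
+scons --menuconfig      # select `Using RISC-V Vector Extension` and `Enable FPU`
+scons
+```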
+
+## 3. Execution
+
+It's recommended to clone the latest QEMU release and build it locally.
+Make sure QEMU is ready by typing `qemu-system-riscv64 --version` in your shell.
+
+Use `qemu-nographic.sh` or `qemu-nographic.bat` to start the simulation.
+
+> If your `-march` contains the letter v, use `qemu-v-nographic.*` instead.
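+
+A minimal run then looks like this (a sketch, assuming `rtthread.bin` has already been built in this directory):
+
+```bash
+qemu-system-riscv64 --version   # confirm QEMU is on PATH
+./qemu-v-nographic.sh           # creates sd.bin on first use and boots rtthread.bin
+```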

+ 3 - 0
bsp/qemu-virt64-riscv/qemu-v-dbg.sh

@@ -0,0 +1,3 @@
+qemu-system-riscv64 -nographic -machine virt -cpu rv64,v=true,vlen=128,vext_spec=v1.0 -m 256M -kernel rtthread.bin -s -S \
+-drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 \
+-device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0

+ 8 - 0
bsp/qemu-virt64-riscv/qemu-v-nographic.sh

@@ -0,0 +1,8 @@
+if [ ! -f "sd.bin" ]; then
+dd if=/dev/zero of=sd.bin bs=1024 count=65536
+fi
+
+qemu-system-riscv64 -nographic -machine virt -cpu rv64,v=true,vlen=128,vext_spec=v1.0 -m 256M -kernel rtthread.bin \
+-drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 \
+-netdev user,id=tap0 -device virtio-net-device,netdev=tap0,bus=virtio-mmio-bus.1 \
+-device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0

+ 8 - 7
bsp/qemu-virt64-riscv/rtconfig.h

@@ -64,20 +64,18 @@
 
 /* C++ features */
 
-
-/* Command shell */
-
-#define RT_USING_FINSH
 #define RT_USING_MSH
+#define RT_USING_FINSH
 #define FINSH_USING_MSH
 #define FINSH_THREAD_NAME "tshell"
+#define FINSH_THREAD_PRIORITY 20
+#define FINSH_THREAD_STACK_SIZE 16384
 #define FINSH_USING_HISTORY
 #define FINSH_HISTORY_LINES 10
 #define FINSH_USING_SYMTAB
-#define FINSH_USING_DESCRIPTION
-#define FINSH_THREAD_PRIORITY 20
-#define FINSH_THREAD_STACK_SIZE 16384
 #define FINSH_CMD_SIZE 80
+#define MSH_USING_BUILT_IN_COMMANDS
+#define FINSH_USING_DESCRIPTION
 #define FINSH_ARG_MAX 10
 
 /* Device virtual file system */
@@ -111,6 +109,7 @@
 #define RT_SYSTEM_WORKQUEUE_STACKSIZE 8192
 #define RT_SYSTEM_WORKQUEUE_PRIORITY 23
 #define RT_USING_SERIAL
+#define RT_USING_SERIAL_V1
 #define RT_SERIAL_USING_DMA
 #define RT_SERIAL_RB_BUFSZ 64
 #define RT_USING_TTY
@@ -339,6 +338,8 @@
 #define BSP_USING_VIRTIO_NET
 #define BOARD_QEMU_VIRT_RV64
 #define ENABLE_FPU
+#define ENABLE_VECTOR
+#define ARCH_VECTOR_VLEN_128
 #define ARCH_USING_NEW_CTX_SWITCH
 #define __STACKSIZE__ 16384
 

+ 1 - 1
bsp/qemu-virt64-riscv/rtconfig.py

@@ -38,7 +38,7 @@ if PLATFORM == 'gcc':
     OBJCPY  = PREFIX + 'objcopy'
 
     DEVICE  = ' -mcmodel=medany -march=rv64imafdc -mabi=lp64 '
-    CFLAGS  = DEVICE + '-ffreestanding -fno-common -ffunction-sections -fdata-sections -fstrict-volatile-bitfields'
+    CFLAGS  = DEVICE + '-ffreestanding -flax-vector-conversions -fno-common -ffunction-sections -fdata-sections -fstrict-volatile-bitfields'
     AFLAGS  = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__ '
     LFLAGS  = DEVICE + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,_start -T link.lds' + ' -lsupc++ -lgcc -static'
     CPATH   = ''

+ 15 - 1
components/lwp/arch/risc-v/rv64/lwp_arch.c

@@ -195,6 +195,14 @@ int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
     rt_thread_t thread = rt_container_of((unsigned long)thread_sp, struct rt_thread, sp);
     syscall_frame->tp = (rt_ubase_t)thread->thread_idr;
 
+#ifdef ARCH_USING_NEW_CTX_SWITCH
+    extern void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus);
+    rt_ubase_t sstatus = read_csr(sstatus) | SSTATUS_SPP;
+    sstatus &= ~SSTATUS_SIE;
+
+    /* compatible to RESTORE_CONTEXT */
+    stk = (void *)_rt_hw_stack_init((rt_ubase_t *)stk, (rt_ubase_t)exit, sstatus);
+#else
     /* build temp thread context */
     stk -= sizeof(struct rt_hw_stack_frame);
 
@@ -216,6 +224,7 @@ int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
     /* set stack as syscall stack */
     thread_frame->user_sp_exc_stack = (rt_ubase_t)syscall_stk;
 
+#endif /* ARCH_USING_NEW_CTX_SWITCH */
     /* save new stack top */
     *thread_sp = (void *)stk;
 
@@ -247,4 +256,9 @@ void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
     arch_start_umode(args, user_entry, (void*)USER_STACK_VEND, kernel_stack);
 }
 
-#endif
+void *arch_get_usp_from_uctx(struct rt_user_context *uctx)
+{
+    return uctx->sp;
+}
+
+#endif /* RT_USING_USERSPACE */

+ 109 - 118
components/lwp/arch/risc-v/rv64/lwp_gcc.S

@@ -8,11 +8,16 @@
  * 2018-12-10     Jesven       first version
  * 2021-02-03     lizhirui     port to riscv64
  * 2021-02-19     lizhirui     port to new version of rt-smart
+ * 2022-11-08     Wangxiaoyao  Cleanup codes;
+ *                             Support new context switch
  */
 
 #include "rtconfig.h"
 
+#ifndef __ASSEMBLY__
 #define __ASSEMBLY__
+#endif /* __ASSEMBLY__ */
+
 #include "cpuport.h"
 #include "encoding.h"
 #include "stackframe.h"
@@ -46,7 +51,7 @@ arch_crt_start_umode:
     csrc sstatus, t0
     li t0, SSTATUS_SPIE // enable interrupt when return to user mode
     csrs sstatus, t0
-    
+
     csrw sepc, a1
     mv s0, a0
     mv s1, a1
@@ -63,152 +68,149 @@ arch_crt_start_umode:
     csrw sscratch, s3
     sret//enter user mode
 
+/**
+ * Unified exit point from kernel mode back to user space.
+ * We handle the following things here:
+ * 1. restoring user mode debug state (not supported yet)
+ * 2. handling the thread's exit request
+ * 3. handling POSIX signals
+ * 4. restoring the user context
+ * 5. jumping to user mode
+ */
 .global arch_ret_to_user
 arch_ret_to_user:
+    // TODO: we don't support a kernel gdb server on RISC-V yet,
+    // so we don't check the debug state or handle debugging business here
+
+    call lwp_check_exit_request
+    beqz a0, 1f
+    mv a0, x0
+    call sys_exit
+
+1:
     call lwp_signal_check
     beqz a0, ret_to_user_exit
-    // now sp is user sp
     J user_do_signal
 
 ret_to_user_exit:
     RESTORE_ALL
-    // `RESTORE_ALL` also reset sp to user sp
+    // `RESTORE_ALL` also resets sp to the user sp and sets up sscratch
     sret
 
-/*#ifdef RT_USING_LWP
-.global lwp_check_exit
-lwp_check_exit:
-    push {r0 - r12, lr}
-    bl lwp_check_exit_request
-    cmp r0, #0
-    beq 1f
-    mov r0, #0
-    bl sys_exit
-1:
-    pop {r0 - r12, pc}
-#endif*/
-
-/*#ifdef RT_USING_GDBSERVER
-.global lwp_check_debug
-lwp_check_debug:
-    push {r0 - r12, lr}
-    bl lwp_check_debug_suspend
-    cmp r0, #0
-    beq lwp_check_debug_quit
-
-    cps #Mode_SYS
-    sub sp, #8
-    ldr r0, =lwp_debugreturn
-    ldr r1, [r0]
-    str r1, [sp]
-    ldr r1, [r0, #4]
-    str r1, [sp, #4]
-    mov r0, #0
-    mcr p15, 0, r0, c7, c5, 0   ;//iciallu
-    dsb
-    isb
-    mov r0, sp // lwp_debugreturn
-    cps #Mode_SVC
-
-    mrs r1, spsr
-    push {r1}
-    mov r1, #Mode_USR
-    msr spsr_cxsf, r1
-    movs pc, r0
-ret_from_user:
-    cps #Mode_SYS
-    add sp, #8
-    cps #Mode_SVC*/
-    /*
-    pop {r0 - r3, r12}
-    pop {r4 - r6, lr}
-    */
-    /*add sp, #(4*9)
-    pop {r4}
-    msr spsr_cxsf, r4
-lwp_check_debug_quit:
-    pop {r0 - r12, pc}
-//#endif
-*/
-
+/**
+ * Restore the user context from the exception frame stored on the user stack
+ * and handle pending signals.
+ */
 arch_signal_quit:
     call lwp_signal_restore
-    //a0 is user_ctx
+    call arch_get_usp_from_uctx
+    // return value is user sp
     mv sp, a0
+
+    // restore the user sp as it was before entering the trap
+    addi a0, sp, CTX_REG_NR * REGBYTES
+    csrw sscratch, a0
+
     RESTORE_ALL
-    csrw sscratch, zero
-    sret
+    SAVE_ALL
+    j arch_ret_to_user
 
+/**
+ * Prepare and enter the user signal handler.
+ * Move the user exception frame and set up the signal return
+ * routine on the user stack.
+ */
 user_do_signal:
-    csrw sscratch, sp
+    /** restore and backup kernel sp carefully to avoid leaking */
+    addi t0, sp, CTX_REG_NR * REGBYTES
+    csrw sscratch, t0
+
     RESTORE_ALL
-    // now sp is user sp
-    // and in interrupt close
     SAVE_ALL
 
-    // save user sp in SAVE_ALL frame
-    mv t0, sp
-    addi t0, t0, CTX_REG_NR * REGBYTES
-    STORE t0, 32 * REGBYTES(sp)
-
-    // save lwp_sigreturn in user memory
+    /**
+     * save lwp_sigreturn in user memory
+     */
     mv s0, sp
-    la t0, lwp_sigreturn//t0 = src
+    la t0, lwp_sigreturn
     la t1, lwp_sigreturn_end
-    sub t1, t1, t0//t1 = size
-    sub s0, s0, t1//s0 = dst
-
+    // t1 <- size
+    sub t1, t1, t0
+    // s0 <- dst
+    sub s0, s0, t1
+    mv s2, t1
 lwp_sigreturn_copy_loop:
-    addi t2, t1, -1//t2 = memory index
-    add t3, t0, t2//t3 = src addr
-    add t4, s0, t2//t4 = dst addr
+    addi t2, t1, -1
+    add t3, t0, t2
+    add t4, s0, t2
     lb t5, 0(t3)
     sb t5, 0(t4)
     mv t1, t2
     bnez t1, lwp_sigreturn_copy_loop
 
-    // restore kernel stack
-    csrrw sp, sscratch, s0
+    /**
+     * 1. clear sscratch & restore the kernel sp to
+     *    enter the kernel mode routine
+     * 2. store the exception frame address for restoring the context,
+     *    by calling lwp_signal_backup
+     * 3. store the lwp_sigreturn entry address
+     * 4. get the signal id as the parameter for the signal handler
+     */
+    mv s1, sp
+    csrrw sp, sscratch, x0
 
     /**
-     * a0: user sp
-     * a1: user_pc (not used)
-     * a2: user_flag (not used)
-     */ 
-    csrr a0, sscratch
+     * synchronize dcache & icache if target is
+     * a Harvard Architecture machine, otherwise
+     * do nothing
+     */
+    mv a0, s0
+    mv a1, s2
+    call rt_hw_sync_cache_local
+
+    /**
+     * back up the user sp (points to the saved exception frame,
+     * skipping the sigreturn routine) and get the signal id
+     *
+     * a0: user sp
+     * a1: user_pc (not used, marked as 0 to avoid abuse)
+     * a2: user_flag (not used, marked as 0 to avoid abuse)
+     */
+    mv a0, s1
     mv a1, zero
     mv a2, zero
     call lwp_signal_backup
-    // a0 <- signal id
 
-    // restore kernel sp to initial, and load `sp` to user stack
-
-    // s2 <- signal id(a0)
+    /**
+     * backup signal id in s2, 
+     * and get sighandler by signal id
+     */
     mv s2, a0
     call lwp_sighandler_get
-    // a0 <- signal_handler
 
-    // ra <- lwp_sigreturn
+    /**
+     * set register RA to the sigreturn routine copied onto the user stack
+     * set sp to the user sp & save the kernel sp in sscratch
+     */
     mv ra, s0
+    csrw sscratch, sp
+    mv sp, s0
 
+    /**
+     * a0 is signal_handler;
+     * s1 = (a0 == NULL) ? s0 (copied lwp_sigreturn) : a0
+     */
     mv s1, s0
     beqz a0, skip_user_signal_handler
-    // a0 <- signal_handler
     mv s1, a0
 
 skip_user_signal_handler:
     // enter user mode and enable interrupt when return to user mode
     li t0, SSTATUS_SPP
-    csrc sstatus, t0 
+    csrc sstatus, t0
     li t0, SSTATUS_SPIE
     csrs sstatus, t0
 
-    /**
-     * sp <- user sp
-     * sscratch <- kernel sp
-     */
-    csrrw sp, sscratch, sp
-
     // sepc <- signal_handler
     csrw sepc, s1
     // a0 <- signal id
@@ -239,21 +241,9 @@ lwp_thread_return:
 .global lwp_thread_return_end
 lwp_thread_return_end:
 
-.global check_vfp
-check_vfp:
-    //don't use fpu temporarily
-    li a0, 0
-    ret
-
-.global get_vfp
-get_vfp:
-    //don't use fpu temporarily
-    li a0, 0
-    ret
-
 .globl arch_get_tidr
 arch_get_tidr:
-    mv a0, tp 
+    mv a0, tp
     ret
 
 .global arch_set_thread_area
@@ -277,11 +267,11 @@ syscall_entry:
     andi t0, t0, 0x100
     beqz t0, __restore_sp_from_tcb
 
-__restore_sp_from_sscratch:
+__restore_sp_from_sscratch: // from kernel
     csrr t0, sscratch
     j __move_stack_context
 
-__restore_sp_from_tcb:
+__restore_sp_from_tcb: // from user
     la a0, rt_current_thread
     LOAD a0, 0(a0)
     jal get_thread_kernel_stack_top
@@ -303,9 +293,11 @@ copy_context_loop:
     addi t2, t2, 8
     bnez s0, copy_context_loop
 #endif /* ARCH_USING_NEW_CTX_SWITCH */
-    LOAD s0, 7 * REGBYTES(sp)
-    addi s0, s0, -0xfe
-    beqz s0, arch_signal_quit
+
+    /* fetch SYSCALL ID */
+    LOAD a7, 17 * REGBYTES(sp)
+    addi a7, a7, -0xfe
+    beqz a7, arch_signal_quit
 
 #ifdef RT_USING_USERSPACE
     /* save setting when syscall enter */
@@ -343,4 +335,3 @@ dont_ret_to_user:
     RESTORE_ALL
     csrw sscratch, zero
     sret
-

+ 205 - 181
components/lwp/lwp_syscall.c

@@ -28,18 +28,20 @@
 #include <dfs_select.h>
 #endif
 
+#include "syscall_data.h"
+
 #if (defined(RT_USING_SAL) && defined(SAL_USING_POSIX))
 #include <sys/socket.h>
 
-#define SYSCALL_NET(f)      ((void *)(f))
+#define SYSCALL_NET(f)      f
 #else
-#define SYSCALL_NET(f)      ((void *)sys_notimpl)
+#define SYSCALL_NET(f)      SYSCALL_SIGN(sys_notimpl)
 #endif
 
 #if defined(RT_USING_DFS) && defined(RT_USING_USERSPACE)
-#define SYSCALL_USPACE(f)   ((void *)(f))
+#define SYSCALL_USPACE(f)   f
 #else
-#define SYSCALL_USPACE(f)   ((void *)sys_notimpl)
+#define SYSCALL_USPACE(f)   SYSCALL_SIGN(sys_notimpl)
 #endif
 
 #define DBG_TAG    "SYSCALL"
@@ -52,6 +54,7 @@
 
 #include <sal_netdb.h>
 #include <sal.h>
+#include <sys/socket.h>
 #endif /* RT_USING_SAL */
 
 #include <tty.h>
@@ -4115,200 +4118,200 @@ int sys_fsync(int fd)
 
 const static void* func_table[] =
 {
-    (void *)sys_exit,            /* 01 */
-    (void *)sys_read,
-    (void *)sys_write,
-    (void *)sys_lseek,
-    (void *)sys_open,            /* 05 */
-    (void *)sys_close,
-    (void *)sys_ioctl,
-    (void *)sys_fstat,
-    (void *)sys_poll,
-    (void *)sys_nanosleep,       /* 10 */
-    (void *)sys_gettimeofday,
-    (void *)sys_settimeofday,
-    (void *)sys_exec,
-    (void *)sys_kill,
-    (void *)sys_getpid,          /* 15 */
-    (void *)sys_getpriority,
-    (void *)sys_setpriority,
-    (void *)sys_sem_create,
-    (void *)sys_sem_delete,
-    (void *)sys_sem_take,        /* 20 */
-    (void *)sys_sem_release,
-    (void *)sys_mutex_create,
-    (void *)sys_mutex_delete,
-    (void *)sys_mutex_take,
-    (void *)sys_mutex_release,   /* 25 */
-    (void *)sys_event_create,
-    (void *)sys_event_delete,
-    (void *)sys_event_send,
-    (void *)sys_event_recv,
-    (void *)sys_mb_create,       /* 30 */
-    (void *)sys_mb_delete,
-    (void *)sys_mb_send,
-    (void *)sys_mb_send_wait,
-    (void *)sys_mb_recv,
-    (void *)sys_mq_create,       /* 35 */
-    (void *)sys_mq_delete,
-    (void *)sys_mq_send,
-    (void *)sys_mq_urgent,
-    (void *)sys_mq_recv,
-    (void *)sys_thread_create,   /* 40 */
-    (void *)sys_thread_delete,
-    (void *)sys_thread_startup,
-    (void *)sys_thread_self,
-    (void *)sys_channel_open,
-    (void *)sys_channel_close,   /* 45 */
-    (void *)sys_channel_send,
-    (void *)sys_channel_send_recv_timeout,
-    (void *)sys_channel_reply,
-    (void *)sys_channel_recv_timeout,
-    (void *)sys_enter_critical,  /* 50 */
-    (void *)sys_exit_critical,
-
-    SYSCALL_USPACE(sys_brk),
-    SYSCALL_USPACE(sys_mmap2),
-    SYSCALL_USPACE(sys_munmap),
+    SYSCALL_SIGN(sys_exit),            /* 01 */
+    SYSCALL_SIGN(sys_read),
+    SYSCALL_SIGN(sys_write),
+    SYSCALL_SIGN(sys_lseek),
+    SYSCALL_SIGN(sys_open),            /* 05 */
+    SYSCALL_SIGN(sys_close),
+    SYSCALL_SIGN(sys_ioctl),
+    SYSCALL_SIGN(sys_fstat),
+    SYSCALL_SIGN(sys_poll),
+    SYSCALL_SIGN(sys_nanosleep),       /* 10 */
+    SYSCALL_SIGN(sys_gettimeofday),
+    SYSCALL_SIGN(sys_settimeofday),
+    SYSCALL_SIGN(sys_exec),
+    SYSCALL_SIGN(sys_kill),
+    SYSCALL_SIGN(sys_getpid),          /* 15 */
+    SYSCALL_SIGN(sys_getpriority),
+    SYSCALL_SIGN(sys_setpriority),
+    SYSCALL_SIGN(sys_sem_create),
+    SYSCALL_SIGN(sys_sem_delete),
+    SYSCALL_SIGN(sys_sem_take),        /* 20 */
+    SYSCALL_SIGN(sys_sem_release),
+    SYSCALL_SIGN(sys_mutex_create),
+    SYSCALL_SIGN(sys_mutex_delete),
+    SYSCALL_SIGN(sys_mutex_take),
+    SYSCALL_SIGN(sys_mutex_release),   /* 25 */
+    SYSCALL_SIGN(sys_event_create),
+    SYSCALL_SIGN(sys_event_delete),
+    SYSCALL_SIGN(sys_event_send),
+    SYSCALL_SIGN(sys_event_recv),
+    SYSCALL_SIGN(sys_mb_create),       /* 30 */
+    SYSCALL_SIGN(sys_mb_delete),
+    SYSCALL_SIGN(sys_mb_send),
+    SYSCALL_SIGN(sys_mb_send_wait),
+    SYSCALL_SIGN(sys_mb_recv),
+    SYSCALL_SIGN(sys_mq_create),       /* 35 */
+    SYSCALL_SIGN(sys_mq_delete),
+    SYSCALL_SIGN(sys_mq_send),
+    SYSCALL_SIGN(sys_mq_urgent),
+    SYSCALL_SIGN(sys_mq_recv),
+    SYSCALL_SIGN(sys_thread_create),   /* 40 */
+    SYSCALL_SIGN(sys_thread_delete),
+    SYSCALL_SIGN(sys_thread_startup),
+    SYSCALL_SIGN(sys_thread_self),
+    SYSCALL_SIGN(sys_channel_open),
+    SYSCALL_SIGN(sys_channel_close),   /* 45 */
+    SYSCALL_SIGN(sys_channel_send),
+    SYSCALL_SIGN(sys_channel_send_recv_timeout),
+    SYSCALL_SIGN(sys_channel_reply),
+    SYSCALL_SIGN(sys_channel_recv_timeout),
+    SYSCALL_SIGN(sys_enter_critical),  /* 50 */
+    SYSCALL_SIGN(sys_exit_critical),
+
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_brk)),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_mmap2)),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_munmap)),
 #ifdef ARCH_MM_MMU
-    SYSCALL_USPACE(sys_shmget), /* 55 */
-    SYSCALL_USPACE(sys_shmrm),
-    SYSCALL_USPACE(sys_shmat),
-    SYSCALL_USPACE(sys_shmdt),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_shmget)), /* 55 */
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_shmrm)),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_shmat)),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_shmdt)),
 #else
 #ifdef RT_LWP_USING_SHM
-    (void *)sys_shm_alloc,      /* 55 */
-    (void *)sys_shm_free,
-    (void *)sys_shm_retain,
-    (void *)sys_notimpl,
+    SYSCALL_SIGN(sys_shm_alloc),      /* 55 */
+    SYSCALL_SIGN(sys_shm_free),
+    SYSCALL_SIGN(sys_shm_retain),
+    SYSCALL_SIGN(sys_notimpl),
 #else
-    (void *)sys_notimpl,      /* 55 */
-    (void *)sys_notimpl,
-    (void *)sys_notimpl,
-    (void *)sys_notimpl,
+    SYSCALL_SIGN(sys_notimpl),      /* 55 */
+    SYSCALL_SIGN(sys_notimpl),
+    SYSCALL_SIGN(sys_notimpl),
+    SYSCALL_SIGN(sys_notimpl),
 #endif /* RT_LWP_USING_SHM */
 #endif /* ARCH_MM_MMU */
-    (void *)sys_device_init,
-    (void *)sys_device_register, /* 60 */
-    (void *)sys_device_control,
-    (void *)sys_device_find,
-    (void *)sys_device_open,
-    (void *)sys_device_close,
-    (void *)sys_device_read,    /* 65 */
-    (void *)sys_device_write,
-
-    (void *)sys_stat,
-    (void *)sys_thread_find,
-
-    SYSCALL_NET(sys_accept),
-    SYSCALL_NET(sys_bind),      /* 70 */
-    SYSCALL_NET(sys_shutdown),
-    SYSCALL_NET(sys_getpeername),
-    SYSCALL_NET(sys_getsockname),
-    SYSCALL_NET(sys_getsockopt),
-    SYSCALL_NET(sys_setsockopt), /* 75 */
-    SYSCALL_NET(sys_connect),
-    SYSCALL_NET(sys_listen),
-    SYSCALL_NET(sys_recv),
-    SYSCALL_NET(sys_recvfrom),
-    SYSCALL_NET(sys_send),      /* 80 */
-    SYSCALL_NET(sys_sendto),
-    SYSCALL_NET(sys_socket),
-
-    SYSCALL_NET(sys_closesocket),
-    SYSCALL_NET(sys_getaddrinfo),
-    SYSCALL_NET(sys_gethostbyname2_r), /* 85 */
-
-    (void *)sys_notimpl,    //(void *)network,
-    (void *)sys_notimpl,    //(void *)network,
-    (void *)sys_notimpl,    //(void *)network,
-    (void *)sys_notimpl,    //(void *)network,
-    (void *)sys_notimpl,    //(void *)network, /* 90 */
-    (void *)sys_notimpl,    //(void *)network,
-    (void *)sys_notimpl,    //(void *)network,
-    (void *)sys_notimpl,    //(void *)network,
+    SYSCALL_SIGN(sys_device_init),
+    SYSCALL_SIGN(sys_device_register), /* 60 */
+    SYSCALL_SIGN(sys_device_control),
+    SYSCALL_SIGN(sys_device_find),
+    SYSCALL_SIGN(sys_device_open),
+    SYSCALL_SIGN(sys_device_close),
+    SYSCALL_SIGN(sys_device_read),    /* 65 */
+    SYSCALL_SIGN(sys_device_write),
+
+    SYSCALL_SIGN(sys_stat),
+    SYSCALL_SIGN(sys_thread_find),
+
+    SYSCALL_NET(SYSCALL_SIGN(sys_accept)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_bind)),      /* 70 */
+    SYSCALL_NET(SYSCALL_SIGN(sys_shutdown)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_getpeername)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_getsockname)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_getsockopt)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_setsockopt)), /* 75 */
+    SYSCALL_NET(SYSCALL_SIGN(sys_connect)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_listen)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_recv)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_recvfrom)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_send)),      /* 80 */
+    SYSCALL_NET(SYSCALL_SIGN(sys_sendto)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_socket)),
+
+    SYSCALL_NET(SYSCALL_SIGN(sys_closesocket)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_getaddrinfo)),
+    SYSCALL_NET(SYSCALL_SIGN(sys_gethostbyname2_r)), /* 85 */
+
+    SYSCALL_SIGN(sys_notimpl),    //network,
+    SYSCALL_SIGN(sys_notimpl),    //network,
+    SYSCALL_SIGN(sys_notimpl),    //network,
+    SYSCALL_SIGN(sys_notimpl),    //network,
+    SYSCALL_SIGN(sys_notimpl),    //network, /* 90 */
+    SYSCALL_SIGN(sys_notimpl),    //network,
+    SYSCALL_SIGN(sys_notimpl),    //network,
+    SYSCALL_SIGN(sys_notimpl),    //network,
 
 #ifdef RT_USING_DFS
-    (void *)sys_select,
+    SYSCALL_SIGN(sys_select),
 #else
-    (void *)sys_notimpl,
+    SYSCALL_SIGN(sys_notimpl),
 #endif
 
-    (void *)sys_notimpl,    //(void *)sys_hw_interrupt_disable, /* 95 */
-    (void *)sys_notimpl,    //(void *)sys_hw_interrupt_enable,
+    SYSCALL_SIGN(sys_notimpl),    //SYSCALL_SIGN(sys_hw_interrupt_disable), /* 95 */
+    SYSCALL_SIGN(sys_notimpl),    //SYSCALL_SIGN(sys_hw_interrupt_enable),
 
-    (void *)sys_tick_get,
-    (void *)sys_exit_group,
+    SYSCALL_SIGN(sys_tick_get),
+    SYSCALL_SIGN(sys_exit_group),
 
-    (void *)sys_notimpl,    //(void *)rt_delayed_work_init,
-    (void *)sys_notimpl,    //(void *)rt_work_submit,           /* 100 */
-    (void *)sys_notimpl,    //(void *)rt_wqueue_wakeup,
-    (void *)sys_thread_mdelay,
-    (void *)sys_sigaction,
-    (void *)sys_sigprocmask,
-    (void *)sys_tkill,             /* 105 */
-    (void *)sys_thread_sigprocmask,
+    SYSCALL_SIGN(sys_notimpl),    //rt_delayed_work_init,
+    SYSCALL_SIGN(sys_notimpl),    //rt_work_submit,           /* 100 */
+    SYSCALL_SIGN(sys_notimpl),    //rt_wqueue_wakeup,
+    SYSCALL_SIGN(sys_thread_mdelay),
+    SYSCALL_SIGN(sys_sigaction),
+    SYSCALL_SIGN(sys_sigprocmask),
+    SYSCALL_SIGN(sys_tkill),             /* 105 */
+    SYSCALL_SIGN(sys_thread_sigprocmask),
 #ifdef ARCH_MM_MMU
-    (void *)sys_cacheflush,
-    (void *)sys_notimpl,
-    (void *)sys_notimpl,
+    SYSCALL_SIGN(sys_cacheflush),
+    SYSCALL_SIGN(sys_notimpl),
+    SYSCALL_SIGN(sys_notimpl),
 #else
-    (void *)sys_notimpl,
-    (void *)sys_lwp_sighandler_set,
-    (void *)sys_thread_sighandler_set,
+    SYSCALL_SIGN(sys_notimpl),
+    SYSCALL_SIGN(sys_lwp_sighandler_set),
+    SYSCALL_SIGN(sys_thread_sighandler_set),
 #endif
-    (void *)sys_waitpid,          /* 110 */
-
-    (void *)sys_timer_create,
-    (void *)sys_timer_delete,
-    (void *)sys_timer_start,
-    (void *)sys_timer_stop,
-    (void *)sys_timer_control,  /* 115 */
-    (void *)sys_getcwd,
-    (void *)sys_chdir,
-    (void *)sys_unlink,
-    (void *)sys_mkdir,
-    (void *)sys_rmdir,          /* 120 */
-    (void *)sys_getdents,
-    (void *)sys_get_errno,
+    SYSCALL_SIGN(sys_waitpid),          /* 110 */
+
+    SYSCALL_SIGN(sys_timer_create),
+    SYSCALL_SIGN(sys_timer_delete),
+    SYSCALL_SIGN(sys_timer_start),
+    SYSCALL_SIGN(sys_timer_stop),
+    SYSCALL_SIGN(sys_timer_control),  /* 115 */
+    SYSCALL_SIGN(sys_getcwd),
+    SYSCALL_SIGN(sys_chdir),
+    SYSCALL_SIGN(sys_unlink),
+    SYSCALL_SIGN(sys_mkdir),
+    SYSCALL_SIGN(sys_rmdir),          /* 120 */
+    SYSCALL_SIGN(sys_getdents),
+    SYSCALL_SIGN(sys_get_errno),
 #ifdef ARCH_MM_MMU
-    (void *)sys_set_thread_area,
-    (void *)sys_set_tid_address,
+    SYSCALL_SIGN(sys_set_thread_area),
+    SYSCALL_SIGN(sys_set_tid_address),
 #else
-    (void *)sys_notimpl,
-    (void *)sys_notimpl,
+    SYSCALL_SIGN(sys_notimpl),
+    SYSCALL_SIGN(sys_notimpl),
 #endif
-    (void *)sys_access,         /* 125 */
-    (void *)sys_pipe,
-    (void *)sys_clock_settime,
-    (void *)sys_clock_gettime,
-    (void *)sys_clock_getres,
-    SYSCALL_USPACE(sys_clone),           /* 130 */
-    SYSCALL_USPACE(sys_futex),
-    SYSCALL_USPACE(sys_pmutex),
-    (void *)sys_dup,
-    (void *)sys_dup2,
-    (void *)sys_rename,         /* 135 */
-    SYSCALL_USPACE(sys_fork),
-    SYSCALL_USPACE(sys_execve),
-    SYSCALL_USPACE(sys_vfork),
-    (void *)sys_gettid,
-    (void *)sys_prlimit64,      /* 140 */
-    (void *)sys_getrlimit,
-    (void *)sys_setrlimit,
-    (void *)sys_setsid,
-    (void *)sys_getrandom,
-    (void *)sys_notimpl,    // (void *)sys_readlink     /* 145 */
-    SYSCALL_USPACE(sys_mremap),
-    SYSCALL_USPACE(sys_madvise),
-    (void *)sys_sched_setparam,
-    (void *)sys_sched_getparam,
-    (void *)sys_sched_get_priority_max,
-    (void *)sys_sched_get_priority_min,
-    (void *)sys_sched_setscheduler,
-    (void *)sys_sched_getscheduler,
-    (void *)sys_setaffinity,
-    (void *)sys_fsync
+    SYSCALL_SIGN(sys_access),         /* 125 */
+    SYSCALL_SIGN(sys_pipe),
+    SYSCALL_SIGN(sys_clock_settime),
+    SYSCALL_SIGN(sys_clock_gettime),
+    SYSCALL_SIGN(sys_clock_getres),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_clone)),           /* 130 */
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_futex)),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_pmutex)),
+    SYSCALL_SIGN(sys_dup),
+    SYSCALL_SIGN(sys_dup2),
+    SYSCALL_SIGN(sys_rename),         /* 135 */
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_fork)),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_execve)),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_vfork)),
+    SYSCALL_SIGN(sys_gettid),
+    SYSCALL_SIGN(sys_prlimit64),      /* 140 */
+    SYSCALL_SIGN(sys_getrlimit),
+    SYSCALL_SIGN(sys_setrlimit),
+    SYSCALL_SIGN(sys_setsid),
+    SYSCALL_SIGN(sys_getrandom),
+    SYSCALL_SIGN(sys_notimpl),    // SYSCALL_SIGN(sys_readlink)     /* 145 */
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_mremap)),
+    SYSCALL_USPACE(SYSCALL_SIGN(sys_madvise)),
+    SYSCALL_SIGN(sys_sched_setparam),
+    SYSCALL_SIGN(sys_sched_getparam),
+    SYSCALL_SIGN(sys_sched_get_priority_max),
+    SYSCALL_SIGN(sys_sched_get_priority_min),
+    SYSCALL_SIGN(sys_sched_setscheduler),
+    SYSCALL_SIGN(sys_sched_getscheduler),
+    SYSCALL_SIGN(sys_setaffinity),
+    SYSCALL_SIGN(sys_fsync)
 };
 
 const void *lwp_get_sys_api(rt_uint32_t number)
@@ -4322,11 +4325,32 @@ const void *lwp_get_sys_api(rt_uint32_t number)
     else
     {
         number -= 1;
-        if (number < sizeof(func_table) / sizeof(func_table[0]))
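+        /* func_table interleaves (handler, name) pairs, so it holds twice
+         * as many pointers as there are syscalls */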
+        if (number < sizeof(func_table) / sizeof(func_table[0]) / 2)
         {
-            func = func_table[number];
+            func = func_table[number << 1];
         }
     }
 
     return func;
 }
+
+const char *lwp_get_syscall_name(rt_uint32_t number)
+{
+    const char *name = "sys_notimpl";
+
+    if (number == 0xff)
+    {
+        name = "sys_log";
+    }
+    else
+    {
+        number -= 1;
+        if (number < sizeof(func_table) / sizeof(func_table[0]) / 2)
+        {
+            name = (char*)func_table[(number << 1) + 1];
+        }
+    }
+
+    // skip sys_
+    return name + 4;
+}

+ 3 - 0
components/lwp/lwp_syscall.h

@@ -35,6 +35,9 @@ typedef uint32_t id_t;          /* may contain pid, uid or gid */
 #define	PRIO_PGRP	    1
 #define	PRIO_USER	    2
 
+const char *lwp_get_syscall_name(rt_uint32_t number);
+const void *lwp_get_sys_api(rt_uint32_t number);
+
 void sys_exit(int value);
 ssize_t sys_read(int fd, void *buf, size_t nbyte);
 ssize_t sys_write(int fd, const void *buf, size_t nbyte);

+ 32 - 0
components/lwp/syscall_data.h

@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-11-10     RT-Thread    The first version
+ */
+#ifndef __SYSCALL_DATA_H__
+#define __SYSCALL_DATA_H__
+
+#include <rtthread.h>
+
+/**
+ * @brief signature for a syscall, used to locate syscall metadata.
+ *
+ * Unlike Linux, we don't allocate a dedicated ELF section, since
+ * initializing the necessary data by iterating over that section
+ * would increase system boot time. Instead we place a signature
+ * pointer right after each syscall entry in the syscall table, so
+ * that every syscall's metadata is easy to locate from its id.
+ *
+ * TODO: currently this adds a dummy pointer to the syscall name.
+ * Once metadata is added in front of every syscall definition,
+ * this should be replaced by a pointer to that structure.
+ */
+#define SYSCALL_SIGN(func) \
+    (void *)func,          \
+        RT_STRINGIFY(func)
+
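+/*
+ * Illustration (not part of the macro contract): SYSCALL_SIGN(sys_read)
+ * expands to the two consecutive table slots
+ *     (void *)sys_read, "sys_read"
+ * so for the k-th syscall in the table, slot 2k holds the handler and
+ * slot 2k + 1 holds its printable name.
+ */
+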
+#endif /* __SYSCALL_DATA_H__ */

+ 5 - 0
include/rtdef.h

@@ -118,6 +118,11 @@ typedef rt_base_t                       rt_off_t;       /**< Type for offset */
 #define __ARMCC_GNUC__
 #endif
 
+/* Common Utilities */
+
+#define _RT_STRINGIFY(x...)	#x
+#define RT_STRINGIFY(x...) _RT_STRINGIFY(x)
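+/* The two-level definition lets macro arguments expand before they are
+ * stringified; e.g. RT_STRINGIFY(sys_read) yields the string literal
+ * "sys_read". */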
+
 /* Compiler Related Definitions */
 #if defined(__CC_ARM) || defined(__CLANG_ARM)           /* ARM Compiler */
     #include <stdarg.h>

+ 71 - 66
libcpu/risc-v/t-head/c906/cache.c

@@ -7,6 +7,9 @@
  * Date           Author       Notes
  * 2021-01-29     lizhirui     first version
  * 2021-11-05     JasonHu      add c906 cache inst
+ * 2022-11-09     WangXiaoyao  Support cache coherence operations;
+ *                             improve portability and make
+ *                             no assumption on undefined behavior
  */
 
 #include <rthw.h>
@@ -14,6 +17,9 @@
 #include <board.h>
 #include <riscv.h>
 
+#include "opcode.h"
+#include "cache.h"
+
 #define L1_CACHE_BYTES (64)
 
 /**
@@ -25,60 +31,35 @@ static void dcache_inv_range(unsigned long start, unsigned long end) __attribute
 static void dcache_wbinv_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
 static void icache_inv_range(unsigned long start, unsigned long end) __attribute__((optimize("O0")));
 
+#define CACHE_OP_RS1 %0
+#define CACHE_OP_RANGE(instr)                                  \
+    {                                                          \
+        register rt_ubase_t i = start & ~(L1_CACHE_BYTES - 1); \
+        for (; i < end; i += L1_CACHE_BYTES)                   \
+        {                                                      \
+            __asm__ volatile(instr ::"r"(i)                    \
+                             : "memory");                      \
+        }                                                      \
+    }
+
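+/*
+ * Illustration: with a toolchain that lacks the T-HEAD cache extension,
+ * dcache_wb_range() below emits, per cache line,
+ *     __asm__ volatile(".insn r 0x0b,0x0,0x1,x0,%0,x4" :: "r"(i) : "memory");
+ * whereas OPC_DCACHE_CVA(CACHE_OP_RS1) becomes plain "dcache.cva %0" when
+ * _TOOLCHAIN_SUPP_XTHEADE_ISA_ is defined.
+ */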
 static void dcache_wb_range(unsigned long start, unsigned long end)
 {
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-    {
-        /* asm volatile("dcache.cva %0\n"::"r"(i):"memory"); */
-        /*
-         * compiler always use a5 = i.
-         * a6 not used, so we use a6 here.
-         */
-        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
-        asm volatile(".long 0x0257800b");               /* dcache.cva a6 */
-    }
-    asm volatile(".long 0x01b0000b");   /* sync.is */
+    CACHE_OP_RANGE(OPC_DCACHE_CVA(CACHE_OP_RS1));
 }
 
 static void dcache_inv_range(unsigned long start, unsigned long end)
 {
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-    {
-        /* asm volatile("dcache.iva %0\n"::"r"(i):"memory"); */
-        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
-        asm volatile(".long 0x0268000b");               /* dcache.iva a6 */
-    }
-    asm volatile(".long 0x01b0000b");
+    CACHE_OP_RANGE(OPC_DCACHE_IVA(CACHE_OP_RS1));
 }
 
 static void dcache_wbinv_range(unsigned long start, unsigned long end)
 {
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-    {
-        /* asm volatile("dcache.civa %0\n"::"r"(i):"memory"); */
-        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
-        asm volatile(".long 0x0278000b");               /* dcache.civa a6 */
-    }
-    asm volatile(".long 0x01b0000b");
+    CACHE_OP_RANGE(OPC_DCACHE_CIVA(CACHE_OP_RS1));
 }
 
 static void icache_inv_range(unsigned long start, unsigned long end)
 {
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-    {
-        /* asm volatile("icache.iva %0\n"::"r"(i):"memory"); */
-        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
-        asm volatile(".long 0x0308000b");               /* icache.iva a6 */
-    }
-    asm volatile(".long 0x01b0000b");
+    CACHE_OP_RANGE(OPC_ICACHE_IVA(CACHE_OP_RS1));
 }
 
 rt_inline rt_uint32_t rt_cpu_icache_line_size(void)
@@ -91,65 +72,89 @@ rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
     return L1_CACHE_BYTES;
 }
 
-void rt_hw_cpu_icache_invalidate(void *addr,int size)
+void rt_hw_cpu_icache_invalidate_local(void *addr, int size)
 {
     icache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
+    rt_hw_cpu_sync_i();
 }
 
-void rt_hw_cpu_dcache_invalidate(void *addr,int size)
+void rt_hw_cpu_dcache_invalidate_local(void *addr, int size)
 {
     dcache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
+    rt_hw_cpu_sync();
 }
 
-void rt_hw_cpu_dcache_clean(void *addr,int size)
+void rt_hw_cpu_dcache_clean_local(void *addr, int size)
 {
     dcache_wb_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
+    rt_hw_cpu_sync();
 }
 
-void rt_hw_cpu_dcache_clean_flush(void *addr,int size)
+void rt_hw_cpu_dcache_clean_invalidate_local(void *addr, int size)
 {
     dcache_wbinv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
+    rt_hw_cpu_sync();
 }
 
-void rt_hw_cpu_icache_ops(int ops,void *addr,int size)
+/**
+ * =====================================================
+ * Architecture Independent API
+ * =====================================================
+ */
+
+void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
 {
-    if(ops == RT_HW_CACHE_INVALIDATE)
+    if (ops == RT_HW_CACHE_INVALIDATE)
     {
-        rt_hw_cpu_icache_invalidate(addr, size);
+        rt_hw_cpu_icache_invalidate_local(addr, size);
     }
 }
 
-void rt_hw_cpu_dcache_ops(int ops,void *addr,int size)
+void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
 {
-    if(ops == RT_HW_CACHE_FLUSH)
+    if (ops == RT_HW_CACHE_FLUSH)
     {
-        rt_hw_cpu_dcache_clean(addr, size);
+        rt_hw_cpu_dcache_clean_local(addr, size);
     }
     else
     {
-        rt_hw_cpu_dcache_invalidate(addr, size);
+        rt_hw_cpu_dcache_invalidate_local(addr, size);
     }
 }
 
-void rt_hw_cpu_dcache_clean_all(void)
-{
-    /* asm volatile("dcache.call\n":::"memory"); */
-    asm volatile(".long 0x0010000b\n":::"memory");
-}
-
-void rt_hw_cpu_dcache_invalidate_all(void)
+void rt_hw_sync_cache_local(void *addr, int size)
 {
-    /* asm volatile("dcache.ciall\n":::"memory"); */
-    asm volatile(".long 0x0030000b\n":::"memory");
+    rt_hw_cpu_dcache_clean_local(addr, size);
+    rt_hw_cpu_icache_invalidate_local(addr, size);
 }
 
-void rt_hw_cpu_icache_invalidate_all(void)
-{
-    /* asm volatile("icache.iall\n":::"memory"); */
-    asm volatile(".long 0x0100000b\n":::"memory");
-}
+#ifdef RT_USING_LWP
+#include <lwp_arch.h>
+#define ICACHE (1 << 0)
+#define DCACHE (1 << 1)
+#define BCACHE (ICACHE | DCACHE)
 
+/**
+ * TODO moving syscall to kernel
+ */
 int sys_cacheflush(void *addr, int size, int cache)
 {
-    return 0;
+    /* must in user space */
+    if ((size_t)addr >= USER_VADDR_START && (size_t)addr + size < USER_VADDR_TOP)
+    {
+        /**
+         * we do NOT check the 'cache' argument for invalid values
+         */
+        if ((cache & DCACHE) != 0)
+        {
+            rt_hw_cpu_dcache_clean_invalidate_local(addr, size);
+        }
+        if ((cache & ICACHE) != 0)
+        {
+            rt_hw_cpu_icache_invalidate_local(addr, size);
+        }
+        return 0;
+    }
+    return -RT_ERROR;
 }
+#endif

+ 87 - 4
libcpu/risc-v/t-head/c906/cache.h

@@ -11,13 +11,96 @@
 #ifndef CACHE_H__
 #define CACHE_H__
 
-void rt_hw_cpu_dcache_clean(void *addr,int size);
-void rt_hw_cpu_icache_invalidate(void *addr,int size);
-void rt_hw_cpu_dcache_invalidate(void *addr,int size);
+#include "opcode.h"
+
+#ifndef ALWAYS_INLINE
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#endif
+
+#define rt_hw_cpu_sync() __asm__ volatile(OPC_SYNC:: \
+                                              : "memory")
+
+#define rt_hw_cpu_sync_i() __asm__ volatile(OPC_SYNC_I:: \
+                                                : "memory");
+
+/**
+ * ========================================
+ * Local cpu cache maintenance operations
+ * ========================================
+ */
+
+void rt_hw_cpu_dcache_clean_local(void *addr, int size);
+void rt_hw_cpu_dcache_invalidate_local(void *addr, int size);
+void rt_hw_cpu_dcache_clean_invalidate_local(void *addr, int size);
+
+void rt_hw_cpu_icache_invalidate_local(void *addr, int size);
+
+ALWAYS_INLINE void rt_hw_cpu_dcache_clean_all_local(void)
+{
+    __asm__ volatile(OPC_DCACHE_CALL ::
+                         : "memory");
+    rt_hw_cpu_sync();
+}
+
+ALWAYS_INLINE void rt_hw_cpu_dcache_invalidate_all_local(void)
+{
+    __asm__ volatile(OPC_DCACHE_IALL ::
+                         : "memory");
+    rt_hw_cpu_sync();
+}
+
+ALWAYS_INLINE void rt_hw_cpu_dcache_clean_invalidate_all_local(void)
+{
+    __asm__ volatile(OPC_DCACHE_CIALL ::
+                         : "memory");
+    rt_hw_cpu_sync();
+}
+
+ALWAYS_INLINE void rt_hw_cpu_icache_invalidate_all_local(void)
+{
+    __asm__ volatile(OPC_ICACHE_IALL ::
+                         : "memory");
+    rt_hw_cpu_sync_i();
+}
+
+/**
+ * ========================================
+ * Multi-core cache maintenance operations
+ * ========================================
+ */
+
+#ifdef RT_USING_SMP
+#error "TODO: cache maintainence have not ported to RISC-V SMP yet"
+
+void rt_hw_cpu_dcache_clean(void *addr, int size);
+void rt_hw_cpu_dcache_invalidate(void *addr, int size);
+void rt_hw_cpu_dcache_clean_invalidate(void *addr, int size);
 
-void rt_hw_cpu_dcache_clean_flush(void *addr,int size);
 void rt_hw_cpu_dcache_clean_all(void);
 void rt_hw_cpu_dcache_invalidate_all(void);
+void rt_hw_cpu_dcache_clean_invalidate_all(void);
+
+void rt_hw_cpu_icache_invalidate(void *addr, int size);
 void rt_hw_cpu_icache_invalidate_all(void);
 
+#else /* !RT_USING_SMP */
+
+#define rt_hw_cpu_dcache_clean rt_hw_cpu_dcache_clean_local
+#define rt_hw_cpu_dcache_invalidate rt_hw_cpu_dcache_invalidate_local
+#define rt_hw_cpu_dcache_clean_invalidate rt_hw_cpu_dcache_clean_invalidate_local
+
+#define rt_hw_cpu_dcache_clean_all rt_hw_cpu_dcache_clean_all_local
+#define rt_hw_cpu_dcache_invalidate_all rt_hw_cpu_dcache_invalidate_all_local
+#define rt_hw_cpu_dcache_clean_invalidate_all rt_hw_cpu_dcache_clean_invalidate_all_local
+
+#define rt_hw_cpu_icache_invalidate rt_hw_cpu_icache_invalidate_local
+#define rt_hw_cpu_icache_invalidate_all rt_hw_cpu_icache_invalidate_all_local
+
+#endif /* RT_USING_SMP */
+
+/**
+ * @brief Synchronize cache to point of coherency
+ */
+void rt_hw_sync_cache_local(void *addr, int size);
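+
+/*
+ * Typical use (illustrative): after writing instructions into memory that
+ * will later be executed, e.g. the sigreturn trampoline copied onto the
+ * user stack in lwp_gcc.S, call
+ *     rt_hw_sync_cache_local(dst, size);
+ * so the cleaned D-cache lines reach memory and stale I-cache lines are
+ * invalidated before that code runs.
+ */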
+
 #endif /* CACHE_H__ */

+ 2 - 1
libcpu/risc-v/t-head/c906/cpuport.h

@@ -12,6 +12,7 @@
 #define CPUPORT_H__
 
 #include <rtconfig.h>
+#include <opcode.h>
 
 /* bytes of register width  */
 #ifdef ARCH_CPU_64BIT
@@ -48,7 +49,7 @@ rt_inline void rt_hw_dmb()
 
 rt_inline void rt_hw_isb()
 {
-    asm volatile("fence.i":::"memory");
+    asm volatile(OPC_FENCE_I:::"memory");
 }
 
 int rt_hw_cpu_id(void);

+ 81 - 0
libcpu/risc-v/t-head/c906/opcode.h

@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-11-09     WangXiaoyao  Add portable asm support
+ */
+#ifndef __OPCODE_H__
+#define __OPCODE_H__
+
+/**
+ * @brief binary opcode pseudo operations
+ * Used to bypass toolchain restrictions on extension ISAs
+ *
+ * WARNING: Xuantie ISAs are not opcode-compatible with each other.
+ * Porting this file is painful and must be done carefully.
+ */
+
+/**
+ * @brief RISC-V instruction formats
+ */
+
+/** 
+ * R type: .insn r opcode6, func3, func7, rd, rs1, rs2
+ * 
+ * +-------+-----+-----+-------+----+---------+
+ * | func7 | rs2 | rs1 | func3 | rd | opcode6 |
+ * +-------+-----+-----+-------+----+---------+
+ * 31      25    20    15      12   7        0
+ */
+#define __OPC_INSN_FORMAT_R(opcode, func3, func7, rd, rs1, rs2) \
+    ".insn r "RT_STRINGIFY(opcode)","RT_STRINGIFY(func3)","RT_STRINGIFY(func7)","RT_STRINGIFY(rd)","RT_STRINGIFY(rs1)","RT_STRINGIFY(rs2)
+
+/**
+ * @brief Xuantie T-HEAD extension ISA format
+ * Compatible to Xuantie C906R2S1 user manual v06
+ */
+#define __OPC_INSN_FORMAT_CACHE(func7, rs2, rs1) \
+    __OPC_INSN_FORMAT_R(0x0b, 0x0, func7, x0, rs1, rs2)
+
+#ifdef _TOOLCHAIN_SUPP_XTHEADE_ISA_
+#define OPC_SYNC                "sync"
+#define OPC_SYNC_I              "sync.i"
+
+#define OPC_DCACHE_CALL         "dcache.call"
+#define OPC_DCACHE_IALL         "dcache.iall"
+#define OPC_DCACHE_CIALL        "dcache.ciall"
+
+#define OPC_ICACHE_IALL         "icache.iall"
+
+#define OPC_DCACHE_CVA(rs1)     "dcache.cva "RT_STRINGIFY(rs1)
+#define OPC_DCACHE_IVA(rs1)     "dcache.iva "RT_STRINGIFY(rs1)
+#define OPC_DCACHE_CIVA(rs1)    "dcache.civa "RT_STRINGIFY(rs1)
+
+#define OPC_ICACHE_IVA(rs1)     "icache.iva "RT_STRINGIFY(rs1)
+#else /* !_TOOLCHAIN_SUPP_XTHEADE_ISA_ */
+#define OPC_SYNC                ".long 0x0180000B"
+#define OPC_SYNC_I              ".long 0x01A0000B"
+
+#define OPC_DCACHE_CALL         ".long 0x0010000B"
+#define OPC_DCACHE_IALL         ".long 0x0020000B"
+#define OPC_DCACHE_CIALL        ".long 0x0030000B"
+
+#define OPC_ICACHE_IALL         ".long 0x0100000B"
+
+#define OPC_DCACHE_CVA(rs1)     __OPC_INSN_FORMAT_CACHE(0x1, x4, rs1)
+#define OPC_DCACHE_IVA(rs1)     __OPC_INSN_FORMAT_CACHE(0x1, x6, rs1)
+#define OPC_DCACHE_CIVA(rs1)    __OPC_INSN_FORMAT_CACHE(0x1, x7, rs1)
+
+#define OPC_ICACHE_IVA(rs1)     __OPC_INSN_FORMAT_CACHE(0x1, x16, rs1)
+#endif /* _TOOLCHAIN_SUPP_XTHEADE_ISA_ */
+
+#ifdef _TOOLCHAIN_SUPP_ZIFENCEI_ISA_
+#define OPC_FENCE_I             "fence.i"
+#else /* !_TOOLCHAIN_SUPP_ZIFENCEI_ISA_ */
+#define OPC_FENCE_I             ".long 0x0000100F"
+#endif /* _TOOLCHAIN_SUPP_ZIFENCEI_ISA_ */
+
+#endif /* __OPCODE_H__ */

+ 24 - 23
libcpu/risc-v/t-head/c906/syscall_c.c

@@ -6,13 +6,14 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-02-03     lizhirui     first version
+ * 2022-11-10     WangXiaoyao  Add readable syscall tracing
  */
 
 #include <rthw.h>
 #include <rtthread.h>
 
-#define DBG_LEVEL DBG_WARNING
-//#define DBG_LEVEL DBG_INFO
+#define DBG_TAG "syscall"
+#define DBG_LVL DBG_WARNING
 #include <rtdbg.h>
 
 #include <stdint.h>
@@ -21,39 +22,39 @@
 #include <lwp_mm_area.h>
 #include <lwp_user_mm.h>
 
-#include <stdio.h>
-
 #include "riscv_mmu.h"
 #include "stack.h"
 
-typedef rt_size_t (*syscallfunc_t)(rt_size_t,rt_size_t,rt_size_t,rt_size_t,rt_size_t,rt_size_t,rt_size_t);
-syscallfunc_t lwp_get_sys_api(uint32_t);
+typedef rt_size_t (*syscallfunc_t)(rt_size_t, rt_size_t, rt_size_t, rt_size_t, rt_size_t, rt_size_t, rt_size_t);
 
 void syscall_handler(struct rt_hw_stack_frame *regs)
 {
-    if(regs -> a7 == 0)
-    {
-        rt_kprintf("syscall id = 0!\n");
-        while(1);
-    }
+    const char *syscall_name;
+    int syscallid = regs->a7;
 
-    if(regs -> a7 == 0xdeadbeef)
+    if (syscallid == 0)
     {
-        rt_kprintf("syscall id = 0xdeadbeef\n");
-        while(1);
+        LOG_E("syscall id = 0!\n");
+        while (1)
+            ;
     }
 
-    syscallfunc_t syscallfunc = (syscallfunc_t)lwp_get_sys_api(regs -> a7);
+    syscallfunc_t syscallfunc = (syscallfunc_t)lwp_get_sys_api(syscallid);
 
-    if(syscallfunc == RT_NULL)
+    if (syscallfunc == RT_NULL)
     {
-        rt_kprintf("unsupported syscall!\n");
-        while(1);
+        LOG_E("unsupported syscall!\n");
+        sys_exit(-1);
     }
 
-    LOG_I("\033[36msyscall id = %d,arg0 = 0x%p,arg1 = 0x%p,arg2 = 0x%p,arg3 = 0x%p,arg4 = 0x%p,arg5 = 0x%p,arg6 = 0x%p\n\033[37m",regs -> a7,regs -> a0,regs -> a1,regs -> a2,regs -> a3,regs -> a4,regs -> a5,regs -> a6);
-    regs -> a0 = syscallfunc(regs -> a0,regs -> a1,regs -> a2,regs -> a3,regs -> a4,regs -> a5,regs -> a6);
-    regs -> a7 = 0;
-    regs -> epc += 4;//skip ecall instruction
-    LOG_I("\033[36msyscall deal ok,ret = 0x%p\n\033[37m",regs -> a0);
+#if DBG_LVL >= DBG_INFO
+    syscall_name = lwp_get_syscall_name(syscallid);
+#endif
+
+    LOG_I("[0x%lx] %s(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx)", rt_thread_self(), syscall_name,
+        regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
+    regs->a0 = syscallfunc(regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
+    regs->a7 = 0;
+    regs->epc += 4; // skip ecall instruction
+    LOG_I("[0x%lx] %s ret: 0x%lx", rt_thread_self(), syscall_name, regs->a0);
 }

+ 3 - 2
libcpu/risc-v/virt64/backtrace.c

@@ -58,13 +58,13 @@ void rt_hw_backtrace(rt_uint32_t *ffp, rt_ubase_t sepc)
         }
 
         ra = fp - 1;
-        if (!rt_hw_mmu_v2p(&mmu_info, ra) || *ra < vas || *ra > vae)
+        if (!_rt_hw_mmu_v2p(&mmu_info, ra) || *ra < vas || *ra > vae)
             break;
 
         rt_kprintf(" %p", *ra - 0x04);
 
         fp = fp - 2;
-        if (!rt_hw_mmu_v2p(&mmu_info, fp))
+        if (!_rt_hw_mmu_v2p(&mmu_info, fp))
             break;
         fp = (rt_ubase_t *)(*fp);
         if (!fp)
@@ -76,6 +76,7 @@ void rt_hw_backtrace(rt_uint32_t *ffp, rt_ubase_t sepc)
 
 static void _assert_backtrace_cb(const char *ex, const char *func, rt_size_t line)
 {
+    rt_hw_interrupt_disable();
     rt_kprintf("(%s) assertion failed at function:%s, line number:%d \n", ex, func, line);
 
     rt_hw_backtrace(0, 0);

+ 12 - 32
libcpu/risc-v/virt64/cache.c

@@ -12,6 +12,7 @@
 #include <rtdef.h>
 #include <board.h>
 #include <riscv.h>
+#include <cache.h>
 
 rt_inline rt_uint32_t rt_cpu_icache_line_size()
 {
@@ -23,59 +24,38 @@ rt_inline rt_uint32_t rt_cpu_dcache_line_size()
     return 0;
 }
 
-void rt_hw_cpu_icache_invalidate(void *addr,int size)
+void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
 {
-
-}
-
-void rt_hw_cpu_dcache_invalidate(void *addr,int size)
-{
-
-}
-
-void rt_hw_cpu_dcache_clean(void *addr,int size)
-{
-
-}
-
-void rt_hw_cpu_icache_ops(int ops,void *addr,int size)
-{
-    if(ops == RT_HW_CACHE_INVALIDATE)
+    if (ops == RT_HW_CACHE_INVALIDATE)
     {
-        rt_hw_cpu_icache_invalidate(addr,size);
+        rt_hw_cpu_icache_invalidate(addr, size);
     }
 }
 
-void rt_hw_cpu_dcache_ops(int ops,void *addr,int size)
+void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
 {
-    if(ops == RT_HW_CACHE_FLUSH)
+    if (ops == RT_HW_CACHE_FLUSH)
     {
-        rt_hw_cpu_dcache_clean(addr,size);
+        rt_hw_cpu_dcache_clean(addr, size);
     }
     else
     {
-        rt_hw_cpu_dcache_invalidate(addr,size);
+        rt_hw_cpu_dcache_invalidate(addr, size);
     }
 }
 
-void rt_hw_cpu_dcache_flush_all()
-{
-
-}
-
-void rt_hw_cpu_icache_invalidate_all()
+rt_base_t rt_hw_cpu_icache_status_local()
 {
-
+    return 0;
 }
 
-rt_base_t rt_hw_cpu_icache_status()
+rt_base_t rt_hw_cpu_dcache_status()
 {
     return 0;
 }
 
-rt_base_t rt_hw_cpu_dcache_status()
+void rt_hw_sync_cache_local(void *addr, int size)
 {
-    return 0;
 }
 
 int sys_cacheflush(void *addr, int size, int cache)

+ 59 - 0
libcpu/risc-v/virt64/cache.h

@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-11-09     RT-Thread    The first version
+ */
+#ifndef __CACHE_H__
+#define __CACHE_H__
+
+#ifndef ALWAYS_INLINE
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#endif
+
+/**
+ * @brief These APIs may not be supported by a particular architecture,
+ * but we include them in all cases to stay 'general purpose'
+ */
+
+ALWAYS_INLINE void rt_hw_cpu_dcache_clean_local(void *addr, int size) {}
+ALWAYS_INLINE void rt_hw_cpu_dcache_invalidate_local(void *addr, int size) {}
+ALWAYS_INLINE void rt_hw_cpu_dcache_clean_invalidate_local(void *addr, int size) {}
+
+ALWAYS_INLINE void rt_hw_cpu_dcache_clean_all_local() {}
+ALWAYS_INLINE void rt_hw_cpu_dcache_invalidate_all_local(void) {}
+ALWAYS_INLINE void rt_hw_cpu_dcache_clean_invalidate_all_local(void) {}
+
+ALWAYS_INLINE void rt_hw_cpu_icache_invalidate_local(void *addr, int size) {}
+ALWAYS_INLINE void rt_hw_cpu_icache_invalidate_all_local() {}
+
+/**
+ * @brief Multi-core
+ */
+
+#define rt_hw_cpu_dcache_clean rt_hw_cpu_dcache_clean_local
+#define rt_hw_cpu_dcache_invalidate rt_hw_cpu_dcache_invalidate_local
+#define rt_hw_cpu_dcache_clean_invalidate rt_hw_cpu_dcache_clean_invalidate_local
+
+#define rt_hw_cpu_dcache_clean_all rt_hw_cpu_dcache_clean_all_local
+#define rt_hw_cpu_dcache_invalidate_all rt_hw_cpu_dcache_invalidate_all_local
+#define rt_hw_cpu_dcache_clean_invalidate_all rt_hw_cpu_dcache_clean_invalidate_all_local
+
+#define rt_hw_cpu_icache_invalidate rt_hw_cpu_icache_invalidate_local
+#define rt_hw_cpu_icache_invalidate_all rt_hw_cpu_icache_invalidate_all_local
+
+/** instruction barrier */
+void rt_hw_cpu_sync(void);
+
+/**
+ * @brief local cpu icache & dcache synchronization
+ *
+ * @param addr
+ * @param size
+ */
+void rt_hw_sync_cache_local(void *addr, int size);
+
+#endif /* __CACHE_H__ */

+ 22 - 17
libcpu/risc-v/virt64/cpuport.c

@@ -36,6 +36,27 @@ volatile rt_ubase_t rt_interrupt_to_thread = 0;
  */
 volatile rt_ubase_t rt_thread_switch_interrupt_flag = 0;
 
+void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
+{
+    (*--sp) = 0;                                /* tp */
+    (*--sp) = ra;                               /* ra */
+    (*--sp) = 0;                                /* s0(fp) */
+    (*--sp) = 0;                                /* s1 */
+    (*--sp) = 0;                                /* s2 */
+    (*--sp) = 0;                                /* s3 */
+    (*--sp) = 0;                                /* s4 */
+    (*--sp) = 0;                                /* s5 */
+    (*--sp) = 0;                                /* s6 */
+    (*--sp) = 0;                                /* s7 */
+    (*--sp) = 0;                                /* s8 */
+    (*--sp) = 0;                                /* s9 */
+    (*--sp) = 0;                                /* s10 */
+    (*--sp) = 0;                                /* s11 */
+    (*--sp) = sstatus;                          /* sstatus */
+
+    return (void *)sp;
+}
+
 /**
  * This function will initialize thread stack, we assuming
  * when scheduler restore this new thread, context will restore
@@ -64,23 +85,7 @@ rt_uint8_t *rt_hw_stack_init(void *tentry,
 
     /* compatible to RESTORE_CONTEXT */
     extern void _rt_thread_entry(void);
-    (*--sp) = 0;                                /* tp */
-    (*--sp) = (rt_ubase_t)_rt_thread_entry;     /* ra */
-    (*--sp) = 0;                                /* s0(fp) */
-    (*--sp) = 0;                                /* s1 */
-    (*--sp) = 0;                                /* s2 */
-    (*--sp) = 0;                                /* s3 */
-    (*--sp) = 0;                                /* s4 */
-    (*--sp) = 0;                                /* s5 */
-    (*--sp) = 0;                                /* s6 */
-    (*--sp) = 0;                                /* s7 */
-    (*--sp) = 0;                                /* s8 */
-    (*--sp) = 0;                                /* s9 */
-    (*--sp) = 0;                                /* s10 */
-    (*--sp) = 0;                                /* s11 */
-    (*--sp) = K_SSTATUS_DEFAULT;                /* sstatus */
-
-    return (rt_uint8_t *)sp;
+    return (rt_uint8_t *)_rt_hw_stack_init(sp, (rt_ubase_t)_rt_thread_entry, K_SSTATUS_DEFAULT);
 }
 
 /*
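The stack-frame layout is now built by _rt_hw_stack_init(), and rt_hw_stack_init() above simply passes it the kernel entry trampoline and K_SSTATUS_DEFAULT. Presumably the point of the split is that other frame builders can reuse the same layout with a different return address and sstatus; a hedged sketch of such a caller, where the entry symbol and the user-mode sstatus constant are placeholders rather than names from this patch:

```c
/* Hypothetical additional caller of _rt_hw_stack_init(); _user_thread_entry and
 * K_SSTATUS_USER are illustrative placeholders, not symbols introduced here. */
extern void _user_thread_entry(void);
#define K_SSTATUS_USER 0UL   /* placeholder for a user-mode sstatus value */

rt_uint8_t *example_user_stack_init(rt_ubase_t *sp)
{
    return (rt_uint8_t *)_rt_hw_stack_init(sp,
                                           (rt_ubase_t)_user_thread_entry,
                                           K_SSTATUS_USER);
}
```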

+ 1 - 1
libcpu/risc-v/virt64/cpuport.h

@@ -62,7 +62,7 @@ rt_inline void rt_hw_dmb()
 
 rt_inline void rt_hw_isb()
 {
-    asm volatile("fence.i":::"memory");
+    asm volatile(".long 0x0000100F":::"memory");
 }
 
 #endif
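0x0000100F is the fixed 32-bit encoding of `fence.i`, so rt_hw_isb() behaves exactly as before; emitting the raw word only avoids assembler errors with toolchains whose `-march` string does not include Zifencei. If the build could rely on the compiler's feature macro (an assumption, not something this patch uses), the two spellings could be selected like this:

```c
rt_inline void rt_hw_isb(void)
{
#ifdef __riscv_zifencei   /* set by GCC/Clang when Zifencei is enabled in -march */
    asm volatile("fence.i" ::: "memory");
#else
    /* raw encoding of fence.i, accepted even when the mnemonic is rejected */
    asm volatile(".long 0x0000100F" ::: "memory");
#endif
}
```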

+ 2 - 4
libcpu/risc-v/virt64/mmu.c

@@ -15,6 +15,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <lwp_mm.h>
+#include <cache.h>
 
 #define DBG_TAG "mmu"
 #define DBG_LVL DBG_INFO
@@ -25,9 +26,6 @@
 #include "mmu.h"
 
 void *current_mmu_table = RT_NULL;
-void rt_hw_cpu_icache_invalidate_all();
-void rt_hw_cpu_dcache_flush_all();
-void rt_hw_cpu_dcache_clean(void *addr, rt_size_t size);
 
 volatile rt_ubase_t MMUTable[__SIZE(VPN2_BIT)] __attribute__((aligned(4 * 1024)));
 
@@ -48,7 +46,7 @@ void rt_hw_mmu_switch(void *mmu_table)
     current_mmu_table = mmu_table;
     RT_ASSERT(__CHECKALIGN(mmu_table, PAGE_OFFSET_BIT));
     mmu_set_pagetable((rt_ubase_t)mmu_table);
-    rt_hw_cpu_dcache_flush_all();
+    rt_hw_cpu_dcache_clean_all();
     rt_hw_cpu_icache_invalidate_all();
 }
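rt_hw_mmu_switch() now uses the clean/invalidate names declared in cache.h instead of the removed local prototypes. For orientation, a hypothetical sketch of what switching the root page table involves on RV64 with Sv39; the real mmu_set_pagetable() in this BSP may well differ:

```c
/* Illustrative only: Sv39 satp layout is MODE[63:60] = 8, ASID[59:44], PPN[43:0]. */
static void example_set_pagetable(rt_ubase_t pgtbl_pa)
{
    rt_ubase_t satp = (8UL << 60) | (pgtbl_pa >> 12);   /* MODE = Sv39, ASID = 0 */

    asm volatile("csrw satp, %0" :: "r"(satp) : "memory");
    asm volatile("sfence.vma" ::: "memory");             /* flush stale TLB entries */
}
```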
 

+ 21 - 23
libcpu/risc-v/virt64/sbi.c

@@ -40,8 +40,8 @@
 #include <rtthread.h>
 
 /* SBI Implementation-Specific Definitions */
-#define OPENSBI_VERSION_MAJOR_OFFSET    16
-#define OPENSBI_VERSION_MINOR_MASK  0xFFFF
+#define OPENSBI_VERSION_MAJOR_OFFSET 16
+#define OPENSBI_VERSION_MINOR_MASK 0xFFFF
 
 unsigned long sbi_spec_version;
 unsigned long sbi_impl_id;
@@ -69,8 +69,7 @@ sbi_get_impl_version(void)
     return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_VERSION));
 }
 
-void
-sbi_print_version(void)
+void sbi_print_version(void)
 {
     u_int major;
     u_int minor;
@@ -116,8 +115,7 @@ sbi_print_version(void)
     rt_kprintf("SBI Specification Version: %u.%u\n", major, minor);
 }
 
-void
-sbi_set_timer(uint64_t val)
+void sbi_set_timer(uint64_t val)
 {
     struct sbi_ret ret;
 
@@ -133,8 +131,7 @@ sbi_set_timer(uint64_t val)
     }
 }
 
-void
-sbi_send_ipi(const unsigned long *hart_mask)
+void sbi_send_ipi(const unsigned long *hart_mask)
 {
     struct sbi_ret ret;
 
@@ -151,8 +148,7 @@ sbi_send_ipi(const unsigned long *hart_mask)
     }
 }
 
-void
-sbi_remote_fence_i(const unsigned long *hart_mask)
+void sbi_remote_fence_i(const unsigned long *hart_mask)
 {
     struct sbi_ret ret;
 
@@ -169,8 +165,7 @@ sbi_remote_fence_i(const unsigned long *hart_mask)
     }
 }
 
-void
-sbi_remote_sfence_vma(const unsigned long *hart_mask, unsigned long start, unsigned long size)
+void sbi_remote_sfence_vma(const unsigned long *hart_mask, unsigned long start, unsigned long size)
 {
     struct sbi_ret ret;
 
@@ -188,9 +183,8 @@ sbi_remote_sfence_vma(const unsigned long *hart_mask, unsigned long start, unsig
     }
 }
 
-void
-sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, unsigned long start, unsigned long size,
-                           unsigned long asid)
+void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, unsigned long start, unsigned long size,
+                                unsigned long asid)
 {
     struct sbi_ret ret;
 
@@ -208,8 +202,7 @@ sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, unsigned long start,
     }
 }
 
-int
-sbi_hsm_hart_start(unsigned long hart, unsigned long start_addr, unsigned long priv)
+int sbi_hsm_hart_start(unsigned long hart, unsigned long start_addr, unsigned long priv)
 {
     struct sbi_ret ret;
 
@@ -217,14 +210,12 @@ sbi_hsm_hart_start(unsigned long hart, unsigned long start_addr, unsigned long p
     return (ret.error != 0 ? (int)ret.error : 0);
 }
 
-void
-sbi_hsm_hart_stop(void)
+void sbi_hsm_hart_stop(void)
 {
     (void)SBI_CALL0(SBI_EXT_ID_HSM, SBI_HSM_HART_STOP);
 }
 
-int
-sbi_hsm_hart_status(unsigned long hart)
+int sbi_hsm_hart_status(unsigned long hart)
 {
     struct sbi_ret ret;
 
@@ -233,8 +224,7 @@ sbi_hsm_hart_status(unsigned long hart)
     return (ret.error != 0 ? (int)ret.error : (int)ret.value);
 }
 
-void
-sbi_init(void)
+void sbi_init(void)
 {
     struct sbi_ret sret;
 
@@ -263,3 +253,11 @@ sbi_init(void)
     if (sbi_probe_extension(SBI_EXT_ID_RFNC) != 0)
         has_rfnc_extension = true;
 }
+
+void rt_hw_console_output(const char *str)
+{
+    while (*str)
+    {
+        sbi_console_putchar(*str++);
+    }
+}
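The new rt_hw_console_output() lets rt_kprintf() reach the SBI console right after sbi_init(), before any UART driver is registered. The sbi_console_putchar() used here is the legacy SBI console extension (EID 0x01); a rough stand-alone sketch of such a call, which may differ from the helper actually defined in this file:

```c
/* Hypothetical legacy-SBI putchar; shown only to illustrate the ecall convention. */
static void example_sbi_putchar(int ch)
{
    register unsigned long a0 asm("a0") = (unsigned long)ch;
    register unsigned long a7 asm("a7") = 0x01;   /* legacy SBI_CONSOLE_PUTCHAR EID */

    asm volatile("ecall" : "+r"(a0) : "r"(a7) : "memory");
}
```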

+ 22 - 22
libcpu/risc-v/virt64/syscall_c.c

@@ -6,13 +6,14 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-02-03     lizhirui     first version
+ * 2022-11-10     WangXiaoyao  Add readable syscall tracing
  */
 
 #include <rthw.h>
 #include <rtthread.h>
 
 #define DBG_TAG "syscall"
-#define DBG_LVL DBG_INFO
+#define DBG_LVL DBG_WARNING
 #include <rtdbg.h>
 
 #include <stdint.h>
@@ -21,40 +22,39 @@
 #include <lwp_mm_area.h>
 #include <lwp_user_mm.h>
 
-#include <stdio.h>
-
 #include "riscv_mmu.h"
 #include "stack.h"
 
-typedef rt_size_t (*syscallfunc_t)(rt_size_t,rt_size_t,rt_size_t,rt_size_t,rt_size_t,rt_size_t,rt_size_t);
-syscallfunc_t lwp_get_sys_api(uint32_t);
+typedef rt_size_t (*syscallfunc_t)(rt_size_t, rt_size_t, rt_size_t, rt_size_t, rt_size_t, rt_size_t, rt_size_t);
 
 void syscall_handler(struct rt_hw_stack_frame *regs)
 {
-    if(regs -> a7 == 0)
-    {
-        LOG_E("syscall id = 0!\n");
-        while(1);
-    }
+    const char *syscall_name;
+    int syscallid = regs->a7;
 
-    if(regs -> a7 == 0xdeadbeef)
+    if (syscallid == 0)
     {
-        LOG_E("syscall id = 0xdeadbeef\n");
-        while(1);
+        LOG_E("syscall id = 0!\n");
+        while (1)
+            ;
     }
 
-    syscallfunc_t syscallfunc = (syscallfunc_t)lwp_get_sys_api(regs -> a7);
+    syscallfunc_t syscallfunc = (syscallfunc_t)lwp_get_sys_api(syscallid);
 
-    if(syscallfunc == RT_NULL)
+    if (syscallfunc == RT_NULL)
     {
         LOG_E("unsupported syscall!\n");
-        while(1);
+        sys_exit(-1);
     }
 
-    LOG_D("syscall id = %d,arg0 = 0x%p,arg1 = 0x%p,arg2 = 0x%p,arg3 = 0x%p,arg4 = 0x%p,arg5 = 0x%p,arg6 = 0x%p",regs -> a7,regs -> a0,regs -> a1,regs -> a2,regs -> a3,regs -> a4,regs -> a5,regs -> a6);
-    LOG_D("%p", syscallfunc);
-    regs -> a0 = syscallfunc(regs -> a0,regs -> a1,regs -> a2,regs -> a3,regs -> a4,regs -> a5,regs -> a6);
-    regs -> a7 = 0;
-    regs -> epc += 4;//skip ecall instruction
-    LOG_D("syscall deal ok,ret = 0x%p",regs -> a0);
+#if DBG_LVL >= DBG_INFO
+    syscall_name = lwp_get_syscall_name(syscallid);
+#endif
+
+    LOG_I("[0x%lx] %s(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx)", rt_thread_self(), syscall_name,
+        regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
+    regs->a0 = syscallfunc(regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
+    regs->a7 = 0;
+    regs->epc += 4; // skip ecall instruction
+    LOG_I("[0x%lx] %s ret: 0x%lx", rt_thread_self(), syscall_name, regs->a0);
 }
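The readable trace lines depend on lwp_get_syscall_name() translating the id in a7 into a string, and they are only compiled in when DBG_LVL is at least DBG_INFO (this file now defaults to DBG_WARNING). A minimal sketch of how such a lookup table can be built, with entirely made-up ids and names; the real mapping lives in the lwp component:

```c
/* Hypothetical syscall-name table; the ids and names below are placeholders. */
static const char *example_syscall_names[] = {
    [1] = "exit",
    [2] = "read",
    [3] = "write",
};

const char *example_get_syscall_name(int id)
{
    int count = (int)(sizeof(example_syscall_names) / sizeof(example_syscall_names[0]));

    if (id <= 0 || id >= count || example_syscall_names[id] == RT_NULL)
    {
        return "unknown";
    }

    return example_syscall_names[id];
}
```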