feat: remove redundant code under virt64, c906

Changes:

- create a new folder named `common64` and move all code shared by the RV64
  ports into it (see the sketch below)
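
A minimal sketch of the resulting dispatch, mirroring the `libcpu/risc-v/SConscript` diff further down:

```python
# Sketch of the new selection logic in libcpu/risc-v/SConscript (see the diff below).
import os
from building import *   # RT-Thread build helpers: SConscript(), DefineGroup(), ...

Import('rtconfig')       # rtconfig.CPU comes from the BSP being built

common64_arch = ['virt64', 'c906']
group = []

# CPUs that share the relocated 64-bit code pull in common64/; all other
# RISC-V targets keep using the existing common/ directory.
if rtconfig.CPU in common64_arch:
    group += SConscript(os.path.join('common64', 'SConscript'))
else:
    group += SConscript(os.path.join('common', 'SConscript'))
```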

Signed-off-by: Shell <smokewood@qq.com>
Shell · 7 months ago
parent · commit 57d002b25e
55 changed files with 16 additions and 4614 deletions
  1. libcpu/risc-v/SConscript (+4 -5)
  2. libcpu/risc-v/common64/SConscript (+12 -0)
  3. libcpu/risc-v/common64/asid.c (+0 -0)
  4. libcpu/risc-v/common64/asm-generic.h (+0 -0)
  5. libcpu/risc-v/common64/backtrace.c (+0 -0)
  6. libcpu/risc-v/common64/context_gcc.S (+0 -0)
  7. libcpu/risc-v/common64/cpuport.c (+0 -0)
  8. libcpu/risc-v/common64/cpuport.h (+0 -0)
  9. libcpu/risc-v/common64/cpuport_gcc.S (+0 -0)
  10. libcpu/risc-v/common64/encoding.h (+0 -0)
  11. libcpu/risc-v/common64/ext_context.h (+0 -0)
  12. libcpu/risc-v/common64/interrupt_gcc.S (+0 -0)
  13. libcpu/risc-v/common64/io.h (+0 -0)
  14. libcpu/risc-v/common64/mmu.c (+0 -0)
  15. libcpu/risc-v/common64/mmu.h (+0 -0)
  16. libcpu/risc-v/common64/riscv.h (+0 -0)
  17. libcpu/risc-v/common64/riscv_io.h (+0 -0)
  18. libcpu/risc-v/common64/riscv_mmu.c (+0 -0)
  19. libcpu/risc-v/common64/sbi.c (+0 -0)
  20. libcpu/risc-v/common64/sbi.h (+0 -0)
  21. libcpu/risc-v/common64/stack.h (+0 -0)
  22. libcpu/risc-v/common64/stackframe.h (+0 -0)
  23. libcpu/risc-v/common64/startup_gcc.S (+0 -0)
  24. libcpu/risc-v/common64/syscall_c.c (+0 -0)
  25. libcpu/risc-v/common64/tick.c (+0 -0)
  26. libcpu/risc-v/common64/tick.h (+0 -0)
  27. libcpu/risc-v/common64/tlb.h (+0 -0)
  28. libcpu/risc-v/common64/trap.c (+0 -0)
  29. libcpu/risc-v/virt64/SConscript (+0 -3)
  30. libcpu/risc-v/virt64/asid.c (+0 -85)
  31. libcpu/risc-v/virt64/asm-generic.h (+0 -26)
  32. libcpu/risc-v/virt64/backtrace.c (+0 -127)
  33. libcpu/risc-v/virt64/context_gcc.S (+0 -115)
  34. libcpu/risc-v/virt64/cpuport.c (+0 -133)
  35. libcpu/risc-v/virt64/cpuport.h (+0 -52)
  36. libcpu/risc-v/virt64/cpuport_gcc.S (+0 -25)
  37. libcpu/risc-v/virt64/encoding.h (+0 -1348)
  38. libcpu/risc-v/virt64/ext_context.h (+0 -73)
  39. libcpu/risc-v/virt64/interrupt_gcc.S (+0 -97)
  40. libcpu/risc-v/virt64/io.h (+0 -52)
  41. libcpu/risc-v/virt64/mmu.c (+0 -595)
  42. libcpu/risc-v/virt64/mmu.h (+0 -77)
  43. libcpu/risc-v/virt64/riscv.h (+0 -32)
  44. libcpu/risc-v/virt64/riscv_io.h (+0 -115)
  45. libcpu/risc-v/virt64/riscv_mmu.c (+0 -29)
  46. libcpu/risc-v/virt64/sbi.c (+0 -264)
  47. libcpu/risc-v/virt64/sbi.h (+0 -244)
  48. libcpu/risc-v/virt64/stack.h (+0 -70)
  49. libcpu/risc-v/virt64/stackframe.h (+0 -312)
  50. libcpu/risc-v/virt64/startup_gcc.S (+0 -133)
  51. libcpu/risc-v/virt64/syscall_c.c (+0 -62)
  52. libcpu/risc-v/virt64/tick.c (+0 -76)
  53. libcpu/risc-v/virt64/tick.h (+0 -17)
  54. libcpu/risc-v/virt64/tlb.h (+0 -61)
  55. libcpu/risc-v/virt64/trap.c (+0 -386)

+ 4 - 5
libcpu/risc-v/SConscript

@@ -5,17 +5,16 @@ from building import *
 
 Import('rtconfig')
 
+common64_arch = ['virt64', 'c906']
 cwd   = GetCurrentDir()
 group = []
 list  = os.listdir(cwd)
 
 # add common code files
-if   rtconfig.CPU == "virt64" :
-    group = group
-elif rtconfig.CPU == "c906" :
-    group = group
+if rtconfig.CPU in common64_arch :
+    group += SConscript(os.path.join('common64', 'SConscript'))
 else :
-    group = group + SConscript(os.path.join('common', 'SConscript'))
+    group += SConscript(os.path.join('common', 'SConscript'))
 
 # cpu porting code files
 if  rtconfig.CPU == "c906":
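
For context (an assumption, not part of this diff): `rtconfig.CPU` is normally defined by the BSP's `rtconfig.py`, so a board that should now pick up `common64/` would carry something like the following hypothetical excerpt:

```python
# Hypothetical excerpt from a BSP rtconfig.py (e.g. a QEMU virt64 board);
# values are illustrative, not taken from this commit.
ARCH = 'risc-v'
CPU  = 'virt64'   # any value in common64_arch ('virt64', 'c906') now builds common64/
```

With `CPU` set to one of the listed values, building the BSP with `scons` pulls in the shared `common64` group instead of a per-target copy of these sources.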

+ 12 - 0
libcpu/risc-v/common64/SConscript

@@ -0,0 +1,12 @@
+from building import *
+
+cwd     = GetCurrentDir()
+src     = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
+CPPPATH = [cwd]
+
+if not GetDepend('ARCH_USING_ASID'):
+    SrcRemove(src, ['asid.c'])
+
+group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')

+ 0 - 0
libcpu/risc-v/t-head/c906/asid.c → libcpu/risc-v/common64/asid.c


+ 0 - 0
libcpu/risc-v/t-head/c906/asm-generic.h → libcpu/risc-v/common64/asm-generic.h


+ 0 - 0
libcpu/risc-v/t-head/c906/backtrace.c → libcpu/risc-v/common64/backtrace.c


+ 0 - 0
libcpu/risc-v/t-head/c906/context_gcc.S → libcpu/risc-v/common64/context_gcc.S


+ 0 - 0
libcpu/risc-v/t-head/c906/cpuport.c → libcpu/risc-v/common64/cpuport.c


+ 0 - 0
libcpu/risc-v/t-head/c906/cpuport.h → libcpu/risc-v/common64/cpuport.h


+ 0 - 0
libcpu/risc-v/t-head/c906/cpuport_gcc.S → libcpu/risc-v/common64/cpuport_gcc.S


+ 0 - 0
libcpu/risc-v/t-head/c906/encoding.h → libcpu/risc-v/common64/encoding.h


+ 0 - 0
libcpu/risc-v/t-head/c906/ext_context.h → libcpu/risc-v/common64/ext_context.h


+ 0 - 0
libcpu/risc-v/t-head/c906/interrupt_gcc.S → libcpu/risc-v/common64/interrupt_gcc.S


+ 0 - 0
libcpu/risc-v/t-head/c906/io.h → libcpu/risc-v/common64/io.h


+ 0 - 0
libcpu/risc-v/t-head/c906/mmu.c → libcpu/risc-v/common64/mmu.c


+ 0 - 0
libcpu/risc-v/t-head/c906/mmu.h → libcpu/risc-v/common64/mmu.h


+ 0 - 0
libcpu/risc-v/t-head/c906/riscv.h → libcpu/risc-v/common64/riscv.h


+ 0 - 0
libcpu/risc-v/t-head/c906/riscv_io.h → libcpu/risc-v/common64/riscv_io.h


+ 0 - 0
libcpu/risc-v/t-head/c906/riscv_mmu.c → libcpu/risc-v/common64/riscv_mmu.c


+ 0 - 0
libcpu/risc-v/t-head/c906/sbi.c → libcpu/risc-v/common64/sbi.c


+ 0 - 0
libcpu/risc-v/t-head/c906/sbi.h → libcpu/risc-v/common64/sbi.h


+ 0 - 0
libcpu/risc-v/t-head/c906/stack.h → libcpu/risc-v/common64/stack.h


+ 0 - 0
libcpu/risc-v/t-head/c906/stackframe.h → libcpu/risc-v/common64/stackframe.h


+ 0 - 0
libcpu/risc-v/t-head/c906/startup_gcc.S → libcpu/risc-v/common64/startup_gcc.S


+ 0 - 0
libcpu/risc-v/t-head/c906/syscall_c.c → libcpu/risc-v/common64/syscall_c.c


+ 0 - 0
libcpu/risc-v/t-head/c906/tick.c → libcpu/risc-v/common64/tick.c


+ 0 - 0
libcpu/risc-v/t-head/c906/tick.h → libcpu/risc-v/common64/tick.h


+ 0 - 0
libcpu/risc-v/t-head/c906/tlb.h → libcpu/risc-v/common64/tlb.h


+ 0 - 0
libcpu/risc-v/t-head/c906/trap.c → libcpu/risc-v/common64/trap.c


+ 0 - 3
libcpu/risc-v/virt64/SConscript

@@ -5,9 +5,6 @@ cwd     = GetCurrentDir()
 src     = Glob('*.c') + Glob('*.cpp') + Glob('*_gcc.S')
 CPPPATH = [cwd]
 
-if not GetDepend('ARCH_USING_ASID'):
-    SrcRemove(src, ['asid.c'])
-
 if not GetDepend('ARCH_RISCV_VECTOR'):
     SrcRemove(src, ['vector_gcc.S'])
 

+ 0 - 85
libcpu/risc-v/virt64/asid.c

@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2024-07-12     RT-Thread    first version.
- */
-#define DBG_TAG "hw.asid"
-#define DBG_LVL DBG_INFO
-#include <rtdbg.h>
-
-#include <rtthread.h>
-#include <board.h>
-#include <cache.h>
-#include <mm_aspace.h>
-#include <mm_page.h>
-#include <mmu.h>
-#include <riscv_mmu.h>
-#include <tlb.h>
-
-static rt_uint8_t ASID_BITS = 0;
-static rt_uint32_t next_asid;
-static rt_uint64_t global_asid_generation;
-#define ASID_MASK ((1 << ASID_BITS) - 1)
-#define ASID_FIRST_GENERATION (1 << ASID_BITS)
-#define MAX_ASID ASID_FIRST_GENERATION
-
-void rt_hw_asid_init(void)
-{
-    unsigned int satp_reg = read_csr(satp);
-    satp_reg |= (((rt_uint64_t)0xffff) << PPN_BITS);
-    write_csr(satp, satp_reg);
-    unsigned short valid_asid_bit = ((read_csr(satp) >> PPN_BITS) & 0xffff);
-
-    // The maximal value of ASIDLEN, is 9 for Sv32 or 16 for Sv39, Sv48, and Sv57
-    for (unsigned i = 0; i < 16; i++)
-    {
-        if (!(valid_asid_bit & 0x1))
-        {
-            break;
-        }
-
-        valid_asid_bit >>= 1;
-        ASID_BITS++;
-    }
-
-    global_asid_generation = ASID_FIRST_GENERATION;
-    next_asid = 1;
-}
-
-static rt_uint64_t _asid_acquire(rt_aspace_t aspace)
-{
-    if ((aspace->asid ^ global_asid_generation) >> ASID_BITS) // not same generation
-    {
-        if (next_asid != MAX_ASID)
-        {
-            aspace->asid = global_asid_generation | next_asid;
-            next_asid++;
-        }
-        else
-        {
-            // scroll to next generation
-            global_asid_generation += ASID_FIRST_GENERATION;
-            next_asid = 1;
-            rt_hw_tlb_invalidate_all_local();
-
-            aspace->asid = global_asid_generation | next_asid;
-            next_asid++;
-        }
-    }
-
-    return aspace->asid & ASID_MASK;
-}
-
-void rt_hw_asid_switch_pgtbl(struct rt_aspace *aspace, rt_ubase_t pgtbl)
-{
-    rt_uint64_t asid = _asid_acquire(aspace);
-    write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
-                        (asid << PPN_BITS) |
-                        ((rt_ubase_t)pgtbl >> PAGE_OFFSET_BIT));
-    asm volatile("sfence.vma x0,%0"::"r"(asid):"memory");
-}
-

+ 0 - 26
libcpu/risc-v/virt64/asm-generic.h

@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2006-2023 RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2023-03-12     WangXiaoyao  the first version
- */
-#ifndef __ASM_GENERIC_H__
-#define __ASM_GENERIC_H__
-
-/* use to mark a start point where every task start from */
-#define START_POINT(funcname)               \
-    .global funcname;                       \
-    .type funcname, %function;	            \
-    funcname:                               \
-    .cfi_sections .debug_frame, .eh_frame;  \
-    .cfi_startproc;                         \
-    .cfi_undefined ra
-
-#define START_POINT_END(name)   \
-    .cfi_endproc;               \
-    .size name, .-name;
-
-#endif /* __ASM_GENERIC_H__ */

+ 0 - 127
libcpu/risc-v/virt64/backtrace.c

@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2006-2023, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2023-10-18     Shell        Add backtrace support
- */
-
-#define DBG_TAG "hw.backtrace"
-#define DBG_LVL DBG_INFO
-#include <rtdbg.h>
-
-#include <rtthread.h>
-#include <mm_aspace.h>
-#include "riscv_mmu.h"
-#include "stack.h"
-
-#define WORD                            sizeof(rt_base_t)
-#define ARCH_CONTEXT_FETCH(pctx, id)    (*(((unsigned long *)pctx) + (id)))
-
-rt_inline rt_err_t _bt_kaddr(rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
-{
-    rt_err_t rc;
-    frame->fp = *(fp - 2);
-    frame->pc = *(fp - 1);
-
-    if ((rt_ubase_t)fp == frame->fp)
-    {
-        rc = -RT_ERROR;
-    }
-    else
-    {
-        rc = RT_EOK;
-    }
-    return rc;
-}
-
-#ifdef RT_USING_SMART
-#include <lwp_arch.h>
-#include <lwp_user_mm.h>
-
-rt_inline rt_err_t _bt_uaddr(rt_lwp_t lwp, rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
-{
-    rt_err_t rc;
-    if (lwp_data_get(lwp, &frame->fp, fp - 2, WORD) != WORD)
-    {
-        rc = -RT_EFAULT;
-    }
-    else if (lwp_data_get(lwp, &frame->pc, fp - 1, WORD) != WORD)
-    {
-        rc = -RT_EFAULT;
-    }
-    else if ((rt_ubase_t)fp == frame->fp)
-    {
-        rc = -RT_ERROR;
-    }
-    else
-    {
-        frame->pc -= 0;
-        rc = RT_EOK;
-    }
-    return rc;
-}
-#endif /* RT_USING_SMART */
-
-rt_err_t rt_hw_backtrace_frame_unwind(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
-{
-    rt_err_t rc = -RT_ERROR;
-    rt_uintptr_t *fp = (rt_uintptr_t *)frame->fp;
-
-    if (fp && !((long)fp & 0x7))
-    {
-#ifdef RT_USING_SMART
-        if (thread->lwp)
-        {
-            void *lwp = thread->lwp;
-            void *this_lwp = lwp_self();
-            if (this_lwp == lwp && rt_hw_mmu_v2p(((rt_lwp_t)lwp)->aspace, fp) != ARCH_MAP_FAILED)
-            {
-                rc = _bt_kaddr(fp, frame);
-            }
-            else if (lwp_user_accessible_ext(lwp, (void *)fp, WORD))
-            {
-                rc = _bt_uaddr(lwp, fp, frame);
-            }
-            else
-            {
-                rc = -RT_EFAULT;
-            }
-        }
-        else
-#endif
-        if ((rt_kmem_v2p(fp) != ARCH_MAP_FAILED))
-        {
-            rc = _bt_kaddr(fp, frame);
-        }
-        else
-        {
-            rc = -RT_EINVAL;
-        }
-    }
-    else
-    {
-        rc = -RT_EFAULT;
-    }
-    return rc;
-}
-
-rt_err_t rt_hw_backtrace_frame_get(rt_thread_t thread, struct rt_hw_backtrace_frame *frame)
-{
-    rt_err_t rc;
-
-    if (!thread || !frame)
-    {
-        rc = -RT_EINVAL;
-    }
-    else
-    {
-        rt_hw_switch_frame_t sframe = thread->sp;
-        frame->pc = sframe->regs[RT_HW_SWITCH_CONTEXT_RA];
-        frame->fp = sframe->regs[RT_HW_SWITCH_CONTEXT_S0];;
-        rc = RT_EOK;
-    }
-    return rc;
-}

+ 0 - 115
libcpu/risc-v/virt64/context_gcc.S

@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2018/10/28     Bernard      The unify RISC-V porting implementation
- * 2018/12/27     Jesven       Add SMP support
- * 2021/02/02     lizhirui     Add userspace support
- * 2022/10/22     Shell        Support User mode RVV;
- *                             Trimming process switch context
- */
-
-#include "cpuport.h"
-#include "stackframe.h"
-#define _REG_IDX(name) RT_HW_SWITCH_CONTEXT_##name
-#define REG_IDX(name) _REG_IDX(name)
-
-.macro SAVE_REG reg, index
-    STORE \reg, \index*REGBYTES(sp)
-.endm
-
-.macro LOAD_REG reg, index
-    LOAD \reg, \index*REGBYTES(sp)
-.endm
-
-.macro RESERVE_CONTEXT
-    addi        sp, sp, -(RT_HW_SWITCH_CONTEXT_SIZE * REGBYTES)
-    SAVE_REG    tp, REG_IDX(TP)
-    SAVE_REG    ra, REG_IDX(RA)
-    SAVE_REG    s0, REG_IDX(S0)
-    SAVE_REG    s1, REG_IDX(S1)
-    SAVE_REG    s2, REG_IDX(S2)
-    SAVE_REG    s3, REG_IDX(S3)
-    SAVE_REG    s4, REG_IDX(S4)
-    SAVE_REG    s5, REG_IDX(S5)
-    SAVE_REG    s6, REG_IDX(S6)
-    SAVE_REG    s7, REG_IDX(S7)
-    SAVE_REG    s8, REG_IDX(S8)
-    SAVE_REG    s9, REG_IDX(S9)
-    SAVE_REG    s10, REG_IDX(S10)
-    SAVE_REG    s11, REG_IDX(S11)
-    csrr        s11, sstatus
-    li          s10, (SSTATUS_SPP)
-    or          s11, s11, s10
-    SAVE_REG    s11, REG_IDX(SSTATUS)
-.endm
-
-.macro RESTORE_CONTEXT
-    LOAD_REG    s11, REG_IDX(SSTATUS)
-    csrw        sstatus, s11
-    LOAD_REG    s11, REG_IDX(S11)
-    LOAD_REG    s10, REG_IDX(S10)
-    LOAD_REG    s9, REG_IDX(S9)
-    LOAD_REG    s8, REG_IDX(S8)
-    LOAD_REG    s7, REG_IDX(S7)
-    LOAD_REG    s6, REG_IDX(S6)
-    LOAD_REG    s5, REG_IDX(S5)
-    LOAD_REG    s4, REG_IDX(S4)
-    LOAD_REG    s3, REG_IDX(S3)
-    LOAD_REG    s2, REG_IDX(S2)
-    LOAD_REG    s1, REG_IDX(S1)
-    LOAD_REG    s0, REG_IDX(S0)
-    LOAD_REG    ra, REG_IDX(RA)
-    LOAD_REG    tp, REG_IDX(TP)
-    addi        sp, sp, RT_HW_SWITCH_CONTEXT_SIZE * REGBYTES
-    csrw        sepc, ra
-.endm
-
-/*
- * void rt_hw_context_switch_to(rt_ubase_t to);
- *
- * a0 --> to SP pointer
- */
-.globl rt_hw_context_switch_to
-rt_hw_context_switch_to:
-    LOAD sp, (a0)
-
-    jal rt_thread_self
-    mv s1, a0
-
-    #ifdef RT_USING_SMART
-        jal lwp_aspace_switch
-    #endif
-
-    RESTORE_CONTEXT
-    sret
-
-/*
- * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
- *
- * a0 --> from SP pointer
- * a1 --> to SP pointer
- *
- * It should only be used on local interrupt disable
- */
-.globl rt_hw_context_switch
-rt_hw_context_switch:
-    RESERVE_CONTEXT
-    STORE sp, (a0)
-
-    // restore to thread SP
-    LOAD sp, (a1)
-
-    // restore Address Space
-    jal rt_thread_self
-    mv s1, a0
-
-    #ifdef RT_USING_SMART
-        jal lwp_aspace_switch
-    #endif
-
-    RESTORE_CONTEXT
-    sret

+ 0 - 133
libcpu/risc-v/virt64/cpuport.c

@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2018/10/28     Bernard      The unify RISC-V porting code.
- * 2021-02-11     lizhirui     add gp support
- * 2021-11-19     JasonHu      add fpu support
- */
-
-#include <rthw.h>
-#include <rtthread.h>
-
-#include "cpuport.h"
-#include "stack.h"
-#include <sbi.h>
-#include <encoding.h>
-
-#ifdef ARCH_RISCV_FPU
-    #define K_SSTATUS_DEFAULT (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM | SSTATUS_FS)
-#else
-    #define K_SSTATUS_DEFAULT (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM)
-#endif
-#ifdef RT_USING_SMART
-#include <lwp_arch.h>
-#endif
-
-/**
- * @brief from thread used interrupt context switch
- *
- */
-volatile rt_ubase_t rt_interrupt_from_thread = 0;
-/**
- * @brief to thread used interrupt context switch
- *
- */
-volatile rt_ubase_t rt_interrupt_to_thread = 0;
-/**
- * @brief flag to indicate context switch in interrupt or not
- *
- */
-volatile rt_ubase_t rt_thread_switch_interrupt_flag = 0;
-
-void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
-{
-    rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)
-        ((rt_ubase_t)sp - sizeof(struct rt_hw_switch_frame));
-
-    rt_memset(frame, 0, sizeof(struct rt_hw_switch_frame));
-
-    frame->regs[RT_HW_SWITCH_CONTEXT_RA] = ra;
-    frame->regs[RT_HW_SWITCH_CONTEXT_SSTATUS] = sstatus;
-
-    return (void *)frame;
-}
-
-int rt_hw_cpu_id(void)
-{
-    return 0;
-}
-
-/**
- * This function will initialize thread stack, we assuming
- * when scheduler restore this new thread, context will restore
- * an entry to user first application
- *
- * s0-s11, ra, sstatus, a0
- * @param tentry the entry of thread
- * @param parameter the parameter of entry
- * @param stack_addr the beginning stack address
- * @param texit the function will be called when thread exit
- *
- * @return stack address
- */
-rt_uint8_t *rt_hw_stack_init(void *tentry,
-                             void *parameter,
-                             rt_uint8_t *stack_addr,
-                             void *texit)
-{
-    rt_ubase_t *sp = (rt_ubase_t *)stack_addr;
-    // we use a strict alignment requirement for Q extension
-    sp = (rt_ubase_t *)RT_ALIGN_DOWN((rt_ubase_t)sp, 16);
-
-    (*--sp) = (rt_ubase_t)tentry;
-    (*--sp) = (rt_ubase_t)parameter;
-    (*--sp) = (rt_ubase_t)texit;
-    --sp;   /* alignment */
-
-    /* compatible to RESTORE_CONTEXT */
-    extern void _rt_thread_entry(void);
-    return (rt_uint8_t *)_rt_hw_stack_init(sp, (rt_ubase_t)_rt_thread_entry, K_SSTATUS_DEFAULT);
-}
-
-/*
- * #ifdef RT_USING_SMP
- * void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
- * #else
- * void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to);
- * #endif
- */
-#ifndef RT_USING_SMP
-void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread)
-{
-    if (rt_thread_switch_interrupt_flag == 0)
-        rt_interrupt_from_thread = from;
-
-    rt_interrupt_to_thread = to;
-    rt_thread_switch_interrupt_flag = 1;
-
-    return;
-}
-#endif /* end of RT_USING_SMP */
-
-/** shutdown CPU */
-void rt_hw_cpu_shutdown(void)
-{
-    rt_uint32_t level;
-    rt_kprintf("shutdown...\n");
-
-    level = rt_hw_interrupt_disable();
-
-    sbi_shutdown();
-
-    while (1)
-        ;
-}
-
-void rt_hw_set_process_id(int pid)
-{
-    // TODO
-}

+ 0 - 52
libcpu/risc-v/virt64/cpuport.h

@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2018-10-03     Bernard      The first version
- */
-
-#ifndef CPUPORT_H__
-#define CPUPORT_H__
-
-#include <rtconfig.h>
-#include <opcode.h>
-
-#ifdef RT_USING_SMP
-typedef union {
-    unsigned long slock;
-    struct __arch_tickets {
-        unsigned short owner;
-        unsigned short next;
-    } tickets;
-} rt_hw_spinlock_t;
-#endif
-
-#ifndef __ASSEMBLY__
-#include <rtdef.h>
-
-rt_inline void rt_hw_dsb(void)
-{
-    __asm__ volatile("fence":::"memory");
-}
-
-rt_inline void rt_hw_dmb(void)
-{
-    __asm__ volatile("fence":::"memory");
-}
-
-rt_inline void rt_hw_isb(void)
-{
-    __asm__ volatile(OPC_FENCE_I:::"memory");
-}
-
-int rt_hw_cpu_id(void);
-
-#endif
-
-#endif
-#ifdef RISCV_U_MODE
-#define RISCV_USER_ENTRY 0xFFFFFFE000000000ULL
-#endif

+ 0 - 25
libcpu/risc-v/virt64/cpuport_gcc.S

@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2006-2022, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2022-10-19     RT-Thread    the first version
- */
-
-#include "cpuport.h"
-#include "stackframe.h"
-#include "asm-generic.h"
-
-START_POINT(_rt_thread_entry)
-    LOAD    ra, REGBYTES(sp)    /* thread exit */
-    addi    sp, sp, 2 * REGBYTES
-    LOAD    a0, (sp)            /* parameter */
-    LOAD    t0, REGBYTES(sp)    /* tentry */
-    addi    sp, sp, 2 * REGBYTES
-    mv      s1, ra
-    jalr    t0
-    jalr    s1
-    j       .           /* never here */
-START_POINT_END(_rt_thread_entry)

+ 0 - 1348
libcpu/risc-v/virt64/encoding.h

@@ -1,1348 +0,0 @@
-/*
- * Copyright (c) 2006-2022, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- */
-// See LICENSE for license details.
-
-#ifndef RISCV_CSR_ENCODING_H
-#define RISCV_CSR_ENCODING_H
-
-#include <rtconfig.h>
-
-#ifdef ARCH_RISCV_VECTOR
-#include "vector_encoding.h"
-
-#else
-#define SSTATUS_VS          0   /* fallback */
-#endif /* ARCH_RISCV_VECTOR */
-
-#define MSTATUS_UIE         0x00000001
-#define MSTATUS_SIE         0x00000002
-#define MSTATUS_HIE         0x00000004
-#define MSTATUS_MIE         0x00000008
-#define MSTATUS_UPIE        0x00000010
-#define MSTATUS_SPIE        0x00000020
-#define MSTATUS_HPIE        0x00000040
-#define MSTATUS_MPIE        0x00000080
-#define MSTATUS_SPP         0x00000100
-#define MSTATUS_HPP         0x00000600
-#define MSTATUS_MPP         0x00001800
-#define MSTATUS_FS          0x00006000
-#define MSTATUS_XS          0x00018000
-#define MSTATUS_MPRV        0x00020000
-#define MSTATUS_PUM         0x00040000
-#define MSTATUS_MXR         0x00080000
-#define MSTATUS_VM          0x1F000000
-#define MSTATUS32_SD        0x80000000
-#define MSTATUS64_SD        0x8000000000000000
-
-#define SSTATUS_UIE         0x00000001
-#define SSTATUS_SIE         0x00000002
-#define SSTATUS_UPIE        0x00000010
-#define SSTATUS_SPIE        0x00000020
-#define SSTATUS_SPP         0x00000100
-#define SSTATUS_FS          0x00006000 /* Floating-point Status */
-#define SSTATUS_FS_INITIAL  0x00002000
-#define SSTATUS_FS_CLEAN    0x00004000
-#define SSTATUS_FS_DIRTY    0x00006000
-#define SSTATUS_XS          0x00018000
-#define SSTATUS_SUM         0x00040000
-#define SSTATUS32_SD        0x80000000
-#define SSTATUS64_SD        0x8000000000000000
-
-#define DCSR_XDEBUGVER      (3U<<30)
-#define DCSR_NDRESET        (1<<29)
-#define DCSR_FULLRESET      (1<<28)
-#define DCSR_EBREAKM        (1<<15)
-#define DCSR_EBREAKH        (1<<14)
-#define DCSR_EBREAKS        (1<<13)
-#define DCSR_EBREAKU        (1<<12)
-#define DCSR_STOPCYCLE      (1<<10)
-#define DCSR_STOPTIME       (1<<9)
-#define DCSR_CAUSE          (7<<6)
-#define DCSR_DEBUGINT       (1<<5)
-#define DCSR_HALT           (1<<3)
-#define DCSR_STEP           (1<<2)
-#define DCSR_PRV            (3<<0)
-
-#define DCSR_CAUSE_NONE     0
-#define DCSR_CAUSE_SWBP     1
-#define DCSR_CAUSE_HWBP     2
-#define DCSR_CAUSE_DEBUGINT 3
-#define DCSR_CAUSE_STEP     4
-#define DCSR_CAUSE_HALT     5
-
-#define MCONTROL_TYPE(xlen)    (0xfULL<<((xlen)-4))
-#define MCONTROL_DMODE(xlen)   (1ULL<<((xlen)-5))
-#define MCONTROL_MASKMAX(xlen) (0x3fULL<<((xlen)-11))
-
-#define MCONTROL_SELECT     (1<<19)
-#define MCONTROL_TIMING     (1<<18)
-#define MCONTROL_ACTION     (0x3f<<12)
-#define MCONTROL_CHAIN      (1<<11)
-#define MCONTROL_MATCH      (0xf<<7)
-#define MCONTROL_M          (1<<6)
-#define MCONTROL_H          (1<<5)
-#define MCONTROL_S          (1<<4)
-#define MCONTROL_U          (1<<3)
-#define MCONTROL_EXECUTE    (1<<2)
-#define MCONTROL_STORE      (1<<1)
-#define MCONTROL_LOAD       (1<<0)
-
-#define MCONTROL_TYPE_NONE      0
-#define MCONTROL_TYPE_MATCH     2
-
-#define MCONTROL_ACTION_DEBUG_EXCEPTION   0
-#define MCONTROL_ACTION_DEBUG_MODE        1
-#define MCONTROL_ACTION_TRACE_START       2
-#define MCONTROL_ACTION_TRACE_STOP        3
-#define MCONTROL_ACTION_TRACE_EMIT        4
-
-#define MCONTROL_MATCH_EQUAL     0
-#define MCONTROL_MATCH_NAPOT     1
-#define MCONTROL_MATCH_GE        2
-#define MCONTROL_MATCH_LT        3
-#define MCONTROL_MATCH_MASK_LOW  4
-#define MCONTROL_MATCH_MASK_HIGH 5
-
-#define MIP_SSIP            (1 << IRQ_S_SOFT)
-#define MIP_HSIP            (1 << IRQ_H_SOFT)
-#define MIP_MSIP            (1 << IRQ_M_SOFT)
-#define MIP_STIP            (1 << IRQ_S_TIMER)
-#define MIP_HTIP            (1 << IRQ_H_TIMER)
-#define MIP_MTIP            (1 << IRQ_M_TIMER)
-#define MIP_SEIP            (1 << IRQ_S_EXT)
-#define MIP_HEIP            (1 << IRQ_H_EXT)
-#define MIP_MEIP            (1 << IRQ_M_EXT)
-
-#define SIP_SSIP    MIP_SSIP /* software interrupt */
-#define SIP_STIP    MIP_STIP /* timer interrupt */
-#define SIP_SEIP    MIP_SEIP /* ext interrupt */
-
-#define SIE_SSIE            (1 << IRQ_S_SOFT)
-#define SIE_STIE            (1 << IRQ_S_TIMER)
-#define SIE_SEIE            (1 << IRQ_S_EXT)
-
-#define RISCV_XLEN    64
-
-#define SCAUSE_INTERRUPT    (1UL << (RISCV_XLEN - 1))
-
-#define SCAUSE_S_SOFTWARE_INTR  1
-#define SCAUSE_S_TIMER_INTR     5
-#define SCAUSE_S_EXTERNAL_INTR  9
-
-#define PRV_U 0
-#define PRV_S 1
-#define PRV_H 2
-#define PRV_M 3
-
-#define VM_MBARE 0
-#define VM_MBB   1
-#define VM_MBBID 2
-#define VM_SV32  8
-#define VM_SV39  9
-#define VM_SV48  10
-
-#define IRQ_S_SOFT   1
-#define IRQ_H_SOFT   2
-#define IRQ_M_SOFT   3
-#define IRQ_S_TIMER  5
-#define IRQ_H_TIMER  6
-#define IRQ_M_TIMER  7
-#define IRQ_S_EXT    9
-#define IRQ_H_EXT    10
-#define IRQ_M_EXT    11
-#define IRQ_COP      12
-#define IRQ_HOST     13
-
-#define DEFAULT_RSTVEC     0x00001000
-#define DEFAULT_NMIVEC     0x00001004
-#define DEFAULT_MTVEC      0x00001010
-#define CONFIG_STRING_ADDR 0x0000100C
-#define EXT_IO_BASE        0x40000000
-#define DRAM_BASE          0x80000000
-
-// page table entry (PTE) fields
-#define PTE_V     0x001 // Valid
-#define PTE_R     0x002 // Read
-#define PTE_W     0x004 // Write
-#define PTE_X     0x008 // Execute
-#define PTE_U     0x010 // User
-#define PTE_G     0x020 // Global
-#define PTE_A     0x040 // Accessed
-#define PTE_D     0x080 // Dirty
-#define PTE_SOFT  0x300 // Reserved for Software
-
-#define PTE_PPN_SHIFT 10
-
-#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V)
-
-#ifdef __riscv
-
-#ifdef __riscv64
-# define MSTATUS_SD MSTATUS64_SD
-# define SSTATUS_SD SSTATUS64_SD
-# define RISCV_PGLEVEL_BITS 9
-#else
-# define MSTATUS_SD MSTATUS32_SD
-# define SSTATUS_SD SSTATUS32_SD
-# define RISCV_PGLEVEL_BITS 10
-#endif /* end of __riscv64 */
-
-#define RISCV_PGSHIFT 12
-#define RISCV_PGSIZE (1 << RISCV_PGSHIFT)
-
-#ifndef __ASSEMBLY__
-
-#ifdef __GNUC__
-
-#define read_csr(reg) ({ unsigned long __tmp; \
-  asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
-  __tmp; })
-
-#define write_csr(reg, val) ({ \
-  if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
-    asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
-  else \
-    asm volatile ("csrw " #reg ", %0" :: "r"(val)); })
-
-#define swap_csr(reg, val) ({ unsigned long __tmp; \
-  if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
-    asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "i"(val)); \
-  else \
-    asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "r"(val)); \
-  __tmp; })
-
-#define set_csr(reg, bit) ({ unsigned long __tmp; \
-  if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
-    asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
-  else \
-    asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
-  __tmp; })
-
-#define clear_csr(reg, bit) ({ unsigned long __tmp; \
-  if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
-    asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
-  else \
-    asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
-  __tmp; })
-
-#define rdtime() read_csr(time)
-#define rdcycle() read_csr(cycle)
-#define rdinstret() read_csr(instret)
-
-#endif /* end of __GNUC__ */
-
-#endif /* end of __ASSEMBLY__ */
-
-#endif /* end of __riscv */
-
-#endif /* end of RISCV_CSR_ENCODING_H */
-
-/* Automatically generated by parse-opcodes */
-#ifndef RISCV_ENCODING_H
-#define RISCV_ENCODING_H
-#define MATCH_BEQ 0x63
-#define MASK_BEQ  0x707f
-#define MATCH_BNE 0x1063
-#define MASK_BNE  0x707f
-#define MATCH_BLT 0x4063
-#define MASK_BLT  0x707f
-#define MATCH_BGE 0x5063
-#define MASK_BGE  0x707f
-#define MATCH_BLTU 0x6063
-#define MASK_BLTU  0x707f
-#define MATCH_BGEU 0x7063
-#define MASK_BGEU  0x707f
-#define MATCH_JALR 0x67
-#define MASK_JALR  0x707f
-#define MATCH_JAL 0x6f
-#define MASK_JAL  0x7f
-#define MATCH_LUI 0x37
-#define MASK_LUI  0x7f
-#define MATCH_AUIPC 0x17
-#define MASK_AUIPC  0x7f
-#define MATCH_ADDI 0x13
-#define MASK_ADDI  0x707f
-#define MATCH_SLLI 0x1013
-#define MASK_SLLI  0xfc00707f
-#define MATCH_SLTI 0x2013
-#define MASK_SLTI  0x707f
-#define MATCH_SLTIU 0x3013
-#define MASK_SLTIU  0x707f
-#define MATCH_XORI 0x4013
-#define MASK_XORI  0x707f
-#define MATCH_SRLI 0x5013
-#define MASK_SRLI  0xfc00707f
-#define MATCH_SRAI 0x40005013
-#define MASK_SRAI  0xfc00707f
-#define MATCH_ORI 0x6013
-#define MASK_ORI  0x707f
-#define MATCH_ANDI 0x7013
-#define MASK_ANDI  0x707f
-#define MATCH_ADD 0x33
-#define MASK_ADD  0xfe00707f
-#define MATCH_SUB 0x40000033
-#define MASK_SUB  0xfe00707f
-#define MATCH_SLL 0x1033
-#define MASK_SLL  0xfe00707f
-#define MATCH_SLT 0x2033
-#define MASK_SLT  0xfe00707f
-#define MATCH_SLTU 0x3033
-#define MASK_SLTU  0xfe00707f
-#define MATCH_XOR 0x4033
-#define MASK_XOR  0xfe00707f
-#define MATCH_SRL 0x5033
-#define MASK_SRL  0xfe00707f
-#define MATCH_SRA 0x40005033
-#define MASK_SRA  0xfe00707f
-#define MATCH_OR 0x6033
-#define MASK_OR  0xfe00707f
-#define MATCH_AND 0x7033
-#define MASK_AND  0xfe00707f
-#define MATCH_ADDIW 0x1b
-#define MASK_ADDIW  0x707f
-#define MATCH_SLLIW 0x101b
-#define MASK_SLLIW  0xfe00707f
-#define MATCH_SRLIW 0x501b
-#define MASK_SRLIW  0xfe00707f
-#define MATCH_SRAIW 0x4000501b
-#define MASK_SRAIW  0xfe00707f
-#define MATCH_ADDW 0x3b
-#define MASK_ADDW  0xfe00707f
-#define MATCH_SUBW 0x4000003b
-#define MASK_SUBW  0xfe00707f
-#define MATCH_SLLW 0x103b
-#define MASK_SLLW  0xfe00707f
-#define MATCH_SRLW 0x503b
-#define MASK_SRLW  0xfe00707f
-#define MATCH_SRAW 0x4000503b
-#define MASK_SRAW  0xfe00707f
-#define MATCH_LB 0x3
-#define MASK_LB  0x707f
-#define MATCH_LH 0x1003
-#define MASK_LH  0x707f
-#define MATCH_LW 0x2003
-#define MASK_LW  0x707f
-#define MATCH_LD 0x3003
-#define MASK_LD  0x707f
-#define MATCH_LBU 0x4003
-#define MASK_LBU  0x707f
-#define MATCH_LHU 0x5003
-#define MASK_LHU  0x707f
-#define MATCH_LWU 0x6003
-#define MASK_LWU  0x707f
-#define MATCH_SB 0x23
-#define MASK_SB  0x707f
-#define MATCH_SH 0x1023
-#define MASK_SH  0x707f
-#define MATCH_SW 0x2023
-#define MASK_SW  0x707f
-#define MATCH_SD 0x3023
-#define MASK_SD  0x707f
-#define MATCH_FENCE 0xf
-#define MASK_FENCE  0x707f
-#define MATCH_FENCE_I 0x100f
-#define MASK_FENCE_I  0x707f
-#define MATCH_MUL 0x2000033
-#define MASK_MUL  0xfe00707f
-#define MATCH_MULH 0x2001033
-#define MASK_MULH  0xfe00707f
-#define MATCH_MULHSU 0x2002033
-#define MASK_MULHSU  0xfe00707f
-#define MATCH_MULHU 0x2003033
-#define MASK_MULHU  0xfe00707f
-#define MATCH_DIV 0x2004033
-#define MASK_DIV  0xfe00707f
-#define MATCH_DIVU 0x2005033
-#define MASK_DIVU  0xfe00707f
-#define MATCH_REM 0x2006033
-#define MASK_REM  0xfe00707f
-#define MATCH_REMU 0x2007033
-#define MASK_REMU  0xfe00707f
-#define MATCH_MULW 0x200003b
-#define MASK_MULW  0xfe00707f
-#define MATCH_DIVW 0x200403b
-#define MASK_DIVW  0xfe00707f
-#define MATCH_DIVUW 0x200503b
-#define MASK_DIVUW  0xfe00707f
-#define MATCH_REMW 0x200603b
-#define MASK_REMW  0xfe00707f
-#define MATCH_REMUW 0x200703b
-#define MASK_REMUW  0xfe00707f
-#define MATCH_AMOADD_W 0x202f
-#define MASK_AMOADD_W  0xf800707f
-#define MATCH_AMOXOR_W 0x2000202f
-#define MASK_AMOXOR_W  0xf800707f
-#define MATCH_AMOOR_W 0x4000202f
-#define MASK_AMOOR_W  0xf800707f
-#define MATCH_AMOAND_W 0x6000202f
-#define MASK_AMOAND_W  0xf800707f
-#define MATCH_AMOMIN_W 0x8000202f
-#define MASK_AMOMIN_W  0xf800707f
-#define MATCH_AMOMAX_W 0xa000202f
-#define MASK_AMOMAX_W  0xf800707f
-#define MATCH_AMOMINU_W 0xc000202f
-#define MASK_AMOMINU_W  0xf800707f
-#define MATCH_AMOMAXU_W 0xe000202f
-#define MASK_AMOMAXU_W  0xf800707f
-#define MATCH_AMOSWAP_W 0x800202f
-#define MASK_AMOSWAP_W  0xf800707f
-#define MATCH_LR_W 0x1000202f
-#define MASK_LR_W  0xf9f0707f
-#define MATCH_SC_W 0x1800202f
-#define MASK_SC_W  0xf800707f
-#define MATCH_AMOADD_D 0x302f
-#define MASK_AMOADD_D  0xf800707f
-#define MATCH_AMOXOR_D 0x2000302f
-#define MASK_AMOXOR_D  0xf800707f
-#define MATCH_AMOOR_D 0x4000302f
-#define MASK_AMOOR_D  0xf800707f
-#define MATCH_AMOAND_D 0x6000302f
-#define MASK_AMOAND_D  0xf800707f
-#define MATCH_AMOMIN_D 0x8000302f
-#define MASK_AMOMIN_D  0xf800707f
-#define MATCH_AMOMAX_D 0xa000302f
-#define MASK_AMOMAX_D  0xf800707f
-#define MATCH_AMOMINU_D 0xc000302f
-#define MASK_AMOMINU_D  0xf800707f
-#define MATCH_AMOMAXU_D 0xe000302f
-#define MASK_AMOMAXU_D  0xf800707f
-#define MATCH_AMOSWAP_D 0x800302f
-#define MASK_AMOSWAP_D  0xf800707f
-#define MATCH_LR_D 0x1000302f
-#define MASK_LR_D  0xf9f0707f
-#define MATCH_SC_D 0x1800302f
-#define MASK_SC_D  0xf800707f
-#define MATCH_ECALL 0x73
-#define MASK_ECALL  0xffffffff
-#define MATCH_EBREAK 0x100073
-#define MASK_EBREAK  0xffffffff
-#define MATCH_URET 0x200073
-#define MASK_URET  0xffffffff
-#define MATCH_SRET 0x10200073
-#define MASK_SRET  0xffffffff
-#define MATCH_HRET 0x20200073
-#define MASK_HRET  0xffffffff
-#define MATCH_MRET 0x30200073
-#define MASK_MRET  0xffffffff
-#define MATCH_DRET 0x7b200073
-#define MASK_DRET  0xffffffff
-#define MATCH_SFENCE_VM 0x10400073
-#define MASK_SFENCE_VM  0xfff07fff
-#define MATCH_WFI 0x10500073
-#define MASK_WFI  0xffffffff
-#define MATCH_CSRRW 0x1073
-#define MASK_CSRRW  0x707f
-#define MATCH_CSRRS 0x2073
-#define MASK_CSRRS  0x707f
-#define MATCH_CSRRC 0x3073
-#define MASK_CSRRC  0x707f
-#define MATCH_CSRRWI 0x5073
-#define MASK_CSRRWI  0x707f
-#define MATCH_CSRRSI 0x6073
-#define MASK_CSRRSI  0x707f
-#define MATCH_CSRRCI 0x7073
-#define MASK_CSRRCI  0x707f
-#define MATCH_FADD_S 0x53
-#define MASK_FADD_S  0xfe00007f
-#define MATCH_FSUB_S 0x8000053
-#define MASK_FSUB_S  0xfe00007f
-#define MATCH_FMUL_S 0x10000053
-#define MASK_FMUL_S  0xfe00007f
-#define MATCH_FDIV_S 0x18000053
-#define MASK_FDIV_S  0xfe00007f
-#define MATCH_FSGNJ_S 0x20000053
-#define MASK_FSGNJ_S  0xfe00707f
-#define MATCH_FSGNJN_S 0x20001053
-#define MASK_FSGNJN_S  0xfe00707f
-#define MATCH_FSGNJX_S 0x20002053
-#define MASK_FSGNJX_S  0xfe00707f
-#define MATCH_FMIN_S 0x28000053
-#define MASK_FMIN_S  0xfe00707f
-#define MATCH_FMAX_S 0x28001053
-#define MASK_FMAX_S  0xfe00707f
-#define MATCH_FSQRT_S 0x58000053
-#define MASK_FSQRT_S  0xfff0007f
-#define MATCH_FADD_D 0x2000053
-#define MASK_FADD_D  0xfe00007f
-#define MATCH_FSUB_D 0xa000053
-#define MASK_FSUB_D  0xfe00007f
-#define MATCH_FMUL_D 0x12000053
-#define MASK_FMUL_D  0xfe00007f
-#define MATCH_FDIV_D 0x1a000053
-#define MASK_FDIV_D  0xfe00007f
-#define MATCH_FSGNJ_D 0x22000053
-#define MASK_FSGNJ_D  0xfe00707f
-#define MATCH_FSGNJN_D 0x22001053
-#define MASK_FSGNJN_D  0xfe00707f
-#define MATCH_FSGNJX_D 0x22002053
-#define MASK_FSGNJX_D  0xfe00707f
-#define MATCH_FMIN_D 0x2a000053
-#define MASK_FMIN_D  0xfe00707f
-#define MATCH_FMAX_D 0x2a001053
-#define MASK_FMAX_D  0xfe00707f
-#define MATCH_FCVT_S_D 0x40100053
-#define MASK_FCVT_S_D  0xfff0007f
-#define MATCH_FCVT_D_S 0x42000053
-#define MASK_FCVT_D_S  0xfff0007f
-#define MATCH_FSQRT_D 0x5a000053
-#define MASK_FSQRT_D  0xfff0007f
-#define MATCH_FLE_S 0xa0000053
-#define MASK_FLE_S  0xfe00707f
-#define MATCH_FLT_S 0xa0001053
-#define MASK_FLT_S  0xfe00707f
-#define MATCH_FEQ_S 0xa0002053
-#define MASK_FEQ_S  0xfe00707f
-#define MATCH_FLE_D 0xa2000053
-#define MASK_FLE_D  0xfe00707f
-#define MATCH_FLT_D 0xa2001053
-#define MASK_FLT_D  0xfe00707f
-#define MATCH_FEQ_D 0xa2002053
-#define MASK_FEQ_D  0xfe00707f
-#define MATCH_FCVT_W_S 0xc0000053
-#define MASK_FCVT_W_S  0xfff0007f
-#define MATCH_FCVT_WU_S 0xc0100053
-#define MASK_FCVT_WU_S  0xfff0007f
-#define MATCH_FCVT_L_S 0xc0200053
-#define MASK_FCVT_L_S  0xfff0007f
-#define MATCH_FCVT_LU_S 0xc0300053
-#define MASK_FCVT_LU_S  0xfff0007f
-#define MATCH_FMV_X_S 0xe0000053
-#define MASK_FMV_X_S  0xfff0707f
-#define MATCH_FCLASS_S 0xe0001053
-#define MASK_FCLASS_S  0xfff0707f
-#define MATCH_FCVT_W_D 0xc2000053
-#define MASK_FCVT_W_D  0xfff0007f
-#define MATCH_FCVT_WU_D 0xc2100053
-#define MASK_FCVT_WU_D  0xfff0007f
-#define MATCH_FCVT_L_D 0xc2200053
-#define MASK_FCVT_L_D  0xfff0007f
-#define MATCH_FCVT_LU_D 0xc2300053
-#define MASK_FCVT_LU_D  0xfff0007f
-#define MATCH_FMV_X_D 0xe2000053
-#define MASK_FMV_X_D  0xfff0707f
-#define MATCH_FCLASS_D 0xe2001053
-#define MASK_FCLASS_D  0xfff0707f
-#define MATCH_FCVT_S_W 0xd0000053
-#define MASK_FCVT_S_W  0xfff0007f
-#define MATCH_FCVT_S_WU 0xd0100053
-#define MASK_FCVT_S_WU  0xfff0007f
-#define MATCH_FCVT_S_L 0xd0200053
-#define MASK_FCVT_S_L  0xfff0007f
-#define MATCH_FCVT_S_LU 0xd0300053
-#define MASK_FCVT_S_LU  0xfff0007f
-#define MATCH_FMV_S_X 0xf0000053
-#define MASK_FMV_S_X  0xfff0707f
-#define MATCH_FCVT_D_W 0xd2000053
-#define MASK_FCVT_D_W  0xfff0007f
-#define MATCH_FCVT_D_WU 0xd2100053
-#define MASK_FCVT_D_WU  0xfff0007f
-#define MATCH_FCVT_D_L 0xd2200053
-#define MASK_FCVT_D_L  0xfff0007f
-#define MATCH_FCVT_D_LU 0xd2300053
-#define MASK_FCVT_D_LU  0xfff0007f
-#define MATCH_FMV_D_X 0xf2000053
-#define MASK_FMV_D_X  0xfff0707f
-#define MATCH_FLW 0x2007
-#define MASK_FLW  0x707f
-#define MATCH_FLD 0x3007
-#define MASK_FLD  0x707f
-#define MATCH_FSW 0x2027
-#define MASK_FSW  0x707f
-#define MATCH_FSD 0x3027
-#define MASK_FSD  0x707f
-#define MATCH_FMADD_S 0x43
-#define MASK_FMADD_S  0x600007f
-#define MATCH_FMSUB_S 0x47
-#define MASK_FMSUB_S  0x600007f
-#define MATCH_FNMSUB_S 0x4b
-#define MASK_FNMSUB_S  0x600007f
-#define MATCH_FNMADD_S 0x4f
-#define MASK_FNMADD_S  0x600007f
-#define MATCH_FMADD_D 0x2000043
-#define MASK_FMADD_D  0x600007f
-#define MATCH_FMSUB_D 0x2000047
-#define MASK_FMSUB_D  0x600007f
-#define MATCH_FNMSUB_D 0x200004b
-#define MASK_FNMSUB_D  0x600007f
-#define MATCH_FNMADD_D 0x200004f
-#define MASK_FNMADD_D  0x600007f
-#define MATCH_C_NOP 0x1
-#define MASK_C_NOP  0xffff
-#define MATCH_C_ADDI16SP 0x6101
-#define MASK_C_ADDI16SP  0xef83
-#define MATCH_C_JR 0x8002
-#define MASK_C_JR  0xf07f
-#define MATCH_C_JALR 0x9002
-#define MASK_C_JALR  0xf07f
-#define MATCH_C_EBREAK 0x9002
-#define MASK_C_EBREAK  0xffff
-#define MATCH_C_LD 0x6000
-#define MASK_C_LD  0xe003
-#define MATCH_C_SD 0xe000
-#define MASK_C_SD  0xe003
-#define MATCH_C_ADDIW 0x2001
-#define MASK_C_ADDIW  0xe003
-#define MATCH_C_LDSP 0x6002
-#define MASK_C_LDSP  0xe003
-#define MATCH_C_SDSP 0xe002
-#define MASK_C_SDSP  0xe003
-#define MATCH_C_ADDI4SPN 0x0
-#define MASK_C_ADDI4SPN  0xe003
-#define MATCH_C_FLD 0x2000
-#define MASK_C_FLD  0xe003
-#define MATCH_C_LW 0x4000
-#define MASK_C_LW  0xe003
-#define MATCH_C_FLW 0x6000
-#define MASK_C_FLW  0xe003
-#define MATCH_C_FSD 0xa000
-#define MASK_C_FSD  0xe003
-#define MATCH_C_SW 0xc000
-#define MASK_C_SW  0xe003
-#define MATCH_C_FSW 0xe000
-#define MASK_C_FSW  0xe003
-#define MATCH_C_ADDI 0x1
-#define MASK_C_ADDI  0xe003
-#define MATCH_C_JAL 0x2001
-#define MASK_C_JAL  0xe003
-#define MATCH_C_LI 0x4001
-#define MASK_C_LI  0xe003
-#define MATCH_C_LUI 0x6001
-#define MASK_C_LUI  0xe003
-#define MATCH_C_SRLI 0x8001
-#define MASK_C_SRLI  0xec03
-#define MATCH_C_SRAI 0x8401
-#define MASK_C_SRAI  0xec03
-#define MATCH_C_ANDI 0x8801
-#define MASK_C_ANDI  0xec03
-#define MATCH_C_SUB 0x8c01
-#define MASK_C_SUB  0xfc63
-#define MATCH_C_XOR 0x8c21
-#define MASK_C_XOR  0xfc63
-#define MATCH_C_OR 0x8c41
-#define MASK_C_OR  0xfc63
-#define MATCH_C_AND 0x8c61
-#define MASK_C_AND  0xfc63
-#define MATCH_C_SUBW 0x9c01
-#define MASK_C_SUBW  0xfc63
-#define MATCH_C_ADDW 0x9c21
-#define MASK_C_ADDW  0xfc63
-#define MATCH_C_J 0xa001
-#define MASK_C_J  0xe003
-#define MATCH_C_BEQZ 0xc001
-#define MASK_C_BEQZ  0xe003
-#define MATCH_C_BNEZ 0xe001
-#define MASK_C_BNEZ  0xe003
-#define MATCH_C_SLLI 0x2
-#define MASK_C_SLLI  0xe003
-#define MATCH_C_FLDSP 0x2002
-#define MASK_C_FLDSP  0xe003
-#define MATCH_C_LWSP 0x4002
-#define MASK_C_LWSP  0xe003
-#define MATCH_C_FLWSP 0x6002
-#define MASK_C_FLWSP  0xe003
-#define MATCH_C_MV 0x8002
-#define MASK_C_MV  0xf003
-#define MATCH_C_ADD 0x9002
-#define MASK_C_ADD  0xf003
-#define MATCH_C_FSDSP 0xa002
-#define MASK_C_FSDSP  0xe003
-#define MATCH_C_SWSP 0xc002
-#define MASK_C_SWSP  0xe003
-#define MATCH_C_FSWSP 0xe002
-#define MASK_C_FSWSP  0xe003
-#define MATCH_CUSTOM0 0xb
-#define MASK_CUSTOM0  0x707f
-#define MATCH_CUSTOM0_RS1 0x200b
-#define MASK_CUSTOM0_RS1  0x707f
-#define MATCH_CUSTOM0_RS1_RS2 0x300b
-#define MASK_CUSTOM0_RS1_RS2  0x707f
-#define MATCH_CUSTOM0_RD 0x400b
-#define MASK_CUSTOM0_RD  0x707f
-#define MATCH_CUSTOM0_RD_RS1 0x600b
-#define MASK_CUSTOM0_RD_RS1  0x707f
-#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
-#define MASK_CUSTOM0_RD_RS1_RS2  0x707f
-#define MATCH_CUSTOM1 0x2b
-#define MASK_CUSTOM1  0x707f
-#define MATCH_CUSTOM1_RS1 0x202b
-#define MASK_CUSTOM1_RS1  0x707f
-#define MATCH_CUSTOM1_RS1_RS2 0x302b
-#define MASK_CUSTOM1_RS1_RS2  0x707f
-#define MATCH_CUSTOM1_RD 0x402b
-#define MASK_CUSTOM1_RD  0x707f
-#define MATCH_CUSTOM1_RD_RS1 0x602b
-#define MASK_CUSTOM1_RD_RS1  0x707f
-#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
-#define MASK_CUSTOM1_RD_RS1_RS2  0x707f
-#define MATCH_CUSTOM2 0x5b
-#define MASK_CUSTOM2  0x707f
-#define MATCH_CUSTOM2_RS1 0x205b
-#define MASK_CUSTOM2_RS1  0x707f
-#define MATCH_CUSTOM2_RS1_RS2 0x305b
-#define MASK_CUSTOM2_RS1_RS2  0x707f
-#define MATCH_CUSTOM2_RD 0x405b
-#define MASK_CUSTOM2_RD  0x707f
-#define MATCH_CUSTOM2_RD_RS1 0x605b
-#define MASK_CUSTOM2_RD_RS1  0x707f
-#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
-#define MASK_CUSTOM2_RD_RS1_RS2  0x707f
-#define MATCH_CUSTOM3 0x7b
-#define MASK_CUSTOM3  0x707f
-#define MATCH_CUSTOM3_RS1 0x207b
-#define MASK_CUSTOM3_RS1  0x707f
-#define MATCH_CUSTOM3_RS1_RS2 0x307b
-#define MASK_CUSTOM3_RS1_RS2  0x707f
-#define MATCH_CUSTOM3_RD 0x407b
-#define MASK_CUSTOM3_RD  0x707f
-#define MATCH_CUSTOM3_RD_RS1 0x607b
-#define MASK_CUSTOM3_RD_RS1  0x707f
-#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
-#define MASK_CUSTOM3_RD_RS1_RS2  0x707f
-#define CSR_FFLAGS 0x1
-#define CSR_FRM 0x2
-#define CSR_FCSR 0x3
-#define CSR_CYCLE 0xc00
-#define CSR_TIME 0xc01
-#define CSR_INSTRET 0xc02
-#define CSR_HPMCOUNTER3 0xc03
-#define CSR_HPMCOUNTER4 0xc04
-#define CSR_HPMCOUNTER5 0xc05
-#define CSR_HPMCOUNTER6 0xc06
-#define CSR_HPMCOUNTER7 0xc07
-#define CSR_HPMCOUNTER8 0xc08
-#define CSR_HPMCOUNTER9 0xc09
-#define CSR_HPMCOUNTER10 0xc0a
-#define CSR_HPMCOUNTER11 0xc0b
-#define CSR_HPMCOUNTER12 0xc0c
-#define CSR_HPMCOUNTER13 0xc0d
-#define CSR_HPMCOUNTER14 0xc0e
-#define CSR_HPMCOUNTER15 0xc0f
-#define CSR_HPMCOUNTER16 0xc10
-#define CSR_HPMCOUNTER17 0xc11
-#define CSR_HPMCOUNTER18 0xc12
-#define CSR_HPMCOUNTER19 0xc13
-#define CSR_HPMCOUNTER20 0xc14
-#define CSR_HPMCOUNTER21 0xc15
-#define CSR_HPMCOUNTER22 0xc16
-#define CSR_HPMCOUNTER23 0xc17
-#define CSR_HPMCOUNTER24 0xc18
-#define CSR_HPMCOUNTER25 0xc19
-#define CSR_HPMCOUNTER26 0xc1a
-#define CSR_HPMCOUNTER27 0xc1b
-#define CSR_HPMCOUNTER28 0xc1c
-#define CSR_HPMCOUNTER29 0xc1d
-#define CSR_HPMCOUNTER30 0xc1e
-#define CSR_HPMCOUNTER31 0xc1f
-#define CSR_SSTATUS 0x100
-#define CSR_SIE 0x104
-#define CSR_STVEC 0x105
-#define CSR_SSCRATCH 0x140
-#define CSR_SEPC 0x141
-#define CSR_SCAUSE 0x142
-#define CSR_SBADADDR 0x143
-#define CSR_SIP 0x144
-#define CSR_SPTBR 0x180
-#define CSR_MSTATUS 0x300
-#define CSR_MISA 0x301
-#define CSR_MEDELEG 0x302
-#define CSR_MIDELEG 0x303
-#define CSR_MIE 0x304
-#define CSR_MTVEC 0x305
-#define CSR_MSCRATCH 0x340
-#define CSR_MEPC 0x341
-#define CSR_MCAUSE 0x342
-#define CSR_MBADADDR 0x343
-#define CSR_MIP 0x344
-#define CSR_TSELECT 0x7a0
-#define CSR_TDATA1 0x7a1
-#define CSR_TDATA2 0x7a2
-#define CSR_TDATA3 0x7a3
-#define CSR_DCSR 0x7b0
-#define CSR_DPC 0x7b1
-#define CSR_DSCRATCH 0x7b2
-#define CSR_MCYCLE 0xb00
-#define CSR_MINSTRET 0xb02
-#define CSR_MHPMCOUNTER3 0xb03
-#define CSR_MHPMCOUNTER4 0xb04
-#define CSR_MHPMCOUNTER5 0xb05
-#define CSR_MHPMCOUNTER6 0xb06
-#define CSR_MHPMCOUNTER7 0xb07
-#define CSR_MHPMCOUNTER8 0xb08
-#define CSR_MHPMCOUNTER9 0xb09
-#define CSR_MHPMCOUNTER10 0xb0a
-#define CSR_MHPMCOUNTER11 0xb0b
-#define CSR_MHPMCOUNTER12 0xb0c
-#define CSR_MHPMCOUNTER13 0xb0d
-#define CSR_MHPMCOUNTER14 0xb0e
-#define CSR_MHPMCOUNTER15 0xb0f
-#define CSR_MHPMCOUNTER16 0xb10
-#define CSR_MHPMCOUNTER17 0xb11
-#define CSR_MHPMCOUNTER18 0xb12
-#define CSR_MHPMCOUNTER19 0xb13
-#define CSR_MHPMCOUNTER20 0xb14
-#define CSR_MHPMCOUNTER21 0xb15
-#define CSR_MHPMCOUNTER22 0xb16
-#define CSR_MHPMCOUNTER23 0xb17
-#define CSR_MHPMCOUNTER24 0xb18
-#define CSR_MHPMCOUNTER25 0xb19
-#define CSR_MHPMCOUNTER26 0xb1a
-#define CSR_MHPMCOUNTER27 0xb1b
-#define CSR_MHPMCOUNTER28 0xb1c
-#define CSR_MHPMCOUNTER29 0xb1d
-#define CSR_MHPMCOUNTER30 0xb1e
-#define CSR_MHPMCOUNTER31 0xb1f
-#define CSR_MUCOUNTEREN 0x320
-#define CSR_MSCOUNTEREN 0x321
-#define CSR_MHPMEVENT3 0x323
-#define CSR_MHPMEVENT4 0x324
-#define CSR_MHPMEVENT5 0x325
-#define CSR_MHPMEVENT6 0x326
-#define CSR_MHPMEVENT7 0x327
-#define CSR_MHPMEVENT8 0x328
-#define CSR_MHPMEVENT9 0x329
-#define CSR_MHPMEVENT10 0x32a
-#define CSR_MHPMEVENT11 0x32b
-#define CSR_MHPMEVENT12 0x32c
-#define CSR_MHPMEVENT13 0x32d
-#define CSR_MHPMEVENT14 0x32e
-#define CSR_MHPMEVENT15 0x32f
-#define CSR_MHPMEVENT16 0x330
-#define CSR_MHPMEVENT17 0x331
-#define CSR_MHPMEVENT18 0x332
-#define CSR_MHPMEVENT19 0x333
-#define CSR_MHPMEVENT20 0x334
-#define CSR_MHPMEVENT21 0x335
-#define CSR_MHPMEVENT22 0x336
-#define CSR_MHPMEVENT23 0x337
-#define CSR_MHPMEVENT24 0x338
-#define CSR_MHPMEVENT25 0x339
-#define CSR_MHPMEVENT26 0x33a
-#define CSR_MHPMEVENT27 0x33b
-#define CSR_MHPMEVENT28 0x33c
-#define CSR_MHPMEVENT29 0x33d
-#define CSR_MHPMEVENT30 0x33e
-#define CSR_MHPMEVENT31 0x33f
-#define CSR_MVENDORID 0xf11
-#define CSR_MARCHID 0xf12
-#define CSR_MIMPID 0xf13
-#define CSR_MHARTID 0xf14
-#define CSR_CYCLEH 0xc80
-#define CSR_TIMEH 0xc81
-#define CSR_INSTRETH 0xc82
-#define CSR_HPMCOUNTER3H 0xc83
-#define CSR_HPMCOUNTER4H 0xc84
-#define CSR_HPMCOUNTER5H 0xc85
-#define CSR_HPMCOUNTER6H 0xc86
-#define CSR_HPMCOUNTER7H 0xc87
-#define CSR_HPMCOUNTER8H 0xc88
-#define CSR_HPMCOUNTER9H 0xc89
-#define CSR_HPMCOUNTER10H 0xc8a
-#define CSR_HPMCOUNTER11H 0xc8b
-#define CSR_HPMCOUNTER12H 0xc8c
-#define CSR_HPMCOUNTER13H 0xc8d
-#define CSR_HPMCOUNTER14H 0xc8e
-#define CSR_HPMCOUNTER15H 0xc8f
-#define CSR_HPMCOUNTER16H 0xc90
-#define CSR_HPMCOUNTER17H 0xc91
-#define CSR_HPMCOUNTER18H 0xc92
-#define CSR_HPMCOUNTER19H 0xc93
-#define CSR_HPMCOUNTER20H 0xc94
-#define CSR_HPMCOUNTER21H 0xc95
-#define CSR_HPMCOUNTER22H 0xc96
-#define CSR_HPMCOUNTER23H 0xc97
-#define CSR_HPMCOUNTER24H 0xc98
-#define CSR_HPMCOUNTER25H 0xc99
-#define CSR_HPMCOUNTER26H 0xc9a
-#define CSR_HPMCOUNTER27H 0xc9b
-#define CSR_HPMCOUNTER28H 0xc9c
-#define CSR_HPMCOUNTER29H 0xc9d
-#define CSR_HPMCOUNTER30H 0xc9e
-#define CSR_HPMCOUNTER31H 0xc9f
-#define CSR_MCYCLEH 0xb80
-#define CSR_MINSTRETH 0xb82
-#define CSR_MHPMCOUNTER3H 0xb83
-#define CSR_MHPMCOUNTER4H 0xb84
-#define CSR_MHPMCOUNTER5H 0xb85
-#define CSR_MHPMCOUNTER6H 0xb86
-#define CSR_MHPMCOUNTER7H 0xb87
-#define CSR_MHPMCOUNTER8H 0xb88
-#define CSR_MHPMCOUNTER9H 0xb89
-#define CSR_MHPMCOUNTER10H 0xb8a
-#define CSR_MHPMCOUNTER11H 0xb8b
-#define CSR_MHPMCOUNTER12H 0xb8c
-#define CSR_MHPMCOUNTER13H 0xb8d
-#define CSR_MHPMCOUNTER14H 0xb8e
-#define CSR_MHPMCOUNTER15H 0xb8f
-#define CSR_MHPMCOUNTER16H 0xb90
-#define CSR_MHPMCOUNTER17H 0xb91
-#define CSR_MHPMCOUNTER18H 0xb92
-#define CSR_MHPMCOUNTER19H 0xb93
-#define CSR_MHPMCOUNTER20H 0xb94
-#define CSR_MHPMCOUNTER21H 0xb95
-#define CSR_MHPMCOUNTER22H 0xb96
-#define CSR_MHPMCOUNTER23H 0xb97
-#define CSR_MHPMCOUNTER24H 0xb98
-#define CSR_MHPMCOUNTER25H 0xb99
-#define CSR_MHPMCOUNTER26H 0xb9a
-#define CSR_MHPMCOUNTER27H 0xb9b
-#define CSR_MHPMCOUNTER28H 0xb9c
-#define CSR_MHPMCOUNTER29H 0xb9d
-#define CSR_MHPMCOUNTER30H 0xb9e
-#define CSR_MHPMCOUNTER31H 0xb9f
-#define CAUSE_MISALIGNED_FETCH 0x0
-#define CAUSE_FAULT_FETCH 0x1
-#define CAUSE_ILLEGAL_INSTRUCTION 0x2
-#define CAUSE_BREAKPOINT 0x3
-#define CAUSE_MISALIGNED_LOAD 0x4
-#define CAUSE_FAULT_LOAD 0x5
-#define CAUSE_MISALIGNED_STORE 0x6
-#define CAUSE_FAULT_STORE 0x7
-#define CAUSE_USER_ECALL 0x8
-#define CAUSE_SUPERVISOR_ECALL 0x9
-#define CAUSE_HYPERVISOR_ECALL 0xa
-#define CAUSE_MACHINE_ECALL 0xb
-#endif
-#ifdef DECLARE_INSN
-DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
-DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
-DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
-DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
-DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
-DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
-DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
-DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
-DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
-DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
-DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
-DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
-DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
-DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
-DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
-DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
-DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
-DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
-DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
-DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
-DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
-DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
-DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
-DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
-DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
-DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
-DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
-DECLARE_INSN(or, MATCH_OR, MASK_OR)
-DECLARE_INSN(and, MATCH_AND, MASK_AND)
-DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
-DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
-DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
-DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
-DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
-DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
-DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
-DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
-DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
-DECLARE_INSN(lb, MATCH_LB, MASK_LB)
-DECLARE_INSN(lh, MATCH_LH, MASK_LH)
-DECLARE_INSN(lw, MATCH_LW, MASK_LW)
-DECLARE_INSN(ld, MATCH_LD, MASK_LD)
-DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
-DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
-DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
-DECLARE_INSN(sb, MATCH_SB, MASK_SB)
-DECLARE_INSN(sh, MATCH_SH, MASK_SH)
-DECLARE_INSN(sw, MATCH_SW, MASK_SW)
-DECLARE_INSN(sd, MATCH_SD, MASK_SD)
-DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
-DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
-DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
-DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
-DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
-DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
-DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
-DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
-DECLARE_INSN(rem, MATCH_REM, MASK_REM)
-DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
-DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
-DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
-DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
-DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
-DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
-DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
-DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
-DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
-DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
-DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
-DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
-DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
-DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
-DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
-DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
-DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
-DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
-DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
-DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
-DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
-DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
-DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
-DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
-DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
-DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
-DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
-DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
-DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
-DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
-DECLARE_INSN(uret, MATCH_URET, MASK_URET)
-DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
-DECLARE_INSN(hret, MATCH_HRET, MASK_HRET)
-DECLARE_INSN(mret, MATCH_MRET, MASK_MRET)
-DECLARE_INSN(dret, MATCH_DRET, MASK_DRET)
-DECLARE_INSN(sfence_vm, MATCH_SFENCE_VM, MASK_SFENCE_VM)
-DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
-DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
-DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
-DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
-DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
-DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
-DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
-DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
-DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
-DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
-DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
-DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
-DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
-DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
-DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
-DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
-DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
-DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
-DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
-DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
-DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
-DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
-DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
-DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
-DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
-DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
-DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
-DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
-DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
-DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
-DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
-DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
-DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
-DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
-DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
-DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
-DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
-DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
-DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
-DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
-DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
-DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
-DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
-DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
-DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
-DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
-DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
-DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
-DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
-DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
-DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
-DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
-DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
-DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
-DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
-DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
-DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
-DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
-DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
-DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
-DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
-DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
-DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
-DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
-DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
-DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
-DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
-DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
-DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
-DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP)
-DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP)
-DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
-DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
-DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK)
-DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
-DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
-DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
-DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
-DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
-DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN)
-DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
-DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
-DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW)
-DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD)
-DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
-DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW)
-DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
-DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
-DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
-DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI)
-DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
-DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
-DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI)
-DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
-DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR)
-DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR)
-DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND)
-DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW)
-DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
-DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
-DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
-DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
-DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
-DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP)
-DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
-DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP)
-DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV)
-DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
-DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP)
-DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
-DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP)
-DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
-DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
-DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
-DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
-DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
-DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
-DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
-DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
-DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
-DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
-DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
-DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
-DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
-DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
-DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
-DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
-DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
-DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
-DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
-DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
-DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
-DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
-DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
-DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
-#endif
-#ifdef DECLARE_CSR
-DECLARE_CSR(fflags, CSR_FFLAGS)
-DECLARE_CSR(frm, CSR_FRM)
-DECLARE_CSR(fcsr, CSR_FCSR)
-DECLARE_CSR(cycle, CSR_CYCLE)
-DECLARE_CSR(time, CSR_TIME)
-DECLARE_CSR(instret, CSR_INSTRET)
-DECLARE_CSR(hpmcounter3, CSR_HPMCOUNTER3)
-DECLARE_CSR(hpmcounter4, CSR_HPMCOUNTER4)
-DECLARE_CSR(hpmcounter5, CSR_HPMCOUNTER5)
-DECLARE_CSR(hpmcounter6, CSR_HPMCOUNTER6)
-DECLARE_CSR(hpmcounter7, CSR_HPMCOUNTER7)
-DECLARE_CSR(hpmcounter8, CSR_HPMCOUNTER8)
-DECLARE_CSR(hpmcounter9, CSR_HPMCOUNTER9)
-DECLARE_CSR(hpmcounter10, CSR_HPMCOUNTER10)
-DECLARE_CSR(hpmcounter11, CSR_HPMCOUNTER11)
-DECLARE_CSR(hpmcounter12, CSR_HPMCOUNTER12)
-DECLARE_CSR(hpmcounter13, CSR_HPMCOUNTER13)
-DECLARE_CSR(hpmcounter14, CSR_HPMCOUNTER14)
-DECLARE_CSR(hpmcounter15, CSR_HPMCOUNTER15)
-DECLARE_CSR(hpmcounter16, CSR_HPMCOUNTER16)
-DECLARE_CSR(hpmcounter17, CSR_HPMCOUNTER17)
-DECLARE_CSR(hpmcounter18, CSR_HPMCOUNTER18)
-DECLARE_CSR(hpmcounter19, CSR_HPMCOUNTER19)
-DECLARE_CSR(hpmcounter20, CSR_HPMCOUNTER20)
-DECLARE_CSR(hpmcounter21, CSR_HPMCOUNTER21)
-DECLARE_CSR(hpmcounter22, CSR_HPMCOUNTER22)
-DECLARE_CSR(hpmcounter23, CSR_HPMCOUNTER23)
-DECLARE_CSR(hpmcounter24, CSR_HPMCOUNTER24)
-DECLARE_CSR(hpmcounter25, CSR_HPMCOUNTER25)
-DECLARE_CSR(hpmcounter26, CSR_HPMCOUNTER26)
-DECLARE_CSR(hpmcounter27, CSR_HPMCOUNTER27)
-DECLARE_CSR(hpmcounter28, CSR_HPMCOUNTER28)
-DECLARE_CSR(hpmcounter29, CSR_HPMCOUNTER29)
-DECLARE_CSR(hpmcounter30, CSR_HPMCOUNTER30)
-DECLARE_CSR(hpmcounter31, CSR_HPMCOUNTER31)
-DECLARE_CSR(sstatus, CSR_SSTATUS)
-DECLARE_CSR(sie, CSR_SIE)
-DECLARE_CSR(stvec, CSR_STVEC)
-DECLARE_CSR(sscratch, CSR_SSCRATCH)
-DECLARE_CSR(sepc, CSR_SEPC)
-DECLARE_CSR(scause, CSR_SCAUSE)
-DECLARE_CSR(sbadaddr, CSR_SBADADDR)
-DECLARE_CSR(sip, CSR_SIP)
-DECLARE_CSR(sptbr, CSR_SPTBR)
-DECLARE_CSR(mstatus, CSR_MSTATUS)
-DECLARE_CSR(misa, CSR_MISA)
-DECLARE_CSR(medeleg, CSR_MEDELEG)
-DECLARE_CSR(mideleg, CSR_MIDELEG)
-DECLARE_CSR(mie, CSR_MIE)
-DECLARE_CSR(mtvec, CSR_MTVEC)
-DECLARE_CSR(mscratch, CSR_MSCRATCH)
-DECLARE_CSR(mepc, CSR_MEPC)
-DECLARE_CSR(mcause, CSR_MCAUSE)
-DECLARE_CSR(mbadaddr, CSR_MBADADDR)
-DECLARE_CSR(mip, CSR_MIP)
-DECLARE_CSR(tselect, CSR_TSELECT)
-DECLARE_CSR(tdata1, CSR_TDATA1)
-DECLARE_CSR(tdata2, CSR_TDATA2)
-DECLARE_CSR(tdata3, CSR_TDATA3)
-DECLARE_CSR(dcsr, CSR_DCSR)
-DECLARE_CSR(dpc, CSR_DPC)
-DECLARE_CSR(dscratch, CSR_DSCRATCH)
-DECLARE_CSR(mcycle, CSR_MCYCLE)
-DECLARE_CSR(minstret, CSR_MINSTRET)
-DECLARE_CSR(mhpmcounter3, CSR_MHPMCOUNTER3)
-DECLARE_CSR(mhpmcounter4, CSR_MHPMCOUNTER4)
-DECLARE_CSR(mhpmcounter5, CSR_MHPMCOUNTER5)
-DECLARE_CSR(mhpmcounter6, CSR_MHPMCOUNTER6)
-DECLARE_CSR(mhpmcounter7, CSR_MHPMCOUNTER7)
-DECLARE_CSR(mhpmcounter8, CSR_MHPMCOUNTER8)
-DECLARE_CSR(mhpmcounter9, CSR_MHPMCOUNTER9)
-DECLARE_CSR(mhpmcounter10, CSR_MHPMCOUNTER10)
-DECLARE_CSR(mhpmcounter11, CSR_MHPMCOUNTER11)
-DECLARE_CSR(mhpmcounter12, CSR_MHPMCOUNTER12)
-DECLARE_CSR(mhpmcounter13, CSR_MHPMCOUNTER13)
-DECLARE_CSR(mhpmcounter14, CSR_MHPMCOUNTER14)
-DECLARE_CSR(mhpmcounter15, CSR_MHPMCOUNTER15)
-DECLARE_CSR(mhpmcounter16, CSR_MHPMCOUNTER16)
-DECLARE_CSR(mhpmcounter17, CSR_MHPMCOUNTER17)
-DECLARE_CSR(mhpmcounter18, CSR_MHPMCOUNTER18)
-DECLARE_CSR(mhpmcounter19, CSR_MHPMCOUNTER19)
-DECLARE_CSR(mhpmcounter20, CSR_MHPMCOUNTER20)
-DECLARE_CSR(mhpmcounter21, CSR_MHPMCOUNTER21)
-DECLARE_CSR(mhpmcounter22, CSR_MHPMCOUNTER22)
-DECLARE_CSR(mhpmcounter23, CSR_MHPMCOUNTER23)
-DECLARE_CSR(mhpmcounter24, CSR_MHPMCOUNTER24)
-DECLARE_CSR(mhpmcounter25, CSR_MHPMCOUNTER25)
-DECLARE_CSR(mhpmcounter26, CSR_MHPMCOUNTER26)
-DECLARE_CSR(mhpmcounter27, CSR_MHPMCOUNTER27)
-DECLARE_CSR(mhpmcounter28, CSR_MHPMCOUNTER28)
-DECLARE_CSR(mhpmcounter29, CSR_MHPMCOUNTER29)
-DECLARE_CSR(mhpmcounter30, CSR_MHPMCOUNTER30)
-DECLARE_CSR(mhpmcounter31, CSR_MHPMCOUNTER31)
-DECLARE_CSR(mucounteren, CSR_MUCOUNTEREN)
-DECLARE_CSR(mscounteren, CSR_MSCOUNTEREN)
-DECLARE_CSR(mhpmevent3, CSR_MHPMEVENT3)
-DECLARE_CSR(mhpmevent4, CSR_MHPMEVENT4)
-DECLARE_CSR(mhpmevent5, CSR_MHPMEVENT5)
-DECLARE_CSR(mhpmevent6, CSR_MHPMEVENT6)
-DECLARE_CSR(mhpmevent7, CSR_MHPMEVENT7)
-DECLARE_CSR(mhpmevent8, CSR_MHPMEVENT8)
-DECLARE_CSR(mhpmevent9, CSR_MHPMEVENT9)
-DECLARE_CSR(mhpmevent10, CSR_MHPMEVENT10)
-DECLARE_CSR(mhpmevent11, CSR_MHPMEVENT11)
-DECLARE_CSR(mhpmevent12, CSR_MHPMEVENT12)
-DECLARE_CSR(mhpmevent13, CSR_MHPMEVENT13)
-DECLARE_CSR(mhpmevent14, CSR_MHPMEVENT14)
-DECLARE_CSR(mhpmevent15, CSR_MHPMEVENT15)
-DECLARE_CSR(mhpmevent16, CSR_MHPMEVENT16)
-DECLARE_CSR(mhpmevent17, CSR_MHPMEVENT17)
-DECLARE_CSR(mhpmevent18, CSR_MHPMEVENT18)
-DECLARE_CSR(mhpmevent19, CSR_MHPMEVENT19)
-DECLARE_CSR(mhpmevent20, CSR_MHPMEVENT20)
-DECLARE_CSR(mhpmevent21, CSR_MHPMEVENT21)
-DECLARE_CSR(mhpmevent22, CSR_MHPMEVENT22)
-DECLARE_CSR(mhpmevent23, CSR_MHPMEVENT23)
-DECLARE_CSR(mhpmevent24, CSR_MHPMEVENT24)
-DECLARE_CSR(mhpmevent25, CSR_MHPMEVENT25)
-DECLARE_CSR(mhpmevent26, CSR_MHPMEVENT26)
-DECLARE_CSR(mhpmevent27, CSR_MHPMEVENT27)
-DECLARE_CSR(mhpmevent28, CSR_MHPMEVENT28)
-DECLARE_CSR(mhpmevent29, CSR_MHPMEVENT29)
-DECLARE_CSR(mhpmevent30, CSR_MHPMEVENT30)
-DECLARE_CSR(mhpmevent31, CSR_MHPMEVENT31)
-DECLARE_CSR(mvendorid, CSR_MVENDORID)
-DECLARE_CSR(marchid, CSR_MARCHID)
-DECLARE_CSR(mimpid, CSR_MIMPID)
-DECLARE_CSR(mhartid, CSR_MHARTID)
-DECLARE_CSR(cycleh, CSR_CYCLEH)
-DECLARE_CSR(timeh, CSR_TIMEH)
-DECLARE_CSR(instreth, CSR_INSTRETH)
-DECLARE_CSR(hpmcounter3h, CSR_HPMCOUNTER3H)
-DECLARE_CSR(hpmcounter4h, CSR_HPMCOUNTER4H)
-DECLARE_CSR(hpmcounter5h, CSR_HPMCOUNTER5H)
-DECLARE_CSR(hpmcounter6h, CSR_HPMCOUNTER6H)
-DECLARE_CSR(hpmcounter7h, CSR_HPMCOUNTER7H)
-DECLARE_CSR(hpmcounter8h, CSR_HPMCOUNTER8H)
-DECLARE_CSR(hpmcounter9h, CSR_HPMCOUNTER9H)
-DECLARE_CSR(hpmcounter10h, CSR_HPMCOUNTER10H)
-DECLARE_CSR(hpmcounter11h, CSR_HPMCOUNTER11H)
-DECLARE_CSR(hpmcounter12h, CSR_HPMCOUNTER12H)
-DECLARE_CSR(hpmcounter13h, CSR_HPMCOUNTER13H)
-DECLARE_CSR(hpmcounter14h, CSR_HPMCOUNTER14H)
-DECLARE_CSR(hpmcounter15h, CSR_HPMCOUNTER15H)
-DECLARE_CSR(hpmcounter16h, CSR_HPMCOUNTER16H)
-DECLARE_CSR(hpmcounter17h, CSR_HPMCOUNTER17H)
-DECLARE_CSR(hpmcounter18h, CSR_HPMCOUNTER18H)
-DECLARE_CSR(hpmcounter19h, CSR_HPMCOUNTER19H)
-DECLARE_CSR(hpmcounter20h, CSR_HPMCOUNTER20H)
-DECLARE_CSR(hpmcounter21h, CSR_HPMCOUNTER21H)
-DECLARE_CSR(hpmcounter22h, CSR_HPMCOUNTER22H)
-DECLARE_CSR(hpmcounter23h, CSR_HPMCOUNTER23H)
-DECLARE_CSR(hpmcounter24h, CSR_HPMCOUNTER24H)
-DECLARE_CSR(hpmcounter25h, CSR_HPMCOUNTER25H)
-DECLARE_CSR(hpmcounter26h, CSR_HPMCOUNTER26H)
-DECLARE_CSR(hpmcounter27h, CSR_HPMCOUNTER27H)
-DECLARE_CSR(hpmcounter28h, CSR_HPMCOUNTER28H)
-DECLARE_CSR(hpmcounter29h, CSR_HPMCOUNTER29H)
-DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H)
-DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H)
-DECLARE_CSR(mcycleh, CSR_MCYCLEH)
-DECLARE_CSR(minstreth, CSR_MINSTRETH)
-DECLARE_CSR(mhpmcounter3h, CSR_MHPMCOUNTER3H)
-DECLARE_CSR(mhpmcounter4h, CSR_MHPMCOUNTER4H)
-DECLARE_CSR(mhpmcounter5h, CSR_MHPMCOUNTER5H)
-DECLARE_CSR(mhpmcounter6h, CSR_MHPMCOUNTER6H)
-DECLARE_CSR(mhpmcounter7h, CSR_MHPMCOUNTER7H)
-DECLARE_CSR(mhpmcounter8h, CSR_MHPMCOUNTER8H)
-DECLARE_CSR(mhpmcounter9h, CSR_MHPMCOUNTER9H)
-DECLARE_CSR(mhpmcounter10h, CSR_MHPMCOUNTER10H)
-DECLARE_CSR(mhpmcounter11h, CSR_MHPMCOUNTER11H)
-DECLARE_CSR(mhpmcounter12h, CSR_MHPMCOUNTER12H)
-DECLARE_CSR(mhpmcounter13h, CSR_MHPMCOUNTER13H)
-DECLARE_CSR(mhpmcounter14h, CSR_MHPMCOUNTER14H)
-DECLARE_CSR(mhpmcounter15h, CSR_MHPMCOUNTER15H)
-DECLARE_CSR(mhpmcounter16h, CSR_MHPMCOUNTER16H)
-DECLARE_CSR(mhpmcounter17h, CSR_MHPMCOUNTER17H)
-DECLARE_CSR(mhpmcounter18h, CSR_MHPMCOUNTER18H)
-DECLARE_CSR(mhpmcounter19h, CSR_MHPMCOUNTER19H)
-DECLARE_CSR(mhpmcounter20h, CSR_MHPMCOUNTER20H)
-DECLARE_CSR(mhpmcounter21h, CSR_MHPMCOUNTER21H)
-DECLARE_CSR(mhpmcounter22h, CSR_MHPMCOUNTER22H)
-DECLARE_CSR(mhpmcounter23h, CSR_MHPMCOUNTER23H)
-DECLARE_CSR(mhpmcounter24h, CSR_MHPMCOUNTER24H)
-DECLARE_CSR(mhpmcounter25h, CSR_MHPMCOUNTER25H)
-DECLARE_CSR(mhpmcounter26h, CSR_MHPMCOUNTER26H)
-DECLARE_CSR(mhpmcounter27h, CSR_MHPMCOUNTER27H)
-DECLARE_CSR(mhpmcounter28h, CSR_MHPMCOUNTER28H)
-DECLARE_CSR(mhpmcounter29h, CSR_MHPMCOUNTER29H)
-DECLARE_CSR(mhpmcounter30h, CSR_MHPMCOUNTER30H)
-DECLARE_CSR(mhpmcounter31h, CSR_MHPMCOUNTER31H)
-#endif
-#ifdef DECLARE_CAUSE
-DECLARE_CAUSE("misaligned fetch", CAUSE_MISALIGNED_FETCH)
-DECLARE_CAUSE("fault fetch", CAUSE_FAULT_FETCH)
-DECLARE_CAUSE("illegal instruction", CAUSE_ILLEGAL_INSTRUCTION)
-DECLARE_CAUSE("breakpoint", CAUSE_BREAKPOINT)
-DECLARE_CAUSE("misaligned load", CAUSE_MISALIGNED_LOAD)
-DECLARE_CAUSE("fault load", CAUSE_FAULT_LOAD)
-DECLARE_CAUSE("misaligned store", CAUSE_MISALIGNED_STORE)
-DECLARE_CAUSE("fault store", CAUSE_FAULT_STORE)
-DECLARE_CAUSE("user_ecall", CAUSE_USER_ECALL)
-DECLARE_CAUSE("supervisor_ecall", CAUSE_SUPERVISOR_ECALL)
-DECLARE_CAUSE("hypervisor_ecall", CAUSE_HYPERVISOR_ECALL)
-DECLARE_CAUSE("machine_ecall", CAUSE_MACHINE_ECALL)
-#endif

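The DECLARE_INSN/DECLARE_CSR/DECLARE_CAUSE sections above are x-macro hooks: they only expand when the including file defines the corresponding macro first. A minimal sketch of how a consumer could turn the DECLARE_CSR() list into a lookup table, assuming the usual layout in which encoding.h contains nothing but preprocessor definitions outside these sections; the table and program below are illustrative, not code from this tree:

    /* Hypothetical consumer: build a CSR name/number table from the x-macro list. */
    #include <stddef.h>
    #include <stdio.h>

    struct csr_entry { const char *name; unsigned num; };

    #define DECLARE_CSR(name, num) { #name, num },
    static const struct csr_entry csr_table[] = {
    #include "encoding.h"   /* only the DECLARE_CSR() lines emit tokens here */
    };
    #undef DECLARE_CSR

    int main(void)
    {
        for (size_t i = 0; i < sizeof(csr_table) / sizeof(csr_table[0]); i++)
            printf("%-16s 0x%03x\n", csr_table[i].name, csr_table[i].num);
        return 0;
    }
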
+ 0 - 73
libcpu/risc-v/virt64/ext_context.h

@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2022-10-10     RT-Thread    the first version
- */
-#ifndef __EXT_CONTEXT_H__
-#define __EXT_CONTEXT_H__
-
-#include <rtconfig.h>
-
-#ifdef ARCH_RISCV_FPU
-/* 32 fpu register */
-#define CTX_FPU_REG_NR  32
-#else
-#define CTX_FPU_REG_NR  0
-#endif /* ARCH_RISCV_FPU */
-
-#ifdef __ASSEMBLY__
-
-/**
- * ==================================
- * RISC-V D ISA (Floating)
- * ==================================
- */
-
-#ifdef ARCH_RISCV_FPU
-#define FPU_CTX_F0_OFF  (REGBYTES * 0)  /* offsetof(fpu_context_t, fpustatus.f[0])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F1_OFF  (REGBYTES * 1)  /* offsetof(fpu_context_t, fpustatus.f[1])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F2_OFF  (REGBYTES * 2)  /* offsetof(fpu_context_t, fpustatus.f[2])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F3_OFF  (REGBYTES * 3)  /* offsetof(fpu_context_t, fpustatus.f[3])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F4_OFF  (REGBYTES * 4)  /* offsetof(fpu_context_t, fpustatus.f[4])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F5_OFF  (REGBYTES * 5)  /* offsetof(fpu_context_t, fpustatus.f[5])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F6_OFF  (REGBYTES * 6)  /* offsetof(fpu_context_t, fpustatus.f[6])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F7_OFF  (REGBYTES * 7)  /* offsetof(fpu_context_t, fpustatus.f[7])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F8_OFF  (REGBYTES * 8)  /* offsetof(fpu_context_t, fpustatus.f[8])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F9_OFF  (REGBYTES * 9)  /* offsetof(fpu_context_t, fpustatus.f[9])  - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F10_OFF (REGBYTES * 10) /* offsetof(fpu_context_t, fpustatus.f[10]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F11_OFF (REGBYTES * 11) /* offsetof(fpu_context_t, fpustatus.f[11]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F12_OFF (REGBYTES * 12) /* offsetof(fpu_context_t, fpustatus.f[12]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F13_OFF (REGBYTES * 13) /* offsetof(fpu_context_t, fpustatus.f[13]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F14_OFF (REGBYTES * 14) /* offsetof(fpu_context_t, fpustatus.f[14]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F15_OFF (REGBYTES * 15) /* offsetof(fpu_context_t, fpustatus.f[15]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F16_OFF (REGBYTES * 16) /* offsetof(fpu_context_t, fpustatus.f[16]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F17_OFF (REGBYTES * 17) /* offsetof(fpu_context_t, fpustatus.f[17]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F18_OFF (REGBYTES * 18) /* offsetof(fpu_context_t, fpustatus.f[18]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F19_OFF (REGBYTES * 19) /* offsetof(fpu_context_t, fpustatus.f[19]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F20_OFF (REGBYTES * 20) /* offsetof(fpu_context_t, fpustatus.f[20]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F21_OFF (REGBYTES * 21) /* offsetof(fpu_context_t, fpustatus.f[21]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F22_OFF (REGBYTES * 22) /* offsetof(fpu_context_t, fpustatus.f[22]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F23_OFF (REGBYTES * 23) /* offsetof(fpu_context_t, fpustatus.f[23]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F24_OFF (REGBYTES * 24) /* offsetof(fpu_context_t, fpustatus.f[24]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F25_OFF (REGBYTES * 25) /* offsetof(fpu_context_t, fpustatus.f[25]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F26_OFF (REGBYTES * 26) /* offsetof(fpu_context_t, fpustatus.f[26]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F27_OFF (REGBYTES * 27) /* offsetof(fpu_context_t, fpustatus.f[27]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F28_OFF (REGBYTES * 28) /* offsetof(fpu_context_t, fpustatus.f[28]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F29_OFF (REGBYTES * 29) /* offsetof(fpu_context_t, fpustatus.f[29]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F30_OFF (REGBYTES * 30) /* offsetof(fpu_context_t, fpustatus.f[30]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#define FPU_CTX_F31_OFF (REGBYTES * 31) /* offsetof(fpu_context_t, fpustatus.f[31]) - offsetof(fpu_context_t, fpustatus.f[0]) */
-#endif /* ARCH_RISCV_FPU */
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef ARCH_RISCV_VECTOR
-#include "rvv_context.h"
-#else /* !ARCH_RISCV_VECTOR */
-#define CTX_VECTOR_REG_NR  0
-#endif /* ARCH_RISCV_VECTOR */
-
-#endif /* __EXT_CONTEXT_H__ */

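The FPU_CTX_Fn_OFF values are simply REGBYTES-spaced slots in a flat f[32] register array, as the offsetof() comments indicate. A small compile-time spot check with a stand-in struct (not the tree's real fpu_context_t, which is defined elsewhere), assuming a C11 compiler:

    #include <stddef.h>
    #include <stdint.h>

    #define REGBYTES        sizeof(uint64_t)   /* rv64: one 8-byte slot per register */
    #define FPU_CTX_F10_OFF (REGBYTES * 10)

    struct fpu_ctx_sketch { uint64_t f[32]; }; /* stand-in for fpustatus.f[] */

    _Static_assert(FPU_CTX_F10_OFF ==
                   offsetof(struct fpu_ctx_sketch, f[10]) -
                   offsetof(struct fpu_ctx_sketch, f[0]),
                   "offset macro must match the register array layout");
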
+ 0 - 97
libcpu/risc-v/virt64/interrupt_gcc.S

@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2006-2021, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2018/10/02     Bernard      The first version
- * 2018/12/27     Jesven       Add SMP schedule
- * 2021/02/02     lizhirui     Add userspace support
- * 2021/12/24     JasonHu      Add user setting save/restore
- * 2022/10/22     Shell        Support kernel mode RVV;
- *                             Rewrite trap handling routine
- */
-
-#include "cpuport.h"
-#include "encoding.h"
-#include "stackframe.h"
-
-    .align 2
-    .global trap_entry
-    .global debug_check_sp
-trap_entry:
-    // distinguish exception from kernel or user
-    csrrw   sp, sscratch, sp
-    bnez    sp, _save_context
-
-    // BE REALLY careful with sscratch,
-    // if it's wrong, we could loop here forever,
-    // or access random memory and see things go totally
-    // messy after a long time without knowing why
-_from_kernel:
-    csrr    sp, sscratch
-    j _save_context
-
-_save_context:
-    SAVE_ALL
-    // clear sscratch to say 'now in kernel mode'
-    csrw    sscratch, zero
-
-    RESTORE_SYS_GP
-
-    // now we are ready to enter the interrupt / exception handler
-_distinguish_syscall:
-    csrr    t0, scause
-#ifdef RT_USING_SMART
-    // TODO swap 8 with config macro name
-    li      t1, 8
-    beq     t0, t1, syscall_entry
-    // syscall never return here
-#endif
-
-_handle_interrupt_and_exception:
-    mv      a0, t0
-    csrrc   a1, stval, zero
-    csrr    a2, sepc
-    // sp as exception frame pointer
-    mv      a3, sp
-    call    handle_trap
-
-_interrupt_exit:
-    la      s0, rt_thread_switch_interrupt_flag
-    lw      s2, 0(s0)
-    beqz    s2, _resume_execution
-    sw      zero, 0(s0)
-
-_context_switch:
-    la      t0, rt_interrupt_from_thread
-    LOAD    a0, 0(t0)
-    la      t0, rt_interrupt_to_thread
-    LOAD    a1, 0(t0)
-    csrr    t0, sstatus
-    andi    t0, t0, ~SSTATUS_SPIE
-    csrw    sstatus, t0
-    jal     rt_hw_context_switch
-
-_resume_execution:
-#ifdef RT_USING_SMART
-    LOAD    t0, FRAME_OFF_SSTATUS(sp)
-    andi    t0, t0, SSTATUS_SPP
-    beqz    t0, arch_ret_to_user
-#endif
-
-_resume_kernel:
-    RESTORE_ALL
-    csrw    sscratch, zero
-    sret
-
-.global rt_hw_interrupt_enable
-rt_hw_interrupt_enable:
-    csrs sstatus, a0    /* restore to old csr */
-    jr ra
-
-.global rt_hw_interrupt_disable
-rt_hw_interrupt_disable:
-    csrrci a0, sstatus, 2   /* clear SIE */
-    jr ra

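rt_hw_interrupt_disable() above returns the old sstatus value with SIE cleared, and rt_hw_interrupt_enable() sets the saved bits back, which is what makes the usual RT-Thread critical-section pairing work. A minimal sketch of that pairing:

    #include <rthw.h>

    static unsigned int shared_counter;

    void counter_inc(void)
    {
        /* csrrci: returns the previous sstatus and clears SIE */
        rt_base_t level = rt_hw_interrupt_disable();

        shared_counter++;                /* interrupts are masked here */

        /* csrs sstatus, level: restores whatever interrupt state was saved */
        rt_hw_interrupt_enable(level);
    }
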
+ 0 - 52
libcpu/risc-v/virt64/io.h

@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2019-2020, Xim
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- */
-#ifndef ARCH_IO_H
-#define ARCH_IO_H
-#include <rtthread.h>
-#define RISCV_FENCE(p, s) \
-        __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
-
-/* These barriers need to enforce ordering on both devices or memory. */
-#define mb()            RISCV_FENCE(iorw,iorw)
-#define rmb()           RISCV_FENCE(ir,ir)
-#define wmb()           RISCV_FENCE(ow,ow)
-
-#define __arch_getl(a)                  (*(unsigned int *)(a))
-#define __arch_putl(v, a)               (*(unsigned int *)(a) = (v))
-
-#define dmb() mb()
-#define __iormb() rmb()
-#define __iowmb() wmb()
-
-static inline void writel(uint32_t val, volatile void *addr)
-{
-    __iowmb();
-    __arch_putl(val, addr);
-}
-
-static inline uint32_t readl(const volatile void *addr)
-{
-    uint32_t val;
-
-    val = __arch_getl(addr);
-    __iormb();
-    return val;
-}
-
-static inline void write_reg(
-    uint32_t val, volatile void *addr, unsigned offset)
-{
-    writel(val, (void *)((rt_size_t)addr + offset));
-}
-
-static inline uint32_t read_reg(
-    const volatile void *addr, unsigned offset)
-{
-    return readl((void *)((rt_size_t)addr + offset));
-}
-
-#endif // ARCH_IO_H

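write_reg()/read_reg() above are thin offset helpers over writel()/readl(), which add the fences around the raw 32-bit access. A hedged sketch of a register poke; UART0_BASE and the divisor offset are assumptions, not values from this tree:

    #include <stdint.h>
    #include <rtthread.h>
    #include "io.h"

    #define UART0_BASE ((volatile void *)0x10000000UL)  /* hypothetical MMIO base */

    void uart_set_divisor(uint32_t div)
    {
        write_reg(div, UART0_BASE, 0x04);   /* wmb(), then 32-bit store at base+0x04 */
        (void)read_reg(UART0_BASE, 0x04);   /* 32-bit load, then rmb() */
    }
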
+ 0 - 595
libcpu/risc-v/virt64/mmu.c

@@ -1,595 +0,0 @@
-/*
- * Copyright (c) 2006-2021, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2021-01-30     lizhirui     first version
- * 2022-12-13     WangXiaoyao  Port to new mm
- * 2023-10-12     Shell        Add permission control API
- */
-
-#include <rtthread.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#define DBG_TAG "hw.mmu"
-#define DBG_LVL DBG_INFO
-#include <rtdbg.h>
-
-#include <board.h>
-#include <cache.h>
-#include <mm_aspace.h>
-#include <mm_page.h>
-#include <mmu.h>
-#include <riscv_mmu.h>
-#include <tlb.h>
-
-#ifdef RT_USING_SMART
-#include <board.h>
-#include <ioremap.h>
-#include <lwp_user_mm.h>
-#endif
-
-#ifndef RT_USING_SMART
-#define USER_VADDR_START 0
-#endif
-
-static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size);
-
-static void *current_mmu_table = RT_NULL;
-
-volatile __attribute__((aligned(4 * 1024)))
-rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];
-
-#ifdef ARCH_USING_ASID
-void rt_hw_aspace_switch(rt_aspace_t aspace)
-{
-    uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
-    current_mmu_table = aspace->page_table;
-
-    rt_hw_asid_switch_pgtbl(aspace, page_table);
-}
-
-#else /* !ARCH_USING_ASID */
-void rt_hw_aspace_switch(rt_aspace_t aspace)
-{
-    uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
-    current_mmu_table = aspace->page_table;
-
-    write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
-                        ((rt_ubase_t)page_table >> PAGE_OFFSET_BIT));
-    rt_hw_tlb_invalidate_all_local();
-}
-
-void rt_hw_asid_init(void)
-{
-}
-#endif /* ARCH_USING_ASID */
-
-void *rt_hw_mmu_tbl_get()
-{
-    return current_mmu_table;
-}
-
-static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
-                         size_t attr)
-{
-    rt_ubase_t l1_off, l2_off, l3_off;
-    rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
-
-    l1_off = GET_L1((size_t)va);
-    l2_off = GET_L2((size_t)va);
-    l3_off = GET_L3((size_t)va);
-
-    mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
-
-    if (PTE_USED(*mmu_l1))
-    {
-        mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
-    }
-    else
-    {
-        mmu_l2 = (rt_ubase_t *)rt_pages_alloc(0);
-
-        if (mmu_l2)
-        {
-            rt_memset(mmu_l2, 0, PAGE_SIZE);
-            rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE);
-            *mmu_l1 = COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
-                                 PAGE_DEFAULT_ATTR_NEXT);
-            rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
-        }
-        else
-        {
-            return -1;
-        }
-    }
-
-    if (PTE_USED(*(mmu_l2 + l2_off)))
-    {
-        RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
-        mmu_l3 =
-            (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
-    }
-    else
-    {
-        mmu_l3 = (rt_ubase_t *)rt_pages_alloc(0);
-
-        if (mmu_l3)
-        {
-            rt_memset(mmu_l3, 0, PAGE_SIZE);
-            rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE);
-            *(mmu_l2 + l2_off) =
-                COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
-                           PAGE_DEFAULT_ATTR_NEXT);
-            rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
-            // the new child table takes a reference on its parent page table
-            rt_page_ref_inc((void *)mmu_l2, 0);
-        }
-        else
-        {
-            return -1;
-        }
-    }
-
-    RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
-    // the new leaf entry takes a reference on its parent page table
-    rt_page_ref_inc((void *)mmu_l3, 0);
-    *(mmu_l3 + l3_off) = COMBINEPTE((rt_ubase_t)pa, attr);
-    rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
-    return 0;
-}
-
-/** rt_hw_mmu_map will never override an existing page table entry */
-void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
-                    size_t size, size_t attr)
-{
-    int ret = -1;
-    void *unmap_va = v_addr;
-    size_t npages = size >> ARCH_PAGE_SHIFT;
-
-    // TODO: try mapping with HUGEPAGE here
-    while (npages--)
-    {
-        MM_PGTBL_LOCK(aspace);
-        ret = _map_one_page(aspace, v_addr, p_addr, attr);
-        MM_PGTBL_UNLOCK(aspace);
-        if (ret != 0)
-        {
-            /* error, undo map */
-            while (unmap_va != v_addr)
-            {
-                MM_PGTBL_LOCK(aspace);
-                _unmap_area(aspace, unmap_va, ARCH_PAGE_SIZE);
-                MM_PGTBL_UNLOCK(aspace);
-                unmap_va += ARCH_PAGE_SIZE;
-            }
-            break;
-        }
-        v_addr += ARCH_PAGE_SIZE;
-        p_addr += ARCH_PAGE_SIZE;
-    }
-
-    if (ret == 0)
-    {
-        return unmap_va;
-    }
-
-    return NULL;
-}
-
-static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
-{
-    int loop_flag = 1;
-    while (loop_flag)
-    {
-        loop_flag = 0;
-        *pentry = 0;
-        rt_hw_cpu_dcache_clean(pentry, sizeof(*pentry));
-
-        // we don't handle level 0, which is maintained by caller
-        if (level > 0)
-        {
-            void *page = (void *)((rt_ubase_t)pentry & ~ARCH_PAGE_MASK);
-
-            // decrease reference from child page to parent
-            rt_pages_free(page, 0);
-
-            int free = rt_page_ref_get(page, 0);
-            if (free == 1)
-            {
-                rt_pages_free(page, 0);
-                pentry = lvl_entry[--level];
-                loop_flag = 1;
-            }
-        }
-    }
-}
-
-static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
-{
-    rt_ubase_t loop_va = __UMASKVALUE((rt_ubase_t)v_addr, PAGE_OFFSET_MASK);
-    size_t unmapped = 0;
-
-    int i = 0;
-    rt_ubase_t lvl_off[3];
-    rt_ubase_t *lvl_entry[3];
-    lvl_off[0] = (rt_ubase_t)GET_L1(loop_va);
-    lvl_off[1] = (rt_ubase_t)GET_L2(loop_va);
-    lvl_off[2] = (rt_ubase_t)GET_L3(loop_va);
-    unmapped = 1 << (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2ul);
-
-    rt_ubase_t *pentry;
-    lvl_entry[i] = ((rt_ubase_t *)aspace->page_table + lvl_off[i]);
-    pentry = lvl_entry[i];
-
-    // find leaf page table entry
-    while (PTE_USED(*pentry) && !PAGE_IS_LEAF(*pentry))
-    {
-        i += 1;
-        lvl_entry[i] = ((rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
-                        lvl_off[i]);
-        pentry = lvl_entry[i];
-        unmapped >>= ARCH_INDEX_WIDTH;
-    }
-
-    // clear the leaf PTE and free page-table pages that become unused
-    if (PTE_USED(*pentry))
-    {
-        _unmap_pte(pentry, lvl_entry, i);
-    }
-
-    return unmapped;
-}
-
-/** unmap differs from map in that it can handle multiple pages per call */
-void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
-{
-    // the caller guarantees that v_addr & size are page aligned
-    if (!aspace->page_table)
-    {
-        return;
-    }
-    size_t unmapped = 0;
-
-    while (size > 0)
-    {
-        MM_PGTBL_LOCK(aspace);
-        unmapped = _unmap_area(aspace, v_addr, size);
-        MM_PGTBL_UNLOCK(aspace);
-
-        // when unmapped == 0, the region does not exist in the pgtbl
-        if (!unmapped || unmapped > size) break;
-
-        size -= unmapped;
-        v_addr += unmapped;
-    }
-}
-
-#ifdef RT_USING_SMART
-static inline void _init_region(void *vaddr, size_t size)
-{
-    rt_ioremap_start = vaddr;
-    rt_ioremap_size = size;
-    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
-    LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start,
-          rt_mpr_start);
-}
-#else
-static inline void _init_region(void *vaddr, size_t size)
-{
-    rt_mpr_start = vaddr - rt_mpr_size;
-}
-#endif
-
-#if defined(RT_USING_SMART) && defined(ARCH_REMAP_KERNEL)
-#define KERN_SPACE_START ((void *)KERNEL_VADDR_START)
-#define KERN_SPACE_SIZE  (0xfffffffffffff000UL - KERNEL_VADDR_START + 0x1000)
-#else
-#define KERN_SPACE_START ((void *)0x1000)
-#define KERN_SPACE_SIZE  ((size_t)USER_VADDR_START - 0x1000)
-#endif
-
-int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
-                       rt_ubase_t *vtable, rt_ubase_t pv_off)
-{
-    size_t l1_off, va_s, va_e;
-    rt_base_t level;
-
-    if ((!aspace) || (!vtable))
-    {
-        return -1;
-    }
-
-    va_s = (rt_ubase_t)v_address;
-    va_e = ((rt_ubase_t)v_address) + size - 1;
-
-    if (va_e < va_s)
-    {
-        return -1;
-    }
-
-    // convert address to PPN2 index
-    va_s = GET_L1(va_s);
-    va_e = GET_L1(va_e);
-
-    if (va_s == 0)
-    {
-        return -1;
-    }
-
-    // vtable initialization check
-    for (l1_off = va_s; l1_off <= va_e; l1_off++)
-    {
-        size_t v = vtable[l1_off];
-
-        if (v)
-        {
-            return -1;
-        }
-    }
-
-    rt_aspace_init(&rt_kernel_space, KERN_SPACE_START, KERN_SPACE_SIZE, vtable);
-
-    _init_region(v_address, size);
-    return 0;
-}
-
-const static int max_level =
-    (ARCH_VADDR_WIDTH - ARCH_PAGE_SHIFT) / ARCH_INDEX_WIDTH;
-
-static inline uintptr_t _get_level_size(int level)
-{
-    return 1ul << (ARCH_PAGE_SHIFT + (max_level - level) * ARCH_INDEX_WIDTH);
-}
-
-static rt_ubase_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
-{
-    rt_ubase_t l1_off, l2_off, l3_off;
-    rt_ubase_t *mmu_l1, *mmu_l2, *mmu_l3;
-    rt_ubase_t pa;
-
-    l1_off = GET_L1((rt_uintptr_t)vaddr);
-    l2_off = GET_L2((rt_uintptr_t)vaddr);
-    l3_off = GET_L3((rt_uintptr_t)vaddr);
-
-    if (!aspace)
-    {
-        LOG_W("%s: no aspace", __func__);
-        return RT_NULL;
-    }
-
-    mmu_l1 = ((rt_ubase_t *)aspace->page_table) + l1_off;
-
-    if (PTE_USED(*mmu_l1))
-    {
-        if (*mmu_l1 & PTE_XWR_MASK)
-        {
-            *level = 1;
-            return mmu_l1;
-        }
-
-        mmu_l2 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
-
-        if (PTE_USED(*(mmu_l2 + l2_off)))
-        {
-            if (*(mmu_l2 + l2_off) & PTE_XWR_MASK)
-            {
-                *level = 2;
-                return mmu_l2 + l2_off;
-            }
-
-            mmu_l3 = (rt_ubase_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
-                                             PV_OFFSET);
-
-            if (PTE_USED(*(mmu_l3 + l3_off)))
-            {
-                *level = 3;
-                return mmu_l3 + l3_off;
-            }
-        }
-    }
-
-    return RT_NULL;
-}
-
-void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
-{
-    int level;
-    rt_ubase_t *pte = _query(aspace, vaddr, &level);
-    uintptr_t paddr;
-
-    if (pte)
-    {
-        paddr = GET_PADDR(*pte);
-        paddr |= ((intptr_t)vaddr & (_get_level_size(level) - 1));
-    }
-    else
-    {
-        LOG_D("%s: failed at %p", __func__, vaddr);
-        paddr = (uintptr_t)ARCH_MAP_FAILED;
-    }
-    return (void *)paddr;
-}
-
-static int _noncache(rt_base_t *pte)
-{
-    return 0;
-}
-
-static int _cache(rt_base_t *pte)
-{
-    return 0;
-}
-
-static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_base_t *pte) = {
-    [MMU_CNTL_CACHE] = _cache,
-    [MMU_CNTL_NONCACHE] = _noncache,
-};
-
-int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
-                      enum rt_mmu_cntl cmd)
-{
-    int level;
-    int err = -RT_EINVAL;
-    void *vend = vaddr + size;
-
-    int (*handler)(rt_base_t *pte);
-    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
-    {
-        handler = control_handler[cmd];
-
-        while (vaddr < vend)
-        {
-            rt_base_t *pte = _query(aspace, vaddr, &level);
-            void *range_end = vaddr + _get_level_size(level);
-            RT_ASSERT(range_end <= vend);
-
-            if (pte)
-            {
-                err = handler(pte);
-                RT_ASSERT(err == RT_EOK);
-            }
-            vaddr = range_end;
-        }
-    }
-    else
-    {
-        err = -RT_ENOSYS;
-    }
-
-    return err;
-}
-
-/**
- * @brief Set up the page table for kernel space. It is a fixed map:
- * none of the mappings can be changed after initialization.
- *
- * Memory regions in struct mem_desc must be page aligned,
- * otherwise the setup fails and no error is
- * reported.
- *
- * @param aspace
- * @param mdesc
- * @param desc_nr
- */
-void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
-{
-    void *err;
-    for (size_t i = 0; i < desc_nr; i++)
-    {
-        size_t attr;
-        switch (mdesc->attr)
-        {
-            case NORMAL_MEM:
-                attr = MMU_MAP_K_RWCB;
-                break;
-            case NORMAL_NOCACHE_MEM:
-                attr = MMU_MAP_K_RWCB;
-                break;
-            case DEVICE_MEM:
-                attr = MMU_MAP_K_DEVICE;
-                break;
-            default:
-                attr = MMU_MAP_K_DEVICE;
-        }
-
-        struct rt_mm_va_hint hint = {
-            .flags = MMF_MAP_FIXED,
-            .limit_start = aspace->start,
-            .limit_range_size = aspace->size,
-            .map_size = mdesc->vaddr_end - mdesc->vaddr_start + 1,
-            .prefer = (void *)mdesc->vaddr_start};
-
-        if (mdesc->paddr_start == (rt_uintptr_t)ARCH_MAP_FAILED)
-            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
-
-        rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
-                                 mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
-        mdesc++;
-    }
-
-    rt_hw_asid_init();
-
-    rt_hw_aspace_switch(&rt_kernel_space);
-    rt_page_cleanup();
-}
-
-#define SATP_BASE ((rt_ubase_t)SATP_MODE << SATP_MODE_OFFSET)
-void rt_hw_mem_setup_early(void)
-{
-    rt_ubase_t pv_off;
-    rt_ubase_t ps = 0x0;
-    rt_ubase_t vs = 0x0;
-    rt_ubase_t *early_pgtbl = (rt_ubase_t *)(((size_t)&__bss_end + 4095) & ~0xfff);
-
-    /* calculate pv_offset */
-    void *symb_pc;
-    void *symb_linker;
-    __asm__ volatile("la %0, _start\n" : "=r"(symb_pc));
-    __asm__ volatile("la %0, _start_link_addr\n" : "=r"(symb_linker));
-    symb_linker = *(void **)symb_linker;
-    pv_off = symb_pc - symb_linker;
-    rt_kmem_pvoff_set(pv_off);
-
-    if (pv_off)
-    {
-        if (pv_off & (1ul << (ARCH_INDEX_WIDTH * 2 + ARCH_PAGE_SHIFT)))
-        {
-            LOG_E("%s: not aligned virtual address. pv_offset %p", __func__,
-                  pv_off);
-            RT_ASSERT(0);
-        }
-
-        /**
-         * identity mapping:
-         * the PC is still in the lower region before relocating to high memory
-         */
-        for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
-        {
-            early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
-            ps += L1_PAGE_SIZE;
-        }
-
-        /* relocate text region */
-        __asm__ volatile("la %0, _start\n" : "=r"(ps));
-        ps &= ~(L1_PAGE_SIZE - 1);
-        vs = ps - pv_off;
-
-        /* relocate region */
-        rt_ubase_t vs_idx = GET_L1(vs);
-        rt_ubase_t ve_idx = GET_L1(vs + 0x80000000);
-        for (size_t i = vs_idx; i < ve_idx; i++)
-        {
-            early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
-            ps += L1_PAGE_SIZE;
-        }
-
-        /* apply new mapping */
-        asm volatile("sfence.vma x0, x0");
-        write_csr(satp, SATP_BASE | ((size_t)early_pgtbl >> PAGE_OFFSET_BIT));
-        asm volatile("sfence.vma x0, x0");
-    }
-    /* return to lower text section */
-}
-
-void *rt_hw_mmu_pgtbl_create(void)
-{
-    rt_ubase_t *mmu_table;
-    mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
-    if (!mmu_table)
-    {
-        return RT_NULL;
-    }
-    rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
-    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
-
-    return mmu_table;
-}
-
-void rt_hw_mmu_pgtbl_delete(void *pgtbl)
-{
-    rt_pages_free(pgtbl, 0);
-}

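A hedged sketch of the mapping API removed above: map one page into the kernel address space, query it back, and unmap it. The header names mirror the includes of the deleted file; va and pa are assumed page aligned and the attribute is illustrative only:

    #include <rtthread.h>
    #include <mm_aspace.h>
    #include <mm_page.h>
    #include <mmu.h>
    #include <riscv_mmu.h>

    void map_one_page_demo(void *va, void *pa)
    {
        /* returns va on success, NULL on failure; never overrides an existing entry */
        if (rt_hw_mmu_map(&rt_kernel_space, va, pa, ARCH_PAGE_SIZE, MMU_MAP_K_RWCB))
        {
            RT_ASSERT(rt_hw_mmu_v2p(&rt_kernel_space, va) == pa);
            rt_hw_mmu_unmap(&rt_kernel_space, va, ARCH_PAGE_SIZE);
        }
    }
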
+ 0 - 77
libcpu/risc-v/virt64/mmu.h

@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2006-2021, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2021-01-30     lizhirui     first version
- * 2023-10-12     Shell        Add permission control API
- */
-
-#ifndef __MMU_H__
-#define __MMU_H__
-
-#include "riscv.h"
-#include "riscv_mmu.h"
-#include <mm_aspace.h>
-#include <stddef.h>
-
-/* RAM, Flash, or ROM */
-#define NORMAL_MEM 0
-/* normal nocache memory mapping type */
-#define NORMAL_NOCACHE_MEM 1
-/* MMIO region */
-#define DEVICE_MEM 2
-
-typedef size_t rt_pte_t;
-
-struct mem_desc
-{
-    rt_size_t vaddr_start;
-    rt_size_t vaddr_end;
-    rt_ubase_t paddr_start;
-    rt_size_t attr;
-    struct rt_varea varea;
-};
-
-#define GET_PF_ID(addr)     ((addr) >> PAGE_OFFSET_BIT)
-#define GET_PF_OFFSET(addr) __MASKVALUE(addr, PAGE_OFFSET_MASK)
-#define GET_L1(addr)        __PARTBIT(addr, VPN2_SHIFT, VPN2_BIT)
-#define GET_L2(addr)        __PARTBIT(addr, VPN1_SHIFT, VPN1_BIT)
-#define GET_L3(addr)        __PARTBIT(addr, VPN0_SHIFT, VPN0_BIT)
-#define GET_PPN(pte)                                                           \
-    (__PARTBIT(pte, PTE_PPN_SHIFT, PHYSICAL_ADDRESS_WIDTH_BITS - PTE_PPN_SHIFT))
-#define GET_PADDR(pte)            (GET_PPN(pte) << PAGE_OFFSET_BIT)
-#define VPN_TO_PPN(vaddr, pv_off) (((rt_uintptr_t)(vaddr)) + (pv_off))
-#define PPN_TO_VPN(paddr, pv_off) (((rt_uintptr_t)(paddr)) - (pv_off))
-#define COMBINEVADDR(l1_off, l2_off, l3_off)                                   \
-    (((l1_off) << VPN2_SHIFT) | ((l2_off) << VPN1_SHIFT) |                     \
-     ((l3_off) << VPN0_SHIFT))
-#define COMBINEPTE(paddr, attr)                                                \
-    ((((paddr) >> PAGE_OFFSET_BIT) << PTE_PPN_SHIFT) | (attr))
-
-#define MMU_MAP_ERROR_VANOTALIGN -1
-#define MMU_MAP_ERROR_PANOTALIGN -2
-#define MMU_MAP_ERROR_NOPAGE     -3
-#define MMU_MAP_ERROR_CONFLICT   -4
-
-void *rt_hw_mmu_tbl_get(void);
-int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
-                       rt_ubase_t *vtable, rt_ubase_t pv_off);
-void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr);
-void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_ubase_t vaddr_start,
-                               rt_ubase_t size);
-void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
-                    size_t attr);
-void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size);
-void rt_hw_aspace_switch(rt_aspace_t aspace);
-void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *vaddr);
-
-int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
-                      enum rt_mmu_cntl cmd);
-
-void *rt_hw_mmu_pgtbl_create(void);
-void rt_hw_mmu_pgtbl_delete(void *pgtbl);
-
-#endif

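The GET_L1/GET_L2/GET_L3 helpers above split an Sv39 virtual address into its three 9-bit page-table indexes. A small sketch that prints the decomposition (the function itself is illustrative only):

    #include <rtthread.h>
    #include "mmu.h"

    void show_vpn_split(rt_size_t va)
    {
        rt_kprintf("va %p -> L1 %d, L2 %d, L3 %d, page offset 0x%x\n",
                   (void *)va,
                   (int)GET_L1(va), (int)GET_L2(va), (int)GET_L3(va),
                   (int)GET_PF_OFFSET(va));
    }
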
+ 0 - 32
libcpu/risc-v/virt64/riscv.h

@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2021-01-30     lizhirui     first version
- * 2024-08-28     RT-Thread    Fit into rv64ilp32 ABI
- */
-
-#ifndef __RISCV_H__
-#define __RISCV_H__
-
-#include <encoding.h>
-
-/* using unsigned long long for the case of rv64ilp32 */
-#define __SIZE(bit) (1ULL << (bit))
-#define __MASK(bit) (__SIZE(bit) - 1ULL)
-
-#define __UMASK(bit) (~(__MASK(bit)))
-#define __MASKVALUE(value,maskvalue) ((value) & (maskvalue))
-#define __UMASKVALUE(value,maskvalue) ((value) & (~(maskvalue)))
-#define __CHECKUPBOUND(value,bit_count) (!(((rt_ubase_t)value) & (~__MASK(bit_count))))
-#define __CHECKALIGN(value,start_bit) (!(((rt_ubase_t)value) & (__MASK(start_bit))))
-
-#define __PARTBIT(value,start_bit,length) (((value) >> (start_bit)) & __MASK(length))
-
-#define __ALIGNUP(value,bit) (((value) + __MASK(bit)) & __UMASK(bit))
-#define __ALIGNDOWN(value,bit) ((value) & __UMASK(bit))
-
-#endif

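A few compile-time spot checks of what the bit helpers above evaluate to, assuming a C11 compiler; the concrete numbers are examples only:

    #include "riscv.h"

    _Static_assert(__SIZE(12) == 0x1000ULL,              "2^12 = one 4 KiB page");
    _Static_assert(__MASK(12) == 0xfffULL,               "low 12 bits set");
    _Static_assert(__ALIGNUP(0x1234, 12) == 0x2000ULL,   "round up to the next page");
    _Static_assert(__ALIGNDOWN(0x1234, 12) == 0x1000ULL, "round down to the page base");
    _Static_assert(__PARTBIT(0xABCD, 4, 8) == 0xBCULL,   "extract bits [11:4]");
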
+ 0 - 115
libcpu/risc-v/virt64/riscv_io.h

@@ -1,115 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2019 Western Digital Corporation or its affiliates.
- *
- * Authors:
- *   Anup Patel <anup.patel@wdc.com>
- */
-
-#ifndef __RISCV_IO_H__
-#define __RISCV_IO_H__
-
-static inline uint32_t __raw_hartid(void)
-{
-    extern int boot_hartid;
-    return boot_hartid;
-}
-
-static inline void __raw_writeb(rt_uint8_t val, volatile void *addr)
-{
-    asm volatile("sb %0, 0(%1)" : : "r"(val), "r"(addr));
-}
-
-static inline void __raw_writew(rt_uint16_t val, volatile void *addr)
-{
-    asm volatile("sh %0, 0(%1)" : : "r"(val), "r"(addr));
-}
-
-static inline void __raw_writel(rt_uint32_t val, volatile void *addr)
-{
-    asm volatile("sw %0, 0(%1)" : : "r"(val), "r"(addr));
-}
-
-#if __riscv_xlen != 32
-static inline void __raw_writeq(rt_uint64_t val, volatile void *addr)
-{
-    asm volatile("sd %0, 0(%1)" : : "r"(val), "r"(addr));
-}
-#endif
-
-static inline rt_uint8_t __raw_readb(const volatile void *addr)
-{
-    rt_uint8_t val;
-
-    asm volatile("lb %0, 0(%1)" : "=r"(val) : "r"(addr));
-    return val;
-}
-
-static inline rt_uint16_t __raw_readw(const volatile void *addr)
-{
-    rt_uint16_t val;
-
-    asm volatile("lh %0, 0(%1)" : "=r"(val) : "r"(addr));
-    return val;
-}
-
-static inline rt_uint32_t __raw_readl(const volatile void *addr)
-{
-    rt_uint32_t val;
-
-    asm volatile("lw %0, 0(%1)" : "=r"(val) : "r"(addr));
-    return val;
-}
-
-#if __riscv_xlen != 32
-static inline rt_uint64_t __raw_readq(const volatile void *addr)
-{
-    rt_uint64_t val;
-
-    asm volatile("ld %0, 0(%1)" : "=r"(val) : "r"(addr));
-    return val;
-}
-#endif
-
-/* FIXME: These are now the same as asm-generic */
-
-/* clang-format off */
-
-#define __io_rbr()      do {} while (0)
-#define __io_rar()      do {} while (0)
-#define __io_rbw()      do {} while (0)
-#define __io_raw()      do {} while (0)
-
-#define readb_relaxed(c)    ({ rt_uint8_t  __v; __io_rbr(); __v = __raw_readb(c); __io_rar(); __v; })
-#define readw_relaxed(c)    ({ rt_uint16_t __v; __io_rbr(); __v = __raw_readw(c); __io_rar(); __v; })
-#define readl_relaxed(c)    ({ rt_uint32_t __v; __io_rbr(); __v = __raw_readl(c); __io_rar(); __v; })
-
-#define writeb_relaxed(v,c) ({ __io_rbw(); __raw_writeb((v),(c)); __io_raw(); })
-#define writew_relaxed(v,c) ({ __io_rbw(); __raw_writew((v),(c)); __io_raw(); })
-#define writel_relaxed(v,c) ({ __io_rbw(); __raw_writel((v),(c)); __io_raw(); })
-
-#if __riscv_xlen != 32
-#define readq_relaxed(c)    ({ rt_uint64_t __v; __io_rbr(); __v = __raw_readq(c); __io_rar(); __v; })
-#define writeq_relaxed(v,c) ({ __io_rbw(); __raw_writeq((v),(c)); __io_raw(); })
-#endif
-
-#define __io_br()   do {} while (0)
-#define __io_ar()   __asm__ __volatile__ ("fence i,r" : : : "memory");
-#define __io_bw()   __asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw()   do {} while (0)
-
-#define readb(c)    ({ rt_uint8_t  __v; __io_br(); __v = __raw_readb(c); __io_ar(); __v; })
-#define readw(c)    ({ rt_uint16_t __v; __io_br(); __v = __raw_readw(c); __io_ar(); __v; })
-#define readl(c)    ({ rt_uint32_t __v; __io_br(); __v = __raw_readl(c); __io_ar(); __v; })
-
-#define writeb(v,c) ({ __io_bw(); __raw_writeb((v),(c)); __io_aw(); })
-#define writew(v,c) ({ __io_bw(); __raw_writew((v),(c)); __io_aw(); })
-#define writel(v,c) ({ __io_bw(); __raw_writel((v),(c)); __io_aw(); })
-
-#if __riscv_xlen != 32
-#define readq(c)    ({ rt_uint64_t __v; __io_br(); __v = __raw_readq(c); __io_ar(); __v; })
-#define writeq(v,c) ({ __io_bw(); __raw_writeq((v),(c)); __io_aw(); })
-#endif
-
-#endif

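The plain readl()/writel() forms above bracket the raw access with fences so MMIO stays ordered against normal memory, while the *_relaxed forms skip the fences. A hedged sketch of when each is used; DEV_BASE and the register offsets are assumptions, not values from this tree:

    #include <rtthread.h>
    #include "riscv_io.h"

    #define DEV_BASE   ((volatile char *)0x40000000UL)  /* hypothetical device */
    #define REG_STATUS 0x00
    #define REG_KICK   0x04

    void dev_kick_and_wait(rt_uint32_t *desc_ring, rt_uint32_t tail)
    {
        desc_ring[tail] = 1;                  /* plain memory write */
        writel(tail, DEV_BASE + REG_KICK);    /* fence w,o orders it before the doorbell */

        /* poll with relaxed loads to avoid one fence per loop iteration */
        while ((readl_relaxed(DEV_BASE + REG_STATUS) & 1u) == 0)
            ;
        (void)readl(DEV_BASE + REG_STATUS);   /* fenced read before using device data */
    }
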
+ 0 - 29
libcpu/risc-v/virt64/riscv_mmu.c

@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2006-2021, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2021-01-30     lizhirui     first version
- */
-
-#include <rthw.h>
-#include <rtthread.h>
-
-#include <stdint.h>
-#include <riscv.h>
-#include <string.h>
-#include <stdlib.h>
-
-#include "riscv_mmu.h"
-
-void mmu_enable_user_page_access(void)
-{
-    set_csr(sstatus, SSTATUS_SUM);
-}
-
-void mmu_disable_user_page_access(void)
-{
-    clear_csr(sstatus, SSTATUS_SUM);
-}

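The two helpers above toggle sstatus.SUM, which is what allows S-mode code to touch U-mode pages at all. A hedged sketch of how they bracket a direct access to user memory, assuming riscv_mmu.h declares them; the copy itself is illustrative, not the tree's lwp copy routine:

    #include <rtthread.h>
    #include "riscv_mmu.h"

    void copy_from_user_demo(void *kdst, const void *usrc, rt_size_t len)
    {
        mmu_enable_user_page_access();    /* set sstatus.SUM */
        rt_memcpy(kdst, usrc, len);       /* S-mode may now read U-mode pages */
        mmu_disable_user_page_access();   /* clear sstatus.SUM again */
    }
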
+ 0 - 264
libcpu/risc-v/virt64/sbi.c

@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2006-2021, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2021-05-18     Bernard      port from FreeBSD
- */
-
-/*-
- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
- *
- * Copyright (c) 2019 Mitchell Horne <mhorne@FreeBSD.org>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include "sbi.h"
-#include <rtthread.h>
-#include <stdbool.h>
-
-/* SBI Implementation-Specific Definitions */
-#define OPENSBI_VERSION_MAJOR_OFFSET 16
-#define OPENSBI_VERSION_MINOR_MASK 0xFFFF
-
-unsigned long sbi_spec_version;
-unsigned long sbi_impl_id;
-unsigned long sbi_impl_version;
-
-static bool has_time_extension = false;
-static bool has_ipi_extension = false;
-static bool has_rfnc_extension = false;
-
-static struct sbi_ret sbi_get_spec_version(void)
-{
-    return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_SPEC_VERSION));
-}
-
-static struct sbi_ret sbi_get_impl_id(void)
-{
-    return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_ID));
-}
-
-static struct sbi_ret sbi_get_impl_version(void)
-{
-    return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_VERSION));
-}
-
-void sbi_print_version(void)
-{
-    uint32_t major;
-    uint32_t minor;
-
-    /* For legacy SBI implementations. */
-    if (sbi_spec_version == 0)
-    {
-        rt_kprintf("SBI: Unknown (Legacy) Implementation\n");
-        rt_kprintf("SBI Specification Version: 0.1\n");
-        return;
-    }
-
-    switch (sbi_impl_id)
-    {
-    case (SBI_IMPL_ID_BBL):
-        rt_kprintf("SBI: Berkely Boot Loader %lu\n", sbi_impl_version);
-        break;
-    case (SBI_IMPL_ID_XVISOR):
-        rt_kprintf("SBI: eXtensible Versatile hypervISOR %lu\n",
-                   sbi_impl_version);
-        break;
-    case (SBI_IMPL_ID_KVM):
-        rt_kprintf("SBI: Kernel-based Virtual Machine %lu\n", sbi_impl_version);
-        break;
-    case (SBI_IMPL_ID_RUSTSBI):
-        rt_kprintf("SBI: RustSBI %lu\n", sbi_impl_version);
-        break;
-    case (SBI_IMPL_ID_DIOSIX):
-        rt_kprintf("SBI: Diosix %lu\n", sbi_impl_version);
-        break;
-    case (SBI_IMPL_ID_OPENSBI):
-        major = sbi_impl_version >> OPENSBI_VERSION_MAJOR_OFFSET;
-        minor = sbi_impl_version & OPENSBI_VERSION_MINOR_MASK;
-        rt_kprintf("SBI: OpenSBI v%u.%u\n", major, minor);
-        break;
-    default:
-        rt_kprintf("SBI: Unrecognized Implementation: %lu\n", sbi_impl_id);
-        break;
-    }
-
-    major = (sbi_spec_version & SBI_SPEC_VERS_MAJOR_MASK) >>
-            SBI_SPEC_VERS_MAJOR_OFFSET;
-    minor = (sbi_spec_version & SBI_SPEC_VERS_MINOR_MASK);
-    rt_kprintf("SBI Specification Version: %u.%u\n", major, minor);
-}
-
-void sbi_set_timer(uint64_t val)
-{
-    struct sbi_ret ret;
-
-    /* Use the TIME legacy replacement extension, if available. */
-    if (has_time_extension)
-    {
-        ret = SBI_CALL1(SBI_EXT_ID_TIME, SBI_TIME_SET_TIMER, val);
-        RT_ASSERT(ret.error == SBI_SUCCESS);
-    }
-    else
-    {
-        (void)SBI_CALL1(SBI_SET_TIMER, 0, val);
-    }
-}
-
-void sbi_send_ipi(const unsigned long *hart_mask)
-{
-    struct sbi_ret ret;
-
-    /* Use the IPI legacy replacement extension, if available. */
-    if (has_ipi_extension)
-    {
-        ret = SBI_CALL2(SBI_EXT_ID_IPI, SBI_IPI_SEND_IPI, *hart_mask, 0);
-        RT_ASSERT(ret.error == SBI_SUCCESS);
-    }
-    else
-    {
-        (void)SBI_CALL1(SBI_SEND_IPI, 0, (uint64_t)hart_mask);
-    }
-}
-
-void sbi_remote_fence_i(const unsigned long *hart_mask)
-{
-    struct sbi_ret ret;
-
-    /* Use the RFENCE legacy replacement extension, if available. */
-    if (has_rfnc_extension)
-    {
-        ret =
-            SBI_CALL2(SBI_EXT_ID_RFNC, SBI_RFNC_REMOTE_FENCE_I, *hart_mask, 0);
-        RT_ASSERT(ret.error == SBI_SUCCESS);
-    }
-    else
-    {
-        (void)SBI_CALL1(SBI_REMOTE_FENCE_I, 0, (uint64_t)hart_mask);
-    }
-}
-
-int sbi_remote_sfence_vma(const unsigned long *hart_mask,
-                          const unsigned long hart_mask_base,
-                          unsigned long start, unsigned long size)
-{
-    struct sbi_ret ret = {.error = SBI_SUCCESS};
-
-    /* Use the RFENCE legacy replacement extension, if available. */
-    if (has_rfnc_extension)
-    {
-        ret = SBI_CALL4(SBI_EXT_ID_RFNC, SBI_RFNC_REMOTE_SFENCE_VMA, *hart_mask,
-                        hart_mask_base, start, size);
-    }
-    else
-    {
-        (void)SBI_CALL3(SBI_REMOTE_SFENCE_VMA, 0, (uint64_t)hart_mask, start,
-                        size);
-    }
-    return ret.error;
-}
-
-void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
-                                unsigned long start, unsigned long size,
-                                unsigned long asid)
-{
-    struct sbi_ret ret;
-
-    /* Use the RFENCE legacy replacement extension, if available. */
-    if (has_rfnc_extension)
-    {
-        ret = SBI_CALL5(SBI_EXT_ID_RFNC, SBI_RFNC_REMOTE_SFENCE_VMA_ASID,
-                        *hart_mask, 0, start, size, asid);
-        RT_ASSERT(ret.error == SBI_SUCCESS);
-    }
-    else
-    {
-        (void)SBI_CALL4(SBI_REMOTE_SFENCE_VMA_ASID, 0, (uint64_t)hart_mask,
-                        start, size, asid);
-    }
-}
-
-int sbi_hsm_hart_start(unsigned long hart, unsigned long start_addr,
-                       unsigned long priv)
-{
-    struct sbi_ret ret;
-
-    ret = SBI_CALL3(SBI_EXT_ID_HSM, SBI_HSM_HART_START, hart, start_addr, priv);
-    return (ret.error != 0 ? (int)ret.error : 0);
-}
-
-void sbi_hsm_hart_stop(void)
-{
-    (void)SBI_CALL0(SBI_EXT_ID_HSM, SBI_HSM_HART_STOP);
-}
-
-int sbi_hsm_hart_status(unsigned long hart)
-{
-    struct sbi_ret ret;
-
-    ret = SBI_CALL1(SBI_EXT_ID_HSM, SBI_HSM_HART_STATUS, hart);
-
-    return (ret.error != 0 ? (int)ret.error : (int)ret.value);
-}
-
-void sbi_init(void)
-{
-    struct sbi_ret sret;
-
-    /*
-     * Get the spec version. For legacy SBI implementations this will
-     * return an error, otherwise it is guaranteed to succeed.
-     */
-    sret = sbi_get_spec_version();
-    if (sret.error != 0)
-    {
-        /* We are running a legacy SBI implementation. */
-        sbi_spec_version = 0;
-        return;
-    }
-
-    /* Set the SBI implementation info. */
-    sbi_spec_version = sret.value;
-    sbi_impl_id = sbi_get_impl_id().value;
-    sbi_impl_version = sbi_get_impl_version().value;
-
-    /* Probe for legacy replacement extensions. */
-    if (sbi_probe_extension(SBI_EXT_ID_TIME) != 0)
-        has_time_extension = true;
-    if (sbi_probe_extension(SBI_EXT_ID_IPI) != 0)
-        has_ipi_extension = true;
-    if (sbi_probe_extension(SBI_EXT_ID_RFNC) != 0)
-        has_rfnc_extension = true;
-}
-
-void rt_hw_console_output(const char *str)
-{
-    while (*str)
-    {
-        sbi_console_putchar(*str++);
-    }
-}

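All of the wrappers above funnel into the same ecall shim, with the extension id in a7 and the function id in a6 (legacy extensions pass 0 for the latter). A minimal sketch issuing a legacy console putchar through that shim; in practice callers just use sbi_console_putchar():

    #include "sbi.h"

    void putc_via_sbi(char c)
    {
        /* legacy extension: EID in a7, a6 = 0, character in a0 */
        (void)SBI_CALL1(SBI_CONSOLE_PUTCHAR, 0, (uint64_t)c);
    }
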
+ 0 - 244
libcpu/risc-v/virt64/sbi.h

@@ -1,244 +0,0 @@
-/*
- * Copyright (c) 2006-2021, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2021-05-18     Bernard      port from FreeBSD
- */
-
-/*-
- * Copyright (c) 2016-2017 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- * Copyright (c) 2019 Mitchell Horne <mhorne@FreeBSD.org>
- *
- * Portions of this software were developed by SRI International and the
- * University of Cambridge Computer Laboratory under DARPA/AFRL contract
- * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Portions of this software were developed by the University of Cambridge
- * Computer Laboratory as part of the CTSRD Project, with support from the
- * UK Higher Education Innovation Fund (HEIF).
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _MACHINE_SBI_H_
-#define _MACHINE_SBI_H_
-
-#include <stdint.h>
-#include <rtdef.h>
-
-/* SBI Specification Version */
-#define SBI_SPEC_VERS_MAJOR_OFFSET  24
-#define SBI_SPEC_VERS_MAJOR_MASK    (0x7F << SBI_SPEC_VERS_MAJOR_OFFSET)
-#define SBI_SPEC_VERS_MINOR_OFFSET  0
-#define SBI_SPEC_VERS_MINOR_MASK    (0xFFFFFF << SBI_SPEC_VERS_MINOR_OFFSET)
-
-/* SBI Implementation IDs */
-#define SBI_IMPL_ID_BBL         0
-#define SBI_IMPL_ID_OPENSBI     1
-#define SBI_IMPL_ID_XVISOR      2
-#define SBI_IMPL_ID_KVM         3
-#define SBI_IMPL_ID_RUSTSBI     4
-#define SBI_IMPL_ID_DIOSIX      5
-
-/* SBI Error Codes */
-#define SBI_SUCCESS                 0
-#define SBI_ERR_FAILURE             -1
-#define SBI_ERR_NOT_SUPPORTED       -2
-#define SBI_ERR_INVALID_PARAM       -3
-#define SBI_ERR_DENIED              -4
-#define SBI_ERR_INVALID_ADDRESS     -5
-#define SBI_ERR_ALREADY_AVAILABLE   -6
-
-/* SBI Base Extension */
-#define SBI_EXT_ID_BASE             0x10
-#define SBI_BASE_GET_SPEC_VERSION   0
-#define SBI_BASE_GET_IMPL_ID        1
-#define SBI_BASE_GET_IMPL_VERSION   2
-#define SBI_BASE_PROBE_EXTENSION    3
-#define SBI_BASE_GET_MVENDORID      4
-#define SBI_BASE_GET_MARCHID        5
-#define SBI_BASE_GET_MIMPID         6
-
-/* Timer (TIME) Extension */
-#define SBI_EXT_ID_TIME         0x54494D45
-#define SBI_TIME_SET_TIMER      0
-
-/* IPI (IPI) Extension */
-#define SBI_EXT_ID_IPI          0x735049
-#define SBI_IPI_SEND_IPI        0
-
-/* RFENCE (RFNC) Extension */
-#define SBI_EXT_ID_RFNC                     0x52464E43
-#define SBI_RFNC_REMOTE_FENCE_I             0
-#define SBI_RFNC_REMOTE_SFENCE_VMA          1
-#define SBI_RFNC_REMOTE_SFENCE_VMA_ASID     2
-#define SBI_RFNC_REMOTE_HFENCE_GVMA_VMID    3
-#define SBI_RFNC_REMOTE_HFENCE_GVMA         4
-#define SBI_RFNC_REMOTE_HFENCE_VVMA_ASID    5
-#define SBI_RFNC_REMOTE_HFENCE_VVMA         6
-
-/* Hart State Management (HSM) Extension */
-#define SBI_EXT_ID_HSM                  0x48534D
-#define SBI_HSM_HART_START              0
-#define SBI_HSM_HART_STOP               1
-#define SBI_HSM_HART_STATUS             2
-#define  SBI_HSM_STATUS_STARTED         0
-#define  SBI_HSM_STATUS_STOPPED         1
-#define  SBI_HSM_STATUS_START_PENDING   2
-#define  SBI_HSM_STATUS_STOP_PENDING    3
-
-/* Legacy Extensions */
-#define SBI_SET_TIMER               0
-#define SBI_CONSOLE_PUTCHAR         1
-#define SBI_CONSOLE_GETCHAR         2
-#define SBI_CLEAR_IPI               3
-#define SBI_SEND_IPI                4
-#define SBI_REMOTE_FENCE_I          5
-#define SBI_REMOTE_SFENCE_VMA       6
-#define SBI_REMOTE_SFENCE_VMA_ASID  7
-#define SBI_SHUTDOWN                8
-
-#define SBI_CALL0(e, f)                     SBI_CALL5(e, f, 0, 0, 0, 0, 0)
-#define SBI_CALL1(e, f, p1)                 SBI_CALL5(e, f, p1, 0, 0, 0, 0)
-#define SBI_CALL2(e, f, p1, p2)             SBI_CALL5(e, f, p1, p2, 0, 0, 0)
-#define SBI_CALL3(e, f, p1, p2, p3)         SBI_CALL5(e, f, p1, p2, p3, 0, 0)
-#define SBI_CALL4(e, f, p1, p2, p3, p4)     SBI_CALL5(e, f, p1, p2, p3, p4, 0)
-#define SBI_CALL5(e, f, p1, p2, p3, p4, p5) sbi_call(e, f, p1, p2, p3, p4, p5)
-
-/*
- * Documentation available at
- * https://github.com/riscv/riscv-sbi-doc/blob/master/riscv-sbi.adoc
- */
-
-struct sbi_ret
-{
-    long error;
-    long value;
-};
-
-rt_inline struct sbi_ret
-sbi_call(uint64_t arg7, uint64_t arg6, uint64_t arg0, uint64_t arg1,
-         uint64_t arg2, uint64_t arg3, uint64_t arg4)
-{
-    struct sbi_ret ret;
-
-    register uintptr_t a0 __asm("a0") = (uintptr_t)(arg0);
-    register uintptr_t a1 __asm("a1") = (uintptr_t)(arg1);
-    register uintptr_t a2 __asm("a2") = (uintptr_t)(arg2);
-    register uintptr_t a3 __asm("a3") = (uintptr_t)(arg3);
-    register uintptr_t a4 __asm("a4") = (uintptr_t)(arg4);
-    register uintptr_t a6 __asm("a6") = (uintptr_t)(arg6);
-    register uintptr_t a7 __asm("a7") = (uintptr_t)(arg7);
-
-    __asm __volatile(\
-                     "ecall"                                        \
-                     : "+r"(a0), "+r"(a1)                           \
-                     : "r"(a2), "r"(a3), "r"(a4), "r"(a6), "r"(a7)  \
-                     : "memory");
-
-    ret.error = a0;
-    ret.value = a1;
-    return (ret);
-}
-
-/* Base extension functions and variables. */
-extern unsigned long sbi_spec_version;
-extern unsigned long sbi_impl_id;
-extern unsigned long sbi_impl_version;
-
-static __inline long
-sbi_probe_extension(long id)
-{
-    return (SBI_CALL1(SBI_EXT_ID_BASE, SBI_BASE_PROBE_EXTENSION, id).value);
-}
-
-/* TIME extension functions. */
-void sbi_set_timer(uint64_t val);
-
-/* IPI extension functions. */
-void sbi_send_ipi(const unsigned long *hart_mask);
-
-/* RFENCE extension functions. */
-void sbi_remote_fence_i(const unsigned long *hart_mask);
-int sbi_remote_sfence_vma(const unsigned long *hart_mask,
-                          const unsigned long hart_mask_base,
-                          unsigned long start, unsigned long size);
-void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask, unsigned long start,
-                                unsigned long size, unsigned long asid);
-
-/* Hart State Management extension functions. */
-
-/*
- * Start execution on the specified hart at physical address start_addr. The
- * register a0 will contain the hart's ID, and a1 will contain the value of
- * priv.
- */
-int sbi_hsm_hart_start(unsigned long hart, unsigned long start_addr, unsigned long priv);
-
-/*
- * Stop execution on the current hart. Interrupts should be disabled, or this
- * function may return.
- */
-void sbi_hsm_hart_stop(void);
-
-/*
- * Get the execution status of the specified hart. The status will be one of:
- *  - SBI_HSM_STATUS_STARTED
- *  - SBI_HSM_STATUS_STOPPED
- *  - SBI_HSM_STATUS_START_PENDING
- *  - SBI_HSM_STATUS_STOP_PENDING
- */
-int sbi_hsm_hart_status(unsigned long hart);
-
-/* Legacy extension functions. */
-static __inline void
-sbi_console_putchar(int ch)
-{
-    (void)SBI_CALL1(SBI_CONSOLE_PUTCHAR, 0, ch);
-}
-
-static __inline int
-sbi_console_getchar(void)
-{
-    /*
-     * XXX: The "error" is returned here because legacy SBI functions
-     * continue to return their value in a0.
-     */
-    return (SBI_CALL0(SBI_CONSOLE_GETCHAR, 0).error);
-}
-
-static __inline void
-sbi_shutdown(void)
-{
-    (void)SBI_CALL0(SBI_SHUTDOWN, 0);
-}
-
-void sbi_print_version(void);
-void sbi_init(void);
-
-#endif /* !_MACHINE_SBI_H_ */
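
The HSM declarations above are what multi-hart bring-up would build on. The sketch below is a hypothetical usage example, not code from this tree: _secondary_start and example_start_hart are made-up names, and it simply checks that a hart is stopped before asking the SBI firmware to start it (on entry a0 holds the hart id and a1 the priv value, as documented above).

    extern void _secondary_start(void);   /* hypothetical assembly entry point */

    rt_inline int example_start_hart(unsigned long hartid)
    {
        if (sbi_hsm_hart_status(hartid) != SBI_HSM_STATUS_STOPPED)
            return SBI_ERR_ALREADY_AVAILABLE;

        /* a real caller must pass the physical address of the entry point */
        return sbi_hsm_hart_start(hartid, (unsigned long)&_secondary_start, 0UL);
    }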

+ 0 - 70
libcpu/risc-v/virt64/stack.h

@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2006-2021, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2021-01-30     lizhirui     first version
- * 2021-11-18     JasonHu      add fpu member
- * 2022-10-22     Shell        Support kernel mode RVV
- */
-
-#ifndef __STACK_H__
-#define __STACK_H__
-
-#include "stackframe.h"
-
-#include <rtthread.h>
-
-typedef struct rt_hw_switch_frame
-{
-    uint64_t regs[RT_HW_SWITCH_CONTEXT_SIZE];
-} *rt_hw_switch_frame_t;
-
-
-struct rt_hw_stack_frame
-{
-    rt_ubase_t epc;        /* epc - epc    - program counter                     */
-    rt_ubase_t ra;         /* x1  - ra     - return address for jumps            */
-    rt_ubase_t sstatus;    /*              - supervisor status register          */
-    rt_ubase_t gp;         /* x3  - gp     - global pointer                      */
-    rt_ubase_t tp;         /* x4  - tp     - thread pointer                      */
-    rt_ubase_t t0;         /* x5  - t0     - temporary register 0                */
-    rt_ubase_t t1;         /* x6  - t1     - temporary register 1                */
-    rt_ubase_t t2;         /* x7  - t2     - temporary register 2                */
-    rt_ubase_t s0_fp;      /* x8  - s0/fp  - saved register 0 or frame pointer   */
-    rt_ubase_t s1;         /* x9  - s1     - saved register 1                    */
-    rt_ubase_t a0;         /* x10 - a0     - return value or function argument 0 */
-    rt_ubase_t a1;         /* x11 - a1     - return value or function argument 1 */
-    rt_ubase_t a2;         /* x12 - a2     - function argument 2                 */
-    rt_ubase_t a3;         /* x13 - a3     - function argument 3                 */
-    rt_ubase_t a4;         /* x14 - a4     - function argument 4                 */
-    rt_ubase_t a5;         /* x15 - a5     - function argument 5                 */
-    rt_ubase_t a6;         /* x16 - a6     - function argument 6                 */
-    rt_ubase_t a7;         /* x17 - a7     - function argument 7 */
-    rt_ubase_t s2;         /* x18 - s2     - saved register 2                    */
-    rt_ubase_t s3;         /* x19 - s3     - saved register 3                    */
-    rt_ubase_t s4;         /* x20 - s4     - saved register 4                    */
-    rt_ubase_t s5;         /* x21 - s5     - saved register 5                    */
-    rt_ubase_t s6;         /* x22 - s6     - saved register 6                    */
-    rt_ubase_t s7;         /* x23 - s7     - saved register 7                    */
-    rt_ubase_t s8;         /* x24 - s8     - saved register 8                    */
-    rt_ubase_t s9;         /* x25 - s9     - saved register 9                    */
-    rt_ubase_t s10;        /* x26 - s10    - saved register 10                   */
-    rt_ubase_t s11;        /* x27 - s11    - saved register 11                   */
-    rt_ubase_t t3;         /* x28 - t3     - temporary register 3                */
-    rt_ubase_t t4;         /* x29 - t4     - temporary register 4                */
-    rt_ubase_t t5;         /* x30 - t5     - temporary register 5                */
-    rt_ubase_t t6;         /* x31 - t6     - temporary register 6                */
-    rt_ubase_t user_sp_exc_stack;    /* sscratch - user mode sp/exception stack  */
-    rt_ubase_t __padding;  /* align to 16 bytes */
-#ifdef ARCH_RISCV_FPU
-    rt_ubase_t f[CTX_FPU_REG_NR];      /* f0~f31 */
-#endif /* ARCH_RISCV_FPU */
-#ifdef ARCH_RISCV_VECTOR
-    rt_ubase_t v[CTX_VECTOR_REG_NR];
-#endif /* ARCH_RISCV_VECTOR */
-};
-
-#endif
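
struct rt_hw_stack_frame above is the exception frame layout that the context code pushes and pops. As a minimal sketch (this is not the port's actual rt_hw_stack_init, which configures more state), a fresh thread stack would be seeded by placing one such frame at the aligned top of the stack and filling in epc, a0 and sstatus; the helper name below is illustrative.

    #include <string.h>
    #include "stack.h"

    static rt_uint8_t *example_stack_init(void *entry, void *parameter,
                                          rt_uint8_t *stack_top)
    {
        struct rt_hw_stack_frame *frame;

        /* keep the frame 16-byte aligned, as the __padding member implies */
        stack_top = (rt_uint8_t *)RT_ALIGN_DOWN((rt_ubase_t)stack_top, 16);
        frame = (struct rt_hw_stack_frame *)(stack_top - sizeof(*frame));

        memset(frame, 0, sizeof(*frame));
        frame->epc = (rt_ubase_t)entry;              /* sepc: thread entry point  */
        frame->a0  = (rt_ubase_t)parameter;          /* first argument register   */
        frame->sstatus = SSTATUS_SPP | SSTATUS_SPIE; /* S-mode, interrupts on sret */

        return (rt_uint8_t *)frame;
    }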

+ 0 - 312
libcpu/risc-v/virt64/stackframe.h

@@ -1,312 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2021-02-02     lizhirui     first version
- * 2021-02-11     lizhirui     fixed gp save/store bug
- * 2021-11-18     JasonHu      add fpu registers save/restore
- * 2022-10-22     Shell        Support kernel mode RVV
- */
-
-#ifndef __STACKFRAME_H__
-#define __STACKFRAME_H__
-
-#include <rtconfig.h>
-#include "encoding.h"
-#include "ext_context.h"
-
-/* bytes of register width */
-#ifdef ARCH_CPU_64BIT
-#define STORE                   sd
-#define LOAD                    ld
-#define FSTORE                  fsd
-#define FLOAD                   fld
-#define REGBYTES                8
-#else
-// error here, not portable
-#error "Not supported XLEN"
-#endif
-
-/* 33 general register + 1 padding */
-#define CTX_GENERAL_REG_NR  34
-
-/* all context registers */
-#define CTX_REG_NR  (CTX_GENERAL_REG_NR + CTX_FPU_REG_NR + CTX_VECTOR_REG_NR)
-
-#define BYTES(idx)          ((idx) * REGBYTES)
-#define FRAME_OFF_SSTATUS   BYTES(2)
-#define FRAME_OFF_SP        BYTES(32)
-#define FRAME_OFF_GP        BYTES(3)
-
-/* switch frame */
-#define RT_HW_SWITCH_CONTEXT_SSTATUS    0
-#define RT_HW_SWITCH_CONTEXT_S11        1
-#define RT_HW_SWITCH_CONTEXT_S10        2
-#define RT_HW_SWITCH_CONTEXT_S9         3
-#define RT_HW_SWITCH_CONTEXT_S8         4
-#define RT_HW_SWITCH_CONTEXT_S7         5
-#define RT_HW_SWITCH_CONTEXT_S6         6
-#define RT_HW_SWITCH_CONTEXT_S5         7
-#define RT_HW_SWITCH_CONTEXT_S4         8
-#define RT_HW_SWITCH_CONTEXT_S3         9
-#define RT_HW_SWITCH_CONTEXT_S2         10
-#define RT_HW_SWITCH_CONTEXT_S1         11
-#define RT_HW_SWITCH_CONTEXT_S0         12
-#define RT_HW_SWITCH_CONTEXT_RA         13
-#define RT_HW_SWITCH_CONTEXT_TP         14
-#define RT_HW_SWITCH_CONTEXT_ALIGNMENT  15  // Padding for alignment
-#define RT_HW_SWITCH_CONTEXT_SIZE       16  // Total size of the structure
-
-#ifdef __ASSEMBLY__
-
-.macro SAVE_ALL
-
-#ifdef ARCH_RISCV_FPU
-    /* reserve float registers */
-    addi sp, sp, -CTX_FPU_REG_NR * REGBYTES
-#endif /* ARCH_RISCV_FPU */
-#ifdef ARCH_RISCV_VECTOR
-    /* reserve vector registers */
-    addi sp, sp, -CTX_VECTOR_REG_NR * REGBYTES
-#endif /* ARCH_RISCV_VECTOR */
-
-    /* save general registers */
-    addi sp, sp, -CTX_GENERAL_REG_NR * REGBYTES
-    STORE x1,   1 * REGBYTES(sp)
-
-    csrr  x1, sstatus
-    STORE x1, FRAME_OFF_SSTATUS(sp)
-
-    csrr  x1, sepc
-    STORE x1,   0 * REGBYTES(sp)
-
-    STORE x3,   3 * REGBYTES(sp)
-    STORE x4,   4 * REGBYTES(sp) /* save tp */
-    STORE x5,   5 * REGBYTES(sp)
-    STORE x6,   6 * REGBYTES(sp)
-    STORE x7,   7 * REGBYTES(sp)
-    STORE x8,   8 * REGBYTES(sp)
-    STORE x9,   9 * REGBYTES(sp)
-    STORE x10, 10 * REGBYTES(sp)
-    STORE x11, 11 * REGBYTES(sp)
-    STORE x12, 12 * REGBYTES(sp)
-    STORE x13, 13 * REGBYTES(sp)
-    STORE x14, 14 * REGBYTES(sp)
-    STORE x15, 15 * REGBYTES(sp)
-    STORE x16, 16 * REGBYTES(sp)
-    STORE x17, 17 * REGBYTES(sp)
-    STORE x18, 18 * REGBYTES(sp)
-    STORE x19, 19 * REGBYTES(sp)
-    STORE x20, 20 * REGBYTES(sp)
-    STORE x21, 21 * REGBYTES(sp)
-    STORE x22, 22 * REGBYTES(sp)
-    STORE x23, 23 * REGBYTES(sp)
-    STORE x24, 24 * REGBYTES(sp)
-    STORE x25, 25 * REGBYTES(sp)
-    STORE x26, 26 * REGBYTES(sp)
-    STORE x27, 27 * REGBYTES(sp)
-    STORE x28, 28 * REGBYTES(sp)
-    STORE x29, 29 * REGBYTES(sp)
-    STORE x30, 30 * REGBYTES(sp)
-    STORE x31, 31 * REGBYTES(sp)
-    csrr t0, sscratch
-    STORE t0, 32 * REGBYTES(sp)
-
-#ifdef ARCH_RISCV_FPU
-    /* backup sp and adjust sp to save float registers */
-    mv t1, sp
-    addi t1, t1, CTX_GENERAL_REG_NR * REGBYTES
-
-    li  t0, SSTATUS_FS
-    csrs sstatus, t0
-    FSTORE f0,  FPU_CTX_F0_OFF(t1)
-    FSTORE f1,  FPU_CTX_F1_OFF(t1)
-    FSTORE f2,  FPU_CTX_F2_OFF(t1)
-    FSTORE f3,  FPU_CTX_F3_OFF(t1)
-    FSTORE f4,  FPU_CTX_F4_OFF(t1)
-    FSTORE f5,  FPU_CTX_F5_OFF(t1)
-    FSTORE f6,  FPU_CTX_F6_OFF(t1)
-    FSTORE f7,  FPU_CTX_F7_OFF(t1)
-    FSTORE f8,  FPU_CTX_F8_OFF(t1)
-    FSTORE f9,  FPU_CTX_F9_OFF(t1)
-    FSTORE f10, FPU_CTX_F10_OFF(t1)
-    FSTORE f11, FPU_CTX_F11_OFF(t1)
-    FSTORE f12, FPU_CTX_F12_OFF(t1)
-    FSTORE f13, FPU_CTX_F13_OFF(t1)
-    FSTORE f14, FPU_CTX_F14_OFF(t1)
-    FSTORE f15, FPU_CTX_F15_OFF(t1)
-    FSTORE f16, FPU_CTX_F16_OFF(t1)
-    FSTORE f17, FPU_CTX_F17_OFF(t1)
-    FSTORE f18, FPU_CTX_F18_OFF(t1)
-    FSTORE f19, FPU_CTX_F19_OFF(t1)
-    FSTORE f20, FPU_CTX_F20_OFF(t1)
-    FSTORE f21, FPU_CTX_F21_OFF(t1)
-    FSTORE f22, FPU_CTX_F22_OFF(t1)
-    FSTORE f23, FPU_CTX_F23_OFF(t1)
-    FSTORE f24, FPU_CTX_F24_OFF(t1)
-    FSTORE f25, FPU_CTX_F25_OFF(t1)
-    FSTORE f26, FPU_CTX_F26_OFF(t1)
-    FSTORE f27, FPU_CTX_F27_OFF(t1)
-    FSTORE f28, FPU_CTX_F28_OFF(t1)
-    FSTORE f29, FPU_CTX_F29_OFF(t1)
-    FSTORE f30, FPU_CTX_F30_OFF(t1)
-    FSTORE f31, FPU_CTX_F31_OFF(t1)
-
-    /* clr FS domain */
-    csrc sstatus, t0
-
-    /* setting FS to clean also clears the SD bit */
-    li t0, SSTATUS_FS_CLEAN
-    csrs sstatus, t0
-
-#endif /* ARCH_RISCV_FPU */
-
-#ifdef ARCH_RISCV_VECTOR
-    csrr    t0, sstatus
-    andi    t0, t0, SSTATUS_VS
-    beqz    t0, 0f
-
-    /* push vector frame */
-    addi t1, sp, (CTX_GENERAL_REG_NR + CTX_FPU_REG_NR) * REGBYTES
-
-    SAVE_VECTOR t1
-0:
-#endif /* ARCH_RISCV_VECTOR */
-.endm
-
-/**
- * @brief Restore All General Registers, for interrupt handling
- *
- */
-.macro RESTORE_ALL
-
-#ifdef ARCH_RISCV_VECTOR
-    // skip the restore when the vector unit is off
-    ld      t0, 2 * REGBYTES(sp)
-    // vector state is never dirty in the initial state, so it is not restored
-    andi    t0, t0, SSTATUS_VS_CLEAN
-    beqz    t0, 0f
-
-    /* locate and restore the vector frame */
-    addi t1, sp, (CTX_GENERAL_REG_NR + CTX_FPU_REG_NR) * REGBYTES
-
-    RESTORE_VECTOR t1
-0:
-#endif /* ARCH_RISCV_VECTOR */
-
-#ifdef ARCH_RISCV_FPU
-    /* restore float registers */
-    addi t2, sp, CTX_GENERAL_REG_NR * REGBYTES
-
-    li  t0, SSTATUS_FS
-    csrs sstatus, t0
-    FLOAD f0,  FPU_CTX_F0_OFF(t2)
-    FLOAD f1,  FPU_CTX_F1_OFF(t2)
-    FLOAD f2,  FPU_CTX_F2_OFF(t2)
-    FLOAD f3,  FPU_CTX_F3_OFF(t2)
-    FLOAD f4,  FPU_CTX_F4_OFF(t2)
-    FLOAD f5,  FPU_CTX_F5_OFF(t2)
-    FLOAD f6,  FPU_CTX_F6_OFF(t2)
-    FLOAD f7,  FPU_CTX_F7_OFF(t2)
-    FLOAD f8,  FPU_CTX_F8_OFF(t2)
-    FLOAD f9,  FPU_CTX_F9_OFF(t2)
-    FLOAD f10, FPU_CTX_F10_OFF(t2)
-    FLOAD f11, FPU_CTX_F11_OFF(t2)
-    FLOAD f12, FPU_CTX_F12_OFF(t2)
-    FLOAD f13, FPU_CTX_F13_OFF(t2)
-    FLOAD f14, FPU_CTX_F14_OFF(t2)
-    FLOAD f15, FPU_CTX_F15_OFF(t2)
-    FLOAD f16, FPU_CTX_F16_OFF(t2)
-    FLOAD f17, FPU_CTX_F17_OFF(t2)
-    FLOAD f18, FPU_CTX_F18_OFF(t2)
-    FLOAD f19, FPU_CTX_F19_OFF(t2)
-    FLOAD f20, FPU_CTX_F20_OFF(t2)
-    FLOAD f21, FPU_CTX_F21_OFF(t2)
-    FLOAD f22, FPU_CTX_F22_OFF(t2)
-    FLOAD f23, FPU_CTX_F23_OFF(t2)
-    FLOAD f24, FPU_CTX_F24_OFF(t2)
-    FLOAD f25, FPU_CTX_F25_OFF(t2)
-    FLOAD f26, FPU_CTX_F26_OFF(t2)
-    FLOAD f27, FPU_CTX_F27_OFF(t2)
-    FLOAD f28, FPU_CTX_F28_OFF(t2)
-    FLOAD f29, FPU_CTX_F29_OFF(t2)
-    FLOAD f30, FPU_CTX_F30_OFF(t2)
-    FLOAD f31, FPU_CTX_F31_OFF(t2)
-
-    /* clr FS domain */
-    csrc sstatus, t0
-
-    /* setting FS to clean also clears the SD bit */
-    li t0, SSTATUS_FS_CLEAN
-    csrs sstatus, t0
-
-#endif /* ARCH_RISCV_FPU */
-
-    /* restore general registers */
-    addi t0, sp, CTX_REG_NR * REGBYTES
-    csrw sscratch, t0
-
-    /* restore sepc from the saved epc slot (via x1) */
-    LOAD x1, 0 * REGBYTES(sp)
-    csrw sepc, x1
-
-    LOAD x1,   2 * REGBYTES(sp)
-    csrw sstatus, x1
-
-    LOAD x1,   1 * REGBYTES(sp)
-
-    LOAD x3,   3 * REGBYTES(sp)
-    LOAD x4,   4 * REGBYTES(sp) /* restore tp */
-    LOAD x5,   5 * REGBYTES(sp)
-    LOAD x6,   6 * REGBYTES(sp)
-    LOAD x7,   7 * REGBYTES(sp)
-    LOAD x8,   8 * REGBYTES(sp)
-    LOAD x9,   9 * REGBYTES(sp)
-    LOAD x10, 10 * REGBYTES(sp)
-    LOAD x11, 11 * REGBYTES(sp)
-    LOAD x12, 12 * REGBYTES(sp)
-    LOAD x13, 13 * REGBYTES(sp)
-    LOAD x14, 14 * REGBYTES(sp)
-    LOAD x15, 15 * REGBYTES(sp)
-    LOAD x16, 16 * REGBYTES(sp)
-    LOAD x17, 17 * REGBYTES(sp)
-    LOAD x18, 18 * REGBYTES(sp)
-    LOAD x19, 19 * REGBYTES(sp)
-    LOAD x20, 20 * REGBYTES(sp)
-    LOAD x21, 21 * REGBYTES(sp)
-    LOAD x22, 22 * REGBYTES(sp)
-    LOAD x23, 23 * REGBYTES(sp)
-    LOAD x24, 24 * REGBYTES(sp)
-    LOAD x25, 25 * REGBYTES(sp)
-    LOAD x26, 26 * REGBYTES(sp)
-    LOAD x27, 27 * REGBYTES(sp)
-    LOAD x28, 28 * REGBYTES(sp)
-    LOAD x29, 29 * REGBYTES(sp)
-    LOAD x30, 30 * REGBYTES(sp)
-    LOAD x31, 31 * REGBYTES(sp)
-
-    /* restore user sp */
-    LOAD sp, 32 * REGBYTES(sp)
-.endm
-
-.macro RESTORE_SYS_GP
-    .option push
-    .option norelax
-        la gp, __global_pointer$
-    .option pop
-.endm
-
-.macro OPEN_INTERRUPT
-    csrsi sstatus, 2
-.endm
-
-.macro CLOSE_INTERRUPT
-    csrci sstatus, 2
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __STACKFRAME_H__ */
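
The FRAME_OFF_* and RT_HW_SWITCH_CONTEXT_* values above are hand-maintained offsets into the C structures in stack.h, so keeping them in sync is easy to get wrong. A small illustrative guard, assuming RT_STATIC_ASSERT is usable the way tick.c below uses it, could pin the three frame offsets at compile time:

    #include <stddef.h>
    #include "stack.h"   /* pulls in stackframe.h and struct rt_hw_stack_frame */

    RT_STATIC_ASSERT(frame_off_sstatus,
                     offsetof(struct rt_hw_stack_frame, sstatus) == FRAME_OFF_SSTATUS);
    RT_STATIC_ASSERT(frame_off_gp,
                     offsetof(struct rt_hw_stack_frame, gp) == FRAME_OFF_GP);
    RT_STATIC_ASSERT(frame_off_sp,
                     offsetof(struct rt_hw_stack_frame, user_sp_exc_stack) == FRAME_OFF_SP);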

+ 0 - 133
libcpu/risc-v/virt64/startup_gcc.S

@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2006-2018, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2018/10/01     Bernard      The first version
- * 2018/12/27     Jesven       Add SMP support
- * 2020/6/12      Xim          Port to QEMU and remove SMP support
- * 2024-06-30     Shell        Support of kernel remapping
- */
-
-#include <encoding.h>
-#include <cpuport.h>
-
-    .data
-    .global boot_hartid    /* global variable boot_hartid in the .data section */
-boot_hartid:
-    .word 0xdeadbeef
-
-    .global         _start
-    .section ".start", "ax"
-_start:
-    j 1f
-    .word 0xdeadbeef
-    .align 3
-    .global g_wake_up
-    g_wake_up:
-        .dword 1
-        .dword 0
-1:
-    /* save hartid */
-    la t0, boot_hartid                /* global variable boot_hartid */
-    mv t1, a0                         /* get hartid in S-mode from the a0 register */
-    sw t1, (t0)                       /* store the low 32 bits of t1 at the address held in t0 */
-
-    /* clear Interrupt Registers */
-    csrw sie, 0
-    csrw sip, 0
-    /* set Trap Vector Base Address Register */
-    la t0, trap_entry
-    csrw stvec, t0
-
-    li x1, 0
-    li x2, 0
-    li x3, 0
-    li x4, 0
-    li x5, 0
-    li x6, 0
-    li x7, 0
-    li x8, 0
-    li x9, 0
-    li x10,0
-    li x11,0
-    li x12,0
-    li x13,0
-    li x14,0
-    li x15,0
-    li x16,0
-    li x17,0
-    li x18,0
-    li x19,0
-    li x20,0
-    li x21,0
-    li x22,0
-    li x23,0
-    li x24,0
-    li x25,0
-    li x26,0
-    li x27,0
-    li x28,0
-    li x29,0
-    li x30,0
-    li x31,0
-
-    /* disable the FPU and vector unit */
-    li t0, SSTATUS_FS + SSTATUS_VS
-    csrc sstatus, t0
-    li t0, SSTATUS_SUM
-    csrs sstatus, t0
-
-.option push
-.option norelax
-    la gp, __global_pointer$
-.option pop
-
-    /* removed SMP support here */
-    la   sp, __stack_start__
-    li   t0, __STACKSIZE__
-    add  sp, sp, t0
-
-    /**
-     * sscratch is always zero in kernel mode
-     */
-    csrw sscratch, zero
-    call init_bss
-#ifdef ARCH_MM_MMU
-    call    rt_hw_mem_setup_early
-    call    rt_kmem_pvoff
-    /* a0 := pvoff  */
-    beq     a0, zero, 1f
-
-    /* relocate pc */
-    la      x1, _after_pc_relocation
-    sub     x1, x1, a0
-    ret
-_after_pc_relocation:
-    /* relocate gp */
-    sub     gp, gp, a0
-
-    /* relocate context: sp */
-    la      sp, __stack_start__
-    li      t0, __STACKSIZE__
-    add     sp, sp, t0
-
-    /* reset s0-fp */
-    mv      s0, zero
-
-    /* relocate stvec */
-    la      t0, trap_entry
-    csrw    stvec, t0
-1:
-#endif
-    call    sbi_init
-    call    primary_cpu_entry
-
-_never_return_here:
-    j       .
-
-.global _start_link_addr
-_start_link_addr:
-    .dword __text_start
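
The startup code above publishes a few symbols that C code can pick up once the kernel is running: boot_hartid is the 32-bit hart id saved from a0 before anything else runs, and _start_link_addr holds the link-time text base. A hedged sketch of consuming them follows; the reporting function is illustrative, and the extern types are assumptions matching the .word/.dword directives.

    #include <rtthread.h>

    extern rt_uint32_t boot_hartid;       /* .word, written from a0 in _start */
    extern rt_ubase_t  _start_link_addr;  /* .dword __text_start              */

    static void example_report_boot_info(void)
    {
        rt_kprintf("boot hart %d, text linked at %p\n",
                   boot_hartid, (void *)_start_link_addr);
    }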

+ 0 - 62
libcpu/risc-v/virt64/syscall_c.c

@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2006-2021, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2021-02-03     lizhirui     first version
- * 2022-11-10     WangXiaoyao  Add readable syscall tracing
- */
-
-#include <rthw.h>
-#include <rtthread.h>
-
-#ifdef RT_USING_SMART
-
-#define DBG_TAG "syscall"
-#define DBG_LVL DBG_WARNING
-#include <rtdbg.h>
-
-#include <stdint.h>
-#include <mmu.h>
-#include <page.h>
-#include <lwp_user_mm.h>
-
-#include "riscv_mmu.h"
-#include "stack.h"
-
-typedef rt_ubase_t (*syscallfunc_t)(rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t, rt_ubase_t);
-
-void syscall_handler(struct rt_hw_stack_frame *regs)
-{
-    const char *syscall_name;
-    int syscallid = regs->a7;
-
-    if (syscallid == 0)
-    {
-        LOG_E("syscall id = 0!\n");
-        while (1)
-            ;
-    }
-
-    syscallfunc_t syscallfunc = (syscallfunc_t)lwp_get_sys_api(syscallid);
-
-    if (syscallfunc == RT_NULL)
-    {
-        LOG_E("unsupported syscall!\n");
-        sys_exit_group(-1);
-    }
-
-#if DBG_LVL >= DBG_INFO
-    syscall_name = lwp_get_syscall_name(syscallid);
-#endif
-
-    LOG_I("[0x%lx] %s(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx)", rt_thread_self(), syscall_name,
-        regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
-    regs->a0 = syscallfunc(regs->a0, regs->a1, regs->a2, regs->a3, regs->a4, regs->a5, regs->a6);
-    regs->a7 = 0;
-    regs->epc += 4; // skip ecall instruction
-    LOG_I("[0x%lx] %s ret: 0x%lx", rt_thread_self(), syscall_name, regs->a0);
-}
-#endif /* RT_USING_SMART */
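
syscall_handler() above defines the kernel side of the ABI: the syscall id arrives in a7, up to seven arguments in a0..a6, the result is written back to a0, and sepc is advanced past the ecall. Below is a minimal user-side sketch of the same convention; the wrapper name is illustrative and no real lwp syscall number is implied.

    static long example_syscall3(long nr, long arg0, long arg1, long arg2)
    {
        register long a0 __asm__("a0") = arg0;
        register long a1 __asm__("a1") = arg1;
        register long a2 __asm__("a2") = arg2;
        register long a7 __asm__("a7") = nr;

        __asm__ volatile("ecall"
                         : "+r"(a0)
                         : "r"(a1), "r"(a2), "r"(a7)
                         : "memory");

        return a0;   /* the handler puts the return value back in a0 */
    }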

+ 0 - 76
libcpu/risc-v/virt64/tick.c

@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2006-2021, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2018/10/28     Bernard      The unify RISC-V porting code.
- * 2024/07/08     Shell        Using CPUTIME as tick
- */
-
-#include <rthw.h>
-#include <rtthread.h>
-
-#include <drivers/cputime.h>
-#include <encoding.h>
-#include "sbi.h"
-
-#ifdef RT_USING_KTIME
-#include <ktime.h>
-#endif
-
-static volatile unsigned long tick_cycles = 0;
-
-int tick_isr(void)
-{
-    rt_tick_increase();
-    sbi_set_timer(clock_cpu_gettime() + tick_cycles);
-    return 0;
-}
-
-/* the BSP must configure the clock base frequency (CPUTIME_TIMER_FREQ) */
-RT_STATIC_ASSERT(defined_clockbase_freq, CPUTIME_TIMER_FREQ != 0);
-
-/* Sets and enable the timer interrupt */
-int rt_hw_tick_init(void)
-{
-    /* calculate the tick cycles */
-    tick_cycles = CPUTIME_TIMER_FREQ / RT_TICK_PER_SECOND;
-
-    /* Clear the Supervisor-Timer bit in SIE */
-    clear_csr(sie, SIP_STIP);
-
-    /* Init riscv timer */
-    riscv_cputime_init();
-
-    /* Set timer */
-    sbi_set_timer(clock_cpu_gettime() + tick_cycles);
-
-#ifdef RT_USING_KTIME
-    rt_ktime_cputimer_init();
-#endif
-    /* Enable the Supervisor-Timer bit in SIE */
-    set_csr(sie, SIP_STIP);
-
-    return 0;
-}
-
-/**
- * This function will busy-wait for the given number of microseconds.
- *
- * @param us the delay time in microseconds
- */
-void rt_hw_us_delay(rt_uint32_t us)
-{
-    unsigned long start_time;
-    unsigned long end_time;
-    unsigned long run_time;
-
-    start_time = clock_cpu_gettime();
-    end_time = start_time + us * (CPUTIME_TIMER_FREQ / 1000000);
-    do
-    {
-        run_time = clock_cpu_gettime();
-    } while(run_time < end_time);
-}
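
To make the arithmetic above concrete, here is a worked example with illustrative numbers (not any board's real configuration): a 10 MHz CPUTIME timer and RT_TICK_PER_SECOND = 1000 give 10,000 counter cycles per OS tick, and rt_hw_us_delay() turns 50 us into 500 cycles.

    enum
    {
        EXAMPLE_TIMER_FREQ   = 10000000,  /* 10 MHz counter (illustrative)          */
        EXAMPLE_TICK_PER_SEC = 1000,      /* 1 ms per OS tick                       */
        EXAMPLE_TICK_CYCLES  = EXAMPLE_TIMER_FREQ / EXAMPLE_TICK_PER_SEC,  /* 10000 */
        EXAMPLE_50US_CYCLES  = 50 * (EXAMPLE_TIMER_FREQ / 1000000)         /* 500   */
    };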

+ 0 - 17
libcpu/risc-v/virt64/tick.h

@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2018/10/28     Bernard      The unify RISC-V porting code.
- */
-
-#ifndef TICK_H__
-#define TICK_H__
-
-int tick_isr(void);
-int rt_hw_tick_init(void);
-
-#endif

+ 0 - 61
libcpu/risc-v/virt64/tlb.h

@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2006-2022, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2022-11-28     WangXiaoyao  the first version
- */
-#ifndef __TLB_H__
-#define __TLB_H__
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <rtthread.h>
-#include <mm_aspace.h>
-#include "sbi.h"
-#include "riscv_mmu.h"
-
-#define HANDLE_FAULT(ret)                                                      \
-    if (__builtin_expect((ret) != SBI_SUCCESS, 0))                             \
-        LOG_W("%s failed", __FUNCTION__);
-
-static inline void rt_hw_tlb_invalidate_all(void)
-{
-    uintptr_t mask = -1ul;
-    HANDLE_FAULT(sbi_remote_sfence_vma(&mask, -1ul, 0, mask));
-}
-
-static inline void rt_hw_tlb_invalidate_all_local(void)
-{
-    __asm__ volatile("sfence.vma" ::: "memory");
-}
-
-static inline void rt_hw_tlb_invalidate_aspace(rt_aspace_t aspace)
-{
-    // TODO ASID
-    rt_hw_tlb_invalidate_all_local();
-}
-
-static inline void rt_hw_tlb_invalidate_page(rt_aspace_t aspace, void *start)
-{
-    __asm__ volatile("sfence.vma %0, zero" ::"r"(start) : "memory");
-}
-
-static inline void rt_hw_tlb_invalidate_range(rt_aspace_t aspace, void *start,
-                                              size_t size, size_t stride)
-{
-    // a huge page is treated like a normal page here
-    if (size <= ARCH_PAGE_SIZE)
-    {
-        rt_hw_tlb_invalidate_page(aspace, start);
-    }
-    else
-    {
-        rt_hw_tlb_invalidate_aspace(aspace);
-    }
-}
-
-#endif /* __TLB_H__ */
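
The helpers above are meant to be called after page-table updates; rt_hw_tlb_invalidate_range() issues a single-page sfence.vma when the range fits one page and otherwise flushes the whole address space. A hypothetical caller (the remap routine below is not real code from this tree) would look like:

    static void example_remap_range(rt_aspace_t aspace, void *vaddr, size_t size)
    {
        /* ... rewrite the page-table entries covering [vaddr, vaddr + size) ... */

        /* then drop any stale translations for that range */
        rt_hw_tlb_invalidate_range(aspace, vaddr, size, ARCH_PAGE_SIZE);
    }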

+ 0 - 386
libcpu/risc-v/virt64/trap.c

@@ -1,386 +0,0 @@
-/*
- * Copyright (c) 2006-2024, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2022-12-08     RT-Thread    first version
- */
-
-#include <rthw.h>
-#include <rtthread.h>
-#include <stdint.h>
-
-#include <mm_fault.h>
-#include <mmu.h>
-#include <encoding.h>
-#include <stack.h>
-#include <sbi.h>
-#include <riscv.h>
-#include <interrupt.h>
-#include <plic.h>
-#include <tick.h>
-
-#ifdef RT_USING_SMART
-#include <lwp_arch.h>
-#endif
-
-#define DBG_TAG "libcpu.trap"
-#define DBG_LVL DBG_INFO
-#include <rtdbg.h>
-
-void dump_regs(struct rt_hw_stack_frame *regs)
-{
-    rt_kprintf("--------------Dump Registers-----------------\n");
-
-    rt_kprintf("Function Registers:\n");
-    rt_kprintf("\tra(x1) = 0x%p\tuser_sp = 0x%p\n", regs->ra,
-               regs->user_sp_exc_stack);
-    rt_kprintf("\tgp(x3) = 0x%p\ttp(x4) = 0x%p\n", regs->gp, regs->tp);
-    rt_kprintf("Temporary Registers:\n");
-    rt_kprintf("\tt0(x5) = 0x%p\tt1(x6) = 0x%p\n", regs->t0, regs->t1);
-    rt_kprintf("\tt2(x7) = 0x%p\n", regs->t2);
-    rt_kprintf("\tt3(x28) = 0x%p\tt4(x29) = 0x%p\n", regs->t3, regs->t4);
-    rt_kprintf("\tt5(x30) = 0x%p\tt6(x31) = 0x%p\n", regs->t5, regs->t6);
-    rt_kprintf("Saved Registers:\n");
-    rt_kprintf("\ts0/fp(x8) = 0x%p\ts1(x9) = 0x%p\n", regs->s0_fp, regs->s1);
-    rt_kprintf("\ts2(x18) = 0x%p\ts3(x19) = 0x%p\n", regs->s2, regs->s3);
-    rt_kprintf("\ts4(x20) = 0x%p\ts5(x21) = 0x%p\n", regs->s4, regs->s5);
-    rt_kprintf("\ts6(x22) = 0x%p\ts7(x23) = 0x%p\n", regs->s6, regs->s7);
-    rt_kprintf("\ts8(x24) = 0x%p\ts9(x25) = 0x%p\n", regs->s8, regs->s9);
-    rt_kprintf("\ts10(x26) = 0x%p\ts11(x27) = 0x%p\n", regs->s10, regs->s11);
-    rt_kprintf("Function Arguments Registers:\n");
-    rt_kprintf("\ta0(x10) = 0x%p\ta1(x11) = 0x%p\n", regs->a0, regs->a1);
-    rt_kprintf("\ta2(x12) = 0x%p\ta3(x13) = 0x%p\n", regs->a2, regs->a3);
-    rt_kprintf("\ta4(x14) = 0x%p\ta5(x15) = 0x%p\n", regs->a4, regs->a5);
-    rt_kprintf("\ta6(x16) = 0x%p\ta7(x17) = 0x%p\n", regs->a6, regs->a7);
-    rt_kprintf("sstatus = 0x%p\n", regs->sstatus);
-    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SIE)
-                             ? "Supervisor Interrupt Enabled"
-                             : "Supervisor Interrupt Disabled");
-    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SPIE)
-                             ? "Last Time Supervisor Interrupt Enabled"
-                             : "Last Time Supervisor Interrupt Disabled");
-    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SPP)
-                             ? "Last Privilege is Supervisor Mode"
-                             : "Last Privilege is User Mode");
-    rt_kprintf("\t%s\n", (regs->sstatus & SSTATUS_SUM)
-                             ? "Permit to Access User Page"
-                             : "Not Permit to Access User Page");
-    rt_kprintf("\t%s\n", (regs->sstatus & (1 << 19))
-                             ? "Permit to Read Executable-only Page"
-                             : "Not Permit to Read Executable-only Page");
-    rt_ubase_t satp_v = read_csr(satp);
-    rt_kprintf("satp = 0x%p\n", satp_v);
-    rt_kprintf("\tCurrent Page Table(Physical) = 0x%p\n",
-               __MASKVALUE(satp_v, __MASK(44)) << PAGE_OFFSET_BIT);
-    rt_kprintf("\tCurrent ASID = 0x%p\n", __MASKVALUE(satp_v >> 44, __MASK(16))
-                                              << PAGE_OFFSET_BIT);
-    const char *mode_str = "Unknown Address Translation/Protection Mode";
-
-    switch (__MASKVALUE(satp_v >> 60, __MASK(4)))
-    {
-        case 0:
-            mode_str = "No Address Translation/Protection Mode";
-            break;
-
-        case 8:
-            mode_str = "Page-based 39-bit Virtual Addressing Mode";
-            break;
-
-        case 9:
-            mode_str = "Page-based 48-bit Virtual Addressing Mode";
-            break;
-    }
-
-    rt_kprintf("\tMode = %s\n", mode_str);
-    rt_kprintf("-----------------Dump OK---------------------\n");
-}
-
-static const char *Exception_Name[] = {"Instruction Address Misaligned",
-                                       "Instruction Access Fault",
-                                       "Illegal Instruction",
-                                       "Breakpoint",
-                                       "Load Address Misaligned",
-                                       "Load Access Fault",
-                                       "Store/AMO Address Misaligned",
-                                       "Store/AMO Access Fault",
-                                       "Environment call from U-mode",
-                                       "Environment call from S-mode",
-                                       "Reserved-10",
-                                       "Reserved-11",
-                                       "Instruction Page Fault",
-                                       "Load Page Fault",
-                                       "Reserved-14",
-                                       "Store/AMO Page Fault"};
-
-static const char *Interrupt_Name[] = {
-    "User Software Interrupt",
-    "Supervisor Software Interrupt",
-    "Reversed-2",
-    "Reversed-3",
-    "User Timer Interrupt",
-    "Supervisor Timer Interrupt",
-    "Reversed-6",
-    "Reversed-7",
-    "User External Interrupt",
-    "Supervisor External Interrupt",
-    "Reserved-10",
-    "Reserved-11",
-};
-
-#ifndef RT_USING_SMP
-static volatile int nested = 0;
-#define ENTER_TRAP nested += 1
-#define EXIT_TRAP  nested -= 1
-#define CHECK_NESTED_PANIC(cause, tval, epc, eframe) \
-    if (nested != 1) handle_nested_trap_panic(cause, tval, epc, eframe)
-#endif /* RT_USING_SMP */
-
-static const char *get_exception_msg(int id)
-{
-    const char *msg;
-    if (id < sizeof(Exception_Name) / sizeof(const char *))
-    {
-        msg = Exception_Name[id];
-    }
-    else
-    {
-        msg = "Unknown Exception";
-    }
-    return msg;
-}
-
-#ifdef RT_USING_SMART
-#include "lwp.h"
-void handle_user(rt_ubase_t scause, rt_ubase_t stval, rt_ubase_t sepc,
-                 struct rt_hw_stack_frame *sp)
-{
-    rt_ubase_t id = __MASKVALUE(scause, __MASK(63UL));
-    struct rt_lwp *lwp;
-
-    /* user page fault */
-    enum rt_mm_fault_op fault_op;
-    enum rt_mm_fault_type fault_type;
-    switch (id)
-    {
-        case EP_LOAD_PAGE_FAULT:
-            fault_op = MM_FAULT_OP_READ;
-            fault_type = MM_FAULT_TYPE_GENERIC_MMU;
-            break;
-        case EP_LOAD_ACCESS_FAULT:
-            fault_op = MM_FAULT_OP_READ;
-            fault_type = MM_FAULT_TYPE_BUS_ERROR;
-            break;
-        case EP_LOAD_ADDRESS_MISALIGNED:
-            fault_op = MM_FAULT_OP_READ;
-            fault_type = MM_FAULT_TYPE_BUS_ERROR;
-            break;
-        case EP_STORE_PAGE_FAULT:
-            fault_op = MM_FAULT_OP_WRITE;
-            fault_type = MM_FAULT_TYPE_GENERIC_MMU;
-            break;
-        case EP_STORE_ACCESS_FAULT:
-            fault_op = MM_FAULT_OP_WRITE;
-            fault_type = MM_FAULT_TYPE_BUS_ERROR;
-            break;
-        case EP_STORE_ADDRESS_MISALIGNED:
-            fault_op = MM_FAULT_OP_WRITE;
-            fault_type = MM_FAULT_TYPE_BUS_ERROR;
-            break;
-        case EP_INSTRUCTION_PAGE_FAULT:
-            fault_op = MM_FAULT_OP_EXECUTE;
-            fault_type = MM_FAULT_TYPE_GENERIC_MMU;
-            break;
-        case EP_INSTRUCTION_ACCESS_FAULT:
-            fault_op = MM_FAULT_OP_EXECUTE;
-            fault_type = MM_FAULT_TYPE_BUS_ERROR;
-            break;
-        case EP_INSTRUCTION_ADDRESS_MISALIGNED:
-            fault_op = MM_FAULT_OP_EXECUTE;
-            fault_type = MM_FAULT_TYPE_BUS_ERROR;
-            break;
-        default:
-            fault_op = 0;
-    }
-
-    if (fault_op)
-    {
-        rt_base_t saved_stat;
-        lwp = lwp_self();
-        struct rt_aspace_fault_msg msg = {
-            .fault_op = fault_op,
-            .fault_type = fault_type,
-            .fault_vaddr = (void *)stval,
-        };
-
-        __asm__ volatile("csrrsi %0, sstatus, 2" : "=r"(saved_stat));
-        if (lwp && rt_aspace_fault_try_fix(lwp->aspace, &msg))
-        {
-            __asm__ volatile("csrw sstatus, %0" ::"r"(saved_stat));
-            return;
-        }
-        __asm__ volatile("csrw sstatus, %0" ::"r"(saved_stat));
-    }
-    LOG_E("[FATAL ERROR] Exception %ld:%s\n", id, get_exception_msg(id));
-    LOG_E("scause:0x%p,stval:0x%p,sepc:0x%p\n", scause, stval, sepc);
-    dump_regs(sp);
-
-    rt_thread_t cur_thr = rt_thread_self();
-    struct rt_hw_backtrace_frame frame = {.fp = sp->s0_fp, .pc = sepc};
-    rt_kprintf("fp = %p\n", frame.fp);
-    lwp_backtrace_frame(cur_thr, &frame);
-
-    LOG_E("User Fault, killing thread: %s", cur_thr->parent.name);
-
-    EXIT_TRAP;
-    sys_exit_group(-1);
-}
-#endif
-
-#ifdef ARCH_RISCV_VECTOR
-static void vector_enable(struct rt_hw_stack_frame *sp)
-{
-    sp->sstatus |= SSTATUS_VS_INITIAL;
-}
-
-/**
- * detect vector/FP usage; vector and FP instructions are not distinguished
- */
-static int illegal_inst_recoverable(rt_ubase_t stval,
-                                    struct rt_hw_stack_frame *sp)
-{
-    // the low 7 bits are the opcode
-    int opcode = stval & 0x7f;
-    int csr = (stval & 0xFFF00000) >> 20;
-    // ref riscv-v-spec-1.0, [Vector Instruction Formats]
-    int width = ((stval & 0x7000) >> 12) - 1;
-    int flag = 0;
-
-    switch (opcode)
-    {
-        case 0x57: // V
-        case 0x27: // scalar FLOAT
-        case 0x07:
-        case 0x73: // CSR
-            flag = 1;
-            break;
-    }
-
-    if (flag)
-    {
-        vector_enable(sp);
-    }
-
-    return flag;
-}
-#endif
-
-static void handle_nested_trap_panic(rt_ubase_t cause, rt_ubase_t tval,
-                                     rt_ubase_t epc,
-                                     struct rt_hw_stack_frame *eframe)
-{
-    LOG_E("\n-------- [SEVER ERROR] --------");
-    LOG_E("Nested trap detected");
-    LOG_E("scause:0x%p,stval:0x%p,sepc:0x%p\n", cause, tval, epc);
-    dump_regs(eframe);
-    rt_hw_cpu_shutdown();
-}
-
-#define IN_USER_SPACE (stval >= USER_VADDR_START && stval < USER_VADDR_TOP)
-#define PAGE_FAULT    (id == EP_LOAD_PAGE_FAULT || id == EP_STORE_PAGE_FAULT)
-
-/* Trap entry */
-void handle_trap(rt_ubase_t scause, rt_ubase_t stval, rt_ubase_t sepc,
-                 struct rt_hw_stack_frame *sp)
-{
-    ENTER_TRAP;
-    rt_ubase_t id = __MASKVALUE(scause, __MASK(63UL));
-    const char *msg;
-
-    /* supervisor external interrupt */
-    if ((SCAUSE_INTERRUPT & scause) &&
-        SCAUSE_S_EXTERNAL_INTR == (scause & 0xff))
-    {
-        rt_interrupt_enter();
-        plic_handle_irq();
-        rt_interrupt_leave();
-    }
-    else if ((SCAUSE_INTERRUPT | SCAUSE_S_TIMER_INTR) == scause)
-    {
-        /* supervisor timer */
-        rt_interrupt_enter();
-        tick_isr();
-        rt_interrupt_leave();
-    }
-    else
-    {
-        if (SCAUSE_INTERRUPT & scause)
-        {
-            if (id < sizeof(Interrupt_Name) / sizeof(const char *))
-            {
-                msg = Interrupt_Name[id];
-            }
-            else
-            {
-                msg = "Unknown Interrupt";
-            }
-
-            LOG_E("Unhandled Interrupt %ld:%s\n", id, msg);
-        }
-        else
-        {
-#ifdef ARCH_RISCV_VECTOR
-            if (scause == 0x2)
-            {
-                if (!(sp->sstatus & SSTATUS_VS) &&
-                    illegal_inst_recoverable(stval, sp))
-                    goto _exit;
-            }
-#endif /* ARCH_RISCV_VECTOR */
-#ifdef RT_USING_SMART
-            if (!(sp->sstatus & 0x100) || (PAGE_FAULT && IN_USER_SPACE))
-            {
-                handle_user(scause, stval, sepc, sp);
-                // if handle_user() returns here, execution resumes in U-mode
-                goto _exit;
-            }
-#endif
-
-            // handle kernel exception:
-            rt_kprintf("Unhandled Exception %ld:%s\n", id,
-                       get_exception_msg(id));
-        }
-
-        // traps must not nest while another trap/interrupt is being handled
-        CHECK_NESTED_PANIC(scause, stval, sepc, sp);
-
-        rt_kprintf("scause:0x%p,stval:0x%p,sepc:0x%p\n", scause, stval, sepc);
-        dump_regs(sp);
-
-        rt_thread_t cur_thr = rt_thread_self();
-        rt_kprintf("--------------Thread list--------------\n");
-        rt_kprintf("current thread: %s\n", cur_thr->parent.name);
-
-        rt_kprintf("--------------Backtrace--------------\n");
-        struct rt_hw_backtrace_frame frame = {.fp = sp->s0_fp, .pc = sepc};
-
-#ifdef RT_USING_SMART
-        if (!(sp->sstatus & 0x100))
-        {
-            lwp_backtrace_frame(cur_thr, &frame);
-        }
-        else
-#endif
-        {
-            rt_backtrace_frame(cur_thr, &frame);
-        }
-
-        while (1)
-            ;
-    }
-_exit:
-    EXIT_TRAP;
-    return;
-}
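
handle_trap() above branches on the interrupt bit of scause and then on the low cause id that indexes Interrupt_Name/Exception_Name. Below is a minimal sketch of that decoding, assuming RV64 where bit 63 of scause is the interrupt flag; the helper name is illustrative.

    static void example_decode_scause(rt_ubase_t scause)
    {
        rt_ubase_t is_interrupt = scause >> 63;     /* SCAUSE_INTERRUPT bit        */
        rt_ubase_t id = scause & ~(1UL << 63);      /* same as __MASKVALUE(.., 63) */

        rt_kprintf("%s, cause id %ld\n",
                   is_interrupt ? "interrupt" : "exception", id);
    }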