Просмотр исходного кода

[riscv] interrupt & exception handling rewrite

wangxiaoyao 3 лет назад
Родитель
Commit
73f65c9fba

+ 11 - 34
components/lwp/arch/risc-v/rv64/lwp_gcc.S

@@ -24,6 +24,8 @@
 .global arch_start_umode
 .type arch_start_umode, % function
 arch_start_umode:
+    // load kstack for user process
+    csrw sscratch, a3
     li t0, SSTATUS_SPP | SSTATUS_SIE    // set as user mode, close interrupt
     csrc sstatus, t0 
     li t0, SSTATUS_SPIE // enable interrupt when return to user mode
@@ -56,6 +58,8 @@ arch_crt_start_umode:
     mv sp, a0//user_sp
     mv ra, a0//return address
     mv a0, s0//args
+
+    csrw sscratch, a3
     sret//enter user mode
 
 .global arch_ret_to_user
@@ -63,11 +67,13 @@ arch_ret_to_user:
     call lwp_signal_check
     beqz a0, ret_to_user_exit
     RESTORE_ALL
-    //now sp is user sp
+    csrw sscratch, zero
+    // now sp is user sp
     J user_do_signal
 
 ret_to_user_exit:
     RESTORE_ALL
+    csrw sscratch, zero
     sret
 
 /*#ifdef RT_USING_LWP
@@ -132,6 +138,7 @@ arch_signal_quit:
     //a0 is user_ctx
     mv sp, a0
     RESTORE_ALL
+    csrw sscratch, zero
     sret
 
 user_do_signal:
@@ -234,36 +241,6 @@ arch_clone_exit:
 
 .global syscall_entry
 syscall_entry:
-    //swap to thread kernel stack
-    csrr t0, sstatus
-    andi t0, t0, 0x100
-    beqz t0, __restore_sp_from_tcb
-
-__restore_sp_from_sscratch:
-    csrr t0, sscratch
-    j __move_stack_context
-
-__restore_sp_from_tcb:
-    la a0, rt_current_thread
-    LOAD a0, 0(a0)
-    jal get_thread_kernel_stack_top
-    mv t0, a0
-
-__move_stack_context:
-    mv t1, sp//src
-    mv sp, t0//switch stack
-    addi sp, sp, -CTX_REG_NR * REGBYTES
-    //copy context
-    li s0, CTX_REG_NR//cnt
-    mv t2, sp//dst
-
-copy_context_loop:
-    LOAD t0, 0(t1)
-    STORE t0, 0(t2)
-    addi s0, s0, -1
-    addi t1, t1, 8
-    addi t2, t2, 8
-    bnez s0, copy_context_loop
 
     LOAD s0, 7 * REGBYTES(sp)
     addi s0, s0, -0xfe
@@ -285,13 +262,12 @@ arch_syscall_exit:
     CLOSE_INTERRUPT
 
     #if defined(RT_USING_USERSPACE)
-        LOAD s0, 2 * REGBYTES(sp)
+        LOAD s0, FRAME_OFF_SSTATUS(sp)
         andi s0, s0, 0x100
         bnez s0, dont_ret_to_user
-        li s0, 0
         j arch_ret_to_user
-        dont_ret_to_user:
     #endif
+dont_ret_to_user:
 
 #ifdef RT_USING_USERSPACE
     /* restore setting when syscall exit */
@@ -304,5 +280,6 @@ arch_syscall_exit:
 
     //restore context
     RESTORE_ALL
+    csrw sscratch, zero
     sret
 

+ 2 - 0
libcpu/risc-v/virt64/context_gcc.S

@@ -8,6 +8,8 @@
  * 2018/10/28     Bernard      The unify RISC-V porting implementation
  * 2018/12/27     Jesven       Add SMP support
  * 2021/02/02     lizhirui     Add userspace support
+ * 2022/10/22     WangXiaoyao  Support User mode RVV;
+ *                             Trimming process switch context
  */
 
 #include "cpuport.h"

+ 62 - 76
libcpu/risc-v/virt64/interrupt_gcc.S

@@ -9,105 +9,91 @@
  * 2018/12/27     Jesven       Add SMP schedule
  * 2021/02/02     lizhirui     Add userspace support
  * 2021/12/24     JasonHu      Add user setting save/restore
+ * 2022/10/22     WangXiaoyao  Support kernel mode RVV;
+ *                             Rewrite trap handling routine
  */
 
 #include "cpuport.h"
 #include "encoding.h"
 #include "stackframe.h"
 
-  .section      .text.entry
-  .align 2
-  .global trap_entry
-  .extern __stack_cpu0
-  .extern get_current_thread_kernel_stack_top
+#define ARCH_CHECK_SP
+
+    .align 2
+    .global trap_entry
+    .global debug_check_sp
 trap_entry:
-    //backup sp
-    csrrw sp, sscratch, sp
-    //load interrupt stack
-    la sp, __stack_cpu0
-    //backup context
+    // distinguish exception from kernel or user
+    csrrw   sp, sscratch, sp
+    bnez    sp, _from_user
+
+    // BE REALLY careful with sscratch:
+    // if it's wrong, we could loop here forever,
+    // or access random memory and see things go totally
+    // wrong much later without ever knowing why
+_from_kernel:
+    csrr    sp, sscratch
+    j _save_context
+_from_user:
+    nop
+_save_context:
     SAVE_ALL
+    // clear sscratch to say 'now in kernel mode'
+    csrw    sscratch, zero
 
     RESTORE_SYS_GP
 
-    //check syscall
-    csrr t0, scause
-    li t1, 8//environment call from u-mode
-    beq t0, t1, syscall_entry
-
-    csrr a0, scause
-    csrrc a1, stval, zero
-    csrr  a2, sepc
-    mv    a3, sp
-
-    /* scause, stval, sepc, sp */
-    call  handle_trap
-
-    /* need to switch new thread */
-    la    s0, rt_thread_switch_interrupt_flag
-    lw    s2, 0(s0)
-    beqz  s2, spurious_interrupt
-    sw    zero, 0(s0)
-
-rt_hw_context_switch_interrupt_do:
-
-//swap to thread kernel stack
-    csrr t0, sstatus
-    andi t0, t0, 0x100
-    beqz t0, __restore_sp_from_tcb_interrupt
-
-__restore_sp_from_sscratch_interrupt:
-    csrr t0, sscratch
-    j __move_stack_context_interrupt
-
-__restore_sp_from_tcb_interrupt:
-    la    s0, rt_interrupt_from_thread
-    LOAD  a0, 0(s0)
-    jal rt_thread_sp_to_thread
-    jal get_thread_kernel_stack_top
-    mv t0, a0
-
-__move_stack_context_interrupt:
-    mv t1, sp//src
-    mv sp, t0//switch stack
-    addi sp, sp, -CTX_REG_NR * REGBYTES
-    //copy context
-    li s0, CTX_REG_NR//cnt
-    mv t2, sp//dst
-
-copy_context_loop_interrupt:
-    LOAD t0, 0(t1)
-    STORE t0, 0(t2)
-    addi s0, s0, -1
-    addi t1, t1, 8
-    addi t2, t2, 8
-    bnez s0, copy_context_loop_interrupt
-
-do_ctx_switch:
-    la    t0, rt_interrupt_from_thread
-    LOAD  a0, 0(t0)
-
-    la    t0, rt_interrupt_to_thread
-    LOAD  a1, 0(t0)
-
+#ifdef ARCH_CHECK_SP
+    mv a0, sp
+    li a1, 1
+    call debug_check_sp
+#endif
+
+    // now we are ready to enter the interrupt / exception handler
+_distinguish_syscall:
+    csrr    t0, scause
+    // TODO swap 8 with config macro name
+    li      t1, 8
+    beq     t0, t1, syscall_entry
+    // the syscall path never returns here
+
+_handle_interrupt_and_exception:
+    mv      a0, t0
+    csrrc   a1, stval, zero
+    csrr    a2, sepc
+    // sp as exception frame pointer
+    mv      a3, sp
+    call    handle_trap
+
+_interrupt_exit:
+    la      s0, rt_thread_switch_interrupt_flag
+    lw      s2, 0(s0)
+    beqz    s2, _resume_execution
+    sw      zero, 0(s0)
+
+_context_switch:
+    la      t0, rt_interrupt_from_thread
+    LOAD    a0, 0(t0)
+    la      t0, rt_interrupt_to_thread
+    LOAD    a1, 0(t0)
     jal     rt_hw_context_switch
 
-spurious_interrupt:
-    LOAD t0, 2 * REGBYTES(sp)
-    andi t0, t0, 0x100
-    beqz t0, arch_ret_to_user
+_resume_execution:
+    LOAD    t0, FRAME_OFF_SSTATUS(sp)
+    andi    t0, t0, SSTATUS_SPP
+    beqz    t0, arch_ret_to_user
 
+_resume_kernel:
     RESTORE_ALL
+    csrw    sscratch, zero
     sret
 
 .global rt_hw_interrupt_enable
 rt_hw_interrupt_enable:
-    fence.i
     csrs sstatus, a0    /* restore to old csr */
     jr ra
 
 .global rt_hw_interrupt_disable
 rt_hw_interrupt_disable:
     csrrci a0, sstatus, 2   /* clear SIE */
-    fence.i
     jr ra

+ 20 - 13
libcpu/risc-v/virt64/stackframe.h

@@ -8,6 +8,7 @@
  * 2021-02-02     lizhirui     first version
  * 2021-02-11     lizhirui     fixed gp save/store bug
  * 2021-11-18     JasonHu      add fpu registers save/restore
+ * 2022/10/22     WangXiaoyao  Support kernel mode RVV;
  */
 
 #ifndef __STACKFRAME_H__
@@ -17,13 +18,10 @@
 #include "encoding.h"
 #include "ext_context.h"
 
-/**
- * The register `tp` always save/restore when context switch,
- * we call `lwp_user_setting_save` when syscall enter,
- * call `lwp_user_setting_restore` when syscall exit 
- * and modify context stack after `lwp_user_setting_restore` called
- * so that the `tp` can be the correct thread area value.
- */
+#define BYTES(idx) ((idx) * REGBYTES)
+#define FRAME_OFF_SSTATUS BYTES(2)
+
+#ifdef __ASSEMBLY__
 
 .macro SAVE_ALL
 
@@ -41,10 +39,10 @@
     STORE x1,   1 * REGBYTES(sp)
 
     csrr  x1, sstatus
-    STORE x1,   2 * REGBYTES(sp)
+    STORE x1, FRAME_OFF_SSTATUS(sp)
 
     csrr  x1, sepc
-    STORE x1, 0 * REGBYTES(sp)
+    STORE x1,   0 * REGBYTES(sp)
 
     STORE x3,   3 * REGBYTES(sp)
     STORE x4,   4 * REGBYTES(sp) /* save tp */
@@ -140,12 +138,17 @@
 #endif /* ENABLE_VECTOR */
 .endm
 
+/**
+ * @brief Restore All General Registers, for interrupt handling
+ * 
+ */
 .macro RESTORE_ALL
 
 #ifdef ENABLE_VECTOR
     // skip on close
-    csrr    t0, sstatus
-    andi    t0, t0, SSTATUS_VS
+    ld      t0, 2 * REGBYTES(sp)
+    // cannot use vector on initial
+    andi    t0, t0, SSTATUS_VS_CLEAN
     beqz    t0, 0f
 
     /* push vector frame */
@@ -204,9 +207,11 @@
 #endif /* ENABLE_FPU */
 
     /* restore general register */
+    addi t0, sp, CTX_REG_NR * REGBYTES
+    csrw sscratch, t0
 
     /* resw ra to sepc */
-    LOAD x1,   0 * REGBYTES(sp)
+    LOAD x1, 0 * REGBYTES(sp)
     csrw sepc, x1
 
     LOAD x1,   2 * REGBYTES(sp)
@@ -263,4 +268,6 @@
     csrci sstatus, 2
 .endm
 
-#endif
+#endif /* __ASSEMBLY__ */
+
+#endif /* __STACKFRAME_H__ */

+ 5 - 1
libcpu/risc-v/virt64/startup_gcc.S

@@ -86,5 +86,9 @@ _start:
   la   sp, __stack_start__
   li   t0, __STACKSIZE__
   add  sp, sp, t0
-  csrw sscratch, sp
+
+  /**
+   * sscratch is always zero in kernel mode
+   */
+  csrw sscratch, zero
   j primary_cpu_entry

+ 35 - 4
libcpu/risc-v/virt64/trap.c

@@ -177,7 +177,7 @@ static void vector_enable(struct rt_hw_stack_frame *sp)
     sp->sstatus |= SSTATUS_VS_INITIAL;
 }
 
-/** 
+/**
  * detect V/D support, and do not distinguish V/D instruction
  */
 static int illegal_inst_recoverable(rt_ubase_t stval, struct rt_hw_stack_frame *sp)
@@ -207,9 +207,34 @@ static int illegal_inst_recoverable(rt_ubase_t stval, struct rt_hw_stack_frame *
     return flag;
 }
 
+static void handle_nested_trap_panic(
+    rt_size_t cause,
+    rt_size_t tval,
+    rt_size_t epc,
+    struct rt_hw_stack_frame *eframe)
+{
+    LOG_E("\n-------- [SEVER ERROR] --------");
+    LOG_E("Nested trap detected");
+    LOG_E("scause:0x%p,stval:0x%p,sepc:0x%p\n", cause, tval, epc);
+    dump_regs(eframe);
+    rt_hw_cpu_shutdown();
+}
+
+#ifndef RT_USING_SMP
+static volatile int nested = 0;
+#define ENTER_TRAP \
+    nested += 1
+#define EXIT_TRAP \
+    nested -= 1
+#define CHECK_NESTED_PANIC(cause, tval, epc, eframe) \
+    if (nested != 1)                                 \
+    handle_nested_trap_panic(cause, tval, epc, eframe)
+#endif /* !RT_USING_SMP */
+
 /* Trap entry */
 void handle_trap(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw_stack_frame *sp)
 {
+    ENTER_TRAP;
     rt_size_t id = __MASKVALUE(scause, __MASK(63UL));
     const char *msg;
 
@@ -230,6 +255,8 @@ void handle_trap(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
     }
     else
     {
+        // a trap must not nest while another trap / interrupt is being handled
+        CHECK_NESTED_PANIC(scause, stval, sepc, sp);
         rt_size_t id = __MASKVALUE(scause, __MASK(63UL));
         const char *msg;
 
@@ -248,16 +275,18 @@ void handle_trap(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
         }
         else
         {
+#ifdef ENABLE_VECTOR
             if (scause == 0x2)
             {
                 if (!(sp->sstatus & SSTATUS_VS) && illegal_inst_recoverable(stval, sp))
-                    return;
+                    goto _exit;
             }
+#endif /* ENABLE_VECTOR */
             if (!(sp->sstatus & 0x100))
             {
                 handle_user(scause, stval, sepc, sp);
                 // if handle_user() return here, jump to u mode then
-                return;
+                goto _exit;
             }
 
             // handle kernel exception:
@@ -268,7 +297,6 @@ void handle_trap(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
         dump_regs(sp);
         rt_kprintf("--------------Thread list--------------\n");
         rt_kprintf("current thread: %s\n", rt_thread_self()->name);
-        list_process();
 
         extern struct rt_thread *rt_current_thread;
         rt_kprintf("--------------Backtrace--------------\n");
@@ -277,4 +305,7 @@ void handle_trap(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
         while (1)
             ;
     }
+_exit:
+    EXIT_TRAP;
+    return ;
 }