
[API] rename APIs under lwp/arch;
[libcpu/risc-v] move lwp code to lwp/arch

wangxiaoyao · 3 years ago · commit 0540eb642b
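
For orientation before the per-file hunks: a minimal sketch of the renamed user-mode entry path, assuming the signatures declared in the new lwp_arch_comm.h added below. The wrapper _example_lwp_entry and its argument comments are illustrative only; the call itself mirrors the _lwp_thread_entry change in components/lwp/lwp.c further down.

#include <rtthread.h>
#include <lwp.h>            /* struct rt_lwp, USER_STACK_VEND */
#include <lwp_arch_comm.h>  /* arch_start_umode() */

/* illustrative only: formerly lwp_user_entry(), renamed to arch_start_umode() */
static void _example_lwp_entry(struct rt_lwp *lwp, rt_thread_t tid)
{
    arch_start_umode(lwp->args,                          /* argument block   */
                     lwp->text_entry,                    /* user text entry  */
                     (void *)USER_STACK_VEND,            /* user stack top   */
                     tid->stack_addr + tid->stack_size); /* kernel stack top */
}

The same rename applies to the thread-create path: lwp_user_thread_entry() becomes arch_crt_start_umode() with an identical argument list.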

+ 1 - 1
components/lwp/arch/aarch64/common/reloc.c

@@ -23,6 +23,6 @@ typedef struct
     Elf_Half st_shndx;
 } Elf_sym;
 
-void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym)
+void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym)
 {
 }

+ 1 - 6
components/lwp/arch/aarch64/cortex-a/lwp_arch.h

@@ -12,6 +12,7 @@
 #define  LWP_ARCH_H__
 
 #include <lwp.h>
+#include <lwp_arch_comm.h>
 
 #ifdef RT_USING_USERSPACE
 
@@ -28,12 +29,6 @@
 extern "C" {
 #endif
 
-int arch_user_space_init(struct rt_lwp *lwp);
-void arch_user_space_vtable_free(struct rt_lwp *lwp);
-void *arch_kernel_mmu_table_get(void);
-void arch_kuser_init(rt_mmu_info *mmu_info, void *vectors);
-int arch_expand_user_stack(void *addr);
-
 unsigned long rt_hw_ffz(unsigned long x);
 
 rt_inline void icache_invalid_all(void)

+ 35 - 41
components/lwp/arch/aarch64/cortex-a/lwp_gcc.S

@@ -98,11 +98,11 @@
 .text
 
 /*
- * void lwp_user_entry(args, text, ustack, kstack);
+ * void arch_start_umode(args, text, ustack, kstack);
  */
-.global lwp_user_entry
-.type lwp_user_entry, % function
-lwp_user_entry:
+.global arch_start_umode
+.type arch_start_umode, % function
+arch_start_umode:
     mov sp, x3
     mov x4, #(SPSR_Mode(0) | SPSR_A64)
     mov x3, x2 ;/* user stack top */
@@ -114,11 +114,11 @@ lwp_user_entry:
     eret
 
 /*
- * void lwp_user_thread_entry(args, text, ustack, kstack);
+ * void arch_crt_start_umode(args, text, ustack, kstack);
  */
-.global lwp_user_thread_entry
-.type lwp_user_thread_entry, % function
-lwp_user_thread_entry:
+.global arch_crt_start_umode
+.type arch_crt_start_umode, % function
+arch_crt_start_umode:
     sub x4, x2, #0x10
     adr x2, lwp_thread_return
     ldr x5, [x2]
@@ -148,10 +148,10 @@ lwp_user_thread_entry:
     eret
 
 /*
-void lwp_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
+void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
 */
-.global lwp_set_thread_context
-lwp_set_thread_context:
+.global arch_set_thread_context
+arch_set_thread_context:
     sub x1, x1, #CONTEXT_SIZE
     str x2, [x1, #CONTEXT_OFFSET_SP_EL0]
     sub x1, x1, #CONTEXT_SIZE
@@ -162,26 +162,16 @@ lwp_set_thread_context:
     str x1, [x3]
     ret
 
-.global lwp_get_user_sp
-lwp_get_user_sp:
+.global arch_get_user_sp
+arch_get_user_sp:
     mrs x0, sp_el0
     ret
 
-.global sys_fork
-.global sys_vfork
-.global sys_fork_exit
-sys_fork:
-sys_vfork:
-    bl _sys_fork
-sys_fork_exit:
-    b  svc_exit
-
-.global sys_clone
-.global sys_clone_exit
-sys_clone:
-    bl _sys_clone
-sys_clone_exit:
-    b  svc_exit
+.global arch_fork_exit
+.global arch_clone_exit
+arch_fork_exit:
+arch_clone_exit:
+    b  arch_syscall_exit
 
 /*
 void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
@@ -213,7 +203,7 @@ SVC_Handler:
     ldp x8, x9, [sp, #(CONTEXT_OFFSET_X8)]
     and x0, x8, #0xf000
     cmp x0, #0xe000
-    beq lwp_signal_quit
+    beq arch_signal_quit
 
     cmp x0, #0xf000
     beq ret_from_user
@@ -222,13 +212,17 @@ SVC_Handler:
     bl lwp_get_sys_api
     cmp x0, xzr
     mov x30, x0
-    beq svc_exit
+    beq arch_syscall_exit
     ldp x0, x1, [sp, #(CONTEXT_OFFSET_X0)]
     ldp x2, x3, [sp, #(CONTEXT_OFFSET_X2)]
     ldp x4, x5, [sp, #(CONTEXT_OFFSET_X4)]
     ldp x6, x7, [sp, #(CONTEXT_OFFSET_X6)]
     blr x30
-svc_exit:
+    /* jump explicitly, make this code position independent */
+    b arch_syscall_exit
+
+.global arch_syscall_exit
+arch_syscall_exit:
     msr daifset, #3
 
     ldp x2, x3, [sp], #0x10  /* SPSR and ELR. */
@@ -254,8 +248,8 @@ svc_exit:
     add sp, sp, #0x40
     RESTORE_FPU sp
 
-.global ret_to_user
-ret_to_user:
+.global arch_ret_to_user
+arch_ret_to_user:
     SAVE_FPU sp
     stp x0, x1, [sp, #-0x10]!
     stp x2, x3, [sp, #-0x10]!
@@ -475,7 +469,7 @@ lwp_check_debug_quit:
     ldp x29, x30, [sp], #0x10
     ret
 
-lwp_signal_quit:
+arch_signal_quit:
     msr daifset, #3
 /*
     drop stack data
@@ -516,7 +510,7 @@ lwp_signal_quit:
 
     msr spsel, #1
 
-    b ret_to_user
+    b arch_ret_to_user
 
 user_do_signal:
     msr spsel, #0
@@ -586,14 +580,14 @@ lwp_thread_return:
     mov x8, #0x01
     svc #0
 
-.globl rt_cpu_get_thread_idr
-rt_cpu_get_thread_idr:
+.globl arch_get_tidr
+arch_get_tidr:
     mrs x0, tpidr_el0
     ret
 
-.global lwp_set_thread_area
-lwp_set_thread_area:
-.globl rt_cpu_set_thread_idr
-rt_cpu_set_thread_idr:
+.global arch_set_thread_area
+arch_set_thread_area:
+.globl arch_set_tidr
+arch_set_tidr:
     msr tpidr_el0, x0
     ret

+ 2 - 2
components/lwp/arch/arm/common/reloc.c

@@ -18,7 +18,7 @@ typedef struct
 } Elf32_sym;
 
 #ifdef RT_USING_USERSPACE
-void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
+void arch_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
 {
     size_t rel_off;
     void* addr;
@@ -77,7 +77,7 @@ void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, s
 }
 #else
 
-void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
+void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
 {
     size_t rel_off;
 

+ 1 - 6
components/lwp/arch/arm/cortex-a/lwp_arch.h

@@ -11,6 +11,7 @@
 #define  LWP_ARCH_H__
 
 #include <lwp.h>
+#include <lwp_arch_comm.h>
 
 #ifdef RT_USING_USERSPACE
 
@@ -27,12 +28,6 @@
 extern "C" {
 #endif
 
-int arch_user_space_init(struct rt_lwp *lwp);
-void arch_user_space_vtable_free(struct rt_lwp *lwp);
-void *arch_kernel_mmu_table_get(void);
-void arch_kuser_init(rt_mmu_info *mmu_info, void *vectors);
-int arch_expand_user_stack(void *addr);
-
 rt_inline unsigned long rt_hw_ffz(unsigned long x)
 {
     return __builtin_ffs(~x) - 1;

+ 34 - 33
components/lwp/arch/arm/cortex-a/lwp_gcc.S

@@ -29,11 +29,11 @@
 .text
 
 /*
- * void lwp_user_entry(args, text, ustack, kstack);
+ * void arch_start_umode(args, text, ustack, kstack);
  */
-.global lwp_user_entry
-.type lwp_user_entry, % function
-lwp_user_entry:
+.global arch_start_umode
+.type arch_start_umode, % function
+arch_start_umode:
     mrs     r9, cpsr
     bic     r9, #0x1f
     orr     r9, #Mode_USR
@@ -46,11 +46,11 @@ lwp_user_entry:
     movs    pc, r1
 
 /*
- * void lwp_user_thread_entry(args, text, ustack, kstack);
+ * void arch_crt_start_umode(args, text, ustack, kstack);
  */
-.global lwp_user_thread_entry
-.type lwp_user_thread_entry, % function
-lwp_user_thread_entry:
+.global arch_crt_start_umode
+.type arch_crt_start_umode, % function
+arch_crt_start_umode:
     cps #Mode_SYS
     sub sp, r2, #16
     ldr r2, =lwp_thread_return
@@ -87,10 +87,10 @@ lwp_user_thread_entry:
     movs    pc, r1
 
 /*
-void lwp_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
+void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
 */
-.global lwp_set_thread_context
-lwp_set_thread_context:
+.global arch_set_thread_context
+arch_set_thread_context:
     sub r1, #(10 * 4 + 4 * 4) /* {r4 - r12, lr} , {r4, r5, spsr, u_pc} */
     stmfd r1!, {r0}
     mov r12, #0
@@ -109,8 +109,8 @@ lwp_set_thread_context:
     str r1, [r3]
     mov pc, lr
 
-.global lwp_get_user_sp
-lwp_get_user_sp:
+.global arch_get_user_sp
+arch_get_user_sp:
     cps #Mode_SYS
     mov r0, sp
     cps #Mode_SVC
@@ -118,23 +118,23 @@ lwp_get_user_sp:
 
 .global sys_fork
 .global sys_vfork
-.global sys_fork_exit
+.global arch_fork_exit
 sys_fork:
 sys_vfork:
     push {r4 - r12, lr}
     bl _sys_fork
-sys_fork_exit:
+arch_fork_exit:
     pop {r4 - r12, lr}
-    b svc_exit
+    b arch_syscall_exit
 
 .global sys_clone
-.global sys_clone_exit
+.global arch_clone_exit
 sys_clone:
     push {r4 - r12, lr}
     bl _sys_clone
-sys_clone_exit:
+arch_clone_exit:
     pop {r4 - r12, lr}
-    b svc_exit
+    b arch_syscall_exit
 /*
 void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
 */
@@ -146,7 +146,7 @@ lwp_exec_user:
     mov r2, #Mode_USR
     msr spsr_cxsf, r2
     ldr r3, =0x80000000
-    b ret_to_user
+    b arch_ret_to_user
 
 /*
  * void SVC_Handler(void);
@@ -167,7 +167,7 @@ vector_swi:
 
     and r0, r7, #0xf000
     cmp r0, #0xe000
-    beq lwp_signal_quit
+    beq arch_signal_quit
 
     cmp r0, #0xf000
     beq ret_from_user
@@ -177,17 +177,18 @@ vector_swi:
     mov lr, r0
 
     pop {r0 - r3, r12}
-    beq svc_exit
+    beq arch_syscall_exit
     blx lr
 
-svc_exit:
+.global arch_syscall_exit
+arch_syscall_exit:
     cpsid i
     pop {r4, r5, lr}
     msr spsr_cxsf, lr
     pop {lr}
 
-.global ret_to_user
-ret_to_user:
+.global arch_ret_to_user
+arch_ret_to_user:
     push {r0-r3, r12, lr}
     bl lwp_check_debug
     bl lwp_check_exit_request
@@ -268,7 +269,7 @@ ret_from_user:
 lwp_check_debug_quit:
     pop {pc}
 
-lwp_signal_quit:
+arch_signal_quit:
     cpsid i
     pop {r0 - r3, r12}
     pop {r4, r5, lr}
@@ -284,7 +285,7 @@ lwp_signal_quit:
     mov sp, r1
     pop {r0-r12, lr}
     cps #Mode_SVC
-    b ret_to_user
+    b arch_ret_to_user
 
 user_do_signal:
     mov r0, r0
@@ -359,15 +360,15 @@ get_vfp:
 #endif
     mov pc, lr
 
-.globl rt_cpu_get_thread_idr
-rt_cpu_get_thread_idr:
+.globl arch_get_tidr
+arch_get_tidr:
     mrc p15, 0, r0, c13, c0, 3
     bx lr
 
-.global lwp_set_thread_area
-lwp_set_thread_area:
-.globl rt_cpu_set_thread_idr
-rt_cpu_set_thread_idr:
+.global arch_set_thread_area
+arch_set_thread_area:
+.globl arch_set_tidr
+arch_set_tidr:
     mcr p15, 0, r0, c13, c0, 3
     bx lr
 

+ 15 - 6
components/lwp/arch/risc-v/rv64/lwp_arch.c

@@ -68,7 +68,7 @@ void *lwp_copy_return_code_to_user_stack()
     return RT_NULL;
 }
 
-rt_mmu_info* arch_kernel_get_mmu_info(void)
+rt_mmu_info *arch_kernel_get_mmu_info(void)
 {
     extern rt_mmu_info *mmu_info;
 
@@ -98,7 +98,7 @@ void *get_thread_kernel_stack_top(rt_thread_t thread)
     return (void *)(((rt_size_t)thread->stack_addr) + ((rt_size_t)thread->stack_size));
 }
 
-void *lwp_get_user_sp(void)
+void *arch_get_user_sp(void)
 {
     /* user sp saved in interrupt context */
     rt_thread_t self = rt_thread_self();
@@ -161,9 +161,10 @@ long sys_vfork(void)
 /**
  * set exec context for fork/clone.
  */
-void lwp_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp)
+int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
+                            void *user_stack, void **thread_sp)
 {
-    RT_ASSERT(exit_addr != RT_NULL);
+    RT_ASSERT(exit != RT_NULL);
     RT_ASSERT(user_stack != RT_NULL);
     RT_ASSERT(new_thread_stack != RT_NULL);
     RT_ASSERT(thread_sp != RT_NULL);
@@ -206,7 +207,7 @@ void lwp_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_
     }
 
     /* set pc for thread */
-    thread_frame->epc     = (rt_ubase_t)exit_addr;
+    thread_frame->epc     = (rt_ubase_t)exit;
 
     /* set old exception mode as supervisor, because in kernel */
     thread_frame->sstatus = read_csr(sstatus) | SSTATUS_SPP;
@@ -232,10 +233,18 @@ void lwp_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_
      * | temp thread stack      |           ^
      * |                        |           |
      * | @sp                    | ---------/
-     * | @epc                   | --> `exit_addr` (sys_clone_exit/sys_fork_exit)
+     * | @epc                   | --> `exit` (arch_clone_exit/arch_fork_exit)
      * |                        |
      * +------------------------+ --> thread sp
      */
 }
 
+/**
+ * void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
+ */
+void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
+{
+    arch_start_umode(args, user_entry, (void*)USER_STACK_VEND, kernel_stack);
+}
+
 #endif

+ 1 - 6
components/lwp/arch/risc-v/rv64/lwp_arch.h

@@ -11,6 +11,7 @@
 #define  LWP_ARCH_H__
 
 #include <lwp.h>
+#include <lwp_arch_comm.h>
 #include <riscv_mmu.h>
 
 #ifdef RT_USING_USERSPACE
@@ -48,12 +49,6 @@
 extern "C" {
 #endif
 
-int arch_user_space_init(struct rt_lwp *lwp);
-void arch_user_space_vtable_free(struct rt_lwp *lwp);
-void *arch_kernel_mmu_table_get(void);
-void arch_kuser_init(rt_mmu_info *mmu_info, void *vectors);
-int arch_expand_user_stack(void *addr);
-
 rt_mmu_info* arch_kernel_get_mmu_info(void);
 
 rt_inline unsigned long rt_hw_ffz(unsigned long x)

+ 96 - 28
components/lwp/arch/risc-v/rv64/lwp_gcc.S

@@ -20,11 +20,11 @@
 .section      .text.lwp
 
 /*
- * void lwp_user_entry(args, text, ustack, kstack);
+ * void arch_start_umode(args, text, ustack, kstack);
  */
-.global lwp_user_entry
-.type lwp_user_entry, % function
-lwp_user_entry:
+.global arch_start_umode
+.type arch_start_umode, % function
+arch_start_umode:
     li t0, SSTATUS_SPP | SSTATUS_SIE    // set as user mode, close interrupt
     csrc sstatus, t0 
     li t0, SSTATUS_SPIE // enable interrupt when return to user mode
@@ -35,11 +35,11 @@ lwp_user_entry:
     sret//enter user mode
 
 /*
- * void lwp_user_thread_entry(args, text, ustack, kstack);
+ * void arch_crt_start_umode(args, text, ustack, kstack);
  */
-.global lwp_user_thread_entry
-.type lwp_user_thread_entry, % function
-lwp_user_thread_entry:
+.global arch_crt_start_umode
+.type arch_crt_start_umode, % function
+arch_crt_start_umode:
     li t0, SSTATUS_SPP | SSTATUS_SIE    // set as user mode, close interrupt
     csrc sstatus, t0
     li t0, SSTATUS_SPIE // enable interrupt when return to user mode
@@ -59,8 +59,8 @@ lwp_user_thread_entry:
     mv a0, s0//args
     sret//enter user mode
 
-.global ret_to_user
-ret_to_user:
+.global arch_ret_to_user
+arch_ret_to_user:
     call lwp_signal_check
     beqz a0, ret_to_user_exit
     RESTORE_ALL
@@ -127,14 +127,13 @@ lwp_check_debug_quit:
 //#endif
 */
 
-.global lwp_signal_quit
-lwp_signal_quit:
+.global arch_signal_quit
+arch_signal_quit:
     call lwp_signal_restore
     //a0 is user_ctx
     mv sp, a0
     RESTORE_ALL
     sret
-    
 
 user_do_signal:
     //now sp is user sp
@@ -216,26 +215,95 @@ get_vfp:
     li a0, 0
     ret
 
-.globl rt_cpu_get_thread_idr
-rt_cpu_get_thread_idr:
+.globl arch_get_tidr
+arch_get_tidr:
     mv a0, tp 
     ret
 
-.global lwp_set_thread_area
-lwp_set_thread_area:
-.globl rt_cpu_set_thread_idr
-rt_cpu_set_thread_idr:
+.global arch_set_thread_area
+arch_set_thread_area:
+.globl arch_set_tidr
+arch_set_tidr:
     mv tp, a0
     ret
 
-.global sys_fork_exit
-sys_fork_exit:
-    j syscall_exit
+.global arch_clone_exit
+.global arch_fork_exit
+arch_fork_exit:
+arch_clone_exit:
+    j arch_syscall_exit
+
+.global syscall_entry
+syscall_entry:
+    //swap to thread kernel stack
+    csrr t0, sstatus
+    andi t0, t0, 0x100
+    beqz t0, __restore_sp_from_tcb
+
+__restore_sp_from_sscratch:
+    csrr t0, sscratch
+    j __move_stack_context
+
+__restore_sp_from_tcb:
+    la a0, rt_current_thread
+    LOAD a0, 0(a0)
+    jal get_thread_kernel_stack_top
+    mv t0, a0
+
+__move_stack_context:
+    mv t1, sp//src
+    mv sp, t0//switch stack
+    addi sp, sp, -CTX_REG_NR * REGBYTES
+    //copy context
+    li s0, CTX_REG_NR//cnt
+    mv t2, sp//dst
+
+copy_context_loop:
+    LOAD t0, 0(t1)
+    STORE t0, 0(t2)
+    addi s0, s0, -1
+    addi t1, t1, 8
+    addi t2, t2, 8
+    bnez s0, copy_context_loop
+
+    LOAD s0, 7 * REGBYTES(sp)
+    addi s0, s0, -0xfe
+    beqz s0, arch_signal_quit
+
+#ifdef RT_USING_USERSPACE
+    /* save setting when syscall enter */
+    call  rt_thread_self
+    call  lwp_user_setting_save
+#endif
+
+    mv a0, sp
+    OPEN_INTERRUPT
+    call syscall_handler
+    j arch_syscall_exit
     
-.global sys_clone_exit
-sys_clone_exit:
-    j syscall_exit
+.global arch_syscall_exit
+arch_syscall_exit:
+    CLOSE_INTERRUPT
+
+    #if defined(RT_USING_USERSPACE)
+        LOAD s0, 2 * REGBYTES(sp)
+        andi s0, s0, 0x100
+        bnez s0, dont_ret_to_user
+        li s0, 0
+        j arch_ret_to_user
+        dont_ret_to_user:
+    #endif
+
+#ifdef RT_USING_USERSPACE
+    /* restore setting when syscall exit */
+    call  rt_thread_self
+    call  lwp_user_setting_restore
+
+    /* after restore the reg `tp`, need modify context */
+    STORE tp, 4 * REGBYTES(sp)
+#endif
+
+    //restore context
+    RESTORE_ALL
+    sret
 
-.global lwp_exec_user
-lwp_exec_user:
-    ret//don't support

+ 2 - 2
components/lwp/arch/risc-v/rv64/reloc.c

@@ -18,7 +18,7 @@ typedef struct
 } Elf64_sym;
 
 #ifdef RT_USING_USERSPACE
-void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
+void arch_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
 {
     size_t rel_off;
     void* addr;
@@ -65,7 +65,7 @@ void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, s
 }
 #else
 
-void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
+void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
 {
     size_t rel_off;
 

+ 9 - 9
components/lwp/arch/x86/i386/lwp_arch.c

@@ -73,7 +73,7 @@ void *get_thread_kernel_stack_top(rt_thread_t thread)
 /**
  * don't support this in i386, it's ok!
  */
-void *lwp_get_user_sp()
+void *arch_get_user_sp()
 {
     return RT_NULL;
 }
@@ -115,14 +115,14 @@ void arch_user_space_vtable_free(struct rt_lwp *lwp)
     }
 }
 
-void lwp_set_thread_area(void *p)
+void arch_set_thread_area(void *p)
 {
     rt_hw_seg_tls_set((rt_ubase_t) p);
     rt_thread_t cur = rt_thread_self();
     cur->thread_idr = p; /* update thread idr after first set */
 }
 
-void *rt_cpu_get_thread_idr(void)
+void *arch_get_tidr(void)
 {
     rt_thread_t cur = rt_thread_self();
     if (!cur->lwp)  /* no lwp, don't get thread idr from tls seg */
@@ -130,7 +130,7 @@ void *rt_cpu_get_thread_idr(void)
     return (void *)rt_hw_seg_tls_get();   /* get thread idr from tls seg */
 }
 
-void rt_cpu_set_thread_idr(void *p)
+void arch_set_tidr(void *p)
 {
     rt_thread_t cur = rt_thread_self();
     if (!cur->lwp) /* no lwp, don't set thread idr to tls seg */
@@ -164,7 +164,7 @@ extern void lwp_switch_to_user(void *frame);
  * in x86, we can set stack, arg, text entry in a stack frame,
  * then pop then into register, final use iret to switch kernel mode to user mode.
  */
-void lwp_user_entry(void *args, const void *text, void *ustack, void *k_stack)
+void arch_start_umode(void *args, const void *text, void *ustack, void *k_stack)
 {
     rt_uint8_t *stk = k_stack;
     stk -= sizeof(struct rt_hw_stack_frame);
@@ -180,7 +180,7 @@ void lwp_user_entry(void *args, const void *text, void *ustack, void *k_stack)
 
 void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
 {
-    lwp_user_entry(args, (const void *)user_entry, (void *)USER_STACK_VEND, kernel_stack);
+    arch_start_umode(args, (const void *)user_entry, (void *)USER_STACK_VEND, kernel_stack);
 }
 
 extern void lwp_thread_return();
@@ -196,7 +196,7 @@ static void *lwp_copy_return_code_to_user_stack(void *ustack)
 
 /**
  * when called sys_thread_create, need create a thread, after thread stared, will come here,
- * like lwp_user_entry, will enter user mode, but we must set thread exit function. it looks like:
+ * like arch_start_umode, will enter user mode, but we must set thread exit function. it looks like:
  * void func(void *arg)
  * {
  *      ...
@@ -204,7 +204,7 @@ static void *lwp_copy_return_code_to_user_stack(void *ustack)
  * when thread func return, we must call exit code to exit thread, or not the program runs away.
  * so we need copy exit code to user and call exit code when func return.
  */
-void lwp_user_thread_entry(void *args, const void *text, void *ustack, void *k_stack)
+void arch_crt_start_umode(void *args, const void *text, void *ustack, void *k_stack)
 {
     RT_ASSERT(ustack != NULL);
 
@@ -252,7 +252,7 @@ rt_thread_t rt_thread_sp_to_thread(void *spmember_addr)
  * set exec context for fork/clone.
  * user_stack(unused)
  */
-void lwp_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp)
+void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp)
 {
     /**
      * thread kernel stack was set to tss.esp0, when intrrupt/syscall occur,

+ 1 - 6
components/lwp/arch/x86/i386/lwp_arch.h

@@ -12,6 +12,7 @@
 #define  LWP_ARCH_H__
 
 #include <lwp.h>
+#include <lwp_arch_comm.h>
 #include <stackframe.h>
 
 #ifdef RT_USING_USERSPACE
@@ -30,12 +31,6 @@
 extern "C" {
 #endif
 
-int arch_user_space_init(struct rt_lwp *lwp);
-void arch_user_space_vtable_free(struct rt_lwp *lwp);
-void *arch_kernel_mmu_table_get(void);
-void arch_kuser_init(rt_mmu_info *mmu_info, void *vectors);
-int arch_expand_user_stack(void *addr);
-
 rt_thread_t rt_thread_sp_to_thread(void *spmember_addr);
 
 void lwp_signal_do_return(rt_hw_stack_frame_t *frame);

+ 7 - 7
components/lwp/arch/x86/i386/lwp_gcc.S

@@ -27,22 +27,22 @@ lwp_switch_to_user:
     addl $4, %esp   // skip error_code
     iret    // enter to user mode
 
-.extern syscall_exit
+.extern arch_syscall_exit
 .global sys_fork
 .global sys_vfork
-.global sys_fork_exit
+.global arch_fork_exit
 sys_fork:
 sys_vfork:
     jmp _sys_fork
-sys_fork_exit:
-    jmp syscall_exit
+arch_fork_exit:
+    jmp arch_syscall_exit
 
 .global sys_clone
-.global sys_clone_exit
+.global arch_clone_exit
 sys_clone:
     jmp _sys_clone
-sys_clone_exit:
-    jmp syscall_exit
+arch_clone_exit:
+    jmp arch_syscall_exit
 
 /**
  * rt thread return code

+ 2 - 2
components/lwp/arch/x86/i386/reloc.c

@@ -28,13 +28,13 @@ typedef struct
 } Elf32_sym;
 
 #ifdef RT_USING_USERSPACE
-void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
+void arch_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
 {
 
 }
 #else
 
-void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
+void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
 {
 
 }

+ 9 - 15
components/lwp/lwp.c

@@ -23,6 +23,7 @@
 
 #include "lwp.h"
 #include "lwp_arch.h"
+#include "lwp_arch_comm.h"
 #include "console.h"
 
 #define DBG_TAG "LWP"
@@ -40,7 +41,6 @@ extern char working_directory[];
 #endif
 static struct termios stdin_termios, old_stdin_termios;
 
-extern void lwp_user_entry(void *args, const void *text, void *ustack, void *k_stack);
 int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
 
 struct termios *get_old_termios(void)
@@ -373,12 +373,6 @@ typedef struct
     Elf_Half st_shndx;
 } Elf_sym;
 
-#ifdef RT_USING_USERSPACE
-void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
-#else
-void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
-#endif
-
 #ifdef RT_USING_USERSPACE
 struct map_range
 {
@@ -932,9 +926,9 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
             check_read(read_len, dynsym_size);
         }
 #ifdef RT_USING_USERSPACE
-        lwp_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
+        arch_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
 #else
-        lwp_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
+        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
 
         rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
         rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
@@ -1071,7 +1065,7 @@ static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
     return;
 }
 
-static void lwp_thread_entry(void *parameter)
+static void _lwp_thread_entry(void *parameter)
 {
     rt_thread_t tid;
     struct rt_lwp *lwp;
@@ -1090,9 +1084,9 @@ static void lwp_thread_entry(void *parameter)
     }
 
 #ifdef ARCH_MM_MMU
-    lwp_user_entry(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
+    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
 #else
-    lwp_user_entry(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
+    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
 #endif /* ARCH_MM_MMU */
 }
 
@@ -1192,7 +1186,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
             tick = app_head->tick;
         }
 #endif /* not defined ARCH_MM_MMU */
-        thread = rt_thread_create(thread_name, lwp_thread_entry, RT_NULL,
+        thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
                 LWP_TASK_STACK_SIZE, priority, tick);
         if (thread != RT_NULL)
         {
@@ -1323,7 +1317,7 @@ void lwp_user_setting_save(rt_thread_t thread)
 {
     if (thread)
     {
-        thread->thread_idr = rt_cpu_get_thread_idr();
+        thread->thread_idr = arch_get_tidr();
     }
 }
 
@@ -1335,7 +1329,7 @@ void lwp_user_setting_restore(rt_thread_t thread)
     }
 #if !defined(ARCH_RISCV64)
     /* tidr will be set in RESTORE_ALL in risc-v */
-    rt_cpu_set_thread_idr(thread->thread_idr);
+    arch_set_tidr(thread->thread_idr);
 #endif
 
     if (rt_dbg_ops)

+ 1 - 4
components/lwp/lwp.h

@@ -138,16 +138,13 @@ int  lwp_check_exit_request(void);
 void lwp_terminate(struct rt_lwp *lwp);
 void lwp_wait_subthread_exit(void);
 
-void lwp_set_thread_area(void *p);
-void* rt_cpu_get_thread_idr(void);
-void rt_cpu_set_thread_idr(void *p);
-
 int lwp_tid_get(void);
 void lwp_tid_put(int tid);
 rt_thread_t lwp_tid_get_thread(int tid);
 void lwp_tid_set_thread(int tid, rt_thread_t thread);
 
 size_t lwp_user_strlen(const char *s, int *err);
+int lwp_execve(char *filename, int debug, int argc, char **argv, char **envp);
 
 /*create by lwp_setsid.c*/
 int setsid(void);

+ 53 - 0
components/lwp/lwp_arch_comm.h

@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2006-2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ */
+
+#ifndef __LWP_ARCH_COMM__
+#define __LWP_ARCH_COMM__
+
+#include <rtthread.h>
+
+/**
+ * APIs that every architecture port must implement
+ */
+
+/* syscall handlers */
+void arch_clone_exit(void);
+void arch_fork_exit(void);
+void arch_syscall_exit();
+void arch_ret_to_user();
+
+/* ELF relocation */
+#ifdef RT_USING_USERSPACE
+void arch_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, void *dynsym);
+#else
+void arch_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, void *dynsym);
+#endif
+
+/* user entry: enter user program code for the first time */
+void arch_crt_start_umode(void *args, const void *text, void *ustack, void *user_stack);
+void arch_start_umode(void *args, const void *text, void *ustack, void *k_stack);
+
+/* lwp create and setup */
+int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
+                            void *user_stack, void **thread_sp);
+void *arch_get_user_sp(void);
+
+/* user space setup and control */
+int arch_user_space_init(struct rt_lwp *lwp);
+void arch_user_space_vtable_free(struct rt_lwp *lwp);
+void *arch_kernel_mmu_table_get(void);
+void arch_kuser_init(rt_mmu_info *mmu_info, void *vectors);
+int arch_expand_user_stack(void *addr);
+
+/* thread id register */
+void arch_set_thread_area(void *p);
+void* arch_get_tidr(void);
+void arch_set_tidr(void *p);
+
+#endif /* __LWP_ARCH_COMM__ */
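
The header above collects the whole porting surface in one place. As an illustration only (none of this is part of the commit), a hypothetical new-architecture stub under components/lwp/arch/<new-arch>/ could start from the same pattern the aarch64 and i386 stubs in this diff use:

/* hypothetical components/lwp/arch/<new-arch>/lwp_arch.c -- sketch, not in this commit */
#include <rtthread.h>
#include <lwp_arch_comm.h>

/* thread-id register helpers: keep the value in the TCB until the port
 * wires up a real TLS register (the i386 port also falls back to the TCB
 * when no lwp is attached) */
void *arch_get_tidr(void)
{
    return rt_thread_self()->thread_idr;
}

void arch_set_tidr(void *p)
{
    rt_thread_self()->thread_idr = p;
}

void arch_set_thread_area(void *p)
{
    /* a real port would also program the hardware TLS register/segment here */
    arch_set_tidr(p);
}

/* user sp is not tracked yet on this port (the i386 version also returns RT_NULL) */
void *arch_get_user_sp(void)
{
    return RT_NULL;
}

The remaining declarations (arch_start_umode(), arch_set_thread_context(), the ELF relocation hooks, and so on) still have to be provided in the port's lwp_gcc.S / lwp_arch.c before user mode can actually run.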

+ 30 - 25
components/lwp/lwp_syscall.c

@@ -80,7 +80,6 @@ void lwp_cleanup(struct rt_thread *tid);
 #ifdef ARCH_MM_MMU
 #define ALLOC_KERNEL_STACK_SIZE 5120
 
-extern void lwp_user_thread_entry(void *args, const void *text, void *ustack, void *user_stack);
 int sys_futex(int *uaddr, int op, int val, void *timeout, void *uaddr2, int val3);
 int sys_pmutex(void *umutex, int op, void *arg);
 int sys_cacheflush(void *addr, int len, int cache);
@@ -98,7 +97,6 @@ static void kmem_put(void *kptr)
 #define ALLOC_KERNEL_STACK_SIZE_MIN 1024
 #define ALLOC_KERNEL_STACK_SIZE_MAX 4096
 
-extern void lwp_user_entry(void *args, const void *text, void *data, void *user_stack);
 extern void set_user_context(void *stack);
 #endif /* ARCH_MM_MMU */
 
@@ -360,7 +358,7 @@ static void convert_sockopt(int *level, int *optname)
     }
 #endif
 
-static void lwp_user_thread(void *parameter)
+static void _crt_thread_entry(void *parameter)
 {
     rt_thread_t tid;
     rt_size_t user_stack;
@@ -371,10 +369,10 @@ static void lwp_user_thread(void *parameter)
     user_stack &= ~7; //align 8
 
 #ifdef ARCH_MM_MMU
-    lwp_user_thread_entry(parameter, tid->user_entry, (void *)user_stack, tid->stack_addr + tid->stack_size);
+    arch_crt_start_umode(parameter, tid->user_entry, (void *)user_stack, tid->stack_addr + tid->stack_size);
 #else
     set_user_context((void*)user_stack);
-    lwp_user_entry(parameter, tid->user_entry, ((struct rt_lwp *)tid->lwp)->data_entry, (void*)user_stack);
+    arch_start_umode(parameter, tid->user_entry, ((struct rt_lwp *)tid->lwp)->data_entry, (void*)user_stack);
 #endif /* ARCH_MM_MMU */
 }
 
@@ -1005,8 +1003,6 @@ int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp)
     return 0;
 }
 
-int lwp_execve(char *filename, int debug, int argc, char **argv, char **envp);
-
 int sys_exec(char *filename, int argc, char **argv, char **envp)
 {
     return lwp_execve(filename, 0, argc, argv, envp);
@@ -1322,7 +1318,7 @@ rt_thread_t sys_thread_create(void *arg[])
         goto fail;
     }
     thread = rt_thread_create((const char *)arg[0],
-            lwp_user_thread,
+            _crt_thread_entry,
             (void *)arg[2],
             ALLOC_KERNEL_STACK_SIZE,
             (rt_uint8_t)(size_t)arg[4],
@@ -1362,7 +1358,7 @@ rt_thread_t sys_thread_create(void *arg[])
         goto fail;
     }
 
-    thread = rt_thread_create((const char *)arg[0], lwp_user_thread, (void *)arg[2], kstack_size, (rt_uint8_t)(size_t)arg[5], (rt_uint32_t)arg[6]);
+    thread = rt_thread_create((const char *)arg[0], _crt_thread_entry, (void *)arg[2], kstack_size, (rt_uint8_t)(size_t)arg[5], (rt_uint32_t)arg[6]);
     if (!thread)
     {
         goto fail;
@@ -1431,11 +1427,7 @@ fail:
  *          start_args
  *          */
 #define SYS_CLONE_ARGS_NR 7
-int lwp_set_thread_context(void (*exit)(void), void *new_thread_stack,
-        void *user_stack, void **thread_sp);
 
-long sys_clone(void *arg[]);
-void sys_clone_exit(void);
 long _sys_clone(void *arg[])
 {
     rt_base_t level = 0;
@@ -1539,7 +1531,7 @@ long _sys_clone(void *arg[])
     /* copy origin stack */
     rt_memcpy(thread->stack_addr, self->stack_addr, thread->stack_size);
     lwp_tid_set_thread(tid, thread);
-    lwp_set_thread_context(sys_clone_exit,
+    arch_set_thread_context(arch_clone_exit,
             (void *)((char *)thread->stack_addr + thread->stack_size),
             user_stack, &thread->sp);
     /* new thread never reach there */
@@ -1554,8 +1546,12 @@ fail:
     return GET_ERRNO();
 }
 
+RT_WEAK long sys_clone(void *arg[])
+{
+    return _sys_clone(arg);
+}
+
 int lwp_dup_user(struct lwp_avl_struct* ptree, void *arg);
-void *lwp_get_user_sp(void);
 
 static int _copy_process(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp)
 {
@@ -1618,9 +1614,6 @@ static int lwp_copy_files(struct rt_lwp *dst, struct rt_lwp *src)
     return -1;
 }
 
-int sys_fork(void);
-int sys_vfork(void);
-void sys_fork_exit(void);
 int _sys_fork(void)
 {
     rt_base_t level;
@@ -1715,10 +1708,10 @@ int _sys_fork(void)
     lwp_user_object_dup(lwp, self_lwp);
 
     level = rt_hw_interrupt_disable();
-    user_stack = lwp_get_user_sp();
+    user_stack = arch_get_user_sp();
     rt_hw_interrupt_enable(level);
 
-    lwp_set_thread_context(sys_fork_exit,
+    arch_set_thread_context(arch_fork_exit,
             (void *)((char *)thread->stack_addr + thread->stack_size),
             user_stack, &thread->sp);
     /* new thread never reach there */
@@ -1781,9 +1774,20 @@ size_t lwp_user_strlen(const char *s, int *err)
     }
 }
 
+/* arm needs to wrap the fork/clone calls to preserve lr & the caller-saved regs */
+
+RT_WEAK int sys_fork(void)
+{
+    return _sys_fork();
+}
+
+RT_WEAK int sys_vfork(void)
+{
+    return sys_fork();
+}
+
 struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp);
 int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux);
-void lwp_exec_user(void *args, void *kernel_stack, void *user_entry);
 void lwp_user_obj_free(struct rt_lwp *lwp);
 
 #define _swap_lwp_data(lwp_used, lwp_new, type, member) \
@@ -2339,9 +2343,10 @@ int sys_execve(const char *path, char *const argv[], char *const envp[])
         rt_hw_interrupt_enable(level);
 
         lwp_ref_dec(new_lwp);
-        lwp_exec_user(lwp->args,
-                thread->stack_addr + thread->stack_size,
-                lwp->text_entry);
+        arch_start_umode(lwp->args,
+                lwp->text_entry,
+                (void*)USER_STACK_VEND,
+                thread->stack_addr + thread->stack_size);
         /* never reach here */
     }
     return -EINVAL;
@@ -3665,7 +3670,7 @@ int sys_set_thread_area(void *p)
 
     thread = rt_thread_self();
     thread->thread_idr = p;
-    lwp_set_thread_area(p);
+    arch_set_thread_area(p);
 
     return 0;
 }

+ 3 - 3
libcpu/aarch64/common/context_gcc.S

@@ -172,7 +172,7 @@ rt_hw_get_gtimer_frq:
     LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
 #ifdef RT_USING_LWP
-    BEQ     ret_to_user
+    BEQ     arch_ret_to_user
 #endif
     ERET
 .endm
@@ -215,7 +215,7 @@ rt_hw_get_gtimer_frq:
     LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
 #ifdef RT_USING_LWP
-    BEQ     ret_to_user
+    BEQ     arch_ret_to_user
 #endif
     ERET
 .endm
@@ -251,7 +251,7 @@ rt_hw_get_gtimer_frq:
     LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
 #ifdef RT_USING_LWP
-    BEQ     ret_to_user
+    BEQ     arch_ret_to_user
 #endif
     ERET
 .endm

+ 1 - 1
libcpu/arm/cortex-a/context_gcc.S

@@ -252,7 +252,7 @@ rt_hw_context_switch_exit:
     bne 1f
     ldmfd   sp!, {r0-r12,lr}
     ldmfd   sp!, {lr}
-    b ret_to_user
+    b arch_ret_to_user
 1:
 #endif
     ldmfd   sp!, {r0-r12,lr,pc}^ /* irq return */

+ 4 - 4
libcpu/arm/cortex-a/start_gcc.S

@@ -395,7 +395,7 @@ vector_irq:
     msr     spsr_csxf, r4
     mov     lr, r5
     pop     {r0-r12}
-    b       ret_to_user
+    b       arch_ret_to_user
 1:
     mov     lr, r7
     cps     #Mode_IRQ
@@ -492,7 +492,7 @@ rt_hw_context_switch_interrupt_do:
     bne     1f
     ldmfd   sp!, {r0-r12,lr}
     ldmfd   sp!, {lr}
-    b       ret_to_user
+    b       arch_ret_to_user
 1:
 #endif
     /* pop new task's r0-r12,lr & pc, copy spsr to cpsr */
@@ -562,7 +562,7 @@ vector_pabt:
     ldr     lr, [sp, #15*4]     /* orign pc */
     ldmia   sp, {r0 - r12}
     add     sp, #17 * 4
-    b       ret_to_user
+    b       arch_ret_to_user
 #else
     bl      rt_hw_trap_pabt
     b       .
@@ -590,7 +590,7 @@ vector_dabt:
     ldr     lr, [sp, #15*4]    /* orign pc */
     ldmia   sp, {r0 - r12}
     add     sp, #17 * 4
-    b       ret_to_user
+    b       arch_ret_to_user
 #else
     bl      rt_hw_trap_dabt
     b       .

+ 0 - 72
libcpu/risc-v/t-head/c906/interrupt_gcc.S

@@ -103,78 +103,6 @@ spurious_interrupt:
     RESTORE_ALL
     sret
 
-syscall_entry:
-    //swap to thread kernel stack
-    csrr t0, sstatus
-    andi t0, t0, 0x100
-    beqz t0, __restore_sp_from_tcb
-
-__restore_sp_from_sscratch:
-    csrr t0, sscratch
-    j __move_stack_context
-
-__restore_sp_from_tcb:
-    la a0, rt_current_thread
-    LOAD a0, 0(a0)
-    jal get_thread_kernel_stack_top
-    mv t0, a0
-
-__move_stack_context:
-    mv t1, sp//src
-    mv sp, t0//switch stack
-    addi sp, sp, -CTX_REG_NR * REGBYTES
-    //copy context
-    li s0, CTX_REG_NR//cnt
-    mv t2, sp//dst
-
-copy_context_loop:
-    LOAD t0, 0(t1)
-    STORE t0, 0(t2)
-    addi s0, s0, -1
-    addi t1, t1, 8
-    addi t2, t2, 8
-    bnez s0, copy_context_loop
-
-    LOAD s0, 7 * REGBYTES(sp)
-    addi s0, s0, -0xfe
-    beqz s0, lwp_signal_quit
-
-#ifdef RT_USING_USERSPACE
-    /* save setting when syscall enter */
-    call  rt_thread_self
-    call  lwp_user_setting_save
-#endif
-
-    mv a0, sp
-    OPEN_INTERRUPT
-    call syscall_handler
-    CLOSE_INTERRUPT
-
-.global syscall_exit
-syscall_exit:
-
-    #if defined(RT_USING_USERSPACE) && defined(RT_USING_SIGNALS)
-        LOAD s0, 2 * REGBYTES(sp)
-        andi s0, s0, 0x100
-        bnez s0, dont_ret_to_user
-        li s0, 0
-        j ret_to_user
-        dont_ret_to_user:
-    #endif
-
-#ifdef RT_USING_USERSPACE
-    /* restore setting when syscall exit */
-    call  rt_thread_self
-    call  lwp_user_setting_restore
-
-    /* after restore the reg `tp`, need modify context */
-    STORE tp, 4 * REGBYTES(sp)
-#endif
-
-    //restore context
-    RESTORE_ALL
-    sret
-
 .global rt_hw_interrupt_enable
 rt_hw_interrupt_enable:
     csrs sstatus, a0    /* restore to old csr */

+ 1 - 1
libcpu/risc-v/virt64/context_gcc.S

@@ -61,7 +61,7 @@ rt_hw_context_switch:
 
     LOAD t0, 2 * REGBYTES(sp)
     andi t0, t0, 0x100
-    beqz t0, ret_to_user
+    beqz t0, arch_ret_to_user
 
     RESTORE_ALL
     sret

+ 1 - 73
libcpu/risc-v/virt64/interrupt_gcc.S

@@ -102,83 +102,11 @@ copy_context_loop_interrupt:
 spurious_interrupt:
     LOAD t0, 2 * REGBYTES(sp)
     andi t0, t0, 0x100
-    beqz t0, ret_to_user
+    beqz t0, arch_ret_to_user
 
     RESTORE_ALL
     sret
 
-syscall_entry:
-    //swap to thread kernel stack
-    csrr t0, sstatus
-    andi t0, t0, 0x100
-    beqz t0, __restore_sp_from_tcb
-
-__restore_sp_from_sscratch:
-    csrr t0, sscratch
-    j __move_stack_context
-
-__restore_sp_from_tcb:
-    la a0, rt_current_thread
-    LOAD a0, 0(a0)
-    jal get_thread_kernel_stack_top
-    mv t0, a0
-
-__move_stack_context:
-    mv t1, sp//src
-    mv sp, t0//switch stack
-    addi sp, sp, -CTX_REG_NR * REGBYTES
-    //copy context
-    li s0, CTX_REG_NR//cnt
-    mv t2, sp//dst
-
-copy_context_loop:
-    LOAD t0, 0(t1)
-    STORE t0, 0(t2)
-    addi s0, s0, -1
-    addi t1, t1, 8
-    addi t2, t2, 8
-    bnez s0, copy_context_loop
-
-    LOAD s0, 7 * REGBYTES(sp)
-    addi s0, s0, -0xfe
-    beqz s0, lwp_signal_quit
-
-#ifdef RT_USING_USERSPACE
-    /* save setting when syscall enter */
-    call  rt_thread_self
-    call  lwp_user_setting_save
-#endif
-
-    mv a0, sp
-    OPEN_INTERRUPT
-    call syscall_handler
-    CLOSE_INTERRUPT
-
-.global syscall_exit
-syscall_exit:
-
-    #if defined(RT_USING_USERSPACE)
-        LOAD s0, 2 * REGBYTES(sp)
-        andi s0, s0, 0x100
-        bnez s0, dont_ret_to_user
-        li s0, 0
-        j ret_to_user
-        dont_ret_to_user:
-    #endif
-
-#ifdef RT_USING_USERSPACE
-    /* restore setting when syscall exit */
-    call  rt_thread_self
-    call  lwp_user_setting_restore
-
-    /* after restore the reg `tp`, need modify context */
-    STORE tp, 4 * REGBYTES(sp)
-#endif
-
-    //restore context
-    RESTORE_ALL
-    sret
-
 .global rt_hw_interrupt_enable
 rt_hw_interrupt_enable:
     fence.i

+ 2 - 2
libcpu/x86/i386/interrupt_gcc.S

@@ -230,8 +230,8 @@ hw_syscall_entry:
     movl $rt_hw_intr_exit, %eax
     jmp *%eax
 
-.global syscall_exit
-syscall_exit:
+.global arch_syscall_exit
+arch_syscall_exit:
 #endif /* RT_USING_USERSPACE */
 .global rt_hw_intr_exit
 rt_hw_intr_exit: