Add LWP enable checks

heyuanjie87 3 years ago
parent
commit
02f4830b17
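
This commit wraps the AArch64 LWP (lightweight process, i.e. user-mode) code paths in RT_USING_LWP guards so that the port still builds when LWP support is disabled: the ret_to_user branch and the calls to lwp_mmu_switch, lwp_user_setting_save and lwp_user_setting_restore are compiled out of the context-switch paths, the LWP-only MMU helpers and rt_hw_mmu_setup_early fall back to empty stubs, the user-stack and user-fault checks in the trap handler are skipped, and the early boot code no longer references the PV_OFFSET symbol. A minimal C sketch of the guard-plus-stub pattern used throughout (the function names below are hypothetical, not part of the patch):

#include <rtthread.h>   /* pulls in rtconfig.h, where RT_USING_LWP may or may not be defined */

#ifdef RT_USING_LWP
/* LWP enabled: do the real user-mode work here (lwp_* calls, page-table setup, ...). */
static int lwp_only_helper_example(void)
{
    return 0;
}
#else
/* LWP disabled: keep the same symbol but compile it to a no-op stub,
   so callers do not need #ifdefs of their own. */
static int lwp_only_helper_example(void)
{
    return 0;
}
#endif

void example_caller(void)
{
    /* The call site is identical in both configurations. */
    lwp_only_helper_example();
}
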

libcpu/aarch64/common/context_gcc.S  (+18 -8)

@@ -171,22 +171,22 @@ rt_hw_get_gtimer_frq:
     LDP     X2, X3, [SP], #0x10
     LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
-
+#ifdef RT_USING_LWP
     BEQ     ret_to_user
-
+#endif
     ERET
 .endm
 #else
 .macro RESTORE_CONTEXT
     /* Set the SP to point to the stack of the task being restored. */
     MOV     SP, X0
-
+#ifdef RT_USING_LWP
     BL      rt_thread_self
     MOV     X19, X0
     BL      lwp_mmu_switch
     MOV     X0, X19
     BL      lwp_user_setting_restore
-
+#endif
     LDP     X2, X3, [SP], #0x10  /* SPSR and ELR. */
 
     TST     X3, #0x1f
@@ -214,9 +214,9 @@ rt_hw_get_gtimer_frq:
     LDP     X2, X3, [SP], #0x10
     LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
-
+#ifdef RT_USING_LWP
     BEQ     ret_to_user
-
+#endif
     ERET
 .endm
 #endif
@@ -250,9 +250,9 @@ rt_hw_get_gtimer_frq:
     LDP     X2, X3, [SP], #0x10
     LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
-
+#ifdef RT_USING_LWP
     BEQ     ret_to_user
-
+#endif
     ERET
 .endm
 
@@ -301,8 +301,10 @@ rt_hw_context_switch_to:
     MOV     SP, X0
     MOV     X0, X1
     BL      rt_cpus_lock_status_restore
+#ifdef RT_USING_LWP
     BL      rt_thread_self
     BL      lwp_user_setting_restore
+#endif
     B       rt_hw_context_switch_exit
 
 /*
@@ -321,8 +323,10 @@ rt_hw_context_switch:
     MOV    SP, X0
     MOV    X0, X2
     BL     rt_cpus_lock_status_restore
+#ifdef RT_USING_LWP
     BL     rt_thread_self
     BL     lwp_user_setting_restore
+#endif
     B      rt_hw_context_switch_exit
 
 /*
@@ -337,8 +341,10 @@ rt_hw_context_switch_interrupt:
     STP     X0, X1, [SP, #-0x10]!
     STP     X2, X3, [SP, #-0x10]!
     STP     X29, X30, [SP, #-0x10]!
+#ifdef RT_USING_LWP
     BL      rt_thread_self
     BL      lwp_user_setting_save
+#endif
     LDP     X29, X30, [SP], #0x10
     LDP     X2, X3, [SP], #0x10
     LDP     X0, X1, [SP], #0x10
@@ -349,7 +355,9 @@ rt_hw_context_switch_interrupt:
     MOV     X19, X0
     BL      rt_cpus_lock_status_restore
     MOV     X0, X19
+#ifdef RT_USING_LWP
     BL      lwp_user_setting_restore
+#endif
     B       rt_hw_context_switch_exit
 
 .globl vector_fiq
@@ -420,8 +428,10 @@ rt_hw_context_switch_interrupt:
     MOV     X7, #1              // set rt_thread_switch_interrupt_flag to 1
     STR     X7, [X6]
     STP     X1, X30, [SP, #-0x10]!
+#ifdef RT_USING_LWP
     MOV     X0, X2
     BL      lwp_user_setting_save
+#endif
     LDP     X1, X30, [SP], #0x10
 _reswitch:
     LDR     X6, =rt_interrupt_to_thread     // set rt_interrupt_to_thread

libcpu/aarch64/common/mmu.c  (+25 -0)

@@ -137,6 +137,7 @@ int armv8_init_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa
     return 0;
 }
 
+#ifdef RT_USING_LWP
 static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
 {
     int level;
@@ -196,6 +197,12 @@ static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned lon
 
     return 0;
 }
+#else
+static int _kenrel_map_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
+{
+    return 0;
+}
+#endif
 
 struct mmu_level_info
 {
@@ -203,6 +210,7 @@ struct mmu_level_info
     void *page;
 };
 
+#ifdef RT_USING_LWP
 static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
 {
     int level;
@@ -330,6 +338,18 @@ err:
     _kenrel_unmap_4K(lv0_tbl, (void *)va);
     return ret;
 }
+#else
+static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
+{
+
+}
+
+static int _kenrel_map_4K(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
+{
+
+    return 0;
+}
+#endif
 
 int kernel_map_fixed(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
 {
@@ -928,5 +948,10 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo
     {
         while (1);
     }
+}
+#else
+void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off)
+{
+
 }
 #endif

libcpu/aarch64/common/trap.c  (+4 -2)

@@ -236,16 +236,18 @@ void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
         SVC_Handler(regs);
         /* never return here */
     }
-
+#ifdef RT_USING_LWP
     if (check_user_stack(esr, regs))
     {
         return;
     }
-
+#endif
     process_exception(esr, regs->pc);
     rt_hw_show_register(regs);
     rt_kprintf("current: %s\n", rt_thread_self()->name);
+#ifdef RT_USING_LWP
     check_user_fault(regs, 0, "user fault");
+#endif
 #ifdef RT_USING_FINSH
     list_thread();
 #endif

libcpu/aarch64/cortex-a/entry_point.S  (+6 -0)

@@ -76,7 +76,11 @@ __start:
     eret                            /* exception return. from EL2. continue from .L__in_el1 */
 
 .L__in_el1:
+#ifdef RT_USING_LWP
     ldr     x9, =PV_OFFSET
+#else
+    mov     x9, #0
+#endif
     mov     sp, x1                  /* in EL1. Set sp to _start */
 
     /* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid trap from SIMD or float point instruction */
@@ -118,7 +122,9 @@ __start:
     dsb sy
 
     ldr x2, =0x40000000      /* map 1G memory for kernel space */
+#ifdef RT_USING_LWP
     ldr x3, =PV_OFFSET
+#endif
     bl rt_hw_mmu_setup_early
 
     ldr x30, =after_mmu_enable  /* set LR to after_mmu_enable function, it's a v_addr */
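
RT_USING_LWP itself comes from the project configuration: it is defined (or left undefined) in rtconfig.h, typically generated from the BSP's Kconfig options. A minimal hand-written sketch, assuming no Kconfig tooling:

/* rtconfig.h (sketch): define this to enable the LWP/user-mode paths guarded above;
   leave it out to build the kernel-only variant with the stubbed helpers. */
#define RT_USING_LWP
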