
!504 [aarch64] Make non-user mode configurable
Merge pull request !504 from heyuanjie87/rt-smart

bernard committed 3 years ago
commit 9ee7d0f116

libcpu/aarch64/common/context_gcc.S  (+18 -8)

@@ -171,22 +171,22 @@ rt_hw_get_gtimer_frq:
     LDP     X2, X3, [SP], #0x10
     LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
-
+#ifdef RT_USING_LWP
     BEQ     ret_to_user
-
+#endif
     ERET
 .endm
 #else
 .macro RESTORE_CONTEXT
     /* Set the SP to point to the stack of the task being restored. */
     MOV     SP, X0
-
+#ifdef RT_USING_LWP
     BL      rt_thread_self
     MOV     X19, X0
     BL      lwp_mmu_switch
     MOV     X0, X19
     BL      lwp_user_setting_restore
-
+#endif
     LDP     X2, X3, [SP], #0x10  /* SPSR and ELR. */
 
     TST     X3, #0x1f
@@ -214,9 +214,9 @@ rt_hw_get_gtimer_frq:
     LDP     X2, X3, [SP], #0x10
     LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
-
+#ifdef RT_USING_LWP
     BEQ     ret_to_user
-
+#endif
     ERET
 .endm
 #endif
@@ -250,9 +250,9 @@ rt_hw_get_gtimer_frq:
     LDP     X2, X3, [SP], #0x10
     LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
-
+#ifdef RT_USING_LWP
     BEQ     ret_to_user
-
+#endif
     ERET
 .endm
 
@@ -301,8 +301,10 @@ rt_hw_context_switch_to:
     MOV     SP, X0
     MOV     X0, X1
     BL      rt_cpus_lock_status_restore
+#ifdef RT_USING_LWP
     BL      rt_thread_self
     BL      lwp_user_setting_restore
+#endif
     B       rt_hw_context_switch_exit
 
 /*
@@ -321,8 +323,10 @@ rt_hw_context_switch:
     MOV    SP, X0
     MOV    X0, X2
     BL     rt_cpus_lock_status_restore
+#ifdef RT_USING_LWP
     BL     rt_thread_self
     BL     lwp_user_setting_restore
+#endif
     B      rt_hw_context_switch_exit
 
 /*
@@ -337,8 +341,10 @@ rt_hw_context_switch_interrupt:
     STP     X0, X1, [SP, #-0x10]!
     STP     X2, X3, [SP, #-0x10]!
     STP     X29, X30, [SP, #-0x10]!
+#ifdef RT_USING_LWP
     BL      rt_thread_self
     BL      lwp_user_setting_save
+#endif
     LDP     X29, X30, [SP], #0x10
     LDP     X2, X3, [SP], #0x10
     LDP     X0, X1, [SP], #0x10
@@ -349,7 +355,9 @@ rt_hw_context_switch_interrupt:
     MOV     X19, X0
     BL      rt_cpus_lock_status_restore
     MOV     X0, X19
+#ifdef RT_USING_LWP
     BL      lwp_user_setting_restore
+#endif
     B       rt_hw_context_switch_exit
 
 .globl vector_fiq
@@ -420,8 +428,10 @@ rt_hw_context_switch_interrupt:
     MOV     X7, #1              // set rt_thread_switch_interrupt_flag to 1
     STR     X7, [X6]
     STP     X1, X30, [SP, #-0x10]!
+#ifdef RT_USING_LWP
     MOV     X0, X2
     BL      lwp_user_setting_save
+#endif
     LDP     X1, X30, [SP], #0x10
 _reswitch:
     LDR     X6, =rt_interrupt_to_thread     // set rt_interrupt_to_thread
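
Note on the guards added above (an editorial sketch, not part of the patch): the restore path tests the mode field of the saved program status (TST ... #0x1f) and BEQ ret_to_user only branches when the thread being restored was running at EL0. ret_to_user and the lwp_* helpers exist only when the lwp component is built, so wrapping those instructions in #ifdef RT_USING_LWP lets the same macros assemble for a kernel-only image. In C terms the mode test amounts to:

    /* sketch only: M[4:0] of the saved SPSR is 0b00000 for EL0t, i.e. the
     * restored thread was a user thread and must return through
     * ret_to_user (which is only linked when RT_USING_LWP is defined) */
    #include <stdint.h>

    static inline int returns_to_user(uint64_t spsr)
    {
        return (spsr & 0x1f) == 0;
    }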

libcpu/aarch64/common/mmu.c  (+39 -11)

@@ -14,7 +14,7 @@
 
 #include "mmu.h"
 
-#ifdef RT_USING_USERSPACE
+#ifdef RT_USING_LWP
 #include <page.h>
 #endif
 
@@ -41,6 +41,31 @@ struct page_table
     unsigned long page[512];
 };
 
+#ifndef RT_USING_LWP
+#define MMU_TBL_PAGE_NR_MAX     32
+
+#undef PV_OFFSET
+#define PV_OFFSET 0
+
+static volatile struct page_table MMUPage[MMU_TBL_PAGE_NR_MAX] __attribute__((aligned(4096)));
+
+#define rt_page_ref_inc(...)
+
+unsigned long rt_pages_alloc(rt_size_t size_bits)
+{
+    static unsigned long i = 0;
+
+    if (i >= MMU_TBL_PAGE_NR_MAX)
+    {
+        return RT_NULL;
+    }
+
+    ++i;
+
+    return (unsigned long)&MMUPage[i - 1].page;
+}
+#endif
+
 static struct page_table *__init_page_array;
 static unsigned long __page_off = 0UL;
 unsigned long get_free_page(void)
@@ -203,6 +228,7 @@ struct mmu_level_info
     void *page;
 };
 
+#ifdef RT_USING_LWP
 static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
 {
     int level;
@@ -330,6 +356,7 @@ err:
     _kenrel_unmap_4K(lv0_tbl, (void *)va);
     return ret;
 }
+#endif
 
 int kernel_map_fixed(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
 {
@@ -439,8 +466,12 @@ void rt_hw_mmu_setmtt(unsigned long vaddrStart,
 
 void kernel_mmu_switch(unsigned long tbl)
 {
+#ifdef RT_USING_LWP
     tbl += PV_OFFSET;
     __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory");
+#else
+    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb"::"r"(tbl):"memory");
+#endif
     __asm__ volatile("tlbi vmalle1\n dsb sy\nisb":::"memory");
     __asm__ volatile("ic ialluis\n dsb sy\nisb":::"memory");
 }
@@ -555,7 +586,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef RT_USING_LWP
 static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
 {
     size_t loop_va;
@@ -592,7 +623,6 @@ static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
     }
     return 0;
 }
-#endif
 
 static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t npages)
 {
@@ -638,13 +668,14 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, si
     }
     return ret;
 }
+#endif
 
 static void rt_hw_cpu_tlb_invalidate(void)
 {
     __asm__ volatile("tlbi vmalle1\n dsb sy\n isb sy\n");
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef RT_USING_LWP
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
 {
     size_t pa_s, pa_e;
@@ -705,7 +736,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
     pages = pa_e - pa_s + 1;
     vaddr = find_vaddr(mmu_info, pages);
     if (vaddr) {
-        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
+        //TODO ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
         if (ret == 0)
         {
             rt_hw_cpu_tlb_invalidate();
@@ -716,7 +747,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
 }
 #endif
 
-#ifdef RT_USING_USERSPACE
+#ifdef RT_USING_LWP
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t npages, size_t attr)
 {
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
@@ -797,7 +828,6 @@ void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size
     }
     return 0;
 }
-#endif
 
 void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
 {
@@ -813,7 +843,6 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
     rt_hw_cpu_tlb_invalidate();
 }
 
-#ifdef RT_USING_USERSPACE
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
 {
     void *ret;
@@ -835,7 +864,6 @@ void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_
     rt_hw_interrupt_enable(level);
     return ret;
 }
-#endif
 
 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
 {
@@ -845,6 +873,7 @@ void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
     _rt_hw_mmu_unmap(mmu_info, v_addr, size);
     rt_hw_interrupt_enable(level);
 }
+#endif
 
 void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
 {
@@ -906,7 +935,7 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
     return ret;
 }
 
-#ifdef RT_USING_USERSPACE
+
 void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off)
 {
     int ret;
@@ -929,4 +958,3 @@ void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned lo
         while (1);
     }
 }
-#endif
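
The substantive addition in mmu.c is the fallback rt_pages_alloc() for builds without RT_USING_LWP: translation tables come out of a fixed, 4 KiB-aligned static array (MMU_TBL_PAGE_NR_MAX entries) instead of the dynamic page allocator from <page.h>, and rt_page_ref_inc() becomes a no-op because those tables are never unmapped or freed. A standalone sketch of the same bump-allocation idea (names here are illustrative, not RT-Thread API):

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE  4096u
    #define POOL_PAGES 32u                 /* mirrors MMU_TBL_PAGE_NR_MAX */

    /* statically reserved, page-aligned backing store for translation tables */
    static uint8_t pool[POOL_PAGES][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
    static size_t next_free;

    /* hand out one page per call and return NULL once the pool is exhausted;
     * pages are never given back, which is acceptable for page tables that
     * live for the lifetime of the system */
    static void *pool_alloc_page(void)
    {
        if (next_free >= POOL_PAGES)
            return NULL;
        return pool[next_free++];
    }

The patch also forces PV_OFFSET to 0 in this configuration and points kernel_mmu_switch() at TTBR0_EL1 instead of TTBR1_EL1, so the kernel table is installed directly with no physical/virtual offset applied.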

libcpu/aarch64/common/trap.c  (+4 -2)

@@ -236,16 +236,18 @@ void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
         SVC_Handler(regs);
         /* never return here */
     }
-
+#ifdef RT_USING_LWP
     if (check_user_stack(esr, regs))
     {
         return;
     }
-
+#endif
     process_exception(esr, regs->pc);
     rt_hw_show_register(regs);
     rt_kprintf("current: %s\n", rt_thread_self()->name);
+#ifdef RT_USING_LWP
     check_user_fault(regs, 0, "user fault");
+#endif
 #ifdef RT_USING_FINSH
     list_thread();
 #endif
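
check_user_stack() and check_user_fault() are lwp helpers that try to handle aborts raised by user-mode (EL0) code before the kernel treats the exception as fatal; without RT_USING_LWP there is no EL0 code to recover, so the handler goes straight to process_exception(). For orientation, a sketch of the ESR field such a check inspects (the field layout is architectural; the helper itself lives in the lwp component and is not part of this diff):

    /* ESR_EL1.EC, bits [31:26], identifies the exception class (e.g. a data
     * or instruction abort from a lower exception level) -- the starting
     * point for deciding whether a fault came from user space */
    static inline unsigned int esr_exception_class(unsigned long esr)
    {
        return (unsigned int)((esr >> 26) & 0x3f);
    }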

libcpu/aarch64/cortex-a/entry_point.S  (+8 -0)

@@ -76,7 +76,11 @@ __start:
     eret                            /* exception return. from EL2. continue from .L__in_el1 */
 
 .L__in_el1:
+#ifdef RT_USING_LWP
     ldr     x9, =PV_OFFSET
+#else
+    mov     x9, #0
+#endif
     mov     sp, x1                  /* in EL1. Set sp to _start */
 
     /* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid trap from SIMD or float point instruction */
@@ -118,7 +122,9 @@ __start:
     dsb sy
 
     ldr x2, =0x40000000      /* map 1G memory for kernel space */
+#ifdef RT_USING_LWP
     ldr x3, =PV_OFFSET
+#endif
     bl rt_hw_mmu_setup_early
 
     ldr x30, =after_mmu_enable  /* set LR to after_mmu_enable function, it's a v_addr */
@@ -142,11 +148,13 @@ __start:
     ret
 
 after_mmu_enable:
+#ifdef RT_USING_LWP
     mrs x0, tcr_el1          /* disable ttbr0, only using kernel space */
     orr x0, x0, #(1 << 7)
     msr tcr_el1, x0
     msr ttbr0_el1, xzr
     dsb sy
+#endif
 
     mov     x0, #1
     msr     spsel, x0
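
Finally, the non-LWP entry path loads 0 instead of PV_OFFSET and skips the step that disables TTBR0_EL1, so the kernel keeps running identity-mapped through TTBR0_EL1 rather than linked at a high virtual address behind TTBR1_EL1. A sketch of the address conversion the two builds imply (the macro value is a stand-in; in the LWP build PV_OFFSET comes from the link-time layout, and mmu.c above forces it to 0 otherwise):

    #ifndef PV_OFFSET
    #define PV_OFFSET 0UL              /* kernel-only build: identity map */
    #endif

    /* same adjustment kernel_mmu_switch() makes ('tbl += PV_OFFSET')
     * before programming the translation table base register */
    static inline unsigned long virt_to_phys(unsigned long va)
    {
        return va + PV_OFFSET;
    }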