
!575 API adjustments
Merge pull request !575 from PolarLush/api-rc

bernard, 3 years ago
commit a0a008f2cc
39 files changed, 216 insertions(+), 211 deletions(-)
  1. bsp/d1-allwinner-nezha/drivers/board.c (+1 -1)
  2. bsp/qemu-virt64-aarch64/drivers/board.c (+7 -1)
  3. bsp/qemu-virt64-aarch64/drivers/secondary_cpu.c (+1 -1)
  4. bsp/x86/drivers/board.c (+1 -1)
  5. components/lwp/arch/aarch64/cortex-a/lwp_arch.h (+1 -1)
  6. components/lwp/arch/arm/cortex-a/lwp_arch.h (+1 -1)
  7. components/lwp/arch/risc-v/rv64/lwp_arch.h (+1 -1)
  8. components/lwp/arch/x86/i386/lwp_arch.h (+1 -1)
  9. components/lwp/ioremap.c (+1 -0)
  10. components/lwp/lwp.c (+2 -2)
  11. components/lwp/lwp.h (+1 -1)
  12. components/lwp/lwp_mm.c (+40 -0)
  13. components/lwp/lwp_mm.h (+16 -0)
  14. components/lwp/lwp_shm.c (+1 -0)
  15. components/lwp/lwp_signal.c (+3 -3)
  16. components/lwp/lwp_user_mm.c (+5 -4)
  17. components/lwp/page.c (+6 -6)
  18. libcpu/aarch64/common/context_gcc.S (+7 -7)
  19. libcpu/aarch64/common/cpu.c (+9 -1)
  20. libcpu/aarch64/common/cpu.h (+6 -0)
  21. libcpu/aarch64/common/cpu_gcc.S (+4 -4)
  22. libcpu/aarch64/common/cpu_psci.c (+6 -1)
  23. libcpu/aarch64/common/mmu.c (+5 -32)
  24. libcpu/aarch64/common/mmu.h (+3 -3)
  25. libcpu/aarch64/common/psci.c (+40 -2)
  26. libcpu/aarch64/common/psci_api.h (+3 -0)
  27. libcpu/arm/cortex-a/mmu.c (+1 -28)
  28. libcpu/arm/cortex-a/mmu.h (+0 -3)
  29. libcpu/arm/cortex-a/start_gcc.S (+10 -10)
  30. libcpu/mips/gs264/mmu.c (+2 -2)
  31. libcpu/mips/gs264/mmu.h (+2 -2)
  32. libcpu/risc-v/t-head/c906/cpuport.c (+1 -1)
  33. libcpu/risc-v/t-head/c906/mmu.c (+2 -30)
  34. libcpu/risc-v/t-head/c906/mmu.h (+2 -5)
  35. libcpu/risc-v/virt64/cpuport.c (+1 -1)
  36. libcpu/risc-v/virt64/mmu.c (+16 -45)
  37. libcpu/risc-v/virt64/mmu.h (+2 -5)
  38. libcpu/x86/i386/mmu.c (+3 -3)
  39. libcpu/x86/i386/mmu.h (+2 -2)

+ 1 - 1
bsp/d1-allwinner-nezha/drivers/board.c

@@ -108,7 +108,7 @@ void rt_hw_board_init(void)
     MMUTable[0] &= ~PTE_CACHE;
     MMUTable[0] &= ~PTE_SHARE;
     MMUTable[0] |= PTE_SO;
-    switch_mmu((void *)MMUTable);
+    rt_hw_mmu_switch((void *)MMUTable);
 #endif
 }
 

+ 7 - 1
bsp/qemu-virt64-aarch64/drivers/board.c

@@ -23,6 +23,7 @@
 #ifdef RT_USING_FDT
 #include "interrupt.h"
 #include "dtb_node.h"
+#include <psci_api.h>
 #include <cpu.h>
 #endif
 
@@ -84,11 +85,16 @@ void rt_hw_board_init(void)
     rt_hw_uart_init();
     rt_console_set_device(RT_CONSOLE_DEVICE_NAME);
 
-#if defined(RT_USING_FDT) && defined(RT_USING_SMP)
+#ifdef RT_USING_FDT
     // TODO 0x44000000 should be replace by a variable
     void * fdt_start = (void *)0x44000000 - PV_OFFSET;
     device_tree_setup(fdt_start);
+
+#ifdef RT_USING_SMP
     rt_hw_cpu_init();
+#else
+    psci_init();
+#endif /* RT_USING_SMP */
 #endif
 
     rt_components_board_init();

+ 1 - 1
bsp/qemu-virt64-aarch64/drivers/secondary_cpu.c

@@ -21,7 +21,7 @@ void rt_hw_secondary_cpu_bsp_start(void)
 {
     rt_hw_spin_lock(&_cpus_lock);
 
-    kernel_mmu_switch((unsigned long)MMUTable);
+    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);
 
     // interrupt init
     rt_hw_vector_init();

+ 1 - 1
bsp/x86/drivers/board.c

@@ -73,7 +73,7 @@ void rt_hw_board_init(void)
     rt_page_init(init_page_region);
     /* map kernel space, then can read/write this area directly. */
     rt_hw_mmu_kernel_map_init(&mmu_info, HW_KERNEL_START, HW_KERNEL_END);
-    switch_mmu((void *)g_mmu_table);
+    rt_hw_mmu_switch((void *)g_mmu_table);
     mmu_enable();
 #endif
 

+ 1 - 1
components/lwp/arch/aarch64/cortex-a/lwp_arch.h

@@ -34,7 +34,7 @@ void *arch_kernel_mmu_table_get(void);
 void arch_kuser_init(rt_mmu_info *mmu_info, void *vectors);
 int arch_expand_user_stack(void *addr);
 
-unsigned long ffz(unsigned long x);
+unsigned long rt_hw_ffz(unsigned long x);
 
 rt_inline void icache_invalid_all(void)
 {

+ 1 - 1
components/lwp/arch/arm/cortex-a/lwp_arch.h

@@ -33,7 +33,7 @@ void *arch_kernel_mmu_table_get(void);
 void arch_kuser_init(rt_mmu_info *mmu_info, void *vectors);
 int arch_expand_user_stack(void *addr);
 
-rt_inline unsigned long ffz(unsigned long x)
+rt_inline unsigned long rt_hw_ffz(unsigned long x)
 {
     return __builtin_ffs(~x) - 1;
 }

+ 1 - 1
components/lwp/arch/risc-v/rv64/lwp_arch.h

@@ -56,7 +56,7 @@ int arch_expand_user_stack(void *addr);
 
 rt_mmu_info* arch_kernel_get_mmu_info(void);
 
-rt_inline unsigned long ffz(unsigned long x)
+rt_inline unsigned long rt_hw_ffz(unsigned long x)
 {
     return __builtin_ffs(~x) - 1;
 }

+ 1 - 1
components/lwp/arch/x86/i386/lwp_arch.h

@@ -40,7 +40,7 @@ rt_thread_t rt_thread_sp_to_thread(void *spmember_addr);
 
 void lwp_signal_do_return(rt_hw_stack_frame_t *frame);
         
-rt_inline unsigned long ffz(unsigned long x)
+rt_inline unsigned long rt_hw_ffz(unsigned long x)
 {
     return __builtin_ffs(~x) - 1;
 }
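
Context note (not part of the diff): rt_hw_ffz keeps the semantics of the old ffz: it returns the index of the lowest clear bit in a word, which the lwp_signal.c hunk below uses to pick the next pending signal. A small illustration of the inline fallback shown above (ffz_demo is a throwaway helper, not part of the changeset), with expected results spelled out in comments:

#include <rtthread.h>
#include <lwp_arch.h>   /* provides rt_hw_ffz() */

/* Illustrative only: prints the first-zero-bit index for a few masks. */
static void ffz_demo(void)
{
    rt_kprintf("%lu\n", rt_hw_ffz(0x00UL));  /* 0: bit 0 is already clear     */
    rt_kprintf("%lu\n", rt_hw_ffz(0x01UL));  /* 1: lowest clear bit is bit 1  */
    rt_kprintf("%lu\n", rt_hw_ffz(0xFFUL));  /* 8: bits 0..7 set, bit 8 clear */
}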

+ 1 - 0
components/lwp/ioremap.c

@@ -15,6 +15,7 @@
 #ifdef RT_USING_USERSPACE
 #include <mmu.h>
 #include <lwp_mm_area.h>
+#include <lwp_mm.h>
 
 static struct lwp_avl_struct *k_map_area;
 extern rt_mmu_info mmu_info;

+ 2 - 2
components/lwp/lwp.c

@@ -1341,11 +1341,11 @@ void lwp_user_setting_restore(rt_thread_t thread)
 
         if (l != 0)
         {
-            set_process_id((size_t)l->pid);
+            rt_hw_set_process_id((size_t)l->pid);
         }
         else
         {
-            set_process_id(0);
+            rt_hw_set_process_id(0);
         }
         if (l && l->debug)
         {

+ 1 - 1
components/lwp/lwp.h

@@ -293,6 +293,6 @@ rt_channel_t gdb_server_channel(void);
 int dbg_step_type(void);
 void dbg_attach_req(void *pc);
 int dbg_check_suspend(void);
-void set_process_id(int pid);
+void rt_hw_set_process_id(int pid);
 
 #endif

+ 40 - 0
components/lwp/lwp_mm.c

@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2006-2018, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+#include "lwp_mm.h"
+
+static rt_mutex_t mm_lock;
+
+void rt_mm_lock(void)
+{
+    if (rt_thread_self())
+    {
+        if (!mm_lock)
+        {
+            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
+        }
+        if (mm_lock)
+        {
+            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
+        }
+    }
+}
+
+void rt_mm_unlock(void)
+{
+    if (rt_thread_self())
+    {
+        if (mm_lock)
+        {
+            rt_mutex_release(mm_lock);
+        }
+    }
+}
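
The lock pair above used to be duplicated in every architecture's mmu.c; this changeset gives it a single home here, and the libcpu MMU sources now pull it in via <lwp_mm.h>. A minimal usage sketch, assuming only the two functions above (locked_map is a hypothetical caller that mirrors how the rt_hw_mmu_map wrappers in libcpu/risc-v/virt64/mmu.c bracket page-table edits further down):

#include <rtthread.h>
#include <lwp_mm.h>

/* Sketch only: serialize a page-table update with the shared mm lock.
 * rt_mm_lock() takes the mutex only in thread context (rt_thread_self() != RT_NULL). */
static void locked_map(void (*update_page_table)(void))
{
    rt_mm_lock();
    update_page_table();
    rt_mm_unlock();
}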

+ 16 - 0
components/lwp/lwp_mm.h

@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2006-2020, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ */
+
+#ifndef  __LWP_MM_H__
+#define  __LWP_MM_H__
+
+void rt_mm_lock(void);
+void rt_mm_unlock(void);
+
+#endif  /*__LWP_MM_H__*/

+ 1 - 0
components/lwp/lwp_shm.c

@@ -13,6 +13,7 @@
 #ifdef RT_USING_USERSPACE
 #include <lwp.h>
 #include <lwp_shm.h>
+#include <lwp_mm.h>
 
 #include <lwp_mm_area.h>
 #include <lwp_user_mm.h>

+ 3 - 3
components/lwp/lwp_signal.c

@@ -84,7 +84,7 @@ rt_inline int next_signal(lwp_sigset_t *pending, lwp_sigset_t *mask)
     x = *s & ~*m;
     if (x)
     {
-        sig = ffz(~x) + 1;
+        sig = rt_hw_ffz(~x) + 1;
         return sig;
     }
 
@@ -96,7 +96,7 @@ rt_inline int next_signal(lwp_sigset_t *pending, lwp_sigset_t *mask)
                 x = *++s &~ *++m;
                 if (!x)
                     continue;
-                sig = ffz(~x) + i*_LWP_NSIG_BPW + 1;
+                sig = rt_hw_ffz(~x) + i*_LWP_NSIG_BPW + 1;
                 break;
             }
             break;
@@ -105,7 +105,7 @@ rt_inline int next_signal(lwp_sigset_t *pending, lwp_sigset_t *mask)
             x = s[1] &~ m[1];
             if (!x)
                 break;
-            sig = ffz(~x) + _LWP_NSIG_BPW + 1;
+            sig = rt_hw_ffz(~x) + _LWP_NSIG_BPW + 1;
             break;
 
         case 1:

+ 5 - 4
components/lwp/lwp_user_mm.c

@@ -22,14 +22,15 @@
 #include <lwp_mm_area.h>
 #include <lwp_user_mm.h>
 #include <lwp_arch.h>
+#include <lwp_mm.h>
 
 int lwp_user_space_init(struct rt_lwp *lwp)
 {
     return arch_user_space_init(lwp);
 }
 
-void switch_mmu(void *mtable);
-void *mmu_table_get(void);
+void rt_hw_mmu_switch(void *mtable);
+void *rt_hw_mmu_tbl_get(void);
 void lwp_mmu_switch(struct rt_thread *thread)
 {
     struct rt_lwp *l = RT_NULL;
@@ -45,10 +46,10 @@ void lwp_mmu_switch(struct rt_thread *thread)
         new_mmu_table = arch_kernel_mmu_table_get();
     }
 
-    pre_mmu_table = mmu_table_get();
+    pre_mmu_table = rt_hw_mmu_tbl_get();
     if (pre_mmu_table != new_mmu_table)
     {
-        switch_mmu(new_mmu_table);
+        rt_hw_mmu_switch(new_mmu_table);
     }
 }
 

+ 6 - 6
components/lwp/page.c

@@ -37,7 +37,7 @@ static size_t page_nr;
 
 static struct page *page_list[ARCH_PAGE_LIST_SIZE];
 
-RT_WEAK int rt_clz(size_t n)
+RT_WEAK int rt_hw_clz(size_t n)
 {
     int bits = sizeof(size_t) * 8;
 
@@ -66,20 +66,20 @@ RT_WEAK int rt_clz(size_t n)
     return bits - n;
 }
 
-RT_WEAK int rt_ctz(size_t n)
+RT_WEAK int rt_hw_ctz(size_t n)
 {
     int ret = sizeof(size_t) * 8;
 
     if (n)
     {
-        ret -= (rt_clz(n ^ (n - 1)) + 1);
+        ret -= (rt_hw_clz(n ^ (n - 1)) + 1);
     }
     return ret;
 }
 
 size_t rt_page_bits(size_t size)
 {
-    int bit = sizeof(size_t) * 8 - rt_clz(size) - 1;
+    int bit = sizeof(size_t) * 8 - rt_hw_clz(size) - 1;
 
     if ((size ^ (1UL << bit)) != 0)
     {
@@ -408,8 +408,8 @@ void rt_page_init(rt_region_t reg)
         int align_bits;
         int size_bits;
 
-        size_bits = ARCH_ADDRESS_WIDTH_BITS - 1 - rt_clz(reg.end - reg.start);
-        align_bits = rt_ctz(reg.start);
+        size_bits = ARCH_ADDRESS_WIDTH_BITS - 1 - rt_hw_clz(reg.end - reg.start);
+        align_bits = rt_hw_ctz(reg.start);
         if (align_bits < size_bits)
         {
             size_bits = align_bits;

+ 7 - 7
libcpu/aarch64/common/context_gcc.S

@@ -31,10 +31,10 @@ rt_hw_cpu_id:
    ret
 
 /*
-void set_process_id(size_t id)
+void rt_hw_set_process_id(size_t id)
 */
-.global set_process_id
-set_process_id:
+.global rt_hw_set_process_id
+rt_hw_set_process_id:
     msr CONTEXTIDR_EL1, x0
     ret
 
@@ -515,8 +515,8 @@ vector_serror:
     BL      rt_hw_trap_serror
     b .
 
-.global switch_mmu
-switch_mmu:
+.global rt_hw_mmu_switch
+rt_hw_mmu_switch:
     MSR TTBR0_EL1, X0
     MRS X1, TCR_EL1
     CMP X0, XZR
@@ -535,7 +535,7 @@ switch_mmu:
     ISB
     RET
 
-.global mmu_table_get
-mmu_table_get:
+.global rt_hw_mmu_tbl_get
+rt_hw_mmu_tbl_get:
     MRS X0, TTBR0_EL1
     RET

+ 9 - 1
libcpu/aarch64/common/cpu.c

@@ -19,6 +19,9 @@
 #include <rtdbg.h>
 #include <string.h>
 #include "cpu.h"
+#include "psci_api.h"
+
+void (*system_off)(void);
 
 #ifdef RT_USING_SMP
 void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
@@ -325,17 +328,22 @@ RT_WEAK void rt_hw_secondary_cpu_idle_exec(void)
  */
 /*@{*/
 
-/** shutdown CPU */
+/** shutdown CPU is used as system shutdown currently */
 void rt_hw_cpu_shutdown()
 {
     rt_uint32_t level;
     rt_kprintf("shutdown...\n");
 
+    if (system_off)
+        system_off();
+    LOG_E("system shutdown failed");
+
     level = rt_hw_interrupt_disable();
     while (level)
     {
         RT_ASSERT(0);
     }
 }
+MSH_CMD_EXPORT_ALIAS(rt_hw_cpu_shutdown, shutdown, shutdown machine);
 
 /*@}*/
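
system_off is a plain function pointer: it stays RT_NULL until something registers a handler, and the psci.c hunk further down assigns psci_0_2_system_off to it when the firmware supports SYSTEM_OFF, so rt_hw_cpu_shutdown() only powers the machine off when such a handler exists. A hypothetical sketch of how a non-PSCI board could hook the same pointer (board_poweroff and its init function are placeholders, not part of this changeset):

#include <rtthread.h>
#include <cpu.h>   /* declares: extern void (*system_off)(void); */

/* Hypothetical board-specific power-off handler. */
static void board_poweroff(void)
{
    /* write the board's power-controller register here */
}

static int board_poweroff_init(void)
{
    system_off = board_poweroff;   /* consumed by rt_hw_cpu_shutdown() */
    return 0;
}
INIT_BOARD_EXPORT(board_poweroff_init);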

+ 6 - 0
libcpu/aarch64/common/cpu.h

@@ -13,6 +13,10 @@
 #include <rtthread.h>
 #include <stdbool.h>
 
+#ifndef RT_CPUS_NR
+#define RT_CPUS_NR 1
+#endif /* RT_CPUS_NR */
+
 #ifdef RT_USING_SMP
 struct cpu_ops_t
 {
@@ -54,4 +58,6 @@ extern struct cpu_ops_t cpu_ops_spin_tbl;
 
 extern void rt_hw_cpu_shutdown(void);
 
+extern void (*system_off)(void);
+
 #endif /* __RT_HW_CPU_H__ */

+ 4 - 4
libcpu/aarch64/common/cpu_gcc.S

@@ -86,15 +86,15 @@ rt_cpu_vector_set_base:
     RET
 
 
-.global ffz
-ffz:
+.global rt_hw_ffz
+rt_hw_ffz:
     mvn x1, x0
     clz x0, x1
     mov x1, #0x3f
     sub x0, x1, x0
     ret
 
-.global rt_clz
-rt_clz:
+.global rt_hw_clz
+rt_hw_clz:
     clz x0, x0
     ret

+ 6 - 1
libcpu/aarch64/common/cpu_psci.c

@@ -55,11 +55,16 @@ static int cpu_psci_cpu_boot(rt_uint32_t cpuid)
     return psci_ops.cpu_on(cpuid_to_hwid(cpuid), secondary_entry_pa);
 }
 
+static void cpu_psci_cpu_shutdown()
+{
+    psci_ops.cpu_off(cpuid_to_hwid(rt_hw_cpu_id()));
+}
+
 struct cpu_ops_t cpu_ops_psci = {
     .method = "psci",
     .cpu_boot = cpu_psci_cpu_boot,
     .cpu_init = cpu_psci_cpu_init,
-    .cpu_shutdown = RT_NULL
+    .cpu_shutdown = cpu_psci_cpu_shutdown,
 };
 
 #endif /* RT_USING_SMP */

+ 5 - 32
libcpu/aarch64/common/mmu.c

@@ -11,6 +11,7 @@
 #include <rtthread.h>
 #include <rthw.h>
 #include <board.h>
+#include <lwp_mm.h>
 
 #include "mmu.h"
 
@@ -34,34 +35,6 @@
 #define MMU_TBL_PAGE_4k_LEVEL  3
 #define MMU_TBL_LEVEL_NR       4
 
-static rt_mutex_t mm_lock;
-
-void rt_mm_lock(void)
-{
-    if (rt_thread_self())
-    {
-        if (!mm_lock)
-        {
-            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
-        }
-        if (mm_lock)
-        {
-            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
-        }
-    }
-}
-
-void rt_mm_unlock(void)
-{
-    if (rt_thread_self())
-    {
-        if (mm_lock)
-        {
-            rt_mutex_release(mm_lock);
-        }
-    }
-}
-
 void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);
 
 struct page_table
@@ -386,7 +359,7 @@ err:
 }
 #endif
 
-int kernel_map_fixed(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
+static int _kernel_map_fixed(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long count, unsigned long attr)
 {
     unsigned long i;
     int ret;
@@ -489,10 +462,10 @@ void rt_hw_mmu_setmtt(unsigned long vaddrStart,
         return;
     }
     count >>= ARCH_SECTION_SHIFT;
-    kernel_map_fixed((unsigned long *)MMUTable, vaddrStart, paddrStart, count, attr);
+    _kernel_map_fixed((unsigned long *)MMUTable, vaddrStart, paddrStart, count, attr);
 }
 
-void kernel_mmu_switch(unsigned long tbl)
+void rt_hw_mmu_ktbl_set(unsigned long tbl)
 {
 #ifdef RT_USING_LWP
     tbl += PV_OFFSET;
@@ -514,7 +487,7 @@ void rt_hw_mmu_setup(struct mem_desc *mdesc, int desc_nr)
         mdesc++;
     }
     rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)MMUTable, sizeof MMUTable);
-    kernel_mmu_switch((unsigned long)MMUTable);
+    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);
 }
 
 /**

+ 3 - 3
libcpu/aarch64/common/mmu.h

@@ -132,10 +132,10 @@ void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t att
 
 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size);
 void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr);
-void rt_mm_lock(void);
-void rt_mm_unlock(void);
 
-void kernel_mmu_switch(unsigned long tbl);
+void rt_hw_mmu_ktbl_set(unsigned long tbl);
+void *rt_hw_mmu_tbl_get();
+void rt_hw_mmu_switch(void *mmu_table);
 
 extern rt_mmu_info mmu_info;
 

+ 40 - 2
libcpu/aarch64/common/psci.c

@@ -11,6 +11,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>
+#include "cpu.h"
 #include "psci.h"
 #include "psci_api.h"
 #include "smccc.h"
@@ -86,11 +87,12 @@ int psci_init()
     psci_node = dtb_node_get_dtb_node_by_path(root, "/psci");
     if (!psci_node)
     {
+        LOG_E("No PSCI node found");
         return -1;
     }
     char *compatible = dtb_node_get_dtb_node_property_value(psci_node, "compatible", NULL);
     char *method = dtb_node_get_dtb_node_property_value(psci_node, "method", NULL);
-    
+
     int retval = 0;
 
     // setup psci-method
@@ -107,6 +109,7 @@ int psci_init()
         LOG_E("Unknown PSCI method: %s", method);
         return -1;
     }
+    LOG_D("Using psci method %s", method);
 
     retval = _psci_probe_version(compatible, &psci_ver_major, &psci_ver_minor);
     if (retval != 0)
@@ -166,7 +169,7 @@ static rt_uint32_t psci_0_2_get_version(void)
 static void psci_0_2_set_basic_ops()
 {
     psci_ops = (struct psci_ops_t){
-        .get_version = psci_0_2_get_version, 
+        .get_version = psci_0_2_get_version,
 
         // followings API are v0.1 compatible
         .cpu_suspend = psci_0_2_cpu_suspend,
@@ -176,19 +179,54 @@ static void psci_0_2_set_basic_ops()
     };
 }
 
+static void psci_0_2_system_off(void)
+{
+    psci_call(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+static void psci_0_2_system_reset(void)
+{
+    psci_call(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
 static int psci_0_2_init()
 {
     psci_0_2_set_basic_ops();
 
     // TODO init other version 0.2 features...
+    // psci system off and reset which controlling machine
+    psci_ops.system_off = psci_0_2_system_off;
+    psci_ops.system_reset = psci_0_2_system_reset;
+
+    system_off = psci_0_2_system_off;
     return 0;
 }
 
+/* PSCI v1.0 & after */
+static int psci_1_0_features(uint32_t psci_func_id)
+{
+    return psci_call(PSCI_1_0_FN_PSCI_FEATURES,
+                     psci_func_id, 0, 0);
+}
+
 static int psci_1_0_init()
 {
     psci_0_2_init();
 
     // TODO init other version 1.0 features...
+    // remove unsupported features
+    if (psci_1_0_features(PSCI_0_2_FN_SYSTEM_OFF) == PSCI_RET_NOT_SUPPORTED)
+    {
+        psci_ops.system_off = RT_NULL;
+        system_off = RT_NULL;
+    }
+    else
+        LOG_D("Using SYSTEM OFF feature");
+    if (psci_1_0_features(PSCI_0_2_FN_SYSTEM_RESET) == PSCI_RET_NOT_SUPPORTED)
+        psci_ops.system_reset = RT_NULL;
+    else
+        LOG_D("Using SYSTEM RESET feature");
+
     return 0;
 }
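
The new system_off / system_reset slots in psci_ops can be consumed the same way rt_hw_cpu_shutdown() consumes the system_off pointer. A hypothetical reset helper, assuming only the psci_ops structure declared in psci_api.h (machine_reset is not part of this changeset):

#include <rtthread.h>
#include "psci_api.h"

/* Hypothetical helper: issue PSCI SYSTEM_RESET when the firmware advertises it. */
void machine_reset(void)
{
    rt_kprintf("reboot...\n");
    if (psci_ops.system_reset)
        psci_ops.system_reset();   /* does not return on success */
    rt_kprintf("system reset unsupported or failed\n");
}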
 

+ 3 - 0
libcpu/aarch64/common/psci_api.h

@@ -22,6 +22,9 @@ struct psci_ops_t
     int32_t (*cpu_off)(uint32_t state);
     int32_t (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
     int32_t (*migrate)(unsigned long cpuid);
+
+    void (*system_off)(void);
+    void (*system_reset)(void);
 };
 
 extern struct psci_ops_t psci_ops;

+ 1 - 28
libcpu/arm/cortex-a/mmu.c

@@ -14,39 +14,12 @@
 
 #include "cp15.h"
 #include "mmu.h"
+#include <lwp_mm.h>
 
 #ifdef RT_USING_USERSPACE
 #include "page.h"
 #endif
 
-static rt_mutex_t mm_lock;
-
-void rt_mm_lock(void)
-{
-    if (rt_thread_self())
-    {
-        if (!mm_lock)
-        {
-            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
-        }
-        if (mm_lock)
-        {
-            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
-        }
-    }
-}
-
-void rt_mm_unlock(void)
-{
-    if (rt_thread_self())
-    {
-        if (mm_lock)
-        {
-            rt_mutex_release(mm_lock);
-        }
-    }
-}
-
 /* dump 2nd level page table */
 void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
 {

+ 0 - 3
libcpu/arm/cortex-a/mmu.h

@@ -108,7 +108,4 @@ void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t att
 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size);
 void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr);
 
-void rt_mm_lock(void);
-void rt_mm_unlock(void);
-
 #endif

+ 10 - 10
libcpu/arm/cortex-a/start_gcc.S

@@ -176,7 +176,7 @@ bss_loop:
 #ifdef RT_USING_USERSPACE
     ldr r0, =MMUTable     /* vaddr    */
     add r0, r5            /* to paddr */
-    bl  switch_mmu
+    bl  rt_hw_mmu_switch
 #else
     bl rt_hw_mmu_init
 #endif
@@ -261,14 +261,14 @@ enable_mmu:
     isb
     mov pc, lr
 
-.global set_process_id
-set_process_id:
+.global rt_hw_set_process_id
+rt_hw_set_process_id:
     LSL r0, r0, #8
     MCR p15, 0, r0, c13, c0, 1
     mov pc, lr
 
-.global switch_mmu
-switch_mmu:
+.global rt_hw_mmu_switch
+rt_hw_mmu_switch:
     orr r0, #0x18
     mcr p15, 0, r0, c2, c0, 0    /* ttbr0 */
 
@@ -281,8 +281,8 @@ switch_mmu:
     dsb
     isb
     mov pc, lr
-.global mmu_table_get
-mmu_table_get:
+.global rt_hw_mmu_tbl_get
+rt_hw_mmu_tbl_get:
     mrc p15, 0, r0, c2, c0, 0    /* ttbr0 */
     bic r0, #0x18
     mov pc, lr
@@ -604,8 +604,8 @@ vector_resv:
     b       .
 
 #ifdef RT_USING_SMP
-.global rt_clz
-rt_clz:
+.global rt_hw_clz
+rt_hw_clz:
     clz r0, r0
     bx lr
 
@@ -622,7 +622,7 @@ rt_secondary_cpu_entry:
 after_enable_mmu_n:
     ldr     r0, =MMUTable
     add     r0, r5
-    bl      switch_mmu
+    bl      rt_hw_mmu_switch
 #endif
 
 #ifdef RT_USING_FPU

+ 2 - 2
libcpu/mips/gs264/mmu.c

@@ -19,12 +19,12 @@
 
 void *current_mmu_table = RT_NULL;
 
-void *mmu_table_get()
+void *rt_hw_mmu_tbl_get()
 {
     return current_mmu_table;
 }
 
-void switch_mmu(void *mmu_table)
+void rt_hw_mmu_switch(void *mmu_table)
 {
     current_mmu_table = mmu_table;
 

+ 2 - 2
libcpu/mips/gs264/mmu.h

@@ -94,8 +94,8 @@ typedef struct
     size_t pv_off;
 } rt_mmu_info;
 
-void *mmu_table_get();
-void switch_mmu(void *mmu_table);
+void *rt_hw_mmu_tbl_get();
+void rt_hw_mmu_switch(void *mmu_table);
 
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
 #ifdef RT_USING_USERSPACE

+ 1 - 1
libcpu/risc-v/t-head/c906/cpuport.c

@@ -121,7 +121,7 @@ int rt_hw_cpu_id(void)
     return 0;   /* d1 has one core */
 }
 
-void set_process_id(int pid)
+void rt_hw_set_process_id(int pid)
 {
     //TODO
 }

+ 2 - 30
libcpu/risc-v/t-head/c906/mmu.c

@@ -22,34 +22,6 @@
 
 void *current_mmu_table = RT_NULL;
 
-static rt_mutex_t mm_lock;
-
-void rt_mm_lock(void)
-{
-    if (rt_thread_self())
-    {
-        if (!mm_lock)
-        {
-            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
-        }
-        if (mm_lock)
-        {
-            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
-        }
-    }
-}
-
-void rt_mm_unlock(void)
-{
-    if (rt_thread_self())
-    {
-        if (mm_lock)
-        {
-            rt_mutex_release(mm_lock);
-        }
-    }
-}
-
 static void rt_hw_cpu_tlb_invalidate()
 {
     rt_size_t satpv = read_csr(satp);
@@ -57,12 +29,12 @@ static void rt_hw_cpu_tlb_invalidate()
     mmu_flush_tlb();
 }
 
-void *mmu_table_get()
+void *rt_hw_mmu_tbl_get()
 {
     return current_mmu_table;
 }
 
-void switch_mmu(void *mmu_table)
+void rt_hw_mmu_switch(void *mmu_table)
 {
     current_mmu_table = mmu_table;
     RT_ASSERT(__CHECKALIGN(mmu_table,PAGE_OFFSET_BIT));

+ 2 - 5
libcpu/risc-v/t-head/c906/mmu.h

@@ -41,8 +41,8 @@ typedef struct
     size_t pv_off;
 }rt_mmu_info;
 
-void *mmu_table_get();
-void switch_mmu(void *mmu_table);
+void *rt_hw_mmu_tbl_get();
+void rt_hw_mmu_switch(void *mmu_table);
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info,void *v_address,rt_size_t size,rt_size_t *vtable,rt_size_t pv_off);
 void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info,rt_size_t vaddr_start,rt_size_t size);
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr);
@@ -54,7 +54,4 @@ void rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size);
 void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info,void *v_addr);
 void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info,void *v_addr);
 
-void rt_mm_lock(void);
-void rt_mm_unlock(void);
-
 #endif

+ 1 - 1
libcpu/risc-v/virt64/cpuport.c

@@ -115,7 +115,7 @@ void rt_hw_cpu_shutdown()
     }
 }
 
-void set_process_id(int pid)
+void rt_hw_set_process_id(int pid)
 {
     //TODO
 }

+ 16 - 45
libcpu/risc-v/virt64/mmu.c

@@ -14,52 +14,23 @@
 #include <page.h>
 #include <stdlib.h>
 #include <string.h>
+#include <lwp_mm.h>
 
 #define DBG_TAG "mmu"
 #define DBG_LVL DBG_INFO
 #include <rtdbg.h>
 
-#include <string.h>
 #include "riscv.h"
 #include "riscv_mmu.h"
 #include "mmu.h"
-#include <string.h>
 
 void *current_mmu_table = RT_NULL;
 void rt_hw_cpu_icache_invalidate_all();
 void rt_hw_cpu_dcache_flush_all();
 void rt_hw_cpu_dcache_clean(void *addr, rt_size_t size);
 
-static rt_mutex_t mm_lock;
-
 volatile rt_ubase_t MMUTable[__SIZE(VPN2_BIT)] __attribute__((aligned(4 * 1024)));
 
-void rt_mm_lock(void)
-{
-    if (rt_thread_self())
-    {
-        if (!mm_lock)
-        {
-            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
-        }
-        if (mm_lock)
-        {
-            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
-        }
-    }
-}
-
-void rt_mm_unlock(void)
-{
-    if (rt_thread_self())
-    {
-        if (mm_lock)
-        {
-            rt_mutex_release(mm_lock);
-        }
-    }
-}
-
 static void rt_hw_cpu_tlb_invalidate()
 {
     rt_size_t satpv = read_csr(satp);
@@ -67,12 +38,12 @@ static void rt_hw_cpu_tlb_invalidate()
     mmu_flush_tlb();
 }
 
-void *mmu_table_get()
+void *rt_hw_mmu_tbl_get()
 {
     return current_mmu_table;
 }
 
-void switch_mmu(void *mmu_table)
+void rt_hw_mmu_switch(void *mmu_table)
 {
     current_mmu_table = mmu_table;
     RT_ASSERT(__CHECKALIGN(mmu_table, PAGE_OFFSET_BIT));
@@ -108,7 +79,7 @@ int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, r
         return -1;
     }
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
 
     // vtable initialization check
     for (l1_off = va_s; l1_off <= va_e; l1_off++)
@@ -117,17 +88,17 @@ int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, r
 
         if (v)
         {
-            rt_hw_interrupt_enable(level);
-            return 0;
+            rt_mm_unlock();
+            return -1;
         }
     }
 
+    rt_mm_unlock();
     mmu_info->vtable = vtable;
     mmu_info->vstart = va_s;
     mmu_info->vend = va_e;
     mmu_info->pv_off = pv_off;
 
-    rt_hw_interrupt_enable(level);
     return 0;
 }
 
@@ -553,9 +524,9 @@ void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t
     void *ret;
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
@@ -564,9 +535,9 @@ void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt
     void *ret;
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
@@ -574,9 +545,9 @@ void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
 {
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     _rt_hw_mmu_unmap(mmu_info, v_addr, size);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
 }
 
 void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
@@ -625,9 +596,9 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
     void *ret;
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
@@ -668,5 +639,5 @@ void rt_hw_mmu_setup(rt_mmu_info *mmu_info, struct mem_desc *mdesc, int desc_nr)
         mdesc++;
     }
 
-    switch_mmu((void *)MMUTable);
+    rt_hw_mmu_switch((void *)MMUTable);
 }
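
Two behavioral notes on the hunk above: page-table operations are now serialized with the shared rt_mm_lock() mutex instead of globally disabling interrupts, and rt_hw_mmu_map_init() now returns -1 (rather than 0) when the supplied vtable already contains live entries. A minimal caller sketch under those assumptions; example_mmu_init() and its arguments are placeholders, not taken from this changeset:

#include <rtthread.h>
#include "mmu.h"

extern volatile rt_ubase_t MMUTable[];   /* level-1 table defined in mmu.c */

/* Sketch only: any non-zero result, including "vtable already in use", means failure. */
static void example_mmu_init(rt_mmu_info *info, void *va_start, rt_size_t size, rt_size_t pv_off)
{
    if (rt_hw_mmu_map_init(info, va_start, size, (rt_size_t *)MMUTable, pv_off) != 0)
    {
        rt_kprintf("rt_hw_mmu_map_init failed\n");
    }
}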

+ 2 - 5
libcpu/risc-v/virt64/mmu.h

@@ -49,8 +49,8 @@ typedef struct
     size_t pv_off;
 } rt_mmu_info;
 
-void *mmu_table_get();
-void switch_mmu(void *mmu_table);
+void *rt_hw_mmu_tbl_get();
+void rt_hw_mmu_switch(void *mmu_table);
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, rt_size_t *vtable, rt_size_t pv_off);
 void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info, rt_size_t vaddr_start, rt_size_t size);
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr);
@@ -64,9 +64,6 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);
 
 void rt_hw_mmu_setup(rt_mmu_info *mmu_info, struct mem_desc *mdesc, int desc_nr);
 
-void rt_mm_lock(void);
-void rt_mm_unlock(void);
-
 #define ARCH_ADDRESS_WIDTH_BITS 64
 
 #define MMU_MAP_ERROR_VANOTALIGN  -1

+ 3 - 3
libcpu/x86/i386/mmu.c

@@ -43,17 +43,17 @@ static void rt_hw_cpu_tlb_invalidate()
     mmu_flush_tlb();
 }
 
-void *mmu_table_get()
+void *rt_hw_mmu_tbl_get()
 {
     return current_mmu_table;
 }
 
-void switch_mmu(void *mmu_table)
+void rt_hw_mmu_switch(void *mmu_table)
 {
     current_mmu_table = mmu_table;
     if (mmu_table == RT_NULL)
     {
-        dbg_log(DBG_ERROR, "switch_mmu: NULL mmu table!\n");
+        dbg_log(DBG_ERROR, "rt_hw_mmu_switch: NULL mmu table!\n");
     }
     else
     {

+ 2 - 2
libcpu/x86/i386/mmu.h

@@ -127,8 +127,8 @@ void mmu_enable_user_page_access();
 void mmu_disable_user_page_access();
 void mmu_enable();
 
-void *mmu_table_get();
-void switch_mmu(void *mmu_table);
+void *rt_hw_mmu_tbl_get();
+void rt_hw_mmu_switch(void *mmu_table);
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info,void *v_address,rt_size_t size,rt_size_t *vtable,rt_size_t pv_off);
 void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info,rt_size_t vaddr_start,rt_size_t size);