
【Modify】Adjust some of the macros
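
This commit replaces the ARCH_ARM_MMU guard with the new RT_USING_USERSPACE option (and ARCH_ARM_MPU with ARCH_MM_MPU) across the lwp component and the libcpu ports, introduces the corresponding options in libcpu/Kconfig, and replaces the fence-only C906 cache stubs with real cache maintenance. Most files carry only a one-line guard rename, as in the reloc.c hunks below:

    -#ifdef ARCH_ARM_MMU
    +#ifdef RT_USING_USERSPACE
     #include <mmu.h>
     #include <page.h>
     #endif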

wuzhonghui · 3 years ago · parent commit ebba003296
64 changed files with 339 additions and 333 deletions
  1. components/lwp/Kconfig (+1 -1)
  2. components/lwp/arch/aarch64/common/reloc.c (+1 -1)
  3. components/lwp/arch/aarch64/cortex-a/lwp_arch.c (+1 -1)
  4. components/lwp/arch/aarch64/cortex-a/lwp_arch.h (+1 -1)
  5. components/lwp/arch/arm/common/reloc.c (+2 -2)
  6. components/lwp/arch/arm/cortex-a/lwp_arch.c (+1 -1)
  7. components/lwp/arch/arm/cortex-a/lwp_arch.h (+1 -2)
  8. components/lwp/arch/risc-v/rv64/lwp_arch.c (+1 -1)
  9. components/lwp/arch/risc-v/rv64/lwp_arch.h (+1 -1)
  10. components/lwp/arch/risc-v/rv64/reloc.c (+2 -2)
  11. components/lwp/arch/x86/i386/lwp_arch.c (+2 -2)
  12. components/lwp/arch/x86/i386/lwp_arch.h (+2 -2)
  13. components/lwp/arch/x86/i386/reloc.c (+2 -2)
  14. components/lwp/ioremap.c (+3 -3)
  15. components/lwp/lwp.c (+20 -101)
  16. components/lwp/lwp.h (+11 -16)
  17. components/lwp/lwp_avl.c (+2 -3)
  18. components/lwp/lwp_avl.h (+0 -2)
  19. components/lwp/lwp_futex.c (+1 -1)
  20. components/lwp/lwp_mm_area.c (+1 -1)
  21. components/lwp/lwp_mm_area.h (+1 -1)
  22. components/lwp/lwp_pid.c (+7 -7)
  23. components/lwp/lwp_pmutex.c (+1 -1)
  24. components/lwp/lwp_shm.c (+1 -1)
  25. components/lwp/lwp_syscall.c (+73 -46)
  26. components/lwp/lwp_tid.c (+1 -1)
  27. components/lwp/lwp_user_mm.c (+1 -1)
  28. components/lwp/lwp_user_mm.h (+1 -1)
  29. include/libc/libc_signal.h (+1 -0)
  30. include/rtdef.h (+1 -1)
  31. libcpu/Kconfig (+11 -2)
  32. libcpu/aarch64/common/interrupt.c (+1 -1)
  33. libcpu/aarch64/common/mmu.c (+6 -6)
  34. libcpu/aarch64/common/mmu.h (+1 -1)
  35. libcpu/aarch64/common/page.c (+1 -1)
  36. libcpu/aarch64/common/page.h (+1 -1)
  37. libcpu/arm/cortex-a/backtrace.c (+2 -2)
  38. libcpu/arm/cortex-a/context_gcc.S (+8 -8)
  39. libcpu/arm/cortex-a/interrupt.c (+1 -1)
  40. libcpu/arm/cortex-a/mmu.c (+15 -15)
  41. libcpu/arm/cortex-a/mmu.h (+2 -2)
  42. libcpu/arm/cortex-a/page.c (+1 -1)
  43. libcpu/arm/cortex-a/page.h (+1 -1)
  44. libcpu/arm/cortex-a/start_gcc.S (+9 -9)
  45. libcpu/arm/cortex-a/trap.c (+1 -1)
  46. libcpu/arm/cortex-a/vector_gcc.S (+1 -1)
  47. libcpu/mips/gs264/mmu.c (+8 -8)
  48. libcpu/mips/gs264/mmu.h (+2 -2)
  49. libcpu/risc-v/t-head/c906/cache.c (+83 -22)
  50. libcpu/risc-v/t-head/c906/context_gcc.S (+2 -2)
  51. libcpu/risc-v/t-head/c906/interrupt_gcc.S (+2 -2)
  52. libcpu/risc-v/t-head/c906/mmu.c (+2 -4)
  53. libcpu/risc-v/t-head/c906/trap.c (+3 -3)
  54. libcpu/risc-v/virt64/context_gcc.S (+2 -2)
  55. libcpu/risc-v/virt64/interrupt_gcc.S (+2 -2)
  56. libcpu/x86/i386/cpuport.c (+4 -4)
  57. libcpu/x86/i386/gate.c (+2 -2)
  58. libcpu/x86/i386/interrupt_gcc.S (+2 -2)
  59. libcpu/x86/i386/mmu.c (+10 -10)
  60. libcpu/x86/i386/mmu.h (+2 -2)
  61. libcpu/x86/i386/page.c (+2 -2)
  62. libcpu/x86/i386/syscall_c.c (+2 -2)
  63. src/cpu.c (+2 -2)
  64. src/mem.c (+1 -1)

+ 1 - 1
components/lwp/Kconfig

@@ -35,7 +35,7 @@ if RT_USING_LWP
             default 64
     endif
 
-    if ARCH_ARM_MPU
+    if ARCH_MM_MPU
         config RT_LWP_MPU_MAX_NR
             int "The maximum number of mpu region"
             default 2

+ 1 - 1
components/lwp/arch/aarch64/common/reloc.c

@@ -2,7 +2,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <elf.h>
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <mmu.h>
 #include <page.h>
 #endif

+ 1 - 1
components/lwp/arch/aarch64/cortex-a/lwp_arch.c

@@ -11,7 +11,7 @@
 #include <rtthread.h>
 #include <rthw.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #include <mmu.h>
 #include <page.h>

+ 1 - 1
components/lwp/arch/aarch64/cortex-a/lwp_arch.h

@@ -13,7 +13,7 @@
 
 #include <lwp.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #define USER_VADDR_TOP    0x0001000000000000UL
 #define USER_HEAP_VEND    0x0000ffffB0000000UL

+ 2 - 2
components/lwp/arch/arm/common/reloc.c

@@ -2,7 +2,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <lwp_elf.h>
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <mmu.h>
 #include <page.h>
 #endif
@@ -17,7 +17,7 @@ typedef struct
     Elf32_Half st_shndx;
 } Elf32_sym;
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
 {
     size_t rel_off;

+ 1 - 1
components/lwp/arch/arm/cortex-a/lwp_arch.c

@@ -11,7 +11,7 @@
 #include <rtthread.h>
 #include <rthw.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #include <mmu.h>
 #include <page.h>

+ 1 - 2
components/lwp/arch/arm/cortex-a/lwp_arch.h

@@ -12,8 +12,7 @@
 
 #include <lwp.h>
 
-#ifdef ARCH_ARM_MMU
-#include "mmu.h"
+#ifdef RT_USING_USERSPACE
 
 #define USER_VADDR_TOP    0xC0000000UL
 #define USER_HEAP_VEND    0xB0000000UL

+ 1 - 1
components/lwp/arch/risc-v/rv64/lwp_arch.c

@@ -17,7 +17,7 @@
 #include <rthw.h>
 #include <stddef.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #include <mmu.h>
 #include <page.h>

+ 1 - 1
components/lwp/arch/risc-v/rv64/lwp_arch.h

@@ -12,7 +12,7 @@
 
 #include <lwp.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #define USER_HEAP_VADDR   0x300000000UL
 #define USER_HEAP_VEND 0xffffffffffff0000UL

+ 2 - 2
components/lwp/arch/risc-v/rv64/reloc.c

@@ -2,7 +2,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <elf.h>
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <mmu.h>
 #include <page.h>
 #endif
@@ -17,7 +17,7 @@ typedef struct
     Elf64_Half st_shndx;
 } Elf64_sym;
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
 {
     size_t rel_off;

+ 2 - 2
components/lwp/arch/x86/i386/lwp_arch.c

@@ -13,7 +13,7 @@
 #include <rtconfig.h>
 #include <rtdbg.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #include <stackframe.h>
 #include <interrupt.h>
@@ -368,4 +368,4 @@ void lwp_signal_do_return(rt_hw_stack_frame_t *frame)
 }
 #endif /* RT_USING_SIGNALS */
 
-#endif  /* ARCH_ARM_MMU */
+#endif  /* RT_USING_USERSPACE */

+ 2 - 2
components/lwp/arch/x86/i386/lwp_arch.h

@@ -14,7 +14,7 @@
 #include <lwp.h>
 #include <stackframe.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #define USER_VADDR_TOP    0xFFFFF000UL
 #define USER_HEAP_VEND    0xE0000000UL
 #define USER_HEAP_VADDR   0x90000000UL
@@ -49,6 +49,6 @@ rt_inline unsigned long ffz(unsigned long x)
 }
 #endif
 
-#endif  /* ARCH_ARM_MMU */
+#endif  /* RT_USING_USERSPACE */
 
 #endif  /*LWP_ARCH_H__*/

+ 2 - 2
components/lwp/arch/x86/i386/reloc.c

@@ -12,7 +12,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <elf.h>
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <mmu.h>
 #include <page.h>
 #endif
@@ -27,7 +27,7 @@ typedef struct
     Elf32_Half st_shndx;
 } Elf32_sym;
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
 {
 

+ 3 - 3
components/lwp/ioremap.c

@@ -10,14 +10,14 @@
 #include <rtthread.h>
 #include <rthw.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <mmu.h>
 #include <lwp_mm_area.h>
 #endif
 
 #include <ioremap.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 static struct lwp_avl_struct *k_map_area;
 extern rt_mmu_info mmu_info;
 
@@ -105,7 +105,7 @@ void *rt_ioremap_cached(void *paddr, size_t size)
 
 void rt_iounmap(volatile void *vaddr)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     rt_base_t level;
     struct lwp_avl_struct *ma_avl_node;
 

+ 20 - 101
components/lwp/lwp.c

@@ -28,7 +28,7 @@
 #define DBG_LVL DBG_WARNING
 #include <rtdbg.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #ifdef RT_USING_GDBSERVER
 #include <hw_breakpoint.h>
 #include <lwp_gdbserver.h>
@@ -38,9 +38,7 @@
 #include <lwp_user_mm.h>
 #endif
 
-#ifdef ARCH_ARM_MMU
 static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
-#endif
 #ifdef DFS_USING_WORKDIR
 extern char working_directory[];
 #endif
@@ -111,7 +109,7 @@ void lwp_set_kernel_sp(uint32_t *sp)
 
 uint32_t *lwp_get_kernel_sp(void)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     return (uint32_t *)rt_thread_self()->sp;
 #else
     uint32_t* kernel_sp;
@@ -129,7 +127,7 @@ uint32_t *lwp_get_kernel_sp(void)
 #endif
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
 {
     int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
@@ -380,13 +378,13 @@ typedef struct
     Elf_Half st_shndx;
 } Elf_sym;
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
 #else
 void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
 #endif
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 struct map_range
 {
     void *start;
@@ -503,7 +501,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
     size_t rel_dyn_size = 0;
     size_t dynsym_off = 0;
     size_t dynsym_size = 0;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
     void *pa, *va;
     void *va_self;
@@ -547,7 +545,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
     }
 
     if ((eheader.e_type != ET_DYN)
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         && (eheader.e_type != ET_EXEC)
 #endif
     )
@@ -556,7 +554,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         return -RT_ERROR;
     }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     {
         off = eheader.e_phoff;
         for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
@@ -591,7 +589,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
 
         off = eheader.e_phoff;
         process_header_size = eheader.e_phnum * sizeof pheader;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
         {
             return -RT_ERROR;
@@ -614,12 +612,12 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         lseek(fd, off, SEEK_SET);
         read_len = load_fread(process_header, 1, process_header_size, fd);
         check_read(read_len, process_header_size);
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
 #endif
 
         aux->item[1].key = AT_PAGESZ;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         aux->item[1].value = ARCH_PAGE_SIZE;
 #else
         aux->item[1].value = RT_MM_PAGE_SIZE;
@@ -628,7 +626,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         {
             uint32_t random_value = rt_tick_get();
             uint8_t *random;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
             uint8_t *krandom;
 
             random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
@@ -643,7 +641,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
             aux->item[2].value = (size_t)random;
         }
         aux->item[3].key = AT_PHDR;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         aux->item[3].value = (size_t)va;
 #else
         aux->item[3].value = (size_t)process_header;
@@ -652,7 +650,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         aux->item[4].value = eheader.e_phnum;
         aux->item[5].key = AT_PHENT;
         aux->item[5].value = sizeof pheader;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
 #endif
     }
@@ -661,7 +659,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
     {
         load_off = (size_t)load_addr;
     }
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     else
     {
         /* map user */
@@ -813,7 +811,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
 
             check_off(pheader.p_offset, len);
             lseek(fd, pheader.p_offset, SEEK_SET);
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
             {
                 uint32_t size = pheader.p_filesz;
                 size_t tmp_len = 0;
@@ -840,7 +838,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
 
             if (pheader.p_filesz < pheader.p_memsz)
             {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
                 uint32_t size = pheader.p_memsz - pheader.p_filesz;
                 uint32_t size_s;
                 uint32_t off;
@@ -927,7 +925,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
             read_len = load_fread(dynsym, 1, dynsym_size, fd);
             check_read(read_len, dynsym_size);
         }
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         lwp_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
 #else
         lwp_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
@@ -960,7 +958,6 @@ int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_
 
 RT_WEAK int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
 {
-#ifdef ARCH_ARM_MMU
     uint8_t *ptr;
     int ret = -1;
     int len;
@@ -970,7 +967,7 @@ RT_WEAK int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_add
     RT_ASSERT(filename != RT_NULL);
     /* check lwp control block */
     RT_ASSERT(lwp != RT_NULL);
-
+rt_kprintf("duibudui\r\n");
     /* copy file name to process name */
     rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
 
@@ -1012,84 +1009,6 @@ out:
         close(fd);
     }
     return ret;
-#else
-    const char *process_name = RT_NULL;
-    int ret = -1;
-    int fd = 0;
-
-    /* check file name */
-    RT_ASSERT(filename != RT_NULL);
-    /* check lwp control block */
-    RT_ASSERT(lwp != RT_NULL);
-
-    /* copy file name to process name */
-    process_name = strrchr(filename, '/');
-    process_name = process_name? process_name + 1: filename;
-    rt_strncpy(lwp->cmd, process_name, RT_NAME_MAX);
-
-    if ((fd = open(filename, O_RDONLY)) == RT_NULL)
-    {
-        LOG_E("exec file (%s) find error!", filename);
-        goto out;
-    }
-    if (ioctl(fd, RT_FIOGETADDR, &lwp->text_entry) != RT_EOK)
-    {
-        LOG_E("get text addr error!", filename);
-        goto out;
-    }
-    lwp->text_size = lseek(fd, 0, SEEK_END);
-    close(fd);
-
-    lwp->data_size = ((struct lwp_app_head*)lwp->text_entry)->ram_size;
-#ifdef ARCH_ARM_MPU
-    struct stat buf;
-
-    if (stat(filename, &buf) != 0)
-    {
-        goto out;
-    }
-    if (rt_lwp_map_user(lwp, lwp->text_entry, buf.st_size, RT_MPU_ATT_READ) == RT_NULL)
-    {
-        goto out;
-    }
-    {
-        int i;
-        int argc;
-        char **argv;
-        void *new_args;
-        char *args_offset;
-        lwp->data_entry = rt_lwp_alloc_user(lwp, lwp->data_size + lwp->args_length, RT_MPU_ATT_FULL);
-        if (lwp->data_entry == RT_NULL)
-        {
-            rt_free(lwp->args);
-            rt_lwp_umap_user(lwp, lwp->text_entry, buf.st_size);
-            LOG_E("malloc for data section failed!", lwp->text_entry);
-            goto out;
-        }
-
-        argc = *(uint32_t*)(lwp->args);
-        argv = (char **)(lwp->args) + 1;
-        new_args = (void *)((uint32_t)lwp->data_entry + lwp->data_size);
-        args_offset = (char *)((uint32_t)new_args - (uint32_t)lwp->args);
-        for (i=0; i<argc; i++)
-        {
-            argv[i] += (uint32_t)args_offset;
-        }
-        memcpy(new_args, lwp->args, lwp->args_length);
-        rt_free(lwp->args);
-        lwp->args = new_args;
-    }
-#else
-    lwp->data_entry = (void*)rt_malloc_align(lwp->data_size, 8);
-#endif /* ARCH_ARM_MPU */
-
-    LOG_I("lwp->text_entry = 0x%p size:%d", lwp->text_entry, buf.st_size);
-    LOG_I("lwp->data_entry = 0x%p size:%d", lwp->data_entry, lwp->data_size);
-
-    ret = 0;
-out:
-    return ret;
-#endif /* ARCH_ARM_MMU */
 }
 
 void lwp_cleanup(struct rt_thread *tid)
@@ -1211,7 +1130,7 @@ pid_t lwp_execve(char *filename, int argc, char **argv, char **envp)
         lwp_ref_dec(lwp);
         return -ENOMEM;
     }
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     if (lwp_user_space_init(lwp) != 0)
     {
         lwp_tid_put(tid);

+ 11 - 16
components/lwp/lwp.h

@@ -28,17 +28,20 @@
 #include "lwp_signal.h"
 #include "lwp_syscall.h"
 #include "lwp_avl.h"
-#include "lwp_arch.h"
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include "lwp_shm.h"
 
 #include "mmu.h"
 #include "page.h"
+#else
+#include "lwp_mpu.h"
 #endif
+#include "lwp_arch.h"
 
 #ifdef RT_USING_MUSL
 #include <locale.h>
+typedef int32_t pid_t;
 #endif
 
 #ifdef __cplusplus
@@ -52,18 +55,16 @@ extern "C" {
 
 #define LWP_ARG_MAX         8
 
-#ifdef RT_USING_MUSL
-typedef int32_t pid_t;
-#endif /* RT_USING_MUSL */
-
 struct rt_lwp
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     rt_mmu_info mmu_info;
     struct lwp_avl_struct *map_area;
     size_t end_heap;
 #endif
-
+#ifdef ARCH_MM_MPU
+    struct rt_mpu_info mpu_info;
+#endif
     uint8_t lwp_type;
     uint8_t reserv[3];
 
@@ -95,12 +96,6 @@ struct rt_lwp
     rt_uint32_t signal_in_process;
     lwp_sighandler_t signal_handler[_LWP_NSIG];
 
-#ifndef ARCH_ARM_MMU
-#ifdef ARCH_ARM_MPU
-    struct rt_mpu_info mpu_info;
-#endif
-#endif /* ARCH_ARM_MMU */
-
     struct lwp_avl_struct *object_root;
     struct rt_mutex object_mutex;
     struct rt_user_context user_ctx;
@@ -141,13 +136,13 @@ void lwp_tid_set_thread(int tid, rt_thread_t thread);
 
 size_t lwp_user_strlen(const char *s, int *err);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void lwp_mmu_switch(struct rt_thread *thread);
 #endif
 void lwp_user_setting_save(rt_thread_t thread);
 void lwp_user_setting_restore(rt_thread_t thread);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 struct __pthread {
     /* Part 1 -- these fields may be external or
      *      * internal (accessed via asm) ABI. Do not change. */

+ 2 - 3
components/lwp/lwp_avl.c

@@ -208,8 +208,7 @@ int lwp_avl_traversal(struct lwp_avl_struct *ptree, int (*fun)(struct lwp_avl_st
     return ret;
 }
 
-#ifndef ARCH_ARM_MMU
-struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree)
+RT_WEAK struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree)
 {
     if (ptree == AVL_EMPTY)
     {
@@ -225,4 +224,4 @@ struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree)
     }
     return ptree;
 }
-#endif /* ARCH_ARM_MMU */
+

+ 0 - 2
components/lwp/lwp_avl.h

@@ -37,9 +37,7 @@ void lwp_avl_remove(struct lwp_avl_struct * node_to_delete, struct lwp_avl_struc
 void lwp_avl_insert (struct lwp_avl_struct * new_node, struct lwp_avl_struct ** ptree);
 struct lwp_avl_struct* lwp_avl_find(avl_key_t key, struct lwp_avl_struct* ptree);
 int lwp_avl_traversal(struct lwp_avl_struct* ptree, int (*fun)(struct lwp_avl_struct*, void *), void *arg);
-#ifndef ARCH_ARM_MMU
 struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree);
-#endif /* ARCH_ARM_MMU */
 
 #ifdef __cplusplus
 }

+ 1 - 1
components/lwp/lwp_futex.c

@@ -10,7 +10,7 @@
 
 #include <rtthread.h>
 #include <lwp.h>
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <lwp_user_mm.h>
 #endif
 #include "clock_time.h"

+ 1 - 1
components/lwp/lwp_mm_area.c

@@ -9,7 +9,7 @@
  */
 #include <rtthread.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <lwp_mm_area.h>
 
 int lwp_map_area_insert(struct lwp_avl_struct **avl_tree, size_t addr, size_t size, int ma_type)

+ 1 - 1
components/lwp/lwp_mm_area.h

@@ -15,7 +15,7 @@
 
 #include <lwp_avl.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #ifdef __cplusplus
 extern "C" {

+ 7 - 7
components/lwp/lwp_pid.c

@@ -17,7 +17,7 @@
 #include "lwp_pid.h"
 #include "lwp_console.h"
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include "lwp_user_mm.h"
 
 #ifdef RT_USING_GDBSERVER
@@ -360,11 +360,11 @@ void lwp_free(struct rt_lwp* lwp)
     lwp->finish = 1;
     if (lwp->args != RT_NULL)
     {
-#ifndef ARCH_ARM_MMU
+#ifndef ARCH_MM_MMU
         lwp->args_length = RT_NULL;
-#ifndef ARCH_ARM_MPU
+#ifndef ARCH_MM_MPU
         rt_free(lwp->args);
-#endif /* not defined ARCH_ARM_MPU */
+#endif /* not defined ARCH_MM_MPU */
 #endif /* ARCH_ARM_MMU */
         lwp->args = RT_NULL;
     }
@@ -386,12 +386,12 @@ void lwp_free(struct rt_lwp* lwp)
 #ifdef ARCH_ARM_MMU
         rt_free_align(lwp->data_entry);
 #else
-#ifdef ARCH_ARM_MPU
+#ifdef ARCH_MM_MPU
         rt_lwp_umap_user(lwp, lwp->text_entry, 0);
         rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
 #else
         rt_free_align(lwp->data_entry);
-#endif /* ARCH_ARM_MPU */
+#endif /* ARCH_MM_MPU */
 #endif /* ARCH_ARM_MMU */
         lwp->data_entry = RT_NULL;
     }
@@ -409,7 +409,7 @@ void lwp_free(struct rt_lwp* lwp)
         }
     }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     lwp_unmap_user_space(lwp);
 #endif
 

+ 1 - 1
components/lwp/lwp_pmutex.c

@@ -10,7 +10,7 @@
 
 #include <rtthread.h>
 #include <lwp.h>
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <lwp_user_mm.h>
 #endif
 #include "clock_time.h"

+ 1 - 1
components/lwp/lwp_shm.c

@@ -10,7 +10,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <lwp.h>
 #include <lwp_shm.h>
 

+ 73 - 46
components/lwp/lwp_syscall.c

@@ -17,7 +17,7 @@
 #include <board.h>
 
 #include <lwp.h>
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <lwp_user_mm.h>
 #include <lwp_arch.h>
 #endif
@@ -36,7 +36,7 @@
 #define SYSCALL_NET(f)      ((void *)sys_notimpl)
 #endif
 
-#if defined(RT_USING_DFS) && defined(ARCH_ARM_MMU)
+#if defined(RT_USING_DFS) && defined(RT_USING_USERSPACE)
 #define SYSCALL_USPACE(f)   ((void *)(f))
 #else
 #define SYSCALL_USPACE(f)   ((void *)sys_notimpl)
@@ -393,17 +393,22 @@ void sys_exit(int value)
         lwp_put_to_user(clear_child_tid, &t, sizeof t);
         sys_futex(tid->clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
     }
-#endif /* ARCH_ARM_MMU */
     main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
     if (main_thread == tid)
     {
         lwp_terminate(lwp);
-#ifdef ARCH_ARM_MMU
         lwp_wait_subthread_exit();
+        lwp->lwp_ret = value;
+    }
 #else
+    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
+    if (main_thread == tid)
+    {
         rt_thread_t sub_thread;
         rt_list_t *list;
 
+        lwp_terminate(lwp);
+
         /* delete all subthread */
         while ((list = tid->sibling.prev) != &lwp->t_grp)
         {
@@ -411,9 +416,9 @@ void sys_exit(int value)
             rt_list_remove(&sub_thread->sibling);
             rt_thread_delete(sub_thread);
         }
-#endif /* ARCH_ARM_MMU */
         lwp->lwp_ret = value;
     }
+#endif /* ARCH_ARM_MMU */
 
     rt_thread_delete(tid);
     rt_schedule();
@@ -431,7 +436,7 @@ void sys_exit_group(int status)
 /* syscall: "read" ret: "ssize_t" args: "int" "void *" "size_t" */
 ssize_t sys_read(int fd, void *buf, size_t nbyte)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     void *kmem = RT_NULL;
     ssize_t ret = -1;
 
@@ -472,7 +477,7 @@ ssize_t sys_read(int fd, void *buf, size_t nbyte)
 /* syscall: "write" ret: "ssize_t" args: "int" "const void *" "size_t" */
 ssize_t sys_write(int fd, const void *buf, size_t nbyte)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     void *kmem = RT_NULL;
     ssize_t ret = -1;
 
@@ -517,7 +522,7 @@ off_t sys_lseek(int fd, off_t offset, int whence)
 /* syscall: "open" ret: "int" args: "const char *" "int" "..." */
 int sys_open(const char *name, int flag, ...)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int ret = -1;
     rt_size_t len = 0;
     char *kname = RT_NULL;
@@ -570,7 +575,7 @@ int sys_ioctl(int fd, unsigned long cmd, void* data)
 
 int sys_fstat(int file, struct stat *buf)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int ret = -1;
     struct stat statbuff;
 
@@ -601,7 +606,7 @@ int sys_fstat(int file, struct stat *buf)
     return (ret < 0 ? GET_ERRNO() : ret);
 #endif
 }
-#ifdef ARCH_ARM_MMU
+
 /* DFS and lwip definitions */
 #define IMPL_POLLIN     (0x01)
 
@@ -693,13 +698,12 @@ static void dfs2musl_events(short *events)
 
     *events = result_e;
 }
-#endif /* ARCH_ARM_MMU */
 
 int sys_poll(struct pollfd *fds, nfds_t nfds, int timeout)
 {
     int ret = -1;
-#ifdef ARCH_ARM_MMU
     int i = 0;
+#ifdef RT_USING_USERSPACE
     struct pollfd *kfds = RT_NULL;
 
     if (!lwp_user_accessable((void *)fds, nfds * sizeof *fds))
@@ -730,20 +734,36 @@ int sys_poll(struct pollfd *fds, nfds_t nfds, int timeout)
     }
 
     kmem_put(kfds);
+    return ret;
 #else
+#ifdef RT_USING_MUSL
+    for (i = 0; i < nfds; i++)
+    {
+        musl2dfs_events(&fds->events);
+    }
+#endif /* RT_USING_MUSL */
     if (!lwp_user_accessable((void *)fds, nfds * sizeof *fds))
     {
         return -EFAULT;
     }
     ret = poll(fds, nfds, timeout);
-#endif /* ARCH_ARM_MMU */
+#ifdef RT_USING_MUSL
+    if (ret > 0)
+    {
+        for (i = 0; i < nfds; i++)
+        {
+            dfs2musl_events(&fds->revents);
+        }
+    }
+#endif /* RT_USING_MUSL */
     return ret;
+#endif /* RT_USING_USERSPACE */
 }
 
 int sys_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout)
 {
+#ifdef RT_USING_USERSPACE
     int ret = -1;
-#ifdef ARCH_ARM_MMU
     fd_set *kreadfds = RT_NULL, *kwritefds = RT_NULL, *kexceptfds = RT_NULL;
 
     if (readfds)
@@ -820,6 +840,8 @@ quit:
     }
     return (ret < 0 ? GET_ERRNO() : ret);
 #else
+    int ret;
+
     if (!lwp_user_accessable((void *)readfds, sizeof *readfds))
     {
         return -EFAULT;
@@ -839,7 +861,7 @@ quit:
 
 int sys_unlink(const char *pathname)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int ret = -1;
     rt_size_t len = 0;
     char *kname = RT_NULL;
@@ -879,7 +901,7 @@ int sys_unlink(const char *pathname)
 int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
 {
     rt_tick_t tick;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     struct timespec rqtp_k;
     struct timespec rmtp_k;
 
@@ -940,7 +962,7 @@ int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
 /* syscall: "gettimeofday" ret: "int" args: "struct timeval *" "struct timezone *" */
 int sys_gettimeofday(struct timeval *tp, struct timezone *tzp)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     struct timeval t_k;
 
     if (tp)
@@ -1089,7 +1111,7 @@ rt_err_t sys_mutex_release(rt_mutex_t mutex)
     return rt_mutex_release(mutex);
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 /* memory allocation */
 extern rt_base_t lwp_brk(void *addr);
 rt_base_t sys_brk(void *addr)
@@ -1282,8 +1304,7 @@ rt_thread_t sys_thread_create(void *arg[])
 
     lwp = rt_thread_self()->lwp;
     lwp_ref_inc(lwp);
-
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     user_stack  = lwp_map_user(lwp, 0, (size_t)arg[3], 0);
     if (!user_stack)
     {
@@ -1341,7 +1362,7 @@ rt_thread_t sys_thread_create(void *arg[])
     thread->user_stack = (void *)user_stack;
     thread->user_stack_size = (uint32_t)arg[4];
     rt_memset(thread->user_stack, '#', thread->user_stack_size);
-#endif /* ARCH_ARM_MMU */
+#endif /* RT_USING_USERSPACE */
 
     thread->lwp = (void*)lwp;
     thread->tid = tid;
@@ -1520,7 +1541,7 @@ static int _copy_process(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp)
 
 static void lwp_struct_copy(struct rt_lwp *dst, struct rt_lwp *src)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     dst->end_heap = src->end_heap;
 #endif
     dst->lwp_type = src->lwp_type;
@@ -2271,7 +2292,7 @@ int sys_execve(const char *path, char *const argv[], char *const envp[])
 
         rt_pages_free(page, 0);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         _swap_lwp_data(lwp, new_lwp, rt_mmu_info, mmu_info);
         _swap_lwp_data(lwp, new_lwp, struct lwp_avl_struct *, map_area);
         _swap_lwp_data(lwp, new_lwp, size_t, end_heap);
@@ -2457,7 +2478,7 @@ void sys_hw_interrupt_enable(uint32_t level)
     rt_hw_interrupt_enable(level);
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 int sys_shmget(size_t key, size_t size, int create)
 {
     return lwp_shmget(key, size, create);
@@ -2778,13 +2799,13 @@ int sys_recvfrom(int socket, void *mem, size_t len, int flags,
       struct musl_sockaddr *from, socklen_t *fromlen)
 {
     int flgs = 0;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int ret = -1;
     void *kmem = RT_NULL;
 #endif
 
     flgs = netflags_muslc_2_lwip(flags);
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     if (!len)
     {
         return -EINVAL;
@@ -2856,13 +2877,13 @@ int sys_sendto(int socket, const void *dataptr, size_t size, int flags,
     const struct musl_sockaddr *to, socklen_t tolen)
 {
     int flgs = 0;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int ret = -1;
     void *kmem = RT_NULL;
 #endif
 
     flgs = netflags_muslc_2_lwip(flags);
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     if (!size)
     {
         return -EINVAL;
@@ -3216,7 +3237,7 @@ int sys_thread_sighandler_set(int sig, lwp_sighandler_t func)
 int32_t sys_waitpid(int32_t pid, int *status, int options)
 {
     int ret = -1;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     if (!lwp_user_accessable((void *)status, sizeof(int)))
     {
         return -EFAULT;
@@ -3260,11 +3281,11 @@ int sys_getaddrinfo(const char *nodename,
     char *k_nodename = NULL;
     char *k_servname = NULL;
     struct addrinfo *k_hints = NULL;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int err;
 #endif
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     if (!lwp_user_accessable((void *)res, sizeof(*res)))
     {
         SET_ERRNO(EFAULT);
@@ -3273,7 +3294,7 @@ int sys_getaddrinfo(const char *nodename,
 #endif
     if (nodename)
     {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         lwp_user_strlen(nodename, &err);
         if (err)
         {
@@ -3290,7 +3311,7 @@ int sys_getaddrinfo(const char *nodename,
     }
     if (servname)
     {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         lwp_user_strlen(servname, &err);
         if (err)
         {
@@ -3308,7 +3329,7 @@ int sys_getaddrinfo(const char *nodename,
 
     if (hints)
     {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         if (!lwp_user_accessable((void *)hints, sizeof(*hints)))
         {
             SET_ERRNO(EFAULT);
@@ -3383,7 +3404,7 @@ int sys_gethostbyname2_r(const char *name, int af, struct hostent *ret,
     char *k_name  = NULL;
     int a_err = 0;
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     if (!lwp_user_accessable((void *)err, sizeof(*err)))
     {
         SET_ERRNO(EFAULT);
@@ -3485,7 +3506,6 @@ char *sys_getcwd(char *buf, size_t size)
     {
         return RT_NULL;
     }
-
     getcwd(buf, size);
 
     return (char *)strlen(buf);
@@ -3493,7 +3513,7 @@ char *sys_getcwd(char *buf, size_t size)
 
 int sys_chdir(const char *path)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int err = 0;
 
     lwp_user_strlen(path, &err);
@@ -3511,7 +3531,7 @@ int sys_chdir(const char *path)
 
 int sys_mkdir(const char *path, mode_t mode)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int err = 0;
 
     lwp_user_strlen(path, &err);
@@ -3529,7 +3549,7 @@ int sys_mkdir(const char *path, mode_t mode)
 
 int sys_rmdir(const char *path)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int err = 0;
 
     lwp_user_strlen(path, &err);
@@ -3563,10 +3583,12 @@ int sys_getdents(int fd, struct libc_dirent *dirp, size_t nbytes)
     size_t rtt_nbytes = 0;
     struct dirent *rtt_dirp;
 
+#ifdef RT_USING_USERSPACE
     if (!lwp_user_accessable((void *)dirp, sizeof(struct libc_dirent)))
     {
         return -EFAULT;
     }
+#endif
 
     if (cnt == 0)
     {
@@ -3618,11 +3640,12 @@ int sys_set_tid_address(int *tidptr)
 {
     rt_thread_t thread;
 
+#ifdef RT_USING_USERSPACE
     if (!lwp_user_accessable((void *)tidptr, sizeof(int)))
     {
         return -EFAULT;
     }
-
+#endif
     thread = rt_thread_self();
     thread->clear_child_tid = tidptr;
     return thread->tid;
@@ -3637,7 +3660,7 @@ int sys_gettid(void)
 int sys_access(const char *filename, int mode)
 {
     int ret = 0;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     rt_size_t len = 0;
     char *kname = RT_NULL;
     int a_err = 0;
@@ -3695,7 +3718,7 @@ int sys_clock_settime(clockid_t clk, const struct timespec *ts)
         return -ENODEV;
     }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     size_t size = sizeof(struct timespec);
     struct timespec *kts = NULL;
 
@@ -3737,7 +3760,7 @@ int sys_clock_gettime(clockid_t clk, struct timespec *ts)
     }
     ret = rt_device_control(device, RT_DEVICE_CTRL_RTC_GET_TIME, &now);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     size_t size = sizeof(struct timespec);
     struct timespec *kts = NULL;
 
@@ -3770,7 +3793,7 @@ int sys_clock_gettime(clockid_t clk, struct timespec *ts)
 
 int sys_clock_getres(clockid_t clk, struct timespec *ts)
 {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     struct timespec kts;
     size_t size = sizeof(struct timespec);
 
@@ -3787,7 +3810,6 @@ int sys_clock_getres(clockid_t clk, struct timespec *ts)
     {
         return -EFAULT;
     }
-
     ts->tv_sec = 1;
     ts->tv_nsec = 0;
 #endif
@@ -3797,7 +3819,7 @@ int sys_clock_getres(clockid_t clk, struct timespec *ts)
 int sys_rename(const char *oldpath, const char *newpath)
 {
     int ret = -1;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     int err;
 
     lwp_user_strlen(oldpath, &err);
@@ -3943,6 +3965,11 @@ const static void* func_table[] =
     (void *)sys_shm_free,
     (void *)sys_shm_retain,
     (void *)sys_notimpl,
+#else
+    (void *)sys_notimpl,      /* 55 */
+    (void *)sys_notimpl,
+    (void *)sys_notimpl,
+    (void *)sys_notimpl,
 #endif /* RT_LWP_USING_SHM */
 #endif /* ARCH_ARM_MMU */
     (void *)sys_device_init,

+ 1 - 1
components/lwp/lwp_tid.c

@@ -13,7 +13,7 @@
 
 #include "lwp.h"
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include "lwp_user_mm.h"
 
 #ifdef RT_USING_GDBSERVER

+ 1 - 1
components/lwp/lwp_user_mm.c

@@ -15,7 +15,7 @@
 #include <rtthread.h>
 #include <rthw.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #include <mmu.h>
 #include <page.h>

+ 1 - 1
components/lwp/lwp_user_mm.h

@@ -14,7 +14,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <lwp.h>
 #include <lwp_mm_area.h>
 

+ 1 - 0
include/libc/libc_signal.h

@@ -165,6 +165,7 @@ struct sigaction
 {
     _sig_func_ptr sa_handler;
     sigset_t sa_mask;
+    
     int sa_flags;
 };
 

+ 1 - 1
include/rtdef.h

@@ -744,7 +744,7 @@ struct rt_thread
 
     struct rt_wakeup wakeup;                            /**< wakeup data */
     int exit_request;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #ifdef RT_USING_GDBSERVER
     int step_exec;
     int debug_attach_req;

+ 11 - 2
libcpu/Kconfig

@@ -36,6 +36,10 @@ config ARCH_ARM_MPU
     bool
     depends on ARCH_ARM
 
+config ARCH_MM_MPU
+    bool
+    depends on ARCH_ARM_MPU
+
 config ARCH_ARM_CORTEX_M4
     bool
     select ARCH_ARM_CORTEX_M
@@ -55,15 +59,20 @@ config ARCH_ARM_MMU
     select RT_USING_CACHE
     depends on ARCH_ARM
 
+config RT_USING_USERSPACE
+    bool "Isolated user space"
+    default n
+    depends on ARCH_ARM_MMU
+
 config KERNEL_VADDR_START
     hex "The virtural address of kernel start"
     default 0xc0000000
-    depends on ARCH_ARM_MMU
+    depends on RT_USING_USERSPACE
 
 config PV_OFFSET
     hex "The offset of kernel physical address and virtural address"
     default 0
-    depends on ARCH_ARM_MMU
+    depends on RT_USING_USERSPACE
 
 config RT_IOREMAP_LATE
     bool "Support to create IO mapping in the kernel address space after system initlalization."

+ 1 - 1
libcpu/aarch64/common/interrupt.c

@@ -94,7 +94,7 @@ void rt_hw_interrupt_init(void)
     rt_memset(isr_table, 0x00, sizeof(isr_table));
 
     /* initialize ARM GIC */
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     gic_dist_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_dist_base(), 0x2000, MMU_MAP_K_DEVICE);
     gic_cpu_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_cpu_base(), 0x1000, MMU_MAP_K_DEVICE);
 #else

+ 6 - 6
libcpu/aarch64/common/mmu.c

@@ -15,7 +15,7 @@
 #include "cp15.h"
 #include "mmu.h"
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include "page.h"
 #endif
 
@@ -548,7 +548,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
 {
     size_t loop_va;
@@ -627,7 +627,7 @@ static void rt_hw_cpu_tlb_invalidate(void)
     __asm__ volatile("tlbi vmalle1\n dsb sy\n isb sy\n");
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     size_t pa_s, pa_e;
@@ -698,7 +698,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
 }
 #endif
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
 {
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
@@ -794,7 +794,7 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
     rt_hw_cpu_tlb_invalidate();
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     void *ret;
@@ -887,7 +887,7 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
     return ret;
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off)
 {
     int ret;

+ 1 - 1
libcpu/aarch64/common/mmu.h

@@ -123,7 +123,7 @@ void rt_hw_mmu_setup(struct mem_desc *mdesc, int desc_nr);
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
 int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr);
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
 #else

+ 1 - 1
libcpu/aarch64/common/page.c

@@ -12,7 +12,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #include <page.h>
 #include <mmu.h>

+ 1 - 1
libcpu/aarch64/common/page.h

@@ -11,7 +11,7 @@
 #ifndef  __PAGE_H__
 #define  __PAGE_H__
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 typedef struct tag_region
 {

+ 2 - 2
libcpu/arm/cortex-a/backtrace.c

@@ -17,7 +17,7 @@
 #define DBG_LVL    DBG_INFO
 #include <rtdbg.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <lwp.h>
 #include <lwp_user_mm.h>
 #include <lwp_arch.h>
@@ -522,7 +522,7 @@ void rt_unwind(struct rt_hw_exp_stack *regs, unsigned int pc_adj)
     e_regs.ARM_sp = regs->sp;
     e_regs.ARM_lr = regs->lr;
     e_regs.ARM_pc = regs->pc - pc_adj;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     if (!lwp_user_accessable((void *)e_regs.ARM_pc, sizeof (void *)))
     {
         e_regs.ARM_pc = regs->lr - sizeof(void *);

+ 8 - 8
libcpu/arm/cortex-a/context_gcc.S

@@ -45,12 +45,12 @@ rt_hw_context_switch_to:
 #ifdef RT_USING_SMP
     mov     r0, r1
     bl      rt_cpus_lock_status_restore
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     bl      rt_thread_self
     bl      lwp_user_setting_restore
 #endif
 #else
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     bl      rt_thread_self
     mov     r4, r0
     bl      lwp_mmu_switch
@@ -108,12 +108,12 @@ rt_hw_context_switch:
 #ifdef RT_USING_SMP
     mov     r0, r2
     bl      rt_cpus_lock_status_restore
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     bl      rt_thread_self
     bl      lwp_user_setting_restore
 #endif
 #else
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     bl      rt_thread_self
     mov     r4, r0
     bl      lwp_mmu_switch
@@ -151,7 +151,7 @@ rt_hw_context_switch_interrupt:
      */
 #ifdef RT_USING_LWP
     push {r0 - r3, lr}
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     bl rt_thread_self
     bl lwp_user_setting_save
 #endif
@@ -161,11 +161,11 @@ rt_hw_context_switch_interrupt:
 
     ldr     sp, [r2]
     mov     r0, r3
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     mov     r4, r0
 #endif
     bl      rt_cpus_lock_status_restore
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     mov     r0, r4
     bl      lwp_user_setting_restore
 #endif
@@ -187,7 +187,7 @@ rt_hw_context_switch_interrupt:
     str r0, [r3]
     mov r3, #1              @ set rt_thread_switch_interrupt_flag to 1
     str r3, [ip]
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     push {r1, lr}
     mov r0, r2
     bl lwp_user_setting_save

+ 1 - 1
libcpu/arm/cortex-a/interrupt.c

@@ -94,7 +94,7 @@ void rt_hw_interrupt_init(void)
     rt_memset(isr_table, 0x00, sizeof(isr_table));
 
     /* initialize ARM GIC */
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     gic_dist_base = (uint32_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_dist_base(), 0x2000, MMU_MAP_K_RW);
     gic_cpu_base = (uint32_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_cpu_base(), 0x1000, MMU_MAP_K_RW);
 #else

+ 15 - 15
libcpu/arm/cortex-a/mmu.c

@@ -15,7 +15,7 @@
 #include "cp15.h"
 #include "mmu.h"
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include "page.h"
 #endif
 
@@ -248,7 +248,7 @@ int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
     size_t l1_off;
     size_t *mmu_l1, *mmu_l2;
     size_t sections;
-#ifndef ARCH_ARM_MMU
+#ifndef RT_USING_USERSPACE
     size_t *ref_cnt;
 #endif
 
@@ -276,7 +276,7 @@ int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
         mmu_l1 =  (size_t*)mmu_info->vtable + l1_off;
 
         RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         mmu_l2 = (size_t*)rt_pages_alloc(0);
 #else
         mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
@@ -297,7 +297,7 @@ int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
             return -1;
         }
 
-#ifndef ARCH_ARM_MMU
+#ifndef RT_USING_USERSPACE
         ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
         *ref_cnt = 1;
 #endif
@@ -371,7 +371,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
 {
     size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
@@ -424,7 +424,7 @@ static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
     size_t l1_off, l2_off;
     size_t *mmu_l1, *mmu_l2;
-#ifndef ARCH_ARM_MMU
+#ifndef RT_USING_USERSPACE
     size_t *ref_cnt;
 #endif
 
@@ -459,7 +459,7 @@ static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages
             /* cache maintain */
             rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
             if (rt_pages_free(mmu_l2, 0))
             {
                 *mmu_l1 = 0;
@@ -488,7 +488,7 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
     size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
     size_t l1_off, l2_off;
     size_t *mmu_l1, *mmu_l2;
-#ifndef ARCH_ARM_MMU
+#ifndef RT_USING_USERSPACE
     size_t *ref_cnt;
 #endif
 
@@ -506,13 +506,13 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
         if (*mmu_l1 & ARCH_MMU_USED_MASK)
         {
             mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
             rt_page_ref_inc(mmu_l2, 0);
 #endif
         }
         else
         {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
             mmu_l2 = (size_t*)rt_pages_alloc(0);
 #else
             mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
@@ -535,7 +535,7 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
             }
         }
 
-#ifndef ARCH_ARM_MMU
+#ifndef RT_USING_USERSPACE
         ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
         (*ref_cnt)++;
 #endif
@@ -555,7 +555,7 @@ static void rt_hw_cpu_tlb_invalidate(void)
     asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     size_t pa_s, pa_e;
@@ -626,7 +626,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
 }
 #endif
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
 {
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
@@ -753,7 +753,7 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
     rt_hw_cpu_tlb_invalidate();
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     void *ret;
@@ -850,7 +850,7 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
     return ret;
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off) {
     unsigned int va;
 

+ 2 - 2
libcpu/arm/cortex-a/mmu.h

@@ -19,7 +19,7 @@
 #define SHAREDEVICE    (1<<2)  /* shared device */
 #define STRONGORDER    (0<<2)  /* strong ordered */
 #define XN             (1<<4)  /* eXecute Never */
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #define AP_RW          (1<<10) /* supervisor=RW, user=No */
 #define AP_RO          ((1<<10) |(1 << 15)) /* supervisor=RW, user=No */
 #else
@@ -98,7 +98,7 @@ typedef struct
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
 int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr);
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
 #else

+ 1 - 1
libcpu/arm/cortex-a/page.c

@@ -12,7 +12,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #include <page.h>
 #include <mmu.h>

+ 1 - 1
libcpu/arm/cortex-a/page.h

@@ -11,7 +11,7 @@
 #ifndef  __PAGE_H__
 #define  __PAGE_H__
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 typedef struct tag_region
 {

+ 9 - 9
libcpu/arm/cortex-a/start_gcc.S

@@ -43,7 +43,7 @@ stack_start:
 .endr
 stack_top:
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 .data
 .align 14
 init_mtbl:
@@ -122,7 +122,7 @@ continue:
     dsb
     isb
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     ldr r5, =PV_OFFSET
 
     mov r7, #0x100000
@@ -194,7 +194,7 @@ bss_loop:
     ldr r1, [r1]
     bl rt_hw_init_mmu_table
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     ldr r0, =MMUTable     /* vaddr    */
     add r0, r5            /* to paddr */
     bl  switch_mmu
@@ -252,7 +252,7 @@ stack_setup:
     msr     cpsr_c, #Mode_SVC|I_Bit|F_Bit
     bx      lr
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 .align 2
 .global enable_mmu
 enable_mmu:
@@ -476,7 +476,7 @@ rt_hw_context_switch_interrupt_do:
     ldr     sp,  [r6]       /* get new task's stack pointer */
 
     bl      rt_thread_self
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     mov     r4, r0
     bl      lwp_mmu_switch
     mov     r0, r4
@@ -570,7 +570,7 @@ vector_undef:
     .globl  vector_pabt
 vector_pabt:
     push_svc_reg
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     /* cp Mode_ABT stack to SVC */
     sub     sp, sp, #17 * 4     /* Sizeof(struct rt_hw_exp_stack)  */
     mov     lr, r0
@@ -598,7 +598,7 @@ vector_pabt:
     .globl  vector_dabt
 vector_dabt:
     push_svc_reg
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     /* cp Mode_ABT stack to SVC */
     sub     sp, sp, #17 * 4    /* Sizeof(struct rt_hw_exp_stack)  */
     mov     lr, r0
@@ -637,7 +637,7 @@ rt_clz:
 
 .global rt_secondary_cpu_entry
 rt_secondary_cpu_entry:
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     ldr     r5, =PV_OFFSET
 
     ldr     lr, =after_enable_mmu2
@@ -681,7 +681,7 @@ after_enable_mmu2:
     ldr sp, =abt_stack_2_limit
 
     /* initialize the mmu table and enable mmu */
-#ifndef ARCH_ARM_MMU
+#ifndef RT_USING_USERSPACE
     bl rt_hw_mmu_init
 #endif
 

+ 1 - 1
libcpu/arm/cortex-a/trap.c

@@ -163,7 +163,7 @@ void rt_hw_show_register(struct rt_hw_exp_stack *regs)
     rt_kprintf("fp :0x%08x ip :0x%08x\n", regs->fp, regs->ip);
     rt_kprintf("sp :0x%08x lr :0x%08x pc :0x%08x\n", regs->sp, regs->lr, regs->pc);
     rt_kprintf("cpsr:0x%08x\n", regs->cpsr);
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     {
         uint32_t v;
         asm volatile ("MRC p15, 0, %0, c5, c0, 0":"=r"(v));

+ 1 - 1
libcpu/arm/cortex-a/vector_gcc.S

@@ -15,7 +15,7 @@
 
 .globl system_vectors
 system_vectors:
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     b _reset
 #else
     ldr pc, _vector_reset

+ 8 - 8
libcpu/mips/gs264/mmu.c

@@ -318,7 +318,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
 {
     size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
@@ -397,7 +397,7 @@ static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages
             (*ref_cnt)--;
             if (!*ref_cnt)
             {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
                 rt_pages_free(mmu_l2, 0);
 #else
                 rt_free_align(mmu_l2);
@@ -437,7 +437,7 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
         }
         else
         {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
             mmu_l2 = (size_t*)rt_pages_alloc(0);
 #else
             mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
@@ -479,7 +479,7 @@ static void rt_hw_cpu_tlb_invalidate(void)
     mmu_clear_itlb();
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     size_t pa_s, pa_e;
@@ -550,7 +550,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
 }
 #endif
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
 {
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
@@ -694,7 +694,7 @@ void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
 {   
     void *v_addr = 0;
 
-    #ifdef ARCH_ARM_MMU
+    #ifdef RT_USING_USERSPACE
     extern rt_mmu_info mmu_info;
     v_addr = rt_hw_mmu_map(&mmu_info, 0, p_addr, size, MMU_MAP_K_RW);
     #else
@@ -704,7 +704,7 @@ void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
     return v_addr;
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     void *ret;
@@ -801,7 +801,7 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
     return ret;
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off) {
     unsigned int va;
 

+ 2 - 2
libcpu/mips/gs264/mmu.h

@@ -20,7 +20,7 @@
 #define SHAREDEVICE    (1<<2)                /* shared device */
 #define STRONGORDER    (0<<2)                /* strong ordered */
 #define XN             (1<<4)                /* execute Never */
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #define AP_RW          (1<<10)               /* supervisor=RW, user=No */
 #define AP_RO          ((1<<10) |(1 << 15))  /* supervisor=RW, user=No */
 #else
@@ -98,7 +98,7 @@ void *mmu_table_get();
 void switch_mmu(void *mmu_table);
 
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr);
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
 #else

+ 83 - 22
libcpu/risc-v/t-head/c906/cache.c

@@ -6,6 +6,7 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-01-29     lizhirui     first version
+ * 2021-11-05     JasonHu      add c906 cache inst
  */
 
 #include <rthw.h>
@@ -13,36 +14,99 @@
 #include <board.h>
 #include <riscv.h>
 
-rt_inline rt_uint32_t rt_cpu_icache_line_size()
+#define L1_CACHE_BYTES (64)
+
+static void dcache_wb_range(unsigned long start, unsigned long end)
 {
-    return 0;
+    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+    for (; i < end; i += L1_CACHE_BYTES)
+    {
+        /* asm volatile("dcache.cva %0\n"::"r"(i):"memory"); */
+        /*
+         * compiler always use a5 = i.
+         * a6 not used, so we use a6 here.
+         */
+        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
+        asm volatile(".long 0x0257800b");               /* dcache.cva a6 */
+    }
+    asm volatile(".long 0x01b0000b");   /* sync.is */
 }
 
-rt_inline rt_uint32_t rt_cpu_dcache_line_size()
+static void dcache_inv_range(unsigned long start, unsigned long end)
 {
-    return 0;
+    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+    for (; i < end; i += L1_CACHE_BYTES)
+    {
+        /* asm volatile("dcache.iva %0\n"::"r"(i):"memory"); */
+        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
+        asm volatile(".long 0x0268000b");               /* dcache.iva a6 */
+    }
+    asm volatile(".long 0x01b0000b");
+}
+
+static void dcache_wbinv_range(unsigned long start, unsigned long end)
+{
+    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+    for (; i < end; i += L1_CACHE_BYTES)
+    {
+        /* asm volatile("dcache.civa %0\n"::"r"(i):"memory"); */
+        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
+        asm volatile(".long 0x0278000b");               /* dcache.civa a6 */
+    }
+    asm volatile(".long 0x01b0000b");
+}
+
+static void icache_inv_range(unsigned long start, unsigned long end)
+{
+    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+    for (; i < end; i += L1_CACHE_BYTES)
+    {
+        /* asm volatile("icache.iva %0\n"::"r"(i):"memory"); */
+        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
+        asm volatile(".long 0x0308000b");               /* icache.iva a6 */
+    }
+    asm volatile(".long 0x01b0000b");
+}
+
+rt_inline rt_uint32_t rt_cpu_icache_line_size(void)
+{
+    return L1_CACHE_BYTES;
+}
+
+rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
+{
+    return L1_CACHE_BYTES;
 }
 
 void rt_hw_cpu_icache_invalidate(void *addr,int size)
 {
-    asm volatile("fence");
+    icache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
 }
 
 void rt_hw_cpu_dcache_invalidate(void *addr,int size)
 {
-    asm volatile("fence");
+    dcache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
 }
 
 void rt_hw_cpu_dcache_clean(void *addr,int size)
 {
-    asm volatile("fence");
+    dcache_wb_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
+}
+
+void rt_hw_cpu_dcache_clean_flush(void *addr,int size)
+{
+    dcache_wbinv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
 }
 
 void rt_hw_cpu_icache_ops(int ops,void *addr,int size)
 {
     if(ops == RT_HW_CACHE_INVALIDATE)
     {
-        rt_hw_cpu_icache_invalidate(addr,size);
+        rt_hw_cpu_icache_invalidate(addr, size);
     }
 }
 
@@ -50,33 +114,30 @@ void rt_hw_cpu_dcache_ops(int ops,void *addr,int size)
 {
     if(ops == RT_HW_CACHE_FLUSH)
     {
-        rt_hw_cpu_dcache_clean(addr,size);
+        rt_hw_cpu_dcache_clean(addr, size);
     }
     else
     {
-        rt_hw_cpu_dcache_invalidate(addr,size);
+        rt_hw_cpu_dcache_invalidate(addr, size);
     }
 }
 
-void rt_hw_cpu_dcache_flush_all()
-{
-    asm volatile("fence");
-    //asm volatile("dcache.call");
-}
-
-void rt_hw_cpu_icache_invalidate_all()
+void rt_hw_cpu_dcache_clean_all(void)
 {
-    asm volatile("fence");
+    /* asm volatile("dcache.call\n":::"memory"); */
+    asm volatile(".long 0x0010000b\n":::"memory");
 }
 
-rt_base_t rt_hw_cpu_icache_status()
+void rt_hw_cpu_dcache_invalidate_all(void)
 {
-    return 0;
+    /* asm volatile("dcache.ciall\n":::"memory"); */
+    asm volatile(".long 0x0030000b\n":::"memory");
 }
 
-rt_base_t rt_hw_cpu_dcache_status()
+void rt_hw_cpu_icache_invalidate_all(void)
 {
-    return 0;
+    /* asm volatile("icache.iall\n":::"memory"); */
+    asm volatile(".long 0x0100000b\n":::"memory");
 }
 
 int sys_cacheflush(void *addr, int size, int cache)
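
Note: the routines above hand-encode the T-Head extension cache instructions (dcache.cva/iva/civa, icache.iva, dcache.call/ciall, icache.iall, sync.is) as .long opcodes, since a stock GNU assembler may not accept those mnemonics. Below is a minimal usage sketch, not part of this commit, showing how a driver would typically reach them through the generic rthw.h cache API; the buffer, its 64-byte (cache line) alignment and the dma_start_tx()/dma_start_rx() calls are hypothetical placeholders.

/* Minimal sketch, assuming a DMA-capable device driver on the C906; only the
 * rt_hw_cpu_dcache_ops() API and the RT_HW_CACHE_* ops come from rthw.h, the
 * rest is illustrative. */
#include <rtthread.h>
#include <rthw.h>

#define DMA_BUF_SIZE 4096
static rt_uint8_t dma_buf[DMA_BUF_SIZE] __attribute__((aligned(64)));

static void dma_tx_prepare(void)
{
    /* CPU filled dma_buf: write dirty lines back so the device reads fresh data */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, dma_buf, DMA_BUF_SIZE);
    /* dma_start_tx(dma_buf, DMA_BUF_SIZE);  hypothetical device call */
}

static void dma_rx_complete(void)
{
    /* device filled dma_buf: drop stale cache lines before the CPU reads it */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, dma_buf, DMA_BUF_SIZE);
    /* consume dma_buf ... */
}

With the implementation above, RT_HW_CACHE_FLUSH takes the write-back path (dcache.cva per line, then sync.is) and RT_HW_CACHE_INVALIDATE the invalidate path.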

+ 2 - 2
libcpu/risc-v/t-head/c906/context_gcc.S

@@ -21,7 +21,7 @@ rt_hw_context_switch_to:
     la s0, rt_current_thread
     LOAD s1, (s0)
 
-    #ifdef ARCH_ARM_MMU
+    #ifdef RT_USING_USERSPACE
         mv a0, s1
         jal lwp_mmu_switch
     #endif
@@ -54,7 +54,7 @@ rt_hw_context_switch:
     la s0, rt_current_thread
     LOAD s1, (s0)
 
-    #ifdef ARCH_ARM_MMU
+    #ifdef RT_USING_USERSPACE
         mv a0, s1
         jal lwp_mmu_switch
     #endif

+ 2 - 2
libcpu/risc-v/t-head/c906/interrupt_gcc.S

@@ -92,7 +92,7 @@ copy_context_loop_interrupt:
     LOAD  s1, 0(s0)
     LOAD  sp, 0(s1)
 
-    #ifdef ARCH_ARM_MMU
+    #ifdef RT_USING_USERSPACE
         mv a0, s1
         jal rt_thread_sp_to_thread
         jal lwp_mmu_switch
@@ -146,7 +146,7 @@ copy_context_loop:
 .global syscall_exit
 syscall_exit:
 
-    #if defined(ARCH_ARM_MMU) && defined(RT_USING_SIGNALS)
+    #if defined(RT_USING_USERSPACE) && defined(RT_USING_SIGNALS)
         LOAD s0, 2 * REGBYTES(sp)
         andi s0, s0, 0x100
         bnez s0, dont_ret_to_user
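
For reference, the test above loads the saved sstatus (kept at offset 2 * REGBYTES in the trap frame) and checks bit 0x100, which is SSTATUS.SPP: if the trap was taken from supervisor mode, the user-return path is skipped. A C-equivalent sketch of that check (the frame layout below is only an illustration; the 0x100/SPP test itself is what the assembly does):

#define SSTATUS_SPP 0x100UL   /* sstatus bit 8: previous privilege level */

/* illustrative only: 'frame' stands for the register save area on sp */
static int trapped_from_supervisor(const unsigned long *frame)
{
    unsigned long sstatus = frame[2];      /* 2 * REGBYTES(sp) in the asm */
    return (sstatus & SSTATUS_SPP) != 0;   /* set => trap came from S-mode */
}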

+ 2 - 4
libcpu/risc-v/t-head/c906/mmu.c

@@ -14,15 +14,13 @@
 #include "page.h"
 #include <stdlib.h>
 #include <string.h>
+#include <cache.h>
 
 #include "riscv.h"
 #include "riscv_mmu.h"
 #include "mmu.h"
 
 void *current_mmu_table = RT_NULL;
-void rt_hw_cpu_icache_invalidate_all();
-void rt_hw_cpu_dcache_flush_all();
-void rt_hw_cpu_dcache_clean(void *addr,rt_size_t size);
 
 static void rt_hw_cpu_tlb_invalidate()
 {
@@ -41,7 +39,7 @@ void switch_mmu(void *mmu_table)
     current_mmu_table = mmu_table;
     RT_ASSERT(__CHECKALIGN(mmu_table,PAGE_OFFSET_BIT));
     mmu_set_pagetable((rt_ubase_t)mmu_table);
-    rt_hw_cpu_dcache_flush_all();
+    rt_hw_cpu_dcache_clean_all();
     rt_hw_cpu_icache_invalidate_all();
 }
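
The three local extern prototypes are dropped in favour of <cache.h>. That header is not shown in this diff; a minimal sketch of the declarations it is assumed to provide for the C906 port, matching the definitions added in cache.c above:

#ifndef CACHE_H__
#define CACHE_H__

void rt_hw_cpu_dcache_clean(void *addr, int size);
void rt_hw_cpu_dcache_clean_all(void);
void rt_hw_cpu_dcache_invalidate_all(void);
void rt_hw_cpu_icache_invalidate_all(void);

#endif /* CACHE_H__ */

switch_mmu() then calls rt_hw_cpu_dcache_clean_all() in place of the removed rt_hw_cpu_dcache_flush_all(), followed by a full I-cache invalidate, after installing the new page table.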
 

+ 3 - 3
libcpu/risc-v/t-head/c906/trap.c

@@ -26,7 +26,7 @@
 #include "rt_interrupt.h"
 #include "plic.h"
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     #include "riscv_mmu.h"
     #include "mmu.h"
     #include "page.h"
@@ -144,7 +144,7 @@ void dump_regs(struct rt_hw_stack_frame *regs)
     rt_size_t satp_v = read_csr(satp);
     rt_kprintf("satp = 0x%p\n",satp_v);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     rt_kprintf("\tCurrent Page Table(Physical) = 0x%p\n",__MASKVALUE(satp_v,__MASK(44)) << PAGE_OFFSET_BIT);
     rt_kprintf("\tCurrent ASID = 0x%p\n",__MASKVALUE(satp_v >> 44,__MASK(16)) << PAGE_OFFSET_BIT);
 #endif
@@ -273,7 +273,7 @@ void handle_trap(rt_size_t scause,rt_size_t stval,rt_size_t sepc,struct rt_hw_st
     }
     else
     {
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
         /* page fault */
         if (id == EP_LOAD_PAGE_FAULT ||
             id == EP_STORE_PAGE_FAULT)

+ 2 - 2
libcpu/risc-v/virt64/context_gcc.S

@@ -21,7 +21,7 @@ rt_hw_context_switch_to:
     la s0, rt_current_thread
     LOAD s1, (s0)
 
-    #ifdef ARCH_ARM_MMU
+    #ifdef RT_USING_USERSPACE
         mv a0, s1
         jal lwp_mmu_switch
     #endif
@@ -54,7 +54,7 @@ rt_hw_context_switch:
     la s0, rt_current_thread
     LOAD s1, (s0)
 
-    #ifdef ARCH_ARM_MMU
+    #ifdef RT_USING_USERSPACE
         mv a0, s1
         jal lwp_mmu_switch
     #endif

+ 2 - 2
libcpu/risc-v/virt64/interrupt_gcc.S

@@ -92,7 +92,7 @@ copy_context_loop_interrupt:
     LOAD  s1, 0(s0)
     LOAD  sp, 0(s1)
 
-    #ifdef ARCH_ARM_MMU
+    #ifdef RT_USING_USERSPACE
         mv a0, s1
         jal rt_thread_sp_to_thread
         jal lwp_mmu_switch
@@ -146,7 +146,7 @@ copy_context_loop:
 .global syscall_exit
 syscall_exit:
 
-    #if defined(ARCH_ARM_MMU) && defined(RT_USING_SIGNALS)
+    #if defined(RT_USING_USERSPACE) && defined(RT_USING_SIGNALS)
         LOAD s0, 2 * REGBYTES(sp)
         andi s0, s0, 0x100
         bnez s0, error

+ 4 - 4
libcpu/x86/i386/cpuport.c

@@ -82,7 +82,7 @@ void rt_hw_context_switch_to(rt_ubase_t to)
 {
     rt_thread_t to_thread = rt_thread_sp_to_thread((void *)to);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     /**
      * update kernel esp0 to the "to" thread's kernel stack top, so that the CPU can
      * load the correct kernel stack from tss esp0 when an interrupt occurs in user mode.
@@ -90,7 +90,7 @@ void rt_hw_context_switch_to(rt_ubase_t to)
     rt_ubase_t stacktop = (rt_ubase_t)(to_thread->stack_addr + to_thread->stack_size);
     rt_hw_tss_set_kstacktop(stacktop);
     lwp_mmu_switch(to_thread);  /* switch mmu before switch context */
-#endif /* ARCH_ARM_MMU */
+#endif /* RT_USING_USERSPACE */
     rt_hw_context_switch_to_real(to);
 }
 
@@ -103,7 +103,7 @@ void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to)
     lwp_user_setting_save(from_thread);
 #endif /* RT_USING_LWP */
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     /**
      * update kernel esp0 to the "to" thread's kernel stack top, so that the CPU can
      * load the correct kernel stack from tss esp0 when an interrupt occurs in user mode.
@@ -111,7 +111,7 @@ void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to)
     rt_ubase_t stacktop = (rt_ubase_t)(to_thread->stack_addr + to_thread->stack_size);
     rt_hw_tss_set_kstacktop(stacktop);
     lwp_mmu_switch(to_thread);  /* switch mmu before switch context */
-#endif /* ARCH_ARM_MMU */
+#endif /* RT_USING_USERSPACE */
 
     rt_hw_context_switch_real(from, to);
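
Both switch paths now set TSS esp0 to the incoming thread's kernel stack top and switch the MMU before the actual context switch. rt_hw_tss_set_kstacktop() itself is not shown in this commit; the sketch below only illustrates its assumed intent, with a placeholder structure and symbol names.

/* illustrative sketch only; the real TSS structure and symbol names in the
 * x86 port may differ */
#include <rtthread.h>

struct tss_sketch
{
    rt_uint32_t backlink;
    rt_uint32_t esp0;     /* kernel stack pointer loaded on ring-3 -> ring-0 */
    rt_uint32_t ss0;      /* kernel stack segment */
    /* ... remaining i386 TSS fields omitted ... */
};

static struct tss_sketch tss;

static void tss_set_kstacktop_sketch(rt_ubase_t top)
{
    /* When an interrupt or exception arrives in user mode, the CPU switches
     * to SS0:ESP0 from the current TSS, so esp0 must always point at the
     * kernel stack top of the thread about to run. */
    tss.esp0 = (rt_uint32_t)top;
}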
 

+ 2 - 2
libcpu/x86/i386/gate.c

@@ -146,10 +146,10 @@ void rt_hw_gate_init(void)
     gate_set(IDT_OFF2PTR(idt, IRQ_INTR_BASE+14), rt_hw_intr_entry0x2e, KERNEL_CODE_SEL, DA_386_INTR_GATE, DA_GATE_DPL0);
     gate_set(IDT_OFF2PTR(idt, IRQ_INTR_BASE+15), rt_hw_intr_entry0x2f, KERNEL_CODE_SEL, DA_386_INTR_GATE, DA_GATE_DPL0);
     /* interrupt gate for system call handling */
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     extern void hw_syscall_entry(void);
     gate_set(IDT_OFF2PTR(idt, SYSCALL_INTR_BASE), hw_syscall_entry, KERNEL_CODE_SEL, DA_386_INTR_GATE, DA_GATE_DPL3);
-#endif /* ARCH_ARM_MMU */
+#endif /* RT_USING_USERSPACE */
 
     extern void load_new_idt(rt_ubase_t size, rt_ubase_t idtr);
     load_new_idt(IDT_LIMIT, IDT_VADDR);
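
The syscall gate is the only one installed with DA_GATE_DPL3, which allows ring-3 code to raise it with a software interrupt, while the hardware IRQ gates stay at DPL0. Purely as an illustration of the user side (the vector value and the argument registers are assumptions; only the return value in EAX is visible in this commit's syscall dispatcher):

/* hypothetical user-space stub; SYSCALL_VECTOR must match SYSCALL_INTR_BASE
 * in the kernel, and the argument convention here is an assumption */
#define SYSCALL_VECTOR "0x80"

static inline long syscall1(long nr, long arg0)
{
    long ret;
    __asm__ volatile ("int $" SYSCALL_VECTOR
                      : "=a" (ret)              /* return value in EAX */
                      : "a" (nr), "b" (arg0)
                      : "memory");
    return ret;
}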

+ 2 - 2
libcpu/x86/i386/interrupt_gcc.S

@@ -186,7 +186,7 @@ rt_hw_intr_thread_switch:
     movl $rt_hw_intr_exit, %eax
     jmp *%eax
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 .extern rt_hw_syscall_dispath
 
@@ -232,7 +232,7 @@ hw_syscall_entry:
 
 .global syscall_exit
 syscall_exit:
-#endif /* ARCH_ARM_MMU */
+#endif /* RT_USING_USERSPACE */
 .global rt_hw_intr_exit
 rt_hw_intr_exit:
     addl $4, %esp               // skip intr no

+ 10 - 10
libcpu/x86/i386/mmu.c

@@ -18,15 +18,15 @@
 #include "cache.h"
 #include "i386.h"
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include "page.h"
-#endif /* ARCH_ARM_MMU */
+#endif /* RT_USING_USERSPACE */
 
 // #define RT_DEBUG_MMU_X86
 
 static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t npages);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr);
 void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_size_t attr);
 #else
@@ -208,7 +208,7 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_si
     return 0;
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 //check whether the range of virtual addresses is free
 static int check_vaddr(rt_mmu_info *mmu_info,void *va,rt_size_t pages)
 {
@@ -250,7 +250,7 @@ static int check_vaddr(rt_mmu_info *mmu_info,void *va,rt_size_t pages)
 
     return 0;
 }
-#endif  /* ARCH_ARM_MMU */
+#endif  /* RT_USING_USERSPACE */
 
 //find a range of free virtual addresses covering the requested number of pages
 static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
@@ -294,7 +294,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr)
 {
     rt_size_t pa_s,pa_e;
@@ -372,9 +372,9 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
     }
     return 0;
 }
-#endif  /* ARCH_ARM_MMU */
+#endif  /* RT_USING_USERSPACE */
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t npages,rt_size_t attr)
 {
     rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
@@ -471,7 +471,7 @@ void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_s
     }
     return 0;
 }
-#endif  /* ARCH_ARM_MMU */
+#endif  /* RT_USING_USERSPACE */
 
 /**
  * unmap the page at v_addr and free it once unmapped; furthermore, if the page table becomes empty, free the page table as well.
@@ -535,7 +535,7 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size)
     rt_hw_cpu_tlb_invalidate();
 }
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 /**
  * map vaddr in vtable with size and attr; this requires a physical address
  *

+ 2 - 2
libcpu/x86/i386/mmu.h

@@ -132,12 +132,12 @@ void switch_mmu(void *mmu_table);
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info,void *v_address,rt_size_t size,rt_size_t *vtable,rt_size_t pv_off);
 void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info,rt_size_t vaddr_start,rt_size_t size);
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr);
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_size_t attr);
 #else
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr);
-#endif  /* ARCH_ARM_MMU */
+#endif  /* RT_USING_USERSPACE */
 
 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size);
 void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info,void *v_addr);
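
With RT_USING_USERSPACE the mapping call takes an explicit virtual address (and rt_hw_mmu_map_auto() exists for kernel-chosen addresses); without it, only a physical range is supplied and the kernel picks the virtual address itself. A caller-side sketch with placeholder addresses, size and attribute:

#include <rtthread.h>
#include <mmu.h>

/* hypothetical helper: map one 4 KiB device page; mmu_info, the addresses
 * and attr are placeholders, not values taken from this commit */
static void *map_one_device_page(rt_mmu_info *mmu_info, rt_size_t attr)
{
#ifdef RT_USING_USERSPACE
    /* caller chooses the virtual address */
    return rt_hw_mmu_map(mmu_info, (void *)0xF0000000,
                         (void *)0xFE000000, 0x1000, attr);
#else
    /* kernel variant: only the physical range is given */
    return rt_hw_mmu_map(mmu_info, (void *)0xFE000000, 0x1000, attr);
#endif
}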

+ 2 - 2
libcpu/x86/i386/page.c

@@ -14,7 +14,7 @@
 #include <rthw.h>
 #include <board.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #include "page.h"
 #include "mmu.h"
@@ -486,4 +486,4 @@ void rt_page_init(rt_region_t reg)
     rt_pages_alloc(0);
 }
 
-#endif /* ARCH_ARM_MMU */
+#endif /* RT_USING_USERSPACE */

+ 2 - 2
libcpu/x86/i386/syscall_c.c

@@ -17,7 +17,7 @@
 //#define DBG_LEVEL DBG_INFO
 #include <rtdbg.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 
 #include <stdint.h>
 #include <mmu.h>
@@ -91,4 +91,4 @@ void rt_hw_syscall_dispath(struct rt_hw_stack_frame *frame)
     LOG_I("\033[36msyscall deal ok,ret = 0x%p\n\033[37m",frame->eax);
 }
 
-#endif /* ARCH_ARM_MMU */
+#endif /* RT_USING_USERSPACE */

+ 2 - 2
src/cpu.c

@@ -10,7 +10,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
 #include <lwp.h>
 #endif
 
@@ -200,7 +200,7 @@ void rt_cpus_lock_status_restore(struct rt_thread *thread)
 {
     struct rt_cpu* pcpu = rt_cpu_self();
 
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     lwp_mmu_switch(thread);
 #endif
     pcpu->current_thread = thread;

+ 1 - 1
src/mem.c

@@ -641,7 +641,7 @@ void rt_memory_info(rt_uint32_t *total,
 void list_mem(void)
 {
     size_t total_pages = 0, free_pages = 0;
-#ifdef ARCH_ARM_MMU
+#ifdef RT_USING_USERSPACE
     rt_page_get_info(&total_pages, &free_pages);
 #endif