
[modify] tidy up the smart code and optimize the lwp macro dependencies

wuzhonghui 4 years ago
Parent
Commit
cc04773d72
70 changed files with 825 additions and 413 deletions (per file below: additions, deletions, path)
  1. 22 1
      components/dfs/filesystems/romfs/dfs_romfs.c
  2. 1 1
      components/dfs/include/dfs_file.h
  3. 1 2
      components/libc/compilers/newlib/SConscript
  4. 44 43
      components/lwp/Kconfig
  5. 7 2
      components/lwp/SConscript
  6. 1 1
      components/lwp/arch/aarch64/common/reloc.c
  7. 1 1
      components/lwp/arch/aarch64/cortex-a/lwp_arch.c
  8. 1 1
      components/lwp/arch/aarch64/cortex-a/lwp_arch.h
  9. 2 2
      components/lwp/arch/arm/common/reloc.c
  10. 1 1
      components/lwp/arch/arm/cortex-a/lwp_arch.c
  11. 2 1
      components/lwp/arch/arm/cortex-a/lwp_arch.h
  12. 1 1
      components/lwp/arch/risc-v/rv64/lwp_arch.c
  13. 1 1
      components/lwp/arch/risc-v/rv64/lwp_arch.h
  14. 2 2
      components/lwp/arch/risc-v/rv64/reloc.c
  15. 2 2
      components/lwp/arch/x86/i386/lwp_arch.c
  16. 2 2
      components/lwp/arch/x86/i386/lwp_arch.h
  17. 2 2
      components/lwp/arch/x86/i386/reloc.c
  18. 3 3
      components/lwp/ioremap.c
  19. 168 34
      components/lwp/lwp.c
  20. 17 8
      components/lwp/lwp.h
  21. 19 0
      components/lwp/lwp_avl.c
  22. 3 0
      components/lwp/lwp_avl.h
  23. 1 1
      components/lwp/lwp_futex.c
  24. 1 1
      components/lwp/lwp_mm_area.c
  25. 1 1
      components/lwp/lwp_mm_area.h
  26. 26 12
      components/lwp/lwp_pid.c
  27. 1 1
      components/lwp/lwp_pmutex.c
  28. 1 1
      components/lwp/lwp_shm.c
  29. 20 0
      components/lwp/lwp_signal.c
  30. 3 0
      components/lwp/lwp_signal.h
  31. 316 88
      components/lwp/lwp_syscall.c
  32. 1 1
      components/lwp/lwp_tid.c
  33. 1 1
      components/lwp/lwp_user_mm.c
  34. 1 1
      components/lwp/lwp_user_mm.h
  35. 4 1
      include/rtdef.h
  36. 2 7
      libcpu/Kconfig
  37. 1 1
      libcpu/aarch64/common/interrupt.c
  38. 6 6
      libcpu/aarch64/common/mmu.c
  39. 1 1
      libcpu/aarch64/common/mmu.h
  40. 1 1
      libcpu/aarch64/common/page.c
  41. 1 1
      libcpu/aarch64/common/page.h
  42. 2 2
      libcpu/arm/cortex-a/backtrace.c
  43. 8 8
      libcpu/arm/cortex-a/context_gcc.S
  44. 1 1
      libcpu/arm/cortex-a/interrupt.c
  45. 15 15
      libcpu/arm/cortex-a/mmu.c
  46. 2 2
      libcpu/arm/cortex-a/mmu.h
  47. 1 1
      libcpu/arm/cortex-a/page.c
  48. 1 1
      libcpu/arm/cortex-a/page.h
  49. 9 9
      libcpu/arm/cortex-a/start_gcc.S
  50. 1 1
      libcpu/arm/cortex-a/trap.c
  51. 1 1
      libcpu/arm/cortex-a/vector_gcc.S
  52. 8 8
      libcpu/mips/gs264/mmu.c
  53. 2 2
      libcpu/mips/gs264/mmu.h
  54. 22 83
      libcpu/risc-v/t-head/c906/cache.c
  55. 2 2
      libcpu/risc-v/t-head/c906/context_gcc.S
  56. 2 2
      libcpu/risc-v/t-head/c906/interrupt_gcc.S
  57. 4 2
      libcpu/risc-v/t-head/c906/mmu.c
  58. 3 3
      libcpu/risc-v/t-head/c906/trap.c
  59. 2 2
      libcpu/risc-v/virt64/context_gcc.S
  60. 2 2
      libcpu/risc-v/virt64/interrupt_gcc.S
  61. 4 4
      libcpu/x86/i386/cpuport.c
  62. 2 2
      libcpu/x86/i386/gate.c
  63. 2 2
      libcpu/x86/i386/interrupt_gcc.S
  64. 10 10
      libcpu/x86/i386/mmu.c
  65. 2 2
      libcpu/x86/i386/mmu.h
  66. 2 2
      libcpu/x86/i386/page.c
  67. 2 2
      libcpu/x86/i386/syscall_c.c
  68. 2 2
      src/cpu.c
  69. 2 3
      src/mem.c
  70. 17 0
      src/scheduler.c

+ 22 - 1
components/dfs/filesystems/romfs/dfs_romfs.c

@@ -34,7 +34,28 @@ int dfs_romfs_unmount(struct dfs_filesystem *fs)
 
 int dfs_romfs_ioctl(struct dfs_fd *file, int cmd, void *args)
 {
-    return -EIO;
+    int ret = RT_EOK;
+    struct romfs_dirent *dirent;
+
+    dirent = (struct romfs_dirent *)file->fnode->data;
+    RT_ASSERT(dirent != NULL);
+
+    switch (cmd)
+    {
+    case RT_FIOGETADDR:
+        {
+            *(rt_uint32_t*)args = (rt_uint32_t)dirent->data;
+            break;
+        }
+    case RT_FIOFTRUNCATE:
+        {
+            break;
+        }
+    default:
+        ret = -RT_EINVAL;
+        break;
+    }
+    return ret;
 }
 
 rt_inline int check_dirent(struct romfs_dirent *dirent)

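A minimal usage sketch for the new ioctl (illustrative only — the helper name and error handling below are assumptions, not part of this commit): because romfs stores file data contiguously in ROM, RT_FIOGETADDR lets a no-MMU loader obtain the in-place address of an executable, which is how lwp_load() uses it later in this commit.

    /* Hypothetical helper: query the in-ROM address of a file on romfs.
     * Assumes the usual DFS POSIX calls (open/ioctl/close) are available. */
    #include <rtthread.h>
    #include <dfs_posix.h>

    static void *romfs_text_addr(const char *path)
    {
        rt_uint32_t addr = 0;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
        {
            return RT_NULL;
        }
        if (ioctl(fd, RT_FIOGETADDR, &addr) != RT_EOK)
        {
            close(fd);
            return RT_NULL;
        }
        close(fd);

        /* the file's data can be read or executed directly at this address */
        return (void *)addr;
    }
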
+ 1 - 1
components/dfs/include/dfs_file.h

@@ -82,7 +82,7 @@ int dfs_file_ftruncate(struct dfs_fd *fd, off_t length);
 
 /* 0x5254 is just a magic number to make these relatively unique ("RT") */
 #define RT_FIOFTRUNCATE  0x52540000U
-#define RT_FIOGETXIPADDR 0x52540001U
+#define RT_FIOGETADDR    0x52540001U
 
 #ifdef __cplusplus
 }

+ 1 - 2
components/libc/compilers/newlib/SConscript

@@ -18,10 +18,9 @@ if rtconfig.PLATFORM == 'gcc' and GetDepend('RT_USING_LIBC'):
         # musl libc is used as a software library.
         src  = []
         LIBS = []
-    elif not GetDepend('RT_USING_NEWLIB'):
+    elif GetDepend('RT_USING_NEWLIB'):
         # RT_USING_NEWLIB is defined already
         CPPPATH = [cwd]
-        CPPDEFINES = ['RT_USING_NEWLIB']
         group = DefineGroup('newlib', src, depend = ['RT_USING_LIBC'], 
             CPPPATH = CPPPATH, CPPDEFINES = CPPDEFINES, LIBS = LIBS)
 

+ 44 - 43
components/lwp/Kconfig

@@ -1,5 +1,5 @@
-config RT_USING_LWP
-    bool "Using light-weight process"
+menuconfig RT_USING_LWP
+    bool "light-weight process"
     select RT_USING_DFS
     select RT_USING_LIBC
     select RT_USING_POSIX_CLOCKTIME
@@ -12,53 +12,54 @@ if RT_USING_LWP
     config RT_LWP_MAX_NR
         int "The max number of light-weight process"
         default 30
-endif
 
-config LWP_TASK_STACK_SIZE
-    int "The lwp thread kernel stack size"
-    default 16384
-    depends on RT_USING_LWP
+    config LWP_TASK_STACK_SIZE
+        int "The lwp thread kernel stack size"
+        default 16384
 
-config RT_CH_MSG_MAX_NR
-    int "The maximum number of channel messages"
-    default 1024
-    depends on RT_USING_LWP
+    config RT_CH_MSG_MAX_NR
+        int "The maximum number of channel messages"
+        default 1024
 
-config RT_LWP_SHM_MAX_NR
-    int "The maximum number of shared memory"
-    default 64
-    depends on RT_USING_LWP
+    config LWP_CONSOLE_INPUT_BUFFER_SIZE
+        int "The input buffer size of lwp console device"
+        default 1024
 
-config LWP_CONSOLE_INPUT_BUFFER_SIZE
-    int "The input buffer size of lwp console device"
-    default 1024
-    depends on RT_USING_LWP
+    config LWP_TID_MAX_NR
+        int "The maximum number of lwp thread id"
+        default 64
 
-config LWP_TID_MAX_NR
-    int "The maximum number of lwp thread id"
-    default 64
-    depends on RT_USING_LWP
+    if ARCH_ARM_MMU
+        config RT_LWP_SHM_MAX_NR
+            int "The maximum number of shared memory"
+            default 64
+    endif
 
-config LWP_UNIX98_PTY 
-    bool "The unix98 PTY support"
-    default n
-    depends on RT_USING_LWP
+    if ARCH_ARM_MPU
+        config RT_LWP_MPU_MAX_NR
+            int "The maximum number of mpu region"
+            default 2
+
+        config RT_LWP_USING_SHM
+            bool "Enable shared memory"
+            default y
+    endif
 
-config LWP_PTY_INPUT_BFSZ 
-    int "The unix98 PTY input buffer size"
-    default 1024 
-    depends on RT_USING_LWP
-    depends on LWP_UNIX98_PTY
+    config LWP_UNIX98_PTY
+        bool "The unix98 PTY support"
+        default n
 
-config LWP_PTY_PTS_SIZE 
-    int "The unix98 PTY device max num"
-    default 3 
-    depends on RT_USING_LWP
-    depends on LWP_UNIX98_PTY
+    if LWP_UNIX98_PTY
+        config LWP_PTY_INPUT_BFSZ
+            int "The unix98 PTY input buffer size"
+            default 1024
 
-config LWP_PTY_USING_DEBUG 
-    bool "The unix98 PTY debug output"
-    default n 
-    depends on RT_USING_LWP
-    depends on LWP_UNIX98_PTY
-    
+        config LWP_PTY_PTS_SIZE 
+            int "The unix98 PTY device max num"
+            default 3
+
+        config LWP_PTY_USING_DEBUG 
+            bool "The unix98 PTY debug output"
+            default n
+    endif
+endif

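For orientation, a hypothetical rtconfig.h excerpt produced by the reorganized menu when light-weight process support is enabled on an MPU-equipped (no-MMU) target, using the defaults shown above; which symbols are generated depends on ARCH_ARM_MMU / ARCH_ARM_MPU:

    #define RT_USING_LWP
    #define RT_LWP_MAX_NR 30
    #define LWP_TASK_STACK_SIZE 16384
    #define RT_CH_MSG_MAX_NR 1024
    #define LWP_CONSOLE_INPUT_BUFFER_SIZE 1024
    #define LWP_TID_MAX_NR 64
    /* only generated when ARCH_ARM_MPU is set */
    #define RT_LWP_MPU_MAX_NR 2
    #define RT_LWP_USING_SHM
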
+ 7 - 2
components/lwp/SConscript

@@ -1,5 +1,6 @@
 Import('rtconfig')
 from building import *
+import os
 
 cwd     = GetCurrentDir()
 src     = []
@@ -30,10 +31,14 @@ if platform in platform_file.keys(): # support platforms
     if arch in support_arch.keys() and cpu in support_arch[arch]:
         asm_path = 'arch/' + arch + '/' + cpu + '/*_' + platform_file[platform]
         arch_common = 'arch/' + arch + '/' + 'common/*.c'
-        src += Glob('*.c') + Glob(asm_path) + Glob(arch_common)
+        if not GetDepend('ARCH_ARM_MMU'):
+            excluded_files = ['ioremap.c', 'lwp_futex.c', 'lwp_mm_area.c', 'lwp_pmutex.c', 'lwp_shm.c', 'lwp_user_mm.c']
+            src += [f for f in Glob('*.c') if os.path.basename(str(f)) not in excluded_files] + Glob(asm_path) + Glob(arch_common)
+        else:
+            src += Glob('*.c') + Glob(asm_path) + Glob(arch_common)
         src += Glob('arch/' + arch + '/' + cpu + '/*.c')
         CPPPATH = [cwd]
-        CPPPATH += ['arch/' + arch + '/' + cpu]
+        CPPPATH += [cwd + '/arch/' + arch + '/' + cpu]
 
 group = DefineGroup('lwP', src, depend = ['RT_USING_LWP'], CPPPATH = CPPPATH)
 

+ 1 - 1
components/lwp/arch/aarch64/common/reloc.c

@@ -2,7 +2,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <elf.h>
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <mmu.h>
 #include <page.h>
 #endif

+ 1 - 1
components/lwp/arch/aarch64/cortex-a/lwp_arch.c

@@ -11,7 +11,7 @@
 #include <rtthread.h>
 #include <rthw.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #include <mmu.h>
 #include <page.h>

+ 1 - 1
components/lwp/arch/aarch64/cortex-a/lwp_arch.h

@@ -13,7 +13,7 @@
 
 #include <lwp.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #define USER_VADDR_TOP    0x0001000000000000UL
 #define USER_HEAP_VEND    0x0000ffffB0000000UL

+ 2 - 2
components/lwp/arch/arm/common/reloc.c

@@ -2,7 +2,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <lwp_elf.h>
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <mmu.h>
 #include <page.h>
 #endif
@@ -17,7 +17,7 @@ typedef struct
     Elf32_Half st_shndx;
 } Elf32_sym;
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
 {
     size_t rel_off;

+ 1 - 1
components/lwp/arch/arm/cortex-a/lwp_arch.c

@@ -11,7 +11,7 @@
 #include <rtthread.h>
 #include <rthw.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #include <mmu.h>
 #include <page.h>

+ 2 - 1
components/lwp/arch/arm/cortex-a/lwp_arch.h

@@ -12,7 +12,8 @@
 
 #include <lwp.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
+#include "mmu.h"
 
 #define USER_VADDR_TOP    0xC0000000UL
 #define USER_HEAP_VEND    0xB0000000UL

+ 1 - 1
components/lwp/arch/risc-v/rv64/lwp_arch.c

@@ -17,7 +17,7 @@
 #include <rthw.h>
 #include <stddef.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #include <mmu.h>
 #include <page.h>

+ 1 - 1
components/lwp/arch/risc-v/rv64/lwp_arch.h

@@ -12,7 +12,7 @@
 
 #include <lwp.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #define USER_HEAP_VADDR   0x300000000UL
 #define USER_HEAP_VEND 0xffffffffffff0000UL

+ 2 - 2
components/lwp/arch/risc-v/rv64/reloc.c

@@ -2,7 +2,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <elf.h>
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <mmu.h>
 #include <page.h>
 #endif
@@ -17,7 +17,7 @@ typedef struct
     Elf64_Half st_shndx;
 } Elf64_sym;
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf64_sym *dynsym)
 {
     size_t rel_off;

+ 2 - 2
components/lwp/arch/x86/i386/lwp_arch.c

@@ -13,7 +13,7 @@
 #include <rtconfig.h>
 #include <rtdbg.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #include <stackframe.h>
 #include <interrupt.h>
@@ -368,4 +368,4 @@ void lwp_signal_do_return(rt_hw_stack_frame_t *frame)
 }
 #endif /* RT_USING_SIGNALS */
 
-#endif  /* RT_USING_USERSPACE */
+#endif  /* ARCH_ARM_MMU */

+ 2 - 2
components/lwp/arch/x86/i386/lwp_arch.h

@@ -14,7 +14,7 @@
 #include <lwp.h>
 #include <stackframe.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #define USER_VADDR_TOP    0xFFFFF000UL
 #define USER_HEAP_VEND    0xE0000000UL
 #define USER_HEAP_VADDR   0x90000000UL
@@ -49,6 +49,6 @@ rt_inline unsigned long ffz(unsigned long x)
 }
 #endif
 
-#endif  /* RT_USING_USERSPACE */
+#endif  /* ARCH_ARM_MMU */
 
 #endif  /*LWP_ARCH_H__*/

+ 2 - 2
components/lwp/arch/x86/i386/reloc.c

@@ -12,7 +12,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <elf.h>
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <mmu.h>
 #include <page.h>
 #endif
@@ -27,7 +27,7 @@ typedef struct
     Elf32_Half st_shndx;
 } Elf32_sym;
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf32_sym *dynsym)
 {
 

+ 3 - 3
components/lwp/ioremap.c

@@ -10,14 +10,14 @@
 #include <rtthread.h>
 #include <rthw.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <mmu.h>
 #include <lwp_mm_area.h>
 #endif
 
 #include <ioremap.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 static struct lwp_avl_struct *k_map_area;
 extern rt_mmu_info mmu_info;
 
@@ -105,7 +105,7 @@ void *rt_ioremap_cached(void *paddr, size_t size)
 
 void rt_iounmap(volatile void *vaddr)
 {
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     rt_base_t level;
     struct lwp_avl_struct *ma_avl_node;
 

+ 168 - 34
components/lwp/lwp.c

@@ -23,13 +23,12 @@
 #endif
 
 #include "lwp.h"
-#include "lwp_arch.h"
 
 #define DBG_TAG "LWP"
 #define DBG_LVL DBG_WARNING
 #include <rtdbg.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #ifdef RT_USING_GDBSERVER
 #include <hw_breakpoint.h>
 #include <lwp_gdbserver.h>
@@ -39,7 +38,9 @@
 #include <lwp_user_mm.h>
 #endif
 
+#ifdef ARCH_ARM_MMU
 static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
+#endif
 #ifdef DFS_USING_WORKDIR
 extern char working_directory[];
 #endif
@@ -110,14 +111,25 @@ void lwp_set_kernel_sp(uint32_t *sp)
 
 uint32_t *lwp_get_kernel_sp(void)
 {
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     return (uint32_t *)rt_thread_self()->sp;
 #else
-    return (uint32_t *)rt_thread_self()->kernel_sp;
+    uint32_t* kernel_sp;
+    extern rt_uint32_t rt_interrupt_from_thread;
+    extern rt_uint32_t rt_thread_switch_interrupt_flag;
+    if (rt_thread_switch_interrupt_flag)
+    {
+        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
+    }
+    else
+    {
+        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
+    }
+    return kernel_sp;
 #endif
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
 {
     int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
@@ -217,13 +229,17 @@ struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char
 #else
 static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
 {
+#ifdef ARCH_ARM_MMU
     int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
+    struct process_aux *aux;
+#else
+    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
+#endif /* ARCH_ARM_MMU */
     int *args;
     char *str;
     char **new_argve;
     int i;
     int len;
-    struct process_aux *aux;
 
     for (i = 0; i < argc; i++)
     {
@@ -242,6 +258,7 @@ static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **arg
         }
     }
 
+#ifdef ARCH_ARM_MMU
     /* for aux */
     size += sizeof(struct process_aux);
 
@@ -253,6 +270,14 @@ static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **arg
 
     /* argc, argv[], 0, envp[], 0 */
     str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
+#else
+    args = (int *)rt_malloc(size);
+    if (args == RT_NULL)
+    {
+        return RT_NULL;
+    }
+    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
+#endif /* ARCH_ARM_MMU */
 
     new_argve = (char **)&args[1];
     args[0] = argc;
@@ -281,7 +306,7 @@ static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **arg
         }
         new_argve[i] = 0;
     }
-
+#ifdef ARCH_ARM_MMU
     /* aux */
     aux = (struct process_aux *)(new_argve + i);
     aux->item[0].key = AT_EXECFN;
@@ -292,9 +317,16 @@ static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **arg
     lwp->args = args;
 
     return aux;
+#else
+    lwp->args = args;
+    lwp->args_length = size;
+
+    return (struct process_aux *)(new_argve + i);
+#endif /* ARCH_ARM_MMU */
 }
 #endif
 
+#ifdef ARCH_ARM_MMU
 #define check_off(voff, vlen)           \
     do                                  \
     {                                   \
@@ -348,13 +380,13 @@ typedef struct
     Elf_Half st_shndx;
 } Elf_sym;
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
 #else
 void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
 #endif
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 struct map_range
 {
     void *start;
@@ -471,13 +503,11 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
     size_t rel_dyn_size = 0;
     size_t dynsym_off = 0;
     size_t dynsym_size = 0;
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
     void *pa, *va;
     void *va_self;
-#endif
 
-#ifdef RT_USING_USERSPACE
     rt_mmu_info *m_info = &lwp->mmu_info;
 #endif
 
@@ -517,7 +547,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
     }
 
     if ((eheader.e_type != ET_DYN)
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
         && (eheader.e_type != ET_EXEC)
 #endif
     )
@@ -526,7 +556,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         return -RT_ERROR;
     }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     {
         off = eheader.e_phoff;
         for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
@@ -561,7 +591,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
 
         off = eheader.e_phoff;
         process_header_size = eheader.e_phnum * sizeof pheader;
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
         if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
         {
             return -RT_ERROR;
@@ -584,12 +614,12 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         lseek(fd, off, SEEK_SET);
         read_len = load_fread(process_header, 1, process_header_size, fd);
         check_read(read_len, process_header_size);
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
         rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
 #endif
 
         aux->item[1].key = AT_PAGESZ;
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
         aux->item[1].value = ARCH_PAGE_SIZE;
 #else
         aux->item[1].value = RT_MM_PAGE_SIZE;
@@ -598,7 +628,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         {
             uint32_t random_value = rt_tick_get();
             uint8_t *random;
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
             uint8_t *krandom;
 
             random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
@@ -613,7 +643,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
             aux->item[2].value = (size_t)random;
         }
         aux->item[3].key = AT_PHDR;
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
         aux->item[3].value = (size_t)va;
 #else
         aux->item[3].value = (size_t)process_header;
@@ -622,7 +652,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         aux->item[4].value = eheader.e_phnum;
         aux->item[5].key = AT_PHENT;
         aux->item[5].value = sizeof pheader;
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
         rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
 #endif
     }
@@ -631,7 +661,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
     {
         load_off = (size_t)load_addr;
     }
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     else
     {
         /* map user */
@@ -783,7 +813,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
 
             check_off(pheader.p_offset, len);
             lseek(fd, pheader.p_offset, SEEK_SET);
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
             {
                 uint32_t size = pheader.p_filesz;
                 size_t tmp_len = 0;
@@ -810,7 +840,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
 
             if (pheader.p_filesz < pheader.p_memsz)
             {
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
                 uint32_t size = pheader.p_memsz - pheader.p_filesz;
                 uint32_t size_s;
                 uint32_t off;
@@ -897,7 +927,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
             read_len = load_fread(dynsym, 1, dynsym_size, fd);
             check_read(read_len, dynsym_size);
         }
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
         lwp_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
 #else
         lwp_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
@@ -924,11 +954,13 @@ _exit:
     }
     return result;
 }
+#endif /* ARCH_ARM_MMU */
 
 int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux);
 
 RT_WEAK int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
 {
+#ifdef ARCH_ARM_MMU
     uint8_t *ptr;
     int ret = -1;
     int len;
@@ -980,6 +1012,84 @@ out:
         close(fd);
     }
     return ret;
+#else
+    const char *process_name = RT_NULL;
+    int ret = -1;
+    int fd = 0;
+
+    /* check file name */
+    RT_ASSERT(filename != RT_NULL);
+    /* check lwp control block */
+    RT_ASSERT(lwp != RT_NULL);
+
+    /* copy file name to process name */
+    process_name = strrchr(filename, '/');
+    process_name = process_name? process_name + 1: filename;
+    rt_strncpy(lwp->cmd, process_name, RT_NAME_MAX);
+
+    if ((fd = open(filename, O_RDONLY)) == RT_NULL)
+    {
+        LOG_E("exec file (%s) find error!", filename);
+        goto out;
+    }
+    if (ioctl(fd, RT_FIOGETADDR, &lwp->text_entry) != RT_EOK)
+    {
+        LOG_E("get text addr error!", filename);
+        goto out;
+    }
+    lwp->text_size = lseek(fd, 0, SEEK_END);
+    close(fd);
+
+    lwp->data_size = ((struct lwp_app_head*)lwp->text_entry)->ram_size;
+#ifdef ARCH_ARM_MPU
+    struct stat buf;
+
+    if (stat(filename, &buf) != 0)
+    {
+        goto out;
+    }
+    if (rt_lwp_map_user(lwp, lwp->text_entry, buf.st_size, RT_MPU_ATT_READ) == RT_NULL)
+    {
+        goto out;
+    }
+    {
+        int i;
+        int argc;
+        char **argv;
+        void *new_args;
+        char *args_offset;
+        lwp->data_entry = rt_lwp_alloc_user(lwp, lwp->data_size + lwp->args_length, RT_MPU_ATT_FULL);
+        if (lwp->data_entry == RT_NULL)
+        {
+            rt_free(lwp->args);
+            rt_lwp_umap_user(lwp, lwp->text_entry, buf.st_size);
+            LOG_E("malloc for data section failed!", lwp->text_entry);
+            goto out;
+        }
+
+        argc = *(uint32_t*)(lwp->args);
+        argv = (char **)(lwp->args) + 1;
+        new_args = (void *)((uint32_t)lwp->data_entry + lwp->data_size);
+        args_offset = (char *)((uint32_t)new_args - (uint32_t)lwp->args);
+        for (i=0; i<argc; i++)
+        {
+            argv[i] += (uint32_t)args_offset;
+        }
+        memcpy(new_args, lwp->args, lwp->args_length);
+        rt_free(lwp->args);
+        lwp->args = new_args;
+    }
+#else
+    lwp->data_entry = (void*)rt_malloc_align(lwp->data_size, 8);
+#endif /* ARCH_ARM_MPU */
+
+    LOG_I("lwp->text_entry = 0x%p size:%d", lwp->text_entry, buf.st_size);
+    LOG_I("lwp->data_entry = 0x%p size:%d", lwp->data_entry, lwp->data_size);
+
+    ret = 0;
+out:
+    return ret;
+#endif /* ARCH_ARM_MMU */
 }
 
 void lwp_cleanup(struct rt_thread *tid)
@@ -994,13 +1104,6 @@ void lwp_cleanup(struct rt_thread *tid)
 
     LOG_I("cleanup thread: %s, stack_addr: %08X", tid->name, tid->stack_addr);
 
-#ifndef RT_USING_USERSPACE
-    if (tid->user_stack != RT_NULL)
-    {
-        rt_free(tid->user_stack);
-    }
-#endif
-
     level = rt_hw_interrupt_disable();
     lwp = (struct rt_lwp *)tid->lwp;
 
@@ -1054,7 +1157,11 @@ static void lwp_thread_entry(void *parameter)
     }
 #endif
 
+#ifdef ARCH_ARM_MMU
     lwp_user_entry(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
+#else
+    lwp_user_entry(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
+#endif /* ARCH_ARM_MMU */
 }
 
 struct rt_lwp *lwp_self(void)
@@ -1104,7 +1211,7 @@ pid_t lwp_execve(char *filename, int argc, char **argv, char **envp)
         lwp_ref_dec(lwp);
         return -ENOMEM;
     }
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     if (lwp_user_space_init(lwp) != 0)
     {
         lwp_tid_put(tid);
@@ -1127,24 +1234,37 @@ pid_t lwp_execve(char *filename, int argc, char **argv, char **envp)
     }
 
     result = lwp_load(filename, lwp, RT_NULL, 0, aux);
+#ifdef ARCH_ARM_MMU
     if (result == 1)
     {
         /* dynmaic */
         lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
         result = load_ldso(lwp, filename, argv, envp);
     }
+#endif /* ARCH_ARM_MMU */
     if (result == RT_EOK)
     {
         rt_thread_t thread = RT_NULL;
+        rt_uint32_t priority = 25, tick = 200;
 
         lwp_copy_stdio_fdt(lwp);
 
         /* obtain the base name */
         thread_name = strrchr(filename, '/');
         thread_name = thread_name ? thread_name + 1 : filename;
-
+#ifndef ARCH_ARM_MMU
+        struct lwp_app_head *app_head = lwp->text_entry;
+        if (app_head->priority)
+        {
+            priority = app_head->priority;
+        }
+        if (app_head->tick)
+        {
+            tick = app_head->tick;
+        }
+#endif /* not defined ARCH_ARM_MMU */
         thread = rt_thread_create(thread_name, lwp_thread_entry, RT_NULL,
-                LWP_TASK_STACK_SIZE, 25, 200);
+                LWP_TASK_STACK_SIZE, priority, tick);
         if (thread != RT_NULL)
         {
             struct rt_lwp *self_lwp;
@@ -1163,6 +1283,18 @@ pid_t lwp_execve(char *filename, int argc, char **argv, char **envp)
                 lwp->parent = self_lwp;
             }
             thread->lwp = lwp;
+#ifndef ARCH_ARM_MMU
+            struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
+            thread->user_stack = app_head->stack_offset ?
+                              (void *)(app_head->stack_offset -
+                                       app_head->data_offset +
+                                       lwp->data_entry) : RT_NULL;
+            thread->user_stack_size = app_head->stack_size;
+            /* init data area */
+            rt_memset(lwp->data_entry, 0, lwp->data_size);
+            /* init user stack */
+            rt_memset(thread->user_stack, '#', thread->user_stack_size);
+#endif /* not defined ARCH_ARM_MMU */
             rt_list_insert_after(&lwp->t_grp, &thread->sibling);
 
 #ifdef RT_USING_GDBSERVER
@@ -1201,6 +1333,7 @@ pid_t exec(char *filename, int argc, char **argv)
 }
 #endif
 
+#ifdef ARCH_ARM_MMU
 void lwp_user_setting_save(rt_thread_t thread)
 {
     if (thread)
@@ -1240,3 +1373,4 @@ void lwp_user_setting_restore(rt_thread_t thread)
     }
 #endif
 }
+#endif /* ARCH_ARM_MMU */

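The new no-MMU load path reads the executable's header in place. A sketch of the lwp_app_head fields it relies on follows; only the field names appear in the hunks above (ram_size, data_offset, stack_offset, stack_size, priority, tick), so the ordering, widths and the magic member are assumptions rather than part of this commit:

    /* assumed layout -- illustration only */
    struct lwp_app_head
    {
        rt_uint32_t magic;        /* image magic (assumed)                              */
        rt_uint32_t data_offset;  /* offset of the data section inside the image        */
        rt_uint32_t ram_size;     /* bytes to allocate for the data section (data_size) */
        rt_uint32_t stack_offset; /* user stack offset; 0 means no dedicated user stack */
        rt_uint32_t stack_size;   /* copied to thread->user_stack_size                  */
        rt_uint32_t priority;     /* overrides the default priority 25 when non-zero    */
        rt_uint32_t tick;         /* overrides the default time slice 200 when non-zero */
    };
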
+ 17 - 8
components/lwp/lwp.h

@@ -27,16 +27,19 @@
 #include "lwp_ipc.h"
 #include "lwp_signal.h"
 #include "lwp_syscall.h"
+#include "lwp_avl.h"
+#include "lwp_arch.h"
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include "lwp_shm.h"
 
 #include "mmu.h"
 #include "page.h"
-#include "lwp_arch.h"
 #endif
 
+#ifdef RT_USING_MUSL
 #include <locale.h>
+#endif
 
 #ifdef __cplusplus
 extern "C" {
@@ -49,11 +52,13 @@ extern "C" {
 
 #define LWP_ARG_MAX         8
 
+#ifdef RT_USING_MUSL
 typedef int32_t pid_t;
+#endif /* RT_USING_MUSL */
 
 struct rt_lwp
 {
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     rt_mmu_info mmu_info;
     struct lwp_avl_struct *map_area;
     size_t end_heap;
@@ -74,12 +79,10 @@ struct rt_lwp
     uint32_t text_size;
     void *data_entry;
     uint32_t data_size;
-#ifndef RT_USING_USERSPACE
-    size_t load_off;
-#endif
 
     int ref;
     void *args;
+    uint32_t args_length;
     pid_t pid;
     rt_list_t t_grp;
     struct dfs_fdtable fdt;
@@ -92,6 +95,12 @@ struct rt_lwp
     rt_uint32_t signal_in_process;
     lwp_sighandler_t signal_handler[_LWP_NSIG];
 
+#ifndef ARCH_ARM_MMU
+#ifdef ARCH_ARM_MPU
+    struct rt_mpu_info mpu_info;
+#endif
+#endif /* ARCH_ARM_MMU */
+
     struct lwp_avl_struct *object_root;
     struct rt_mutex object_mutex;
     struct rt_user_context user_ctx;
@@ -132,13 +141,13 @@ void lwp_tid_set_thread(int tid, rt_thread_t thread);
 
 size_t lwp_user_strlen(const char *s, int *err);
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void lwp_mmu_switch(struct rt_thread *thread);
 #endif
 void lwp_user_setting_save(rt_thread_t thread);
 void lwp_user_setting_restore(rt_thread_t thread);
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 struct __pthread {
     /* Part 1 -- these fields may be external or
      *      * internal (accessed via asm) ABI. Do not change. */

+ 19 - 0
components/lwp/lwp_avl.c

@@ -207,3 +207,22 @@ int lwp_avl_traversal(struct lwp_avl_struct *ptree, int (*fun)(struct lwp_avl_st
     }
     return ret;
 }
+
+#ifndef ARCH_ARM_MMU
+struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree)
+{
+    if (ptree == AVL_EMPTY)
+    {
+        return (struct lwp_avl_struct *)0;
+    }
+    while (1)
+    {
+        if (!ptree->avl_left)
+        {
+            break;
+        }
+        ptree = ptree->avl_left;
+    }
+    return ptree;
+}
+#endif /* ARCH_ARM_MMU */

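A minimal usage sketch for the new helper (non-MMU builds only); the tree root variable here is hypothetical:

    #ifndef ARCH_ARM_MMU
        /* lwp_map_find_first() follows avl_left links and returns the node with
         * the smallest key, or NULL when the tree is empty */
        struct lwp_avl_struct *first = lwp_map_find_first(map_tree_root);
        if (first != RT_NULL)
        {
            /* first->avl_key is the lowest key currently in the tree */
        }
    #endif
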
+ 3 - 0
components/lwp/lwp_avl.h

@@ -37,6 +37,9 @@ void lwp_avl_remove(struct lwp_avl_struct * node_to_delete, struct lwp_avl_struc
 void lwp_avl_insert (struct lwp_avl_struct * new_node, struct lwp_avl_struct ** ptree);
 struct lwp_avl_struct* lwp_avl_find(avl_key_t key, struct lwp_avl_struct* ptree);
 int lwp_avl_traversal(struct lwp_avl_struct* ptree, int (*fun)(struct lwp_avl_struct*, void *), void *arg);
+#ifndef ARCH_ARM_MMU
+struct lwp_avl_struct* lwp_map_find_first(struct lwp_avl_struct* ptree);
+#endif /* ARCH_ARM_MMU */
 
 #ifdef __cplusplus
 }

+ 1 - 1
components/lwp/lwp_futex.c

@@ -10,7 +10,7 @@
 
 #include <rtthread.h>
 #include <lwp.h>
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <lwp_user_mm.h>
 #endif
 #include "clock_time.h"

+ 1 - 1
components/lwp/lwp_mm_area.c

@@ -9,7 +9,7 @@
  */
 #include <rtthread.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <lwp_mm_area.h>
 
 int lwp_map_area_insert(struct lwp_avl_struct **avl_tree, size_t addr, size_t size, int ma_type)

+ 1 - 1
components/lwp/lwp_mm_area.h

@@ -15,7 +15,7 @@
 
 #include <lwp_avl.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #ifdef __cplusplus
 extern "C" {

+ 26 - 12
components/lwp/lwp_pid.c

@@ -17,7 +17,7 @@
 #include "lwp_pid.h"
 #include "lwp_console.h"
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include "lwp_user_mm.h"
 
 #ifdef RT_USING_GDBSERVER
@@ -360,9 +360,12 @@ void lwp_free(struct rt_lwp* lwp)
     lwp->finish = 1;
     if (lwp->args != RT_NULL)
     {
-#ifndef RT_USING_USERSPACE
+#ifndef ARCH_ARM_MMU
+        lwp->args_length = RT_NULL;
+#ifndef ARCH_ARM_MPU
         rt_free(lwp->args);
-#endif
+#endif /* not defined ARCH_ARM_MPU */
+#endif /* ARCH_ARM_MMU */
         lwp->args = RT_NULL;
     }
 
@@ -380,7 +383,16 @@ void lwp_free(struct rt_lwp* lwp)
     /* free data section */
     if (lwp->data_entry != RT_NULL)
     {
+#ifdef ARCH_ARM_MMU
         rt_free_align(lwp->data_entry);
+#else
+#ifdef ARCH_ARM_MPU
+        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
+        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
+#else
+        rt_free_align(lwp->data_entry);
+#endif /* ARCH_ARM_MPU */
+#endif /* ARCH_ARM_MMU */
         lwp->data_entry = RT_NULL;
     }
 
@@ -390,18 +402,14 @@ void lwp_free(struct rt_lwp* lwp)
         if (lwp->text_entry)
         {
             LOG_D("lwp text free: %p", lwp->text_entry);
-#ifndef RT_USING_USERSPACE
-#ifdef RT_USING_CACHE
-            rt_free_align((void*)lwp->load_off);
-#else
-            rt_free((void*)lwp->load_off);
-#endif
-#endif
+#ifndef ARCH_ARM_MMU
+            rt_free((void*)lwp->text_entry);
+#endif /* not defined ARCH_ARM_MMU */
             lwp->text_entry = RT_NULL;
         }
     }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     lwp_unmap_user_space(lwp);
 #endif
 
@@ -493,7 +501,13 @@ void lwp_ref_dec(struct rt_lwp *lwp)
                 memset(&msg, 0, sizeof msg);
                 rt_raw_channel_send(gdb_get_server_channel(), &msg);
             }
-#endif
+#endif /* RT_USING_GDBSERVER */
+
+#ifndef ARCH_ARM_MMU
+#ifdef RT_LWP_USING_SHM
+            lwp_shm_lwp_free(lwp);
+#endif /* RT_LWP_USING_SHM */
+#endif /* not defined ARCH_ARM_MMU */
             lwp_free(lwp);
         }
     }

+ 1 - 1
components/lwp/lwp_pmutex.c

@@ -10,7 +10,7 @@
 
 #include <rtthread.h>
 #include <lwp.h>
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <lwp_user_mm.h>
 #endif
 #include "clock_time.h"

+ 1 - 1
components/lwp/lwp_shm.c

@@ -10,7 +10,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <lwp.h>
 #include <lwp_shm.h>
 

+ 20 - 0
components/lwp/lwp_signal.c

@@ -286,6 +286,13 @@ lwp_sighandler_t lwp_sighandler_get(int sig)
     }
     level = rt_hw_interrupt_disable();
     thread = rt_thread_self();
+#ifndef ARCH_ARM_MMU
+    if (thread->signal_in_process)
+    {
+        func = thread->signal_handler[sig - 1];
+        goto out;
+    }
+#endif
     lwp = (struct rt_lwp*)thread->lwp;
 
     func = lwp->signal_handler[sig - 1];
@@ -324,6 +331,19 @@ void lwp_sighandler_set(int sig, lwp_sighandler_t func)
     rt_hw_interrupt_enable(level);
 }
 
+#ifndef ARCH_ARM_MMU
+void lwp_thread_sighandler_set(int sig, lwp_sighandler_t func)
+{
+    rt_base_t level;
+
+    if (sig == 0 || sig > _LWP_NSIG)
+        return;
+    level = rt_hw_interrupt_disable();
+    rt_thread_self()->signal_handler[sig - 1] = func;
+    rt_hw_interrupt_enable(level);
+}
+#endif
+
 int lwp_sigaction(int sig, const struct lwp_sigaction *act,
              struct lwp_sigaction *oact, size_t sigsetsize)
 {

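Illustrative use of the new per-thread handler setter on no-MMU targets; the handler body and signal number are examples, and lwp_sighandler_t is assumed to take the signal number as its only argument:

    #ifndef ARCH_ARM_MMU
    static void thread_sig_handler(int sig)
    {
        /* preferred over the process-wide handler while the calling thread's
         * signal_in_process flag is set, since lwp_sighandler_get() now checks
         * thread->signal_handler[] first in that case */
    }

    static void install_thread_handler(void)
    {
        lwp_thread_sighandler_set(SIGUSR1, thread_sig_handler);
    }
    #endif
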
+ 3 - 0
components/lwp/lwp_signal.h

@@ -22,6 +22,9 @@ int lwp_signal_backup(void *user_sp, void *user_pc, void* user_flag);
 struct rt_user_context *lwp_signal_restore(void);
 lwp_sighandler_t lwp_sighandler_get(int sig);
 void lwp_sighandler_set(int sig, lwp_sighandler_t func);
+#ifndef ARCH_ARM_MMU
+void lwp_thread_sighandler_set(int sig, lwp_sighandler_t func);
+#endif
 int lwp_sigprocmask(int how, const lwp_sigset_t *sigset, lwp_sigset_t *oset);
 int lwp_sigaction(int sig, const struct lwp_sigaction *act, struct lwp_sigaction * oact, size_t sigsetsize);
 int lwp_thread_sigprocmask(int how, const lwp_sigset_t *sigset, lwp_sigset_t *oset);

The diff for this file is not shown because it is too large
+ 316 - 88
components/lwp/lwp_syscall.c


+ 1 - 1
components/lwp/lwp_tid.c

@@ -13,7 +13,7 @@
 
 #include "lwp.h"
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include "lwp_user_mm.h"
 
 #ifdef RT_USING_GDBSERVER

+ 1 - 1
components/lwp/lwp_user_mm.c

@@ -15,7 +15,7 @@
 #include <rtthread.h>
 #include <rthw.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #include <mmu.h>
 #include <page.h>

+ 1 - 1
components/lwp/lwp_user_mm.h

@@ -14,7 +14,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <lwp.h>
 #include <lwp_mm_area.h>
 

+ 4 - 1
include/rtdef.h

@@ -737,11 +737,14 @@ struct rt_thread
     lwp_sigset_t signal_mask;
     int signal_mask_bak;
     rt_uint32_t signal_in_process;
+#ifndef ARCH_ARM_MMU
+    lwp_sighandler_t signal_handler[32];
+#endif
     struct rt_user_context user_ctx;
 
     struct rt_wakeup wakeup;                            /**< wakeup data */
     int exit_request;
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #ifdef RT_USING_GDBSERVER
     int step_exec;
     int debug_attach_req;

+ 2 - 7
libcpu/Kconfig

@@ -55,20 +55,15 @@ config ARCH_ARM_MMU
     select RT_USING_CACHE
     depends on ARCH_ARM
 
-config RT_USING_USERSPACE
-    bool "Isolated user space"
-    default n
-    depends on ARCH_ARM_MMU
-
 config KERNEL_VADDR_START
     hex "The virtural address of kernel start"
     default 0xc0000000
-    depends on RT_USING_USERSPACE
+    depends on ARCH_ARM_MMU
 
 config PV_OFFSET
     hex "The offset of kernel physical address and virtural address"
     default 0
-    depends on RT_USING_USERSPACE
+    depends on ARCH_ARM_MMU
 
 config RT_IOREMAP_LATE
     bool "Support to create IO mapping in the kernel address space after system initlalization."

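With RT_USING_USERSPACE removed, KERNEL_VADDR_START and PV_OFFSET now follow ARCH_ARM_MMU directly, and the guard pattern used throughout this commit reduces to the following (illustrative summary, not code taken from the diff):

    #ifdef ARCH_ARM_MMU
        /* MMU targets: isolated user address space, page-based lwp_user_mm / lwp_shm */
    #elif defined(ARCH_ARM_MPU)
        /* MPU targets: flat address space with region protection (RT_LWP_MPU_MAX_NR) */
    #else
        /* neither: flat address space, text executed in place, data from the kernel heap */
    #endif
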
+ 1 - 1
libcpu/aarch64/common/interrupt.c

@@ -94,7 +94,7 @@ void rt_hw_interrupt_init(void)
     rt_memset(isr_table, 0x00, sizeof(isr_table));
 
     /* initialize ARM GIC */
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     gic_dist_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_dist_base(), 0x2000, MMU_MAP_K_DEVICE);
     gic_cpu_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_cpu_base(), 0x1000, MMU_MAP_K_DEVICE);
 #else

+ 6 - 6
libcpu/aarch64/common/mmu.c

@@ -15,7 +15,7 @@
 #include "cp15.h"
 #include "mmu.h"
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include "page.h"
 #endif
 
@@ -548,7 +548,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
 {
     size_t loop_va;
@@ -627,7 +627,7 @@ static void rt_hw_cpu_tlb_invalidate(void)
     __asm__ volatile("tlbi vmalle1\n dsb sy\n isb sy\n");
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     size_t pa_s, pa_e;
@@ -698,7 +698,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
 }
 #endif
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
 {
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
@@ -794,7 +794,7 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
     rt_hw_cpu_tlb_invalidate();
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     void *ret;
@@ -887,7 +887,7 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
     return ret;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off)
 {
     int ret;

+ 1 - 1
libcpu/aarch64/common/mmu.h

@@ -123,7 +123,7 @@ void rt_hw_mmu_setup(struct mem_desc *mdesc, int desc_nr);
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
 int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size);
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr);
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
 #else

+ 1 - 1
libcpu/aarch64/common/page.c

@@ -12,7 +12,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #include <page.h>
 #include <mmu.h>

+ 1 - 1
libcpu/aarch64/common/page.h

@@ -11,7 +11,7 @@
 #ifndef  __PAGE_H__
 #define  __PAGE_H__
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 typedef struct tag_region
 {

+ 2 - 2
libcpu/arm/cortex-a/backtrace.c

@@ -17,7 +17,7 @@
 #define DBG_LVL    DBG_INFO
 #include <rtdbg.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <lwp.h>
 #include <lwp_user_mm.h>
 #include <lwp_arch.h>
@@ -522,7 +522,7 @@ void rt_unwind(struct rt_hw_exp_stack *regs, unsigned int pc_adj)
     e_regs.ARM_sp = regs->sp;
     e_regs.ARM_lr = regs->lr;
     e_regs.ARM_pc = regs->pc - pc_adj;
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     if (!lwp_user_accessable((void *)e_regs.ARM_pc, sizeof (void *)))
     {
         e_regs.ARM_pc = regs->lr - sizeof(void *);

+ 8 - 8
libcpu/arm/cortex-a/context_gcc.S

@@ -45,12 +45,12 @@ rt_hw_context_switch_to:
 #ifdef RT_USING_SMP
     mov     r0, r1
     bl      rt_cpus_lock_status_restore
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     bl      rt_thread_self
     bl      lwp_user_setting_restore
 #endif
 #else
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     bl      rt_thread_self
     mov     r4, r0
     bl      lwp_mmu_switch
@@ -108,12 +108,12 @@ rt_hw_context_switch:
 #ifdef RT_USING_SMP
     mov     r0, r2
     bl      rt_cpus_lock_status_restore
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     bl      rt_thread_self
     bl      lwp_user_setting_restore
 #endif
 #else
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     bl      rt_thread_self
     mov     r4, r0
     bl      lwp_mmu_switch
@@ -151,7 +151,7 @@ rt_hw_context_switch_interrupt:
      */
 #ifdef RT_USING_LWP
     push {r0 - r3, lr}
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     bl rt_thread_self
     bl lwp_user_setting_save
 #endif
@@ -161,11 +161,11 @@ rt_hw_context_switch_interrupt:
 
     ldr     sp, [r2]
     mov     r0, r3
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     mov     r4, r0
 #endif
     bl      rt_cpus_lock_status_restore
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     mov     r0, r4
     bl      lwp_user_setting_restore
 #endif
@@ -187,7 +187,7 @@ rt_hw_context_switch_interrupt:
     str r0, [r3]
     mov r3, #1              @ set rt_thread_switch_interrupt_flag to 1
     str r3, [ip]
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     push {r1, lr}
     mov r0, r2
     bl lwp_user_setting_save

+ 1 - 1
libcpu/arm/cortex-a/interrupt.c

@@ -94,7 +94,7 @@ void rt_hw_interrupt_init(void)
     rt_memset(isr_table, 0x00, sizeof(isr_table));
 
     /* initialize ARM GIC */
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     gic_dist_base = (uint32_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_dist_base(), 0x2000, MMU_MAP_K_RW);
     gic_cpu_base = (uint32_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_cpu_base(), 0x1000, MMU_MAP_K_RW);
 #else

+ 15 - 15
libcpu/arm/cortex-a/mmu.c

@@ -15,7 +15,7 @@
 #include "cp15.h"
 #include "mmu.h"
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include "page.h"
 #endif
 
@@ -248,7 +248,7 @@ int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
     size_t l1_off;
     size_t *mmu_l1, *mmu_l2;
     size_t sections;
-#ifndef RT_USING_USERSPACE
+#ifndef ARCH_ARM_MMU
     size_t *ref_cnt;
 #endif
 
@@ -276,7 +276,7 @@ int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
         mmu_l1 =  (size_t*)mmu_info->vtable + l1_off;
 
         RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
         mmu_l2 = (size_t*)rt_pages_alloc(0);
 #else
         mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
@@ -297,7 +297,7 @@ int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
             return -1;
         }
 
-#ifndef RT_USING_USERSPACE
+#ifndef ARCH_ARM_MMU
         ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
         *ref_cnt = 1;
 #endif
@@ -371,7 +371,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
 {
     size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
@@ -424,7 +424,7 @@ static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
     size_t l1_off, l2_off;
     size_t *mmu_l1, *mmu_l2;
-#ifndef RT_USING_USERSPACE
+#ifndef ARCH_ARM_MMU
     size_t *ref_cnt;
 #endif
 
@@ -459,7 +459,7 @@ static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages
             /* cache maintain */
             rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
             if (rt_pages_free(mmu_l2, 0))
             {
                 *mmu_l1 = 0;
@@ -488,7 +488,7 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
     size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
     size_t l1_off, l2_off;
     size_t *mmu_l1, *mmu_l2;
-#ifndef RT_USING_USERSPACE
+#ifndef ARCH_ARM_MMU
     size_t *ref_cnt;
 #endif
 
@@ -506,13 +506,13 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
         if (*mmu_l1 & ARCH_MMU_USED_MASK)
         {
             mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
             rt_page_ref_inc(mmu_l2, 0);
 #endif
         }
         else
         {
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
             mmu_l2 = (size_t*)rt_pages_alloc(0);
 #else
             mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
@@ -535,7 +535,7 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
             }
         }
 
-#ifndef RT_USING_USERSPACE
+#ifndef ARCH_ARM_MMU
         ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE / ARCH_PAGE_SIZE);
         (*ref_cnt)++;
 #endif
@@ -555,7 +555,7 @@ static void rt_hw_cpu_tlb_invalidate(void)
     asm volatile ("mcr p15, 0, r0, c8, c7, 0\ndsb\nisb" ::: "memory");
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     size_t pa_s, pa_e;
@@ -626,7 +626,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
 }
 #endif
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
 {
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
@@ -753,7 +753,7 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
     rt_hw_cpu_tlb_invalidate();
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     void *ret;
@@ -850,7 +850,7 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
     return ret;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off) {
     unsigned int va;
 

+ 2 - 2
libcpu/arm/cortex-a/mmu.h

@@ -19,7 +19,7 @@
 #define SHAREDEVICE    (1<<2)  /* shared device */
 #define STRONGORDER    (0<<2)  /* strong ordered */
 #define XN             (1<<4)  /* eXecute Never */
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #define AP_RW          (1<<10) /* supervisor=RW, user=No */
 #define AP_RO          ((1<<10) |(1 << 15)) /* supervisor=RW, user=No */
 #else
@@ -98,7 +98,7 @@ typedef struct
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
 int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size);
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr);
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
 #else

+ 1 - 1
libcpu/arm/cortex-a/page.c

@@ -12,7 +12,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #include <page.h>
 #include <mmu.h>

+ 1 - 1
libcpu/arm/cortex-a/page.h

@@ -11,7 +11,7 @@
 #ifndef  __PAGE_H__
 #define  __PAGE_H__
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 typedef struct tag_region
 {

+ 9 - 9
libcpu/arm/cortex-a/start_gcc.S

@@ -43,7 +43,7 @@ stack_start:
 .endr
 stack_top:
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 .data
 .align 14
 init_mtbl:
@@ -122,7 +122,7 @@ continue:
     dsb
     isb
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     ldr r5, =PV_OFFSET
 
     mov r7, #0x100000
@@ -194,7 +194,7 @@ bss_loop:
     ldr r1, [r1]
     bl rt_hw_init_mmu_table
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     ldr r0, =MMUTable     /* vaddr    */
     add r0, r5            /* to paddr */
     bl  switch_mmu
@@ -252,7 +252,7 @@ stack_setup:
     msr     cpsr_c, #Mode_SVC|I_Bit|F_Bit
     bx      lr
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 .align 2
 .global enable_mmu
 enable_mmu:
@@ -476,7 +476,7 @@ rt_hw_context_switch_interrupt_do:
     ldr     sp,  [r6]       /* get new task's stack pointer */
 
     bl      rt_thread_self
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     mov     r4, r0
     bl      lwp_mmu_switch
     mov     r0, r4
@@ -570,7 +570,7 @@ vector_undef:
     .globl  vector_pabt
 vector_pabt:
     push_svc_reg
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     /* cp Mode_ABT stack to SVC */
     sub     sp, sp, #17 * 4     /* Sizeof(struct rt_hw_exp_stack)  */
     mov     lr, r0
@@ -598,7 +598,7 @@ vector_pabt:
     .globl  vector_dabt
 vector_dabt:
     push_svc_reg
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     /* cp Mode_ABT stack to SVC */
     sub     sp, sp, #17 * 4    /* Sizeof(struct rt_hw_exp_stack)  */
     mov     lr, r0
@@ -637,7 +637,7 @@ rt_clz:
 
 .global rt_secondary_cpu_entry
 rt_secondary_cpu_entry:
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     ldr     r5, =PV_OFFSET
 
     ldr     lr, =after_enable_mmu2
@@ -681,7 +681,7 @@ after_enable_mmu2:
     ldr sp, =abt_stack_2_limit
 
     /* initialize the mmu table and enable mmu */
-#ifndef RT_USING_USERSPACE
+#ifndef ARCH_ARM_MMU
     bl rt_hw_mmu_init
 #endif
 

+ 1 - 1
libcpu/arm/cortex-a/trap.c

@@ -163,7 +163,7 @@ void rt_hw_show_register(struct rt_hw_exp_stack *regs)
     rt_kprintf("fp :0x%08x ip :0x%08x\n", regs->fp, regs->ip);
     rt_kprintf("sp :0x%08x lr :0x%08x pc :0x%08x\n", regs->sp, regs->lr, regs->pc);
     rt_kprintf("cpsr:0x%08x\n", regs->cpsr);
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     {
         uint32_t v;
         asm volatile ("MRC p15, 0, %0, c5, c0, 0":"=r"(v));

+ 1 - 1
libcpu/arm/cortex-a/vector_gcc.S

@@ -15,7 +15,7 @@
 
 .globl system_vectors
 system_vectors:
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     b _reset
 #else
     ldr pc, _vector_reset

+ 8 - 8
libcpu/mips/gs264/mmu.c

@@ -318,7 +318,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
 {
     size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
@@ -397,7 +397,7 @@ static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages
             (*ref_cnt)--;
             if (!*ref_cnt)
             {
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
                 rt_pages_free(mmu_l2, 0);
 #else
                 rt_free_align(mmu_l2);
@@ -437,7 +437,7 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
         }
         else
         {
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
             mmu_l2 = (size_t*)rt_pages_alloc(0);
 #else
             mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
@@ -479,7 +479,7 @@ static void rt_hw_cpu_tlb_invalidate(void)
     mmu_clear_itlb();
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     size_t pa_s, pa_e;
@@ -550,7 +550,7 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
 }
 #endif
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
 {
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
@@ -694,7 +694,7 @@ void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
 {   
     void *v_addr = 0;
 
-    #ifdef RT_USING_USERSPACE
+    #ifdef ARCH_ARM_MMU
     extern rt_mmu_info mmu_info;
     v_addr = rt_hw_mmu_map(&mmu_info, 0, p_addr, size, MMU_MAP_K_RW);
     #else
@@ -704,7 +704,7 @@ void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
     return v_addr;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     void *ret;
@@ -801,7 +801,7 @@ void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
     return ret;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off) {
     unsigned int va;
 

+ 2 - 2
libcpu/mips/gs264/mmu.h

@@ -20,7 +20,7 @@
 #define SHAREDEVICE    (1<<2)                /* shared device */
 #define STRONGORDER    (0<<2)                /* strong ordered */
 #define XN             (1<<4)                /* execute Never */
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #define AP_RW          (1<<10)               /* supervisor=RW, user=No */
 #define AP_RO          ((1<<10) |(1 << 15))  /* supervisor=RW, user=No */
 #else
@@ -98,7 +98,7 @@ void *mmu_table_get();
 void switch_mmu(void *mmu_table);
 
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr);
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
 #else

+ 22 - 83
libcpu/risc-v/t-head/c906/cache.c

@@ -6,7 +6,6 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-01-29     lizhirui     first version
- * 2021-11-05     JasonHu      add c906 cache inst
  */
 
 #include <rthw.h>
@@ -14,99 +13,36 @@
 #include <board.h>
 #include <riscv.h>
 
-#define L1_CACHE_BYTES (64)
-
-static void dcache_wb_range(unsigned long start, unsigned long end)
-{
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-    {
-        /* asm volatile("dcache.cva %0\n"::"r"(i):"memory"); */
-        /*
-         * compiler always use a5 = i.
-         * a6 not used, so we use a6 here.
-         */
-        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
-        asm volatile(".long 0x0257800b");               /* dcache.cva a6 */
-    }
-    asm volatile(".long 0x01b0000b");   /* sync.is */
-}
-
-static void dcache_inv_range(unsigned long start, unsigned long end)
-{
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-    {
-        /* asm volatile("dcache.iva %0\n"::"r"(i):"memory"); */
-        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
-        asm volatile(".long 0x0268000b");               /* dcache.iva a6 */
-    }
-    asm volatile(".long 0x01b0000b");
-}
-
-static void dcache_wbinv_range(unsigned long start, unsigned long end)
+rt_inline rt_uint32_t rt_cpu_icache_line_size()
 {
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-    {
-        /* asm volatile("dcache.civa %0\n"::"r"(i):"memory"); */
-        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
-        asm volatile(".long 0x0278000b");               /* dcache.civa a6 */
-    }
-    asm volatile(".long 0x01b0000b");
-}
-
-static void icache_inv_range(unsigned long start, unsigned long end)
-{
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-    {
-        /* asm volatile("icache.iva %0\n"::"r"(i):"memory"); */
-        asm volatile("mv a6, %0\n"::"r"(i):"memory");   /* a6 = a5(i) */
-        asm volatile(".long 0x0308000b");               /* icache.iva a6 */
-    }
-    asm volatile(".long 0x01b0000b");
-}
-
-rt_inline rt_uint32_t rt_cpu_icache_line_size(void)
-{
-    return L1_CACHE_BYTES;
+    return 0;
 }
 
-rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
+rt_inline rt_uint32_t rt_cpu_dcache_line_size()
 {
-    return L1_CACHE_BYTES;
+    return 0;
 }
 
 void rt_hw_cpu_icache_invalidate(void *addr,int size)
 {
-    icache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
+    asm volatile("fence");
 }
 
 void rt_hw_cpu_dcache_invalidate(void *addr,int size)
 {
-    dcache_inv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
+    asm volatile("fence");
 }
 
 void rt_hw_cpu_dcache_clean(void *addr,int size)
 {
-    dcache_wb_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
-}
-
-void rt_hw_cpu_dcache_clean_flush(void *addr,int size)
-{
-    dcache_wbinv_range((unsigned long)addr, (unsigned long)((unsigned char *)addr + size));
+    asm volatile("fence");
 }
 
 void rt_hw_cpu_icache_ops(int ops,void *addr,int size)
 {
     if(ops == RT_HW_CACHE_INVALIDATE)
     {
-        rt_hw_cpu_icache_invalidate(addr, size);
+        rt_hw_cpu_icache_invalidate(addr,size);
     }
 }
 
@@ -114,30 +50,33 @@ void rt_hw_cpu_dcache_ops(int ops,void *addr,int size)
 {
     if(ops == RT_HW_CACHE_FLUSH)
     {
-        rt_hw_cpu_dcache_clean(addr, size);
+        rt_hw_cpu_dcache_clean(addr,size);
     }
     else
     {
-        rt_hw_cpu_dcache_invalidate(addr, size);
+        rt_hw_cpu_dcache_invalidate(addr,size);
     }
 }
 
-void rt_hw_cpu_dcache_clean_all(void)
+void rt_hw_cpu_dcache_flush_all()
+{
+    asm volatile("fence");
+    //asm volatile("dcache.call");
+}
+
+void rt_hw_cpu_icache_invalidate_all()
 {
-    /* asm volatile("dcache.ciall\n":::"memory"); */
-    asm volatile(".long 0x0030000b\n":::"memory");
+    asm volatile("fence");
 }
 
-void rt_hw_cpu_dcache_invalidate_all(void)
+rt_base_t rt_hw_cpu_icache_status()
 {
-    /* asm volatile("dcache.iall\n":::"memory"); */
-    asm volatile(".long 0x0020000b\n":::"memory");
+    return 0;
 }
 
-void rt_hw_cpu_icache_invalidate_all(void)
+rt_base_t rt_hw_cpu_dcache_status()
 {
-    /* asm volatile("icache.iall\n":::"memory"); */
-    asm volatile(".long 0x0100000b\n":::"memory");
+    return 0;
 }
 
 int sys_cacheflush(void *addr, int size, int cache)

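Note: the ops dispatchers kept at the end of this file still route everything through the two entry points shown above, so callers are unaffected by the instruction-level rewrite. A minimal usage sketch (dma_buf/len are hypothetical; RT_HW_CACHE_FLUSH / RT_HW_CACHE_INVALIDATE are the rthw.h constants already used in the hunk):

#include <rthw.h>

/* Sketch: typical DMA buffer maintenance through rt_hw_cpu_dcache_ops() */
static void dma_buf_sync(void *dma_buf, int len, int cpu_is_writer)
{
    if (cpu_is_writer)
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, dma_buf, len);       /* clean dirty lines to memory   */
    else
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, dma_buf, len);  /* drop stale lines before reads */
}
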
+ 2 - 2
libcpu/risc-v/t-head/c906/context_gcc.S

@@ -21,7 +21,7 @@ rt_hw_context_switch_to:
     la s0, rt_current_thread
     LOAD s1, (s0)
 
-    #ifdef RT_USING_USERSPACE
+    #ifdef ARCH_ARM_MMU
         mv a0, s1
         jal lwp_mmu_switch
     #endif
@@ -54,7 +54,7 @@ rt_hw_context_switch:
     la s0, rt_current_thread
     LOAD s1, (s0)
 
-    #ifdef RT_USING_USERSPACE
+    #ifdef ARCH_ARM_MMU
         mv a0, s1
         jal lwp_mmu_switch
     #endif

+ 2 - 2
libcpu/risc-v/t-head/c906/interrupt_gcc.S

@@ -92,7 +92,7 @@ copy_context_loop_interrupt:
     LOAD  s1, 0(s0)
     LOAD  sp, 0(s1)
 
-    #ifdef RT_USING_USERSPACE
+    #ifdef ARCH_ARM_MMU
         mv a0, s1
         jal rt_thread_sp_to_thread
         jal lwp_mmu_switch
@@ -146,7 +146,7 @@ copy_context_loop:
 .global syscall_exit
 syscall_exit:
 
-    #if defined(RT_USING_USERSPACE) && defined(RT_USING_SIGNALS)
+    #if defined(ARCH_ARM_MMU) && defined(RT_USING_SIGNALS)
         LOAD s0, 2 * REGBYTES(sp)
         andi s0, s0, 0x100
         bnez s0, dont_ret_to_user

+ 4 - 2
libcpu/risc-v/t-head/c906/mmu.c

@@ -14,13 +14,15 @@
 #include "page.h"
 #include <stdlib.h>
 #include <string.h>
-#include <cache.h>
 
 #include "riscv.h"
 #include "riscv_mmu.h"
 #include "mmu.h"
 
 void *current_mmu_table = RT_NULL;
+void rt_hw_cpu_icache_invalidate_all();
+void rt_hw_cpu_dcache_flush_all();
+void rt_hw_cpu_dcache_clean(void *addr,rt_size_t size);
 
 static void rt_hw_cpu_tlb_invalidate()
 {
@@ -39,7 +41,7 @@ void switch_mmu(void *mmu_table)
     current_mmu_table = mmu_table;
     RT_ASSERT(__CHECKALIGN(mmu_table,PAGE_OFFSET_BIT));
     mmu_set_pagetable((rt_ubase_t)mmu_table);
-    rt_hw_cpu_dcache_clean_all();
+    rt_hw_cpu_dcache_flush_all();
     rt_hw_cpu_icache_invalidate_all();
 }
 

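Note: the hunk cuts off the body of rt_hw_cpu_tlb_invalidate(). On RV64 a full local TLB flush is normally a single sfence.vma; the following is a hedged sketch of what such a body typically looks like, an assumption rather than code copied from this diff:

static void rt_hw_cpu_tlb_invalidate_sketch(void)
{
    /* flush all local TLB entries for every address space */
    asm volatile("sfence.vma" ::: "memory");
}
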
+ 3 - 3
libcpu/risc-v/t-head/c906/trap.c

@@ -26,7 +26,7 @@
 #include "rt_interrupt.h"
 #include "plic.h"
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     #include "riscv_mmu.h"
     #include "mmu.h"
     #include "page.h"
@@ -144,7 +144,7 @@ void dump_regs(struct rt_hw_stack_frame *regs)
     rt_size_t satp_v = read_csr(satp);
     rt_kprintf("satp = 0x%p\n",satp_v);
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     rt_kprintf("\tCurrent Page Table(Physical) = 0x%p\n",__MASKVALUE(satp_v,__MASK(44)) << PAGE_OFFSET_BIT);
     rt_kprintf("\tCurrent ASID = 0x%p\n",__MASKVALUE(satp_v >> 44,__MASK(16)) << PAGE_OFFSET_BIT);
 #endif
@@ -273,7 +273,7 @@ void handle_trap(rt_size_t scause,rt_size_t stval,rt_size_t sepc,struct rt_hw_st
     }
     else
     {
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
         /* page fault */
         if (id == EP_LOAD_PAGE_FAULT ||
             id == EP_STORE_PAGE_FAULT)

+ 2 - 2
libcpu/risc-v/virt64/context_gcc.S

@@ -21,7 +21,7 @@ rt_hw_context_switch_to:
     la s0, rt_current_thread
     LOAD s1, (s0)
 
-    #ifdef RT_USING_USERSPACE
+    #ifdef ARCH_ARM_MMU
         mv a0, s1
         jal lwp_mmu_switch
     #endif
@@ -54,7 +54,7 @@ rt_hw_context_switch:
     la s0, rt_current_thread
     LOAD s1, (s0)
 
-    #ifdef RT_USING_USERSPACE
+    #ifdef ARCH_ARM_MMU
         mv a0, s1
         jal lwp_mmu_switch
     #endif

+ 2 - 2
libcpu/risc-v/virt64/interrupt_gcc.S

@@ -92,7 +92,7 @@ copy_context_loop_interrupt:
     LOAD  s1, 0(s0)
     LOAD  sp, 0(s1)
 
-    #ifdef RT_USING_USERSPACE
+    #ifdef ARCH_ARM_MMU
         mv a0, s1
         jal rt_thread_sp_to_thread
         jal lwp_mmu_switch
@@ -146,7 +146,7 @@ copy_context_loop:
 .global syscall_exit
 syscall_exit:
 
-    #if defined(RT_USING_USERSPACE) && defined(RT_USING_SIGNALS)
+    #if defined(ARCH_ARM_MMU) && defined(RT_USING_SIGNALS)
         LOAD s0, 2 * REGBYTES(sp)
         andi s0, s0, 0x100
         bnez s0, error

+ 4 - 4
libcpu/x86/i386/cpuport.c

@@ -82,7 +82,7 @@ void rt_hw_context_switch_to(rt_ubase_t to)
 {
     rt_thread_t to_thread = rt_thread_sp_to_thread((void *)to);
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     /**
      * update kernel esp0 as to thread's kernel stack, to make sure process can't
      * get the correct kernel stack from tss esp0 when interrupt occur in user mode.
@@ -90,7 +90,7 @@ void rt_hw_context_switch_to(rt_ubase_t to)
     rt_ubase_t stacktop = (rt_ubase_t)(to_thread->stack_addr + to_thread->stack_size);
     rt_hw_tss_set_kstacktop(stacktop);
     lwp_mmu_switch(to_thread);  /* switch mmu before switch context */
-#endif /* RT_USING_USERSPACE */
+#endif /* ARCH_ARM_MMU */
     rt_hw_context_switch_to_real(to);
 }
 
@@ -103,7 +103,7 @@ void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to)
     lwp_user_setting_save(from_thread);
 #endif /* RT_USING_LWP */
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     /**
      * update kernel esp0 as to thread's kernel stack, to make sure process can't
      * get the correct kernel stack from tss esp0 when interrupt occur in user mode.
@@ -111,7 +111,7 @@ void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to)
     rt_ubase_t stacktop = (rt_ubase_t)(to_thread->stack_addr + to_thread->stack_size);
     rt_hw_tss_set_kstacktop(stacktop);
     lwp_mmu_switch(to_thread);  /* switch mmu before switch context */
-#endif /* RT_USING_USERSPACE */
+#endif /* ARCH_ARM_MMU */
 
     rt_hw_context_switch_real(from, to);
 

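Note: both hunks install the same esp0 value before switching, so a user-to-kernel interrupt always lands on the destination thread's kernel stack. A worked instance with a hypothetical thread layout:

static void tss_esp0_example(void)
{
    rt_ubase_t stack_addr = 0x00800000;               /* hypothetical kernel stack base */
    rt_ubase_t stack_size = 0x00001000;               /* 4 KiB                          */
    rt_hw_tss_set_kstacktop(stack_addr + stack_size); /* tss.esp0 = 0x00801000          */
}
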
+ 2 - 2
libcpu/x86/i386/gate.c

@@ -146,10 +146,10 @@ void rt_hw_gate_init(void)
     gate_set(IDT_OFF2PTR(idt, IRQ_INTR_BASE+14), rt_hw_intr_entry0x2e, KERNEL_CODE_SEL, DA_386_INTR_GATE, DA_GATE_DPL0);
     gate_set(IDT_OFF2PTR(idt, IRQ_INTR_BASE+15), rt_hw_intr_entry0x2f, KERNEL_CODE_SEL, DA_386_INTR_GATE, DA_GATE_DPL0);
     /* 系统调用处理中断 */
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     extern void hw_syscall_entry(void);
     gate_set(IDT_OFF2PTR(idt, SYSCALL_INTR_BASE), hw_syscall_entry, KERNEL_CODE_SEL, DA_386_INTR_GATE, DA_GATE_DPL3);
-#endif /* RT_USING_USERSPACE */
+#endif /* ARCH_ARM_MMU */
 
     extern void load_new_idt(rt_ubase_t size, rt_ubase_t idtr);
     load_new_idt(IDT_LIMIT, IDT_VADDR);

+ 2 - 2
libcpu/x86/i386/interrupt_gcc.S

@@ -186,7 +186,7 @@ rt_hw_intr_thread_switch:
     movl $rt_hw_intr_exit, %eax
     jmp *%eax
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 .extern rt_hw_syscall_dispath
 
@@ -232,7 +232,7 @@ hw_syscall_entry:
 
 .global syscall_exit
 syscall_exit:
-#endif /* RT_USING_USERSPACE */
+#endif /* ARCH_ARM_MMU */
 .global rt_hw_intr_exit
 rt_hw_intr_exit:
     addl $4, %esp               // skip intr no

+ 10 - 10
libcpu/x86/i386/mmu.c

@@ -18,15 +18,15 @@
 #include "cache.h"
 #include "i386.h"
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include "page.h"
-#endif /* RT_USING_USERSPACE */
+#endif /* ARCH_ARM_MMU */
 
 // #define RT_DEBUG_MMU_X86
 
 static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t npages);
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr);
 void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_size_t attr);
 #else
@@ -208,7 +208,7 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_si
     return 0;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 //check whether the range of virtual address are free
 static int check_vaddr(rt_mmu_info *mmu_info,void *va,rt_size_t pages)
 {
@@ -250,7 +250,7 @@ static int check_vaddr(rt_mmu_info *mmu_info,void *va,rt_size_t pages)
 
     return 0;
 }
-#endif  /* RT_USING_USERSPACE */
+#endif  /* ARCH_ARM_MMU */
 
 //find a range of free virtual address specified by pages
 static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
@@ -294,7 +294,7 @@ static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
     return 0;
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr)
 {
     rt_size_t pa_s,pa_e;
@@ -372,9 +372,9 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
     }
     return 0;
 }
-#endif  /* RT_USING_USERSPACE */
+#endif  /* ARCH_ARM_MMU */
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t npages,rt_size_t attr)
 {
     rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
@@ -471,7 +471,7 @@ void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_s
     }
     return 0;
 }
-#endif  /* RT_USING_USERSPACE */
+#endif  /* ARCH_ARM_MMU */
 
 /**
  * unmap page on v_addr, free page if unmapped, further more, if page table empty, need free it.
@@ -535,7 +535,7 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size)
     rt_hw_cpu_tlb_invalidate();
 }
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 /**
  * map vaddr in vtable with size and attr, this need a phy addr
  *

+ 2 - 2
libcpu/x86/i386/mmu.h

@@ -132,12 +132,12 @@ void switch_mmu(void *mmu_table);
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info,void *v_address,rt_size_t size,rt_size_t *vtable,rt_size_t pv_off);
 void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info,rt_size_t vaddr_start,rt_size_t size);
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info,void *v_addr,void *p_addr,rt_size_t size,rt_size_t attr);
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size,rt_size_t attr);
 #else
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr);
-#endif  /* RT_USING_USERSPACE */
+#endif  /* ARCH_ARM_MMU */
 
 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info,void *v_addr,rt_size_t size);
 void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info,void *v_addr);

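Note: the guard now selects between two rt_hw_mmu_map() prototypes, with and without a caller-chosen virtual address. A caller-side sketch, assuming the global mmu_info object seen earlier in this diff; the addresses and the MMU_MAP_K_RW attribute are illustrative, not taken from the x86 port:

extern rt_mmu_info mmu_info;

static void *map_device_regs(void)
{
#ifdef ARCH_ARM_MMU
    /* full-MMU build: caller supplies both the virtual and the physical address */
    return rt_hw_mmu_map(&mmu_info, (void *)0xE0000000,
                         (void *)0x10000000, 0x1000, MMU_MAP_K_RW);
#else
    /* no user space: only the physical address is supplied */
    return rt_hw_mmu_map(&mmu_info, (void *)0x10000000, 0x1000, MMU_MAP_K_RW);
#endif
}
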
+ 2 - 2
libcpu/x86/i386/page.c

@@ -14,7 +14,7 @@
 #include <rthw.h>
 #include <board.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #include "page.h"
 #include "mmu.h"
@@ -486,4 +486,4 @@ void rt_page_init(rt_region_t reg)
     rt_pages_alloc(0);
 }
 
-#endif /* RT_USING_USERSPACE */
+#endif /* ARCH_ARM_MMU */

+ 2 - 2
libcpu/x86/i386/syscall_c.c

@@ -17,7 +17,7 @@
 //#define DBG_LEVEL DBG_INFO
 #include <rtdbg.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 
 #include <stdint.h>
 #include <mmu.h>
@@ -91,4 +91,4 @@ void rt_hw_syscall_dispath(struct rt_hw_stack_frame *frame)
     LOG_I("\033[36msyscall deal ok,ret = 0x%p\n\033[37m",frame->eax);
 }
 
-#endif /* RT_USING_USERSPACE */
+#endif /* ARCH_ARM_MMU */

+ 2 - 2
src/cpu.c

@@ -10,7 +10,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
 #include <lwp.h>
 #endif
 
@@ -200,7 +200,7 @@ void rt_cpus_lock_status_restore(struct rt_thread *thread)
 {
     struct rt_cpu* pcpu = rt_cpu_self();
 
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     lwp_mmu_switch(thread);
 #endif
     pcpu->current_thread = thread;

+ 2 - 3
src/mem.c

@@ -632,17 +632,16 @@ void rt_memory_info(rt_uint32_t *total,
 
 #ifdef RT_USING_LWP
 #include <lwp.h>
-#else
+#endif
 #ifndef ARCH_PAGE_SIZE
 #define ARCH_PAGE_SIZE  0
-#endif
 
 #endif
 
 void list_mem(void)
 {
     size_t total_pages = 0, free_pages = 0;
-#ifdef RT_USING_USERSPACE
+#ifdef ARCH_ARM_MMU
     rt_page_get_info(&total_pages, &free_pages);
 #endif
 

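Note: the net effect of swapping the #else for an #endif is that the ARCH_PAGE_SIZE fallback no longer applies only to builds without RT_USING_LWP. Reconstructed from the hunk above, the block now reads:

#ifdef RT_USING_LWP
#include <lwp.h>
#endif
#ifndef ARCH_PAGE_SIZE
#define ARCH_PAGE_SIZE  0

#endif
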
+ 17 - 0
src/scheduler.c

@@ -33,6 +33,10 @@
 #include <rtthread.h>
 #include <rthw.h>
 
+#ifdef RT_USING_LWP
+#include <lwp.h>
+#endif /* RT_USING_LWP */
+
 rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
 rt_uint32_t rt_thread_ready_priority_group;
 #if RT_THREAD_PRIORITY_MAX > 32
@@ -82,6 +86,19 @@ static void _rt_scheduler_stack_check(struct rt_thread *thread)
 {
     RT_ASSERT(thread != RT_NULL);
 
+#ifdef RT_USING_LWP
+#ifndef ARCH_ARM_MMU
+    struct rt_lwp *lwp = thread ? (struct rt_lwp *)thread->lwp : 0;
+
+    /* if stack pointer locate in user data section skip stack check. */
+    if (lwp && ((rt_uint32_t)thread->sp > (rt_uint32_t)lwp->data_entry &&
+    (rt_uint32_t)thread->sp <= (rt_uint32_t)lwp->data_entry + (rt_uint32_t)lwp->data_size))
+    {
+        return;
+    }
+#endif /* not defined ARCH_ARM_MMU */
+#endif /* RT_USING_LWP */
+
 #if defined(ARCH_CPU_STACK_GROWS_UPWARD)
     if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
 #else

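Note: a worked instance of the early return added above, using hypothetical lwp values to show the window in which the '#' magic check is skipped:

static int stack_check_skipped(rt_uint32_t sp)
{
    rt_uint32_t data_entry = 0x80100000;   /* hypothetical lwp->data_entry */
    rt_uint32_t data_size  = 0x00020000;   /* hypothetical lwp->data_size  */

    /* sp = 0x80110000 -> inside (entry, entry + size] -> check skipped     */
    /* sp = 0x80100000 -> not strictly greater         -> check still runs  */
    return (sp > data_entry) && (sp <= data_entry + data_size);
}
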
Some files were not shown because too many files changed in this diff