
[components/mm] add unmap page API (#7834)

Signed-off-by: Shell <smokewood@qq.com>
Shell 2 years ago
parent
commit
d862816a51

+ 1 - 1
components/dfs/dfs_v1/filesystems/nfs/rpc/types.h

@@ -60,7 +60,7 @@ typedef unsigned long u_long;
 typedef int bool_t;
 typedef int enum_t;
 
-#if !defined(RT_USING_NEWLIB) && !defined(RT_USING_MUSL)
+#if !defined(RT_USING_NEWLIB) && !defined(RT_USING_MUSLLIBC)
 typedef unsigned long dev_t;
 #endif
 

+ 100 - 0
components/lwp/libc_musl.h

@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-07-11     RT-Thread    first version
+ */
+
+#ifndef __LIBC_MUSL_H__
+#define __LIBC_MUSL_H__
+
+/* from internal/futex.h */
+
+#define FUTEX_WAIT           0
+#define FUTEX_WAKE           1
+#define FUTEX_FD             2
+#define FUTEX_REQUEUE        3
+#define FUTEX_CMP_REQUEUE    4
+#define FUTEX_WAKE_OP        5
+#define FUTEX_LOCK_PI        6
+#define FUTEX_UNLOCK_PI      7
+#define FUTEX_TRYLOCK_PI     8
+#define FUTEX_WAIT_BITSET    9
+
+#define FUTEX_PRIVATE 128
+
+#define FUTEX_CLOCK_REALTIME 256
+
+/* for pmutex op */
+#define PMUTEX_INIT    0
+#define PMUTEX_LOCK    1
+#define PMUTEX_UNLOCK  2
+#define PMUTEX_DESTROY 3
+
+/* for sys/mman.h */
+#define MAP_FAILED ((void *) -1)
+
+#define MAP_SHARED     0x01
+#define MAP_PRIVATE    0x02
+#define MAP_SHARED_VALIDATE 0x03
+#define MAP_TYPE       0x0f
+#define MAP_FIXED      0x10
+#define MAP_ANON       0x20
+#define MAP_ANONYMOUS  MAP_ANON
+#define MAP_NORESERVE  0x4000
+#define MAP_GROWSDOWN  0x0100
+#define MAP_DENYWRITE  0x0800
+#define MAP_EXECUTABLE 0x1000
+#define MAP_LOCKED     0x2000
+#define MAP_POPULATE   0x8000
+#define MAP_NONBLOCK   0x10000
+#define MAP_STACK      0x20000
+#define MAP_HUGETLB    0x40000
+#define MAP_SYNC       0x80000
+#define MAP_FIXED_NOREPLACE 0x100000
+#define MAP_FILE       0
+
+#define MAP_UNINITIALIZED 0x4000000 /** For anonymous mmap, memory could be
+                                     * uninitialized */
+
+#define MAP_HUGE_SHIFT 26
+#define MAP_HUGE_MASK  0x3f
+#define MAP_HUGE_16KB  (14 << 26)
+#define MAP_HUGE_64KB  (16 << 26)
+#define MAP_HUGE_512KB (19 << 26)
+#define MAP_HUGE_1MB   (20 << 26)
+#define MAP_HUGE_2MB   (21 << 26)
+#define MAP_HUGE_8MB   (23 << 26)
+#define MAP_HUGE_16MB  (24 << 26)
+#define MAP_HUGE_32MB  (25 << 26)
+#define MAP_HUGE_256MB (28 << 26)
+#define MAP_HUGE_512MB (29 << 26)
+#define MAP_HUGE_1GB   (30 << 26)
+#define MAP_HUGE_2GB   (31 << 26)
+#define MAP_HUGE_16GB  (34U << 26)
+
+#define PROT_NONE      0
+#define PROT_READ      1
+#define PROT_WRITE     2
+#define PROT_EXEC      4
+#define PROT_GROWSDOWN 0x01000000
+#define PROT_GROWSUP   0x02000000
+
+#define MS_ASYNC       1
+#define MS_INVALIDATE  2
+#define MS_SYNC        4
+
+#define MCL_CURRENT    1
+#define MCL_FUTURE     2
+#define MCL_ONFAULT    4
+
+#define POSIX_MADV_NORMAL     0
+#define POSIX_MADV_RANDOM     1
+#define POSIX_MADV_SEQUENTIAL 2
+#define POSIX_MADV_WILLNEED   3
+#define POSIX_MADV_DONTNEED   4
+
+#endif /* __LIBC_MUSL_H__ */

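The constants above mirror musl's ABI one-to-one so that kernel-side LWP code can interpret prot/flags words handed over by a musl user space without pulling libc headers in (lwp_user_mm.c below drops its private MAP_ANONYMOUS define for exactly this reason). A minimal sketch of decoding such a flag word; the helper and its parameters are illustrative, only the constants come from this header:

    #include "libc_musl.h"

    /* Illustrative only: classify a musl mmap() request on the kernel side. */
    static int is_private_anonymous(int prot, int flags)
    {
        if ((flags & MAP_TYPE) != MAP_PRIVATE)   /* MAP_TYPE masks the sharing kind */
            return 0;
        if (!(flags & MAP_ANONYMOUS))            /* no backing file requested */
            return 0;
        return prot != PROT_NONE;                /* PROT_NONE only reserves addresses */
    }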
+ 1 - 1
components/lwp/lwp.c

@@ -1341,7 +1341,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
     return -RT_ERROR;
 }
 
-#ifdef RT_USING_MUSL
+#ifdef RT_USING_MUSLLIBC
 extern char **__environ;
 #else
 char **__environ = 0;

+ 10 - 6
components/lwp/lwp.h

@@ -23,6 +23,7 @@
 #include <rtthread.h>
 #include <dfs.h>
 
+#include "lwp_arch.h"
 #include "lwp_pid.h"
 #include "lwp_ipc.h"
 #include "lwp_signal.h"
@@ -30,22 +31,25 @@
 #include "lwp_avl.h"
 #include "mm_aspace.h"
 
+#ifdef RT_USING_MUSLLIBC
+#include "libc_musl.h"
+#endif /* RT_USING_MUSLLIBC */
+
 #ifdef ARCH_MM_MMU
 #include "lwp_shm.h"
-
 #include "mmu.h"
 #include "page.h"
 #else
 #include "lwp_mpu.h"
-#endif
-#include "lwp_arch.h"
+#endif /* ARCH_MM_MMU */
 
-#ifdef RT_USING_MUSL
+#ifdef RT_USING_MUSLLIBC
 #include <locale.h>
-#endif
+#endif /* RT_USING_MUSLLIBC */
+
 #ifdef  RT_USING_TTY
 struct tty_struct;
-#endif
+#endif /* RT_USING_TTY */
 
 #ifdef __cplusplus
 extern "C" {

+ 5 - 5
components/lwp/lwp_syscall.c

@@ -764,18 +764,18 @@ sysret_t sys_poll(struct pollfd *fds, nfds_t nfds, int timeout)
     kmem_put(kfds);
     return ret;
 #else
-#ifdef RT_USING_MUSL
+#ifdef RT_USING_MUSLLIBC
     for (i = 0; i < nfds; i++)
     {
         musl2dfs_events(&fds->events);
     }
-#endif /* RT_USING_MUSL */
+#endif /* RT_USING_MUSLLIBC */
     if (!lwp_user_accessable((void *)fds, nfds * sizeof *fds))
     {
         return -EFAULT;
     }
     ret = poll(fds, nfds, timeout);
-#ifdef RT_USING_MUSL
+#ifdef RT_USING_MUSLLIBC
     if (ret > 0)
     {
         for (i = 0; i < nfds; i++)
@@ -783,7 +783,7 @@ sysret_t sys_poll(struct pollfd *fds, nfds_t nfds, int timeout)
             dfs2musl_events(&fds->revents);
         }
     }
-#endif /* RT_USING_MUSL */
+#endif /* RT_USING_MUSLLIBC */
     return ret;
 #endif /* ARCH_MM_MMU */
 }
@@ -3986,7 +3986,7 @@ sysret_t sys_rmdir(const char *path)
 #endif
 }
 
-#ifdef RT_USING_MUSL
+#ifdef RT_USING_MUSLLIBC
 typedef uint64_t ino_t;
 #endif
 

+ 19 - 5
components/lwp/lwp_user_mm.c

@@ -31,6 +31,10 @@
 #include <mmu.h>
 #include <page.h>
 
+#ifdef RT_USING_MUSLLIBC
+#include "libc_musl.h"
+#endif
+
 #define DBG_TAG "LwP"
 #define DBG_LVL DBG_LOG
 #include <rtdbg.h>
@@ -506,8 +510,6 @@ rt_base_t lwp_brk(void *addr)
     return ret;
 }
 
-#define MAP_ANONYMOUS 0x20
-
 void *lwp_mmap2(void *addr, size_t length, int prot, int flags, int fd,
                 off_t pgoffset)
 {
@@ -622,11 +624,10 @@ size_t lwp_put_to_user(void *dst, void *src, size_t size)
     return lwp_data_put(lwp, dst, src, size);
 }
 
-int lwp_user_accessable(void *addr, size_t size)
+int lwp_user_accessible_ext(struct rt_lwp *lwp, void *addr, size_t size)
 {
     void *addr_start = RT_NULL, *addr_end = RT_NULL, *next_page = RT_NULL;
     void *tmp_addr = RT_NULL;
-    struct rt_lwp *lwp = lwp_self();
 
     if (!lwp)
     {
@@ -669,7 +670,15 @@ int lwp_user_accessable(void *addr, size_t size)
         if (tmp_addr == ARCH_MAP_FAILED)
         {
             if ((rt_ubase_t)addr_start >= USER_STACK_VSTART && (rt_ubase_t)addr_start < USER_STACK_VEND)
-                tmp_addr = *(void **)addr_start;
+            {
+                struct rt_aspace_fault_msg msg = {
+                    .fault_op = MM_FAULT_OP_WRITE,
+                    .fault_type = MM_FAULT_TYPE_PAGE_FAULT,
+                    .fault_vaddr = addr_start,
+                };
+                if (!rt_aspace_fault_try_fix(lwp->aspace, &msg))
+                    return 0;
+            }
             else
                 return 0;
         }
@@ -680,6 +689,11 @@ int lwp_user_accessable(void *addr, size_t size)
     return 1;
 }
 
+int lwp_user_accessable(void *addr, size_t size)
+{
+    return lwp_user_accessible_ext(lwp_self(), addr, size);
+}
+
 /* src is in mmu_info space, dst is in current thread space */
 size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size)
 {

+ 1 - 0
components/lwp/lwp_user_mm.h

@@ -48,6 +48,7 @@ int lwp_munmap(void *addr);
 size_t lwp_get_from_user(void *dst, void *src, size_t size);
 size_t lwp_put_to_user(void *dst, void *src, size_t size);
 int lwp_user_accessable(void *addr, size_t size);
+int lwp_user_accessible_ext(struct rt_lwp *lwp, void *addr, size_t size);
 
 size_t lwp_data_get(struct rt_lwp *lwp, void *dst, void *src, size_t size);
 size_t lwp_data_put(struct rt_lwp *lwp, void *dst, void *src, size_t size);

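Because lwp_user_accessible_ext() takes the target process explicitly, a range can now be validated against an address space other than the caller's, while lwp_user_accessable() keeps its old behaviour by passing lwp_self(). A hedged sketch of the intended call pattern; copy_in_from and its parameter names are placeholders, only the two lwp_* calls are real APIs from this header:

    #include <errno.h>
    #include "lwp_user_mm.h"

    /* Sketch: validate a user buffer of a given process, then copy it in. */
    static int copy_in_from(struct rt_lwp *target, void *kbuf, void *ubuf, size_t len)
    {
        if (!lwp_user_accessible_ext(target, ubuf, len))
            return -EFAULT;   /* range is neither mapped nor committable */
        return (lwp_data_get(target, kbuf, ubuf, len) == len) ? 0 : -EFAULT;
    }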
+ 41 - 3
components/mm/mm_aspace.c

@@ -400,11 +400,11 @@ static inline int _not_align(void *start, rt_size_t length, rt_size_t mask)
            (((uintptr_t)start & mask) || (length & mask));
 }
 
+/** check whether the flags contain any bit that is not currently supported */
 static inline int _not_support(rt_size_t flags)
 {
-    rt_size_t support_ops = (MMF_PREFETCH | MMF_MAP_FIXED | MMF_TEXT |
-        MMF_STATIC_ALLOC | MMF_REQUEST_ALIGN);
-    return flags & ~(support_ops | _MMF_ALIGN_MASK);
+    rt_size_t support_ops = MMF_CREATE(((__MMF_INVALID - 1) << 1) - 1, 1);
+    return flags & ~(support_ops);
 }
 
 int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
@@ -855,6 +855,15 @@ int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page)
 
 #define ALIGNED(addr) (!((rt_size_t)(addr) & ARCH_PAGE_MASK))
 
+int rt_varea_unmap_page(rt_varea_t varea, void *vaddr)
+{
+    void *va_aligned = (void *)RT_ALIGN_DOWN((rt_base_t)vaddr, ARCH_PAGE_SIZE);
+    return rt_varea_unmap_range(varea, va_aligned, ARCH_PAGE_SIZE);
+}
+
+/**
+ * @note The caller should take care of synchronizing the varea across all map/unmap operations
+ */
 int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length)
 {
     int err;
@@ -883,6 +892,35 @@ int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t len
     return err;
 }
 
+/**
+ * @note The caller should take care of synchronizing the varea across all map/unmap operations
+ */
+int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length)
+{
+    int err;
+    rt_base_t va_align;
+
+    if (!varea || !vaddr || !length)
+    {
+        LOG_W("%s(%p,%p,%lx): invalid input", __func__, varea, vaddr, length);
+        err = -RT_EINVAL;
+    }
+    else if (_not_in_range(vaddr, length, varea->start, varea->size))
+    {
+        LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
+            vaddr, length, varea->start, varea->size);
+        err = -RT_EINVAL;
+    }
+    else
+    {
+        va_align = RT_ALIGN_DOWN((rt_base_t)vaddr, ARCH_PAGE_SIZE);
+        rt_hw_mmu_unmap(varea->aspace, (void *)va_align, length);
+        rt_hw_tlb_invalidate_range(varea->aspace, (void *)va_align, length, ARCH_PAGE_SIZE);
+        err = RT_EOK;
+    }
+    return err;
+}
+
 int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
 {
     return -RT_ENOSYS;

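The rewritten _not_support() derives the accepted-flag mask from the enum itself instead of keeping a hand-written list in sync. A worked sketch of the arithmetic, assuming _DEF_FLAG(n) expands to (1ul << n), as the flag values in the mm_flag.h changes below imply:

    /* MMF_REQUEST_ALIGN = _DEF_FLAG(9)           = 0x200
     * __MMF_INVALID     = MMF_REQUEST_ALIGN + 1  = 0x201  (next enum value)
     * ((__MMF_INVALID - 1) << 1) - 1             = 0x3ff  (bits 0..9, i.e. every defined flag)
     *
     * The alignment argument of 1 makes MMF_CREATE() admit the alignment field
     * as well, which the old code OR'ed in explicitly via _MMF_ALIGN_MASK.
     * Any flag added later, before __MMF_INVALID, is accepted automatically. */
    rt_size_t support_ops = MMF_CREATE(((__MMF_INVALID - 1) << 1) - 1, 1);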
+ 26 - 0
components/mm/mm_aspace.h

@@ -206,9 +206,22 @@ void rt_aspace_print_all(rt_aspace_t aspace);
  */
 int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
 
+/**
+ * @brief Unmap one page in varea
+ *
+ * @param varea target varea
+ * @param vaddr user address of the page to be unmapped
+ * @return int
+ */
+int rt_varea_unmap_page(rt_varea_t varea, void *vaddr);
+
 /**
  * @brief Map a range of physical address to varea
  *
+ * @warning The caller should take care of synchronizing the varea across all
+ *          map/unmap operations
+ *
  * @param varea target varea
  * @param vaddr user address
  * @param paddr physical address
@@ -217,6 +230,19 @@ int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
  */
 int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);
 
+/**
+ * @brief Unmap a range of addresses in varea
+ *
+ * @warning The caller should take care of synchronizing the varea across all
+ *          map/unmap operations
+ *
+ * @param varea target varea
+ * @param vaddr user address where the range starts
+ * @param length length of the range to unmap
+ * @return int
+ */
+int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length);
+
 /**
  * @brief Insert page to page manager of varea
  * The page will be freed by varea on uninstall automatically

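A minimal usage sketch for the new unmap pair, assuming the caller already serializes access to the varea as the warnings above require; varea, paddr and buf_sz are placeholder names following the test case added in test_aspace_api.h below:

    /* Establish page-table entries for a physically contiguous buffer, then
     * cancel them again: one page at a time, or the rest in a single call. */
    if (rt_varea_map_range(varea, varea->start, paddr, buf_sz) == RT_EOK)
    {
        /* ... use the mapping ... */

        rt_varea_unmap_page(varea, varea->start);                  /* drop the first page */
        rt_varea_unmap_range(varea, (char *)varea->start + ARCH_PAGE_SIZE,
                             buf_sz - ARCH_PAGE_SIZE);             /* drop the remainder */
    }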
+ 7 - 5
components/mm/mm_fault.c

@@ -89,18 +89,19 @@ static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *m
     return err;
 }
 
-int rt_aspace_fault_try_fix(struct rt_aspace_fault_msg *msg)
+int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
 {
-    struct rt_lwp *lwp = lwp_self();
     int err = UNRECOVERABLE;
     uintptr_t va = (uintptr_t)msg->fault_vaddr;
     va &= ~ARCH_PAGE_MASK;
     msg->fault_vaddr = (void *)va;
 
-    if (lwp)
+    if (aspace)
     {
-        rt_aspace_t aspace = lwp->aspace;
-        rt_varea_t varea = _aspace_bst_search(aspace, msg->fault_vaddr);
+        rt_varea_t varea;
+
+        RD_LOCK(aspace);
+        varea = _aspace_bst_search(aspace, msg->fault_vaddr);
         if (varea)
         {
             void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
@@ -120,6 +121,7 @@ int rt_aspace_fault_try_fix(struct rt_aspace_fault_msg *msg)
                 break;
             }
         }
+        RD_UNLOCK(aspace);
     }
 
     return err;

+ 3 - 2
components/mm/mm_fault.h

@@ -52,7 +52,8 @@ struct rt_aspace_fault_msg
     struct rt_mm_fault_res response;
 };
 
-/* MMU base page fault handler, return 1 is */
-int rt_aspace_fault_try_fix(struct rt_aspace_fault_msg *msg);
+struct rt_aspace;
+/* MMU base page fault handler, returns 1 if the fault is fixable */
+int rt_aspace_fault_try_fix(struct rt_aspace *aspace, struct rt_aspace_fault_msg *msg);
 
 #endif /* __MM_FAULT_H__ */

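With the address space passed in explicitly, the MM layer no longer reaches back into the LWP component; each architecture's trap handler resolves the faulting aspace itself, typically from lwp_self(). A condensed sketch of the new calling convention, mirroring the trap.c updates below; fault_vaddr stands for whatever the exception decoding produced:

    struct rt_aspace_fault_msg msg = {
        .fault_op    = MM_FAULT_OP_WRITE,
        .fault_type  = MM_FAULT_TYPE_PAGE_FAULT,
        .fault_vaddr = fault_vaddr,
    };
    struct rt_lwp *lwp = lwp_self();

    if (lwp && rt_aspace_fault_try_fix(lwp->aspace, &msg))
    {
        /* recoverable: the page has been committed, retry the faulting access */
    }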
+ 17 - 3
components/mm/mm_flag.h

@@ -47,22 +47,36 @@ enum mm_flag_cntl
      */
     MMF_PREFETCH = _DEF_FLAG(3),
 
+    /**
+     * @brief Allocate the mapping using "huge" pages
+     */
     MMF_HUGEPAGE = _DEF_FLAG(4),
 
+    /** internal reserved flags */
     MMF_TEXT = _DEF_FLAG(5),
 
+    /** internal reserved flags */
     MMF_STATIC_ALLOC = _DEF_FLAG(6),
 
+    /**
+     * @brief Shared mapping. Updates to the mapping are visible to other
+     * processes mapping the same region, and are carried through to the
+     * underlying file.
+     */
+    MMF_MAP_SHARED = _DEF_FLAG(7),
+
     /**
      * @brief a non-locked memory can be swapped out when required, this is
      * reserved for future
      */
-    MMF_NONLOCKED = _DEF_FLAG(20),
+    MMF_NONLOCKED = _DEF_FLAG(8),
 
     /**
      * @brief An alignment is specified in flags that the mapping must admit
      */
-    MMF_REQUEST_ALIGN = _DEF_FLAG(21),
+    MMF_REQUEST_ALIGN = _DEF_FLAG(9),
+
+    __MMF_INVALID,
 };
 
 #define MMF_GET_ALIGN(src) ((src & _MMF_ALIGN_MASK))
@@ -78,7 +92,7 @@ enum mm_flag_cntl
  * @brief Create Flags
  *
  * example: MMF_CREATE(0, 0)
- *          MMF_CREATE(MM_MAP_FIXED, 0x2000)
+ *          MMF_CREATE(MMF_MAP_FIXED, 0x2000)
  *
  * Direct use of flag is also acceptable: (MMF_MAP_FIXED | MMF_PREFETCH)
  */

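For reference, the corrected doc example builds an aligned fixed-address request. A small sketch of both ways of forming a flag word, as the comment describes; the variable names are illustrative and rt_size_t is used as the flag type to match _not_support() in mm_aspace.c:

    /* Request a mapping at the exact caller-supplied address, 0x2000-aligned. */
    rt_size_t fixed_aligned = MMF_CREATE(MMF_MAP_FIXED, 0x2000);

    /* Flags without an alignment request may simply be OR'ed together. */
    rt_size_t shared_prefetch = MMF_MAP_SHARED | MMF_PREFETCH;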
+ 2 - 2
examples/utest/testcases/mm/SConscript

@@ -5,10 +5,10 @@ cwd     = GetCurrentDir()
 src     = []
 CPPPATH = [cwd]
 
-if GetDepend(['UTEST_MM_API_TC']):
+if GetDepend(['UTEST_MM_API_TC', 'RT_USING_SMART']):
     src += ['mm_api_tc.c', 'mm_libcpu_tc.c']
 
-if GetDepend(['UTEST_MM_LWP_TC']):
+if GetDepend(['UTEST_MM_LWP_TC', 'RT_USING_SMART']):
     src += ['mm_lwp_tc.c']
 
 group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = CPPPATH)

+ 4 - 1
examples/utest/testcases/mm/common.h

@@ -19,10 +19,13 @@
 #include <board.h>
 #include <rtthread.h>
 #include <rthw.h>
-#include <lwp_arch.h>
 #include <mmu.h>
 #include <tlb.h>
 
+#ifdef RT_USING_SMART
+#include <lwp_arch.h>
+#endif
+
 #include <ioremap.h>
 #include <mm_aspace.h>
 #include <mm_flag.h>

+ 27 - 3
examples/utest/testcases/mm/mm_lwp_tc.c

@@ -52,9 +52,9 @@ static void test_user_map_varea(void)
     rt_varea_t varea;
     lwp = lwp_new();
 
+    /* prepare environment */
     uassert_true(!!lwp);
-    uassert_true(!lwp_user_space_init(lwp, 0));
-
+    uassert_true(!lwp_user_space_init(lwp, 1));
     TEST_VAREA_INSERT(
         varea = lwp_map_user_varea(lwp, 0, buf_sz),
         lwp->aspace);
@@ -77,7 +77,7 @@ static void test_user_map_varea_ext(void)
     lwp = lwp_new();
 
     uassert_true(!!lwp);
-    uassert_true(!lwp_user_space_init(lwp, 0));
+    uassert_true(!lwp_user_space_init(lwp, 1));
 
     TEST_VAREA_INSERT(
         varea = lwp_map_user_varea_ext(lwp, 0, buf_sz, LWP_MAP_FLAG_NOCACHE),
@@ -99,6 +99,29 @@ static void user_map_varea_tc(void)
     CONSIST_HEAP(test_user_map_varea_ext());
 }
 
+static void test_user_accessible(void)
+{
+    /* Prepare Environment */
+    char *test_address = (char *)(USER_STACK_VEND);
+    struct rt_lwp *lwp;
+    lwp = lwp_new();
+    uassert_true(!!lwp);
+    uassert_true(!lwp_user_space_init(lwp, 0));
+
+    /* an address just past the end of the user stack must be rejected */
+    uassert_true(!lwp_user_accessible_ext(lwp, test_address + 0x1, 0x1));
+    /* inside the stack range the check can commit the page and returns true */
+    uassert_true(lwp_user_accessible_ext(lwp, test_address - 0x10, 0x10));
+
+    /* Cleanup */
+    lwp_ref_dec(lwp);
+}
+
+static void accessible_tc(void)
+{
+    CONSIST_HEAP(test_user_accessible());
+}
+
 static rt_err_t utest_tc_init(void)
 {
     return RT_EOK;
@@ -112,5 +135,6 @@ static rt_err_t utest_tc_cleanup(void)
 static void testcase(void)
 {
     UTEST_UNIT_RUN(user_map_varea_tc);
+    UTEST_UNIT_RUN(accessible_tc);
 }
 UTEST_TC_EXPORT(testcase, "testcases.lwp.mm_tc", utest_tc_init, utest_tc_cleanup, 20);

+ 60 - 1
examples/utest/testcases/mm/test_aspace_api.h

@@ -11,7 +11,6 @@
 #define __TEST_ASPACE_API_H__
 
 #include "common.h"
-#include "lwp_arch.h"
 #include "test_aspace_api_internal.h"
 #include "test_synchronization.h"
 
@@ -181,6 +180,7 @@ static void test_varea_map_page(void)
         void *page = rt_pages_alloc(0);
         uassert_true(!!page);
         uassert_true(!rt_varea_map_page(varea, varea->start + i, page));
+        uassert_true(rt_kmem_v2p(varea->start + i) == (page + PV_OFFSET));
 
         /* let page manager handle the free of page */
         rt_varea_pgmgr_insert(varea, page);
@@ -210,10 +210,69 @@ static void test_varea_map_range(void)
     uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
 }
 
+/**
+ * @brief rt_varea_unmap_page
+ * Requirements: cancel the page table entry
+ */
+static void test_varea_unmap_page(void)
+{
+    /* Prepare environment */
+    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
+    rt_varea_t varea = _create_varea(buf_sz);
+    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
+    {
+        void *page = rt_pages_alloc(0);
+        uassert_true(!!page);
+        uassert_true(!rt_varea_map_page(varea, varea->start + i, page));
+
+        /* let page manager handle the free of page */
+        rt_varea_pgmgr_insert(varea, page);
+        uassert_true(rt_kmem_v2p(varea->start + i) == (page + PV_OFFSET));
+    }
+
+    /* verify that the unmap succeeded */
+    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
+    {
+        uassert_true(rt_varea_unmap_page(varea, varea->start + i) == RT_EOK);
+        uassert_true(rt_kmem_v2p(varea->start + i) == ARCH_MAP_FAILED);
+    }
+
+    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
+}
+
+/**
+ * @brief rt_varea_unmap_range
+ * Requirements: cancel the page table entries
+ */
+static void test_varea_unmap_range(void)
+{
+    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
+    rt_varea_t varea = _create_varea(buf_sz);
+    void *page = rt_pages_alloc(rt_page_bits(buf_sz));
+    uassert_true(!!page);
+    uassert_true(!rt_varea_map_range(varea, varea->start, page + PV_OFFSET, buf_sz));
+    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
+    {
+        uassert_true(rt_kmem_v2p(varea->start + i) == (page + i + PV_OFFSET));
+    }
+
+    /* verify that the unmap succeeded */
+    uassert_true(rt_varea_unmap_range(varea, varea->start, buf_sz) == RT_EOK);
+    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
+    {
+        uassert_true(rt_kmem_v2p(varea->start + i) == ARCH_MAP_FAILED);
+    }
+
+    uassert_true(rt_pages_free(page, rt_page_bits(buf_sz)));
+    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
+}
+
 static void varea_map_tc(void)
 {
     CONSIST_HEAP(test_varea_map_page());
     CONSIST_HEAP(test_varea_map_range());
+    CONSIST_HEAP(test_varea_unmap_page());
+    CONSIST_HEAP(test_varea_unmap_range());
 }
 
 static void aspace_traversal_tc(void)

+ 0 - 1
examples/utest/testcases/mm/test_bst_adpt.h

@@ -11,7 +11,6 @@
 #define __TEST_BST_ADPT_H__
 
 #include "common.h"
-#include "lwp_arch.h"
 
 #ifdef RT_USING_SMART
 #include "lwp_user_mm.h"

+ 2 - 1
libcpu/aarch64/common/mmu.c

@@ -327,7 +327,8 @@ void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
     while (npages--)
     {
         MM_PGTBL_LOCK(aspace);
-        _kenrel_unmap_4K(aspace->page_table, v_addr);
+        if (rt_hw_mmu_v2p(aspace, v_addr) != ARCH_MAP_FAILED)
+            _kenrel_unmap_4K(aspace->page_table, v_addr);
         MM_PGTBL_UNLOCK(aspace);
         v_addr = (char *)v_addr + ARCH_PAGE_SIZE;
     }

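rt_hw_mmu_unmap() now probes each page with rt_hw_mmu_v2p() and only tears down entries that actually exist, so unmapping a sparsely-populated range is safe. A hedged sketch of the case this guards; varea and page are placeholders in the spirit of the test code above:

    /* Only the first page of a 4-page varea was ever committed ... */
    rt_varea_map_page(varea, varea->start, page);

    /* ... yet cancelling the whole range is fine: the three holes are skipped
     * instead of touching absent page-table entries. */
    rt_varea_unmap_range(varea, varea->start, 4 * ARCH_PAGE_SIZE);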
+ 6 - 2
libcpu/aarch64/common/trap.c

@@ -14,7 +14,7 @@
 
 #include <armv8.h>
 #include "interrupt.h"
-#include "mm_fault.h"
+#include "mm_aspace.h"
 
 #include <backtrace.h>
 
@@ -82,6 +82,8 @@ int check_user_stack(unsigned long esr, struct rt_hw_exp_stack *regs)
     ec = (unsigned char)((esr >> 26) & 0x3fU);
     enum rt_mm_fault_op fault_op;
     enum rt_mm_fault_type fault_type;
+    struct rt_lwp *lwp;
+
     switch (ec)
     {
     case 0x20:
@@ -107,7 +109,9 @@ int check_user_stack(unsigned long esr, struct rt_hw_exp_stack *regs)
             .fault_type = fault_type,
             .fault_vaddr = dfar,
         };
-        if (rt_aspace_fault_try_fix(&msg))
+        lwp = lwp_self();
+        RT_ASSERT(lwp);
+        if (rt_aspace_fault_try_fix(lwp->aspace, &msg))
         {
             ret = 1;
         }

+ 3 - 1
libcpu/arm/cortex-a/trap.c

@@ -46,6 +46,7 @@ void check_user_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
 int check_user_stack(struct rt_hw_exp_stack *regs)
 {
     void *dfar = RT_NULL;
+    struct rt_lwp *lwp;
     asm volatile("MRC p15, 0, %0, c6, c0, 0" : "=r"(dfar));
 
     if ((dfar >= (void *)USER_STACK_VSTART) && (dfar < (void *)USER_STACK_VEND))
@@ -55,7 +56,8 @@ int check_user_stack(struct rt_hw_exp_stack *regs)
             .fault_type = MM_FAULT_TYPE_PAGE_FAULT,
             .fault_vaddr = dfar,
         };
-        if (rt_aspace_fault_try_fix(&msg))
+        lwp = lwp_self();
+        if (lwp && rt_aspace_fault_try_fix(lwp->aspace, &msg))
         {
             regs->pc -= 8;
             return 1;

+ 3 - 1
libcpu/risc-v/t-head/c906/trap.c

@@ -155,6 +155,7 @@ static const char *get_exception_msg(int id)
 void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw_stack_frame *sp)
 {
     rt_size_t id = __MASKVALUE(scause, __MASK(63UL));
+    struct rt_lwp *lwp;
 
     /* user page fault */
     enum rt_mm_fault_op fault_op;
@@ -203,13 +204,14 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
 
     if (fault_op)
     {
+        lwp = lwp_self();
         struct rt_aspace_fault_msg msg = {
             .fault_op = fault_op,
             .fault_type = fault_type,
             .fault_vaddr = (void *)stval,
         };
 
-        if (rt_aspace_fault_try_fix(&msg))
+        if (lwp && rt_aspace_fault_try_fix(lwp->aspace, &msg))
         {
             return;
         }

+ 3 - 1
libcpu/risc-v/virt64/trap.c

@@ -152,6 +152,7 @@ static const char *get_exception_msg(int id)
 void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw_stack_frame *sp)
 {
     rt_size_t id = __MASKVALUE(scause, __MASK(63UL));
+    struct rt_lwp *lwp;
 
     /* user page fault */
     enum rt_mm_fault_op fault_op;
@@ -200,13 +201,14 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
 
     if (fault_op)
     {
+        lwp = lwp_self();
         struct rt_aspace_fault_msg msg = {
             .fault_op = fault_op,
             .fault_type = fault_type,
             .fault_vaddr = (void *)stval,
         };
 
-        if (rt_aspace_fault_try_fix(&msg))
+        if (lwp && rt_aspace_fault_try_fix(lwp->aspace, &msg))
         {
             return;
         }