
[rt-smart] testcase & improvements for memory management (#7099)

* [utest/mm] add testcase for create/init
format code of create/init in components/mm

* [libcpu/aarch64] fix user stack check routine

* [kservice] export API for utest

* [utest/mm] testcase for aspace_map
format & modify the files under components/mm related to aspace_map

* [lwp/user_mm] add user_map_varea for mmap feature

* [mm] rename rt_mm_fault_try_fix to rt_aspace_fault_try_fix

* [utest/mm] testcase for synchronization

* [mm] modify unmap api to improve throughput

* [utest/mm] testcases for cache and varea map

* [format] remove extra space

* [utest/mm] fix testcase problem in header

* [lwp] extend map_user_varea with a flag

* [utest/mm] testcase for lwp_map_user_varea

* [libcpu/arm/cortex-a] fix kernel space layout

* [utest/mm] adjust for armv7 arch
Shell, 2 years ago
parent commit eec78d9f5d
38 files changed, 1880 insertions(+), 233 deletions(-)
  1. bsp/qemu-vexpress-a9/drivers/board.c (+1 -1)
  2. components/lwp/lwp_shm.c (+16 -5)
  3. components/lwp/lwp_user_mm.c (+102 -6)
  4. components/lwp/lwp_user_mm.h (+7 -0)
  5. components/mm/avl_adpt.c (+2 -0)
  6. components/mm/ioremap.c (+1 -1)
  7. components/mm/mm_aspace.c (+296 -140)
  8. components/mm/mm_aspace.h (+33 -9)
  9. components/mm/mm_fault.c (+19 -29)
  10. components/mm/mm_fault.h (+8 -5)
  11. components/mm/mm_flag.h (+3 -3)
  12. components/mm/mm_kmem.c (+3 -3)
  13. components/mm/mm_object.c (+6 -6)
  14. components/mm/mm_page.c (+3 -3)
  15. components/mm/mm_private.h (+11 -6)
  16. examples/utest/testcases/Kconfig (+1 -0)
  17. examples/utest/testcases/mm/Kconfig (+17 -0)
  18. examples/utest/testcases/mm/SConscript (+16 -0)
  19. examples/utest/testcases/mm/common.h (+65 -0)
  20. examples/utest/testcases/mm/mm_api_tc.c (+113 -0)
  21. examples/utest/testcases/mm/mm_libcpu_tc.c (+16 -0)
  22. examples/utest/testcases/mm/mm_lwp_tc.c (+116 -0)
  23. examples/utest/testcases/mm/semaphore.h (+37 -0)
  24. examples/utest/testcases/mm/test_aspace_api.h (+287 -0)
  25. examples/utest/testcases/mm/test_aspace_api_internal.h (+80 -0)
  26. examples/utest/testcases/mm/test_bst_adpt.h (+107 -0)
  27. examples/utest/testcases/mm/test_cache_aarch64.h (+115 -0)
  28. examples/utest/testcases/mm/test_cache_rv64.h (+194 -0)
  29. examples/utest/testcases/mm/test_synchronization.h (+165 -0)
  30. libcpu/aarch64/common/cache.h (+0 -1)
  31. libcpu/aarch64/common/trap.c (+4 -3)
  32. libcpu/arm/cortex-a/mmu.c (+19 -2)
  33. libcpu/arm/cortex-a/mmu.h (+1 -0)
  34. libcpu/arm/cortex-a/trap.c (+3 -3)
  35. libcpu/risc-v/t-head/c906/trap.c (+3 -3)
  36. libcpu/risc-v/virt64/cache.h (+1 -1)
  37. libcpu/risc-v/virt64/trap.c (+3 -3)
  38. src/kservice.c (+6 -0)

+ 1 - 1
bsp/qemu-vexpress-a9/drivers/board.c

@@ -26,7 +26,7 @@
 
 #ifdef RT_USING_SMART
 struct mem_desc platform_mem_desc[] = {
-    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x0fffffff, (rt_size_t)ARCH_MAP_FAILED, NORMAL_MEM}
+    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x10000000, (rt_size_t)ARCH_MAP_FAILED, NORMAL_MEM}
 };
 #else
 struct mem_desc platform_mem_desc[] = {

+ 16 - 5
components/lwp/lwp_shm.c

@@ -56,13 +56,24 @@ static void on_shm_varea_close(struct rt_varea *varea)
     shm->ref -= 1;
 }
 
-static void on_shm_page_fault(struct rt_varea *varea, struct rt_mm_fault_msg *msg)
+static void on_shm_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
 {
     struct lwp_shm_struct *shm;
+    int err;
     shm = rt_container_of(varea->mem_obj, struct lwp_shm_struct, mem_obj);
-    msg->response.status = MM_FAULT_STATUS_OK;
-    msg->response.vaddr = (void *)shm->addr;
-    msg->response.size = shm->size;
+
+    /* map all shared page frames into user space at once */
+    void *page = (void *)shm->addr;
+    void *pg_paddr = page + PV_OFFSET;
+    err = rt_varea_map_range(varea, varea->start, pg_paddr, shm->size);
+
+    if (err == RT_EOK)
+    {
+        msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
+        msg->response.size = shm->size;
+        msg->response.vaddr = page;
+    }
+
     return ;
 }
 
@@ -383,7 +394,7 @@ int _lwp_shmdt(void *shm_vaddr)
         return -1;
     }
 
-    ret = rt_aspace_unmap(lwp->aspace, shm_vaddr, 1);
+    ret = rt_aspace_unmap(lwp->aspace, shm_vaddr);
     if (ret != RT_EOK)
     {
         ret = -1;
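
The first hunk above shows the new fault-handling protocol: instead of returning a
kernel buffer for the core to map (MM_FAULT_STATUS_OK), a handler may establish the
mapping itself via rt_varea_map_range() and report MM_FAULT_STATUS_OK_MAPPED. A
minimal sketch of that pattern follows; `backing_vaddr` is a hypothetical
kernel-space buffer owned by the memory object, everything else is the API added in
this commit.

static void on_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    /* physical address of the (hypothetical) kernel-space backing buffer */
    void *backing_paddr = backing_vaddr + PV_OFFSET;

    /* map the whole backing range onto the varea in one call */
    if (rt_varea_map_range(varea, varea->start, backing_paddr, varea->size) == RT_EOK)
    {
        msg->response.status = MM_FAULT_STATUS_OK_MAPPED; /* mapping already done */
        msg->response.vaddr = backing_vaddr;
        msg->response.size = varea->size;
    }
    /* on failure, leave the response untouched so the fault stays unrecoverable */
}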

+ 102 - 6
components/lwp/lwp_user_mm.c

@@ -108,15 +108,18 @@ static const char *user_get_name(rt_varea_t varea)
     return name;
 }
 
+#define NO_AUTO_FETCH               0x1
+#define VAREA_CAN_AUTO_FETCH(varea) (!((rt_ubase_t)((varea)->data) & NO_AUTO_FETCH))
+
 static void _user_do_page_fault(struct rt_varea *varea,
-                                struct rt_mm_fault_msg *msg)
+                                struct rt_aspace_fault_msg *msg)
 {
     struct rt_lwp_objs *lwp_objs;
     lwp_objs = rt_container_of(varea->mem_obj, struct rt_lwp_objs, mem_obj);
 
     if (lwp_objs->source)
     {
-        void *paddr = rt_hw_mmu_v2p(lwp_objs->source, msg->vaddr);
+        void *paddr = rt_hw_mmu_v2p(lwp_objs->source, msg->fault_vaddr);
         if (paddr != ARCH_MAP_FAILED)
         {
             void *vaddr;
@@ -128,7 +131,7 @@ static void _user_do_page_fault(struct rt_varea *varea,
                 if (cp)
                 {
                     memcpy(cp, vaddr, ARCH_PAGE_SIZE);
-                    rt_varea_insert_page(varea, cp);
+                    rt_varea_pgmgr_insert(varea, cp);
                     msg->response.status = MM_FAULT_STATUS_OK;
                     msg->response.vaddr = cp;
                     msg->response.size = ARCH_PAGE_SIZE;
@@ -143,7 +146,7 @@ static void _user_do_page_fault(struct rt_varea *varea,
             {
                 rt_page_t page = rt_page_addr2page(vaddr);
                 page->ref_cnt += 1;
-                rt_varea_insert_page(varea, vaddr);
+                rt_varea_pgmgr_insert(varea, vaddr);
                 msg->response.status = MM_FAULT_STATUS_OK;
                 msg->response.vaddr = vaddr;
                 msg->response.size = ARCH_PAGE_SIZE;
@@ -155,14 +158,20 @@ static void _user_do_page_fault(struct rt_varea *varea,
             rt_mm_dummy_mapper.on_page_fault(varea, msg);
         }
     }
-    else /* if (!lwp_objs->source), no aspace as source data */
+    else if (VAREA_CAN_AUTO_FETCH(varea))
     {
+        /* if (!lwp_objs->source), no aspace as source data */
         rt_mm_dummy_mapper.on_page_fault(varea, msg);
     }
 }
 
 static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace)
 {
+    /**
+     * @brief One lwp_obj represents the base layout of page-based memory in user space.
+     * This is useful for duplication, where a (lwp_objs, offset) pair is enough to
+     * provide identical memory. This is implemented by lwp_objs->source.
+     */
     lwp_objs->source = NULL;
     lwp_objs->mem_obj.get_name = user_get_name;
     lwp_objs->mem_obj.hint_free = NULL;
@@ -198,7 +207,7 @@ static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
 int lwp_unmap_user(struct rt_lwp *lwp, void *va)
 {
     int err;
-    err = rt_aspace_unmap(lwp->aspace, va, 1);
+    err = rt_aspace_unmap(lwp->aspace, va);
     return err;
 }
 
@@ -324,6 +333,93 @@ void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
     return ret;
 }
 
+static inline size_t _flags_to_attr(size_t flags)
+{
+    size_t attr;
+
+    if (flags & LWP_MAP_FLAG_NOCACHE)
+    {
+        attr = MMU_MAP_U_RW;
+    }
+    else
+    {
+        attr = MMU_MAP_U_RWCB;
+    }
+
+    return attr;
+}
+
+static inline mm_flag_t _flags_to_aspace_flag(size_t flags)
+{
+    mm_flag_t mm_flag = 0;
+
+    return mm_flag;
+}
+
+static rt_varea_t _lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
+{
+    void *va = map_va;
+    int ret = 0;
+    rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
+    rt_varea_t varea;
+    mm_flag_t mm_flags;
+    size_t attr;
+
+    varea = rt_malloc(sizeof(*varea));
+    if (varea)
+    {
+        attr = _flags_to_attr(flags);
+        mm_flags = _flags_to_aspace_flag(flags);
+        ret = rt_aspace_map_static(lwp->aspace, varea, &va, map_size,
+                                   attr, mm_flags, mem_obj, 0);
+        /* let aspace handle the free of varea */
+        varea->flag &= ~MMF_STATIC_ALLOC;
+        /* don't apply auto fetch on this */
+        varea->data = (void *)NO_AUTO_FETCH;
+    }
+    else
+    {
+        ret = -RT_ENOMEM;
+    }
+
+    if (ret != RT_EOK)
+    {
+        LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d", map_va,
+              map_size, ret);
+    }
+
+    return varea;
+}
+
+static rt_varea_t _map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
+{
+    rt_varea_t varea = RT_NULL;
+    size_t offset = 0;
+
+    if (!map_size)
+    {
+        return 0;
+    }
+    offset = (size_t)map_va & ARCH_PAGE_MASK;
+    map_size += (offset + ARCH_PAGE_SIZE - 1);
+    map_size &= ~ARCH_PAGE_MASK;
+    map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);
+
+    varea = _lwp_map_user_varea(lwp, map_va, map_size, flags);
+
+    return varea;
+}
+
+rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags)
+{
+    return _map_user_varea_ext(lwp, map_va, map_size, flags);
+}
+
+rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size)
+{
+    return _map_user_varea_ext(lwp, map_va, map_size, LWP_MAP_FLAG_NONE);
+}
+
 void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
                        size_t map_size, int cached)
 {

+ 7 - 0
components/lwp/lwp_user_mm.h

@@ -25,12 +25,19 @@
 extern "C" {
 #endif
 
+#define LWP_MAP_FLAG_NONE       0x0000
+#define LWP_MAP_FLAG_NOCACHE    0x0001
+
 int lwp_user_space_init(struct rt_lwp *lwp, rt_bool_t is_fork);
 void lwp_unmap_user_space(struct rt_lwp *lwp);
 
 int lwp_unmap_user(struct rt_lwp *lwp, void *va);
 void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, rt_bool_t text);
 
+rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size);
+/* check LWP_MAP_FLAG_* */
+rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags);
+
 void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, rt_bool_t cached);
 int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va);
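
A minimal usage sketch of the new varea-based user mapping, mirroring the
mm_lwp_tc.c testcase added below (error paths elided):

/* map 4 pages of uncached memory into an lwp at a kernel-chosen address */
rt_varea_t varea = lwp_map_user_varea_ext(lwp, RT_NULL, 4 * ARCH_PAGE_SIZE,
                                          LWP_MAP_FLAG_NOCACHE);
if (varea)
{
    /* varea->start holds the user virtual address that was picked */
    lwp_unmap_user(lwp, varea->start); /* the aspace frees the varea on unmap */
}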
 

+ 2 - 0
components/mm/avl_adpt.c

@@ -117,6 +117,7 @@ rt_varea_t _aspace_bst_search_exceed(struct rt_aspace *aspace, void *start)
 
         if (cmp < 0)
         {
+            /* varea exceeds start */
             ptrdiff_t off = va_s - start;
             if (off < min_off)
             {
@@ -127,6 +128,7 @@ rt_varea_t _aspace_bst_search_exceed(struct rt_aspace *aspace, void *start)
         }
         else if (cmp > 0)
         {
+            /* search the right subtree for a varea at a higher address */
             node = node->avl_right;
         }
         else

+ 1 - 1
components/mm/ioremap.c

@@ -98,7 +98,7 @@ void *rt_ioremap_cached(void *paddr, size_t size)
 
 void rt_iounmap(volatile void *vaddr)
 {
-    rt_aspace_unmap(&rt_kernel_space, (void *)vaddr, 1);
+    rt_aspace_unmap(&rt_kernel_space, (void *)vaddr);
 }
 
 #else

+ 296 - 140
components/mm/mm_aspace.c

@@ -31,122 +31,100 @@
 #include <mmu.h>
 #include <tlb.h>
 
-static void _aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length);
 static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
                         void *limit_start, rt_size_t limit_size,
                         mm_flag_t flags);
+static void _varea_uninstall(rt_varea_t varea);
 
 struct rt_aspace rt_kernel_space;
 
-rt_varea_t _varea_create(void *start, rt_size_t size)
-{
-    rt_varea_t varea;
-    varea = (rt_varea_t)rt_malloc(sizeof(struct rt_varea));
-    if (varea)
-    {
-        varea->start = start;
-        varea->size = size;
-    }
-    return varea;
-}
-
-static inline void _varea_post_install(rt_varea_t varea, rt_aspace_t aspace,
-                                       rt_size_t attr, rt_size_t flags,
-                                       rt_mem_obj_t mem_obj, rt_size_t offset)
+int _init_lock(rt_aspace_t aspace)
 {
-    varea->aspace = aspace;
-    varea->attr = attr;
-    varea->mem_obj = mem_obj;
-    varea->flag = flags;
-    varea->offset = offset;
-    varea->frames = NULL;
+    int err;
+    MM_PGTBL_LOCK_INIT(aspace);
+    err = rt_mutex_init(&aspace->bst_lock, "aspace", RT_IPC_FLAG_FIFO);
 
-    if (varea->mem_obj && varea->mem_obj->on_varea_open)
-        varea->mem_obj->on_varea_open(varea);
+    return err;
 }
 
-/* restore context modified by varea install */
-static inline void _varea_uninstall(rt_varea_t varea)
+rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl)
 {
-    rt_aspace_t aspace = varea->aspace;
-
-    if (varea->mem_obj && varea->mem_obj->on_varea_close)
-        varea->mem_obj->on_varea_close(varea);
-
-    rt_varea_free_pages(varea);
-
-    rt_hw_mmu_unmap(aspace, varea->start, varea->size);
-    rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
+    int err = RT_EOK;
 
-    WR_LOCK(aspace);
-    _aspace_bst_remove(aspace, varea);
-    WR_UNLOCK(aspace);
-}
+    if (pgtbl)
+    {
+        aspace->page_table = pgtbl;
+        aspace->start = start;
+        aspace->size = length;
 
-int _init_lock(rt_aspace_t aspace)
-{
-    MM_PGTBL_LOCK_INIT(aspace);
-    rt_mutex_init(&aspace->bst_lock, "", RT_IPC_FLAG_FIFO);
+        err = _aspace_bst_init(aspace);
+        if (err == RT_EOK)
+        {
+            /**
+             * It has the side effect that lock will be added to object
+             * system management. So it must be paired with a detach once
+             * the initialization return successfully.
+             */
+            err = _init_lock(aspace);
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
 
-    return RT_EOK;
+    return err;
 }
 
 rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl)
 {
     rt_aspace_t aspace = NULL;
-    void *page_table = pgtbl;
+    int err;
 
-    if (page_table)
+    RT_ASSERT(length <= 0 - (rt_size_t)start);
+    aspace = (rt_aspace_t)rt_malloc(sizeof(*aspace));
+    if (aspace)
     {
-        aspace = (rt_aspace_t)rt_malloc(sizeof(*aspace));
-        if (aspace)
+        rt_memset(aspace, 0, sizeof(*aspace));
+
+        err = rt_aspace_init(aspace, start, length, pgtbl);
+
+        if (err != RT_EOK)
         {
-            aspace->page_table = page_table;
-            aspace->start = start;
-            aspace->size = length;
-            if (_init_lock(aspace) != RT_EOK ||
-                _aspace_bst_init(aspace) != RT_EOK)
-            {
-                rt_free(aspace);
-                aspace = NULL;
-            }
+            LOG_W("%s(%p, %lx, %p): failed with code %d\n", __func__,
+                start, length, pgtbl, err);
+            rt_free(aspace);
+            aspace = RT_NULL;
         }
     }
     return aspace;
 }
 
-rt_aspace_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length,
-                           void *pgtbl)
+void rt_aspace_detach(rt_aspace_t aspace)
 {
-    void *page_table = pgtbl;
-    LOG_D("%s", __func__);
-
-    if (page_table)
+    WR_LOCK(aspace);
+    rt_varea_t varea = ASPACE_VAREA_FIRST(aspace);
+    while (varea)
     {
-        aspace->page_table = page_table;
-        aspace->start = start;
-        aspace->size = length;
-        if (_init_lock(aspace) != RT_EOK || _aspace_bst_init(aspace) != RT_EOK)
+        rt_varea_t prev = varea;
+        _varea_uninstall(varea);
+
+        varea = ASPACE_VAREA_NEXT(varea);
+        if (!(prev->flag & MMF_STATIC_ALLOC))
         {
-            aspace = NULL;
+            rt_free(prev);
         }
     }
-    return aspace;
-}
+    WR_UNLOCK(aspace);
 
-void rt_aspace_detach(rt_aspace_t aspace)
-{
-    _aspace_unmap(aspace, aspace->start, aspace->size);
     rt_mutex_detach(&aspace->bst_lock);
 }
 
 void rt_aspace_delete(rt_aspace_t aspace)
 {
-    if (aspace)
-    {
-        rt_aspace_detach(aspace);
-        rt_free(aspace);
-    }
+    RT_ASSERT(aspace);
+    rt_aspace_detach(aspace);
+    rt_free(aspace);
 }
 
 static int _do_named_map(rt_aspace_t aspace, void *vaddr, rt_size_t length,
@@ -178,19 +156,75 @@ static int _do_named_map(rt_aspace_t aspace, void *vaddr, rt_size_t length,
     return err;
 }
 
-rt_inline void _do_page_fault(struct rt_mm_fault_msg *msg, rt_size_t off,
+rt_inline void _do_page_fault(struct rt_aspace_fault_msg *msg, rt_size_t off,
                               void *vaddr, rt_mem_obj_t mem_obj,
                               rt_varea_t varea)
 {
     msg->off = off;
-    msg->vaddr = vaddr;
+    msg->fault_vaddr = vaddr;
     msg->fault_op = MM_FAULT_OP_READ;
     msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
     msg->response.status = -1;
+    msg->response.vaddr = 0;
+    msg->response.size = 0;
 
     mem_obj->on_page_fault(varea, msg);
 }
 
+int _varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
+{
+    int err = -RT_ERROR;
+    if (msg->response.status == MM_FAULT_STATUS_OK)
+    {
+        /**
+         * the page returned by the handler is not checked,
+         * since few assumptions can be made about it
+         */
+        void *store = msg->response.vaddr;
+        rt_size_t store_sz = msg->response.size;
+        if (msg->fault_vaddr + store_sz > varea->start + varea->size)
+        {
+            LOG_W("%s: too much (0x%lx) of buffer on vaddr %p is provided",
+                    __func__, store_sz, msg->fault_vaddr);
+        }
+        else
+        {
+            void *map;
+            void *v_addr = msg->fault_vaddr;
+            void *p_addr = store + PV_OFFSET;
+            map = rt_hw_mmu_map(varea->aspace, v_addr, p_addr, store_sz, varea->attr);
+
+            if (!map)
+            {
+                LOG_W("%s: MMU mapping failed for va %p to %p of %lx", __func__,
+                    msg->fault_vaddr, store + PV_OFFSET, store_sz);
+            }
+            else
+            {
+                rt_hw_tlb_invalidate_range(varea->aspace, v_addr, store_sz, ARCH_PAGE_SIZE);
+                err = RT_EOK;
+            }
+        }
+    }
+    else if (msg->response.status == MM_FAULT_STATUS_OK_MAPPED)
+    {
+        if (rt_hw_mmu_v2p(varea->aspace, msg->fault_vaddr) == ARCH_MAP_FAILED)
+        {
+            LOG_W("%s: no page is mapped on %p", __func__, msg->fault_vaddr);
+        }
+        else
+        {
+            err = RT_EOK;
+        }
+    }
+    else
+    {
+        LOG_W("%s: failed on va %p inside varea %p(%s)", __func__, msg->fault_vaddr, varea,
+            varea->mem_obj->get_name ? varea->mem_obj->get_name(varea) : "unknown");
+    }
+    return err;
+}
+
 /* allocate memory page for mapping range */
 static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
                         rt_size_t size)
@@ -205,48 +239,28 @@ static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
     while (vaddr != end)
     {
         /* TODO try to map with huge TLB, when flag & HUGEPAGE */
-        struct rt_mm_fault_msg msg;
+        struct rt_aspace_fault_msg msg;
         _do_page_fault(&msg, off, vaddr, varea->mem_obj, varea);
 
-        if (msg.response.status == MM_FAULT_STATUS_OK)
-        {
-            void *store = msg.response.vaddr;
-            rt_size_t store_sz = msg.response.size;
-
-            if (store_sz + vaddr > end)
-            {
-                LOG_W("%s: too much (0x%lx) of buffer at vaddr %p is provided",
-                      __func__, store_sz, vaddr);
-                break;
-            }
-
-            void *map = rt_hw_mmu_map(aspace, vaddr, store + PV_OFFSET,
-                                      store_sz, varea->attr);
+        if (_varea_map_with_msg(varea, &msg))
+            break;
 
-            if (!map)
-            {
-                LOG_W("%s: MMU mapping failed for va %p to %p of %lx", __func__,
-                      vaddr, store + PV_OFFSET, store_sz);
-            }
-            else
-            {
-                rt_hw_tlb_invalidate_range(aspace, vaddr, store_sz, ARCH_PAGE_SIZE);
-            }
-            vaddr += store_sz;
-            off += store_sz >> ARCH_PAGE_SHIFT;
-        }
-        else
-        {
-            err = -RT_ENOMEM;
-            LOG_W("%s failed because no memory is provided", __func__);
+        /**
+         * It's hard to identify the mapping pattern of a customized handler,
+         * so we terminate the prefetch process in that case
+         */
+        if (msg.response.status == MM_FAULT_STATUS_OK_MAPPED)
             break;
-        }
+
+        vaddr += msg.response.size;
+        off += msg.response.size >> ARCH_PAGE_SHIFT;
     }
 
     return err;
 }
 
-int _varea_install(rt_aspace_t aspace, rt_varea_t varea, rt_mm_va_hint_t hint)
+/* caller must hold the aspace lock */
+static int _varea_install(rt_aspace_t aspace, rt_varea_t varea, rt_mm_va_hint_t hint)
 {
     void *alloc_va;
     int err = RT_EOK;
@@ -274,6 +288,42 @@ int _varea_install(rt_aspace_t aspace, rt_varea_t varea, rt_mm_va_hint_t hint)
     return err;
 }
 
+static inline void _varea_post_install(rt_varea_t varea, rt_aspace_t aspace,
+                                       rt_size_t attr, rt_size_t flags,
+                                       rt_mem_obj_t mem_obj, rt_size_t offset)
+{
+    varea->aspace = aspace;
+    varea->attr = attr;
+    varea->mem_obj = mem_obj;
+    varea->flag = flags;
+    varea->offset = offset;
+    varea->frames = NULL;
+
+    if (varea->mem_obj && varea->mem_obj->on_varea_open)
+        varea->mem_obj->on_varea_open(varea);
+}
+
+/**
+ * restore context modified by varea install
+ * caller must NOT hold the aspace lock
+ */
+static void _varea_uninstall(rt_varea_t varea)
+{
+    rt_aspace_t aspace = varea->aspace;
+
+    if (varea->mem_obj && varea->mem_obj->on_varea_close)
+        varea->mem_obj->on_varea_close(varea);
+
+    rt_hw_mmu_unmap(aspace, varea->start, varea->size);
+    rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
+
+    rt_varea_pgmgr_pop_all(varea);
+
+    WR_LOCK(aspace);
+    _aspace_bst_remove(aspace, varea);
+    WR_UNLOCK(aspace);
+}
+
 static int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea, rt_size_t attr,
                           mm_flag_t flags, rt_mem_obj_t mem_obj,
                           rt_size_t offset)
@@ -281,6 +331,12 @@ static int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea, rt_size_t attr,
     int err = RT_EOK;
 
     WR_LOCK(aspace);
+
+    /**
+     * @brief .prefer & .map_size are taken from the varea set up by the caller;
+     * .limit_start & .limit_range_size default to the full range of the aspace;
+     * .flags comes from the parameter and is filled into the varea on a successful install
+     */
     struct rt_mm_va_hint hint = {.prefer = varea->start,
                                  .map_size = varea->size,
                                  .limit_start = aspace->start,
@@ -293,6 +349,7 @@ static int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea, rt_size_t attr,
         mem_obj->hint_free(&hint);
     }
 
+    /* try to allocate a virtual address region for varea */
     err = _varea_install(aspace, varea, &hint);
     WR_UNLOCK(aspace);
 
@@ -316,6 +373,18 @@ static int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea, rt_size_t attr,
     return err;
 }
 
+rt_varea_t _varea_create(void *start, rt_size_t size)
+{
+    rt_varea_t varea;
+    varea = (rt_varea_t)rt_malloc(sizeof(struct rt_varea));
+    if (varea)
+    {
+        varea->start = start;
+        varea->size = size;
+    }
+    return varea;
+}
+
 #define _IS_OVERFLOW(start, length) ((length) > (0ul - (uintptr_t)(start)))
 #define _IS_OVERSIZE(start, length, limit_s, limit_sz) (((length) + (rt_size_t)((start) - (limit_start))) > (limit_size))
 
@@ -339,7 +408,8 @@ static inline int _not_align(void *start, rt_size_t length, rt_size_t mask)
 
 static inline int _not_support(rt_size_t flags)
 {
-    rt_size_t support_ops = (MMF_PREFETCH | MMF_MAP_FIXED | MMF_TEXT);
+    rt_size_t support_ops = (MMF_PREFETCH | MMF_MAP_FIXED | MMF_TEXT |
+        MMF_STATIC_ALLOC | MMF_REQUEST_ALIGN);
     return flags & ~(support_ops | _MMF_ALIGN_MASK);
 }
 
@@ -351,19 +421,25 @@ int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
     int err;
     rt_varea_t varea;
 
-    if (!aspace || !addr || !mem_obj || length == 0 ||
-        _not_in_range(*addr, length, aspace->start, aspace->size))
+    if (!aspace || !addr || !mem_obj || length == 0)
     {
         err = -RT_EINVAL;
-        LOG_I("%s: Invalid input", __func__);
+        LOG_I("%s(%p, %p, %lx, %lx, %lx, %p, %lx): Invalid input",
+            __func__, aspace, addr, length, attr, flags, mem_obj, offset);
+    }
+    else if (_not_in_range(*addr, length, aspace->start, aspace->size))
+    {
+        err = -RT_EINVAL;
+        LOG_I("%s(addr:%p, len:%lx): out of range", __func__, *addr, length);
     }
     else if (_not_support(flags))
     {
-        LOG_I("%s: no support flags 0x%p", __func__, flags);
+        LOG_I("%s: no support flags 0x%lx", __func__, flags);
         err = -RT_ENOSYS;
     }
     else
     {
+        /* allocate the varea and fill in start and size */
         varea = _varea_create(*addr, length);
 
         if (varea)
@@ -376,7 +452,7 @@ int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length,
         }
         else
         {
-            LOG_W("%s: mm aspace map failed", __func__);
+            LOG_W("%s: memory allocation failed", __func__);
             err = -RT_ENOMEM;
         }
     }
@@ -461,7 +537,7 @@ int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
 
         if (err == RT_EOK)
         {
-            _varea_post_install(varea, aspace, attr, 0, NULL, pa_off);
+            _varea_post_install(varea, aspace, attr, hint->flags, NULL, pa_off);
 
             vaddr = varea->start;
 
@@ -537,28 +613,25 @@ int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
     return err;
 }
 
-void _aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length)
+void _aspace_unmap(rt_aspace_t aspace, void *addr)
 {
-    struct _mm_range range = {addr, addr + length - 1};
-    rt_varea_t varea = _aspace_bst_search_overlap(aspace, range);
+    WR_LOCK(aspace);
+    rt_varea_t varea = _aspace_bst_search(aspace, addr);
+    WR_UNLOCK(aspace);
 
     if (varea == RT_NULL)
     {
-        LOG_I("%s: No such entry found at %p with %lx bytes\n", __func__, addr, length);
+        LOG_I("%s: No such entry found at %p\n", __func__, addr);
     }
 
-    while (varea)
+    _varea_uninstall(varea);
+    if (!(varea->flag & MMF_STATIC_ALLOC))
     {
-        _varea_uninstall(varea);
-        if (!(varea->flag & MMF_STATIC_ALLOC))
-        {
-            rt_free(varea);
-        }
-        varea = _aspace_bst_search_overlap(aspace, range);
+        rt_free(varea);
     }
 }
 
-int rt_aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length)
+int rt_aspace_unmap(rt_aspace_t aspace, void *addr)
 {
     if (!aspace)
     {
@@ -566,14 +639,14 @@ int rt_aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length)
         return -RT_EINVAL;
     }
 
-    if (_not_in_range(addr, length, aspace->start, aspace->size))
+    if (_not_in_range(addr, 1, aspace->start, aspace->size))
     {
         LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
               aspace->start, aspace->start + aspace->size);
         return -RT_EINVAL;
     }
 
-    _aspace_unmap(aspace, addr, length);
+    _aspace_unmap(aspace, addr);
 
     return RT_EOK;
 }
@@ -659,9 +732,8 @@ static void *_find_head_and_asc_search(rt_aspace_t aspace, rt_size_t req_size,
     {
         void *candidate;
         rt_size_t gap_size;
-        rt_varea_t former = _aspace_bst_search(aspace, limit.start);
 
-        candidate = former ? former->start + former->size : limit.start;
+        candidate = limit.start;
         candidate = _align(candidate, align_mask);
         gap_size = limit.end - candidate + 1;
 
@@ -688,6 +760,7 @@ static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
 
     if (prefer != RT_NULL)
     {
+        /* if the preferred region is free, just return it */
         prefer = _align(prefer, align_mask);
         struct _mm_range range = {prefer, prefer + req_size - 1};
         varea = _aspace_bst_search_overlap(aspace, range);
@@ -702,9 +775,11 @@ static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
         }
         else
         {
+            /* search from `varea` in ascending order */
             va = _ascending_search(varea, req_size, align_mask, limit);
             if (va == RT_NULL)
             {
+                /* rewind to first range */
                 limit.end = varea->start - 1;
                 va = _find_head_and_asc_search(aspace, req_size, align_mask,
                                                limit);
@@ -722,9 +797,13 @@ static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
 int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
 {
     int err = RT_EOK;
-    rt_varea_t varea = _aspace_bst_search(aspace, addr);
+    rt_varea_t varea;
     void *end = addr + (npage << ARCH_PAGE_SHIFT);
 
+    WR_LOCK(aspace);
+    varea = _aspace_bst_search(aspace, addr);
+    WR_UNLOCK(aspace);
+
     if (!varea)
     {
         LOG_W("%s: varea not exist", __func__);
@@ -744,18 +823,92 @@ int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
     return err;
 }
 
+int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page)
+{
+    int err = RT_EOK;
+    void *page_pa = rt_kmem_v2p(page);
+
+    if (!varea || !vaddr || !page)
+    {
+        LOG_W("%s(%p,%p,%p): invalid input", __func__, varea, vaddr, page);
+        err = -RT_EINVAL;
+    }
+    else if (page_pa == ARCH_MAP_FAILED)
+    {
+        LOG_W("%s: page is not in kernel space", __func__);
+        err = -RT_ERROR;
+    }
+    else if (_not_in_range(vaddr, ARCH_PAGE_SIZE, varea->start, varea->size))
+    {
+        LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
+            vaddr, ARCH_PAGE_SIZE, varea->start, varea->size);
+        err = -RT_EINVAL;
+    }
+    else
+    {
+        err = _do_named_map(
+            varea->aspace,
+            vaddr,
+            ARCH_PAGE_SIZE,
+            MM_PA_TO_OFF(page_pa),
+            varea->attr
+        );
+    }
+
+    return err;
+}
+
+#define ALIGNED(addr) (!((rt_size_t)(addr) & ARCH_PAGE_MASK))
+
+int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length)
+{
+    int err;
+    if (!varea || !vaddr || !paddr || !length ||
+        !ALIGNED(vaddr) || !ALIGNED(paddr) || !(ALIGNED(length)))
+    {
+        LOG_W("%s(%p,%p,%p,%lx): invalid input", __func__, varea, vaddr, paddr, length);
+        err = -RT_EINVAL;
+    }
+    else if (_not_in_range(vaddr, length, varea->start, varea->size))
+    {
+        LOG_W("%s(%p,%lx): not in range of varea(%p,%lx)", __func__,
+            vaddr, length, varea->start, varea->size);
+        err = -RT_EINVAL;
+    }
+    else
+    {
+        err = _do_named_map(
+            varea->aspace,
+            vaddr,
+            length,
+            MM_PA_TO_OFF(paddr),
+            varea->attr
+        );
+    }
+    return err;
+}
+
 int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
 {
     return -RT_ENOSYS;
 }
 
-int mm_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd)
+int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd)
 {
     int err;
-    rt_varea_t varea = _aspace_bst_search(aspace, addr);
+    rt_varea_t varea;
+
+    WR_LOCK(aspace);
+    varea = _aspace_bst_search(aspace, addr);
+    WR_UNLOCK(aspace);
+
     if (varea)
     {
         err = rt_hw_mmu_control(aspace, varea->start, varea->size, cmd);
+        if (err == RT_EOK)
+        {
+            rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
+        }
     }
     else
     {
@@ -768,12 +921,15 @@ int mm_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd)
 int rt_aspace_traversal(rt_aspace_t aspace,
                         int (*fn)(rt_varea_t varea, void *arg), void *arg)
 {
-    rt_varea_t varea = ASPACE_VAREA_FIRST(aspace);
+    rt_varea_t varea;
+    WR_LOCK(aspace);
+    varea = ASPACE_VAREA_FIRST(aspace);
     while (varea)
     {
         fn(varea, arg);
         varea = ASPACE_VAREA_NEXT(varea);
     }
+    WR_UNLOCK(aspace);
 
     return 0;
 }
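
With the reworked API, rt_aspace_unmap() takes only an address and removes the
single varea found there, instead of walking every varea overlapping a
caller-supplied length. A sketch of the map/unmap round trip under the new
signature, using the dummy mapper as the testcases do:

void *va = RT_NULL;
if (rt_aspace_map(&rt_kernel_space, &va, 0x1000, MMU_MAP_K_RWCB,
                  0, &rt_mm_dummy_mapper, 0) == RT_EOK)
{
    /* ... use the region ... */
    rt_aspace_unmap(&rt_kernel_space, va); /* the length parameter is gone */
}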

+ 33 - 9
components/mm/mm_aspace.h

@@ -90,7 +90,7 @@ typedef struct rt_mm_va_hint
 typedef struct rt_mem_obj
 {
     void (*hint_free)(rt_mm_va_hint_t hint);
-    void (*on_page_fault)(struct rt_varea *varea, struct rt_mm_fault_msg *msg);
+    void (*on_page_fault)(struct rt_varea *varea, struct rt_aspace_fault_msg *msg);
 
     /* do pre-open business like incrementing a ref */
     void (*on_varea_open)(struct rt_varea *varea);
@@ -127,8 +127,7 @@ enum rt_mmu_cntl
 
 rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);
 
-rt_aspace_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length,
-                           void *pgtbl);
+rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl);
 
 void rt_aspace_delete(rt_aspace_t aspace);
 
@@ -182,12 +181,11 @@ int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
  *
  * @param aspace
  * @param addr
- * @param length
  * @return int
  */
-int rt_aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length);
+int rt_aspace_unmap(rt_aspace_t aspace, void *addr);
 
-int mm_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);
+int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);
 
 int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);
 
@@ -198,11 +196,35 @@ int rt_aspace_traversal(rt_aspace_t aspace,
 
 void rt_aspace_print_all(rt_aspace_t aspace);
 
-void rt_varea_insert_page(rt_varea_t varea, void *page_addr);
+/**
+ * @brief Map one page to varea
+ *
+ * @param varea target varea
+ * @param addr user address
+ * @param page the page frame to be mapped
+ * @return int
+ */
+int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
 
-void rt_varea_free_pages(rt_varea_t varea);
+/**
+ * @brief Map a range of physical address to varea
+ *
+ * @param varea target varea
+ * @param vaddr user address
+ * @param paddr physical address
+ * @param length map range
+ * @return int
+ */
+int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);
 
-void rt_varea_offload_page(rt_varea_t varea, void *vaddr, rt_size_t size);
+/**
+ * @brief Insert page to page manager of varea
+ * The page will be freed by varea on uninstall automatically
+ *
+ * @param varea target varea
+ * @param page_addr the page frame to be added
+ */
+void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr);
 
 rt_ubase_t rt_kmem_pvoff(void);
 
@@ -212,4 +234,6 @@ int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr);
 
 void *rt_kmem_v2p(void *vaddr);
 
+void rt_kmem_list(void);
+
 #endif /* __MM_ASPACE_H__ */
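
For page granularity, rt_varea_map_page() pairs naturally with the page
allocator. A sketch of a fault handler resolving a single fault with it,
following the response conventions of this commit (the surrounding handler
code is illustrative):

void *page = rt_pages_alloc(0);                 /* one page frame */
if (page && rt_varea_map_page(varea, msg->fault_vaddr, page) == RT_EOK)
{
    rt_varea_pgmgr_insert(varea, page);         /* freed by varea on uninstall */
    msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
    msg->response.vaddr = page;
    msg->response.size = ARCH_PAGE_SIZE;
}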

+ 19 - 29
components/mm/mm_fault.c

@@ -26,40 +26,28 @@
 #define UNRECOVERABLE 0
 #define RECOVERABLE   1
 
-static int _fetch_page(rt_varea_t varea, struct rt_mm_fault_msg *msg)
+static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
 {
-    int err = UNRECOVERABLE;
+    int err;
+    msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
+    msg->response.vaddr = 0;
+    msg->response.size = 0;
     if (varea->mem_obj && varea->mem_obj->on_page_fault)
     {
         varea->mem_obj->on_page_fault(varea, msg);
-        if (msg->response.status == MM_FAULT_STATUS_OK)
-        {
-            void *store = msg->response.vaddr;
-            rt_size_t store_sz = msg->response.size;
-
-            if (msg->vaddr + store_sz > varea->start + varea->size)
-            {
-                LOG_W("%s more size of buffer is provided than varea", __func__);
-            }
-            else
-            {
-                rt_hw_mmu_map(varea->aspace, msg->vaddr, store + PV_OFFSET,
-                            store_sz, varea->attr);
-                rt_hw_tlb_invalidate_range(varea->aspace, msg->vaddr, store_sz,
-                                        ARCH_PAGE_SIZE);
-                err = RECOVERABLE;
-            }
-        }
+        err = _varea_map_with_msg(varea, msg);
+        err = (err == RT_EOK ? RECOVERABLE : UNRECOVERABLE);
     }
     return err;
 }
 
-static int _read_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
+static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
 {
     int err = UNRECOVERABLE;
     if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
     {
         RT_ASSERT(pa == ARCH_MAP_FAILED);
+        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
         err = _fetch_page(varea, msg);
     }
     else
@@ -69,12 +57,13 @@ static int _read_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
     return err;
 }
 
-static int _write_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
+static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
 {
     int err = UNRECOVERABLE;
     if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
     {
         RT_ASSERT(pa == ARCH_MAP_FAILED);
+        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
         err = _fetch_page(varea, msg);
     }
     else if (msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT &&
@@ -88,33 +77,34 @@ static int _write_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
     return err;
 }
 
-static int _exec_fault(rt_varea_t varea, void *pa, struct rt_mm_fault_msg *msg)
+static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
 {
     int err = UNRECOVERABLE;
     if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
     {
         RT_ASSERT(pa == ARCH_MAP_FAILED);
+        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
         err = _fetch_page(varea, msg);
     }
     return err;
 }
 
-int rt_mm_fault_try_fix(struct rt_mm_fault_msg *msg)
+int rt_aspace_fault_try_fix(struct rt_aspace_fault_msg *msg)
 {
     struct rt_lwp *lwp = lwp_self();
     int err = UNRECOVERABLE;
-    uintptr_t va = (uintptr_t)msg->vaddr;
+    uintptr_t va = (uintptr_t)msg->fault_vaddr;
     va &= ~ARCH_PAGE_MASK;
-    msg->vaddr = (void *)va;
+    msg->fault_vaddr = (void *)va;
 
     if (lwp)
     {
         rt_aspace_t aspace = lwp->aspace;
-        rt_varea_t varea = _aspace_bst_search(aspace, msg->vaddr);
+        rt_varea_t varea = _aspace_bst_search(aspace, msg->fault_vaddr);
         if (varea)
         {
-            void *pa = rt_hw_mmu_v2p(aspace, msg->vaddr);
-            msg->off = (msg->vaddr - varea->start) >> ARCH_PAGE_SHIFT;
+            void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
+            msg->off = (msg->fault_vaddr - varea->start) >> ARCH_PAGE_SHIFT;
 
             /* permission checked by fault op */
             switch (msg->fault_op)

+ 8 - 5
components/mm/mm_fault.h

@@ -14,8 +14,11 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#define MM_FAULT_STATUS_OK            0
-#define MM_FAULT_STATUS_UNRECOVERABLE 1
+/* fast path fault handler, a page frame on kernel space is returned */
+#define MM_FAULT_STATUS_OK              0
+/* customized fault handler, done by using rt_varea_map_* */
+#define MM_FAULT_STATUS_OK_MAPPED       1
+#define MM_FAULT_STATUS_UNRECOVERABLE   4
 
 struct rt_mm_fault_res
 {
@@ -39,17 +42,17 @@ enum rt_mm_fault_type
     MM_FAULT_TYPE_GENERIC,
 };
 
-struct rt_mm_fault_msg
+struct rt_aspace_fault_msg
 {
     enum rt_mm_fault_op fault_op;
     enum rt_mm_fault_type fault_type;
     rt_size_t off;
-    void *vaddr;
+    void *fault_vaddr;
 
     struct rt_mm_fault_res response;
 };
 
 /* MMU base page fault handler, returns 1 if the fault was fixed */
-int rt_mm_fault_try_fix(struct rt_mm_fault_msg *msg);
+int rt_aspace_fault_try_fix(struct rt_aspace_fault_msg *msg);
 
 #endif /* __MM_FAULT_H__ */
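
Arch-level trap handlers (see the libcpu/*/trap.c entries in the diffstat) are
the callers of this API. A plausible sketch of how one builds the renamed
message; `fault_addr` and the op/type decoding are illustrative, while the
struct fields and return convention (1 when recovered) come from this header:

struct rt_aspace_fault_msg msg;
msg.fault_op    = MM_FAULT_OP_READ;        /* decoded from the fault status */
msg.fault_type  = MM_FAULT_TYPE_PAGE_FAULT;
msg.fault_vaddr = fault_addr;              /* faulting virtual address */

if (rt_aspace_fault_try_fix(&msg))
    return;  /* recovered: retry the faulting instruction */
/* otherwise fall through to the fatal-fault path */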

+ 3 - 3
components/mm/mm_flag.h

@@ -82,9 +82,9 @@ enum mm_flag_cntl
  *
  * Direct use of flag is also acceptable: (MMF_MAP_FIXED | MMF_PREFETCH)
  */
-#define MMF_CREATE(cntl, align)                                                \
-    (align ? (MMF_SET_CNTL((mm_flag_t)0, (cntl) | MMF_REQUEST_ALIGN) |         \
-              MMF_SET_ALIGN((mm_flag_t)0, align))                              \
+#define MMF_CREATE(cntl, align)                                                 \
+    ((align) ? (MMF_SET_CNTL((mm_flag_t)0, (cntl) | MMF_REQUEST_ALIGN) |        \
+              MMF_SET_ALIGN((mm_flag_t)0, (align)))                             \
            : (MMF_SET_CNTL((mm_flag_t)0, (cntl) & ~MMF_REQUEST_ALIGN)))
 
 #undef _DEF_FLAG
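
The added parentheses make the macro safe for compound arguments; usage is
exactly as exercised by the new flag_tc testcase:

mm_flag_t flags = MMF_CREATE(MMF_MAP_FIXED, 0x4000);
/* MMF_GET_CNTL(flags) == (MMF_MAP_FIXED | MMF_REQUEST_ALIGN) */
/* 1 << MMF_GET_ALIGN(flags) == 0x4000 */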

+ 3 - 3
components/mm/mm_kmem.c

@@ -17,13 +17,13 @@
 #include "mm_private.h"
 #include <mmu.h>
 
-static void list_kernel_space(void)
+static void list_kmem(void)
 {
     rt_aspace_print_all(&rt_kernel_space);
 }
-MSH_CMD_EXPORT(list_kernel_space, List varea in kernel space);
+MSH_CMD_EXPORT(list_kmem, List varea in kernel virtual memory space);
 
-void rt_kmem_list_varea(void) __attribute__((alias("list_kernel_space")));
+void rt_kmem_list(void) __attribute__((alias("list_kmem")));
 
 static rt_ubase_t rt_pv_offset;
 

+ 6 - 6
components/mm/mm_object.c

@@ -24,7 +24,7 @@ static const char *get_name(rt_varea_t varea)
     return "dummy-mapper";
 }
 
-void rt_varea_insert_page(rt_varea_t varea, void *page_addr)
+void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr)
 {
     rt_page_t page = rt_page_addr2page(page_addr);
 
@@ -41,7 +41,7 @@ void rt_varea_insert_page(rt_varea_t varea, void *page_addr)
     }
 }
 
-void rt_varea_free_pages(rt_varea_t varea)
+void rt_varea_pgmgr_pop_all(rt_varea_t varea)
 {
     rt_page_t page = varea->frames;
 
@@ -54,7 +54,7 @@ void rt_varea_free_pages(rt_varea_t varea)
     }
 }
 
-void rt_varea_offload_page(rt_varea_t varea, void *vaddr, rt_size_t size)
+void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size)
 {
     void *vend = vaddr + size;
     while (vaddr != vend)
@@ -67,7 +67,7 @@ void rt_varea_offload_page(rt_varea_t varea, void *vaddr, rt_size_t size)
     }
 }
 
-static void on_page_fault(struct rt_varea *varea, struct rt_mm_fault_msg *msg)
+static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
 {
     void *page;
     page = rt_pages_alloc(0);
@@ -82,7 +82,7 @@ static void on_page_fault(struct rt_varea *varea, struct rt_mm_fault_msg *msg)
     msg->response.size = ARCH_PAGE_SIZE;
     msg->response.vaddr = page;
 
-    rt_varea_insert_page(varea, page);
+    rt_varea_pgmgr_insert(varea, page);
 }
 
 static void on_varea_open(struct rt_varea *varea)
@@ -96,7 +96,7 @@ static void on_varea_close(struct rt_varea *varea)
 
 static void on_page_offload(rt_varea_t varea, void *vaddr, rt_size_t size)
 {
-    rt_varea_offload_page(varea, vaddr, size);
+    rt_varea_pgmgr_pop(varea, vaddr, size);
 }
 
 struct rt_mem_obj rt_mm_dummy_mapper = {

+ 3 - 3
components/mm/mm_page.c

@@ -59,13 +59,13 @@ static void hint_free(rt_mm_va_hint_t hint)
     hint->prefer = rt_mpr_start;
 }
 
-static void on_page_fault(struct rt_varea *varea, struct rt_mm_fault_msg *msg)
+static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
 {
     void *init_start = (void *)init_mpr_align_start;
     void *init_end = (void *)init_mpr_align_end;
-    if (msg->vaddr < init_end && msg->vaddr >= init_start)
+    if (msg->fault_vaddr < init_end && msg->fault_vaddr >= init_start)
     {
-        rt_size_t offset = msg->vaddr - init_start;
+        rt_size_t offset = msg->fault_vaddr - init_start;
         msg->response.status = MM_FAULT_STATUS_OK;
         msg->response.vaddr = init_mpr_cont_start + offset;
         msg->response.size = ARCH_PAGE_SIZE;

+ 11 - 6
components/mm/mm_private.h

@@ -7,8 +7,8 @@
  * Date           Author       Notes
  * 2022-11-14     WangXiaoyao  the first version
  */
-#ifndef __MM_INTERN_H__
-#define __MM_INTERN_H__
+#ifndef __MM_PRIVATE_H__
+#define __MM_PRIVATE_H__
 
 #include "mm_aspace.h"
 #include <rtdef.h>
@@ -44,7 +44,7 @@ struct _mm_range
 rt_err_t _aspace_bst_init(struct rt_aspace *aspace);
 
 /**
- * @brief
+ * @brief Retrieve the varea whose range [varea->start, varea->end] contains start
  *
  * @param aspace
  * @param start
@@ -53,8 +53,7 @@ rt_err_t _aspace_bst_init(struct rt_aspace *aspace);
 struct rt_varea *_aspace_bst_search(struct rt_aspace *aspace, void *start);
 
 /**
- * @brief Retrieve lowest varea satisfies
- * ((varea->start >= start) || (varea->end >= start))
+ * @brief Retrieve the lowest varea satisfying (varea->start >= start)
  *
  * @param aspace
  * @param length
@@ -91,4 +90,10 @@ void _aspace_bst_insert(struct rt_aspace *aspace, struct rt_varea *varea);
  */
 void _aspace_bst_remove(struct rt_aspace *aspace, struct rt_varea *varea);
 
-#endif /* __MM_INTERN_H__ */
+void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size);
+
+void rt_varea_pgmgr_pop_all(rt_varea_t varea);
+
+int _varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg);
+
+#endif /* __MM_PRIVATE_H__ */

+ 1 - 0
examples/utest/testcases/Kconfig

@@ -12,6 +12,7 @@ source "$RTT_DIR/examples/utest/testcases/kernel/Kconfig"
 source "$RTT_DIR/examples/utest/testcases/cpp11/Kconfig"
 source "$RTT_DIR/examples/utest/testcases/drivers/serial_v2/Kconfig"
 source "$RTT_DIR/examples/utest/testcases/posix/Kconfig"
+source "$RTT_DIR/examples/utest/testcases/mm/Kconfig"
 
 endif
 

+ 17 - 0
examples/utest/testcases/mm/Kconfig

@@ -0,0 +1,17 @@
+menu "Memory Management Subsytem Testcase"
+
+    config UTEST_MM_API_TC
+    bool "Enable Utest for MM API"
+    default n
+    help
+        The test covers the Memory Management APIs under the
+        `components/mm` and `libcpu/[mmu.*|tlb.*|cache.*]`
+
+    config UTEST_MM_LWP_TC
+    bool "Enable Utest for MM API in lwp"
+    default n
+    help
+        The test covers the Memory Management APIs under the
+        `components/lwp`.
+
+endmenu

+ 16 - 0
examples/utest/testcases/mm/SConscript

@@ -0,0 +1,16 @@
+Import('rtconfig')
+from building import *
+
+cwd     = GetCurrentDir()
+src     = []
+CPPPATH = [cwd]
+
+if GetDepend(['UTEST_MM_API_TC']):
+    src += ['mm_api_tc.c', 'mm_libcpu_tc.c']
+
+if GetDepend(['UTEST_MM_LWP_TC']):
+    src += ['mm_lwp_tc.c']
+
+group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = CPPPATH)
+
+Return('group')

+ 65 - 0
examples/utest/testcases/mm/common.h

@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-20     WangXiaoyao  Complete testcase for mm_aspace.c
+ */
+#ifndef __TEST_MM_COMMON_H__
+#define __TEST_MM_COMMON_H__
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <utest.h>
+
+#include <board.h>
+#include <rtthread.h>
+#include <rthw.h>
+#include <lwp_arch.h>
+#include <mmu.h>
+#include <tlb.h>
+
+#include <ioremap.h>
+#include <mm_aspace.h>
+#include <mm_flag.h>
+#include <mm_page.h>
+#include <mm_private.h>
+
+extern rt_base_t rt_heap_lock(void);
+extern void rt_heap_unlock(rt_base_t level);
+
+/**
+ * @brief Check that the heap stays consistent across the enclosed operations
+ */
+#define CONSIST_HEAP(statement) do {                 \
+    rt_size_t total, used, max_used;                \
+    rt_size_t totala, useda, max_useda;             \
+    rt_ubase_t level = rt_heap_lock();              \
+    rt_memory_info(&total, &used, &max_used);       \
+    statement;                                      \
+    rt_memory_info(&totala, &useda, &max_useda);    \
+    rt_heap_unlock(level);                          \
+    uassert_true(total == totala);                  \
+    uassert_true(used == useda);                    \
+    uassert_true(max_used == max_useda);            \
+    } while (0)
+
+rt_inline int memtest(volatile char *buf, int value, size_t buf_sz)
+{
+    int ret = 0;
+    for (size_t i = 0; i < buf_sz; i++)
+    {
+        if (buf[i] != value)
+        {
+            ret = -1;
+            break;
+        }
+    }
+    return ret;
+}
+
+#endif /* __TEST_MM_COMMON_H__ */
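
CONSIST_HEAP snapshots the heap statistics around a statement and asserts that
nothing leaked; typical use wraps a whole create/delete cycle, as in the API
testcases below:

rt_aspace_t aspace;
CONSIST_HEAP(aspace = rt_aspace_create((void *)(0 - 0x1000), 0x1000, NULL));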

+ 113 - 0
examples/utest/testcases/mm/mm_api_tc.c

@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-12-14     WangXiaoyao  the first version
+ * 2023-03-20     WangXiaoyao  Format & add more testcases for API under mm_aspace.h
+ */
+#include "common.h"
+
+/**
+ * @brief Testing all APIs under components/mm
+ */
+
+void ioremap_tc(void);
+void flag_tc(void);
+
+#ifdef STANDALONE_TC
+#define TC_ASSERT(expr)                                                        \
+    ((expr)                                                                    \
+         ? 0                                                                   \
+         : rt_kprintf("AssertFault(%d): %s\n", __LINE__, RT_STRINGIFY(expr)))
+#else
+#define TC_ASSERT(expr) uassert_true(expr)
+#endif
+
+static rt_err_t utest_tc_init(void)
+{
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    return RT_EOK;
+}
+
+#include "test_aspace_api.h"
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(aspace_tc);
+    UTEST_UNIT_RUN(ioremap_tc);
+    UTEST_UNIT_RUN(flag_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.mm.api_tc", utest_tc_init, utest_tc_cleanup, 20);
+
+void ioremap_tc(void)
+{
+    const size_t bufsz = 0x1000;
+    void *paddr = (void *)rt_pages_alloc(rt_page_bits(bufsz)) + PV_OFFSET;
+    int *vaddr;
+    vaddr = rt_ioremap_cached(paddr, bufsz);
+    if (vaddr)
+    {
+        TC_ASSERT(*vaddr == *(int *)(paddr - PV_OFFSET));
+
+        rt_iounmap(vaddr);
+        rt_pages_free(paddr - PV_OFFSET, 0);
+    }
+}
+
+void flag_tc(void)
+{
+    size_t flags;
+
+    flags = MMF_CREATE(MMF_MAP_FIXED, 0x4000);
+    TC_ASSERT(MMF_GET_CNTL(flags) == (MMF_MAP_FIXED | MMF_REQUEST_ALIGN));
+    TC_ASSERT((1 << MMF_GET_ALIGN(flags)) == 0x4000);
+
+    flags = MMF_CREATE(MMF_MAP_FIXED, 0);
+    TC_ASSERT(MMF_GET_CNTL(flags) == MMF_MAP_FIXED);
+    TC_ASSERT(MMF_GET_ALIGN(flags) == 0);
+}
+
+#if 0
+
+#define BUF_SIZE (4ul << 20)
+static char ALIGN(BUF_SIZE) buf[BUF_SIZE];
+
+void buddy_tc(void)
+{
+    size_t total, free;
+    rt_page_get_info(&total, &free);
+
+    rt_region_t region = {
+        .start = (size_t)buf,
+        .end = (size_t)buf + BUF_SIZE,
+    };
+
+    size_t new_total, new_free;
+    rt_page_install(region);
+    rt_page_get_info(&new_total, &new_free);
+    TC_ASSERT(new_total - total == (BUF_SIZE >> ARCH_PAGE_SHIFT));
+    TC_ASSERT(new_free > free);
+}
+
+void mmu_page_tc()
+{
+    mm_aspace_t aspace = ASPACE_NEW();
+    size_t total, free;
+    rt_page_get_info(&total, &free);
+    rt_hw_mmu_map(aspace, (void *)0x3fffffffff, 0, ARCH_PAGE_SIZE,
+                  MMU_MAP_K_RWCB);
+    rt_hw_mmu_unmap(aspace, (void *)0x3fffffffff, ARCH_PAGE_SIZE);
+
+    size_t new_total, new_free;
+    rt_page_get_info(&new_total, &new_free);
+    TC_ASSERT(new_free == free);
+    mm_aspace_delete(aspace);
+}
+#endif

+ 16 - 0
examples/utest/testcases/mm/mm_libcpu_tc.c

@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-17     WangXiaoyao  cache API unit test
+ */
+#include <rtthread.h>
+
+#ifdef ARCH_RISCV64
+#include "test_cache_rv64.h"
+#elif defined(ARCH_ARMV8)
+#include "test_cache_aarch64.h"
+#endif

+ 116 - 0
examples/utest/testcases/mm/mm_lwp_tc.c

@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-27     WangXiaoyao  testcase for lwp
+ */
+
+#include "common.h"
+#include <lwp.h>
+#include "lwp_arch.h"
+#include "lwp_user_mm.h"
+#include "mm_aspace.h"
+#include "mmu.h"
+
+/**
+ * @brief user map API
+ * rt_varea_t lwp_map_user_varea(struct rt_lwp *lwp, void *map_va, size_t map_size);
+ * rt_varea_t lwp_map_user_varea_ext(struct rt_lwp *lwp, void *map_va, size_t map_size, size_t flags);
+ */
+#if 1 /* make it clear to identify the block :) */
+    /* for testing on _aspace_traverse */
+    static void *_prev_end;
+    static size_t _count;
+    static int _test_increase(rt_varea_t varea, void *param)
+    {
+        uassert_true(varea->start >= _prev_end);
+        _prev_end = varea->start + varea->size;
+        _count += 1;
+        return 0;
+    }
+    #define TEST_VAREA_INSERT(statement, aspace) do {\
+        size_t _prev_count; \
+        _count = 0;         \
+        _prev_end = 0;      \
+        rt_aspace_traversal((aspace), _test_increase, NULL);\
+        _prev_count = _count; \
+        statement;          \
+        _count = 0;         \
+        _prev_end = 0;      \
+        rt_aspace_traversal((aspace), _test_increase, NULL);\
+        uassert_true(_prev_count + 1 == _count); \
+        } while (0)
+#endif
+
+static void test_user_map_varea(void)
+{
+    const size_t buf_sz = ARCH_PAGE_SIZE * 4;
+    struct rt_lwp *lwp;
+    rt_varea_t varea;
+    lwp = lwp_new();
+
+    uassert_true(!!lwp);
+    uassert_true(!lwp_user_space_init(lwp, 0));
+
+    TEST_VAREA_INSERT(
+        varea = lwp_map_user_varea(lwp, 0, buf_sz),
+        lwp->aspace);
+    uassert_true(!!varea);
+    uassert_true(varea->attr == (MMU_MAP_U_RWCB));
+    uassert_true(varea->size == buf_sz);
+    uassert_true(varea->aspace == lwp->aspace);
+    uassert_true(varea->flag == 0);
+    uassert_true(varea->start != 0);
+    uassert_true(varea->start >= (void *)USER_VADDR_START && varea->start < (void *)USER_VADDR_TOP);
+
+    uassert_true(!lwp_ref_dec(lwp));
+}
+
+static void test_user_map_varea_ext(void)
+{
+    const size_t buf_sz = ARCH_PAGE_SIZE * 4;
+    struct rt_lwp *lwp;
+    rt_varea_t varea;
+    lwp = lwp_new();
+
+    uassert_true(!!lwp);
+    uassert_true(!lwp_user_space_init(lwp, 0));
+
+    TEST_VAREA_INSERT(
+        varea = lwp_map_user_varea_ext(lwp, 0, buf_sz, LWP_MAP_FLAG_NOCACHE),
+        lwp->aspace);
+    uassert_true(!!varea);
+    uassert_true(varea->attr == (MMU_MAP_U_RW));
+    uassert_true(varea->size == buf_sz);
+    uassert_true(varea->aspace == lwp->aspace);
+    uassert_true(varea->flag == 0);
+    uassert_true(varea->start != 0);
+    uassert_true(varea->start >= (void *)USER_VADDR_START && varea->start < (void *)USER_VADDR_TOP);
+
+    uassert_true(!lwp_ref_dec(lwp));
+}
+
+static void user_map_varea_tc(void)
+{
+    CONSIST_HEAP(test_user_map_varea());
+    CONSIST_HEAP(test_user_map_varea_ext());
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(user_map_varea_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.lwp.mm_tc", utest_tc_init, utest_tc_cleanup, 20);

+ 37 - 0
examples/utest/testcases/mm/semaphore.h

@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-24     WangXiaoyao  Complete testcase for synchronization
+ */
+#ifndef __SEMAPHORE_H__
+#define __SEMAPHORE_H__
+
+#include <stdatomic.h>
+
+typedef struct {
+    atomic_int count;
+} semaphore_t;
+
+void semaphore_init(semaphore_t *sem, int count)
+{
+    atomic_init(&sem->count, count);
+}
+
+void semaphore_wait(semaphore_t *sem)
+{
+    int count;
+    do {
+        count = atomic_load(&sem->count);
+    } while (count == 0 || !atomic_compare_exchange_weak(&sem->count, &count, count - 1));
+}
+
+void semaphore_signal(semaphore_t *sem)
+{
+    atomic_fetch_add(&sem->count, 1);
+}
+
+#endif /* __SEMAPHORE_H__ */
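
This is a minimal busy-waiting counting semaphore built on C11 atomics; waiters
spin instead of blocking, which is acceptable for test synchronization only. A
sketch of the intended use between two test threads:

static semaphore_t sem;

semaphore_init(&sem, 0);      /* setup, before either thread starts */

/* producer thread */
semaphore_signal(&sem);       /* publish one unit of work */

/* consumer thread */
semaphore_wait(&sem);         /* spin until a unit is available */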

+ 287 - 0
examples/utest/testcases/mm/test_aspace_api.h

@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-20     WangXiaoyao  Complete testcase for mm_aspace.c
+ */
+#ifndef __TEST_ASPACE_API_H__
+#define __TEST_ASPACE_API_H__
+
+#include "common.h"
+#include "lwp_arch.h"
+#include "test_aspace_api_internal.h"
+#include "test_synchronization.h"
+
+/**
+ * @brief API for aspace create/destroy
+ *
+ * rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);
+ * rt_aspace_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl);
+ * void rt_aspace_delete(rt_aspace_t aspace);
+ * void rt_aspace_detach(rt_aspace_t aspace);
+ *
+ * init & detach are covered by create & delete
+ */
+
+static void aspace_create_tc(void)
+{
+    /* test robustness, detect failure and recover status of overall system */
+    rt_aspace_t aspace;
+
+    CONSIST_HEAP(aspace = rt_aspace_create((void *)(0 - 0x1000), 0x1000, NULL));
+    uassert_true(!aspace);
+}
+
+#if 1 /* keep the block easy to identify :) */
+    /* for testing on _aspace_traverse */
+    static void *_prev_end;
+    static size_t _count;
+    static int _test_increase(rt_varea_t varea, void *param)
+    {
+        uassert_true(varea->start >= _prev_end);
+        _prev_end = varea->start + varea->size;
+        _count += 1;
+        return 0;
+    }
+#endif
+
+static void aspace_delete_tc(void)
+{
+    /**
+     * @brief Requirements: delete should properly recycle every type of varea inside
+     * the aspace and release the resources allocated for them
+     */
+    rt_aspace_t aspace;
+    struct rt_mm_va_hint hint = {.flags = 0,
+                                 .map_size = 0x1000,
+                                 .prefer = 0};
+    struct rt_varea varea_phy;
+    struct rt_varea varea_mobj;
+    void *pgtbl;
+    void *vaddr;
+
+    /* compatible with armv7a */
+    pgtbl = rt_pages_alloc(2);
+    uassert_true(!!pgtbl);  /* page must be usable */
+    rt_memset(pgtbl, 0, ARCH_PAGE_SIZE);
+
+    CONSIST_HEAP({
+        aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, pgtbl);
+        uassert_true(!!aspace);
+
+        /* insert 4 types of vareas into this aspace */
+        hint.limit_start = aspace->start;
+        hint.limit_range_size = aspace->size;
+        uassert_true(!rt_aspace_map_phy(aspace, &hint, MMU_MAP_K_RWCB, 0, &vaddr));
+        uassert_true(!rt_aspace_map_phy_static(aspace, &varea_phy, &hint, MMU_MAP_K_RWCB, 0, &vaddr));
+        uassert_true(!rt_aspace_map(aspace, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
+        uassert_true(!rt_aspace_map_static(aspace, &varea_mobj, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
+
+        /* for testing on _aspace_traverse */
+        _count = 0;
+        _prev_end = 0;
+        uassert_true(!rt_aspace_traversal(aspace, _test_increase, 0));
+        /* ensure the mapping is done */
+        uassert_true(_count == 4);
+
+        rt_aspace_delete(aspace);
+
+        uassert_true(rt_pages_free(pgtbl, 2) == 1); /* page free must success */
+    });
+}
+
+/**
+ * @brief Memory Map on Virtual Address Space to Mappable Object
+ * int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
+ *                   mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
+ * int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
+ *                          rt_size_t length, rt_size_t attr, mm_flag_t flags,
+ *                          rt_mem_obj_t mem_obj, rt_size_t offset);
+ */
+static void aspace_map_tc(void)
+{
+    /**
+     * @brief Requirement:
+     * Robustness, filter out invalid input
+     */
+    void *vaddr = RT_NULL;
+    uassert_true(rt_aspace_map(0, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
+    uassert_true(vaddr == RT_NULL);
+
+    vaddr = (void *)USER_VADDR_START;
+    uassert_true(rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
+    uassert_true(vaddr == RT_NULL);
+
+    uassert_true(rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, -1, &rt_mm_dummy_mapper, 0));
+    uassert_true(vaddr == RT_NULL);
+
+    /**
+     * @brief Requirement:
+     * in _rt_aspace_map:_varea_install
+     * must not cover an existing varea when a fixed mapping is mandatory
+     */
+    vaddr = (void *)((rt_ubase_t)aspace_map_tc & ~ARCH_PAGE_MASK);
+    CONSIST_HEAP(
+        uassert_true(
+            rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0)));
+    uassert_true(vaddr == RT_NULL);
+
+    /**
+     * @brief Requirement:
+     * in _rt_aspace_map:_varea_install:_find_free
+     * verify that this routine can choose a free region of the specified size
+     * that also satisfies the specified alignment requirement
+     */
+    #define ALIGN_REQ (0x04000000)
+    CONSIST_HEAP({
+        uassert_true(!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_CREATE(0, ALIGN_REQ), &rt_mm_dummy_mapper, 0));
+        uassert_true(!((rt_ubase_t)vaddr & (ALIGN_REQ - 1)));
+        rt_aspace_unmap(&rt_kernel_space, vaddr);
+    });
+
+    /* test internal APIs */
+    test_find_free();
+}
+
+/**
+ * @brief Page frames mapping to varea
+ * complete the page table entries on the specified varea, and handle TLB maintenance
+ * There are 2 variants of this API
+ *
+ * int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
+ * int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);
+ */
+
+static rt_varea_t _create_varea(const size_t size)
+{
+    rt_varea_t varea;
+    void *vaddr = rt_ioremap_start;
+
+    varea = rt_malloc(sizeof(*varea));
+    uassert_true(!!varea);
+    uassert_true(!rt_aspace_map_static(&rt_kernel_space, varea, &vaddr, size, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
+    /* varea is rt_malloc-ed above; clear the flag so unmap will free it */
+    varea->flag &= ~MMF_STATIC_ALLOC;
+    uassert_true(!!vaddr);
+    return varea;
+}
+
+static void test_varea_map_page(void)
+{
+    /**
+     * @brief rt_varea_map_page
+     * Requirements: complete the page table entry
+     */
+    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
+    rt_varea_t varea = _create_varea(buf_sz);
+    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
+    {
+        void *page = rt_pages_alloc(0);
+        uassert_true(!!page);
+        uassert_true(!rt_varea_map_page(varea, varea->start + i, page));
+
+        /* let page manager handle the free of page */
+        rt_varea_pgmgr_insert(varea, page);
+        uassert_true(rt_kmem_v2p(varea->start + i) == (page + PV_OFFSET));
+    }
+
+    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
+}
+
+static void test_varea_map_range(void)
+{
+    /**
+     * @brief rt_varea_map_range
+     * Requirements: complete the page table entry
+     */
+    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
+    rt_varea_t varea = _create_varea(buf_sz);
+    void *page = rt_pages_alloc(rt_page_bits(buf_sz));
+    uassert_true(!!page);
+    uassert_true(!rt_varea_map_range(varea, varea->start, page + PV_OFFSET, buf_sz));
+    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
+    {
+        uassert_true(rt_kmem_v2p(varea->start + i) == (page + i + PV_OFFSET));
+    }
+
+    uassert_true(rt_pages_free(page, rt_page_bits(buf_sz)));
+    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
+}
+
+static void varea_map_tc(void)
+{
+    CONSIST_HEAP(test_varea_map_page());
+    CONSIST_HEAP(test_varea_map_range());
+}
+
+static void aspace_traversal_tc(void)
+{
+    /**
+     * @brief Requirement
+     * Iterate over each varea in the kernel space
+     */
+    CONSIST_HEAP(aspace_delete_tc());
+    uassert_true(4 == _count);
+}
+
+#ifdef ARCH_ARMV8
+static void aspace_control_tc(void)
+{
+    /* this case is designed to cover exactly one page */
+    const size_t buf_sz = ARCH_PAGE_SIZE;
+    void *vaddr = RT_NULL;
+    volatile char *remap_nocache;
+    int platform_cache_probe;
+    uassert_true(!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_PREFETCH, &rt_mm_dummy_mapper, 0));
+    uassert_true(!!vaddr);
+
+    /* map non-cacheable region to verify cache */
+    remap_nocache = rt_ioremap(rt_kmem_v2p(vaddr), buf_sz);
+    uassert_true(!!remap_nocache);
+
+    /* pre-probe: check whether the cached and uncached views can be told apart */
+    rt_memset(vaddr, 0xba, buf_sz);
+    /* no barrier needed: both accesses run on the same core */
+    platform_cache_probe = memtest(remap_nocache, 0xab, buf_sz);
+
+    if (!platform_cache_probe)
+    {
+        LOG_I("Cannot distinguish cache attributes on the current platform");
+    }
+    else
+    {
+        LOG_I("Ready to verify the attributes of cached & non-cacheable mappings");
+    }
+
+    /* verify cache */
+    uassert_true(!rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_NONCACHE));
+    rt_memset(vaddr, 0, buf_sz);
+    uassert_true(!memtest(remap_nocache, 0, buf_sz));
+
+    /* the reverse operation, MMU_CNTL_CACHE */
+    uassert_true(!rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_CACHE));
+
+    rt_iounmap(remap_nocache);
+    uassert_true(!rt_aspace_unmap(&rt_kernel_space, vaddr));
+}
+#endif
+
+static void aspace_tc(void)
+{
+    UTEST_UNIT_RUN(aspace_create_tc);
+    UTEST_UNIT_RUN(aspace_delete_tc);
+    UTEST_UNIT_RUN(aspace_map_tc);
+    UTEST_UNIT_RUN(aspace_traversal_tc);
+#ifdef ARCH_ARMV8
+    UTEST_UNIT_RUN(aspace_control_tc);
+#endif
+    UTEST_UNIT_RUN(varea_map_tc);
+
+    /* functionality */
+    UTEST_UNIT_RUN(synchronization_tc);
+    return;
+}
+
+#endif /* __TEST_ASPACE_API_H__ */
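Most units above wrap their bodies in CONSIST_HEAP() from common.h, which is not part of this listing. Presumably it snapshots heap usage around the statement and asserts that nothing leaked; a hedged reconstruction of the idea:

    /* assumption: common.h defines something along these lines */
    #define CONSIST_HEAP(statement) do {                            \
            rt_size_t total, used_before, used_after, max_used;     \
            rt_memory_info(&total, &used_before, &max_used);        \
            statement;                                              \
            rt_memory_info(&total, &used_after, &max_used);         \
            uassert_true(used_before == used_after);                \
        } while (0)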

+ 80 - 0
examples/utest/testcases/mm/test_aspace_api_internal.h

@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-23     WangXiaoyao  Complete testcase for internal APIs
+ */
+#ifndef __TEST_ASPACE_API_INTERNAL_H__
+#define __TEST_ASPACE_API_INTERNAL_H__
+
+#include "common.h"
+#include "mmu.h"
+#include "test_bst_adpt.h"
+#include <stddef.h>
+
+/**
+ * @brief 3 cases for find free:
+ * with prefer & MAP_FIXED
+ * with prefer
+ * without prefer
+ *
+ * the requirements on find free:
+ * it returns a free subset of the address space
+ * the subset contains `length` contiguous bytes
+ * the alignment requirement is satisfied
+ */
+static void test_find_free(void)
+{
+    void *top_page = rt_kernel_space.start + rt_kernel_space.size - 0x1000;
+    void *vaddr = top_page;
+
+    CONSIST_HEAP({
+        /* type 1, on success */
+        uassert_true(!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0));
+        uassert_true(vaddr == top_page);
+        /* type 1, on failure */
+        uassert_true(rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0));
+        uassert_true(!vaddr);
+
+        /* type 2, on success */
+        vaddr = top_page;
+        uassert_true(!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
+        uassert_true(vaddr < top_page);
+        uassert_true(!!vaddr);
+        rt_aspace_unmap(&rt_kernel_space, vaddr);
+        /* type 2, on failure */
+        vaddr = rt_kernel_space.start;
+        uassert_true(-RT_ENOSPC == rt_aspace_map(&rt_kernel_space, &vaddr, rt_kernel_space.size - 0x08000000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
+        uassert_true(!vaddr);
+
+        /* type 3, on success is covered by ioremap */
+        /* type 3, on failure */
+        size_t map_size = ARCH_PAGE_SIZE;
+        while (1)
+        {
+            void *va = rt_ioremap(0, map_size);
+            if (va)
+            {
+                uassert_true(1);
+                rt_iounmap(va);
+                map_size <<= 1;
+            }
+            else
+            {
+                uassert_true(1);
+                break;
+            }
+        }
+
+        /* free top page */
+        rt_aspace_unmap(&rt_kernel_space, top_page);
+    });
+
+    /* test mm_private.h */
+    CONSIST_HEAP(test_bst_adpt());
+}
+
+#endif /* __TEST_ASPACE_API_INTERNAL_H__ */
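For reference, the alignment case exercised by aspace_map_tc reduces to the following pattern (a sketch, with the error handling trimmed):

    void *vaddr = RT_NULL;
    /* ask for one page placed on a 64 MiB boundary, anywhere in kernel space */
    if (!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB,
                       MMF_CREATE(0, 0x04000000), &rt_mm_dummy_mapper, 0))
    {
        RT_ASSERT(((rt_ubase_t)vaddr & (0x04000000 - 1)) == 0);
        rt_aspace_unmap(&rt_kernel_space, vaddr);
    }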

+ 107 - 0
examples/utest/testcases/mm/test_bst_adpt.h

@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-23     WangXiaoyao  Complete testcase for internal APIs
+ */
+#ifndef __TEST_BST_ADPT_H__
+#define __TEST_BST_ADPT_H__
+
+#include "common.h"
+#include "lwp_arch.h"
+
+#ifdef RT_USING_SMART
+#include "lwp_user_mm.h"
+#include "mm_aspace.h"
+#include "mm_flag.h"
+#include <mm_private.h>
+#include <lwp_pid.h>
+
+void test_bst_adpt(void)
+{
+    size_t flags = MMF_MAP_FIXED;
+    void *target_va = (void *)USER_VADDR_START + 0x3000;
+    size_t map_size = 0x1000;
+    void *prev_va = target_va - map_size;
+    void *next_va = target_va + map_size + 1;
+    struct rt_lwp *lwp;
+    rt_aspace_t aspace;
+    rt_mem_obj_t mem_obj;
+
+    /* create aspace by lwp */
+    lwp = lwp_new();
+    uassert_true(!!lwp);
+    uassert_true(!lwp_user_space_init(lwp, 0));
+    aspace = lwp->aspace;
+    mem_obj = &lwp->lwp_obj->mem_obj;
+    uassert_true(!!aspace);
+    uassert_true(!!mem_obj);
+
+    /* _aspace_bst_search not cover */
+    uassert_true(!_aspace_bst_search(aspace, target_va)); // ret == NULL
+
+    uassert_true(
+        !rt_aspace_map(aspace, &target_va, map_size, MMU_MAP_K_RWCB, flags, mem_obj, 0));
+    /* 2 wrappers */
+    uassert_true(
+        !rt_aspace_map(aspace, &prev_va, map_size - 1, MMU_MAP_K_RWCB, flags, mem_obj, 0));
+    uassert_true(
+        !rt_aspace_map(aspace, &next_va, map_size - 1, MMU_MAP_K_RWCB, flags, mem_obj, 0));
+
+    /* _aspace_bst_search */
+    uassert_true(!!_aspace_bst_search(aspace, target_va));
+    uassert_true(!_aspace_bst_search(aspace, target_va + map_size));
+    uassert_true(!_aspace_bst_search(aspace, target_va - 1));
+
+    /**
+     * @brief _aspace_bst_search_exceed
+     * returns the varea with the lowest start address not below the given address
+     */
+    rt_varea_t find;
+    find = _aspace_bst_search_exceed(aspace, target_va);
+    uassert_true(!!find);
+    uassert_true(find->start == target_va);
+
+    rt_varea_t last = ASPACE_VAREA_LAST(aspace);
+    find = _aspace_bst_search_exceed(aspace, last->start + 1);
+    uassert_true(!find);
+
+    /**
+     * @brief _aspace_bst_search_overlap
+     * for a given range [start, end], check the five classes of overlap
+     */
+    /* 1. all below */
+    struct _mm_range range = {.start = prev_va - 2, .end = prev_va - 1};
+    find = _aspace_bst_search_overlap(aspace, range);
+    uassert_true(!find);
+    /* 2. start below */
+    range.end = prev_va;
+    find = _aspace_bst_search_overlap(aspace, range);
+    uassert_true(!!find);
+    uassert_true(find->start == prev_va);
+    /* 3. all wrapped */
+    range.start = prev_va;
+    range.end = prev_va + 1;
+    find = _aspace_bst_search_overlap(aspace, range);
+    uassert_true(!!find);
+    uassert_true(find->start == prev_va);
+    /* 4. end exceed */
+    range.start = next_va;
+    range.end = next_va + map_size + 1;
+    find = _aspace_bst_search_overlap(aspace, range);
+    uassert_true(!!find);
+    uassert_true(find->start == next_va);
+    /* 5. all exceed */
+    range.start = next_va + map_size;
+    find = _aspace_bst_search_overlap(aspace, range);
+    uassert_true(!find);
+
+    lwp_ref_dec(lwp);
+}
+
+#endif /* RT_USING_SMART */
+
+#endif /* __TEST_BST_ADPT_H__ */
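The five overlap classes walked through above line up as follows against an existing varea (text sketch; [q] is the queried range):

                 [ varea ]
    1. [q]                      all below: no hit
    2.     [q--]                start below, end inside: hit
    3.        [q]               fully wrapped: hit
    4.            [--q]         start inside, end beyond: hit
    5.                   [q]    all beyond: no hit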

+ 115 - 0
examples/utest/testcases/mm/test_cache_aarch64.h

@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-17     WangXiaoyao  cache API unit test
+ */
+#ifndef __TEST_CACHE_AARCH64_H__
+#define __TEST_CACHE_AARCH64_H__
+
+#include "common.h"
+#include <cache.h>
+
+static const char *platform_cache_not_guarantee = "Cannot guarantee cache operation works";
+
+/**
+ * ==============================================================
+ * TEST FEATURE
+ * API under cache.h
+ *
+ * void rt_hw_icache_invalidate_range(unsigned long start_addr, int size);
+ * void rt_hw_cpu_icache_invalidate(void *addr, rt_size_t size);
+ * void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, rt_size_t size);
+ * ==============================================================
+ */
+
+static int _get1_const(void)
+{
+    return 1;
+}
+
+static int _get1(void)
+{
+    return 1;
+}
+
+static int _get2(void)
+{
+    return 2;
+}
+
+/* hot-patch the code and test whether the new value is visible through the icache */
+static void _test_icache_invalidate_range(void)
+{
+    /* reset _get1 */
+    rt_memcpy(_get1, _get1_const, _get2 - _get1);
+    rt_hw_cpu_dcache_clean(_get1, _get2 - _get1);
+    rt_hw_cpu_icache_invalidate(_get1, _get2 - _get1);
+    uassert_true(1 == _get1());
+
+    /* copy _get2 over _get1; if the change shows through before any maintenance, caching cannot be observed here */
+    rt_memcpy(_get1, _get2, _get2 - _get1);
+    if (1 != _get1())
+        LOG_W(platform_cache_not_guarantee);
+
+    rt_hw_cpu_dcache_clean(_get1, _get2 - _get1);
+    rt_hw_cpu_icache_invalidate(_get1, _get2 - _get1);
+    __asm__ volatile("isb");
+    uassert_true(2 == _get1());
+    LOG_I("%s ok", __func__);
+}
+
+/* due to the cache hierarchy of cortex-a, this should be done on 2 separate cpus */
+static void _test_dcache_clean_and_invalidate(void)
+{
+    const size_t padding = 1024 * 2;
+    const size_t buf_sz = ARCH_PAGE_SIZE * 2;
+    volatile char *remap_nocache;
+    char *page = rt_pages_alloc(rt_page_bits(buf_sz));
+    uassert_true(!!page);
+
+    rt_memset(page, 0xab, buf_sz);
+    rt_hw_cpu_dcache_invalidate(page, buf_sz);
+
+    int _outdate_flag = 0;
+    if (memtest(page, 0xab, buf_sz))
+        _outdate_flag = 1;
+
+    /* after ioremap, we can access system memory to verify outcome */
+    remap_nocache = rt_ioremap(page + PV_OFFSET, buf_sz);
+
+    rt_hw_cpu_dcache_clean(page + padding, ARCH_PAGE_SIZE);
+    memtest(remap_nocache + padding, 0xab, ARCH_PAGE_SIZE);
+
+    if (!_outdate_flag)
+        LOG_W(platform_cache_not_guarantee);
+    else
+        LOG_I("%s ok", __func__);
+
+    rt_pages_free(page, rt_page_bits(buf_sz));
+    rt_iounmap(remap_nocache);
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    /* todo: format API under cache.h first */
+    UTEST_UNIT_RUN(_test_icache_invalidate_range);
+    UTEST_UNIT_RUN(_test_dcache_clean_and_invalidate);
+}
+
+UTEST_TC_EXPORT(testcase, "testcases.libcpu.cache", utest_tc_init, utest_tc_cleanup, 10);
+
+#endif /* __TEST_CACHE_AARCH64_H__ */
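The icache unit above encodes the canonical AArch64 self-modifying-code sequence. Condensed into a helper, it would look like this sketch (the function name is illustrative):

    static void patch_code(void *dst, const void *src, size_t len)
    {
        rt_memcpy(dst, src, len);               /* 1. write the new instructions */
        rt_hw_cpu_dcache_clean(dst, len);       /* 2. push them to the point of unification */
        rt_hw_cpu_icache_invalidate(dst, len);  /* 3. discard stale instructions */
        __asm__ volatile("isb");                /* 4. resynchronize the fetch pipeline */
    }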

+ 194 - 0
examples/utest/testcases/mm/test_cache_rv64.h

@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-17     WangXiaoyao  cache API unit test
+ */
+
+#ifndef __TEST_CACHE_RV64_H
+#define __TEST_CACHE_RV64_H
+
+#ifdef ARCH_RISCV64
+#include "riscv_mmu.h"
+#include <utest.h>
+#include <cache.h>
+#include <page.h>
+#include <mmu.h>
+#include <ioremap.h>
+
+/**
+ * ==============================================================
+ * TEST FEATURE
+ * API under cache.h
+ * rt_hw_sync_cache_local
+ *
+ * rt_hw_cpu_dcache_clean
+ * rt_hw_cpu_dcache_invalidate
+ * rt_hw_cpu_dcache_clean_invalidate
+ * rt_hw_cpu_dcache_clean_all
+ * rt_hw_cpu_dcache_invalidate_all // meaningless
+ * rt_hw_cpu_dcache_clean_invalidate_all
+ * rt_hw_cpu_icache_invalidate
+ * rt_hw_cpu_icache_invalidate_all
+ * ==============================================================
+ */
+
+/* ensure the ISA extensions are valid on the target architecture */
+static void _illegal_instr(void)
+{
+    rt_hw_sync_cache_local(_illegal_instr, 64);
+    rt_hw_cpu_dcache_clean(_illegal_instr, 64);
+    rt_hw_cpu_dcache_invalidate(_illegal_instr, 64);
+    // rt_hw_cpu_dcache_clean_invalidate(_illegal_instr, 64); // C908 ONLY
+    rt_hw_cpu_dcache_clean_all();
+    rt_hw_cpu_dcache_invalidate_all(); // !CAREFUL must be inline
+    // rt_hw_cpu_dcache_clean_invalidate_all(); // C908 ONLY
+    rt_hw_cpu_icache_invalidate(_illegal_instr, 64);
+    rt_hw_cpu_icache_invalidate_all();
+    uassert_true(1);
+    LOG_I("All ok!");
+}
+
+static int _get1(void)
+{
+    return 1;
+}
+
+static int _get2(void)
+{
+    return 2;
+}
+
+/* hot-patch the code and test whether the new value is visible through the icache */
+static void _test_cache_sync(void)
+{
+    uassert_true(1 == _get1());
+    rt_memcpy(_get1, _get2, _get2 - _get1);
+    uassert_true(1 == _get1());
+    rt_hw_sync_cache_local(_get1, _get2 - _get1);
+    uassert_true(2 == _get1());
+    LOG_I("%s ok", __func__);
+}
+
+/* the clean operation should affect, and only affect, the specified range; verified by writing to a page */
+static void _test_dcache_clean(void)
+{
+    const size_t padding = 1024 * 3;
+    const size_t buf_sz = ARCH_PAGE_SIZE * 2;
+
+    char *page = rt_pages_alloc(rt_page_bits(buf_sz));
+    uassert_true(!!page);
+
+    /* after ioremap, we can access system memory to verify outcome */
+    volatile char *remap_nocache = rt_ioremap(page + PV_OFFSET, buf_sz);
+    rt_memset(page, 0xab, buf_sz);
+    rt_hw_cpu_sync();
+
+    int _outdate_flag = 0;
+    for (size_t i = padding; i < ARCH_PAGE_SIZE; i++)
+    {
+        if (remap_nocache[i] != 0xab)
+        {
+            _outdate_flag = 1;
+            break;
+        }
+    }
+
+    page[padding - 1] = 0xac;
+    page[padding + ARCH_PAGE_SIZE] = 0xac;
+    rt_hw_cpu_dcache_clean(page + padding, ARCH_PAGE_SIZE);
+
+    /* release these dcache lines so eviction does not write the padding bytes back */
+    rt_hw_cpu_dcache_invalidate(page + padding, ARCH_PAGE_SIZE);
+    uassert_true(remap_nocache[padding - 1] != 0xac);
+    uassert_true(remap_nocache[padding + ARCH_PAGE_SIZE] != 0xac);
+
+    int _test_ok = 1;
+    for (size_t i = padding; i < ARCH_PAGE_SIZE; i++)
+    {
+        if (remap_nocache[i] != 0xab)
+        {
+            _test_ok = 0;
+            break;
+        }
+    }
+    uassert_true(_test_ok);
+
+    if (!_outdate_flag)
+        LOG_W("Cannot guarantee clean works");
+    else
+        LOG_I("%s ok", __func__);
+
+    rt_pages_free(page, rt_page_bits(buf_sz));
+    rt_iounmap(remap_nocache);
+}
+
+/* the invalidate operation should affect, and only affect, the specified range */
+static void _test_dcache_invalidate(void)
+{
+    const size_t padding = 1024 * 3;
+    const size_t buf_sz = ARCH_PAGE_SIZE * 2;
+
+    /* prepare */
+    char *page = rt_pages_alloc(rt_page_bits(buf_sz));
+    uassert_true(!!page);
+
+    volatile char *remap_nocache = rt_ioremap(page + PV_OFFSET, buf_sz);
+    rt_memset(page, 0x0, buf_sz);
+    rt_hw_cpu_sync();
+
+    int _outdate_flag = 0;
+    for (size_t i = padding; i < ARCH_PAGE_SIZE; i++)
+    {
+        remap_nocache[i] = 0xab;
+        rt_hw_cpu_dcache_invalidate((void *)&remap_nocache[i], 1);
+    }
+
+    rt_hw_cpu_dcache_clean_all();
+
+    int _test_ok = 1;
+    for (size_t i = padding; i < ARCH_PAGE_SIZE; i++)
+    {
+        if (remap_nocache[i] == 0xab)
+        {
+            _test_ok = 0;
+            break;
+        }
+    }
+    uassert_true(_test_ok);
+
+    LOG_I("%s ok", __func__);
+
+    rt_pages_free(page, rt_page_bits(buf_sz));
+    rt_iounmap(remap_nocache);
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(_illegal_instr);
+#ifdef BOARD_allwinnerd1s
+    /* thead ISA extension */
+    UTEST_UNIT_RUN(_test_cache_sync);
+    /* part of it is hard to test on simulation machine */
+    UTEST_UNIT_RUN(_test_dcache_clean);
+    UTEST_UNIT_RUN(_test_dcache_invalidate);
+#endif
+}
+
+UTEST_TC_EXPORT(testcase, "testcases.libcpu.cache", utest_tc_init, utest_tc_cleanup, 10);
+
+#endif /* ARCH_RISCV64 */
+#endif /* __TEST_CACHE_RV64_H */
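Both dcache units rely on the same double-mapping trick: the frames are reachable cached through the linear mapping and uncached through rt_ioremap, so the uncached alias exposes what actually reached memory. Stripped of the assertions, the skeleton is:

    char *page = rt_pages_alloc(rt_page_bits(buf_sz));            /* cached view */
    volatile char *alias = rt_ioremap(page + PV_OFFSET, buf_sz);  /* uncached view */
    page[0] = 0xab;                    /* lands in the dcache first */
    rt_hw_cpu_dcache_clean(page, 1);   /* force the line out to memory */
    /* alias[0] now reads 0xab through the uncached mapping */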

+ 165 - 0
examples/utest/testcases/mm/test_synchronization.h

@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-03-24     WangXiaoyao  Complete testcase for synchronization
+ */
+#ifndef __TEST_SYNCHRONIZATION_H__
+#define __TEST_SYNCHRONIZATION_H__
+
+#include "common.h"
+#include "semaphore.h"
+
+#ifdef RT_USING_SMP
+
+#define THREAD_CNT RT_CPUS_NR
+#define TEST_TIMES 2000
+#define PRIO (UTEST_THR_PRIORITY + 1)
+/* size of mapping buffer */
+#define BUF_SIZE (64ul << 10)
+
+/* rendezvous with the sibling thread */
+static struct rt_semaphore done;
+static semaphore_t sem1[THREAD_CNT / 2];
+static semaphore_t sem2[THREAD_CNT / 2];
+
+static void *map(void)
+{
+    int err;
+    int flags = MMF_PREFETCH;
+    size_t attr = MMU_MAP_K_RWCB;
+    void *vaddr = 0;
+    err =
+        rt_aspace_map(&rt_kernel_space, &vaddr, BUF_SIZE, attr, flags, &rt_mm_dummy_mapper, 0);
+    if (err)
+        uassert_true(0);
+    return vaddr;
+}
+
+static void unmap(void *buf)
+{
+    int err;
+    err =
+        rt_aspace_unmap(&rt_kernel_space, buf);
+    if (err)
+        uassert_true(0);
+    return;
+}
+
+static void group1_entry(void *param)
+{
+    const size_t id = (size_t)param;
+    size_t test_times = TEST_TIMES;
+    size_t alive = test_times / 10;
+    void *buf;
+
+    while (test_times--)
+    {
+        if (test_times % alive == 0)
+            uassert_true(1);
+
+        buf = map();
+
+        memset(buf, 'A' + id, BUF_SIZE);
+        /* if another core wrote to this memory, force the changes to be visible here */
+        rt_hw_dmb();
+
+        if (memtest(buf, 'A' + id, BUF_SIZE))
+            uassert_true(0);
+
+        semaphore_signal(&sem1[id]);
+        semaphore_wait(&sem2[id]);
+        unmap(buf);
+    }
+
+    rt_sem_release(&done);
+    return;
+}
+
+static void group2_entry(void *param)
+{
+    const size_t id = (size_t)param;
+    size_t test_times = TEST_TIMES;
+    size_t alive = test_times / 10;
+    void *buf;
+
+    while (test_times--)
+    {
+        if (test_times % alive == 0)
+            uassert_true(1);
+
+        semaphore_signal(&sem2[id]);
+        semaphore_wait(&sem1[id]);
+        buf = map();
+
+        memset(buf, 'a' + id, BUF_SIZE);
+        /* if another core wrote to this memory, force the changes to be visible here */
+        rt_hw_dmb();
+
+        if (memtest(buf, 'a' + id, BUF_SIZE))
+            uassert_true(0);
+
+        unmap(buf);
+    }
+
+    rt_sem_release(&done);
+    return;
+}
+
+/**
+ * @brief On an SMP system, create at least 4 threads:
+ * 2 doing map and 2 doing unmap at the same moment
+ */
+
+static void synchronization_tc(void)
+{
+    rt_thread_t group1[THREAD_CNT / 2];
+    rt_thread_t group2[THREAD_CNT / 2];
+
+    rt_sem_init(&done, __func__, 0, RT_IPC_FLAG_FIFO);
+
+    for (size_t i = 0; i < THREAD_CNT / 2; i++)
+    {
+        char name[RT_NAME_MAX];
+        rt_sprintf(name, "grp1_%d", (int)i);
+        group1[i] =
+            rt_thread_create(name, group1_entry, (void *)i, ARCH_PAGE_SIZE, PRIO, 10);
+        uassert_true(!!group1[i]);
+        semaphore_init(&sem1[i], 0);
+
+        uassert_true(!rt_thread_startup(group1[i]));
+    }
+
+    for (size_t i = 0; i < THREAD_CNT / 2; i++)
+    {
+        char name[RT_NAME_MAX];
+        rt_sprintf(name, "grp2_%d", (int)i);
+        group2[i] =
+            rt_thread_create(name, group2_entry, (void *)i, ARCH_PAGE_SIZE, PRIO, 10);
+        uassert_true(!!group2[i]);
+        semaphore_init(&sem2[i], 0);
+
+        uassert_true(!rt_thread_startup(group2[i]));
+    }
+
+    /* wait all thread exit */
+    for (size_t i = 0; i < (THREAD_CNT / 2 * 2); i++)
+    {
+        rt_sem_take(&done, RT_WAITING_FOREVER);
+    }
+    LOG_I("all threads exit");
+    rt_sem_detach(&done);
+}
+
+#else /* RT_USING_SMP */
+
+static void synchronization_tc(void)
+{
+    uassert_true(1);
+}
+#endif /* RT_USING_SMP */
+
+#endif /* __TEST_SYNCHRONIZATION_H__ */
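The sem1/sem2 handshake is what forces map and unmap to collide: a group1 thread maps, fills and verifies its buffer, signals sem1, then blocks on sem2 while still holding the mapping; its group2 sibling signals sem2 and blocks on sem1 before mapping, so once both wake, the unmap in group1 races the map in group2 on the same aspace. Per pair and per iteration:

    group1: map -> fill/verify -> signal(sem1) -> wait(sem2) -> unmap
    group2: signal(sem2) -> wait(sem1) -> map -> fill/verify -> unmap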

+ 0 - 1
libcpu/aarch64/common/cache.h

@@ -26,7 +26,6 @@ static inline void rt_hw_icache_invalidate_all(void)
     __asm_invalidate_icache_all();
 }
 
-void rt_hw_icache_invalidate_range(unsigned long start_addr, int size);
 void rt_hw_cpu_icache_invalidate(void *addr, rt_size_t size);
 void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, rt_size_t size);
 

+ 4 - 3
libcpu/aarch64/common/trap.c

@@ -90,6 +90,7 @@ int check_user_stack(unsigned long esr, struct rt_hw_exp_stack *regs)
         break;
     case 0x21:
     case 0x24:
+    case 0x25:
         fault_op = MM_FAULT_OP_WRITE;
         fault_type = _get_type(esr);
         break;
@@ -101,12 +102,12 @@ int check_user_stack(unsigned long esr, struct rt_hw_exp_stack *regs)
     if (fault_op)
     {
         asm volatile("mrs %0, far_el1":"=r"(dfar));
-        struct rt_mm_fault_msg msg = {
+        struct rt_aspace_fault_msg msg = {
             .fault_op = fault_op,
             .fault_type = fault_type,
-            .vaddr = dfar,
+            .fault_vaddr = dfar,
         };
-        if (rt_mm_fault_try_fix(&msg))
+        if (rt_aspace_fault_try_fix(&msg))
         {
             ret = 1;
         }

+ 19 - 2
libcpu/arm/cortex-a/mmu.c

@@ -11,6 +11,10 @@
 #include <rthw.h>
 #include <rtthread.h>
 
+#define DBG_TAG "hw.mmu"
+#define DBG_LVL DBG_LOG
+#include <rtdbg.h>
+
 #include <board.h>
 #include "cp15.h"
 #include "mm_page.h"
@@ -55,11 +59,26 @@ void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd,
 
 void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
 {
+    void *vaddr;
+    size_t length;
+    /* init kernel space */
+#ifdef RT_USING_SMART
+    rt_aspace_init(&rt_kernel_space, (void *)USER_VADDR_TOP, -USER_VADDR_TOP, (void *)MMUTable);
+#else
+    rt_aspace_init(&rt_kernel_space, (void *)0x1000, 0 - 0x1000, (void *)MMUTable);
+#endif /* RT_USING_SMART */
+
     /* set page table */
     for(; size > 0; size--)
     {
         if (mdesc->paddr_start == (rt_uint32_t)ARCH_MAP_FAILED)
             mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
+
+        vaddr = (void *)mdesc->vaddr_start;
+        length = mdesc->vaddr_end - mdesc->vaddr_start;
+        rt_aspace_map_static(&rt_kernel_space, &mdesc->varea, &vaddr, length,
+                             mdesc->attr, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0);
+
         rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                 mdesc->paddr_start, mdesc->attr);
         mdesc++;
@@ -123,12 +142,10 @@ int rt_hw_mmu_map_init(struct rt_aspace *aspace, void* v_address, size_t size, s
     }
 
 #ifdef RT_USING_SMART
-    rt_aspace_init(&rt_kernel_space, (void *)USER_VADDR_TOP, 0 - USER_VADDR_TOP, vtable);
     rt_ioremap_start = v_address;
     rt_ioremap_size = size;
     rt_mpr_start = rt_ioremap_start - rt_mpr_size;
 #else
-    rt_aspace_init(&rt_kernel_space, (void *)0x1000, 0 - 0x1000, vtable);
     rt_mpr_start = (void *)0 - rt_mpr_size;
 #endif
 

+ 1 - 0
libcpu/arm/cortex-a/mmu.h

@@ -52,6 +52,7 @@ struct mem_desc
     rt_uint32_t vaddr_end;
     rt_uint32_t paddr_start;
     rt_uint32_t attr;
+    struct rt_varea varea;
 };
 
 #define MMU_MAP_MTBL_XN       (1<<0)

+ 3 - 3
libcpu/arm/cortex-a/trap.c

@@ -50,12 +50,12 @@ int check_user_stack(struct rt_hw_exp_stack *regs)
 
     if ((dfar >= (void *)USER_STACK_VSTART) && (dfar < (void *)USER_STACK_VEND))
     {
-        struct rt_mm_fault_msg msg = {
+        struct rt_aspace_fault_msg msg = {
             .fault_op = MM_FAULT_OP_WRITE,
             .fault_type = MM_FAULT_TYPE_PAGE_FAULT,
-            .vaddr = dfar,
+            .fault_vaddr = dfar,
         };
-        if (rt_mm_fault_try_fix(&msg))
+        if (rt_aspace_fault_try_fix(&msg))
         {
             regs->pc -= 8;
             return 1;

+ 3 - 3
libcpu/risc-v/t-head/c906/trap.c

@@ -203,13 +203,13 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
 
     if (fault_op)
     {
-        struct rt_mm_fault_msg msg = {
+        struct rt_aspace_fault_msg msg = {
             .fault_op = fault_op,
             .fault_type = fault_type,
-            .vaddr = (void *)stval,
+            .fault_vaddr = (void *)stval,
         };
 
-        if (rt_mm_fault_try_fix(&msg))
+        if (rt_aspace_fault_try_fix(&msg))
         {
             return;
         }

+ 1 - 1
libcpu/risc-v/virt64/cache.h

@@ -48,7 +48,7 @@ ALWAYS_INLINE void rt_hw_cpu_icache_invalidate_all_local() {}
 #define rt_hw_icache_invalidate_all rt_hw_cpu_icache_invalidate_all
 
 /** instruction barrier */
-void rt_hw_cpu_sync(void);
+static inline void rt_hw_cpu_sync(void) {}
 
 /**
 * @brief local cpu icache & dcache synchronization

+ 3 - 3
libcpu/risc-v/virt64/trap.c

@@ -200,13 +200,13 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
 
     if (fault_op)
     {
-        struct rt_mm_fault_msg msg = {
+        struct rt_aspace_fault_msg msg = {
             .fault_op = fault_op,
             .fault_type = fault_type,
-            .vaddr = (void *)stval,
+            .fault_vaddr = (void *)stval,
         };
 
-        if (rt_mm_fault_try_fix(&msg))
+        if (rt_aspace_fault_try_fix(&msg))
         {
             return;
         }

+ 6 - 0
src/kservice.c

@@ -1568,6 +1568,12 @@ rt_inline void _heap_unlock(rt_base_t level)
 #endif
 }
 
+#ifdef RT_USING_UTESTCASES
+/* export to utest to observe the inner statements */
+rt_base_t rt_heap_lock(void) __attribute__((alias("_heap_lock")));
+void rt_heap_unlock(rt_base_t level) __attribute__((alias("_heap_unlock")));
+#endif
+
 #if defined(RT_USING_SMALL_MEM_AS_HEAP)
 static rt_smem_t system_heap;
 rt_inline void _smem_info(rt_size_t *total,