
[lwp] arch should recycle resources it allocated

wangxiaoyao, 2 years ago
commit 672966cc41

components/lwp/arch/aarch64/cortex-a/lwp_arch.c (+21 -6)

@@ -13,6 +13,10 @@
 
 #ifdef ARCH_MM_MMU
 
+#define DBG_TAG "lwp.arch"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
 #include <lwp_arch.h>
 #include <lwp_user_mm.h>
 
@@ -25,10 +29,11 @@ int arch_user_space_init(struct rt_lwp *lwp)
     mmu_table = (size_t *)rt_pages_alloc(0);
     if (!mmu_table)
     {
-        return -1;
+        return -RT_ENOMEM;
     }
 
     lwp->end_heap = USER_HEAP_VADDR;
+
     memset(mmu_table, 0, ARCH_PAGE_SIZE);
     rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
 
@@ -36,7 +41,7 @@ int arch_user_space_init(struct rt_lwp *lwp)
         (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
     if (!lwp->aspace)
     {
-        return -1;
+        return -RT_ERROR;
     }
 
     return 0;
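
Note on the error paths above: the second failure branch returns without releasing mmu_table. In the spirit of the commit title, a leak-free variant would recycle the table before bailing out; a minimal sketch (my addition, not part of this commit):

    lwp->aspace = rt_aspace_create(
        (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
    if (!lwp->aspace)
    {
        /* recycle the order-0 table page allocated earlier in this function */
        rt_pages_free(mmu_table, 0);
        return -RT_ERROR;
    }
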
@@ -47,12 +52,22 @@ void *arch_kernel_mmu_table_get(void)
     return (void *)NULL;
 }
 
-void arch_user_space_vtable_free(struct rt_lwp *lwp)
+void arch_user_space_free(struct rt_lwp *lwp)
 {
-    if (lwp && lwp->aspace->page_table)
+    if (lwp)
+    {
+        RT_ASSERT(lwp->aspace);
+        void *pgtbl = lwp->aspace->page_table;
+        rt_aspace_delete(lwp->aspace);
+
+        /* must be freed after the aspace is deleted; pgtbl is still required for unmap */
+        rt_pages_free(pgtbl, 0);
+        lwp->aspace = NULL;
+    }
+    else
     {
-        rt_pages_free(lwp->aspace->page_table, 0);
-        lwp->aspace->page_table = NULL;
+        LOG_W("%s: NULL lwp as parameter", __func__);
+        RT_ASSERT(0);
     }
 }
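
The ordering enforced above matters because rt_aspace_delete() still needs the page table while unmapping the remaining user regions (as the in-code comment says), so the table pages must outlive the delete call. A condensed sketch of the constraint, using only names from this diff:

    void *pgtbl = lwp->aspace->page_table; /* grab the table before the aspace goes away */
    rt_aspace_delete(lwp->aspace);         /* unmap paths still read pgtbl here */
    rt_pages_free(pgtbl, 0);               /* safe only after the last unmap */
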
 

components/lwp/arch/arm/cortex-a/lwp_arch.c (+21 -6)

@@ -14,6 +14,10 @@
 
 #ifdef ARCH_MM_MMU
 
+#define DBG_TAG "lwp.arch"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
 #include <lwp_arch.h>
 #include <lwp_user_mm.h>
 
@@ -26,7 +30,7 @@ int arch_user_space_init(struct rt_lwp *lwp)
     mmu_table = (size_t *)rt_pages_alloc(2);
     if (!mmu_table)
     {
-        return -1;
+        return -RT_ENOMEM;
     }
 
     lwp->end_heap = USER_HEAP_VADDR;
@@ -34,10 +38,11 @@ int arch_user_space_init(struct rt_lwp *lwp)
     rt_memcpy(mmu_table + KPTE_START, (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
     rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
     rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);
+
     lwp->aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
     if (!lwp->aspace)
     {
-        return -1;
+        return -RT_ERROR;
     }
 
     return 0;
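
On this 32-bit Cortex-A port the top-level translation table spans four 4 KiB pages (the classic 16 KiB L1 table): kernel entries are copied in from KPTE_START and the user three-quarters are zeroed. Assuming the buddy-allocator argument is a power-of-two order rather than a byte count (which the alloc/free pairing in this diff suggests), the free side must pass the same order:

    size_t *tbl = (size_t *)rt_pages_alloc(2); /* 2^2 = 4 pages, a 16 KiB L1 table */
    /* ... populate and use the table ... */
    rt_pages_free(tbl, 2);                     /* order must match the allocation */
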
@@ -68,12 +73,22 @@ void arch_kuser_init(rt_aspace_t aspace, void *vectors)
     rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, (void *)((char *)vectors + 0x1000 - kuser_sz), kuser_sz);
 }
 
-void arch_user_space_vtable_free(struct rt_lwp *lwp)
+void arch_user_space_free(struct rt_lwp *lwp)
 {
-    if (lwp && lwp->aspace->page_table)
+    if (lwp)
+    {
+        RT_ASSERT(lwp->aspace);
+        void *pgtbl = lwp->aspace->page_table;
+        rt_aspace_delete(lwp->aspace);
+
+        /* must be freed after the aspace is deleted; pgtbl is still required for unmap */
+        rt_pages_free(pgtbl, 2);
+        lwp->aspace = RT_NULL;
+    }
+    else
     {
-        rt_pages_free(lwp->aspace->page_table, 2);
-        lwp->aspace->page_table = NULL;
+        LOG_W("%s: NULL lwp as parameter", __func__);
+        RT_ASSERT(0);
     }
 }
 

components/lwp/arch/risc-v/rv64/lwp_arch.c (+23 -7)

@@ -22,6 +22,10 @@
 
 #ifdef ARCH_MM_MMU
 
+#define DBG_TAG "lwp.arch"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
 #include <lwp.h>
 #include <lwp_arch.h>
 #include <lwp_user_mm.h>
@@ -90,18 +94,19 @@ int arch_user_space_init(struct rt_lwp *lwp)
     mmu_table = (rt_ubase_t *)rt_pages_alloc(0);
     if (!mmu_table)
     {
-        return -1;
+        return -RT_ENOMEM;
     }
 
     lwp->end_heap = USER_HEAP_VADDR;
 
-    rt_memcpy(mmu_table, MMUTable, ARCH_PAGE_SIZE);
+    rt_memcpy(mmu_table, rt_kernel_space.page_table, ARCH_PAGE_SIZE);
     rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
+
     lwp->aspace = rt_aspace_create(
         (void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, mmu_table);
     if (!lwp->aspace)
     {
-        return -1;
+        return -RT_ERROR;
     }
 
     return 0;
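
This hunk also switches the source of the kernel mappings from the raw MMUTable symbol to rt_kernel_space.page_table, so each new user table is seeded from the live kernel address space rather than a boot-time alias. If, as I assume, the kernel aspace is initialized from MMUTable, the two views normally coincide; a hypothetical sanity check (not part of this commit) would be:

    RT_ASSERT(rt_kernel_space.page_table == (void *)MMUTable);
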
@@ -112,12 +117,23 @@ void *arch_kernel_mmu_table_get(void)
     return (void *)((char *)MMUTable);
 }
 
-void arch_user_space_vtable_free(struct rt_lwp *lwp)
+void arch_user_space_free(struct rt_lwp *lwp)
 {
-    if (lwp && lwp->aspace->page_table)
+    if (lwp)
+    {
+        RT_ASSERT(lwp->aspace);
+
+        void *pgtbl = lwp->aspace->page_table;
+        rt_aspace_delete(lwp->aspace);
+
+        /* must be freed after the aspace is deleted; pgtbl is still required for unmap */
+        rt_pages_free(pgtbl, 0);
+        lwp->aspace = RT_NULL;
+    }
+    else
     {
-        rt_pages_free(lwp->aspace->page_table, 0);
-        lwp->aspace->page_table = NULL;
+        LOG_W("%s: NULL lwp as parameter", __func__);
+        RT_ASSERT(0);
     }
 }
 

components/lwp/lwp_arch_comm.h (+1 -1)

@@ -44,7 +44,7 @@ void *arch_get_user_sp(void);
 
 /* user space setup and control */
 int arch_user_space_init(struct rt_lwp *lwp);
-void arch_user_space_vtable_free(struct rt_lwp *lwp);
+void arch_user_space_free(struct rt_lwp *lwp);
 void *arch_kernel_mmu_table_get(void);
 void arch_kuser_init(rt_aspace_t aspace, void *vectors);
 int arch_expand_user_stack(void *addr);
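
The rename makes the prototype describe what the function now does: it tears down the whole per-process address space, not just the page table. A caller-side sketch of the intended init/free pairing (hypothetical caller, real function names):

    struct rt_lwp *lwp = /* ... obtain the process object ... */;
    if (arch_user_space_init(lwp) != 0)
        return -RT_ERROR;
    /* ... run the process ... */
    arch_user_space_free(lwp);  /* deletes the aspace and recycles the table */
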

components/lwp/lwp_user_mm.c (+1 -2)

@@ -78,9 +78,8 @@ void lwp_aspace_switch(struct rt_thread *thread)
 
 void lwp_unmap_user_space(struct rt_lwp *lwp)
 {
+    arch_user_space_free(lwp);
     rt_free(lwp->lwp_obj);
-    rt_aspace_delete(lwp->aspace);
-    arch_user_space_vtable_free(lwp);
 }
 
 static const char *user_get_name(rt_varea_t varea)
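
The reordering above completes the ownership story: the arch layer that allocated the aspace and its page table in arch_user_space_init() now recycles both, and this generic function only frees what the lwp core allocated itself. An annotated sketch of the resulting call sequence (restating the new code; the comments are mine):

    void lwp_unmap_user_space(struct rt_lwp *lwp)
    {
        /* arch-owned: deletes the aspace and returns the page-table pages */
        arch_user_space_free(lwp);
        /* generic-owned: per-process object allocated by the lwp core */
        rt_free(lwp->lwp_obj);
    }
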