
mm_area: add attribute support for the text segment

shaojinchun 5 years ago
parent commit d6bcf31279

+ 1 - 1
components/lwp/arch/arm/cortex-a/arch_user_stack.c

@@ -27,7 +27,7 @@ int arch_expand_user_stack(void *addr)
     stack_addr &= ~ARCH_PAGE_MASK;
     if ((stack_addr >= (size_t)USER_STACK_VSTART) && (stack_addr < (size_t)USER_STACK_VEND))
     {
-        void *map = lwp_map_user(lwp_self(), (void*)stack_addr, ARCH_PAGE_SIZE);
+        void *map = lwp_map_user(lwp_self(), (void*)stack_addr, ARCH_PAGE_SIZE, 0);
 
         if (map || lwp_user_accessable(addr, 1))
         {

+ 4 - 4
components/lwp/lwp.c

@@ -140,7 +140,7 @@ static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **arg
         return RT_NULL;
 
     /* args = (int*)lwp_map_user(lwp, 0, size); */
-    args = (int *)lwp_map_user(lwp, (void *)(KERNEL_VADDR_START - ARCH_PAGE_SIZE), size);
+    args = (int *)lwp_map_user(lwp, (void *)(KERNEL_VADDR_START - ARCH_PAGE_SIZE), size, 0);
     if (args == RT_NULL)
         return RT_NULL;
 
@@ -406,7 +406,7 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
         if (process_header_size > ARCH_PAGE_SIZE)
             return -RT_ERROR;
 #ifdef RT_USING_USERSPACE
-        va = (uint8_t *)lwp_map_user(lwp, (void *)(KERNEL_VADDR_START - ARCH_PAGE_SIZE * 2), process_header_size);
+        va = (uint8_t *)lwp_map_user(lwp, (void *)(KERNEL_VADDR_START - ARCH_PAGE_SIZE * 2), process_header_size, 0);
         if (!va)
             return -RT_ERROR;
         pa = rt_hw_mmu_v2p(m_info, va);
@@ -478,11 +478,11 @@ static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, str
                         result = -RT_ERROR;
                         goto _exit;
                     }
-                    va = lwp_map_user(lwp, (void *)pheader.p_vaddr, pheader.p_memsz);
+                    va = lwp_map_user(lwp, (void *)pheader.p_vaddr, pheader.p_memsz, 1);
                 }
                 else
                 {
-                    va = lwp_map_user(lwp, 0, pheader.p_memsz);
+                    va = lwp_map_user(lwp, 0, pheader.p_memsz, 0);
                 }
                 if (va)
                 {

+ 2 - 1
components/lwp/lwp_mm_area.h

@@ -25,7 +25,8 @@ enum
 {
     MM_AREA_TYPE_PHY = 0,  /* mm_area physical address is IO register or reserved memory */
     MM_AREA_TYPE_SHM,      /* mm_area physical address is shared memory */
-    MM_AREA_TYPE_AUTO,     /* mm_area physical address is alloced from page manager */
+    MM_AREA_TYPE_DATA,     /* mm_area physical address is alloced from page manager for data */
+    MM_AREA_TYPE_TEXT,     /* mm_area physical address is alloced from page manager for text */
     MM_AREA_TYPE_UNKNOW,
 };
 

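The split of MM_AREA_TYPE_AUTO into MM_AREA_TYPE_DATA and MM_AREA_TYPE_TEXT keeps the same ownership rule: both types are backed by pages allocated from the page manager, so their physical pages must be freed on unmap, while PHY and SHM areas are not. A minimal sketch of that rule (not part of this commit; the helper name is illustrative and mirrors the pa_need_free logic added in lwp_user_mm.c below):

    /* Sketch only: returns 1 when an area's physical pages came from the
     * page manager and must be freed when the area is unmapped. */
    static int mm_area_owns_pages(int type)
    {
        return (type == MM_AREA_TYPE_DATA) || (type == MM_AREA_TYPE_TEXT);
    }
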
+ 1 - 5
components/lwp/lwp_syscall.c

@@ -1133,10 +1133,6 @@ rt_err_t sys_timer_control(rt_timer_t timer, int cmd, void *arg)
     return rt_timer_control(timer, cmd, arg);
 }
 
-#ifdef RT_USING_USERSPACE
-void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size);
-#endif
-
 rt_thread_t sys_thread_create(void *arg[])
 {
     rt_base_t level = 0;
@@ -1148,7 +1144,7 @@ rt_thread_t sys_thread_create(void *arg[])
     lwp = rt_thread_self()->lwp;
     lwp_ref_inc(lwp);
 #ifdef RT_USING_USERSPACE
-    user_stack  = lwp_map_user(lwp, 0, (size_t)arg[3]);
+    user_stack  = lwp_map_user(lwp, 0, (size_t)arg[3], 0);
 #else
     user_stack  = (void *)RT_KERNEL_MALLOC((uint32_t)arg[3]);
 #endif

+ 23 - 8
components/lwp/lwp_user_mm.c

@@ -116,26 +116,35 @@ void lwp_unmap_user_space(struct rt_lwp *lwp)
     while ((node = lwp_map_find_first(lwp->map_area)) != 0)
     {
         struct rt_mm_area_struct *ma = (struct rt_mm_area_struct*)node->data;
+        int pa_need_free = 0;
 
         RT_ASSERT(ma->type < MM_AREA_TYPE_UNKNOW);
-        unmap_range(lwp, (void*)ma->addr, ma->size, (int)(ma->type == MM_AREA_TYPE_AUTO));
+
+        if ((ma->type == MM_AREA_TYPE_DATA) || (ma->type == MM_AREA_TYPE_TEXT))
+        {
+            pa_need_free = 1;
+        }
+        unmap_range(lwp, (void *)ma->addr, ma->size, pa_need_free);
         lwp_map_area_remove(&lwp->map_area, ma->addr);
     }
     rt_pages_free(m_info->vtable, 2);
 }
 
-static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size)
+static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
 {
     void *va = RT_NULL;
     int ret = 0;
     rt_mmu_info *m_info = &lwp->mmu_info;
+    int area_type;
 
     va = rt_hw_mmu_map_auto(m_info, map_va, map_size, MMU_MAP_U_RWCB);
     if (!va)
     {
         return 0;
     }
-    ret = lwp_map_area_insert(&lwp->map_area, (size_t)va, map_size, MM_AREA_TYPE_AUTO);
+
+    area_type = text ? MM_AREA_TYPE_TEXT : MM_AREA_TYPE_DATA;
+    ret = lwp_map_area_insert(&lwp->map_area, (size_t)va, map_size, area_type);
     if (ret != 0)
     {
         unmap_range(lwp, va, map_size, 1);
@@ -149,6 +158,7 @@ int lwp_unmap_user(struct rt_lwp *lwp, void *va)
     rt_base_t level = 0;
     struct lwp_avl_struct *ma_avl_node = RT_NULL;
     struct rt_mm_area_struct *ma = RT_NULL;
+    int pa_need_free = 0;
 
     level = rt_hw_interrupt_disable();
     ma_avl_node = lwp_map_find(lwp->map_area, (size_t)va);
@@ -158,8 +168,13 @@ int lwp_unmap_user(struct rt_lwp *lwp, void *va)
         return -1;
     }
     ma = (struct rt_mm_area_struct *)ma_avl_node->data;
+
     RT_ASSERT(ma->type < MM_AREA_TYPE_UNKNOW);
-    unmap_range(lwp, (void *)ma->addr, ma->size, (int)(ma->type == MM_AREA_TYPE_AUTO));
+    if ((ma->type == MM_AREA_TYPE_DATA) || (ma->type == MM_AREA_TYPE_TEXT))
+    {
+        pa_need_free = 1;
+    }
+    unmap_range(lwp, (void *)ma->addr, ma->size, pa_need_free);
     lwp_map_area_remove(&lwp->map_area, (size_t)va);
     rt_hw_interrupt_enable(level);
     return 0;
@@ -175,7 +190,7 @@ int lwp_unmap_user_type(struct rt_lwp *lwp, void *va)
     return lwp_unmap_user(lwp, va);
 }
 
-void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size)
+void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
 {
     rt_base_t level = 0;
     void *ret = RT_NULL;
@@ -191,7 +206,7 @@ void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size)
     map_va = (void*)((size_t)map_va & ~ARCH_PAGE_MASK);
 
     level = rt_hw_interrupt_disable();
-    ret = _lwp_map_user(lwp, map_va, map_size);
+    ret = _lwp_map_user(lwp, map_va, map_size, text);
     rt_hw_interrupt_enable(level);
     if (ret)
     {
@@ -282,7 +297,7 @@ int lwp_brk(void *addr)
         void *va;
 
         size = (((size_t)addr - lwp->end_heap) + ARCH_PAGE_SIZE - 1) & ~ARCH_PAGE_MASK;
-        va = lwp_map_user(lwp, (void*)lwp->end_heap, size);
+        va = lwp_map_user(lwp, (void*)lwp->end_heap, size, 0);
         if (va)
         {
             lwp->end_heap += size;
@@ -308,7 +323,7 @@ void* lwp_mmap2(void *addr, size_t length, int prot,
     if (fd == -1)
     {
         lwp = rt_thread_self()->lwp;
-        ret = lwp_map_user(lwp, addr, length);
+        ret = lwp_map_user(lwp, addr, length, 0);
         if (!ret)
         {
             ret = (void*)-1;

+ 1 - 1
components/lwp/lwp_user_mm.h

@@ -25,7 +25,7 @@ int lwp_user_space_init(struct rt_lwp *lwp);
 void lwp_unmap_user_space(struct rt_lwp *lwp);
 
 int lwp_unmap_user(struct rt_lwp *lwp, void *va);
-void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size);
+void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text);
 
 void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached);
 int lwp_unmap_user_phy(struct rt_lwp *lwp, void *va);
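
For reference, a usage sketch of the updated interface, assuming an lwp obtained as in sys_thread_create above (rt_thread_self()->lwp); the header names, the availability of ARCH_PAGE_SIZE, and the addresses and sizes are assumptions for illustration, not part of this commit:

    #include <rtthread.h>
    #include "lwp_user_mm.h"

    static void map_example(struct rt_lwp *lwp)
    {
        /* text == 1: the area is recorded as MM_AREA_TYPE_TEXT */
        void *text_va = lwp_map_user(lwp, (void *)0x00100000, 2 * ARCH_PAGE_SIZE, 1);

        /* text == 0: the area is recorded as MM_AREA_TYPE_DATA */
        void *data_va = lwp_map_user(lwp, RT_NULL, ARCH_PAGE_SIZE, 0);

        if (data_va)
        {
            /* DATA and TEXT areas free their page-manager pages on unmap */
            lwp_unmap_user(lwp, data_va);
        }
        (void)text_va;
    }
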