浏览代码

mmu操作采用不关中断方式 (MMU operations no longer disable interrupts; a mutex `mm_lock` is used for mutual exclusion instead)

shaojinchun 3 年之前
父节点
当前提交
a0a5aceb54

+ 4 - 6
components/lwp/ioremap.c

@@ -36,7 +36,6 @@ static void _iounmap_range(void *addr, size_t size)
 
 static void *_ioremap_type(void *paddr, size_t size, int type)
 {
-    rt_base_t level;
     void *v_addr = NULL;
     size_t attr;
 
@@ -52,7 +51,7 @@ static void *_ioremap_type(void *paddr, size_t size, int type)
         return v_addr;
     }
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     v_addr = rt_hw_mmu_map(&mmu_info, 0, paddr, size, attr);
     if (v_addr)
     {
@@ -63,7 +62,7 @@ static void *_ioremap_type(void *paddr, size_t size, int type)
             v_addr = NULL;
         }
     }
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return v_addr;
 }
 
@@ -84,10 +83,9 @@ void *rt_ioremap_cached(void *paddr, size_t size)
 
 void rt_iounmap(volatile void *vaddr)
 {
-    rt_base_t level;
     struct lwp_avl_struct *ma_avl_node;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ma_avl_node = lwp_map_find(k_map_area, (size_t)vaddr);
     if (ma_avl_node)
     {
@@ -96,7 +94,7 @@ void rt_iounmap(volatile void *vaddr)
         _iounmap_range((void *)ma->addr, ma->size);
         lwp_map_area_remove(&k_map_area, (size_t)vaddr);
     }
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
 }
 
 #else

+ 16 - 25
components/lwp/lwp_shm.c

@@ -146,11 +146,10 @@ err:
 int lwp_shmget(size_t key, size_t size, int create)
 {
     int ret = 0;
-    rt_base_t level = 0;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _lwp_shmget(key, size, create);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
@@ -211,11 +210,10 @@ static int _lwp_shmrm(int id)
 int lwp_shmrm(int id)
 {
     int ret = 0;
-    rt_base_t level = 0;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _lwp_shmrm(id);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
@@ -257,15 +255,14 @@ static void *_lwp_shmat(int id, void *shm_vaddr)
 void *lwp_shmat(int id, void *shm_vaddr)
 {
     void *ret = RT_NULL;
-    rt_base_t level = 0;
 
     if (((size_t)shm_vaddr & ARCH_PAGE_MASK) != 0)
     {
         return RT_NULL;
     }
-    level= rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _lwp_shmat(id, shm_vaddr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
@@ -303,11 +300,10 @@ static int _lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
 int lwp_shm_ref_inc(struct rt_lwp *lwp, void *shm_vaddr)
 {
     int ret = 0;
-    rt_base_t level = 0;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _lwp_shm_ref_inc(lwp, shm_vaddr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
 
     return ret;
 }
@@ -327,11 +323,10 @@ static int _lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
 int lwp_shm_ref_dec(struct rt_lwp *lwp, void *shm_vaddr)
 {
     int ret = 0;
-    rt_base_t level = 0;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _lwp_shm_ref_dec(lwp, shm_vaddr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
 
     return ret;
 }
@@ -359,11 +354,10 @@ int _lwp_shmdt(void *shm_vaddr)
 int lwp_shmdt(void *shm_vaddr)
 {
     int ret = 0;
-    rt_base_t level = 0;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _lwp_shmdt(shm_vaddr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
 
     return ret;
 }
@@ -389,11 +383,10 @@ void *_lwp_shminfo(int id)
 void *lwp_shminfo(int id)
 {
     void *vaddr = RT_NULL;
-    rt_base_t level = 0;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     vaddr = _lwp_shminfo(id);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return vaddr;
 }
 
@@ -410,13 +403,11 @@ static int _shm_info(struct lwp_avl_struct* node_key, void *data)
 
 void list_shm(void)
 {
-    rt_base_t level = 0;
-
     rt_kprintf("   key        paddr      size       id\n");
     rt_kprintf("---------- ---------- ---------- --------\n");
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     lwp_avl_traversal(shm_tree_key, _shm_info, NULL);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
 }
 MSH_CMD_EXPORT(list_shm, show share memory info);
 #endif

+ 13 - 19
components/lwp/lwp_user_mm.c

@@ -124,16 +124,15 @@ static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, in
 
 int lwp_unmap_user(struct rt_lwp *lwp, void *va)
 {
-    rt_base_t level = 0;
     struct lwp_avl_struct *ma_avl_node = RT_NULL;
     struct rt_mm_area_struct *ma = RT_NULL;
     int pa_need_free = 0;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ma_avl_node = lwp_map_find(lwp->map_area, (size_t)va);
     if (!ma_avl_node)
     {
-        rt_hw_interrupt_enable(level);
+        rt_mm_unlock();
         return -1;
     }
     ma = (struct rt_mm_area_struct *)ma_avl_node->data;
@@ -145,7 +144,7 @@ int lwp_unmap_user(struct rt_lwp *lwp, void *va)
     }
     unmap_range(lwp, (void *)ma->addr, ma->size, pa_need_free);
     lwp_map_area_remove(&lwp->map_area, (size_t)va);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return 0;
 }
 
@@ -225,7 +224,6 @@ int lwp_unmap_user_type(struct rt_lwp *lwp, void *va)
 
 void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
 {
-    rt_base_t level = 0;
     void *ret = RT_NULL;
     size_t offset = 0;
 
@@ -238,9 +236,9 @@ void *lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size, int text)
     map_size &= ~ARCH_PAGE_MASK;
     map_va = (void *)((size_t)map_va & ~ARCH_PAGE_MASK);
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _lwp_map_user(lwp, map_va, map_size, text);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     if (ret)
     {
         ret = (void *)((char *)ret + offset);
@@ -283,7 +281,6 @@ static void *_lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa,
 
 void *lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t map_size, int cached, int type)
 {
-    rt_base_t level = 0;
     void *ret = RT_NULL;
     size_t offset = 0;
 
@@ -303,9 +300,9 @@ void *lwp_map_user_type(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t m
     map_size &= ~ARCH_PAGE_MASK;
     map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _lwp_map_user_type(lwp, map_va, map_pa, map_size, cached, type);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     if (ret)
     {
         ret = (void *)((char *)ret + offset);
@@ -320,11 +317,10 @@ void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa, size_t ma
 
 rt_base_t lwp_brk(void *addr)
 {
-    rt_base_t level = 0;
     rt_base_t ret = -1;
     struct rt_lwp *lwp = RT_NULL;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     lwp = rt_thread_self()->lwp;
 
     if ((size_t)addr <= lwp->end_heap)
@@ -347,7 +343,7 @@ rt_base_t lwp_brk(void *addr)
             ret = lwp->end_heap;
         }
     }
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
@@ -356,16 +352,15 @@ rt_base_t lwp_brk(void *addr)
 void* lwp_mmap2(void *addr, size_t length, int prot,
         int flags, int fd, off_t pgoffset)
 {
-    rt_base_t level = 0;
     void *ret = (void *)-1;
     struct rt_lwp *lwp = RT_NULL;
 
     if (fd == -1)
     {
         lwp = rt_thread_self()->lwp;
-        level = rt_hw_interrupt_disable();
+        rt_mm_lock();
         ret = lwp_map_user(lwp, addr, length, 0);
-        rt_hw_interrupt_enable(level);
+        rt_mm_unlock();
         if (ret)
         {
             if ((flags & MAP_ANONYMOUS) != 0)
@@ -383,14 +378,13 @@ void* lwp_mmap2(void *addr, size_t length, int prot,
 
 int lwp_munmap(void *addr)
 {
-    rt_base_t level = 0;
     int ret = 0;
     struct rt_lwp *lwp = RT_NULL;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     lwp = rt_thread_self()->lwp;
     ret = lwp_unmap_user(lwp, addr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 

+ 36 - 18
libcpu/aarch64/common/mmu.c

@@ -34,6 +34,34 @@
 #define MMU_TBL_PAGE_4k_LEVEL  3
 #define MMU_TBL_LEVEL_NR       4
 
+static rt_mutex_t mm_lock;
+
+void rt_mm_lock(void)
+{
+    if (rt_thread_self())
+    {
+        if (!mm_lock)
+        {
+            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
+        }
+        if (mm_lock)
+        {
+            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
+        }
+    }
+}
+
+void rt_mm_unlock(void)
+{
+    if (rt_thread_self())
+    {
+        if (mm_lock)
+        {
+            rt_mutex_release(mm_lock);
+        }
+    }
+}
+
 void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr);
 
 struct page_table
@@ -502,7 +530,6 @@ void rt_hw_mmu_setup(struct mem_desc *mdesc, int desc_nr)
  */
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, size_t size, size_t *vtable, size_t pv_off)
 {
-    rt_base_t level;
     size_t va_s, va_e;
 
     if (!mmu_info || !vtable)
@@ -526,15 +553,11 @@ int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, size_t size, size
         return -1;
     }
 
-    level = rt_hw_interrupt_disable();
-
     mmu_info->vtable = vtable;
     mmu_info->vstart = va_s;
     mmu_info->vend = va_e;
     mmu_info->pv_off = pv_off;
 
-    rt_hw_interrupt_enable(level);
-
     return 0;
 }
 
@@ -846,32 +869,28 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)
 {
     void *ret;
-    rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
 {
     void *ret;
-    rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
 {
-    rt_base_t level;
-
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     _rt_hw_mmu_unmap(mmu_info, v_addr, size);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
 }
 #endif
 
@@ -927,11 +946,10 @@ void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
 void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
 {
     void *ret;
-    rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 

+ 2 - 0
libcpu/aarch64/common/mmu.h

@@ -132,5 +132,7 @@ void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t att
 
 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size);
 void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr);
+void rt_mm_lock(void);
+void rt_mm_unlock(void);
 
 #endif

+ 36 - 19
libcpu/arm/cortex-a/mmu.c

@@ -19,6 +19,34 @@
 #include "page.h"
 #endif
 
+static rt_mutex_t mm_lock;
+
+void rt_mm_lock(void)
+{
+    if (rt_thread_self())
+    {
+        if (!mm_lock)
+        {
+            mm_lock = rt_mutex_create("mm_lock", RT_IPC_FLAG_FIFO);
+        }
+        if (mm_lock)
+        {
+            rt_mutex_take(mm_lock, RT_WAITING_FOREVER);
+        }
+    }
+}
+
+void rt_mm_unlock(void)
+{
+    if (rt_thread_self())
+    {
+        if (mm_lock)
+        {
+            rt_mutex_release(mm_lock);
+        }
+    }
+}
+
 /* dump 2nd level page table */
 void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
 {
@@ -195,7 +223,6 @@ void rt_hw_cpu_dcache_clean(void *addr, int size);
 int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off)
 {
     size_t l1_off, va_s, va_e;
-    rt_base_t level;
 
     if (!mmu_info || !vtable)
     {
@@ -218,15 +245,12 @@ int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size
         return -1;
     }
 
-    level = rt_hw_interrupt_disable();
-
     for (l1_off = va_s; l1_off <= va_e; l1_off++)
     {
         size_t v = vtable[l1_off];
 
         if (v & ARCH_MMU_USED_MASK)
         {
-            rt_hw_interrupt_enable(level);
             return -1;
         }
     }
@@ -236,8 +260,6 @@ int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size
     mmu_info->vend = va_e;
     mmu_info->pv_off = pv_off;
 
-    rt_hw_interrupt_enable(level);
-
     return 0;
 }
 
@@ -757,33 +779,29 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
 {
     void *ret;
-    rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 
 void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
 {
     void *ret;
-    rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 #endif
 
 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
 {
-    rt_base_t level;
-
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     _rt_hw_mmu_unmap(mmu_info, v_addr, size);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
 }
 
 void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
@@ -842,11 +860,10 @@ void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
 void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
 {
     void *ret;
-    rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    rt_mm_lock();
     ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
-    rt_hw_interrupt_enable(level);
+    rt_mm_unlock();
     return ret;
 }
 

+ 3 - 0
libcpu/arm/cortex-a/mmu.h

@@ -108,4 +108,7 @@ void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t att
 void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size);
 void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr);
 
+void rt_mm_lock(void);
+void rt_mm_unlock(void);
+
 #endif