Prechádzať zdrojové kódy

增加mmu操作时禁止切换任务

shaojinchun 3 rokov pred
rodič
commit
3d512e0d43
zmenil 2 súbory, kde vykonal 20 pridaní a 20 odobraní
  1. + 9 - 20
      libcpu/aarch64/common/mmu.c
  2. + 11 - 0
      libcpu/arm/cortex-a/mmu.c

+ 9 - 20
libcpu/aarch64/common/mmu.c

@@ -735,38 +735,22 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t s
     }
     if (vaddr)
     {
+        rt_enter_critical();
         ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);
         if (ret == 0)
         {
             rt_hw_cpu_tlb_invalidate();
+            rt_exit_critical();
             return (void *)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
         }
+        rt_exit_critical();
     }
     return 0;
 }
 #else
 void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr)
 {
-    size_t pa_s, pa_e;
-    size_t vaddr;
-    int pages;
-    int ret;
-
-    pa_s = (size_t)p_addr;
-    pa_e = (size_t)p_addr + size - 1;
-    pa_s >>= ARCH_PAGE_SHIFT;
-    pa_e >>= ARCH_PAGE_SHIFT;
-    pages = pa_e - pa_s + 1;
-    vaddr = find_vaddr(mmu_info, pages);
-    if (vaddr) {
-        //TODO ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
-        if (ret == 0)
-        {
-            rt_hw_cpu_tlb_invalidate();
-            return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
-        }
-    }
-    return 0;
+    return p_addr;
 }
 #endif
 
@@ -842,12 +826,15 @@ void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size
     }
     if (vaddr)
     {
+        rt_enter_critical();
         ret = __rt_hw_mmu_map_auto(mmu_info, (void *)vaddr, pages, attr);
         if (ret == 0)
         {
             rt_hw_cpu_tlb_invalidate();
+            rt_exit_critical();
             return (void *)((char *)vaddr + offset);
         }
+        rt_exit_critical();
     }
     return 0;
 }
@@ -862,8 +849,10 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, size_t size)
     va_s >>= ARCH_PAGE_SHIFT;
     va_e >>= ARCH_PAGE_SHIFT;
     pages = va_e - va_s + 1;
+    rt_enter_critical();
     __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
     rt_hw_cpu_tlb_invalidate();
+    rt_exit_critical();
 }
 
 void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, size_t size, size_t attr)

+ 11 - 0
libcpu/arm/cortex-a/mmu.c

@@ -613,12 +613,15 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t s
         vaddr = find_vaddr(mmu_info, pages);
     }
     if (vaddr) {
+        rt_enter_critical();
         ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
         if (ret == 0)
         {
             rt_hw_cpu_tlb_invalidate();
+            rt_exit_critical();
             return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
         }
+        rt_exit_critical();
     }
     return 0;
 }
@@ -637,12 +640,15 @@ void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t at
     pages = pa_e - pa_s + 1;
     vaddr = find_vaddr(mmu_info, pages);
     if (vaddr) {
+        rt_enter_critical();
         ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
         if (ret == 0)
         {
             rt_hw_cpu_tlb_invalidate();
+            rt_exit_critical();
             return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
         }
+        rt_exit_critical();
     }
     return 0;
 }
@@ -750,12 +756,15 @@ void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size
         vaddr = find_vaddr(mmu_info, pages);
     }
     if (vaddr) {
+        rt_enter_critical();
         ret = __rt_hw_mmu_map_auto(mmu_info, (void*)vaddr, pages, attr);
         if (ret == 0)
         {
             rt_hw_cpu_tlb_invalidate();
+            rt_exit_critical();
             return (void*)((char*)vaddr + offset);
         }
+        rt_exit_critical();
     }
     return 0;
 }
@@ -771,8 +780,10 @@ void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
     va_s >>= ARCH_PAGE_SHIFT;
     va_e >>= ARCH_PAGE_SHIFT;
     pages = va_e - va_s + 1;
+    rt_enter_critical();
     __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
     rt_hw_cpu_tlb_invalidate();
+    rt_exit_critical();
 }
 
 #ifdef RT_USING_USERSPACE