Parcourir la source

page管理加入ref_cnt功能 (add ref_cnt support to page management)

shaojinchun il y a 5 ans
Parent
commit 9c4c413fde
3 fichiers modifiés avec 81 ajouts et 35 suppressions
  1. +20 −8   libcpu/arm/cortex-a/mmu.c
  2. +58 −26  libcpu/arm/cortex-a/page.c
  3. +3 −1    libcpu/arm/cortex-a/page.h

+ 20 - 8
libcpu/arm/cortex-a/mmu.c

@@ -248,7 +248,9 @@ int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
     size_t l1_off;
     size_t *mmu_l1, *mmu_l2;
     size_t sections;
+#ifndef RT_USING_USERSPACE
     size_t *ref_cnt;
+#endif
 
     /* for kernel ioremap */
     if ((size_t)v_address < KERNEL_VADDR_START)
@@ -295,8 +297,10 @@ int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size)
             return -1;
         }
 
+#ifndef RT_USING_USERSPACE
         ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
         *ref_cnt = 1;
+#endif
 
         loop_va += ARCH_SECTION_SIZE;
     }
@@ -409,7 +413,9 @@ static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
     size_t l1_off, l2_off;
     size_t *mmu_l1, *mmu_l2;
+#ifndef RT_USING_USERSPACE
     size_t *ref_cnt;
+#endif
 
     if (!mmu_info)
     {
@@ -442,20 +448,24 @@ static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages
             /* cache maintain */
             rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);
 
+#ifdef RT_USING_USERSPACE
+            if (rt_pages_free(mmu_l2, 0))
+            {
+                *mmu_l1 = 0;
+                rt_hw_cpu_dcache_clean(mmu_l1, 4);
+            }
+#else
             ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
             (*ref_cnt)--;
             if (!*ref_cnt)
             {
-#ifdef RT_USING_USERSPACE
-                rt_pages_free(mmu_l2, 0);
-#else
                 rt_free_align(mmu_l2);
-#endif
                 *mmu_l1 = 0;
 
                 /* cache maintain */
                 rt_hw_cpu_dcache_clean(mmu_l1, 4);
             }
+#endif
         }
         loop_va += ARCH_PAGE_SIZE;
     }
@@ -467,7 +477,9 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
     size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
     size_t l1_off, l2_off;
     size_t *mmu_l1, *mmu_l2;
+#ifndef RT_USING_USERSPACE
     size_t *ref_cnt;
+#endif
 
     if (!mmu_info)
     {
@@ -483,6 +495,7 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
         if (*mmu_l1 & ARCH_MMU_USED_MASK)
         {
             mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
+            rt_page_ref_inc(mmu_l2, 0);
         }
         else
         {
@@ -509,8 +522,10 @@ static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, si
             }
         }
 
+#ifndef RT_USING_USERSPACE
         ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
         (*ref_cnt)++;
+#endif
 
         *(mmu_l2 + l2_off) = (loop_pa | attr);
         /* cache maintain */
@@ -605,7 +620,6 @@ static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npag
     size_t loop_pa;
     size_t l1_off, l2_off;
     size_t *mmu_l1, *mmu_l2;
-    size_t *ref_cnt;
 
     if (!mmu_info)
     {
@@ -625,6 +639,7 @@ static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npag
         if (*mmu_l1 & ARCH_MMU_USED_MASK)
         {
             mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
+            rt_page_ref_inc(mmu_l2, 0);
         }
         else
         {
@@ -644,9 +659,6 @@ static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npag
                 goto err;
         }
 
-        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
-        (*ref_cnt)++;
-
         loop_pa += mmu_info->pv_off;
         *(mmu_l2 + l2_off) = (loop_pa | attr);
         /* cache maintain */

+ 58 - 26
libcpu/arm/cortex-a/page.c

@@ -25,9 +25,10 @@
 
 struct page
 {
-    struct page *next;  // same level next
-    struct page *pre;   // same level pre
-    uint32_t size_bits; // if is ARCH_ADDRESS_WIDTH_BITS, means not free
+    struct page *next;  /* same level next */
+    struct page *pre;   /* same level pre  */
+    uint32_t size_bits; /* if is ARCH_ADDRESS_WIDTH_BITS, means not free */
+    int ref_cnt;        /* page group ref count */
 };
 
 static struct page* page_start;
@@ -120,12 +121,37 @@ static void page_insert(struct page *p, uint32_t size_bits)
     p->size_bits = size_bits;
 }
 
-static void _pages_free(struct page *p, uint32_t size_bits)
+static void _pages_ref_inc(struct page *p, uint32_t size_bits)
+{
+    struct page *page_head;
+    int idx;
+
+    /* find page group head */
+    idx = p - page_start;
+    if (idx < 0 || idx >= page_nr)
+    {
+        return;
+    }
+    idx = idx & ~((1UL << size_bits) - 1);
+
+    page_head= page_start + idx;
+    page_head->ref_cnt++;
+}
+
+static int _pages_free(struct page *p, uint32_t size_bits)
 {
     uint32_t level = size_bits;
     uint32_t high = ARCH_ADDRESS_WIDTH_BITS - size_bits - 1;
     struct page *buddy;
 
+    RT_ASSERT(p->ref_cnt > 0);
+
+    p->ref_cnt--;
+    if (p->ref_cnt != 0)
+    {
+        return 0;
+    }
+
     while (level < high)
     {
         buddy = buddy_get(p, level);
@@ -141,6 +167,7 @@ static void _pages_free(struct page *p, uint32_t size_bits)
         }
     }
     page_insert(p, level);
+    return 1;
 }
 
 static struct page *_pages_alloc(uint32_t size_bits)
@@ -178,9 +205,21 @@ static struct page *_pages_alloc(uint32_t size_bits)
             level--;
         }
     }
+    p->ref_cnt = 1;
     return p;
 }
 
+void rt_page_ref_inc(void *addr, uint32_t size_bits)
+{
+    struct page *p;
+    rt_base_t level;
+
+    p = addr_to_page(addr);
+    level = rt_hw_interrupt_disable();
+    _pages_ref_inc(p, size_bits);
+    rt_hw_interrupt_enable(level);
+}
+
 void *rt_pages_alloc(uint32_t size_bits)
 {
     struct page *p;
@@ -192,18 +231,20 @@ void *rt_pages_alloc(uint32_t size_bits)
     return page_to_addr(p);
 }
 
-void rt_pages_free(void *addr, uint32_t size_bits)
+int rt_pages_free(void *addr, uint32_t size_bits)
 {
     struct page *p;
+    int real_free = 0;
 
     p = addr_to_page(addr);
     if (p)
     {
         rt_base_t level;
         level = rt_hw_interrupt_disable();
-        _pages_free(p, size_bits);
+        real_free = _pages_free(p, size_bits);
         rt_hw_interrupt_enable(level);
     }
+    return real_free;
 }
 
 void rt_pageinfo_dump(void)
@@ -258,8 +299,6 @@ void rt_page_get_info(size_t *total_nr, size_t *free_nr)
 
 void rt_page_init(rt_region_t reg)
 {
-    uint32_t align_bits;
-    uint32_t size_bits;
     int i;
 
     LOG_D("split 0x%08x 0x%08x\n", reg.start, reg.end);
@@ -270,9 +309,9 @@ void rt_page_init(rt_region_t reg)
     reg.end &= ~ARCH_PAGE_MASK;
 
     {
-        int nr = ARCH_PAGE_SIZE/sizeof(struct page);
+        int nr = ARCH_PAGE_SIZE / sizeof(struct page);
         int total = (reg.end - reg.start) >> ARCH_PAGE_SHIFT;
-        int mnr = (total + nr)/(nr + 1);
+        int mnr = (total + nr) / (nr + 1);
 
         LOG_D("nr = 0x%08x\n", nr);
         LOG_D("total = 0x%08x\n", total);
@@ -286,30 +325,23 @@ void rt_page_init(rt_region_t reg)
 
     LOG_D("align 0x%08x 0x%08x\n", reg.start, reg.end);
 
-    /* init page struct */
-    for (i = 0; i < page_nr; i++)
-    {
-        page_start[i].size_bits = ARCH_ADDRESS_WIDTH_BITS;
-    }
-
     /* init free list */
     for (i = 0; i < ARCH_PAGE_LIST_SIZE; i++)
     {
         page_list[i] = 0;
     }
 
-    while (reg.start != reg.end)
+    /* init page struct */
+    for (i = 0; i < page_nr; i++)
     {
-        size_bits = ARCH_ADDRESS_WIDTH_BITS - 1 - __builtin_clz(reg.end - reg.start);
-        align_bits = __builtin_ctz(reg.start);
-        if (align_bits < size_bits)
-        {
-            size_bits = align_bits;
-        }
-
-        _pages_free(addr_to_page((void*)reg.start), size_bits - ARCH_PAGE_SHIFT);
+        page_start[i].size_bits = ARCH_ADDRESS_WIDTH_BITS;
+        page_start[i].ref_cnt = 1;
+    }
 
-        reg.start += (1U << size_bits);
+    /* add to free list */
+    for (i = 0; i < page_nr; i++)
+    {
+        _pages_free(page_start + i, 0);
     }
 }
 #endif

+ 3 - 1
libcpu/arm/cortex-a/page.h

@@ -23,7 +23,9 @@ void rt_page_init(rt_region_t reg);
 
 void *rt_pages_alloc(uint32_t size_bits);
 
-void rt_pages_free(void *addr, uint32_t size_bits);
+void rt_page_ref_inc(void *addr, uint32_t size_bits);
+
+int rt_pages_free(void *addr, uint32_t size_bits);
 
 void rt_pageinfo_dump(void);