Explorar o código

[libcpu] fix: no memory higher than 1 GB is mapped

zhujiale hai 5 meses
pai
achega
96da7abd3e
Modificáronse 2 ficheiros con 23 adicións e 4 borrados
  1. +12 −3
      libcpu/aarch64/common/mmu.c
  2. +11 −1
      libcpu/aarch64/common/setup.c

+ 12 - 3
libcpu/aarch64/common/mmu.c

@@ -556,7 +556,8 @@ unsigned long get_free_page(void)
 }
 
 static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
-                               unsigned long pa, unsigned long attr)
+                               unsigned long pa, unsigned long attr,
+                               rt_bool_t flush)
 {
     int level;
     unsigned long *cur_lv_tbl = lv0_tbl;
@@ -585,6 +586,10 @@ static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
             }
             rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
             cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
+            if (flush)
+            {
+                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
+            }
         }
         page = cur_lv_tbl[off];
         if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
@@ -600,6 +605,10 @@ static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
     off = (va >> ARCH_SECTION_SHIFT);
     off &= MMU_LEVEL_MASK;
     cur_lv_tbl[off] = pa;
+    if (flush)
+    {
+        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
+    }
     return 0;
 }
 
@@ -633,7 +642,7 @@ void *rt_ioremap_early(void *paddr, size_t size)
 
     while (count --> 0)
     {
-        if (_map_single_page_2M(tbl, base, base, MMU_MAP_K_DEVICE))
+        if (_map_single_page_2M(tbl, base, base, MMU_MAP_K_DEVICE, RT_TRUE))
         {
             return RT_NULL;
         }
@@ -661,7 +670,7 @@ static int _init_map_2M(unsigned long *lv0_tbl, unsigned long va,
     }
     for (i = 0; i < count; i++)
     {
-        ret = _map_single_page_2M(lv0_tbl, va, pa, attr);
+        ret = _map_single_page_2M(lv0_tbl, va, pa, attr, RT_FALSE);
         va += ARCH_SECTION_SIZE;
         pa += ARCH_SECTION_SIZE;
         if (ret != 0)

+ 11 - 1
libcpu/aarch64/common/setup.c

@@ -235,7 +235,17 @@ void rt_hw_common_setup(void)
     rt_memblock_reserve_memory("init-page", init_page_start, init_page_end, MEMBLOCK_NONE);
     rt_memblock_reserve_memory("fdt", fdt_start, fdt_end, MEMBLOCK_NONE);
 
-    rt_memmove((void *)(fdt_start - pv_off), (void *)(fdt_ptr - pv_off), fdt_size);
+    /* To virtual address */
+    fdt_ptr = (void *)(fdt_ptr - pv_off);
+
+    if ((rt_ubase_t)fdt_ptr + fdt_size - KERNEL_VADDR_START > SIZE_GB)
+    {
+        fdt_ptr = rt_ioremap_early(fdt_ptr + pv_off, fdt_size);
+
+        RT_ASSERT(fdt_ptr != RT_NULL);
+    }
+
+    rt_memmove((void *)(fdt_start - pv_off), fdt_ptr, fdt_size);
     fdt_ptr = (void *)fdt_start - pv_off;
 
     rt_system_heap_init((void *)(heap_start - pv_off), (void *)(heap_end - pv_off));