|
@@ -8,6 +8,7 @@
|
|
|
* 2019-11-01 Jesven The first version
|
|
|
* 2022-12-13 WangXiaoyao Hot-pluggable, extensible
|
|
|
* page management algorithm
|
|
|
+ * 2023-02-20 WangXiaoyao Multi-list page-management
|
|
|
*/
|
|
|
#include <rtthread.h>
|
|
|
|
|
@@ -39,7 +40,8 @@ static void *init_mpr_cont_start;
|
|
|
|
|
|
static struct rt_varea mpr_varea;
|
|
|
|
|
|
-static struct rt_page *page_list[RT_PAGE_MAX_ORDER];
|
|
|
+static struct rt_page *page_list_low[RT_PAGE_MAX_ORDER];
|
|
|
+static struct rt_page *page_list_high[RT_PAGE_MAX_ORDER];
|
|
|
|
|
|
#define page_start ((rt_page_t)rt_mpr_start)
|
|
|
|
|
@@ -61,21 +63,18 @@ static void hint_free(rt_mm_va_hint_t hint)
|
|
|
|
|
|
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
|
|
|
{
|
|
|
- void *init_start = (void *)init_mpr_align_start;
|
|
|
- void *init_end = (void *)init_mpr_align_end;
|
|
|
- if (msg->fault_vaddr < init_end && msg->fault_vaddr >= init_start)
|
|
|
+ char *init_start = (void *)init_mpr_align_start;
|
|
|
+ char *init_end = (void *)init_mpr_align_end;
|
|
|
+ if ((char *)msg->fault_vaddr < init_end && (char *)msg->fault_vaddr >= init_start)
|
|
|
{
|
|
|
- rt_size_t offset = msg->fault_vaddr - init_start;
|
|
|
+ rt_size_t offset = (char *)msg->fault_vaddr - init_start;
|
|
|
msg->response.status = MM_FAULT_STATUS_OK;
|
|
|
- msg->response.vaddr = init_mpr_cont_start + offset;
|
|
|
+ msg->response.vaddr = (char *)init_mpr_cont_start + offset;
|
|
|
msg->response.size = ARCH_PAGE_SIZE;
|
|
|
}
|
|
|
else
|
|
|
{
|
|
|
- void *raw_page = rt_pages_alloc(0);
|
|
|
- msg->response.status = MM_FAULT_STATUS_OK;
|
|
|
- msg->response.vaddr = raw_page;
|
|
|
- msg->response.size = ARCH_PAGE_SIZE;
|
|
|
+ rt_mm_dummy_mapper.on_page_fault(varea, msg);
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -85,15 +84,123 @@ static struct rt_mem_obj mm_page_mapper = {
|
|
|
.hint_free = hint_free,
|
|
|
};
|
|
|
|
|
|
+#ifdef RT_DEBUG_PAGE_LEAK
|
|
|
+static volatile int enable;
|
|
|
+static rt_page_t _trace_head;
|
|
|
+#define TRACE_ALLOC(pg, size) _trace_alloc(pg, __builtin_return_address(0), size)
|
|
|
+#define TRACE_FREE(pgaddr, size) _trace_free(pgaddr, __builtin_return_address(0), size)
|
|
|
+
|
|
|
+void rt_page_leak_trace_start()
|
|
|
+{
|
|
|
+ // TODO multicore safety
|
|
|
+ _trace_head = NULL;
|
|
|
+ enable = 1;
|
|
|
+}
|
|
|
+MSH_CMD_EXPORT(rt_page_leak_trace_start, start page leak tracer);
|
|
|
+
|
|
|
+static void _collect()
|
|
|
+{
|
|
|
+ rt_page_t page = _trace_head;
|
|
|
+ if (!page)
|
|
|
+ {
|
|
|
+ rt_kputs("ok!\n");
|
|
|
+ }
|
|
|
+
|
|
|
+ while (page)
|
|
|
+ {
|
|
|
+        rt_page_t next = page->tl_next;
|
|
|
+ void *pg_va = rt_page_page2addr(page);
|
|
|
+ LOG_W("LEAK: %p, allocator: %p, size bits: %lx", pg_va, page->caller, page->trace_size);
|
|
|
+ rt_pages_free(pg_va, page->trace_size);
|
|
|
+ page = next;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+void rt_page_leak_trace_stop()
|
|
|
+{
|
|
|
+ // TODO multicore safety
|
|
|
+ enable = 0;
|
|
|
+ _collect();
|
|
|
+}
|
|
|
+MSH_CMD_EXPORT(rt_page_leak_trace_stop, stop page leak tracer);
|
|
|
+
|
|
|
+static void _trace_alloc(rt_page_t page, void *caller, size_t size_bits)
|
|
|
+{
|
|
|
+ if (enable)
|
|
|
+ {
|
|
|
+ page->caller = caller;
|
|
|
+ page->trace_size = size_bits;
|
|
|
+ page->tl_prev = NULL;
|
|
|
+ page->tl_next = NULL;
|
|
|
+
|
|
|
+ if (_trace_head == NULL)
|
|
|
+ {
|
|
|
+ _trace_head = page;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ _trace_head->tl_prev = page;
|
|
|
+ page->tl_next = _trace_head;
|
|
|
+ _trace_head = page;
|
|
|
+ }
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+void _report(rt_page_t page, size_t size_bits, char *msg)
|
|
|
+{
|
|
|
+ void *pg_va = rt_page_page2addr(page);
|
|
|
+ LOG_W("%s: %p, allocator: %p, size bits: %lx", msg, pg_va, page->caller, page->trace_size);
|
|
|
+ rt_kputs("backtrace\n");
|
|
|
+ rt_hw_backtrace(0, 0);
|
|
|
+}
|
|
|
+
|
|
|
+static void _trace_free(rt_page_t page, void *caller, size_t size_bits)
|
|
|
+{
|
|
|
+ if (enable)
|
|
|
+ {
|
|
|
+ /* free after free */
|
|
|
+ if (page->trace_size == 0xabadcafe)
|
|
|
+ {
|
|
|
+            _report(page, size_bits, "free after free");
|
|
|
+ return ;
|
|
|
+ }
|
|
|
+ else if (page->trace_size != size_bits)
|
|
|
+ {
|
|
|
+ rt_kprintf("free with size bits %lx\n", size_bits);
|
|
|
+            _report(page, size_bits, "incompatible size bits parameter");
|
|
|
+ return ;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (page->ref_cnt == 1)
|
|
|
+ {
|
|
|
+ if (page->tl_prev)
|
|
|
+ page->tl_prev->tl_next = page->tl_next;
|
|
|
+ if (page->tl_next)
|
|
|
+ page->tl_next->tl_prev = page->tl_prev;
|
|
|
+
|
|
|
+ if (page == _trace_head)
|
|
|
+                _trace_head = page->tl_next;
|
|
|
+
|
|
|
+ page->tl_prev = NULL;
|
|
|
+ page->tl_next = NULL;
|
|
|
+ page->trace_size = 0xabadcafe;
|
|
|
+ }
|
|
|
+ }
|
|
|
+}
|
|
|
+#else
|
|
|
+#define TRACE_ALLOC(x, y)
|
|
|
+#define TRACE_FREE(x, y)
|
|
|
+#endif
|
|
|
+
|
|
|
static inline void *page_to_addr(rt_page_t page)
|
|
|
{
|
|
|
- return (void *)((page - page_start) << ARCH_PAGE_SHIFT) - PV_OFFSET;
|
|
|
+ return (void *)(((page - page_start) << ARCH_PAGE_SHIFT) - PV_OFFSET);
|
|
|
}
|
|
|
|
|
|
static inline rt_page_t addr_to_page(rt_page_t pg_start, void *addr)
|
|
|
{
|
|
|
- addr += PV_OFFSET;
|
|
|
- return &pg_start[((uintptr_t)addr >> ARCH_PAGE_SHIFT)];
|
|
|
+ addr = (char *)addr + PV_OFFSET;
|
|
|
+ return &pg_start[((rt_ubase_t)addr >> ARCH_PAGE_SHIFT)];
|
|
|
}
|
|
|
|
|
|
#define FLOOR(val, align) (((rt_size_t)(val) + (align)-1) & ~((align)-1))
|
|
@@ -143,7 +250,7 @@ void *rt_page_page2addr(struct rt_page *p)
|
|
|
return page_to_addr(p);
|
|
|
}
|
|
|
|
|
|
-static inline struct rt_page *buddy_get(struct rt_page *p,
|
|
|
+static inline struct rt_page *_buddy_get(struct rt_page *p,
|
|
|
rt_uint32_t size_bits)
|
|
|
{
|
|
|
rt_size_t addr;
|
|
@@ -153,7 +260,7 @@ static inline struct rt_page *buddy_get(struct rt_page *p,
|
|
|
return rt_page_addr2page((void *)addr);
|
|
|
}
|
|
|
|
|
|
-static void page_remove(struct rt_page *p, rt_uint32_t size_bits)
|
|
|
+static void _page_remove(rt_page_t page_list[], struct rt_page *p, rt_uint32_t size_bits)
|
|
|
{
|
|
|
if (p->pre)
|
|
|
{
|
|
@@ -172,7 +279,7 @@ static void page_remove(struct rt_page *p, rt_uint32_t size_bits)
|
|
|
p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
}
|
|
|
|
|
|
-static void page_insert(struct rt_page *p, rt_uint32_t size_bits)
|
|
|
+static void _page_insert(rt_page_t page_list[], struct rt_page *p, rt_uint32_t size_bits)
|
|
|
{
|
|
|
p->next = page_list[size_bits];
|
|
|
if (p->next)
|
|
@@ -194,7 +301,7 @@ static void _pages_ref_inc(struct rt_page *p, rt_uint32_t size_bits)
|
|
|
idx = idx & ~((1UL << size_bits) - 1);
|
|
|
|
|
|
page_head = page_start + idx;
|
|
|
- page_head = (void *)page_head + early_offset;
|
|
|
+ page_head = (void *)((char *)page_head + early_offset);
|
|
|
page_head->ref_cnt++;
|
|
|
}
|
|
|
|
|
@@ -211,13 +318,13 @@ static int _pages_ref_get(struct rt_page *p, rt_uint32_t size_bits)
|
|
|
return page_head->ref_cnt;
|
|
|
}
|
|
|
|
|
|
-static int _pages_free(struct rt_page *p, rt_uint32_t size_bits)
|
|
|
+static int _pages_free(rt_page_t page_list[], struct rt_page *p, rt_uint32_t size_bits)
|
|
|
{
|
|
|
rt_uint32_t level = size_bits;
|
|
|
struct rt_page *buddy;
|
|
|
|
|
|
RT_ASSERT(p >= page_start);
|
|
|
- RT_ASSERT((void *)p < rt_mpr_start + rt_mpr_size);
|
|
|
+ RT_ASSERT((char *)p < (char *)rt_mpr_start + rt_mpr_size);
|
|
|
RT_ASSERT(rt_kmem_v2p(p));
|
|
|
RT_ASSERT(p->ref_cnt > 0);
|
|
|
RT_ASSERT(p->size_bits == ARCH_ADDRESS_WIDTH_BITS);
|
|
@@ -231,10 +338,10 @@ static int _pages_free(struct rt_page *p, rt_uint32_t size_bits)
|
|
|
|
|
|
while (level < RT_PAGE_MAX_ORDER - 1)
|
|
|
{
|
|
|
- buddy = buddy_get(p, level);
|
|
|
+ buddy = _buddy_get(p, level);
|
|
|
if (buddy && buddy->size_bits == level)
|
|
|
{
|
|
|
- page_remove(buddy, level);
|
|
|
+ _page_remove(page_list, buddy, level);
|
|
|
p = (p < buddy) ? p : buddy;
|
|
|
level++;
|
|
|
}
|
|
@@ -243,18 +350,18 @@ static int _pages_free(struct rt_page *p, rt_uint32_t size_bits)
|
|
|
break;
|
|
|
}
|
|
|
}
|
|
|
- page_insert(p, level);
|
|
|
+ _page_insert(page_list, p, level);
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
|
-static struct rt_page *_pages_alloc(rt_uint32_t size_bits)
|
|
|
+static struct rt_page *_pages_alloc(rt_page_t page_list[], rt_uint32_t size_bits)
|
|
|
{
|
|
|
struct rt_page *p;
|
|
|
|
|
|
if (page_list[size_bits])
|
|
|
{
|
|
|
p = page_list[size_bits];
|
|
|
- page_remove(p, size_bits);
|
|
|
+ _page_remove(page_list, p, size_bits);
|
|
|
}
|
|
|
else
|
|
|
{
|
|
@@ -273,11 +380,11 @@ static struct rt_page *_pages_alloc(rt_uint32_t size_bits)
|
|
|
}
|
|
|
|
|
|
p = page_list[level];
|
|
|
- page_remove(p, level);
|
|
|
+ _page_remove(page_list, p, level);
|
|
|
while (level > size_bits)
|
|
|
{
|
|
|
- page_insert(p, level - 1);
|
|
|
- p = buddy_get(p, level - 1);
|
|
|
+ _page_insert(page_list, p, level - 1);
|
|
|
+ p = _buddy_get(p, level - 1);
|
|
|
level--;
|
|
|
}
|
|
|
}
|
|
@@ -286,12 +393,12 @@ static struct rt_page *_pages_alloc(rt_uint32_t size_bits)
|
|
|
return p;
|
|
|
}
|
|
|
|
|
|
-static void _early_page_remove(rt_page_t page, rt_uint32_t size_bits)
|
|
|
+static void _early_page_remove(rt_page_t page_list[], rt_page_t page, rt_uint32_t size_bits)
|
|
|
{
|
|
|
- rt_page_t page_cont = (void *)page + early_offset;
|
|
|
+ rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);
|
|
|
if (page_cont->pre)
|
|
|
{
|
|
|
- rt_page_t pre_cont = (void *)page_cont->pre + early_offset;
|
|
|
+ rt_page_t pre_cont = (rt_page_t)((char *)page_cont->pre + early_offset);
|
|
|
pre_cont->next = page_cont->next;
|
|
|
}
|
|
|
else
|
|
@@ -301,23 +408,23 @@ static void _early_page_remove(rt_page_t page, rt_uint32_t size_bits)
|
|
|
|
|
|
if (page_cont->next)
|
|
|
{
|
|
|
- rt_page_t next_cont = (void *)page_cont->next + early_offset;
|
|
|
+ rt_page_t next_cont = (rt_page_t)((char *)page_cont->next + early_offset);
|
|
|
next_cont->pre = page_cont->pre;
|
|
|
}
|
|
|
|
|
|
page_cont->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
}
|
|
|
|
|
|
-static void _early_page_insert(rt_page_t page, int size_bits)
|
|
|
+static void _early_page_insert(rt_page_t page_list[], rt_page_t page, int size_bits)
|
|
|
{
|
|
|
RT_ASSERT((void *)page >= rt_mpr_start &&
|
|
|
- (void *)page - rt_mpr_start < +rt_mpr_size);
|
|
|
- rt_page_t page_cont = (void *)page + early_offset;
|
|
|
+ ((char *)page - (char *)rt_mpr_start) < rt_mpr_size);
|
|
|
+ rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);
|
|
|
|
|
|
page_cont->next = page_list[size_bits];
|
|
|
if (page_cont->next)
|
|
|
{
|
|
|
- rt_page_t next_cont = (void *)page_cont->next + early_offset;
|
|
|
+ rt_page_t next_cont = (rt_page_t)((char *)page_cont->next + early_offset);
|
|
|
next_cont->pre = page;
|
|
|
}
|
|
|
page_cont->pre = 0;
|
|
@@ -325,14 +432,14 @@ static void _early_page_insert(rt_page_t page, int size_bits)
|
|
|
page_cont->size_bits = size_bits;
|
|
|
}
|
|
|
|
|
|
-static struct rt_page *_early_pages_alloc(rt_uint32_t size_bits)
|
|
|
+static struct rt_page *_early_pages_alloc(rt_page_t page_list[], rt_uint32_t size_bits)
|
|
|
{
|
|
|
struct rt_page *p;
|
|
|
|
|
|
if (page_list[size_bits])
|
|
|
{
|
|
|
p = page_list[size_bits];
|
|
|
- _early_page_remove(p, size_bits);
|
|
|
+ _early_page_remove(page_list, p, size_bits);
|
|
|
}
|
|
|
else
|
|
|
{
|
|
@@ -351,20 +458,35 @@ static struct rt_page *_early_pages_alloc(rt_uint32_t size_bits)
|
|
|
}
|
|
|
|
|
|
p = page_list[level];
|
|
|
- _early_page_remove(p, level);
|
|
|
+ _early_page_remove(page_list, p, level);
|
|
|
while (level > size_bits)
|
|
|
{
|
|
|
- _early_page_insert(p, level - 1);
|
|
|
- p = buddy_get(p, level - 1);
|
|
|
+ _early_page_insert(page_list, p, level - 1);
|
|
|
+ p = _buddy_get(p, level - 1);
|
|
|
level--;
|
|
|
}
|
|
|
}
|
|
|
- rt_page_t page_cont = (void *)p + early_offset;
|
|
|
+ rt_page_t page_cont = (rt_page_t)((char *)p + early_offset);
|
|
|
page_cont->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
page_cont->ref_cnt = 1;
|
|
|
return p;
|
|
|
}
|
|
|
|
|
|
+static rt_page_t *_get_page_list(void *vaddr)
|
|
|
+{
|
|
|
+ rt_ubase_t pa_int = (rt_ubase_t)vaddr + PV_OFFSET;
|
|
|
+ rt_page_t *list;
|
|
|
+ if (pa_int > UINT32_MAX)
|
|
|
+ {
|
|
|
+ list = page_list_high;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ list = page_list_low;
|
|
|
+ }
|
|
|
+ return list;
|
|
|
+}
|
|
|
+
|
|
|
int rt_page_ref_get(void *addr, rt_uint32_t size_bits)
|
|
|
{
|
|
|
struct rt_page *p;
|
|
@@ -389,27 +511,73 @@ void rt_page_ref_inc(void *addr, rt_uint32_t size_bits)
|
|
|
rt_hw_interrupt_enable(level);
|
|
|
}
|
|
|
|
|
|
-static rt_page_t (*pages_alloc_handler)(rt_uint32_t size_bits);
|
|
|
+static rt_page_t (*pages_alloc_handler)(rt_page_t page_list[], rt_uint32_t size_bits);
|
|
|
|
|
|
-void *rt_pages_alloc(rt_uint32_t size_bits)
|
|
|
+/* if not, we skip the finding on page_list_high */
|
|
|
+static size_t _high_page_configured = 0;
|
|
|
+
|
|
|
+static rt_page_t *_flag_to_page_list(size_t flags)
|
|
|
+{
|
|
|
+ rt_page_t *page_list;
|
|
|
+ if (_high_page_configured && (flags & PAGE_ANY_AVAILABLE))
|
|
|
+ {
|
|
|
+ page_list = page_list_high;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ page_list = page_list_low;
|
|
|
+ }
|
|
|
+ return page_list;
|
|
|
+}
|
|
|
+
|
|
|
+static void *_do_pages_alloc(rt_uint32_t size_bits, size_t flags)
|
|
|
{
|
|
|
void *alloc_buf = RT_NULL;
|
|
|
struct rt_page *p;
|
|
|
rt_base_t level;
|
|
|
+ rt_page_t *page_list = _flag_to_page_list(flags);
|
|
|
|
|
|
level = rt_hw_interrupt_disable();
|
|
|
- p = pages_alloc_handler(size_bits);
|
|
|
+ p = pages_alloc_handler(page_list, size_bits);
|
|
|
rt_hw_interrupt_enable(level);
|
|
|
+
|
|
|
+ if (!p && page_list != page_list_low)
|
|
|
+ {
|
|
|
+ /* fall back */
|
|
|
+ page_list = page_list_low;
|
|
|
+
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
+ p = pages_alloc_handler(page_list, size_bits);
|
|
|
+ rt_hw_interrupt_enable(level);
|
|
|
+ }
|
|
|
+
|
|
|
if (p)
|
|
|
{
|
|
|
alloc_buf = page_to_addr(p);
|
|
|
+
|
|
|
+ #ifdef RT_DEBUG_PAGE_LEAK
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
+ TRACE_ALLOC(p, size_bits);
|
|
|
+ rt_hw_interrupt_enable(level);
|
|
|
+ #endif
|
|
|
}
|
|
|
return alloc_buf;
|
|
|
}
|
|
|
|
|
|
+void *rt_pages_alloc(rt_uint32_t size_bits)
|
|
|
+{
|
|
|
+ return _do_pages_alloc(size_bits, 0);
|
|
|
+}
|
|
|
+
|
|
|
+void *rt_pages_alloc_ext(rt_uint32_t size_bits, size_t flags)
|
|
|
+{
|
|
|
+ return _do_pages_alloc(size_bits, flags);
|
|
|
+}
|
|
|
+
|
|
|
int rt_pages_free(void *addr, rt_uint32_t size_bits)
|
|
|
{
|
|
|
struct rt_page *p;
|
|
|
+ rt_page_t *page_list = _get_page_list(addr);
|
|
|
int real_free = 0;
|
|
|
|
|
|
p = rt_page_addr2page(addr);
|
|
@@ -417,14 +585,18 @@ int rt_pages_free(void *addr, rt_uint32_t size_bits)
|
|
|
{
|
|
|
rt_base_t level;
|
|
|
level = rt_hw_interrupt_disable();
|
|
|
- real_free = _pages_free(p, size_bits);
|
|
|
+ real_free = _pages_free(page_list, p, size_bits);
|
|
|
+ if (real_free)
|
|
|
+ TRACE_FREE(p, size_bits);
|
|
|
rt_hw_interrupt_enable(level);
|
|
|
}
|
|
|
+
|
|
|
return real_free;
|
|
|
}
|
|
|
|
|
|
void rt_page_list(void) __attribute__((alias("list_page")));
|
|
|
|
|
|
+#warning TODO: improve list page
|
|
|
void list_page(void)
|
|
|
{
|
|
|
int i;
|
|
@@ -435,7 +607,7 @@ void list_page(void)
|
|
|
|
|
|
for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
|
|
|
{
|
|
|
- struct rt_page *p = page_list[i];
|
|
|
+ struct rt_page *p = page_list_low[i];
|
|
|
|
|
|
rt_kprintf("level %d ", i);
|
|
|
|
|
@@ -447,6 +619,21 @@ void list_page(void)
|
|
|
}
|
|
|
rt_kprintf("\n");
|
|
|
}
|
|
|
+ for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
|
|
|
+ {
|
|
|
+ struct rt_page *p = page_list_high[i];
|
|
|
+
|
|
|
+ rt_kprintf("level %d ", i);
|
|
|
+
|
|
|
+ while (p)
|
|
|
+ {
|
|
|
+ total += (1UL << i);
|
|
|
+ rt_kprintf("[0x%08p]", rt_page_page2addr(p));
|
|
|
+ p = p->next;
|
|
|
+ }
|
|
|
+ rt_kprintf("\n");
|
|
|
+ }
|
|
|
+
|
|
|
rt_hw_interrupt_enable(level);
|
|
|
rt_kprintf("free pages is 0x%08lx (%ld KB)\n", total, total * ARCH_PAGE_SIZE / 1024);
|
|
|
rt_kprintf("-------------------------------\n");
|
|
@@ -462,7 +649,17 @@ void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
|
|
|
level = rt_hw_interrupt_disable();
|
|
|
for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
|
|
|
{
|
|
|
- struct rt_page *p = page_list[i];
|
|
|
+ struct rt_page *p = page_list_low[i];
|
|
|
+
|
|
|
+ while (p)
|
|
|
+ {
|
|
|
+ total_free += (1UL << i);
|
|
|
+ p = p->next;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
|
|
|
+ {
|
|
|
+ struct rt_page *p = page_list_high[i];
|
|
|
|
|
|
while (p)
|
|
|
{
|
|
@@ -475,6 +672,62 @@ void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
|
|
|
*free_nr = total_free;
|
|
|
}
|
|
|
|
|
|
+static void _install_page(rt_page_t mpr_head, rt_region_t region, void *insert_handler)
|
|
|
+{
|
|
|
+ void (*insert)(rt_page_t *page_list, rt_page_t page, int size_bits) = insert_handler;
|
|
|
+ rt_region_t shadow;
|
|
|
+ shadow.start = region.start & ~shadow_mask;
|
|
|
+ shadow.end = FLOOR(region.end, shadow_mask + 1);
|
|
|
+
|
|
|
+ if (shadow.end > UINT32_MAX)
|
|
|
+ _high_page_configured = 1;
|
|
|
+
|
|
|
+ rt_page_t shad_head = addr_to_page(mpr_head, (void *)shadow.start);
|
|
|
+ rt_page_t shad_tail = addr_to_page(mpr_head, (void *)shadow.end);
|
|
|
+ rt_page_t head = addr_to_page(mpr_head, (void *)region.start);
|
|
|
+ rt_page_t tail = addr_to_page(mpr_head, (void *)region.end);
|
|
|
+
|
|
|
+ /* mark shadow pages as illegal */
|
|
|
+ for (rt_page_t iter = shad_head; iter < head; iter++)
|
|
|
+ {
|
|
|
+ iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
+ }
|
|
|
+ for (rt_page_t iter = tail; iter < shad_tail; iter++)
|
|
|
+ {
|
|
|
+ iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* insert reserved pages to list */
|
|
|
+ const int max_order = RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1;
|
|
|
+ while (region.start != region.end)
|
|
|
+ {
|
|
|
+ struct rt_page *p;
|
|
|
+ int align_bits;
|
|
|
+ int size_bits;
|
|
|
+
|
|
|
+ size_bits =
|
|
|
+ ARCH_ADDRESS_WIDTH_BITS - 1 - rt_hw_clz(region.end - region.start);
|
|
|
+ align_bits = rt_hw_ctz(region.start);
|
|
|
+ if (align_bits < size_bits)
|
|
|
+ {
|
|
|
+ size_bits = align_bits;
|
|
|
+ }
|
|
|
+ if (size_bits > max_order)
|
|
|
+ {
|
|
|
+ size_bits = max_order;
|
|
|
+ }
|
|
|
+
|
|
|
+ p = addr_to_page(mpr_head, (void *)region.start);
|
|
|
+ p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
+ p->ref_cnt = 0;
|
|
|
+
|
|
|
+ /* insert to list */
|
|
|
+ rt_page_t *page_list = _get_page_list((void *)region.start);
|
|
|
+ insert(page_list, (rt_page_t)((char *)p - early_offset), size_bits - ARCH_PAGE_SHIFT);
|
|
|
+ region.start += (1UL << size_bits);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
void rt_page_init(rt_region_t reg)
|
|
|
{
|
|
|
int i;
|
|
@@ -500,7 +753,8 @@ void rt_page_init(rt_region_t reg)
|
|
|
/* init free list */
|
|
|
for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
|
|
|
{
|
|
|
- page_list[i] = 0;
|
|
|
+ page_list_low[i] = 0;
|
|
|
+ page_list_high[i] = 0;
|
|
|
}
|
|
|
|
|
|
/* map MPR area */
|
|
@@ -524,9 +778,9 @@ void rt_page_init(rt_region_t reg)
|
|
|
rt_size_t init_mpr_npage = init_mpr_size >> ARCH_PAGE_SHIFT;
|
|
|
|
|
|
init_mpr_cont_start = (void *)reg.start;
|
|
|
- void *init_mpr_cont_end = init_mpr_cont_start + init_mpr_size;
|
|
|
- early_offset = init_mpr_cont_start - (void *)init_mpr_align_start;
|
|
|
- rt_page_t mpr_cont = rt_mpr_start + early_offset;
|
|
|
+ rt_size_t init_mpr_cont_end = (rt_size_t)init_mpr_cont_start + init_mpr_size;
|
|
|
+ early_offset = (rt_size_t)init_mpr_cont_start - init_mpr_align_start;
|
|
|
+ rt_page_t mpr_cont = (void *)((char *)rt_mpr_start + early_offset);
|
|
|
|
|
|
/* mark init mpr pages as illegal */
|
|
|
rt_page_t head_cont = addr_to_page(mpr_cont, (void *)reg.start);
|
|
@@ -536,48 +790,8 @@ void rt_page_init(rt_region_t reg)
|
|
|
iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
}
|
|
|
|
|
|
- /* mark shadow pages as illegal */
|
|
|
- rt_page_t shad_head_cont = addr_to_page(mpr_cont, (void *)shadow.start);
|
|
|
- for (rt_page_t iter = shad_head_cont; iter < head_cont; iter++)
|
|
|
- {
|
|
|
- iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
- }
|
|
|
- rt_page_t shad_tail_cont = addr_to_page(mpr_cont, (void *)shadow.end);
|
|
|
- for (rt_page_t iter = tail_cont; iter < shad_tail_cont; iter++)
|
|
|
- {
|
|
|
- iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
- }
|
|
|
-
|
|
|
- /* insert reserved pages to list */
|
|
|
- reg.start = (rt_size_t)init_mpr_cont_end;
|
|
|
- const int max_order = RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1;
|
|
|
- while (reg.start != reg.end)
|
|
|
- {
|
|
|
- struct rt_page *p;
|
|
|
- int align_bits;
|
|
|
- int size_bits;
|
|
|
-
|
|
|
- size_bits =
|
|
|
- ARCH_ADDRESS_WIDTH_BITS - 1 - rt_hw_clz(reg.end - reg.start);
|
|
|
- align_bits = rt_hw_ctz(reg.start);
|
|
|
- if (align_bits < size_bits)
|
|
|
- {
|
|
|
- size_bits = align_bits;
|
|
|
- }
|
|
|
- if (size_bits > max_order)
|
|
|
- {
|
|
|
- size_bits = max_order;
|
|
|
- }
|
|
|
-
|
|
|
- p = addr_to_page(mpr_cont, (void *)reg.start);
|
|
|
- p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
- p->ref_cnt = 0;
|
|
|
-
|
|
|
- /* insert to list */
|
|
|
- _early_page_insert((void *)p - early_offset,
|
|
|
- size_bits - ARCH_PAGE_SHIFT);
|
|
|
- reg.start += (1UL << size_bits);
|
|
|
- }
|
|
|
+ reg.start = init_mpr_cont_end;
|
|
|
+ _install_page(mpr_cont, reg, _early_page_insert);
|
|
|
|
|
|
pages_alloc_handler = _early_pages_alloc;
|
|
|
/* doing the page table bushiness */
|
|
@@ -594,7 +808,7 @@ void rt_page_init(rt_region_t reg)
|
|
|
static int _load_mpr_area(void *head, void *tail)
|
|
|
{
|
|
|
int err = 0;
|
|
|
- void *iter = (void *)((uintptr_t)head & ~ARCH_PAGE_MASK);
|
|
|
+ char *iter = (char *)((rt_ubase_t)head & ~ARCH_PAGE_MASK);
|
|
|
tail = (void *)FLOOR(tail, ARCH_PAGE_SIZE);
|
|
|
|
|
|
while (iter != tail)
|
|
@@ -630,19 +844,7 @@ int rt_page_install(rt_region_t region)
|
|
|
|
|
|
if (err == RT_EOK)
|
|
|
{
|
|
|
- while (region.start != region.end)
|
|
|
- {
|
|
|
- struct rt_page *p;
|
|
|
- int size_bits;
|
|
|
-
|
|
|
- size_bits = RT_PAGE_MAX_ORDER - 1;
|
|
|
- p = addr_to_page(page_start, (void *)region.start);
|
|
|
- p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
|
|
|
- p->ref_cnt = 1;
|
|
|
-
|
|
|
- _pages_free(p, size_bits);
|
|
|
- region.start += (1UL << (size_bits + ARCH_PAGE_SHIFT));
|
|
|
- }
|
|
|
+ _install_page(rt_mpr_start, region, _page_insert);
|
|
|
}
|
|
|
}
|
|
|
return err;
|