mm_memblock.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-07     zmshahaha    the first version
 */

#include "mm_memblock.h"
#include "mm_page.h"
#include "mm_aspace.h"
#include <mmu.h>

#define DBG_TAG "mm.memblock"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#define PHYS_ADDR_MAX (~((rt_size_t)0))

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

#ifdef ARCH_CPU_64BIT
#define MIN_BIT 16
#else
#define MIN_BIT 8
#endif

#ifndef RT_INIT_MEMORY_REGIONS
#define RT_INIT_MEMORY_REGIONS 128
#endif

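/* static pool of region descriptors; _hint_idx remembers where the last
 * successful allocation happened so the next search can resume there */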
static struct rt_mmblk_reg _regions[RT_INIT_MEMORY_REGIONS];
static int _hint_idx;

static struct rt_memblock mmblk_memory;
static struct rt_memblock mmblk_reserved;

struct rt_memblock *rt_memblock_get_memory(void)
{
    return &mmblk_memory;
}

struct rt_memblock *rt_memblock_get_reserved(void)
{
    return &mmblk_reserved;
}

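/* return the region following @prev in its list, or RT_NULL if @prev is the tail */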
rt_inline struct rt_mmblk_reg *_next_region(struct rt_mmblk_reg *prev)
{
    if (prev && prev->node.next)
    {
        return rt_slist_entry(prev->node.next, struct rt_mmblk_reg, node);
    }
    else
    {
        return RT_NULL;
    }
}

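/* take a free slot from the static pool and link it after @prev;
 * returns RT_NULL when the pool is exhausted */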
static struct rt_mmblk_reg *_alloc_memreg(struct rt_mmblk_reg *prev)
{
    for (int i = _hint_idx; i < RT_INIT_MEMORY_REGIONS; i++)
    {
        if (_regions[i].alloc == RT_FALSE)
        {
            rt_slist_insert(&(prev->node), &(_regions[i].node));
            _regions[i].alloc = RT_TRUE;
            _hint_idx = i + 1;
            return &_regions[i];
        }
    }

    for (int i = 0; i < _hint_idx; i++)
    {
        if (_regions[i].alloc == RT_FALSE)
        {
            rt_slist_insert(&(prev->node), &(_regions[i].node));
            _regions[i].alloc = RT_TRUE;
            _hint_idx = i + 1;
            return &_regions[i];
        }
    }

    return RT_NULL;
}

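/* unlink the region that follows @prev and return its slot to the pool */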
static void _free_memreg(struct rt_mmblk_reg *prev)
{
    struct rt_mmblk_reg *next = _next_region(prev);

    next->alloc = RT_FALSE;
    rt_slist_remove(&(prev->node), prev->node.next);
}

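/* copy @reg into a freshly allocated descriptor linked right after @prev */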
static rt_err_t _reg_insert_after(struct rt_mmblk_reg *prev, rt_region_t *reg,
                                  mmblk_flag_t flags)
{
    struct rt_mmblk_reg *new_reg = _alloc_memreg(prev);

    if (!new_reg)
    {
        LOG_E("Not enough space");
        return -RT_ENOMEM;
    }

    rt_memcpy(&(new_reg->memreg), reg, sizeof(*reg));
    new_reg->flags = flags;
    return RT_EOK;
}

rt_inline void _reg_remove_after(struct rt_mmblk_reg *prev)
{
    _free_memreg(prev);
}

/* adding overlapped regions is banned */
static rt_err_t _memblock_add_range(struct rt_memblock *memblock,
                                    const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
    struct rt_mmblk_reg *reg = RT_NULL, *reg_next = RT_NULL;
    rt_slist_t sentinel;
    rt_region_t new_region;

    if (start >= end)
        return -RT_EINVAL;

    sentinel.next = &(memblock->reg_list);

    /* find a suitable place */
    rt_slist_for_each_entry(reg, &sentinel, node)
    {
        reg_next = _next_region(reg);

        if (reg_next == RT_NULL)
            break;

        rt_size_t rstart = reg_next->memreg.start;
        rt_size_t rend = reg_next->memreg.end;

        /* no overlap */
        if (rstart >= end)
            break;
        if (rend <= start)
            continue;

        /* overlap */
        LOG_E("region to add %s: [%p-%p) overlaps with existing region %s: [%p-%p)",
              name, start, end, reg_next->memreg.name, rstart, rend);
        return -RT_EINVAL;
    }

    /* insert the region */
    new_region.name = name;
    new_region.start = start;
    new_region.end = end;
    return _reg_insert_after(reg, &new_region, flags);
}

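/* register [start, end) as usable system memory */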
rt_err_t rt_memblock_add_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
    LOG_D("add physical address range [0x%.*lx-0x%.*lx) with flag 0x%x"
          " to overall memory regions\n", MIN_BIT, start, MIN_BIT, end, flags);

    return _memblock_add_range(&mmblk_memory, name, start, end, flags);
}

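/* mark [start, end) as reserved so it is never handed to the page allocator */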
rt_err_t rt_memblock_reserve_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
    LOG_D("add physical address range %s [0x%.*lx-0x%.*lx) to reserved memory regions\n",
          name, MIN_BIT, start, MIN_BIT, end);

    return _memblock_add_range(&mmblk_reserved, name, start, end, flags);
}

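/* split regions in @memblock so that @start and @end fall exactly on region
 * boundaries, cutting any region that crosses either address in two */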
/* [*start_reg, *end_reg) is the isolated range */
static rt_err_t _memblock_separate_range(struct rt_memblock *memblock,
                                         rt_size_t start, rt_size_t end,
                                         struct rt_mmblk_reg **start_reg, struct rt_mmblk_reg **end_reg)
{
    struct rt_mmblk_reg *reg = RT_NULL;
    rt_region_t new_region;
    rt_err_t err = RT_EOK;

    *start_reg = *end_reg = RT_NULL;

    rt_slist_for_each_entry(reg, &(memblock->reg_list), node)
    {
        rt_size_t rstart = reg->memreg.start;
        rt_size_t rend = reg->memreg.end;

        if (rstart >= end)
            break;
        if (rend <= start)
            continue;

        /* the beginning of the range separates its respective region */
        if (rstart < start)
        {
            new_region.start = start;
            new_region.end = rend;
            new_region.name = reg->memreg.name;
            err = _reg_insert_after(reg, &new_region, reg->flags);

            if (err != RT_EOK)
                return err;

            reg->memreg.end = start;

            *start_reg = _next_region(reg);
            *end_reg = _next_region(*start_reg);
        }
        /* the endpoint of the range separates its respective region */
        else if (rend > end)
        {
            new_region.start = end;
            new_region.end = rend;
            new_region.name = reg->memreg.name;
            err = _reg_insert_after(reg, &new_region, reg->flags);

            if (err != RT_EOK)
                return err;

            reg->memreg.end = end;

            *end_reg = _next_region(reg);
            break;
        }
        /* reg is fully contained in the range */
        else
        {
            if (!*end_reg)
                *start_reg = reg;
            *end_reg = _next_region(reg);
        }
    }

    return err;
}

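/* OR @flags into every region in [start_reg, end_reg) */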
static void _memblock_set_flag(struct rt_mmblk_reg *start_reg, struct rt_mmblk_reg *end_reg,
                               mmblk_flag_t flags)
{
    if (start_reg == RT_NULL)
        return;

    for (struct rt_mmblk_reg *iter = start_reg; iter != end_reg; iter = _next_region(iter))
    {
        iter->flags |= flags;
    }
}

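/* advance (*m, *r) to the next chunk of memory that carries @flags and is not
 * covered by any reserved region; the chunk is returned in [*out_start, *out_end).
 * When everything has been visited, *m is set back to the entry derived from the
 * memory list head so for_each_free_region() can terminate. */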
static void _next_free_region(struct rt_mmblk_reg **m, struct rt_mmblk_reg **r, mmblk_flag_t flags,
                              rt_size_t *out_start, rt_size_t *out_end)
{
    /* memory related data */
    rt_size_t m_start = 0;
    rt_size_t m_end = 0;

    /* reserved related data */
    rt_size_t r_start = 0;
    rt_size_t r_end = 0;
    struct rt_mmblk_reg *r_sentinel = rt_slist_entry(&(mmblk_reserved.reg_list), struct rt_mmblk_reg, node);

    for (; *m != RT_NULL; *m = _next_region(*m))
    {
        if ((*m)->flags != flags)
            continue;

        m_start = (*m)->memreg.start;
        m_end = (*m)->memreg.end;

        for (; *r != RT_NULL; *r = _next_region(*r))
        {
            /*
             * Walk the complement of the reserved list: *r starts at the list
             * sentinel, which stands for the gap before the first reserved
             * region. For example, if the reserved regions are
             *
             *  0:[8-16), 1:[32-48), 2:[128-130)
             *
             * the gaps visited here are
             *
             *  0:[0-8), 1:[16-32), 2:[48-128), 3:[130-MAX)
             *
             * so intersecting them with the current memory region yields its
             * unreserved parts directly, instead of excluding the reserved ones.
             */
            r_start = (*r == r_sentinel) ? 0 : (*r)->memreg.end;
            r_end = (_next_region(*r)) ? _next_region(*r)->memreg.start : PHYS_ADDR_MAX;

            /* two reserved regions are adjacent */
            if (r_start == r_end)
                continue;

            if (r_start >= m_end)
                break;

            if (m_start < r_end)
            {
                *out_start = MAX(m_start, r_start);
                *out_end = MIN(m_end, r_end);

                if (m_end <= r_end)
                    *m = _next_region(*m);
                else
                    *r = _next_region(*r);
                return;
            }
        }
    }

    /* all regions found */
    *m = rt_slist_entry(&(mmblk_memory.reg_list), struct rt_mmblk_reg, node);
}

/* for each region in memory with flags and not reserved */
#define for_each_free_region(m, r, flags, p_start, p_end)                                  \
    m = rt_slist_entry(&(mmblk_memory.reg_list.next), struct rt_mmblk_reg, node);          \
    r = rt_slist_entry(&(mmblk_reserved.reg_list), struct rt_mmblk_reg, node);             \
    for (_next_free_region(&m, &r, flags, p_start, p_end);                                 \
         m != rt_slist_entry(&(mmblk_memory.reg_list), struct rt_mmblk_reg, node);         \
         _next_free_region(&m, &r, flags, p_start, p_end))

/* merge normal memory regions */
static void _memblock_merge_memory(void)
{
    struct rt_mmblk_reg *reg = RT_NULL;

    rt_slist_for_each_entry(reg, &(mmblk_memory.reg_list), node)
    {
        while (_next_region(reg) &&
               reg->flags == _next_region(reg)->flags &&
               reg->memreg.end == _next_region(reg)->memreg.start)
        {
            reg->memreg.end = _next_region(reg)->memreg.end;
            _reg_remove_after(reg);
        }
    }
}

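/* merge the memory list, apply the flags of reserved regions to it, then map
 * and install every remaining free, page-aligned range into the page allocator */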
void rt_memblock_setup_memory_environment(void)
{
    struct rt_mmblk_reg *iter = RT_NULL, *start_reg = RT_NULL, *end_reg = RT_NULL;
    rt_region_t reg = {0};
    rt_size_t mem = 0;
    struct rt_mmblk_reg *m, *r;
    void *err;

    _memblock_merge_memory();

    LOG_I("System memory:");

    rt_slist_for_each_entry(iter, &(mmblk_memory.reg_list), node)
    {
        LOG_I("  %-*.s [0x%.*lx, 0x%.*lx]", RT_NAME_MAX, iter->memreg.name, MIN_BIT, iter->memreg.start, MIN_BIT, iter->memreg.end);
    }

    LOG_I("Reserved memory:");

    rt_slist_for_each_entry(iter, &(mmblk_reserved.reg_list), node)
    {
        LOG_I("  %-*.s [0x%.*lx, 0x%.*lx]", RT_NAME_MAX, iter->memreg.name, MIN_BIT, iter->memreg.start, MIN_BIT, iter->memreg.end);

        if (iter->flags != MEMBLOCK_NONE)
        {
            _memblock_separate_range(&mmblk_memory, iter->memreg.start, iter->memreg.end, &start_reg, &end_reg);
            _memblock_set_flag(start_reg, end_reg, iter->flags);
        }
    }

    /* install usable memory to system page */
    for_each_free_region(m, r, MEMBLOCK_NONE, &reg.start, &reg.end)
    {
        reg.start = RT_ALIGN(reg.start, ARCH_PAGE_SIZE);
        reg.end = RT_ALIGN_DOWN(reg.end, ARCH_PAGE_SIZE);

        if (reg.start >= reg.end)
            continue;

        LOG_I("physical memory region [%p-%p] installed to system page", reg.start, reg.end);

        reg.start -= PV_OFFSET;
        reg.end -= PV_OFFSET;

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_start = rt_kernel_space.start,
                                     .limit_range_size = rt_kernel_space.size,
                                     .map_size = reg.end - reg.start,
                                     .prefer = (void *)reg.start};

        rt_aspace_map_phy(&rt_kernel_space, &hint, MMU_MAP_K_RWCB, (reg.start + PV_OFFSET) >> MM_PAGE_SHIFT, &err);
        rt_page_install(reg);
        mem += reg.end - reg.start;
    }

    LOG_I("%ld MB memory installed to system page", mem / 1000000);
}

#ifdef UTEST_MM_API_TC
/* functions below are only used for utest */
void rt_memblock_merge(void)
{
    _memblock_merge_memory();
}

static struct rt_mmblk_reg *mem;
static struct rt_mmblk_reg *res;

void rt_memblock_next_free_region_init(void)
{
    mem = rt_slist_entry(&(mmblk_memory.reg_list.next), struct rt_mmblk_reg, node);
    res = rt_slist_entry(&(mmblk_reserved.reg_list), struct rt_mmblk_reg, node);
}

void rt_memblock_next_free_region(mmblk_flag_t flags, rt_size_t *out_start, rt_size_t *out_end)
{
    _next_free_region(&mem, &res, flags, out_start, out_end);
}

rt_bool_t rt_memblock_is_last_free(void)
{
    return mem == rt_slist_entry(&(mmblk_memory.reg_list), struct rt_mmblk_reg, node);
}
#endif /* UTEST_MM_API_TC */