/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-07     zmshahaha    the first version
 */

#include "mm_memblock.h"
#include "mm_page.h"
#include "mm_aspace.h"
#include <mmu.h>

#define DBG_TAG "mm.memblock"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#define PHYS_ADDR_MAX (~((rt_size_t)0))

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

#ifndef RT_INIT_MEMORY_REGIONS
#define RT_INIT_MEMORY_REGIONS 128
#endif
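
/*
 * All region descriptors come from this fixed, statically allocated pool,
 * as memblock is used before the dynamic allocators are up. _hint_idx
 * remembers where the last search stopped, so the next allocation usually
 * succeeds on its first probe.
 */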
static struct rt_mmblk_reg _regions[RT_INIT_MEMORY_REGIONS];
static int _hint_idx;

static struct rt_memblock mmblk_memory;
static struct rt_memblock mmblk_reserved;

struct rt_memblock *rt_memblock_get_memory(void)
{
    return &mmblk_memory;
}

struct rt_memblock *rt_memblock_get_reserved(void)
{
    return &mmblk_reserved;
}

rt_inline struct rt_mmblk_reg *_next_region(struct rt_mmblk_reg *prev)
{
    if (prev && prev->node.next)
    {
        return rt_slist_entry(prev->node.next, struct rt_mmblk_reg, node);
    }
    else
    {
        return RT_NULL;
    }
}
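
/*
 * Take a free descriptor from the static pool and link it into the list
 * right after @prev. The scan starts at _hint_idx and wraps around once,
 * so pool exhaustion is detected after at most one full pass over
 * _regions[].
 */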
static struct rt_mmblk_reg *_alloc_memreg(struct rt_mmblk_reg *prev)
{
    for (int i = _hint_idx; i < RT_INIT_MEMORY_REGIONS; i++)
    {
        if (_regions[i].alloc == RT_FALSE)
        {
            rt_slist_insert(&(prev->node), &(_regions[i].node));
            _regions[i].alloc = RT_TRUE;
            _hint_idx = i + 1;
            return &_regions[i];
        }
    }

    for (int i = 0; i < _hint_idx; i++)
    {
        if (_regions[i].alloc == RT_FALSE)
        {
            rt_slist_insert(&(prev->node), &(_regions[i].node));
            _regions[i].alloc = RT_TRUE;
            _hint_idx = i + 1;
            return &_regions[i];
        }
    }

    return RT_NULL;
}
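
/*
 * Unlink and release the descriptor that follows @prev. The caller must
 * guarantee that @prev has a successor; otherwise _next_region() returns
 * RT_NULL and the dereference below is invalid.
 */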
static void _free_memreg(struct rt_mmblk_reg *prev)
{
    struct rt_mmblk_reg *next = _next_region(prev);

    next->alloc = RT_FALSE;
    rt_slist_remove(&(prev->node), prev->node.next);
}

static rt_err_t _reg_insert_after(struct rt_mmblk_reg *prev, rt_region_t *reg,
                                  mmblk_flag_t flags)
{
    struct rt_mmblk_reg *new_reg = _alloc_memreg(prev);

    if (!new_reg)
    {
        LOG_E("Not enough space");
        return -RT_ENOMEM;
    }

    rt_memcpy(&(new_reg->memreg), reg, sizeof(*reg));
    new_reg->flags = flags;
    return RT_EOK;
}

rt_inline void _reg_remove_after(struct rt_mmblk_reg *prev)
{
    _free_memreg(prev);
}

/* adding overlapping regions is banned */
static rt_err_t _memblock_add_range(struct rt_memblock *memblock,
                                    char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flag)
{
    struct rt_mmblk_reg *reg, *reg_next;
    rt_slist_t sentinel;
    rt_region_t new_region;

    if (start >= end)
        return -RT_EINVAL;

    sentinel.next = &(memblock->reg_list);

    /* find a suitable insertion point */
    rt_slist_for_each_entry(reg, &sentinel, node)
    {
        reg_next = _next_region(reg);

        if (reg_next == RT_NULL)
            break;

        rt_size_t rstart = reg_next->memreg.start;
        rt_size_t rend = reg_next->memreg.end;

        /* no overlap */
        if (rstart >= end)
            break;
        if (rend <= start)
            continue;

        /* overlap */
        LOG_E("region to add %s: [%p-%p) overlaps with existing region %s: [%p-%p)",
              name, start, end, reg_next->memreg.name, rstart, rend);
        return -RT_EINVAL;
    }

    /* insert the region */
    new_region.name = name;
    new_region.start = start;
    new_region.end = end;
    return _reg_insert_after(reg, &new_region, flag);
}

rt_err_t rt_memblock_add_memory(char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
    LOG_D("add physical address range [%p-%p) with flag 0x%x"
          " to overall memory regions\n", start, end, flags);

    return _memblock_add_range(&mmblk_memory, name, start, end, flags);
}

rt_err_t rt_memblock_reserve_memory(char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
    LOG_D("add physical address range [%p-%p) to reserved memory regions\n",
          start, end);

    return _memblock_add_range(&mmblk_reserved, name, start, end, flags);
}
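
/*
 * Typical early-boot usage (an illustrative sketch only; the region names
 * and physical addresses below are hypothetical, and the real call sites
 * live in the platform setup code):
 *
 *     rt_memblock_add_memory("ram", 0x40000000, 0x48000000, MEMBLOCK_NONE);
 *     rt_memblock_reserve_memory("kernel", 0x40000000, 0x40200000, MEMBLOCK_NONE);
 *     rt_memblock_setup_memory_environment();
 */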

/*
 * Split regions in @memblock so that [start, end) is covered by whole
 * regions; on return [*start_reg, *end_reg) delimits the isolated range.
 */
static rt_err_t _memblock_separate_range(struct rt_memblock *memblock,
                                         rt_size_t start, rt_size_t end,
                                         struct rt_mmblk_reg **start_reg, struct rt_mmblk_reg **end_reg)
{
    struct rt_mmblk_reg *reg = RT_NULL;
    rt_region_t new_region;
    rt_err_t err = RT_EOK;

    *start_reg = *end_reg = RT_NULL;

    rt_slist_for_each_entry(reg, &(memblock->reg_list), node)
    {
        rt_size_t rstart = reg->memreg.start;
        rt_size_t rend = reg->memreg.end;

        if (rstart >= end)
            break;
        if (rend <= start)
            continue;

        /* the beginning of the range separates its respective region */
        if (rstart < start)
        {
            new_region.start = start;
            new_region.end = rend;
            new_region.name = reg->memreg.name;
            err = _reg_insert_after(reg, &new_region, reg->flags);

            if (err != RT_EOK)
                return err;

            reg->memreg.end = start;

            *start_reg = _next_region(reg);
            *end_reg = _next_region(*start_reg);
        }
        /* the endpoint of the range separates its respective region */
        else if (rend > end)
        {
            new_region.start = end;
            new_region.end = rend;
            new_region.name = reg->memreg.name;
            err = _reg_insert_after(reg, &new_region, reg->flags);

            if (err != RT_EOK)
                return err;

            reg->memreg.end = end;

            *end_reg = _next_region(reg);
            break;
        }
        /* reg is fully contained in the range */
        else
        {
            if (!*end_reg)
                *start_reg = reg;
            *end_reg = _next_region(reg);
        }
    }

    return err;
}

static void _memblock_set_flag(struct rt_mmblk_reg *start_reg, struct rt_mmblk_reg *end_reg,
                               mmblk_flag_t flags)
{
    if (start_reg == RT_NULL)
        return;

    for (struct rt_mmblk_reg *iter = start_reg; iter != end_reg; iter = _next_region(iter))
    {
        iter->flags |= flags;
    }
}
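
/*
 * Advance the (*m, *r) cursor pair to the next chunk of memory that has
 * exactly @flags and does not intersect any reserved region. *m walks the
 * memory list while *r walks the gaps between reserved regions; once every
 * free chunk has been produced, *m is reset to the memory-list sentinel,
 * which is the termination condition of for_each_free_region().
 */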
static void _next_free_region(struct rt_mmblk_reg **m, struct rt_mmblk_reg **r, mmblk_flag_t flags,
                              rt_size_t *out_start, rt_size_t *out_end)
{
    /* memory related data */
    rt_size_t m_start = 0;
    rt_size_t m_end = 0;

    /* reserved related data */
    rt_size_t r_start = 0;
    rt_size_t r_end = 0;
    struct rt_mmblk_reg *r_sentinel = rt_slist_entry(&(mmblk_reserved.reg_list), struct rt_mmblk_reg, node);

    for (; *m != RT_NULL; *m = _next_region(*m))
    {
        if ((*m)->flags != flags)
            continue;

        m_start = (*m)->memreg.start;
        m_end = (*m)->memreg.end;

        for (; *r != RT_NULL; *r = _next_region(*r))
        {
            /*
             * *r starts at the reserved-list sentinel.
             * Walk the complement of the reserved regions. For example,
             * if the reserved regions are:
             *
             *     0:[8-16), 1:[32-48), 2:[128-130)
             *
             * then the gaps visited here are:
             *
             *     0:[0-8), 1:[16-32), 2:[48-128), 3:[130-MAX)
             *
             * so a free chunk is simply the intersection of a memory
             * region with one of these gaps.
             */
            r_start = (*r == r_sentinel) ? 0 : (*r)->memreg.end;
            r_end = (_next_region(*r)) ? _next_region(*r)->memreg.start : PHYS_ADDR_MAX;

            /* two reserved regions are adjacent */
            if (r_start == r_end)
                continue;

            if (r_start >= m_end)
                break;

            if (m_start < r_end)
            {
                *out_start = MAX(m_start, r_start);
                *out_end = MIN(m_end, r_end);

                if (m_end <= r_end)
                    *m = _next_region(*m);
                else
                    *r = _next_region(*r);
                return;
            }
        }
    }

    /* all regions found */
    *m = rt_slist_entry(&(mmblk_memory.reg_list), struct rt_mmblk_reg, node);
}

/* for each region in memory with flags and not reserved */
#define for_each_free_region(m, r, flags, p_start, p_end)                              \
    m = rt_slist_entry(&(mmblk_memory.reg_list.next), struct rt_mmblk_reg, node);      \
    r = rt_slist_entry(&(mmblk_reserved.reg_list), struct rt_mmblk_reg, node);         \
    for (_next_free_region(&m, &r, flags, p_start, p_end);                             \
         m != rt_slist_entry(&(mmblk_memory.reg_list), struct rt_mmblk_reg, node);     \
         _next_free_region(&m, &r, flags, p_start, p_end))
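
/*
 * Usage sketch (mirrors _memblock_free_all() below):
 *
 *     struct rt_mmblk_reg *m, *r;
 *     rt_size_t start, end;
 *
 *     for_each_free_region(m, r, MEMBLOCK_NONE, &start, &end)
 *     {
 *         // [start, end) is free physical memory with no flags set
 *     }
 */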

/* merge adjacent memory regions with identical flags */
static void _memblock_merge_memory(void)
{
    struct rt_mmblk_reg *reg;

    rt_slist_for_each_entry(reg, &(mmblk_memory.reg_list), node)
    {
        while (_next_region(reg) &&
               reg->flags == _next_region(reg)->flags &&
               reg->memreg.end == _next_region(reg)->memreg.start)
        {
            reg->memreg.end = _next_region(reg)->memreg.end;
            _reg_remove_after(reg);
        }
    }
}

/* free all available memory to the buddy system */
static void _memblock_free_all(void)
{
    rt_region_t reg;
    rt_size_t mem = 0;
    struct rt_mmblk_reg *m, *r;

    for_each_free_region(m, r, MEMBLOCK_NONE, &reg.start, &reg.end)
    {
        reg.start -= PV_OFFSET;
        reg.end -= PV_OFFSET;
        rt_page_install(reg);

        LOG_D("region [%p-%p) added to buddy system\n", reg.start, reg.end);
        mem += reg.end - reg.start;
    }

    LOG_D("0x%lx(%ld) bytes memory added to buddy system\n", mem, mem);
}
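
/*
 * Entry point called once during early MM bring-up: first coalesce the
 * memory list, then stamp the flags of every flagged reserved range onto
 * the overlapping memory regions (splitting them as needed), and finally
 * hand all unreserved memory over to the buddy allocator.
 */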
void rt_memblock_setup_memory_environment(void)
{
    struct rt_mmblk_reg *reg, *start_reg, *end_reg;
    rt_err_t err = RT_EOK;

    _memblock_merge_memory();

    rt_slist_for_each_entry(reg, &(mmblk_reserved.reg_list), node)
    {
        if (reg->flags != MEMBLOCK_NONE)
        {
            err = _memblock_separate_range(&mmblk_memory, reg->memreg.start, reg->memreg.end, &start_reg, &end_reg);
            RT_ASSERT(err == RT_EOK);
            _memblock_set_flag(start_reg, end_reg, reg->flags);
        }
    }

    _memblock_free_all();
}

#ifdef UTEST_MM_API_TC
/* functions below are only used for utest */
void rt_memblock_merge(void)
{
    _memblock_merge_memory();
}

static struct rt_mmblk_reg *mem;
static struct rt_mmblk_reg *res;

void rt_memblock_next_free_region_init(void)
{
    mem = rt_slist_entry(&(mmblk_memory.reg_list.next), struct rt_mmblk_reg, node);
    res = rt_slist_entry(&(mmblk_reserved.reg_list), struct rt_mmblk_reg, node);
}

void rt_memblock_next_free_region(mmblk_flag_t flags, rt_size_t *out_start, rt_size_t *out_end)
{
    _next_free_region(&mem, &res, flags, out_start, out_end);
}

rt_bool_t rt_memblock_is_last_free(void)
{
    return mem == rt_slist_entry(&(mmblk_memory.reg_list), struct rt_mmblk_reg, node);
}
#endif /* UTEST_MM_API_TC */