memheap.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. /*
  7. * File : memheap.c
  8. *
  9. * Change Logs:
  10. * Date Author Notes
  11. * 2012-04-10 Bernard first implementation
  12. * 2012-10-16 Bernard add the mutex lock for heap object.
  13. * 2012-12-29 Bernard memheap can be used as system heap.
  14. * change mutex lock to semaphore lock.
  15. * 2013-04-10 Bernard add rt_memheap_realloc function.
  16. * 2013-05-24 Bernard fix the rt_memheap_realloc issue.
  17. * 2013-07-11 Grissiom fix the memory block splitting issue.
  18. * 2013-07-15 Grissiom optimize rt_memheap_realloc
  19. * 2021-06-03 Flybreak Fix the crash problem after opening Oz optimization on ac6.
  20. */
  21. #include <rthw.h>
  22. #include <rtthread.h>
  23. #ifdef RT_USING_MEMHEAP
  24. /* dynamic pool magic and mask */
  25. #define RT_MEMHEAP_MAGIC 0x1ea01ea0
  26. #define RT_MEMHEAP_MASK 0xfffffffe
  27. #define RT_MEMHEAP_USED 0x01
  28. #define RT_MEMHEAP_FREED 0x00
  29. #define RT_MEMHEAP_IS_USED(i) ((i)->magic & RT_MEMHEAP_USED)
  30. #define RT_MEMHEAP_MINIALLOC 12
  31. #define RT_MEMHEAP_SIZE RT_ALIGN(sizeof(struct rt_memheap_item), RT_ALIGN_SIZE)
  32. #define MEMITEM_SIZE(item) ((rt_ubase_t)item->next - (rt_ubase_t)item - RT_MEMHEAP_SIZE)
  33. #define MEMITEM(ptr) (struct rt_memheap_item*)((rt_uint8_t*)ptr - RT_MEMHEAP_SIZE)
  34. #ifdef RT_USING_MEMTRACE
/* Store a short tag name for an allocated block by reusing the next_free and
 * prev_free pointer fields as character storage (they are unused while the
 * block is allocated). Up to 2 * sizeof(void *) characters are kept; longer
 * names are truncated. Only meaningful for blocks currently in use. */
rt_inline void rt_memheap_setname(struct rt_memheap_item *item, const char *name)
{
    int index;
    rt_uint8_t *ptr;

    /* first sizeof(void *) bytes of the tag go into the next_free field */
    ptr = (rt_uint8_t *) & (item->next_free);
    for (index = 0; index < sizeof(void *); index ++)
    {
        if (name[index] == '\0') break;
        ptr[index] = name[index];
    }

    /* NOTE(review): when index == sizeof(void *) here, the terminator lands
     * one byte past next_free, i.e. in the first byte of the adjacent
     * prev_free field — this relies on the struct's field layout. */
    if (name[index] == '\0') ptr[index] = '\0';
    else
    {
        /* name longer than one pointer: spill the remainder into prev_free */
        ptr = (rt_uint8_t *) & (item->prev_free);
        for (index = 0; index < sizeof(void *) && (index + sizeof(void *)) < RT_NAME_MAX; index ++)
        {
            if (name[sizeof(void *) + index] == '\0') break;
            ptr[index] = name[sizeof(void *) + index];
        }

        if (name[sizeof(void *) + index] == '\0') ptr[index] = '\0';
    }
}
  57. void rt_mem_set_tag(void *ptr, const char *name)
  58. {
  59. struct rt_memheap_item *item;
  60. if (ptr && name)
  61. {
  62. item = MEMITEM(ptr);
  63. rt_memheap_setname(item, name);
  64. }
  65. }
  66. #endif
  67. /*
  68. * The initialized memory pool will be:
  69. * +-----------------------------------+--------------------------+
  70. * | whole freed memory block | Used Memory Block Tailer |
  71. * +-----------------------------------+--------------------------+
  72. *
  73. * block_list --> whole freed memory block
  74. *
  75. * The length of Used Memory Block Tailer is 0,
  76. * which is prevents block merging across list
  77. */
/**
 * Initialize a memheap object over the region [start_addr, start_addr + size).
 *
 * The pool is laid out as one big free block followed by a zero-length "used"
 * tailer block; the tailer stops the free-block merging logic from walking
 * past the end of the pool.
 *
 * @param memheap    the memheap object to initialize (must not be RT_NULL)
 * @param name       name for the kernel object and its lock semaphore
 * @param start_addr first byte of the managed region
 * @param size       region size in bytes (rounded down to RT_ALIGN_SIZE)
 *
 * @return RT_EOK on success
 */
rt_err_t rt_memheap_init(struct rt_memheap *memheap,
                         const char *name,
                         void *start_addr,
                         rt_size_t size)
{
    struct rt_memheap_item *item;

    RT_ASSERT(memheap != RT_NULL);

    /* initialize pool object */
    rt_object_init(&(memheap->parent), RT_Object_Class_MemHeap, name);

    memheap->start_addr = start_addr;
    memheap->pool_size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);
    /* two control headers are always consumed: the big block's and the tailer's */
    memheap->available_size = memheap->pool_size - (2 * RT_MEMHEAP_SIZE);
    memheap->max_used_size = memheap->pool_size - memheap->available_size;

    /* initialize the free list header — a sentinel node living inside the
     * memheap object itself, not inside the pool */
    item = &(memheap->free_header);
    item->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    item->pool_ptr = memheap;
    item->next = RT_NULL;
    item->prev = RT_NULL;
    item->next_free = item;
    item->prev_free = item;

    /* set the free list to free list header */
    memheap->free_list = item;

    /* initialize the first big memory block */
    item = (struct rt_memheap_item *)start_addr;
    item->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    item->pool_ptr = memheap;
    item->next = RT_NULL;
    item->prev = RT_NULL;
    item->next_free = item;
    item->prev_free = item;

#ifdef RT_USING_MEMTRACE
    rt_memset(item->owner_thread_name, ' ', sizeof(item->owner_thread_name));
#endif

    /* the big block's only neighbour is the tailer at the end of the pool */
    item->next = (struct rt_memheap_item *)
                 ((rt_uint8_t *)item + memheap->available_size + RT_MEMHEAP_SIZE);
    item->prev = item->next;

    /* block list header */
    memheap->block_list = item;

    /* place the big memory block to free list */
    item->next_free = memheap->free_list->next_free;
    item->prev_free = memheap->free_list;
    memheap->free_list->next_free->prev_free = item;
    memheap->free_list->next_free = item;

    /* move to the end of memory pool to build a small tailer block,
     * which prevents block merging
     */
    item = item->next;

    /* it's a used memory block */
    item->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED);
    item->pool_ptr = memheap;
    /* the tailer's neighbour links wrap back to the pool start */
    item->next = (struct rt_memheap_item *)start_addr;
    item->prev = (struct rt_memheap_item *)start_addr;
    /* not in free list */
    item->next_free = item->prev_free = RT_NULL;

    /* initialize semaphore lock */
    rt_sem_init(&(memheap->lock), name, 1, RT_IPC_FLAG_PRIO);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                 ("memory heap: start addr 0x%08x, size %d, free list header 0x%08x\n",
                  start_addr, size, &(memheap->free_header)));

    return RT_EOK;
}
  140. RTM_EXPORT(rt_memheap_init);
  141. rt_err_t rt_memheap_detach(struct rt_memheap *heap)
  142. {
  143. RT_ASSERT(heap);
  144. RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);
  145. RT_ASSERT(rt_object_is_systemobject(&heap->parent));
  146. rt_sem_detach(&heap->lock);
  147. rt_object_detach(&(heap->parent));
  148. /* Return a successful completion. */
  149. return RT_EOK;
  150. }
  151. RTM_EXPORT(rt_memheap_detach);
/**
 * Allocate @size bytes from @heap.
 *
 * The free list is scanned first-fit; a block large enough to leave at least
 * RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC spare bytes is split, otherwise the
 * whole block is handed out.
 *
 * @param heap the memheap to allocate from
 * @param size requested size in bytes (rounded up to RT_ALIGN_SIZE, with a
 *             floor of RT_MEMHEAP_MINIALLOC)
 * @return pointer to the user area of the block, or RT_NULL on failure
 *         (errno is set if taking the heap lock fails)
 */
void *rt_memheap_alloc(struct rt_memheap *heap, rt_size_t size)
{
    rt_err_t result;
    rt_uint32_t free_size;
    struct rt_memheap_item *header_ptr;

    RT_ASSERT(heap != RT_NULL);
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    /* align allocated size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);
    if (size < RT_MEMHEAP_MINIALLOC)
        size = RT_MEMHEAP_MINIALLOC;

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate %d on heap:%8.*s",
                                    size, RT_NAME_MAX, heap->parent.name));

    if (size < heap->available_size)
    {
        /* search on free list */
        free_size = 0;

        /* lock memheap */
        result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
        if (result != RT_EOK)
        {
            rt_set_errno(result);

            return RT_NULL;
        }

        /* get the first free memory block */
        header_ptr = heap->free_list->next_free;
        /* first-fit scan: stop at the sentinel or the first block that fits */
        while (header_ptr != heap->free_list && free_size < size)
        {
            /* get current freed memory block size */
            free_size = MEMITEM_SIZE(header_ptr);
            if (free_size < size)
            {
                /* move to next free memory block */
                header_ptr = header_ptr->next_free;
            }
        }

        /* determine if the memory is available. */
        if (free_size >= size)
        {
            /* a block that satisfies the request has been found. */

            /* determine if the block needs to be split. */
            if (free_size >= (size + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC))
            {
                struct rt_memheap_item *new_ptr;

                /* split the block. */
                new_ptr = (struct rt_memheap_item *)
                          (((rt_uint8_t *)header_ptr) + size + RT_MEMHEAP_SIZE);

                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
                              header_ptr,
                              header_ptr->next,
                              header_ptr->prev,
                              new_ptr));

                /* mark the new block as a memory block and freed. */
                new_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);

                /* put the pool pointer into the new block. */
                new_ptr->pool_ptr = heap;

#ifdef RT_USING_MEMTRACE
                rt_memset(new_ptr->owner_thread_name, ' ', sizeof(new_ptr->owner_thread_name));
#endif

                /* break down the block list */
                new_ptr->prev          = header_ptr;
                new_ptr->next          = header_ptr->next;
                header_ptr->next->prev = new_ptr;
                header_ptr->next       = new_ptr;

                /* remove header ptr from free list */
                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free = RT_NULL;
                header_ptr->prev_free = RT_NULL;

                /* insert new_ptr to free list */
                new_ptr->next_free = heap->free_list->next_free;
                new_ptr->prev_free = heap->free_list;
                heap->free_list->next_free->prev_free = new_ptr;
                heap->free_list->next_free            = new_ptr;
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x\n",
                                                new_ptr->next_free,
                                                new_ptr->prev_free));

                /* decrement the available byte count (the new split header
                 * also consumes RT_MEMHEAP_SIZE bytes). */
                heap->available_size = heap->available_size -
                                       size -
                                       RT_MEMHEAP_SIZE;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;
            }
            else
            {
                /* block too small to split: hand out the whole thing */

                /* decrement the entire free size from the available bytes count. */
                heap->available_size = heap->available_size - free_size;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;

                /* remove header_ptr from free list */
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("one block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x\n",
                              header_ptr,
                              header_ptr->next_free,
                              header_ptr->prev_free));

                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free = RT_NULL;
                header_ptr->prev_free = RT_NULL;
            }

            /* Mark the allocated block as not available. */
            header_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED);

#ifdef RT_USING_MEMTRACE
            if (rt_thread_self())
                rt_memcpy(header_ptr->owner_thread_name, rt_thread_self()->name, sizeof(header_ptr->owner_thread_name));
            else
                rt_memcpy(header_ptr->owner_thread_name, "NONE", sizeof(header_ptr->owner_thread_name));
#endif

            /* release lock */
            rt_sem_release(&(heap->lock));

            /* Return a memory address to the caller. */
            RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                         ("alloc mem: memory[0x%08x], heap[0x%08x], size: %d\n",
                          (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE),
                          header_ptr,
                          size));

            return (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE);
        }

        /* release lock */
        rt_sem_release(&(heap->lock));
    }

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate memory: failed\n"));

    /* Return the completion status. */
    return RT_NULL;
}
  279. RTM_EXPORT(rt_memheap_alloc);
  280. void *rt_memheap_realloc(struct rt_memheap *heap, void *ptr, rt_size_t newsize)
  281. {
  282. rt_err_t result;
  283. rt_size_t oldsize;
  284. struct rt_memheap_item *header_ptr;
  285. struct rt_memheap_item *new_ptr;
  286. RT_ASSERT(heap);
  287. RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);
  288. if (newsize == 0)
  289. {
  290. rt_memheap_free(ptr);
  291. return RT_NULL;
  292. }
  293. /* align allocated size */
  294. newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
  295. if (newsize < RT_MEMHEAP_MINIALLOC)
  296. newsize = RT_MEMHEAP_MINIALLOC;
  297. if (ptr == RT_NULL)
  298. {
  299. return rt_memheap_alloc(heap, newsize);
  300. }
  301. /* get memory block header and get the size of memory block */
  302. header_ptr = (struct rt_memheap_item *)
  303. ((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);
  304. oldsize = MEMITEM_SIZE(header_ptr);
  305. /* re-allocate memory */
  306. if (newsize > oldsize)
  307. {
  308. void *new_ptr;
  309. /* Fix the crash problem after opening Oz optimization on ac6 */
  310. volatile struct rt_memheap_item *next_ptr;
  311. /* lock memheap */
  312. result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
  313. if (result != RT_EOK)
  314. {
  315. rt_set_errno(result);
  316. return RT_NULL;
  317. }
  318. next_ptr = header_ptr->next;
  319. /* header_ptr should not be the tail */
  320. RT_ASSERT(next_ptr > header_ptr);
  321. /* check whether the following free space is enough to expand */
  322. if (!RT_MEMHEAP_IS_USED(next_ptr))
  323. {
  324. rt_int32_t nextsize;
  325. nextsize = MEMITEM_SIZE(next_ptr);
  326. RT_ASSERT(next_ptr > 0);
  327. /* Here is the ASCII art of the situation that we can make use of
  328. * the next free node without alloc/memcpy, |*| is the control
  329. * block:
  330. *
  331. * oldsize free node
  332. * |*|-----------|*|----------------------|*|
  333. * newsize >= minialloc
  334. * |*|----------------|*|-----------------|*|
  335. */
  336. if (nextsize + oldsize > newsize + RT_MEMHEAP_MINIALLOC)
  337. {
  338. /* decrement the entire free size from the available bytes count. */
  339. heap->available_size = heap->available_size - (newsize - oldsize);
  340. if (heap->pool_size - heap->available_size > heap->max_used_size)
  341. heap->max_used_size = heap->pool_size - heap->available_size;
  342. /* remove next_ptr from free list */
  343. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
  344. ("remove block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x",
  345. next_ptr,
  346. next_ptr->next_free,
  347. next_ptr->prev_free));
  348. next_ptr->next_free->prev_free = next_ptr->prev_free;
  349. next_ptr->prev_free->next_free = next_ptr->next_free;
  350. next_ptr->next->prev = next_ptr->prev;
  351. next_ptr->prev->next = next_ptr->next;
  352. /* build a new one on the right place */
  353. next_ptr = (struct rt_memheap_item *)((char *)ptr + newsize);
  354. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
  355. ("new free block: block[0x%08x] nextm[0x%08x] prevm[0x%08x]",
  356. next_ptr,
  357. next_ptr->next,
  358. next_ptr->prev));
  359. /* mark the new block as a memory block and freed. */
  360. next_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
  361. /* put the pool pointer into the new block. */
  362. next_ptr->pool_ptr = heap;
  363. #ifdef RT_USING_MEMTRACE
  364. rt_memset(next_ptr->owner_thread_name, ' ', sizeof(next_ptr->owner_thread_name));
  365. #endif
  366. next_ptr->prev = header_ptr;
  367. next_ptr->next = header_ptr->next;
  368. header_ptr->next->prev = (struct rt_memheap_item *)next_ptr;
  369. header_ptr->next = (struct rt_memheap_item *)next_ptr;
  370. /* insert next_ptr to free list */
  371. next_ptr->next_free = heap->free_list->next_free;
  372. next_ptr->prev_free = heap->free_list;
  373. heap->free_list->next_free->prev_free = (struct rt_memheap_item *)next_ptr;
  374. heap->free_list->next_free = (struct rt_memheap_item *)next_ptr;
  375. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x",
  376. next_ptr->next_free,
  377. next_ptr->prev_free));
  378. /* release lock */
  379. rt_sem_release(&(heap->lock));
  380. return ptr;
  381. }
  382. }
  383. /* release lock */
  384. rt_sem_release(&(heap->lock));
  385. /* re-allocate a memory block */
  386. new_ptr = (void *)rt_memheap_alloc(heap, newsize);
  387. if (new_ptr != RT_NULL)
  388. {
  389. rt_memcpy(new_ptr, ptr, oldsize < newsize ? oldsize : newsize);
  390. rt_memheap_free(ptr);
  391. }
  392. return new_ptr;
  393. }
  394. /* don't split when there is less than one node space left */
  395. if (newsize + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC >= oldsize)
  396. return ptr;
  397. /* lock memheap */
  398. result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
  399. if (result != RT_EOK)
  400. {
  401. rt_set_errno(result);
  402. return RT_NULL;
  403. }
  404. /* split the block. */
  405. new_ptr = (struct rt_memheap_item *)
  406. (((rt_uint8_t *)header_ptr) + newsize + RT_MEMHEAP_SIZE);
  407. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
  408. ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
  409. header_ptr,
  410. header_ptr->next,
  411. header_ptr->prev,
  412. new_ptr));
  413. /* mark the new block as a memory block and freed. */
  414. new_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
  415. /* put the pool pointer into the new block. */
  416. new_ptr->pool_ptr = heap;
  417. #ifdef RT_USING_MEMTRACE
  418. rt_memset(new_ptr->owner_thread_name, ' ', sizeof(new_ptr->owner_thread_name));
  419. #endif
  420. /* break down the block list */
  421. new_ptr->prev = header_ptr;
  422. new_ptr->next = header_ptr->next;
  423. header_ptr->next->prev = new_ptr;
  424. header_ptr->next = new_ptr;
  425. /* determine if the block can be merged with the next neighbor. */
  426. if (!RT_MEMHEAP_IS_USED(new_ptr->next))
  427. {
  428. struct rt_memheap_item *free_ptr;
  429. /* merge block with next neighbor. */
  430. free_ptr = new_ptr->next;
  431. heap->available_size = heap->available_size - MEMITEM_SIZE(free_ptr);
  432. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
  433. ("merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08x\n",
  434. header_ptr, header_ptr->next_free, header_ptr->prev_free));
  435. free_ptr->next->prev = new_ptr;
  436. new_ptr->next = free_ptr->next;
  437. /* remove free ptr from free list */
  438. free_ptr->next_free->prev_free = free_ptr->prev_free;
  439. free_ptr->prev_free->next_free = free_ptr->next_free;
  440. }
  441. /* insert the split block to free list */
  442. new_ptr->next_free = heap->free_list->next_free;
  443. new_ptr->prev_free = heap->free_list;
  444. heap->free_list->next_free->prev_free = new_ptr;
  445. heap->free_list->next_free = new_ptr;
  446. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new free ptr: next_free 0x%08x, prev_free 0x%08x\n",
  447. new_ptr->next_free,
  448. new_ptr->prev_free));
  449. /* increment the available byte count. */
  450. heap->available_size = heap->available_size + MEMITEM_SIZE(new_ptr);
  451. /* release lock */
  452. rt_sem_release(&(heap->lock));
  453. /* return the old memory block */
  454. return ptr;
  455. }
  456. RTM_EXPORT(rt_memheap_realloc);
/**
 * Return the block at @ptr to its owning memheap (found via the control
 * header's pool_ptr). Adjacent free neighbours are coalesced; the resulting
 * block is inserted at the head of the free list unless it was merged into
 * its left neighbour (which is already on the list).
 *
 * @param ptr user pointer previously returned by rt_memheap_alloc/realloc;
 *            RT_NULL is silently ignored
 */
void rt_memheap_free(void *ptr)
{
    rt_err_t result;
    struct rt_memheap *heap;
    struct rt_memheap_item *header_ptr, *new_ptr;
    rt_uint32_t insert_header;

    /* NULL check */
    if (ptr == RT_NULL) return;

    /* set initial status as OK */
    insert_header = 1;
    new_ptr       = RT_NULL;
    header_ptr    = (struct rt_memheap_item *)
                    ((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("free memory: memory[0x%08x], block[0x%08x]\n",
                                    ptr, header_ptr));

    /* check magic: catches double free and frees of non-heap pointers */
    if (header_ptr->magic != (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED))
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("bad magic:0x%08x @ memheap\n",
                                        header_ptr->magic));
    }
    RT_ASSERT(header_ptr->magic == (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED));
    /* check whether this block of memory has been over-written. */
    RT_ASSERT((header_ptr->next->magic & RT_MEMHEAP_MASK) == RT_MEMHEAP_MAGIC);

    /* get pool ptr */
    heap = header_ptr->pool_ptr;

    RT_ASSERT(heap);
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    /* lock memheap */
    result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(result);

        return ;
    }

    /* Mark the memory as available. */
    header_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    /* Adjust the available number of bytes. */
    heap->available_size += MEMITEM_SIZE(header_ptr);

    /* Determine if the block can be merged with the previous neighbor. */
    if (!RT_MEMHEAP_IS_USED(header_ptr->prev))
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("merge: left node 0x%08x\n",
                                        header_ptr->prev));

        /* the merged-away control header becomes usable space */
        heap->available_size += RT_MEMHEAP_SIZE;

        /* yes, merge block with previous neighbor. */
        (header_ptr->prev)->next = header_ptr->next;
        (header_ptr->next)->prev = header_ptr->prev;

        /* move header pointer to previous. */
        header_ptr = header_ptr->prev;
        /* don't insert header to free list: the left neighbour is already
         * on the free list */
        insert_header = 0;
    }

    /* determine if the block can be merged with the next neighbor. */
    if (!RT_MEMHEAP_IS_USED(header_ptr->next))
    {
        /* the merged-away control header becomes usable space */
        heap->available_size += RT_MEMHEAP_SIZE;

        /* merge block with next neighbor. */
        new_ptr = header_ptr->next;

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08x\n",
                      new_ptr, new_ptr->next_free, new_ptr->prev_free));

        new_ptr->next->prev = header_ptr;
        header_ptr->next    = new_ptr->next;

        /* remove new ptr from free list */
        new_ptr->next_free->prev_free = new_ptr->prev_free;
        new_ptr->prev_free->next_free = new_ptr->next_free;
    }

    if (insert_header)
    {
        /* no left merge, insert to free list */
        header_ptr->next_free = heap->free_list->next_free;
        header_ptr->prev_free = heap->free_list;
        heap->free_list->next_free->prev_free = header_ptr;
        heap->free_list->next_free            = header_ptr;

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("insert to free list: next_free 0x%08x, prev_free 0x%08x\n",
                      header_ptr->next_free, header_ptr->prev_free));
    }

#ifdef RT_USING_MEMTRACE
    rt_memset(header_ptr->owner_thread_name, ' ', sizeof(header_ptr->owner_thread_name));
#endif

    /* release lock */
    rt_sem_release(&(heap->lock));
}
  544. RTM_EXPORT(rt_memheap_free);
  545. #ifdef RT_USING_FINSH
  546. static void _memheap_dump_tag(struct rt_memheap_item *item)
  547. {
  548. rt_uint8_t name[2 * sizeof(void *)];
  549. rt_uint8_t *ptr;
  550. ptr = (rt_uint8_t *) & (item->next_free);
  551. rt_memcpy(name, ptr, sizeof(void *));
  552. ptr = (rt_uint8_t *) & (item->prev_free);
  553. rt_memcpy(&name[sizeof(void *)], ptr, sizeof(void *));
  554. rt_kprintf("%.*s", 2 * sizeof(void *), name);
  555. }
/**
 * Print every block of @heap: user address, payload size, and — for used
 * blocks — the tag stored by rt_memheap_setname(). Free blocks are marked
 * with "<F>".
 *
 * @param heap heap to dump; RT_NULL is silently ignored
 * @return 0 always
 */
int rt_memheap_dump(struct rt_memheap *heap)
{
    struct rt_memheap_item *item, *end;

    if (heap == RT_NULL) return 0;
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    rt_kprintf("\n[%.*s] [0x%08x - 0x%08x]->\n", RT_NAME_MAX, heap->parent.name,
               (rt_ubase_t)heap->start_addr, (rt_ubase_t)heap->start_addr + heap->pool_size);
    rt_kprintf("------------------------------\n");

    /* lock memheap */
    rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);

    item = heap->block_list;
    /* the tailer block sits RT_MEMHEAP_SIZE bytes before the pool end */
    end  = (struct rt_memheap_item *)((rt_uint8_t *)heap->start_addr + heap->pool_size - RT_MEMHEAP_SIZE);

    /* for each memory block */
    while ((rt_ubase_t)item < ((rt_ubase_t)end))
    {
        /* an in-use block whose magic is broken indicates corruption */
        if (RT_MEMHEAP_IS_USED(item) && ((item->magic & RT_MEMHEAP_MASK) != RT_MEMHEAP_MAGIC))
            rt_kprintf("0x%08x", item + 1);

        if (item->magic == (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED))
        {
            rt_kprintf("0x%08x: %-8d ", item + 1, MEMITEM_SIZE(item));
            _memheap_dump_tag(item);
            rt_kprintf("\n");
        }
        else
        {
            rt_kprintf("0x%08x: %-8d <F>\n", item + 1, MEMITEM_SIZE(item));
        }

        item = item->next;
    }
    rt_sem_release(&(heap->lock));

    return 0;
}
  588. int memheaptrace(void)
  589. {
  590. int count = rt_object_get_length(RT_Object_Class_MemHeap);
  591. struct rt_memheap **heaps;
  592. if (count > 0)
  593. {
  594. int index;
  595. extern int list_memheap(void);
  596. heaps = (struct rt_memheap **)rt_malloc(sizeof(struct rt_memheap *) * count);
  597. if (heaps == RT_NULL) return 0;
  598. list_memheap();
  599. rt_kprintf("memheap header size: %d\n", RT_MEMHEAP_SIZE);
  600. count = rt_object_get_pointers(RT_Object_Class_MemHeap, (rt_object_t *)heaps, count);
  601. for (index = 0; index < count; index++)
  602. {
  603. rt_memheap_dump(heaps[index]);
  604. }
  605. rt_free(heaps);
  606. }
  607. return 0;
  608. }
  609. MSH_CMD_EXPORT(memheaptrace, dump memory trace information);
  610. #endif
  611. #ifdef RT_USING_MEMHEAP_AS_HEAP
  612. static struct rt_memheap _heap;
  613. void rt_system_heap_init(void *begin_addr, void *end_addr)
  614. {
  615. /* initialize a default heap in the system */
  616. rt_memheap_init(&_heap,
  617. "heap",
  618. begin_addr,
  619. (rt_uint32_t)end_addr - (rt_uint32_t)begin_addr);
  620. }
/**
 * Allocate @size bytes: first from the default system heap, then — if that
 * fails — from any other registered memheap object.
 *
 * @param size requested size in bytes
 * @return pointer to allocated memory, or RT_NULL when every heap is exhausted
 */
void *rt_malloc(rt_size_t size)
{
    void *ptr;

    /* try to allocate in system heap */
    ptr = rt_memheap_alloc(&_heap, size);
    if (ptr == RT_NULL)
    {
        struct rt_object *object;
        struct rt_list_node *node;
        struct rt_memheap *heap;
        struct rt_object_information *information;

        /* try to allocate on other memory heap */
        information = rt_object_get_information(RT_Object_Class_MemHeap);
        RT_ASSERT(information != RT_NULL);
        for (node  = information->object_list.next;
             node != &(information->object_list);
             node  = node->next)
        {
            object = rt_list_entry(node, struct rt_object, list);
            heap = (struct rt_memheap *)object;

            RT_ASSERT(heap);
            RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

            /* not allocate in the default system heap (already tried above) */
            if (heap == &_heap)
                continue;

            ptr = rt_memheap_alloc(heap, size);
            if (ptr != RT_NULL)
                break;
        }
    }

#ifdef RT_USING_MEMTRACE
    if (ptr == RT_NULL)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("malloc[%d] => NULL", size));
    }
    else
    {
        /* tag the block with the allocating thread's name */
        struct rt_memheap_item *item = MEMITEM(ptr);
        if (rt_thread_self())
            rt_memheap_setname(item, rt_thread_self()->name);
        else
            rt_memheap_setname(item, "<null>");
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("malloc => 0x%08x : %d", ptr, size));
    }
#endif

    return ptr;
}
  668. RTM_EXPORT(rt_malloc);
  669. void rt_free(void *rmem)
  670. {
  671. rt_memheap_free(rmem);
  672. }
  673. RTM_EXPORT(rt_free);
  674. void *rt_realloc(void *rmem, rt_size_t newsize)
  675. {
  676. void *new_ptr;
  677. struct rt_memheap_item *header_ptr;
  678. if (rmem == RT_NULL)
  679. return rt_malloc(newsize);
  680. if (newsize == 0)
  681. {
  682. rt_free(rmem);
  683. return RT_NULL;
  684. }
  685. /* get old memory item */
  686. header_ptr = (struct rt_memheap_item *)
  687. ((rt_uint8_t *)rmem - RT_MEMHEAP_SIZE);
  688. new_ptr = rt_memheap_realloc(header_ptr->pool_ptr, rmem, newsize);
  689. if (new_ptr == RT_NULL && newsize != 0)
  690. {
  691. /* allocate memory block from other memheap */
  692. new_ptr = rt_malloc(newsize);
  693. if (new_ptr != RT_NULL && rmem != RT_NULL)
  694. {
  695. rt_size_t oldsize;
  696. /* get the size of old memory block */
  697. oldsize = MEMITEM_SIZE(header_ptr);
  698. if (newsize > oldsize)
  699. rt_memcpy(new_ptr, rmem, oldsize);
  700. else
  701. rt_memcpy(new_ptr, rmem, newsize);
  702. rt_free(rmem);
  703. }
  704. }
  705. #ifdef RT_USING_MEMTRACE
  706. if (new_ptr == RT_NULL)
  707. {
  708. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("realloc[%d] => NULL", newsize));
  709. }
  710. else
  711. {
  712. struct rt_memheap_item *item = MEMITEM(new_ptr);
  713. if (rt_thread_self())
  714. rt_memheap_setname(item, rt_thread_self()->name);
  715. else
  716. rt_memheap_setname(item, "<null>");
  717. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("realloc => 0x%08x : %d",
  718. new_ptr, newsize));
  719. }
  720. #endif
  721. return new_ptr;
  722. }
  723. RTM_EXPORT(rt_realloc);
  724. void *rt_calloc(rt_size_t count, rt_size_t size)
  725. {
  726. void *ptr;
  727. rt_size_t total_size;
  728. total_size = count * size;
  729. ptr = rt_malloc(total_size);
  730. if (ptr != RT_NULL)
  731. {
  732. /* clean memory */
  733. rt_memset(ptr, 0, total_size);
  734. }
  735. #ifdef RT_USING_MEMTRACE
  736. if (ptr == RT_NULL)
  737. {
  738. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("calloc[%d x %d] => NULL",
  739. count, size));
  740. }
  741. else
  742. {
  743. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("calloc => 0x%08x : %d",
  744. ptr, count * size));
  745. }
  746. #endif
  747. return ptr;
  748. }
  749. RTM_EXPORT(rt_calloc);
  750. void rt_memory_info(rt_uint32_t *total,
  751. rt_uint32_t *used,
  752. rt_uint32_t *max_used)
  753. {
  754. if (total != RT_NULL)
  755. *total = _heap.pool_size;
  756. if (used != RT_NULL)
  757. *used = _heap.pool_size - _heap.available_size;
  758. if (max_used != RT_NULL)
  759. *max_used = _heap.max_used_size;
  760. }
  761. #endif
  762. #ifdef RT_USING_MEMTRACE
  763. void dump_used_memheap(struct rt_memheap *mh)
  764. {
  765. struct rt_memheap_item *header_ptr;
  766. rt_uint32_t block_size;
  767. rt_kprintf("\nmemory heap address:\n");
  768. rt_kprintf("heap_ptr: 0x%08x\n", mh->start_addr);
  769. rt_kprintf("free : 0x%08x\n", mh->available_size);
  770. rt_kprintf("max_used: 0x%08x\n", mh->max_used_size);
  771. rt_kprintf("size : 0x%08x\n", mh->pool_size);
  772. rt_kprintf("\n--memory used information --\n");
  773. header_ptr = mh->block_list;
  774. while (header_ptr->next != mh->block_list)
  775. {
  776. if ((header_ptr->magic & RT_MEMHEAP_MASK) != RT_MEMHEAP_MAGIC)
  777. {
  778. rt_kprintf("[0x%08x - incorrect magic: 0x%08x\n", header_ptr, header_ptr->magic);
  779. break;
  780. }
  781. /* get current memory block size */
  782. block_size = MEMITEM_SIZE(header_ptr);
  783. if (block_size < 0)
  784. break;
  785. if (RT_MEMHEAP_IS_USED(header_ptr))
  786. {
  787. /* dump information */
  788. rt_kprintf("[0x%08x - %d - %c%c%c%c] used\n", header_ptr, block_size,
  789. header_ptr->owner_thread_name[0], header_ptr->owner_thread_name[1],
  790. header_ptr->owner_thread_name[2], header_ptr->owner_thread_name[3]);
  791. }
  792. else
  793. {
  794. /* dump information */
  795. rt_kprintf("[0x%08x - %d - %c%c%c%c] free\n", header_ptr, block_size,
  796. header_ptr->owner_thread_name[0], header_ptr->owner_thread_name[1],
  797. header_ptr->owner_thread_name[2], header_ptr->owner_thread_name[3]);
  798. }
  799. /* move to next used memory block */
  800. header_ptr = header_ptr->next;
  801. }
  802. }
  803. void memtrace_heap()
  804. {
  805. struct rt_object_information *info;
  806. struct rt_list_node *list;
  807. struct rt_memheap *mh;
  808. struct rt_list_node *node;
  809. info = rt_object_get_information(RT_Object_Class_MemHeap);
  810. list = &info->object_list;
  811. for (node = list->next; node != list; node = node->next)
  812. {
  813. mh = (struct rt_memheap *)rt_list_entry(node, struct rt_object, list);
  814. dump_used_memheap(mh);
  815. }
  816. }
  817. #ifdef RT_USING_FINSH
  818. #include <finsh.h>
  819. MSH_CMD_EXPORT(memtrace_heap, dump memory trace for heap);
  820. #endif /* end of RT_USING_FINSH */
  821. #endif /* end of RT_USING_MEMTRACE */
  822. #endif /* end of RT_USING_MEMHEAP */