/* memheap.c — RT-Thread memory heap implementation */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. /*
  7. * File : memheap.c
  8. *
  9. * Change Logs:
  10. * Date Author Notes
  11. * 2012-04-10 Bernard first implementation
  12. * 2012-10-16 Bernard add the mutex lock for heap object.
  13. * 2012-12-29 Bernard memheap can be used as system heap.
  14. * change mutex lock to semaphore lock.
  15. * 2013-04-10 Bernard add rt_memheap_realloc function.
  16. * 2013-05-24 Bernard fix the rt_memheap_realloc issue.
  17. * 2013-07-11 Grissiom fix the memory block splitting issue.
  18. * 2013-07-15 Grissiom optimize rt_memheap_realloc
  19. * 2021-06-03 Flybreak Fix the crash problem after opening Oz optimization on ac6.
  20. */
  21. #include <rthw.h>
  22. #include <rtthread.h>
  23. #ifdef RT_USING_MEMHEAP
  24. /* dynamic pool magic and mask */
  25. #define RT_MEMHEAP_MAGIC 0x1ea01ea0
  26. #define RT_MEMHEAP_MASK 0xfffffffe
  27. #define RT_MEMHEAP_USED 0x01
  28. #define RT_MEMHEAP_FREED 0x00
  29. #define RT_MEMHEAP_IS_USED(i) ((i)->magic & RT_MEMHEAP_USED)
  30. #define RT_MEMHEAP_MINIALLOC 12
  31. #define RT_MEMHEAP_SIZE RT_ALIGN(sizeof(struct rt_memheap_item), RT_ALIGN_SIZE)
  32. #define MEMITEM_SIZE(item) ((rt_ubase_t)item->next - (rt_ubase_t)item - RT_MEMHEAP_SIZE)
  33. #define MEMITEM(ptr) (struct rt_memheap_item*)((rt_uint8_t*)ptr - RT_MEMHEAP_SIZE)
  34. #ifdef RT_USING_MEMTRACE
/*
 * Pack a short name tag into an allocated block's header.
 *
 * While a block is in use its next_free/prev_free pointers carry no
 * information, so their storage (2 * sizeof(void *) bytes total) is
 * reused as an inline tag buffer. Up to sizeof(void *) bytes go into
 * next_free, any remainder into prev_free, bounded by RT_NAME_MAX.
 */
rt_inline void rt_memheap_setname(struct rt_memheap_item *item, const char *name)
{
    int index;
    rt_uint8_t *ptr;

    /* first half of the tag lives in the next_free slot */
    ptr = (rt_uint8_t *) & (item->next_free);
    for (index = 0; index < sizeof(void *); index ++)
    {
        if (name[index] == '\0') break;
        ptr[index] = name[index];
    }
    /* NOTE(review): when the loop completes without a break, index equals
     * sizeof(void *) and this write lands in the first byte of prev_free;
     * that appears intentional (the two fields form one contiguous buffer),
     * but it relies on the struct layout — confirm next_free and prev_free
     * are adjacent in struct rt_memheap_item. */
    if (name[index] == '\0') ptr[index] = '\0';
    else
    {
        /* name longer than one pointer: continue into the prev_free slot,
         * never exceeding RT_NAME_MAX characters overall */
        ptr = (rt_uint8_t *) & (item->prev_free);
        for (index = 0; index < sizeof(void *) && (index + sizeof(void *)) < RT_NAME_MAX; index ++)
        {
            if (name[sizeof(void *) + index] == '\0') break;
            ptr[index] = name[sizeof(void *) + index];
        }
        if (name[sizeof(void *) + index] == '\0') ptr[index] = '\0';
    }
}
  57. void rt_mem_set_tag(void *ptr, const char *name)
  58. {
  59. struct rt_memheap_item *item;
  60. if (ptr && name)
  61. {
  62. item = MEMITEM(ptr);
  63. rt_memheap_setname(item, name);
  64. }
  65. }
  66. #endif /* RT_USING_MEMTRACE */
  67. /*
  68. * The initialized memory pool will be:
  69. * +-----------------------------------+--------------------------+
  70. * | whole freed memory block | Used Memory Block Tailer |
  71. * +-----------------------------------+--------------------------+
  72. *
  73. * block_list --> whole freed memory block
  74. *
  75. * The length of Used Memory Block Tailer is 0,
  76. * which is prevents block merging across list
  77. */
/*
 * Initialize a memory heap object on a given region [start_addr, start_addr + size).
 *
 * Layout after initialization:
 *   one big free block covering the whole pool, followed by a zero-length
 *   "used" tailer block that stops merging past the end of the pool.
 * The free list is a circular doubly linked list anchored at
 * memheap->free_header. Returns RT_EOK.
 */
rt_err_t rt_memheap_init(struct rt_memheap *memheap,
                         const char *name,
                         void *start_addr,
                         rt_size_t size)
{
    struct rt_memheap_item *item;

    RT_ASSERT(memheap != RT_NULL);

    /* initialize pool object */
    rt_object_init(&(memheap->parent), RT_Object_Class_MemHeap, name);

    memheap->start_addr = start_addr;
    /* pool size is rounded DOWN so the tailer always fits */
    memheap->pool_size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);
    /* two headers are overhead: the big block's and the tailer's */
    memheap->available_size = memheap->pool_size - (2 * RT_MEMHEAP_SIZE);
    memheap->max_used_size = memheap->pool_size - memheap->available_size;

    /* initialize the free list header (self-linked sentinel node) */
    item = &(memheap->free_header);
    item->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    item->pool_ptr = memheap;
    item->next = RT_NULL;
    item->prev = RT_NULL;
    item->next_free = item;
    item->prev_free = item;

    /* set the free list to free list header */
    memheap->free_list = item;

    /* initialize the first big memory block */
    item = (struct rt_memheap_item *)start_addr;
    item->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    item->pool_ptr = memheap;
    item->next = RT_NULL;
    item->prev = RT_NULL;
    item->next_free = item;
    item->prev_free = item;
#ifdef RT_USING_MEMTRACE
    rt_memset(item->owner_thread_name, ' ', sizeof(item->owner_thread_name));
#endif /* RT_USING_MEMTRACE */

    /* big block's next is the tailer at the end of the available area */
    item->next = (struct rt_memheap_item *)
                 ((rt_uint8_t *)item + memheap->available_size + RT_MEMHEAP_SIZE);
    item->prev = item->next;

    /* block list header */
    memheap->block_list = item;

    /* place the big memory block at the front of the free list */
    item->next_free = memheap->free_list->next_free;
    item->prev_free = memheap->free_list;
    memheap->free_list->next_free->prev_free = item;
    memheap->free_list->next_free = item;

    /* move to the end of memory pool to build a small tailer block,
     * which prevents block merging
     */
    item = item->next;
    /* it's a used memory block (zero payload, never allocated or freed) */
    item->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED);
    item->pool_ptr = memheap;
    /* block list is circular: tailer links back to the first block */
    item->next = (struct rt_memheap_item *)start_addr;
    item->prev = (struct rt_memheap_item *)start_addr;
    /* not in free list */
    item->next_free = item->prev_free = RT_NULL;

    /* initialize semaphore lock (binary, priority queuing) */
    rt_sem_init(&(memheap->lock), name, 1, RT_IPC_FLAG_PRIO);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                 ("memory heap: start addr 0x%08x, size %d, free list header 0x%08x\n",
                  start_addr, size, &(memheap->free_header)));

    return RT_EOK;
}
RTM_EXPORT(rt_memheap_init);
  141. rt_err_t rt_memheap_detach(struct rt_memheap *heap)
  142. {
  143. RT_ASSERT(heap);
  144. RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);
  145. RT_ASSERT(rt_object_is_systemobject(&heap->parent));
  146. rt_sem_detach(&heap->lock);
  147. rt_object_detach(&(heap->parent));
  148. /* Return a successful completion. */
  149. return RT_EOK;
  150. }
  151. RTM_EXPORT(rt_memheap_detach);
/*
 * Allocate 'size' bytes from 'heap'.
 *
 * First-fit search over the free list; a found block is split when the
 * remainder can hold a header plus a minimal allocation, otherwise the
 * whole block is handed out. Returns a pointer to the payload (just past
 * the block header) or RT_NULL on failure (errno set on lock failure).
 */
void *rt_memheap_alloc(struct rt_memheap *heap, rt_size_t size)
{
    rt_err_t result;
    rt_uint32_t free_size;
    struct rt_memheap_item *header_ptr;

    RT_ASSERT(heap != RT_NULL);
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    /* align allocated size, and never allocate below the minimum */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);
    if (size < RT_MEMHEAP_MINIALLOC)
        size = RT_MEMHEAP_MINIALLOC;

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate %d on heap:%8.*s",
                                    size, RT_NAME_MAX, heap->parent.name));

    if (size < heap->available_size)
    {
        /* search on free list */
        free_size = 0;

        /* lock memheap */
        result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
        if (result != RT_EOK)
        {
            rt_set_errno(result);

            return RT_NULL;
        }

        /* first-fit scan: stop at the sentinel or the first block big enough */
        header_ptr = heap->free_list->next_free;
        while (header_ptr != heap->free_list && free_size < size)
        {
            /* get current freed memory block size */
            free_size = MEMITEM_SIZE(header_ptr);
            if (free_size < size)
            {
                /* move to next free memory block */
                header_ptr = header_ptr->next_free;
            }
        }

        /* determine if the memory is available. */
        if (free_size >= size)
        {
            /* a block that satisfies the request has been found. */

            /* split only when the remainder can hold a header plus a
             * minimal allocation; otherwise give out the whole block */
            if (free_size >= (size + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC))
            {
                struct rt_memheap_item *new_ptr;

                /* split the block. */
                new_ptr = (struct rt_memheap_item *)
                          (((rt_uint8_t *)header_ptr) + size + RT_MEMHEAP_SIZE);

                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
                              header_ptr,
                              header_ptr->next,
                              header_ptr->prev,
                              new_ptr));

                /* mark the new block as a memory block and freed. */
                new_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);

                /* put the pool pointer into the new block. */
                new_ptr->pool_ptr = heap;
#ifdef RT_USING_MEMTRACE
                rt_memset(new_ptr->owner_thread_name, ' ', sizeof(new_ptr->owner_thread_name));
#endif /* RT_USING_MEMTRACE */

                /* link the remainder into the block list after header_ptr */
                new_ptr->prev = header_ptr;
                new_ptr->next = header_ptr->next;
                header_ptr->next->prev = new_ptr;
                header_ptr->next = new_ptr;

                /* remove header ptr from free list */
                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free = RT_NULL;
                header_ptr->prev_free = RT_NULL;

                /* insert new_ptr at the front of the free list */
                new_ptr->next_free = heap->free_list->next_free;
                new_ptr->prev_free = heap->free_list;
                heap->free_list->next_free->prev_free = new_ptr;
                heap->free_list->next_free = new_ptr;
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x\n",
                                                new_ptr->next_free,
                                                new_ptr->prev_free));

                /* decrement the available byte count (payload + new header). */
                heap->available_size = heap->available_size -
                                       size -
                                       RT_MEMHEAP_SIZE;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;
            }
            else
            {
                /* decrement the entire free size from the available bytes count. */
                heap->available_size = heap->available_size - free_size;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;

                /* remove header_ptr from free list */
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("one block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x\n",
                              header_ptr,
                              header_ptr->next_free,
                              header_ptr->prev_free));

                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free = RT_NULL;
                header_ptr->prev_free = RT_NULL;
            }

            /* Mark the allocated block as not available. */
            header_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED);
#ifdef RT_USING_MEMTRACE
            if (rt_thread_self())
                rt_memcpy(header_ptr->owner_thread_name, rt_thread_self()->name, sizeof(header_ptr->owner_thread_name));
            else
                rt_memcpy(header_ptr->owner_thread_name, "NONE", sizeof(header_ptr->owner_thread_name));
#endif /* RT_USING_MEMTRACE */

            /* release lock */
            rt_sem_release(&(heap->lock));

            /* Return a memory address to the caller. */
            RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                         ("alloc mem: memory[0x%08x], heap[0x%08x], size: %d\n",
                          (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE),
                          header_ptr,
                          size));

            return (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE);
        }

        /* release lock */
        rt_sem_release(&(heap->lock));
    }

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate memory: failed\n"));

    /* Return the completion status. */
    return RT_NULL;
}
RTM_EXPORT(rt_memheap_alloc);
/*
 * Resize a block that belongs to 'heap'.
 *
 * Semantics: newsize == 0 frees and returns RT_NULL; ptr == RT_NULL is a
 * plain allocation. Growing first tries to absorb the adjacent free block
 * in place, else falls back to alloc+copy+free. Shrinking splits the tail
 * off as a new free block (and merges it with the next neighbor if free),
 * unless the tail is too small to hold a node.
 */
void *rt_memheap_realloc(struct rt_memheap *heap, void *ptr, rt_size_t newsize)
{
    rt_err_t result;
    rt_size_t oldsize;
    struct rt_memheap_item *header_ptr;
    struct rt_memheap_item *new_ptr;

    RT_ASSERT(heap);
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    if (newsize == 0)
    {
        /* realloc(ptr, 0) behaves as free */
        rt_memheap_free(ptr);

        return RT_NULL;
    }
    /* align allocated size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize < RT_MEMHEAP_MINIALLOC)
        newsize = RT_MEMHEAP_MINIALLOC;

    if (ptr == RT_NULL)
    {
        /* realloc(NULL, n) behaves as malloc */
        return rt_memheap_alloc(heap, newsize);
    }

    /* get memory block header and get the size of memory block */
    header_ptr = (struct rt_memheap_item *)
                 ((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);
    oldsize = MEMITEM_SIZE(header_ptr);
    /* re-allocate memory */
    if (newsize > oldsize)
    {
        /* NOTE(review): this inner 'new_ptr' (void *) shadows the outer
         * struct pointer declared above; it is what the fallback path
         * below returns. */
        void *new_ptr;
        /* Fix the crash problem after opening Oz optimization on ac6 */
        volatile struct rt_memheap_item *next_ptr;

        /* lock memheap */
        result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
        if (result != RT_EOK)
        {
            rt_set_errno(result);
            return RT_NULL;
        }

        next_ptr = header_ptr->next;

        /* header_ptr should not be the tail */
        RT_ASSERT(next_ptr > header_ptr);

        /* check whether the following free space is enough to expand */
        if (!RT_MEMHEAP_IS_USED(next_ptr))
        {
            rt_int32_t nextsize;

            nextsize = MEMITEM_SIZE(next_ptr);
            RT_ASSERT(next_ptr > 0);

            /* Here is the ASCII art of the situation that we can make use of
             * the next free node without alloc/memcpy, |*| is the control
             * block:
             *
             *      oldsize           free node
             * |*|-----------|*|----------------------|*|
             *         newsize          >= minialloc
             * |*|----------------|*|-----------------|*|
             */
            if (nextsize + oldsize > newsize + RT_MEMHEAP_MINIALLOC)
            {
                /* decrement the entire free size from the available bytes count. */
                heap->available_size = heap->available_size - (newsize - oldsize);
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;

                /* remove next_ptr from free list */
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("remove block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x",
                              next_ptr,
                              next_ptr->next_free,
                              next_ptr->prev_free));

                next_ptr->next_free->prev_free = next_ptr->prev_free;
                next_ptr->prev_free->next_free = next_ptr->next_free;
                next_ptr->next->prev = next_ptr->prev;
                next_ptr->prev->next = next_ptr->next;

                /* build a new one on the right place */
                next_ptr = (struct rt_memheap_item *)((char *)ptr + newsize);

                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("new free block: block[0x%08x] nextm[0x%08x] prevm[0x%08x]",
                              next_ptr,
                              next_ptr->next,
                              next_ptr->prev));

                /* mark the new block as a memory block and freed. */
                next_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);

                /* put the pool pointer into the new block. */
                next_ptr->pool_ptr = heap;
#ifdef RT_USING_MEMTRACE
                rt_memset((void *)next_ptr->owner_thread_name, ' ', sizeof(next_ptr->owner_thread_name));
#endif /* RT_USING_MEMTRACE */

                /* link the shrunken free node back into the block list */
                next_ptr->prev = header_ptr;
                next_ptr->next = header_ptr->next;
                header_ptr->next->prev = (struct rt_memheap_item *)next_ptr;
                header_ptr->next = (struct rt_memheap_item *)next_ptr;

                /* insert next_ptr to free list */
                next_ptr->next_free = heap->free_list->next_free;
                next_ptr->prev_free = heap->free_list;
                heap->free_list->next_free->prev_free = (struct rt_memheap_item *)next_ptr;
                heap->free_list->next_free = (struct rt_memheap_item *)next_ptr;
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x",
                                                next_ptr->next_free,
                                                next_ptr->prev_free));

                /* release lock */
                rt_sem_release(&(heap->lock));

                /* grown in place: payload address is unchanged */
                return ptr;
            }
        }

        /* release lock */
        rt_sem_release(&(heap->lock));

        /* fall back: re-allocate a memory block and copy the payload */
        new_ptr = (void *)rt_memheap_alloc(heap, newsize);
        if (new_ptr != RT_NULL)
        {
            rt_memcpy(new_ptr, ptr, oldsize < newsize ? oldsize : newsize);
            rt_memheap_free(ptr);
        }

        return new_ptr;
    }

    /* don't split when there is less than one node space left */
    if (newsize + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC >= oldsize)
        return ptr;

    /* lock memheap */
    result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(result);

        return RT_NULL;
    }

    /* shrink: split the tail of the block off as a free node */
    new_ptr = (struct rt_memheap_item *)
              (((rt_uint8_t *)header_ptr) + newsize + RT_MEMHEAP_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                 ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
                  header_ptr,
                  header_ptr->next,
                  header_ptr->prev,
                  new_ptr));

    /* mark the new block as a memory block and freed. */
    new_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    /* put the pool pointer into the new block. */
    new_ptr->pool_ptr = heap;
#ifdef RT_USING_MEMTRACE
    rt_memset(new_ptr->owner_thread_name, ' ', sizeof(new_ptr->owner_thread_name));
#endif /* RT_USING_MEMTRACE */

    /* break down the block list */
    new_ptr->prev = header_ptr;
    new_ptr->next = header_ptr->next;
    header_ptr->next->prev = new_ptr;
    header_ptr->next = new_ptr;

    /* determine if the block can be merged with the next neighbor. */
    if (!RT_MEMHEAP_IS_USED(new_ptr->next))
    {
        struct rt_memheap_item *free_ptr;

        /* merge block with next neighbor. */
        free_ptr = new_ptr->next;
        /* temporarily subtract the neighbor's size; the merged total is
         * added back below via MEMITEM_SIZE(new_ptr) */
        heap->available_size = heap->available_size - MEMITEM_SIZE(free_ptr);

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08x\n",
                      header_ptr, header_ptr->next_free, header_ptr->prev_free));

        free_ptr->next->prev = new_ptr;
        new_ptr->next = free_ptr->next;

        /* remove free ptr from free list */
        free_ptr->next_free->prev_free = free_ptr->prev_free;
        free_ptr->prev_free->next_free = free_ptr->next_free;
    }

    /* insert the split block to free list */
    new_ptr->next_free = heap->free_list->next_free;
    new_ptr->prev_free = heap->free_list;
    heap->free_list->next_free->prev_free = new_ptr;
    heap->free_list->next_free = new_ptr;
    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new free ptr: next_free 0x%08x, prev_free 0x%08x\n",
                                    new_ptr->next_free,
                                    new_ptr->prev_free));

    /* increment the available byte count. */
    heap->available_size = heap->available_size + MEMITEM_SIZE(new_ptr);

    /* release lock */
    rt_sem_release(&(heap->lock));

    /* return the old memory block */
    return ptr;
}
RTM_EXPORT(rt_memheap_realloc);
/*
 * Return a block to its owning heap (found via the header's pool_ptr).
 *
 * Coalesces with the previous and/or next neighbor when they are free;
 * the block is inserted at the front of the free list only when it was
 * not merged into its (already listed) left neighbor. RT_NULL is a no-op.
 */
void rt_memheap_free(void *ptr)
{
    rt_err_t result;
    struct rt_memheap *heap;
    struct rt_memheap_item *header_ptr, *new_ptr;
    rt_uint32_t insert_header;

    /* NULL check */
    if (ptr == RT_NULL) return;

    /* set initial status as OK */
    insert_header = 1;
    new_ptr       = RT_NULL;
    header_ptr    = (struct rt_memheap_item *)
                    ((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("free memory: memory[0x%08x], block[0x%08x]\n",
                                    ptr, header_ptr));

    /* check magic: catches double free and wild pointers */
    if (header_ptr->magic != (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED))
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("bad magic:0x%08x @ memheap\n",
                                        header_ptr->magic));
    }
    RT_ASSERT(header_ptr->magic == (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED));
    /* check whether this block of memory has been over-written. */
    RT_ASSERT((header_ptr->next->magic & RT_MEMHEAP_MASK) == RT_MEMHEAP_MAGIC);

    /* get pool ptr */
    heap = header_ptr->pool_ptr;

    RT_ASSERT(heap);
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    /* lock memheap */
    result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(result);

        return ;
    }

    /* Mark the memory as available. */
    header_ptr->magic = (RT_MEMHEAP_MAGIC | RT_MEMHEAP_FREED);
    /* Adjust the available number of bytes. */
    heap->available_size += MEMITEM_SIZE(header_ptr);

    /* Determine if the block can be merged with the previous neighbor. */
    if (!RT_MEMHEAP_IS_USED(header_ptr->prev))
    {
        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("merge: left node 0x%08x\n",
                                        header_ptr->prev));

        /* merging reclaims this block's header as payload */
        heap->available_size += RT_MEMHEAP_SIZE;

        /* yes, merge block with previous neighbor. */
        (header_ptr->prev)->next = header_ptr->next;
        (header_ptr->next)->prev = header_ptr->prev;

        /* move header pointer to previous. */
        header_ptr = header_ptr->prev;
        /* the left neighbor is already on the free list,
         * don't insert header to free list */
        insert_header = 0;
    }

    /* determine if the block can be merged with the next neighbor. */
    if (!RT_MEMHEAP_IS_USED(header_ptr->next))
    {
        /* the next neighbor's header is reclaimed as payload */
        heap->available_size += RT_MEMHEAP_SIZE;

        /* merge block with next neighbor. */
        new_ptr = header_ptr->next;

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08x\n",
                      new_ptr, new_ptr->next_free, new_ptr->prev_free));

        new_ptr->next->prev = header_ptr;
        header_ptr->next    = new_ptr->next;

        /* remove new ptr from free list */
        new_ptr->next_free->prev_free = new_ptr->prev_free;
        new_ptr->prev_free->next_free = new_ptr->next_free;
    }

    if (insert_header)
    {
        /* no left merge, insert to free list */
        header_ptr->next_free = heap->free_list->next_free;
        header_ptr->prev_free = heap->free_list;
        heap->free_list->next_free->prev_free = header_ptr;
        heap->free_list->next_free            = header_ptr;

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("insert to free list: next_free 0x%08x, prev_free 0x%08x\n",
                      header_ptr->next_free, header_ptr->prev_free));
    }

#ifdef RT_USING_MEMTRACE
    rt_memset(header_ptr->owner_thread_name, ' ', sizeof(header_ptr->owner_thread_name));
#endif /* RT_USING_MEMTRACE */

    /* release lock */
    rt_sem_release(&(heap->lock));
}
RTM_EXPORT(rt_memheap_free);
  545. #ifdef RT_USING_FINSH
  546. static void _memheap_dump_tag(struct rt_memheap_item *item)
  547. {
  548. rt_uint8_t name[2 * sizeof(void *)];
  549. rt_uint8_t *ptr;
  550. ptr = (rt_uint8_t *) & (item->next_free);
  551. rt_memcpy(name, ptr, sizeof(void *));
  552. ptr = (rt_uint8_t *) & (item->prev_free);
  553. rt_memcpy(&name[sizeof(void *)], ptr, sizeof(void *));
  554. rt_kprintf("%.*s", 2 * sizeof(void *), name);
  555. }
/*
 * Dump every block of 'heap' to the console: address, payload size, and
 * either the stored tag (used blocks) or "<F>" (free blocks).
 * Returns 0; a RT_NULL heap is silently ignored.
 */
int rt_memheap_dump(struct rt_memheap *heap)
{
    struct rt_memheap_item *item, *end;

    if (heap == RT_NULL) return 0;
    RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);

    rt_kprintf("\n[%.*s] [0x%08x - 0x%08x]->\n", RT_NAME_MAX, heap->parent.name,
               (rt_ubase_t)heap->start_addr, (rt_ubase_t)heap->start_addr + heap->pool_size);
    rt_kprintf("------------------------------\n");

    /* lock memheap */
    rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);

    item = heap->block_list;

    /* 'end' is the tailer block at the very end of the pool */
    end = (struct rt_memheap_item *)((rt_uint8_t *)heap->start_addr + heap->pool_size - RT_MEMHEAP_SIZE);

    /* for each memory block */
    while ((rt_ubase_t)item < ((rt_ubase_t)end))
    {
        /* flag blocks whose magic has been corrupted (overwritten header) */
        if (RT_MEMHEAP_IS_USED(item) && ((item->magic & RT_MEMHEAP_MASK) != RT_MEMHEAP_MAGIC))
            rt_kprintf("0x%08x", item + 1);

        if (item->magic == (RT_MEMHEAP_MAGIC | RT_MEMHEAP_USED))
        {
            /* used block: print payload address, size and owner tag */
            rt_kprintf("0x%08x: %-8d ", item + 1, MEMITEM_SIZE(item));
            _memheap_dump_tag(item);
            rt_kprintf("\n");
        }
        else
        {
            /* free block */
            rt_kprintf("0x%08x: %-8d <F>\n", item + 1, MEMITEM_SIZE(item));
        }

        item = item->next;
    }
    rt_sem_release(&(heap->lock));

    return 0;
}
  588. int memheaptrace(void)
  589. {
  590. int count = rt_object_get_length(RT_Object_Class_MemHeap);
  591. struct rt_memheap **heaps;
  592. if (count > 0)
  593. {
  594. int index;
  595. extern int list_memheap(void);
  596. heaps = (struct rt_memheap **)rt_malloc(sizeof(struct rt_memheap *) * count);
  597. if (heaps == RT_NULL) return 0;
  598. list_memheap();
  599. rt_kprintf("memheap header size: %d\n", RT_MEMHEAP_SIZE);
  600. count = rt_object_get_pointers(RT_Object_Class_MemHeap, (rt_object_t *)heaps, count);
  601. for (index = 0; index < count; index++)
  602. {
  603. rt_memheap_dump(heaps[index]);
  604. }
  605. rt_free(heaps);
  606. }
  607. return 0;
  608. }
  609. MSH_CMD_EXPORT(memheaptrace, dump memory trace information);
  610. #endif /* RT_USING_FINSH */
  611. #ifdef RT_USING_MEMHEAP_AS_HEAP
  612. static struct rt_memheap _heap;
  613. void rt_system_heap_init(void *begin_addr, void *end_addr)
  614. {
  615. RT_ASSERT((rt_uint32_t)end_addr > (rt_uint32_t)begin_addr);
  616. /* initialize a default heap in the system */
  617. rt_memheap_init(&_heap,
  618. "heap",
  619. begin_addr,
  620. (rt_uint32_t)end_addr - (rt_uint32_t)begin_addr);
  621. }
  622. void *rt_malloc(rt_size_t size)
  623. {
  624. void *ptr;
  625. /* try to allocate in system heap */
  626. ptr = rt_memheap_alloc(&_heap, size);
  627. if (ptr == RT_NULL)
  628. {
  629. struct rt_object *object;
  630. struct rt_list_node *node;
  631. struct rt_memheap *heap;
  632. struct rt_object_information *information;
  633. /* try to allocate on other memory heap */
  634. information = rt_object_get_information(RT_Object_Class_MemHeap);
  635. RT_ASSERT(information != RT_NULL);
  636. for (node = information->object_list.next;
  637. node != &(information->object_list);
  638. node = node->next)
  639. {
  640. object = rt_list_entry(node, struct rt_object, list);
  641. heap = (struct rt_memheap *)object;
  642. RT_ASSERT(heap);
  643. RT_ASSERT(rt_object_get_type(&heap->parent) == RT_Object_Class_MemHeap);
  644. /* not allocate in the default system heap */
  645. if (heap == &_heap)
  646. continue;
  647. ptr = rt_memheap_alloc(heap, size);
  648. if (ptr != RT_NULL)
  649. break;
  650. }
  651. }
  652. #ifdef RT_USING_MEMTRACE
  653. if (ptr == RT_NULL)
  654. {
  655. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("malloc[%d] => NULL", size));
  656. }
  657. else
  658. {
  659. struct rt_memheap_item *item = MEMITEM(ptr);
  660. if (rt_thread_self())
  661. rt_memheap_setname(item, rt_thread_self()->name);
  662. else
  663. rt_memheap_setname(item, "<null>");
  664. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("malloc => 0x%08x : %d", ptr, size));
  665. }
  666. #endif /* RT_USING_MEMTRACE */
  667. return ptr;
  668. }
  669. RTM_EXPORT(rt_malloc);
  670. void rt_free(void *rmem)
  671. {
  672. rt_memheap_free(rmem);
  673. }
  674. RTM_EXPORT(rt_free);
  675. void *rt_realloc(void *rmem, rt_size_t newsize)
  676. {
  677. void *new_ptr;
  678. struct rt_memheap_item *header_ptr;
  679. if (rmem == RT_NULL)
  680. return rt_malloc(newsize);
  681. if (newsize == 0)
  682. {
  683. rt_free(rmem);
  684. return RT_NULL;
  685. }
  686. /* get old memory item */
  687. header_ptr = (struct rt_memheap_item *)
  688. ((rt_uint8_t *)rmem - RT_MEMHEAP_SIZE);
  689. new_ptr = rt_memheap_realloc(header_ptr->pool_ptr, rmem, newsize);
  690. if (new_ptr == RT_NULL && newsize != 0)
  691. {
  692. /* allocate memory block from other memheap */
  693. new_ptr = rt_malloc(newsize);
  694. if (new_ptr != RT_NULL && rmem != RT_NULL)
  695. {
  696. rt_size_t oldsize;
  697. /* get the size of old memory block */
  698. oldsize = MEMITEM_SIZE(header_ptr);
  699. if (newsize > oldsize)
  700. rt_memcpy(new_ptr, rmem, oldsize);
  701. else
  702. rt_memcpy(new_ptr, rmem, newsize);
  703. rt_free(rmem);
  704. }
  705. }
  706. #ifdef RT_USING_MEMTRACE
  707. if (new_ptr == RT_NULL)
  708. {
  709. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("realloc[%d] => NULL", newsize));
  710. }
  711. else
  712. {
  713. struct rt_memheap_item *item = MEMITEM(new_ptr);
  714. if (rt_thread_self())
  715. rt_memheap_setname(item, rt_thread_self()->name);
  716. else
  717. rt_memheap_setname(item, "<null>");
  718. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("realloc => 0x%08x : %d",
  719. new_ptr, newsize));
  720. }
  721. #endif /* RT_USING_MEMTRACE */
  722. return new_ptr;
  723. }
  724. RTM_EXPORT(rt_realloc);
  725. void *rt_calloc(rt_size_t count, rt_size_t size)
  726. {
  727. void *ptr;
  728. rt_size_t total_size;
  729. total_size = count * size;
  730. ptr = rt_malloc(total_size);
  731. if (ptr != RT_NULL)
  732. {
  733. /* clean memory */
  734. rt_memset(ptr, 0, total_size);
  735. }
  736. #ifdef RT_USING_MEMTRACE
  737. if (ptr == RT_NULL)
  738. {
  739. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("calloc[%d x %d] => NULL",
  740. count, size));
  741. }
  742. else
  743. {
  744. RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("calloc => 0x%08x : %d",
  745. ptr, count * size));
  746. }
  747. #endif /* RT_USING_MEMTRACE */
  748. return ptr;
  749. }
  750. RTM_EXPORT(rt_calloc);
  751. void rt_memory_info(rt_uint32_t *total,
  752. rt_uint32_t *used,
  753. rt_uint32_t *max_used)
  754. {
  755. if (total != RT_NULL)
  756. *total = _heap.pool_size;
  757. if (used != RT_NULL)
  758. *used = _heap.pool_size - _heap.available_size;
  759. if (max_used != RT_NULL)
  760. *max_used = _heap.max_used_size;
  761. }
  762. #endif /* RT_USING_MEMHEAP_AS_HEAP */
  763. #ifdef RT_USING_MEMTRACE
  764. void dump_used_memheap(struct rt_memheap *mh)
  765. {
  766. struct rt_memheap_item *header_ptr;
  767. rt_uint32_t block_size;
  768. rt_kprintf("\nmemory heap address:\n");
  769. rt_kprintf("heap_ptr: 0x%08x\n", mh->start_addr);
  770. rt_kprintf("free : 0x%08x\n", mh->available_size);
  771. rt_kprintf("max_used: 0x%08x\n", mh->max_used_size);
  772. rt_kprintf("size : 0x%08x\n", mh->pool_size);
  773. rt_kprintf("\n--memory used information --\n");
  774. header_ptr = mh->block_list;
  775. while (header_ptr->next != mh->block_list)
  776. {
  777. if ((header_ptr->magic & RT_MEMHEAP_MASK) != RT_MEMHEAP_MAGIC)
  778. {
  779. rt_kprintf("[0x%08x - incorrect magic: 0x%08x\n", header_ptr, header_ptr->magic);
  780. break;
  781. }
  782. /* get current memory block size */
  783. block_size = MEMITEM_SIZE(header_ptr);
  784. if (block_size < 0)
  785. break;
  786. if (RT_MEMHEAP_IS_USED(header_ptr))
  787. {
  788. /* dump information */
  789. rt_kprintf("[0x%08x - %d - %c%c%c%c] used\n", header_ptr, block_size,
  790. header_ptr->owner_thread_name[0], header_ptr->owner_thread_name[1],
  791. header_ptr->owner_thread_name[2], header_ptr->owner_thread_name[3]);
  792. }
  793. else
  794. {
  795. /* dump information */
  796. rt_kprintf("[0x%08x - %d - %c%c%c%c] free\n", header_ptr, block_size,
  797. header_ptr->owner_thread_name[0], header_ptr->owner_thread_name[1],
  798. header_ptr->owner_thread_name[2], header_ptr->owner_thread_name[3]);
  799. }
  800. /* move to next used memory block */
  801. header_ptr = header_ptr->next;
  802. }
  803. }
  804. void memtrace_heap()
  805. {
  806. struct rt_object_information *info;
  807. struct rt_list_node *list;
  808. struct rt_memheap *mh;
  809. struct rt_list_node *node;
  810. info = rt_object_get_information(RT_Object_Class_MemHeap);
  811. list = &info->object_list;
  812. for (node = list->next; node != list; node = node->next)
  813. {
  814. mh = (struct rt_memheap *)rt_list_entry(node, struct rt_object, list);
  815. dump_used_memheap(mh);
  816. }
  817. }
  818. #ifdef RT_USING_FINSH
  819. #include <finsh.h>
  820. MSH_CMD_EXPORT(memtrace_heap, dump memory trace for heap);
  821. #endif /* RT_USING_FINSH */
  822. #endif /* RT_USING_MEMTRACE */
  823. #endif /* RT_USING_MEMHEAP */