/* mem.c — RT-Thread small-memory heap allocator (lwIP-derived) */
  1. /*
  2. * Copyright (c) 2006-2018, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2008-7-12 Bernard the first version
  9. * 2010-06-09 Bernard fix the end stub of heap
  10. * fix memory check in rt_realloc function
  11. * 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
  12. * 2010-10-14 Bernard fix rt_realloc issue when realloc a NULL pointer.
  13. * 2017-07-14 armink fix rt_realloc issue when new size is 0
  14. * 2018-10-02 Bernard Add 64bit support
  15. */
  16. /*
  17. * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
  18. * All rights reserved.
  19. *
  20. * Redistribution and use in source and binary forms, with or without modification,
  21. * are permitted provided that the following conditions are met:
  22. *
  23. * 1. Redistributions of source code must retain the above copyright notice,
  24. * this list of conditions and the following disclaimer.
  25. * 2. Redistributions in binary form must reproduce the above copyright notice,
  26. * this list of conditions and the following disclaimer in the documentation
  27. * and/or other materials provided with the distribution.
  28. * 3. The name of the author may not be used to endorse or promote products
  29. * derived from this software without specific prior written permission.
  30. *
  31. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  32. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  33. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
  34. * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  35. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  36. * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  37. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  38. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  39. * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
  40. * OF SUCH DAMAGE.
  41. *
  42. * This file is part of the lwIP TCP/IP stack.
  43. *
  44. * Author: Adam Dunkels <adam@sics.se>
  45. * Simon Goldschmidt
  46. *
  47. */
  48. #include <rthw.h>
  49. #include <rtthread.h>
  50. #ifndef RT_USING_MEMHEAP_AS_HEAP
  51. /* #define RT_MEM_DEBUG */
  52. #define RT_MEM_STATS
  53. #if defined (RT_USING_HEAP) && defined (RT_USING_SMALL_MEM)
  54. #ifdef RT_USING_HOOK
  55. static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
  56. static void (*rt_free_hook)(void *ptr);
  57. /**
  58. * @addtogroup Hook
  59. */
  60. /**@{*/
  61. /**
  62. * This function will set a hook function, which will be invoked when a memory
  63. * block is allocated from heap memory.
  64. *
  65. * @param hook the hook function
  66. */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    /* store the callback; it is invoked (via RT_OBJECT_HOOK_CALL) after each
     * successful rt_malloc with the user pointer and the aligned size */
    rt_malloc_hook = hook;
}
  71. /**
  72. * This function will set a hook function, which will be invoked when a memory
  73. * block is released to heap memory.
  74. *
  75. * @param hook the hook function
  76. */
void rt_free_sethook(void (*hook)(void *ptr))
{
    /* store the callback; it is invoked at the start of rt_free with the
     * user pointer being released */
    rt_free_hook = hook;
}
  81. /**@}*/
  82. #endif
  83. #define HEAP_MAGIC 0x1ea0
/* Header that precedes every heap block, used and free alike.
 * 'next' and 'prev' are byte OFFSETS from heap_ptr (not pointers), which
 * keeps the layout identical regardless of where the heap lives.
 * Field order and sizes are load-bearing: MIN_SIZE and SIZEOF_STRUCT_MEM
 * are derived from this layout — do not reorder. */
struct heap_mem
{
    /* magic and used flag */
    rt_uint16_t magic;          /* HEAP_MAGIC while the header is intact; corruption canary */
    rt_uint16_t used;           /* 1 = allocated, 0 = free */
#ifdef ARCH_CPU_64BIT
    rt_uint32_t resv;           /* padding so next/prev start 8-byte aligned on 64-bit */
#endif
    rt_size_t next, prev;       /* offsets of neighbouring headers from heap_ptr */
#ifdef RT_USING_MEMTRACE
#ifdef ARCH_CPU_64BIT
    rt_uint8_t thread[8];
#else
    rt_uint8_t thread[4]; /* thread name (space-padded, NOT NUL-terminated) */
#endif
#endif
};
  101. /** pointer to the heap: for alignment, heap_ptr is now a pointer instead of an array */
  102. static rt_uint8_t *heap_ptr;
  103. /** the last entry, always unused! */
  104. static struct heap_mem *heap_end;
  105. #ifdef ARCH_CPU_64BIT
  106. #define MIN_SIZE 24
  107. #else
  108. #define MIN_SIZE 12
  109. #endif
  110. #define MIN_SIZE_ALIGNED RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
  111. #define SIZEOF_STRUCT_MEM RT_ALIGN(sizeof(struct heap_mem), RT_ALIGN_SIZE)
  112. static struct heap_mem *lfree; /* pointer to the lowest free block */
  113. static struct rt_semaphore heap_sem;
  114. static rt_size_t mem_size_aligned;
  115. #ifdef RT_MEM_STATS
  116. static rt_size_t used_mem, max_mem;
  117. #endif
  118. #ifdef RT_USING_MEMTRACE
  119. rt_inline void rt_mem_setname(struct heap_mem *mem, const char *name)
  120. {
  121. int index;
  122. for (index = 0; index < sizeof(mem->thread); index ++)
  123. {
  124. if (name[index] == '\0') break;
  125. mem->thread[index] = name[index];
  126. }
  127. for (; index < sizeof(mem->thread); index ++)
  128. {
  129. mem->thread[index] = ' ';
  130. }
  131. }
  132. #endif
/* Coalesce a just-freed block with free neighbours.
 *
 * Must be called with heap_sem held. 'mem' has already been marked
 * unused by the caller. If the following block is free (and is not the
 * heap_end sentinel) it is absorbed forward; if the preceding block is
 * free, 'mem' is absorbed backward into it. The lowest-free pointer
 * (lfree) is kept pointing at the surviving merged header.
 *
 * @param mem header of the freed block, inside [heap_ptr, heap_end)
 */
static void plug_holes(struct heap_mem *mem)
{
    struct heap_mem *nmem;
    struct heap_mem *pmem;

    RT_ASSERT((rt_uint8_t *)mem >= heap_ptr);
    RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)heap_end);
    RT_ASSERT(mem->used == 0);

    /* plug hole forward */
    nmem = (struct heap_mem *)&heap_ptr[mem->next];
    if (mem != nmem &&
        nmem->used == 0 &&
        (rt_uint8_t *)nmem != (rt_uint8_t *)heap_end)
    {
        /* if mem->next is unused and not end of heap_ptr,
         * combine mem and mem->next
         */
        if (lfree == nmem)
        {
            lfree = mem;
        }
        mem->next = nmem->next;
        /* re-link the block after nmem back to the merged header */
        ((struct heap_mem *)&heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - heap_ptr;
    }

    /* plug hole backward */
    pmem = (struct heap_mem *)&heap_ptr[mem->prev];
    if (pmem != mem && pmem->used == 0)
    {
        /* if mem->prev is unused, combine mem and mem->prev */
        if (lfree == mem)
        {
            lfree = pmem;
        }
        pmem->next = mem->next;
        ((struct heap_mem *)&heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - heap_ptr;
    }
}
  169. /**
  170. * @ingroup SystemInit
  171. *
  172. * This function will initialize system heap memory.
  173. *
  174. * @param begin_addr the beginning address of system heap memory.
  175. * @param end_addr the end address of system heap memory.
  176. */
  177. void rt_system_heap_init(void *begin_addr, void *end_addr)
  178. {
  179. struct heap_mem *mem;
  180. rt_ubase_t begin_align = RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
  181. rt_ubase_t end_align = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_ALIGN_SIZE);
  182. RT_DEBUG_NOT_IN_INTERRUPT;
  183. /* alignment addr */
  184. if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
  185. ((end_align - 2 * SIZEOF_STRUCT_MEM) >= begin_align))
  186. {
  187. /* calculate the aligned memory size */
  188. mem_size_aligned = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
  189. }
  190. else
  191. {
  192. rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
  193. (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);
  194. return;
  195. }
  196. /* point to begin address of heap */
  197. heap_ptr = (rt_uint8_t *)begin_align;
  198. RT_DEBUG_LOG(RT_DEBUG_MEM, ("mem init, heap begin address 0x%x, size %d\n",
  199. (rt_ubase_t)heap_ptr, mem_size_aligned));
  200. /* initialize the start of the heap */
  201. mem = (struct heap_mem *)heap_ptr;
  202. mem->magic = HEAP_MAGIC;
  203. mem->next = mem_size_aligned + SIZEOF_STRUCT_MEM;
  204. mem->prev = 0;
  205. mem->used = 0;
  206. #ifdef RT_USING_MEMTRACE
  207. rt_mem_setname(mem, "INIT");
  208. #endif
  209. /* initialize the end of the heap */
  210. heap_end = (struct heap_mem *)&heap_ptr[mem->next];
  211. heap_end->magic = HEAP_MAGIC;
  212. heap_end->used = 1;
  213. heap_end->next = mem_size_aligned + SIZEOF_STRUCT_MEM;
  214. heap_end->prev = mem_size_aligned + SIZEOF_STRUCT_MEM;
  215. #ifdef RT_USING_MEMTRACE
  216. rt_mem_setname(heap_end, "INIT");
  217. #endif
  218. rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);
  219. /* initialize the lowest-free pointer to the start of the heap */
  220. lfree = (struct heap_mem *)heap_ptr;
  221. }
  222. /**
  223. * @addtogroup MM
  224. */
  225. /**@{*/
  226. /**
  227. * Allocate a block of memory with a minimum of 'size' bytes.
  228. *
  229. * @param size is the minimum size of the requested block in bytes.
  230. *
  231. * @return pointer to allocated memory or NULL if no free memory was found.
  232. */
/* Allocate a block of at least 'size' bytes from the system heap.
 *
 * First-fit scan starting at lfree (lowest free block). Large-enough
 * free blocks are split; near/exact fits are taken whole. Serialized by
 * heap_sem, so this must not be called from interrupt context.
 *
 * @param size is the minimum size of the requested block in bytes.
 *
 * @return pointer to allocated memory or NULL if no free memory was found.
 */
void *rt_malloc(rt_size_t size)
{
    rt_size_t ptr, ptr2;           /* byte offsets from heap_ptr */
    struct heap_mem *mem, *mem2;

    if (size == 0)
        return RT_NULL;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (size != RT_ALIGN(size, RT_ALIGN_SIZE))
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d, but align to %d\n",
                                    size, RT_ALIGN(size, RT_ALIGN_SIZE)));
    else
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d\n", size));

    /* alignment size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);

    if (size > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("no memory\n"));
        return RT_NULL;
    }

    /* every data block must be at least MIN_SIZE_ALIGNED long */
    if (size < MIN_SIZE_ALIGNED)
        size = MIN_SIZE_ALIGNED;

    /* take memory semaphore */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* first-fit walk over the block list, starting at the lowest free block */
    for (ptr = (rt_uint8_t *)lfree - heap_ptr;
         ptr < mem_size_aligned - size;
         ptr = ((struct heap_mem *)&heap_ptr[ptr])->next)
    {
        mem = (struct heap_mem *)&heap_ptr[ptr];

        if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size)
        {
            /* mem is not used and at least perfect fit is possible:
             * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
            if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
                (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED))
            {
                /* (in addition to the above, we test if another struct heap_mem (SIZEOF_STRUCT_MEM) containing
                 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
                 * -> split large block, create empty remainder,
                 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
                 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
                 * struct heap_mem would fit in but no data between mem2 and mem2->next
                 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
                 * region that couldn't hold data, but when mem->next gets freed,
                 * the 2 regions would be combined, resulting in more free memory
                 */
                ptr2 = ptr + SIZEOF_STRUCT_MEM + size;

                /* create mem2 struct */
                mem2 = (struct heap_mem *)&heap_ptr[ptr2];
                mem2->magic = HEAP_MAGIC;
                mem2->used = 0;
                mem2->next = mem->next;
                mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
                rt_mem_setname(mem2, "    ");
#endif

                /* and insert it between mem and mem->next */
                mem->next = ptr2;
                mem->used = 1;

                /* fix up back-link unless mem2 is the last block before heap_end */
                if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
                {
                    ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
                }
#ifdef RT_MEM_STATS
                used_mem += (size + SIZEOF_STRUCT_MEM);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            else
            {
                /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
                 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
                 * take care of this).
                 * -> near fit or excact fit: do not split, no mem2 creation
                 * also can't move mem->next directly behind mem, since mem->next
                 * will always be used at this point!
                 */
                mem->used = 1;
#ifdef RT_MEM_STATS
                used_mem += mem->next - ((rt_uint8_t *)mem - heap_ptr);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            /* set memory block magic */
            mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
            if (rt_thread_self())
                rt_mem_setname(mem, rt_thread_self()->name);
            else
                rt_mem_setname(mem, "NONE");
#endif
            if (mem == lfree)
            {
                /* Find next free block after mem and update lowest free pointer */
                while (lfree->used && lfree != heap_end)
                    lfree = (struct heap_mem *)&heap_ptr[lfree->next];

                RT_ASSERT(((lfree == heap_end) || (!lfree->used)));
            }
            rt_sem_release(&heap_sem);
            RT_ASSERT((rt_ubase_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_ubase_t)heap_end);
            RT_ASSERT((rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
            RT_ASSERT((((rt_ubase_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);

            RT_DEBUG_LOG(RT_DEBUG_MEM,
                         ("allocate memory at 0x%x, size: %d\n",
                          (rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
                          (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

            RT_OBJECT_HOOK_CALL(rt_malloc_hook,
                                (((void *)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM)), size));

            /* return the memory data except mem struct */
            return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
        }
    }

    /* no fitting free block found */
    rt_sem_release(&heap_sem);

    return RT_NULL;
}
  350. RTM_EXPORT(rt_malloc);
  351. /**
  352. * This function will change the previously allocated memory block.
  353. *
  354. * @param rmem pointer to memory allocated by rt_malloc
  355. * @param newsize the required new size
  356. *
  357. * @return the changed memory block address
  358. */
/* Resize a block previously returned by rt_malloc.
 *
 * newsize == 0 frees the block and returns NULL; rmem == NULL degrades
 * to rt_malloc(newsize). Shrinking splits the block in place and returns
 * the same pointer; growing falls back to malloc + copy + free.
 *
 * @param rmem pointer to memory allocated by rt_malloc
 * @param newsize the required new size
 *
 * @return the changed memory block address
 */
void *rt_realloc(void *rmem, rt_size_t newsize)
{
    rt_size_t size;
    rt_size_t ptr, ptr2;   /* byte offsets from heap_ptr */
    struct heap_mem *mem, *mem2;
    void *nmem;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("realloc: out of memory\n"));

        return RT_NULL;
    }
    else if (newsize == 0)
    {
        /* newsize of 0 means free, per the 2017-07-14 changelog entry */
        rt_free(rmem);

        return RT_NULL;
    }

    /* allocate a new memory block */
    if (rmem == RT_NULL)
        return rt_malloc(newsize);

    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* a pointer outside the heap is returned unchanged (not an error return) */
    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        /* illegal memory */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    ptr = (rt_uint8_t *)mem - heap_ptr;
    /* current user data size of this block */
    size = mem->next - ptr - SIZEOF_STRUCT_MEM;
    if (size == newsize)
    {
        /* the size is the same as */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    /* NOTE(review): this uses MIN_SIZE rather than MIN_SIZE_ALIGNED; with a
     * large RT_ALIGN_SIZE the split remainder could be smaller than
     * MIN_SIZE_ALIGNED — confirm against upstream before changing */
    if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size)
    {
        /* split memory block */
#ifdef RT_MEM_STATS
        used_mem -= (size - newsize);
#endif

        ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
        mem2 = (struct heap_mem *)&heap_ptr[ptr2];
        mem2->magic = HEAP_MAGIC;
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
        rt_mem_setname(mem2, "    ");
#endif
        mem->next = ptr2;
        if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
        {
            ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
        }

        /* the remainder may merge with a following free block */
        plug_holes(mem2);

        rt_sem_release(&heap_sem);

        return rmem;
    }
    rt_sem_release(&heap_sem);

    /* expand memory: allocate-copy-free */
    nmem = rt_malloc(newsize);
    if (nmem != RT_NULL) /* check memory */
    {
        rt_memcpy(nmem, rmem, size < newsize ? size : newsize);
        rt_free(rmem);
    }

    return nmem;
}
  432. RTM_EXPORT(rt_realloc);
  433. /**
  434. * This function will contiguously allocate enough space for count objects
  435. * that are size bytes of memory each and returns a pointer to the allocated
  436. * memory.
  437. *
  438. * The allocated memory is filled with bytes of value zero.
  439. *
  440. * @param count number of objects to allocate
  441. * @param size size of the objects to allocate
  442. *
  443. * @return pointer to allocated memory / NULL pointer if there is an error
  444. */
  445. void *rt_calloc(rt_size_t count, rt_size_t size)
  446. {
  447. void *p;
  448. /* allocate 'count' objects of size 'size' */
  449. p = rt_malloc(count * size);
  450. /* zero the memory */
  451. if (p)
  452. rt_memset(p, 0, count * size);
  453. return p;
  454. }
  455. RTM_EXPORT(rt_calloc);
  456. /**
  457. * This function will release the previously allocated memory block by
  458. * rt_malloc. The released memory block is taken back to system heap.
  459. *
  460. * @param rmem the address of memory which will be released
  461. */
  462. void rt_free(void *rmem)
  463. {
  464. struct heap_mem *mem;
  465. if (rmem == RT_NULL)
  466. return;
  467. RT_DEBUG_NOT_IN_INTERRUPT;
  468. RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
  469. RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)heap_ptr &&
  470. (rt_uint8_t *)rmem < (rt_uint8_t *)heap_end);
  471. RT_OBJECT_HOOK_CALL(rt_free_hook, (rmem));
  472. if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
  473. (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
  474. {
  475. RT_DEBUG_LOG(RT_DEBUG_MEM, ("illegal memory\n"));
  476. return;
  477. }
  478. /* Get the corresponding struct heap_mem ... */
  479. mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);
  480. RT_DEBUG_LOG(RT_DEBUG_MEM,
  481. ("release memory 0x%x, size: %d\n",
  482. (rt_ubase_t)rmem,
  483. (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));
  484. /* protect the heap from concurrent access */
  485. rt_sem_take(&heap_sem, RT_WAITING_FOREVER);
  486. /* ... which has to be in a used state ... */
  487. if (!mem->used || mem->magic != HEAP_MAGIC)
  488. {
  489. rt_kprintf("to free a bad data block:\n");
  490. rt_kprintf("mem: 0x%08x, used flag: %d, magic code: 0x%04x\n", mem, mem->used, mem->magic);
  491. }
  492. RT_ASSERT(mem->used);
  493. RT_ASSERT(mem->magic == HEAP_MAGIC);
  494. /* ... and is now unused. */
  495. mem->used = 0;
  496. mem->magic = HEAP_MAGIC;
  497. #ifdef RT_USING_MEMTRACE
  498. rt_mem_setname(mem, " ");
  499. #endif
  500. if (mem < lfree)
  501. {
  502. /* the newly freed struct is now the lowest */
  503. lfree = mem;
  504. }
  505. #ifdef RT_MEM_STATS
  506. used_mem -= (mem->next - ((rt_uint8_t *)mem - heap_ptr));
  507. #endif
  508. /* finally, see if prev or next are free also */
  509. plug_holes(mem);
  510. rt_sem_release(&heap_sem);
  511. }
  512. RTM_EXPORT(rt_free);
  513. #ifdef RT_MEM_STATS
  514. void rt_memory_info(rt_uint32_t *total,
  515. rt_uint32_t *used,
  516. rt_uint32_t *max_used)
  517. {
  518. if (total != RT_NULL)
  519. *total = mem_size_aligned;
  520. if (used != RT_NULL)
  521. *used = used_mem;
  522. if (max_used != RT_NULL)
  523. *max_used = max_mem;
  524. }
  525. #ifdef RT_USING_FINSH
  526. #include <finsh.h>
/* Finsh shell command: print the allocator's statistics counters. */
void list_mem(void)
{
    rt_kprintf("total memory: %d\n", mem_size_aligned);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
  533. FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
  534. #ifdef RT_USING_MEMTRACE
/* Shell command: walk the whole block list with interrupts disabled and
 * verify every header (offset in range, magic intact, used flag 0/1).
 * Prints the first bad header found. Always returns 0. */
int memcheck(void)
{
    /* NOTE(review): 'position' is int; on heaps larger than INT_MAX the
     * offset could overflow — confirm heap sizes in deployments */
    int position;
    rt_ubase_t level;
    struct heap_mem *mem;

    level = rt_hw_interrupt_disable();
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        if (position < 0) goto __exit;
        if (position > (int)mem_size_aligned) goto __exit;
        if (mem->magic != HEAP_MAGIC) goto __exit;
        if (mem->used != 0 && mem->used != 1) goto __exit;
    }
    rt_hw_interrupt_enable(level);

    return 0;
__exit:
    /* NOTE(review): these prints run with interrupts still disabled and
     * dereference the suspect header — acceptable for a debug command */
    rt_kprintf("Memory block wrong:\n");
    rt_kprintf("address: 0x%08x\n", mem);
    rt_kprintf("  magic: 0x%04x\n", mem->magic);
    rt_kprintf("   used: %d\n", mem->used);
    rt_kprintf("   size: %d\n", mem->next - position - SIZEOF_STRUCT_MEM);
    rt_hw_interrupt_enable(level);

    return 0;
}
  560. MSH_CMD_EXPORT(memcheck, check memory data);
/* Shell command: dump heap statistics, key pointers, and one line per
 * block (address, human-readable size, owner-name prefix, magic check).
 * argc/argv are unused. Always returns 0. */
int memtrace(int argc, char **argv)
{
    struct heap_mem *mem;

    list_mem();

    rt_kprintf("\nmemory heap address:\n");
    rt_kprintf("heap_ptr: 0x%08x\n", heap_ptr);
    rt_kprintf("lfree   : 0x%08x\n", lfree);
    rt_kprintf("heap_end: 0x%08x\n", heap_end);

    rt_kprintf("\n--memory item information --\n");
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        int position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        int size;

        rt_kprintf("[0x%08x - ", mem);

        size = mem->next - position - SIZEOF_STRUCT_MEM;
        if (size < 1024)
            rt_kprintf("%5d", size);
        else if (size < 1024 * 1024)
            rt_kprintf("%4dK", size / 1024);
        else
            rt_kprintf("%4dM", size / (1024 * 1024));

        /* only the first 4 owner-name bytes are shown, even on 64-bit
         * builds where mem->thread holds 8 */
        rt_kprintf("] %c%c%c%c", mem->thread[0], mem->thread[1], mem->thread[2], mem->thread[3]);
        if (mem->magic != HEAP_MAGIC)
            rt_kprintf(": ***\n");
        else
            rt_kprintf("\n");
    }

    return 0;
}
  590. MSH_CMD_EXPORT(memtrace, dump memory trace information);
  591. #endif /* end of RT_USING_MEMTRACE */
  592. #endif /* end of RT_USING_FINSH */
  593. #endif
  594. /**@}*/
  595. #endif /* end of RT_USING_HEAP */
  596. #endif /* end of RT_USING_MEMHEAP_AS_HEAP */