mem.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2008-7-12 Bernard the first version
  9. * 2010-06-09 Bernard fix the end stub of heap
  10. * fix memory check in rt_realloc function
  11. * 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
  12. * 2010-10-14 Bernard fix rt_realloc issue when realloc a NULL pointer.
  13. * 2017-07-14 armink fix rt_realloc issue when new size is 0
  14. * 2018-10-02 Bernard Add 64bit support
  15. */
  16. /*
  17. * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
  18. * All rights reserved.
  19. *
  20. * Redistribution and use in source and binary forms, with or without modification,
  21. * are permitted provided that the following conditions are met:
  22. *
  23. * 1. Redistributions of source code must retain the above copyright notice,
  24. * this list of conditions and the following disclaimer.
  25. * 2. Redistributions in binary form must reproduce the above copyright notice,
  26. * this list of conditions and the following disclaimer in the documentation
  27. * and/or other materials provided with the distribution.
  28. * 3. The name of the author may not be used to endorse or promote products
  29. * derived from this software without specific prior written permission.
  30. *
  31. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
  32. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  33. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
  34. * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  35. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  36. * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  37. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  38. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  39. * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
  40. * OF SUCH DAMAGE.
  41. *
  42. * This file is part of the lwIP TCP/IP stack.
  43. *
  44. * Author: Adam Dunkels <adam@sics.se>
  45. * Simon Goldschmidt
  46. *
  47. */
  48. #include <rthw.h>
  49. #include <rtthread.h>
  50. #ifndef RT_USING_MEMHEAP_AS_HEAP
  51. #define RT_MEM_STATS
  52. #if defined (RT_USING_HEAP) && defined (RT_USING_SMALL_MEM)
  53. #ifdef RT_USING_HOOK
/* User-installed hooks: rt_malloc_hook fires after each successful
 * allocation, rt_free_hook on each free (see the RT_OBJECT_HOOK_CALL
 * sites in rt_malloc / rt_free). RT_NULL until set via the setters. */
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);
  56. /**
  57. * @addtogroup Hook
  58. */
  59. /**@{*/
  60. /**
  61. * @brief This function will set a hook function, which will be invoked when a memory
  62. * block is allocated from heap memory.
  63. *
  64. * @param hook the hook function.
  65. */
  66. void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
  67. {
  68. rt_malloc_hook = hook;
  69. }
  70. /**
  71. * @brief This function will set a hook function, which will be invoked when a memory
  72. * block is released to heap memory.
  73. *
  74. * @param hook the hook function
  75. */
  76. void rt_free_sethook(void (*hook)(void *ptr))
  77. {
  78. rt_free_hook = hook;
  79. }
  80. /**@}*/
  81. #endif /* RT_USING_HOOK */
#define HEAP_MAGIC 0x1ea0

/*
 * Header that precedes every block (allocated or free) in the heap.
 * Blocks are linked by byte OFFSETS from heap_ptr, not by pointers:
 * 'next' and 'prev' index into the heap area via &heap_ptr[offset].
 */
struct heap_mem
{
    /* magic and used flag */
    rt_uint16_t magic;     /* always HEAP_MAGIC; verified by rt_free/memcheck */
    rt_uint16_t used;      /* 1 while allocated, 0 when free */
#ifdef ARCH_CPU_64BIT
    rt_uint32_t resv;      /* reserved - presumably pads the header for 64-bit alignment */
#endif /* ARCH_CPU_64BIT */
    rt_size_t next, prev;  /* offsets (from heap_ptr) of the neighbouring headers */
#ifdef RT_USING_MEMTRACE
#ifdef ARCH_CPU_64BIT
    rt_uint8_t thread[8];
#else
    rt_uint8_t thread[4];  /* thread name */
#endif /* ARCH_CPU_64BIT */
#endif /* RT_USING_MEMTRACE */
};

/** pointer to the heap: for alignment, heap_ptr is now a pointer instead of an array */
static rt_uint8_t *heap_ptr;

/** the last entry, always unused! */
static struct heap_mem *heap_end;

/* smallest user-data size a block may carry (keeps split remainders usable) */
#ifdef ARCH_CPU_64BIT
#define MIN_SIZE 24
#else
#define MIN_SIZE 12
#endif /* ARCH_CPU_64BIT */

#define MIN_SIZE_ALIGNED     RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
#define SIZEOF_STRUCT_MEM    RT_ALIGN(sizeof(struct heap_mem), RT_ALIGN_SIZE)

static struct heap_mem *lfree; /* pointer to the lowest free block */

/* serializes every heap operation (malloc / realloc / free) */
static struct rt_semaphore heap_sem;

/* usable heap size in bytes, excluding the two bounding headers */
static rt_size_t mem_size_aligned;

#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem; /* current and peak usage, headers included */
#endif /* RT_MEM_STATS */
  117. #ifdef RT_USING_MEMTRACE
  118. rt_inline void rt_mem_setname(struct heap_mem *mem, const char *name)
  119. {
  120. int index;
  121. for (index = 0; index < sizeof(mem->thread); index ++)
  122. {
  123. if (name[index] == '\0') break;
  124. mem->thread[index] = name[index];
  125. }
  126. for (; index < sizeof(mem->thread); index ++)
  127. {
  128. mem->thread[index] = ' ';
  129. }
  130. }
  131. #endif /* RT_USING_MEMTRACE */
/*
 * Coalesce the free block 'mem' with its free neighbours.
 *
 * Called with heap_sem held (from rt_free and the shrink path of
 * rt_realloc) and with mem->used == 0. Merges forward into mem->next and
 * backward into mem->prev when those blocks are free, and keeps 'lfree'
 * pointing at the lowest free header.
 */
static void plug_holes(struct heap_mem *mem)
{
    struct heap_mem *nmem;
    struct heap_mem *pmem;

    RT_ASSERT((rt_uint8_t *)mem >= heap_ptr);
    RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)heap_end);
    RT_ASSERT(mem->used == 0);

    /* plug hole forward */
    nmem = (struct heap_mem *)&heap_ptr[mem->next];
    if (mem != nmem &&
        nmem->used == 0 &&
        (rt_uint8_t *)nmem != (rt_uint8_t *)heap_end)
    {
        /* if mem->next is unused and not end of heap_ptr,
         * combine mem and mem->next
         */
        if (lfree == nmem)
        {
            lfree = mem;
        }
        /* absorb nmem: skip over it and re-link its successor's back-offset */
        mem->next = nmem->next;
        ((struct heap_mem *)&heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - heap_ptr;
    }

    /* plug hole backward */
    pmem = (struct heap_mem *)&heap_ptr[mem->prev];
    if (pmem != mem && pmem->used == 0)
    {
        /* if mem->prev is unused, combine mem and mem->prev */
        if (lfree == mem)
        {
            lfree = pmem;
        }
        /* absorb mem into pmem and fix the successor's back-offset */
        pmem->next = mem->next;
        ((struct heap_mem *)&heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - heap_ptr;
    }
}
/**
 * @brief This function will initialize system heap memory.
 *
 * The usable area is [begin_align, end_align) minus two struct heap_mem
 * headers: one leading header describing the single initial free block,
 * and one sentinel header (heap_end) that is permanently marked used so
 * traversals stop there.
 *
 * @param begin_addr the beginning address of system heap memory.
 *
 * @param end_addr the end address of system heap memory.
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    struct heap_mem *mem;
    /* round the start up and the end down so both are RT_ALIGN_SIZE aligned */
    rt_ubase_t begin_align = RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
    rt_ubase_t end_align = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_ALIGN_SIZE);

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment addr: reject regions too small to hold the two headers */
    if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
        ((end_align - 2 * SIZEOF_STRUCT_MEM) >= begin_align))
    {
        /* calculate the aligned memory size */
        mem_size_aligned = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
    }
    else
    {
        rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
                   (rt_ubase_t)begin_addr, (rt_ubase_t)end_addr);

        return;
    }

    /* point to begin address of heap */
    heap_ptr = (rt_uint8_t *)begin_align;

    RT_DEBUG_LOG(RT_DEBUG_MEM, ("mem init, heap begin address 0x%x, size %d\n",
                                (rt_ubase_t)heap_ptr, mem_size_aligned));

    /* initialize the start of the heap: one big free block spanning everything */
    mem        = (struct heap_mem *)heap_ptr;
    mem->magic = HEAP_MAGIC;
    mem->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    mem->prev  = 0;
    mem->used  = 0;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, "INIT");
#endif /* RT_USING_MEMTRACE */

    /* initialize the end of the heap: sentinel marked used, linked to itself */
    heap_end        = (struct heap_mem *)&heap_ptr[mem->next];
    heap_end->magic = HEAP_MAGIC;
    heap_end->used  = 1;
    heap_end->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    heap_end->prev  = mem_size_aligned + SIZEOF_STRUCT_MEM;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(heap_end, "INIT");
#endif /* RT_USING_MEMTRACE */

    /* binary semaphore guarding all subsequent heap operations */
    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_PRIO);

    /* initialize the lowest-free pointer to the start of the heap */
    lfree = (struct heap_mem *)heap_ptr;
}
  220. /**
  221. * @addtogroup MM
  222. */
  223. /**@{*/
/**
 * @brief Allocate a block of memory with a minimum of 'size' bytes.
 *
 * First-fit scan of the block list starting at 'lfree' (the lowest free
 * block), under heap_sem. A free block large enough to hold the request
 * plus another header plus MIN_SIZE_ALIGNED is split, leaving the
 * remainder free; otherwise the whole block is handed out.
 *
 * @param size is the minimum size of the requested block in bytes.
 *
 * @return the pointer to allocated memory or NULL if no free memory was found.
 */
void *rt_malloc(rt_size_t size)
{
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;

    if (size == 0)
        return RT_NULL;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (size != RT_ALIGN(size, RT_ALIGN_SIZE))
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d, but align to %d\n",
                                    size, RT_ALIGN(size, RT_ALIGN_SIZE)));
    else
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d\n", size));

    /* alignment size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);

    if (size > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("no memory\n"));

        return RT_NULL;
    }

    /* every data block must be at least MIN_SIZE_ALIGNED long */
    if (size < MIN_SIZE_ALIGNED)
        size = MIN_SIZE_ALIGNED;

    /* take memory semaphore */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* first-fit: walk the offset-linked list from the lowest free block */
    for (ptr = (rt_uint8_t *)lfree - heap_ptr;
         ptr < mem_size_aligned - size;
         ptr = ((struct heap_mem *)&heap_ptr[ptr])->next)
    {
        mem = (struct heap_mem *)&heap_ptr[ptr];

        if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size)
        {
            /* mem is not used and at least perfect fit is possible:
             * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

            if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
                (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED))
            {
                /* (in addition to the above, we test if another struct heap_mem (SIZEOF_STRUCT_MEM) containing
                 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
                 * -> split large block, create empty remainder,
                 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
                 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
                 * struct heap_mem would fit in but no data between mem2 and mem2->next
                 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
                 * region that couldn't hold data, but when mem->next gets freed,
                 * the 2 regions would be combined, resulting in more free memory
                 */
                ptr2 = ptr + SIZEOF_STRUCT_MEM + size;

                /* create mem2 struct: the free remainder after the allocation */
                mem2        = (struct heap_mem *)&heap_ptr[ptr2];
                mem2->magic = HEAP_MAGIC;
                mem2->used  = 0;
                mem2->next  = mem->next;
                mem2->prev  = ptr;
#ifdef RT_USING_MEMTRACE
                rt_mem_setname(mem2, " ");
#endif /* RT_USING_MEMTRACE */

                /* and insert it between mem and mem->next */
                mem->next = ptr2;
                mem->used = 1;

                /* fix the back-link of the following block (unless it is heap_end) */
                if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
                {
                    ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
                }
#ifdef RT_MEM_STATS
                used_mem += (size + SIZEOF_STRUCT_MEM);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif /* RT_MEM_STATS */
            }
            else
            {
                /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
                 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
                 * taken care of this).
                 * -> near fit or exact fit: do not split, no mem2 creation
                 * also can't move mem->next directly behind mem, since mem->next
                 * will always be used at this point!
                 */
                mem->used = 1;
#ifdef RT_MEM_STATS
                used_mem += mem->next - ((rt_uint8_t *)mem - heap_ptr);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif /* RT_MEM_STATS */
            }
            /* set memory block magic */
            mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
            /* record the allocating thread's name in the header */
            if (rt_thread_self())
                rt_mem_setname(mem, rt_thread_self()->name);
            else
                rt_mem_setname(mem, "NONE");
#endif /* RT_USING_MEMTRACE */

            if (mem == lfree)
            {
                /* Find next free block after mem and update lowest free pointer */
                while (lfree->used && lfree != heap_end)
                    lfree = (struct heap_mem *)&heap_ptr[lfree->next];

                RT_ASSERT(((lfree == heap_end) || (!lfree->used)));
            }

            rt_sem_release(&heap_sem);
            RT_ASSERT((rt_ubase_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_ubase_t)heap_end);
            RT_ASSERT((rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
            RT_ASSERT((((rt_ubase_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);

            RT_DEBUG_LOG(RT_DEBUG_MEM,
                         ("allocate memory at 0x%x, size: %d\n",
                          (rt_ubase_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
                          (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

            RT_OBJECT_HOOK_CALL(rt_malloc_hook,
                                (((void *)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM)), size));

            /* return the memory data except mem struct */
            return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
        }
    }

    /* no fitting block found */
    rt_sem_release(&heap_sem);

    return RT_NULL;
}
RTM_EXPORT(rt_malloc);
/**
 * @brief This function will change the size of previously allocated memory block.
 *
 * Degenerate cases: rmem == RT_NULL behaves as rt_malloc(newsize);
 * newsize == 0 behaves as rt_free(rmem). Shrinking by more than one
 * header plus MIN_SIZE splits the block in place and returns the original
 * pointer; otherwise a new block is allocated, the data copied, and the
 * old block freed.
 *
 * NOTE(review): the split threshold here uses MIN_SIZE while rt_malloc
 * uses MIN_SIZE_ALIGNED -- confirm this asymmetry is intentional.
 *
 * @param rmem is the pointer to memory allocated by rt_malloc.
 *
 * @param newsize is the required new size.
 *
 * @return the changed memory block address.
 */
void *rt_realloc(void *rmem, rt_size_t newsize)
{
    rt_size_t size;
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;
    void *nmem;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("realloc: out of memory\n"));

        return RT_NULL;
    }
    else if (newsize == 0)
    {
        rt_free(rmem);

        return RT_NULL;
    }

    /* allocate a new memory block */
    if (rmem == RT_NULL)
        return rt_malloc(newsize);

    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        /* illegal memory: pointer is outside the heap, return it untouched */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    /* step back from the user pointer to the block header */
    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    ptr = (rt_uint8_t *)mem - heap_ptr;
    size = mem->next - ptr - SIZEOF_STRUCT_MEM;
    if (size == newsize)
    {
        /* the size is the same as before */
        rt_sem_release(&heap_sem);

        return rmem;
    }

    if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size)
    {
        /* shrink in place: split the tail off as a new free block */
#ifdef RT_MEM_STATS
        used_mem -= (size - newsize);
#endif /* RT_MEM_STATS */

        ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
        mem2 = (struct heap_mem *)&heap_ptr[ptr2];
        mem2->magic = HEAP_MAGIC;
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
#ifdef RT_USING_MEMTRACE
        rt_mem_setname(mem2, " ");
#endif /* RT_USING_MEMTRACE */
        mem->next = ptr2;
        /* fix the back-link of the following block (unless it is heap_end) */
        if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
        {
            ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
        }

        if (mem2 < lfree)
        {
            /* the splited struct is now the lowest */
            lfree = mem2;
        }

        /* merge the tail with a following free block, if any */
        plug_holes(mem2);

        rt_sem_release(&heap_sem);

        return rmem;
    }
    /* grow (or shrink too little to split): heap_sem is released before
     * the malloc+copy+free sequence below */
    rt_sem_release(&heap_sem);

    /* expand memory */
    nmem = rt_malloc(newsize);
    if (nmem != RT_NULL) /* check memory */
    {
        rt_memcpy(nmem, rmem, size < newsize ? size : newsize);
        rt_free(rmem);
    }

    return nmem;
}
RTM_EXPORT(rt_realloc);
  437. /**
  438. * @brief This function will contiguously allocate enough space for count objects
  439. * that are size bytes of memory each and returns a pointer to the allocated
  440. * memory.
  441. *
  442. * @note The allocated memory is filled with bytes of value zero.
  443. *
  444. * @param count is the number of objects to allocate.
  445. *
  446. * @param size is the size of one object to allocate.
  447. *
  448. * @return pointer to allocated memory / NULL pointer if there is an error.
  449. */
  450. void *rt_calloc(rt_size_t count, rt_size_t size)
  451. {
  452. void *p;
  453. /* allocate 'count' objects of size 'size' */
  454. p = rt_malloc(count * size);
  455. /* zero the memory */
  456. if (p)
  457. rt_memset(p, 0, count * size);
  458. return p;
  459. }
  460. RTM_EXPORT(rt_calloc);
/**
 * @brief This function will release the previously allocated memory block by
 *        rt_malloc. The released memory block is taken back to system heap.
 *
 * The block header is validated (used flag and HEAP_MAGIC) before the
 * block is marked free and coalesced with its neighbours via plug_holes.
 * Freeing RT_NULL is a no-op.
 *
 * @param rmem the address of memory which will be released.
 */
void rt_free(void *rmem)
{
    struct heap_mem *mem;

    if (rmem == RT_NULL)
        return;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* a valid user pointer is aligned and lies inside the heap area */
    RT_ASSERT((((rt_ubase_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
    RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)heap_ptr &&
              (rt_uint8_t *)rmem < (rt_uint8_t *)heap_end);

    RT_OBJECT_HOOK_CALL(rt_free_hook, (rmem));

    if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr ||
        (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("illegal memory\n"));

        return;
    }

    /* Get the corresponding struct heap_mem ... */
    mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);

    RT_DEBUG_LOG(RT_DEBUG_MEM,
                 ("release memory 0x%x, size: %d\n",
                  (rt_ubase_t)rmem,
                  (rt_ubase_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

    /* protect the heap from concurrent access */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* ... which has to be in a used state ... */
    if (!mem->used || mem->magic != HEAP_MAGIC)
    {
        /* double free or corrupted header: report before asserting */
        rt_kprintf("to free a bad data block:\n");
        rt_kprintf("mem: 0x%08x, used flag: %d, magic code: 0x%04x\n", mem, mem->used, mem->magic);
    }
    RT_ASSERT(mem->used);
    RT_ASSERT(mem->magic == HEAP_MAGIC);
    /* ... and is now unused. */
    mem->used  = 0;
    mem->magic = HEAP_MAGIC;
#ifdef RT_USING_MEMTRACE
    rt_mem_setname(mem, " ");
#endif /* RT_USING_MEMTRACE */

    if (mem < lfree)
    {
        /* the newly freed struct is now the lowest */
        lfree = mem;
    }

#ifdef RT_MEM_STATS
    used_mem -= (mem->next - ((rt_uint8_t *)mem - heap_ptr));
#endif /* RT_MEM_STATS */

    /* finally, see if prev or next are free also */
    plug_holes(mem);

    rt_sem_release(&heap_sem);
}
RTM_EXPORT(rt_free);
  518. #ifdef RT_MEM_STATS
  519. /**
  520. * @brief This function will caculate the total memory, the used memory, and
  521. * the max used memory.
  522. *
  523. * @param total is a pointer to get the total size of the memory.
  524. *
  525. * @param used is a pointer to get the size of memory used.
  526. *
  527. * @param max_used is a pointer to get the maximum memory used.
  528. */
  529. void rt_memory_info(rt_uint32_t *total,
  530. rt_uint32_t *used,
  531. rt_uint32_t *max_used)
  532. {
  533. if (total != RT_NULL)
  534. *total = mem_size_aligned;
  535. if (used != RT_NULL)
  536. *used = used_mem;
  537. if (max_used != RT_NULL)
  538. *max_used = max_mem;
  539. }
  540. #ifdef RT_USING_FINSH
  541. #include <finsh.h>
/* Print heap usage statistics to the console (finsh shell command). */
void list_mem(void)
{
    rt_kprintf("total memory: %d\n", mem_size_aligned);
    rt_kprintf("used memory : %d\n", used_mem);
    rt_kprintf("maximum allocated memory: %d\n", max_mem);
}
FINSH_FUNCTION_EXPORT(list_mem, list memory usage information)
  549. #ifdef RT_USING_MEMTRACE
/*
 * Shell command: walk the entire block list with interrupts disabled and
 * verify every header is sane (offset in range, magic intact, used flag
 * 0 or 1). Dumps the first corrupted header found. Always returns 0.
 */
int memcheck(void)
{
    int position;
    rt_ubase_t level;
    struct heap_mem *mem;

    /* traverse with interrupts off so the list cannot change underneath us */
    level = rt_hw_interrupt_disable();
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        if (position < 0) goto __exit;                     /* before heap start */
        if (position > (int)mem_size_aligned) goto __exit; /* past heap end */
        if (mem->magic != HEAP_MAGIC) goto __exit;         /* header overwritten */
        if (mem->used != 0 && mem->used != 1) goto __exit; /* flag corrupted */
    }
    rt_hw_interrupt_enable(level);

    return 0;
__exit:
    /* dump the offending header, then restore interrupts */
    rt_kprintf("Memory block wrong:\n");
    rt_kprintf("address: 0x%08x\n", mem);
    rt_kprintf(" magic: 0x%04x\n", mem->magic);
    rt_kprintf(" used: %d\n", mem->used);
    rt_kprintf(" size: %d\n", mem->next - position - SIZEOF_STRUCT_MEM);
    rt_hw_interrupt_enable(level);

    return 0;
}
MSH_CMD_EXPORT(memcheck, check memory data);
/*
 * Shell command: dump overall heap statistics followed by one line per
 * block (address, human-readable size, owning thread tag from the
 * header, and a '***' marker when the magic is corrupted).
 *
 * NOTE(review): the walk is not protected by heap_sem or an interrupt
 * lock, so output may be inconsistent under concurrent allocation.
 */
int memtrace(int argc, char **argv)
{
    struct heap_mem *mem;

    list_mem();

    rt_kprintf("\nmemory heap address:\n");
    rt_kprintf("heap_ptr: 0x%08x\n", heap_ptr);
    rt_kprintf("lfree : 0x%08x\n", lfree);
    rt_kprintf("heap_end: 0x%08x\n", heap_end);

    rt_kprintf("\n--memory item information --\n");
    for (mem = (struct heap_mem *)heap_ptr; mem != heap_end; mem = (struct heap_mem *)&heap_ptr[mem->next])
    {
        int position = (rt_ubase_t)mem - (rt_ubase_t)heap_ptr;
        int size;

        rt_kprintf("[0x%08x - ", mem);

        /* user-data size: block span minus its header */
        size = mem->next - position - SIZEOF_STRUCT_MEM;
        if (size < 1024)
            rt_kprintf("%5d", size);
        else if (size < 1024 * 1024)
            rt_kprintf("%4dK", size / 1024);
        else
            rt_kprintf("%4dM", size / (1024 * 1024));

        /* first four bytes of the recorded thread name */
        rt_kprintf("] %c%c%c%c", mem->thread[0], mem->thread[1], mem->thread[2], mem->thread[3]);
        if (mem->magic != HEAP_MAGIC)
            rt_kprintf(": ***\n");
        else
            rt_kprintf("\n");
    }

    return 0;
}
MSH_CMD_EXPORT(memtrace, dump memory trace information);
  606. #endif /* RT_USING_MEMTRACE */
  607. #endif /* RT_USING_FINSH */
#endif /* RT_MEM_STATS */
/**@}*/
#endif /* defined (RT_USING_HEAP) && defined (RT_USING_SMALL_MEM) */
  611. #endif /* RT_USING_MEMHEAP_AS_HEAP */