/*
 * File      : slab.c
 * This file is part of RT-Thread RTOS
 * COPYRIGHT (C) 2008 - 2009, RT-Thread Development Team
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rt-thread.org/license/LICENSE
 *
 * Change Logs:
 * Date           Author       Notes
 * 2008-07-12     Bernard      the first version
 * 2010-07-13     Bernard      fix RT_ALIGN issue found by kuronca
 */
/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <rthw.h>
#include <rtthread.h>

/* #define RT_SLAB_DEBUG */

#if defined (RT_USING_HEAP) && defined (RT_USING_SLAB)

#ifdef RT_USING_HOOK
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */
/*@{*/

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is allocated from heap memory.
 *
 * @param hook the hook function
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is released to heap memory.
 *
 * @param hook the hook function
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
    rt_free_hook = hook;
}

/*@}*/
#endif
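
/*
 * A minimal usage sketch (illustrative only, not part of the allocator):
 * registering hooks to trace every allocation and release. The function
 * names below are hypothetical.
 */
#if 0
static void trace_malloc(void *ptr, rt_size_t size)
{
    rt_kprintf("malloc: %d bytes at 0x%x\n", size, (rt_uint32_t)ptr);
}

static void trace_free(void *ptr)
{
    rt_kprintf("free: 0x%x\n", (rt_uint32_t)ptr);
}

static void install_trace_hooks(void)
{
    /* the hook API is only compiled in when RT_USING_HOOK is enabled */
    rt_malloc_sethook(trace_malloc);
    rt_free_sethook(trace_free);
}
#endif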

/*
 * slab allocator implementation
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *  Alloc Size      Chunking        Number of zones
 *  0-127           8               16
 *  128-255         16              8
 *  256-511         32              8
 *  512-1023        64              8
 *  1024-2047       128             8
 *  2048-4095       256             8
 *  4096-8191       512             8
 *  8192-16383      1024            8
 *  16384-32767     2048            8
 *  (if RT_MM_PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *  Allocations >= zone_limit go directly to kmem.
 *
 *              API REQUIREMENTS AND SIDE EFFECTS
 *
 *  To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *  have remained compatible with the following API requirements:
 *
 *  + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *  + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *  + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
 *  + ability to allocate arbitrarily large chunks of memory
 */

/*
 * Chunk structure for free elements
 */
typedef struct slab_chunk
{
    struct slab_chunk *c_next;
} slab_chunk;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
typedef struct slab_zone {
    rt_int32_t  z_magic;        /* magic number for sanity check */
    rt_int32_t  z_nfree;        /* total free chunks / ualloc space in zone */
    rt_int32_t  z_nmax;         /* maximum free chunks */

    struct slab_zone *z_next;   /* zoneary[] link if z_nfree non-zero */
    rt_uint8_t *z_baseptr;      /* pointer to start of chunk array */

    rt_int32_t  z_uindex;       /* current initial allocation index */
    rt_int32_t  z_chunksize;    /* chunk size for validation */

    rt_int32_t  z_zoneindex;    /* zone index */
    slab_chunk *z_freechunk;    /* free chunk list */
} slab_zone;

#define ZALLOC_SLAB_MAGIC       0x51ab51ab
#define ZALLOC_ZONE_LIMIT       (16 * 1024)     /* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE    (32 * 1024)     /* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE    (128 * 1024)    /* maximum zone size */
#define NZONES                  72              /* number of zones */
#define ZONE_RELEASE_THRESH     2               /* threshold number of zones */

static slab_zone *zone_array[NZONES];   /* linked list of zones NFree > 0 */
static slab_zone *zone_free;            /* whole zones that have become free */

static int zone_free_cnt;
static int zone_size;
static int zone_limit;
static int zone_page_cnt;

#ifdef RT_MEM_STATS
/* some statistical variables */
static rt_uint32_t rt_mem_allocated = 0;
static rt_uint32_t rt_mem_zone = 0;
static rt_uint32_t rt_mem_page_allocated = 0;
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * RT_MM_PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define MIN_CHUNK_SIZE      8   /* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)

/*
 * Array of descriptors that describe the contents of each page
 */
#define PAGE_TYPE_FREE      0x00
#define PAGE_TYPE_SMALL     0x01
#define PAGE_TYPE_LARGE     0x02
struct memusage {
    rt_uint32_t type:2;     /* page type */
    rt_uint32_t size:30;    /* pages allocated or offset from zone */
};
static struct memusage *memusage = RT_NULL;
#define btokup(addr)    (&memusage[((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS])

static rt_uint32_t heap_start, heap_end;

/* page allocator */
struct rt_page_head
{
    struct rt_page_head *next;  /* next valid page */
    rt_size_t page;             /* number of pages */

    /* dummy */
    char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_page_head *) + sizeof(rt_size_t))];
};
static struct rt_page_head *rt_page_list;

static void *rt_page_alloc(rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    RT_ASSERT(npages != 0);

    /* first-fit scan of the free page list */
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        if (b->page > npages)
        {
            /* split pages: carve npages off the front of this block */
            n = b + npages;
            n->next = b->next;
            n->page = b->page - npages;
            *prev = n;
            break;
        }

        if (b->page == npages)
        {
            /* this node fits exactly, remove it from the list */
            *prev = b->next;
            break;
        }
    }

    return b;
}

static void rt_page_free(void *addr, rt_size_t npages)
{
    struct rt_page_head *b, *n;
    struct rt_page_head **prev;

    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT((rt_uint32_t)addr % RT_MM_PAGE_SIZE == 0);
    RT_ASSERT(npages != 0);

    n = (struct rt_page_head *)addr;

    /* the free list is kept sorted by address; coalesce with neighbours */
    for (prev = &rt_page_list; (b = *prev) != RT_NULL; prev = &(b->next))
    {
        RT_ASSERT(b->page > 0);
        RT_ASSERT(b > n || b + b->page <= n);

        if (b + b->page == n)
        {
            /* merge with the preceding block; also merge forward if they now touch */
            if (b + (b->page += npages) == b->next)
            {
                b->page += b->next->page;
                b->next = b->next->next;
            }

            return;
        }

        if (b == n + npages)
        {
            /* merge with the following block */
            n->page = b->page + npages;
            n->next = b->next;
            *prev = n;

            return;
        }

        if (b > n + npages) break;
    }

    /* no neighbour to merge with, insert as a new node */
    n->page = npages;
    n->next = b;
    *prev = n;
}

/*
 * Initialize the page allocator
 */
static void rt_page_init(void *addr, rt_size_t npages)
{
    RT_ASSERT(addr != RT_NULL);
    RT_ASSERT(npages != 0);

    rt_page_list = RT_NULL;
    rt_page_free(addr, npages);
}

/**
 * @ingroup SystemInit
 *
 * This function will initialize the system heap.
 *
 * @param begin_addr the beginning address of system page
 * @param end_addr the end address of system page
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    rt_uint32_t limsize, npages;

    /* align begin and end addr to page boundaries */
    heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE);
    heap_end   = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE);

    if (heap_start >= heap_end)
    {
        rt_kprintf("rt_system_heap_init, error begin address 0x%x, and end address 0x%x\n",
            (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);

        return;
    }

    limsize = heap_end - heap_start;
    npages  = limsize / RT_MM_PAGE_SIZE;

#ifdef RT_SLAB_DEBUG
    rt_kprintf("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n", heap_start, heap_end, limsize, npages);
#endif

    /* init pages */
    rt_page_init((void *)heap_start, npages);

    /* calculate zone size */
    zone_size = ZALLOC_MIN_ZONE_SIZE;
    while (zone_size < ZALLOC_MAX_ZONE_SIZE && (zone_size << 1) < (limsize / 1024))
        zone_size <<= 1;

    zone_limit = zone_size / 4;
    if (zone_limit > ZALLOC_ZONE_LIMIT) zone_limit = ZALLOC_ZONE_LIMIT;

    zone_page_cnt = zone_size / RT_MM_PAGE_SIZE;

#ifdef RT_SLAB_DEBUG
    rt_kprintf("zone size 0x%x, zone page count 0x%x\n", zone_size, zone_page_cnt);
#endif

    /* allocate the memusage array: one descriptor per heap page */
    limsize  = npages * sizeof(struct memusage);
    limsize  = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
    memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);

#ifdef RT_SLAB_DEBUG
    rt_kprintf("memusage 0x%x, size 0x%x\n", (rt_uint32_t)memusage, limsize);
#endif
}
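
/*
 * A minimal initialization sketch (illustrative only; HEAP_BEGIN and
 * HEAP_END are hypothetical board-specific placeholders, not names from
 * this file). rt_system_heap_init() is called once from board setup code
 * before any rt_malloc() call.
 */
#if 0
#define HEAP_BEGIN  ((void *)0x20000000)    /* hypothetical start of heap RAM */
#define HEAP_END    ((void *)0x20020000)    /* hypothetical end of heap RAM */

static void board_heap_setup(void)
{
    /* begin/end addresses are aligned to RT_MM_PAGE_SIZE internally */
    rt_system_heap_init(HEAP_BEGIN, HEAP_END);
}
#endif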

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
rt_inline int zoneindex(rt_uint32_t *bytes)
{
    rt_uint32_t n = (rt_uint32_t)*bytes;    /* unsigned for shift opt */

    if (n < 128)
    {
        *bytes = n = (n + 7) & ~7;
        return (n / 8 - 1);                 /* 8 byte chunks, 16 zones */
    }
    if (n < 256)
    {
        *bytes = n = (n + 15) & ~15;
        return (n / 16 + 7);
    }
    if (n < 8192)
    {
        if (n < 512)
        {
            *bytes = n = (n + 31) & ~31;
            return (n / 32 + 15);
        }
        if (n < 1024)
        {
            *bytes = n = (n + 63) & ~63;
            return (n / 64 + 23);
        }
        if (n < 2048)
        {
            *bytes = n = (n + 127) & ~127;
            return (n / 128 + 31);
        }
        if (n < 4096)
        {
            *bytes = n = (n + 255) & ~255;
            return (n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return (n / 512 + 47);
    }
    if (n < 16384)
    {
        *bytes = n = (n + 1023) & ~1023;
        return (n / 1024 + 55);
    }

    rt_kprintf("Unexpected byte count %d", n);

    return 0;
}
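
/*
 * A worked example of the mapping above (illustrative sketch, not compiled
 * in): zoneindex() rounds the request up to the zone's chunk size and
 * returns the zone_array[] slot, per the chunking table near the top of
 * this file.
 */
#if 0
static void zoneindex_example(void)
{
    rt_uint32_t size;

    size = 100;     /* 0-127 range uses 8-byte chunking */
    /* 100 rounds up to 104; index is 104/8 - 1 = 12 */
    RT_ASSERT(zoneindex(&size) == 12 && size == 104);

    size = 300;     /* 256-511 range uses 32-byte chunking */
    /* 300 rounds up to 320; index is 320/32 + 15 = 25 */
    RT_ASSERT(zoneindex(&size) == 25 && size == 320);
}
#endif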

/**
 * @addtogroup MM
 */
/*@{*/

/**
 * This function will allocate a block from the system heap memory.
 * - If size is zero, or
 * - if there is no free memory of the requested size in the system,
 * RT_NULL is returned.
 *
 * @param size the size of memory to be allocated
 *
 * @return the allocated memory
 */
void *rt_malloc(rt_size_t size)
{
    slab_zone *z;
    rt_int32_t zi;
    slab_chunk *chunk;
    rt_base_t interrupt_level;
    struct memusage *kup;

    /* zero size, return RT_NULL */
    if (size == 0) return RT_NULL;

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     */
    if (size >= zone_limit)
    {
        size = RT_ALIGN(size, RT_MM_PAGE_SIZE);

        chunk = rt_page_alloc(size >> RT_MM_PAGE_BITS);
        if (chunk == RT_NULL) return RT_NULL;

        /* set kup */
        kup = btokup(chunk);
        kup->type = PAGE_TYPE_LARGE;
        kup->size = size >> RT_MM_PAGE_BITS;

#ifdef RT_SLAB_DEBUG
        rt_kprintf("malloc a large memory 0x%x, page cnt %d, kup %d\n",
            size,
            size >> RT_MM_PAGE_BITS,
            ((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS);
#endif

        /* lock interrupt */
        interrupt_level = rt_hw_interrupt_disable();
        goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    RT_ASSERT(zi < NZONES);

#ifdef RT_SLAB_DEBUG
    rt_kprintf("try to malloc 0x%x on zone: %d\n", size, zi);
#endif

    interrupt_level = rt_hw_interrupt_disable();
    if ((z = zone_array[zi]) != RT_NULL)
    {
        RT_ASSERT(z->z_nfree > 0);

        /* remove us from the zone_array[] when we become empty */
        if (--z->z_nfree == 0)
        {
            zone_array[zi] = z->z_next;
            z->z_next = RT_NULL;
        }

        /*
         * No chunks are available but nfree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by uindex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit rt_kprintf rather than a KASSERT.
         */
        if (z->z_uindex + 1 != z->z_nmax)
        {
            z->z_uindex = z->z_uindex + 1;
            chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);
        }
        else
        {
            /* find on free chunk list */
            chunk = z->z_freechunk;

            /* remove this chunk from list */
            z->z_freechunk = z->z_freechunk->c_next;
        }

        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        rt_int32_t off;

        if ((z = zone_free) != RT_NULL)
        {
            /* remove zone from free zone list */
            zone_free = z->z_next;
            --zone_free_cnt;
        }
        else
        {
            /* allocate a zone from pages */
            z = rt_page_alloc(zone_size / RT_MM_PAGE_SIZE);
            if (z == RT_NULL) goto fail;

#ifdef RT_SLAB_DEBUG
            rt_kprintf("alloc a new zone: 0x%x\n", (rt_uint32_t)z);
#endif

            /* set memory usage: each page records its offset from the zone base */
            for (off = 0, kup = btokup(z); off < zone_page_cnt; off ++)
            {
                kup->type = PAGE_TYPE_SMALL;
                kup->size = off;

                kup ++;
            }
        }

        /* clear to zero */
        rt_memset(z, 0, sizeof(slab_zone));

        /* offset of slab zone struct in zone */
        off = sizeof(slab_zone);

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;

        z->z_magic     = ZALLOC_SLAB_MAGIC;
        z->z_zoneindex = zi;
        z->z_nmax      = (zone_size - off) / size;
        z->z_nfree     = z->z_nmax - 1;
        z->z_baseptr   = (rt_uint8_t *)z + off;
        z->z_uindex    = 0;
        z->z_chunksize = size;

        chunk = (slab_chunk *)(z->z_baseptr + z->z_uindex * size);

        /* link to zone array */
        z->z_next = zone_array[zi];
        zone_array[zi] = z;
    }

done:
    rt_hw_interrupt_enable(interrupt_level);

#ifdef RT_USING_HOOK
    if (rt_malloc_hook != RT_NULL) rt_malloc_hook((char *)chunk, size);
#endif

    return chunk;

fail:
    rt_hw_interrupt_enable(interrupt_level);

    return RT_NULL;
}
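
/*
 * A minimal allocation sketch (illustrative only): small requests are
 * served from a slab zone, large requests (>= zone_limit) come straight
 * from the page allocator; both are returned with rt_free().
 */
#if 0
static void malloc_example(void)
{
    void *small = rt_malloc(100);       /* served from a slab zone */
    void *large = rt_malloc(64 * 1024); /* >= zone_limit: whole pages */

    if (small != RT_NULL) rt_free(small);
    if (large != RT_NULL) rt_free(large);
}
#endif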

/**
 * This function will change the size of a previously allocated memory block.
 *
 * @param ptr the previously allocated memory block
 * @param size the new size of the memory block
 *
 * @return the allocated memory
 */
void *rt_realloc(void *ptr, rt_size_t size)
{
    void *nptr;
    slab_zone *z;
    struct memusage *kup;

    if (ptr == RT_NULL) return rt_malloc(size);
    if (size == 0)
    {
        rt_free(ptr);

        return RT_NULL;
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_size_t osize;

        osize = kup->size << RT_MM_PAGE_BITS;
        if ((nptr = rt_malloc(size)) == RT_NULL) return RT_NULL;

        rt_memcpy(nptr, ptr, size > osize ? osize : size);
        rt_free(ptr);

        return nptr;
    }
    else if (kup->type == PAGE_TYPE_SMALL)
    {
        z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) - kup->size * RT_MM_PAGE_SIZE);
        RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

        zoneindex(&size);
        if (z->z_chunksize == size) return ptr;     /* same chunk */

        /*
         * Allocate memory for the new request size.  Note that zoneindex has
         * already adjusted the request size to the appropriate chunk size, which
         * should optimize our bcopy().  Then copy and return the new pointer.
         */
        if ((nptr = rt_malloc(size)) == RT_NULL) return RT_NULL;

        rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
        rt_free(ptr);

        return nptr;
    }

    return RT_NULL;
}
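
/*
 * A minimal resize sketch (illustrative only): rt_realloc() keeps the block
 * in place when the new size maps to the same chunk size, and otherwise
 * allocates, copies, and frees. On failure it returns RT_NULL and the old
 * block stays valid, so the old pointer must not be overwritten blindly.
 */
#if 0
static void realloc_example(void)
{
    char *buf, *grown;

    buf = rt_malloc(64);
    if (buf == RT_NULL) return;

    /* grow the buffer; the first 64 bytes are preserved */
    grown = rt_realloc(buf, 256);
    if (grown == RT_NULL)
    {
        rt_free(buf);   /* the original block is untouched on failure */
        return;
    }

    rt_free(grown);
}
#endif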

/**
 * This function will contiguously allocate enough space for count objects
 * that are size bytes of memory each and returns a pointer to the allocated
 * memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 *
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *rt_calloc(rt_size_t count, rt_size_t size)
{
    void *p;

    /* allocate 'count' objects of size 'size' */
    p = rt_malloc(count * size);

    /* zero the memory */
    if (p) rt_memset(p, 0, count * size);

    return p;
}
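
/*
 * A minimal sketch (illustrative only): rt_calloc() allocates count * size
 * bytes and zeroes them. Note the product is not overflow-checked here, so
 * the caller must ensure count * size cannot wrap around.
 */
#if 0
static void calloc_example(void)
{
    rt_uint32_t *table = rt_calloc(16, sizeof(rt_uint32_t));
    if (table != RT_NULL)
    {
        RT_ASSERT(table[0] == 0);   /* memory arrives zero-filled */
        rt_free(table);
    }
}
#endif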

/**
 * This function will release the previously allocated memory block by rt_malloc.
 * The released memory block is taken back to the system heap.
 *
 * @param ptr the address of memory which will be released
 */
void rt_free(void *ptr)
{
    slab_zone *z;
    slab_chunk *chunk;
    struct memusage *kup;
    rt_base_t interrupt_level;

    /* free a RT_NULL pointer */
    if (ptr == RT_NULL) return;

#ifdef RT_USING_HOOK
    if (rt_free_hook != RT_NULL) rt_free_hook(ptr);
#endif

    /* get memory usage */
#ifdef RT_SLAB_DEBUG
    {
        rt_uint32_t addr = ((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
        rt_kprintf("free a memory 0x%x and align to 0x%x, kup index %d\n",
            (rt_uint32_t)ptr,
            (rt_uint32_t)addr,
            ((rt_uint32_t)(addr) - heap_start) >> RT_MM_PAGE_BITS);
    }
#endif

    kup = btokup((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK);
    /* release large allocation */
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_uint32_t size;

        /* clear page counter */
        interrupt_level = rt_hw_interrupt_disable();
        size = kup->size;
        kup->size = 0;
        rt_hw_interrupt_enable(interrupt_level);

#ifdef RT_SLAB_DEBUG
        rt_kprintf("free large memory block 0x%x, page count %d\n", (rt_uint32_t)ptr, size);
#endif

        /* free these pages */
        rt_page_free(ptr, size);

        return;
    }

    /* zone case, get the owning zone */
    z = (slab_zone *)(((rt_uint32_t)ptr & ~RT_MM_PAGE_MASK) - kup->size * RT_MM_PAGE_SIZE);
    RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

    interrupt_level = rt_hw_interrupt_disable();

    chunk = (slab_chunk *)ptr;
    chunk->c_next = z->z_freechunk;
    z->z_freechunk = chunk;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_nfree++ == 0)
    {
        z->z_next = zone_array[z->z_zoneindex];
        zone_array[z->z_zoneindex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_nfree == z->z_nmax &&
        (z->z_next || zone_array[z->z_zoneindex] != z))
    {
        slab_zone **pz;

#ifdef RT_SLAB_DEBUG
        rt_kprintf("free zone 0x%x, zoneindex %d\n", (rt_uint32_t)z, z->z_zoneindex);
#endif

        /* remove zone from zone array list */
        for (pz = &zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next) ;
        *pz = z->z_next;

        /* reset zone */
        z->z_magic = -1;

        /* insert to free zone list */
        z->z_next = zone_free;
        zone_free = z;

        ++zone_free_cnt;

        /* release zone to page allocator */
        if (zone_free_cnt > ZONE_RELEASE_THRESH)
        {
            register rt_base_t i;

            z = zone_free;
            zone_free = z->z_next;
            --zone_free_cnt;

            /* clear the memory usage descriptors for the zone's pages */
            for (i = 0, kup = btokup(z); i < zone_page_cnt; i ++)
            {
                kup->type = PAGE_TYPE_FREE;
                kup->size = 0;

                kup ++;
            }

            /* release pages; rt_page_free() takes a page count, not bytes */
            rt_page_free(z, zone_size / RT_MM_PAGE_SIZE);
        }
    }

    rt_hw_interrupt_enable(interrupt_level);
}

/*@}*/

#endif