/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-10-14     tyx          the first version
 */

#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"
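
/*
 * The two structures below mirror the allocator's private definitions
 * from the kernel's small-memory implementation, so the test can walk
 * the heap and inspect block headers directly.
 */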
struct rt_small_mem_item
{
    rt_ubase_t pool_ptr;                 /**< small memory object addr */
#ifdef ARCH_CPU_64BIT
    rt_uint32_t resv;
#endif /* ARCH_CPU_64BIT */
    rt_size_t next;                      /**< next free item */
    rt_size_t prev;                      /**< prev free item */
#ifdef RT_USING_MEMTRACE
#ifdef ARCH_CPU_64BIT
    rt_uint8_t thread[8];                /**< thread name */
#else
    rt_uint8_t thread[4];                /**< thread name */
#endif /* ARCH_CPU_64BIT */
#endif /* RT_USING_MEMTRACE */
};

struct rt_small_mem
{
    struct rt_memory parent;             /**< inherit from rt_memory */
    rt_uint8_t *heap_ptr;                /**< pointer to the heap */
    struct rt_small_mem_item *heap_end;
    struct rt_small_mem_item *lfree;
    rt_size_t mem_size_aligned;          /**< aligned memory size */
};
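
/*
 * Payload size of a block: the distance from this header to the next one,
 * minus the aligned header size. `next` is stored as an offset from
 * heap_ptr, which is why the block's own offset is subtracted first.
 */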
#define MEM_SIZE(_heap, _mem) \
    (((struct rt_small_mem_item *)(_mem))->next - ((rt_ubase_t)(_mem) - \
    (rt_ubase_t)((_heap)->heap_ptr)) - RT_ALIGN(sizeof(struct rt_small_mem_item), RT_ALIGN_SIZE))

#define TEST_MEM_SIZE 1024
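
/*
 * Walk every block in the heap and return the size of the largest free
 * one. The allocator appears to tag used blocks by setting the lowest
 * bit of pool_ptr, so an even pool_ptr means the block is free.
 */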
static rt_size_t max_block(struct rt_small_mem *heap)
{
    struct rt_small_mem_item *mem;
    rt_size_t max = 0, size;

    for (mem = (struct rt_small_mem_item *)heap->heap_ptr;
         mem != heap->heap_end;
         mem = (struct rt_small_mem_item *)&heap->heap_ptr[mem->next])
    {
        if (((rt_ubase_t)mem->pool_ptr & 0x1) == 0)
        {
            size = MEM_SIZE(heap, mem);
            if (size > max)
            {
                max = size;
            }
        }
    }
    return max;
}
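
/*
 * memcmp-style check that `size` bytes at `ptr` all equal `v`;
 * returns 0 on match, otherwise the difference at the first mismatch.
 */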
static int _mem_cmp(void *ptr, rt_uint8_t v, rt_size_t size)
{
    while (size-- != 0)
    {
        if (*(rt_uint8_t *)ptr != v)
            return *(rt_uint8_t *)ptr - v;
        ptr = (rt_uint8_t *)ptr + 1; /* advance, otherwise only the first byte is ever checked */
    }
    return 0;
}

struct mem_test_context
{
    void *ptr;
    rt_size_t size;
    rt_uint8_t magic;
};
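
/*
 * Functional test: exercises plain alloc/free, merging of adjacent free
 * blocks (sequential and interleaved frees), and the three realloc cases
 * (grow, shrink, same size). Each block is filled with a per-allocation
 * magic byte so data integrity can be verified before every free.
 */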
static void mem_functional_test(void)
{
    rt_size_t total_size;
    rt_uint8_t *buf;
    struct rt_small_mem *heap;
    rt_uint8_t magic = __LINE__;

    /* Prepare test memory */
    buf = rt_malloc(TEST_MEM_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    rt_memset(buf, 0xAA, TEST_MEM_SIZE);
    /* small heap init */
    heap = (struct rt_small_mem *)rt_smem_init("mem_tc", buf, TEST_MEM_SIZE);
    /* get total size */
    total_size = max_block(heap);
    uassert_int_not_equal(total_size, 0);
    /*
     * Allocate the whole heap in one block and verify that
     * allocation and free both work
     */
    {
        struct mem_test_context ctx;
        ctx.magic = magic++;
        ctx.size = max_block(heap);
        ctx.ptr = rt_smem_alloc(&heap->parent, ctx.size);
        uassert_not_null(ctx.ptr);
        rt_memset(ctx.ptr, ctx.magic, ctx.size);
        uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
        rt_smem_free(ctx.ptr);
        uassert_int_equal(max_block(heap), total_size);
    }
    /*
     * Allocate several blocks, then free them in order, to verify
     * that adjacent free blocks are merged
     */
    {
        rt_size_t i, max_free = 0;
        struct mem_test_context ctx[3];

        /* alloc mem */
        for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
        {
            ctx[i].magic = magic++;
            ctx[i].size = max_block(heap) / (sizeof(ctx) / sizeof(ctx[0]) - i);
            ctx[i].ptr = rt_smem_alloc(&heap->parent, ctx[i].size);
            uassert_not_null(ctx[i].ptr);
            rt_memset(ctx[i].ptr, ctx[i].magic, ctx[i].size);
        }
        /* The whole heap is now allocated; the largest free block should be 0 */
        uassert_int_equal(max_block(heap), 0);
        /* Verify that the memory data is correct */
        for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
        {
            uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
        }
        /* Free the blocks sequentially */
        for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
        {
            uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
            rt_smem_free(ctx[i].ptr);
            max_free += ctx[i].size;
            uassert_true(max_block(heap) >= max_free);
        }
        /* Check whether the memory is fully merged */
        uassert_int_equal(max_block(heap), total_size);
    }
    /*
     * Allocate several blocks, then free them in alternating order,
     * to verify that free blocks separated by used ones still merge
     */
    {
        rt_size_t i, max_free = 0;
        struct mem_test_context ctx[3];

        /* alloc mem */
        for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
        {
            ctx[i].magic = magic++;
            ctx[i].size = max_block(heap) / (sizeof(ctx) / sizeof(ctx[0]) - i);
            ctx[i].ptr = rt_smem_alloc(&heap->parent, ctx[i].size);
            uassert_not_null(ctx[i].ptr);
            rt_memset(ctx[i].ptr, ctx[i].magic, ctx[i].size);
        }
        /* The whole heap is now allocated; the largest free block should be 0 */
        uassert_int_equal(max_block(heap), 0);
        /* Verify that the memory data is correct */
        for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
        {
            uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
        }
        /* Free the even-indexed blocks */
        for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
        {
            if (i % 2 == 0)
            {
                uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
                rt_smem_free(ctx[i].ptr);
                uassert_true(max_block(heap) >= ctx[0].size);
            }
        }
        /* Free the odd-indexed blocks; each free should merge with its freed neighbors */
        for (i = 0; i < sizeof(ctx) / sizeof(ctx[0]); i++)
        {
            if (i % 2 != 0)
            {
                uassert_int_equal(_mem_cmp(ctx[i].ptr, ctx[i].magic, ctx[i].size), 0);
                rt_smem_free(ctx[i].ptr);
                max_free += ctx[i - 1].size + ctx[i + 1].size;
                uassert_true(max_block(heap) >= max_free);
            }
        }
        /* Check whether the memory is fully merged */
        uassert_int_equal(max_block(heap), total_size);
    }
    /* mem realloc test: small -> large */
    {
        /* Allocate a block for the subsequent realloc operations */
        struct mem_test_context ctx[3];
        ctx[0].magic = magic++;
        ctx[0].size = max_block(heap) / 3;
        ctx[0].ptr = rt_smem_alloc(&heap->parent, ctx[0].size);
        uassert_not_null(ctx[0].ptr);
        rt_memset(ctx[0].ptr, ctx[0].magic, ctx[0].size);
        /* Allocate a small block so the free memory after the first block is split */
        ctx[1].magic = magic++;
        ctx[1].size = RT_ALIGN_SIZE;
        ctx[1].ptr = rt_smem_alloc(&heap->parent, ctx[1].size);
        uassert_not_null(ctx[1].ptr);
        rt_memset(ctx[1].ptr, ctx[1].magic, ctx[1].size);
        /* The largest free block should still be larger than the first block */
        uassert_true(max_block(heap) > ctx[0].size);
        /* Grow the first block; it cannot extend in place, so it must move */
        ctx[2].magic = magic++;
        ctx[2].size = max_block(heap);
        ctx[2].ptr = rt_smem_realloc(&heap->parent, ctx[0].ptr, ctx[2].size);
        uassert_not_null(ctx[2].ptr);
        uassert_int_not_equal(ctx[0].ptr, ctx[2].ptr);
        /* The old contents must have been copied to the new location */
        uassert_int_equal(_mem_cmp(ctx[2].ptr, ctx[0].magic, ctx[0].size), 0);
        rt_memset(ctx[2].ptr, ctx[2].magic, ctx[2].size);
        /* Free the second block */
        uassert_int_equal(_mem_cmp(ctx[1].ptr, ctx[1].magic, ctx[1].size), 0);
        rt_smem_free(ctx[1].ptr);
        /* Free the reallocated block */
        uassert_int_equal(_mem_cmp(ctx[2].ptr, ctx[2].magic, ctx[2].size), 0);
        rt_smem_free(ctx[2].ptr);
        /* Check memory integrity */
        uassert_int_equal(max_block(heap), total_size);
    }
    /* mem realloc test: large -> small */
    {
        rt_size_t max_free;
        struct mem_test_context ctx;

        /* alloc a piece of memory */
        ctx.magic = magic++;
        ctx.size = max_block(heap) / 2;
        ctx.ptr = rt_smem_alloc(&heap->parent, ctx.size);
        uassert_not_null(ctx.ptr);
        rt_memset(ctx.ptr, ctx.magic, ctx.size);
        uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
        /* Get remaining memory */
        max_free = max_block(heap);
        /* Shrink the block; shrinking in place should return the same pointer */
        ctx.size = ctx.size / 2;
        uassert_int_equal((rt_ubase_t)rt_smem_realloc(&heap->parent, ctx.ptr, ctx.size), (rt_ubase_t)ctx.ptr);
        /* The largest free block should have grown */
        uassert_true(max_block(heap) > max_free);
        /* Free memory */
        uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
        rt_smem_free(ctx.ptr);
        /* Check memory integrity */
        uassert_int_equal(max_block(heap), total_size);
    }
    /* mem realloc test: same size */
    {
        rt_size_t max_free;
        struct mem_test_context ctx;

        /* alloc a piece of memory */
        ctx.magic = magic++;
        ctx.size = max_block(heap) / 2;
        ctx.ptr = rt_smem_alloc(&heap->parent, ctx.size);
        uassert_not_null(ctx.ptr);
        rt_memset(ctx.ptr, ctx.magic, ctx.size);
        uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
        /* Get remaining memory */
        max_free = max_block(heap);
        /* Realloc to the same size; the pointer should not change */
        uassert_int_equal((rt_ubase_t)rt_smem_realloc(&heap->parent, ctx.ptr, ctx.size), (rt_ubase_t)ctx.ptr);
        /* The largest free block should be unchanged */
        uassert_true(max_block(heap) == max_free);
        /* Free memory */
        uassert_int_equal(_mem_cmp(ctx.ptr, ctx.magic, ctx.size), 0);
        rt_smem_free(ctx.ptr);
        /* Check memory integrity */
        uassert_int_equal(max_block(heap), total_size);
    }
    /* small heap deinit */
    rt_smem_detach(&heap->parent);
    /* release test resources */
    rt_free(buf);
}

struct mem_alloc_context
{
    rt_list_t node;
    rt_size_t size;
    rt_uint8_t magic;
};

struct mem_alloc_head
{
    rt_list_t list;
    rt_size_t count;
    rt_tick_t start;
    rt_tick_t end;
    rt_tick_t interval;
};
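
/*
 * Randomized alloc/free stress test: for a fixed wall-clock period,
 * randomly allocate list nodes (filling the payload with a magic byte)
 * or free the oldest one, verifying the payload before every free.
 */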
#define MEM_RANG_ALLOC_BLK_MIN   2
#define MEM_RANG_ALLOC_BLK_MAX   5
#define MEM_RANG_ALLOC_TEST_TIME 5

static void mem_alloc_test(void)
{
    struct mem_alloc_head head;
    rt_uint8_t *buf;
    struct rt_small_mem *heap;
    rt_size_t total_size, size;
    struct mem_alloc_context *ctx;

    /* init */
    rt_list_init(&head.list);
    head.count = 0;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(MEM_RANG_ALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;

    buf = rt_malloc(TEST_MEM_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    rt_memset(buf, 0xAA, TEST_MEM_SIZE);
    heap = (struct rt_small_mem *)rt_smem_init("mem_tc", buf, TEST_MEM_SIZE);
    total_size = max_block(heap);
    uassert_int_not_equal(total_size, 0);
    /* test run */
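    /*
     * head.start is re-read at each progress interval, so the loop ends once
     * it passes head.end; the unsigned subtraction against RT_TICK_MAX / 2
     * keeps the comparison valid across tick counter wrap-around.
     */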
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");
        }
        /* 60% probability to perform an alloc operation */
        if (rand() % 10 >= 4)
        {
            size = rand() % MEM_RANG_ALLOC_BLK_MAX + MEM_RANG_ALLOC_BLK_MIN;
            size *= sizeof(struct mem_alloc_context);
            ctx = rt_smem_alloc(&heap->parent, size);
            if (ctx == RT_NULL)
            {
                if (head.count == 0)
                {
                    break;
                }
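                /* Allocation failed: free the oldest half of the outstanding blocks to make room */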
                size = head.count / 2;
                while (size != head.count)
                {
                    ctx = rt_list_first_entry(&head.list, struct mem_alloc_context, node);
                    rt_list_remove(&ctx->node);
                    if (ctx->size > sizeof(*ctx))
                    {
                        if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                        {
                            uassert_true(0);
                        }
                    }
                    rt_memset(ctx, 0xAA, ctx->size);
                    rt_smem_free(ctx);
                    head.count --;
                }
                continue;
            }
            if (RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE) != (rt_ubase_t)ctx)
            {
                uassert_int_equal(RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE), (rt_ubase_t)ctx);
            }
            rt_memset(ctx, 0, size);
            rt_list_init(&ctx->node);
            ctx->size = size;
            ctx->magic = rand() & 0xff;
            if (ctx->size > sizeof(*ctx))
            {
                rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            }
            rt_list_insert_after(&head.list, &ctx->node);
            head.count += 1;
        }
        else
        {
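            /* Otherwise free the oldest block, verifying its payload first */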
            if (!rt_list_isempty(&head.list))
            {
                ctx = rt_list_first_entry(&head.list, struct mem_alloc_context, node);
                rt_list_remove(&ctx->node);
                if (ctx->size > sizeof(*ctx))
                {
                    if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                    {
                        uassert_true(0);
                    }
                }
                rt_memset(ctx, 0xAA, ctx->size);
                rt_smem_free(ctx);
                head.count --;
            }
        }
    }
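    /* Drain any remaining blocks; the heap should return to one free block */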
    while (!rt_list_isempty(&head.list))
    {
        ctx = rt_list_first_entry(&head.list, struct mem_alloc_context, node);
        rt_list_remove(&ctx->node);
        if (ctx->size > sizeof(*ctx))
        {
            if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
            {
                uassert_true(0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_smem_free(ctx);
        head.count --;
    }
    uassert_int_equal(head.count, 0);
    uassert_int_equal(max_block(heap), total_size);
    /* small heap deinit */
    rt_smem_detach(&heap->parent);
    /* release test resources */
    rt_free(buf);
}

#define MEM_RANG_REALLOC_BLK_MIN   0
#define MEM_RANG_REALLOC_BLK_MAX   5
#define MEM_RANG_REALLOC_TEST_TIME 5

struct mem_realloc_context
{
    rt_size_t size;
    rt_uint8_t magic;
};

struct mem_realloc_head
{
    struct mem_realloc_context **ctx_tab;
    rt_size_t count;
    rt_tick_t start;
    rt_tick_t end;
    rt_tick_t interval;
};
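
/*
 * Randomized realloc stress test: a table of block pointers is kept, and
 * each iteration reallocates a random slot to a random size (possibly 0,
 * which frees it). Payloads carry a magic byte so that data preserved by
 * realloc can be verified.
 */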
static void mem_realloc_test(void)
{
    struct mem_realloc_head head;
    rt_uint8_t *buf;
    struct rt_small_mem *heap;
    rt_size_t total_size, size, idx;
    struct mem_realloc_context *ctx;
    int res;

    /* estimate how many minimal context blocks fit, to size the pointer table */
    size = RT_ALIGN(sizeof(struct mem_realloc_context), RT_ALIGN_SIZE) + RT_ALIGN_SIZE;
    size = TEST_MEM_SIZE / size;
    /* init */
    head.ctx_tab = RT_NULL;
    head.count = size;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(MEM_RANG_REALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;
    buf = rt_malloc(TEST_MEM_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    rt_memset(buf, 0xAA, TEST_MEM_SIZE);
    heap = (struct rt_small_mem *)rt_smem_init("mem_tc", buf, TEST_MEM_SIZE);
    total_size = max_block(heap);
    uassert_int_not_equal(total_size, 0);
    /* init ctx tab */
    size = head.count * sizeof(struct mem_realloc_context *);
    head.ctx_tab = rt_smem_alloc(&heap->parent, size);
    uassert_not_null(head.ctx_tab);
    rt_memset(head.ctx_tab, 0, size);
    /* test run */
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");
        }
        size = rand() % MEM_RANG_REALLOC_BLK_MAX + MEM_RANG_REALLOC_BLK_MIN;
        size *= sizeof(struct mem_realloc_context);
        idx = rand() % head.count;
        ctx = rt_smem_realloc(&heap->parent, head.ctx_tab[idx], size);
        if (ctx == RT_NULL)
        {
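            /*
             * A NULL result means either the block was freed (size 0) or
             * the allocation failed; in the latter case, randomly free
             * blocks to relieve memory pressure.
             */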
            if (size == 0)
            {
                if (head.ctx_tab[idx])
                {
                    head.ctx_tab[idx] = RT_NULL;
                }
            }
            else
            {
                for (idx = 0; idx < head.count; idx++)
                {
                    ctx = head.ctx_tab[idx];
                    if (rand() % 2 && ctx)
                    {
                        if (ctx->size > sizeof(*ctx))
                        {
                            res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
                            if (res != 0)
                            {
                                uassert_int_equal(res, 0);
                            }
                        }
                        rt_memset(ctx, 0xAA, ctx->size);
                        rt_smem_realloc(&heap->parent, ctx, 0);
                        head.ctx_tab[idx] = RT_NULL;
                    }
                }
            }
            continue;
        }
        /* Verify the preserved payload, up to the smaller of the old and new sizes */
        if (head.ctx_tab[idx] != RT_NULL)
        {
            res = 0;
            if (ctx->size < size)
            {
                if (ctx->size > sizeof(*ctx))
                {
                    res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
                }
            }
            else
            {
                if (size > sizeof(*ctx))
                {
                    res = _mem_cmp(&ctx[1], ctx->magic, size - sizeof(*ctx));
                }
            }
            if (res != 0)
            {
                uassert_int_equal(res, 0);
            }
        }
        /* init mem */
        ctx->magic = rand() & 0xff;
        ctx->size = size;
        if (ctx->size > sizeof(*ctx))
        {
            rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
        }
        head.ctx_tab[idx] = ctx;
    }
    /* free all mem */
    for (idx = 0; idx < head.count; idx++)
    {
        ctx = head.ctx_tab[idx];
        if (ctx == RT_NULL)
        {
            continue;
        }
        if (ctx->size > sizeof(*ctx))
        {
            res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            if (res != 0)
            {
                uassert_int_equal(res, 0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_smem_realloc(&heap->parent, ctx, 0);
        head.ctx_tab[idx] = RT_NULL;
    }
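    /* head.ctx_tab itself is still allocated, so the largest free block must differ from the original total */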
    uassert_int_not_equal(max_block(heap), total_size);
    /* small heap deinit */
    rt_smem_detach(&heap->parent);
    /* release test resources */
    rt_free(buf);
}

static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(mem_functional_test);
    UTEST_UNIT_RUN(mem_alloc_test);
    UTEST_UNIT_RUN(mem_realloc_test);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.mem_tc", utest_tc_init, utest_tc_cleanup, 20);