/*
 * Copyright (c) 2006-2019, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-10-14     tyx          the first version
 */

#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"

#define TEST_SLAB_SIZE (1024 * 1024)
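
/*
 * A sketch of the rt_slab lifecycle as exercised by this test case
 * (inferred from the calls used below, not the full API):
 *
 *   rt_slab_t heap = rt_slab_init("name", buf, size);  // build a slab heap on a caller-owned buffer
 *   void *p = rt_slab_alloc(heap, len);                // returns RT_NULL when the heap is exhausted
 *   p = rt_slab_realloc(heap, p, new_len);             // new_len == 0 frees the block
 *   rt_slab_free(heap, p);
 *   rt_slab_detach(heap);                              // tear down the heap; the buffer itself survives
 */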

/* Compare 'size' bytes starting at 'ptr' against the repeated byte value 'v';
 * returns 0 when every byte matches. */
static int _mem_cmp(void *ptr, rt_uint8_t v, rt_size_t size)
{
    while (size-- != 0)
    {
        if (*(rt_uint8_t *)ptr != v)
            return *(rt_uint8_t *)ptr - v;
        ptr = (rt_uint8_t *)ptr + 1;    /* advance, otherwise the same byte is tested repeatedly */
    }
    return 0;
}

/* bookkeeping header written at the start of every allocated block */
struct slab_alloc_context
{
    rt_list_t node;     /* links the block into the live-allocation list */
    rt_size_t size;     /* total size of this block, header included */
    rt_uint8_t magic;   /* byte pattern filled into the payload */
};

struct slab_alloc_head
{
    rt_list_t list;     /* list of live allocations */
    rt_size_t count;    /* number of live allocations */
    rt_tick_t start;
    rt_tick_t end;
    rt_tick_t interval;
};

#define SLAB_RANG_ALLOC_BLK_MIN   2
#define SLAB_RANG_ALLOC_BLK_MAX   5
#define SLAB_RANG_ALLOC_TEST_TIME 5
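
/*
 * Random alloc/free stress test: for SLAB_RANG_ALLOC_TEST_TIME seconds,
 * allocate a random-sized block with 60% probability (free one otherwise),
 * fill each payload with a per-block magic byte, and verify the pattern is
 * still intact when the block is released. When the heap runs dry, half of
 * the live blocks are drained and the loop continues.
 */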
static void slab_alloc_test(void)
{
    struct slab_alloc_head head;
    rt_uint8_t *buf;
    rt_slab_t heap;
    rt_size_t size;
    struct slab_alloc_context *ctx;

    /* init */
    rt_list_init(&head.list);
    head.count = 0;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(SLAB_RANG_ALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;   /* print ~20 progress marks over the run */
    buf = rt_malloc(TEST_SLAB_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    rt_memset(buf, 0xAA, TEST_SLAB_SIZE);
    heap = rt_slab_init("slab_tc", buf, TEST_SLAB_SIZE);

    /* test run; the unsigned subtraction wraps once 'start' passes 'end' */
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");    /* progress marker */
        }
        /* 60% probability (rand() % 10 >= 4) to perform an alloc operation */
        if (rand() % 10 >= 4)
        {
            size = rand() % SLAB_RANG_ALLOC_BLK_MAX + SLAB_RANG_ALLOC_BLK_MIN;
            size *= sizeof(struct slab_alloc_context);
            ctx = rt_slab_alloc(heap, size);
            if (ctx == RT_NULL)
            {
                if (head.count == 0)
                {
                    break;
                }
                /* heap exhausted: free blocks until only half remain */
                size = head.count / 2;
                while (size != head.count)
                {
                    ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
                    rt_list_remove(&ctx->node);
                    if (ctx->size > sizeof(*ctx))
                    {
                        if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                        {
                            uassert_true(0);
                        }
                    }
                    rt_memset(ctx, 0xAA, ctx->size);
                    rt_slab_free(heap, ctx);
                    head.count--;
                }
                continue;
            }
            /*if (RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE) != (rt_ubase_t)ctx)
            {
                uassert_int_equal(RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE), (rt_ubase_t)ctx);
            }*/
            rt_memset(ctx, 0, size);
            rt_list_init(&ctx->node);
            ctx->size = size;
            ctx->magic = rand() & 0xff;
            if (ctx->size > sizeof(*ctx))
            {
                /* fill the payload after the header with the magic byte */
                rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            }
            rt_list_insert_after(&head.list, &ctx->node);
            head.count += 1;
        }
        else
        {
            if (!rt_list_isempty(&head.list))
            {
                ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
                rt_list_remove(&ctx->node);
                if (ctx->size > sizeof(*ctx))
                {
                    /* the payload must still hold the magic pattern */
                    if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                    {
                        uassert_true(0);
                    }
                }
                rt_memset(ctx, 0xAA, ctx->size);
                rt_slab_free(heap, ctx);
                head.count--;
            }
        }
    }

    /* drain whatever is still allocated, verifying each block on the way out */
    while (!rt_list_isempty(&head.list))
    {
        ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
        rt_list_remove(&ctx->node);
        if (ctx->size > sizeof(*ctx))
        {
            if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
            {
                uassert_true(0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_slab_free(heap, ctx);
        head.count--;
    }
    uassert_int_equal(head.count, 0);

    /* slab heap deinit */
    rt_slab_detach(heap);

    /* release test resources */
    rt_free(buf);
}

#define SLAB_RANG_REALLOC_BLK_MIN   0   /* a minimum of 0 lets realloc-to-zero (the free path) be exercised */
#define SLAB_RANG_REALLOC_BLK_MAX   5
#define SLAB_RANG_REALLOC_TEST_TIME 5

struct slab_realloc_context
{
    rt_size_t size;     /* current size of this block, header included */
    rt_uint8_t magic;   /* byte pattern filled into the payload */
};

struct slab_realloc_head
{
    struct slab_realloc_context **ctx_tab;  /* table of slots, each holding one live block or RT_NULL */
    rt_size_t count;
    rt_tick_t start;
    rt_tick_t end;
    rt_tick_t interval;
};
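
/*
 * Random realloc stress test: keep a fixed table of block pointers, pick a
 * random slot each round, and rt_slab_realloc() it to a random size
 * (possibly 0, which frees the slot). After a grow, the whole old payload
 * must be preserved; after a shrink, the surviving prefix must be. On
 * exhaustion, roughly half of the table is released at random and the loop
 * continues.
 */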
static void slab_realloc_test(void)
{
    struct slab_realloc_head head;
    rt_uint8_t *buf;
    rt_slab_t heap;
    rt_size_t size, idx;
    struct slab_realloc_context *ctx;
    int res;

    /* estimate how many context-sized blocks the heap can hold */
    size = RT_ALIGN(sizeof(struct slab_realloc_context), RT_ALIGN_SIZE) + RT_ALIGN_SIZE;
    size = TEST_SLAB_SIZE / size;

    /* init */
    head.ctx_tab = RT_NULL;
    head.count = size;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(SLAB_RANG_REALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;   /* print ~20 progress marks over the run */
    buf = rt_malloc(TEST_SLAB_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    rt_memset(buf, 0xAA, TEST_SLAB_SIZE);
    heap = rt_slab_init("slab_tc", buf, TEST_SLAB_SIZE);

    /* init ctx tab */
    size = head.count * sizeof(struct slab_realloc_context *);
    head.ctx_tab = rt_slab_alloc(heap, size);
    uassert_not_null(head.ctx_tab);
    rt_memset(head.ctx_tab, 0, size);

    /* test run; the unsigned subtraction wraps once 'start' passes 'end' */
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");    /* progress marker */
        }
        size = rand() % SLAB_RANG_REALLOC_BLK_MAX + SLAB_RANG_REALLOC_BLK_MIN;
        size *= sizeof(struct slab_realloc_context);
        idx = rand() % head.count;
        /* realloc on an RT_NULL slot allocates a fresh block */
        ctx = rt_slab_realloc(heap, head.ctx_tab[idx], size);
        if (ctx == RT_NULL)
        {
            if (size == 0)
            {
                /* realloc to zero frees the block */
                if (head.ctx_tab[idx])
                {
                    head.ctx_tab[idx] = RT_NULL;
                }
            }
            else
            {
                /* heap exhausted: randomly release about half of the table */
                for (idx = 0; idx < head.count; idx++)
                {
                    ctx = head.ctx_tab[idx];
                    if (rand() % 2 && ctx)
                    {
                        if (ctx->size > sizeof(*ctx))
                        {
                            res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
                            if (res != 0)
                            {
                                uassert_int_equal(res, 0);
                            }
                        }
                        rt_memset(ctx, 0xAA, ctx->size);
                        rt_slab_realloc(heap, ctx, 0);
                        head.ctx_tab[idx] = RT_NULL;
                    }
                }
            }
            continue;
        }
        /* check slab: the smaller of the old and new sizes bounds the preserved payload */
        if (head.ctx_tab[idx] != RT_NULL)
        {
            res = 0;
            if (ctx->size < size)
            {
                if (ctx->size > sizeof(*ctx))
                {
                    res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
                }
            }
            else
            {
                if (size > sizeof(*ctx))
                {
                    res = _mem_cmp(&ctx[1], ctx->magic, size - sizeof(*ctx));
                }
            }
            if (res != 0)
            {
                uassert_int_equal(res, 0);
            }
        }
        /* init slab */
        ctx->magic = rand() & 0xff;
        ctx->size = size;
        if (ctx->size > sizeof(*ctx))
        {
            rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
        }
        head.ctx_tab[idx] = ctx;
    }

    /* free all slab blocks, verifying each payload first */
    for (idx = 0; idx < head.count; idx++)
    {
        ctx = head.ctx_tab[idx];
        if (ctx == RT_NULL)
        {
            continue;
        }
        if (ctx->size > sizeof(*ctx))
        {
            res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            if (res != 0)
            {
                uassert_int_equal(res, 0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_slab_realloc(heap, ctx, 0);
        head.ctx_tab[idx] = RT_NULL;
    }

    /* slab heap deinit */
    rt_slab_detach(heap);

    /* release test resources */
    rt_free(buf);
}

static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}
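
/*
 * utest glue: each UTEST_UNIT_RUN() entry below runs as one unit, and the
 * final argument to UTEST_TC_EXPORT() is the timeout budget for the whole
 * test case (in seconds, per the utest framework convention).
 */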
static void testcase(void)
{
    UTEST_UNIT_RUN(slab_alloc_test);
    UTEST_UNIT_RUN(slab_realloc_test);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.slab_tc", utest_tc_init, utest_tc_cleanup, 20);