/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-17     Shell        the first version
 */

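/*
 * Scheduler/semaphore pipeline test.
 *
 * Worker threads are arranged in a matrix of TEST_LEVEL_COUNTS priority
 * levels by KERN_TEST_CONCURRENT_THREADS threads per level. Level 0 produces
 * a token for level 1, every middle level forwards the token to the next
 * level, and the last level sleeps for one tick and feeds the token back to
 * level 0. A per-CPU counter records completed iterations so the test can
 * verify that every worker finished all of its loops.
 */
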
#define __RT_IPC_SOURCE__

#include <rtthread.h>
#include "rthw.h"
#include "utest.h"

#define KERN_TEST_CONFIG_LOOP_TIMES   160
#define KERN_TEST_CONCURRENT_THREADS  (RT_CPUS_NR * 2)
#define KERN_TEST_CONFIG_HIGHEST_PRIO 3
#define KERN_TEST_CONFIG_LOWEST_PRIO  (RT_THREAD_PRIORITY_MAX - 2)

#define TEST_LEVEL_COUNTS (KERN_TEST_CONFIG_LOWEST_PRIO - KERN_TEST_CONFIG_HIGHEST_PRIO + 1)

#if TEST_LEVEL_COUNTS <= RT_CPUS_NR
#warning for the best coverage of this test, TEST_LEVEL_COUNTS should be greater than RT_CPUS_NR
#endif
#if KERN_TEST_CONCURRENT_THREADS < RT_CPUS_NR
#warning for the best coverage of this test, KERN_TEST_CONCURRENT_THREADS should be no less than RT_CPUS_NR
#endif
#if KERN_TEST_CONFIG_LOWEST_PRIO >= RT_THREAD_PRIORITY_MAX - 1
#error the lowest test thread priority must be higher than the idle thread priority
#endif

/* counter for printed progress characters; a newline is emitted every 30 */
static rt_atomic_t _star_counter = 1;
/* released once by each worker thread on exit, taken by the test body */
static struct rt_semaphore _thr_exit_sem;
/* one "resource" semaphore per priority level */
static struct rt_semaphore _level_waiting[TEST_LEVEL_COUNTS];
static rt_thread_t _thread_matrix[TEST_LEVEL_COUNTS][KERN_TEST_CONCURRENT_THREADS];
/* per-CPU count of completed loop iterations */
static rt_atomic_t _load_average[RT_CPUS_NR];

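/* print a progress character and the ID of the CPU the caller runs on (0 when SMP is disabled) */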
static void _print_char(rt_thread_t thr_self, int character)
{
    rt_base_t current_counter;

#ifdef RT_USING_SMP
    rt_kprintf("%c%d", character, RT_SCHED_CTX(thr_self).oncpu);
#else
    rt_kprintf("%c0", character);
#endif /* RT_USING_SMP */

    current_counter = rt_atomic_add(&_star_counter, 1);
    if (current_counter % 30 == 0)
    {
        rt_kprintf("\n");
    }
}

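/* account one finished loop iteration to the CPU that executed it */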
static void _stats_load_avg_inc(void)
{
    int cpuid;

    cpuid = rt_hw_cpu_id();
    rt_atomic_add(&_load_average[cpuid], 1);
}

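/* print the per-CPU counters and check their sum against the expected total */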
static void _stats_load_avg_print(void)
{
    rt_base_t counts = 0;
    const rt_base_t total_test_counts =
        KERN_TEST_CONFIG_LOOP_TIMES * TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS;

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf("%ld ", _load_average[i]);
        counts += _load_average[i];
    }
    rt_kprintf("\n");

    uassert_int_equal(counts, total_test_counts);
}

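/*
 * Worker entry. The level is passed through the parameter: level 0 acts as
 * the producer, the last level as the consumer that feeds tokens back to
 * level 0, and every level in between simply relays the token upwards.
 */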
static void _thread_entry(void *param)
{
    int level = (rt_ubase_t)param;
    rt_thread_t thr_self = rt_thread_self();

    if (level == 0)
    {
        /* always the first to execute among other working threads */
        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
        {
            /* notify our consumer */
            rt_sem_release(&_level_waiting[level + 1]);
            _stats_load_avg_inc();

            /* waiting for resource of ours */
            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
        }
    }
    else if (level == TEST_LEVEL_COUNTS - 1)
    {
        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
        {
            /* waiting for our resource first */
            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
            _stats_load_avg_inc();

            _print_char(thr_self, '*');
            rt_thread_delay(1);

            /* produce for level 0 worker */
            rt_sem_release(&_level_waiting[0]);
        }
    }
    else
    {
        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
        {
            /* waiting for resource of ours */
            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
            _stats_load_avg_inc();

            /* notify our consumer */
            rt_sem_release(&_level_waiting[level + 1]);
        }
    }

    uassert_true(1);
    rt_sem_release(&_thr_exit_sem);

    return;
}

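/* start the whole thread matrix and wait until every worker has exited */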
static void scheduler_tc(void)
{
    LOG_I("Test starts...");
    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
    {
        for (size_t j = 0; j < KERN_TEST_CONCURRENT_THREADS; j++)
        {
            rt_thread_startup(_thread_matrix[i][j]);
        }
    }
    LOG_I("%d threads startup...", TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS);

    /* waiting for sub-threads to exit */
    for (size_t i = 0; i < TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS; i++)
    {
        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
    }

    /* print load average */
    _stats_load_avg_print();
}

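/* create the semaphores and the thread matrix; threads of level i run at priority KERN_TEST_CONFIG_HIGHEST_PRIO + i */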
static rt_err_t utest_tc_init(void)
{
    LOG_I("Setup environment...");
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);

    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
    {
        rt_sem_init(&_level_waiting[i], "test", 0, RT_IPC_FLAG_PRIO);

        for (size_t j = 0; j < KERN_TEST_CONCURRENT_THREADS; j++)
        {
            _thread_matrix[i][j] =
                rt_thread_create("test",
                                 _thread_entry,
                                 (void *)i,
                                 UTEST_THR_STACK_SIZE,
                                 KERN_TEST_CONFIG_HIGHEST_PRIO + i,
                                 5);
            if (!_thread_matrix[i][j])
                uassert_not_null(_thread_matrix[i][j]);
        }
    }
    return RT_EOK;
}

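/* release the semaphores initialized in utest_tc_init() */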
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_thr_exit_sem);
    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
    {
        rt_sem_detach(&_level_waiting[i]);
    }
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(scheduler_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.sem", utest_tc_init, utest_tc_cleanup, 10);