sched_thread_tc.c

/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-25     Shell        init ver.
 */
#define __RT_KERNEL_SOURCE__
#include <rtthread.h>
#include "utest.h"

#define TEST_LOOP_TIMES      (100 * 1000)     /* suspend/resume rounds per thread */
#define TEST_PROGRESS_COUNTS (36)             /* number of progress ticks to report */
#define TEST_THREAD_COUNT    (RT_CPUS_NR * 1) /* one thread pair per core */
#define TEST_PROGRESS_ON     (TEST_LOOP_TIMES * TEST_THREAD_COUNT / TEST_PROGRESS_COUNTS)

static struct rt_semaphore _thr_exit_sem;     /* signaled by workers on completion */
static rt_atomic_t _progress_counter;         /* total rounds completed across all threads */
static volatile rt_thread_t threads_group[TEST_THREAD_COUNT][2]; /* ping-pong partner pairs */
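
/*
 * Ping-pong scheduling test: the two threads of each pair alternately
 * suspend themselves and resume their partner inside a critical section,
 * driving a high volume of context switches through the scheduler.
 */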
static void _thread_entry1(void *param)
{
    rt_base_t critical_level;
    size_t idx = (size_t)param;

    for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
    {
        /* suspend self and wake the partner atomically w.r.t. the scheduler;
         * the actual switch is deferred until the critical section is exited */
        critical_level = rt_enter_critical();
        rt_thread_suspend(rt_thread_self());
        rt_thread_resume(threads_group[idx][1]);
        rt_exit_critical_safe(critical_level);

        /* emit a progress tick every TEST_PROGRESS_ON rounds */
        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }

    rt_sem_release(&_thr_exit_sem);
    return;
}
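
/* Mirror of _thread_entry1: wakes partner thread [0] instead of [1]. */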
static void _thread_entry2(void *param)
{
    rt_base_t critical_level;
    size_t idx = (size_t)param;

    for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
    {
        critical_level = rt_enter_critical();
        rt_thread_suspend(rt_thread_self());
        rt_thread_resume(threads_group[idx][0]);
        rt_exit_critical_safe(critical_level);

        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
            uassert_true(1);
    }

    rt_sem_release(&_thr_exit_sem);
    return;
}
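
/*
 * Test body: create one thread pair per entry in threads_group, start them
 * all, then block until every pair has signaled the exit semaphore.
 */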
static void scheduler_tc(void)
{
    for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
    {
        rt_thread_t t1 =
            rt_thread_create(
                "t1",
                _thread_entry1,
                (void *)i,
                UTEST_THR_STACK_SIZE,
                UTEST_THR_PRIORITY + 1,
                100);
        rt_thread_t t2 =
            rt_thread_create(
                "t2",
                _thread_entry2,
                (void *)i,
                UTEST_THR_STACK_SIZE,
                UTEST_THR_PRIORITY + 1,
                100);
        threads_group[i][0] = t1;
        threads_group[i][1] = t2;
    }

    for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
    {
        rt_thread_startup(threads_group[i][0]);
        rt_thread_startup(threads_group[i][1]);
    }

    /* wait once per pair for completion */
    for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
    {
        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
    }
}
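
/* utest fixtures: initialize the exit semaphore before the case runs and
 * detach it afterwards. */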
static rt_err_t utest_tc_init(void)
{
    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_thr_exit_sem);
    return RT_EOK;
}
static void testcase(void)
{
    UTEST_UNIT_RUN(scheduler_tc);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.thread", utest_tc_init, utest_tc_cleanup, 10);
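
/*
 * The exported case is typically launched from the msh shell via the utest
 * framework, e.g.:
 *   msh> utest_run testcases.kernel.scheduler.thread
 */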