smp_002_tc.c

/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024/10/28     Shell        Added smp.smoke
 */

#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp_call.h>
#include <stdlib.h> /* added: rand() and srand() are used below */

#define PERCPU_TEST_COUNT 10000
#define NEWLINE_ON        80

static struct rt_semaphore _utestd_exited;
static rt_thread_t _utestd[RT_CPUS_NR];
static rt_atomic_t _entry_counts[RT_CPUS_NR];
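
/* Progress marker: print one '#' per delivered callback and break the
 * line every NEWLINE_ON marks so long runs stay readable on the console. */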
static void _logging_progress(void)
{
    static rt_atomic_t counts;
    rt_ubase_t old;

    rt_kputs("#");
    old = rt_atomic_add(&counts, 1);
    if (old % NEWLINE_ON == 0)
    {
        rt_kputs("\n");
    }
}
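
/* Callback run on every CPU named in the request mask. SMP-call callbacks
 * are expected to execute with interrupts disabled (SYNC.004); each entry
 * is counted against the requesting CPU so the requester can check
 * delivery afterwards. */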
static void _test_smp_cb(void *param)
{
    rt_ubase_t req_cpuid = (rt_ubase_t)param;

    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    _logging_progress();
    rt_atomic_add(&_entry_counts[req_cpuid], 1);
}
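
/* Per-CPU test daemon: issue PERCPU_TEST_COUNT blocking SMP calls with
 * random CPU masks, then verify the callback ran exactly as many times as
 * CPUs were requested (TARG.001) and that every CPU was covered over the
 * whole run (TOP.001, TOP.002). Note that rand() % RT_ALL_CPU yields masks
 * in [0, RT_ALL_CPU), so full coverage is accumulated in tested_cpus
 * across iterations rather than requested in any single call. */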
static void _utestd_entry(void *oncpu_param)
{
    rt_ubase_t oncpu = (rt_ubase_t)oncpu_param;
    volatile int cpu_mask;
    volatile int popcount = 0;
    rt_ubase_t tested_cpus = 0;

    if (rt_hw_cpu_id() != oncpu)
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    for (size_t i = 0; i < PERCPU_TEST_COUNT; i++)
    {
        cpu_mask = rand() % RT_ALL_CPU;
        tested_cpus |= cpu_mask;
        rt_smp_call_cpu_mask(cpu_mask, _test_smp_cb, oncpu_param, SMP_CALL_WAIT_ALL);
        popcount += __builtin_popcount(cpu_mask);
    }

    LOG_D("popcount %d, _entry_counts[%d] %d", popcount, oncpu, _entry_counts[oncpu]);

    /* TARG.001 */
    uassert_true(popcount == rt_atomic_load(&_entry_counts[oncpu]));

    /* TOP.001, TOP.002 */
    uassert_true(tested_cpus == RT_ALL_CPU);

    rt_sem_release(&_utestd_exited);
}
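
/* Test unit: start one daemon per CPU, then block until every daemon has
 * signaled completion on _utestd_exited. */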
static void _blocking_mtsafe_call(void)
{
    rt_err_t error;

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        error = rt_thread_startup(_utestd[i]);

        /* SYNC.001, SYNC.002, SYNC.003 */
        uassert_true(!error);
    }

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_sem_take(&_utestd_exited, RT_WAITING_FOREVER);
    }
}
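
/* Setup: create one daemon thread per CPU and bind it there, so each
 * daemon issues its calls from a known core. */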
static rt_err_t utest_tc_init(void)
{
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_atomic_store(&_entry_counts[i], 0);
        _utestd[i] = rt_thread_create("utestd", _utestd_entry, (void *)i,
                                      UTEST_THR_STACK_SIZE, UTEST_THR_PRIORITY,
                                      20);

        /* SYNC.001, SYNC.002, SYNC.003 */
        uassert_true(_utestd[i] != RT_NULL);

        /* bind only after the null check above, so rt_thread_control() is
         * never handed a failed creation */
        rt_thread_control(_utestd[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
    }

    rt_sem_init(&_utestd_exited, "utestd", 0, RT_IPC_FLAG_PRIO);
    srand(rt_tick_get());

    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_utestd_exited);
    return RT_EOK;
}

static void _testcase(void)
{
    UTEST_UNIT_RUN(_blocking_mtsafe_call);
}
UTEST_TC_EXPORT(_testcase, "testcase.smp.smoke.002", utest_tc_init, utest_tc_cleanup, 10);
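
/* Usage note (assuming the utest framework's shell command is enabled in
 * the build):
 *   msh> utest_run testcase.smp.smoke.002
 */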