smp_003_tc.c

/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024/10/28     Shell        Added smp.smoke
 */
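
/*
 * smp.smoke.003: smoke test for rt_smp_call_cpu_mask(). One worker thread
 * is bound to each CPU; every worker issues asynchronous calls to random
 * CPU masks, then verifies that the number of callback invocations it
 * requested matches the number actually observed on its own counter.
 */
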
#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp_call.h>
#include <stdlib.h> /* rand(), srand() */

#define PERCPU_TEST_COUNT 10000
#define NEWLINE_ON        80

static struct rt_semaphore _utestd_exited;
static rt_thread_t _utestd[RT_CPUS_NR];
static rt_atomic_t _entry_counts[RT_CPUS_NR];
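
/*
 * Print one '#' per serviced call as a liveness indicator, breaking the
 * line after every NEWLINE_ON marks so the console output stays readable.
 */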
static void _logging_progress(void)
{
    static rt_atomic_t counts;
    rt_ubase_t old;

    rt_kputs("#");
    old = rt_atomic_add(&counts, 1);
    if (old % NEWLINE_ON == 0)
    {
        rt_kputs("\n");
    }
}
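
/*
 * Callback executed on each CPU named in a request mask. Per SYNC.004 it
 * must run with interrupts disabled; `param` carries the requester's CPU
 * id so the matching per-CPU counter can be credited.
 */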
static void _test_smp_cb(void *param)
{
    rt_ubase_t req_cpuid = (rt_ubase_t)param;

    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004 */
        uassert_true(0);
    }
    _logging_progress();
    rt_atomic_add(&_entry_counts[req_cpuid], 1);
}
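
/*
 * Per-CPU worker. Each instance is bound to one CPU at init time and first
 * checks that it really runs there (SYNC.004). It then issues
 * PERCPU_TEST_COUNT asynchronous calls with random target masks, counting
 * the total number of callbacks it requested via the popcount of each mask.
 */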
static void _utestd_entry(void *oncpu_param)
{
    rt_ubase_t oncpu = (rt_ubase_t)oncpu_param;
    volatile int cpu_mask;
    volatile int popcount = 0;
    rt_thread_t curthr = rt_thread_self();

    if (rt_hw_cpu_id() != oncpu)
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    for (size_t i = 0; i < PERCPU_TEST_COUNT; i++)
    {
        cpu_mask = rand() % RT_ALL_CPU;
        rt_smp_call_cpu_mask(cpu_mask, _test_smp_cb, oncpu_param, 0);
        popcount += __builtin_popcount(cpu_mask);
    }
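
    /*
     * Hop across every CPU before checking the counter; presumably this
     * drains the call queue of each CPU, since the target CPU itself has
     * to run in order to service the migration.
     */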
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_thread_control(curthr, RT_THREAD_CTRL_BIND_CPU, (void *)i);
    }

    LOG_D("popcount %d, _entry_counts[%d] %d", popcount, (int)oncpu,
          (int)rt_atomic_load(&_entry_counts[oncpu]));
    /* MP.002 */
    uassert_true(popcount == rt_atomic_load(&_entry_counts[oncpu]));
    rt_sem_release(&_utestd_exited);
}
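
/*
 * Start all workers, then block until each of them has signaled completion
 * on the exit semaphore.
 */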
static void _async_call(void)
{
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_thread_startup(_utestd[i]);
    }
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_sem_take(&_utestd_exited, RT_WAITING_FOREVER);
    }
}
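
/*
 * Create one worker per CPU, bind it there (SYNC.001, SYNC.002, SYNC.003),
 * reset the per-CPU counters, and seed rand() from the current tick.
 */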
static rt_err_t utest_tc_init(void)
{
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_atomic_store(&_entry_counts[i], 0);
        _utestd[i] = rt_thread_create("utestd", _utestd_entry, (void *)i,
                                      UTEST_THR_STACK_SIZE, UTEST_THR_PRIORITY,
                                      20);
        /* SYNC.001, SYNC.002, SYNC.003 */
        uassert_true(_utestd[i] != RT_NULL);
        rt_thread_control(_utestd[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
    }
    rt_sem_init(&_utestd_exited, "utestd", 0, RT_IPC_FLAG_PRIO);
    srand(rt_tick_get());
    return RT_EOK;
}
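
/* Tear down the exit semaphore once all workers have finished. */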
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_utestd_exited);
    return RT_EOK;
}
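
/*
 * Register the test case with utest; the trailing 10 is the per-case
 * timeout (in seconds, per the utest convention).
 */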
static void _testcase(void)
{
    UTEST_UNIT_RUN(_async_call);
}
UTEST_TC_EXPORT(_testcase, "testcase.smp.smoke.003", utest_tc_init, utest_tc_cleanup, 10);