smp_004_tc.c

/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024/10/28     Shell        Added smp.smoke
 */
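
/*
 * Smoke test for the SMP cross-call layer: each worker thread repeatedly
 * triggers an SMP call on its neighbouring CPU; the handler there, running
 * with interrupts disabled, fans out call requests to every remote CPU, and
 * those callbacks report back through a shared bitmask. Each worker checks
 * that all expected CPUs responded before starting the next round.
 */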
#include <rtdevice.h>
#include <utest.h>
#include <utest_assert.h>
#include <smp_call.h>

#define PERCPU_TEST_COUNT 10000
#define NEWLINE_ON        80
#define MAX_RETRIES       (RT_TICK_PER_SECOND)
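
/* exit semaphore, per-CPU worker threads, the call-request matrix, the
 * per-worker response bitmasks, and the spinlock guarding the shared data */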
static struct rt_semaphore _utestd_exited;
static rt_thread_t _utestd[RT_CPUS_NR];
static rt_atomic_t _entry_counts[RT_CPUS_NR];
static struct rt_smp_call_req _callreq_data[RT_CPUS_NR][RT_CPUS_NR];
static rt_ubase_t _masks_data[RT_CPUS_NR];
static RT_DEFINE_SPINLOCK(_test_data_lock);
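
/* print one progress character per completed callback, wrapping the output
 * line every NEWLINE_ON characters */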
static void _logging_progress(char id)
{
    static rt_atomic_t counts;
    rt_ubase_t old;

    rt_kprintf("%c", id);
    old = rt_atomic_add(&counts, 1);
    if (old % NEWLINE_ON == 0)
    {
        rt_kputs("\n");
    }
}
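
/* call-request callback: must be entered with interrupts disabled; records
 * the CPU it ran on by setting that CPU's bit in the requester's mask under
 * _test_data_lock, then logs progress */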
static void _reentr_isr_cb(void *param)
{
    rt_ubase_t *maskp;
    int oncpu;

    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    rt_spin_lock(&_test_data_lock);
    oncpu = rt_hw_cpu_id();
    maskp = (rt_ubase_t *)param;
    *maskp |= (1 << oncpu);
    rt_spin_unlock(&_test_data_lock);

    _logging_progress('0' + (maskp - _masks_data));
}
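
/* handler fired on the targeted CPU by rt_smp_call_cpu_mask: asserts it is
 * executing on that CPU with interrupts disabled, then queues one
 * pre-initialized call request (carrying _reentr_isr_cb) to every other CPU */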
static void _test_smp_call_isr(void *param)
{
    rt_err_t error;
    rt_ubase_t iter, oncpu = (rt_ubase_t)param;
    struct rt_smp_call_req *callreqp = _callreq_data[oncpu];

    if (rt_hw_cpu_id() != oncpu)
    {
        /* SYNC.004 */
        uassert_true(0);
    }

    if (!rt_hw_interrupt_is_disabled())
    {
        /* SYNC.004, PRIV.001 */
        uassert_true(0);
    }

    rt_smp_for_each_remote_cpu(iter, oncpu)
    {
        error = rt_smp_call_request(iter, SMP_CALL_NO_LOCAL, &callreqp[iter]);
        if (error)
        {
            /* SYNC.002 */
            uassert_false(error);
        }
    }
}
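
/* migrate the calling thread across the remaining CPUs, then poll the shared
 * mask (for up to MAX_RETRIES ticks) until it matches the expected value */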
static rt_ubase_t _wait_for_update(rt_ubase_t *maskp, rt_ubase_t exp, int cpuid, rt_thread_t curthr)
{
    rt_ubase_t level, current_mask;

    for (size_t i = cpuid; i < RT_CPUS_NR; i++)
    {
        rt_thread_control(curthr, RT_THREAD_CTRL_BIND_CPU, (void *)(i % RT_CPUS_NR));
    }

    for (size_t i = 0; i < MAX_RETRIES; i++)
    {
        level = rt_spin_lock_irqsave(&_test_data_lock);
        current_mask = *maskp;
        rt_spin_unlock_irqrestore(&_test_data_lock, level);

        if (current_mask == exp)
        {
            break;
        }
        rt_thread_delay(1);
    }

    return current_mask;
}
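
/* worker thread: repeatedly fires an SMP call at the neighbouring CPU, waits
 * until every CPU other than that neighbour has checked in through the
 * bitmask, then clears the mask and waits for the call requests to be freed */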
static void _utestd_entry(void *oncpu_param)
{
    rt_thread_t curthr = rt_thread_self();
    rt_ubase_t oncpu = (rt_ubase_t)oncpu_param;
    rt_ubase_t worker_id = (oncpu + 1) % RT_CPUS_NR;
    int cpu_mask = 1ul << worker_id;
    rt_ubase_t req_cpus_mask = ~cpu_mask & RT_ALL_CPU;
    rt_ubase_t *mask_data = &_masks_data[worker_id];
    rt_ubase_t current_mask;
    rt_ubase_t level;

    for (size_t i = 0; i < PERCPU_TEST_COUNT; i++)
    {
        rt_smp_call_cpu_mask(cpu_mask, _test_smp_call_isr, (void *)worker_id, 0);

        current_mask = _wait_for_update(mask_data, req_cpus_mask, worker_id, curthr);
        if (current_mask != req_cpus_mask)
        {
            LOG_I("current mask 0x%x, last fetch 0x%x", *mask_data, current_mask);

            /* MP.002, TARG.001 */
            uassert_true(0);
            break;
        }
        else
        {
            rt_ubase_t iter;

            level = rt_spin_lock_irqsave(&_test_data_lock);
            *mask_data = 0;
            rt_spin_unlock_irqrestore(&_test_data_lock, level);

            rt_smp_for_each_remote_cpu(iter, worker_id)
            {
                rt_smp_request_wait_freed(&_callreq_data[worker_id][iter]);
            }
        }
    }

    rt_sem_release(&_utestd_exited);
}
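
/* unit body: start one worker per CPU and wait for all of them to finish */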
static void _test_reentr_isr_main(void)
{
    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_thread_startup(_utestd[i]);
    }

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        rt_sem_take(&_utestd_exited, RT_WAITING_FOREVER);
    }
}
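
/* initialize one call request per (worker, target CPU) pair, reset the masks
 * and counters, create the worker threads and bind each to its own CPU */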
static rt_err_t utest_tc_init(void)
{
    size_t iter_x, iter_y;

    rt_smp_for_each_cpu(iter_x)
    {
        rt_smp_for_each_cpu(iter_y)
        {
            rt_smp_call_req_init(&_callreq_data[iter_x][iter_y],
                                 _reentr_isr_cb, &_masks_data[iter_x]);
        }
    }

    for (size_t i = 0; i < RT_CPUS_NR; i++)
    {
        _masks_data[i] = 0;
        rt_atomic_store(&_entry_counts[i], 0);
        _utestd[i] = rt_thread_create("utestd", _utestd_entry, (void *)i,
                                      UTEST_THR_STACK_SIZE, UTEST_THR_PRIORITY + 1,
                                      20);
        uassert_true(_utestd[i] != RT_NULL);
        rt_thread_control(_utestd[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
    }
    rt_sem_init(&_utestd_exited, "utestd", 0, RT_IPC_FLAG_PRIO);

    srand(rt_tick_get());

    return RT_EOK;
}
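
/* the workers have exited by now; only the exit semaphore needs detaching */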
static rt_err_t utest_tc_cleanup(void)
{
    rt_sem_detach(&_utestd_exited);
    return RT_EOK;
}

static void _testcase(void)
{
    UTEST_UNIT_RUN(_test_reentr_isr_main);
}
UTEST_TC_EXPORT(_testcase, "testcase.smp.smoke.004", utest_tc_init, utest_tc_cleanup, 10);