  1. /*
  2. * Copyright (c) 2006-2024 RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2024/9/12 zhujiale the first version
  9. */
  10. #include "smp.h"
  11. #define DBG_TAG "SMP"
  12. #define DBG_LVL DBG_INFO
  13. #include <rtdbg.h>
  14. struct rt_smp_call rt_smp_work[RT_CPUS_NR];
  15. rt_atomic_t rt_smp_wait;
  16. rt_err_t smp_call_handler(struct rt_smp_event *event)
  17. {
  18. switch (event->event_id)
  19. {
  20. case SMP_CALL_EVENT_FUNC:
  21. event->func(event->data);
  22. rt_atomic_add(&rt_smp_wait, 1);
  23. break;
  24. default:
  25. LOG_E("error event id\n");
  26. return -RT_ERROR;
  27. }
  28. return RT_EOK;
  29. }
  30. void rt_smp_call_ipi_handler(int vector, void *param)
  31. {
  32. int err;
  33. int cur_cpu = rt_hw_cpu_id();
  34. rt_spin_lock(&rt_smp_work[cur_cpu].lock);
  35. if (rt_smp_work[cur_cpu].event.event_id)
  36. {
  37. err = smp_call_handler(&rt_smp_work[cur_cpu].event);
  38. if (err)
  39. {
  40. LOG_E("Have no event\n");
  41. rt_memset(&rt_smp_work[cur_cpu].event, 0, sizeof(struct rt_smp_event));
  42. rt_spin_unlock(&rt_smp_work[cur_cpu].lock);
  43. }
  44. rt_memset(&rt_smp_work[cur_cpu].event, 0, sizeof(struct rt_smp_event));
  45. }
  46. rt_spin_unlock(&rt_smp_work[cur_cpu].lock);
  47. }
  48. /**
  49. * @brief call function on specified CPU ,
  50. *
  51. * @param cpu_mask cpu mask for call
  52. * @param func the function pointer
  53. * @param data the data pointer
  54. * @param flag call flag if you set SMP_CALL_WAIT_ALL
  55. * then it will wait all cpu call finish and return
  56. * else it will call function on specified CPU and return immediately
  57. * @param cond the condition function pointer,if you set it then it will call function only when cond return true
  58. */
  59. void rt_smp_call_func_cond(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond)
  60. {
  61. RT_DEBUG_NOT_IN_INTERRUPT;
  62. struct rt_smp_event event;
  63. rt_bool_t need_call = RT_TRUE, need_wait = RT_FALSE;
  64. int cur_cpu = rt_hw_cpu_id();
  65. int cpuid = 1 << cur_cpu;
  66. int tmp_id = 0, cpu_nr = 0;
  67. int tmp_mask;
  68. if (flag == SMP_CALL_WAIT_ALL)
  69. {
  70. need_wait = RT_TRUE;
  71. rt_atomic_store(&rt_smp_wait, 0);
  72. }
  73. if (cpuid & cpu_mask)
  74. {
  75. func(data);
  76. cpu_mask = cpu_mask & (~cpuid);
  77. }
  78. if (!cpu_mask)
  79. need_call = RT_FALSE;
  80. tmp_mask = cpu_mask;
  81. if (need_call)
  82. {
  83. while (tmp_mask)
  84. {
  85. if ((tmp_mask & 1) && (tmp_id < RT_CPUS_NR))
  86. {
  87. if (cond && !cond(tmp_id, data))
  88. continue;
  89. cpu_nr++;
  90. event.event_id = SMP_CALL_EVENT_FUNC;
  91. event.func = func;
  92. event.data = data;
  93. event.cpu_mask = cpu_mask;
  94. rt_spin_lock(&rt_smp_work[tmp_id].lock);
  95. rt_smp_work[tmp_id].event = event;
  96. rt_spin_unlock(&rt_smp_work[tmp_id].lock);
  97. }
  98. tmp_id++;
  99. tmp_mask = tmp_mask >> 1;
  100. }
  101. rt_hw_ipi_send(RT_FUNC_IPI, cpu_mask);
  102. }
  103. if (need_wait)
  104. {
  105. while (rt_atomic_load(&rt_smp_wait) != cpu_nr);
  106. }
  107. }
  108. void rt_smp_call_each_cpu(rt_smp_call_func_back func, void *data, rt_uint8_t flag)
  109. {
  110. rt_smp_call_func_cond(RT_ALL_CPU, func, data, flag, RT_NULL);
  111. }
  112. void rt_smp_call_each_cpu_cond(rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond_func)
  113. {
  114. rt_smp_call_func_cond(RT_ALL_CPU, func, data, flag, cond_func);
  115. }
  116. void rt_smp_call_any_cpu(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag)
  117. {
  118. rt_smp_call_func_cond(cpu_mask, func, data, flag, RT_NULL);
  119. }
  120. void rt_smp_call_any_cpu_cond(int cpu_mask, rt_smp_call_func_back func, void *data, rt_uint8_t flag, rt_smp_cond cond_func)
  121. {
  122. rt_smp_call_func_cond(cpu_mask, func, data, flag, cond_func);
  123. }
  124. void rt_smp_init(void)
  125. {
  126. for (int i = 0; i < RT_CPUS_NR; i++)
  127. {
  128. rt_memset(&rt_smp_work[i], 0, sizeof(struct rt_smp_call));
  129. rt_spin_lock_init(&rt_smp_work[i].lock);
  130. }
  131. }