/*
 * smp.c - minimal SMP cross-CPU function-call (IPI) support.
 */
#include "smp.h"

/* Per-CPU pending cross-CPU call lists: global_work[cpu] holds the list
 * head and spinlock for that CPU. Allocated and assigned by smp_init(). */
struct smp_call **global_work;
  3. rt_err_t smp_call_handler(struct smp_event * event)
  4. {
  5. switch(event->event_id)
  6. {
  7. case SMP_CALL_EVENT_FUNC:
  8. event->func(event->data);
  9. break;
  10. default:
  11. rt_kprintf("error event id\n");
  12. return -RT_ERROR;
  13. break;
  14. }
  15. return RT_EOK;
  16. }
  17. void rt_smp_call_ipi_handler(int vector, void *param)
  18. {
  19. int err;
  20. struct smp_call *work,*tmp;
  21. int cur_cpu = rt_hw_cpu_id();
  22. rt_spin_lock(&global_work[cur_cpu]->lock);
  23. rt_list_for_each_entry_safe(work,tmp,&global_work[cur_cpu]->node,node)
  24. {
  25. if(work->event)
  26. {
  27. err = smp_call_handler(work->event);
  28. if(err)
  29. break;
  30. rt_list_remove(&work->node);
  31. rt_free(work);
  32. }
  33. }
  34. rt_spin_unlock(&global_work[cur_cpu]->lock);
  35. }
  36. void rt_smp_call_func_cond(int cpu_mask, smp_func func, void *data)
  37. {
  38. rt_bool_t run_cur_cpu = RT_TRUE;
  39. rt_bool_t need_call = RT_TRUE;
  40. int cur_cpu = rt_hw_cpu_id();
  41. int cpuid = 1 << cur_cpu;
  42. int tmp_id = 0;
  43. int tmp_mask = cpu_mask;
  44. if(cpuid & ~cpu_mask)
  45. run_cur_cpu = RT_FALSE;
  46. if(run_cur_cpu)
  47. func(data);
  48. if(!(cpu_mask & cpuid))
  49. need_call = RT_FALSE;
  50. else
  51. cpu_mask = cpu_mask & (~cpuid);
  52. if(need_call)
  53. {
  54. while(tmp_mask)
  55. {
  56. if((tmp_mask & 1) && tmp_id < RT_CPUS_NR)
  57. {
  58. struct smp_event *event = rt_calloc(1, sizeof(struct smp_event));
  59. event->event_id = SMP_CALL_EVENT_FUNC;
  60. event->func = func;
  61. event->data = data;
  62. event->cpu_mask = cpu_mask;
  63. struct smp_call *work = rt_calloc(1, sizeof(struct smp_call));
  64. if(work == RT_NULL)
  65. break;
  66. work->event = event;
  67. rt_spin_lock(&global_work[tmp_id]->lock);
  68. rt_list_insert_before(&global_work[tmp_id]->node, &work->node);
  69. rt_spin_unlock(&global_work[tmp_id]->lock);
  70. }
  71. tmp_id++;
  72. tmp_mask = tmp_mask >> 1;
  73. }
  74. rt_hw_ipi_send(RT_IPI_FUNC, cpu_mask);
  75. }
  76. }
  77. void smp_init(void)
  78. {
  79. struct smp_call **work_list = (struct smp_call **)rt_malloc(sizeof(struct smp_call *));
  80. for(int i = 0; i < RT_CPUS_NR; i++)
  81. {
  82. work_list[i] = rt_calloc(1, sizeof(struct smp_call));
  83. if(work_list[i] == RT_NULL)
  84. break;
  85. rt_list_init(&work_list[i]->node);
  86. rt_spin_lock_init(&work_list[i]->lock);
  87. }
  88. global_work = work_list;
  89. }
  90. void test_call(void *data)
  91. {
  92. rt_kprintf("call cpu id = %d \n",rt_hw_cpu_id());
  93. }
  94. void test()
  95. {
  96. int cpu_mask = 0xf;
  97. rt_smp_call_func_cond(cpu_mask,test_call, RT_NULL);
  98. }