@@ -30,6 +30,7 @@
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to scheduler.c
  * 2023-03-27     rose_man     Split into scheduler_up.c and scheduler_mp.c
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2023-12-10     xqyjlj       use rt_hw_spinlock
  */

 #include <rtthread.h>
@@ -40,7 +41,7 @@
 #include <rtdbg.h>

 rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
-static RT_DEFINE_SPINLOCK(_spinlock);
+static rt_hw_spinlock_t _mp_scheduler_spinlock;
 rt_uint32_t rt_thread_ready_priority_group;
 #if RT_THREAD_PRIORITY_MAX > 32
 /* Maximum priority level, 256 */
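
For context (not part of the patch): RT_DEFINE_SPINLOCK declares a struct rt_spinlock, whose hardware word is its lock member, while the replacement uses the bare rt_hw_spinlock_t. That is why the per-thread and per-CPU locks below are now taken through ->spinlock.lock. A sketch of the two shapes, abbreviated from my reading of rtdef.h for this RT-Thread version; the _sketch suffixes are mine:

    /* sketch only; field layout abbreviated */
    typedef union {
        unsigned long slock;              /* architecture-defined ticket word */
    } rt_hw_spinlock_t_sketch;

    struct rt_spinlock_sketch
    {
        rt_hw_spinlock_t_sketch lock;     /* what rt_hw_spin_lock() operates on */
    };

So rt_spin_lock(&s) wraps scheduler bookkeeping around the raw lock, whereas rt_hw_spin_lock(&s.lock) takes only the hardware lock itself.
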
@@ -122,7 +123,7 @@ static void _scheduler_stack_check(struct rt_thread *thread)
         rt_kprintf("thread:%s stack overflow\n", thread->parent.name);

         level = rt_hw_local_irq_disable();
-        rt_spin_lock(&_spinlock);
+        rt_hw_spin_lock(&_mp_scheduler_spinlock);
         while (level);
     }
 #endif
@@ -200,6 +201,8 @@ void rt_system_scheduler_init(void)
     LOG_D("start scheduler: max priority 0x%02x",
           RT_THREAD_PRIORITY_MAX);

+    rt_hw_spin_lock_init(&_mp_scheduler_spinlock);
+
     for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
     {
         rt_list_init(&rt_thread_priority_table[offset]);
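
Side note on ordering: unlike RT_DEFINE_SPINLOCK, the raw rt_hw_spinlock_t here gets no static initializer, so the rt_hw_spin_lock_init() call above must run before any path touches _mp_scheduler_spinlock. A minimal usage sketch of the discipline this patch adopts, assuming an SMP build; the demo_* names are mine, the rt_hw_* calls are the ones used throughout this diff:

    #include <rtthread.h>
    #include <rthw.h>

    static rt_hw_spinlock_t demo_lock;

    void demo_init(void)
    {
        rt_hw_spin_lock_init(&demo_lock);            /* before first use */
    }

    void demo_touch(void)
    {
        rt_base_t level = rt_hw_local_irq_disable(); /* raw lock is not IRQ-safe by itself */
        rt_hw_spin_lock(&demo_lock);
        /* ... critical section ... */
        rt_hw_spin_unlock(&demo_lock);
        rt_hw_local_irq_enable(level);
    }
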
@@ -266,14 +269,14 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)
     /* disable interrupt */
     if(is_lock)
     {
-        rt_spin_lock(&(thread->spinlock));
+        rt_hw_spin_lock(&(thread->spinlock.lock));
     }

     if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
     {
         if(is_lock)
         {
-            rt_spin_unlock(&(thread->spinlock));
+            rt_hw_spin_unlock(&(thread->spinlock.lock));
         }
         return;
     }
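
A note on is_lock, inferred from the hunks in this function: RT_TRUE asks the helper to take and release the fine-grained locks itself, which is why every early return above is preceded by a matching unlock; callers that already hold the locks pass RT_FALSE. A hypothetical caller inside this file would look like:

    /* hypothetical caller sketch; assumes it already owns the thread lock */
    void caller_already_locked(struct rt_thread *t)
    {
        rt_hw_spin_lock(&(t->spinlock.lock));
        _rt_schedule_insert_thread(t, RT_FALSE);     /* helper must not re-lock */
        rt_hw_spin_unlock(&(t->spinlock.lock));
    }
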
@@ -284,7 +287,7 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)
         thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
         if(is_lock)
         {
-            rt_spin_unlock(&(thread->spinlock));
+            rt_hw_spin_unlock(&(thread->spinlock.lock));
         }
         return;
     }
@@ -317,7 +320,7 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)
         }
         if(is_lock)
         {
-            rt_spin_unlock(&(thread->spinlock));
+            rt_hw_spin_unlock(&(thread->spinlock.lock));
         }

         cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
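
Worked example of the mask arithmetic above, assuming four cores with the caller on cpu_id == 1:

    /* RT_CPU_MASK  = 0b1111
     * 1 << cpu_id  = 0b0010
     * cpu_mask     = 0b1111 ^ 0b0010 = 0b1101   -> every core except the caller,
     * presumably the target set for the IPI sent later in this function. */
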
@@ -329,7 +332,7 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)

         if(is_lock)
         {
-            rt_spin_lock(&(pcpu->spinlock));
+            rt_hw_spin_lock(&(pcpu->spinlock.lock));
         }
 #if RT_THREAD_PRIORITY_MAX > 32
         pcpu->ready_table[thread->number] |= thread->high_mask;
@@ -351,8 +354,8 @@ static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)

         if(is_lock)
         {
-            rt_spin_unlock(&(pcpu->spinlock));
-            rt_spin_unlock(&(thread->spinlock));
+            rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+            rt_hw_spin_unlock(&(thread->spinlock.lock));
         }

         if (cpu_id != bind_cpu)
@@ -397,7 +400,7 @@ static void _rt_schedule_remove_thread(struct rt_thread *thread, rt_bool_t is_lock)
         struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu);
         if(is_lock)
         {
-            rt_spin_lock(&(pcpu->spinlock));
+            rt_hw_spin_lock(&(pcpu->spinlock.lock));
         }
         if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority])))
         {
@@ -413,7 +416,7 @@ static void _rt_schedule_remove_thread(struct rt_thread *thread, rt_bool_t is_lock)
         }
         if(is_lock)
         {
-            rt_spin_unlock(&(pcpu->spinlock));
+            rt_hw_spin_unlock(&(pcpu->spinlock.lock));
         }
     }
 }
@@ -428,17 +431,17 @@ void rt_system_scheduler_start(void)
     rt_ubase_t highest_ready_priority;

     rt_hw_local_irq_disable();
-    rt_spin_lock(&_spinlock);
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);

     to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
-    rt_spin_lock(&to_thread->spinlock);
+    rt_hw_spin_lock(&(to_thread->spinlock.lock));
     to_thread->oncpu = rt_hw_cpu_id();

     _rt_schedule_remove_thread(to_thread, RT_TRUE);
     to_thread->stat = RT_THREAD_RUNNING;

-    rt_spin_unlock(&to_thread->spinlock);
-    rt_spin_unlock(&_spinlock);
+    rt_hw_spin_unlock(&(to_thread->spinlock.lock));
+    rt_hw_spin_unlock(&_mp_scheduler_spinlock);

     rt_hw_spin_unlock(&_cpus_lock);
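
The start-up path above, annotated with my reading of each step:

    /* rt_hw_local_irq_disable();                   interrupts off first
     * rt_hw_spin_lock(&_mp_scheduler_spinlock);    global ready-queue state
     * rt_hw_spin_lock(&to_thread->spinlock.lock);  the thread being launched
     * ... pick the highest-priority thread, mark it RUNNING ...
     * unlock in reverse order, then drop _cpus_lock so secondary cores
     * may enter their schedulers. */
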
@@ -465,19 +468,19 @@ void rt_schedule(void)
     /* disable interrupt */
     level = rt_hw_local_irq_disable();

-    rt_spin_lock(&_spinlock);
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);

     cpu_id = rt_hw_cpu_id();
     pcpu   = rt_cpu_index(cpu_id);
-    rt_spin_lock(&pcpu->spinlock);
+    rt_hw_spin_lock(&(pcpu->spinlock.lock));
     current_thread = pcpu->current_thread;

     /* whether do switch in interrupt */
     if (rt_atomic_load(&(pcpu->irq_nest)))
     {
         pcpu->irq_switch_flag = 1;
-        rt_spin_unlock(&pcpu->spinlock);
-        rt_spin_unlock(&_spinlock);
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
         rt_hw_local_irq_enable(level);
         goto __exit;
     }
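
When rt_schedule() is entered while the CPU is still in interrupt context (irq_nest nonzero), the switch is not performed inline; it is parked behind pcpu->irq_switch_flag and replayed on interrupt exit by rt_scheduler_do_irq_switch() further down in this patch. The handshake, sketched:

    /* rt_schedule() in IRQ context:  pcpu->irq_switch_flag = 1;  then bail out
     * rt_scheduler_do_irq_switch():  if (pcpu->irq_switch_flag == 0) return;
     *                                ... perform the parked switch ...
     * The flag itself is presumably cleared in lines elided from this excerpt. */
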
@@ -498,7 +501,7 @@ void rt_schedule(void)
     }
 #endif /* RT_USING_SIGNALS */

-    rt_spin_lock(&(current_thread->spinlock));
+    rt_hw_spin_lock(&(current_thread->spinlock.lock));
     if (rt_atomic_load(&(current_thread->critical_lock_nest)) == 0) /* whether lock scheduler */
     {
         rt_ubase_t highest_ready_priority;
@@ -533,7 +536,7 @@ void rt_schedule(void)

         if (to_thread != current_thread)
         {
-            rt_spin_lock(&(to_thread->spinlock));
+            rt_hw_spin_lock(&(to_thread->spinlock.lock));
         }
         to_thread->oncpu = cpu_id;
         if (to_thread != current_thread)
@@ -560,9 +563,9 @@ void rt_schedule(void)

             RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));

-            rt_spin_unlock(&(to_thread->spinlock));
-            rt_spin_unlock(&pcpu->spinlock);
-            rt_spin_unlock(&_spinlock);
+            rt_hw_spin_unlock(&(to_thread->spinlock.lock));
+            rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+            rt_hw_spin_unlock(&_mp_scheduler_spinlock);

             need_unlock = RT_FALSE;
             rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
@@ -573,29 +576,29 @@ void rt_schedule(void)

     if(need_unlock)
     {
-        rt_spin_unlock(&(current_thread->spinlock));
-        rt_spin_unlock(&pcpu->spinlock);
-        rt_spin_unlock(&_spinlock);
+        rt_hw_spin_unlock(&(current_thread->spinlock.lock));
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
     }
     rt_hw_local_irq_enable(level);

 #ifdef RT_USING_SIGNALS
     /* check stat of thread for signal */
-    rt_spin_lock(&(current_thread->spinlock));
+    rt_hw_spin_lock(&(current_thread->spinlock.lock));
     if (current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
     {
         extern void rt_thread_handle_sig(rt_bool_t clean_state);

         current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;

-        rt_spin_unlock(&(current_thread->spinlock));
+        rt_hw_spin_unlock(&(current_thread->spinlock.lock));

         /* check signal status */
         rt_thread_handle_sig(RT_TRUE);
     }
     else
     {
-        rt_spin_unlock(&(current_thread->spinlock));
+        rt_hw_spin_unlock(&(current_thread->spinlock.lock));
     }
 #endif /* RT_USING_SIGNALS */
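
Why need_unlock exists, sketched from the control flow above: rt_hw_context_switch() does not return on the switching path, so the locks are dropped before the call, and only the no-switch path still holds them afterwards.

    /* need_unlock = RT_TRUE;
     * take locks;
     * if (switching to another thread) {
     *     release all locks;          spinlocks must not be held across a switch
     *     need_unlock = RT_FALSE;
     *     rt_hw_context_switch(...);  resumes here only on a later reschedule
     * }
     * if (need_unlock) release all locks;   no-switch path only */
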
@@ -618,10 +621,10 @@ void rt_scheduler_do_irq_switch(void *context)
     rt_bool_t need_unlock = RT_TRUE;

     level = rt_hw_local_irq_disable();
-    rt_spin_lock(&_spinlock);
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);
     cpu_id = rt_hw_cpu_id();
     pcpu   = rt_cpu_index(cpu_id);
-    rt_spin_lock(&pcpu->spinlock);
+    rt_hw_spin_lock(&(pcpu->spinlock.lock));
     current_thread = pcpu->current_thread;

 #ifdef RT_USING_SIGNALS
@@ -642,12 +645,12 @@ void rt_scheduler_do_irq_switch(void *context)

     if (pcpu->irq_switch_flag == 0)
     {
-        rt_spin_unlock(&pcpu->spinlock);
-        rt_spin_unlock(&_spinlock);
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
         rt_hw_local_irq_enable(level);
         return;
     }
-    rt_spin_lock(&(current_thread->spinlock));
+    rt_hw_spin_lock(&(current_thread->spinlock.lock));
     if (rt_atomic_load(&(current_thread->critical_lock_nest)) == 0 &&
         rt_atomic_load(&(pcpu->irq_nest)) == 0)
     {
@@ -686,7 +689,7 @@ void rt_scheduler_do_irq_switch(void *context)

         if (to_thread != current_thread)
         {
-            rt_spin_lock(&(to_thread->spinlock));
+            rt_hw_spin_lock(&(to_thread->spinlock.lock));
         }
         to_thread->oncpu = cpu_id;
         if (to_thread != current_thread)
@@ -707,9 +710,9 @@ void rt_scheduler_do_irq_switch(void *context)

             RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));

-            rt_spin_unlock(&(to_thread->spinlock));
-            rt_spin_unlock(&pcpu->spinlock);
-            rt_spin_unlock(&_spinlock);
+            rt_hw_spin_unlock(&(to_thread->spinlock.lock));
+            rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+            rt_hw_spin_unlock(&_mp_scheduler_spinlock);

             need_unlock = RT_FALSE;
             rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
@@ -720,9 +723,9 @@ void rt_scheduler_do_irq_switch(void *context)

     if(need_unlock)
     {
-        rt_spin_unlock(&(current_thread->spinlock));
-        rt_spin_unlock(&pcpu->spinlock);
-        rt_spin_unlock(&_spinlock);
+        rt_hw_spin_unlock(&(current_thread->spinlock.lock));
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
     }

     rt_hw_local_irq_enable(level);
@@ -739,9 +742,11 @@ void rt_scheduler_do_irq_switch(void *context)
 void rt_schedule_insert_thread(struct rt_thread *thread)
 {
     rt_base_t level;
-    level = rt_spin_lock_irqsave(&_spinlock);
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);
     _rt_schedule_insert_thread(thread, RT_TRUE);
-    rt_spin_unlock_irqrestore(&_spinlock, level);
+    rt_hw_spin_unlock(&_mp_scheduler_spinlock);
+    rt_hw_local_irq_enable(level);
 }

 /**
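
The removed rt_spin_lock_irqsave()/rt_spin_unlock_irqrestore() pair bundled two steps that are now spelled out over the raw lock. An equivalence sketch; the *_like names are mine, the calls are the ones this patch uses:

    static rt_base_t hw_lock_irqsave_like(rt_hw_spinlock_t *lock)
    {
        rt_base_t level = rt_hw_local_irq_disable(); /* 1. mask local interrupts */
        rt_hw_spin_lock(lock);                       /* 2. take the raw lock */
        return level;
    }

    static void hw_unlock_irqrestore_like(rt_hw_spinlock_t *lock, rt_base_t level)
    {
        rt_hw_spin_unlock(lock);                     /* reverse order going out */
        rt_hw_local_irq_enable(level);
    }
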
@@ -754,11 +759,13 @@ void rt_schedule_insert_thread(struct rt_thread *thread)
 void rt_schedule_remove_thread(struct rt_thread *thread)
 {
     rt_base_t level;
-    level = rt_spin_lock_irqsave(&_spinlock);
-    rt_spin_lock(&thread->spinlock);
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&_mp_scheduler_spinlock);
+    rt_hw_spin_lock(&(thread->spinlock.lock));
     _rt_schedule_remove_thread(thread, RT_TRUE);
-    rt_spin_unlock(&thread->spinlock);
-    rt_spin_unlock_irqrestore(&_spinlock, level);
+    rt_hw_spin_unlock(&(thread->spinlock.lock));
+    rt_hw_spin_unlock(&_mp_scheduler_spinlock);
+    rt_hw_local_irq_enable(level);
 }

 /**