Browse Source

🐞 fix: fix rt_thread_delay, assert, deadlock (#8366)

xqyjlj 1 year ago
parent
commit
48c78ba4e0
3 changed files with 61 additions and 24 deletions
  1. src/ipc.c (+15 −7)
  2. src/scheduler_mp.c (+21 −3)
  3. src/thread.c (+25 −14)

+ 15 - 7
src/ipc.c

@@ -375,10 +375,10 @@ rt_err_t rt_sem_detach(rt_sem_t sem)
     level = rt_spin_lock_irqsave(&(sem->spinlock));
     /* wakeup all suspended threads */
     _ipc_list_resume_all(&(sem->parent.suspend_thread));
+    rt_spin_unlock_irqrestore(&(sem->spinlock), level);

     /* detach semaphore object */
     rt_object_detach(&(sem->parent.parent));
-    rt_spin_unlock_irqrestore(&(sem->spinlock), level);

     return RT_EOK;
 }
@@ -458,6 +458,8 @@ RTM_EXPORT(rt_sem_create);
  */
 rt_err_t rt_sem_delete(rt_sem_t sem)
 {
+    rt_ubase_t level;
+
     /* parameter check */
     RT_ASSERT(sem != RT_NULL);
     RT_ASSERT(rt_object_get_type(&sem->parent.parent) == RT_Object_Class_Semaphore);
@@ -465,8 +467,10 @@ rt_err_t rt_sem_delete(rt_sem_t sem)
 
 
     RT_DEBUG_NOT_IN_INTERRUPT;

+    level = rt_spin_lock_irqsave(&(sem->spinlock));
     /* wakeup all suspended threads */
     _ipc_list_resume_all(&(sem->parent.suspend_thread));
+    rt_spin_unlock_irqrestore(&(sem->spinlock), level);

     /* delete semaphore object */
     rt_object_delete(&(sem->parent.parent));
@@ -582,7 +586,7 @@ static rt_err_t _rt_sem_take(rt_sem_t sem, rt_int32_t timeout, int suspend_flag)
 
 
             if (thread->error != RT_EOK)
             {
-                return thread->error;
+                return thread->error > 0 ? -thread->error : thread->error;
             }
         }
     }
@@ -1340,7 +1344,11 @@ static rt_err_t _rt_mutex_take(rt_mutex_t mutex, rt_int32_t timeout, int suspend
 
 
                 if (thread->error == RT_EOK)
                 {
-                    /* get mutex successfully */
+                    /**
+                     * get mutex successfully
+                     * Note: assert to avoid an unexpected resume
+                     */
+                    RT_ASSERT(mutex->owner == thread);
                 }
                 else
                 {
@@ -1698,10 +1706,10 @@ rt_err_t rt_event_detach(rt_event_t event)
     level = rt_spin_lock_irqsave(&(event->spinlock));
     /* resume all suspended thread */
     _ipc_list_resume_all(&(event->parent.suspend_thread));
+    rt_spin_unlock_irqrestore(&(event->spinlock), level);

     /* detach event object */
     rt_object_detach(&(event->parent.parent));
-    rt_spin_unlock_irqrestore(&(event->spinlock), level);

     return RT_EOK;
 }
@@ -1794,10 +1802,10 @@ rt_err_t rt_event_delete(rt_event_t event)
     rt_spin_lock(&(event->spinlock));
     /* resume all suspended thread */
     _ipc_list_resume_all(&(event->parent.suspend_thread));
+    rt_spin_unlock(&(event->spinlock));

     /* delete event object */
     rt_object_delete(&(event->parent.parent));
-    rt_spin_unlock(&(event->spinlock));

     return RT_EOK;
 }
@@ -2263,10 +2271,10 @@ rt_err_t rt_mb_detach(rt_mailbox_t mb)
     _ipc_list_resume_all(&(mb->parent.suspend_thread));
     /* also resume all mailbox private suspended thread */
     _ipc_list_resume_all(&(mb->suspend_sender_thread));
+    rt_spin_unlock_irqrestore(&(mb->spinlock), level);

     /* detach mailbox object */
     rt_object_detach(&(mb->parent.parent));
-    rt_spin_unlock_irqrestore(&(mb->spinlock), level);

     return RT_EOK;
 }
@@ -3043,10 +3051,10 @@ rt_err_t rt_mq_detach(rt_mq_t mq)
     _ipc_list_resume_all(&mq->parent.suspend_thread);
     /* also resume all message queue private suspended thread */
     _ipc_list_resume_all(&(mq->suspend_sender_thread));
+    rt_spin_unlock_irqrestore(&(mq->spinlock), level);

     /* detach message queue object */
     rt_object_detach(&(mq->parent.parent));
-    rt_spin_unlock_irqrestore(&(mq->spinlock), level);

     return RT_EOK;
 }

+ 21 - 3
src/scheduler_mp.c

@@ -501,8 +501,17 @@ void rt_schedule(void)
     }
 #endif /* RT_USING_SIGNALS */

+    /* whether lock scheduler */
+    if (rt_atomic_load(&(current_thread->critical_lock_nest)) != 0)
+    {
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
+        rt_hw_local_irq_enable(level);
+        goto __exit;
+    }
+
     rt_hw_spin_lock(&(current_thread->spinlock.lock));
-    if (rt_atomic_load(&(current_thread->critical_lock_nest)) == 0) /* whether lock scheduler */
+
     {
         rt_ubase_t highest_ready_priority;
 
 
@@ -650,9 +659,18 @@ void rt_scheduler_do_irq_switch(void *context)
         rt_hw_local_irq_enable(level);
         return;
     }
+
+    /* whether lock scheduler */
+    if (rt_atomic_load(&(current_thread->critical_lock_nest)) != 0)
+    {
+        rt_hw_spin_unlock(&(pcpu->spinlock.lock));
+        rt_hw_spin_unlock(&_mp_scheduler_spinlock);
+        rt_hw_local_irq_enable(level);
+        return;
+    }
+
     rt_hw_spin_lock(&(current_thread->spinlock.lock));
-    if (rt_atomic_load(&(current_thread->critical_lock_nest)) == 0 &&
-        rt_atomic_load(&(pcpu->irq_nest)) == 0)
+    if (rt_atomic_load(&(pcpu->irq_nest)) == 0)
     {
         rt_ubase_t highest_ready_priority;
 
 

+ 25 - 14
src/thread.c

@@ -34,6 +34,7 @@
  * 2022-10-15     Bernard      add nested mutex feature
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2023-12-10     xqyjlj       fix thread_exit/detach/delete
+ *                             fix rt_thread_delay
  */

 #include <rthw.h>
@@ -642,7 +643,7 @@ RTM_EXPORT(rt_thread_yield);
  */
 rt_err_t rt_thread_sleep(rt_tick_t tick)
 {
-    rt_base_t level, level_local;
+    rt_base_t level;
     struct rt_thread *thread;
     int err;
 
 
@@ -660,10 +661,11 @@ rt_err_t rt_thread_sleep(rt_tick_t tick)
     RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
     /* reset thread error */
     thread->error = RT_EOK;
-    level_local = rt_hw_local_irq_disable();
+    level = rt_hw_local_irq_disable();
     /* suspend thread */
+    rt_enter_critical();
     err = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
+    rt_spin_lock(&(thread->spinlock));
     /* reset the timeout of thread timer and start it */
     if (err == RT_EOK)
     {
@@ -671,8 +673,10 @@ rt_err_t rt_thread_sleep(rt_tick_t tick)
         rt_timer_start(&(thread->thread_timer));

         /* enable interrupt */
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-        rt_hw_local_irq_enable(level_local);
+        rt_hw_local_irq_enable(level);
+        rt_spin_unlock(&(thread->spinlock));
+        rt_exit_critical();
+
         thread->error = -RT_EINTR;

         rt_schedule();
@@ -683,8 +687,9 @@ rt_err_t rt_thread_sleep(rt_tick_t tick)
     }
     else
     {
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-        rt_hw_local_irq_enable(level_local);
+        rt_hw_local_irq_enable(level);
+        rt_spin_unlock(&(thread->spinlock));
+        rt_exit_critical();
     }
 
 
     return err;
     return err;
@@ -727,12 +732,12 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);

-    /* disable interrupt */
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
-
     /* reset thread error */
     thread->error = RT_EOK;

+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
+
     cur_tick = rt_tick_get();
     if (cur_tick - *tick < inc_tick)
     {
@@ -740,14 +745,20 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
 
 
         *tick += inc_tick;
         left_tick = *tick - cur_tick;
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+
+        rt_enter_critical();
         /* suspend thread */
         rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
-        level = rt_spin_lock_irqsave(&(thread->spinlock));
+
+        rt_spin_lock(&(thread->spinlock));
+
         /* reset the timeout of thread timer and start it */
         rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME, &left_tick);
         rt_timer_start(&(thread->thread_timer));
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+
+        rt_hw_local_irq_enable(level);
+        rt_spin_unlock(&(thread->spinlock));
+        rt_exit_critical();

         rt_schedule();
 
 
@@ -760,7 +771,7 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     else
     {
         *tick = cur_tick;
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+        rt_hw_local_irq_enable(level);
     }

     return thread->error;