@@ -76,6 +76,44 @@ void rt_thread_resume_sethook(void (*hook)(rt_thread_t thread))
 RT_OBJECT_HOOKLIST_DEFINE(rt_thread_inited);
 #endif /* defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR) */

+#ifdef RT_USING_MUTEX
+static void _thread_detach_from_mutex(rt_thread_t thread)
+{
+    rt_list_t *node;
+    rt_list_t *tmp_list;
+    struct rt_mutex *mutex;
+    rt_base_t level;
+
+    level = rt_spin_lock_irqsave(&thread->spinlock);
+
+    /* check if thread is waiting on a mutex */
+    if ((thread->pending_object) &&
+        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
+    {
+        /* remove it from its waiting list */
+        struct rt_mutex *mutex = (struct rt_mutex*)thread->pending_object;
+        rt_mutex_drop_thread(mutex, thread);
+        thread->pending_object = RT_NULL;
+    }
+
+    /* release taken mutexes only after detaching from the pending one, so a just-granted mutex is not lost */
+    rt_list_for_each_safe(node, tmp_list, &(thread->taken_object_list))
+    {
+        mutex = rt_list_entry(node, struct rt_mutex, taken_list);
+        LOG_D("Thread [%s] exits while holding mutex [%s].\n", thread->parent.name, mutex->parent.parent.name);
+        /* the mutex may be taken recursively; force hold to 1 so a single release frees it */
+        mutex->hold = 1;
+        rt_mutex_release(mutex);
+    }
+
+    rt_spin_unlock_irqrestore(&thread->spinlock, level);
+}
+
+#else
+
+static void _thread_detach_from_mutex(rt_thread_t thread) {}
+#endif
+
 static void _thread_exit(void)
 {
     struct rt_thread *thread;
@@ -88,6 +126,8 @@ static void _thread_exit(void)

     rt_thread_close(thread);

+    _thread_detach_from_mutex(thread);
+
     /* insert to defunct thread list */
     rt_thread_defunct_enqueue(thread);

@@ -133,41 +173,6 @@ static void _thread_timeout(void *parameter)
     rt_sched_unlock_n_resched(slvl);
 }

-#ifdef RT_USING_MUTEX
-static void _thread_detach_from_mutex(rt_thread_t thread)
-{
-    rt_list_t *node;
-    rt_list_t *tmp_list;
-    struct rt_mutex *mutex;
-    rt_base_t level;
-
-    level = rt_spin_lock_irqsave(&thread->spinlock);
-
-    /* check if thread is waiting on a mutex */
-    if ((thread->pending_object) &&
-        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
-    {
-        /* remove it from its waiting list */
-        struct rt_mutex *mutex = (struct rt_mutex*)thread->pending_object;
-        rt_mutex_drop_thread(mutex, thread);
-        thread->pending_object = RT_NULL;
-    }
-
-    /* free taken mutex after detaching from waiting, so we don't lost mutex just got */
-    rt_list_for_each_safe(node, tmp_list, &(thread->taken_object_list))
-    {
-        mutex = rt_list_entry(node, struct rt_mutex, taken_list);
-        rt_mutex_release(mutex);
-    }
-
-    rt_spin_unlock_irqrestore(&thread->spinlock, level);
-}
-
-#else
-
-static void _thread_detach_from_mutex(rt_thread_t thread) {}
-#endif
-
 static rt_err_t _thread_init(struct rt_thread *thread,
                              const char *name,
                              void (*entry)(void *parameter),
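For illustration only (not part of the patch): a minimal sketch of the scenario this change handles, assuming the standard RT-Thread thread and mutex APIs. A "holder" thread takes a mutex twice (a recursive take, so its hold count becomes 2) and then simply returns from its entry function; because _thread_exit() now calls _thread_detach_from_mutex(), the hold count is forced back to 1 and the mutex is fully released, so the "waiter" thread can acquire it instead of blocking indefinitely. Thread names, priorities, and stack sizes below are made up for the example.

/* Illustrative sketch (not part of the diff): names, priorities and
 * stack sizes are arbitrary. */
#include <rtthread.h>

static rt_mutex_t demo_mutex;

static void holder_entry(void *parameter)
{
    /* take the mutex twice: the owner may take it recursively, so hold == 2 */
    rt_mutex_take(demo_mutex, RT_WAITING_FOREVER);
    rt_mutex_take(demo_mutex, RT_WAITING_FOREVER);

    /* return without releasing: the thread exit path is expected to clean up */
}

static void waiter_entry(void *parameter)
{
    /* succeeds once the holder's exit path has released the mutex */
    if (rt_mutex_take(demo_mutex, RT_WAITING_FOREVER) == RT_EOK)
    {
        rt_kprintf("waiter acquired the mutex after the holder exited\n");
        rt_mutex_release(demo_mutex);
    }
}

int mutex_exit_demo(void)
{
    rt_thread_t holder, waiter;

    demo_mutex = rt_mutex_create("demo_mtx", RT_IPC_FLAG_PRIO);
    if (demo_mutex == RT_NULL)
        return -RT_ERROR;

    /* lower number = higher priority, so the holder should own the mutex first */
    holder = rt_thread_create("holder", holder_entry, RT_NULL, 1024, 10, 10);
    waiter = rt_thread_create("waiter", waiter_entry, RT_NULL, 1024, 11, 10);
    rt_thread_startup(holder);
    rt_thread_startup(waiter);

    return 0;
}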