🎯 Sync smart & scheduler codes (#8537)

Signed-off-by: Shell <smokewood@qq.com>
Co-authored-by: xqyjlj <xqyjlj@126.com>
Shell · 1 year ago
commit 71560bafb5
71 changed files with 5832 additions and 2144 deletions
  1. +2 -2      bsp/qemu-vexpress-a9/applications/main.c
  2. +1 -1      bsp/qemu-vexpress-a9/drivers/secondary_cpu.c
  3. +1 -1      components/dfs/dfs_v2/filesystems/romfs/dfs_romfs.c
  4. +4 -1      components/dfs/dfs_v2/src/dfs_pcache.c
  5. +4 -8      components/drivers/audio/audio_pipe.c
  6. +10 -6     components/drivers/include/ipc/completion.h
  7. +1 -1      components/drivers/ipc/SConscript
  8. +64 -57    components/drivers/ipc/completion.c
  9. +57 -98    components/drivers/ipc/dataqueue.c
 10. +9 -12     components/drivers/ipc/workqueue.c
 11. +20 -27    components/finsh/cmd.c
 12. +12 -11    components/libc/compilers/common/include/sys/time.h
 13. +1 -1      components/libc/posix/libdl/dlmodule.c
 14. +1 -1      components/libc/posix/pthreads/pthread_cond.c
 15. +16 -0     components/lwp/libc_musl.h
 16. +3 -46     components/lwp/lwp.h
 17. +721 -139  components/lwp/lwp_futex.c
 18. +54 -0     components/lwp/lwp_futex_internal.h
 19. +65 -0     components/lwp/lwp_futex_table.c
 20. +6 -6      components/lwp/lwp_internal.c
 21. +4 -4      components/lwp/lwp_internal.h
 22. +40 -41    components/lwp/lwp_ipc.c
 23. +71 -0     components/lwp/lwp_itimer.c
 24. +39 -112   components/lwp/lwp_pid.c
 25. +22 -5     components/lwp/lwp_signal.c
 26. +9 -0      components/lwp/lwp_signal.h
 27. +34 -40    components/lwp/lwp_syscall.c
 28. +2 -12     components/mm/mm_aspace.h
 29. +9 -0      components/utilities/libadt/uthash/SConscript
 30. +16 -0     components/utilities/libadt/uthash/dict.h
 31. +57 -0     components/utilities/libadt/uthash/rt_uthash.h
 32. +1140 -0   components/utilities/libadt/uthash/uthash.h
 33. +3 -5      components/vbus/prio_queue.c
 34. +3 -7      components/vbus/vbus.c
 35. +1 -3      components/vbus/watermark_queue.c
 36. +2 -4      components/vbus/watermark_queue.h
 37. +4 -0      examples/utest/testcases/kernel/Kconfig
 38. +7 -0      examples/utest/testcases/kernel/SConscript
 39. +5 -4      examples/utest/testcases/kernel/mutex_tc.c
 40. +107 -0    examples/utest/testcases/kernel/sched_mtx_tc.c
 41. +196 -0    examples/utest/testcases/kernel/sched_sem_tc.c
 42. +121 -0    examples/utest/testcases/kernel/sched_thread_tc.c
 43. +232 -0    examples/utest/testcases/kernel/sched_timed_mtx_tc.c
 44. +149 -0    examples/utest/testcases/kernel/sched_timed_sem_tc.c
 45. +12 -4     examples/utest/testcases/kernel/signal_tc.c
 46. +29 -16    examples/utest/testcases/kernel/thread_tc.c
 47. +100 -0    include/rtcompiler.h
 48. +57 -241   include/rtdef.h
 49. +172 -0    include/rtsched.h
 50. +26 -7     include/rtthread.h
 51. +223 -0    include/rttypes.h
 52. +1 -0      libcpu/aarch64/common/cpuport.h
 53. +1 -1      libcpu/aarch64/cortex-a/entry_point.S
 54. +7 -0      libcpu/arm/cortex-a/cpuport.c
 55. +2 -0      libcpu/arm/cortex-a/cpuport.h
 56. +5 -0      libcpu/risc-v/virt64/cpuport.c
 57. +11 -1     src/Kconfig
 58. +2 -3      src/SConscript
 59. +2 -17     src/clock.c
 60. +29 -48    src/cpu.c
 61. +7 -10     src/idle.c
 62. +364 -235  src/ipc.c
 63. +34 -14    src/kservice.c
 64. +4 -46     src/mempool.c
 65. +1 -1      src/object.c
 66. +217 -0    src/scheduler_comm.c
 67. +657 -384  src/scheduler_mp.c
 68. +152 -37   src/scheduler_up.c
 69. +73 -51    src/signal.c
 70. +231 -314  src/thread.c
 71. +88 -59    src/timer.c

+ 2 - 2
bsp/qemu-vexpress-a9/applications/main.c

@@ -10,11 +10,11 @@
 
 #include <stdint.h>
 #include <stdio.h>
-#include <stdlib.h>
+#include <rtthread.h>
 
 int main(void)
 {
-    printf("Hello RT-Thread!\n");
+    rt_kprintf("Hello RT-Thread!\n");
 
     return 0;
 }

+ 1 - 1
bsp/qemu-vexpress-a9/drivers/secondary_cpu.c

@@ -47,7 +47,7 @@ void rt_hw_secondary_cpu_up(void)
     *plat_boot_reg-- = (void *)(size_t)-1;
     *plat_boot_reg = (void *)entry;
     rt_hw_dsb();
-    rt_hw_ipi_send(0, 1 << 1);
+    rt_hw_ipi_send(0, RT_CPU_MASK ^ (1 << rt_hw_cpu_id()));
 }
 
 /* Interface */
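
The replacement mask targets every core except the caller, instead of hard-coding CPU 1. A worked illustration of the arithmetic (the 4-core count and the printf harness are assumptions for the example; RT_CPU_MASK and rt_hw_cpu_id() come from the kernel):

    #include <stdio.h>

    /* stand-ins for the kernel's cpu count / RT_CPU_MASK, assuming 4 cores */
    #define CPU_NUM  4u
    #define CPU_MASK ((1u << CPU_NUM) - 1u)            /* 0b1111 */

    int main(void)
    {
        unsigned self    = 0u;                         /* pretend rt_hw_cpu_id() == 0 */
        unsigned targets = CPU_MASK ^ (1u << self);    /* 0b1110: all cores but self  */
        printf("IPI target mask: 0x%x\n", targets);    /* prints 0xe */
        return 0;
    }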

+ 1 - 1
components/dfs/dfs_v2/filesystems/romfs/dfs_romfs.c

@@ -352,7 +352,7 @@ static int dfs_romfs_getdents(struct dfs_file *file, struct dirent *dirp, uint32
 
         d->d_namlen = rt_strlen(name);
         d->d_reclen = (rt_uint16_t)sizeof(struct dirent);
-        rt_strncpy(d->d_name, name, DFS_PATH_MAX);
+        rt_strncpy(d->d_name, name, DIRENT_NAME_MAX);
 
         /* move to next position */
         ++ file->fpos;

+ 4 - 1
components/dfs/dfs_v2/src/dfs_pcache.c

@@ -822,7 +822,10 @@ static int dfs_page_insert(struct dfs_page *page)
     rt_list_insert_before(&aspace->list_inactive, &page->space_node);
     aspace->pages_count ++;
 
-    RT_ASSERT(_dfs_page_insert(aspace, page) == 0);
+    if (_dfs_page_insert(aspace, page))
+    {
+        RT_ASSERT(0);
+    }
 
     if (aspace->pages_count > RT_PAGECACHE_ASPACE_COUNT)
     {
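
The rewrite removes a side effect from inside RT_ASSERT(): when assertions compile to no-ops, the macro's argument is never evaluated, so the old form would silently skip the page insertion in release builds. A runnable sketch of the pattern using the standard assert() (do_work() is a hypothetical helper; RT_ASSERT behaves analogously):

    #include <assert.h>
    #include <stdio.h>

    static int counter;

    static int do_work(void) { counter++; return 0; }   /* hypothetical side effect */

    int main(void)
    {
        assert(do_work() == 0);   /* BAD: with NDEBUG defined, do_work() never runs */

        if (do_work())            /* GOOD: the side effect survives release builds */
            assert(0);

        printf("counter = %d\n", counter);
        return 0;
    }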

+ 4 - 8
components/drivers/audio/audio_pipe.c

@@ -21,9 +21,7 @@ static void _rt_pipe_resume_writer(struct rt_audio_pipe *pipe)
         RT_ASSERT(pipe->flag & RT_PIPE_FLAG_BLOCK_WR);
 
         /* get suspended thread */
-        thread = rt_list_entry(pipe->suspended_write_list.next,
-                               struct rt_thread,
-                               tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(pipe->suspended_write_list.next);
 
         /* resume the write thread */
         rt_thread_resume(thread);
@@ -73,7 +71,7 @@ static rt_ssize_t rt_pipe_read(rt_device_t dev,
             rt_thread_suspend(thread);
             /* waiting on suspended read list */
             rt_list_insert_before(&(pipe->suspended_read_list),
-                                  &(thread->tlist));
+                                  &RT_THREAD_LIST_NODE(thread));
             rt_hw_interrupt_enable(level);
 
             rt_schedule();
@@ -103,9 +101,7 @@ static void _rt_pipe_resume_reader(struct rt_audio_pipe *pipe)
         RT_ASSERT(pipe->flag & RT_PIPE_FLAG_BLOCK_RD);
 
         /* get suspended thread */
-        thread = rt_list_entry(pipe->suspended_read_list.next,
-                               struct rt_thread,
-                               tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(pipe->suspended_read_list.next);
 
         /* resume the read thread */
         rt_thread_resume(thread);
@@ -161,7 +157,7 @@ static rt_ssize_t rt_pipe_write(rt_device_t dev,
             rt_thread_suspend(thread);
             /* waiting on suspended read list */
             rt_list_insert_before(&(pipe->suspended_write_list),
-                                  &(thread->tlist));
+                                  &RT_THREAD_LIST_NODE(thread));
             rt_hw_interrupt_enable(level);
 
             rt_schedule();
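
These hunks follow a commit-wide change: direct use of thread->tlist is replaced by accessor macros from the new include/rtsched.h, so the scheduler owns the node's layout. A sketch of the mapping (only the macros appear in this diff; the field behind them lives in rtsched.h):

    /* before: the list node was a public rt_thread field */
    rt_list_insert_before(&wait_list, &(thread->tlist));
    thread = rt_list_entry(node, struct rt_thread, tlist);

    /* after: the node is reached through scheduler-owned accessors */
    rt_list_insert_before(&wait_list, &RT_THREAD_LIST_NODE(thread));
    thread = RT_THREAD_LIST_NODE_ENTRY(node);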

+ 10 - 6
components/drivers/include/ipc/completion.h

@@ -13,18 +13,22 @@
 #include <rtconfig.h>
 
 /**
- * Completion
+ * Completion - A tiny IPC implementation for resource-constrained scenarios
+ *
+ * It's an IPC using one CPU word with the encoding:
+ *
+ * BIT      | MAX-1 ----------------- 1 |       0        |
+ * CONTENT  |   suspended_thread & ~1   | completed flag |
  */
 
 struct rt_completion
 {
-    rt_uint32_t flag;
-
-    /* suspended list */
-    rt_list_t suspended_list;
-    struct rt_spinlock spinlock;
+    /* suspended thread, and completed flag */
+    rt_base_t susp_thread_n_flag;
 };
 
+#define RT_COMPLETION_INIT(comp) {0}
+
 void rt_completion_init(struct rt_completion *completion);
 rt_err_t rt_completion_wait(struct rt_completion *completion,
                             rt_int32_t            timeout);
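
The one-word trick relies on thread control blocks being word-aligned, so bit 0 of the pointer is always zero and free to carry the completed flag. A sketch of packing and unpacking the word (macros per completion.c below; the local variables are illustrative):

    rt_thread_t waiter = rt_thread_self();           /* any aligned TCB pointer */
    struct rt_completion comp = RT_COMPLETION_INIT(comp);

    comp.susp_thread_n_flag = RT_COMPLETION_NEW_STAT(waiter, RT_UNCOMPLETED);

    rt_thread_t t    = RT_COMPLETION_THREAD(&comp);  /* pointer, bit 0 masked off */
    rt_base_t   done = RT_COMPLETION_FLAG(&comp);    /* bit 0: the completed flag */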

+ 1 - 1
components/drivers/ipc/SConscript

@@ -8,6 +8,6 @@ if not GetDepend('RT_USING_HEAP'):
     SrcRemove(src, 'dataqueue.c')
     SrcRemove(src, 'pipe.c')
 
-group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_DEVICE_IPC'], CPPPATH = CPPPATH)
+group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_DEVICE_IPC'], CPPPATH = CPPPATH, LOCAL_CPPDEFINES=['__RT_IPC_SOURCE__'])
 
 Return('group')

+ 64 - 57
components/drivers/ipc/completion.c

@@ -8,13 +8,24 @@
  * 2012-09-30     Bernard      first version.
  * 2021-08-18     chenyingchun add comments
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2024-01-25     Shell        reduce resource usage in completion for better synchronization
+ *                             and smaller footprint.
  */
 
+#define DBG_TAG           "drivers.ipc"
+#define DBG_LVL           DBG_INFO
+#include <rtdbg.h>
+
 #include <rthw.h>
 #include <rtdevice.h>
 
 #define RT_COMPLETED    1
 #define RT_UNCOMPLETED  0
+#define RT_COMPLETION_FLAG(comp) ((comp)->susp_thread_n_flag & 1)
+#define RT_COMPLETION_THREAD(comp) ((rt_thread_t)((comp)->susp_thread_n_flag & ~1))
+#define RT_COMPLETION_NEW_STAT(thread, flag) (((flag) & 1) | (((rt_base_t)thread) & ~1))
+
+static struct rt_spinlock _completion_lock = RT_SPINLOCK_INIT;
 
 /**
  * @brief This function will initialize a completion object.
@@ -23,14 +34,9 @@
  */
 void rt_completion_init(struct rt_completion *completion)
 {
-    rt_base_t level;
     RT_ASSERT(completion != RT_NULL);
 
-    rt_spin_lock_init(&(completion->spinlock));
-    level = rt_spin_lock_irqsave(&(completion->spinlock));
-    completion->flag = RT_UNCOMPLETED;
-    rt_list_init(&completion->suspended_list);
-    rt_spin_unlock_irqrestore(&(completion->spinlock), level);
+    completion->susp_thread_n_flag = RT_COMPLETION_NEW_STAT(RT_NULL, RT_UNCOMPLETED);
 }
 RTM_EXPORT(rt_completion_init);
 
@@ -64,11 +70,11 @@ rt_err_t rt_completion_wait(struct rt_completion *completion,
     result = RT_EOK;
     thread = rt_thread_self();
 
-    level = rt_spin_lock_irqsave(&(completion->spinlock));
-    if (completion->flag != RT_COMPLETED)
+    level = rt_spin_lock_irqsave(&_completion_lock);
+    if (RT_COMPLETION_FLAG(completion) != RT_COMPLETED)
     {
         /* only one thread can suspend on complete */
-        RT_ASSERT(rt_list_isempty(&(completion->suspended_list)));
+        RT_ASSERT(RT_COMPLETION_THREAD(completion) == RT_NULL);
 
         if (timeout == 0)
         {
@@ -81,40 +87,43 @@ rt_err_t rt_completion_wait(struct rt_completion *completion,
             thread->error = RT_EOK;
 
             /* suspend thread */
-            rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
-            /* add to suspended list */
-            rt_list_insert_before(&(completion->suspended_list),
-                                  &(thread->tlist));
-
-            /* current context checking */
-            RT_DEBUG_NOT_IN_INTERRUPT;
-
-            /* start timer */
-            if (timeout > 0)
+            result = rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
+            if (result == RT_EOK)
             {
-                /* reset the timeout of thread timer and start it */
-                rt_timer_control(&(thread->thread_timer),
-                                 RT_TIMER_CTRL_SET_TIME,
-                                 &timeout);
-                rt_timer_start(&(thread->thread_timer));
+                /* add to suspended thread */
+                completion->susp_thread_n_flag = RT_COMPLETION_NEW_STAT(thread, RT_UNCOMPLETED);
+
+                /* current context checking */
+                RT_DEBUG_NOT_IN_INTERRUPT;
+
+                /* start timer */
+                if (timeout > 0)
+                {
+                    /* reset the timeout of thread timer and start it */
+                    rt_timer_control(&(thread->thread_timer),
+                                     RT_TIMER_CTRL_SET_TIME,
+                                     &timeout);
+                    rt_timer_start(&(thread->thread_timer));
+                }
+                /* enable interrupt */
+                rt_spin_unlock_irqrestore(&_completion_lock, level);
+
+                /* do schedule */
+                rt_schedule();
+
+                /* thread is waked up */
+                result = thread->error;
+
+                level = rt_spin_lock_irqsave(&_completion_lock);
             }
-            /* enable interrupt */
-            rt_spin_unlock_irqrestore(&(completion->spinlock), level);
-
-            /* do schedule */
-            rt_schedule();
-
-            /* thread is waked up */
-            result = thread->error;
-
-            level = rt_spin_lock_irqsave(&(completion->spinlock));
         }
     }
-    /* clean completed flag */
-    completion->flag = RT_UNCOMPLETED;
+
+    /* clean the completed flag & remove susp_thread in the case of a wakeup by timeout */
+    completion->susp_thread_n_flag = RT_COMPLETION_NEW_STAT(RT_NULL, RT_UNCOMPLETED);
 
 __exit:
-    rt_spin_unlock_irqrestore(&(completion->spinlock), level);
+    rt_spin_unlock_irqrestore(&_completion_lock, level);
 
     return result;
 }
@@ -128,35 +137,33 @@ RTM_EXPORT(rt_completion_wait);
 void rt_completion_done(struct rt_completion *completion)
 {
     rt_base_t level;
+    rt_err_t error;
+    rt_thread_t suspend_thread;
     RT_ASSERT(completion != RT_NULL);
 
-    if (completion->flag == RT_COMPLETED)
+    level = rt_spin_lock_irqsave(&_completion_lock);
+    if (RT_COMPLETION_FLAG(completion) == RT_COMPLETED)
+    {
+        rt_spin_unlock_irqrestore(&_completion_lock, level);
         return;
+    }
 
-    level = rt_spin_lock_irqsave(&(completion->spinlock));
-    completion->flag = RT_COMPLETED;
-
-    if (!rt_list_isempty(&(completion->suspended_list)))
+    suspend_thread = RT_COMPLETION_THREAD(completion);
+    if (suspend_thread)
     {
         /* there is one thread in suspended list */
-        struct rt_thread *thread;
-
-        /* get thread entry */
-        thread = rt_list_entry(completion->suspended_list.next,
-                               struct rt_thread,
-                               tlist);
 
         /* resume it */
-        rt_thread_resume(thread);
-        rt_spin_unlock_irqrestore(&(completion->spinlock), level);
-
-        /* perform a schedule */
-        rt_schedule();
-    }
-    else
-    {
-        rt_spin_unlock_irqrestore(&(completion->spinlock), level);
+        error = rt_thread_resume(suspend_thread);
+        if (error)
+        {
+            LOG_D("%s: failed to resume thread", __func__);
+        }
     }
+
+    completion->susp_thread_n_flag = RT_COMPLETION_NEW_STAT(RT_NULL, RT_COMPLETED);
+
+    rt_spin_unlock_irqrestore(&_completion_lock, level);
 }
 RTM_EXPORT(rt_completion_done);
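
The public API is unchanged by the refactor; a minimal usage sketch for reference (the demo_* names are illustrative):

    static struct rt_completion demo_comp = RT_COMPLETION_INIT(demo_comp);

    /* waiter side: block up to 100 ticks for the event */
    static void demo_waiter(void *arg)
    {
        if (rt_completion_wait(&demo_comp, 100) == RT_EOK)
        {
            /* event arrived before the timeout */
        }
    }

    /* signaler side, e.g. from an ISR or another thread */
    static void demo_signal(void)
    {
        rt_completion_done(&demo_comp);
    }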
 

+ 57 - 98
components/drivers/ipc/dataqueue.c

@@ -8,6 +8,7 @@
  * 2012-09-30     Bernard      first version.
  * 2016-10-31     armink       fix some resume push and pop thread bugs
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2024-01-25     Shell        porting to susp_list API
  */
 
 #include <rthw.h>
@@ -121,27 +122,32 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
         thread->error = RT_EOK;
 
         /* suspend thread on the push list */
-        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
-        rt_list_insert_before(&(queue->suspended_push_list), &(thread->tlist));
-        /* start timer */
-        if (timeout > 0)
+        result = rt_thread_suspend_to_list(thread, &queue->suspended_push_list,
+                                           RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);
+        if (result == RT_EOK)
         {
-            /* reset the timeout of thread timer and start it */
-            rt_timer_control(&(thread->thread_timer),
-                             RT_TIMER_CTRL_SET_TIME,
-                             &timeout);
-            rt_timer_start(&(thread->thread_timer));
-        }
+            /* start timer */
+            if (timeout > 0)
+            {
+                /* reset the timeout of thread timer and start it */
+                rt_timer_control(&(thread->thread_timer),
+                                RT_TIMER_CTRL_SET_TIME,
+                                &timeout);
+                rt_timer_start(&(thread->thread_timer));
+            }
+
+            /* enable interrupt */
+            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
-        /* enable interrupt */
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
+            /* do schedule */
+            rt_schedule();
 
-        /* do schedule */
-        rt_schedule();
+            /* thread is waked up */
+            level = rt_spin_lock_irqsave(&(queue->spinlock));
 
-        /* thread is waked up */
-        result = thread->error;
-        level = rt_spin_lock_irqsave(&(queue->spinlock));
+            /* error may be modified by waker, so take the lock before accessing it */
+            result = thread->error;
+        }
         if (result != RT_EOK) goto __exit;
     }
 
@@ -159,15 +165,10 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
     }
 
     /* there is at least one thread in suspended list */
-    if (!rt_list_isempty(&(queue->suspended_pop_list)))
+    if (rt_susp_list_dequeue(&queue->suspended_pop_list,
+                             RT_THREAD_RESUME_RES_THR_ERR))
     {
-        /* get thread entry */
-        thread = rt_list_entry(queue->suspended_pop_list.next,
-                               struct rt_thread,
-                               tlist);
-
-        /* resume it */
-        rt_thread_resume(thread);
+        /* unlock and perform a schedule */
         rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
         /* perform a schedule */
@@ -239,29 +240,32 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
         thread->error = RT_EOK;
 
         /* suspend thread on the pop list */
-        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
-        rt_list_insert_before(&(queue->suspended_pop_list), &(thread->tlist));
-        /* start timer */
-        if (timeout > 0)
+        result = rt_thread_suspend_to_list(thread, &queue->suspended_pop_list,
+                                           RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);
+        if (result == RT_EOK)
         {
-            /* reset the timeout of thread timer and start it */
-            rt_timer_control(&(thread->thread_timer),
-                             RT_TIMER_CTRL_SET_TIME,
-                             &timeout);
-            rt_timer_start(&(thread->thread_timer));
-        }
-
-        /* enable interrupt */
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
+            /* start timer */
+            if (timeout > 0)
+            {
+                /* reset the timeout of thread timer and start it */
+                rt_timer_control(&(thread->thread_timer),
+                                RT_TIMER_CTRL_SET_TIME,
+                                &timeout);
+                rt_timer_start(&(thread->thread_timer));
+            }
+
+            /* enable interrupt */
+            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
-        /* do schedule */
-        rt_schedule();
+            /* do schedule */
+            rt_schedule();
 
-        /* thread is waked up */
-        result = thread->error;
-        level  = rt_spin_lock_irqsave(&(queue->spinlock));
-        if (result != RT_EOK)
-            goto __exit;
+            /* thread is waked up */
+            level  = rt_spin_lock_irqsave(&(queue->spinlock));
+            result = thread->error;
+            if (result != RT_EOK)
+                goto __exit;
+        }
     }
 
     *data_ptr = queue->queue[queue->get_index].data_ptr;
@@ -280,15 +284,10 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
     if (rt_data_queue_len(queue) <= queue->lwm)
     {
         /* there is at least one thread in suspended list */
-        if (!rt_list_isempty(&(queue->suspended_push_list)))
+        if (rt_susp_list_dequeue(&queue->suspended_push_list,
+                                       RT_THREAD_RESUME_RES_THR_ERR))
         {
-            /* get thread entry */
-            thread = rt_list_entry(queue->suspended_push_list.next,
-                                   struct rt_thread,
-                                   tlist);
-
-            /* resume it */
-            rt_thread_resume(thread);
+            /* unlock and perform a schedule */
             rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
             /* perform a schedule */
@@ -364,7 +363,6 @@ RTM_EXPORT(rt_data_queue_peek);
 void rt_data_queue_reset(struct rt_data_queue *queue)
 {
     rt_base_t level;
-    struct rt_thread *thread;
 
     RT_ASSERT(queue != RT_NULL);
     RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
@@ -382,52 +380,13 @@ void rt_data_queue_reset(struct rt_data_queue *queue)
     /* wakeup all suspend threads */
 
     /* resume on pop list */
-    while (!rt_list_isempty(&(queue->suspended_pop_list)))
-    {
-        /* disable interrupt */
-        level = rt_spin_lock_irqsave(&(queue->spinlock));
-
-        /* get next suspend thread */
-        thread = rt_list_entry(queue->suspended_pop_list.next,
-                               struct rt_thread,
-                               tlist);
-        /* set error code to -RT_ERROR */
-        thread->error = -RT_ERROR;
-
-        /*
-         * resume thread
-         * In rt_thread_resume function, it will remove current thread from
-         * suspend list
-         */
-        rt_thread_resume(thread);
-
-        /* enable interrupt */
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-    }
+    rt_susp_list_resume_all_irq(&queue->suspended_pop_list, RT_ERROR,
+                                &(queue->spinlock));
 
     /* resume on push list */
-    while (!rt_list_isempty(&(queue->suspended_push_list)))
-    {
-        /* disable interrupt */
-        level = rt_spin_lock_irqsave(&(queue->spinlock));
-
-        /* get next suspend thread */
-        thread = rt_list_entry(queue->suspended_push_list.next,
-                               struct rt_thread,
-                               tlist);
-        /* set error code to -RT_ERROR */
-        thread->error = -RT_ERROR;
-
-        /*
-         * resume thread
-         * In rt_thread_resume function, it will remove current thread from
-         * suspend list
-         */
-        rt_thread_resume(thread);
-
-        /* enable interrupt */
-        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-    }
+    rt_susp_list_resume_all_irq(&queue->suspended_push_list, RT_ERROR,
+                                &(queue->spinlock));
+
     rt_exit_critical();
 
     rt_schedule();
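
Dataqueue now follows the commit's generic suspend/wake idiom: suspend onto a plain rt_list_t under the object's lock, drop the lock, schedule, then re-take the lock before reading thread->error. A condensed sketch (lock and wait_list are placeholders; the APIs are the ones used above):

    /* blocking side */
    level = rt_spin_lock_irqsave(&lock);
    rc = rt_thread_suspend_to_list(thread, &wait_list,
                                   RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);
    if (rc == RT_EOK)
    {
        rt_spin_unlock_irqrestore(&lock, level);
        rt_schedule();                        /* actually sleep */
        level = rt_spin_lock_irqsave(&lock);
        rc = thread->error;                   /* set by the waker under the lock */
    }
    rt_spin_unlock_irqrestore(&lock, level);

    /* waking side */
    level = rt_spin_lock_irqsave(&lock);
    if (rt_susp_list_dequeue(&wait_list, RT_THREAD_RESUME_RES_THR_ERR))
    {
        rt_spin_unlock_irqrestore(&lock, level);
        rt_schedule();                        /* let the woken thread run */
    }
    else
    {
        rt_spin_unlock_irqrestore(&lock, level);
    }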

+ 9 - 12
components/drivers/ipc/workqueue.c

@@ -64,7 +64,10 @@ static void _workqueue_thread_entry(void *parameter)
         {
             /* no software timer exist, suspend self. */
             rt_thread_suspend_with_flag(rt_thread_self(), RT_UNINTERRUPTIBLE);
+
+            /* release lock after suspend so we will not lose any wakeups */
             rt_spin_unlock_irqrestore(&(queue->spinlock), level);
+
             rt_schedule();
             continue;
         }
@@ -105,13 +108,11 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
         work->workqueue = queue;
 
         /* whether the workqueue is doing work */
-        if (queue->work_current == RT_NULL &&
-                ((queue->work_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
+        if (queue->work_current == RT_NULL)
         {
-            /* resume work thread */
+            /* resume work thread, and do a re-schedule if it succeeds */
             rt_thread_resume(queue->work_thread);
             rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-            rt_schedule();
         }
         else
         {
@@ -187,13 +188,11 @@ static void _delayed_work_timeout_handler(void *parameter)
         work->flags |= RT_WORK_STATE_PENDING;
     }
     /* whether the workqueue is doing work */
-    if (queue->work_current == RT_NULL &&
-            ((queue->work_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
+    if (queue->work_current == RT_NULL)
     {
-        /* resume work thread */
+        /* resume work thread, and do a re-schedule if it succeeds */
         rt_thread_resume(queue->work_thread);
         rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-        rt_schedule();
     }
     else
     {
@@ -346,13 +345,11 @@ rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *wo
     rt_list_remove(&(work->list));
     rt_list_insert_after(&queue->work_list, &(work->list));
     /* whether the workqueue is doing work */
-    if (queue->work_current == RT_NULL &&
-            ((queue->work_thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK))
+    if (queue->work_current == RT_NULL)
     {
-        /* resume work thread */
+        /* resume work thread, and do a re-schedule if it succeeds */
         rt_thread_resume(queue->work_thread);
         rt_spin_unlock_irqrestore(&(queue->spinlock), level);
-        rt_schedule();
     }
     else
     {

+ 20 - 27
components/finsh/cmd.c

@@ -216,15 +216,23 @@ long list_thread(void)
                     rt_uint8_t *ptr;
 
 #ifdef RT_USING_SMP
-                    if (thread->oncpu != RT_CPU_DETACHED)
-                        rt_kprintf("%-*.*s %3d %3d %4d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->oncpu, thread->bind_cpu, thread->current_priority);
+                    /* no synchronization applied since it's only for debug */
+                    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
+                        rt_kprintf("%-*.*s %3d %3d %4d ", maxlen, RT_NAME_MAX,
+                                   thread->parent.name, RT_SCHED_CTX(thread).oncpu,
+                                   RT_SCHED_CTX(thread).bind_cpu,
+                                   RT_SCHED_PRIV(thread).current_priority);
                     else
-                        rt_kprintf("%-*.*s N/A %3d %4d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->bind_cpu, thread->current_priority);
+                        rt_kprintf("%-*.*s N/A %3d %4d ", maxlen, RT_NAME_MAX,
+                                   thread->parent.name,
+                                   RT_SCHED_CTX(thread).bind_cpu,
+                                   RT_SCHED_PRIV(thread).current_priority);
 
 #else
-                    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
+                    /* no synchronization applied since it's only for debug */
+                    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
 #endif /*RT_USING_SMP*/
-                    stat = (thread->stat & RT_THREAD_STAT_MASK);
+                    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
                     if (stat == RT_THREAD_READY)        rt_kprintf(" ready  ");
                     else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
                     else if (stat == RT_THREAD_INIT)    rt_kprintf(" init   ");
@@ -250,7 +258,7 @@ long list_thread(void)
                                thread->stack_size,
                                (thread->stack_size - ((rt_ubase_t) ptr - (rt_ubase_t) thread->stack_addr)) * 100
                                / thread->stack_size,
-                               thread->remaining_tick,
+                               RT_SCHED_PRIV(thread).remaining_tick,
                                rt_strerror(thread->error),
                                thread);
 #endif
@@ -263,21 +271,6 @@ long list_thread(void)
     return 0;
 }
 
-static void show_wait_queue(struct rt_list_node *list)
-{
-    struct rt_thread *thread;
-    struct rt_list_node *node;
-
-    for (node = list->next; node != list; node = node->next)
-    {
-        thread = rt_list_entry(node, struct rt_thread, tlist);
-        rt_kprintf("%.*s", RT_NAME_MAX, thread->parent.name);
-
-        if (node->next != list)
-            rt_kprintf("/");
-    }
-}
-
 #ifdef RT_USING_SEMAPHORE
 long list_sem(void)
 {
@@ -326,7 +319,7 @@ long list_sem(void)
                                sem->parent.parent.name,
                                sem->value,
                                rt_list_len(&sem->parent.suspend_thread));
-                    show_wait_queue(&(sem->parent.suspend_thread));
+                    rt_susp_list_print(&(sem->parent.suspend_thread));
                     rt_kprintf("\n");
                 }
                 else
@@ -395,7 +388,7 @@ long list_event(void)
                                e->parent.parent.name,
                                e->set,
                                rt_list_len(&e->parent.suspend_thread));
-                    show_wait_queue(&(e->parent.suspend_thread));
+                    rt_susp_list_print(&(e->parent.suspend_thread));
                     rt_kprintf("\n");
                 }
                 else
@@ -464,7 +457,7 @@ long list_mutex(void)
                            m->hold,
                            m->priority,
                            rt_list_len(&m->parent.suspend_thread));
-                    show_wait_queue(&(m->parent.suspend_thread));
+                    rt_susp_list_print(&(m->parent.suspend_thread));
                     rt_kprintf("\n");
                 }
                 else
@@ -537,7 +530,7 @@ long list_mailbox(void)
                                m->entry,
                                m->size,
                                rt_list_len(&m->parent.suspend_thread));
-                    show_wait_queue(&(m->parent.suspend_thread));
+                    rt_susp_list_print(&(m->parent.suspend_thread));
                     rt_kprintf("\n");
                 }
                 else
@@ -607,7 +600,7 @@ long list_msgqueue(void)
                                m->parent.parent.name,
                                m->entry,
                                rt_list_len(&m->parent.suspend_thread));
-                    show_wait_queue(&(m->parent.suspend_thread));
+                    rt_susp_list_print(&(m->parent.suspend_thread));
                     rt_kprintf("\n");
                 }
                 else
@@ -744,7 +737,7 @@ long list_mempool(void)
                                mp->block_total_count,
                                mp->block_free_count,
                                suspend_thread_count);
-                    show_wait_queue(&(mp->suspend_thread));
+                    rt_susp_list_print(&(mp->suspend_thread));
                     rt_kprintf("\n");
                 }
                 else
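
The cmd.c hunks show the accessor migration applied throughout this commit: scheduler-private thread state moves behind RT_SCHED_CTX()/RT_SCHED_PRIV() from the new include/rtsched.h. The mapping, per this diff (the exact struct layout is defined in rtsched.h):

    thread->oncpu             ->  RT_SCHED_CTX(thread).oncpu
    thread->bind_cpu          ->  RT_SCHED_CTX(thread).bind_cpu
    thread->stat              ->  RT_SCHED_CTX(thread).stat
    thread->current_priority  ->  RT_SCHED_PRIV(thread).current_priority
    thread->remaining_tick    ->  RT_SCHED_PRIV(thread).remaining_tick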

+ 12 - 11
components/libc/compilers/common/include/sys/time.h

@@ -46,7 +46,17 @@ int8_t rt_tz_is_dst(void);
 
 struct itimerspec;
 
-#if defined(_GNU_SOURCE) && (defined(__x86_64__) || defined(__i386__))
+/* 'struct timeval' is already defined by the __x86_64__ toolchain */
+#if !defined(__x86_64__) && !defined(_TIMEVAL_DEFINED)
+#define _TIMEVAL_DEFINED
+struct timeval
+{
+    time_t      tv_sec;     /* seconds */
+    suseconds_t tv_usec;    /* and microseconds */
+};
+#endif /* _TIMEVAL_DEFINED */
+
+#if defined(_GNU_SOURCE) && (defined(__x86_64__) || defined(__i386__) || defined(RT_USING_SMART))
 /* linux x86 platform gcc use! */
 #define _TIMEVAL_DEFINED
 /* Values for the first argument to `getitimer' and `setitimer'.  */
@@ -71,16 +81,7 @@ struct itimerval
     /* Time to the next timer expiration.  */
     struct timeval it_value;
 };
-#endif /* defined(_GNU_SOURCE) && (defined(__x86_64__) || defined(__i386__)) */
-
-#ifndef _TIMEVAL_DEFINED
-#define _TIMEVAL_DEFINED
-struct timeval
-{
-    time_t      tv_sec;     /* seconds */
-    suseconds_t tv_usec;    /* and microseconds */
-};
-#endif /* _TIMEVAL_DEFINED */
+#endif /* defined(_GNU_SOURCE) && (defined(__x86_64__) || defined(__i386__) || defined(RT_USING_SMART)) */
 
 #if defined(__ARMCC_VERSION) || defined(_WIN32) || (defined(__ICCARM__) && (__VER__ < 8010001))
 struct timespec
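
Exposing struct itimerval under RT_USING_SMART is what lets Smart userland drive the POSIX interval timers serviced by the new lwp_itimer.c. A minimal usage sketch (standard POSIX setitimer(), not code from this commit):

    #include <sys/time.h>

    int demo_arm_timer(void)
    {
        struct itimerval itv = {
            .it_value    = { .tv_sec = 1, .tv_usec = 0 },  /* first expiry in 1 s */
            .it_interval = { .tv_sec = 1, .tv_usec = 0 },  /* then every second   */
        };

        /* delivers SIGALRM to the process on each expiry */
        return setitimer(ITIMER_REAL, &itv, NULL);
    }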

+ 1 - 1
components/libc/posix/libdl/dlmodule.c

@@ -203,7 +203,7 @@ void dlmodule_destroy_subthread(struct rt_dlmodule *module, rt_thread_t thread)
     rt_enter_critical();
 
     /* remove thread from thread_list (ready or defunct thread list) */
-    rt_list_remove(&(thread->tlist));
+    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
 
     if ((thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_CLOSE &&
         (thread->thread_timer.parent.type == (RT_Object_Class_Static | RT_Object_Class_Timer)))

+ 1 - 1
components/libc/posix/pthreads/pthread_cond.c

@@ -285,7 +285,7 @@ rt_err_t _pthread_cond_timedwait(pthread_cond_t *cond,
                 rt_thread_suspend(thread);
 
                 /* Only support FIFO */
-                rt_list_insert_before(&(sem->parent.suspend_thread), &(thread->tlist));
+                rt_list_insert_before(&(sem->parent.suspend_thread), &RT_THREAD_LIST_NODE(thread));
 
                 /**
                 rt_ipc_list_suspend(&(sem->parent.suspend_thread),

+ 16 - 0
components/lwp/libc_musl.h

@@ -28,6 +28,22 @@
 
 #define FUTEX_CLOCK_REALTIME 256
 
+#define FUTEX_WAITERS    0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+#define FUTEX_TID_MASK   0x3fffffff
+
+struct robust_list
+{
+    struct robust_list *next;
+};
+
+struct robust_list_head
+{
+    struct robust_list list;
+    long futex_offset;
+    struct robust_list *list_op_pending;
+};
+
 /* for pmutex op */
 #define PMUTEX_INIT    0
 #define PMUTEX_LOCK    1
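
These definitions mirror the Linux robust-futex ABI: userland keeps a per-thread linked list of held robust mutexes, and on thread exit the kernel walks it, marking each futex word with FUTEX_OWNER_DIED so the next locker can recover. A sketch of how an entry maps back to its futex word (the helper is illustrative, not from this commit):

    /* Each robust_list entry is embedded in a user-space mutex; its futex
     * word sits at a fixed offset (head->futex_offset) from the entry. */
    static int *robust_entry_to_uaddr(struct robust_list *entry,
                                      struct robust_list_head *head)
    {
        return (int *)((char *)entry + head->futex_offset);
    }

    /* On owner death the kernel would then mark and wake each such word:
     *     *uaddr = (*uaddr & ~FUTEX_TID_MASK) | FUTEX_OWNER_DIED;
     */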

+ 3 - 46
components/lwp/lwp.h

@@ -168,7 +168,6 @@ enum lwp_exit_request_type
 struct termios *get_old_termios(void);
 void lwp_setcwd(char *buf);
 char *lwp_getcwd(void);
-void lwp_request_thread_exit(rt_thread_t thread_to_exit);
 int  lwp_check_exit_request(void);
 void lwp_terminate(struct rt_lwp *lwp);
 
@@ -213,52 +212,10 @@ pid_t exec(char *filename, int debug, int argc, char **argv);
 /* ctime lwp API */
 int timer_list_free(rt_list_t *timer_list);
 
-struct rt_futex;
-rt_err_t lwp_futex(struct rt_lwp *lwp, struct rt_futex *futex, int *uaddr, int op, int val, const struct timespec *timeout);
+rt_err_t lwp_futex_init(void);
+rt_err_t lwp_futex(struct rt_lwp *lwp, int *uaddr, int op, int val,
+                   const struct timespec *timeout, int *uaddr2, int val3);
 
-#ifdef ARCH_MM_MMU
-struct __pthread {
-    /* Part 1 -- these fields may be external or
-     *      * internal (accessed via asm) ABI. Do not change. */
-    struct pthread *self;
-    uintptr_t *dtv;
-    struct pthread *prev, *next; /* non-ABI */
-    uintptr_t sysinfo;
-    uintptr_t canary, canary2;
-
-    /* Part 2 -- implementation details, non-ABI. */
-    int tid;
-    int errno_val;
-    volatile int detach_state;
-    volatile int cancel;
-    volatile unsigned char canceldisable, cancelasync;
-    unsigned char tsd_used:1;
-    unsigned char dlerror_flag:1;
-    unsigned char *map_base;
-    size_t map_size;
-    void *stack;
-    size_t stack_size;
-    size_t guard_size;
-    void *result;
-    struct __ptcb *cancelbuf;
-    void **tsd;
-    struct {
-        volatile void *volatile head;
-        long off;
-        volatile void *volatile pending;
-    } robust_list;
-    volatile int timer_id;
-    locale_t locale;
-    volatile int killlock[1];
-    char *dlerror_buf;
-    void *stdio_locks;
-
-    /* Part 3 -- the positions of these fields relative to
-     *      * the end of the structure is external and internal ABI. */
-    uintptr_t canary_at_end;
-    uintptr_t *dtv_copy;
-};
-#endif
 
 #ifdef __cplusplus
 }
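
The futex entry point now takes the raw syscall argument set instead of a pre-resolved struct rt_futex. A sketch of kernel-side calls matching the new signature (values are illustrative):

    /* block while *uaddr still equals `expected`; process-private, no timeout */
    rc = lwp_futex(lwp_self(), uaddr, FUTEX_WAIT | FUTEX_PRIVATE, expected,
                   RT_NULL, RT_NULL, 0);

    /* wake at most one waiter on the same word */
    rc = lwp_futex(lwp_self(), uaddr, FUTEX_WAKE | FUTEX_PRIVATE, 1,
                   RT_NULL, RT_NULL, 0);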

+ 721 - 139
components/lwp/lwp_futex.c

@@ -10,34 +10,66 @@
  *                             Coding style: remove multiple `return` in a routine
  * 2023-08-08     Shell        Fix return value of futex(wait); Fix ops that only
  *                             FUTEX_PRIVATE is supported currently
+ * 2023-11-03     Shell        Add Support for ~FUTEX_PRIVATE
+ * 2023-11-16     xqyjlj       Add Support for futex requeue and futex pi
  */
-#define DBG_TAG "lwp.futex"
-#define DBG_LVL DBG_INFO
-#include <rtdbg.h>
-
-#include "lwp_internal.h"
-#include "lwp_pid.h"
-
-#include <rtthread.h>
-#include <lwp.h>
-#ifdef ARCH_MM_MMU
-#include <lwp_user_mm.h>
-#endif
+#define __RT_IPC_SOURCE__
+
+#include "lwp_futex_internal.h"
 #include "sys/time.h"
+#include <stdatomic.h>
+
+struct rt_mutex _glob_futex;
 
-struct rt_futex
+rt_err_t lwp_futex_init(void)
 {
-    int *uaddr;
-    rt_list_t waiting_thread;
-    struct lwp_avl_struct node;
-    struct rt_object *custom_obj;
-};
+    return rt_mutex_init(&_glob_futex, "glob_ftx", RT_IPC_FLAG_PRIO);
+}
 
-/* must have futex address_search_head taken */
-static rt_err_t _futex_destroy_locked(void *data)
+static void _futex_lock(rt_lwp_t lwp, int op_flags)
+{
+    rt_err_t error;
+    if (op_flags & FUTEX_PRIVATE)
+    {
+        LWP_LOCK(lwp);
+    }
+    else
+    {
+        error = lwp_mutex_take_safe(&_glob_futex, RT_WAITING_FOREVER, 0);
+        if (error)
+        {
+            LOG_E("%s: should not fail", __func__);
+            RT_ASSERT(0);
+        }
+    }
+}
+
+static void _futex_unlock(rt_lwp_t lwp, int op_flags)
+{
+    rt_err_t error;
+    if (op_flags & FUTEX_PRIVATE)
+    {
+        LWP_UNLOCK(lwp);
+    }
+    else
+    {
+        error = lwp_mutex_release_safe(&_glob_futex);
+        if (error)
+        {
+            LOG_E("%s: should not fail", __func__);
+            RT_ASSERT(0);
+        }
+    }
+}
+
+/**
+ * Destroy a Private FuTeX (pftx)
+ * Note: must have futex address_search_head taken
+ */
+static rt_err_t _pftx_destroy_locked(void *data)
 {
     rt_err_t ret = -1;
-    struct rt_futex *futex = (struct rt_futex *)data;
+    rt_futex_t futex = (rt_futex_t)data;
 
     if (futex)
     {
@@ -50,19 +82,28 @@ static rt_err_t _futex_destroy_locked(void *data)
          *   routine, it's always safe because it has already taken a write lock
          *    to the lwp.)
          */
-        lwp_avl_remove(&futex->node, (struct lwp_avl_struct **)futex->node.data);
+        lwp_avl_remove(&futex->node,
+                       (struct lwp_avl_struct **)futex->node.data);
 
         /* release object */
+        if (futex->mutex)
+        {
+            rt_mutex_delete(futex->mutex);
+            futex->mutex = RT_NULL;
+        }
         rt_free(futex);
         ret = 0;
     }
     return ret;
 }
 
-/* must have futex address_search_head taken */
-static struct rt_futex *_futex_create_locked(int *uaddr, struct rt_lwp *lwp)
+/**
+ * Create a Private FuTeX (pftx)
+ * Note: must have futex address_search_head taken
+ */
+static rt_futex_t _pftx_create_locked(int *uaddr, struct rt_lwp *lwp)
 {
-    struct rt_futex *futex = RT_NULL;
+    rt_futex_t futex = RT_NULL;
     struct rt_object *obj = RT_NULL;
 
     /**
@@ -73,10 +114,12 @@ static struct rt_futex *_futex_create_locked(int *uaddr, struct rt_lwp *lwp)
      */
     if (lwp)
     {
-        futex = (struct rt_futex *)rt_malloc(sizeof(struct rt_futex));
+        futex = (rt_futex_t)rt_malloc(sizeof(struct rt_futex));
         if (futex)
         {
-            obj = rt_custom_object_create("futex", (void *)futex, _futex_destroy_locked);
+            /* Create a Private FuTeX (pftx) */
+            obj = rt_custom_object_create("pftx", (void *)futex,
+                                          _pftx_destroy_locked);
             if (!obj)
             {
                 rt_free(futex);
@@ -96,16 +139,17 @@ static struct rt_futex *_futex_create_locked(int *uaddr, struct rt_lwp *lwp)
                  */
                 if (lwp_user_object_add(lwp, obj))
                 {
+                    /* this will call _pftx_destroy_locked(), but that's okay */
                     rt_object_delete(obj);
                     rt_free(futex);
                     futex = RT_NULL;
                 }
                 else
                 {
-                    futex->uaddr = uaddr;
                     futex->node.avl_key = (avl_key_t)uaddr;
                     futex->node.data = &lwp->address_search_head;
                     futex->custom_obj = obj;
+                    futex->mutex = RT_NULL;
                     rt_list_init(&(futex->waiting_thread));
 
                     /**
@@ -122,29 +166,235 @@ static struct rt_futex *_futex_create_locked(int *uaddr, struct rt_lwp *lwp)
     return futex;
 }
 
-/* must have futex address_search_head taken */
-static struct rt_futex *_futex_get_locked(void *uaddr, struct rt_lwp *lwp)
+/**
+ * Get the Private FuTeX (pftx) matching (lwp, uaddr, op)
+ */
+static rt_futex_t _pftx_get(void *uaddr, struct rt_lwp *lwp, int op,
+                            rt_err_t *rc)
 {
-    struct rt_futex *futex = RT_NULL;
     struct lwp_avl_struct *node = RT_NULL;
+    rt_futex_t futex = RT_NULL;
+    rt_err_t error = -1;
+
+    LWP_LOCK(lwp);
 
     /**
      * Note: Critical Section
      * protect lwp address_search_head (READ)
      */
     node = lwp_avl_find((avl_key_t)uaddr, lwp->address_search_head);
-    if (!node)
+    if (node)
+    {
+        futex = rt_container_of(node, struct rt_futex, node);
+        error = 0;
+    }
+    else
+    {
+        /* create a futex according to this uaddr */
+        futex = _pftx_create_locked(uaddr, lwp);
+
+        if (!futex)
+            error = -ENOMEM;
+        else
+            error = 0;
+    }
+    LWP_UNLOCK(lwp);
+
+    *rc = error;
+    return futex;
+}
+
+/**
+ * Destroy a Shared FuTeX (sftx)
+ * Note: must have the global futex lock taken
+ */
+static rt_err_t _sftx_destroy(void *data)
+{
+    rt_err_t ret = -1;
+    rt_futex_t futex = (rt_futex_t)data;
+
+    if (futex)
+    {
+        /* delete it even if it's not in the table */
+        futex_global_table_delete(&futex->entry.key);
+        if (futex->mutex)
+        {
+            rt_mutex_delete(futex->mutex);
+            futex->mutex = RT_NULL;
+        }
+        rt_free(futex);
+        ret = 0;
+    }
+    return ret;
+}
+
+/**
+ * Create a Shared FuTeX (sftx)
+ */
+static rt_futex_t _sftx_create(struct shared_futex_key *key, struct rt_lwp *lwp)
+{
+    rt_futex_t futex = RT_NULL;
+    struct rt_object *obj = RT_NULL;
+
+    if (lwp)
+    {
+        futex = (rt_futex_t)rt_calloc(1, sizeof(struct rt_futex));
+        if (futex)
+        {
+            /* create a Shared FuTeX (sftx) */
+            obj = rt_custom_object_create("sftx", (void *)futex, _sftx_destroy);
+            if (!obj)
+            {
+                rt_free(futex);
+                futex = RT_NULL;
+            }
+            else
+            {
+                if (futex_global_table_add(key, futex))
+                {
+                    rt_object_delete(obj);
+                    rt_free(futex);
+                    futex = RT_NULL;
+                }
+                else
+                {
+                    futex->mutex = RT_NULL;
+                    rt_list_init(&(futex->waiting_thread));
+                    futex->custom_obj = obj;
+                }
+            }
+        }
+    }
+    return futex;
+}
+
+/**
+ * Get the Shared FuTeX (sftx) matching (lwp, uaddr, op)
+ */
+static rt_futex_t _sftx_get(void *uaddr, struct rt_lwp *lwp, int op,
+                            rt_err_t *rc)
+{
+    rt_futex_t futex = RT_NULL;
+    struct shared_futex_key key;
+    rt_varea_t varea;
+    rt_err_t error = -1;
+
+    RD_LOCK(lwp->aspace);
+    varea = rt_aspace_query(lwp->aspace, uaddr);
+    if (varea)
+    {
+        key.mobj = varea->mem_obj;
+        key.offset = ((varea->offset) << MM_PAGE_SHIFT) |
+                     ((long)uaddr & ((1 << MM_PAGE_SHIFT) - 1));
+        RD_UNLOCK(lwp->aspace);
+
+        /* query for the key */
+        _futex_lock(lwp, op & ~FUTEX_PRIVATE);
+        error = futex_global_table_find(&key, &futex);
+        if (error != RT_EOK)
+        {
+            /* not found, do allocation */
+            futex = _sftx_create(&key, lwp);
+            if (!futex)
+                error = -ENOMEM;
+            else
+                error = 0;
+        }
+        _futex_unlock(lwp, op & ~FUTEX_PRIVATE);
+    }
+    else
+    {
+        RD_UNLOCK(lwp->aspace);
+    }
+
+    *rc = error;
+    return futex;
+}
+
+/* dispatch to the private or shared futex getter according to FUTEX_PRIVATE */
+static rt_futex_t _futex_get(void *uaddr, struct rt_lwp *lwp, int op_flags,
+                             rt_err_t *rc)
+{
+    rt_futex_t futex = RT_NULL;
+
+    if (op_flags & FUTEX_PRIVATE)
+    {
+        futex = _pftx_get(uaddr, lwp, op_flags, rc);
+    }
+    else
     {
-        return RT_NULL;
+        futex = _sftx_get(uaddr, lwp, op_flags, rc);
     }
-    futex = rt_container_of(node, struct rt_futex, node);
+
     return futex;
 }
 
-static int _futex_wait(struct rt_futex *futex, struct rt_lwp *lwp, int value, const struct timespec *timeout)
+static rt_err_t _suspend_thread_timeout_locked(rt_thread_t thread,
+                                               rt_futex_t futex,
+                                               rt_tick_t timeout)
+{
+    rt_err_t rc;
+
+    /**
+     * Brief: Add current thread into futex waiting thread list
+     *
+     * Note: Critical Section
+     * - the futex waiting_thread list (RW)
+     */
+    rc = rt_thread_suspend_to_list(thread, &futex->waiting_thread,
+                                   RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
+
+    if (rc == RT_EOK)
+    {
+        /* start the timer of thread */
+        rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME,
+                         &timeout);
+        rt_timer_start(&(thread->thread_timer));
+        rt_set_errno(ETIMEDOUT);
+    }
+
+    return rc;
+}
+
+static rt_err_t _suspend_thread_locked(rt_thread_t thread, rt_futex_t futex)
 {
+    /**
+     * Brief: Add current thread into futex waiting thread list
+     *
+     * Note: Critical Section
+     * - the futex waiting_thread list (RW)
+     */
+    return rt_thread_suspend_to_list(thread, &futex->waiting_thread,
+                                     RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
+}
+
+rt_inline int _futex_cmpxchg_value(int *curval, int *uaddr, int uval,
+                                   int newval)
+{
+    int err = 0;
+
+    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
+    {
+        err = -EFAULT;
+        goto exit;
+    }
+
+    if (!atomic_compare_exchange_strong(uaddr, &uval, newval))
+    {
+        *curval = uval;
+        err = -EAGAIN;
+    }
+
+exit:
+    return err;
+}
+
+static int _futex_wait(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
+                       int value, const struct timespec *timeout, int op_flags)
+{
+    rt_int32_t to; /* signed, so the negative-timeout check below works */
     rt_thread_t thread;
-    rt_err_t ret = -RT_EINTR;
+    rt_err_t rc = -RT_EINTR;
 
     /**
      * Brief: Remove current thread from scheduler, besides appends it to
@@ -152,89 +402,65 @@ static int _futex_wait(struct rt_futex *futex, struct rt_lwp *lwp, int value, co
      * a timer will be setup for current thread
      *
      * Note: Critical Section
-     * - futex (RW; Protected by lwp_lock)
+     * - futex.waiting (RW; Protected by lwp_lock)
      * - the local cpu
      */
-    LWP_LOCK(lwp);
-    if (*(futex->uaddr) == value)
+    _futex_lock(lwp, op_flags);
+    if (*uaddr == value)
     {
         thread = rt_thread_self();
 
-        rt_enter_critical();
-
-        ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
-
-        if (ret == RT_EOK)
+        if (timeout)
         {
-            /**
-             * Brief: Add current thread into futex waiting thread list
-             *
-             * Note: Critical Section
-             * - the futex waiting_thread list (RW)
-             */
-            rt_list_insert_before(&(futex->waiting_thread), &(thread->tlist));
-
-            if (timeout)
-            {
-                /* start the timer of thread */
-                rt_int32_t time = timeout->tv_sec * RT_TICK_PER_SECOND + timeout->tv_nsec * RT_TICK_PER_SECOND / NANOSECOND_PER_SECOND;
-
-                if (time < 0)
-                {
-                    time = 0;
-                }
+            to = timeout->tv_sec * RT_TICK_PER_SECOND;
+            to +=
+                (timeout->tv_nsec * RT_TICK_PER_SECOND) / NANOSECOND_PER_SECOND;
 
-                rt_timer_control(&(thread->thread_timer),
-                                 RT_TIMER_CTRL_SET_TIME,
-                                 &time);
-                rt_timer_start(&(thread->thread_timer));
+            if (to < 0)
+            {
+                rc = -EINVAL;
+                _futex_unlock(lwp, op_flags);
+            }
+            else
+            {
+                rt_enter_critical();
+                rc = _suspend_thread_timeout_locked(thread, futex, to);
+                _futex_unlock(lwp, op_flags);
+                rt_exit_critical();
             }
         }
         else
         {
-            ret = EINTR;
+            rt_enter_critical();
+            rc = _suspend_thread_locked(thread, futex);
+            _futex_unlock(lwp, op_flags);
+            rt_exit_critical();
         }
 
-        LWP_UNLOCK(lwp);
-        rt_exit_critical();
-
-        if (ret == RT_EOK)
+        if (rc == RT_EOK)
         {
             /* do schedule */
             rt_schedule();
             /* check errno */
-            ret = rt_get_errno();
-        }
-
-        ret = ret > 0 ? -ret : ret;
-        switch (ret)
-        {
-            case RT_EOK:
-                ret = 0;
-                break;
-            case -RT_EINTR:
-                ret = -EINTR;
-                break;
-            default:
-                ret = -EAGAIN;
-                break;
+            rc = rt_get_errno();
+            rc = rc > 0 ? -rc : rc;
         }
     }
     else
     {
-        LWP_UNLOCK(lwp);
-        ret = -EAGAIN;
+        _futex_unlock(lwp, op_flags);
+        rc = -EAGAIN;
         rt_set_errno(EAGAIN);
     }
 
-    return ret;
+    return rc;
 }
 
-static long _futex_wake(struct rt_futex *futex, struct rt_lwp *lwp, int number)
+static long _futex_wake(rt_futex_t futex, struct rt_lwp *lwp, int number,
+                        int op_flags)
 {
     long woken_cnt = 0;
     int is_empty = 0;
-    rt_thread_t thread;
 
     /**
      * Brief: Wakeup a suspended thread on the futex waiting thread list
@@ -244,27 +470,208 @@ static long _futex_wake(struct rt_futex *futex, struct rt_lwp *lwp, int number)
      */
     while (number && !is_empty)
     {
-        LWP_LOCK(lwp);
-        is_empty = rt_list_isempty(&(futex->waiting_thread));
+        _futex_lock(lwp, op_flags);
+        if (rt_susp_list_dequeue(&futex->waiting_thread, RT_EOK))
+        {
+            number--;
+            woken_cnt++;
+        }
+        _futex_unlock(lwp, op_flags);
+    }
+
+    /* do schedule */
+    rt_schedule();
+    return woken_cnt;
+}
+
+/**
+ *  Brief: Wake up at most nr_wake threads waiting on futex1. If more
+ *      waiters remain on futex1 afterwards, move at most nr_requeue of
+ *      them onto the waiting queue of futex2.
+ */
+static long _futex_requeue(rt_futex_t futex1, rt_futex_t futex2,
+                           struct rt_lwp *lwp, int nr_wake, int nr_requeue,
+                           int opflags)
+{
+    long rtn;
+    long woken_cnt = 0;
+    int is_empty = 0;
+    rt_thread_t thread;
+
+    if (futex1 == futex2)
+    {
+        return -EINVAL;
+    }
+
+    /**
+     * Brief: Wakeup a suspended thread on the futex waiting thread list
+     *
+     * Note: Critical Section
+     * - the futex waiting_thread list (RW)
+     */
+    while (nr_wake && !is_empty)
+    {
+        rt_sched_lock_level_t slvl;
+        rt_sched_lock(&slvl);
+        is_empty = rt_list_isempty(&(futex1->waiting_thread));
         if (!is_empty)
         {
-            thread = rt_list_entry(futex->waiting_thread.next, struct rt_thread, tlist);
+            thread = RT_THREAD_LIST_NODE_ENTRY(futex1->waiting_thread.next);
             /* remove from waiting list */
-            rt_list_remove(&(thread->tlist));
+            rt_list_remove(&RT_THREAD_LIST_NODE(thread));
 
             thread->error = RT_EOK;
             /* resume the suspended thread */
             rt_thread_resume(thread);
 
-            number--;
+            nr_wake--;
             woken_cnt++;
         }
-        LWP_UNLOCK(lwp);
+        rt_sched_unlock(slvl);
+    }
+    rtn = woken_cnt;
+
+    /**
+     * Brief: Requeue
+     *
+     * Note: Critical Section
+     * - the futex waiting_thread list (RW)
+     */
+    while (!is_empty && nr_requeue)
+    {
+        rt_sched_lock_level_t slvl;
+        rt_sched_lock(&slvl);
+        is_empty = rt_list_isempty(&(futex1->waiting_thread));
+        if (!is_empty)
+        {
+            thread = RT_THREAD_LIST_NODE_ENTRY(futex1->waiting_thread.next);
+            rt_list_remove(&RT_THREAD_LIST_NODE(thread));
+            rt_list_insert_before(&(futex2->waiting_thread),
+                                  &RT_THREAD_LIST_NODE(thread));
+            nr_requeue--;
+            rtn++;
+        }
+        rt_sched_unlock(slvl);
     }
 
     /* do schedule */
     rt_schedule();
-    return woken_cnt;
+
+    return rtn;
+}
+
+/* timeout argument measured against the CLOCK_REALTIME clock. */
+static long _futex_lock_pi(rt_futex_t futex, struct rt_lwp *lwp, int *uaddr,
+                           const struct timespec *timeout, int op_flags,
+                           rt_bool_t trylock)
+{
+    int word = 0, nword, cword;
+    int tid = 0;
+    rt_err_t err = 0;
+    rt_thread_t thread = RT_NULL, current_thread = RT_NULL;
+    rt_tick_t to = RT_WAITING_FOREVER;
+
+    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
+    {
+        return -EFAULT;
+    }
+
+    current_thread = rt_thread_self();
+
+    _futex_lock(lwp, op_flags);
+
+    lwp_get_from_user(&word, (void *)uaddr, sizeof(int));
+    tid = word & FUTEX_TID_MASK;
+    if (word == 0)
+    {
+        /* If the value is 0, the kernel tries to atomically set the
+         * futex value to the caller's TID. */
+        nword = current_thread->tid;
+        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
+        {
+            _futex_unlock(lwp, op_flags);
+            return -EAGAIN;
+        }
+        _futex_unlock(lwp, op_flags);
+        return 0;
+    }
+    else
+    {
+        thread = lwp_tid_get_thread_and_inc_ref(tid);
+        if (thread == RT_NULL)
+        {
+            _futex_unlock(lwp, op_flags);
+            return -ESRCH;
+        }
+        lwp_tid_dec_ref(thread);
+
+        nword = word | FUTEX_WAITERS;
+        if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
+        {
+            _futex_unlock(lwp, op_flags);
+            return -EAGAIN;
+        }
+        word = nword;
+    }
+
+    if (futex->mutex == RT_NULL)
+    {
+        futex->mutex = rt_mutex_create("futexpi", RT_IPC_FLAG_PRIO);
+        if (futex->mutex == RT_NULL)
+        {
+            _futex_unlock(lwp, op_flags);
+            return -ENOMEM;
+        }
+
+        /* set mutex->owner */
+        rt_spin_lock(&(futex->mutex->spinlock));
+        futex->mutex->owner = thread;
+        futex->mutex->hold = 1;
+        rt_spin_unlock(&(futex->mutex->spinlock));
+    }
+    if (timeout)
+    {
+        to = rt_timespec_to_tick(timeout);
+    }
+
+    if (trylock)
+    {
+        to = RT_WAITING_NO;
+    }
+    _futex_unlock(lwp, op_flags);
+
+    err = rt_mutex_take_interruptible(futex->mutex, to);
+    if (err == -RT_ETIMEOUT)
+    {
+        err = -EDEADLK;
+    }
+
+    _futex_lock(lwp, op_flags);
+    nword = current_thread->tid | FUTEX_WAITERS;
+    if (_futex_cmpxchg_value(&cword, uaddr, word, nword))
+    {
+        err = -EAGAIN;
+    }
+    _futex_unlock(lwp, op_flags);
+
+    return err;
+}
+
+static long _futex_unlock_pi(rt_futex_t futex, struct rt_lwp *lwp, int op_flags)
+{
+    rt_err_t err = 0;
+    _futex_lock(lwp, op_flags);
+    if (!futex->mutex)
+    {
+        _futex_unlock(lwp, op_flags);
+        return -EPERM;
+    }
+    _futex_unlock(lwp, op_flags);
+
+    err = rt_mutex_release(futex->mutex);
+    return err;
 }
 
 #include <syscall_generic.h>
@@ -272,82 +679,257 @@ static long _futex_wake(struct rt_futex *futex, struct rt_lwp *lwp, int number)
 rt_inline rt_bool_t _timeout_ignored(int op)
 {
     /**
-     * if (op & (FUTEX_WAKE|FUTEX_FD|FUTEX_WAKE_BITSET|FUTEX_TRYLOCK_PI|FUTEX_UNLOCK_PI)) was TRUE
-     * `timeout` should be ignored by implementation, according to POSIX futex(2) manual.
-     * since only FUTEX_WAKE is implemented in rt-smart, only FUTEX_WAKE was omitted currently
+     * According to the futex(2) manual, `timeout` is ignored by the
+     * implementation when (op & (FUTEX_WAKE|FUTEX_FD|FUTEX_WAKE_BITSET|
+     * FUTEX_TRYLOCK_PI|FUTEX_UNLOCK_PI)) is true. For FUTEX_REQUEUE and
+     * FUTEX_CMP_REQUEUE, the timeout argument is reinterpreted as an
+     * integer (val2) rather than a timespec pointer, so it is not
+     * validated as a timeout here either.
      */
-    return (op & (FUTEX_WAKE));
+    op &= ~(FUTEX_PRIVATE | FUTEX_CLOCK_REALTIME);
+    return (op == FUTEX_WAKE) || (op == FUTEX_REQUEUE) ||
+           (op == FUTEX_CMP_REQUEUE) || (op == FUTEX_UNLOCK_PI) ||
+           (op == FUTEX_TRYLOCK_PI);
 }
 
 sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout,
                    int *uaddr2, int val3)
 {
     struct rt_lwp *lwp = RT_NULL;
-    struct rt_futex *futex = RT_NULL;
     sysret_t ret = 0;
 
     if (!lwp_user_accessable(uaddr, sizeof(int)))
     {
-        ret = -EINVAL;
+        ret = -EFAULT;
     }
-    else if (timeout && !_timeout_ignored(op) && !lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
+    else if (timeout && !_timeout_ignored(op) &&
+             !lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
     {
         ret = -EINVAL;
     }
     else
     {
         lwp = lwp_self();
-        ret = lwp_futex(lwp, futex, uaddr, op, val, timeout);
+        ret = lwp_futex(lwp, uaddr, op, val, timeout, uaddr2, val3);
     }
 
     return ret;
 }
 
-rt_err_t lwp_futex(struct rt_lwp *lwp, struct rt_futex *futex, int *uaddr, int op, int val, const struct timespec *timeout)
+#define FUTEX_FLAGS (FUTEX_PRIVATE | FUTEX_CLOCK_REALTIME)
+rt_err_t lwp_futex(struct rt_lwp *lwp, int *uaddr, int op, int val,
+                   const struct timespec *timeout, int *uaddr2, int val3)
 {
+    rt_futex_t futex, futex2;
     rt_err_t rc = 0;
+    int op_type = op & ~FUTEX_FLAGS;
+    int op_flags = op & FUTEX_FLAGS;
 
-    /**
-     * Brief: Check if the futex exist, otherwise create a new one
-     *
-     * Note: Critical Section
-     * - lwp address_search_head (READ)
-     */
-    LWP_LOCK(lwp);
-    futex = _futex_get_locked(uaddr, lwp);
-    if (futex == RT_NULL)
+    futex = _futex_get(uaddr, lwp, op_flags, &rc);
+    if (!rc)
     {
-        /* create a futex according to this uaddr */
-        futex = _futex_create_locked(uaddr, lwp);
-        if (futex == RT_NULL)
+        switch (op_type)
         {
-            rc = -ENOMEM;
+            case FUTEX_WAIT:
+                rc = _futex_wait(futex, lwp, uaddr, val, timeout, op_flags);
+                break;
+            case FUTEX_WAKE:
+                rc = _futex_wake(futex, lwp, val, op_flags);
+                break;
+            case FUTEX_REQUEUE:
+                futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
+                if (!rc)
+                {
+                    _futex_lock(lwp, op_flags);
+                    rc = _futex_requeue(futex, futex2, lwp, val, (long)timeout,
+                                        op_flags);
+                    _futex_unlock(lwp, op_flags);
+                }
+                break;
+            case FUTEX_CMP_REQUEUE:
+                futex2 = _futex_get(uaddr2, lwp, op_flags, &rc);
+                if (!rc)
+                {
+                    _futex_lock(lwp, op_flags);
+                    if (*uaddr == val3)
+                    {
+                        rc = _futex_requeue(futex, futex2, lwp, val,
+                                            (long)timeout, op_flags);
+                    }
+                    else
+                    {
+                        rc = -EAGAIN;
+                    }
+                    _futex_unlock(lwp, op_flags);
+                }
+                break;
+            case FUTEX_LOCK_PI:
+                rc = _futex_lock_pi(futex, lwp, uaddr, timeout, op_flags,
+                                    RT_FALSE);
+                break;
+            case FUTEX_UNLOCK_PI:
+                rc = _futex_unlock_pi(futex, lwp, op_flags);
+                break;
+            case FUTEX_TRYLOCK_PI:
+                rc = _futex_lock_pi(futex, lwp, uaddr, RT_NULL, op_flags,
+                                    RT_TRUE);
+                break;
+            default:
+                LOG_W("User require op=%d which is not implemented", op);
+                rc = -ENOSYS;
+                break;
         }
     }
-    LWP_UNLOCK(lwp);
 
-    if (!rc)
+    return rc;
+}
+
+rt_inline int _fetch_robust_entry(struct robust_list **entry,
+                                  struct robust_list **head, rt_bool_t *is_pi)
+{
+    unsigned long uentry;
+
+    if (!lwp_user_accessable((void *)head, sizeof(*head)))
     {
-        if (!(op & FUTEX_PRIVATE))
-            rc = -ENOSYS;
-        else
+        return -EFAULT;
+    }
+
+    if (lwp_get_from_user(&uentry, (void *)head, sizeof(*head)) !=
+        sizeof(*head))
+    {
+        return -EFAULT;
+    }
+
+    *entry = (void *)(uentry & ~1UL);
+    *is_pi = uentry & 1;
+
+    return 0;
+}
+
+static int _handle_futex_death(int *uaddr, rt_thread_t thread, rt_bool_t is_pi,
+                               rt_bool_t is_pending_op)
+{
+    int word, cword = 0, nword;
+    rt_err_t rc;
+    struct rt_lwp *lwp;
+    rt_futex_t futex;
+
+    /* Futex address must be 32-bit aligned */
+    if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
+        return -1;
+
+    lwp = thread->lwp;
+retry:
+
+    if (!lwp_user_accessable((void *)uaddr, sizeof(*uaddr)))
+    {
+        return -1;
+    }
+
+    if (lwp_get_from_user(&word, (void *)uaddr, sizeof(*uaddr)) !=
+        sizeof(*uaddr))
+    {
+        return -1;
+    }
+
+    futex = _futex_get(uaddr, lwp, FUTEX_PRIVATE, &rc);
+    if (rc)
+    {
+        return -1;
+    }
+
+    if (is_pending_op && !is_pi && !word)
+    {
+        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);
+        return 0;
+    }
+
+    if ((word & FUTEX_TID_MASK) != thread->tid)
+        return 0;
+
+    nword = (word & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
+
+    if ((rc = _futex_cmpxchg_value(&cword, uaddr, word, nword)))
+    {
+        switch (rc)
         {
-            op &= ~FUTEX_PRIVATE;
-            switch (op)
-            {
-                case FUTEX_WAIT:
-                    rc = _futex_wait(futex, lwp, val, timeout);
-                    break;
-                case FUTEX_WAKE:
-                    rc = _futex_wake(futex, lwp, val);
-                    break;
-                default:
-                    LOG_W("User require op=%d which is not implemented", op);
-                    rc = -ENOSYS;
-                    break;
-            }
+            case -EFAULT:
+                return -1;
+            case -EAGAIN:
+                rt_schedule();
+                goto retry;
+            default:
+                LOG_W("unknown errno: %d in '%s'", rc, __FUNCTION__);
+                return rc;
         }
     }
 
-    return rc;
+    if (cword != word)
+        goto retry;
+
+    if (!is_pi && (word & FUTEX_WAITERS))
+        _futex_wake(futex, lwp, 1, FUTEX_PRIVATE);
+
+    return 0;
+}
+
+/**
+ * Brief: Walk thread->robust_list, mark any locks found there as dead,
+ * and notify any waiters.
+ *
+ * Note: walk it very carefully, it is a userspace list!
+ */
+void lwp_futex_exit_robust_list(rt_thread_t thread)
+{
+    struct robust_list *entry = RT_NULL;
+    struct robust_list *next_entry = RT_NULL;
+    struct robust_list *pending = RT_NULL;
+    struct robust_list_head *head;
+    unsigned int limit = 2048;
+    rt_bool_t pi, pip, next_pi;
+    unsigned long futex_offset;
+    int rc;
+
+    head = thread->robust_list;
+
+    if (head == RT_NULL)
+        return;
+
+    if (_fetch_robust_entry(&entry, &head->list.next, &pi))
+        return;
+
+    if (!lwp_user_accessable((void *)&head->futex_offset,
+                             sizeof(head->futex_offset)))
+    {
+        return;
+    }
+
+    if (lwp_get_from_user(&futex_offset, (void *)&head->futex_offset,
+                          sizeof(head->futex_offset)) !=
+        sizeof(head->futex_offset))
+    {
+        return;
+    }
+
+    if (_fetch_robust_entry(&pending, &head->list_op_pending, &pip))
+    {
+        return;
+    }
+
+    while (entry != &head->list)
+    {
+        rc = _fetch_robust_entry(&next_entry, &entry->next, &next_pi);
+        if (entry != pending)
+        {
+            if (_handle_futex_death((void *)entry + futex_offset, thread, pi,
+                                    RT_FALSE))
+                return;
+        }
+        if (rc)
+            return;
+        entry = next_entry;
+        pi = next_pi;
+
+        if (!--limit)
+            break;
+    }
+
+    if (pending)
+    {
+        _handle_futex_death((void *)pending + futex_offset, thread, pip,
+                            RT_TRUE);
+    }
 }
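
Editor's note: _futex_lock_pi()/_futex_unlock_pi() above implement the kernel half of the Linux priority-inheritance futex protocol; user space takes and releases the lock with a plain compare-and-swap and only enters the kernel on contention. A minimal user-space sketch of the matching fast/slow paths, assuming a musl-style syscall() wrapper, a gettid() helper and Linux-compatible FUTEX_* constants (none of which are provided by this patch):

/* User-space sketch only; not part of this patch. Assumes Linux-style
 * FUTEX_* constants (e.g. from <linux/futex.h>) and gettid(). */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static int pi_lock(int *uaddr)
{
    int expected = 0;
    /* fast path: 0 -> TID, taken without entering the kernel */
    if (__atomic_compare_exchange_n(uaddr, &expected, (int)gettid(), 0,
                                    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        return 0;
    /* slow path: kernel sets FUTEX_WAITERS and blocks on the PI mutex */
    return (int)syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static int pi_unlock(int *uaddr)
{
    int expected = (int)gettid();
    /* fast path: TID -> 0 succeeds only if FUTEX_WAITERS was never set */
    if (__atomic_compare_exchange_n(uaddr, &expected, 0, 0,
                                    __ATOMIC_RELEASE, __ATOMIC_RELAXED))
        return 0;
    return (int)syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}

The kernel side only ever sees the slow path, which is why _futex_lock_pi() can assume the word is either 0 or owner-TID with FUTEX_WAITERS set.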

+ 54 - 0
components/lwp/lwp_futex_internal.h

@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-01     Shell        Init ver.
+ */
+#ifndef __LWP_FUTEX_INTERNAL_H__
+#define __LWP_FUTEX_INTERNAL_H__
+
+#define DBG_TAG "lwp.futex"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include "rt_uthash.h"
+#include "lwp_internal.h"
+#include "lwp_pid.h"
+
+#include <rtthread.h>
+#include <lwp.h>
+
+#ifdef ARCH_MM_MMU
+#include <lwp_user_mm.h>
+#endif /* ARCH_MM_MMU */
+
+struct shared_futex_key
+{
+    rt_mem_obj_t mobj;
+    rt_base_t offset;
+};
+DEFINE_RT_UTHASH_TYPE(shared_futex_entry, struct shared_futex_key, key);
+
+struct rt_futex
+{
+    union {
+        /* for private futex */
+        struct lwp_avl_struct node;
+        /* for shared futex */
+        struct shared_futex_entry entry;
+    };
+
+    rt_list_t waiting_thread;
+    struct rt_object *custom_obj;
+    rt_mutex_t mutex;
+};
+typedef struct rt_futex *rt_futex_t;
+
+rt_err_t futex_global_table_add(struct shared_futex_key *key, rt_futex_t futex);
+rt_err_t futex_global_table_find(struct shared_futex_key *key, rt_futex_t *futex);
+rt_err_t futex_global_table_delete(struct shared_futex_key *key);
+
+#endif /* __LWP_FUTEX_INTERNAL_H__ */
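
Editor's note on the union above: a private futex lives in the per-process AVL tree keyed by user address, while a shared futex is keyed globally by its backing memory object and offset, so that every mapping of the same page resolves to the same futex. A hedged sketch of the shared-futex lookup this header implies; lookup_backing_object() is a hypothetical stand-in for the aspace query the C file actually performs:

/* Sketch only. lookup_backing_object() is hypothetical: it resolves a user
 * VA to the (memory object, offset) pair identifying the shared page. */
void lookup_backing_object(struct rt_lwp *lwp, void *uva,
                           rt_mem_obj_t *mobj, rt_base_t *offset);

static rt_futex_t shared_futex_lookup(struct rt_lwp *lwp, int *uaddr)
{
    struct shared_futex_key key;
    rt_futex_t futex = RT_NULL;

    rt_memset(&key, 0, sizeof(key)); /* the hash covers the whole struct */
    lookup_backing_object(lwp, uaddr, &key.mobj, &key.offset);

    if (futex_global_table_find(&key, &futex) != RT_EOK)
        return RT_NULL; /* caller may create one and add it to the table */
    return futex;
}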

+ 65 - 0
components/lwp/lwp_futex_table.c

@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-01     Shell        Init ver.
+ */
+
+#include "lwp_futex_internal.h"
+
+static struct shared_futex_entry *_futex_hash_head;
+
+rt_err_t futex_global_table_add(struct shared_futex_key *key, rt_futex_t futex)
+{
+    rt_err_t rc = 0;
+    struct shared_futex_entry *entry = &futex->entry;
+    futex->entry.key.mobj = key->mobj;
+    futex->entry.key.offset = key->offset;
+
+    RT_UTHASH_ADD(_futex_hash_head, key, sizeof(struct shared_futex_key), entry);
+    return rc;
+}
+
+rt_err_t futex_global_table_find(struct shared_futex_key *key, rt_futex_t *futex)
+{
+    rt_err_t rc;
+    rt_futex_t found_futex;
+    struct shared_futex_entry *entry;
+
+    RT_UTHASH_FIND(_futex_hash_head, key, sizeof(struct shared_futex_key), entry);
+    if (entry)
+    {
+        rc = RT_EOK;
+        found_futex = rt_container_of(entry, struct rt_futex, entry);
+    }
+    else
+    {
+        rc = -RT_ENOENT;
+        found_futex = RT_NULL;
+    }
+
+    *futex = found_futex;
+    return rc;
+}
+
+rt_err_t futex_global_table_delete(struct shared_futex_key *key)
+{
+    rt_err_t rc;
+    struct shared_futex_entry *entry;
+
+    RT_UTHASH_FIND(_futex_hash_head, key, sizeof(struct shared_futex_key), entry);
+    if (entry)
+    {
+        RT_UTHASH_DELETE(_futex_hash_head, entry);
+        rc = RT_EOK;
+    }
+    else
+    {
+        rc = -RT_ENOENT;
+    }
+
+    return rc;
+}
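
Editor's note: a round-trip usage sketch of the table API above (the futex object and offset are illustrative). The key is hashed over sizeof(struct shared_futex_key), so the struct is zeroed first to keep any padding bytes deterministic:

/* Illustrative only: add, find and delete a shared futex entry. */
static void shared_futex_table_demo(rt_futex_t futex, rt_mem_obj_t mobj)
{
    struct shared_futex_key key;
    rt_futex_t found = RT_NULL;

    rt_memset(&key, 0, sizeof(key)); /* padding is part of the hashed key */
    key.mobj = mobj;
    key.offset = 0x40;

    if (futex_global_table_add(&key, futex) == RT_EOK)
    {
        /* the same (mobj, offset) pair now resolves to `futex` */
        futex_global_table_find(&key, &found);
        RT_ASSERT(found == futex);

        /* drop the entry once the futex is destroyed */
        futex_global_table_delete(&key);
    }
}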

+ 6 - 6
components/lwp/lwp_internal.c

@@ -17,7 +17,7 @@
 
 static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     int retry;
     rt_int32_t effect_timeout;
 
@@ -92,19 +92,19 @@ static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t i
         RT_ASSERT(0);
     }
 
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     rc = _mutex_take_safe(mtx, timeout, interruptable);
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
 
     rc = rt_mutex_release(mtx);
     if (rc)
@@ -113,7 +113,7 @@ rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx)
         rt_backtrace();
     }
 
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 rt_err_t lwp_critical_enter(struct rt_lwp *lwp)

+ 4 - 4
components/lwp/lwp_internal.h

@@ -86,13 +86,13 @@ rt_err_t lwp_critical_exit(struct rt_lwp *lwp);
  * There tend to be chances where a return value is returned without correctly init
  */
 #ifndef LWP_DEBUG
-#define DEF_RETURN_CODE(name)   rt_err_t name
-#define RETURN(name)            return name
+#define LWP_DEF_RETURN_CODE(name)   rt_err_t name; RT_UNUSED(name)
+#define LWP_RETURN(name)            return name
 
 #else
 #define _LWP_UNINITIALIZED_RC   0xbeefcafe
-#define DEF_RETURN_CODE(name)   rt_err_t name = _LWP_UNINITIALIZED_RC
-#define RETURN(name)            {RT_ASSERT(name != _LWP_UNINITIALIZED_RC);return name;}
+#define LWP_DEF_RETURN_CODE(name)   rt_err_t name = _LWP_UNINITIALIZED_RC
+#define LWP_RETURN(name)            {RT_ASSERT(name != _LWP_UNINITIALIZED_RC);return name;}
 #endif /* LWP_DEBUG */
 
 #endif /* __LWP_INTERNAL_H__ */
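
Editor's note: the LWP_ prefix avoids clashes now that scheduler headers are more widely included. The intended pattern, as a sketch: declare the return code with LWP_DEF_RETURN_CODE(), assign it on every path, and exit through LWP_RETURN(), which under LWP_DEBUG asserts that the 0xbeefcafe poison value was overwritten:

/* Sketch of the pattern; a path that forgets to assign `rc` trips the
 * RT_ASSERT in LWP_RETURN() when LWP_DEBUG is enabled. */
static rt_err_t demo_take_both(rt_mutex_t a, rt_mutex_t b)
{
    LWP_DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(a, RT_WAITING_FOREVER, RT_FALSE);
    if (rc == RT_EOK)
    {
        rc = lwp_mutex_take_safe(b, RT_WAITING_FOREVER, RT_FALSE);
        if (rc == RT_EOK)
            lwp_mutex_release_safe(b);
        lwp_mutex_release_safe(a);
    }

    LWP_RETURN(rc);
}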

+ 40 - 41
components/lwp/lwp_ipc.c

@@ -8,7 +8,9 @@
  * 2019-10-12     Jesven       first version
  * 2023-07-25     Shell        Remove usage of rt_hw_interrupt API in the lwp
  * 2023-09-16     zmq810150896 Increased versatility of some features on dfs v2
+ * 2024-01-25     Shell        porting to susp_list API
  */
+#define __RT_IPC_SOURCE__
 
 #define DBG_TAG "lwp.ipc"
 #define DBG_LVL DBG_WARNING
@@ -124,11 +126,9 @@ rt_inline rt_err_t rt_channel_list_resume(rt_list_t *list)
     struct rt_thread *thread;
 
     /* get the first thread entry waiting for sending */
-    thread = rt_list_entry(list->next, struct rt_thread, tlist);
+    thread = rt_susp_list_dequeue(list, RT_THREAD_RESUME_RES_THR_ERR);
 
-    rt_thread_resume(thread);
-
-    return RT_EOK;
+    return thread ? RT_EOK : -RT_ERROR;
 }
 
 /**
@@ -136,15 +136,8 @@ rt_inline rt_err_t rt_channel_list_resume(rt_list_t *list)
  */
 rt_inline rt_err_t _channel_list_resume_all_locked(rt_list_t *list)
 {
-    struct rt_thread *thread;
-
     /* wakeup all suspended threads for sending */
-    while (!rt_list_isempty(list))
-    {
-        thread = rt_list_entry(list->next, struct rt_thread, tlist);
-        thread->error = -RT_ERROR;
-        rt_thread_resume(thread);
-    }
+    rt_susp_list_resume_all(list, RT_ERROR);
 
     return RT_EOK;
 }
@@ -155,12 +148,7 @@ rt_inline rt_err_t _channel_list_resume_all_locked(rt_list_t *list)
 rt_inline rt_err_t rt_channel_list_suspend(rt_list_t *list, struct rt_thread *thread)
 {
     /* suspend thread */
-    rt_err_t ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
-
-    if (ret == RT_EOK)
-    {
-        rt_list_insert_before(list, &(thread->tlist)); /* list end */
-    }
+    rt_err_t ret = rt_thread_suspend_to_list(thread, list, RT_IPC_FLAG_FIFO, RT_INTERRUPTIBLE);
 
     return ret;
 }
@@ -372,10 +360,13 @@ static rt_err_t wakeup_sender_wait_reply(void *object, struct rt_thread *thread)
 
 static void sender_timeout(void *parameter)
 {
+    rt_sched_lock_level_t slvl;
     struct rt_thread *thread = (struct rt_thread *)parameter;
     rt_channel_t ch;
 
-    ch = (rt_channel_t)(thread->wakeup.user_data);
+    rt_sched_lock(&slvl);
+
+    ch = (rt_channel_t)(thread->wakeup_handle.user_data);
     if (ch->stat == RT_IPC_STAT_ACTIVE && ch->reply == thread)
     {
         ch->stat = RT_IPC_STAT_IDLE;
@@ -399,14 +390,14 @@ static void sender_timeout(void *parameter)
             l = l->next;
         }
     }
-    thread->error = -RT_ETIMEOUT;
-    thread->wakeup.func = RT_NULL;
 
-    rt_list_remove(&(thread->tlist));
+    thread->wakeup_handle.func = RT_NULL;
+    thread->error = RT_ETIMEOUT;
+
     /* insert to schedule ready list */
-    rt_schedule_insert_thread(thread);
+    rt_sched_insert_thread(thread);
     /* do schedule */
-    rt_schedule();
+    rt_sched_unlock_n_resched(slvl);
 }
 
 /**
@@ -522,7 +513,7 @@ static rt_err_t _send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, int n
 
 static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, int need_reply, rt_channel_msg_t data_ret, rt_int32_t time, rt_ipc_msg_t msg)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     rt_thread_t thread_recv;
     rt_thread_t thread_send = 0;
     void (*old_timeout_func)(void *) = 0;
@@ -627,9 +618,12 @@ static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, in
 
         if (!need_reply || rc == RT_EOK)
         {
-            thread_recv = rt_list_entry(ch->parent.suspend_thread.next, struct rt_thread, tlist);
+            rt_sched_lock_level_t slvl;
+            rt_sched_lock(&slvl);
+            thread_recv = RT_THREAD_LIST_NODE_ENTRY(ch->parent.suspend_thread.next);
             thread_recv->msg_ret = msg; /* to the first suspended receiver */
             thread_recv->error = RT_EOK;
+            rt_sched_unlock(slvl);
             rt_channel_list_resume(&ch->parent.suspend_thread);
         }
         break;
@@ -706,7 +700,7 @@ rt_err_t rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data
  */
 rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     rt_ipc_msg_t msg;
     struct rt_thread *thread;
     rt_base_t level;
@@ -758,7 +752,7 @@ rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data)
         rt_schedule();
     }
 
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 static rt_err_t wakeup_receiver(void *object, struct rt_thread *thread)
@@ -783,24 +777,27 @@ static void receiver_timeout(void *parameter)
 {
     struct rt_thread *thread = (struct rt_thread *)parameter;
     rt_channel_t ch;
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
 
-    ch = (rt_channel_t)(thread->wakeup.user_data);
+    rt_sched_lock(&slvl);
+
+    ch = (rt_channel_t)(thread->wakeup_handle.user_data);
 
-    level = rt_spin_lock_irqsave(&ch->slock);
-    ch->stat = RT_IPC_STAT_IDLE;
     thread->error = -RT_ETIMEOUT;
-    thread->wakeup.func = RT_NULL;
+    thread->wakeup_handle.func = RT_NULL;
+
+    rt_spin_lock(&ch->slock);
+    ch->stat = RT_IPC_STAT_IDLE;
 
-    rt_list_remove(&(thread->tlist));
+    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
     /* insert to schedule ready list */
-    rt_schedule_insert_thread(thread);
+    rt_sched_insert_thread(thread);
 
     _rt_channel_check_wq_wakup_locked(ch);
-    rt_spin_unlock_irqrestore(&ch->slock, level);
+    rt_spin_unlock(&ch->slock);
 
     /* do schedule */
-    rt_schedule();
+    rt_sched_unlock_n_resched(slvl);
 }
 
 /**
@@ -808,7 +805,7 @@ static void receiver_timeout(void *parameter)
  */
 static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, rt_int32_t time)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     struct rt_thread *thread;
     rt_ipc_msg_t msg_ret;
     void (*old_timeout_func)(void *) = 0;
@@ -839,10 +836,12 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
             rt_list_remove(ch->wait_msg.next); /* remove the message from the channel */
             if (msg_ret->need_reply)
             {
+                rt_sched_lock_level_t slvl;
+                rt_sched_lock(&slvl);
                 RT_ASSERT(ch->wait_thread.next != &ch->wait_thread);
-
-                thread = rt_list_entry(ch->wait_thread.next, struct rt_thread, tlist);
+                thread = RT_THREAD_LIST_NODE_ENTRY(ch->wait_thread.next);
                 rt_list_remove(ch->wait_thread.next);
+                rt_sched_unlock(slvl);
                 ch->reply = thread;            /* record the waiting sender */
                 ch->stat = RT_IPC_STAT_ACTIVE; /* no valid suspened receivers */
             }
@@ -912,7 +911,7 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
 
     rt_spin_unlock_irqrestore(&ch->slock, level);
 
-    RETURN(rc);
+    LWP_RETURN(rc);
 }
 
 rt_err_t rt_raw_channel_recv(rt_channel_t ch, rt_channel_msg_t data)

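Editor's note: the channel code above now routes every wait queue through the rt_susp_list API instead of manipulating thread->tlist by hand, keeping queue membership consistent with the scheduler lock. The waiter and waker halves pair up roughly like this (a sketch using only calls visible in this patch; error propagation is simplified):

/* Sketch of the susp_list pairing; simplified from the channel code. */
static rt_err_t wait_on(rt_list_t *susp_list)
{
    rt_thread_t self = rt_thread_self();
    rt_err_t rc;

    /* enqueue and suspend in one step: FIFO order, interruptible sleep */
    rc = rt_thread_suspend_to_list(self, susp_list, RT_IPC_FLAG_FIFO,
                                   RT_INTERRUPTIBLE);
    if (rc == RT_EOK)
    {
        rt_schedule();    /* yield; we run again once resumed */
        rc = self->error; /* set by the waker or by a timeout */
    }
    return rc;
}

static void wake_one(rt_list_t *susp_list)
{
    /* dequeue and resume the first waiter, if any */
    if (rt_susp_list_dequeue(susp_list, RT_THREAD_RESUME_RES_THR_ERR))
        rt_schedule();
}
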
+ 71 - 0
components/lwp/lwp_itimer.c

@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-30     Shell        Add itimer support
+ */
+
+#define _GNU_SOURCE
+#include <sys/time.h>
+#undef _GNU_SOURCE
+
+#define DBG_TAG "lwp.signal"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include <rthw.h>
+#include <rtthread.h>
+#include <string.h>
+
+#include "lwp_internal.h"
+#include "sys/signal.h"
+#include "syscall_generic.h"
+
+rt_err_t lwp_signal_setitimer(rt_lwp_t lwp, int which, const struct itimerspec *restrict new, struct itimerspec *restrict old)
+{
+    rt_err_t rc = RT_EOK;
+    timer_t timerid = 0;
+    int flags = 0;
+
+    if (lwp->signal.real_timer == LWP_SIG_INVALID_TIMER)
+    {
+        struct sigevent sevp = {
+            .sigev_signo = SIGALRM,
+            .sigev_notify = SIGEV_SIGNAL,
+        };
+
+        rc = timer_create(CLOCK_REALTIME_ALARM, &sevp, &timerid);
+        if (rc == RT_EOK)
+        {
+            RT_ASSERT(timerid != LWP_SIG_INVALID_TIMER);
+            lwp->signal.real_timer = timerid;
+        }
+        else
+        {
+            /* failed to create timer */
+        }
+    }
+    else
+    {
+        timerid = lwp->signal.real_timer;
+    }
+
+    if (rc == RT_EOK)
+    {
+        switch (which)
+        {
+            case ITIMER_REAL:
+                rc = timer_settime(timerid, flags, new, old);
+                break;
+            default:
+                rc = -ENOSYS;
+                LOG_W("%s() unsupported timer", __func__);
+                break;
+        }
+    }
+
+    return rc;
+}
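
Editor's note: from user space this path is reached through the ordinary POSIX interface; for example, arming a one-shot SIGALRM (standard setitimer(2) usage; the toolchain's wrapper is assumed to marshal the itimerval into the itimerspec consumed by sys_setitimer()):

#include <signal.h>
#include <sys/time.h>
#include <unistd.h>

static void on_alarm(int sig) { (void)sig; write(1, "ALRM\n", 5); }

int main(void)
{
    struct itimerval itv = {
        .it_value    = { .tv_sec = 1, .tv_usec = 0 }, /* fire once after 1s */
        .it_interval = { .tv_sec = 0, .tv_usec = 0 }, /* no periodic reload */
    };

    signal(SIGALRM, on_alarm);
    setitimer(ITIMER_REAL, &itv, NULL); /* lands in sys_setitimer() */
    pause();                            /* wait for delivery */
    return 0;
}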

+ 39 - 112
components/lwp/lwp_pid.c

@@ -14,8 +14,12 @@
  *                             error
  * 2023-10-27     shell        Format codes of sys_exit(). Fix the data racing where lock is missed
  *                             Add reference on pid/tid, so the resource is not freed while using.
+ * 2024-01-25     shell        porting to new sched API
  */
 
+/* includes scheduler related API */
+#define __RT_IPC_SOURCE__
+
 #include <rthw.h>
 #include <rtthread.h>
 
@@ -59,7 +63,7 @@ int lwp_pid_init(void)
 
 void lwp_pid_lock_take(void)
 {
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
 
     rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
     /* should never failed */
@@ -382,7 +386,7 @@ rt_lwp_t lwp_create(rt_base_t flags)
         }
     }
 
-    LOG_D("%s(pid=%d) => %p", __func__, new_lwp->pid, new_lwp);
+    LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
     return new_lwp;
 }
 
@@ -699,6 +703,7 @@ pid_t lwp_name2pid(const char *name)
     pid_t pid = 0;
     rt_thread_t main_thread;
     char* process_name = RT_NULL;
+    rt_sched_lock_level_t slvl;
 
     lwp_pid_lock_take();
     for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
@@ -713,10 +718,12 @@ pid_t lwp_name2pid(const char *name)
             if (!rt_strncmp(name, process_name, RT_NAME_MAX))
             {
                 main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-                if (!(main_thread->stat & RT_THREAD_CLOSE))
+                rt_sched_lock(&slvl);
+                if (rt_sched_thread_get_stat(main_thread) != RT_THREAD_CLOSE)
                 {
                     pid = lwp->pid;
                 }
+                rt_sched_unlock(slvl);
             }
         }
     }
@@ -767,7 +774,7 @@ static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
             error = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
             if (error == 0)
             {
-                rt_list_insert_before(&child->wait_list, &(cur_thr->tlist));
+                rt_list_insert_before(&child->wait_list, &RT_THREAD_LIST_NODE(cur_thr));
                 LWP_UNLOCK(child);
 
                 rt_set_errno(RT_EINTR);
@@ -898,15 +905,15 @@ static void print_thread_info(struct rt_thread* thread, int maxlen)
     rt_uint8_t stat;
 
 #ifdef RT_USING_SMP
-    if (thread->oncpu != RT_CPU_DETACHED)
-        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->oncpu, thread->current_priority);
+    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
+        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
     else
-        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
+        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
 #else
     rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
 #endif /*RT_USING_SMP*/
 
-    stat = (thread->stat & RT_THREAD_STAT_MASK);
+    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
     if (stat == RT_THREAD_READY)        rt_kprintf(" ready  ");
     else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
     else if (stat == RT_THREAD_INIT)    rt_kprintf(" init   ");
@@ -932,7 +939,7 @@ static void print_thread_info(struct rt_thread* thread, int maxlen)
             thread->stack_size,
             (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
             / thread->stack_size,
-            thread->remaining_tick,
+            RT_SCHED_PRIV(thread).remaining_tick,
             thread->error);
 #endif
 }
@@ -1066,99 +1073,15 @@ MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
 int lwp_check_exit_request(void)
 {
     rt_thread_t thread = rt_thread_self();
+    rt_base_t expected = LWP_EXIT_REQUEST_TRIGGERED;
+
     if (!thread->lwp)
     {
         return 0;
     }
 
-    if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
-    {
-        thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
-        return 1;
-    }
-    return 0;
-}
-
-static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
-{
-    int found = 0;
-    rt_base_t level;
-    rt_list_t *list;
-
-    level = rt_spin_lock_irqsave(&thread->spinlock);
-    list = lwp->t_grp.next;
-    while (list != &lwp->t_grp)
-    {
-        rt_thread_t iter_thread;
-
-        iter_thread = rt_list_entry(list, struct rt_thread, sibling);
-        if (thread == iter_thread)
-        {
-            found = 1;
-            break;
-        }
-        list = list->next;
-    }
-    rt_spin_unlock_irqrestore(&thread->spinlock, level);
-    return found;
-}
-
-void lwp_request_thread_exit(rt_thread_t thread_to_exit)
-{
-    rt_thread_t main_thread;
-    rt_base_t level;
-    rt_list_t *list;
-    struct rt_lwp *lwp;
-
-    lwp = lwp_self();
-
-    if ((!thread_to_exit) || (!lwp))
-    {
-        return;
-    }
-
-    level = rt_spin_lock_irqsave(&thread_to_exit->spinlock);
-
-    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-    if (thread_to_exit == main_thread)
-    {
-        goto finish;
-    }
-    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
-    {
-        goto finish;
-    }
-
-    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
-    {
-        rt_thread_t thread;
-
-        thread = rt_list_entry(list, struct rt_thread, sibling);
-        if (thread != thread_to_exit)
-        {
-            continue;
-        }
-        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
-        {
-            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
-        }
-        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
-        {
-            thread->error = -RT_EINTR;
-            rt_hw_dsb();
-            rt_thread_wakeup(thread);
-        }
-        break;
-    }
-
-    while (found_thread(lwp, thread_to_exit))
-    {
-        rt_thread_mdelay(10);
-    }
-
-finish:
-    rt_spin_unlock_irqrestore(&thread_to_exit->spinlock, level);
-    return;
+    return rt_atomic_compare_exchange_strong(&thread->exit_request, &expected,
+                                             LWP_EXIT_REQUEST_IN_PROCESS);
 }
 
 static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
@@ -1193,34 +1116,32 @@ void lwp_terminate(struct rt_lwp *lwp)
 
 static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
 {
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
     rt_list_t *list;
     rt_thread_t thread;
+    rt_base_t expected = LWP_EXIT_REQUEST_NONE;
 
     /* broadcast exit request for sibling threads */
     LWP_LOCK(lwp);
     for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
     {
         thread = rt_list_entry(list, struct rt_thread, sibling);
-        level = rt_spin_lock_irqsave(&thread->spinlock);
-        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
-        {
-            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
-        }
-        rt_spin_unlock_irqrestore(&thread->spinlock, level);
 
-        level = rt_spin_lock_irqsave(&thread->spinlock);
-        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
+        rt_atomic_compare_exchange_strong(&thread->exit_request, &expected,
+                                          LWP_EXIT_REQUEST_TRIGGERED);
+
+        rt_sched_lock(&slvl);
+        /* don't release the sched lock yet, otherwise the thread may have been freed */
+        if (rt_sched_thread_is_suspended(thread))
         {
             thread->error = RT_EINTR;
-            rt_spin_unlock_irqrestore(&thread->spinlock, level);
+            rt_sched_unlock(slvl);
 
-            rt_hw_dsb();
             rt_thread_wakeup(thread);
         }
         else
         {
-            rt_spin_unlock_irqrestore(&thread->spinlock, level);
+            rt_sched_unlock(slvl);
         }
     }
     LWP_UNLOCK(lwp);
@@ -1240,6 +1161,7 @@ static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
         subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
         if (!subthread_is_terminated)
         {
+            rt_sched_lock_level_t slvl;
             rt_thread_t sub_thread;
             rt_list_t *list;
             int all_subthread_in_init = 1;
@@ -1247,13 +1169,18 @@ static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
             /* check all subthread is in init state */
             for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
             {
-
+                rt_sched_lock(&slvl);
                 sub_thread = rt_list_entry(list, struct rt_thread, sibling);
-                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
+                if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
                 {
+                    rt_sched_unlock(slvl);
                     all_subthread_in_init = 0;
                     break;
                 }
+                else
+                {
+                    rt_sched_unlock(slvl);
+                }
             }
             if (all_subthread_in_init)
             {
@@ -1344,7 +1271,7 @@ static void _resr_cleanup(struct rt_lwp *lwp)
         LWP_UNLOCK(lwp);
         if (!rt_list_isempty(&lwp->wait_list))
         {
-            thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
+            thread = RT_THREAD_LIST_NODE_ENTRY(lwp->wait_list.next);
             thread->error = RT_EOK;
             thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
             rt_thread_resume(thread);

+ 22 - 5
components/lwp/lwp_signal.c

@@ -11,7 +11,7 @@
  *                             remove lwp_signal_backup/restore() to reduce architecture codes
  *                             update the generation, pending and delivery routines
  */
-
+#define __RT_IPC_SOURCE__
 #define DBG_TAG "lwp.signal"
 #define DBG_LVL DBG_INFO
 #include <rtdbg.h>
@@ -408,6 +408,8 @@ rt_err_t lwp_signal_init(struct lwp_signal *sig)
 {
     rt_err_t rc = RT_EOK;
 
+    sig->real_timer = LWP_SIG_INVALID_TIMER;
+
     memset(&sig->sig_dispatch_thr, 0, sizeof(sig->sig_dispatch_thr));
 
     memset(&sig->sig_action, 0, sizeof(sig->sig_action));
@@ -423,6 +425,7 @@ rt_err_t lwp_signal_detach(struct lwp_signal *signal)
 {
     rt_err_t ret = RT_EOK;
 
+    timer_delete(signal->real_timer);
     lwp_sigqueue_clear(&signal->sig_queue);
 
     return ret;
@@ -561,27 +564,41 @@ void lwp_thread_signal_catch(void *exp_frame)
 static int _do_signal_wakeup(rt_thread_t thread, int sig)
 {
     int need_schedule;
+    rt_sched_lock_level_t slvl;
     if (!_sigismember(&thread->signal.sigset_mask, sig))
     {
-        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
+        rt_sched_lock(&slvl);
+        int stat = rt_sched_thread_get_stat(thread);
+        if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
         {
-            if ((thread->stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
+            if ((stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
             {
+                thread->error = RT_EINTR;
+                rt_sched_unlock(slvl);
+
                 rt_thread_wakeup(thread);
                 need_schedule = 1;
             }
-            else if ((sig == SIGKILL) && ((thread->stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK))
+            else if ((sig == SIGKILL || sig == SIGSTOP) &&
+                    ((stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK))
             {
+                thread->error = RT_EINTR;
+                rt_sched_unlock(slvl);
+
                 rt_thread_wakeup(thread);
                 need_schedule = 1;
             }
             else
             {
+                rt_sched_unlock(slvl);
                 need_schedule = 0;
             }
         }
         else
+        {
+            rt_sched_unlock(slvl);
             need_schedule = 0;
+        }
     }
     else
         need_schedule = 0;
@@ -838,7 +855,7 @@ rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long
 
     LOG_D("%s(signo=%d)", __func__, signo);
 
-    if (!thread || signo < 0 || signo >= _LWP_NSIG)
+    if (!thread || signo <= 0 || signo >= _LWP_NSIG)
     {
         ret = -RT_EINVAL;
     }

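Editor's note: _do_signal_wakeup() now distinguishes, under the scheduler lock, between interruptible sleeps (any deliverable signal wakes the thread) and killable sleeps (only SIGKILL/SIGSTOP do). Condensed into a decision helper, illustrative only, using the masks from the hunk above:

/* Illustrative helper, not part of the patch: should `sig` wake a
 * thread whose scheduler stat is `stat`? */
static int should_wake(int stat, int sig)
{
    if ((stat & RT_THREAD_SUSPEND_MASK) != RT_THREAD_SUSPEND_MASK)
        return 0; /* not suspended at all */

    if ((stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
        return 1; /* interruptible sleep: any deliverable signal wakes it */

    if ((sig == SIGKILL || sig == SIGSTOP) &&
        (stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK)
        return 1; /* killable sleep: only fatal control signals wake it */

    return 0; /* uninterruptible with respect to this signal */
}
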
+ 9 - 0
components/lwp/lwp_signal.h

@@ -17,6 +17,9 @@
 #include <rtthread.h>
 #include <sys/signal.h>
 
+struct timespec;
+struct itimerspec;
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -28,6 +31,7 @@ extern "C" {
 #define LWP_SIG_USER_SA_FLAGS                                               \
     (SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART |   \
      SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS)
+#define LWP_SIG_INVALID_TIMER ((timer_t)-1)
 
 typedef enum {
     LWP_SIG_MASK_CMD_BLOCK,
@@ -40,6 +44,7 @@ typedef enum {
  * LwP implementation of POSIX signal
  */
 struct lwp_signal {
+    timer_t real_timer;
     struct lwp_sigqueue sig_queue;
     rt_thread_t sig_dispatch_thr[_LWP_NSIG];
 
@@ -167,6 +172,10 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
  */
 void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *sigset);
 
+rt_err_t lwp_signal_setitimer(struct rt_lwp *lwp, int which,
+                              const struct itimerspec *restrict new,
+                              struct itimerspec *restrict old);
+
 #ifdef __cplusplus
 }
 #endif

+ 34 - 40
components/lwp/lwp_syscall.c

@@ -14,7 +14,7 @@
  * 2023-07-06     Shell        adapt the signal API, and clone, fork to new implementation of lwp signal
  * 2023-07-27     Shell        Move tid_put() from lwp_free() to sys_exit()
  */
-
+#define __RT_IPC_SOURCE__
 #define _GNU_SOURCE
 
 /* RT-Thread System call */
@@ -1120,7 +1120,7 @@ sysret_t sys_getpriority(int which, id_t who)
         if (lwp)
         {
             rt_thread_t thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-            prio = thread->current_priority;
+            prio = RT_SCHED_PRIV(thread).current_priority;
         }
 
         lwp_pid_lock_release();
@@ -1808,7 +1808,7 @@ rt_thread_t sys_thread_create(void *arg[])
     }
 
 #ifdef RT_USING_SMP
-    thread->bind_cpu = lwp->bind_cpu;
+    RT_SCHED_CTX(thread).bind_cpu = lwp->bind_cpu;
 #endif
     thread->cleanup = lwp_cleanup;
     thread->user_entry = (void (*)(void *))arg[1];
@@ -1935,15 +1935,15 @@ long _sys_clone(void *arg[])
             RT_NULL,
             RT_NULL,
             self->stack_size,
-            self->init_priority,
-            self->init_tick);
+            RT_SCHED_PRIV(self).init_priority,
+            RT_SCHED_PRIV(self).init_tick);
     if (!thread)
     {
         goto fail;
     }
 
 #ifdef RT_USING_SMP
-    thread->bind_cpu = lwp->bind_cpu;
+    RT_SCHED_CTX(self).bind_cpu = lwp->bind_cpu;
 #endif
     thread->cleanup = lwp_cleanup;
     thread->user_entry = RT_NULL;
@@ -2120,8 +2120,8 @@ sysret_t _sys_fork(void)
             RT_NULL,
             RT_NULL,
             self_thread->stack_size,
-            self_thread->init_priority,
-            self_thread->init_tick);
+            RT_SCHED_PRIV(self_thread).init_priority,
+            RT_SCHED_PRIV(self_thread).init_tick);
     if (!thread)
     {
         SET_ERRNO(ENOMEM);
@@ -4231,6 +4231,9 @@ sysret_t sys_sigtimedwait(const sigset_t *sigset, siginfo_t *info, const struct
     struct timespec ktimeout;
     struct timespec *ptimeout;
 
+    /* for RT_ASSERT */
+    RT_UNUSED(ret);
+
     /* Fit sigset size to lwp set */
     if (sizeof(lwpset) < sigsize)
     {
@@ -5505,7 +5508,7 @@ sysret_t sys_sched_setaffinity(pid_t pid, size_t size, void *set)
 sysret_t sys_sched_getaffinity(const pid_t pid, size_t size, void *set)
 {
 #ifdef ARCH_MM_MMU
-    DEF_RETURN_CODE(rc);
+    LWP_DEF_RETURN_CODE(rc);
     void *mask;
     struct rt_lwp *lwp;
     rt_bool_t need_release = RT_FALSE;
@@ -5571,7 +5574,7 @@ sysret_t sys_sched_getaffinity(const pid_t pid, size_t size, void *set)
 
     kmem_put(mask);
 
-    RETURN(rc);
+    LWP_RETURN(rc);
 #else
     return -1;
 #endif
@@ -5679,13 +5682,11 @@ sysret_t sys_sched_yield(void)
     return 0;
 }
 
-sysret_t sys_sched_getparam(const pid_t pid, void *param)
+sysret_t sys_sched_getparam(const pid_t tid, void *param)
 {
     struct sched_param *sched_param = RT_NULL;
-    struct rt_lwp *lwp = NULL;
-    rt_thread_t main_thread;
+    rt_thread_t thread;
     int ret = -1;
-    rt_bool_t need_release = RT_FALSE;
 
     if (!lwp_user_accessable(param, sizeof(struct sched_param)))
     {
@@ -5698,27 +5699,16 @@ sysret_t sys_sched_getparam(const pid_t pid, void *param)
         return -ENOMEM;
     }
 
-    if (pid > 0)
-    {
-        need_release = RT_TRUE;
-        lwp_pid_lock_take();
-        lwp = lwp_from_pid_locked(pid);
-    }
-    else if (pid == 0)
-    {
-        lwp = lwp_self();
-    }
+    thread = lwp_tid_get_thread_and_inc_ref(tid);
 
-    if (lwp)
+    if (thread)
     {
-        main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
-        if (need_release)
-            lwp_pid_lock_release();
-
-        sched_param->sched_priority = main_thread->current_priority;
+        sched_param->sched_priority = RT_SCHED_PRIV(thread).current_priority;
         ret = 0;
     }
 
+    lwp_tid_dec_ref(thread);
+
     lwp_put_to_user((void *)param, sched_param, sizeof(struct sched_param));
     kmem_put(sched_param);
 
@@ -5800,7 +5790,7 @@ sysret_t sys_sched_getscheduler(int tid, int *policy, void *param)
     }
 
     thread = lwp_tid_get_thread_and_inc_ref(tid);
-    sched_param->sched_priority = thread->current_priority;
+    sched_param->sched_priority = RT_SCHED_PRIV(thread).current_priority;
     lwp_tid_dec_ref(thread);
 
     lwp_put_to_user((void *)param, sched_param, sizeof(struct sched_param));
@@ -6814,20 +6804,24 @@ sysret_t sys_memfd_create()
 {
     return 0;
 }
+
 sysret_t sys_setitimer(int which, const struct itimerspec *restrict new, struct itimerspec *restrict old)
 {
-    int ret = 0;
-    timer_t timerid = 0;
-    struct sigevent sevp_k = {0};
+    sysret_t rc = 0;
+    rt_lwp_t lwp = lwp_self();
+    struct itimerspec new_value_k;
+    struct itimerspec old_value_k;
 
-    sevp_k.sigev_notify = SIGEV_SIGNAL;
-    sevp_k.sigev_signo = SIGALRM;
-    ret = timer_create(CLOCK_REALTIME_ALARM, &sevp_k, &timerid);
-    if (ret != 0)
+    if (lwp_get_from_user(&new_value_k, (void *)new, sizeof(*new)) != sizeof(*new))
     {
-        return GET_ERRNO();
+        return -EFAULT;
     }
-    return sys_timer_settime(timerid,0,new,old);
+
+    rc = lwp_signal_setitimer(lwp, which, &new_value_k, &old_value_k);
+    if (old && lwp_put_to_user(old, (void *)&old_value_k, sizeof old_value_k) != sizeof old_value_k)
+        return -EFAULT;
+
+    return rc;
 }
 
 const static struct rt_syscall_def func_table[] =

+ 2 - 12
components/mm/mm_aspace.h

@@ -25,22 +25,12 @@
 #define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
 #define PV_OFFSET        (rt_kmem_pvoff())
 
-#ifndef RT_USING_SMP
-typedef rt_spinlock_t mm_spinlock;
-
-#define MM_PGTBL_LOCK_INIT(aspace)
-#define MM_PGTBL_LOCK(aspace)      (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
-#define MM_PGTBL_UNLOCK(aspace)    (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))
-
-#else
-typedef struct rt_spinlock mm_spinlock;
+typedef struct rt_spinlock mm_spinlock_t;
 
 #define MM_PGTBL_LOCK_INIT(aspace) (rt_spin_lock_init(&((aspace)->pgtbl_lock)))
 #define MM_PGTBL_LOCK(aspace)      (rt_spin_lock(&((aspace)->pgtbl_lock)))
 #define MM_PGTBL_UNLOCK(aspace)    (rt_spin_unlock(&((aspace)->pgtbl_lock)))
 
-#endif /* RT_USING_SMP */
-
 struct rt_aspace;
 struct rt_varea;
 struct rt_mem_obj;
@@ -53,7 +43,7 @@ typedef struct rt_aspace
     rt_size_t size;
 
     void *page_table;
-    mm_spinlock pgtbl_lock;
+    mm_spinlock_t pgtbl_lock;
 
     struct _aspace_tree tree;
     struct rt_mutex bst_lock;

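Editor's note: with the UP/SMP special case removed, mm_spinlock_t is always a struct rt_spinlock and the lock macros behave identically on both configurations. Every page-table mutation is expected to be bracketed like this (sketch; MM_PGTBL_LOCK_INIT() is assumed to have run once at aspace creation):

/* Sketch: the uniform guard around page-table updates on an aspace. */
static void demo_pgtbl_update(struct rt_aspace *aspace)
{
    MM_PGTBL_LOCK(aspace);
    /* ... read or modify entries reachable from aspace->page_table ... */
    MM_PGTBL_UNLOCK(aspace);
}
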
+ 9 - 0
components/utilities/libadt/uthash/SConscript

@@ -0,0 +1,9 @@
+from building import *
+
+cwd     = GetCurrentDir()
+src     = Glob('*.c')
+CPPPATH = [cwd]
+group   = []
+
+group = DefineGroup('LIBADT', src, depend = [], CPPPATH = CPPPATH)
+Return('group')

+ 16 - 0
components/utilities/libadt/uthash/dict.h

@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-01     Shell        Init ver.
+ */
+
+#ifndef __LIBADT_DICT_H__
+#define __LIBADT_DICT_H__
+
+#include "rt_uthash.h"
+
+#endif

+ 57 - 0
components/utilities/libadt/uthash/rt_uthash.h

@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-11-01     Shell        Porting to RTT API
+ */
+#ifndef __LIBADT_RT_UTHASH_H__
+#define __LIBADT_RT_UTHASH_H__
+
+#include <rtthread.h>
+
+#define uthash_malloc(sz)    rt_malloc(sz)
+#define uthash_free(ptr, sz) rt_free(ptr)
+
+/**
+ * For performance, the libc implementations are used by default.
+ * If compatibility is the bigger concern, define
+ * RT_UTHASH_CONFIG_COMPATIBILITY_FIRST before including rt_uthash.h
+ * to use the RT-Thread implementations instead.
+ */
+#ifndef RT_UTHASH_CONFIG_COMPATIBILITY_FIRST
+#define uthash_bzero(a, n) memset(a, '\0', n)
+#define uthash_strlen(s)   strlen(s)
+
+#else
+#define uthash_bzero(a, n) rt_memset(a, '\0', n)
+#define uthash_strlen(s)   rt_strlen(s)
+
+#endif /* RT_UTHASH_CONFIG_COMPATIBILITY_FIRST */
+
+/* if any fatal error happens, log it and return a failure code */
+#define uthash_fatal(msg)  \
+    do                     \
+    {                      \
+        LOG_E(msg);        \
+        return -RT_ENOMEM; \
+    } while (0)
+
+#include "uthash.h"
+
+#define DEFINE_RT_UTHASH_TYPE(entry_name, key_type, key_name) \
+    typedef struct entry_name                                 \
+    {                                                         \
+        key_type key_name;                                    \
+        UT_hash_handle hh;                                    \
+    } *entry_name##_t;
+
+#define RT_UTHASH_ADD(head, key_member, keylen_in, value) \
+    HASH_ADD(hh, head, key_member, keylen_in, value)
+#define RT_UTHASH_FIND(head, key_ptr, keylen_in, pval) \
+    HASH_FIND(hh, head, key_ptr, keylen_in, pval)
+#define RT_UTHASH_DELETE(head, pobj) HASH_DELETE(hh, head, pobj)
+
+#endif /* __LIBADT_RT_UTHASH_H__ */
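
Editor's note: DEFINE_RT_UTHASH_TYPE() plus the RT_UTHASH_* wrappers give a small keyed-table idiom; lwp_futex_table.c above is the first user. An illustrative stand-alone use (the demo_entry type and integer key are hypothetical; the head pointer starts as NULL for an empty table, and uthash_fatal() makes an OOM return -RT_ENOMEM from the enclosing function):

/* Hypothetical example type keyed by an int id. */
DEFINE_RT_UTHASH_TYPE(demo_entry, int, id);

static struct demo_entry *demo_head; /* NULL head == empty table */

static rt_err_t demo_insert(struct demo_entry *item)
{
    /* on OOM, uthash_fatal() logs and returns -RT_ENOMEM from here */
    RT_UTHASH_ADD(demo_head, id, sizeof(int), item);
    return RT_EOK;
}

static struct demo_entry *demo_lookup(int id)
{
    struct demo_entry *found;
    RT_UTHASH_FIND(demo_head, &id, sizeof(int), found);
    return found; /* RT_NULL when absent */
}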

+ 1140 - 0
components/utilities/libadt/uthash/uthash.h

@@ -0,0 +1,1140 @@
+/*
+Copyright (c) 2003-2022, Troy D. Hanson  https://troydhanson.github.io/uthash/
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef UTHASH_H
+#define UTHASH_H
+
+#define UTHASH_VERSION 2.3.0
+
+#include <string.h>   /* memcmp, memset, strlen */
+#include <stddef.h>   /* ptrdiff_t */
+#include <stdlib.h>   /* exit */
+
+#if defined(HASH_DEFINE_OWN_STDINT) && HASH_DEFINE_OWN_STDINT
+/* This codepath is provided for backward compatibility, but I plan to remove it. */
+#warning "HASH_DEFINE_OWN_STDINT is deprecated; please use HASH_NO_STDINT instead"
+typedef unsigned int uint32_t;
+typedef unsigned char uint8_t;
+#elif defined(HASH_NO_STDINT) && HASH_NO_STDINT
+#else
+#include <stdint.h>   /* uint8_t, uint32_t */
+#endif
+
+/* These macros use decltype or the earlier __typeof GNU extension.
+   As decltype is only available in newer compilers (VS2010 or gcc 4.3+
+   when compiling c++ source) this code uses whatever method is needed
+   or, for VS2008 where neither is available, uses casting workarounds. */
+#if !defined(DECLTYPE) && !defined(NO_DECLTYPE)
+#if defined(_MSC_VER)   /* MS compiler */
+#if _MSC_VER >= 1600 && defined(__cplusplus)  /* VS2010 or newer in C++ mode */
+#define DECLTYPE(x) (decltype(x))
+#else                   /* VS2008 or older (or VS2010 in C mode) */
+#define NO_DECLTYPE
+#endif
+#elif defined(__MCST__)  /* Elbrus C Compiler */
+#define DECLTYPE(x) (__typeof(x))
+#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
+#define NO_DECLTYPE
+#else                   /* GNU, Sun and other compilers */
+#define DECLTYPE(x) (__typeof(x))
+#endif
+#endif
+
+#ifdef NO_DECLTYPE
+#define DECLTYPE(x)
+#define DECLTYPE_ASSIGN(dst,src)                                                 \
+do {                                                                             \
+  char **_da_dst = (char**)(&(dst));                                             \
+  *_da_dst = (char*)(src);                                                       \
+} while (0)
+#else
+#define DECLTYPE_ASSIGN(dst,src)                                                 \
+do {                                                                             \
+  (dst) = DECLTYPE(dst)(src);                                                    \
+} while (0)
+#endif
+
+#ifndef uthash_malloc
+#define uthash_malloc(sz) malloc(sz)      /* malloc fcn                      */
+#endif
+#ifndef uthash_free
+#define uthash_free(ptr,sz) free(ptr)     /* free fcn                        */
+#endif
+#ifndef uthash_bzero
+#define uthash_bzero(a,n) memset(a,'\0',n)
+#endif
+#ifndef uthash_strlen
+#define uthash_strlen(s) strlen(s)
+#endif
+
+#ifndef HASH_FUNCTION
+#define HASH_FUNCTION(keyptr,keylen,hashv) HASH_JEN(keyptr, keylen, hashv)
+#endif
+
+#ifndef HASH_KEYCMP
+#define HASH_KEYCMP(a,b,n) memcmp(a,b,n)
+#endif
+
+#ifndef uthash_noexpand_fyi
+#define uthash_noexpand_fyi(tbl)          /* can be defined to log noexpand  */
+#endif
+#ifndef uthash_expand_fyi
+#define uthash_expand_fyi(tbl)            /* can be defined to log expands   */
+#endif
+
+#ifndef HASH_NONFATAL_OOM
+#define HASH_NONFATAL_OOM 0
+#endif
+
+#if HASH_NONFATAL_OOM
+/* malloc failures can be recovered from */
+
+#ifndef uthash_nonfatal_oom
+#define uthash_nonfatal_oom(obj) do {} while (0)    /* non-fatal OOM error */
+#endif
+
+#define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0)
+#define IF_HASH_NONFATAL_OOM(x) x
+
+#else
+/* malloc failures result in lost memory, hash tables are unusable */
+
+#ifndef uthash_fatal
+#define uthash_fatal(msg) exit(-1)        /* fatal OOM error */
+#endif
+
+#define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory")
+#define IF_HASH_NONFATAL_OOM(x)
+
+#endif
+
+/* initial number of buckets */
+#define HASH_INITIAL_NUM_BUCKETS 32U     /* initial number of buckets        */
+#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */
+#define HASH_BKT_CAPACITY_THRESH 10U     /* expand when bucket count reaches */
+
+/* calculate the element whose hash handle address is hhp */
+#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
+/* calculate the hash handle from element address elp */
+#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle*)(void*)(((char*)(elp)) + ((tbl)->hho)))
+
+#define HASH_ROLLBACK_BKT(hh, head, itemptrhh)                                   \
+do {                                                                             \
+  struct UT_hash_handle *_hd_hh_item = (itemptrhh);                              \
+  unsigned _hd_bkt;                                                              \
+  HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh.tbl->num_buckets, _hd_bkt);         \
+  (head)->hh.tbl->buckets[_hd_bkt].count++;                                      \
+  _hd_hh_item->hh_next = NULL;                                                   \
+  _hd_hh_item->hh_prev = NULL;                                                   \
+} while (0)
+
+#define HASH_VALUE(keyptr,keylen,hashv)                                          \
+do {                                                                             \
+  HASH_FUNCTION(keyptr, keylen, hashv);                                          \
+} while (0)
+
+#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out)                 \
+do {                                                                             \
+  (out) = NULL;                                                                  \
+  if (head) {                                                                    \
+    unsigned _hf_bkt;                                                            \
+    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt);                  \
+    if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) {                         \
+      HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
+    }                                                                            \
+  }                                                                              \
+} while (0)
+
+#define HASH_FIND(hh,head,keyptr,keylen,out)                                     \
+do {                                                                             \
+  (out) = NULL;                                                                  \
+  if (head) {                                                                    \
+    unsigned _hf_hashv;                                                          \
+    HASH_VALUE(keyptr, keylen, _hf_hashv);                                       \
+    HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out);             \
+  }                                                                              \
+} while (0)
+
+#ifdef HASH_BLOOM
+#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
+#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL)
+#define HASH_BLOOM_MAKE(tbl,oomed)                                               \
+do {                                                                             \
+  (tbl)->bloom_nbits = HASH_BLOOM;                                               \
+  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN);                 \
+  if (!(tbl)->bloom_bv) {                                                        \
+    HASH_RECORD_OOM(oomed);                                                      \
+  } else {                                                                       \
+    uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN);                           \
+    (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE;                                     \
+  }                                                                              \
+} while (0)
+
+#define HASH_BLOOM_FREE(tbl)                                                     \
+do {                                                                             \
+  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN);                              \
+} while (0)
+
+#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
+#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))
+
+#define HASH_BLOOM_ADD(tbl,hashv)                                                \
+  HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))
+
+#define HASH_BLOOM_TEST(tbl,hashv)                                               \
+  HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))
+
+#else
+#define HASH_BLOOM_MAKE(tbl,oomed)
+#define HASH_BLOOM_FREE(tbl)
+#define HASH_BLOOM_ADD(tbl,hashv)
+#define HASH_BLOOM_TEST(tbl,hashv) (1)
+#define HASH_BLOOM_BYTELEN 0U
+#endif
+
+#define HASH_MAKE_TABLE(hh,head,oomed)                                           \
+do {                                                                             \
+  (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table));         \
+  if (!(head)->hh.tbl) {                                                         \
+    HASH_RECORD_OOM(oomed);                                                      \
+  } else {                                                                       \
+    uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table));                         \
+    (head)->hh.tbl->tail = &((head)->hh);                                        \
+    (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS;                      \
+    (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2;            \
+    (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head);                  \
+    (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc(                    \
+        HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket));               \
+    (head)->hh.tbl->signature = HASH_SIGNATURE;                                  \
+    if (!(head)->hh.tbl->buckets) {                                              \
+      HASH_RECORD_OOM(oomed);                                                    \
+      uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                        \
+    } else {                                                                     \
+      uthash_bzero((head)->hh.tbl->buckets,                                      \
+          HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket));             \
+      HASH_BLOOM_MAKE((head)->hh.tbl, oomed);                                    \
+      IF_HASH_NONFATAL_OOM(                                                      \
+        if (oomed) {                                                             \
+          uthash_free((head)->hh.tbl->buckets,                                   \
+              HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket));           \
+          uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                    \
+        }                                                                        \
+      )                                                                          \
+    }                                                                            \
+  }                                                                              \
+} while (0)
+
+#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
+do {                                                                             \
+  (replaced) = NULL;                                                             \
+  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
+  if (replaced) {                                                                \
+    HASH_DELETE(hh, head, replaced);                                             \
+  }                                                                              \
+  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
+} while (0)
+
+#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
+do {                                                                             \
+  (replaced) = NULL;                                                             \
+  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
+  if (replaced) {                                                                \
+    HASH_DELETE(hh, head, replaced);                                             \
+  }                                                                              \
+  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
+} while (0)
+
+#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced)                   \
+do {                                                                             \
+  unsigned _hr_hashv;                                                            \
+  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv);                         \
+  HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
+} while (0)
+
+#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn)    \
+do {                                                                             \
+  unsigned _hr_hashv;                                                            \
+  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv);                         \
+  HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
+} while (0)
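+
+/* Illustrative use of HASH_REPLACE (hypothetical struct item with an `int id`
+ * key field and a `UT_hash_handle hh` member): any prior element with the
+ * same key comes back in `old` and remains the caller's to free.
+ *
+ *   struct item *items = NULL, *old = NULL;
+ *   struct item *it = malloc(sizeof *it);
+ *   it->id = 42;
+ *   HASH_REPLACE(hh, items, id, sizeof(int), it, old);
+ *   if (old != NULL) { free(old); }
+ */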
+
+#define HASH_APPEND_LIST(hh, head, add)                                          \
+do {                                                                             \
+  (add)->hh.next = NULL;                                                         \
+  (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail);           \
+  (head)->hh.tbl->tail->next = (add);                                            \
+  (head)->hh.tbl->tail = &((add)->hh);                                           \
+} while (0)
+
+#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn)                                 \
+do {                                                                             \
+  do {                                                                           \
+    if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) {                             \
+      break;                                                                     \
+    }                                                                            \
+  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next));           \
+} while (0)
+
+#ifdef NO_DECLTYPE
+#undef HASH_AKBI_INNER_LOOP
+#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn)                                 \
+do {                                                                             \
+  char *_hs_saved_head = (char*)(head);                                          \
+  do {                                                                           \
+    DECLTYPE_ASSIGN(head, _hs_iter);                                             \
+    if (cmpfcn(head, add) > 0) {                                                 \
+      DECLTYPE_ASSIGN(head, _hs_saved_head);                                     \
+      break;                                                                     \
+    }                                                                            \
+    DECLTYPE_ASSIGN(head, _hs_saved_head);                                       \
+  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next));           \
+} while (0)
+#endif
+
+#if HASH_NONFATAL_OOM
+
+#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed)            \
+do {                                                                             \
+  if (!(oomed)) {                                                                \
+    unsigned _ha_bkt;                                                            \
+    (head)->hh.tbl->num_items++;                                                 \
+    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt);                  \
+    HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed);    \
+    if (oomed) {                                                                 \
+      HASH_ROLLBACK_BKT(hh, head, &(add)->hh);                                   \
+      HASH_DELETE_HH(hh, head, &(add)->hh);                                      \
+      (add)->hh.tbl = NULL;                                                      \
+      uthash_nonfatal_oom(add);                                                  \
+    } else {                                                                     \
+      HASH_BLOOM_ADD((head)->hh.tbl, hashval);                                   \
+      HASH_EMIT_KEY(hh, head, keyptr, keylen_in);                                \
+    }                                                                            \
+  } else {                                                                       \
+    (add)->hh.tbl = NULL;                                                        \
+    uthash_nonfatal_oom(add);                                                    \
+  }                                                                              \
+} while (0)
+
+#else
+
+#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed)            \
+do {                                                                             \
+  unsigned _ha_bkt;                                                              \
+  (head)->hh.tbl->num_items++;                                                   \
+  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt);                    \
+  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed);      \
+  HASH_BLOOM_ADD((head)->hh.tbl, hashval);                                       \
+  HASH_EMIT_KEY(hh, head, keyptr, keylen_in);                                    \
+} while (0)
+
+#endif
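+
+/* The two HASH_ADD_TO_TABLE variants above differ only in error handling:
+ * by default an allocation failure is treated as fatal (via HASH_RECORD_OOM),
+ * while building with HASH_NONFATAL_OOM set makes an add fail softly, leaving
+ * the element un-added with its hh.tbl reset to NULL. A minimal sketch of the
+ * non-fatal configuration (`my_oom_handler` is a hypothetical application
+ * callback, not part of uthash):
+ *
+ *   #define HASH_NONFATAL_OOM 1
+ *   #define uthash_nonfatal_oom(obj) my_oom_handler(obj)
+ *   #include "uthash.h"
+ */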
+
+
+#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
+do {                                                                             \
+  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; )                                     \
+  (add)->hh.hashv = (hashval);                                                   \
+  (add)->hh.key = (char*) (keyptr);                                              \
+  (add)->hh.keylen = (unsigned) (keylen_in);                                     \
+  if (!(head)) {                                                                 \
+    (add)->hh.next = NULL;                                                       \
+    (add)->hh.prev = NULL;                                                       \
+    HASH_MAKE_TABLE(hh, add, _ha_oomed);                                         \
+    IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { )                                    \
+      (head) = (add);                                                            \
+    IF_HASH_NONFATAL_OOM( } )                                                    \
+  } else {                                                                       \
+    void *_hs_iter = (head);                                                     \
+    (add)->hh.tbl = (head)->hh.tbl;                                              \
+    HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn);                                 \
+    if (_hs_iter) {                                                              \
+      (add)->hh.next = _hs_iter;                                                 \
+      if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) {     \
+        HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add);              \
+      } else {                                                                   \
+        (head) = (add);                                                          \
+      }                                                                          \
+      HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add);                      \
+    } else {                                                                     \
+      HASH_APPEND_LIST(hh, head, add);                                           \
+    }                                                                            \
+  }                                                                              \
+  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed);       \
+  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER");                    \
+} while (0)
+
+#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn)             \
+do {                                                                             \
+  unsigned _hs_hashv;                                                            \
+  HASH_VALUE(keyptr, keylen_in, _hs_hashv);                                      \
+  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
+} while (0)
+
+#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
+  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)
+
+#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn)                 \
+  HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)
+
+#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add)        \
+do {                                                                             \
+  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; )                                     \
+  (add)->hh.hashv = (hashval);                                                   \
+  (add)->hh.key = (const void*) (keyptr);                                        \
+  (add)->hh.keylen = (unsigned) (keylen_in);                                     \
+  if (!(head)) {                                                                 \
+    (add)->hh.next = NULL;                                                       \
+    (add)->hh.prev = NULL;                                                       \
+    HASH_MAKE_TABLE(hh, add, _ha_oomed);                                         \
+    IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { )                                    \
+      (head) = (add);                                                            \
+    IF_HASH_NONFATAL_OOM( } )                                                    \
+  } else {                                                                       \
+    (add)->hh.tbl = (head)->hh.tbl;                                              \
+    HASH_APPEND_LIST(hh, head, add);                                             \
+  }                                                                              \
+  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed);       \
+  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE");                            \
+} while (0)
+
+#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add)                            \
+do {                                                                             \
+  unsigned _ha_hashv;                                                            \
+  HASH_VALUE(keyptr, keylen_in, _ha_hashv);                                      \
+  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add);      \
+} while (0)
+
+#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add)            \
+  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)
+
+#define HASH_ADD(hh,head,fieldname,keylen_in,add)                                \
+  HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)
+
+#define HASH_TO_BKT(hashv,num_bkts,bkt)                                          \
+do {                                                                             \
+  bkt = ((hashv) & ((num_bkts) - 1U));                                           \
+} while (0)
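+
+/* The mask works because num_bkts is always a power of two; e.g. with
+ * hashv == 0x9e3779b9 and 32 buckets, bkt = 0x9e3779b9 & 31 = 25
+ * (illustrative values). */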
+
+/* Delete "delptr" from the hash table, performing "the usual" patch-up
+ * of the app-order doubly-linked list.
+ * The use of _hd_hh_del below deserves special explanation.
+ * These references used to be expressed using (delptr), but that led to a bug
+ * if someone used the same symbol for the head and the deletee, as in
+ *  HASH_DELETE(hh,users,users);
+ * We want that to work, but by changing the head (users) below
+ * we were forfeiting our ability to further refer to the deletee (users)
+ * in the patch-up process. Solution: copy the deletee pointer into scratch
+ * space, then make the later references through that scratch pointer rather
+ * than through the repointed (users) symbol.
+ */
+#define HASH_DELETE(hh,head,delptr)                                              \
+    HASH_DELETE_HH(hh, head, &(delptr)->hh)
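+
+/* Illustrative usage (hypothetical `struct my_item *items` hash head): the
+ * scratch-pointer copy described above is what makes head-and-deletee
+ * aliasing safe:
+ *
+ *   HASH_DELETE(hh, items, items);   // delete the head element itself
+ *
+ * `items` then points at the next element (or NULL on an empty hash); the
+ * removed element is never freed by uthash and remains the caller's to
+ * release. */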
+
+#define HASH_DELETE_HH(hh,head,delptrhh)                                         \
+do {                                                                             \
+  const struct UT_hash_handle *_hd_hh_del = (delptrhh);                          \
+  if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) {                \
+    HASH_BLOOM_FREE((head)->hh.tbl);                                             \
+    uthash_free((head)->hh.tbl->buckets,                                         \
+                (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket));    \
+    uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                          \
+    (head) = NULL;                                                               \
+  } else {                                                                       \
+    unsigned _hd_bkt;                                                            \
+    if (_hd_hh_del == (head)->hh.tbl->tail) {                                    \
+      (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev);     \
+    }                                                                            \
+    if (_hd_hh_del->prev != NULL) {                                              \
+      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next;   \
+    } else {                                                                     \
+      DECLTYPE_ASSIGN(head, _hd_hh_del->next);                                   \
+    }                                                                            \
+    if (_hd_hh_del->next != NULL) {                                              \
+      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev;   \
+    }                                                                            \
+    HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt);        \
+    HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del);               \
+    (head)->hh.tbl->num_items--;                                                 \
+  }                                                                              \
+  HASH_FSCK(hh, head, "HASH_DELETE_HH");                                         \
+} while (0)
+
+/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
+#define HASH_FIND_STR(head,findstr,out)                                          \
+do {                                                                             \
+    unsigned _uthash_hfstr_keylen = (unsigned)uthash_strlen(findstr);            \
+    HASH_FIND(hh, head, findstr, _uthash_hfstr_keylen, out);                     \
+} while (0)
+#define HASH_ADD_STR(head,strfield,add)                                          \
+do {                                                                             \
+    unsigned _uthash_hastr_keylen = (unsigned)uthash_strlen((add)->strfield);    \
+    HASH_ADD(hh, head, strfield[0], _uthash_hastr_keylen, add);                  \
+} while (0)
+#define HASH_REPLACE_STR(head,strfield,add,replaced)                             \
+do {                                                                             \
+    unsigned _uthash_hrstr_keylen = (unsigned)uthash_strlen((add)->strfield);    \
+    HASH_REPLACE(hh, head, strfield[0], _uthash_hrstr_keylen, add, replaced);    \
+} while (0)
+#define HASH_FIND_INT(head,findint,out)                                          \
+    HASH_FIND(hh,head,findint,sizeof(int),out)
+#define HASH_ADD_INT(head,intfield,add)                                          \
+    HASH_ADD(hh,head,intfield,sizeof(int),add)
+#define HASH_REPLACE_INT(head,intfield,add,replaced)                             \
+    HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
+#define HASH_FIND_PTR(head,findptr,out)                                          \
+    HASH_FIND(hh,head,findptr,sizeof(void *),out)
+#define HASH_ADD_PTR(head,ptrfield,add)                                          \
+    HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
+#define HASH_REPLACE_PTR(head,ptrfield,add,replaced)                             \
+    HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
+#define HASH_DEL(head,delptr)                                                    \
+    HASH_DELETE(hh,head,delptr)
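+
+/* A minimal end-to-end sketch of the convenience forms above (hypothetical
+ * struct, not part of upstream uthash). The key field lives inside the
+ * struct, alongside a UT_hash_handle named hh:
+ *
+ *   struct user { int id; char name[16]; UT_hash_handle hh; };
+ *   struct user *users = NULL, *u, *found;
+ *
+ *   u = malloc(sizeof *u);
+ *   u->id = 7;
+ *   HASH_ADD_INT(users, id, u);          // key is the int field `id`
+ *
+ *   int key = 7;
+ *   HASH_FIND_INT(users, &key, found);   // found == u, or NULL if absent
+ *
+ *   HASH_DEL(users, u);                  // unlink only; caller frees u
+ *   free(u);
+ */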
+
+/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
+ * This is for uthash developers only; it compiles away if HASH_DEBUG isn't defined.
+ */
+#ifdef HASH_DEBUG
+#include <stdio.h>   /* fprintf, stderr */
+#define HASH_OOPS(...) do { fprintf(stderr, __VA_ARGS__); exit(-1); } while (0)
+#define HASH_FSCK(hh,head,where)                                                 \
+do {                                                                             \
+  struct UT_hash_handle *_thh;                                                   \
+  if (head) {                                                                    \
+    unsigned _bkt_i;                                                             \
+    unsigned _count = 0;                                                         \
+    char *_prev;                                                                 \
+    for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) {           \
+      unsigned _bkt_count = 0;                                                   \
+      _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head;                            \
+      _prev = NULL;                                                              \
+      while (_thh) {                                                             \
+        if (_prev != (char*)(_thh->hh_prev)) {                                   \
+          HASH_OOPS("%s: invalid hh_prev %p, actual %p\n",                       \
+              (where), (void*)_thh->hh_prev, (void*)_prev);                      \
+        }                                                                        \
+        _bkt_count++;                                                            \
+        _prev = (char*)(_thh);                                                   \
+        _thh = _thh->hh_next;                                                    \
+      }                                                                          \
+      _count += _bkt_count;                                                      \
+      if ((head)->hh.tbl->buckets[_bkt_i].count !=  _bkt_count) {                \
+        HASH_OOPS("%s: invalid bucket count %u, actual %u\n",                    \
+            (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count);         \
+      }                                                                          \
+    }                                                                            \
+    if (_count != (head)->hh.tbl->num_items) {                                   \
+      HASH_OOPS("%s: invalid hh item count %u, actual %u\n",                     \
+          (where), (head)->hh.tbl->num_items, _count);                           \
+    }                                                                            \
+    _count = 0;                                                                  \
+    _prev = NULL;                                                                \
+    _thh =  &(head)->hh;                                                         \
+    while (_thh) {                                                               \
+      _count++;                                                                  \
+      if (_prev != (char*)_thh->prev) {                                          \
+        HASH_OOPS("%s: invalid prev %p, actual %p\n",                            \
+            (where), (void*)_thh->prev, (void*)_prev);                           \
+      }                                                                          \
+      _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh);                         \
+      _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL);     \
+    }                                                                            \
+    if (_count != (head)->hh.tbl->num_items) {                                   \
+      HASH_OOPS("%s: invalid app item count %u, actual %u\n",                    \
+          (where), (head)->hh.tbl->num_items, _count);                           \
+    }                                                                            \
+  }                                                                              \
+} while (0)
+#else
+#define HASH_FSCK(hh,head,where)
+#endif
+
+/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
+ * the file descriptor given by HASH_EMIT_KEYS, for use in tuning the hash
+ * function. The app can #include <unistd.h> to get the prototype for write(2). */
+#ifdef HASH_EMIT_KEYS
+#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)                                   \
+do {                                                                             \
+  unsigned _klen = fieldlen;                                                     \
+  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen));                                  \
+  write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen);                        \
+} while (0)
+#else
+#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
+#endif
+
+/* The Bernstein hash function, used in Perl prior to v5.6. Note ((x<<5)+x) == x*33. */
+#define HASH_BER(key,keylen,hashv)                                               \
+do {                                                                             \
+  unsigned _hb_keylen = (unsigned)keylen;                                        \
+  const unsigned char *_hb_key = (const unsigned char*)(key);                    \
+  (hashv) = 0;                                                                   \
+  while (_hb_keylen-- != 0U) {                                                   \
+    (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++;                           \
+  }                                                                              \
+} while (0)
+
+
+/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
+ * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
+ * (archive link: https://archive.is/Ivcan )
+ */
+#define HASH_SAX(key,keylen,hashv)                                               \
+do {                                                                             \
+  unsigned _sx_i;                                                                \
+  const unsigned char *_hs_key = (const unsigned char*)(key);                    \
+  hashv = 0;                                                                     \
+  for (_sx_i=0; _sx_i < keylen; _sx_i++) {                                       \
+    hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i];                       \
+  }                                                                              \
+} while (0)
+/* FNV-1a variation */
+#define HASH_FNV(key,keylen,hashv)                                               \
+do {                                                                             \
+  unsigned _fn_i;                                                                \
+  const unsigned char *_hf_key = (const unsigned char*)(key);                    \
+  (hashv) = 2166136261U;                                                         \
+  for (_fn_i=0; _fn_i < keylen; _fn_i++) {                                       \
+    hashv = hashv ^ _hf_key[_fn_i];                                              \
+    hashv = hashv * 16777619U;                                                   \
+  }                                                                              \
+} while (0)
+
+#define HASH_OAT(key,keylen,hashv)                                               \
+do {                                                                             \
+  unsigned _ho_i;                                                                \
+  const unsigned char *_ho_key=(const unsigned char*)(key);                      \
+  hashv = 0;                                                                     \
+  for(_ho_i=0; _ho_i < keylen; _ho_i++) {                                        \
+      hashv += _ho_key[_ho_i];                                                   \
+      hashv += (hashv << 10);                                                    \
+      hashv ^= (hashv >> 6);                                                     \
+  }                                                                              \
+  hashv += (hashv << 3);                                                         \
+  hashv ^= (hashv >> 11);                                                        \
+  hashv += (hashv << 15);                                                        \
+} while (0)
+
+#define HASH_JEN_MIX(a,b,c)                                                      \
+do {                                                                             \
+  a -= b; a -= c; a ^= ( c >> 13 );                                              \
+  b -= c; b -= a; b ^= ( a << 8 );                                               \
+  c -= a; c -= b; c ^= ( b >> 13 );                                              \
+  a -= b; a -= c; a ^= ( c >> 12 );                                              \
+  b -= c; b -= a; b ^= ( a << 16 );                                              \
+  c -= a; c -= b; c ^= ( b >> 5 );                                               \
+  a -= b; a -= c; a ^= ( c >> 3 );                                               \
+  b -= c; b -= a; b ^= ( a << 10 );                                              \
+  c -= a; c -= b; c ^= ( b >> 15 );                                              \
+} while (0)
+
+#define HASH_JEN(key,keylen,hashv)                                               \
+do {                                                                             \
+  unsigned _hj_i,_hj_j,_hj_k;                                                    \
+  unsigned const char *_hj_key=(unsigned const char*)(key);                      \
+  hashv = 0xfeedbeefu;                                                           \
+  _hj_i = _hj_j = 0x9e3779b9u;                                                   \
+  _hj_k = (unsigned)(keylen);                                                    \
+  while (_hj_k >= 12U) {                                                         \
+    _hj_i +=    (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 )                      \
+        + ( (unsigned)_hj_key[2] << 16 )                                         \
+        + ( (unsigned)_hj_key[3] << 24 ) );                                      \
+    _hj_j +=    (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 )                      \
+        + ( (unsigned)_hj_key[6] << 16 )                                         \
+        + ( (unsigned)_hj_key[7] << 24 ) );                                      \
+    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 )                         \
+        + ( (unsigned)_hj_key[10] << 16 )                                        \
+        + ( (unsigned)_hj_key[11] << 24 ) );                                     \
+                                                                                 \
+     HASH_JEN_MIX(_hj_i, _hj_j, hashv);                                          \
+                                                                                 \
+     _hj_key += 12;                                                              \
+     _hj_k -= 12U;                                                               \
+  }                                                                              \
+  hashv += (unsigned)(keylen);                                                   \
+  switch ( _hj_k ) {                                                             \
+    case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */         \
+    case 10: hashv += ( (unsigned)_hj_key[9] << 16 );  /* FALLTHROUGH */         \
+    case 9:  hashv += ( (unsigned)_hj_key[8] << 8 );   /* FALLTHROUGH */         \
+    case 8:  _hj_j += ( (unsigned)_hj_key[7] << 24 );  /* FALLTHROUGH */         \
+    case 7:  _hj_j += ( (unsigned)_hj_key[6] << 16 );  /* FALLTHROUGH */         \
+    case 6:  _hj_j += ( (unsigned)_hj_key[5] << 8 );   /* FALLTHROUGH */         \
+    case 5:  _hj_j += _hj_key[4];                      /* FALLTHROUGH */         \
+    case 4:  _hj_i += ( (unsigned)_hj_key[3] << 24 );  /* FALLTHROUGH */         \
+    case 3:  _hj_i += ( (unsigned)_hj_key[2] << 16 );  /* FALLTHROUGH */         \
+    case 2:  _hj_i += ( (unsigned)_hj_key[1] << 8 );   /* FALLTHROUGH */         \
+    case 1:  _hj_i += _hj_key[0];                      /* FALLTHROUGH */         \
+    default: ;                                                                   \
+  }                                                                              \
+  HASH_JEN_MIX(_hj_i, _hj_j, hashv);                                             \
+} while (0)
+
+/* The Paul Hsieh hash function */
+#undef get16bits
+#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__)             \
+  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
+#define get16bits(d) (*((const uint16_t *) (d)))
+#endif
+
+#if !defined (get16bits)
+#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)             \
+                       +(uint32_t)(((const uint8_t *)(d))[0]) )
+#endif
+#define HASH_SFH(key,keylen,hashv)                                               \
+do {                                                                             \
+  unsigned const char *_sfh_key=(unsigned const char*)(key);                     \
+  uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen;                                \
+                                                                                 \
+  unsigned _sfh_rem = _sfh_len & 3U;                                             \
+  _sfh_len >>= 2;                                                                \
+  hashv = 0xcafebabeu;                                                           \
+                                                                                 \
+  /* Main loop */                                                                \
+  for (;_sfh_len > 0U; _sfh_len--) {                                             \
+    hashv    += get16bits (_sfh_key);                                            \
+    _sfh_tmp  = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv;              \
+    hashv     = (hashv << 16) ^ _sfh_tmp;                                        \
+    _sfh_key += 2U*sizeof (uint16_t);                                            \
+    hashv    += hashv >> 11;                                                     \
+  }                                                                              \
+                                                                                 \
+  /* Handle end cases */                                                         \
+  switch (_sfh_rem) {                                                            \
+    case 3: hashv += get16bits (_sfh_key);                                       \
+            hashv ^= hashv << 16;                                                \
+            hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18;              \
+            hashv += hashv >> 11;                                                \
+            break;                                                               \
+    case 2: hashv += get16bits (_sfh_key);                                       \
+            hashv ^= hashv << 11;                                                \
+            hashv += hashv >> 17;                                                \
+            break;                                                               \
+    case 1: hashv += *_sfh_key;                                                  \
+            hashv ^= hashv << 10;                                                \
+            hashv += hashv >> 1;                                                 \
+            break;                                                               \
+    default: ;                                                                   \
+  }                                                                              \
+                                                                                 \
+  /* Force "avalanching" of final 127 bits */                                    \
+  hashv ^= hashv << 3;                                                           \
+  hashv += hashv >> 5;                                                           \
+  hashv ^= hashv << 4;                                                           \
+  hashv += hashv >> 17;                                                          \
+  hashv ^= hashv << 25;                                                          \
+  hashv += hashv >> 6;                                                           \
+} while (0)
+
+/* iterate over items in a known bucket to find desired item */
+#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out)               \
+do {                                                                             \
+  if ((head).hh_head != NULL) {                                                  \
+    DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head));                     \
+  } else {                                                                       \
+    (out) = NULL;                                                                \
+  }                                                                              \
+  while ((out) != NULL) {                                                        \
+    if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) {       \
+      if (HASH_KEYCMP((out)->hh.key, keyptr, keylen_in) == 0) {                  \
+        break;                                                                   \
+      }                                                                          \
+    }                                                                            \
+    if ((out)->hh.hh_next != NULL) {                                             \
+      DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next));                \
+    } else {                                                                     \
+      (out) = NULL;                                                              \
+    }                                                                            \
+  }                                                                              \
+} while (0)
+
+/* add an item to a bucket  */
+#define HASH_ADD_TO_BKT(head,hh,addhh,oomed)                                     \
+do {                                                                             \
+  UT_hash_bucket *_ha_head = &(head);                                            \
+  _ha_head->count++;                                                             \
+  (addhh)->hh_next = _ha_head->hh_head;                                          \
+  (addhh)->hh_prev = NULL;                                                       \
+  if (_ha_head->hh_head != NULL) {                                               \
+    _ha_head->hh_head->hh_prev = (addhh);                                        \
+  }                                                                              \
+  _ha_head->hh_head = (addhh);                                                   \
+  if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \
+      && !(addhh)->tbl->noexpand) {                                              \
+    HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed);                              \
+    IF_HASH_NONFATAL_OOM(                                                        \
+      if (oomed) {                                                               \
+        HASH_DEL_IN_BKT(head,addhh);                                             \
+      }                                                                          \
+    )                                                                            \
+  }                                                                              \
+} while (0)
+
+/* remove an item from a given bucket */
+#define HASH_DEL_IN_BKT(head,delhh)                                              \
+do {                                                                             \
+  UT_hash_bucket *_hd_head = &(head);                                            \
+  _hd_head->count--;                                                             \
+  if (_hd_head->hh_head == (delhh)) {                                            \
+    _hd_head->hh_head = (delhh)->hh_next;                                        \
+  }                                                                              \
+  if ((delhh)->hh_prev) {                                                        \
+    (delhh)->hh_prev->hh_next = (delhh)->hh_next;                                \
+  }                                                                              \
+  if ((delhh)->hh_next) {                                                        \
+    (delhh)->hh_next->hh_prev = (delhh)->hh_prev;                                \
+  }                                                                              \
+} while (0)
+
+/* Bucket expansion has the effect of doubling the number of buckets
+ * and redistributing the items into the new buckets. Ideally the
+ * items will distribute more or less evenly into the new buckets
+ * (the extent to which this is true is a measure of the quality of
+ * the hash function as it applies to the key domain).
+ *
+ * With the items distributed into more buckets, the chain length
+ * (item count) in each bucket is reduced. Thus by expanding buckets
+ * the hash keeps a bound on the chain length. This bounded chain
+ * length is the essence of how a hash provides constant time lookup.
+ *
+ * The calculation of tbl->ideal_chain_maxlen below deserves some
+ * explanation. First, keep in mind that we're calculating the ideal
+ * maximum chain length based on the *new* (doubled) bucket count.
+ * In fractions this is just n/b (n = number of items, b = new number of buckets).
+ * Since the ideal chain length is an integer, we want to calculate
+ * ceil(n/b). We don't depend on floating point arithmetic in this
+ * hash, so to calculate ceil(n/b) with integers we could write
+ *
+ *      ceil(n/b) = (n/b) + ((n%b)?1:0)
+ *
+ * and in fact a previous version of this hash did just that.
+ * But now we have improved things a bit by recognizing that b is
+ * always a power of two. We keep its base 2 log handy (call it lb),
+ * so now we can write this with a bit shift and logical AND:
+ *
+ *      ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
+ *
+ */
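+/* Worked example (illustrative): with n = 100 items and a doubled bucket
+ * count b = 64 (lb = 6), ceil(100/64) = 2, and indeed
+ * (100 >> 6) + ((100 & 63) ? 1 : 0) = 1 + 1 = 2. */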
+#define HASH_EXPAND_BUCKETS(hh,tbl,oomed)                                        \
+do {                                                                             \
+  unsigned _he_bkt;                                                              \
+  unsigned _he_bkt_i;                                                            \
+  struct UT_hash_handle *_he_thh, *_he_hh_nxt;                                   \
+  UT_hash_bucket *_he_new_buckets, *_he_newbkt;                                  \
+  _he_new_buckets = (UT_hash_bucket*)uthash_malloc(                              \
+           sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U);             \
+  if (!_he_new_buckets) {                                                        \
+    HASH_RECORD_OOM(oomed);                                                      \
+  } else {                                                                       \
+    uthash_bzero(_he_new_buckets,                                                \
+        sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U);                \
+    (tbl)->ideal_chain_maxlen =                                                  \
+       ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) +                      \
+       ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U);    \
+    (tbl)->nonideal_items = 0;                                                   \
+    for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) {           \
+      _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head;                             \
+      while (_he_thh != NULL) {                                                  \
+        _he_hh_nxt = _he_thh->hh_next;                                           \
+        HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt);           \
+        _he_newbkt = &(_he_new_buckets[_he_bkt]);                                \
+        if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) {                 \
+          (tbl)->nonideal_items++;                                               \
+          if (_he_newbkt->count > _he_newbkt->expand_mult * (tbl)->ideal_chain_maxlen) { \
+            _he_newbkt->expand_mult++;                                           \
+          }                                                                      \
+        }                                                                        \
+        _he_thh->hh_prev = NULL;                                                 \
+        _he_thh->hh_next = _he_newbkt->hh_head;                                  \
+        if (_he_newbkt->hh_head != NULL) {                                       \
+          _he_newbkt->hh_head->hh_prev = _he_thh;                                \
+        }                                                                        \
+        _he_newbkt->hh_head = _he_thh;                                           \
+        _he_thh = _he_hh_nxt;                                                    \
+      }                                                                          \
+    }                                                                            \
+    uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
+    (tbl)->num_buckets *= 2U;                                                    \
+    (tbl)->log2_num_buckets++;                                                   \
+    (tbl)->buckets = _he_new_buckets;                                            \
+    (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ?   \
+        ((tbl)->ineff_expands+1U) : 0U;                                          \
+    if ((tbl)->ineff_expands > 1U) {                                             \
+      (tbl)->noexpand = 1;                                                       \
+      uthash_noexpand_fyi(tbl);                                                  \
+    }                                                                            \
+    uthash_expand_fyi(tbl);                                                      \
+  }                                                                              \
+} while (0)
+
+
+/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
+/* Note that HASH_SORT assumes the hash handle name to be hh.
+ * HASH_SRT was added to allow the hash handle name to be passed in. */
+#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
+#define HASH_SRT(hh,head,cmpfcn)                                                 \
+do {                                                                             \
+  unsigned _hs_i;                                                                \
+  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize;               \
+  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail;            \
+  if (head != NULL) {                                                            \
+    _hs_insize = 1;                                                              \
+    _hs_looping = 1;                                                             \
+    _hs_list = &((head)->hh);                                                    \
+    while (_hs_looping != 0U) {                                                  \
+      _hs_p = _hs_list;                                                          \
+      _hs_list = NULL;                                                           \
+      _hs_tail = NULL;                                                           \
+      _hs_nmerges = 0;                                                           \
+      while (_hs_p != NULL) {                                                    \
+        _hs_nmerges++;                                                           \
+        _hs_q = _hs_p;                                                           \
+        _hs_psize = 0;                                                           \
+        for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) {                           \
+          _hs_psize++;                                                           \
+          _hs_q = ((_hs_q->next != NULL) ?                                       \
+            HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                   \
+          if (_hs_q == NULL) {                                                   \
+            break;                                                               \
+          }                                                                      \
+        }                                                                        \
+        _hs_qsize = _hs_insize;                                                  \
+        while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) {    \
+          if (_hs_psize == 0U) {                                                 \
+            _hs_e = _hs_q;                                                       \
+            _hs_q = ((_hs_q->next != NULL) ?                                     \
+              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                 \
+            _hs_qsize--;                                                         \
+          } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) {                     \
+            _hs_e = _hs_p;                                                       \
+            if (_hs_p != NULL) {                                                 \
+              _hs_p = ((_hs_p->next != NULL) ?                                   \
+                HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL);               \
+            }                                                                    \
+            _hs_psize--;                                                         \
+          } else if ((cmpfcn(                                                    \
+                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)),             \
+                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q))              \
+                )) <= 0) {                                                       \
+            _hs_e = _hs_p;                                                       \
+            if (_hs_p != NULL) {                                                 \
+              _hs_p = ((_hs_p->next != NULL) ?                                   \
+                HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL);               \
+            }                                                                    \
+            _hs_psize--;                                                         \
+          } else {                                                               \
+            _hs_e = _hs_q;                                                       \
+            _hs_q = ((_hs_q->next != NULL) ?                                     \
+              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                 \
+            _hs_qsize--;                                                         \
+          }                                                                      \
+          if ( _hs_tail != NULL ) {                                              \
+            _hs_tail->next = ((_hs_e != NULL) ?                                  \
+              ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL);                       \
+          } else {                                                               \
+            _hs_list = _hs_e;                                                    \
+          }                                                                      \
+          if (_hs_e != NULL) {                                                   \
+            _hs_e->prev = ((_hs_tail != NULL) ?                                  \
+              ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL);                    \
+          }                                                                      \
+          _hs_tail = _hs_e;                                                      \
+        }                                                                        \
+        _hs_p = _hs_q;                                                           \
+      }                                                                          \
+      if (_hs_tail != NULL) {                                                    \
+        _hs_tail->next = NULL;                                                   \
+      }                                                                          \
+      if (_hs_nmerges <= 1U) {                                                   \
+        _hs_looping = 0;                                                         \
+        (head)->hh.tbl->tail = _hs_tail;                                         \
+        DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list));           \
+      }                                                                          \
+      _hs_insize *= 2U;                                                          \
+    }                                                                            \
+    HASH_FSCK(hh, head, "HASH_SRT");                                             \
+  }                                                                              \
+} while (0)
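+
+/* Illustrative HASH_SORT usage (hypothetical struct user with an `int id`
+ * key and handle `hh`): cmpfcn follows strcmp conventions, returning a
+ * negative, zero, or positive result.
+ *
+ *   int by_id(struct user *a, struct user *b) { return a->id - b->id; }
+ *   ...
+ *   HASH_SORT(users, by_id);   // reorders only the app-order list;
+ *                              // bucket placement is unchanged
+ */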
+
+/* This function selects items from one hash into another hash.
+ * The end result is that the selected items have dual presence
+ * in both hashes. There is no copy of the items made; rather
+ * they are added into the new hash through a secondary
+ * hash handle that must be present in the structure. */
+#define HASH_SELECT(hh_dst, dst, hh_src, src, cond)                              \
+do {                                                                             \
+  unsigned _src_bkt, _dst_bkt;                                                   \
+  void *_last_elt = NULL, *_elt;                                                 \
+  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL;                         \
+  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst));                 \
+  if ((src) != NULL) {                                                           \
+    for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) {    \
+      for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head;               \
+        _src_hh != NULL;                                                         \
+        _src_hh = _src_hh->hh_next) {                                            \
+        _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh);                         \
+        if (cond(_elt)) {                                                        \
+          IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; )                             \
+          _dst_hh = (UT_hash_handle*)(void*)(((char*)_elt) + _dst_hho);          \
+          _dst_hh->key = _src_hh->key;                                           \
+          _dst_hh->keylen = _src_hh->keylen;                                     \
+          _dst_hh->hashv = _src_hh->hashv;                                       \
+          _dst_hh->prev = _last_elt;                                             \
+          _dst_hh->next = NULL;                                                  \
+          if (_last_elt_hh != NULL) {                                            \
+            _last_elt_hh->next = _elt;                                           \
+          }                                                                      \
+          if ((dst) == NULL) {                                                   \
+            DECLTYPE_ASSIGN(dst, _elt);                                          \
+            HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed);                             \
+            IF_HASH_NONFATAL_OOM(                                                \
+              if (_hs_oomed) {                                                   \
+                uthash_nonfatal_oom(_elt);                                       \
+                (dst) = NULL;                                                    \
+                continue;                                                        \
+              }                                                                  \
+            )                                                                    \
+          } else {                                                               \
+            _dst_hh->tbl = (dst)->hh_dst.tbl;                                    \
+          }                                                                      \
+          HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt);      \
+          HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \
+          (dst)->hh_dst.tbl->num_items++;                                        \
+          IF_HASH_NONFATAL_OOM(                                                  \
+            if (_hs_oomed) {                                                     \
+              HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh);                           \
+              HASH_DELETE_HH(hh_dst, dst, _dst_hh);                              \
+              _dst_hh->tbl = NULL;                                               \
+              uthash_nonfatal_oom(_elt);                                         \
+              continue;                                                          \
+            }                                                                    \
+          )                                                                      \
+          HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv);                          \
+          _last_elt = _elt;                                                      \
+          _last_elt_hh = _dst_hh;                                                \
+        }                                                                        \
+      }                                                                          \
+    }                                                                            \
+  }                                                                              \
+  HASH_FSCK(hh_dst, dst, "HASH_SELECT");                                         \
+} while (0)
+
+#define HASH_CLEAR(hh,head)                                                      \
+do {                                                                             \
+  if ((head) != NULL) {                                                          \
+    HASH_BLOOM_FREE((head)->hh.tbl);                                             \
+    uthash_free((head)->hh.tbl->buckets,                                         \
+                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket));      \
+    uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                          \
+    (head) = NULL;                                                               \
+  }                                                                              \
+} while (0)
+
+#define HASH_OVERHEAD(hh,head)                                                   \
+ (((head) != NULL) ? (                                                           \
+ (size_t)(((head)->hh.tbl->num_items   * sizeof(UT_hash_handle))   +             \
+          ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket))   +             \
+           sizeof(UT_hash_table)                                   +             \
+           (HASH_BLOOM_BYTELEN))) : 0U)
+
+#ifdef NO_DECLTYPE
+#define HASH_ITER(hh,head,el,tmp)                                                \
+for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \
+  (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL)))
+#else
+#define HASH_ITER(hh,head,el,tmp)                                                \
+for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL));      \
+  (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL)))
+#endif
+
+/* obtain a count of items in the hash */
+#define HASH_COUNT(head) HASH_CNT(hh,head)
+#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U)
+
+typedef struct UT_hash_bucket {
+   struct UT_hash_handle *hh_head;
+   unsigned count;
+
+   /* expand_mult is normally set to 0. In this situation, the max chain length
+    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
+    * the bucket's chain exceeds this length, bucket expansion is triggered).
+    * However, setting expand_mult to a non-zero value delays bucket expansion
+    * (that would be triggered by additions to this particular bucket)
+    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
+    * (The multiplier is simply expand_mult+1). The whole idea of this
+    * multiplier is to reduce bucket expansions, since they are expensive, in
+    * situations where we know that a particular bucket tends to be overused.
+    * It is better to let its chain length grow to a longer yet-still-bounded
+    * value, than to do an O(n) bucket expansion too often.
+    */
+   unsigned expand_mult;
+
+} UT_hash_bucket;
+
+/* random signature used only to find hash tables in external analysis */
+#define HASH_SIGNATURE 0xa0111fe1u
+#define HASH_BLOOM_SIGNATURE 0xb12220f2u
+
+typedef struct UT_hash_table {
+   UT_hash_bucket *buckets;
+   unsigned num_buckets, log2_num_buckets;
+   unsigned num_items;
+   struct UT_hash_handle *tail; /* tail hh in app order, for fast append    */
+   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */
+
+   /* in an ideal situation (all buckets used equally), no bucket would have
+    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
+   unsigned ideal_chain_maxlen;
+
+   /* nonideal_items is the number of items in the hash whose chain position
+    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
+    * hash distribution; reaching them in a chain traversal takes >ideal steps */
+   unsigned nonideal_items;
+
+   /* ineffective expands occur when a bucket doubling was performed, but
+    * afterward, more than half the items in the hash had nonideal chain
+    * positions. If this happens on two consecutive expansions we inhibit any
+    * further expansion, as it's not helping; this happens when the hash
+    * function isn't a good fit for the key domain. When expansion is inhibited
+    * the hash will still work, albeit no longer in constant time. */
+   unsigned ineff_expands, noexpand;
+
+   uint32_t signature; /* used only to find hash tables in external analysis */
+#ifdef HASH_BLOOM
+   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
+   uint8_t *bloom_bv;
+   uint8_t bloom_nbits;
+#endif
+
+} UT_hash_table;
+
+typedef struct UT_hash_handle {
+   struct UT_hash_table *tbl;
+   void *prev;                       /* prev element in app order      */
+   void *next;                       /* next element in app order      */
+   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order    */
+   struct UT_hash_handle *hh_next;   /* next hh in bucket order        */
+   const void *key;                  /* ptr to enclosing struct's key  */
+   unsigned keylen;                  /* enclosing struct's key len     */
+   unsigned hashv;                   /* result of hash-fcn(key)        */
+} UT_hash_handle;
+
+#endif /* UTHASH_H */
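
The macros above follow uthash's head-pointer convention: the whole hash is
reached through a bare struct pointer, and every hashable struct embeds a
UT_hash_handle. A minimal usage sketch of the iteration/count/delete macros
(illustrative only; the struct and field names below are not part of this
commit):

    #include <stdio.h>
    #include <stdlib.h>
    #include "uthash.h"

    struct item {
        int id;             /* the key */
        UT_hash_handle hh;  /* makes this structure hashable */
    };

    static struct item *items = NULL;   /* an empty hash is a NULL head */

    void demo(void)
    {
        struct item *it, *tmp;

        it = malloc(sizeof(*it));
        it->id = 42;
        HASH_ADD_INT(items, id, it);            /* insert, keyed by id */

        printf("count = %u\n", HASH_COUNT(items));

        HASH_ITER(hh, items, it, tmp) {         /* deletion-safe iteration */
            HASH_DEL(items, it);
            free(it);
        }
    }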

+ 3 - 5
components/vbus/prio_queue.c

@@ -89,7 +89,7 @@ void rt_prio_queue_detach(struct rt_prio_queue *que)
         rt_base_t level = rt_hw_interrupt_disable();
 
         /* get next suspend thread */
-        thread = rt_list_entry(que->suspended_pop_list.next, struct rt_thread, tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(que->suspended_pop_list.next);
         /* set error code to -RT_ERROR */
         thread->error = -RT_ERROR;
 
@@ -160,9 +160,7 @@ rt_err_t rt_prio_queue_push(struct rt_prio_queue *que,
         rt_thread_t thread;
 
         /* get thread entry */
-        thread = rt_list_entry(que->suspended_pop_list.next,
-                               struct rt_thread,
-                               tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(que->suspended_pop_list.next);
         /* resume it */
         rt_thread_resume(thread);
         rt_hw_interrupt_enable(level);
@@ -207,7 +205,7 @@ rt_err_t rt_prio_queue_pop(struct rt_prio_queue *que,
         thread->error = RT_EOK;
         rt_thread_suspend(thread);
 
-        rt_list_insert_before(&(que->suspended_pop_list), &(thread->tlist));
+        rt_list_insert_before(&(que->suspended_pop_list), &RT_THREAD_LIST_NODE(thread));
 
         if (timeout > 0)
         {

+ 3 - 7
components/vbus/vbus.c

@@ -336,7 +336,7 @@ rt_err_t rt_vbus_post(rt_uint8_t id,
         rt_enter_critical();
         rt_thread_suspend(thread);
 
-        rt_list_insert_after(&_chn_suspended_threads[id], &thread->tlist);
+        rt_list_insert_after(&_chn_suspended_threads[id], &RT_THREAD_LIST_NODE(thread));
         if (timeout > 0)
         {
             rt_timer_control(&(thread->thread_timer),
@@ -443,9 +443,7 @@ static void rt_vbus_notify_chn(unsigned char chnr, rt_err_t err)
     {
         rt_thread_t thread;
 
-        thread = rt_list_entry(_chn_suspended_threads[chnr].next,
-                               struct rt_thread,
-                               tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(_chn_suspended_threads[chnr].next);
         thread->error = err;
         rt_thread_resume(thread);
     }
@@ -855,9 +853,7 @@ static int _chn0_actor(unsigned char *dp, size_t dsize)
             {
                 rt_thread_t thread;
 
-                thread = rt_list_entry(_chn_suspended_threads[chnr].next,
-                                       struct rt_thread,
-                                       tlist);
+                thread = RT_THREAD_LIST_NODE_ENTRY(_chn_suspended_threads[chnr].next);
                 rt_thread_resume(thread);
             }
             rt_exit_critical();

+ 1 - 3
components/vbus/watermark_queue.c

@@ -43,9 +43,7 @@ void rt_wm_que_dump(struct rt_watermark_queue *wg)
     {
         rt_thread_t thread;
 
-        thread = rt_list_entry(wg->suspended_threads.next,
-                               struct rt_thread,
-                               tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(wg->suspended_threads.next);
         rt_kprintf(" %.*s", RT_NAME_MAX, thread->parent.name);
     }
     rt_kprintf("\n");

+ 2 - 4
components/vbus/watermark_queue.h

@@ -64,7 +64,7 @@ rt_inline rt_err_t rt_wm_que_inc(struct rt_watermark_queue *wg,
         thread = rt_thread_self();
         thread->error = RT_EOK;
         rt_thread_suspend(thread);
-        rt_list_insert_after(&wg->suspended_threads, &thread->tlist);
+        rt_list_insert_after(&wg->suspended_threads, &RT_THREAD_LIST_NODE(thread));
         if (timeout > 0)
         {
             rt_timer_control(&(thread->thread_timer),
@@ -116,9 +116,7 @@ rt_inline void rt_wm_que_dec(struct rt_watermark_queue *wg)
         {
             rt_thread_t thread;
 
-            thread = rt_list_entry(wg->suspended_threads.next,
-                                   struct rt_thread,
-                                   tlist);
+            thread = RT_THREAD_LIST_NODE_ENTRY(wg->suspended_threads.next);
             rt_thread_resume(thread);
             need_sched = 1;
         }
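
The four vbus changes above are one mechanical refactor: the open-coded
rt_list_entry(node, struct rt_thread, tlist) pattern becomes the
RT_THREAD_LIST_NODE()/RT_THREAD_LIST_NODE_ENTRY() accessors, because the
thread's list node has moved into scheduler-private state. A hypothetical
sketch of what those accessors look like (the real definitions live in
include/rtsched.h, added by this commit but not shown here; the field names
below are illustrative):

    #define RT_THREAD_LIST_NODE(thread) \
        ((thread)->sched_thread_ctx.thread_list_node)

    #define RT_THREAD_LIST_NODE_ENTRY(node) \
        rt_container_of(node, struct rt_thread, \
                        sched_thread_ctx.thread_list_node)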

+ 4 - 0
examples/utest/testcases/kernel/Kconfig

@@ -69,4 +69,8 @@ config UTEST_MTSAFE_KPRINT_TC
     bool "mtsafe kprint test"
     default n
 
+config UTEST_SCHEDULER_TC
+    bool "scheduler test"
+    default n
+
 endmenu

+ 7 - 0
examples/utest/testcases/kernel/SConscript

@@ -50,6 +50,13 @@ if GetDepend(['UTEST_HOOKLIST_TC']):
 if GetDepend(['UTEST_MTSAFE_KPRINT_TC']):
     src += ['mtsafe_kprint_tc.c']
 
+# Stressful testcase for scheduler (MP/UP)
+if GetDepend(['UTEST_SCHEDULER_TC']):
+    src += ['sched_timed_sem_tc.c']
+    src += ['sched_timed_mtx_tc.c']
+    src += ['sched_mtx_tc.c']
+    src += ['sched_sem_tc.c', 'sched_thread_tc.c']
+
 group = DefineGroup('utestcases', src, depend = ['RT_USING_UTESTCASES'], CPPPATH = CPPPATH)
 
 Return('group')
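
With UTEST_SCHEDULER_TC enabled, the five cases above are built into the
utest image and can be launched individually from msh, e.g.
utest_run testcases.kernel.scheduler.mutex (assuming the stock utest_run
shell command of the utest framework).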

+ 5 - 4
examples/utest/testcases/kernel/mutex_tc.c

@@ -8,15 +8,16 @@
  * 2021-09.01     luckyzjq     the first version
  * 2023-09-15     xqyjlj       change stack size in cpu64
  */
+#define __RT_IPC_SOURCE__
 
 #include <rtthread.h>
 #include <stdlib.h>
 #include "utest.h"
 
 #ifdef ARCH_CPU_64BIT
-#define THREAD_STACKSIZE 4096
+#define THREAD_STACKSIZE 8192
 #else
-#define THREAD_STACKSIZE 1024
+#define THREAD_STACKSIZE 4096
 #endif
 
 static struct rt_mutex static_mutex;
@@ -241,7 +242,7 @@ static void static_thread1_entry(void *param)
 
     /*  thread3 hode mutex  thread2 take mutex */
     /* check thread2 and thread3 priority */
-    if (tid2->current_priority != tid3->current_priority)
+    if (RT_SCHED_PRIV(tid2).current_priority != RT_SCHED_PRIV(tid3).current_priority)
     {
         uassert_true(RT_FALSE);
     }
@@ -550,7 +551,7 @@ static void dynamic_thread1_entry(void *param)
 
     /*  thread3 hode mutex  thread2 take mutex */
     /* check thread2 and thread3 priority */
-    if (tid2->current_priority != tid3->current_priority)
+    if (RT_SCHED_PRIV(tid2).current_priority != RT_SCHED_PRIV(tid3).current_priority)
     {
         uassert_true(RT_FALSE);
     }
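
current_priority is no longer a public member of struct rt_thread after this
commit; test code opts into the kernel-internal view by defining
__RT_IPC_SOURCE__ and reading the field through RT_SCHED_PRIV(). A minimal
sketch of the pattern, matching the usage above:

    #define __RT_IPC_SOURCE__   /* expose scheduler-private TCB members */
    #include <rtthread.h>

    static rt_uint8_t read_own_prio(void)
    {
        /* RT_SCHED_PRIV() selects the scheduler-private part of the TCB */
        return RT_SCHED_PRIV(rt_thread_self()).current_priority;
    }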

+ 107 - 0
examples/utest/testcases/kernel/sched_mtx_tc.c

@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-17     Shell        the first version
+ */
+#include <rtthread.h>
+#include <stdlib.h>
+#include "utest.h"
+
+/**
+ * Stressful Test for Mutex
+ */
+
+#define TEST_SECONDS 30
+#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
+#define TEST_THREAD_COUNTS (RT_CPUS_NR)
+#define TEST_PROGRESS_COUNTS (36)
+#define TEST_PROGRESS_ON (TEST_LOOP_TICKS/TEST_PROGRESS_COUNTS)
+#define TEST_PRIORITY_HIGHEST (UTEST_THR_PRIORITY+1)
+#define TEST_RANDOM_LATENCY_MAX (1000 * 1000)
+
+static struct rt_semaphore _thr_exit_sem;
+static rt_atomic_t _progress_counter;
+static rt_atomic_t _exit_flag;
+static struct rt_mutex _racing_lock;
+
+static void test_thread_entry(void *param)
+{
+    while (1)
+    {
+        rt_mutex_take(&_racing_lock, RT_WAITING_FOREVER);
+        rt_mutex_release(&_racing_lock);
+
+        if (rt_atomic_load(&_exit_flag))
+        {
+            break;
+        }
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+}
+
+static void mutex_stress_tc(void)
+{
+    rt_err_t error;
+    rt_thread_t tester;
+    const rt_base_t priority_base = TEST_PRIORITY_HIGHEST;
+
+    for (size_t i = 0; i < TEST_THREAD_COUNTS; i++)
+    {
+        tester = rt_thread_create(
+            "tester",
+            test_thread_entry,
+            (void *)0,
+            UTEST_THR_STACK_SIZE,
+            priority_base + (i % (RT_THREAD_PRIORITY_MAX - TEST_PRIORITY_HIGHEST)),
+            1);
+
+        rt_thread_startup(tester);
+    }
+
+    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
+    {
+        rt_thread_delay(1);
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    /* trigger exit request for all sub-threads */
+    rt_atomic_store(&_exit_flag, 1);
+
+    /* waiting for sub-threads to exit */
+    for (size_t i = 0; i < TEST_THREAD_COUNTS; i++)
+    {
+        error = rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
+        uassert_int_equal(error, RT_EOK);
+    }
+}
+
+static rt_err_t utest_tc_init(void)
+{
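+    /* seed rand() from an uninitialized heap word; the indeterminate read is deliberate */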
+    int *pseed = rt_malloc(sizeof(int));
+    srand(*(int *)pseed);
+    rt_free(pseed);
+
+    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
+    rt_mutex_init(&_racing_lock, "ipc", RT_IPC_FLAG_PRIO);
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    rt_sem_detach(&_thr_exit_sem);
+    rt_mutex_detach(&_racing_lock);
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(mutex_stress_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.mutex", utest_tc_init, utest_tc_cleanup, TEST_SECONDS);
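
The worker priorities are spread from TEST_PRIORITY_HIGHEST across the rest
of the priority range (the modulo in mutex_stress_tc), so the mutex's
priority-inheritance and wakeup paths are exercised between threads of
unequal priority rather than only among peers.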

+ 196 - 0
examples/utest/testcases/kernel/sched_sem_tc.c

@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-17     Shell        the first version
+ */
+#define __RT_IPC_SOURCE__
+
+#include <rtthread.h>
+#include "rthw.h"
+#include "utest.h"
+
+#define KERN_TEST_CONFIG_LOOP_TIMES     160
+#define KERN_TEST_CONCURRENT_THREADS    (RT_CPUS_NR * 2)
+#define KERN_TEST_CONFIG_HIGHEST_PRIO   3
+#define KERN_TEST_CONFIG_LOWEST_PRIO    (RT_THREAD_PRIORITY_MAX - 2)
+
+#define TEST_LEVEL_COUNTS (KERN_TEST_CONFIG_LOWEST_PRIO - KERN_TEST_CONFIG_HIGHEST_PRIO + 1)
+#if TEST_LEVEL_COUNTS <= RT_CPUS_NR
+#warning for the best results of this test, TEST_LEVEL_COUNTS should be greater than RT_CPUS_NR
+#endif
+#if KERN_TEST_CONCURRENT_THREADS < RT_CPUS_NR
+#warning for the best results of this test, KERN_TEST_CONCURRENT_THREADS should be at least RT_CPUS_NR
+#endif
+#if KERN_TEST_CONFIG_LOWEST_PRIO >= RT_THREAD_PRIORITY_MAX - 1
+#error the lowest test thread priority must be higher than the idle thread priority
+#endif
+
+static rt_atomic_t _star_counter = 1;
+static struct rt_semaphore _thr_exit_sem;
+static struct rt_semaphore _level_waiting[TEST_LEVEL_COUNTS];
+static rt_thread_t _thread_matrix[TEST_LEVEL_COUNTS][KERN_TEST_CONCURRENT_THREADS];
+static rt_atomic_t _load_average[RT_CPUS_NR];
+
+static void _print_char(rt_thread_t thr_self, int character)
+{
+    rt_base_t current_counter;
+
+#ifdef RT_USING_SMP
+    rt_kprintf("%c%d", character, RT_SCHED_CTX(thr_self).oncpu);
+#else
+    rt_kprintf("%c0", character);
+#endif /* RT_USING_SMP */
+
+    current_counter = rt_atomic_add(&_star_counter, 1);
+    if (current_counter % 30 == 0)
+    {
+        rt_kprintf("\n");
+    }
+}
+
+static void _stats_load_avg_inc(void)
+{
+    int cpuid;
+
+    cpuid = rt_hw_cpu_id();
+    rt_atomic_add(&_load_average[cpuid], 1);
+}
+
+static void _stats_load_avg_print(void)
+{
+    rt_base_t counts = 0;
+    const rt_base_t total_test_counts = KERN_TEST_CONFIG_LOOP_TIMES * TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS;
+
+    for (size_t i = 0; i < RT_CPUS_NR; i++)
+    {
+        rt_kprintf("%ld ", _load_average[i]);
+        counts += _load_average[i];
+    }
+
+    rt_kprintf("\n");
+    uassert_int_equal(counts, total_test_counts);
+}
+
+static void _thread_entry(void *param)
+{
+    int level = (rt_ubase_t)param;
+    rt_thread_t thr_self = rt_thread_self();
+
+    if (level == 0)
+    {
+        /* always the first to execute among other working threads */
+        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
+        {
+            /* notify our consumer */
+            rt_sem_release(&_level_waiting[level + 1]);
+
+            _stats_load_avg_inc();
+
+            /* waiting for resource of ours */
+            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
+        }
+    }
+    else if (level == TEST_LEVEL_COUNTS - 1)
+    {
+
+        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
+        {
+            /* waiting for our resource first */
+            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
+
+            _stats_load_avg_inc();
+
+            _print_char(thr_self, '*');
+
+            rt_thread_delay(1);
+
+            /* produce for level 0 worker */
+            rt_sem_release(&_level_waiting[0]);
+        }
+    }
+    else
+    {
+        for (size_t i = 0; i < KERN_TEST_CONFIG_LOOP_TIMES; i++)
+        {
+            /* waiting for resource of ours */
+            rt_sem_take(&_level_waiting[level], RT_WAITING_FOREVER);
+
+            _stats_load_avg_inc();
+
+            /* notify our consumer */
+            rt_sem_release(&_level_waiting[level + 1]);
+        }
+    }
+
+    uassert_true(1);
+    rt_sem_release(&_thr_exit_sem);
+
+    return;
+}
+
+static void scheduler_tc(void)
+{
+    LOG_I("Test starts...");
+    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
+    {
+        for (size_t j = 0; j < KERN_TEST_CONCURRENT_THREADS; j++)
+        {
+            rt_thread_startup(_thread_matrix[i][j]);
+        }
+    }
+    LOG_I("%d threads startup...", TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS);
+
+    /* waiting for sub-threads to exit */
+    for (size_t i = 0; i < TEST_LEVEL_COUNTS * KERN_TEST_CONCURRENT_THREADS; i++)
+    {
+        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
+    }
+
+    /* print load average */
+    _stats_load_avg_print();
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    LOG_I("Setup environment...");
+    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
+
+    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
+    {
+        rt_sem_init(&_level_waiting[i], "test", 0, RT_IPC_FLAG_PRIO);
+
+        for (size_t j = 0; j < KERN_TEST_CONCURRENT_THREADS; j++)
+        {
+            _thread_matrix[i][j] =
+                rt_thread_create("test",
+                                 _thread_entry,
+                                 (void *)i,
+                                 UTEST_THR_STACK_SIZE,
+                                 KERN_TEST_CONFIG_HIGHEST_PRIO+i,
+                                 5);
+            if (!_thread_matrix[i][j])
+                uassert_not_null(_thread_matrix[i][j]);
+        }
+    }
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    rt_sem_detach(&_thr_exit_sem);
+    for (size_t i = 0; i < TEST_LEVEL_COUNTS; i++)
+    {
+        rt_sem_detach(&_level_waiting[i]);
+    }
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(scheduler_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.sem", utest_tc_init, utest_tc_cleanup, 10);
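
The threads in this case form a token ring across priority levels: each
level blocks on its own semaphore and releases the next level's, so one
wave of work circulates per loop iteration. An illustrative view of the
flow (not part of the test source):

    /*
     *  level 0 --release--> level 1 --release--> ... --> level N-1
     *     ^                                                  |
     *     +---------------------release----------------------+
     *
     * level 0 primes the ring; the last level prints a '*' plus the CPU
     * id it ran on, then feeds the token back to level 0.
     */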

+ 121 - 0
examples/utest/testcases/kernel/sched_thread_tc.c

@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-25     Shell        init ver.
+ */
+#define __RT_KERNEL_SOURCE__
+#include <rtthread.h>
+#include "utest.h"
+
+#define TEST_LOOP_TIMES (100 * 1000)
+#define TEST_PROGRESS_COUNTS (36)
+#define TEST_THREAD_COUNT (RT_CPUS_NR * 1)
+#define TEST_PROGRESS_ON (TEST_LOOP_TIMES*TEST_THREAD_COUNT/TEST_PROGRESS_COUNTS)
+
+static struct rt_semaphore _thr_exit_sem;
+static rt_atomic_t _progress_counter;
+
+static volatile rt_thread_t threads_group[TEST_THREAD_COUNT][2];
+
+static void _thread_entry1(void *param)
+{
+    rt_base_t critical_level;
+    size_t idx = (size_t)param;
+
+    for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
+    {
+        critical_level = rt_enter_critical();
+
+        rt_thread_suspend(rt_thread_self());
+        rt_thread_resume(threads_group[idx][1]);
+
+        rt_exit_critical_safe(critical_level);
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+    return;
+}
+
+static void _thread_entry2(void *param)
+{
+    rt_base_t critical_level;
+    size_t idx = (size_t)param;
+
+    for (size_t i = 0; i < TEST_LOOP_TIMES; i++)
+    {
+        critical_level = rt_enter_critical();
+
+        rt_thread_suspend(rt_thread_self());
+        rt_thread_resume(threads_group[idx][0]);
+
+        rt_exit_critical_safe(critical_level);
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+    return;
+}
+
+static void scheduler_tc(void)
+{
+    for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
+    {
+        rt_thread_t t1 =
+            rt_thread_create(
+                "t1",
+                _thread_entry1,
+                (void *)i,
+                UTEST_THR_STACK_SIZE,
+                UTEST_THR_PRIORITY + 1,
+                100);
+        rt_thread_t t2 =
+            rt_thread_create(
+                "t2",
+                _thread_entry2,
+                (void *)i,
+                UTEST_THR_STACK_SIZE,
+                UTEST_THR_PRIORITY + 1,
+                100);
+
+        threads_group[i][0] = t1;
+        threads_group[i][1] = t2;
+    }
+
+    for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
+    {
+        rt_thread_startup(threads_group[i][0]);
+        rt_thread_startup(threads_group[i][1]);
+    }
+
+    for (size_t i = 0; i < TEST_THREAD_COUNT; i++)
+    {
+        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
+    }
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    rt_sem_detach(&_thr_exit_sem);
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(scheduler_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.thread", utest_tc_init, utest_tc_cleanup, 10);
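
Each thread pair suspends itself and resumes its peer inside a critical
section; the nesting level returned by rt_enter_critical() is later handed
to rt_exit_critical_safe(), so the actual reschedule cannot slip in between
the suspend and the resume. A minimal sketch of the pairing as used above:

    static void pingpong_step(rt_thread_t peer)
    {
        rt_base_t level = rt_enter_critical();  /* returns nesting level */

        rt_thread_suspend(rt_thread_self());    /* takes effect on exit */
        rt_thread_resume(peer);

        rt_exit_critical_safe(level);           /* switch happens here */
    }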

+ 232 - 0
examples/utest/testcases/kernel/sched_timed_mtx_tc.c

@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-25     Shell        init ver.
+ */
+#define __RT_KERNEL_SOURCE__
+#include <rtthread.h>
+#include <stdlib.h>
+#include "utest.h"
+
+#define TEST_SECONDS 10
+#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
+#define TEST_PROGRESS_COUNTS (36)
+#define TEST_PROGRESS_ON (TEST_LOOP_TICKS*2/TEST_PROGRESS_COUNTS)
+
+static struct rt_semaphore _thr_exit_sem;
+static struct rt_mutex _ipc_primitive;
+static struct rt_semaphore _cons_can_take_mtx;
+static struct rt_semaphore _prod_can_take_mtx;
+static rt_atomic_t _progress_counter;
+#define CONSUMER_MAGIC 0x11223344
+#define PRODUCER_MAGIC 0x44332211
+static rt_atomic_t _last_holder_flag = CONSUMER_MAGIC;
+static rt_base_t _timedout_failed_times = 0;
+
+/**
+ * Test of timed-out IPC under the race condition where the timeout routine
+ * and the producer thread may race to wake up the sleeper.
+ *
+ * This test forks 2 threads, one producer and one consumer. The producer
+ * loops, triggering the IPC on the edge of each new tick's arrival. The
+ * consumer waits on the IPC with a timeout of 1 tick.
+ */
+
+static void _wait_until_edge(void)
+{
+    rt_tick_t entry_level, current;
+    rt_base_t random_latency;
+
+    entry_level = rt_tick_get();
+    do
+    {
+        current = rt_tick_get();
+    }
+    while (current == entry_level);
+
+    /* give a random latency for test */
+    random_latency = rand() % 1000 * 1000;
+    entry_level = current;
+    for (size_t i = 0; i < random_latency; i++)
+    {
+        current = rt_tick_get();
+        if (current != entry_level)
+            break;
+    }
+}
+
+static void _producer_entry(void *param)
+{
+    rt_err_t error;
+    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
+    {
+        /**
+         * only try to take the mutex after the consumer has taken it
+         * following our last release.
+         */
+        error = rt_sem_take(&_prod_can_take_mtx, RT_WAITING_FOREVER);
+        if (error)
+        {
+            uassert_true(0);
+            break;
+        }
+
+        error = rt_mutex_take(&_ipc_primitive, RT_WAITING_FOREVER);
+        if (error)
+        {
+            uassert_true(0);
+            break;
+        }
+
+        /* ensure the mutex is held in round-robin order */
+        if (rt_atomic_load(&_last_holder_flag) != CONSUMER_MAGIC)
+        {
+            uassert_true(0);
+            break;
+        }
+        else
+        {
+            rt_atomic_store(&_last_holder_flag, PRODUCER_MAGIC);
+            rt_sem_release(&_cons_can_take_mtx);
+        }
+
+        _wait_until_edge();
+
+        rt_mutex_release(&_ipc_primitive);
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+    return;
+}
+
+static void _consumer_entry(void *param)
+{
+    rt_err_t error;
+    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
+    {
+        /**
+         * only try to take the mutex after the producer has taken it
+         * following our last release.
+         */
+        error = rt_sem_take(&_cons_can_take_mtx, RT_WAITING_FOREVER);
+        if (error)
+        {
+            uassert_true(0);
+            break;
+        }
+
+        while (1)
+        {
+            error = rt_mutex_take_interruptible(&_ipc_primitive, 1);
+            if (error == -RT_ETIMEOUT)
+            {
+                _timedout_failed_times++;
+                if (rt_mutex_get_owner(&_ipc_primitive) == rt_thread_self())
+                {
+                    uassert_true(0);
+                    break;
+                }
+            }
+            else
+            {
+                break;
+            }
+        }
+
+        if (error != RT_EOK)
+        {
+            uassert_true(0);
+            break;
+        }
+
+        /* ensure the mutex is held in round-robin order */
+        if (rt_atomic_load(&_last_holder_flag) != PRODUCER_MAGIC)
+        {
+            uassert_true(0);
+            break;
+        }
+        else
+        {
+            rt_atomic_store(&_last_holder_flag, CONSUMER_MAGIC);
+            rt_sem_release(&_prod_can_take_mtx);
+        }
+
+        rt_mutex_release(&_ipc_primitive);
+        if (rt_mutex_get_owner(&_ipc_primitive) == rt_thread_self())
+        {
+            uassert_true(0);
+            break;
+        }
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+    return;
+}
+
+static void timed_mtx_tc(void)
+{
+    rt_thread_t prod = rt_thread_create(
+        "prod",
+        _producer_entry,
+        (void *)0,
+        UTEST_THR_STACK_SIZE,
+        UTEST_THR_PRIORITY + 1,
+        4);
+
+    rt_thread_t cons = rt_thread_create(
+        "cons",
+        _consumer_entry,
+        (void *)0,
+        UTEST_THR_STACK_SIZE,
+        UTEST_THR_PRIORITY + 1,
+        100);
+
+    rt_thread_startup(prod);
+    rt_thread_startup(cons);
+
+    for (size_t i = 0; i < 2; i++)
+    {
+        uassert_int_equal(
+            rt_sem_take(&_thr_exit_sem, 2 * TEST_LOOP_TICKS),
+            RT_EOK);
+    }
+
+    /* Summary */
+    LOG_I("Total failed times: %ld(in %d)\n", _timedout_failed_times, TEST_LOOP_TICKS);
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    _timedout_failed_times = 0;
+
+    rt_mutex_init(&_ipc_primitive, "ipc", RT_IPC_FLAG_PRIO);
+    rt_sem_init(&_cons_can_take_mtx, "test", 0, RT_IPC_FLAG_PRIO);
+    rt_sem_init(&_prod_can_take_mtx, "test", 1, RT_IPC_FLAG_PRIO);
+    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    rt_mutex_detach(&_ipc_primitive);
+    rt_sem_detach(&_cons_can_take_mtx);
+    rt_sem_detach(&_prod_can_take_mtx);
+    rt_sem_detach(&_thr_exit_sem);
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(timed_mtx_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.timed_mtx", utest_tc_init, utest_tc_cleanup, TEST_SECONDS * 2);
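
The window under test is the tick edge where the producer's
rt_mutex_release() and the consumer's 1-tick timeout can fire back to back.
An illustrative timeline of the two legal outcomes (not part of the test
source):

    /*
     *  producer: ----release()----.
     *                              \  may land on the same tick edge
     *  tick ISR: --timeout fires---+
     *                               \
     *  consumer: wakes with exactly one of:
     *    RT_EOK        -> it owns the mutex
     *    -RT_ETIMEOUT  -> it must NOT be the owner (asserted above)
     */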

+ 149 - 0
examples/utest/testcases/kernel/sched_timed_sem_tc.c

@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-25     Shell        init ver.
+ */
+#define __RT_KERNEL_SOURCE__
+#include <rtthread.h>
+#include <stdlib.h>
+#include "utest.h"
+
+#define TEST_SECONDS 10
+#define TEST_LOOP_TICKS (TEST_SECONDS * RT_TICK_PER_SECOND)
+#define TEST_PROGRESS_COUNTS (36)
+#define TEST_PROGRESS_ON (TEST_LOOP_TICKS*2/TEST_PROGRESS_COUNTS)
+
+static struct rt_semaphore _thr_exit_sem;
+static struct rt_semaphore _ipc_sem;
+static rt_atomic_t _progress_counter;
+static rt_base_t _timedout_failed_times = 0;
+
+/**
+ * Test of timed-out IPC under the race condition where the timeout routine
+ * and the producer thread may race to wake up the sleeper.
+ *
+ * This test forks 2 threads, one producer and one consumer. The producer
+ * loops, triggering the IPC on the edge of each new tick's arrival. The
+ * consumer waits on the IPC with a timeout of 1 tick.
+ */
+
+static void _wait_until_edge(void)
+{
+    rt_tick_t entry_level, current;
+    rt_base_t random_latency;
+
+    entry_level = rt_tick_get();
+    do
+    {
+        current = rt_tick_get();
+    }
+    while (current == entry_level);
+
+    /* give a random latency for test */
+    random_latency = rand();
+    entry_level = current;
+    for (size_t i = 0; i < random_latency; i++)
+    {
+        current = rt_tick_get();
+        if (current != entry_level)
+            break;
+    }
+}
+
+static void _producer_entry(void *param)
+{
+    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
+    {
+        _wait_until_edge();
+
+        rt_sem_release(&_ipc_sem);
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+    return;
+}
+
+static void _consumer_entry(void *param)
+{
+    int error;
+    for (size_t i = 0; i < TEST_LOOP_TICKS; i++)
+    {
+        error = rt_sem_take_interruptible(&_ipc_sem, 1);
+        if (error == -RT_ETIMEOUT)
+        {
+            _timedout_failed_times++;
+        }
+        else
+        {
+            if (error != RT_EOK)
+                uassert_true(0);
+        }
+
+        if (rt_atomic_add(&_progress_counter, 1) % TEST_PROGRESS_ON == 0)
+            uassert_true(1);
+    }
+
+    rt_sem_release(&_thr_exit_sem);
+    return;
+}
+
+static void timed_sem_tc(void)
+{
+    rt_thread_t prod = rt_thread_create(
+        "prod",
+        _producer_entry,
+        (void *)0,
+        UTEST_THR_STACK_SIZE,
+        UTEST_THR_PRIORITY + 1,
+        4);
+
+    rt_thread_t cons = rt_thread_create(
+        "cons",
+        _consumer_entry,
+        (void *)0,
+        UTEST_THR_STACK_SIZE,
+        UTEST_THR_PRIORITY + 1,
+        100);
+
+    rt_thread_startup(prod);
+    rt_thread_startup(cons);
+
+    for (size_t i = 0; i < 2; i++)
+    {
+        rt_sem_take(&_thr_exit_sem, RT_WAITING_FOREVER);
+    }
+
+    /* Summary */
+    LOG_I("Total failed times: %ld(in %d)\n", _timedout_failed_times, TEST_LOOP_TICKS);
+}
+
+static rt_err_t utest_tc_init(void)
+{
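+    /* seed rand() from an uninitialized heap word; the indeterminate read is deliberate */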
+    int *pseed = rt_malloc(sizeof(int));
+    srand(*(int *)pseed);
+    rt_free(pseed);
+
+    rt_sem_init(&_ipc_sem, "ipc", 0, RT_IPC_FLAG_PRIO);
+    rt_sem_init(&_thr_exit_sem, "test", 0, RT_IPC_FLAG_PRIO);
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    rt_sem_detach(&_ipc_sem);
+    rt_sem_detach(&_thr_exit_sem);
+    return RT_EOK;
+}
+
+static void testcase(void)
+{
+    UTEST_UNIT_RUN(timed_sem_tc);
+}
+UTEST_TC_EXPORT(testcase, "testcases.kernel.scheduler.timed_sem", utest_tc_init, utest_tc_cleanup, TEST_SECONDS * 2);

+ 12 - 4
examples/utest/testcases/kernel/signal_tc.c

@@ -22,7 +22,8 @@
 #include <rtthread.h>
 #include "utest.h"
 
-int recive_sig = 0;
+static volatile int recive_sig = 0;
+static struct rt_semaphore _received_signal;
 
 void sig_handle_default(int signo)
 {
@@ -125,12 +126,15 @@ void rt_signal_wait_thread(void *parm)
     (void)sigaddset(&selectset, SIGUSR1);
 
     /* case 5:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: kill, should received. */
-    if (rt_signal_wait(&selectset, &recive_si, RT_TICK_PER_SECOND) != RT_EOK)
+    if (rt_signal_wait((void *)&selectset, &recive_si, RT_TICK_PER_SECOND) != RT_EOK)
     {
         return;
     }
 
     recive_sig = recive_si.si_signo;
+
+    LOG_I("received signal %d", recive_sig);
+    rt_sem_release(&_received_signal);
 }
 
 static void rt_signal_wait_test(void)
@@ -147,7 +151,7 @@ static void rt_signal_wait_test(void)
     rt_thread_mdelay(1);
     /* case 5:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: kill, should received. */
     uassert_int_equal(rt_thread_kill(t1, SIGUSR1), RT_EOK);
-    rt_thread_mdelay(1);
+    rt_sem_take(&_received_signal, RT_WAITING_FOREVER);
     uassert_int_equal(recive_sig, SIGUSR1);
 
     return;
@@ -167,7 +171,9 @@ static void rt_signal_wait_test2(void)
     /* case 6:rt_signal_wait, two thread, thread1: install and unmask, then wait 1s; thread2: sleep 2s then kill, should can't received. */
     rt_thread_mdelay(2000);
     uassert_int_equal(rt_thread_kill(t1, SIGUSR1), RT_EOK);
-    rt_thread_mdelay(1);
+    uassert_int_not_equal(
+        rt_sem_take(&_received_signal, 1),
+        RT_EOK);
     uassert_int_not_equal(recive_sig, SIGUSR1);
 
     return;
@@ -175,11 +181,13 @@ static void rt_signal_wait_test2(void)
 
 static rt_err_t utest_tc_init(void)
 {
+    rt_sem_init(&_received_signal, "utest", 0, RT_IPC_FLAG_PRIO);
     return RT_EOK;
 }
 
 static rt_err_t utest_tc_cleanup(void)
 {
+    rt_sem_detach(&_received_signal);
     return RT_EOK;
 }
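
The handshake on _received_signal replaces the fixed rt_thread_mdelay(1)
sleeps: the positive case now blocks until the handler has actually run,
and the negative case asserts that the semaphore is not released within
one tick.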
 

+ 29 - 16
examples/utest/testcases/kernel/thread_tc.c

@@ -9,6 +9,8 @@
  * 2021-10.11     mazhiyuan    add idle, yield, suspend, control, priority, delay_until
  */
 
+#define __RT_IPC_SOURCE__ /* include internal API for utest */
+
 #include <rtthread.h>
 #include <stdlib.h>
 #include "utest.h"
@@ -56,7 +58,7 @@ static void test_dynamic_thread(void)
                             thread1_entry,
                             (void *)1,
                             THREAD_STACK_SIZE,
-                            __current_thread->current_priority + 1,
+                            UTEST_THR_PRIORITY + 1,
                             THREAD_TIMESLICE - 5);
     if (tid1 == RT_NULL)
     {
@@ -105,7 +107,7 @@ static void test_static_thread(void)
                               (void *)2,
                               &thread2_stack[0],
                               sizeof(thread2_stack),
-                              __current_thread->current_priority + 1,
+                              UTEST_THR_PRIORITY + 1,
                               THREAD_TIMESLICE);
     if (ret_init != RT_EOK)
     {
@@ -139,10 +141,11 @@ __exit:
 
 static void thread3_entry(void *parameter)
 {
-    rt_tick_t tick;
+    rt_tick_t tick, latency_tick;
     tick = rt_tick_get();
     rt_thread_delay(15);
-    if (rt_tick_get() - tick > 16)
+    latency_tick = rt_tick_get() - tick;
+    if (latency_tick > 16 || latency_tick < 15)
     {
         tid3_finish_flag = 1;
         tid3_delay_pass_flag = 0;
@@ -160,7 +163,7 @@ static void test_thread_delay(void)
                             thread3_entry,
                             RT_NULL,
                             THREAD_STACK_SIZE,
-                            __current_thread->current_priority - 1,
+                            UTEST_THR_PRIORITY - 1,
                             THREAD_TIMESLICE);
     if (tid3 == RT_NULL)
     {
@@ -210,7 +213,7 @@ static void test_idle_hook(void)
                             thread4_entry,
                             RT_NULL,
                             THREAD_STACK_SIZE,
-                            __current_thread->current_priority - 1,
+                            UTEST_THR_PRIORITY - 1,
                             THREAD_TIMESLICE);
     if (tid4 == RT_NULL)
     {
@@ -264,7 +267,7 @@ static void test_thread_yield(void)
                             thread5_entry,
                             RT_NULL,
                             THREAD_STACK_SIZE,
-                            __current_thread->current_priority - 1,
+                            UTEST_THR_PRIORITY - 1,
                             THREAD_TIMESLICE);
     if (tid5 == RT_NULL)
     {
@@ -283,7 +286,7 @@ static void test_thread_yield(void)
                             thread6_entry,
                             RT_NULL,
                             THREAD_STACK_SIZE,
-                            __current_thread->current_priority - 1,
+                            UTEST_THR_PRIORITY - 1,
                             THREAD_TIMESLICE);
     if (tid6 == RT_NULL)
     {
@@ -319,12 +322,13 @@ static void test_thread_control(void)
 {
     rt_err_t ret_control = -RT_ERROR;
     rt_err_t rst_delete = -RT_ERROR;
+    rt_sched_lock_level_t slvl;
 
     tid7 = rt_thread_create("thread7",
                             thread7_entry,
                             RT_NULL,
                             THREAD_STACK_SIZE,
-                            __current_thread->current_priority + 1,
+                            UTEST_THR_PRIORITY + 1,
                             THREAD_TIMESLICE);
     if (tid7 == RT_NULL)
     {
@@ -342,12 +346,17 @@ static void test_thread_control(void)
     }
     rt_thread_mdelay(200);
     rt_thread_control(tid7, RT_THREAD_CTRL_CHANGE_PRIORITY, &change_priority);
-    if (tid7->current_priority != change_priority)
+
+    rt_sched_lock(&slvl);
+    if (rt_sched_thread_get_curr_prio(tid7) != change_priority)
     {
         LOG_E("rt_thread_control failed!");
         uassert_false(1);
+        rt_sched_unlock(slvl);
         goto __exit;
     }
+    rt_sched_unlock(slvl);
+
     rst_delete = rt_thread_control(tid7, RT_THREAD_CTRL_CLOSE, RT_NULL);
     if (rst_delete != RT_EOK)
     {
@@ -380,7 +389,7 @@ static void test_thread_priority(void)
                             thread8_entry,
                             RT_NULL,
                             THREAD_STACK_SIZE,
-                            __current_thread->current_priority - 1,
+                            UTEST_THR_PRIORITY - 1,
                             THREAD_TIMESLICE);
     if (tid8 == RT_NULL)
     {
@@ -448,6 +457,10 @@ static void test_delay_until(void)
     rt_kprintf("delta[20] -> %d\n", delta);
     uassert_int_equal(delta, 20);
 
+    /**
+     * the rt_kprintf calls above can take a few ticks to complete, maybe more than 10
+     */
+    tick = rt_tick_get();
     check_tick = tick;
     rt_thread_delay(2);
     rt_thread_delay_until(&tick, 10);
@@ -495,7 +508,7 @@ void test_timeslice(void)
     timeslice_cntB2 = 0;
 
     tidA = rt_thread_create("timeslice", test_timeslice_threadA_entry, RT_NULL,
-                           2048, __current_thread->current_priority + 1, 10);
+                           2048, UTEST_THR_PRIORITY + 1, 10);
     if (!tidA)
     {
         LOG_E("rt_thread_create failed!");
@@ -512,7 +525,7 @@ void test_timeslice(void)
     }
 
     tidB1 = rt_thread_create("timeslice", test_timeslice_threadB1_entry, RT_NULL,
-                           2048, __current_thread->current_priority + 2, 2);
+                           2048, UTEST_THR_PRIORITY + 2, 2);
     if (!tidB1)
     {
         LOG_E("rt_thread_create failed!");
@@ -529,7 +542,7 @@ void test_timeslice(void)
     }
 
     tidB2 = rt_thread_create("timeslice", test_timeslice_threadB2_entry, RT_NULL,
-                           2048, __current_thread->current_priority + 2, 2);
+                           2048, UTEST_THR_PRIORITY + 2, 2);
     if (!tidB2)
     {
         LOG_E("rt_thread_create failed!");
@@ -655,7 +668,7 @@ void test_thread_yield_nosmp(void)
 //                            thread9_entry,
 //                            RT_NULL,
 //                            THREAD_STACK_SIZE,
-//                            __current_thread->current_priority + 1,
+//                            UTEST_THR_PRIORITY + 1,
 //                            THREAD_TIMESLICE);
 //     if (tid == RT_NULL)
 //     {
@@ -695,7 +708,7 @@ void test_thread_yield_nosmp(void)
 static rt_err_t utest_tc_init(void)
 {
     __current_thread = rt_thread_self();
-    change_priority = __current_thread->current_priority + 5;
+    change_priority = UTEST_THR_PRIORITY + 5;
     tid3_delay_pass_flag = 0;
     tid3_finish_flag = 0;
     tid4_finish_flag = 0;
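
Where a test must observe another thread's live priority, it now brackets
the read with the scheduler lock instead of dereferencing the TCB directly.
A minimal sketch of the pattern used in test_thread_control() above
(assuming the priority is returned as rt_uint8_t):

    static rt_uint8_t sample_prio(rt_thread_t tid)
    {
        rt_sched_lock_level_t slvl;
        rt_uint8_t prio;

        rt_sched_lock(&slvl);               /* freeze scheduling state */
        prio = rt_sched_thread_get_curr_prio(tid);
        rt_sched_unlock(slvl);

        return prio;
    }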

+ 100 - 0
include/rtcompiler.h

@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-18     Shell        Separate the compiler porting from rtdef.h
+ */
+#ifndef __RT_COMPILER_H__
+#define __RT_COMPILER_H__
+
+#include <rtconfig.h>
+
+#if defined(__ARMCC_VERSION)        /* ARM Compiler */
+#define rt_section(x)               __attribute__((section(x)))
+#define rt_used                     __attribute__((used))
+#define rt_align(n)                 __attribute__((aligned(n)))
+#define rt_weak                     __attribute__((weak))
+#define rt_typeof                   typeof
+#define rt_noreturn
+#define rt_inline                   static __inline
+#define rt_always_inline            rt_inline
+#elif defined (__IAR_SYSTEMS_ICC__) /* for IAR Compiler */
+#define rt_section(x)               @ x
+#define rt_used                     __root
+#define PRAGMA(x)                   _Pragma(#x)
+#define rt_align(n)                 PRAGMA(data_alignment=n)
+#define rt_weak                     __weak
+#define rt_typeof                   typeof
+#define rt_noreturn
+#define rt_inline                   static inline
+#define rt_always_inline            rt_inline
+#elif defined (__GNUC__)            /* GNU GCC Compiler */
+#define __RT_STRINGIFY(x...)        #x
+#define RT_STRINGIFY(x...)          __RT_STRINGIFY(x)
+#define rt_section(x)               __attribute__((section(x)))
+#define rt_used                     __attribute__((used))
+#define rt_align(n)                 __attribute__((aligned(n)))
+#define rt_weak                     __attribute__((weak))
+#define rt_typeof                   __typeof__
+#define rt_noreturn                 __attribute__ ((noreturn))
+#define rt_inline                   static __inline
+#define rt_always_inline            static inline __attribute__((always_inline))
+#elif defined (__ADSPBLACKFIN__)    /* for VisualDSP++ Compiler */
+#define rt_section(x)               __attribute__((section(x)))
+#define rt_used                     __attribute__((used))
+#define rt_align(n)                 __attribute__((aligned(n)))
+#define rt_weak                     __attribute__((weak))
+#define rt_typeof                   typeof
+#define rt_noreturn
+#define rt_inline                   static inline
+#define rt_always_inline            rt_inline
+#elif defined (_MSC_VER)            /* for Visual Studio Compiler */
+#define rt_section(x)
+#define rt_used
+#define rt_align(n)                 __declspec(align(n))
+#define rt_weak
+#define rt_typeof                   typeof
+#define rt_noreturn
+#define rt_inline                   static __inline
+#define rt_always_inline            rt_inline
+#elif defined (__TI_COMPILER_VERSION__) /* for TI CCS Compiler */
+/**
+ * The way the TI compiler sets sections differs from other compilers (at
+ * least GCC and MDK). See ARM Optimizing C/C++ Compiler 5.9.3 for more
+ * details.
+ */
+#define rt_section(x)               __attribute__((section(x)))
+#ifdef __TI_EABI__
+#define rt_used                     __attribute__((retain)) __attribute__((used))
+#else
+#define rt_used                     __attribute__((used))
+#endif
+#define PRAGMA(x)                   _Pragma(#x)
+#define rt_align(n)                 __attribute__((aligned(n)))
+#ifdef __TI_EABI__
+#define rt_weak                     __attribute__((weak))
+#else
+#define rt_weak
+#endif
+#define rt_typeof                   typeof
+#define rt_noreturn
+#define rt_inline                   static inline
+#define rt_always_inline            rt_inline
+#elif defined (__TASKING__)         /* for TASKING Compiler */
+#define rt_section(x)               __attribute__((section(x)))
+#define rt_used                     __attribute__((used, protect))
+#define PRAGMA(x)                   _Pragma(#x)
+#define rt_align(n)                 __attribute__((__align(n)))
+#define rt_weak                     __attribute__((weak))
+#define rt_typeof                   typeof
+#define rt_noreturn
+#define rt_inline                   static inline
+#define rt_always_inline            rt_inline
+#else                              /* Unknown Compiler */
+    #error unsupported tool chain
+#endif /* __ARMCC_VERSION */
+
+#endif /* __RT_COMPILER_H__ */
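
The macros are consumed the same way on every toolchain. A short
illustrative sketch (the symbol names below are examples, not part of this
header):

    #include <rtthread.h>

    /* overridable default implementation */
    rt_weak void board_led_init(void)
    {
    }

    /* stack buffer aligned for context-switch requirements */
    rt_align(RT_ALIGN_SIZE) static rt_uint8_t demo_stack[1024];

    /* keep this symbol even if the linker sees no reference to it */
    rt_used static const char demo_tag[] rt_section(".rodata.tag") = "demo";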

+ 57 - 241
include/rtdef.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2022, RT-Thread Development Team
+ * Copyright (c) 2006-2024, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -55,6 +55,8 @@
  * 2023-11-21     Meco Man     add RT_USING_NANO macro
  * 2023-12-18     xqyjlj       add rt_always_inline
  * 2023-12-22     Shell        Support hook list
+ * 2024-01-18     Shell        Separate basic types into rttypes.h
+ *                             Separate the compiler porting into rtcompiler.h
  */
 
 #ifndef __RT_DEF_H__
@@ -96,71 +98,7 @@ extern "C" {
 
 
 /* RT-Thread basic data type definitions */
-typedef int                             rt_bool_t;      /**< boolean type */
-typedef signed long                     rt_base_t;      /**< Nbit CPU related date type */
-typedef unsigned long                   rt_ubase_t;     /**< Nbit unsigned CPU related data type */
-
-#ifndef RT_USING_ARCH_DATA_TYPE
-#ifdef RT_USING_LIBC
-typedef int8_t                          rt_int8_t;      /**<  8bit integer type */
-typedef int16_t                         rt_int16_t;     /**< 16bit integer type */
-typedef int32_t                         rt_int32_t;     /**< 32bit integer type */
-typedef uint8_t                         rt_uint8_t;     /**<  8bit unsigned integer type */
-typedef uint16_t                        rt_uint16_t;    /**< 16bit unsigned integer type */
-typedef uint32_t                        rt_uint32_t;    /**< 32bit unsigned integer type */
-typedef int64_t                         rt_int64_t;     /**< 64bit integer type */
-typedef uint64_t                        rt_uint64_t;    /**< 64bit unsigned integer type */
-#else
-typedef signed   char                   rt_int8_t;      /**<  8bit integer type */
-typedef signed   short                  rt_int16_t;     /**< 16bit integer type */
-typedef signed   int                    rt_int32_t;     /**< 32bit integer type */
-typedef unsigned char                   rt_uint8_t;     /**<  8bit unsigned integer type */
-typedef unsigned short                  rt_uint16_t;    /**< 16bit unsigned integer type */
-typedef unsigned int                    rt_uint32_t;    /**< 32bit unsigned integer type */
-#ifdef ARCH_CPU_64BIT
-typedef signed long                     rt_int64_t;     /**< 64bit integer type */
-typedef unsigned long                   rt_uint64_t;    /**< 64bit unsigned integer type */
-#else
-typedef signed long long                rt_int64_t;     /**< 64bit integer type */
-typedef unsigned long long              rt_uint64_t;    /**< 64bit unsigned integer type */
-#endif /* ARCH_CPU_64BIT */
-#endif /* RT_USING_LIBC */
-#endif /* RT_USING_ARCH_DATA_TYPE */
-
-#if defined(RT_USING_LIBC) && !defined(RT_USING_NANO)
-typedef size_t                          rt_size_t;      /**< Type for size number */
-typedef ssize_t                         rt_ssize_t;     /**< Used for a count of bytes or an error indication */
-#else
-typedef rt_ubase_t                      rt_size_t;      /**< Type for size number */
-typedef rt_base_t                       rt_ssize_t;     /**< Used for a count of bytes or an error indication */
-#endif /* defined(RT_USING_LIBC) && !defined(RT_USING_NANO) */
-
-typedef rt_base_t                       rt_err_t;       /**< Type for error number */
-typedef rt_uint32_t                     rt_time_t;      /**< Type for time stamp */
-typedef rt_uint32_t                     rt_tick_t;      /**< Type for tick count */
-typedef rt_base_t                       rt_flag_t;      /**< Type for flags */
-typedef rt_ubase_t                      rt_dev_t;       /**< Type for device */
-typedef rt_base_t                       rt_off_t;       /**< Type for offset */
-
-#ifdef __cplusplus
-    typedef rt_base_t rt_atomic_t;
-#else
-#if defined(RT_USING_HW_ATOMIC)
-    typedef rt_base_t rt_atomic_t;
-#elif defined(RT_USING_STDC_ATOMIC)
-    #include <stdatomic.h>
-    typedef atomic_size_t rt_atomic_t;
-#else
-    typedef rt_base_t rt_atomic_t;
-#endif /* RT_USING_STDC_ATOMIC */
-#endif /* __cplusplus */
-
-/* boolean type definitions */
-#define RT_TRUE                         1               /**< boolean true  */
-#define RT_FALSE                        0               /**< boolean fails */
-
-/* null pointer definition */
-#define RT_NULL                         0
+#include "rttypes.h"
 
 /**@}*/
 
@@ -194,90 +132,7 @@ typedef rt_base_t                       rt_off_t;       /**< Type for offset */
 #define RT_STATIC_ASSERT(name, expn) typedef char _static_assert_##name[(expn)?1:-1]
 
 /* Compiler Related Definitions */
-#if defined(__ARMCC_VERSION)        /* ARM Compiler */
-#define rt_section(x)               __attribute__((section(x)))
-#define rt_used                     __attribute__((used))
-#define rt_align(n)                 __attribute__((aligned(n)))
-#define rt_weak                     __attribute__((weak))
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static __inline
-#define rt_always_inline            rt_inline
-#elif defined (__IAR_SYSTEMS_ICC__) /* for IAR Compiler */
-#define rt_section(x)               @ x
-#define rt_used                     __root
-#define PRAGMA(x)                   _Pragma(#x)
-#define rt_align(n)                 PRAGMA(data_alignment=n)
-#define rt_weak                     __weak
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static inline
-#define rt_always_inline            rt_inline
-#elif defined (__GNUC__)            /* GNU GCC Compiler */
-#define __RT_STRINGIFY(x...)        #x
-#define RT_STRINGIFY(x...)          __RT_STRINGIFY(x)
-#define rt_section(x)               __attribute__((section(x)))
-#define rt_used                     __attribute__((used))
-#define rt_align(n)                 __attribute__((aligned(n)))
-#define rt_weak                     __attribute__((weak))
-#define rt_typeof                   __typeof__
-#define rt_noreturn                 __attribute__ ((noreturn))
-#define rt_inline                   static __inline
-#define rt_always_inline            static inline __attribute__((always_inline))
-#elif defined (__ADSPBLACKFIN__)    /* for VisualDSP++ Compiler */
-#define rt_section(x)               __attribute__((section(x)))
-#define rt_used                     __attribute__((used))
-#define rt_align(n)                 __attribute__((aligned(n)))
-#define rt_weak                     __attribute__((weak))
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static inline
-#define rt_always_inline            rt_inline
-#elif defined (_MSC_VER)            /* for Visual Studio Compiler */
-#define rt_section(x)
-#define rt_used
-#define rt_align(n)                 __declspec(align(n))
-#define rt_weak
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static __inline
-#define rt_always_inline            rt_inline
-#elif defined (__TI_COMPILER_VERSION__) /* for TI CCS Compiler */
-/**
- * The way that TI compiler set section is different from other(at least
- * GCC and MDK) compilers. See ARM Optimizing C/C++ Compiler 5.9.3 for more
- * details.
- */
-#define rt_section(x)               __attribute__((section(x)))
-#ifdef __TI_EABI__
-#define rt_used                     __attribute__((retain)) __attribute__((used))
-#else
-#define rt_used                     __attribute__((used))
-#endif
-#define PRAGMA(x)                   _Pragma(#x)
-#define rt_align(n)                 __attribute__((aligned(n)))
-#ifdef __TI_EABI__
-#define rt_weak                     __attribute__((weak))
-#else
-#define rt_weak
-#endif
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static inline
-#define rt_always_inline            rt_inline
-#elif defined (__TASKING__)         /* for TASKING Compiler */
-#define rt_section(x)               __attribute__((section(x)))
-#define rt_used                     __attribute__((used, protect))
-#define PRAGMA(x)                   _Pragma(#x)
-#define rt_align(n)                 __attribute__((__align(n)))
-#define rt_weak                     __attribute__((weak))
-#define rt_typeof                   typeof
-#define rt_noreturn
-#define rt_inline                   static inline
-#define rt_always_inline            rt_inline
-#else                              /* Unkown Compiler */
-    #error not supported tool chain
-#endif /* __ARMCC_VERSION */
+#include "rtcompiler.h"
 
 /* initialization export */
 #ifdef RT_USING_COMPONENTS_INIT
@@ -417,6 +272,8 @@ typedef int (*init_fn_t)(void);
 #define RT_EPERM                        EPERM           /**< Operation not permitted */
 #define RT_EFAULT                       EFAULT          /**< Bad address */
 #define RT_ENOBUFS                      ENOBUFS         /**< No buffer space is available */
+#define RT_ESCHEDISR                    253             /**< scheduler failure in isr context */
+#define RT_ESCHEDLOCKED                 252             /**< scheduler failure in critical region */
 #define RT_ETRAP                        254             /**< Trap event */
 #else
 #define RT_EOK                          0               /**< There is no error */
@@ -436,6 +293,8 @@ typedef int (*init_fn_t)(void);
 #define RT_ETRAP                        14              /**< Trap event */
 #define RT_EFAULT                       15              /**< Bad address */
 #define RT_ENOBUFS                      16              /**< No buffer space is available */
+#define RT_ESCHEDISR                    17              /**< scheduler failure in isr context */
+#define RT_ESCHEDLOCKED                 18              /**< scheduler failure in critical region */
 #endif /* defined(RT_USING_LIBC) && !defined(RT_USING_NANO) */
 
 /**@}*/
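
RT_ESCHEDISR and RT_ESCHEDLOCKED let blocking primitives fail fast in
contexts where sleeping is illegal. The semantics below are inferred from
the names and the descriptions above, not verified against every caller:

    static void take_or_report(rt_sem_t sem)
    {
        rt_err_t err = rt_sem_take(sem, RT_WAITING_FOREVER);

        if (err == -RT_ESCHEDISR)         /* in ISR: cannot sleep */
            rt_kprintf("blocking take in interrupt context\n");
        else if (err == -RT_ESCHEDLOCKED) /* scheduler locked: cannot yield */
            rt_kprintf("blocking take inside a critical region\n");
    }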
@@ -469,53 +328,6 @@ typedef int (*init_fn_t)(void);
  */
 #define RT_ALIGN_DOWN(size, align)      ((size) & ~((align) - 1))
 
-/**
- * Double List structure
- */
-struct rt_list_node
-{
-    struct rt_list_node *next;                          /**< point to next node. */
-    struct rt_list_node *prev;                          /**< point to prev node. */
-};
-typedef struct rt_list_node rt_list_t;                  /**< Type for lists. */
-
-/**
- * Single List structure
- */
-struct rt_slist_node
-{
-    struct rt_slist_node *next;                         /**< point to next node. */
-};
-typedef struct rt_slist_node rt_slist_t;                /**< Type for single list. */
-
-#ifdef RT_USING_SMP
-#include <cpuport.h> /* for spinlock from arch */
-
-struct rt_spinlock
-{
-    rt_hw_spinlock_t lock;
-#if defined(RT_DEBUGING_SPINLOCK)
-    void *owner;
-    void *pc;
-#endif /* RT_DEBUGING_SPINLOCK */
-};
-typedef struct rt_spinlock rt_spinlock_t;
-
-#ifndef RT_SPINLOCK_INIT
-#define RT_SPINLOCK_INIT {{0}} // default
-#endif /* RT_SPINLOCK_INIT */
-
-#else
-typedef rt_ubase_t rt_spinlock_t;
-struct rt_spinlock
-{
-    rt_spinlock_t lock;
-};
-#define RT_SPINLOCK_INIT {0}
-#endif /* RT_USING_SMP */
-
-#define RT_DEFINE_SPINLOCK(x)  struct rt_spinlock x = RT_SPINLOCK_INIT
-
 /**
  * @addtogroup KernelObject
  */
@@ -770,6 +582,8 @@ struct rt_object_information
 
 #define RT_TIMER_FLAG_HARD_TIMER        0x0             /**< hard timer,the timer's callback function will be called in tick isr. */
 #define RT_TIMER_FLAG_SOFT_TIMER        0x4             /**< soft timer,the timer's callback function will be called in timer thread. */
+#define RT_TIMER_FLAG_THREAD_TIMER \
+    (0x8 | RT_TIMER_FLAG_HARD_TIMER)                    /**< thread timer that cooperates with the scheduler directly */
 
 #define RT_TIMER_CTRL_SET_TIME          0x0             /**< set timer control command */
 #define RT_TIMER_CTRL_GET_TIME          0x1             /**< get timer control command */
@@ -791,6 +605,11 @@ struct rt_object_information
 #define RT_TIMER_SKIP_LIST_MASK         0x3             /**< Timer skips the list mask */
 #endif
 
+/**
+ * timeout handler of rt_timer
+ */
+typedef void (*rt_timer_func_t)(void *parameter);
+
 /**
  * timer structure
  */
@@ -800,8 +619,8 @@ struct rt_timer
 
     rt_list_t        row[RT_TIMER_SKIP_LIST_LEVEL];
 
-    void (*timeout_func)(void *parameter);              /**< timeout function */
-    void            *parameter;                         /**< timeout function's parameter */
+    rt_timer_func_t  timeout_func;                      /**< timeout function */
+    void             *parameter;                        /**< timeout function's parameter */
 
     rt_tick_t        init_tick;                         /**< timer timeout tick */
     rt_tick_t        timeout_tick;                      /**< timeout tick */
@@ -901,30 +720,43 @@ struct rt_cpu_usage_stats
 };
 typedef struct rt_cpu_usage_stats *rt_cpu_usage_stats_t;
 
+#define _SCHEDULER_CONTEXT(fields) fields
+
 /**
  * CPUs definitions
  *
  */
 struct rt_cpu
 {
-    struct rt_thread            *current_thread;
-    struct rt_thread            *idle_thread;
-    rt_atomic_t                 irq_nest;
-    rt_uint8_t                  irq_switch_flag;
+    /**
+     * protected by:
+     *   - other cores: accessing from other cores is undefined behaviour
+     *   - local core: rt_enter_critical()/rt_exit_critical()
+     */
+    _SCHEDULER_CONTEXT(
+        struct rt_thread        *current_thread;
+
+        rt_uint8_t              irq_switch_flag:1;
+        rt_uint8_t              critical_switch_flag:1;
+        rt_uint8_t              sched_lock_flag:1;
+
+        rt_uint8_t              current_priority;
+        rt_list_t               priority_table[RT_THREAD_PRIORITY_MAX];
+    #if RT_THREAD_PRIORITY_MAX > 32
+        rt_uint32_t             priority_group;
+        rt_uint8_t              ready_table[32];
+    #else
+        rt_uint32_t             priority_group;
+    #endif /* RT_THREAD_PRIORITY_MAX > 32 */
 
-    rt_uint8_t                  current_priority;
-    rt_list_t                   priority_table[RT_THREAD_PRIORITY_MAX];
-#if RT_THREAD_PRIORITY_MAX > 32
-    rt_uint32_t                 priority_group;
-    rt_uint8_t                  ready_table[32];
-#else
-    rt_uint32_t                 priority_group;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+        rt_atomic_t             tick;   /**< Ticks elapsed on this core */
+    );
 
-    rt_atomic_t                 tick;
+    struct rt_thread            *idle_thread;
+    rt_atomic_t                 irq_nest;
 
-    struct rt_spinlock          spinlock;
 #ifdef RT_USING_SMART
+    struct rt_spinlock          spinlock;
     struct rt_cpu_usage_stats   cpu_stat;
 #endif
 };
@@ -1013,11 +845,12 @@ typedef void (*rt_thread_cleanup_t)(struct rt_thread *tid);
 /**
  * Thread structure
  */
+
+#include "rtsched.h" /* for struct rt_sched_thread_ctx */
+
 struct rt_thread
 {
     struct rt_object            parent;
-    rt_list_t                   tlist;                  /**< the thread list */
-    rt_list_t                   tlist_schedule;         /**< the thread list */
 
     /* stack point and entry */
     void                        *sp;                    /**< stack point */
@@ -1029,24 +862,13 @@ struct rt_thread
     /* error code */
     rt_err_t                    error;                  /**< error code */
 
-    rt_uint8_t                  stat;                   /**< thread status */
-
 #ifdef RT_USING_SMP
-    rt_uint8_t                  bind_cpu;               /**< thread is bind to cpu */
-    rt_uint8_t                  oncpu;                  /**< process on cpu */
-
     rt_atomic_t                 cpus_lock_nest;         /**< cpus lock count */
-    rt_atomic_t                 critical_lock_nest;     /**< critical lock count */
-#endif /*RT_USING_SMP*/
-
-    /* priority */
-    rt_uint8_t                  current_priority;       /**< current priority */
-    rt_uint8_t                  init_priority;          /**< initialized priority */
-#if RT_THREAD_PRIORITY_MAX > 32
-    rt_uint8_t                  number;
-    rt_uint8_t                  high_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-    rt_uint32_t                 number_mask;            /**< priority number mask */
+#endif
+
+    RT_SCHED_THREAD_CTX;
+    struct rt_timer             thread_timer;           /**< built-in thread timer */
+    rt_thread_cleanup_t         cleanup;                /**< cleanup function when thread exit */
 
 #ifdef RT_USING_MUTEX
     /* object for IPC */
@@ -1071,9 +893,6 @@ struct rt_thread
     void                        *si_list;               /**< the signal infor list */
 #endif /* RT_USING_SIGNALS */
 
-    rt_atomic_t                 init_tick;              /**< thread's initialized tick */
-    rt_atomic_t                 remaining_tick;         /**< remaining tick */
-
 #ifdef RT_USING_CPU_USAGE
     rt_uint64_t                 duration_tick;          /**< cpu usage tick */
 #endif /* RT_USING_CPU_USAGE */
@@ -1082,10 +901,6 @@ struct rt_thread
     void                        *pthread_data;          /**< the handle of pthread data, adapt 32/64bit */
 #endif /* RT_USING_PTHREADS */
 
-    struct rt_timer             thread_timer;           /**< built-in thread timer */
-
-    rt_thread_cleanup_t         cleanup;                /**< cleanup function when thread exit */
-
     /* light weight process if present */
 #ifdef RT_USING_SMART
     void                        *msg_ret;               /**< the return msg */
@@ -1100,11 +915,12 @@ struct rt_thread
 
     struct lwp_thread_signal    signal;                 /**< lwp signal for user-space thread */
     struct rt_user_context      user_ctx;               /**< user space context */
-    struct rt_wakeup            wakeup;                 /**< wakeup data */
-    int                         exit_request;           /**< pending exit request of thread */
+    struct rt_wakeup            wakeup_handle;          /**< wakeup handle for IPC */
+    rt_atomic_t                 exit_request;           /**< pending exit request of thread */
     int                         tid;                    /**< thread ID used by process */
     int                         tid_ref_count;          /**< reference of tid */
     void                        *susp_recycler;         /**< suspended recycler on this thread */
+    void                        *robust_list;           /**< PI lock; be very careful, it's a userspace list! */
 
     rt_uint64_t                 user_time;
     rt_uint64_t                 system_time;
@@ -1167,7 +983,7 @@ struct rt_ipc_object
 {
     struct rt_object parent;                            /**< inherit from rt_object */
 
-    rt_list_t        suspend_thread;                    /**< threads pended on this resource */
+    rt_list_t suspend_thread;                 /**< threads pended on this resource */
 };
 
 #ifdef RT_USING_SEMAPHORE
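
For orientation, a minimal sketch (not part of the commit) of how the new rt_timer_func_t typedef lines up with the existing timer API. RT_TIMER_FLAG_THREAD_TIMER is kernel-internal (it cooperates with the scheduler directly), so the sketch sticks to the ordinary soft-timer flag; demo_timeout, demo_timer and the 10-tick period are illustrative only:

#include <rtthread.h>

/* illustrative callback matching the new rt_timer_func_t signature */
static void demo_timeout(void *parameter)
{
    rt_kprintf("timer fired, parameter=%p\n", parameter);
}

static struct rt_timer demo_timer;

static void demo_timer_setup(void)
{
    rt_timer_func_t fn = demo_timeout;  /* type-checks against the typedef */

    rt_timer_init(&demo_timer, "demo", fn, RT_NULL, 10 /* ticks */,
                  RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_SOFT_TIMER);
    rt_timer_start(&demo_timer);
}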

+ 172 - 0
include/rtsched.h

@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2023-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-19     Shell        Separate scheduling statements from rt_thread_t
+ *                             to rt_sched_thread_ctx. Add scheduler definitions.
+ */
+#ifndef __RT_SCHED_H__
+#define __RT_SCHED_H__
+
+#include "rttypes.h"
+#include "rtcompiler.h"
+
+struct rt_thread;
+
+typedef rt_uint8_t rt_sched_thread_status_t;
+
+#ifdef RT_USING_SCHED_THREAD_CTX
+
+/**
+ * Scheduler-private status bound to a thread. Callers should never access
+ * these members.
+ */
+struct rt_sched_thread_priv
+{
+    rt_tick_t                   init_tick;              /**< thread's initialized tick */
+    rt_tick_t                   remaining_tick;         /**< remaining tick */
+
+    /* priority */
+    rt_uint8_t                  current_priority;       /**< current priority */
+    rt_uint8_t                  init_priority;          /**< initialized priority */
+#if RT_THREAD_PRIORITY_MAX > 32
+    rt_uint8_t                  number;                 /**< priority low number */
+    rt_uint8_t                  high_mask;              /**< priority high mask */
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+    rt_uint32_t                 number_mask;            /**< priority number mask */
+
+};
+
+/**
+ * Scheduler-public status bound to a thread. The caller must hold the
+ * scheduler lock before accessing any of its members.
+ */
+struct rt_sched_thread_ctx
+{
+    rt_list_t                   thread_list_node;       /**< node in thread list */
+
+    rt_uint8_t                  stat;                   /**< thread status */
+    rt_uint8_t                  sched_flag_locked:1;    /**< calling thread has the scheduler locked */
+    rt_uint8_t                  sched_flag_ttmr_set:1;  /**< thread timer is started */
+
+#ifdef RT_USING_SMP
+    rt_uint8_t                  bind_cpu;               /**< thread is bound to this CPU */
+    rt_uint8_t                  oncpu;                  /**< CPU the thread is running on */
+
+    rt_base_t                   critical_lock_nest;     /**< critical lock count */
+#endif
+
+    struct rt_sched_thread_priv sched_thread_priv;      /**< private context of scheduler */
+};
+
+#define RT_SCHED_THREAD_CTX struct rt_sched_thread_ctx sched_thread_ctx
+
+#define RT_SCHED_PRIV(thread) ((thread)->sched_thread_ctx.sched_thread_priv)
+#define RT_SCHED_CTX(thread) ((thread)->sched_thread_ctx)
+
+/**
+ * Convert a list node taken from RT_SCHED_CTX(thread).thread_list_node
+ * back to its thread pointer.
+ */
+#define RT_THREAD_LIST_NODE_ENTRY(node)                                      \
+    rt_container_of(                                                         \
+        rt_list_entry((node), struct rt_sched_thread_ctx, thread_list_node), \
+        struct rt_thread, sched_thread_ctx)
+#define RT_THREAD_LIST_NODE(thread) (RT_SCHED_CTX(thread).thread_list_node)
+
+#else /* !defined(RT_USING_SCHED_THREAD_CTX) */
+
+#if RT_THREAD_PRIORITY_MAX > 32
+#define _RT_SCHED_THREAD_CTX_PRIO_EXT                 \
+    rt_uint8_t number;    /**< priority low number */ \
+    rt_uint8_t high_mask; /**< priority high mask */
+
+#else /* ! RT_THREAD_PRIORITY_MAX > 32 */
+
+#define _RT_SCHED_THREAD_CTX_PRIO_EXT
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+
+#define RT_SCHED_THREAD_CTX                                                    \
+    rt_list_t tlist;                    /**< node in thread list */            \
+    rt_uint8_t stat;                    /**< thread status */                  \
+    rt_uint8_t sched_flag_locked:1;                                            \
+            /**< calling thread has the scheduler locked */                    \
+    rt_uint8_t sched_flag_ttmr_set:1;   /**< thread timer is started */        \
+    rt_tick_t init_tick;                /**< thread's initialized tick */      \
+    rt_tick_t remaining_tick;           /**< remaining tick */                 \
+    rt_uint8_t current_priority;        /**< current priority */               \
+    rt_uint8_t init_priority;           /**< initialized priority */           \
+    _RT_SCHED_THREAD_CTX_PRIO_EXT;                                             \
+    rt_uint32_t number_mask; /**< priority number mask */
+
+#define RT_SCHED_PRIV(thread) (*thread)
+#define RT_SCHED_CTX(thread) (*thread)
+
+/**
+ * Convert a list node taken from the thread's tlist member back to its
+ * thread pointer.
+ */
+#define RT_THREAD_LIST_NODE_ENTRY(node) rt_list_entry((node), struct rt_thread, tlist)
+#define RT_THREAD_LIST_NODE(thread) (RT_SCHED_CTX(thread).tlist)
+
+#endif /* RT_USING_SCHED_THREAD_CTX */
+
+/**
+ * System Scheduler Locking
+ */
+
+typedef rt_ubase_t rt_sched_lock_level_t;
+
+rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl);
+rt_err_t rt_sched_unlock(rt_sched_lock_level_t level);
+rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level);
+
+rt_bool_t rt_sched_is_locked(void);
+
+#ifdef RT_USING_SMP
+#define RT_SCHED_DEBUG_IS_LOCKED do { RT_ASSERT(rt_sched_is_locked()); } while (0)
+#define RT_SCHED_DEBUG_IS_UNLOCKED do { RT_ASSERT(!rt_sched_is_locked()); } while (0)
+
+#else /* !RT_USING_SMP */
+
+#define RT_SCHED_DEBUG_IS_LOCKED
+#define RT_SCHED_DEBUG_IS_UNLOCKED
+#endif /* RT_USING_SMP */
+
+/**
+ * NOTE: users should NEVER call these APIs directly. Use the rt_thread_*
+ * or IPC methods instead.
+ */
+#if defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__)
+
+/* thread initialization and startup routine */
+void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
+void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
+void rt_sched_thread_startup(struct rt_thread *thread);
+
+/* scheduler related routine */
+void rt_sched_post_ctx_switch(struct rt_thread *thread);
+rt_err_t rt_sched_tick_increase(void);
+
+/* thread status operation */
+rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread);
+rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread);
+rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread);
+rt_err_t rt_sched_thread_yield(struct rt_thread *thread);
+rt_err_t rt_sched_thread_close(struct rt_thread *thread);
+rt_err_t rt_sched_thread_ready(struct rt_thread *thread);
+rt_err_t rt_sched_thread_suspend(struct rt_thread *thread, rt_sched_lock_level_t level);
+rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority);
+rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu);
+rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread);
+rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread);
+rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread);
+void rt_sched_insert_thread(struct rt_thread *thread);
+void rt_sched_remove_thread(struct rt_thread *thread);
+
+#endif /* defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__) */
+
+#endif /* __RT_SCHED_H__ */
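
To make the locking contract concrete, a hedged sketch of how kernel-internal code would use the new scheduler lock; wake_if_suspended is hypothetical, and application code should keep to the rt_thread_* and IPC methods as the note above says:

/* expose the kernel-internal prototypes guarded above (illustrative) */
#define __RT_KERNEL_SOURCE__
#include <rtthread.h>

static void wake_if_suspended(struct rt_thread *thread)
{
    rt_sched_lock_level_t slvl;

    rt_sched_lock(&slvl);                /* take the scheduler lock */
    if (rt_sched_thread_is_suspended(thread))
    {
        rt_sched_thread_ready(thread);   /* move it onto the ready queue */
        rt_sched_unlock_n_resched(slvl); /* release and reschedule at once */
    }
    else
    {
        rt_sched_unlock(slvl);           /* nothing to do, plain release */
    }
}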

+ 26 - 7
include/rtthread.h

@@ -21,6 +21,7 @@
  * 2023-06-30     ChuShicheng  move debug check from the rtdebug.h
  * 2023-10-16     Shell        Support a new backtrace framework
  * 2023-12-10     xqyjlj       fix spinlock in up
+ * 2024-01-25     Shell        Add rt_susp_list for IPC primitives
  */
 
 #ifndef __RT_THREAD_H__
@@ -43,7 +44,7 @@ extern "C" {
 #endif
 
 #ifdef __GNUC__
-int entry(void); 
+int entry(void);
 #endif
 
 /**
@@ -173,7 +174,6 @@ rt_err_t rt_thread_resume(rt_thread_t thread);
 rt_err_t rt_thread_wakeup(rt_thread_t thread);
 void rt_thread_wakeup_set(struct rt_thread *thread, rt_wakeup_func_t func, void* user_data);
 #endif /* RT_USING_SMART */
-void rt_thread_timeout(void *parameter);
 rt_err_t rt_thread_get_name(rt_thread_t thread, char *name, rt_uint8_t name_size);
 #ifdef RT_USING_SIGNALS
 void rt_thread_alloc_sig(rt_thread_t tid);
@@ -212,11 +212,10 @@ void rt_system_scheduler_start(void);
 
 void rt_schedule(void);
 void rt_scheduler_do_irq_switch(void *context);
-void rt_schedule_insert_thread(struct rt_thread *thread);
-void rt_schedule_remove_thread(struct rt_thread *thread);
 
-void rt_enter_critical(void);
+rt_base_t rt_enter_critical(void);
 void rt_exit_critical(void);
+void rt_exit_critical_safe(rt_base_t critical_level);
 rt_uint16_t rt_critical_level(void);
 
 #ifdef RT_USING_HOOK
@@ -368,6 +367,26 @@ void rt_slab_free(rt_slab_t m, void *ptr);
  * @{
  */
 
+/**
+ * Suspend list - a basic building block for IPC primitives that interact
+ *                with the scheduler directly. Its API resembles a FIFO list.
+ *
+ * Note: do not use directly in application code
+ */
+void rt_susp_list_print(rt_list_t *list);
+/* preserve the thread's error code while resuming it */
+#define RT_THREAD_RESUME_RES_THR_ERR (-1)
+struct rt_thread *rt_susp_list_dequeue(rt_list_t *susp_list, rt_err_t thread_error);
+rt_err_t rt_susp_list_resume_all(rt_list_t *susp_list, rt_err_t thread_error);
+rt_err_t rt_susp_list_resume_all_irq(rt_list_t *susp_list,
+                                     rt_err_t thread_error,
+                                     struct rt_spinlock *lock);
+
+/* suspend and enqueue */
+rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int ipc_flags, int suspend_flag);
+/* only for a suspended thread, and caller must hold the scheduler lock */
+rt_err_t rt_susp_list_enqueue(rt_list_t *susp_list, rt_thread_t thread, int ipc_flags);
+
 #ifdef RT_USING_SEMAPHORE
 /*
  * semaphore interface
@@ -725,11 +744,11 @@ int rt_snprintf(char *buf, rt_size_t size, const char *format, ...);
 #if defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE)
 rt_device_t rt_console_set_device(const char *name);
 rt_device_t rt_console_get_device(void);
-#ifdef RT_USING_THREDSAFE_PRINTF
+#ifdef RT_USING_THREADSAFE_PRINTF
     rt_thread_t rt_console_current_user(void);
 #else
     rt_inline void *rt_console_current_user(void) { return RT_NULL; }
-#endif /* RT_USING_THREDSAFE_PRINTF */
+#endif /* RT_USING_THREADSAFE_PRINTF */
 #endif /* defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE) */
 
 rt_err_t rt_get_errno(void);
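
A hedged sketch of the usage pattern these suspend-list helpers appear designed for; struct my_ipc and both functions are hypothetical, not part of the commit:

#include <rtthread.h>

struct my_ipc
{
    struct rt_spinlock lock;
    rt_list_t          susp_list;   /* FIFO of waiting threads */
};

static rt_err_t my_ipc_wait(struct my_ipc *ipc)
{
    rt_thread_t self  = rt_thread_self();
    rt_base_t   level = rt_spin_lock_irqsave(&ipc->lock);
    rt_err_t    err;

    /* park the caller on the suspend list in FIFO order */
    err = rt_thread_suspend_to_list(self, &ipc->susp_list,
                                    RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);
    rt_spin_unlock_irqrestore(&ipc->lock, level);

    if (err == RT_EOK)
    {
        rt_schedule();  /* give up the CPU until someone resumes us */
    }
    return err;
}

static void my_ipc_post(struct my_ipc *ipc)
{
    rt_base_t level = rt_spin_lock_irqsave(&ipc->lock);

    /* resume the first waiter, if any, handing it RT_EOK as its error */
    rt_susp_list_dequeue(&ipc->susp_list, RT_EOK);
    rt_spin_unlock_irqrestore(&ipc->lock, level);
}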

+ 223 - 0
include/rttypes.h

@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-18     Shell        Separate the basic types from rtdef.h
+ */
+
+#ifndef __RT_TYPES_H__
+#define __RT_TYPES_H__
+
+#include <rtconfig.h>
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdarg.h>
+#ifndef RT_USING_NANO
+#include <sys/types.h>
+#include <sys/errno.h>
+#if defined(RT_USING_SIGNALS) || defined(RT_USING_SMART)
+#include <sys/signal.h>
+#endif /* defined(RT_USING_SIGNALS) || defined(RT_USING_SMART) */
+#endif /* RT_USING_NANO */
+
+/**
+ * RT-Thread basic data types definition
+ */
+
+typedef int                             rt_bool_t;      /**< boolean type */
+typedef signed long                     rt_base_t;      /**< Nbit CPU related date type */
+typedef unsigned long                   rt_ubase_t;     /**< Nbit unsigned CPU related data type */
+
+#ifndef RT_USING_ARCH_DATA_TYPE
+#ifdef RT_USING_LIBC
+typedef int8_t                          rt_int8_t;      /**<  8bit integer type */
+typedef int16_t                         rt_int16_t;     /**< 16bit integer type */
+typedef int32_t                         rt_int32_t;     /**< 32bit integer type */
+typedef uint8_t                         rt_uint8_t;     /**<  8bit unsigned integer type */
+typedef uint16_t                        rt_uint16_t;    /**< 16bit unsigned integer type */
+typedef uint32_t                        rt_uint32_t;    /**< 32bit unsigned integer type */
+typedef int64_t                         rt_int64_t;     /**< 64bit integer type */
+typedef uint64_t                        rt_uint64_t;    /**< 64bit unsigned integer type */
+#else
+typedef signed   char                   rt_int8_t;      /**<  8bit integer type */
+typedef signed   short                  rt_int16_t;     /**< 16bit integer type */
+typedef signed   int                    rt_int32_t;     /**< 32bit integer type */
+typedef unsigned char                   rt_uint8_t;     /**<  8bit unsigned integer type */
+typedef unsigned short                  rt_uint16_t;    /**< 16bit unsigned integer type */
+typedef unsigned int                    rt_uint32_t;    /**< 32bit unsigned integer type */
+#ifdef ARCH_CPU_64BIT
+typedef signed long                     rt_int64_t;     /**< 64bit integer type */
+typedef unsigned long                   rt_uint64_t;    /**< 64bit unsigned integer type */
+#else
+typedef signed long long                rt_int64_t;     /**< 64bit integer type */
+typedef unsigned long long              rt_uint64_t;    /**< 64bit unsigned integer type */
+#endif /* ARCH_CPU_64BIT */
+#endif /* RT_USING_LIBC */
+#endif /* RT_USING_ARCH_DATA_TYPE */
+
+#if defined(RT_USING_LIBC) && !defined(RT_USING_NANO)
+typedef size_t                          rt_size_t;      /**< Type for size number */
+typedef ssize_t                         rt_ssize_t;     /**< Used for a count of bytes or an error indication */
+#else
+typedef rt_ubase_t                      rt_size_t;      /**< Type for size number */
+typedef rt_base_t                       rt_ssize_t;     /**< Used for a count of bytes or an error indication */
+#endif /* defined(RT_USING_LIBC) && !defined(RT_USING_NANO) */
+
+typedef rt_base_t                       rt_err_t;       /**< Type for error number */
+typedef rt_uint32_t                     rt_time_t;      /**< Type for time stamp */
+typedef rt_uint32_t                     rt_tick_t;      /**< Type for tick count */
+typedef rt_base_t                       rt_flag_t;      /**< Type for flags */
+typedef rt_ubase_t                      rt_dev_t;       /**< Type for device */
+typedef rt_base_t                       rt_off_t;       /**< Type for offset */
+
+#ifdef __cplusplus
+typedef rt_base_t rt_atomic_t;
+#else
+#if defined(RT_USING_HW_ATOMIC)
+typedef rt_base_t rt_atomic_t;
+#elif defined(RT_USING_STDC_ATOMIC)
+#include <stdatomic.h>
+typedef atomic_size_t rt_atomic_t;
+#else
+typedef rt_base_t rt_atomic_t;
+#endif /* RT_USING_STDC_ATOMIC */
+#endif /* __cplusplus */
+
+/* boolean type definitions */
+#define RT_TRUE                         1               /**< boolean true  */
+#define RT_FALSE                        0               /**< boolean fails */
+
+/* null pointer definition */
+#define RT_NULL                         0
+
+/**
+ * Double List structure
+ */
+struct rt_list_node
+{
+    struct rt_list_node *next;                          /**< point to next node. */
+    struct rt_list_node *prev;                          /**< point to prev node. */
+};
+typedef struct rt_list_node rt_list_t;                  /**< Type for lists. */
+
+/**
+ * Single List structure
+ */
+struct rt_slist_node
+{
+    struct rt_slist_node *next;                         /**< point to next node. */
+};
+typedef struct rt_slist_node rt_slist_t;                /**< Type for single list. */
+
+/**
+ * Spinlock
+ */
+#ifdef RT_USING_SMP
+#include <cpuport.h> /* for spinlock from arch */
+
+struct rt_spinlock
+{
+    rt_hw_spinlock_t lock;
+#ifdef RT_USING_DEBUG
+    rt_uint32_t critical_level;
+#endif /* RT_USING_DEBUG */
+#if defined(RT_DEBUGING_SPINLOCK)
+    void *owner;
+    void *pc;
+#endif /* RT_DEBUGING_SPINLOCK */
+};
+
+#ifdef RT_DEBUGING_SPINLOCK
+
+#define __OWNER_MAGIC ((void *)0xdeadbeaf)
+
+#if defined(__GNUC__)
+#define __GET_RETURN_ADDRESS __builtin_return_address(0)
+#else
+#define __GET_RETURN_ADDRESS RT_NULL
+#endif
+
+#define _SPIN_LOCK_DEBUG_OWNER(lock)                  \
+    do                                                \
+    {                                                 \
+        struct rt_thread *_curthr = rt_thread_self(); \
+        if (_curthr != RT_NULL)                       \
+        {                                             \
+            (lock)->owner = _curthr;                  \
+            (lock)->pc = __GET_RETURN_ADDRESS;        \
+        }                                             \
+    } while (0)
+
+#define _SPIN_UNLOCK_DEBUG_OWNER(lock) \
+    do                                 \
+    {                                  \
+        (lock)->owner = __OWNER_MAGIC; \
+        (lock)->pc = RT_NULL;          \
+    } while (0)
+
+#else
+
+#define _SPIN_LOCK_DEBUG_OWNER(lock)
+#define _SPIN_UNLOCK_DEBUG_OWNER(lock)
+#endif
+
+#ifdef RT_USING_DEBUG
+
+#define _SPIN_LOCK_DEBUG_CRITICAL(lock)                   \
+    do                                                    \
+    {                                                     \
+        struct rt_thread *_curthr = rt_thread_self();     \
+        if (_curthr != RT_NULL)                           \
+        {                                                 \
+            (lock)->critical_level = rt_critical_level(); \
+        }                                                 \
+    } while (0)
+
+#define _SPIN_UNLOCK_DEBUG_CRITICAL(lock, critical) \
+    do                                              \
+    {                                               \
+        (critical) = (lock)->critical_level;        \
+    } while (0)
+
+#else
+
+#define _SPIN_LOCK_DEBUG_CRITICAL(lock)
+#define _SPIN_UNLOCK_DEBUG_CRITICAL(lock, critical) (critical = 0)
+#endif /* RT_USING_DEBUG */
+
+#define RT_SPIN_LOCK_DEBUG(lock)         \
+    do                                   \
+    {                                    \
+        _SPIN_LOCK_DEBUG_OWNER(lock);    \
+        _SPIN_LOCK_DEBUG_CRITICAL(lock); \
+    } while (0)
+
+#define RT_SPIN_UNLOCK_DEBUG(lock, critical)         \
+    do                                               \
+    {                                                \
+        _SPIN_UNLOCK_DEBUG_OWNER(lock);              \
+        _SPIN_UNLOCK_DEBUG_CRITICAL(lock, critical); \
+    } while (0)
+
+#ifndef RT_SPINLOCK_INIT
+#define RT_SPINLOCK_INIT {{0}} /* can be overridden by cpuport.h */
+#endif /* RT_SPINLOCK_INIT */
+
+#else
+
+struct rt_spinlock
+{
+    rt_ubase_t lock;
+};
+#define RT_SPINLOCK_INIT {0}
+#endif /* RT_USING_SMP */
+
+typedef struct rt_spinlock rt_spinlock_t;
+
+#define RT_DEFINE_SPINLOCK(x)  struct rt_spinlock x = RT_SPINLOCK_INIT
+
+#endif /* __RT_TYPES_H__ */
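
A minimal sketch of the spinlock conventions defined above; demo_lock and demo_counter are illustrative only:

#include <rtthread.h>

static RT_DEFINE_SPINLOCK(demo_lock);   /* statically initialized lock */
static rt_uint32_t demo_counter;

void demo_increment(void)
{
    /* IRQ-safe variant: masks local interrupts and enters a critical
     * section before taking the lock; with RT_USING_DEBUG the lock also
     * records the critical level for the paired unlock to restore */
    rt_base_t level = rt_spin_lock_irqsave(&demo_lock);

    demo_counter++;
    rt_spin_unlock_irqrestore(&demo_lock, level);
}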

+ 1 - 0
libcpu/aarch64/common/cpuport.h

@@ -13,6 +13,7 @@
 #define  CPUPORT_H__
 
 #include <armv8.h>
+#include <rtcompiler.h>
 #include <rtdef.h>
 
 #ifdef RT_USING_SMP

+ 1 - 1
libcpu/aarch64/cortex-a/entry_point.S

@@ -151,7 +151,7 @@ _secondary_cpu_entry:
 #else
     bl      rt_hw_cpu_id_set
     mrs     x0, tpidr_el1
-#endif
+#endif /* RT_USING_OFW */
 
     /* Set current cpu's stack top */
     sub     x0, x0, #1

+ 7 - 0
libcpu/arm/cortex-a/cpuport.c

@@ -103,4 +103,11 @@ int __rt_ffs(int value)
 }
 #endif
 
+rt_bool_t rt_hw_interrupt_is_disabled(void)
+{
+    int rc;
+    __asm__ volatile("mrs %0, cpsr" : "=r" (rc));
+    return !!(rc & 0x80);
+}
+
 /*@}*/
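
The new helper reads CPSR and tests bit 7 (the I bit), i.e. whether IRQs are masked on the local core. A hypothetical debug use, not from the commit:

#include <rtthread.h>

/* assert that a routine is only entered with local IRQs masked */
void must_run_with_irq_masked(void)
{
    RT_ASSERT(rt_hw_interrupt_is_disabled());
    /* ... work that relies on interrupts being off ... */
}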

+ 2 - 0
libcpu/arm/cortex-a/cpuport.h

@@ -10,6 +10,8 @@
 #ifndef  CPUPORT_H__
 #define  CPUPORT_H__
 
+#include <rtcompiler.h>
+
 /* the exception stack without VFP registers */
 struct rt_hw_exp_stack
 {

+ 5 - 0
libcpu/risc-v/virt64/cpuport.c

@@ -56,6 +56,11 @@ void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
     return (void *)sp;
 }
 
+int rt_hw_cpu_id(void)
+{
+    return 0;
+}
+
 /**
  * This function will initialize thread stack, we assuming
  * when scheduler restore this new thread, context will restore

+ 11 - 1
src/Kconfig

@@ -35,6 +35,7 @@ config RT_USING_SMART
     select RT_USING_POSIX_FS
     select RT_USING_POSIX_TERMIOS
     select RT_USING_KTIME
+    select RT_USING_STDC_ATOMIC
     depends on ARCH_ARM_CORTEX_M || ARCH_ARM_ARM9 || ARCH_ARM_CORTEX_A || ARCH_ARMV8 || ARCH_RISCV64
     depends on !RT_USING_NANO
     help
@@ -66,6 +67,7 @@ config RT_USING_AMP
 config RT_USING_SMP
     bool "Enable SMP (Symmetric multiprocessing)"
     default n
+    select RT_USING_SCHED_THREAD_CTX
     help
         This option should be selected by machines which have an SMP-
         capable CPU.
@@ -417,10 +419,18 @@ config RT_USING_INTERRUPT_INFO
     help
         Add name and counter information for interrupt trace.
 
-config RT_USING_THREDSAFE_PRINTF
+config RT_USING_THREADSAFE_PRINTF
     bool "Enable thread safe kernel print service"
     default y if RT_USING_SMP && RT_USING_SMART
 
+config RT_USING_SCHED_THREAD_CTX
+    bool "Using the scheduler thread context"
+    help
+        Use the scheduler thread context embedded in the thread object.
+        This option exists only for backward-compatible code and may
+        become mandatory in the future.
+    default y if RT_USING_SMP
+
 config RT_USING_CONSOLE
     bool "Using console for rt_kprintf"
     default y

+ 2 - 3
src/SConscript

@@ -26,8 +26,7 @@ if GetDepend('RT_USING_DEVICE') == False:
 
 if GetDepend('RT_USING_SMP') == False:
     SrcRemove(src, ['cpu.c', 'scheduler_mp.c'])
-
-if GetDepend('RT_USING_SMP') == True:
+else:
     SrcRemove(src, ['scheduler_up.c'])
 
 LOCAL_CFLAGS = ''
@@ -43,6 +42,6 @@ if rtconfig.PLATFORM in GetGCCLikePLATFORM():
         LOCAL_CFLAGS += ' -Wimplicit-fallthrough' # implicit fallthrough warning
         LOCAL_CFLAGS += ' -Wduplicated-cond -Wduplicated-branches' # duplicated condition warning
 
-group = DefineGroup('Kernel', src, depend=[''], CPPPATH=inc, CPPDEFINES=['__RTTHREAD__'], LOCAL_CFLAGS=LOCAL_CFLAGS)
+group = DefineGroup('Kernel', src, depend=[''], CPPPATH=inc, CPPDEFINES=['__RTTHREAD__'], LOCAL_CFLAGS=LOCAL_CFLAGS, LOCAL_CPPDEFINES=['__RT_KERNEL_SOURCE__'])
 
 Return('group')

+ 2 - 17
src/clock.c

@@ -85,34 +85,19 @@ void rt_tick_set(rt_tick_t tick)
  */
 void rt_tick_increase(void)
 {
-    struct rt_thread *thread;
-    rt_base_t level;
-    rt_atomic_t oldval = 0;
-
     RT_ASSERT(rt_interrupt_get_nest() > 0);
 
     RT_OBJECT_HOOK_CALL(rt_tick_hook, ());
     /* increase the global tick */
 #ifdef RT_USING_SMP
+    /* get the per-CPU data and increase its tick */
     rt_atomic_add(&(rt_cpu_self()->tick), 1);
 #else
     rt_atomic_add(&(rt_tick), 1);
 #endif /* RT_USING_SMP */
 
     /* check time slice */
-    thread = rt_thread_self();
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
-    rt_atomic_sub(&(thread->remaining_tick), 1);
-    if (rt_atomic_compare_exchange_strong(&(thread->remaining_tick), &oldval, thread->init_tick))
-    {
-        thread->stat |= RT_THREAD_STAT_YIELD;
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-        rt_schedule();
-    }
-    else
-    {
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-    }
+    rt_sched_tick_increase();
 
     /* check timer */
 #ifdef RT_USING_SMP

+ 29 - 48
src/cpu.c

@@ -8,6 +8,7 @@
  * 2018-10-30     Bernard      The first version
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2023-12-10     xqyjlj       spinlock should lock sched
+ * 2024-01-25     Shell        Using rt_exit_critical_safe
  */
 #include <rthw.h>
 #include <rtthread.h>
@@ -16,6 +17,10 @@
 #include <lwp.h>
 #endif
 
+#ifdef RT_USING_DEBUG
+rt_base_t _cpus_critical_level;
+#endif /* RT_USING_DEBUG */
+
 #ifdef RT_USING_SMP
 static struct rt_cpu _cpus[RT_CPUS_NR];
 rt_hw_spinlock_t _cpus_lock;
@@ -23,14 +28,6 @@ rt_hw_spinlock_t _cpus_lock;
 void *_cpus_lock_owner = 0;
 void *_cpus_lock_pc = 0;
 
-#define __OWNER_MAGIC ((void *)0xdeadbeaf)
-
-#if defined (__GNUC__)
-#define __GET_RETURN_ADDRESS __builtin_return_address(0)
-#else
-#define __GET_RETURN_ADDRESS RT_NULL
-#endif
-
 #endif /* RT_DEBUGING_SPINLOCK */
 
 /**
@@ -56,13 +53,7 @@ void rt_spin_lock(struct rt_spinlock *lock)
 {
     rt_enter_critical();
     rt_hw_spin_lock(&lock->lock);
-#if defined(RT_DEBUGING_SPINLOCK)
-    if (rt_cpu_self() != RT_NULL)
-    {
-        lock->owner = rt_cpu_self()->current_thread;
-    }
-    lock->pc = __GET_RETURN_ADDRESS;
-#endif /* RT_DEBUGING_SPINLOCK */
+    RT_SPIN_LOCK_DEBUG(lock);
 }
 RTM_EXPORT(rt_spin_lock)
 
@@ -73,12 +64,10 @@ RTM_EXPORT(rt_spin_lock)
  */
 void rt_spin_unlock(struct rt_spinlock *lock)
 {
+    rt_base_t critical_level;
+    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
     rt_hw_spin_unlock(&lock->lock);
-#if defined(RT_DEBUGING_SPINLOCK)
-    lock->owner = __OWNER_MAGIC;
-    lock->pc = RT_NULL;
-#endif /* RT_DEBUGING_SPINLOCK */
-    rt_exit_critical();
+    rt_exit_critical_safe(critical_level);
 }
 RTM_EXPORT(rt_spin_unlock)
 
@@ -99,13 +88,7 @@ rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
     level = rt_hw_local_irq_disable();
     rt_enter_critical();
     rt_hw_spin_lock(&lock->lock);
-#if defined(RT_DEBUGING_SPINLOCK)
-    if (rt_cpu_self() != RT_NULL)
-    {
-        lock->owner = rt_cpu_self()->current_thread;
-        lock->pc = __GET_RETURN_ADDRESS;
-    }
-#endif /* RT_DEBUGING_SPINLOCK */
+    RT_SPIN_LOCK_DEBUG(lock);
     return level;
 }
 RTM_EXPORT(rt_spin_lock_irqsave)
@@ -119,13 +102,12 @@ RTM_EXPORT(rt_spin_lock_irqsave)
  */
 void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
 {
-#if defined(RT_DEBUGING_SPINLOCK)
-    lock->owner = __OWNER_MAGIC;
-    lock->pc = RT_NULL;
-#endif /* RT_DEBUGING_SPINLOCK */
+    rt_base_t critical_level;
+
+    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
     rt_hw_spin_unlock(&lock->lock);
     rt_hw_local_irq_enable(level);
-    rt_exit_critical();
+    rt_exit_critical_safe(critical_level);
 }
 RTM_EXPORT(rt_spin_unlock_irqrestore)
 
@@ -162,7 +144,6 @@ rt_base_t rt_cpus_lock(void)
     struct rt_cpu* pcpu;
 
     level = rt_hw_local_irq_disable();
-    rt_enter_critical();
     pcpu = rt_cpu_self();
     if (pcpu->current_thread != RT_NULL)
     {
@@ -171,11 +152,16 @@ rt_base_t rt_cpus_lock(void)
         rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
         if (lock_nest == 0)
         {
+            rt_enter_critical();
             rt_hw_spin_lock(&_cpus_lock);
-#if defined(RT_DEBUGING_SPINLOCK)
+#ifdef RT_USING_DEBUG
+            _cpus_critical_level = rt_critical_level();
+#endif /* RT_USING_DEBUG */
+
+#ifdef RT_DEBUGING_SPINLOCK
             _cpus_lock_owner = pcpu->current_thread;
             _cpus_lock_pc = __GET_RETURN_ADDRESS;
-#endif
+#endif /* RT_DEBUGING_SPINLOCK */
         }
     }
 
@@ -194,6 +180,7 @@ void rt_cpus_unlock(rt_base_t level)
 
     if (pcpu->current_thread != RT_NULL)
     {
+        rt_base_t critical_level = 0;
         RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
         rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);
 
@@ -202,12 +189,16 @@ void rt_cpus_unlock(rt_base_t level)
 #if defined(RT_DEBUGING_SPINLOCK)
             _cpus_lock_owner = __OWNER_MAGIC;
             _cpus_lock_pc = RT_NULL;
-#endif
+#endif /* RT_DEBUGING_SPINLOCK */
+#ifdef RT_USING_DEBUG
+            critical_level = _cpus_critical_level;
+            _cpus_critical_level = 0;
+#endif /* RT_USING_DEBUG */
             rt_hw_spin_unlock(&_cpus_lock);
+            rt_exit_critical_safe(critical_level);
         }
     }
     rt_hw_local_irq_enable(level);
-    rt_exit_critical();
 }
 RTM_EXPORT(rt_cpus_unlock);
 
@@ -220,20 +211,10 @@ RTM_EXPORT(rt_cpus_unlock);
  */
 void rt_cpus_lock_status_restore(struct rt_thread *thread)
 {
-    struct rt_cpu* pcpu = rt_cpu_self();
-
 #if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
     lwp_aspace_switch(thread);
 #endif
-    if (pcpu->current_thread != RT_NULL )
-    {
-        rt_hw_spin_unlock(&(pcpu->current_thread->spinlock.lock));
-        if ((pcpu->current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
-        {
-            rt_schedule_insert_thread(pcpu->current_thread);
-        }
-    }
-    pcpu->current_thread = thread;
+    rt_sched_post_ctx_switch(thread);
 }
 RTM_EXPORT(rt_cpus_lock_status_restore);
 #endif /* RT_USING_SMP */
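
The pattern the commit adopts throughout cpu.c: capture the critical level when the lock is taken and restore to exactly that level on unlock, so unbalanced nesting can be detected instead of silently corrupting scheduler state. A hedged sketch using the updated API from rtthread.h:

#include <rtthread.h>

void demo_critical_region(void)
{
    /* rt_enter_critical() now returns the nesting level it produced */
    rt_base_t critical_level = rt_enter_critical();

    /* ... code that must not be preempted ... */

    /* exit back to exactly the recorded level, letting a mismatch in
     * between be caught rather than propagated */
    rt_exit_critical_safe(critical_level);
}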

+ 7 - 10
src/idle.c

@@ -146,7 +146,7 @@ void rt_thread_defunct_enqueue(rt_thread_t thread)
 {
     rt_base_t level;
     level = rt_spin_lock_irqsave(&_defunct_spinlock);
-    rt_list_insert_after(&_rt_thread_defunct, &thread->tlist);
+    rt_list_insert_after(&_rt_thread_defunct, &RT_THREAD_LIST_NODE(thread));
     rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
 #ifdef RT_USING_SMP
     rt_sem_release(&system_sem);
@@ -166,20 +166,16 @@ rt_thread_t rt_thread_defunct_dequeue(void)
     level = rt_spin_lock_irqsave(&_defunct_spinlock);
     if (l->next != l)
     {
-        thread = rt_list_entry(l->next,
-                struct rt_thread,
-                tlist);
-        rt_list_remove(&(thread->tlist));
+        thread = RT_THREAD_LIST_NODE_ENTRY(l->next);
+        rt_list_remove(&RT_THREAD_LIST_NODE(thread));
     }
     rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
 #else
     if (l->next != l)
     {
-        thread = rt_list_entry(l->next,
-                struct rt_thread,
-                tlist);
+        thread = RT_THREAD_LIST_NODE_ENTRY(l->next);
         level = rt_hw_interrupt_disable();
-        rt_list_remove(&(thread->tlist));
+        rt_list_remove(&RT_THREAD_LIST_NODE(thread));
         rt_hw_interrupt_enable(level);
     }
 #endif
@@ -308,9 +304,10 @@ static void rt_thread_system_entry(void *parameter)
 
     while (1)
     {
-        int ret= rt_sem_take(&system_sem, RT_WAITING_FOREVER);
+        int ret = rt_sem_take(&system_sem, RT_WAITING_FOREVER);
         if (ret != RT_EOK)
         {
+            rt_kprintf("failed to sem_take() error %d\n", ret);
             RT_ASSERT(0);
         }
         rt_defunct_execute();
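
The defunct-queue rework above is the template for any list that now links threads through RT_THREAD_LIST_NODE. An illustrative helper, not from the commit:

#include <rtthread.h>

/* peek the first thread on a list whose nodes are
 * RT_THREAD_LIST_NODE(thread), as the defunct queue above is */
static rt_thread_t first_thread_on(rt_list_t *list)
{
    rt_thread_t thread = RT_NULL;

    if (!rt_list_isempty(list))
    {
        thread = RT_THREAD_LIST_NODE_ENTRY(list->next);
    }
    return thread;
}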

File diff suppressed because it is too large
+ 364 - 235
src/ipc.c


+ 34 - 14
src/kservice.c

@@ -1500,14 +1500,21 @@ rt_weak void rt_hw_console_output(const char *str)
 }
 RTM_EXPORT(rt_hw_console_output);
 
-#ifdef RT_USING_THREDSAFE_PRINTF
+#ifdef RT_USING_THREADSAFE_PRINTF
 
-static struct rt_spinlock _pr_lock = RT_SPINLOCK_INIT;
-static struct rt_spinlock _prf_lock = RT_SPINLOCK_INIT;
+/* system console lock */
+static struct rt_spinlock _syscon_lock = RT_SPINLOCK_INIT;
+/* lock of kprintf buffer */
+static struct rt_spinlock _prbuf_lock = RT_SPINLOCK_INIT;
 /* current user of system console */
 static rt_thread_t _pr_curr_user;
+
+#ifdef RT_USING_DEBUG
+static rt_base_t _pr_critical_level;
+#endif /* RT_USING_DEBUG */
+
 /* nested level of current user */
-static int _pr_curr_user_nested;
+static volatile int _pr_curr_user_nested;
 
 rt_thread_t rt_console_current_user(void)
 {
@@ -1516,35 +1523,42 @@ rt_thread_t rt_console_current_user(void)
 
 static void _console_take(void)
 {
-    rt_ubase_t level = rt_spin_lock_irqsave(&_pr_lock);
+    rt_ubase_t level = rt_spin_lock_irqsave(&_syscon_lock);
     rt_thread_t self_thread = rt_thread_self();
+    rt_base_t critical_level;
+    RT_UNUSED(critical_level);
 
     while (_pr_curr_user != self_thread)
     {
         if (_pr_curr_user == RT_NULL)
         {
             /* no preemption is allowed to avoid dead lock */
-            rt_enter_critical();
+            critical_level = rt_enter_critical();
+#ifdef RT_USING_DEBUG
+            _pr_critical_level = _syscon_lock.critical_level;
+            _syscon_lock.critical_level = critical_level;
+#endif
             _pr_curr_user = self_thread;
             break;
         }
         else
         {
-            rt_spin_unlock_irqrestore(&_pr_lock, level);
+            rt_spin_unlock_irqrestore(&_syscon_lock, level);
             rt_thread_yield();
-            level = rt_spin_lock_irqsave(&_pr_lock);
+            level = rt_spin_lock_irqsave(&_syscon_lock);
         }
     }
 
     _pr_curr_user_nested++;
 
-    rt_spin_unlock_irqrestore(&_pr_lock, level);
+    rt_spin_unlock_irqrestore(&_syscon_lock, level);
 }
 
 static void _console_release(void)
 {
-    rt_ubase_t level = rt_spin_lock_irqsave(&_pr_lock);
+    rt_ubase_t level = rt_spin_lock_irqsave(&_syscon_lock);
     rt_thread_t self_thread = rt_thread_self();
+    RT_UNUSED(self_thread);
 
     RT_ASSERT(_pr_curr_user == self_thread);
 
@@ -1552,22 +1566,28 @@ static void _console_release(void)
     if (!_pr_curr_user_nested)
     {
         _pr_curr_user = RT_NULL;
+
+#ifdef RT_USING_DEBUG
+        rt_exit_critical_safe(_syscon_lock.critical_level);
+        _syscon_lock.critical_level = _pr_critical_level;
+#else
         rt_exit_critical();
+#endif
     }
-    rt_spin_unlock_irqrestore(&_pr_lock, level);
+    rt_spin_unlock_irqrestore(&_syscon_lock, level);
 }
 
 #define CONSOLE_TAKE          _console_take()
 #define CONSOLE_RELEASE       _console_release()
-#define PRINTF_BUFFER_TAKE    rt_ubase_t level = rt_spin_lock_irqsave(&_prf_lock)
-#define PRINTF_BUFFER_RELEASE rt_spin_unlock_irqrestore(&_prf_lock, level)
+#define PRINTF_BUFFER_TAKE    rt_ubase_t level = rt_spin_lock_irqsave(&_prbuf_lock)
+#define PRINTF_BUFFER_RELEASE rt_spin_unlock_irqrestore(&_prbuf_lock, level)
 #else
 
 #define CONSOLE_TAKE
 #define CONSOLE_RELEASE
 #define PRINTF_BUFFER_TAKE
 #define PRINTF_BUFFER_RELEASE
-#endif /* RT_USING_THREDSAFE_PRINTF */
+#endif /* RT_USING_THREADSAFE_PRINTF */
 
 /**
  * @brief This function will put string to the console.

+ 4 - 46
src/mempool.c

@@ -141,7 +141,6 @@ RTM_EXPORT(rt_mp_init);
  */
 rt_err_t rt_mp_detach(struct rt_mempool *mp)
 {
-    struct rt_thread *thread;
     rt_base_t level;
 
     /* parameter check */
@@ -151,21 +150,7 @@ rt_err_t rt_mp_detach(struct rt_mempool *mp)
 
     level = rt_spin_lock_irqsave(&(mp->spinlock));
     /* wake up all suspended threads */
-    while (!rt_list_isempty(&(mp->suspend_thread)))
-    {
-
-        /* get next suspend thread */
-        thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
-        /* set error code to -RT_ERROR */
-        thread->error = -RT_ERROR;
-
-        /*
-         * resume thread
-         * In rt_thread_resume function, it will remove current thread from
-         * suspend list
-         */
-        rt_thread_resume(thread);
-    }
+    rt_susp_list_resume_all(&mp->suspend_thread, RT_ERROR);
 
     /* detach object */
     rt_object_detach(&(mp->parent));
@@ -257,7 +242,6 @@ RTM_EXPORT(rt_mp_create);
  */
 rt_err_t rt_mp_delete(rt_mp_t mp)
 {
-    struct rt_thread *thread;
     rt_base_t level;
 
     RT_DEBUG_NOT_IN_INTERRUPT;
@@ -269,20 +253,7 @@ rt_err_t rt_mp_delete(rt_mp_t mp)
 
     level = rt_spin_lock_irqsave(&(mp->spinlock));
     /* wake up all suspended threads */
-    while (!rt_list_isempty(&(mp->suspend_thread)))
-    {
-        /* get next suspend thread */
-        thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
-        /* set error code to -RT_ERROR */
-        thread->error = -RT_ERROR;
-
-        /*
-         * resume thread
-         * In rt_thread_resume function, it will remove current thread from
-         * suspend list
-         */
-        rt_thread_resume(thread);
-    }
+    rt_susp_list_resume_all(&mp->suspend_thread, RT_ERROR);
 
     rt_spin_unlock_irqrestore(&(mp->spinlock), level);
 
@@ -339,8 +310,7 @@ void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
         thread->error = RT_EOK;
 
         /* need suspend thread */
-        rt_thread_suspend(thread);
-        rt_list_insert_after(&(mp->suspend_thread), &(thread->tlist));
+        rt_thread_suspend_to_list(thread, &mp->suspend_thread, RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);
 
         if (time > 0)
         {
@@ -403,7 +373,6 @@ void rt_mp_free(void *block)
 {
     rt_uint8_t **block_ptr;
     struct rt_mempool *mp;
-    struct rt_thread *thread;
     rt_base_t level;
 
     /* parameter check */
@@ -424,19 +393,8 @@ void rt_mp_free(void *block)
     *block_ptr = mp->block_list;
     mp->block_list = (rt_uint8_t *)block_ptr;
 
-    if (!rt_list_isempty(&(mp->suspend_thread)))
+    if (rt_susp_list_dequeue(&mp->suspend_thread, RT_EOK))
     {
-        /* get the suspended thread */
-        thread = rt_list_entry(mp->suspend_thread.next,
-                               struct rt_thread,
-                               tlist);
-
-        /* set error */
-        thread->error = RT_EOK;
-
-        /* resume thread */
-        rt_thread_resume(thread);
-
         rt_spin_unlock_irqrestore(&(mp->spinlock), level);
 
         /* do a schedule */

+ 1 - 1
src/object.c

@@ -70,7 +70,7 @@ enum rt_object_info_type
     RT_Object_Info_Module,                             /**< The object is a module. */
 #endif
 #ifdef RT_USING_HEAP
-    RT_Object_Info_Memory,                            /**< The object is a memory. */
+    RT_Object_Info_Memory,                             /**< The object is a memory. */
 #endif
 #ifdef RT_USING_SMART
     RT_Object_Info_Channel,                            /**< The object is a IPC channel */

+ 217 - 0
src/scheduler_comm.c

@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2006-2024, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * (scheduler_comm.c) Common API of scheduling routines.
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2024-01-18     Shell        Separate scheduling-related code from thread.c, scheduler_.*
+ */
+
+#define DBG_TAG           "kernel.sched"
+#define DBG_LVL           DBG_INFO
+#include <rtdbg.h>
+
+#include <rtthread.h>
+
+void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
+{
+    /* setup thread status */
+    RT_SCHED_CTX(thread).stat  = RT_THREAD_INIT;
+
+#ifdef RT_USING_SMP
+    /* not bind on any cpu */
+    RT_SCHED_CTX(thread).bind_cpu = RT_CPUS_NR;
+    RT_SCHED_CTX(thread).oncpu = RT_CPU_DETACHED;
+#endif /* RT_USING_SMP */
+
+    rt_sched_thread_init_priv(thread, tick, priority);
+}
+
+rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread)
+{
+    RT_SCHED_DEBUG_IS_LOCKED;
+    RT_SCHED_CTX(thread).sched_flag_ttmr_set = 1;
+    return RT_EOK;
+}
+
+rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
+{
+    rt_err_t error;
+    RT_SCHED_DEBUG_IS_LOCKED;
+
+    if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
+    {
+        error = rt_timer_stop(&thread->thread_timer);
+
+        /* clear the timer flag whether or not the stop succeeded */
+        RT_SCHED_CTX(thread).sched_flag_ttmr_set = 0;
+    }
+    else
+    {
+        error = RT_EOK;
+    }
+    return error;
+}
+
+rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
+{
+    RT_SCHED_DEBUG_IS_LOCKED;
+    return RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK;
+}
+
+rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread)
+{
+    RT_SCHED_DEBUG_IS_LOCKED;
+    return RT_SCHED_PRIV(thread).current_priority;
+}
+
+rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread)
+{
+    /* read-only fields, so the lock is unnecessary */
+    return RT_SCHED_PRIV(thread).init_priority;
+}
+
+/**
+ * @note Caller must hold the scheduler lock
+ */
+rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
+{
+    RT_SCHED_DEBUG_IS_LOCKED;
+    return (RT_SCHED_CTX(thread).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK;
+}
+
+rt_err_t rt_sched_thread_close(struct rt_thread *thread)
+{
+    RT_SCHED_DEBUG_IS_LOCKED;
+    RT_SCHED_CTX(thread).stat = RT_THREAD_CLOSE;
+    return RT_EOK;
+}
+
+rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
+{
+    RT_SCHED_DEBUG_IS_LOCKED;
+
+    RT_SCHED_PRIV(thread).remaining_tick = RT_SCHED_PRIV(thread).init_tick;
+    RT_SCHED_CTX(thread).stat |= RT_THREAD_STAT_YIELD;
+
+    return RT_EOK;
+}
+
+rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
+{
+    rt_err_t error;
+
+    RT_SCHED_DEBUG_IS_LOCKED;
+
+    if (!rt_sched_thread_is_suspended(thread))
+    {
+        /* failed to proceed, possibly due to a race condition */
+        error = -RT_EINVAL;
+    }
+    else
+    {
+        if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
+        {
+            /**
+             * Stop the timeout timer first if it is set, and don't continue
+             * if that fails, because it probably means a timeout ISR is
+             * racing to resume the thread before us.
+             */
+            error = rt_sched_thread_timer_stop(thread);
+        }
+        else
+        {
+            error = RT_EOK;
+        }
+
+        if (!error)
+        {
+            /* remove from suspend list */
+            rt_list_remove(&RT_THREAD_LIST_NODE(thread));
+
+        #ifdef RT_USING_SMART
+            thread->wakeup_handle.func = RT_NULL;
+        #endif
+
+            /* insert into the scheduler's ready queue */
+            rt_sched_insert_thread(thread);
+        }
+    }
+
+    return error;
+}
+
+rt_err_t rt_sched_tick_increase(void)
+{
+    struct rt_thread *thread;
+    rt_sched_lock_level_t slvl;
+
+    thread = rt_thread_self();
+
+    rt_sched_lock(&slvl);
+
+    RT_SCHED_PRIV(thread).remaining_tick--;
+    if (RT_SCHED_PRIV(thread).remaining_tick)
+    {
+        rt_sched_unlock(slvl);
+    }
+    else
+    {
+        rt_sched_thread_yield(thread);
+
+        /* request a rescheduling even though we are probably in an ISR */
+        rt_sched_unlock_n_resched(slvl);
+    }
+
+    return RT_EOK;
+}
+
+/**
+ * @brief Update priority of the target thread
+ */
+rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority)
+{
+    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
+    RT_SCHED_DEBUG_IS_LOCKED;
+
+    /* for ready thread, change queue; otherwise simply update the priority */
+    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
+    {
+        /* remove thread from schedule queue first */
+        rt_sched_remove_thread(thread);
+
+        /* change thread priority */
+        RT_SCHED_PRIV(thread).current_priority = priority;
+
+        /* recalculate priority attribute */
+#if RT_THREAD_PRIORITY_MAX > 32
+        RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;               /* 5bit */
+        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
+        RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07);   /* 3bit */
+#else
+        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+        RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;
+
+        /* insert thread to schedule queue again */
+        rt_sched_insert_thread(thread);
+    }
+    else
+    {
+        RT_SCHED_PRIV(thread).current_priority = priority;
+
+        /* recalculate priority attribute */
+#if RT_THREAD_PRIORITY_MAX > 32
+        RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;               /* 5bit */
+        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
+        RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07);   /* 3bit */
+#else
+        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+    }
+
+    return RT_EOK;
+}
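
As a concrete check of the bitmap math in rt_sched_thread_change_priority(), assuming RT_THREAD_PRIORITY_MAX > 32, decomposing priority 53 (0b110101) gives the values below; the demo function is illustrative only:

#include <rtthread.h>

void demo_priority_decompose(void)
{
    rt_uint8_t  priority    = 53;
    rt_uint8_t  number      = priority >> 3;          /* 6: index into ready_table[] */
    rt_uint32_t number_mask = 1 << number;            /* 0x40: bit 6 of priority_group */
    rt_uint8_t  high_mask   = 1 << (priority & 0x07); /* 0x20: bit 5 within ready_table[6] */

    rt_kprintf("number=%u number_mask=0x%x high_mask=0x%x\n",
               number, number_mask, high_mask);
}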

File diff suppressed because it is too large
+ 657 - 384
src/scheduler_mp.c


+ 152 - 37
src/scheduler_up.c

@@ -161,15 +161,44 @@ static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *high
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
 
     /* get highest ready priority thread */
-    highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
-                              struct rt_thread,
-                              tlist);
+    highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(rt_thread_priority_table[highest_ready_priority].next);
 
     *highest_prio = highest_ready_priority;
 
     return highest_priority_thread;
 }
 
+rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
+{
+    rt_base_t level;
+    if (!plvl)
+        return -RT_EINVAL;
+
+    level = rt_hw_interrupt_disable();
+    *plvl = level;
+
+    return RT_EOK;
+}
+
+rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
+{
+    rt_hw_interrupt_enable(level);
+
+    return RT_EOK;
+}
+
+rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
+{
+    if (rt_thread_self())
+    {
+        /* if scheduler is available */
+        rt_schedule();
+    }
+    rt_hw_interrupt_enable(level);
+
+    return RT_EOK;
+}
+
 /**
  * @brief This function will initialize the system scheduler.
  */
@@ -208,8 +237,8 @@ void rt_system_scheduler_start(void)
 
     rt_current_thread = to_thread;
 
-    rt_schedule_remove_thread(to_thread);
-    to_thread->stat = RT_THREAD_RUNNING;
+    rt_sched_remove_thread(to_thread);
+    RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;
 
     /* switch to new thread */
 
@@ -250,13 +279,13 @@ void rt_schedule(void)
 
             to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
 
-            if ((rt_current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
+            if ((RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
             {
-                if (rt_current_thread->current_priority < highest_ready_priority)
+                if (RT_SCHED_PRIV(rt_current_thread).current_priority < highest_ready_priority)
                 {
                     to_thread = rt_current_thread;
                 }
-                else if (rt_current_thread->current_priority == highest_ready_priority && (rt_current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
+                else if (RT_SCHED_PRIV(rt_current_thread).current_priority == highest_ready_priority && (RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
                 {
                     to_thread = rt_current_thread;
                 }
@@ -277,16 +306,16 @@ void rt_schedule(void)
 
                 if (need_insert_from_thread)
                 {
-                    rt_schedule_insert_thread(from_thread);
+                    rt_sched_insert_thread(from_thread);
                 }
 
-                if ((from_thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
+                if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
                 {
-                    from_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
+                    RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
                 }
 
-                rt_schedule_remove_thread(to_thread);
-                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);
+                rt_sched_remove_thread(to_thread);
+                RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
 
                 /* switch to new thread */
                 LOG_D("[%d]switch to priority#%d "
@@ -315,11 +344,11 @@ void rt_schedule(void)
 #ifdef RT_USING_SIGNALS
                     /* check stat of thread for signal */
                     level = rt_hw_interrupt_disable();
-                    if (rt_current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
+                    if (RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
                     {
                         extern void rt_thread_handle_sig(rt_bool_t clean_state);
 
-                        rt_current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
+                        RT_SCHED_CTX(rt_current_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
 
                         rt_hw_interrupt_enable(level);
 
@@ -343,8 +372,8 @@ void rt_schedule(void)
             }
             else
             {
-                rt_schedule_remove_thread(rt_current_thread);
-                rt_current_thread->stat = RT_THREAD_RUNNING | (rt_current_thread->stat & ~RT_THREAD_STAT_MASK);
+                rt_sched_remove_thread(rt_current_thread);
+                RT_SCHED_CTX(rt_current_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(rt_current_thread).stat & ~RT_THREAD_STAT_MASK);
             }
         }
     }
@@ -356,6 +385,42 @@ __exit:
     return;
 }
 
+/* Normally, no one is racing with us, so this operation is lockless */
+void rt_sched_thread_startup(struct rt_thread *thread)
+{
+#if RT_THREAD_PRIORITY_MAX > 32
+    RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;            /* 5bit */
+    RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).number;
+    RT_SCHED_PRIV(thread).high_mask = 1L << (RT_SCHED_PRIV(thread).current_priority & 0x07);  /* 3bit */
+#else
+    RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).current_priority;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+
+    /* change thread stat, so we can resume it */
+    RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND;
+}
+
+void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
+{
+    rt_list_init(&RT_THREAD_LIST_NODE(thread));
+
+    /* priority init */
+    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
+    RT_SCHED_PRIV(thread).init_priority    = priority;
+    RT_SCHED_PRIV(thread).current_priority = priority;
+
+    /* not added to the scheduler queue yet: the thread is still in INIT state */
+    RT_SCHED_PRIV(thread).number_mask = 0;
+#if RT_THREAD_PRIORITY_MAX > 32
+    RT_SCHED_PRIV(thread).number = 0;
+    RT_SCHED_PRIV(thread).high_mask = 0;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+
+    /* tick init */
+    RT_SCHED_PRIV(thread).init_tick = tick;
+    RT_SCHED_PRIV(thread).remaining_tick = tick;
+}
+
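A worked example of the bitmap arithmetic above may help. With RT_THREAD_PRIORITY_MAX > 32, a priority is split into a 5-bit group index (number) and a 3-bit offset (high_mask) so the scheduler can locate the highest ready priority with two bitmap lookups. The standalone sketch below is illustrative only (hypothetical priority 37), not part of the patch:

#include <stdio.h>

/* Recompute the ready-bitmap fields the same way rt_sched_thread_startup()
 * does when RT_THREAD_PRIORITY_MAX > 32, shown for priority 37. */
int main(void)
{
    unsigned priority    = 37;                       /* hypothetical priority */
    unsigned number      = priority >> 3;            /* group index: 4 */
    unsigned number_mask = 1u << number;             /* bit in rt_thread_ready_priority_group: 0x10 */
    unsigned high_mask   = 1u << (priority & 0x07);  /* bit in rt_thread_ready_table[number]: 0x20 */

    printf("number=%u number_mask=0x%x high_mask=0x%x\n",
           number, number_mask, high_mask);
    return 0;
}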
 /**
  * @brief This function will insert a thread to the system ready queue. The state of
  *        thread will be set as READY and the thread will be removed from suspend queue.
@@ -364,7 +429,7 @@ __exit:
  *
  * @note  Please do not invoke this function in user application.
  */
-void rt_schedule_insert_thread(struct rt_thread *thread)
+void rt_sched_insert_thread(struct rt_thread *thread)
 {
     rt_base_t level;
 
@@ -376,33 +441,33 @@ void rt_schedule_insert_thread(struct rt_thread *thread)
     /* it's current thread, it should be RUNNING thread */
     if (thread == rt_current_thread)
     {
-        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
+        RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
         goto __exit;
     }
 
     /* READY thread, insert to ready queue */
-    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
+    RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
     /* no time slice left (YIELD): insert the thread at the tail of the ready list */
-    if((thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
+    if((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
     {
-        rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
-                              &(thread->tlist));
+        rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
+                              &RT_THREAD_LIST_NODE(thread));
     }
     /* time slices remain: insert the thread at the head of the ready list so it runs first next time */
     else
     {
-        rt_list_insert_after(&(rt_thread_priority_table[thread->current_priority]),
-                              &(thread->tlist));
+        rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
+                              &RT_THREAD_LIST_NODE(thread));
     }
 
     LOG_D("insert thread[%.*s], the priority: %d",
-          RT_NAME_MAX, thread->parent.name, thread->current_priority);
+          RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
 
     /* set priority mask */
 #if RT_THREAD_PRIORITY_MAX > 32
-    rt_thread_ready_table[thread->number] |= thread->high_mask;
+    rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
-    rt_thread_ready_priority_group |= thread->number_mask;
+    rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;
 
 __exit:
     /* enable interrupt */
@@ -416,7 +481,7 @@ __exit:
  *
  * @note  Please do not invoke this function in user application.
  */
-void rt_schedule_remove_thread(struct rt_thread *thread)
+void rt_sched_remove_thread(struct rt_thread *thread)
 {
     rt_base_t level;
 
@@ -427,20 +492,20 @@ void rt_schedule_remove_thread(struct rt_thread *thread)
 
     LOG_D("remove thread[%.*s], the priority: %d",
           RT_NAME_MAX, thread->parent.name,
-          thread->current_priority);
+          RT_SCHED_PRIV(thread).current_priority);
 
     /* remove thread from ready list */
-    rt_list_remove(&(thread->tlist));
-    if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
+    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
+    if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
     {
 #if RT_THREAD_PRIORITY_MAX > 32
-        rt_thread_ready_table[thread->number] &= ~thread->high_mask;
-        if (rt_thread_ready_table[thread->number] == 0)
+        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
+        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
         {
-            rt_thread_ready_priority_group &= ~thread->number_mask;
+            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
         }
 #else
-        rt_thread_ready_priority_group &= ~thread->number_mask;
+        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
     }
 
@@ -448,12 +513,54 @@ void rt_schedule_remove_thread(struct rt_thread *thread)
     rt_hw_interrupt_enable(level);
 }
 
+#ifdef RT_USING_DEBUG
+
+static volatile int _critical_error_occurred = 0;
+
+void rt_exit_critical_safe(rt_base_t critical_level)
+{
+    rt_base_t level;
+    /* disable interrupt */
+    level = rt_hw_interrupt_disable();
+
+    if (!_critical_error_occurred)
+    {
+        if (critical_level != rt_scheduler_lock_nest)
+        {
+            int dummy = 1;
+            _critical_error_occurred = 1;
+
+            rt_kprintf("%s: un-compatible critical level\n" \
+                       "\tCurrent %d\n\tCaller %d\n",
+                       __func__, rt_scheduler_lock_nest,
+                       critical_level);
+            rt_backtrace();
+
+            while (dummy) ;
+        }
+    }
+    rt_hw_interrupt_enable(level);
+
+    rt_exit_critical();
+}
+
+#else
+
+void rt_exit_critical_safe(rt_base_t critical_level)
+{
+    RT_UNUSED(critical_level);
+    rt_exit_critical();
+}
+
+#endif /* RT_USING_DEBUG */
+RTM_EXPORT(rt_exit_critical_safe);
+
 /**
  * @brief This function will lock the thread scheduler.
  */
-void rt_enter_critical(void)
+rt_base_t rt_enter_critical(void)
 {
     rt_base_t level;
+    rt_base_t critical_level;
 
     /* disable interrupt */
     level = rt_hw_interrupt_disable();
@@ -463,9 +570,12 @@ void rt_enter_critical(void)
      * enough and does not check here
      */
     rt_scheduler_lock_nest ++;
+    critical_level = rt_scheduler_lock_nest;
 
     /* enable interrupt */
     rt_hw_interrupt_enable(level);
+
+    return critical_level;
 }
 RTM_EXPORT(rt_enter_critical);
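Callers are expected to pair the returned nesting level with rt_exit_critical_safe(); a minimal usage sketch, assuming only the API introduced in this patch (the function name below is illustrative):

/* Illustrative caller: unbalanced enter/exit pairs are reported under
 * RT_USING_DEBUG instead of silently corrupting rt_scheduler_lock_nest. */
static void locked_region(void)
{
    rt_base_t critical_level = rt_enter_critical();

    /* ... code that must not be rescheduled away ... */

    rt_exit_critical_safe(critical_level);
}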
 
@@ -511,5 +621,10 @@ rt_uint16_t rt_critical_level(void)
 }
 RTM_EXPORT(rt_critical_level);
 
+/* CPU binding is not supported on the single-core (UP) scheduler */
+rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
+{
+    RT_UNUSED(thread);
+    RT_UNUSED(cpu);
+    return -RT_EINVAL;
+}
+
 /**@}*/
 /**@endcond*/

+ 73 - 51
src/signal.c

@@ -29,6 +29,8 @@
 #define sig_mask(sig_no)    (1u << sig_no)
 #define sig_valid(sig_no)   (sig_no >= 0 && sig_no < RT_SIG_MAX)
 
+static struct rt_spinlock _thread_signal_lock = RT_SPINLOCK_INIT;
+
 struct siginfo_node
 {
     siginfo_t si;
@@ -63,7 +65,7 @@ static void _signal_entry(void *parameter)
 #endif /* RT_USING_SMP */
 
     LOG_D("switch back to: 0x%08x\n", tid->sp);
-    tid->stat &= ~RT_THREAD_STAT_SIGNAL;
+    RT_SCHED_CTX(tid).stat &= ~RT_THREAD_STAT_SIGNAL;
 
 #ifdef RT_USING_SMP
     rt_hw_context_switch_to((rt_base_t)&parameter, tid);
@@ -86,16 +88,16 @@ static void _signal_deliver(rt_thread_t tid)
 {
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
 
     /* thread is not interested in pended signals */
     if (!(tid->sig_pending & tid->sig_mask))
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
         return;
     }
 
-    if ((tid->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
+    if ((RT_SCHED_CTX(tid).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
     {
         /* resume thread to handle signal */
 #ifdef RT_USING_SMART
@@ -104,9 +106,9 @@ static void _signal_deliver(rt_thread_t tid)
         rt_thread_resume(tid);
 #endif
         /* add signal state */
-        tid->stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
+        RT_SCHED_CTX(tid).stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
 
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 
         /* re-schedule */
         rt_schedule();
@@ -116,9 +118,9 @@ static void _signal_deliver(rt_thread_t tid)
         if (tid == rt_thread_self())
         {
             /* add signal state */
-            tid->stat |= RT_THREAD_STAT_SIGNAL;
+            RT_SCHED_CTX(tid).stat |= RT_THREAD_STAT_SIGNAL;
 
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 
             /* do signal action in self thread context */
             if (rt_interrupt_get_nest() == 0)
@@ -126,16 +128,16 @@ static void _signal_deliver(rt_thread_t tid)
                 rt_thread_handle_sig(RT_TRUE);
             }
         }
-        else if (!((tid->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL))
+        else if (!((RT_SCHED_CTX(tid).stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL))
         {
             /* add signal state */
-            tid->stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
+            RT_SCHED_CTX(tid).stat |= (RT_THREAD_STAT_SIGNAL | RT_THREAD_STAT_SIGNAL_PENDING);
 
 #ifdef RT_USING_SMP
             {
                 int cpu_id;
 
-                cpu_id = tid->oncpu;
+                cpu_id = RT_SCHED_CTX(tid).oncpu;
                 if ((cpu_id != RT_CPU_DETACHED) && (cpu_id != rt_hw_cpu_id()))
                 {
                     rt_uint32_t cpu_mask;
@@ -146,13 +148,13 @@ static void _signal_deliver(rt_thread_t tid)
             }
 #else
             /* point to the signal handle entry */
-            tid->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
+            RT_SCHED_CTX(tid).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
             tid->sig_ret = tid->sp;
             tid->sp = rt_hw_stack_init((void *)_signal_entry, RT_NULL,
                                        (void *)((char *)tid->sig_ret - 32), RT_NULL);
 #endif /* RT_USING_SMP */
 
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
             LOG_D("signal stack pointer @ 0x%08x", tid->sp);
 
             /* re-schedule */
@@ -160,7 +162,7 @@ static void _signal_deliver(rt_thread_t tid)
         }
         else
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
         }
     }
 }
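The hunks above replace global interrupt masking with the module-local _thread_signal_lock, so only signal state is serialized and other cores keep running. A condensed sketch of the resulting pattern (the helper name is illustrative):

/* Check whether a deliverable signal is pending, under the new lock. */
static rt_bool_t sig_deliverable(rt_thread_t tid)
{
    rt_bool_t ret;
    rt_base_t level = rt_spin_lock_irqsave(&_thread_signal_lock);

    ret = (tid->sig_pending & tid->sig_mask) ? RT_TRUE : RT_FALSE;

    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
    return ret;
}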
@@ -168,37 +170,38 @@ static void _signal_deliver(rt_thread_t tid)
 #ifdef RT_USING_SMP
 void *rt_signal_check(void* context)
 {
-    rt_base_t level;
+    rt_sched_lock_level_t level;
     int cpu_id;
     struct rt_cpu* pcpu;
     struct rt_thread *current_thread;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
+
     cpu_id = rt_hw_cpu_id();
     pcpu   = rt_cpu_index(cpu_id);
     current_thread = pcpu->current_thread;
 
     if (pcpu->irq_nest)
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
         return context;
     }
 
     if (current_thread->cpus_lock_nest == 1)
     {
-        if (current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
+        if (RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
         {
             void *sig_context;
 
-            current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
+            RT_SCHED_CTX(current_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
 
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
             sig_context = rt_hw_stack_init((void *)_signal_entry, context,
                     (void*)((char*)context - 32), RT_NULL);
             return sig_context;
         }
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
     return context;
 }
 #endif /* RT_USING_SMP */
@@ -227,10 +230,14 @@ rt_sighandler_t rt_signal_install(int signo, rt_sighandler_t handler)
 
     if (!sig_valid(signo)) return SIG_ERR;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
     if (tid->sig_vectors == RT_NULL)
     {
+        rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
+
         rt_thread_alloc_sig(tid);
+
+        level = rt_spin_lock_irqsave(&_thread_signal_lock);
     }
 
     if (tid->sig_vectors)
@@ -241,7 +248,7 @@ rt_sighandler_t rt_signal_install(int signo, rt_sighandler_t handler)
         else if (handler == SIG_DFL) tid->sig_vectors[signo] = _signal_default_handler;
         else tid->sig_vectors[signo] = handler;
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 
     return old;
 }
@@ -262,11 +269,11 @@ void rt_signal_mask(int signo)
     rt_base_t level;
     rt_thread_t tid = rt_thread_self();
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
 
     tid->sig_mask &= ~sig_mask(signo);
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 }
 
 /**
@@ -285,19 +292,19 @@ void rt_signal_unmask(int signo)
     rt_base_t level;
     rt_thread_t tid = rt_thread_self();
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
 
     tid->sig_mask |= sig_mask(signo);
 
     /* let thread handle pended signals */
     if (tid->sig_mask & tid->sig_pending)
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
         _signal_deliver(tid);
     }
     else
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
     }
 }
 
@@ -335,7 +342,7 @@ int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout)
     /* clear siginfo to avoid unknown value */
     memset(si, 0x0, sizeof(rt_siginfo_t));
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
 
     /* already pending */
     if (tid->sig_pending & *set) goto __done;
@@ -349,7 +356,7 @@ int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout)
     /* suspend self thread */
     rt_thread_suspend_with_flag(tid, RT_UNINTERRUPTIBLE);
     /* set thread stat as waiting for signal */
-    tid->stat |= RT_THREAD_STAT_SIGNAL_WAIT;
+    RT_SCHED_CTX(tid).stat |= RT_THREAD_STAT_SIGNAL_WAIT;
 
     /* start timeout timer */
     if (timeout != RT_WAITING_FOREVER)
@@ -360,21 +367,21 @@ int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout)
                          &timeout);
         rt_timer_start(&(tid->thread_timer));
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 
     /* do thread scheduling */
     rt_schedule();
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
 
     /* remove signal waiting flag */
-    tid->stat &= ~RT_THREAD_STAT_SIGNAL_WAIT;
+    RT_SCHED_CTX(tid).stat &= ~RT_THREAD_STAT_SIGNAL_WAIT;
 
     /* check errno of thread */
     if (tid->error == -RT_ETIMEOUT)
     {
         tid->error = RT_EOK;
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 
         /* timer timeout */
         ret = -RT_ETIMEOUT;
@@ -428,7 +435,7 @@ __done:
      }
 
 __done_int:
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 
 __done_return:
     return ret;
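For reference, a hedged caller sketch for rt_signal_wait(); the signal number and timeout are illustrative, and RT_EOK on success is assumed from the error paths above:

/* Wait up to one second for SIGUSR1; the mask encoding mirrors the
 * sig_mask() macro at the top of this file. */
rt_sigset_t set = 1u << SIGUSR1;
rt_siginfo_t info;

if (rt_signal_wait(&set, &info, RT_TICK_PER_SECOND) == RT_EOK)
{
    /* info.si_signo identifies the delivered signal */
}
else
{
    /* -RT_ETIMEOUT on timeout, -RT_EINTR if interrupted */
}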
@@ -441,11 +448,11 @@ void rt_thread_handle_sig(rt_bool_t clean_state)
     rt_thread_t tid = rt_thread_self();
     struct siginfo_node *si_node;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
     if (tid->sig_pending & tid->sig_mask)
     {
         /* if thread is not waiting for signal */
-        if (!(tid->stat & RT_THREAD_STAT_SIGNAL_WAIT))
+        if (!(RT_SCHED_CTX(tid).stat & RT_THREAD_STAT_SIGNAL_WAIT))
         {
             while (tid->sig_pending & tid->sig_mask)
             {
@@ -464,12 +471,12 @@ void rt_thread_handle_sig(rt_bool_t clean_state)
                 signo   = si_node->si.si_signo;
                 handler = tid->sig_vectors[signo];
                 tid->sig_pending &= ~sig_mask(signo);
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 
                 LOG_D("handle signal: %d, handler 0x%08x", signo, handler);
                 if (handler) handler(signo);
 
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&_thread_signal_lock);
                 error = -RT_EINTR;
 
                 rt_mp_free(si_node); /* release this siginfo node */
@@ -480,7 +487,7 @@ void rt_thread_handle_sig(rt_bool_t clean_state)
             /* whether clean signal status */
             if (clean_state == RT_TRUE)
             {
-                tid->stat &= ~RT_THREAD_STAT_SIGNAL;
+                RT_SCHED_CTX(tid).stat &= ~RT_THREAD_STAT_SIGNAL;
             }
             else
             {
@@ -488,12 +495,13 @@ void rt_thread_handle_sig(rt_bool_t clean_state)
             }
         }
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 }
 
 void rt_thread_alloc_sig(rt_thread_t tid)
 {
     int index;
+    rt_bool_t need_free = RT_FALSE;
     rt_base_t level;
     rt_sighandler_t *vectors;
 
@@ -505,9 +513,23 @@ void rt_thread_alloc_sig(rt_thread_t tid)
         vectors[index] = _signal_default_handler;
     }
 
-    level = rt_hw_interrupt_disable();
-    tid->sig_vectors = vectors;
-    rt_hw_interrupt_enable(level);
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
+
+    if (tid->sig_vectors == RT_NULL)
+    {
+        tid->sig_vectors = vectors;
+    }
+    else
+    {
+        need_free = RT_TRUE;
+    }
+
+    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
+
+    if (need_free)
+    {
+        rt_free(vectors);
+    }
 }
 
 void rt_thread_free_sig(rt_thread_t tid)
@@ -516,13 +538,13 @@ void rt_thread_free_sig(rt_thread_t tid)
     struct siginfo_node *si_node;
     rt_sighandler_t *sig_vectors;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
     si_node = (struct siginfo_node *)tid->si_list;
     tid->si_list = RT_NULL;
 
     sig_vectors = tid->sig_vectors;
     tid->sig_vectors = RT_NULL;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 
     if (si_node)
     {
@@ -570,7 +592,7 @@ int rt_thread_kill(rt_thread_t tid, int sig)
     si.si_code  = SI_USER;
     si.si_value.sival_ptr = RT_NULL;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_thread_signal_lock);
     if (tid->sig_pending & sig_mask(sig))
     {
         /* whether already emits this signal? */
@@ -590,12 +612,12 @@ int rt_thread_kill(rt_thread_t tid, int sig)
             if (entry->si.si_signo == sig)
             {
                 memcpy(&(entry->si), &si, sizeof(siginfo_t));
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
                 return 0;
             }
         }
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
 
     si_node = (struct siginfo_node *) rt_mp_alloc(_siginfo_pool, 0);
     if (si_node)
@@ -603,7 +625,7 @@ int rt_thread_kill(rt_thread_t tid, int sig)
         rt_slist_init(&(si_node->list));
         memcpy(&(si_node->si), &si, sizeof(siginfo_t));
 
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&_thread_signal_lock);
 
         if (tid->si_list)
         {
@@ -620,7 +642,7 @@ int rt_thread_kill(rt_thread_t tid, int sig)
         /* a new signal */
         tid->sig_pending |= sig_mask(sig);
 
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
     }
     else
     {

+ 231 - 314
src/thread.c

@@ -79,31 +79,30 @@ RT_OBJECT_HOOKLIST_DEFINE(rt_thread_inited);
 static void _thread_exit(void)
 {
     struct rt_thread *thread;
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
+    rt_base_t critical_level;
 
     /* get current thread */
     thread = rt_thread_self();
 
-    rt_enter_critical();
+    critical_level = rt_enter_critical();
+    rt_sched_lock(&slvl);
 
     /* remove from schedule */
-    rt_schedule_remove_thread(thread);
-
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
+    rt_sched_remove_thread(thread);
 
     /* remove it from timer list */
     rt_timer_detach(&thread->thread_timer);
 
     /* change stat */
-    thread->stat = RT_THREAD_CLOSE;
+    rt_sched_thread_close(thread);
 
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+    rt_sched_unlock(slvl);
 
     /* insert to defunct thread list */
     rt_thread_defunct_enqueue(thread);
 
-    LOG_D("line:%d thread:%s exit\n", __LINE__, rt_thread_self()->parent.name);
-    rt_exit_critical();
+    rt_exit_critical_safe(critical_level);
 
     /* switch to next task */
     rt_schedule();
@@ -118,41 +117,66 @@ static void _thread_exit(void)
 static void _thread_timeout(void *parameter)
 {
     struct rt_thread *thread;
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
 
     thread = (struct rt_thread *)parameter;
 
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
-    RT_ASSERT((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
+
+    rt_sched_lock(&slvl);
+
+    /**
+     * Resuming the thread and stopping its timer must be one atomic
+     * operation, so the thread is not expected to have resumed yet.
+     */
+    RT_ASSERT(rt_sched_thread_is_suspended(thread));
+
     /* set error number */
     thread->error = -RT_ETIMEOUT;
 
     /* remove from suspend list */
-    rt_list_remove(&(thread->tlist));
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
     /* insert to schedule ready list */
-    rt_schedule_insert_thread(thread);
-    /* do schedule */
-    rt_schedule();
+    rt_sched_insert_thread(thread);
+    /* do schedule and release the scheduler lock */
+    rt_sched_unlock_n_resched(slvl);
 }
 
-/* release the mutex held by a thread when thread is reclaimed */
 #ifdef RT_USING_MUTEX
-static void _free_owned_mutex(rt_thread_t thread)
+static void _thread_detach_from_mutex(rt_thread_t thread)
 {
     rt_list_t *node;
     rt_list_t *tmp_list;
     struct rt_mutex *mutex;
+    rt_base_t level;
+
+    level = rt_spin_lock_irqsave(&thread->spinlock);
+
+    /* check if thread is waiting on a mutex */
+    if ((thread->pending_object) &&
+        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
+    {
+        /* remove it from its waiting list */
+        mutex = (struct rt_mutex *)thread->pending_object;
+        rt_mutex_drop_thread(mutex, thread);
+        thread->pending_object = RT_NULL;
+    }
 
+    /* free taken mutexes after detaching from the waiting list, so we don't lose a mutex we just acquired */
     rt_list_for_each_safe(node, tmp_list, &(thread->taken_object_list))
     {
         mutex = rt_list_entry(node, struct rt_mutex, taken_list);
         rt_mutex_release(mutex);
     }
+
+    rt_spin_unlock_irqrestore(&thread->spinlock, level);
 }
+
+#else
+
+static void _thread_detach_from_mutex(rt_thread_t thread) {}
 #endif
 
 static rt_err_t _thread_init(struct rt_thread *thread,
@@ -166,16 +190,14 @@ static rt_err_t _thread_init(struct rt_thread *thread,
 {
     RT_UNUSED(name);
 
-    /* init thread list */
-    rt_list_init(&(thread->tlist));
-    rt_list_init(&(thread->tlist_schedule));
+    rt_sched_thread_init_ctx(thread, tick, priority);
 
 #ifdef RT_USING_MEM_PROTECTION
     thread->mem_regions = RT_NULL;
 #endif
 
 #ifdef RT_USING_SMART
-    thread->wakeup.func = RT_NULL;
+    thread->wakeup_handle.func = RT_NULL;
 #endif
 
     thread->entry = (void *)entry;
@@ -200,13 +222,6 @@ static rt_err_t _thread_init(struct rt_thread *thread,
                                           (void *)_thread_exit);
 #endif /* ARCH_CPU_STACK_GROWS_UPWARD */
 
-    /* priority init */
-    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
-    thread->init_priority    = priority;
-    thread->current_priority = priority;
-
-    thread->number_mask = 0;
-
 #ifdef RT_USING_MUTEX
     rt_list_init(&thread->taken_object_list);
     thread->pending_object = RT_NULL;
@@ -217,28 +232,13 @@ static rt_err_t _thread_init(struct rt_thread *thread,
     thread->event_info = 0;
 #endif /* RT_USING_EVENT */
 
-#if RT_THREAD_PRIORITY_MAX > 32
-    thread->number = 0;
-    thread->high_mask = 0;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-
-    /* tick init */
-    rt_atomic_store(&thread->init_tick, tick);
-    rt_atomic_store(&thread->remaining_tick, tick);
-
     /* error and flags */
     thread->error = RT_EOK;
-    thread->stat  = RT_THREAD_INIT;
-
-#ifdef RT_USING_SMP
-    /* not bind on any cpu */
-    thread->bind_cpu = RT_CPUS_NR;
-    thread->oncpu = RT_CPU_DETACHED;
 
     /* lock init */
+#ifdef RT_USING_SMP
     rt_atomic_store(&thread->cpus_lock_nest, 0);
-    rt_atomic_store(&thread->critical_lock_nest, 0);
-#endif /* RT_USING_SMP */
+#endif /* RT_USING_SMP */
 
     /* initialize cleanup function and user data */
     thread->cleanup   = 0;
@@ -250,7 +250,7 @@ static rt_err_t _thread_init(struct rt_thread *thread,
                   _thread_timeout,
                   thread,
                   0,
-                  RT_TIMER_FLAG_ONE_SHOT);
+                  RT_TIMER_FLAG_ONE_SHOT | RT_TIMER_FLAG_THREAD_TIMER);
 
     /* initialize signal */
 #ifdef RT_USING_SIGNALS
@@ -268,6 +268,7 @@ static rt_err_t _thread_init(struct rt_thread *thread,
     thread->tid_ref_count = 0;
     thread->lwp = RT_NULL;
     thread->susp_recycler = RT_NULL;
+    thread->robust_list = RT_NULL;
     rt_list_init(&(thread->sibling));
 
     /* lwp thread-signal init */
@@ -392,34 +393,24 @@ rt_err_t rt_thread_startup(rt_thread_t thread)
 {
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
-    RT_ASSERT((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_INIT);
+    RT_ASSERT((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_INIT);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
 
-    /* calculate priority attribute */
-#if RT_THREAD_PRIORITY_MAX > 32
-    thread->number      = thread->current_priority >> 3;            /* 5bit */
-    thread->number_mask = 1L << thread->number;
-    thread->high_mask   = 1L << (thread->current_priority & 0x07);  /* 3bit */
-#else
-    thread->number_mask = 1L << thread->current_priority;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-
     LOG_D("startup a thread:%s with priority:%d",
           thread->parent.name, thread->current_priority);
-    /* change thread stat */
-    thread->stat = RT_THREAD_SUSPEND;
-    /* then resume it */
+
+    /* calculate priority attribute and reset thread stat to suspend */
+    rt_sched_thread_startup(thread);
+
+    /* resume and do a schedule if scheduler is available */
     rt_thread_resume(thread);
-    if (rt_thread_self() != RT_NULL)
-    {
-        /* do a scheduling */
-        rt_schedule();
-    }
 
     return RT_EOK;
 }
 RTM_EXPORT(rt_thread_startup);
 
+static rt_err_t _thread_detach(rt_thread_t thread);
+
 /**
  * @brief   This function will detach a thread. The thread object will be removed from
  *          thread queue and detached/deleted from the system object management.
@@ -431,52 +422,68 @@ RTM_EXPORT(rt_thread_startup);
  */
 rt_err_t rt_thread_detach(rt_thread_t thread)
 {
-    rt_base_t level;
-
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
     RT_ASSERT(rt_object_is_systemobject((rt_object_t)thread));
 
-    if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_CLOSE)
-        return RT_EOK;
-
-    rt_enter_critical();
+    return _thread_detach(thread);
+}
+RTM_EXPORT(rt_thread_detach);
 
-    if ((thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
+static rt_err_t _thread_detach(rt_thread_t thread)
+{
+    rt_err_t error;
+    rt_sched_lock_level_t slvl;
+    rt_uint8_t thread_status;
+    rt_base_t critical_level;
+
+    /**
+     * forbid scheduling on the current core before returning, since the
+     * current thread may be detached from the scheduler.
+     */
+    critical_level = rt_enter_critical();
+
+    /* lock the scheduler before checking the thread's status */
+    rt_sched_lock(&slvl);
+
+    /* check if thread is already closed */
+    thread_status = rt_sched_thread_get_stat(thread);
+    if (thread_status != RT_THREAD_CLOSE)
     {
-        /* remove from schedule */
-        rt_schedule_remove_thread(thread);
-    }
+        if (thread_status != RT_THREAD_INIT)
+        {
+            /* remove from schedule */
+            rt_sched_remove_thread(thread);
+        }
 
-    /* disable interrupt */
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
+        /* release thread timer */
+        rt_timer_detach(&(thread->thread_timer));
 
-    /* release thread timer */
-    rt_timer_detach(&(thread->thread_timer));
+        /* change stat */
+        rt_sched_thread_close(thread);
 
-    /* change stat */
-    thread->stat = RT_THREAD_CLOSE;
+        /* scheduler works are done */
+        rt_sched_unlock(slvl);
 
-#ifdef RT_USING_MUTEX
-    _free_owned_mutex(thread);
-    if ((thread->pending_object) &&
-        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
-    {
-        struct rt_mutex *mutex = (struct rt_mutex*)thread->pending_object;
-        rt_mutex_drop_thread(mutex, thread);
-        thread->pending_object = RT_NULL;
+        _thread_detach_from_mutex(thread);
+
+        /* insert to defunct thread list */
+        rt_thread_defunct_enqueue(thread);
+
+        error = RT_EOK;
     }
-#endif
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+    else
+    {
+        rt_sched_unlock(slvl);
 
-    /* insert to defunct thread list */
-    rt_thread_defunct_enqueue(thread);
+        /* already closed */
+        error = RT_EOK;
+    }
 
-    rt_exit_critical();
-    return RT_EOK;
+    rt_exit_critical_safe(critical_level);
+    return error;
 }
-RTM_EXPORT(rt_thread_detach);
 
 #ifdef RT_USING_HEAP
 /**
@@ -546,47 +553,12 @@ RTM_EXPORT(rt_thread_create);
  */
 rt_err_t rt_thread_delete(rt_thread_t thread)
 {
-    rt_base_t level;
-
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
     RT_ASSERT(rt_object_is_systemobject((rt_object_t)thread) == RT_FALSE);
 
-    if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_CLOSE)
-        return RT_EOK;
-
-    rt_enter_critical();
-
-    if ((thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
-    {
-        /* remove from schedule */
-        rt_schedule_remove_thread(thread);
-    }
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
-
-    /* release thread timer */
-    rt_timer_detach(&(thread->thread_timer));
-
-    /* change stat */
-    thread->stat = RT_THREAD_CLOSE;
-
-#ifdef RT_USING_MUTEX
-    _free_owned_mutex(thread);
-    if ((thread->pending_object) &&
-        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
-    {
-        struct rt_mutex *mutex = (struct rt_mutex*)thread->pending_object;
-        rt_mutex_drop_thread(mutex, thread);
-        thread->pending_object = RT_NULL;
-    }
-#endif
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-    /* insert to defunct thread list */
-    rt_thread_defunct_enqueue(thread);
-
-    rt_exit_critical();
-    return RT_EOK;
+    return _thread_detach(thread);
 }
 RTM_EXPORT(rt_thread_delete);
 #endif /* RT_USING_HEAP */
@@ -601,15 +573,12 @@ RTM_EXPORT(rt_thread_delete);
  */
 rt_err_t rt_thread_yield(void)
 {
-    struct rt_thread *thread;
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
+    rt_sched_lock(&slvl);
 
-    thread = rt_thread_self();
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
-    rt_atomic_store(&thread->remaining_tick, thread->init_tick);
-    thread->stat |= RT_THREAD_STAT_YIELD;
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-    rt_schedule();
+    rt_sched_thread_yield(rt_thread_self());
+
+    rt_sched_unlock_n_resched(slvl);
 
     return RT_EOK;
 }
@@ -626,8 +595,8 @@ RTM_EXPORT(rt_thread_yield);
  */
 static rt_err_t _thread_sleep(rt_tick_t tick)
 {
-    rt_base_t level;
     struct rt_thread *thread;
+    rt_base_t critical_level;
     int err;
 
     if (tick == 0)
@@ -642,37 +611,37 @@ static rt_err_t _thread_sleep(rt_tick_t tick)
 
     /* current context checking */
     RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
+
     /* reset thread error */
     thread->error = RT_EOK;
-    level = rt_hw_local_irq_disable();
+
+    /* lock scheduler since current thread may be suspended */
+    critical_level = rt_enter_critical();
+
     /* suspend thread */
-    rt_enter_critical();
     err = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
-    rt_spin_lock(&(thread->spinlock));
+
     /* reset the timeout of thread timer and start it */
     if (err == RT_EOK)
     {
         rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME, &tick);
         rt_timer_start(&(thread->thread_timer));
 
-        /* enable interrupt */
-        rt_spin_unlock(&(thread->spinlock));
-        rt_hw_local_irq_enable(level);
-        rt_exit_critical();
-
         thread->error = -RT_EINTR;
 
+        /* notify a pending rescheduling */
         rt_schedule();
 
+        /* exit critical and do a rescheduling */
+        rt_exit_critical_safe(critical_level);
+
         /* clear error number of this thread to RT_EOK */
         if (thread->error == -RT_ETIMEOUT)
             thread->error = RT_EOK;
     }
     else
     {
-        rt_spin_unlock(&(thread->spinlock));
-        rt_hw_local_irq_enable(level);
-        rt_exit_critical();
+        rt_exit_critical_safe(critical_level);
     }
 
     return err;
@@ -704,9 +673,9 @@ RTM_EXPORT(rt_thread_delay);
  */
 rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
 {
-    rt_base_t level;
     struct rt_thread *thread;
     rt_tick_t cur_tick;
+    rt_base_t critical_level;
 
     RT_ASSERT(tick != RT_NULL);
 
@@ -719,7 +688,7 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     thread->error = RT_EOK;
 
     /* lock the scheduler while arming the delay */
-    level = rt_hw_local_irq_disable();
+    critical_level = rt_enter_critical();
 
     cur_tick = rt_tick_get();
     if (cur_tick - *tick < inc_tick)
@@ -729,19 +698,14 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
         *tick += inc_tick;
         left_tick = *tick - cur_tick;
 
-        rt_enter_critical();
         /* suspend thread */
         rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
 
-        rt_spin_lock(&(thread->spinlock));
-
         /* reset the timeout of thread timer and start it */
         rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME, &left_tick);
         rt_timer_start(&(thread->thread_timer));
 
-        rt_spin_unlock(&(thread->spinlock));
-        rt_hw_local_irq_enable(level);
-        rt_exit_critical();
+        rt_exit_critical_safe(critical_level);
 
         rt_schedule();
 
@@ -754,7 +718,7 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     else
     {
         *tick = cur_tick;
-        rt_hw_local_irq_enable(level);
+        rt_exit_critical_safe(critical_level);
     }
 
     return thread->error;
@@ -780,65 +744,6 @@ rt_err_t rt_thread_mdelay(rt_int32_t ms)
 RTM_EXPORT(rt_thread_mdelay);
 
 #ifdef RT_USING_SMP
-static void rt_thread_cpu_bind(rt_thread_t thread, int cpu)
-{
-    rt_base_t level;
-
-    if (cpu >= RT_CPUS_NR)
-    {
-        cpu = RT_CPUS_NR;
-    }
-
-    if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
-    {
-        /* unbind */
-        /* remove from old ready queue */
-        rt_schedule_remove_thread(thread);
-        /* change thread bind cpu */
-        thread->bind_cpu = cpu;
-        /* add to new ready queue */
-        rt_schedule_insert_thread(thread);
-        if (rt_thread_self() != RT_NULL)
-        {
-            rt_schedule();
-        }
-    }
-    else
-    {
-        level = rt_spin_lock_irqsave(&(thread->spinlock));
-        thread->bind_cpu = cpu;
-        if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
-        {
-            /* thread is running on a cpu */
-            int current_cpu = rt_hw_cpu_id();
-
-            if (cpu != RT_CPUS_NR)
-            {
-                if (thread->oncpu == current_cpu)
-                {
-                    /* current thread on current cpu */
-                    if (cpu != current_cpu)
-                    {
-                        /* bind to other cpu */
-                        rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << cpu);
-                        /* self cpu need reschedule */
-                        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-                        rt_schedule();
-                        level = rt_spin_lock_irqsave(&(thread->spinlock));
-                    }
-                    /* else do nothing */
-                }
-                else
-                {
-                    /* no running on self cpu, but dest cpu can be itself */
-                    rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << thread->oncpu);
-                }
-            }
-            /* else do nothing */
-        }
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-    }
-}
 #endif
 
 /**
@@ -863,8 +768,6 @@ static void rt_thread_cpu_bind(rt_thread_t thread, int cpu)
  */
 rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
 {
-    rt_base_t level;
-
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
@@ -873,44 +776,12 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
     {
         case RT_THREAD_CTRL_CHANGE_PRIORITY:
         {
-            /* for ready thread, change queue */
-            if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
-            {
-                /* remove thread from schedule queue first */
-                rt_schedule_remove_thread(thread);
-                level = rt_spin_lock_irqsave(&(thread->spinlock));
-                /* change thread priority */
-                thread->current_priority = *(rt_uint8_t *)arg;
-
-                /* recalculate priority attribute */
-    #if RT_THREAD_PRIORITY_MAX > 32
-                thread->number      = thread->current_priority >> 3;            /* 5bit */
-                thread->number_mask = 1 << thread->number;
-                thread->high_mask   = 1 << (thread->current_priority & 0x07);   /* 3bit */
-    #else
-                thread->number_mask = 1 << thread->current_priority;
-    #endif /* RT_THREAD_PRIORITY_MAX > 32 */
-                thread->stat  = RT_THREAD_INIT;
-                rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-                /* insert thread to schedule queue again */
-                rt_schedule_insert_thread(thread);
-            }
-            else
-            {
-                level = rt_spin_lock_irqsave(&(thread->spinlock));
-                thread->current_priority = *(rt_uint8_t *)arg;
-
-                /* recalculate priority attribute */
-    #if RT_THREAD_PRIORITY_MAX > 32
-                thread->number      = thread->current_priority >> 3;            /* 5bit */
-                thread->number_mask = 1 << thread->number;
-                thread->high_mask   = 1 << (thread->current_priority & 0x07);   /* 3bit */
-    #else
-                thread->number_mask = 1 << thread->current_priority;
-    #endif /* RT_THREAD_PRIORITY_MAX > 32 */
-                rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-            }
-            break;
+            rt_err_t error;
+            rt_sched_lock_level_t slvl;
+            rt_sched_lock(&slvl);
+            error = rt_sched_thread_change_priority(thread, *(rt_uint8_t *)arg);
+            rt_sched_unlock(slvl);
+            return error;
         }
 
         case RT_THREAD_CTRL_STARTUP:
@@ -936,16 +807,14 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
             return rt_err;
         }
 
-    #ifdef RT_USING_SMP
         case RT_THREAD_CTRL_BIND_CPU:
         {
             rt_uint8_t cpu;
 
             cpu = (rt_uint8_t)(size_t)arg;
-            rt_thread_cpu_bind(thread, cpu);
-            break;
+            return rt_sched_thread_bind_cpu(thread, cpu);
         }
-#endif /*RT_USING_SMP*/
+
     default:
         break;
     }
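Caller-visible behaviour of the rewritten RT_THREAD_CTRL_CHANGE_PRIORITY case: the change now happens entirely under the scheduler lock and its result is propagated to the caller. A hedged usage sketch (priority value and target thread are illustrative):

/* Request a priority change and check the result. */
rt_uint8_t prio = 10;
rt_err_t err = rt_thread_control(thread, RT_THREAD_CTRL_CHANGE_PRIORITY, &prio);
if (err != RT_EOK)
{
    /* the scheduler rejected the change */
}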
@@ -958,7 +827,7 @@ RTM_EXPORT(rt_thread_control);
 #include <lwp_signal.h>
 #endif
 
-static void rt_thread_set_suspend_state(struct rt_thread *thread, int suspend_flag)
+static void _thread_set_suspend_state(struct rt_thread *thread, int suspend_flag)
 {
     rt_uint8_t stat = RT_THREAD_SUSPEND_UNINTERRUPTIBLE;
 
@@ -978,7 +847,7 @@ static void rt_thread_set_suspend_state(struct rt_thread *thread, int suspend_fl
         RT_ASSERT(0);
         break;
     }
-    thread->stat = stat | (thread->stat & ~RT_THREAD_STAT_MASK);
+    RT_SCHED_CTX(thread).stat = stat | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
 }
 
 /**
@@ -992,15 +861,25 @@ static void rt_thread_set_suspend_state(struct rt_thread *thread, int suspend_fl
 *          other threads and occupying this resource, starvation can occur very easily.
  *
  * @param   thread the thread to be suspended.
+ * @param   susp_list the list the thread is enqueued to; RT_NULL if none.
+ * @param   ipc_flags is a flag for the suspend operation; it determines how the thread is queued on susp_list.
+ *          The flag can be ONE of the following values:
+ *              RT_IPC_FLAG_PRIO          The pending threads will queue in order of priority.
+ *              RT_IPC_FLAG_FIFO          The pending threads will queue in the first-in-first-out method
+ *                                         (also known as first-come-first-served (FCFS) scheduling strategy).
+ *          NOTE: RT_IPC_FLAG_FIFO is a non-real-time scheduling mode. It is strongly recommended to use
+ *          RT_IPC_FLAG_PRIO to keep the thread real-time, UNLESS your application depends on the
+ *          first-in-first-out principle and you clearly understand that all threads involved with
+ *          this object will become non-real-time threads.
  * @param   suspend_flag status flag of the thread to be suspended.
  *
  * @return  Return the operation status. If the return value is RT_EOK, the function is successfully executed.
  *          If the return value is any other values, it means this operation failed.
  */
-rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag)
+rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int ipc_flags, int suspend_flag)
 {
     rt_base_t stat;
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
 
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
@@ -1009,13 +888,13 @@ rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag)
 
     LOG_D("thread suspend:  %s", thread->parent.name);
 
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
+    rt_sched_lock(&slvl);
 
-    stat = thread->stat & RT_THREAD_STAT_MASK;
+    stat = rt_sched_thread_get_stat(thread);
     if ((stat != RT_THREAD_READY) && (stat != RT_THREAD_RUNNING))
     {
         LOG_D("thread suspend: thread disorder, 0x%2x", thread->stat);
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+        rt_sched_unlock(slvl);
         return -RT_ERROR;
     }
 
@@ -1024,28 +903,74 @@ rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag)
         /* not suspend running status thread on other core */
         RT_ASSERT(thread == rt_thread_self());
     }
+
 #ifdef RT_USING_SMART
+    rt_sched_unlock(slvl);
+
+    /* check pending signals for thread before suspend */
     if (lwp_thread_signal_suspend_check(thread, suspend_flag) == 0)
     {
         /* not to suspend */
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
         return -RT_EINTR;
     }
+
+    rt_sched_lock(&slvl);
+    if (stat == RT_THREAD_READY)
+    {
+        stat = rt_sched_thread_get_stat(thread);
+
+        if (stat != RT_THREAD_READY)
+        {
+            /* status updated while we check for signal */
+            rt_sched_unlock(slvl);
+            return -RT_ERROR;
+        }
+    }
 #endif
 
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-    rt_schedule_remove_thread(thread);
-    level = rt_spin_lock_irqsave(&(thread->spinlock));
+    /* change thread stat */
+    rt_sched_remove_thread(thread);
+    _thread_set_suspend_state(thread, suspend_flag);
 
-    rt_thread_set_suspend_state(thread, suspend_flag);
+    if (susp_list)
+    {
+        /**
+         * enqueue the thread on the suspend list before leaving the
+         * scheduler's critical region, so we won't miss notifications of
+         * async events.
+         */
+        rt_susp_list_enqueue(susp_list, thread, ipc_flags);
+    }
 
     /* stop thread timer anyway */
-    rt_timer_stop(&(thread->thread_timer));
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+    rt_sched_thread_timer_stop(thread);
+
+    rt_sched_unlock(slvl);
 
     RT_OBJECT_HOOK_CALL(rt_thread_suspend_hook, (thread));
     return RT_EOK;
 }
+RTM_EXPORT(rt_thread_suspend_to_list);
+
+/**
+ * @brief   This function will suspend the specified thread and change it to suspend state.
+ *
+ * @note    This function ONLY can suspend current thread itself.
+ *              rt_thread_suspend(rt_thread_self());
+ *
+ *          Do not use rt_thread_suspend to suspend other threads. You have no way of knowing what code a
+ *          thread is executing when you suspend it. If you suspend a thread while it is sharing a resource with
+ *          other threads and occupying this resource, starvation can occur very easily.
+ *
+ * @param   thread the thread to be suspended.
+ * @param   suspend_flag status flag of the thread to be suspended.
+ *
+ * @return  Return the operation status. If the return value is RT_EOK, the function is successfully executed.
+ *          If the return value is any other values, it means this operation failed.
+ */
+rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag)
+{
+    return rt_thread_suspend_to_list(thread, RT_NULL, 0, suspend_flag);
+}
 RTM_EXPORT(rt_thread_suspend_with_flag);
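A hedged caller sketch for the new rt_thread_suspend_to_list(): an IPC object parks the current thread on its own suspend list in priority order, then gives up the CPU. The wrapper name and the origin of the list are illustrative, not from this patch:

/* Park the calling thread on an IPC object's suspend list. */
static rt_err_t ipc_wait(rt_list_t *suspend_list)
{
    rt_thread_t self = rt_thread_self();
    rt_err_t err = rt_thread_suspend_to_list(self, suspend_list,
                                             RT_IPC_FLAG_PRIO,
                                             RT_UNINTERRUPTIBLE);
    if (err == RT_EOK)
    {
        rt_schedule();          /* actually yield the CPU */
        err = self->error;      /* RT_EOK, -RT_ETIMEOUT or -RT_EINTR */
    }
    return err;
}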
 
 rt_err_t rt_thread_suspend(rt_thread_t thread)
@@ -1064,41 +989,31 @@ RTM_EXPORT(rt_thread_suspend);
  */
 rt_err_t rt_thread_resume(rt_thread_t thread)
 {
-    rt_base_t level;
+    rt_sched_lock_level_t slvl;
+    rt_err_t error;
 
     /* parameter check */
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
 
-    LOG_D("thread resume:  %s", thread->parent.name);
-
-    level = rt_spin_lock_irqsave(&(thread->spinlock)); //TODO need lock for cpu
+    LOG_D("thread resume: %s", thread->parent.name);
 
-    if ((thread->stat & RT_THREAD_SUSPEND_MASK) != RT_THREAD_SUSPEND_MASK)
-    {
-        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+    rt_sched_lock(&slvl);
 
-        LOG_D("thread resume: thread disorder, %d",
-              thread->stat);
+    error = rt_sched_thread_ready(thread);
 
-        return -RT_ERROR;
+    if (!error)
+    {
+        error = rt_sched_unlock_n_resched(slvl);
+    }
+    else
+    {
+        rt_sched_unlock(slvl);
     }
-
-    /* remove from suspend list */
-    rt_list_remove(&(thread->tlist));
-
-    rt_timer_stop(&thread->thread_timer);
-
-#ifdef RT_USING_SMART
-    thread->wakeup.func = RT_NULL;
-#endif
-
-    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
-    /* insert to schedule ready list */
-    rt_schedule_insert_thread(thread);
 
     RT_OBJECT_HOOK_CALL(rt_thread_resume_hook, (thread));
-    return RT_EOK;
+
+    return error;
 }
 RTM_EXPORT(rt_thread_resume);
 
@@ -1112,19 +1027,21 @@ RTM_EXPORT(rt_thread_resume);
  */
 rt_err_t rt_thread_wakeup(rt_thread_t thread)
 {
-    register rt_base_t temp;
+    rt_sched_lock_level_t slvl;
     rt_err_t ret;
     rt_wakeup_func_t func = RT_NULL;
 
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
-    temp = rt_spin_lock_irqsave(&(thread->spinlock));
-    func = thread->wakeup.func;
-    thread->wakeup.func = RT_NULL;
-    rt_spin_unlock_irqrestore(&(thread->spinlock), temp);
+
+    rt_sched_lock(&slvl);
+    func = thread->wakeup_handle.func;
+    thread->wakeup_handle.func = RT_NULL;
+    rt_sched_unlock(slvl);
+
     if (func)
     {
-        ret = func(thread->wakeup.user_data, thread);
+        ret = func(thread->wakeup_handle.user_data, thread);
     }
     else
     {
@@ -1136,15 +1053,15 @@ RTM_EXPORT(rt_thread_wakeup);
 
 void rt_thread_wakeup_set(struct rt_thread *thread, rt_wakeup_func_t func, void* user_data)
 {
-    register rt_base_t temp;
+    rt_sched_lock_level_t slvl;
 
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
 
-    temp = rt_spin_lock_irqsave(&(thread->spinlock));
-    thread->wakeup.func = func;
-    thread->wakeup.user_data = user_data;
-    rt_spin_unlock_irqrestore(&(thread->spinlock), temp);
+    rt_sched_lock(&slvl);
+    thread->wakeup_handle.func = func;
+    thread->wakeup_handle.user_data = user_data;
+    rt_sched_unlock(slvl);
 }
 RTM_EXPORT(rt_thread_wakeup_set);
 #endif
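The wakeup_handle rename keeps the one-shot callback contract: rt_thread_wakeup() consumes the registered function exactly once and clears the handle under the scheduler lock. A minimal sketch (the callback body is illustrative):

/* Hypothetical wakeup filter: simply ready the thread. */
static rt_err_t my_wakeup(void *user_data, struct rt_thread *thread)
{
    RT_UNUSED(user_data);
    return rt_thread_resume(thread);
}

/* Register; a later rt_thread_wakeup(thread) calls my_wakeup(RT_NULL, thread). */
rt_thread_wakeup_set(thread, my_wakeup, RT_NULL);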

+ 88 - 59
src/timer.c

@@ -20,6 +20,7 @@
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to timer.c
  * 2022-04-19     Stanley      Correct descriptions
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2024-01-25     Shell        add RT_TIMER_FLAG_THREAD_TIMER for timer to sync with sched
  */
 
 #include <rtthread.h>
@@ -31,7 +32,7 @@
 
 /* hard timer list */
 static rt_list_t _timer_list[RT_TIMER_SKIP_LIST_LEVEL];
-static struct rt_spinlock _hard_spinlock;
+static struct rt_spinlock _htimer_lock;
 
 #ifdef RT_USING_TIMER_SOFT
 
@@ -50,7 +51,7 @@ static struct rt_spinlock _hard_spinlock;
 static rt_uint8_t _soft_timer_status = RT_SOFT_TIMER_IDLE;
 /* soft timer list */
 static rt_list_t _soft_timer_list[RT_TIMER_SKIP_LIST_LEVEL];
-static struct rt_spinlock _soft_spinlock;
+static struct rt_spinlock _stimer_lock;
 static struct rt_thread _timer_thread;
 static struct rt_semaphore _soft_timer_sem;
 rt_align(RT_ALIGN_SIZE)
@@ -94,6 +95,35 @@ void rt_timer_exit_sethook(void (*hook)(struct rt_timer *timer))
 /**@}*/
 #endif /* RT_USING_HOOK */
 
+rt_inline struct rt_spinlock* _timerlock_idx(struct rt_timer *timer)
+{
+#ifdef RT_USING_TIMER_SOFT
+    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
+    {
+        return &_stimer_lock;
+    }
+    else
+#endif /* RT_USING_TIMER_SOFT */
+    {
+        return &_htimer_lock;
+    }
+}
+
+rt_inline rt_list_t* _timerhead_idx(struct rt_timer *timer)
+{
+#ifdef RT_USING_TIMER_SOFT
+    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
+    {
+        /* the timer belongs to the soft timer list */
+        return _soft_timer_list;
+    }
+    else
+#endif /* RT_USING_TIMER_SOFT */
+    {
+        /* the timer belongs to the system timer list */
+        return _timer_list;
+    }
+}
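These two helpers centralize the soft/hard branch that was previously open-coded at every call site. A usage sketch (the function name is illustrative):

/* A timer operation now resolves its lock and list through the helpers. */
static void timer_touch(rt_timer_t timer)
{
    struct rt_spinlock *lock = _timerlock_idx(timer);
    rt_base_t level = rt_spin_lock_irqsave(lock);

    /* ... safely inspect the list returned by _timerhead_idx(timer) ... */

    rt_spin_unlock_irqrestore(lock, level);
}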
 
 /**
 * @brief [internal] The init function of timer
@@ -280,17 +310,7 @@ rt_err_t rt_timer_detach(rt_timer_t timer)
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
     RT_ASSERT(rt_object_is_systemobject(&timer->parent));
 
-#ifdef RT_USING_TIMER_SOFT
-    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
-    {
-        spinlock = &_soft_spinlock;
-    }
-    else
-#endif /* RT_USING_TIMER_SOFT */
-    {
-        spinlock = &_hard_spinlock;
-    }
-
+    spinlock = _timerlock_idx(timer);
     level = rt_spin_lock_irqsave(spinlock);
 
     _timer_remove(timer);
@@ -325,6 +345,7 @@ RTM_EXPORT(rt_timer_detach);
  *
  *          RT_TIMER_FLAG_HARD_TIMER        Hardware timer
  *          RT_TIMER_FLAG_SOFT_TIMER        Software timer
+ *          RT_TIMER_FLAG_THREAD_TIMER      Thread timer
  *
  *        NOTE:
 *        You can use multiple values with the "|" logical operator. By default, the system uses RT_TIMER_FLAG_HARD_TIMER.
@@ -373,16 +394,7 @@ rt_err_t rt_timer_delete(rt_timer_t timer)
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
     RT_ASSERT(rt_object_is_systemobject(&timer->parent) == RT_FALSE);
 
-#ifdef RT_USING_TIMER_SOFT
-    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
-    {
-        spinlock = &_soft_spinlock;
-    }
-    else
-#endif /* RT_USING_TIMER_SOFT */
-    {
-        spinlock = &_hard_spinlock;
-    }
+    spinlock = _timerlock_idx(timer);
 
     level = rt_spin_lock_irqsave(spinlock);
 
@@ -485,6 +497,8 @@ static rt_err_t _timer_start(rt_list_t *timer_list, rt_timer_t timer)
  */
 rt_err_t rt_timer_start(rt_timer_t timer)
 {
+    rt_sched_lock_level_t slvl;
+    int is_thread_timer = 0;
     struct rt_spinlock *spinlock;
     rt_list_t *timer_list;
     rt_base_t level;
@@ -498,13 +512,24 @@ rt_err_t rt_timer_start(rt_timer_t timer)
     if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
     {
         timer_list = _soft_timer_list;
-        spinlock = &_soft_spinlock;
+        spinlock = &_stimer_lock;
     }
     else
 #endif /* RT_USING_TIMER_SOFT */
     {
         timer_list = _timer_list;
-        spinlock = &_hard_spinlock;
+        spinlock = &_htimer_lock;
+    }
+
+    if (timer->parent.flag & RT_TIMER_FLAG_THREAD_TIMER)
+    {
+        rt_thread_t thread;
+        is_thread_timer = 1;
+        rt_sched_lock(&slvl);
+
+        thread = rt_container_of(timer, struct rt_thread, thread_timer);
+        RT_ASSERT(rt_object_get_type(&thread->parent) == RT_Object_Class_Thread);
+        rt_sched_thread_timer_start(thread);
     }
 
     level = rt_spin_lock_irqsave(spinlock);
@@ -512,17 +537,19 @@ rt_err_t rt_timer_start(rt_timer_t timer)
     err = _timer_start(timer_list, timer);
 
 #ifdef RT_USING_TIMER_SOFT
-    if (err == RT_EOK)
+    if (err == RT_EOK && (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER))
     {
-        if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
-        {
-            rt_sem_release(&_soft_timer_sem);
-        }
+        rt_sem_release(&_soft_timer_sem);
     }
 #endif /* RT_USING_TIMER_SOFT */
 
     rt_spin_unlock_irqrestore(spinlock, level);
 
+    if (is_thread_timer)
+    {
+        rt_sched_unlock(slvl);
+    }
+
     return err;
 }
 RTM_EXPORT(rt_timer_start);
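RT_TIMER_FLAG_THREAD_TIMER marks a timer embedded in a struct rt_thread, letting rt_timer_start() synchronize with the scheduler before arming it; rt_container_of() recovers the owning thread from the timer pointer. A minimal sketch mirroring the code above (the helper name is illustrative):

/* Map an embedded thread timer back to its owning thread. */
static rt_thread_t timer_owner(rt_timer_t timer)
{
    RT_ASSERT(timer->parent.flag & RT_TIMER_FLAG_THREAD_TIMER);
    return rt_container_of(timer, struct rt_thread, thread_timer);
}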
@@ -543,16 +570,8 @@ rt_err_t rt_timer_stop(rt_timer_t timer)
     RT_ASSERT(timer != RT_NULL);
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
 
-#ifdef RT_USING_TIMER_SOFT
-    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
-    {
-        spinlock = &_soft_spinlock;
-    }
-    else
-#endif /* RT_USING_TIMER_SOFT */
-    {
-        spinlock = &_hard_spinlock;
-    }
+    spinlock = _timerlock_idx(timer);
+
     level = rt_spin_lock_irqsave(spinlock);
 
     if (!(timer->parent.flag & RT_TIMER_FLAG_ACTIVATED))
@@ -565,6 +584,7 @@ rt_err_t rt_timer_stop(rt_timer_t timer)
     _timer_remove(timer);
     /* change status */
     timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
+
     rt_spin_unlock_irqrestore(spinlock, level);
 
     return RT_EOK;
@@ -582,10 +602,16 @@ RTM_EXPORT(rt_timer_stop);
  */
 rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg)
 {
+    struct rt_spinlock *spinlock;
+    rt_base_t level;
+
     /* parameter check */
     RT_ASSERT(timer != RT_NULL);
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
 
+    spinlock = _timerlock_idx(timer);
+
+    level = rt_spin_lock_irqsave(spinlock);
     switch (cmd)
     {
     case RT_TIMER_CTRL_GET_TIME:
@@ -640,6 +666,7 @@ rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg)
     default:
         break;
     }
+    rt_spin_unlock_irqrestore(spinlock, level);
 
     return RT_EOK;
 }
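Taking the per-class lock around the whole switch makes rt_timer_control() safe against a concurrent rt_timer_check() touching the same timer. Usage is unchanged; a sketch, reusing t from the earlier example:

    /* change the period: RT_TIMER_CTRL_SET_TIME takes a pointer to rt_tick_t */
    rt_tick_t period = rt_tick_from_millisecond(500);
    rt_timer_control(t, RT_TIMER_CTRL_SET_TIME, &period);

    /* read the configured interval back the same way */
    rt_tick_t configured;
    rt_timer_control(t, RT_TIMER_CTRL_GET_TIME, &configured);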
@@ -660,21 +687,23 @@ void rt_timer_check(void)
 
     RT_ASSERT(rt_interrupt_get_nest() > 0);
 
+    LOG_D("timer check enter");
+
+    level = rt_spin_lock_irqsave(&_htimer_lock);
+
+    current_tick = rt_tick_get();
+
 #ifdef RT_USING_SMP
+    /* hard timer expiry is serviced on core 0 only */
     if (rt_hw_cpu_id() != 0)
     {
+        rt_spin_unlock_irqrestore(&_htimer_lock, level);
         return;
     }
 #endif
 
     rt_list_init(&list);
 
-    LOG_D("timer check enter");
-
-    current_tick = rt_tick_get();
-
-    level = rt_spin_lock_irqsave(&_hard_spinlock);
-
     while (!rt_list_isempty(&_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
     {
         t = rt_list_entry(_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1].next,
@@ -696,7 +725,7 @@ void rt_timer_check(void)
             }
             /* add timer to temporary list  */
             rt_list_insert_after(&list, &(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
-            rt_spin_unlock_irqrestore(&_hard_spinlock, level);
+            rt_spin_unlock_irqrestore(&_htimer_lock, level);
             /* call timeout function */
             t->timeout_func(t->parameter);
 
@@ -705,7 +734,7 @@ void rt_timer_check(void)
 
             RT_OBJECT_HOOK_CALL(rt_timer_exit_hook, (t));
             LOG_D("current tick: %d", current_tick);
-            level = rt_spin_lock_irqsave(&_hard_spinlock);
+            level = rt_spin_lock_irqsave(&_htimer_lock);
             /* Check whether the timer object is detached or started again */
             if (rt_list_isempty(&list))
             {
@@ -722,7 +751,7 @@ void rt_timer_check(void)
         }
         else break;
     }
-    rt_spin_unlock_irqrestore(&_hard_spinlock, level);
+    rt_spin_unlock_irqrestore(&_htimer_lock, level);
     LOG_D("timer check leave");
 }
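rt_timer_check() runs in interrupt context (enforced by the rt_interrupt_get_nest() assertion above) and is reached from the tick handler of each core. A sketch of a typical BSP call site; the ISR name is a placeholder:

    /* typical tick ISR shape */
    void systick_isr(void)
    {
        rt_interrupt_enter();
        rt_tick_increase();   /* advances the tick and calls rt_timer_check() */
        rt_interrupt_leave();
    }

On SMP every core briefly takes the hard-timer lock, but only core 0 goes on to service the expiry list.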
 
@@ -736,9 +765,9 @@ rt_tick_t rt_timer_next_timeout_tick(void)
     rt_base_t level;
     rt_tick_t next_timeout = RT_TICK_MAX;
 
-    level = rt_spin_lock_irqsave(&_hard_spinlock);
+    level = rt_spin_lock_irqsave(&_htimer_lock);
     _timer_list_next_timeout(_timer_list, &next_timeout);
-    rt_spin_unlock_irqrestore(&_hard_spinlock, level);
+    rt_spin_unlock_irqrestore(&_htimer_lock, level);
 
     return next_timeout;
 }
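This query is what a tickless-idle or power-management layer uses to size its sleep. A hedged sketch, ignoring tick wrap-around, with the hardware step left as a placeholder:

    rt_tick_t next = rt_timer_next_timeout_tick();
    if (next != RT_TICK_MAX)   /* RT_TICK_MAX means no hard timer is pending */
    {
        rt_tick_t delta = next - rt_tick_get();
        /* program a one-shot wakeup 'delta' ticks ahead ... */
    }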
@@ -757,7 +786,7 @@ static void _soft_timer_check(void)
 
     rt_list_init(&list);
     LOG_D("software timer check enter");
-    level = rt_spin_lock_irqsave(&_soft_spinlock);
+    level = rt_spin_lock_irqsave(&_stimer_lock);
 
     while (!rt_list_isempty(&_soft_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
     {
@@ -785,7 +814,7 @@ static void _soft_timer_check(void)
 
             _soft_timer_status = RT_SOFT_TIMER_BUSY;
 
-            rt_spin_unlock_irqrestore(&_soft_spinlock, level);
+            rt_spin_unlock_irqrestore(&_stimer_lock, level);
 
             /* call timeout function */
             t->timeout_func(t->parameter);
@@ -793,7 +822,7 @@ static void _soft_timer_check(void)
             RT_OBJECT_HOOK_CALL(rt_timer_exit_hook, (t));
             LOG_D("current tick: %d", current_tick);
 
-            level = rt_spin_lock_irqsave(&_soft_spinlock);
+            level = rt_spin_lock_irqsave(&_stimer_lock);
 
             _soft_timer_status = RT_SOFT_TIMER_IDLE;
             /* Check whether the timer object is detached or started again */
@@ -813,7 +842,7 @@ static void _soft_timer_check(void)
         else break; /* nothing left to check */
     }
 
-    rt_spin_unlock_irqrestore(&_soft_spinlock, level);
+    rt_spin_unlock_irqrestore(&_stimer_lock, level);
 
     LOG_D("software timer check leave");
 }
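Both checkers follow the same discipline: move the expired node onto a stack-local list, drop the class lock before running the callback, then re-acquire it and use the local list to detect whether the callback detached or restarted the timer. Dropping the lock is not optional; a timeout callback may itself call rt_timer_start() or rt_timer_stop(), which take the same lock and would deadlock if it were still held.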
@@ -836,9 +865,9 @@ static void _timer_thread_entry(void *parameter)
     while (1)
     {
         /* get the next timeout tick */
-        level = rt_spin_lock_irqsave(&_soft_spinlock);
+        level = rt_spin_lock_irqsave(&_stimer_lock);
         ret = _timer_list_next_timeout(_soft_timer_list, &next_timeout);
-        rt_spin_unlock_irqrestore(&_soft_spinlock, level);
+        rt_spin_unlock_irqrestore(&_stimer_lock, level);
 
         if (ret != RT_EOK)
         {
@@ -878,7 +907,7 @@ void rt_system_timer_init(void)
     {
         rt_list_init(_timer_list + i);
     }
-    rt_spin_lock_init(&_hard_spinlock);
+    rt_spin_lock_init(&_htimer_lock);
 }
 
 /**
@@ -897,7 +926,7 @@ void rt_system_timer_thread_init(void)
     {
         rt_list_init(_soft_timer_list + i);
     }
-    rt_spin_lock_init(&_soft_spinlock);
+    rt_spin_lock_init(&_stimer_lock);
     rt_sem_init(&_soft_timer_sem, "stimer", 0, RT_IPC_FLAG_PRIO);
     /* start software timer thread */
     rt_thread_init(&_timer_thread,

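_soft_timer_sem, initialized here, is the wakeup channel between rt_timer_start() (which releases it, as shown earlier) and the timer thread. The consumer side is only partly visible in the _timer_thread_entry() hunk above; a plausible reconstruction of the elided branch:

    /* hedged sketch of the consumer inside _timer_thread_entry()'s loop */
    if (ret != RT_EOK)
    {
        /* no soft timer armed: block until rt_timer_start() signals */
        rt_sem_take(&_soft_timer_sem, RT_WAITING_FOREVER);
    }
    else
    {
        rt_tick_t current_tick = rt_tick_get();
        if ((next_timeout - current_tick) < RT_TICK_MAX / 2)
        {
            /* sleep at most until the next soft timeout is due */
            rt_sem_take(&_soft_timer_sem, next_timeout - current_tick);
        }
    }
    _soft_timer_check();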