
🎈 perf: perf rt_hw_interrupt_disable/enable (#8042)

Signed-off-by: Shell <smokewood@qq.com>
Co-authored-by: Shell <smokewood@qq.com>
xqyjlj 1 year ago
parent commit 3283f54c7a
80 files changed, 2436 insertions(+), 1905 deletions(-)
  1. +3 -1     bsp/qemu-virt64-aarch64/drivers/secondary_cpu.c
  2. +3 -1     components/dfs/dfs_v1/src/dfs.c
  3. +3 -1     components/dfs/dfs_v2/src/dfs.c
  4. +2 -0     components/drivers/include/drivers/serial.h
  5. +1 -0     components/drivers/include/ipc/completion.h
  6. +1 -0     components/drivers/include/ipc/dataqueue.h
  7. +1 -0     components/drivers/include/ipc/ringblk_buf.h
  8. +2 -0     components/drivers/include/ipc/waitqueue.h
  9. +1 -0     components/drivers/include/ipc/workqueue.h
  10. +11 -9   components/drivers/ipc/completion.c
  11. +26 -23  components/drivers/ipc/dataqueue.c
  12. +12 -10  components/drivers/ipc/ringblk_buf.c
  13. +19 -14  components/drivers/ipc/waitqueue.c
  14. +18 -18  components/drivers/ipc/workqueue.c
  15. +2 -9    components/drivers/ktime/src/hrtimer.c
  16. +24 -21  components/drivers/serial/serial.c
  17. +5 -12   components/drivers/tty/console.c
  18. +1 -0    components/drivers/tty/include/tty.h
  19. +24 -18  components/drivers/tty/n_tty.c
  20. +2 -5    components/drivers/tty/pty.c
  21. +8 -9    components/drivers/tty/tty.c
  22. +5 -5    components/drivers/tty/tty_ioctl.c
  23. +11 -12  components/drivers/tty/tty_ldisc.c
  24. +56 -33  components/finsh/cmd.c
  25. +20 -14  components/libc/compilers/common/ctime.c
  26. +6 -4    components/libc/posix/io/poll/poll.c
  27. +1 -6    components/libc/posix/pthreads/pthread.c
  28. +25 -0   components/lwp/arch/aarch64/cortex-a/lwp_arch.c
  29. +13 -20  components/lwp/arch/aarch64/cortex-a/lwp_gcc.S
  30. +82 -35  components/lwp/libc_musl.h
  31. +61 -17  components/lwp/lwp.c
  32. +26 -2   components/lwp/lwp.h
  33. +0 -47   components/lwp/lwp_clone.h
  34. +4 -4    components/lwp/lwp_internal.h
  35. +338 -294 components/lwp/lwp_ipc.c
  36. +234 -146 components/lwp/lwp_pid.c
  37. +36 -3   components/lwp/lwp_pid.h
  38. +11 -11  components/lwp/lwp_pmutex.c
  39. +48 -61  components/lwp/lwp_signal.c
  40. +0 -2    components/lwp/lwp_signal.h
  41. +137 -85 components/lwp/lwp_syscall.c
  42. +5 -1    components/lwp/lwp_syscall.h
  43. +65 -18  components/lwp/lwp_tid.c
  44. +19 -18  components/mm/mm_page.c
  45. +7 -6    components/net/lwip/port/ethernetif.c
  46. +2 -0    components/net/lwip/port/netif/ethernetif.h
  47. +4 -2    components/net/lwip/port/sys_arch.c
  48. +18 -17  components/net/netdev/src/netdev.c
  49. +4 -2    components/net/sal/impl/af_inet_lwip.c
  50. +8 -7    components/utilities/resource/resource_id.c
  51. +3 -1    components/utilities/resource/resource_id.h
  52. +12 -11  components/utilities/ulog/ulog.c
  53. +1 -0    examples/utest/testcases/kernel/atomic_tc.c
  54. +2 -0    examples/utest/testcases/kernel/event_tc.c
  55. +3 -0    examples/utest/testcases/kernel/mailbox_tc.c
  56. +9 -0    examples/utest/testcases/kernel/messagequeue_tc.c
  57. +13 -6   examples/utest/testcases/kernel/mutex_tc.c
  58. +0 -7    include/rtatomic.h
  59. +81 -37  include/rtdef.h
  60. +14 -15  include/rthw.h
  61. +5 -14   include/rtthread.h
  62. +1 -0    libcpu/Kconfig
  63. +29 -5   libcpu/aarch64/common/context_gcc.S
  64. +48 -35  libcpu/aarch64/common/cpu.c
  65. +2 -6    libcpu/aarch64/common/cpuport.h
  66. +47 -2   libcpu/aarch64/common/interrupt.c
  67. +3 -0    libcpu/aarch64/common/trap.c
  68. +7 -0    src/Kconfig
  69. +16 -20  src/clock.c
  70. +5 -1    src/components.c
  71. +56 -65  src/cpu.c
  72. +20 -19  src/idle.c
  73. +126 -140 src/ipc.c
  74. +8 -22   src/irq.c
  75. +15 -27  src/mempool.c
  76. +46 -50  src/object.c
  77. +286 -218 src/scheduler_mp.c
  78. +0 -11   src/signal.c
  79. +71 -97  src/thread.c
  80. +92 -73  src/timer.c
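
Every file below applies the same transformation: the global rt_hw_interrupt_disable()/rt_hw_interrupt_enable() pair, which in RT-Thread's SMP configuration funnels all cores through a single kernel-wide critical section, becomes a per-object spinlock taken with rt_spin_lock_irqsave()/rt_spin_unlock_irqrestore(). A minimal before/after sketch using only APIs that appear in the diffs (struct my_obj is hypothetical):

#include <rthw.h>
#include <rtthread.h>

/* `struct my_obj` is a stand-in for completion, dataqueue, serial,
 * tty, ... each of which gains exactly this kind of member. */
struct my_obj
{
    int state;
    struct rt_spinlock spinlock;   /* new: one lock per object */
};

/* before: masks interrupts and, on SMP, contends on the kernel-wide
 * lock, so every core pays even for unrelated objects */
static void my_obj_set_old(struct my_obj *obj, int state)
{
    rt_base_t level = rt_hw_interrupt_disable();
    obj->state = state;
    rt_hw_interrupt_enable(level);
}

/* after: only contenders for this object's lock serialize; the irqsave
 * variant still masks local interrupts, so the lock stays ISR-safe */
static void my_obj_set_new(struct my_obj *obj, int state)
{
    rt_base_t level = rt_spin_lock_irqsave(&(obj->spinlock));
    obj->state = state;
    rt_spin_unlock_irqrestore(&(obj->spinlock), level);
}

/* the lock must be initialized before first use, hence the
 * rt_spin_lock_init() calls added to every *_init()/register path */
static void my_obj_init(struct my_obj *obj)
{
    obj->state = 0;
    rt_spin_lock_init(&(obj->spinlock));
}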

+ 3 - 1
bsp/qemu-virt64-aarch64/drivers/secondary_cpu.c

@@ -12,6 +12,7 @@
 #include "gic.h"
 #include "interrupt.h"
 #include "mmu.h"
+#include "gtimer.h"
 
 #ifdef RT_USING_SMP
 
@@ -29,8 +30,9 @@ void rt_hw_secondary_cpu_bsp_start(void)
     arm_gic_cpu_init(0, 0);
 
     // local timer init
+    rt_hw_gtimer_init();
 
     rt_system_scheduler_start();
 }
 
-#endif // SMP
+#endif // SMP

+ 3 - 1
components/dfs/dfs_v1/src/dfs.c

@@ -800,11 +800,13 @@ struct dfs_fdtable *dfs_fdtable_get_pid(int pid)
     struct rt_lwp *lwp = RT_NULL;
     struct dfs_fdtable *fdt = RT_NULL;
 
-    lwp = lwp_from_pid(pid);
+    lwp_pid_lock_take();
+    lwp = lwp_from_pid_locked(pid);
     if (lwp)
     {
         fdt = &lwp->fdt;
     }
+    lwp_pid_lock_release();
 
     return fdt;
 }

+ 3 - 1
components/dfs/dfs_v2/src/dfs.c

@@ -401,11 +401,13 @@ struct dfs_fdtable *dfs_fdtable_get_pid(int pid)
     struct rt_lwp *lwp = RT_NULL;
     struct dfs_fdtable *fdt = RT_NULL;
 
-    lwp = lwp_from_pid(pid);
+    lwp_pid_lock_take();
+    lwp = lwp_from_pid_locked(pid);
     if (lwp)
     {
         fdt = &lwp->fdt;
     }
+    lwp_pid_lock_release();
 
     return fdt;
 }
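
Both DFS versions get the identical fix: the pid-to-lwp lookup now happens under the lwp pid lock, because the struct rt_lwp pointer returned by lwp_from_pid_locked() is only guaranteed to stay valid while that lock is held. A caller-side sketch of the rule, copy out what you need before releasing (pgrp_of_pid is purely illustrative; the lwp fields used appear in the tty.c hunks later in this commit):

#include <rtthread.h>
/* assumes the lwp headers (struct rt_lwp, lwp_pid_lock_take,
 * lwp_from_pid_locked, lwp_pid_lock_release) are in scope */

static pid_t pgrp_of_pid(int pid)
{
    struct rt_lwp *lwp;
    pid_t pgrp = -1;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);   /* pointer valid only under the lock */
    if (lwp)
    {
        pgrp = lwp->__pgrp;           /* copy out while still held */
    }
    lwp_pid_lock_release();

    return pgrp;
}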

+ 2 - 0
components/drivers/include/drivers/serial.h

@@ -152,6 +152,8 @@ struct rt_serial_device
     void *serial_rx;
     void *serial_tx;
 
+    struct rt_spinlock spinlock;
+
     struct rt_device_notify rx_notify;
 };
 typedef struct rt_serial_device rt_serial_t;

+ 1 - 0
components/drivers/include/ipc/completion.h

@@ -22,6 +22,7 @@ struct rt_completion
 
     /* suspended list */
     rt_list_t suspended_list;
+    struct rt_spinlock spinlock;
 };
 
 void rt_completion_init(struct rt_completion *completion);

+ 1 - 0
components/drivers/include/ipc/dataqueue.h

@@ -33,6 +33,7 @@ struct rt_data_queue
     rt_uint16_t is_full   : 1;
 
     struct rt_data_item *queue;
+    struct rt_spinlock spinlock;
 
     rt_list_t suspended_push_list;
     rt_list_t suspended_pop_list;

+ 1 - 0
components/drivers/include/ipc/ringblk_buf.h

@@ -78,6 +78,7 @@ struct rt_rbb
     rt_slist_t *tail;
     /* free node list */
     rt_slist_t free_list;
+    struct rt_spinlock spinlock;
 };
 typedef struct rt_rbb *rt_rbb_t;
 

+ 2 - 0
components/drivers/include/ipc/waitqueue.h

@@ -26,6 +26,7 @@ struct rt_wqueue_node
     rt_thread_t polling_thread;
     rt_list_t   list;
 
+    rt_wqueue_t *wqueue;
     rt_wqueue_func_t wakeup;
     rt_uint32_t key;
 };
@@ -39,6 +40,7 @@ rt_inline void rt_wqueue_init(rt_wqueue_t *queue)
 
     queue->flag = RT_WQ_FLAG_CLEAN;
     rt_list_init(&(queue->waiting_list));
+    rt_spin_lock_init(&(queue->spinlock));
 }
 
 void rt_wqueue_add(rt_wqueue_t *queue, struct rt_wqueue_node *node);

+ 1 - 0
components/drivers/include/ipc/workqueue.h

@@ -41,6 +41,7 @@ struct rt_workqueue
 
     struct rt_semaphore sem;
     rt_thread_t    work_thread;
+    struct rt_spinlock spinlock;
 };
 
 struct rt_work

+ 11 - 9
components/drivers/ipc/completion.c

@@ -7,6 +7,7 @@
  * Date           Author       Notes
  * 2012-09-30     Bernard      first version.
  * 2021-08-18     chenyingchun add comments
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -25,10 +26,11 @@ void rt_completion_init(struct rt_completion *completion)
     rt_base_t level;
     RT_ASSERT(completion != RT_NULL);
 
-    level = rt_hw_interrupt_disable();
+    rt_spin_lock_init(&(completion->spinlock));
+    level = rt_spin_lock_irqsave(&(completion->spinlock));
     completion->flag = RT_UNCOMPLETED;
     rt_list_init(&completion->suspended_list);
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(completion->spinlock), level);
 }
 RTM_EXPORT(rt_completion_init);
 
@@ -62,7 +64,7 @@ rt_err_t rt_completion_wait(struct rt_completion *completion,
     result = RT_EOK;
     thread = rt_thread_self();
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(completion->spinlock));
     if (completion->flag != RT_COMPLETED)
     {
         /* only one thread can suspend on complete */
@@ -97,7 +99,7 @@ rt_err_t rt_completion_wait(struct rt_completion *completion,
                 rt_timer_start(&(thread->thread_timer));
             }
             /* enable interrupt */
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(completion->spinlock), level);
 
             /* do schedule */
             rt_schedule();
@@ -105,14 +107,14 @@ rt_err_t rt_completion_wait(struct rt_completion *completion,
             /* thread is waked up */
             result = thread->error;
 
-            level = rt_hw_interrupt_disable();
+            level = rt_spin_lock_irqsave(&(completion->spinlock));
         }
     }
     /* clean completed flag */
     completion->flag = RT_UNCOMPLETED;
 
 __exit:
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(completion->spinlock), level);
 
     return result;
 }
@@ -131,7 +133,7 @@ void rt_completion_done(struct rt_completion *completion)
     if (completion->flag == RT_COMPLETED)
         return;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(completion->spinlock));
     completion->flag = RT_COMPLETED;
 
     if (!rt_list_isempty(&(completion->suspended_list)))
@@ -146,14 +148,14 @@ void rt_completion_done(struct rt_completion *completion)
 
         /* resume it */
         rt_thread_resume(thread);
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(completion->spinlock), level);
 
         /* perform a schedule */
         rt_schedule();
     }
     else
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(completion->spinlock), level);
     }
 }
 RTM_EXPORT(rt_completion_done);
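
The shape to notice in rt_completion_wait() is that the new lock is never held across rt_schedule(): the waiter suspends itself, drops the lock, schedules away, and re-takes the lock after wakeup. Stripped of timeout and error handling it reduces to roughly the following (the suspend/enqueue lines follow the kernel's usual tlist idiom and are partly assumed, since the hunks elide them):

/* condensed sketch of rt_completion_wait(), not the full function */
static rt_err_t completion_wait_sketch(struct rt_completion *completion)
{
    rt_thread_t thread = rt_thread_self();
    rt_base_t level = rt_spin_lock_irqsave(&(completion->spinlock));

    if (completion->flag != RT_COMPLETED)
    {
        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
        rt_list_insert_before(&(completion->suspended_list), &(thread->tlist));

        /* drop the lock BEFORE rescheduling... */
        rt_spin_unlock_irqrestore(&(completion->spinlock), level);
        rt_schedule();     /* sleeps here until rt_completion_done() */

        /* ...and re-take it once woken */
        level = rt_spin_lock_irqsave(&(completion->spinlock));
    }
    completion->flag = RT_UNCOMPLETED;    /* consume the completion */
    rt_spin_unlock_irqrestore(&(completion->spinlock), level);

    return thread->error;
}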

+ 26 - 23
components/drivers/ipc/dataqueue.c

@@ -7,6 +7,7 @@
  * Date           Author       Notes
  * 2012-09-30     Bernard      first version.
  * 2016-10-31     armink       fix some resume push and pop thread bugs
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -57,6 +58,8 @@ rt_data_queue_init(struct rt_data_queue *queue,
     queue->is_empty = 1;
     queue->is_full = 0;
 
+    rt_spin_lock_init(&(queue->spinlock));
+
     rt_list_init(&(queue->suspended_push_list));
     rt_list_init(&(queue->suspended_pop_list));
 
@@ -103,7 +106,7 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
     result = RT_EOK;
     thread = rt_thread_self();
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
     while (queue->is_full)
     {
         /* queue is full */
@@ -131,14 +134,14 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
         }
 
         /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
         /* do schedule */
         rt_schedule();
 
         /* thread is waked up */
         result = thread->error;
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&(queue->spinlock));
         if (result != RT_EOK) goto __exit;
     }
 
@@ -165,7 +168,7 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
 
         /* resume it */
         rt_thread_resume(thread);
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
         /* perform a schedule */
         rt_schedule();
@@ -174,7 +177,7 @@ rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
     }
 
 __exit:
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     if ((result == RT_EOK) && queue->evt_notify != RT_NULL)
     {
         queue->evt_notify(queue, RT_DATAQUEUE_EVENT_PUSH);
@@ -222,7 +225,7 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
     result = RT_EOK;
     thread = rt_thread_self();
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
     while (queue->is_empty)
     {
         /* queue is empty */
@@ -249,14 +252,14 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
         }
 
         /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
         /* do schedule */
         rt_schedule();
 
         /* thread is waked up */
         result = thread->error;
-        level  = rt_hw_interrupt_disable();
+        level  = rt_spin_lock_irqsave(&(queue->spinlock));
         if (result != RT_EOK)
             goto __exit;
     }
@@ -286,14 +289,14 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
 
             /* resume it */
             rt_thread_resume(thread);
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
             /* perform a schedule */
             rt_schedule();
         }
         else
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
         }
 
         if (queue->evt_notify != RT_NULL)
@@ -303,7 +306,7 @@ rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
     }
 
 __exit:
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     if ((result == RT_EOK) && (queue->evt_notify != RT_NULL))
     {
         queue->evt_notify(queue, RT_DATAQUEUE_EVENT_POP);
@@ -339,12 +342,12 @@ rt_err_t rt_data_queue_peek(struct rt_data_queue *queue,
         return -RT_EEMPTY;
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
 
     *data_ptr = queue->queue[queue->get_index].data_ptr;
     *size     = queue->queue[queue->get_index].data_size;
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
     return RT_EOK;
 }
@@ -366,14 +369,14 @@ void rt_data_queue_reset(struct rt_data_queue *queue)
     RT_ASSERT(queue != RT_NULL);
     RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
 
     queue->get_index = 0;
     queue->put_index = 0;
     queue->is_empty = 1;
     queue->is_full = 0;
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
     rt_enter_critical();
     /* wakeup all suspend threads */
@@ -382,7 +385,7 @@ void rt_data_queue_reset(struct rt_data_queue *queue)
     while (!rt_list_isempty(&(queue->suspended_pop_list)))
     {
         /* disable interrupt */
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&(queue->spinlock));
 
         /* get next suspend thread */
         thread = rt_list_entry(queue->suspended_pop_list.next,
@@ -399,14 +402,14 @@ void rt_data_queue_reset(struct rt_data_queue *queue)
         rt_thread_resume(thread);
 
         /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     }
 
     /* resume on push list */
     while (!rt_list_isempty(&(queue->suspended_push_list)))
     {
         /* disable interrupt */
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&(queue->spinlock));
 
         /* get next suspend thread */
         thread = rt_list_entry(queue->suspended_push_list.next,
@@ -423,7 +426,7 @@ void rt_data_queue_reset(struct rt_data_queue *queue)
         rt_thread_resume(thread);
 
         /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     }
     rt_exit_critical();
 
@@ -448,9 +451,9 @@ rt_err_t rt_data_queue_deinit(struct rt_data_queue *queue)
     /* wakeup all suspend threads */
     rt_data_queue_reset(queue);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
     queue->magic = 0;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
     rt_free(queue->queue);
 
@@ -478,7 +481,7 @@ rt_uint16_t rt_data_queue_len(struct rt_data_queue *queue)
         return 0;
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
 
     if (queue->put_index > queue->get_index)
     {
@@ -489,7 +492,7 @@ rt_uint16_t rt_data_queue_len(struct rt_data_queue *queue)
         len = queue->size + queue->put_index - queue->get_index;
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
     return len;
 }
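
rt_data_queue_reset() keeps a subtler idiom: the wakeup loop runs inside rt_enter_critical()/rt_exit_critical() so freshly resumed threads cannot preempt the drain, while the spinlock is taken and released per iteration rather than held across the whole loop. Roughly (drain_and_wake is illustrative; the error assignment mirrors the elided lines of the hunk above):

/* `suspended` stands in for either suspended_push_list or
 * suspended_pop_list */
static void drain_and_wake(struct rt_data_queue *queue, rt_list_t *suspended)
{
    rt_base_t level;
    rt_thread_t thread;

    rt_enter_critical();               /* no preemption while waking */
    while (!rt_list_isempty(suspended))
    {
        level = rt_spin_lock_irqsave(&(queue->spinlock));

        thread = rt_list_entry(suspended->next, struct rt_thread, tlist);
        thread->error = -RT_ERROR;     /* waiters see the reset as an error */
        rt_thread_resume(thread);

        /* released per iteration, not held across the whole loop */
        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
    }
    rt_exit_critical();
}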

+ 12 - 10
components/drivers/ipc/ringblk_buf.c

@@ -6,6 +6,7 @@
  * Change Logs:
  * Date           Author       Notes
  * 2018-08-25     armink       the first version
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -44,6 +45,7 @@ void rt_rbb_init(rt_rbb_t rbb, rt_uint8_t *buf, rt_size_t buf_size, rt_rbb_blk_t
         rt_slist_init(&block_set[i].list);
         rt_slist_insert(&rbb->free_list, &block_set[i].list);
     }
+    rt_spin_lock_init(&(rbb->spinlock));
 }
 RTM_EXPORT(rt_rbb_init);
 
@@ -173,7 +175,7 @@ rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
     RT_ASSERT(rbb);
     RT_ASSERT(blk_size < (1L << 24));
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(rbb->spinlock));
 
     new_rbb = find_empty_blk_in_set(rbb);
 
@@ -255,7 +257,7 @@ rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
         new_rbb = RT_NULL;
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
 
     return new_rbb;
 }
@@ -294,7 +296,7 @@ rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
     if (rt_slist_isempty(&rbb->blk_list))
         return 0;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(rbb->spinlock));
 
     for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
     {
@@ -310,7 +312,7 @@ rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
 
 __exit:
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
 
     return block;
 }
@@ -360,12 +362,12 @@ void rt_rbb_blk_free(rt_rbb_t rbb, rt_rbb_blk_t block)
     RT_ASSERT(block);
     RT_ASSERT(block->status != RT_RBB_BLK_UNUSED);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(rbb->spinlock));
     /* remove it on rbb block list */
     list_remove(rbb, &block->list);
     block->status = RT_RBB_BLK_UNUSED;
     rt_slist_insert(&rbb->free_list, &block->list);
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
 }
 RTM_EXPORT(rt_rbb_blk_free);
 
@@ -405,7 +407,7 @@ rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_bl
     if (rt_slist_isempty(&rbb->blk_list))
         return 0;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(rbb->spinlock));
 
     node = rt_slist_first(&rbb->blk_list);
     if (node != RT_NULL)
@@ -454,7 +456,7 @@ rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_bl
         blk_queue->blk_num++;
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
 
     return data_total_size;
 }
@@ -541,7 +543,7 @@ rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
     if (rt_slist_isempty(&rbb->blk_list))
         return 0;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(rbb->spinlock));
 
     for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
     {
@@ -573,7 +575,7 @@ rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
         data_len += last_block->size;
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
 
     return data_len;
 }

+ 19 - 14
components/drivers/ipc/waitqueue.c

@@ -8,6 +8,7 @@
  * 2018/06/26     Bernard      Fix the wait queue issue when wakeup a soon
  *                             to blocked thread.
  * 2022-01-24     THEWON       let rt_wqueue_wait return thread->error when using signal
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <stdint.h>
@@ -25,9 +26,10 @@ void rt_wqueue_add(rt_wqueue_t *queue, struct rt_wqueue_node *node)
 {
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
+    node->wqueue = queue;
     rt_list_insert_before(&(queue->waiting_list), &(node->list));
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 }
 
 /**
@@ -39,9 +41,11 @@ void rt_wqueue_remove(struct rt_wqueue_node *node)
 {
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    RT_ASSERT(node->wqueue != RT_NULL);
+
+    level = rt_spin_lock_irqsave(&(node->wqueue->spinlock));
     rt_list_remove(&(node->list));
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(node->wqueue->spinlock), level);
 }
 
 /**
@@ -79,7 +83,7 @@ void rt_wqueue_wakeup(rt_wqueue_t *queue, void *key)
 
     queue_list = &(queue->waiting_list);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
     /* set wakeup flag in the queue */
     queue->flag = RT_WQ_FLAG_WAKEUP;
 
@@ -93,13 +97,12 @@ void rt_wqueue_wakeup(rt_wqueue_t *queue, void *key)
                 rt_thread_resume(entry->polling_thread);
                 need_schedule = 1;
 
-                rt_wqueue_remove(entry);
+                rt_list_remove(&(entry->list));
                 break;
             }
         }
     }
-    rt_hw_interrupt_enable(level);
-
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     if (need_schedule)
         rt_schedule();
 }
@@ -136,9 +139,10 @@ static int _rt_wqueue_wait(rt_wqueue_t *queue, int condition, int msec, int susp
     __wait.polling_thread = rt_thread_self();
     __wait.key = 0;
     __wait.wakeup = __wqueue_default_wake;
+    __wait.wqueue = queue;
     rt_list_init(&__wait.list);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
 
     /* reset thread error */
     tid->error = RT_EOK;
@@ -152,11 +156,12 @@ static int _rt_wqueue_wait(rt_wqueue_t *queue, int condition, int msec, int susp
     ret = rt_thread_suspend_with_flag(tid, suspend_flag);
     if (ret != RT_EOK)
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
         /* suspend failed */
         return -RT_EINTR;
     }
-    rt_wqueue_add(queue, &__wait);
+
+    rt_list_insert_before(&(queue->waiting_list), &(__wait.list));
 
     /* start timer */
     if (tick != RT_WAITING_FOREVER)
@@ -167,15 +172,15 @@ static int _rt_wqueue_wait(rt_wqueue_t *queue, int condition, int msec, int susp
 
         rt_timer_start(tmr);
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
     rt_schedule();
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
 
 __exit_wakeup:
     queue->flag = RT_WQ_FLAG_CLEAN;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
     rt_wqueue_remove(&__wait);
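
Two consequences of the per-queue lock show up in waitqueue.c. rt_wqueue_remove() receives only the node, so struct rt_wqueue_node gains a wqueue backpointer for finding the right spinlock. And inside rt_wqueue_wakeup() the entry is now unlinked with rt_list_remove() directly, because the queue lock is already held and these spinlocks are not recursive; calling rt_wqueue_remove() there would self-deadlock. The hazard, sketched (both helpers hypothetical):

/* WRONG: would try to re-take queue->spinlock, which this CPU
 * already holds -- a self-deadlock on a non-recursive spinlock */
static void wakeup_wrong(rt_wqueue_t *queue, struct rt_wqueue_node *entry)
{
    rt_base_t level = rt_spin_lock_irqsave(&(queue->spinlock));
    rt_wqueue_remove(entry);   /* relocks entry->wqueue->spinlock! */
    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
}

/* RIGHT: unlink the raw list node while the lock is already held */
static void wakeup_right(rt_wqueue_t *queue, struct rt_wqueue_node *entry)
{
    rt_base_t level = rt_spin_lock_irqsave(&(queue->spinlock));
    rt_list_remove(&(entry->list));
    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
}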
 

+ 18 - 18
components/drivers/ipc/workqueue.c

@@ -9,6 +9,7 @@
  * 2021-08-01     Meco Man     remove rt_delayed_work_init()
  * 2021-08-14     Jackistang   add comments for function interface
  * 2022-01-16     Meco Man     add rt_work_urgent()
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -22,7 +23,6 @@ rt_inline rt_err_t _workqueue_work_completion(struct rt_workqueue *queue)
 {
     rt_err_t result;
 
-    rt_enter_critical();
     while (1)
     {
         /* try to take condition semaphore */
@@ -44,7 +44,6 @@ rt_inline rt_err_t _workqueue_work_completion(struct rt_workqueue *queue)
             break;
         }
     }
-    rt_exit_critical();
 
     return result;
 }
@@ -60,12 +59,12 @@ static void _workqueue_thread_entry(void *parameter)
 
     while (1)
     {
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&(queue->spinlock));
         if (rt_list_isempty(&(queue->work_list)))
         {
             /* no software timer exist, suspend self. */
             rt_thread_suspend_with_flag(rt_thread_self(), RT_UNINTERRUPTIBLE);
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
             rt_schedule();
             continue;
         }
@@ -76,7 +75,7 @@ static void _workqueue_thread_entry(void *parameter)
         queue->work_current = work;
         work->flags &= ~RT_WORK_STATE_PENDING;
         work->workqueue = RT_NULL;
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
 
         /* do work */
         work->work_func(work, work->work_data);
@@ -93,7 +92,7 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
 {
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
 
     /* remove list */
     rt_list_remove(&(work->list));
@@ -111,12 +110,12 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
         {
             /* resume work thread */
             rt_thread_resume(queue->work_thread);
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
             rt_schedule();
         }
         else
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(queue->spinlock), level);
         }
         return RT_EOK;
     }
@@ -137,11 +136,11 @@ static rt_err_t _workqueue_submit_work(struct rt_workqueue *queue,
         work->workqueue = queue;
         /* insert delay work list */
         rt_list_insert_after(queue->delayed_list.prev, &(work->list));
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
         rt_timer_start(&(work->timer));
         return RT_EOK;
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     return -RT_ERROR;
 }
 
@@ -150,7 +149,7 @@ static rt_err_t _workqueue_cancel_work(struct rt_workqueue *queue, struct rt_wor
     rt_base_t level;
     rt_err_t err;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
     rt_list_remove(&(work->list));
     work->flags &= ~RT_WORK_STATE_PENDING;
     /* Timer started */
@@ -162,7 +161,7 @@ static rt_err_t _workqueue_cancel_work(struct rt_workqueue *queue, struct rt_wor
     }
     err = queue->work_current != work ? RT_EOK : -RT_EBUSY;
     work->workqueue = RT_NULL;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     return err;
 }
 
@@ -176,7 +175,7 @@ static void _delayed_work_timeout_handler(void *parameter)
     queue = work->workqueue;
     RT_ASSERT(queue != RT_NULL);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
     rt_timer_detach(&(work->timer));
     work->flags &= ~RT_WORK_STATE_SUBMITTING;
     /* remove delay list */
@@ -193,12 +192,12 @@ static void _delayed_work_timeout_handler(void *parameter)
     {
         /* resume work thread */
         rt_thread_resume(queue->work_thread);
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
         rt_schedule();
     }
     else
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     }
 }
 
@@ -258,6 +257,7 @@ struct rt_workqueue *rt_workqueue_create(const char *name, rt_uint16_t stack_siz
             return RT_NULL;
         }
 
+        rt_spin_lock_init(&(queue->spinlock));
         rt_thread_startup(queue->work_thread);
     }
 
@@ -341,7 +341,7 @@ rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *wo
     RT_ASSERT(queue != RT_NULL);
     RT_ASSERT(work != RT_NULL);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(queue->spinlock));
     /* NOTE: the work MUST be initialized firstly */
     rt_list_remove(&(work->list));
     rt_list_insert_after(&queue->work_list, &(work->list));
@@ -351,12 +351,12 @@ rt_err_t rt_workqueue_urgent_work(struct rt_workqueue *queue, struct rt_work *wo
     {
         /* resume work thread */
         rt_thread_resume(queue->work_thread);
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
         rt_schedule();
     }
     else
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(queue->spinlock), level);
     }
 
     return RT_EOK;
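
An ordering detail in workqueue.c: rt_spin_lock_init() lands immediately before rt_thread_startup() in rt_workqueue_create(), so the worker thread can never run against an uninitialized lock. Condensed creation path (a sketch; error handling and several fields elided):

static struct rt_workqueue *wq_create_sketch(const char *name,
                                             rt_uint16_t stack_size,
                                             rt_uint8_t priority)
{
    struct rt_workqueue *queue = rt_malloc(sizeof(struct rt_workqueue));

    if (queue == RT_NULL)
        return RT_NULL;

    rt_list_init(&(queue->work_list));   /* other fields elided */
    queue->work_thread = rt_thread_create(name, _workqueue_thread_entry,
                                          queue, stack_size, priority, 10);
    if (queue->work_thread == RT_NULL)
    {
        rt_free(queue);
        return RT_NULL;
    }

    rt_spin_lock_init(&(queue->spinlock));  /* before startup, never after */
    rt_thread_startup(queue->work_thread);

    return queue;
}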

+ 2 - 9
components/drivers/ktime/src/hrtimer.c

@@ -6,6 +6,7 @@
  * Change Logs:
  * Date           Author       Notes
  * 2023-07-10     xqyjlj       The first version.
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rtdevice.h>
@@ -22,7 +23,7 @@
 
 static rt_list_t          _timer_list = RT_LIST_OBJECT_INIT(_timer_list);
 static rt_ktime_hrtimer_t _nowtimer   = RT_NULL;
-static struct rt_spinlock _spinlock;
+static RT_DEFINE_SPINLOCK(_spinlock);
 
 rt_weak unsigned long rt_ktime_hrtimer_getres(void)
 {
@@ -389,11 +390,3 @@ rt_err_t rt_ktime_hrtimer_mdelay(unsigned long ms)
 {
     return rt_ktime_hrtimer_ndelay(ms * 1000000);
 }
-
-static int rt_ktime_hrtimer_lock_init(void)
-{
-    RT_UNUSED(_spinlock);
-    rt_spin_lock_init(&_spinlock);
-    return 0;
-}
-INIT_BOARD_EXPORT(rt_ktime_hrtimer_lock_init);
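
Two static-initialization forms appear in this commit: RT_DEFINE_SPINLOCK(_spinlock) here, and struct rt_spinlock _spinlock = RT_SPINLOCK_INIT in n_tty.c and tty_ldisc.c. Both give the lock a valid state at compile time, which is why the INIT_BOARD_EXPORT() hook that used to initialize it during board init can be deleted outright. The styles side by side (a sketch; the macros come from this commit's rtdef.h changes):

/* compile-time: usable from the very first call, no init hook needed */
static RT_DEFINE_SPINLOCK(_lock_a);
static struct rt_spinlock _lock_b = RT_SPINLOCK_INIT;

/* run-time: usable only after the init hook has actually run --
 * the window this commit closes by deleting the pattern below */
static struct rt_spinlock _lock_c;
static int lock_c_init(void)
{
    rt_spin_lock_init(&_lock_c);
    return 0;
}
INIT_BOARD_EXPORT(lock_c_init);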

+ 24 - 21
components/drivers/serial/serial.c

@@ -26,6 +26,7 @@
  *                             when using interrupt tx
  * 2020-12-14     Meco Man     implement function of setting window's size(TIOCSWINSZ)
  * 2021-08-22     Meco Man     implement function of getting window's size(TIOCGWINSZ)
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -208,10 +209,10 @@ static int serial_fops_poll(struct dfs_file *fd, struct rt_pollreq *req)
 
         rx_fifo = (struct rt_serial_rx_fifo*) serial->serial_rx;
 
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&(serial->spinlock));
         if ((rx_fifo->get_index != rx_fifo->put_index) || (rx_fifo->get_index == rx_fifo->put_index && rx_fifo->is_full == RT_TRUE))
             mask |= POLLIN;
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(serial->spinlock), level);
     }
 
     return mask;
@@ -303,13 +304,13 @@ rt_inline int _serial_int_rx(struct rt_serial_device *serial, rt_uint8_t *data,
         rt_base_t level;
 
         /* disable interrupt */
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&(serial->spinlock));
 
         /* there's no data: */
         if ((rx_fifo->get_index == rx_fifo->put_index) && (rx_fifo->is_full == RT_FALSE))
         {
             /* no data, enable interrupt and break out */
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(serial->spinlock), level);
             break;
         }
 
@@ -324,7 +325,7 @@ rt_inline int _serial_int_rx(struct rt_serial_device *serial, rt_uint8_t *data,
         }
 
         /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(serial->spinlock), level);
 
         *data = ch & 0xff;
         data ++; length --;
@@ -501,7 +502,7 @@ rt_inline int _serial_dma_rx(struct rt_serial_device *serial, rt_uint8_t *data,
 
     RT_ASSERT((serial != RT_NULL) && (data != RT_NULL));
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(serial->spinlock));
 
     if (serial->config.bufsz == 0)
     {
@@ -518,7 +519,7 @@ rt_inline int _serial_dma_rx(struct rt_serial_device *serial, rt_uint8_t *data,
             serial->ops->dma_transmit(serial, data, length, RT_SERIAL_DMA_RX);
         }
         else result = -RT_EBUSY;
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(serial->spinlock), level);
 
         if (result == RT_EOK) return length;
 
@@ -547,7 +548,7 @@ rt_inline int _serial_dma_rx(struct rt_serial_device *serial, rt_uint8_t *data,
                     recv_len + rx_fifo->get_index - serial->config.bufsz);
         }
         rt_dma_recv_update_get_index(serial, recv_len);
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(serial->spinlock), level);
         return recv_len;
     }
 }
@@ -563,18 +564,18 @@ rt_inline int _serial_dma_tx(struct rt_serial_device *serial, const rt_uint8_t *
     result = rt_data_queue_push(&(tx_dma->data_queue), data, length, RT_WAITING_FOREVER);
     if (result == RT_EOK)
     {
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&(serial->spinlock));
         if (tx_dma->activated != RT_TRUE)
         {
             tx_dma->activated = RT_TRUE;
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(serial->spinlock), level);
 
             /* make a DMA transfer */
             serial->ops->dma_transmit(serial, (rt_uint8_t *)data, length, RT_SERIAL_DMA_TX);
         }
         else
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(serial->spinlock), level);
         }
 
         return length;
@@ -980,10 +981,10 @@ static void _tc_flush(struct rt_serial_device *serial, int queue)
             if((device->open_flag & RT_DEVICE_FLAG_INT_RX) || (device->open_flag & RT_DEVICE_FLAG_DMA_RX))
             {
                 RT_ASSERT(RT_NULL != rx_fifo);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&(serial->spinlock));
                 rx_fifo->get_index = rx_fifo->put_index;
                 rx_fifo->is_full = RT_FALSE;
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&(serial->spinlock), level);
             }
             else
             {
@@ -1245,9 +1246,9 @@ static rt_err_t rt_serial_control(struct rt_device *dev,
                 rt_size_t recved = 0;
                 rt_base_t level;
 
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&(serial->spinlock));
                 recved = _serial_fifo_calc_recved_len(serial);
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&(serial->spinlock), level);
 
                 *(rt_size_t *)args = recved;
             }
@@ -1286,6 +1287,8 @@ rt_err_t rt_hw_serial_register(struct rt_serial_device *serial,
     struct rt_device *device;
     RT_ASSERT(serial != RT_NULL);
 
+    rt_spin_lock_init(&(serial->spinlock));
+
     device = &(serial->parent);
 
     device->type        = RT_Device_Class_Char;
@@ -1337,7 +1340,7 @@ void rt_hw_serial_isr(struct rt_serial_device *serial, int event)
 
 
                 /* disable interrupt */
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&(serial->spinlock));
 
                 rx_fifo->buffer[rx_fifo->put_index] = ch;
                 rx_fifo->put_index += 1;
@@ -1354,7 +1357,7 @@ void rt_hw_serial_isr(struct rt_serial_device *serial, int event)
                 }
 
                 /* enable interrupt */
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&(serial->spinlock), level);
             }
 
             /* invoke callback */
@@ -1363,10 +1366,10 @@ void rt_hw_serial_isr(struct rt_serial_device *serial, int event)
                 rt_size_t rx_length;
 
                 /* get rx length */
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&(serial->spinlock));
                 rx_length = (rx_fifo->put_index >= rx_fifo->get_index)? (rx_fifo->put_index - rx_fifo->get_index):
                     (serial->config.bufsz - (rx_fifo->get_index - rx_fifo->put_index));
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&(serial->spinlock), level);
 
                 if (rx_length)
                 {
@@ -1438,13 +1441,13 @@ void rt_hw_serial_isr(struct rt_serial_device *serial, int event)
             else
             {
                 /* disable interrupt */
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&(serial->spinlock));
                 /* update fifo put index */
                 rt_dma_recv_update_put_index(serial, length);
                 /* calculate received total length */
                 length = rt_dma_calc_recved_len(serial);
                 /* enable interrupt */
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&(serial->spinlock), level);
                 /* invoke callback */
                 if (serial->parent.rx_indicate != RT_NULL)
                 {
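
serial.c is the clearest illustration of why the irqsave variants are required: rt_hw_serial_isr() and the thread-side read/poll/ioctl paths now contend on the same per-device lock (initialized once in rt_hw_serial_register()), and masking local interrupts while holding it is what prevents the UART ISR from spinning on a lock its own core already owns. A reduced sketch of the two sides (struct demo_fifo and both functions are hypothetical; the locking shape matches the diff):

struct demo_fifo { int get_index, put_index; };

/* thread side: consume under the lock */
static int demo_read(struct rt_serial_device *serial, struct demo_fifo *fifo)
{
    int ch = -1;
    rt_base_t level = rt_spin_lock_irqsave(&(serial->spinlock));
    if (fifo->get_index != fifo->put_index)
        ch = fifo->get_index++;          /* stand-in for a buffer read */
    rt_spin_unlock_irqrestore(&(serial->spinlock), level);
    return ch;
}

/* ISR side: produce under the same lock; safe because irqsave masked
 * local interrupts on the thread side, so the ISR can never fire in
 * the middle of a critical section on its own core */
static void demo_isr(struct rt_serial_device *serial, struct demo_fifo *fifo)
{
    rt_base_t level = rt_spin_lock_irqsave(&(serial->spinlock));
    fifo->put_index++;                   /* stand-in for a buffer write */
    rt_spin_unlock_irqrestore(&(serial->spinlock), level);
}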

+ 5 - 12
components/drivers/tty/console.c

@@ -149,14 +149,7 @@ static rt_err_t iodev_open(struct tty_struct *console)
 
 struct rt_device *console_get_iodev(void)
 {
-    rt_base_t level = 0;
-    struct rt_device *iodev = RT_NULL;
-
-    level = rt_hw_interrupt_disable();
-    iodev = console_dev.io_dev;
-    rt_hw_interrupt_enable(level);
-
-    return iodev;
+    return console_dev.io_dev;
 }
 
 struct rt_device *console_set_iodev(struct rt_device *iodev)
@@ -169,7 +162,7 @@ struct rt_device *console_set_iodev(struct rt_device *iodev)
 
     console = &console_dev;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&console->spinlock);
 
     RT_ASSERT(console->init_flag >= TTY_INIT_FLAG_REGED);
 
@@ -195,7 +188,7 @@ struct rt_device *console_set_iodev(struct rt_device *iodev)
     }
 
 exit:
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&console->spinlock, level);
     return io_before;
 }
 
@@ -213,7 +206,7 @@ static rt_err_t rt_console_init(struct rt_device *dev)
 
     console = (struct tty_struct *)dev;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&console->spinlock);
 
     RT_ASSERT(console->init_flag == TTY_INIT_FLAG_REGED);
 
@@ -225,7 +218,7 @@ static rt_err_t rt_console_init(struct rt_device *dev)
 
     console->init_flag = TTY_INIT_FLAG_INITED;
 exit:
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&console->spinlock, level);
     return result;
 }
 

+ 1 - 0
components/drivers/tty/include/tty.h

@@ -166,6 +166,7 @@ struct tty_struct
 
 #define RT_TTY_BUF 1024
     rt_list_t tty_drivers;
+    struct rt_spinlock spinlock;
 };
 
 enum

+ 24 - 18
components/drivers/tty/n_tty.c

@@ -50,6 +50,8 @@
 #define ECHO_BLOCK      256
 #define ECHO_DISCARD_WATERMARK  RT_TTY_BUF - (ECHO_BLOCK + 32)
 
+static struct rt_spinlock _spinlock = RT_SPINLOCK_INIT;
+
 struct n_tty_data
 {
     /* producer-published */
@@ -87,27 +89,29 @@ struct n_tty_data
 
 rt_inline int set_bit(int nr,int *addr)
 {
-    int mask, retval, level;
+    int mask, retval;
+    rt_base_t level;
 
     addr += nr >> 5;
     mask = 1 << (nr & 0x1f);
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     retval = (mask & *addr) != 0;
     *addr |= mask;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
     return retval;
 }
 
 rt_inline int clear_bit(int nr, int *addr)
 {
-    int mask, retval, level;
+    int mask, retval;
+    rt_base_t level;
 
     addr += nr >> 5;
     mask = 1 << (nr & 0x1f);
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     retval = (mask & *addr) != 0;
     *addr &= ~mask;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
     return retval;
 }
 
@@ -122,15 +126,16 @@ rt_inline int test_bit(int nr, int *addr)
 
 rt_inline int test_and_clear_bit(int nr, volatile void *addr)
 {
-    int mask, retval, level;
+    int mask, retval;
+    rt_base_t level;
     volatile unsigned int *a = addr;
 
     a += nr >> 5;
     mask = 1 << (nr & 0x1f);
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     retval = (mask & *a) != 0;
     *a &= ~mask;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return retval;
 }
@@ -1358,7 +1363,7 @@ static int copy_from_read_buf(struct tty_struct *tty,char *b,size_t nr)
     if (n)
     {
         const char *from = read_buf_addr(ldata, tail);
-        rt_memcpy(b, from, n);
+        memcpy(b, from, n);
         is_eof = n == 1 && *from == EOF_CHAR(tty);
         ldata->read_tail += n;
         /* Turn single EOF into zero-length read */
@@ -1445,12 +1450,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, char *b, size_t nr)
     size_t temp_n = n;
     if (n > buf_size)
     {
-        rt_memcpy(b, from, buf_size);
+        memcpy(b, from, buf_size);
         b += buf_size;
         temp_n -= buf_size;
         from = ldata->read_buf;
     }
-    rt_memcpy(b, from, temp_n);
+    memcpy(b, from, temp_n);
 
     if (found)
     {
@@ -2021,7 +2026,7 @@ static struct rt_wqueue *_wait_queue_current_get(struct tty_struct *tty)
 
 static int n_tty_read(struct dfs_file *fd, void *buf, size_t count)
 {
-    int level = 0;
+    rt_base_t level = 0;
     char *b = (char *)buf;
     struct tty_struct *tty = RT_NULL;
     struct rt_wqueue *wq = RT_NULL;
@@ -2029,13 +2034,11 @@ static int n_tty_read(struct dfs_file *fd, void *buf, size_t count)
     int retval = 0;
     int c = 0;
 
-    level = rt_hw_interrupt_disable();
     tty = (struct tty_struct *)fd->vnode->data;
     RT_ASSERT(tty != RT_NULL);
     c = job_control(tty);
     if (c < 0)
     {
-        rt_hw_interrupt_enable(level);
         return c;
     }
 
@@ -2054,12 +2057,14 @@ static int n_tty_read(struct dfs_file *fd, void *buf, size_t count)
             }
 
             wait_ret = rt_wqueue_wait_interruptible(wq, 0, RT_WAITING_FOREVER);
+
             if (wait_ret != 0)
             {
                 break;
             }
         }
 
+        level = rt_spin_lock_irqsave(&tty->spinlock);
         if (ldata->icanon && !L_EXTPROC(tty))
         {
             retval = canon_copy_from_read_buf(tty, b, count);
@@ -2068,13 +2073,14 @@ static int n_tty_read(struct dfs_file *fd, void *buf, size_t count)
         {
             retval = copy_from_read_buf(tty, b, count);
         }
+        rt_spin_unlock_irqrestore(&tty->spinlock, level);
 
         if (retval >= 1)
         {
             break;
         }
     }
-    rt_hw_interrupt_enable(level);
+
     return retval;
 }
 
@@ -2189,12 +2195,12 @@ static int n_tty_poll(struct dfs_file *fd, struct rt_pollreq *req)
     wq = _wait_queue_current_get(tty);
     rt_poll_add(wq, req);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&tty->spinlock);
     if (input_available_p(tty, 1))
     {
         mask |= POLLIN;
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&tty->spinlock, level);
 
     return mask;
 }
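
Besides the lock swap, n_tty.c fixes a latent type problem: `level` changes from int to rt_base_t, the type rt_spin_lock_irqsave() actually returns, so the saved interrupt state is no longer truncated on 64-bit targets. The bit helpers all share one file-scope lock because they manipulate the same ldata bitmaps; trimmed to its essentials (demo_set_bit is illustrative):

static struct rt_spinlock _bits_lock = RT_SPINLOCK_INIT; /* file-scope */

/* test-and-set on a word-array bitmap, made atomic by the lock */
rt_inline int demo_set_bit(int nr, int *addr)
{
    int mask, old;
    rt_base_t level;               /* NOT int: rt_base_t matches the API */

    addr += nr >> 5;               /* 32 bits per word */
    mask = 1 << (nr & 0x1f);
    level = rt_spin_lock_irqsave(&_bits_lock);
    old = (*addr & mask) != 0;
    *addr |= mask;
    rt_spin_unlock_irqrestore(&_bits_lock, level);
    return old;
}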

+ 2 - 5
components/drivers/tty/pty.c

@@ -73,17 +73,14 @@ static int pty_get_index(struct tty_struct *tty, int *arg)
  */
 static rt_err_t pty_device_init(struct rt_device *dev)
 {
-    rt_ubase_t level = 0;
     rt_err_t result = RT_EOK;
     struct tty_struct *tty = RT_NULL;
 
     RT_ASSERT(dev != RT_NULL);
     tty = (struct tty_struct *)dev;
 
-    level = rt_hw_interrupt_disable();
     RT_ASSERT(tty->init_flag == TTY_INIT_FLAG_REGED);
     tty->init_flag = TTY_INIT_FLAG_INITED;
-    rt_hw_interrupt_enable(level);
 
     return result;
 }
@@ -150,12 +147,12 @@ static rt_ssize_t pty_device_write(struct rt_device *dev,
     RT_ASSERT(tty->init_flag == TTY_INIT_FLAG_INITED);
     to = tty->other_struct;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&tty->spinlock);
     if (to->ldisc->ops->receive_buf)
     {
         len = to->ldisc->ops->receive_buf(to, (char *)buffer, size);
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&tty->spinlock, level);
 
     return len;
 }

+ 8 - 9
components/drivers/tty/tty.c

@@ -142,29 +142,27 @@ int __tty_check_change(struct tty_struct *tty, int sig)
 {
     pid_t pgrp = 0, tty_pgrp = 0;
     int ret = 0;
-    int level = 0;
+    struct rt_lwp *lwp;
 
-    level = rt_hw_interrupt_disable();
-    if (current == RT_NULL)
+    lwp = lwp_self();
+
+    if (lwp == RT_NULL)
     {
-        rt_hw_interrupt_enable(level);
         return 0;
     }
 
-    if (current->tty != tty)
+    if (lwp->tty != tty)
     {
-        rt_hw_interrupt_enable(level);
         return 0;
     }
 
-    pgrp = current->__pgrp;
+    pgrp = lwp->__pgrp;
     tty_pgrp = tty->pgrp;
 
     if (tty_pgrp && (pgrp != tty->pgrp))
     {
-        lwp_signal_kill(current, sig, SI_USER, 0);
+        lwp_signal_kill(lwp, sig, SI_USER, 0);
     }
-    rt_hw_interrupt_enable(level);
 
     if (!tty_pgrp)
     {
@@ -494,6 +492,7 @@ int tty_init(struct tty_struct *tty, int type, int subtype, struct rt_device *io
 
             rt_mutex_init(&tty->lock, "ttyLock", RT_IPC_FLAG_PRIO);
             rt_wqueue_init(&tty->wait_queue);
+            rt_spin_lock_init(&tty->spinlock);
 
             tty_ldisc_init(tty);
             tty->init_termios = tty_std_termios;
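
Not every old bracket turns into a spinlock; some vanish outright. __tty_check_change() only inspects the calling task's own lwp, so the current-under-IRQ-off dance becomes a plain lwp_self() call; the same reasoning removes the brackets in pty_device_init() (init-time, effectively single-threaded) and console_get_iodev() (a single aligned pointer load). The new shape of the check, condensed (a sketch; the !tty_pgrp tail is omitted):

static int check_change_sketch(struct tty_struct *tty, int sig)
{
    struct rt_lwp *lwp = lwp_self();  /* our own lwp: no pid lock needed */

    if (lwp == RT_NULL || lwp->tty != tty)
        return 0;

    if (tty->pgrp && (lwp->__pgrp != tty->pgrp))
    {
        lwp_signal_kill(lwp, sig, SI_USER, 0);
    }

    return 0;
}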

+ 5 - 5
components/drivers/tty/tty_ioctl.c

@@ -39,7 +39,7 @@ static int set_termios(struct tty_struct *tty, void *arg, int opt)
     struct termios old_termios;
     struct tty_ldisc *ld = RT_NULL;
     struct termios *new_termios = (struct termios *)arg;
-    int level = 0;
+    rt_base_t level = 0;
     int retval = tty_check_change(tty);
 
     if (retval)
@@ -47,10 +47,10 @@ static int set_termios(struct tty_struct *tty, void *arg, int opt)
         return retval;
     }
 
-    rt_memcpy(&old_termios, &(tty->init_termios), sizeof(struct termios));
-    level = rt_hw_interrupt_disable();
+    memcpy(&old_termios, &(tty->init_termios), sizeof(struct termios));
+    level = rt_spin_lock_irqsave(&tty->spinlock);
     tty->init_termios = *new_termios;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&tty->spinlock, level);
     ld = tty->ldisc;
     if (ld != NULL)
     {
@@ -88,7 +88,7 @@ int n_tty_ioctl_extend(struct tty_struct *tty, int cmd, void *args)
             return -RT_EINVAL;
         }
 
-        rt_memcpy(tio, &real_tty->init_termios, sizeof(real_tty->init_termios));
+        memcpy(tio, &real_tty->init_termios, sizeof(real_tty->init_termios));
         return ret;
     }
     case TCSETSF:

+ 11 - 12
components/drivers/tty/tty_ldisc.c

@@ -14,27 +14,29 @@ static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS] = {
     &n_tty_ops, /* N_TTY = 0 */
 };
 
+static struct rt_spinlock _spinlock = RT_SPINLOCK_INIT;
+
 static struct tty_ldisc_ops *get_ldops(int disc)
 {
     struct tty_ldisc_ops *ldops = RT_NULL;
-    int level = 0;
-    level = rt_hw_interrupt_disable();
+    rt_base_t level = 0;
+    level = rt_spin_lock_irqsave(&_spinlock);
     ldops = tty_ldiscs[disc];
     if (ldops)
     {
         ldops->refcount++;
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
     return ldops;
 }
 
 static void put_ldops(struct tty_ldisc_ops *ldops)
 {
-    int level = 0;
+    rt_base_t level = 0;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     ldops->refcount--;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 }
 
 static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
@@ -120,18 +122,18 @@ void tty_ldisc_kill(struct tty_struct *tty)
 int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
 {
     int ret = 0;
-    int level = 0;
+    rt_base_t level = 0;
 
     if (disc < N_TTY || disc >= NR_LDISCS)
     {
         return -EINVAL;
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     tty_ldiscs[disc] = new_ldisc;
     new_ldisc->num = disc;
     new_ldisc->refcount = 0;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return ret;
 }
@@ -146,20 +148,17 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
 
 void tty_ldisc_release(struct tty_struct *tty)
 {
-    int level = 0;
     struct tty_struct *o_tty = tty->other_struct;
 
     /*
      * Shutdown this line discipline. As this is the final close,
      * it does not race with the set_ldisc code path.
      */
-    level = rt_hw_interrupt_disable();
     tty_ldisc_kill(tty);
     if (o_tty)
     {
         tty_ldisc_kill(o_tty);
     }
-    rt_hw_interrupt_enable(level);
 }
 
 /**

+ 56 - 33
components/finsh/cmd.c

@@ -30,6 +30,7 @@
  *                             Provide protection for the "first layer of objects" when list_*
  * 2020-04-07     chenhui      add clear
  * 2022-07-02     Stanley Lwin add list command
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -94,6 +95,7 @@ static rt_list_t *list_get_next(rt_list_t *current, list_get_next_t *arg)
     rt_base_t level;
     rt_list_t *node, *list;
     rt_list_t **array;
+    struct rt_object_information *info;
     int nr;
 
     arg->nr_out = 0;
@@ -104,6 +106,7 @@ static rt_list_t *list_get_next(rt_list_t *current, list_get_next_t *arg)
     }
 
     list = arg->list;
+    info = rt_list_entry(list, struct rt_object_information, object_list);
 
     if (!current) /* find first */
     {
@@ -115,7 +118,7 @@ static rt_list_t *list_get_next(rt_list_t *current, list_get_next_t *arg)
         node = current;
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&info->spinlock);
 
     if (!first_flag)
     {
@@ -124,7 +127,7 @@ static rt_list_t *list_get_next(rt_list_t *current, list_get_next_t *arg)
         obj = rt_list_entry(node, struct rt_object, list);
         if ((obj->type & ~RT_Object_Class_Static) != arg->type)
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&info->spinlock, level);
             return (rt_list_t *)RT_NULL;
         }
     }
@@ -148,7 +151,7 @@ static rt_list_t *list_get_next(rt_list_t *current, list_get_next_t *arg)
         }
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&info->spinlock, level);
     arg->nr_out = nr;
     return node;
 }
@@ -157,6 +160,7 @@ long list_thread(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
     const char *item_title = "thread";
@@ -164,6 +168,7 @@ long list_thread(void)
     int maxlen;
 
     list_find_init(&find_arg, RT_Object_Class_Thread, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -194,16 +199,16 @@ long list_thread(void)
                 struct rt_thread thread_info, *thread;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
 
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
                 /* copy info */
                 rt_memcpy(&thread_info, obj, sizeof thread_info);
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 thread = (struct rt_thread *)obj;
                 {
@@ -277,6 +282,7 @@ long list_sem(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
 
@@ -284,6 +290,7 @@ long list_sem(void)
     const char *item_title = "semaphore";
 
     list_find_init(&find_arg, RT_Object_Class_Semaphore, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -302,13 +309,13 @@ long list_sem(void)
                 struct rt_semaphore *sem;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 sem = (struct rt_semaphore *)obj;
                 if (!rt_list_isempty(&sem->parent.suspend_thread))
@@ -343,6 +350,7 @@ long list_event(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
 
@@ -350,6 +358,7 @@ long list_event(void)
     const char *item_title = "event";
 
     list_find_init(&find_arg, RT_Object_Class_Event, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -368,14 +377,14 @@ long list_event(void)
                 struct rt_event *e;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
 
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 e = (struct rt_event *)obj;
                 if (!rt_list_isempty(&e->parent.suspend_thread))
@@ -407,6 +416,7 @@ long list_mutex(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
 
@@ -414,6 +424,7 @@ long list_mutex(void)
     const char *item_title = "mutex";
 
     list_find_init(&find_arg, RT_Object_Class_Mutex, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -432,14 +443,14 @@ long list_mutex(void)
                 struct rt_mutex *m;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
 
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 m = (struct rt_mutex *)obj;
                 if (!rt_list_isempty(&m->parent.suspend_thread))
@@ -480,6 +491,7 @@ long list_mailbox(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
 
@@ -487,6 +499,7 @@ long list_mailbox(void)
     const char *item_title = "mailbox";
 
     list_find_init(&find_arg, RT_Object_Class_MailBox, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -505,14 +518,14 @@ long list_mailbox(void)
                 struct rt_mailbox *m;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
 
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 m = (struct rt_mailbox *)obj;
                 if (!rt_list_isempty(&m->parent.suspend_thread))
@@ -550,6 +563,7 @@ long list_msgqueue(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
 
@@ -557,6 +571,7 @@ long list_msgqueue(void)
     const char *item_title = "msgqueue";
 
     list_find_init(&find_arg, RT_Object_Class_MessageQueue, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -574,14 +589,14 @@ long list_msgqueue(void)
                 struct rt_messagequeue *m;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
 
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 m = (struct rt_messagequeue *)obj;
                 if (!rt_list_isempty(&m->parent.suspend_thread))
@@ -616,6 +631,7 @@ long list_memheap(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
 
@@ -623,6 +639,7 @@ long list_memheap(void)
     const char *item_title = "memheap";
 
     list_find_init(&find_arg, RT_Object_Class_MemHeap, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -640,14 +657,14 @@ long list_memheap(void)
                 struct rt_memheap *mh;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
 
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 mh = (struct rt_memheap *)obj;
 
@@ -672,6 +689,7 @@ long list_mempool(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
 
@@ -679,6 +697,7 @@ long list_mempool(void)
     const char *item_title = "mempool";
 
     list_find_init(&find_arg, RT_Object_Class_MemPool, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -698,14 +717,14 @@ long list_mempool(void)
                 rt_list_t *node;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
 
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 mp = (struct rt_mempool *)obj;
 
@@ -750,6 +769,7 @@ long list_timer(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
 
@@ -757,6 +777,7 @@ long list_timer(void)
     const char *item_title = "timer";
 
     list_find_init(&find_arg, RT_Object_Class_Timer, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -774,14 +795,14 @@ long list_timer(void)
                 struct rt_timer *timer;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
 
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 timer = (struct rt_timer *)obj;
                 rt_kprintf("%-*.*s 0x%08x 0x%08x ",
@@ -848,6 +869,7 @@ long list_device(void)
 {
     rt_base_t level;
     list_get_next_t find_arg;
+    struct rt_object_information *info;
     rt_list_t *obj_list[LIST_FIND_OBJ_NR];
     rt_list_t *next = (rt_list_t *)RT_NULL;
     const char *device_type;
@@ -856,6 +878,7 @@ long list_device(void)
     const char *item_title = "device";
 
     list_find_init(&find_arg, RT_Object_Class_Device, obj_list, sizeof(obj_list) / sizeof(obj_list[0]));
+    info = rt_list_entry(find_arg.list, struct rt_object_information, object_list);
 
     maxlen = RT_NAME_MAX;
 
@@ -873,14 +896,14 @@ long list_device(void)
                 struct rt_device *device;
 
                 obj = rt_list_entry(obj_list[i], struct rt_object, list);
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&info->spinlock);
                 if ((obj->type & ~RT_Object_Class_Static) != find_arg.type)
                 {
-                    rt_hw_interrupt_enable(level);
+                    rt_spin_unlock_irqrestore(&info->spinlock, level);
                     continue;
                 }
 
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&info->spinlock, level);
 
                 device = (struct rt_device *)obj;
                 device_type = "Unknown";

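Every list_*() hunk above applies the same transformation: resolve the per-class rt_object_information once from find_arg.list, then guard the object-type check with that class's spinlock instead of masking interrupts globally. A minimal sketch of the pattern, assuming the `spinlock` member this patch adds to struct rt_object_information:

    /* Sketch of the locking pattern shared by every list_*() command above.
     * Assumes list_find_init() filled find_arg and that this patch's
     * struct rt_object_information carries a per-class `spinlock` member. */
    static rt_bool_t object_type_matches(struct rt_object_information *info,
                                         rt_list_t *node, rt_uint8_t type)
    {
        rt_base_t level;
        rt_bool_t match;
        struct rt_object *obj = rt_list_entry(node, struct rt_object, list);

        level = rt_spin_lock_irqsave(&info->spinlock); /* class lock, not a global IRQ mask */
        match = ((obj->type & ~RT_Object_Class_Static) == type);
        rt_spin_unlock_irqrestore(&info->spinlock, level);

        return match;
    }

On SMP this only serializes against mutations of the same class's object list; other classes and other CPUs proceed unblocked.
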
+ 20 - 14
components/libc/compilers/common/ctime.c

@@ -23,6 +23,7 @@
  * 2023-07-16     Shell        update signal generation routine for lwp
  *                             adapt to new api and do the signal handling in thread context
  * 2023-08-12     Meco Man     re-implement RT-Thread lightweight timezone API
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include "sys/time.h"
@@ -136,21 +137,12 @@ static volatile int32_t _current_tz_offset_sec = \
 /* return current timezone offset in seconds */
 void rt_tz_set(int32_t offset_sec)
 {
-    rt_base_t level;
-    level = rt_hw_interrupt_disable();
     _current_tz_offset_sec = offset_sec;
-    rt_hw_interrupt_enable(level);
 }
 
 int32_t rt_tz_get(void)
 {
-    int32_t offset_sec;
-    rt_base_t level;
-
-    level = rt_hw_interrupt_disable();
-    offset_sec = _current_tz_offset_sec;
-    rt_hw_interrupt_enable(level);
-    return offset_sec;
+    return _current_tz_offset_sec;
 }
 
 int8_t rt_tz_is_dst(void)
@@ -796,11 +788,15 @@ static void _lwp_timer_event_from_tid(struct rt_work *work, void *param)
 
     RT_ASSERT(data->tid);
 
-    thread = lwp_tid_get_thread(data->tid);
+    /* stop others from deleting the thread */
+    thread = lwp_tid_get_thread_and_inc_ref(data->tid);
+    /** The tid of a thread is READ ONLY, but we still face the risk that the thread has already been deleted */
     ret = lwp_thread_signal_kill(thread, data->signo, SI_TIMER, 0);
+    lwp_tid_dec_ref(thread);
+
     if (ret)
     {
-        LOG_W("%s: Do kill failed(tid %d) returned %d", __func__, data->tid, ret);
+        LOG_D("%s: Do kill failed(tid %d) returned %d", __func__, data->tid, ret);
     }
 }
 
@@ -808,11 +804,21 @@ static void _lwp_timer_event_from_pid(struct rt_work *work, void *param)
 {
     rt_err_t ret;
     struct lwp_timer_event_param *data = rt_container_of(work, struct lwp_timer_event_param, work);
+    struct rt_lwp *lwp;
+
+    lwp_pid_lock_take();
+    lwp = lwp_from_pid_locked(data->pid);
+    if (lwp)
+        lwp_ref_inc(lwp);
+    lwp_pid_lock_release();
+
+    ret = lwp_signal_kill(lwp, data->signo, SI_TIMER, 0);
+    if (lwp)
+        lwp_ref_dec(lwp);
 
-    ret = lwp_signal_kill(lwp_from_pid(data->pid), data->signo, SI_TIMER, 0);
     if (ret)
     {
-        LOG_W("%s: Do kill failed(pid %d) returned %d", __func__, data->pid, ret);
+        LOG_D("%s: Do kill failed(pid %d) returned %d", __func__, data->pid, ret);
     }
 }
 

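Two distinct fixes above: rt_tz_set/rt_tz_get drop their lock because a single aligned volatile int32_t store or load needs no critical section on the supported targets, and the timer workers now pin their target with a reference count before signalling it. A sketch of the pid-side pattern, using only the lock/lookup/refcount calls the hunk itself introduces (the signo parameter is illustrative):

    /* Sketch: signal a process that may exit concurrently. lwp_signal_kill()
     * is called with a possibly NULL lwp, exactly as the hunk above does. */
    static rt_err_t signal_pid_safely(pid_t pid, int signo)
    {
        struct rt_lwp *lwp;
        rt_err_t ret;

        lwp_pid_lock_take();
        lwp = lwp_from_pid_locked(pid);  /* pointer valid only under the pid lock */
        if (lwp)
            lwp_ref_inc(lwp);            /* keep the lwp alive past the unlock */
        lwp_pid_lock_release();

        ret = lwp_signal_kill(lwp, signo, SI_TIMER, 0);
        if (lwp)
            lwp_ref_dec(lwp);            /* may free the lwp if it has exited */

        return ret;
    }
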
+ 6 - 4
components/libc/posix/io/poll/poll.c

@@ -32,6 +32,8 @@ struct rt_poll_node
     struct rt_poll_node *next;
 };
 
+static RT_DEFINE_SPINLOCK(_spinlock);
+
 static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
 {
     struct rt_poll_node *pn;
@@ -85,7 +87,7 @@ static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
 
     timeout = rt_tick_from_millisecond(msec);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
 
     if (timeout != 0 && !pt->triggered)
     {
@@ -99,16 +101,16 @@ static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
                 rt_timer_start(&(thread->thread_timer));
             }
 
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_spinlock, level);
 
             rt_schedule();
 
-            level = rt_hw_interrupt_disable();
+            level = rt_spin_lock_irqsave(&_spinlock);
         }
     }
 
     ret = !pt->triggered;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return ret;
 }

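The new static _spinlock narrows what used to be a global IRQ mask down to the poll table's triggered flag. The lock ordering is the important part, as the condensed sketch below shows (thread suspension and timer arming are elided):

    /* Condensed sketch of poll_wait_timeout()'s sleep side: the triggered
     * test and the sleep decision share _spinlock with the waker, and the
     * lock is dropped before rt_schedule() so the waker can run. */
    static int wait_triggered(struct rt_poll_table *pt, int msec)
    {
        int ret;
        rt_base_t level;

        level = rt_spin_lock_irqsave(&_spinlock);
        if (msec != 0 && !pt->triggered)
        {
            /* suspend the current thread and start its timer here (elided) */
            rt_spin_unlock_irqrestore(&_spinlock, level);
            rt_schedule();                    /* sleep until wakeup or timeout */
            level = rt_spin_lock_irqsave(&_spinlock);
        }
        ret = !pt->triggered;                 /* nonzero means we timed out */
        rt_spin_unlock_irqrestore(&_spinlock, level);

        return ret;
    }
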
+ 1 - 6
components/libc/posix/pthreads/pthread.c

@@ -17,13 +17,12 @@
 #include <sys/time.h>
 #include "pthread_internal.h"
 
-RT_DEFINE_SPINLOCK(pth_lock);
+RT_DEFINE_HW_SPINLOCK(pth_lock);
 _pthread_data_t *pth_table[PTHREAD_NUM_MAX] = {NULL};
 static int concurrency_level;
 
 _pthread_data_t *_pthread_get_data(pthread_t thread)
 {
-    RT_DECLARE_SPINLOCK(pth_lock);
     _pthread_data_t *ptd;
 
     if (thread >= PTHREAD_NUM_MAX) return NULL;
@@ -40,7 +39,6 @@ _pthread_data_t *_pthread_get_data(pthread_t thread)
 pthread_t _pthread_data_get_pth(_pthread_data_t *ptd)
 {
     int index;
-    RT_DECLARE_SPINLOCK(pth_lock);
 
     rt_hw_spin_lock(&pth_lock);
     for (index = 0; index < PTHREAD_NUM_MAX; index ++)
@@ -56,7 +54,6 @@ pthread_t _pthread_data_create(void)
 {
     int index;
     _pthread_data_t *ptd = NULL;
-    RT_DECLARE_SPINLOCK(pth_lock);
 
     ptd = (_pthread_data_t*)rt_malloc(sizeof(_pthread_data_t));
     if (!ptd) return PTHREAD_NUM_MAX;
@@ -90,8 +87,6 @@ pthread_t _pthread_data_create(void)
 
 void _pthread_data_destroy(_pthread_data_t *ptd)
 {
-    RT_DECLARE_SPINLOCK(pth_lock);
-
     extern _pthread_key_data_t _thread_keys[PTHREAD_KEY_MAX];
     pthread_t pth;
 

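pth_lock becomes a real RT_DEFINE_HW_SPINLOCK object, so the per-function RT_DECLARE_SPINLOCK(pth_lock) shims can go; they were only needed when the lock was a build-dependent macro rather than an object. The access pattern is unchanged; a minimal sketch, assuming the raw rt_hw_spin_lock/rt_hw_spin_unlock primitives the file already calls:

    /* Sketch: table lookup under the raw hw spinlock. Keep the critical
     * section tiny; the raw primitive as used here is a busy-wait lock. */
    static _pthread_data_t *lookup(pthread_t thread)
    {
        _pthread_data_t *ptd = NULL;

        if (thread < PTHREAD_NUM_MAX)
        {
            rt_hw_spin_lock(&pth_lock);
            ptd = pth_table[thread];
            rt_hw_spin_unlock(&pth_lock);
        }
        return ptd;
    }
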
+ 25 - 0
components/lwp/arch/aarch64/cortex-a/lwp_arch.c

@@ -96,6 +96,31 @@ int arch_expand_user_stack(void *addr)
 }
 
 #endif
+
+int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
+                            void *user_stack, void **thread_sp)
+{
+    struct rt_hw_exp_stack *syscall_frame;
+    struct rt_hw_exp_stack *thread_frame;
+    struct rt_hw_exp_stack *ori_syscall = rt_thread_self()->user_ctx.ctx;
+    RT_ASSERT(ori_syscall != RT_NULL);
+
+    thread_frame = (void *)((long)new_thread_stack - sizeof(struct rt_hw_exp_stack));
+    syscall_frame = (void *)((long)new_thread_stack - 2 * sizeof(struct rt_hw_exp_stack));
+
+    memcpy(syscall_frame, ori_syscall, sizeof(*syscall_frame));
+    syscall_frame->sp_el0 = (long)user_stack;
+    syscall_frame->x0 = 0;
+
+    thread_frame->cpsr = ((3 << 6) | 0x4 | 0x1);
+    thread_frame->pc = (long)exit;
+    thread_frame->x0 = 0;
+
+    *thread_sp = syscall_frame;
+
+    return 0;
+}
+
 #define ALGIN_BYTES (16)
 
 struct signal_ucontext

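arch_set_thread_context moves from assembly (deleted from lwp_gcc.S below) into C. A sketch of the two exception frames it carves out of the new kernel stack, stating only what the code above assigns:

    /* Frames built by arch_set_thread_context() (sketch; the stack grows
     * downward, CONTEXT = sizeof(struct rt_hw_exp_stack)):
     *
     *   new_thread_stack ................... top of the new kernel stack
     *   thread_frame  = top - 1*CONTEXT .... pc = exit, cpsr = EL1h with
     *                                        IRQ/FIQ masked, x0 = 0
     *   syscall_frame = top - 2*CONTEXT .... byte copy of the parent's
     *   (*thread_sp points here)             syscall frame, with sp_el0 set
     *                                        to user_stack and x0 = 0, so
     *                                        clone() returns 0 in the child
     */
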
+ 13 - 20
components/lwp/arch/aarch64/cortex-a/lwp_gcc.S

@@ -94,21 +94,6 @@ arch_crt_start_umode:
     msr elr_el1, x1
     eret
 
-/*
-void arch_set_thread_context(void *exit_addr, void *new_thread_stack, void *user_stack, void **thread_sp);
-*/
-.global arch_set_thread_context
-arch_set_thread_context:
-    sub x1, x1, #CONTEXT_SIZE
-    str x2, [x1, #CONTEXT_OFFSET_SP_EL0]
-    sub x1, x1, #CONTEXT_SIZE
-    str xzr, [x1, #CONTEXT_OFFSET_X0]       /* new thread return 0 */
-    mov x4, #((3 << 6) | 0x4 | 0x1)         /* el1h, disable interrupt */
-    str x4, [x1, #CONTEXT_OFFSET_SPSR_EL1]
-    str x0, [x1, #CONTEXT_OFFSET_ELR_EL1]
-    str x1, [x3]
-    ret
-
 .global arch_get_user_sp
 arch_get_user_sp:
     mrs x0, sp_el0
@@ -197,6 +182,7 @@ arch_syscall_exit:
 
 /* the sp is reset to the outer most level, irq and fiq are disabled */
 START_POINT(arch_ret_to_user)
+    msr daifset, #3
     /* save exception frame */
     SAVE_FPU sp
     stp x0, x1, [sp, #-0x10]!
@@ -226,6 +212,7 @@ START_POINT(arch_ret_to_user)
     bl lwp_check_exit_request
     cbz w0, 1f
     /* exit on event */
+    msr daifclr, #3
     mov x0, xzr
     b sys_exit
 1:
@@ -247,7 +234,7 @@ START_POINT(arch_ret_to_user)
 
     /**
      * push 2 dummy words to simulate a exception frame of interrupt
-     * @note in kernel state, the context switch dont saved the context
+     * Note: in kernel state, the context switch doesn't save the context
      */
     mrs x0, spsr_el1
     mrs x1, elr_el1
@@ -397,7 +384,6 @@ lwp_check_debug_quit:
     ret
 
 arch_signal_quit:
-    msr daifset, #3
 
     /* drop current exception frame */
     add sp, sp, #CONTEXT_SIZE
@@ -406,6 +392,12 @@ arch_signal_quit:
     add x0, x0, #-CONTEXT_SIZE
     msr sp_el0, x0
 
+    /**
+     * Note: Since we are about to reset spsr, and a reschedule would
+     * corrupt it, we disable irq for a short period here
+     */
+    msr daifset, #3
+
     /* restore previous exception frame */
     msr spsel, #0
 
@@ -471,14 +463,14 @@ arch_thread_signal_enter:
     dsb sy
 
     /**
-     * @brief Prepare the environment for signal handler
+     * Brief: Prepare the environment for signal handler
      */
 
-    /** 
+    /**
      * reset the cpsr
      * and drop exp frame on kernel stack, reset kernel sp
      *
-     * @note Since we will reset spsr, but the reschedule will
+     * Note: Since we are about to reset spsr, and a reschedule would
     * corrupt it, we disable irq for a short period here
      */
     msr daifset, #3
@@ -501,6 +493,7 @@ arch_thread_signal_enter:
 
     /**
      * handler(signo, psi, ucontext);
+     *
      */
     eret
 

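A reading of the DAIF changes above, noted here as an interpretation rather than part of the patch:

    /* - arch_ret_to_user masks IRQ/FIQ first (msr daifset, #3) so nothing
     *   preempts it while it rebuilds the exception frame, and unmasks
     *   (msr daifclr, #3) only on the sys_exit path, which never returns
     *   to this frame.
     * - arch_signal_quit moves its daifset below the sp_el0 setup so the
     *   mask covers exactly the spsr-sensitive window its comment names.
     */
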
+ 82 - 35
components/lwp/libc_musl.h

@@ -15,14 +15,14 @@
 
 #define FUTEX_WAIT        0
 #define FUTEX_WAKE        1
-#define FUTEX_FD        2
-#define FUTEX_REQUEUE        3
-#define FUTEX_CMP_REQUEUE    4
-#define FUTEX_WAKE_OP        5
-#define FUTEX_LOCK_PI        6
-#define FUTEX_UNLOCK_PI        7
-#define FUTEX_TRYLOCK_PI    8
-#define FUTEX_WAIT_BITSET    9
+#define FUTEX_FD          2
+#define FUTEX_REQUEUE     3
+#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP     5
+#define FUTEX_LOCK_PI     6
+#define FUTEX_UNLOCK_PI   7
+#define FUTEX_TRYLOCK_PI  8
+#define FUTEX_WAIT_BITSET 9
 
 #define FUTEX_PRIVATE 128
 
@@ -35,30 +35,28 @@
 #define PMUTEX_DESTROY 3
 
 /* for sys/mman.h */
-#define MAP_FAILED ((void *) -1)
+#define MAP_FAILED ((void *)-1)
 
-#define MAP_SHARED     0x01
-#define MAP_PRIVATE    0x02
+#define MAP_SHARED          0x01
+#define MAP_PRIVATE         0x02
 #define MAP_SHARED_VALIDATE 0x03
-#define MAP_TYPE       0x0f
-#define MAP_FIXED      0x10
-#define MAP_ANON       0x20
-#define MAP_ANONYMOUS  MAP_ANON
-#define MAP_NORESERVE  0x4000
-#define MAP_GROWSDOWN  0x0100
-#define MAP_DENYWRITE  0x0800
-#define MAP_EXECUTABLE 0x1000
-#define MAP_LOCKED     0x2000
-#define MAP_POPULATE   0x8000
-#define MAP_NONBLOCK   0x10000
-#define MAP_STACK      0x20000
-#define MAP_HUGETLB    0x40000
-#define MAP_SYNC       0x80000
+#define MAP_TYPE            0x0f
+#define MAP_FIXED           0x10
+#define MAP_ANON            0x20
+#define MAP_ANONYMOUS       MAP_ANON
+#define MAP_NORESERVE       0x4000
+#define MAP_GROWSDOWN       0x0100
+#define MAP_DENYWRITE       0x0800
+#define MAP_EXECUTABLE      0x1000
+#define MAP_LOCKED          0x2000
+#define MAP_POPULATE        0x8000
+#define MAP_NONBLOCK        0x10000
+#define MAP_STACK           0x20000
+#define MAP_HUGETLB         0x40000
+#define MAP_SYNC            0x80000
 #define MAP_FIXED_NOREPLACE 0x100000
-#define MAP_FILE       0
-
-#define MAP_UNINITIALIZED 0x4000000 /** For anonymous mmap, memory could be
-                                     * uninitialized */
+#define MAP_FILE            0
+#define MAP_UNINITIALIZED   0x4000000
 
 #define MAP_HUGE_SHIFT 26
 #define MAP_HUGE_MASK  0x3f
@@ -83,13 +81,13 @@
 #define PROT_GROWSDOWN 0x01000000
 #define PROT_GROWSUP   0x02000000
 
-#define MS_ASYNC       1
-#define MS_INVALIDATE  2
-#define MS_SYNC        4
+#define MS_ASYNC      1
+#define MS_INVALIDATE 2
+#define MS_SYNC       4
 
-#define MCL_CURRENT    1
-#define MCL_FUTURE     2
-#define MCL_ONFAULT    4
+#define MCL_CURRENT 1
+#define MCL_FUTURE  2
+#define MCL_ONFAULT 4
 
 #define POSIX_MADV_NORMAL     0
 #define POSIX_MADV_RANDOM     1
@@ -97,4 +95,53 @@
 #define POSIX_MADV_WILLNEED   3
 #define POSIX_MADV_DONTNEED   4
 
+#define CLONE_VM             0x00000100
+#define CLONE_FS             0x00000200
+#define CLONE_FILES          0x00000400
+#define CLONE_SIGHAND        0x00000800
+#define CLONE_PTRACE         0x00002000
+#define CLONE_VFORK          0x00004000
+#define CLONE_PARENT         0x00008000
+#define CLONE_THREAD         0x00010000
+#define CLONE_NEWNS          0x00020000
+#define CLONE_SYSVSEM        0x00040000
+#define CLONE_SETTLS         0x00080000
+#define CLONE_PARENT_SETTID  0x00100000
+#define CLONE_CHILD_CLEARTID 0x00200000
+#define CLONE_DETACHED       0x00400000
+#define CLONE_UNTRACED       0x00800000
+#define CLONE_CHILD_SETTID   0x01000000
+#define CLONE_NEWCGROUP      0x02000000
+#define CLONE_NEWUTS         0x04000000
+#define CLONE_NEWIPC         0x08000000
+#define CLONE_NEWUSER        0x10000000
+#define CLONE_NEWPID         0x20000000
+#define CLONE_NEWNET         0x40000000
+#define CLONE_IO             0x80000000
+
+/* arg[] -> flags
+ *          stack
+ *          new_tid
+ *          tls
+ *          set_clear_tid_address
+ *          quit_func
+ *          start_args
+ *          */
+#define SYS_CLONE_ARGS_NR 7
+
+/* wait.h */
+
+/* options */
+#define WNOHANG   1
+#define WUNTRACED 2
+
+#define WSTOPPED   2
+#define WEXITED    4
+#define WCONTINUED 8
+#define WNOWAIT    0x1000000
+
+#define __WNOTHREAD 0x20000000
+#define __WALL      0x40000000
+#define __WCLONE    0x80000000
+
 #endif /* __LIBC_MUSL_H__ */

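The CLONE_* flags and SYS_CLONE_ARGS_NR move here verbatim from the deleted lwp_clone.h (below); the wait.h option bits are new. A hedged user-side sketch of the new wait options, assuming the musl-style status encoding that LWP_CREATE_STAT in lwp.h produces; waitpid() stands in for whatever wait syscall consumes these bits on the lwp side:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* Sketch: reap exited children without blocking. */
    static void reap_children_nonblocking(void)
    {
        int status;
        pid_t pid;

        while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
        {
            /* LWP_CREATE_STAT (lwp.h) packs the exit code into bits 8..15,
             * which is the field a musl-style WEXITSTATUS() unpacks. */
            printf("child %d exited with %d\n", (int)pid, (status >> 8) & 0xff);
        }
    }
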
+ 61 - 17
components/lwp/lwp.c

@@ -14,7 +14,7 @@
  * 2023-10-16     Shell        Support a new backtrace framework
  */
 
-#define DBG_TAG "LWP"
+#define DBG_TAG "lwp"
 #define DBG_LVL DBG_WARNING
 #include <rtdbg.h>
 
@@ -34,7 +34,7 @@
 #error "lwp need file system(RT_USING_DFS)"
 #endif
 
-#include "lwp.h"
+#include "lwp_internal.h"
 #include "lwp_arch.h"
 #include "lwp_arch_comm.h"
 #include "lwp_signal.h"
@@ -68,6 +68,21 @@ struct termios *get_old_termios(void)
     return &old_stdin_termios;
 }
 
+int lwp_component_init(void)
+{
+    int rc;
+    if ((rc = lwp_tid_init()) != RT_EOK)
+    {
+        LOG_E("%s: lwp_component_init() failed", __func__);
+    }
+    else if ((rc = lwp_pid_init()) != RT_EOK)
+    {
+        LOG_E("%s: lwp_pid_init() failed", __func__);
+    }
+    return rc;
+}
+INIT_COMPONENT_EXPORT(lwp_component_init);
+
 void lwp_setcwd(char *buf)
 {
     struct rt_lwp *lwp = RT_NULL;
@@ -1030,10 +1045,9 @@ out:
     return ret;
 }
 
-/* lwp thread clean up */
+/* lwp-thread clean up routine */
 void lwp_cleanup(struct rt_thread *tid)
 {
-    rt_base_t level;
     struct rt_lwp *lwp;
 
     if (tid == NULL)
@@ -1044,16 +1058,16 @@ void lwp_cleanup(struct rt_thread *tid)
     else
         LOG_D("cleanup thread: %s, stack_addr: 0x%x", tid->parent.name, tid->stack_addr);
 
-    level = rt_hw_interrupt_disable();
+    /**
+     * Brief: lwp thread cleanup
+     *
+     * Note: Critical Section
+     * - thread control block (RW. It's ensured that no one else can access tcb
+     *   other than itself)
+     */
     lwp = (struct rt_lwp *)tid->lwp;
-
-    /* lwp thread cleanup */
-    lwp_tid_put(tid->tid);
-    rt_list_remove(&tid->sibling);
     lwp_thread_signal_detach(&tid->signal);
 
-    rt_hw_interrupt_enable(level);
-
     /* tty will be release in lwp_ref_dec() if ref is cleared */
     lwp_ref_dec(lwp);
     return;
@@ -1133,10 +1147,44 @@ struct rt_lwp *lwp_self(void)
     return RT_NULL;
 }
 
+rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child)
+{
+    /* lwp add to children link */
+    LWP_LOCK(parent);
+    child->sibling = parent->first_child;
+    parent->first_child = child;
+    child->parent = parent;
+    LWP_UNLOCK(parent);
+
+    LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
+
+    return 0;
+}
+
+rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child)
+{
+    struct rt_lwp **lwp_node;
+
+    LWP_LOCK(parent);
+    /* detach from children link */
+    lwp_node = &parent->first_child;
+    while (*lwp_node != child)
+    {
+        RT_ASSERT(*lwp_node != RT_NULL);
+        lwp_node = &(*lwp_node)->sibling;
+    }
+    (*lwp_node) = child->sibling;
+    child->parent = RT_NULL;
+    LWP_UNLOCK(parent);
+
+    LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
+
+    return 0;
+}
+
 pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
 {
     int result;
-    rt_base_t level;
     struct rt_lwp *lwp;
     char *thread_name;
     char *argv_last = argv[argc - 1];
@@ -1231,7 +1279,6 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
             lwp_tid_set_thread(tid, thread);
             LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
                     (rt_size_t)thread->stack_addr + thread->stack_size);
-            level = rt_hw_interrupt_disable();
             self_lwp = lwp_self();
             if (self_lwp)
             {
@@ -1239,9 +1286,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
                 lwp->__pgrp = tid;
                 lwp->session = self_lwp->session;
                 /* lwp add to children link */
-                lwp->sibling = self_lwp->first_child;
-                self_lwp->first_child = lwp;
-                lwp->parent = self_lwp;
+                lwp_children_register(self_lwp, lwp);
             }
             else
             {
@@ -1331,7 +1376,6 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
                 lwp->debug = debug;
                 rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
             }
-            rt_hw_interrupt_enable(level);
 
             rt_thread_startup(thread);
             return lwp_to_pid(lwp);

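lwp_children_register/unregister pull the first_child/sibling list surgery out of lwp_execve and put it under LWP_LOCK(parent). The unregister walk is the classic pointer-to-pointer unlink; a standalone sketch of the same idiom:

    #include <stddef.h>

    /* Sketch: the pointer-to-pointer unlink that lwp_children_unregister()
     * performs on the parent's first_child/sibling chain. */
    struct node { struct node *next; };

    static void unlink_node(struct node **head, struct node *victim)
    {
        struct node **cursor = head;

        while (*cursor != victim)       /* walk the link fields themselves */
            cursor = &(*cursor)->next;
        *cursor = victim->next;         /* bypass the victim in one store */
        victim->next = NULL;
    }

This shape needs no special case for removing the head, which is why the kernel code can assert its way down the chain instead of branching.
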
+ 26 - 2
components/lwp/lwp.h

@@ -37,6 +37,7 @@
 
 #ifdef ARCH_MM_MMU
 #include "lwp_shm.h"
+#include <locale.h>
 #include "mmu.h"
 #include "page.h"
 #else
@@ -75,6 +76,12 @@ struct rt_lwp_notify
     rt_slist_t list_node;
 };
 
+#ifdef RT_USING_MUSLLIBC
+#define LWP_CREATE_STAT(exit_code) (((exit_code) & 0xff) << 8)
+#else
+#error "No compatible lwp set status provided for this libc"
+#endif
+
 struct rt_lwp
 {
 #ifdef ARCH_MM_MMU
@@ -98,7 +105,6 @@ struct rt_lwp
     struct rt_lwp *sibling;
 
     rt_list_t wait_list;
-    rt_bool_t finish;
     rt_bool_t terminated;
     rt_bool_t background;
     int lwp_ret;
@@ -150,6 +156,8 @@ struct rt_lwp
 typedef struct rt_lwp *rt_lwp_t;
 
 struct rt_lwp *lwp_self(void);
+rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child);
+rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child);
 
 enum lwp_exit_request_type
 {
@@ -165,9 +173,25 @@ int  lwp_check_exit_request(void);
 void lwp_terminate(struct rt_lwp *lwp);
 void lwp_wait_subthread_exit(void);
 
+int lwp_tid_init(void);
 int lwp_tid_get(void);
 void lwp_tid_put(int tid);
-rt_thread_t lwp_tid_get_thread(int tid);
+
+/**
+ * @brief Automatically get a thread and increase a reference count
+ *
+ * @param tid queried thread ID
+ * @return rt_thread_t
+ */
+rt_thread_t lwp_tid_get_thread_and_inc_ref(int tid);
+
+/**
+ * @brief Decrease a reference count
+ *
+ * @param thread target thread
+ */
+void lwp_tid_dec_ref(rt_thread_t thread);
+
 void lwp_tid_set_thread(int tid, rt_thread_t thread);
 
 int lwp_execve(char *filename, int debug, int argc, char **argv, char **envp);

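The tid lookup is split into an acquire/release pair so a looked-up thread cannot be freed while the caller uses it; the timer worker in ctime.c above is the first user. A sketch of the expected calling pattern; both calls tolerate the RT_NULL that a stale tid produces, as that worker relies on:

    /* Sketch: acquire/use/release around the new tid API. */
    static rt_err_t signal_tid_safely(int tid, int signo)
    {
        rt_err_t ret;
        rt_thread_t thread = lwp_tid_get_thread_and_inc_ref(tid);

        ret = lwp_thread_signal_kill(thread, signo, SI_TIMER, 0);
        lwp_tid_dec_ref(thread);

        return ret;
    }
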
+ 0 - 47
components/lwp/lwp_clone.h

@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2006-2023, RT-Thread Development Team
- *
- * SPDX-License-Identifier: Apache-2.0
- *
- * Change Logs:
- * Date           Author       Notes
- * 2023-07-06     RT-Thread    the first version
- */
-#ifndef __LWP_CLONE_H__
-#define __LWP_CLONE_H__
-
-#define CLONE_VM    0x00000100
-#define CLONE_FS    0x00000200
-#define CLONE_FILES 0x00000400
-#define CLONE_SIGHAND   0x00000800
-#define CLONE_PTRACE    0x00002000
-#define CLONE_VFORK 0x00004000
-#define CLONE_PARENT    0x00008000
-#define CLONE_THREAD    0x00010000
-#define CLONE_NEWNS 0x00020000
-#define CLONE_SYSVSEM   0x00040000
-#define CLONE_SETTLS    0x00080000
-#define CLONE_PARENT_SETTID 0x00100000
-#define CLONE_CHILD_CLEARTID    0x00200000
-#define CLONE_DETACHED  0x00400000
-#define CLONE_UNTRACED  0x00800000
-#define CLONE_CHILD_SETTID  0x01000000
-#define CLONE_NEWCGROUP 0x02000000
-#define CLONE_NEWUTS    0x04000000
-#define CLONE_NEWIPC    0x08000000
-#define CLONE_NEWUSER   0x10000000
-#define CLONE_NEWPID    0x20000000
-#define CLONE_NEWNET    0x40000000
-#define CLONE_IO    0x80000000
-
-/* arg[] -> flags
- *          stack
- *          new_tid
- *          tls
- *          set_clear_tid_address
- *          quit_func
- *          start_args
- *          */
-#define SYS_CLONE_ARGS_NR 7
-
-#endif /* __LWP_CLONE_H__ */

+ 4 - 4
components/lwp/lwp_internal.h

@@ -82,7 +82,7 @@ rt_err_t lwp_critical_exit(struct rt_lwp *lwp);
 #endif /* LWP_OVERRIDE_CPUS_LOCK */
 
 /**
- * @brief Return code with safety check
+ * Brief: Return code with safety check
  * There tend to be chances where a return value is returned without correctly init
  */
 #ifndef LWP_DEBUG
@@ -90,9 +90,9 @@ rt_err_t lwp_critical_exit(struct rt_lwp *lwp);
 #define RETURN(name)            return name
 
 #else
-#define UNINITIALIZED 0xbeefcafe
-#define DEF_RETURN_CODE(name)   rt_err_t name = UNINITIALIZED
-#define RETURN(name)            {RT_ASSERT(name != UNINITIALIZED);return name;}
+#define _LWP_UNINITIALIZED_RC   0xbeefcafe
+#define DEF_RETURN_CODE(name)   rt_err_t name = _LWP_UNINITIALIZED_RC
+#define RETURN(name)            {RT_ASSERT(name != _LWP_UNINITIALIZED_RC);return name;}
 #endif /* LWP_DEBUG */
 
 #endif /* __LWP_INTERNAL_H__ */

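Renaming UNINITIALIZED to _LWP_UNINITIALIZED_RC keeps the debug poison value out of the global macro namespace. What the macro pair buys you, as a sketch:

    /* Sketch: under LWP_DEBUG, a path that returns without assigning the
     * return code trips the RT_ASSERT inside RETURN() instead of silently
     * leaking the poison value. */
    static rt_err_t do_operation(int arg)
    {
        DEF_RETURN_CODE(rc);   /* rc = _LWP_UNINITIALIZED_RC in debug builds */

        if (arg > 0)
            rc = RT_EOK;
        else if (arg < 0)
            rc = -RT_EINVAL;
        /* arg == 0 forgets to assign rc: RETURN() asserts loudly here */

        RETURN(rc);
    }
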
+ 338 - 294
components/lwp/lwp_ipc.c

@@ -6,12 +6,18 @@
  * Change Logs:
  * Date           Author       Notes
  * 2019-10-12     Jesven       first version
+ * 2023-07-25     Shell        Remove usage of rt_hw_interrupt API in the lwp
  * 2023-09-16     zmq810150896 Increased versatility of some features on dfs v2
  */
+
+#define DBG_TAG "lwp.ipc"
+#define DBG_LVL DBG_WARNING
+#include <rtdbg.h>
+
 #include <rtthread.h>
 #include <rthw.h>
-#include <lwp.h>
 
+#include "lwp_internal.h"
 #include "lwp_ipc.h"
 #include "lwp_ipc_internal.h"
 
@@ -49,6 +55,10 @@ static rt_ipc_msg_t _ipc_msg_free_list = (rt_ipc_msg_t)RT_NULL; /* released chai
 static int rt_ipc_msg_used = 0;                                 /* first unallocated entry */
 static struct rt_ipc_msg ipc_msg_pool[RT_CH_MSG_MAX_NR];        /* initial message array */
 
+static rt_spinlock_t ipc_big_lock;
+#define ipc_list_lock   ipc_big_lock
+#define ipc_ch_lock     ipc_big_lock
+
 /**
  * Allocate an IPC message from the statically-allocated array.
  */
@@ -119,19 +129,16 @@ rt_inline rt_err_t rt_channel_list_resume(rt_list_t *list)
 /**
  * Wakeup all the suspended threads in the list.
  */
-rt_inline rt_err_t rt_channel_list_resume_all(rt_list_t *list)
+rt_inline rt_err_t _channel_list_resume_all_locked(rt_list_t *list)
 {
     struct rt_thread *thread;
-    register rt_ubase_t temp;
 
     /* wakeup all suspended threads for sending */
     while (!rt_list_isempty(list))
     {
-        temp = rt_hw_interrupt_disable();
         thread = rt_list_entry(list->next, struct rt_thread, tlist);
         thread->error = -RT_ERROR;
         rt_thread_resume(thread);
-        rt_hw_interrupt_enable(temp);
     }
 
     return RT_EOK;
@@ -154,19 +161,14 @@ rt_inline rt_err_t rt_channel_list_suspend(rt_list_t *list, struct rt_thread *th
 }
 
 
-static void _rt_channel_check_wq_wakup(rt_channel_t ch)
+static void _rt_channel_check_wq_wakup_locked(rt_channel_t ch)
 {
-    rt_base_t level;
-
-    level = rt_hw_interrupt_disable();
     if (rt_list_isempty(&ch->wait_msg))
     {
-        rt_hw_interrupt_enable(level);
         return;
     }
 
     rt_wqueue_wakeup(&ch->reader_queue, 0);
-    rt_hw_interrupt_enable(level);
 }
 
 /**
@@ -174,28 +176,39 @@ static void _rt_channel_check_wq_wakup(rt_channel_t ch)
  */
 rt_channel_t rt_raw_channel_open(const char *name, int flags)
 {
-    register rt_ubase_t temp = 0;
+    rt_err_t err = RT_EOK;
     rt_channel_t ch = RT_NULL;
 
     struct rt_object *object;
     struct rt_list_node *node;
     struct rt_object_information *information;
 
-    temp = rt_hw_interrupt_disable();
+    RT_DEBUG_NOT_IN_INTERRUPT;
+
+    /**
+     * Brief: Match an existing channel on the object list by name.
+     *        If no such channel is found, create a new one when O_CREAT
+     *        is set in the flags
+     *
+     * Note: Critical Section
+     * - Channel Object list (RW; this may write to a channel if needed, and
+     *   the RCU operation of the routine should be atomic)
+     */
+    rt_spin_lock(&ipc_list_lock);
     information = rt_object_get_information(RT_Object_Class_Channel);
     RT_ASSERT(information != RT_NULL);
 
-    /* retrieve the existing IPC channels */
-    for (node  = information->object_list.next;
-            node != &(information->object_list);
-            node  = node->next)
+    for (node = information->object_list.next;
+         node != &(information->object_list);
+         node = node->next)
     {
         object = rt_list_entry(node, struct rt_object, list);
         if (rt_strncmp(object->name, name, RT_NAME_MAX) == 0)
         {
             if ((flags & O_CREAT) && (flags & O_EXCL))
             {
-                goto quit;
+                err = -RT_EFULL;
+                break;
             }
             /* find the IPC channel with the specific name */
             ch = (rt_channel_t)object;
@@ -203,31 +216,30 @@ rt_channel_t rt_raw_channel_open(const char *name, int flags)
             break;
         }
     }
-    if (!ch) /* create a new IPC channel */
+
+    if (!ch && err == RT_EOK)
     {
+        /* create a new IPC channel */
         if (flags & O_CREAT)
         {
-            RT_DEBUG_NOT_IN_INTERRUPT;
-
             /* allocate a real IPC channel structure */
             ch = (rt_channel_t)rt_object_allocate(RT_Object_Class_Channel, name);
         }
 
-        if (!ch)
+        if (ch)
         {
-            goto quit;
+            rt_channel_object_init(&ch->parent);    /* suspended receivers */
+            rt_list_init(&ch->wait_msg);            /* unhandled messages */
+            rt_list_init(&ch->wait_thread);         /* suspended senders */
+            rt_wqueue_init(&ch->reader_queue);      /* reader poll queue */
+            ch->reply = RT_NULL;
+            ch->stat = RT_IPC_STAT_IDLE;            /* no suspended threads */
+            ch->ref = 1;
         }
-
-        rt_channel_object_init(&ch->parent);    /* suspended receivers */
-        rt_list_init(&ch->wait_msg);            /* unhandled messages */
-        rt_list_init(&ch->wait_thread);         /* suspended senders */
-        rt_wqueue_init(&ch->reader_queue);      /* reader poll queue */
-        ch->reply = RT_NULL;
-        ch->stat = RT_IPC_STAT_IDLE;            /* no suspended threads */
-        ch->ref = 1;
     }
-quit:
-    rt_hw_interrupt_enable(temp);
+
+    rt_spin_unlock(&ipc_list_lock);
+
     return ch;
 }
 
@@ -236,46 +248,56 @@ quit:
  */
 rt_err_t rt_raw_channel_close(rt_channel_t ch)
 {
-    register rt_ubase_t temp;
+    rt_err_t rc = RT_EOK;
 
     RT_DEBUG_NOT_IN_INTERRUPT;
 
     if (ch == RT_NULL)
     {
-        return -RT_EIO;
-    }
-
-    temp = rt_hw_interrupt_disable();
-    if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
-    {
-        rt_hw_interrupt_enable(temp);
-        return -RT_EIO;
+        rc = -RT_EIO;
     }
-    if (rt_object_is_systemobject(&ch->parent.parent) != RT_FALSE)
+    else
     {
-        rt_hw_interrupt_enable(temp);
-        return -RT_EIO;
-    }
+        /**
+         * Brief: Remove the channel from object list
+         *
+         * Note: Critical Section
+         * - the channel
+         */
+        rt_spin_lock(&ipc_ch_lock);
 
-    if (ch->ref == 0)
-    {
-        rt_hw_interrupt_enable(temp);
-        return -RT_EIO;
-    }
-    ch->ref--;
-    if (ch->ref == 0)
-    {
-        /* wakeup all the suspended receivers and senders */
-        rt_channel_list_resume_all(&ch->parent.suspend_thread);
-        rt_channel_list_resume_all(&ch->wait_thread);
+        if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
+        {
+            rc = -RT_EIO;
+        }
+        else if (rt_object_is_systemobject(&ch->parent.parent) != RT_FALSE)
+        {
+            rc = -RT_EIO;
+        }
+        else if (ch->ref == 0)
+        {
+            rc = -RT_EIO;
+        }
+        else
+        {
+            ch->ref--;
+            if (ch->ref == 0)
+            {
+                /* wakeup all the suspended receivers and senders */
+                _channel_list_resume_all_locked(&ch->parent.suspend_thread);
+                _channel_list_resume_all_locked(&ch->wait_thread);
 
-        /* all ipc msg will lost */
-        rt_list_init(&ch->wait_msg);
+                /* all ipc msgs will be lost */
+                rt_list_init(&ch->wait_msg);
 
-        rt_object_delete(&ch->parent.parent);   /* release the IPC channel structure */
+                rt_object_delete(&ch->parent.parent);   /* release the IPC channel structure */
+            }
+            rc = RT_EOK;
+        }
+        rt_spin_unlock(&ipc_ch_lock);
     }
-    rt_hw_interrupt_enable(temp);
-    return RT_EOK;
+
+    return rc;
 }
 
 static rt_err_t wakeup_sender_wait_recv(void *object, struct rt_thread *thread)
@@ -430,16 +452,15 @@ static int _ipc_msg_fd_new(void *file)
     return fd;
 }
 
+static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, int need_reply, rt_channel_msg_t data_ret, rt_int32_t time, rt_ipc_msg_t msg);
+
 /**
  * Send data through an IPC channel, wait for the reply or not.
  */
-static rt_err_t _rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, int need_reply, rt_channel_msg_t data_ret, rt_int32_t time)
+static rt_err_t _send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, int need_reply, rt_channel_msg_t data_ret, rt_int32_t time)
 {
     rt_ipc_msg_t msg;
-    struct rt_thread *thread_recv, *thread_send = 0;
-    register rt_base_t temp;
-    rt_err_t ret;
-    void (*old_timeout_func)(void *) = 0;
+    rt_err_t rc = -RT_ERROR;
 
     if (need_reply)
     {
@@ -448,29 +469,38 @@ static rt_err_t _rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_ms
 
     if (ch == RT_NULL)
     {
-        return -RT_EIO;
-    }
-
-    temp = rt_hw_interrupt_disable();
-
-    if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
-    {
-        rt_hw_interrupt_enable(temp);
-        return -RT_EIO;
+        rc = -RT_EIO;
     }
-    if (need_reply && time == 0)
+    else
     {
-        rt_hw_interrupt_enable(temp);
-        return -RT_ETIMEOUT;
+        if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
+        {
+            rc = -RT_EIO;
+        }
+        else if (need_reply && time == 0)
+        {
+            rc = -RT_ETIMEOUT;
+        }
+        else
+        {
+            /* allocate an IPC message */
+            msg = _ipc_msg_alloc();
+            if (!msg)
+                rc = -RT_ENOMEM;
+            else
+                rc = _do_send_recv_timeout(ch, data, need_reply, data_ret, time, msg);
+        }
     }
 
-    /* allocate an IPC message */
-    msg = _ipc_msg_alloc();
-    if (!msg)
-    {
-        rt_hw_interrupt_enable(temp);
-        return -RT_ENOMEM;
-    }
+    return rc;
+}
+
+static rt_err_t _do_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, int need_reply, rt_channel_msg_t data_ret, rt_int32_t time, rt_ipc_msg_t msg)
+{
+    DEF_RETURN_CODE(rc);
+    rt_thread_t thread_recv;
+    rt_thread_t thread_send = 0;
+    void (*old_timeout_func)(void *) = 0;
 
     /* IPC message : file descriptor */
     if (data->type == RT_CHANNEL_FD)
@@ -486,43 +516,48 @@ static rt_err_t _rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_ms
         thread_send->error = RT_EOK;
     }
 
+    rt_spin_lock(&ipc_ch_lock);
+
     switch (ch->stat)
     {
         case RT_IPC_STAT_IDLE:
         case RT_IPC_STAT_ACTIVE:
             if (need_reply)
             {
-                ret = rt_channel_list_suspend(&ch->wait_thread, thread_send);
-                if (ret != RT_EOK)
+                rc = rt_channel_list_suspend(&ch->wait_thread, thread_send);
+                if (rc != RT_EOK)
                 {
                     _ipc_msg_free(msg);
-                    rt_hw_interrupt_enable(temp);
-                    return ret;
                 }
-                rt_thread_wakeup_set(thread_send, wakeup_sender_wait_recv, (void*)ch);
-                if (time > 0)
+                else
                 {
-                    rt_timer_control(&(thread_send->thread_timer),
-                            RT_TIMER_CTRL_GET_FUNC,
-                            &old_timeout_func);
-                    rt_timer_control(&(thread_send->thread_timer),
-                            RT_TIMER_CTRL_SET_FUNC,
-                            sender_timeout);
-                    /* reset the timeout of thread timer and start it */
-                    rt_timer_control(&(thread_send->thread_timer),
-                            RT_TIMER_CTRL_SET_TIME,
-                            &time);
-                    rt_timer_start(&(thread_send->thread_timer));
+                    rt_thread_wakeup_set(thread_send, wakeup_sender_wait_recv, (void*)ch);
+                    if (time > 0)
+                    {
+                        rt_timer_control(&(thread_send->thread_timer),
+                                RT_TIMER_CTRL_GET_FUNC,
+                                &old_timeout_func);
+                        rt_timer_control(&(thread_send->thread_timer),
+                                RT_TIMER_CTRL_SET_FUNC,
+                                sender_timeout);
+                        /* reset the timeout of thread timer and start it */
+                        rt_timer_control(&(thread_send->thread_timer),
+                                RT_TIMER_CTRL_SET_TIME,
+                                &time);
+                        rt_timer_start(&(thread_send->thread_timer));
+                    }
                 }
             }
-            /*
+
+            /**
              * If there is no thread waiting for messages, chain the message
              * into the list.
              */
-            rt_list_insert_before(&ch->wait_msg, &msg->mlist);
+            if (rc == RT_EOK)
+                rt_list_insert_before(&ch->wait_msg, &msg->mlist);
             break;
         case RT_IPC_STAT_WAIT:
-            /*
+            /**
              * If there are suspended receivers on the IPC channel, transfer the
              * pointer of the message to the first receiver directly and wake it
              * up.
@@ -531,80 +566,85 @@ static rt_err_t _rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_ms
 
             if (need_reply)
             {
-                ret = rt_channel_list_suspend(&ch->wait_thread, thread_send);
-                if (ret != RT_EOK)
+                rc = rt_channel_list_suspend(&ch->wait_thread, thread_send);
+                if (rc != RT_EOK)
                 {
                     _ipc_msg_free(msg);
-                    rt_hw_interrupt_enable(temp);
-                    return ret;
                 }
-                ch->reply = thread_send;    /* record the current waiting sender */
-                ch->stat = RT_IPC_STAT_ACTIVE;
-                rt_thread_wakeup_set(thread_send, wakeup_sender_wait_reply, (void*)ch);
-                if (time > 0)
+                else
                 {
-                    rt_timer_control(&(thread_send->thread_timer),
-                            RT_TIMER_CTRL_GET_FUNC,
-                            &old_timeout_func);
-                    rt_timer_control(&(thread_send->thread_timer),
-                            RT_TIMER_CTRL_SET_FUNC,
-                            sender_timeout);
-                    /* reset the timeout of thread timer and start it */
-                    rt_timer_control(&(thread_send->thread_timer),
-                            RT_TIMER_CTRL_SET_TIME,
-                            &time);
-                    rt_timer_start(&(thread_send->thread_timer));
+                    ch->reply = thread_send;    /* record the current waiting sender */
+                    ch->stat = RT_IPC_STAT_ACTIVE;
+                    rt_thread_wakeup_set(thread_send, wakeup_sender_wait_reply, (void*)ch);
+                    if (time > 0)
+                    {
+                        rt_timer_control(&(thread_send->thread_timer),
+                                RT_TIMER_CTRL_GET_FUNC,
+                                &old_timeout_func);
+                        rt_timer_control(&(thread_send->thread_timer),
+                                RT_TIMER_CTRL_SET_FUNC,
+                                sender_timeout);
+                        /* reset the timeout of thread timer and start it */
+                        rt_timer_control(&(thread_send->thread_timer),
+                                RT_TIMER_CTRL_SET_TIME,
+                                &time);
+                        rt_timer_start(&(thread_send->thread_timer));
+                    }
                 }
             }
             else
             {
                 ch->stat = RT_IPC_STAT_IDLE;
             }
-            thread_recv = rt_list_entry(ch->parent.suspend_thread.next, struct rt_thread, tlist);
-            thread_recv->msg_ret = msg;     /* to the first suspended receiver */
-            thread_recv->error = RT_EOK;
-            rt_channel_list_resume(&ch->parent.suspend_thread);
+
+            if (!need_reply || rc == RT_EOK)
+            {
+                thread_recv = rt_list_entry(ch->parent.suspend_thread.next, struct rt_thread, tlist);
+                thread_recv->msg_ret = msg;     /* to the first suspended receiver */
+                thread_recv->error = RT_EOK;
+                rt_channel_list_resume(&ch->parent.suspend_thread);
+            }
             break;
         default:
             break;
     }
 
-    if ( ch->stat == RT_IPC_STAT_IDLE)
+    if (rc == RT_EOK)
     {
-        _rt_channel_check_wq_wakup(ch);
-    }
-    rt_hw_interrupt_enable(temp);
-
-    /* reschedule in order to let the potential receivers run */
-    rt_schedule();
-
-    if (need_reply)
-    {
-        temp = rt_hw_interrupt_disable();
-        if (old_timeout_func)
+        if (ch->stat == RT_IPC_STAT_IDLE)
         {
-            rt_timer_control(&(thread_send->thread_timer),
-                    RT_TIMER_CTRL_SET_FUNC,
-                    old_timeout_func);
+            _rt_channel_check_wq_wakup_locked(ch);
         }
-        ret = thread_send->error;
-        rt_hw_interrupt_enable(temp);
+        rt_spin_unlock(&ipc_ch_lock);
 
-        if (ret != RT_EOK)
+        /* reschedule in order to let the potential receivers run */
+        rt_schedule();
+
+        rt_spin_lock(&ipc_ch_lock);
+        if (need_reply)
         {
-            return ret;
-        }
+            if (old_timeout_func)
+            {
+                rt_timer_control(&(thread_send->thread_timer),
+                        RT_TIMER_CTRL_SET_FUNC,
+                        old_timeout_func);
+            }
+            rc = thread_send->error;
+
+            if (rc == RT_EOK)
+            {
+                /* If the sender gets the chance to run, the requested reply must be valid. */
+                RT_ASSERT(data_ret != RT_NULL);
+                *data_ret = ((rt_ipc_msg_t)(thread_send->msg_ret))->msg;   /* extract data */
+                _ipc_msg_free(thread_send->msg_ret);    /* put back the message to kernel */
 
-        /* If the sender gets the chance to run, the requested reply must be valid. */
-        RT_ASSERT(data_ret != RT_NULL);
-        *data_ret = ((rt_ipc_msg_t)(thread_send->msg_ret))->msg;   /* extract data */
-        temp = rt_hw_interrupt_disable();
-        _ipc_msg_free(thread_send->msg_ret);    /* put back the message to kernel */
-        rt_hw_interrupt_enable(temp);
-        thread_send->msg_ret = RT_NULL;
+                thread_send->msg_ret = RT_NULL;
+            }
+        }
     }
+    rt_spin_unlock(&ipc_ch_lock);
 
-    return RT_EOK;
+    return rc;
 }
 
 /**
@@ -612,7 +652,7 @@ static rt_err_t _rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_ms
  */
 rt_err_t rt_raw_channel_send(rt_channel_t ch, rt_channel_msg_t data)
 {
-    return _rt_raw_channel_send_recv_timeout(ch, data, 0, 0, RT_WAITING_FOREVER);
+    return _send_recv_timeout(ch, data, 0, 0, RT_WAITING_FOREVER);
 }
 
 /**
@@ -620,7 +660,7 @@ rt_err_t rt_raw_channel_send(rt_channel_t ch, rt_channel_msg_t data)
  */
 rt_err_t rt_raw_channel_send_recv(rt_channel_t ch, rt_channel_msg_t data, rt_channel_msg_t data_ret)
 {
-    return _rt_raw_channel_send_recv_timeout(ch, data, 1, data_ret, RT_WAITING_FOREVER);
+    return _send_recv_timeout(ch, data, 1, data_ret, RT_WAITING_FOREVER);
 }
 
 /**
@@ -628,7 +668,7 @@ rt_err_t rt_raw_channel_send_recv(rt_channel_t ch, rt_channel_msg_t data, rt_cha
  */
 rt_err_t rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, rt_channel_msg_t data_ret, rt_int32_t time)
 {
-    return _rt_raw_channel_send_recv_timeout(ch, data, 1, data_ret, time);
+    return _send_recv_timeout(ch, data, 1, data_ret, time);
 }
 
 /**
@@ -636,56 +676,58 @@ rt_err_t rt_raw_channel_send_recv_timeout(rt_channel_t ch, rt_channel_msg_t data
  */
 rt_err_t rt_raw_channel_reply(rt_channel_t ch, rt_channel_msg_t data)
 {
+    DEF_RETURN_CODE(rc);
     rt_ipc_msg_t msg;
     struct rt_thread *thread;
-    register rt_base_t temp;
 
     if (ch == RT_NULL)
     {
-        return -RT_EIO;
-    }
-
-    temp = rt_hw_interrupt_disable();
-
-    if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
-    {
-        rt_hw_interrupt_enable(temp);
-        return -RT_EIO;
-    }
-
-    if (ch->stat != RT_IPC_STAT_ACTIVE)
-    {
-        rt_hw_interrupt_enable(temp);
-        return -RT_ERROR;
+        rc = -RT_EIO;
     }
-
-    if (ch->reply == RT_NULL)
+    else
     {
-        rt_hw_interrupt_enable(temp);
-        return -RT_ERROR;
-    }
+        rt_spin_lock(&ipc_ch_lock);
 
-    /* allocate an IPC message */
-    msg = _ipc_msg_alloc();
-    if (!msg)
-    {
-        rt_hw_interrupt_enable(temp);
-        return -RT_ENOMEM;
-    }
+        if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
+        {
+            rc = -RT_EIO;
+        }
+        else if (ch->stat != RT_IPC_STAT_ACTIVE)
+        {
+            rc = -RT_ERROR;
+        }
+        else if (ch->reply == RT_NULL)
+        {
+            rc = -RT_ERROR;
+        }
+        else
+        {
+            /* allocate an IPC message */
+            msg = _ipc_msg_alloc();
+            if (!msg)
+            {
+                rc = -RT_ENOMEM;
+            }
+            else
+            {
+                rt_ipc_msg_init(msg, data, 0);
 
-    rt_ipc_msg_init(msg, data, 0);
+                thread = ch->reply;
+                thread->msg_ret = msg;          /* transfer the reply to the sender */
+                rt_thread_resume(thread);       /* wake up the sender */
+                ch->stat = RT_IPC_STAT_IDLE;
+                ch->reply = RT_NULL;
 
-    thread = ch->reply;
-    thread->msg_ret = msg;          /* transfer the reply to the sender */
-    rt_thread_resume(thread);       /* wake up the sender */
-    ch->stat = RT_IPC_STAT_IDLE;
-    ch->reply = RT_NULL;
+                _rt_channel_check_wq_wakup_locked(ch);
+                rc = RT_EOK;
+            }
+        }
+        rt_spin_unlock(&ipc_ch_lock);
 
-    _rt_channel_check_wq_wakup(ch);
-    rt_hw_interrupt_enable(temp);
-    rt_schedule();
+        rt_schedule();
+    }
 
-    return RT_EOK;
+    RETURN(rc);
 }
 
 static rt_err_t wakeup_receiver(void *object, struct rt_thread *thread)
@@ -697,7 +739,11 @@ static rt_err_t wakeup_receiver(void *object, struct rt_thread *thread)
     ch->stat = RT_IPC_STAT_IDLE;
     thread->error = -RT_EINTR;
     ret = rt_channel_list_resume(&ch->parent.suspend_thread);
-    _rt_channel_check_wq_wakup(ch);
+
+    rt_spin_lock(&ipc_ch_lock);
+    _rt_channel_check_wq_wakup_locked(ch);
+    rt_spin_unlock(&ipc_ch_lock);
+
     return ret;
 }
 
@@ -712,11 +758,14 @@ static void receiver_timeout(void *parameter)
     thread->error = -RT_ETIMEOUT;
     thread->wakeup.func = RT_NULL;
 
+    rt_spin_lock(&ipc_ch_lock);
     rt_list_remove(&(thread->tlist));
     /* insert to schedule ready list */
     rt_schedule_insert_thread(thread);
 
-    _rt_channel_check_wq_wakup(ch);
+    _rt_channel_check_wq_wakup_locked(ch);
+    rt_spin_unlock(&ipc_ch_lock);
+
     /* do schedule */
     rt_schedule();
 }
@@ -726,10 +775,9 @@ static void receiver_timeout(void *parameter)
  */
 static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t data, rt_int32_t time)
 {
+    DEF_RETURN_CODE(rc);
     struct rt_thread *thread;
     rt_ipc_msg_t msg_ret;
-    register rt_base_t temp;
-    rt_err_t ret;
     void (*old_timeout_func)(void *) = 0;
 
     RT_DEBUG_NOT_IN_INTERRUPT;
@@ -739,102 +787,97 @@ static rt_err_t _rt_raw_channel_recv_timeout(rt_channel_t ch, rt_channel_msg_t d
         return -RT_EIO;
     }
 
-    temp = rt_hw_interrupt_disable();
+    rt_spin_lock(&ipc_ch_lock);
 
     if (rt_object_get_type(&ch->parent.parent) != RT_Object_Class_Channel)
     {
-        rt_hw_interrupt_enable(temp);
-        return -RT_EIO;
+        rc = -RT_EIO;
     }
-    if (ch->stat != RT_IPC_STAT_IDLE)
+    else if (ch->stat != RT_IPC_STAT_IDLE)
     {
-        rt_hw_interrupt_enable(temp);
-        return -RT_ERROR;
+        rc = -RT_ERROR;
     }
-
-    if (ch->wait_msg.next != &ch->wait_msg) /* there exist unhandled messages */
+    else
     {
-        msg_ret = rt_list_entry(ch->wait_msg.next, struct rt_ipc_msg, mlist);
-        rt_list_remove(ch->wait_msg.next);  /* remove the message from the channel */
-        if (msg_ret->need_reply)
+        if (ch->wait_msg.next != &ch->wait_msg) /* there exist unhandled messages */
         {
-            RT_ASSERT(ch->wait_thread.next != &ch->wait_thread);
+            msg_ret = rt_list_entry(ch->wait_msg.next, struct rt_ipc_msg, mlist);
+            rt_list_remove(ch->wait_msg.next);  /* remove the message from the channel */
+            if (msg_ret->need_reply)
+            {
+                RT_ASSERT(ch->wait_thread.next != &ch->wait_thread);
 
-            thread = rt_list_entry(ch->wait_thread.next, struct rt_thread, tlist);
-            rt_list_remove(ch->wait_thread.next);
-            ch->reply = thread;             /* record the waiting sender */
-            ch->stat = RT_IPC_STAT_ACTIVE;  /* no valid suspened receivers */
+                thread = rt_list_entry(ch->wait_thread.next, struct rt_thread, tlist);
+                rt_list_remove(ch->wait_thread.next);
+                ch->reply = thread;             /* record the waiting sender */
+                ch->stat = RT_IPC_STAT_ACTIVE;  /* no valid suspended receivers */
+            }
+            *data = msg_ret->msg;      /* extract the transferred data */
+            if (data->type == RT_CHANNEL_FD)
+            {
+                data->u.fd.fd = _ipc_msg_fd_new(data->u.fd.file);
+            }
+            _ipc_msg_free(msg_ret);     /* put back the message to kernel */
+            rc = RT_EOK;
         }
-        *data = msg_ret->msg;      /* extract the transferred data */
-        if (data->type == RT_CHANNEL_FD)
+        else if (time == 0)
         {
-            data->u.fd.fd = _ipc_msg_fd_new(data->u.fd.file);
+            rc = -RT_ETIMEOUT;
         }
-        _ipc_msg_free(msg_ret);     /* put back the message to kernel */
-    }
-    else
-    {
-        if (time == 0)
+        else
         {
-            rt_hw_interrupt_enable(temp);
-            return -RT_ETIMEOUT;
-        }
-        /* no valid message, we must wait */
-        thread = rt_thread_self();
+            /* no valid message, we must wait */
+            thread = rt_thread_self();
 
-        ret = rt_channel_list_suspend(&ch->parent.suspend_thread, thread);
-        if (ret != RT_EOK)
-        {
-            rt_hw_interrupt_enable(temp);
-            return ret;
-        }
-        rt_thread_wakeup_set(thread, wakeup_receiver, (void*)ch);
-        ch->stat = RT_IPC_STAT_WAIT;/* no valid suspended senders */
-        thread->error = RT_EOK;
-        if (time > 0)
-        {
-            rt_timer_control(&(thread->thread_timer),
-                    RT_TIMER_CTRL_GET_FUNC,
-                    &old_timeout_func);
-            rt_timer_control(&(thread->thread_timer),
-                    RT_TIMER_CTRL_SET_FUNC,
-                    receiver_timeout);
-            /* reset the timeout of thread timer and start it */
-            rt_timer_control(&(thread->thread_timer),
-                    RT_TIMER_CTRL_SET_TIME,
-                    &time);
-            rt_timer_start(&(thread->thread_timer));
-        }
-        rt_hw_interrupt_enable(temp);
+            rc = rt_channel_list_suspend(&ch->parent.suspend_thread, thread);
+            if (rc == RT_EOK)
+            {
+                rt_thread_wakeup_set(thread, wakeup_receiver, (void*)ch);
+                ch->stat = RT_IPC_STAT_WAIT;/* no valid suspended senders */
+                thread->error = RT_EOK;
+                if (time > 0)
+                {
+                    rt_timer_control(&(thread->thread_timer),
+                            RT_TIMER_CTRL_GET_FUNC,
+                            &old_timeout_func);
+                    rt_timer_control(&(thread->thread_timer),
+                            RT_TIMER_CTRL_SET_FUNC,
+                            receiver_timeout);
+                    /* reset the timeout of thread timer and start it */
+                    rt_timer_control(&(thread->thread_timer),
+                            RT_TIMER_CTRL_SET_TIME,
+                            &time);
+                    rt_timer_start(&(thread->thread_timer));
+                }
+                rt_spin_unlock(&ipc_ch_lock);
 
-        rt_schedule();              /* let the senders run */
+                rt_schedule();              /* let the senders run */
 
-        temp = rt_hw_interrupt_disable();
-        if (old_timeout_func)
-        {
-            rt_timer_control(&(thread->thread_timer),
-                    RT_TIMER_CTRL_SET_FUNC,
-                    old_timeout_func);
-        }
-        ret = thread->error;
-        if ( ret != RT_EOK)
-        {
-            rt_hw_interrupt_enable(temp);
-            return ret;
-        }
-        /* If waked up, the received message has been store into the thread. */
-        *data = ((rt_ipc_msg_t)(thread->msg_ret))->msg;    /* extract data */
-        if (data->type == RT_CHANNEL_FD)
-        {
-            data->u.fd.fd = _ipc_msg_fd_new(data->u.fd.file);
+                rt_spin_lock(&ipc_ch_lock);
+                if (old_timeout_func)
+                {
+                    rt_timer_control(&(thread->thread_timer),
+                            RT_TIMER_CTRL_SET_FUNC,
+                            old_timeout_func);
+                }
+                rc = thread->error;
+                if (rc == RT_EOK)
+                {
+                    /* If woken up, the received message has been stored into the thread. */
+                    *data = ((rt_ipc_msg_t)(thread->msg_ret))->msg;    /* extract data */
+                    if (data->type == RT_CHANNEL_FD)
+                    {
+                        data->u.fd.fd = _ipc_msg_fd_new(data->u.fd.file);
+                    }
+                    _ipc_msg_free(thread->msg_ret);     /* put back the message to kernel */
+                    thread->msg_ret = RT_NULL;
+                }
+            }
         }
-        _ipc_msg_free(thread->msg_ret);     /* put back the message to kernel */
-        thread->msg_ret = RT_NULL;
     }
 
-    rt_hw_interrupt_enable(temp);
-
-    return RT_EOK;
+    rt_spin_unlock(&ipc_ch_lock);
+    RETURN(rc);
 }
 
 rt_err_t rt_raw_channel_recv(rt_channel_t ch, rt_channel_msg_t data)
@@ -950,9 +993,10 @@ static int channel_fops_poll(struct dfs_file *file, struct rt_pollreq *req)
 static int channel_fops_close(struct dfs_file *file)
 {
     rt_channel_t ch;
-    rt_base_t level;
+    RT_DEBUG_NOT_IN_INTERRUPT;
+
+    rt_spin_lock(&ipc_ch_lock);
 
-    level = rt_hw_interrupt_disable();
     ch = (rt_channel_t)file->vnode->data;
     if (file->vnode->ref_count == 1)
     {
@@ -960,8 +1004,8 @@ static int channel_fops_close(struct dfs_file *file)
         if (ch->ref == 0)
         {
             /* wakeup all the suspended receivers and senders */
-            rt_channel_list_resume_all(&ch->parent.suspend_thread);
-            rt_channel_list_resume_all(&ch->wait_thread);
+            _channel_list_resume_all_locked(&ch->parent.suspend_thread);
+            _channel_list_resume_all_locked(&ch->wait_thread);
 
             /* all ipc msg will lost */
             rt_list_init(&ch->wait_msg);
@@ -969,7 +1013,8 @@ static int channel_fops_close(struct dfs_file *file)
             rt_object_delete(&ch->parent.parent);   /* release the IPC channel structure */
         }
     }
-    rt_hw_interrupt_enable(level);
+
+    rt_spin_unlock(&ipc_ch_lock);
     return 0;
 }
 
@@ -1160,23 +1205,23 @@ rt_err_t rt_channel_peek(int fd, rt_channel_msg_t data)
     return lwp_channel_recv_timeout(FDT_TYPE_KERNEL, fd, data, 0);
 }
 
-#ifdef RT_USING_FINSH
 static int list_channel(void)
 {
-    rt_base_t level;
     rt_channel_t *channels;
     rt_ubase_t index, count;
     struct rt_object *object;
     struct rt_list_node *node;
     struct rt_object_information *information;
 
+    RT_DEBUG_NOT_IN_INTERRUPT;
+
     const char* stat_strs[] = {"idle", "wait", "active"};
 
     information = rt_object_get_information(RT_Object_Class_Channel);
     RT_ASSERT(information != RT_NULL);
 
     count = 0;
-    level = rt_hw_interrupt_disable();
+    rt_spin_lock(&ipc_list_lock);
     /* get the count of IPC channels */
     for (node  = information->object_list.next;
             node != &(information->object_list);
@@ -1184,7 +1229,7 @@ static int list_channel(void)
     {
         count ++;
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock(&ipc_list_lock);
 
     if (count == 0) return 0;
 
@@ -1192,18 +1237,18 @@ static int list_channel(void)
     if (channels == RT_NULL) return 0; /* out of memory */
 
     index = 0;
-    level = rt_hw_interrupt_disable();
+    rt_spin_lock(&ipc_list_lock);
     /* retrieve pointer of IPC channels */
-    for (node  = information->object_list.next;
-            node != &(information->object_list);
-            node  = node->next)
+    for (node = information->object_list.next;
+         count > 0 && node != &(information->object_list);
+         count--, node = node->next)
     {
         object = rt_list_entry(node, struct rt_object, list);
 
         channels[index] = (rt_channel_t)object;
         index ++;
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock(&ipc_list_lock);
 
     rt_kprintf(" channel state\n");
     rt_kprintf("-------- -------\n");
@@ -1222,5 +1267,4 @@ static int list_channel(void)
     return 0;
 }
 MSH_CMD_EXPORT(list_channel, list IPC channel information);
-#endif
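
The receive path above shows the new locking discipline for channels: the ipc_ch_lock spinlock replaces the old interrupt-disable window, and the lock is dropped before rt_schedule() only once the wakeup callback (and optional timer) is armed. A minimal sketch of that pattern, reusing the names from this diff with error paths trimmed (illustrative only, not the committed code):

static rt_err_t recv_block_sketch(rt_channel_t ch, rt_thread_t thread)
{
    rt_err_t rc;

    rt_spin_lock(&ipc_ch_lock);
    rc = rt_channel_list_suspend(&ch->parent.suspend_thread, thread);
    if (rc == RT_EOK)
    {
        /* arm the wakeup hook while still holding the lock */
        rt_thread_wakeup_set(thread, wakeup_receiver, (void *)ch);
        rt_spin_unlock(&ipc_ch_lock);

        rt_schedule();              /* a sender or the timeout wakes us */

        rt_spin_lock(&ipc_ch_lock);
        rc = thread->error;         /* set by the sender or the timer */
    }
    rt_spin_unlock(&ipc_ch_lock);
    return rc;
}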
 

+ 234 - 146
components/lwp/lwp_pid.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2023, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -9,30 +9,31 @@
  * 2021-02-20     lizhirui     fix warning
  * 2023-06-26     shell        clear ref to parent on waitpid()
  *                             Remove recycling of lwp on waitpid() and leave it to defunct routine
+ * 2023-07-27     shell        Move the detach of child processes on parent exit to lwp_terminate.
+ *                             Make lwp_from_pid locked by the caller to avoid a possible
+ *                             use-after-free error
  */
 
 #include <rthw.h>
 #include <rtthread.h>
 
+#define DBG_TAG "lwp.pid"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
 #include <dfs_file.h>
 #include <unistd.h>
 #include <stdio.h> /* rename() */
 #include <sys/stat.h>
 #include <sys/statfs.h> /* statfs() */
 
-#include "lwp.h"
-#include "lwp_pid.h"
-#include "lwp_signal.h"
+#include "lwp_internal.h"
 #include "tty.h"
 
 #ifdef ARCH_MM_MMU
 #include "lwp_user_mm.h"
 #endif
 
-#define DBG_TAG    "LWP_PID"
-#define DBG_LVL    DBG_INFO
-#include <rtdbg.h>
-
 #define PID_MAX 10000
 
 #define PID_CT_ASSERT(name, x) \
@@ -46,19 +47,40 @@ static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
 static int lwp_pid_ary_alloced = 0;
 static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
 static pid_t current_pid = 0;
+static struct rt_mutex pid_mtx;
+
+int lwp_pid_init(void)
+{
+    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
+    return 0;
+}
+
+void lwp_pid_lock_take(void)
+{
+    DEF_RETURN_CODE(rc);
+
+    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
+    /* should never fail */
+    RT_ASSERT(rc == RT_EOK);
+}
+
+void lwp_pid_lock_release(void)
+{
+    /* should never fail */
+    if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
+        RT_ASSERT(0);
+}
 
 struct lwp_avl_struct *lwp_get_pid_ary(void)
 {
     return lwp_pid_ary;
 }
 
-static pid_t lwp_pid_get(void)
+static pid_t lwp_pid_get_locked(void)
 {
-    rt_base_t level;
     struct lwp_avl_struct *p;
     pid_t pid = 0;
 
-    level = rt_hw_interrupt_disable();
     p = lwp_pid_free_head;
     if (p)
     {
@@ -97,13 +119,11 @@ static pid_t lwp_pid_get(void)
         lwp_avl_insert(p, &lwp_pid_root);
         current_pid = pid;
     }
-    rt_hw_interrupt_enable(level);
     return pid;
 }
 
-static void lwp_pid_put(pid_t pid)
+static void lwp_pid_put_locked(pid_t pid)
 {
-    rt_base_t level;
     struct lwp_avl_struct *p;
 
     if (pid == 0)
@@ -111,7 +131,6 @@ static void lwp_pid_put(pid_t pid)
         return;
     }
 
-    level = rt_hw_interrupt_disable();
     p  = lwp_avl_find(pid, lwp_pid_root);
     if (p)
     {
@@ -120,21 +139,27 @@ static void lwp_pid_put(pid_t pid)
         p->avl_right = lwp_pid_free_head;
         lwp_pid_free_head = p;
     }
-    rt_hw_interrupt_enable(level);
 }
 
-static void lwp_pid_set_lwp(pid_t pid, struct rt_lwp *lwp)
+void lwp_pid_put(struct rt_lwp *lwp)
+{
+    lwp_pid_lock_take();
+    lwp_pid_put_locked(lwp->pid);
+    lwp_pid_lock_release();
+
+    /* reset pid field */
+    lwp->pid = 0;
+}
+
+static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
 {
-    rt_base_t level;
     struct lwp_avl_struct *p;
 
-    level = rt_hw_interrupt_disable();
     p  = lwp_avl_find(pid, lwp_pid_root);
     if (p)
     {
         p->data = lwp;
     }
-    rt_hw_interrupt_enable(level);
 }
 
 static void __exit_files(struct rt_lwp *lwp)
@@ -203,11 +228,7 @@ int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
             node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
             if (node)
             {
-                rt_base_t level;
-
-                level = rt_hw_interrupt_disable();
-                object->lwp_ref_count++;
-                rt_hw_interrupt_enable(level);
+                rt_atomic_add(&object->lwp_ref_count, 1);
                 node->avl_key = (avl_key_t)object;
                 lwp_avl_insert(node, &lwp->object_root);
                 ret = 0;
@@ -317,7 +338,6 @@ void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
 rt_lwp_t lwp_create(rt_base_t flags)
 {
     pid_t pid;
-    rt_base_t level;
     rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));
 
     if (new_lwp)
@@ -336,8 +356,8 @@ rt_lwp_t lwp_create(rt_base_t flags)
         /* lwp with pid */
         if (flags & LWP_CREATE_FLAG_ALLOC_PID)
         {
-            level = rt_hw_interrupt_disable();
-            pid = lwp_pid_get();
+            lwp_pid_lock_take();
+            pid = lwp_pid_get_locked();
             if (pid == 0)
             {
                 lwp_user_object_lock_destroy(new_lwp);
@@ -348,29 +368,33 @@ rt_lwp_t lwp_create(rt_base_t flags)
             else
             {
                 new_lwp->pid = pid;
-                lwp_pid_set_lwp(pid, new_lwp);
+                lwp_pid_set_lwp_locked(pid, new_lwp);
             }
-
-            rt_hw_interrupt_enable(level);
+            lwp_pid_lock_release();
         }
     }
+
+    LOG_D("%s(pid=%d) => %p", __func__, new_lwp->pid, new_lwp);
     return new_lwp;
 }
 
+/** when reference is 0, a lwp can be released */
 void lwp_free(struct rt_lwp* lwp)
 {
-    rt_base_t level;
-
     if (lwp == RT_NULL)
     {
         return;
     }
 
+    /**
+     * Brief: Recycle the lwp when reference is cleared
+     *
+     * Note: Critical Section
+     * - lwp (RW. there is no other writer/reader compete with lwp_free, since
+     *   all the reference is clear)
+     */
     LOG_D("lwp free: %p\n", lwp);
 
-    level = rt_hw_interrupt_disable();
-    lwp->finish = 1;
-    rt_hw_interrupt_enable(level);
 
     if (lwp->args != RT_NULL)
     {
@@ -429,8 +453,6 @@ void lwp_free(struct rt_lwp* lwp)
     lwp_unmap_user_space(lwp);
 #endif
     timer_list_free(&lwp->timer);
-
-    level = rt_hw_interrupt_disable();
     /* for children */
     while (lwp->first_child)
     {
@@ -438,21 +460,20 @@ void lwp_free(struct rt_lwp* lwp)
 
         child = lwp->first_child;
         lwp->first_child = child->sibling;
-        if (child->finish)
+        if (child->terminated)
         {
-            lwp_pid_put(lwp_to_pid(child));
-            rt_hw_interrupt_enable(level);
+            lwp_pid_put(child);
             rt_free(child);
-            level = rt_hw_interrupt_disable();
         }
         else
         {
+            /** Note: safe since the slist node is released */
             child->sibling = RT_NULL;
+            /* Note: this may cause an orphan lwp */
             child->parent = RT_NULL;
         }
     }
 
-    rt_hw_interrupt_enable(level);
     if (!lwp->background)
     {
         struct termios *old_stdin_termios = get_old_termios();
@@ -462,7 +483,6 @@ void lwp_free(struct rt_lwp* lwp)
         {
             tcsetattr(1, 0, old_stdin_termios);
         }
-        level = rt_hw_interrupt_disable();
         if (lwp->tty != RT_NULL)
         {
             rt_mutex_take(&lwp->tty->lock, RT_WAITING_FOREVER);
@@ -480,10 +500,6 @@ void lwp_free(struct rt_lwp* lwp)
             lwp->tty = RT_NULL;
         }
     }
-    else
-    {
-        level = rt_hw_interrupt_disable();
-    }
 
     /* for parent */
     if (lwp->parent)
@@ -495,7 +511,6 @@ void lwp_free(struct rt_lwp* lwp)
             thread->error = RT_EOK;
             thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
             rt_thread_resume(thread);
-            rt_hw_interrupt_enable(level);
             return;
         }
         else
@@ -510,35 +525,28 @@ void lwp_free(struct rt_lwp* lwp)
         }
     }
 
-    lwp_pid_put(lwp_to_pid(lwp));
-    rt_hw_interrupt_enable(level);
+    lwp_pid_put(lwp);
     rt_free(lwp);
 }
 
+/** @note The reference is not for synchronization, but for the release of resources. The synchronization is done through the lwp & pid locks. */
 int lwp_ref_inc(struct rt_lwp *lwp)
 {
-    rt_base_t level;
+    int ref;
+    ref = rt_atomic_add(&lwp->ref, 1);
+    LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);
 
-    level = rt_hw_interrupt_disable();
-    lwp->ref++;
-    rt_hw_interrupt_enable(level);
-
-    return 0;
+    return ref;
 }
 
 int lwp_ref_dec(struct rt_lwp *lwp)
 {
-    rt_base_t level;
-    int ref = -1;
+    int ref;
 
-    level = rt_hw_interrupt_disable();
-    if (lwp->ref)
-    {
-        lwp->ref--;
-        ref = lwp->ref;
-    }
-    rt_hw_interrupt_enable(level);
-    if (!ref)
+    ref = rt_atomic_add(&lwp->ref, -1);
+    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);
+
+    if (ref == 1)
     {
         struct rt_channel_msg msg;
 
@@ -547,7 +555,6 @@ int lwp_ref_dec(struct rt_lwp *lwp)
             memset(&msg, 0, sizeof msg);
             rt_raw_channel_send(gdb_server_channel(), &msg);
         }
-        lwp_signal_detach(&lwp->signal);
 
 #ifndef ARCH_MM_MMU
 #ifdef RT_LWP_USING_SHM
@@ -555,26 +562,27 @@ int lwp_ref_dec(struct rt_lwp *lwp)
 #endif /* RT_LWP_USING_SHM */
 #endif /* not defined ARCH_MM_MMU */
         lwp_free(lwp);
-
-        return 0;
+    }
+    else
+    {
+        /* reference must be a positive integer */
+        RT_ASSERT(ref > 1);
     }
 
-    return -1;
+    return ref;
 }
 
-struct rt_lwp* lwp_from_pid(pid_t pid)
+struct rt_lwp* lwp_from_pid_locked(pid_t pid)
 {
-    rt_base_t level;
     struct lwp_avl_struct *p;
     struct rt_lwp *lwp = RT_NULL;
 
-    level = rt_hw_interrupt_disable();
     p  = lwp_avl_find(pid, lwp_pid_root);
     if (p)
     {
         lwp = (struct rt_lwp *)p->data;
     }
-    rt_hw_interrupt_enable(level);
+
     return lwp;
 }
 
@@ -592,12 +600,15 @@ char* lwp_pid2name(int32_t pid)
     struct rt_lwp *lwp;
     char* process_name = RT_NULL;
 
-    lwp = lwp_from_pid(pid);
+    lwp_pid_lock_take();
+    lwp = lwp_from_pid_locked(pid);
     if (lwp)
     {
         process_name = strrchr(lwp->cmd, '/');
         process_name = process_name? process_name + 1: lwp->cmd;
     }
+    lwp_pid_lock_release();
+
     return process_name;
 }
 
@@ -607,9 +618,8 @@ pid_t lwp_name2pid(const char *name)
     pid_t pid = 0;
     rt_thread_t main_thread;
     char* process_name = RT_NULL;
-    rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    lwp_pid_lock_take();
     for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
     {
         /* 0 is reserved */
@@ -629,7 +639,7 @@ pid_t lwp_name2pid(const char *name)
             }
         }
     }
-    rt_hw_interrupt_enable(level);
+    lwp_pid_lock_release();
     return pid;
 }
 
@@ -638,83 +648,141 @@ int lwp_getpid(void)
     return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
 }
 
-pid_t waitpid(pid_t pid, int *status, int options)
+/**
+ * @brief Wait for a child lwp to terminate. Do the essential recycling and
+ *        set up the status code for the user
+ */
+static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
+                                      struct rt_lwp *self_lwp, int *status,
+                                      int options)
 {
-    pid_t ret = -1;
-    rt_base_t level;
-    struct rt_thread *thread;
-    struct rt_lwp *lwp;
-    struct rt_lwp *this_lwp;
-
-    this_lwp = lwp_self();
-    if (!this_lwp)
-    {
-        goto quit;
-    }
+    sysret_t error;
+    int lwp_stat;
+    int terminated;
 
-    level = rt_hw_interrupt_disable();
-    if (pid == -1)
+    if (!child)
     {
-        lwp = this_lwp->first_child;
-        if (!lwp)
-            goto quit;
-        else
-            pid = lwp->pid;
+        error = -RT_ERROR;
     }
     else
     {
-        lwp = lwp_from_pid(pid);
-        if (!lwp)
+        /**
+         * Note: Critical Section
+         * - child lwp (RW. This will modify its parent if valid)
+         */
+        LWP_LOCK(child);
+        if (child->terminated)
         {
-            goto quit;
+            error = child->pid;
         }
-    }
+        else if (rt_list_isempty(&child->wait_list))
+        {
+            /**
+             * Note: only one thread can wait on wait_list.
+             * don't reschedule before the mutex is unlocked
+             */
+            rt_enter_critical();
+
+            error = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
+            if (error == 0)
+            {
+                rt_list_insert_before(&child->wait_list, &(cur_thr->tlist));
+                LWP_UNLOCK(child);
 
-    if (lwp->parent != this_lwp)
-    {
-        goto quit;
+                rt_exit_critical();
+                rt_schedule();
+
+                if (child->terminated)
+                    error = child->pid;
+                else
+                    error = -RT_EINTR;
+            }
+            else
+                rt_exit_critical();
+        }
+        else
+            error = -RT_EINTR;
+
+        lwp_stat = child->lwp_ret;
+        terminated = child->terminated;
+        if (!terminated)
+            LWP_UNLOCK(child);
+
+        if (error > 0)
+        {
+            if (terminated)
+            {
+                /** Reap the child process if it's exited */
+                lwp_children_unregister(self_lwp, child);
+                child->parent = RT_NULL;
+                lwp_pid_put(child);
+            }
+            if (status)
+                lwp_data_put(self_lwp, status, &lwp_stat, sizeof(*status));
+        }
     }
 
-    if (lwp->finish)
+    return error;
+}
+
+pid_t waitpid(pid_t pid, int *status, int options) __attribute__((alias("lwp_waitpid")));
+
+pid_t lwp_waitpid(const pid_t pid, int *status, int options)
+{
+    pid_t rc = -1;
+    struct rt_thread *thread;
+    struct rt_lwp *child;
+    struct rt_lwp *self_lwp;
+
+    thread = rt_thread_self();
+    self_lwp = lwp_self();
+
+    if (!self_lwp)
     {
-        ret = pid;
+        rc = -RT_EINVAL;
     }
     else
     {
-        if (!rt_list_isempty(&lwp->wait_list))
+        if (pid > 0)
         {
-            goto quit;
+            lwp_pid_lock_take();
+            child = lwp_from_pid_locked(pid);
+            if (child->parent != self_lwp)
+                rc = -RT_ERROR;
+            else
+                rc = RT_EOK;
+            lwp_pid_lock_release();
+
+            if (rc == RT_EOK)
+                rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
+        }
+        else if (pid == -1)
+        {
+            LWP_LOCK(self_lwp);
+            child = self_lwp->first_child;
+            LWP_UNLOCK(self_lwp);
+            RT_ASSERT(!child || child->parent == self_lwp);
+
+            rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
         }
-        thread = rt_thread_self();
-        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
-        rt_list_insert_before(&lwp->wait_list, &(thread->tlist));
-        rt_schedule();
-        if (thread->error == RT_EOK)
+        else
         {
-            ret = pid;
+            /* not supported yet */
+            rc = -RT_EINVAL;
         }
     }
 
-    if (ret != -1)
+    if (rc > 0)
     {
-        /* delete from sibling list of its parent */
-        struct rt_lwp **lwp_node;
-
-        lwp_data_put(this_lwp, status, &lwp->lwp_ret, sizeof(*status));
-        lwp_node = &this_lwp->first_child;
-        while (*lwp_node != lwp)
-        {
-            RT_ASSERT(*lwp_node != RT_NULL);
-            lwp_node = &(*lwp_node)->sibling;
-        }
-        (*lwp_node) = lwp->sibling;
-        lwp->parent = RT_NULL;
-        lwp_pid_put(pid);
+        LOG_D("%s: recycle child id %ld (status=0x%x)", __func__, (long)rc, status ? *status : 0);
+    }
+    else
+    {
+        RT_ASSERT(rc != 0);
+        LOG_D("%s: wait failed with code %ld", __func__, (long)rc);
     }
 
-quit:
-    rt_hw_interrupt_enable(level);
-    return ret;
+    return rc;
 }
 
 #ifdef RT_USING_FINSH
@@ -811,8 +879,10 @@ long list_process(void)
                     struct rt_thread th;
 
                     thread = threads[index];
+
+                    /** FIXME: take the rt_thread_t lock */
                     level = rt_hw_interrupt_disable();
-                    if ((thread->parent.type & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
+                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                     {
                         rt_hw_interrupt_enable(level);
                         continue;
@@ -870,7 +940,9 @@ static void cmd_kill(int argc, char** argv)
             sig = atoi(argv[3]);
         }
     }
-    lwp_signal_kill(lwp_from_pid(pid), sig, SI_USER, 0);
+    lwp_pid_lock_take();
+    lwp_signal_kill(lwp_from_pid_locked(pid), sig, SI_USER, 0);
+    lwp_pid_lock_release();
 }
 MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);
 
@@ -885,7 +957,9 @@ static void cmd_killall(int argc, char** argv)
 
     while((pid = lwp_name2pid(argv[1])) > 0)
     {
-        lwp_signal_kill(lwp_from_pid(pid), SIGKILL, SI_USER, 0);
+        lwp_pid_lock_take();
+        lwp_signal_kill(lwp_from_pid_locked(pid), SIGKILL, SI_USER, 0);
+        lwp_pid_lock_release();
         rt_thread_mdelay(100);
     }
 }
@@ -915,6 +989,7 @@ static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
     rt_base_t level;
     rt_list_t *list;
 
+    /** FIXME: take the rt_thread_t lock */
     level = rt_hw_interrupt_disable();
     list = lwp->t_grp.next;
     while (list != &lwp->t_grp)
@@ -947,6 +1022,7 @@ void lwp_request_thread_exit(rt_thread_t thread_to_exit)
         return;
     }
 
+    /* FIXME: take the rt_thread_t lock */
     level = rt_hw_interrupt_disable();
 
     main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
@@ -993,7 +1069,6 @@ finish:
 
 void lwp_terminate(struct rt_lwp *lwp)
 {
-    rt_base_t level;
     rt_list_t *list;
 
     if (!lwp)
@@ -1004,11 +1079,11 @@ void lwp_terminate(struct rt_lwp *lwp)
 
     LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);
 
-    level = rt_hw_interrupt_disable();
+    LWP_LOCK(lwp);
 
-    /* stop the receiving of signals */
     if (!lwp->terminated)
     {
+        /* stop the receiving of signals */
         lwp->terminated = RT_TRUE;
 
         /* broadcast exit request for sibling threads */
@@ -1029,12 +1104,11 @@ void lwp_terminate(struct rt_lwp *lwp)
             }
         }
     }
-    rt_hw_interrupt_enable(level);
+    LWP_UNLOCK(lwp);
 }
 
 void lwp_wait_subthread_exit(void)
 {
-    rt_base_t level;
     struct rt_lwp *lwp;
     rt_thread_t thread;
     rt_thread_t main_thread;
@@ -1057,7 +1131,13 @@ void lwp_wait_subthread_exit(void)
         int subthread_is_terminated;
         LOG_D("%s: wait for subthread exiting", __func__);
 
-        level = rt_hw_interrupt_disable();
+        /**
+         * Brief: wait for all *running* sibling threads to exit
+         *
+         * Note: Critical Section
+         * - sibling list of lwp (RW. It will clear all siblings finally)
+         */
+        LWP_LOCK(lwp);
         subthread_is_terminated = (int)(thread->sibling.prev == &lwp->t_grp);
         if (!subthread_is_terminated)
         {
@@ -1083,12 +1163,21 @@ void lwp_wait_subthread_exit(void)
                 {
                     sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                     rt_list_remove(&sub_thread->sibling);
+
+                    /**
+                     * Note: Critical Section
+                     * - thread control block (RW. Since it will free the thread
+                     *   control block, it must ensure no one else can access
+                     *   thread any more)
+                     */
+                    lwp_tid_put(sub_thread->tid);
+                    sub_thread->tid = 0;
                     rt_thread_delete(sub_thread);
                 }
                 subthread_is_terminated = 1;
             }
         }
-        rt_hw_interrupt_enable(level);
+        LWP_UNLOCK(lwp);
 
         if (subthread_is_terminated)
         {
@@ -1103,7 +1192,8 @@ static int _lwp_setaffinity(pid_t pid, int cpu)
     struct rt_lwp *lwp;
     int ret = -1;
 
-    lwp = lwp_from_pid(pid);
+    lwp_pid_lock_take();
+    lwp = lwp_from_pid_locked(pid);
     if (lwp)
     {
 #ifdef RT_USING_SMP
@@ -1120,12 +1210,12 @@ static int _lwp_setaffinity(pid_t pid, int cpu)
 #endif
         ret = 0;
     }
+    lwp_pid_lock_release();
     return ret;
 }
 
 int lwp_setaffinity(pid_t pid, int cpu)
 {
-    rt_base_t level;
     int ret;
 
 #ifdef RT_USING_SMP
@@ -1134,9 +1224,7 @@ int lwp_setaffinity(pid_t pid, int cpu)
         cpu = RT_CPUS_NR;
     }
 #endif
-    level = rt_hw_interrupt_disable();
     ret = _lwp_setaffinity(pid, cpu);
-    rt_hw_interrupt_enable(level);
     return ret;
 }
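
The pid table is now serialized by pid_mtx instead of a global interrupt-disable, and every *_locked helper above assumes the caller already holds it. A hedged sketch of the intended call pattern (the query function itself is hypothetical; the lock helpers and fields are the ones from this patch):

static int sketch_pid_alive(pid_t pid)
{
    struct rt_lwp *lwp;
    int alive;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);     /* valid only while lock is held */
    alive = (lwp && !lwp->terminated);
    lwp_pid_lock_release();

    return alive;
}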
 

+ 36 - 3
components/lwp/lwp_pid.h

@@ -23,8 +23,22 @@ extern "C" {
 struct rt_lwp;
 
 struct lwp_avl_struct *lwp_get_pid_ary(void);
-
-/* create a lwp object */
+int lwp_pid_init(void);
+void lwp_pid_put(struct rt_lwp *lwp);
+void lwp_pid_lock_take(void);
+void lwp_pid_lock_release(void);
+
+/**
+ * @brief Create a new lwp object
+ *        This will initialize the members in the object and register it to
+ *        the system. Besides, a new pid is allocated with the lwp
+ *
+ * @param flags controls the properties of the lwp object. Can be ORed with:
+ *        LWP_CREATE_FLAG_NONE: raw lwp object
+ *        LWP_CREATE_FLAG_ALLOC_PID: lwp object with a newly allocated pid
+ *
+ * @return struct rt_lwp* object
+ */
 struct rt_lwp* lwp_create(rt_base_t flags);
 
 void lwp_free(struct rt_lwp* lwp);
@@ -32,7 +46,7 @@ void lwp_free(struct rt_lwp* lwp);
 int lwp_ref_inc(struct rt_lwp *lwp);
 int lwp_ref_dec(struct rt_lwp *lwp);
 
-struct rt_lwp* lwp_from_pid(pid_t pid);
+struct rt_lwp* lwp_from_pid_locked(pid_t pid);
 pid_t lwp_to_pid(struct rt_lwp* lwp);
 
 pid_t lwp_name2pid(const char* name);
@@ -40,6 +54,7 @@ char* lwp_pid2name(int32_t pid);
 
 int lwp_getpid(void);
 
+pid_t lwp_waitpid(const pid_t pid, int *status, int options);
 pid_t waitpid(pid_t pid, int *status, int options);
 long list_process(void);
 
@@ -52,6 +67,24 @@ rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object);
 void lwp_user_object_clear(struct rt_lwp *lwp);
 void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp);
 
+rt_inline struct rt_lwp *lwp_from_pid_and_lock(pid_t pid)
+{
+    struct rt_lwp *lwp;
+    lwp_pid_lock_take();
+    lwp = lwp_from_pid_locked(pid);
+    if (lwp)
+        lwp_ref_inc(lwp);
+    lwp_pid_lock_release();
+
+    return lwp;
+}
+
+rt_inline void lwp_from_pid_release_lock(struct rt_lwp *lwp)
+{
+    if (lwp)
+        lwp_ref_dec(lwp);
+}
+
 #ifdef __cplusplus
 }
 #endif
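
Despite its name, lwp_from_pid_and_lock() does not return with the pid lock held: it pins the lwp with a reference and then drops the lock, so the object stays valid until the matching release call. A hypothetical caller, for illustration only:

void print_cmd_of(pid_t pid)
{
    struct rt_lwp *lwp = lwp_from_pid_and_lock(pid);
    if (lwp)
    {
        rt_kprintf("pid %d: %s\n", pid, lwp->cmd);  /* safe: ref is held */
        lwp_from_pid_release_lock(lwp);             /* may free the lwp  */
    }
}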

+ 11 - 11
components/lwp/lwp_pmutex.c

@@ -65,15 +65,14 @@ INIT_PREV_EXPORT(pmutex_system_init);
 static rt_err_t pmutex_destory(void *data)
 {
     rt_err_t ret = -1;
-    rt_base_t level = 0;
     struct rt_pmutex *pmutex = (struct rt_pmutex *)data;
 
     if (pmutex)
     {
-        level = rt_hw_interrupt_disable();
+        lwp_mutex_take_safe(&_pmutex_lock, RT_WAITING_FOREVER, 0);
         /* remove pmutex from pmutex avl tree */
         lwp_avl_remove(&pmutex->node, (struct lwp_avl_struct **)pmutex->node.data);
-        rt_hw_interrupt_enable(level);
+        lwp_mutex_release_safe(&_pmutex_lock);
 
         if (pmutex->type == PMUTEX_NORMAL)
         {
@@ -214,8 +213,7 @@ static int _pthread_mutex_init(void *umutex)
     }
     else
     {
-        rt_base_t level = rt_hw_interrupt_disable();
-
+        lwp_mutex_take_safe(&_pmutex_lock, RT_WAITING_FOREVER, 1);
         if (pmutex->type == PMUTEX_NORMAL)
         {
             pmutex->lock.ksem->value = 1;
@@ -227,7 +225,7 @@ static int _pthread_mutex_init(void *umutex)
             pmutex->lock.kmutex->hold     = 0;
             pmutex->lock.kmutex->ceiling_priority = 0xFF;
         }
-        rt_hw_interrupt_enable(level);
+        lwp_mutex_release_safe(&_pmutex_lock);
     }
 
     rt_mutex_release(&_pmutex_lock);
@@ -242,7 +240,6 @@ static int _pthread_mutex_lock_timeout(void *umutex, struct timespec *timeout)
     struct rt_umutex *umutex_p = (struct rt_umutex*)umutex;
     rt_err_t lock_ret = 0;
     rt_int32_t time = RT_WAITING_FOREVER;
-    register rt_base_t temp;
 
     if (!lwp_user_accessable((void *)umutex, sizeof(struct rt_umutex)))
     {
@@ -291,11 +288,14 @@ static int _pthread_mutex_lock_timeout(void *umutex, struct timespec *timeout)
         }
         break;
     case PMUTEX_ERRORCHECK:
-        temp = rt_hw_interrupt_disable();
+        lock_ret = lwp_mutex_take_safe(&_pmutex_lock, RT_WAITING_FOREVER, 1);
+        if (lock_ret != RT_EOK)
+        {
+            return -EINTR;
+        }
         if (pmutex->lock.kmutex->owner == rt_thread_self())
         {
-            /* enable interrupt */
-            rt_hw_interrupt_enable(temp);
+            lwp_mutex_release_safe(&_pmutex_lock);
             return -EDEADLK;
         }
         lock_ret = rt_mutex_take_interruptible(pmutex->lock.kmutex, time);
@@ -303,7 +303,7 @@ static int _pthread_mutex_lock_timeout(void *umutex, struct timespec *timeout)
         {
             umutex_p->_m_lock = rt_thread_self()->tid;
         }
-        rt_hw_interrupt_enable(temp);
+        lwp_mutex_release_safe(&_pmutex_lock);
         break;
     default: /* unknown type */
         return -EINVAL;
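
The third argument of lwp_mutex_take_safe() appears to select whether the wait may be interrupted by a signal: the cleanup path above passes 0 and expects success, while the syscall paths pass 1 and turn a failure into -EINTR. A sketch of the syscall-side pattern under that assumption (the function is illustrative, not part of this commit):

static int guarded_update_sketch(void)
{
    if (lwp_mutex_take_safe(&_pmutex_lock, RT_WAITING_FOREVER, 1) != RT_EOK)
        return -EINTR;              /* interrupted by a signal */

    /* ... inspect or modify the pmutex AVL tree here ... */

    lwp_mutex_release_safe(&_pmutex_lock);
    return 0;
}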

+ 48 - 61
components/lwp/lwp_signal.c

@@ -12,17 +12,15 @@
  *                             update the generation, pending and delivery routines
  */
 
-#define DBG_TAG    "LWP_SIGNAL"
-#define DBG_LVL    DBG_INFO
+#define DBG_TAG "lwp.signal"
+#define DBG_LVL DBG_INFO
 #include <rtdbg.h>
 
 #include <rthw.h>
 #include <rtthread.h>
 #include <string.h>
 
-#include "lwp.h"
-#include "lwp_arch.h"
-#include "lwp_signal.h"
+#include "lwp_internal.h"
 #include "sys/signal.h"
 #include "syscall_generic.h"
 
@@ -362,7 +360,7 @@ static void _thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
     lwp_sigset_t new_set;
 
     /**
-     * @note POSIX wants this API to be capable to query the current mask
+     * Note: POSIX wants this API to be capable of querying the current mask
      *       by passing NULL in `sigset`
      */
     if (oset)
@@ -408,28 +406,24 @@ static void lwp_signal_notify(rt_slist_t *list_head, lwp_siginfo_t siginfo)
 
 rt_err_t lwp_signal_init(struct lwp_signal *sig)
 {
-    rt_err_t rc;
-    rc = rt_mutex_init(&sig->sig_lock, "lwpsig", RT_IPC_FLAG_FIFO);
-    if (rc == RT_EOK)
-    {
-        memset(&sig->sig_dispatch_thr, 0, sizeof(sig->sig_dispatch_thr));
-
-        memset(&sig->sig_action, 0, sizeof(sig->sig_action));
-        memset(&sig->sig_action_nodefer, 0, sizeof(sig->sig_action_nodefer));
-        memset(&sig->sig_action_onstack, 0, sizeof(sig->sig_action_onstack));
-        memset(&sig->sig_action_restart, 0, sizeof(sig->sig_action_restart));
-        memset(&sig->sig_action_siginfo, 0, sizeof(sig->sig_action_siginfo));
-        lwp_sigqueue_init(&sig->sig_queue);
-    }
+    rt_err_t rc = RT_EOK;
+
+    memset(&sig->sig_dispatch_thr, 0, sizeof(sig->sig_dispatch_thr));
+
+    memset(&sig->sig_action, 0, sizeof(sig->sig_action));
+    memset(&sig->sig_action_nodefer, 0, sizeof(sig->sig_action_nodefer));
+    memset(&sig->sig_action_onstack, 0, sizeof(sig->sig_action_onstack));
+    memset(&sig->sig_action_restart, 0, sizeof(sig->sig_action_restart));
+    memset(&sig->sig_action_siginfo, 0, sizeof(sig->sig_action_siginfo));
+    lwp_sigqueue_init(&sig->sig_queue);
     return rc;
 }
 
 rt_err_t lwp_signal_detach(struct lwp_signal *signal)
 {
-    rt_err_t ret;
+    rt_err_t ret = RT_EOK;
 
     lwp_sigqueue_clear(&signal->sig_queue);
-    ret = rt_mutex_detach(&signal->sig_lock);
 
     return ret;
 }
@@ -475,7 +469,6 @@ int lwp_thread_signal_suspend_check(rt_thread_t thread, int suspend_flag)
 
 void lwp_thread_signal_catch(void *exp_frame)
 {
-    rt_base_t level;
     int signo = 0;
     struct rt_thread *thread;
     struct rt_lwp *lwp;
@@ -492,7 +485,7 @@ void lwp_thread_signal_catch(void *exp_frame)
     lwp = (struct rt_lwp*)thread->lwp;
 
     RT_ASSERT(!!lwp);
-    level = rt_hw_interrupt_disable();
+    LWP_LOCK(lwp);
 
     /* check if signal exist */
     if (!sigqueue_isempty(_SIGQ(thread)))
@@ -541,7 +534,7 @@ void lwp_thread_signal_catch(void *exp_frame)
                 p_usi = RT_NULL;
         }
     }
-    rt_hw_interrupt_enable(level);
+    LWP_UNLOCK(lwp);
 
     if (pending && signo)
     {
@@ -556,7 +549,7 @@ void lwp_thread_signal_catch(void *exp_frame)
 
         /**
          * enter signal action of user
-         * @note that the p_usi is release before entering signal action by
+         * Note: the p_usi is released before entering the signal action by
          * resetting the kernel sp.
          */
         LOG_D("%s: enter signal handler(signo=%d) at %p", __func__, signo, handler);
@@ -611,7 +604,7 @@ static rt_thread_t _signal_find_catcher(struct rt_lwp *lwp, int signo)
     {
         candidate = rt_thread_self();
 
-        /** @note: lwp of current is a const value that can be safely read */
+        /** Note: lwp of current is a const value that can be safely read */
         if (candidate->lwp == lwp &&
             !_sigismember(&candidate->signal.sigset_mask, signo))
         {
@@ -682,7 +675,7 @@ rt_inline rt_bool_t _sighandler_cannot_caught(struct rt_lwp *lwp, int signo)
 rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, long value)
 {
     rt_err_t ret = -1;
-    rt_base_t level;
+
     lwp_siginfo_t siginfo;
     rt_bool_t terminated;
     rt_bool_t need_schedule;
@@ -701,8 +694,7 @@ rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, long value)
 
         need_schedule = RT_FALSE;
 
-        /* FIXME: acquire READ lock to lwp */
-        level = rt_hw_interrupt_disable();
+        LWP_LOCK(lwp);
         terminated = lwp->terminated;
 
         /* short-circuit code for inactive task, ignored signals */
@@ -727,7 +719,7 @@ rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, long value)
             }
         }
 
-        rt_hw_interrupt_enable(level);
+        LWP_UNLOCK(lwp);
 
         if (need_schedule)
             rt_schedule();
@@ -771,12 +763,12 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
     lwp_sigqueue_t thread_sigq;
     rt_list_t *thread_list;
     rt_err_t ret = RT_EOK;
-    rt_base_t level;
+
 
     if (lwp)
     {
         /** acquire READ access to lwp */
-        level = rt_hw_interrupt_disable();
+        LWP_LOCK(lwp);
 
         if (oact)
         {
@@ -788,7 +780,7 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
         if (act)
         {
             /**
-             * @note POSIX.1-2017 requires calls to sigaction() that supply a NULL act
+             * Note: POSIX.1-2017 requires calls to sigaction() that supply a NULL act
              * argument succeed, even in the case of signals that cannot be caught or ignored
              */
             if (_sighandler_cannot_caught(lwp, signo))
@@ -805,9 +797,9 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
                 _signal_action_flag_u2k(signo, &lwp->signal, act);
 
                 /**
-                 * @brief Discard the pending signal if signal action is set to SIG_IGN
+                 * Brief: Discard the pending signal if signal action is set to SIG_IGN
                  *
-                 * @note POSIX.1-2017: Setting a signal action to SIG_IGN for a signal
+                 * Note: POSIX.1-2017: Setting a signal action to SIG_IGN for a signal
                  * that is pending shall cause the pending signal to be discarded,
                  * whether or not it is blocked.
                  */
@@ -826,7 +818,7 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
             }
         }
 
-        rt_hw_interrupt_enable(level);
+        LWP_UNLOCK(lwp);
     }
     else
         ret = -RT_EINVAL;
@@ -837,7 +829,7 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
 rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long value)
 {
     rt_err_t ret = -1;
-    rt_base_t level;
+
     struct rt_lwp *lwp;
     lwp_siginfo_t siginfo;
     rt_bool_t need_schedule;
@@ -845,6 +837,8 @@ rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long
     /** must be able to be suspended */
     RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
 
+    LOG_D("%s(signo=%d)", __func__, signo);
+
     if (!thread || signo < 0 || signo >= _LWP_NSIG)
     {
         ret = -RT_EINVAL;
@@ -856,8 +850,7 @@ rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long
 
         RT_ASSERT(lwp);
 
-        /* FIXME: acquire READ lock to lwp */
-        level = rt_hw_interrupt_disable();
+        LWP_LOCK(lwp);
 
         if (!lwp)
             ret = -RT_EPERM;
@@ -870,8 +863,8 @@ rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long
             if (siginfo)
             {
                 need_schedule = _siginfo_deliver_to_thread(thread, siginfo);
-                ret = 0;
                 lwp_signal_notify(&lwp->signalfd_notify_head, siginfo);
+                ret = 0;
             }
             else
             {
@@ -880,7 +873,7 @@ rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long
             }
         }
 
-        rt_hw_interrupt_enable(level);
+        LWP_UNLOCK(lwp);
 
         if (need_schedule)
             rt_schedule();
@@ -906,15 +899,13 @@ rt_err_t lwp_thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
                                 const lwp_sigset_t *sigset, lwp_sigset_t *oset)
 {
     rt_err_t ret = -1;
-    rt_base_t level;
     struct rt_lwp *lwp;
 
     if (thread)
     {
-        /** FIXME: acquire READ access to rt_thread */
-        level = rt_hw_interrupt_disable();
-
         lwp = (struct rt_lwp*)thread->lwp;
+        LWP_LOCK(lwp);
+
         if (!lwp)
         {
             ret = -RT_EPERM;
@@ -925,7 +916,7 @@ rt_err_t lwp_thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
             _thread_signal_mask(thread, how, sigset, oset);
         }
 
-        rt_hw_interrupt_enable(level);
+        LWP_UNLOCK(lwp);
     }
     else
         ret = -RT_EINVAL;
@@ -968,14 +959,14 @@ static int _dequeue_signal(rt_thread_t thread, lwp_sigset_t *mask, siginfo_t *us
 rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
                                      siginfo_t *usi, struct timespec *timeout)
 {
-    rt_base_t level;
     rt_err_t ret;
     lwp_sigset_t saved_sigset;
     lwp_sigset_t blocked_sigset;
     int sig;
+    struct rt_lwp *lwp = thread->lwp;
 
     /**
-     * @brief POSIX
+     * Brief: POSIX
      * If one of the signals in set is already pending for the calling thread,
      * sigwaitinfo() will return immediately
      */
@@ -985,21 +976,19 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
     _sigdelset(sigset, SIGSTOP);
     _signotsets(sigset, sigset);
 
-
-    /* FIXME: acquire READ lock to lwp */
-    level = rt_hw_interrupt_disable();
+    LWP_LOCK(lwp);
     sig = _dequeue_signal(thread, sigset, usi);
-    rt_hw_interrupt_enable(level);
+    LWP_UNLOCK(lwp);
     if (sig)
         return sig;
 
     /**
-     * @brief POSIX
+     * Brief: POSIX
      * if none of the signals specified by set are pending, sigtimedwait() shall
      * wait for the time interval specified in the timespec structure referenced
      * by timeout.
      *
-     * @note If the pending signal arrives before thread suspend, the suspend
+     * Note: If the pending signal arrives before thread suspend, the suspend
      * operation will return a failure
      */
     _sigandsets(&blocked_sigset, &thread->signal.sigset_mask, sigset);
@@ -1010,7 +999,7 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
         time = rt_timespec_to_tick(timeout);
 
         /**
-         * @brief POSIX
+         * Brief: POSIX
          * If the timespec structure pointed to by timeout is zero-valued and
          * if none of the signals specified by set are pending, then
          * sigtimedwait() shall return immediately with an error
@@ -1043,17 +1032,15 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
     /* else ret == -EINTR */
     _thread_signal_mask(thread, LWP_SIG_MASK_CMD_SET_MASK, &saved_sigset, RT_NULL);
 
-    /* FIXME: acquire READ lock to lwp */
-    level = rt_hw_interrupt_disable();
+    LWP_LOCK(lwp);
     sig = _dequeue_signal(thread, sigset, usi);
-    rt_hw_interrupt_enable(level);
+    LWP_UNLOCK(lwp);
 
     return sig ? sig : ret;
 }
 
 void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *pending)
 {
-    rt_base_t level;
     struct rt_lwp *lwp;
     lwp = thread->lwp;
 
@@ -1061,10 +1048,10 @@ void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *pending)
     {
         memset(pending, 0, sizeof(*pending));
 
-        level = rt_hw_interrupt_disable();
+        LWP_LOCK(lwp);
         sigqueue_examine(_SIGQ(thread), pending);
         sigqueue_examine(_SIGQ(lwp), pending);
-        rt_hw_interrupt_enable(level);
+        LWP_UNLOCK(lwp);
 
         _sigandsets(pending, pending, &thread->signal.sigset_mask);
     }
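
With sig_lock gone (see the lwp_signal.h hunk below), every signal-queue access in this file is serialized by the owning process's LWP_LOCK()/LWP_UNLOCK() pair, and rescheduling is always deferred until after the unlock. The recurring shape, distilled into a sketch (helper names as used above; not a function from this commit):

static void kill_thread_sketch(rt_thread_t thread, lwp_siginfo_t si)
{
    struct rt_lwp *lwp = (struct rt_lwp *)thread->lwp;
    rt_bool_t need_schedule;

    LWP_LOCK(lwp);
    need_schedule = _siginfo_deliver_to_thread(thread, si);
    LWP_UNLOCK(lwp);

    if (need_schedule)      /* never reschedule while holding the lock */
        rt_schedule();
}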

+ 0 - 2
components/lwp/lwp_signal.h

@@ -40,8 +40,6 @@ typedef enum {
  * LwP implementation of POSIX signal
  */
 struct lwp_signal {
-    struct rt_mutex sig_lock;
-
     struct lwp_sigqueue sig_queue;
     rt_thread_t sig_dispatch_thr[_LWP_NSIG];
 

+ 137 - 85
components/lwp/lwp_syscall.c

@@ -12,6 +12,7 @@
  * 2021-02-20     lizhirui     fix some warnings
  * 2023-03-13     WangXiaoyao  Format & fix syscall return value
  * 2023-07-06     Shell        adapt the signal API, and clone, fork to new implementation of lwp signal
+ * 2023-07-27     Shell        Move tid_put() from lwp_free() to sys_exit()
  */
 
 #define _GNU_SOURCE
@@ -20,11 +21,12 @@
 #include <rtthread.h>
 #include <rthw.h>
 #include <board.h>
-#include <mm_aspace.h>
+
+#include <console.h>
 #include <string.h>
 #include <stdint.h>
 
-#define DBG_TAG    "SYSCALL"
+#define DBG_TAG    "lwp.syscall"
 #define DBG_LVL    DBG_INFO
 #include <rtdbg.h>
 
@@ -33,6 +35,7 @@
 #include "libc_musl.h"
 #include "lwp_internal.h"
 #ifdef ARCH_MM_MMU
+#include <mm_aspace.h>
 #include <lwp_user_mm.h>
 #include <lwp_arch.h>
 #endif
@@ -325,16 +328,13 @@ static void _crt_thread_entry(void *parameter)
 /* exit group */
 sysret_t sys_exit_group(int value)
 {
-    rt_base_t level;
     rt_thread_t tid, main_thread;
     struct rt_lwp *lwp;
 
-    LOG_D("process exit");
-
     tid = rt_thread_self();
     lwp = (struct rt_lwp *)tid->lwp;
+    LOG_D("process(%p) exit.", lwp);
 
-    level = rt_hw_interrupt_disable();
 #ifdef ARCH_MM_MMU
     if (tid->clear_child_tid)
     {
@@ -351,7 +351,7 @@ sysret_t sys_exit_group(int value)
     if (main_thread == tid)
     {
         lwp_wait_subthread_exit();
-        lwp->lwp_ret = value;
+        lwp->lwp_ret = LWP_CREATE_STAT(value);
     }
 #else
     main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
@@ -373,18 +373,24 @@ sysret_t sys_exit_group(int value)
     }
 #endif /* ARCH_MM_MMU */
 
+    /**
+     * Note: the tid tree always holds a reference to the thread, hence the tid
+     * must be released before the cleanup of the thread
+     */
+    lwp_tid_put(tid->tid);
+    tid->tid = 0;
+    rt_list_remove(&tid->sibling);
     rt_thread_delete(tid);
-    rt_hw_interrupt_enable(level);
     rt_schedule();
 
     /* never reach here */
+    RT_ASSERT(0);
     return 0;
 }
 
 /* thread exit */
 void sys_exit(int status)
 {
-    rt_base_t level;
     rt_thread_t tid, main_thread;
     struct rt_lwp *lwp;
 
@@ -393,8 +399,6 @@ void sys_exit(int status)
     tid = rt_thread_self();
     lwp = (struct rt_lwp *)tid->lwp;
 
-    level = rt_hw_interrupt_disable();
-
 #ifdef ARCH_MM_MMU
     if (tid->clear_child_tid)
     {
@@ -411,12 +415,14 @@ void sys_exit(int status)
     {
         lwp_terminate(lwp);
         lwp_wait_subthread_exit();
-        lwp->lwp_ret = status;
+        lwp->lwp_ret = LWP_CREATE_STAT(status);
     }
 #endif /* ARCH_MM_MMU */
 
+    lwp_tid_put(tid->tid);
+    tid->tid = 0;
+    rt_list_remove(&tid->sibling);
     rt_thread_delete(tid);
-    rt_hw_interrupt_enable(level);
     rt_schedule();
 
     return;
@@ -1079,23 +1085,32 @@ sysret_t sys_kill(int pid, int signo)
 {
     rt_err_t kret;
     sysret_t sysret;
-    rt_base_t level;
     struct rt_lwp *lwp;
 
     /* handling the semantics of sys_kill */
     if (pid > 0)
     {
-        /* TODO: lock the lwp strcut */
-        level = rt_hw_interrupt_disable();
-        lwp = lwp_from_pid(pid);
-
-        /* lwp_signal_kill() can handle NULL lwp */
+        /**
+         * Brief: Match the pid and send a signal to the lwp if found
+         * Note: Critical Section
+         * - pid tree (READ. since the lwp is fetched from the pid tree, it must stay there)
+         */
+        lwp_pid_lock_take();
+        lwp = lwp_from_pid_locked(pid);
         if (lwp)
+        {
+            lwp_ref_inc(lwp);
+            lwp_pid_lock_release();
+
             kret = lwp_signal_kill(lwp, signo, SI_USER, 0);
+            lwp_ref_dec(lwp);
+            kret = 0;
+        }
         else
+        {
+            lwp_pid_lock_release();
             kret = -RT_ENOENT;
-
-        rt_hw_interrupt_enable(level);
+        }
     }
     else if (pid == 0)
     {
@@ -1807,7 +1822,6 @@ sysret_t sys_timer_getoverrun(timer_t timerid)
 
 rt_thread_t sys_thread_create(void *arg[])
 {
-    rt_base_t level = 0;
     void *user_stack = 0;
     struct rt_lwp *lwp = 0;
     rt_thread_t thread = RT_NULL;
@@ -1887,9 +1901,9 @@ rt_thread_t sys_thread_create(void *arg[])
         rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
     }
 
-    level = rt_hw_interrupt_disable();
+    LWP_LOCK(lwp);
     rt_list_insert_after(&lwp->t_grp, &thread->sibling);
-    rt_hw_interrupt_enable(level);
+    LWP_UNLOCK(lwp);
 
     return thread;
 
@@ -1903,11 +1917,9 @@ fail:
 }
 
 #ifdef ARCH_MM_MMU
-#include "lwp_clone.h"
 
 long _sys_clone(void *arg[])
 {
-    rt_base_t level = 0;
     struct rt_lwp *lwp = 0;
     rt_thread_t thread = RT_NULL;
     rt_thread_t self = RT_NULL;
@@ -2001,9 +2013,9 @@ long _sys_clone(void *arg[])
         rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
     }
 
-    level = rt_hw_interrupt_disable();
+    LWP_LOCK(lwp);
     rt_list_insert_after(&lwp->t_grp, &thread->sibling);
-    rt_hw_interrupt_enable(level);
+    LWP_UNLOCK(lwp);
 
     /* copy origin stack */
     lwp_memcpy(thread->stack_addr, self->stack_addr, thread->stack_size);
@@ -2016,6 +2028,10 @@ long _sys_clone(void *arg[])
     return (long)tid;
 fail:
     lwp_tid_put(tid);
+    if (thread)
+    {
+        rt_thread_delete(thread);
+    }
     if (lwp)
     {
         lwp_ref_dec(lwp);
@@ -2091,7 +2107,6 @@ static int lwp_copy_files(struct rt_lwp *dst, struct rt_lwp *src)
 
 sysret_t _sys_fork(void)
 {
-    rt_base_t level;
     int tid = 0;
     sysret_t falival = 0;
     struct rt_lwp *lwp = RT_NULL;
@@ -2166,34 +2181,27 @@ sysret_t _sys_fork(void)
     thread->lwp = (void *)lwp;
     thread->tid = tid;
 
-    level = rt_hw_interrupt_disable();
-
+    LWP_LOCK(self_lwp);
     /* add thread to lwp process */
     rt_list_insert_after(&lwp->t_grp, &thread->sibling);
+    LWP_UNLOCK(self_lwp);
 
-    /* lwp add to children link */
-    lwp->sibling = self_lwp->first_child;
-    self_lwp->first_child = lwp;
-    lwp->parent = self_lwp;
+    lwp_children_register(self_lwp, lwp);
 
-    rt_hw_interrupt_enable(level);
-
-    /* copy origin stack */
+    /* copy kernel stack context from self thread */
     lwp_memcpy(thread->stack_addr, self_thread->stack_addr, self_thread->stack_size);
     lwp_tid_set_thread(tid, thread);
 
     /* duplicate user objects */
     lwp_user_object_dup(lwp, self_lwp);
 
-    level = rt_hw_interrupt_disable();
     user_stack = arch_get_user_sp();
-    rt_hw_interrupt_enable(level);
-
     arch_set_thread_context(arch_fork_exit,
             (void *)((char *)thread->stack_addr + thread->stack_size),
             user_stack, &thread->sp);
+
     /* new thread never reach there */
-    level = rt_hw_interrupt_disable();
+    LWP_LOCK(lwp);
     if (lwp->tty != RT_NULL)
     {
         int ret;
@@ -2212,7 +2220,8 @@ sysret_t _sys_fork(void)
 
         lwp->tty->foreground = lwp;
     }
-    rt_hw_interrupt_enable(level);
+    LWP_UNLOCK(lwp);
+
     rt_thread_startup(thread);
     return lwp_to_pid(lwp);
 fail:
@@ -2222,6 +2231,10 @@ fail:
     {
         lwp_tid_put(tid);
     }
+    if (thread)
+    {
+        rt_thread_delete(thread);
+    }
     if (lwp)
     {
         lwp_ref_dec(lwp);
@@ -2576,7 +2589,6 @@ sysret_t sys_execve(const char *path, char *const argv[], char *const envp[])
     char *p;
     struct rt_lwp *new_lwp = NULL;
     struct rt_lwp *lwp;
-    rt_base_t level;
     int uni_thread;
     rt_thread_t thread;
     struct process_aux *aux;
@@ -2591,7 +2603,8 @@ sysret_t sys_execve(const char *path, char *const argv[], char *const envp[])
     lwp = lwp_self();
     thread = rt_thread_self();
     uni_thread = 1;
-    level = rt_hw_interrupt_disable();
+
+    LWP_LOCK(lwp);
     if (lwp->t_grp.prev != &thread->sibling)
     {
         uni_thread = 0;
@@ -2600,7 +2613,8 @@ sysret_t sys_execve(const char *path, char *const argv[], char *const envp[])
     {
         uni_thread = 0;
     }
-    rt_hw_interrupt_enable(level);
+    LWP_UNLOCK(lwp);
+
     if (!uni_thread)
     {
         SET_ERRNO(EINVAL);
@@ -2771,10 +2785,14 @@ sysret_t sys_execve(const char *path, char *const argv[], char *const envp[])
             }
         }
 
-        /* load ok, now set thread name and swap the data of lwp and new_lwp */
-        level = rt_hw_interrupt_disable();
+        /**
+         * Set thread name and swap the data of lwp and new_lwp.
+         * Since no other threads can access the lwp field, it's unnecessary to
+         * take a lock here
+         */
 
-        rt_strncpy(thread->parent.name, run_name + last_backslash, RT_NAME_MAX);
+        strncpy(thread->parent.name, run_name + last_backslash, RT_NAME_MAX);
+        strncpy(lwp->cmd, new_lwp->cmd, RT_NAME_MAX);
 
         rt_pages_free(page, 0);
 
@@ -2797,12 +2815,10 @@ sysret_t sys_execve(const char *path, char *const argv[], char *const envp[])
         lwp_signal_detach(&lwp->signal);
         lwp_signal_init(&lwp->signal);
 
-        /* to do: clsoe files with flag CLOEXEC */
+        /* to do: close files with flag CLOEXEC, recycle sub-threads */
 
         lwp_aspace_switch(thread);
 
-        rt_hw_interrupt_enable(level);
-
         lwp_ref_dec(new_lwp);
         arch_start_umode(lwp->args,
                 lwp->text_entry,
@@ -4067,14 +4083,19 @@ sysret_t sys_sigtimedwait(const sigset_t *sigset, siginfo_t *info, const struct
 sysret_t sys_tkill(int tid, int sig)
 {
 #ifdef ARCH_MM_MMU
-    rt_base_t level;
     rt_thread_t thread;
-    int ret;
-
-    level = rt_hw_interrupt_disable();
-    thread = lwp_tid_get_thread(tid);
+    sysret_t ret;
+
+    /**
+     * Brief: Match a tid and do the kill
+     *
+     * Note: Critical Section
+     * - the thread (READ. may be released in the meantime; protected by the lock)
+     */
+    thread = lwp_tid_get_thread_and_inc_ref(tid);
     ret = lwp_thread_signal_kill(thread, sig, SI_USER, 0);
-    rt_hw_interrupt_enable(level);
+    lwp_tid_dec_ref(thread);
+
     return ret;
 #else
     return lwp_thread_kill((rt_thread_t)tid, sig);
@@ -4182,7 +4203,7 @@ sysret_t sys_waitpid(int32_t pid, int *status, int options)
     }
     else
     {
-        ret = waitpid(pid, status, options);
+        ret = lwp_waitpid(pid, status, options);
     }
 #else
     if (!lwp_user_accessable((void *)status, sizeof(int)))
@@ -5271,11 +5292,13 @@ sysret_t sys_setaffinity(pid_t pid, size_t size, void *set)
     return -1;
 }
 
-sysret_t sys_getaffinity(pid_t pid, size_t size, void *set)
+sysret_t sys_getaffinity(const pid_t pid, size_t size, void *set)
 {
 #ifdef ARCH_MM_MMU
+    DEF_RETURN_CODE(rc);
     cpu_set_t mask;
     struct rt_lwp *lwp;
+    rt_bool_t need_release = RT_FALSE;
 
     if (size <= 0 || size > sizeof(cpu_set_t))
     {
@@ -5286,33 +5309,47 @@ sysret_t sys_getaffinity(pid_t pid, size_t size, void *set)
         return -EFAULT;
     }
 
-    if (pid == 0) lwp = lwp_self();
-    else lwp = lwp_from_pid(pid);
-    if (!lwp)
+    if (pid == 0)
     {
-        return -ESRCH;
+        lwp = lwp_self();
     }
-
-#ifdef RT_USING_SMP
-    if (lwp->bind_cpu == RT_CPUS_NR)    /* not bind */
+    else
     {
-        CPU_ZERO_S(size, &mask);
+        need_release = RT_TRUE;
+        lwp_pid_lock_take();
+        lwp = lwp_from_pid_locked(pid);
     }
-    else /* set bind cpu */
+
+    if (!lwp)
     {
-        /* TODO: only single-core bindings are now supported of rt-smart */
-        CPU_SET_S(lwp->bind_cpu, size, &mask);
+        rc = -ESRCH;
     }
-#else
-    CPU_SET_S(0, size, &mask);
-#endif
-
-    if (lwp_put_to_user(set, &mask, size) != size)
+    else
     {
-        return -1;
+    #ifdef RT_USING_SMP
+        if (lwp->bind_cpu == RT_CPUS_NR)    /* not bind */
+        {
+            CPU_ZERO_S(size, &mask);
+        }
+        else /* set bind cpu */
+        {
+            /* TODO: only single-core bindings are now supported of rt-smart */
+            CPU_SET_S(lwp->bind_cpu, size, &mask);
+        }
+    #else
+        CPU_SET_S(0, size, &mask);
+    #endif
+
+        if (lwp_put_to_user(set, &mask, size) != size)
+            rc = -EFAULT;
+        else
+            rc = 0;
     }
 
-    return 0;
+    if (need_release)
+        lwp_pid_lock_release();
+
+    RETURN(rc);
 #else
     return -1;
 #endif
@@ -5369,6 +5406,7 @@ sysret_t sys_sched_setparam(pid_t pid, void *param)
     struct sched_param *sched_param = RT_NULL;
     struct rt_lwp *lwp = NULL;
     rt_thread_t main_thread;
+    rt_bool_t need_release = RT_FALSE;
     int ret = -1;
 
     if (!lwp_user_accessable(param, sizeof(struct sched_param)))
@@ -5390,15 +5428,21 @@ sysret_t sys_sched_setparam(pid_t pid, void *param)
 
     if (pid > 0)
     {
-        lwp = lwp_from_pid(pid);
+        need_release = RT_TRUE;
+        lwp_pid_lock_take();
+        lwp = lwp_from_pid_locked(pid);
     }
     else if (pid == 0)
     {
         lwp = lwp_self();
     }
+
     if (lwp)
     {
         main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
+        if (need_release)
+            lwp_pid_lock_release();
+
         ret = rt_thread_control(main_thread, RT_THREAD_CTRL_CHANGE_PRIORITY, (void *)&sched_param->sched_priority);
     }
 
@@ -5413,12 +5457,13 @@ sysret_t sys_sched_yield(void)
     return 0;
 }
 
-sysret_t sys_sched_getparam(pid_t pid, void *param)
+sysret_t sys_sched_getparam(const pid_t pid, void *param)
 {
     struct sched_param *sched_param = RT_NULL;
     struct rt_lwp *lwp = NULL;
     rt_thread_t main_thread;
     int ret = -1;
+    rt_bool_t need_release = RT_FALSE;
 
     if (!lwp_user_accessable(param, sizeof(struct sched_param)))
     {
@@ -5433,15 +5478,21 @@ sysret_t sys_sched_getparam(pid_t pid, void *param)
 
     if (pid > 0)
     {
-        lwp = lwp_from_pid(pid);
+        need_release = RT_TRUE;
+        lwp_pid_lock_take();
+        lwp = lwp_from_pid_locked(pid);
     }
     else if (pid == 0)
     {
         lwp = lwp_self();
     }
+
     if (lwp)
     {
         main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
+        if (need_release)
+            lwp_pid_lock_release();
+
         sched_param->sched_priority = main_thread->current_priority;
         ret = 0;
     }
@@ -5474,12 +5525,10 @@ sysret_t sys_sched_get_priority_min(int policy)
 
 sysret_t sys_sched_setscheduler(int tid, int policy, void *param)
 {
-    int ret = 0;
+    sysret_t ret;
     struct sched_param *sched_param = RT_NULL;
     rt_thread_t thread = RT_NULL;
 
-    thread = lwp_tid_get_thread(tid);
-
     if (!lwp_user_accessable(param, sizeof(struct sched_param)))
     {
         return -EFAULT;
@@ -5497,7 +5546,9 @@ sysret_t sys_sched_setscheduler(int tid, int policy, void *param)
         return -EINVAL;
     }
 
+    thread = lwp_tid_get_thread_and_inc_ref(tid);
     ret = rt_thread_control(thread, RT_THREAD_CTRL_CHANGE_PRIORITY, (void *)&sched_param->sched_priority);
+    lwp_tid_dec_ref(thread);
 
     kmem_put(sched_param);
 
@@ -5509,7 +5560,6 @@ sysret_t sys_sched_getscheduler(int tid, int *policy, void *param)
     struct sched_param *sched_param = RT_NULL;
     rt_thread_t thread = RT_NULL;
 
-    thread = lwp_tid_get_thread(tid);
 
     if (!lwp_user_accessable(param, sizeof(struct sched_param)))
     {
@@ -5528,7 +5578,9 @@ sysret_t sys_sched_getscheduler(int tid, int *policy, void *param)
         return -EINVAL;
     }
 
+    thread = lwp_tid_get_thread_and_inc_ref(tid);
     sched_param->sched_priority = thread->current_priority;
+    lwp_tid_dec_ref(thread);
 
     lwp_put_to_user((void *)param, sched_param, sizeof(struct sched_param));
     kmem_put(sched_param);
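
The sched_* hunks above share one lookup discipline: take the PID table lock, resolve the pid while the lock is held, and drop it before any call that may block. A minimal sketch of the pattern (with_lwp_locked and fn are hypothetical names; lwp_pid_lock_take/lwp_pid_lock_release and lwp_from_pid_locked are the APIs introduced by this commit):

    /* Sketch only: fn() must not block, since the PID table lock is held. */
    static int with_lwp_locked(pid_t pid, int (*fn)(struct rt_lwp *))
    {
        struct rt_lwp *lwp;
        int rc;

        if (pid == 0)
            return fn(lwp_self());          /* current process, no lock needed */

        lwp_pid_lock_take();
        lwp = lwp_from_pid_locked(pid);     /* pointer valid only under the lock */
        rc = lwp ? fn(lwp) : -ESRCH;
        lwp_pid_lock_release();
        return rc;
    }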

+ 5 - 1
components/lwp/lwp_syscall.h

@@ -11,7 +11,11 @@
 #ifndef __LWP_SYSCALL_H__
 #define __LWP_SYSCALL_H__
 
-#include <syscall_generic.h>
+#ifdef RT_USING_MUSLLIBC
+#include "libc_musl.h"
+#endif
+
+#include "syscall_generic.h"
 
 #include <stdint.h>
 #include <rtthread.h>

+ 65 - 18
components/lwp/lwp_tid.c

@@ -8,19 +8,19 @@
  * 2021-01-15     shaojinchun  first version
  */
 
+#define DBG_TAG    "lwp.tid"
+#define DBG_LVL    DBG_LOG
+#include <rtdbg.h>
+
 #include <rthw.h>
 #include <rtthread.h>
 
-#include "lwp.h"
+#include "lwp_internal.h"
 
 #ifdef ARCH_MM_MMU
 #include "lwp_user_mm.h"
 #endif
 
-#define DBG_TAG    "LWP_TID"
-#define DBG_LVL    DBG_INFO
-#include <rtdbg.h>
-
 #define TID_MAX 10000
 
 #define TID_CT_ASSERT(name, x) \
@@ -35,13 +35,19 @@ static int lwp_tid_ary_alloced = 0;
 static struct lwp_avl_struct *lwp_tid_root = RT_NULL;
 static int current_tid = 0;
 
+static struct rt_mutex tid_lock;
+
+int lwp_tid_init(void)
+{
+    return rt_mutex_init(&tid_lock, "tidmtx", RT_IPC_FLAG_PRIO);
+}
+
 int lwp_tid_get(void)
 {
-    rt_base_t level;
     struct lwp_avl_struct *p;
     int tid = 0;
 
-    level = rt_hw_interrupt_disable();
+    lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
     p = lwp_tid_free_head;
     if (p)
     {
@@ -80,53 +86,94 @@ int lwp_tid_get(void)
         lwp_avl_insert(p, &lwp_tid_root);
         current_tid = tid;
     }
-    rt_hw_interrupt_enable(level);
+    lwp_mutex_release_safe(&tid_lock);
     return tid;
 }
 
 void lwp_tid_put(int tid)
 {
-    rt_base_t level;
     struct lwp_avl_struct *p;
+    rt_thread_t thread;
+    rt_thread_t current;
 
-    level = rt_hw_interrupt_disable();
+    lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
     p  = lwp_avl_find(tid, lwp_tid_root);
     if (p)
     {
+        thread = p->data;
         p->data = RT_NULL;
         lwp_avl_remove(p, &lwp_tid_root);
         p->avl_right = lwp_tid_free_head;
         lwp_tid_free_head = p;
     }
-    rt_hw_interrupt_enable(level);
+    else
+        thread = RT_NULL;
+
+    if (thread && thread->tid_ref_count)
+    {
+        current = rt_thread_self();
+        RT_ASSERT(thread->susp_recycler == RT_NULL);
+        thread->susp_recycler = current;
+
+        rt_enter_critical();
+        rt_thread_suspend_with_flag(current, RT_UNINTERRUPTIBLE);
+        lwp_mutex_release_safe(&tid_lock);
+        rt_exit_critical();
+
+        rt_schedule();
+    }
+    else
+        lwp_mutex_release_safe(&tid_lock);
 }
 
-rt_thread_t lwp_tid_get_thread(int tid)
+rt_thread_t lwp_tid_get_thread_and_inc_ref(int tid)
 {
-    rt_base_t level;
     struct lwp_avl_struct *p;
     rt_thread_t thread = RT_NULL;
+    lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
 
-    level = rt_hw_interrupt_disable();
     p  = lwp_avl_find(tid, lwp_tid_root);
     if (p)
     {
         thread = (rt_thread_t)p->data;
+        if (thread != RT_NULL)
+        {
+            thread->tid_ref_count += 1;
+        }
     }
-    rt_hw_interrupt_enable(level);
+    lwp_mutex_release_safe(&tid_lock);
     return thread;
 }
 
+void lwp_tid_dec_ref(rt_thread_t thread)
+{
+    rt_thread_t susp_putter;
+    if (thread)
+    {
+        RT_ASSERT(rt_object_get_type(&thread->parent) == RT_Object_Class_Thread);
+        susp_putter = thread->susp_recycler;
+        lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
+
+        RT_ASSERT(thread->tid_ref_count > 0);
+        thread->tid_ref_count -= 1;
+        if (!thread->tid_ref_count && susp_putter)
+        {
+            rt_thread_resume(susp_putter);
+        }
+        lwp_mutex_release_safe(&tid_lock);
+    }
+}
+
 void lwp_tid_set_thread(int tid, rt_thread_t thread)
 {
-    rt_base_t level;
     struct lwp_avl_struct *p;
 
-    level = rt_hw_interrupt_disable();
+    lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
     p  = lwp_avl_find(tid, lwp_tid_root);
     if (p)
     {
+        RT_ASSERT(p->data == RT_NULL);
         p->data = thread;
     }
-    rt_hw_interrupt_enable(level);
+    lwp_mutex_release_safe(&tid_lock);
 }
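
Taken together, the tid functions now form a reference-counted lifecycle: lookups pin the thread, and recycling blocks until every pin is dropped. A minimal usage sketch (hypothetical caller, using only the functions defined above):

    int tid = lwp_tid_get();                    /* allocate a free tid */
    lwp_tid_set_thread(tid, thread);            /* bind it exactly once (asserted) */

    rt_thread_t t = lwp_tid_get_thread_and_inc_ref(tid);
    if (t)
    {
        /* ... t cannot be recycled while this reference is held ... */
        lwp_tid_dec_ref(t);                     /* the last ref wakes a blocked recycler */
    }

    lwp_tid_put(tid);                           /* sleeps until tid_ref_count reaches 0 */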

+ 19 - 18
components/mm/mm_page.c

@@ -42,6 +42,7 @@ static struct rt_varea mpr_varea;
 
 static struct rt_page *page_list_low[RT_PAGE_MAX_ORDER];
 static struct rt_page *page_list_high[RT_PAGE_MAX_ORDER];
+static RT_DEFINE_SPINLOCK(_spinlock);
 
 #define page_start ((rt_page_t)rt_mpr_start)
 
@@ -502,9 +503,9 @@ int rt_page_ref_get(void *addr, rt_uint32_t size_bits)
     int ref;
 
     p = rt_page_addr2page(addr);
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     ref = _pages_ref_get(p, size_bits);
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
     return ref;
 }
 
@@ -514,9 +515,9 @@ void rt_page_ref_inc(void *addr, rt_uint32_t size_bits)
     rt_base_t level;
 
     p = rt_page_addr2page(addr);
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     _pages_ref_inc(p, size_bits);
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 }
 
 static rt_page_t (*pages_alloc_handler)(rt_page_t page_list[], rt_uint32_t size_bits);
@@ -545,18 +546,18 @@ rt_inline void *_do_pages_alloc(rt_uint32_t size_bits, size_t flags)
     rt_base_t level;
     rt_page_t *page_list = _flag_to_page_list(flags);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     p = pages_alloc_handler(page_list, size_bits);
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     if (!p && page_list != page_list_low)
     {
         /* fall back */
         page_list = page_list_low;
 
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&_spinlock);
         p = pages_alloc_handler(page_list, size_bits);
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_spinlock, level);
     }
 
     if (p)
@@ -564,9 +565,9 @@ rt_inline void *_do_pages_alloc(rt_uint32_t size_bits, size_t flags)
         alloc_buf = page_to_addr(p);
 
         #ifdef RT_DEBUGING_PAGE_LEAK
-            level = rt_hw_interrupt_disable();
+            level = rt_spin_lock_irqsave(&_spinlock);
             TRACE_ALLOC(p, size_bits);
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_spinlock, level);
         #endif
     }
     return alloc_buf;
@@ -592,11 +593,11 @@ int rt_pages_free(void *addr, rt_uint32_t size_bits)
     if (p)
     {
         rt_base_t level;
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&_spinlock);
         real_free = _pages_free(page_list, p, size_bits);
         if (real_free)
             TRACE_FREE(p, size_bits);
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_spinlock, level);
     }
 
     return real_free;
@@ -613,7 +614,7 @@ void list_page(void)
     rt_size_t installed = page_nr;
 
     rt_base_t level;
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
 
     for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
     {
@@ -637,7 +638,7 @@ void list_page(void)
         LOG_RAW("\n");
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
     LOG_RAW("-------------------------------\n");
     LOG_RAW("Page Summary:\n => free/installed: 0x%lx/0x%lx (%ld/%ld KB)\n", free, installed, PGNR2SIZE(free), PGNR2SIZE(installed));
     LOG_RAW("-------------------------------\n");
@@ -650,7 +651,7 @@ void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
     rt_size_t total_free = 0;
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
     {
         struct rt_page *p = page_list_low[i];
@@ -671,7 +672,7 @@ void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
             p = p->next;
         }
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
     *total_nr = page_nr;
     *free_nr = total_free;
 }
@@ -682,7 +683,7 @@ void rt_page_high_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
     rt_size_t total_free = 0;
     rt_base_t level;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     for (i = 0; i < RT_PAGE_MAX_ORDER; i++)
     {
         struct rt_page *p = page_list_high[i];
@@ -693,7 +694,7 @@ void rt_page_high_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
             p = p->next;
         }
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
     *total_nr = _high_pages_nr;
     *free_nr = total_free;
 }
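
Every conversion in this file (and in most files below) follows the same mechanical recipe: a private spinlock replaces the global interrupt switch, so a critical section now stalls only contenders for that lock rather than every core. A before/after sketch (touch_shared_state is a hypothetical function):

    static RT_DEFINE_SPINLOCK(_spinlock);       /* one lock per protected data set */

    void touch_shared_state(void)
    {
        rt_base_t level;

        /* before: level = rt_hw_interrupt_disable(); */
        level = rt_spin_lock_irqsave(&_spinlock);
        /* ... modify only the data _spinlock protects ... */
        rt_spin_unlock_irqrestore(&_spinlock, level);
        /* before: rt_hw_interrupt_enable(level); */
    }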

+ 7 - 6
components/net/lwip/port/ethernetif.c

@@ -564,6 +564,7 @@ rt_err_t eth_device_init_with_flag(struct eth_device *dev, const char *name, rt_
         return -RT_ERROR;
     }
 
+    rt_spin_lock_init(&(dev->spinlock));
     /* set netif */
     dev->netif = netif;
     dev->flags = flags;
@@ -838,13 +839,13 @@ rt_err_t eth_device_linkchange(struct eth_device* dev, rt_bool_t up)
 
     RT_ASSERT(dev != RT_NULL);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(dev->spinlock));
     dev->link_changed = 0x01;
     if (up == RT_TRUE)
         dev->link_status = 0x01;
     else
         dev->link_status = 0x00;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(dev->spinlock), level);
 
     /* post message to ethernet thread */
     return rt_mb_send(&eth_rx_thread_mb, (rt_ubase_t)dev);
@@ -912,10 +913,10 @@ static void eth_rx_thread_entry(void* parameter)
             {
                 int status;
 
-                level = rt_hw_interrupt_disable();
+                level = rt_spin_lock_irqsave(&(device->spinlock));
                 status = device->link_status;
                 device->link_changed = 0x00;
-                rt_hw_interrupt_enable(level);
+                rt_spin_unlock_irqrestore(&(device->spinlock), level);
 
                 if (status)
                     netifapi_netif_set_link_up(device->netif);
@@ -923,10 +924,10 @@ static void eth_rx_thread_entry(void* parameter)
                     netifapi_netif_set_link_down(device->netif);
             }
 
-            level = rt_hw_interrupt_disable();
+            level = rt_spin_lock_irqsave(&(device->spinlock));
             /* 'rx_notice' will be modified in the interrupt or here */
             device->rx_notice = RT_FALSE;
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(device->spinlock), level);
 
             /* receive all of buffer */
             while (1)

+ 2 - 0
components/net/lwip/port/netif/ethernetif.h

@@ -42,6 +42,8 @@ struct eth_device
     rt_uint8_t  link_status;
     rt_uint8_t  rx_notice;
 
+    struct rt_spinlock spinlock;
+
     /* eth device interface */
     struct pbuf* (*eth_rx)(rt_device_t dev);
     rt_err_t (*eth_tx)(rt_device_t dev, struct pbuf* p);

+ 4 - 2
components/net/lwip/port/sys_arch.c

@@ -511,16 +511,18 @@ sys_thread_t sys_thread_new(const char    *name,
     return t;
 }
 
+static RT_DEFINE_SPINLOCK(_spinlock);
+
 sys_prot_t sys_arch_protect(void)
 {
     rt_base_t level;
-    level = rt_hw_interrupt_disable(); /* disable interrupt */
+    level = rt_spin_lock_irqsave(&_spinlock); /* disable interrupt */
     return level;
 }
 
 void sys_arch_unprotect(sys_prot_t pval)
 {
-    rt_hw_interrupt_enable(pval); /* enable interrupt */
+    rt_spin_unlock_irqrestore(&_spinlock, pval); /* enable interrupt */
 }
 
 void sys_arch_assert(const char *file, int line)

+ 18 - 17
components/net/netdev/src/netdev.c

@@ -34,6 +34,7 @@ struct netdev *netdev_default = RT_NULL;
 /* The global network register callback */
 static netdev_callback_fn g_netdev_register_callback = RT_NULL;
 static netdev_callback_fn g_netdev_default_change_callback = RT_NULL;
+static RT_DEFINE_SPINLOCK(_spinlock);
 
 /**
  * This function will register network interface device and
@@ -96,7 +97,7 @@ int netdev_register(struct netdev *netdev, const char *name, void *user_data)
     /* initialize current network interface device single list */
     rt_slist_init(&(netdev->list));
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
 
     if (netdev_list == RT_NULL)
     {
@@ -108,7 +109,7 @@ int netdev_register(struct netdev *netdev, const char *name, void *user_data)
         rt_slist_append(&(netdev_list->list), &(netdev->list));
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     if (netdev_default == RT_NULL)
     {
@@ -146,7 +147,7 @@ int netdev_unregister(struct netdev *netdev)
         return -RT_ERROR;
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
 
     for (node = &(netdev_list->list); node; node = rt_slist_next(node))
     {
@@ -169,7 +170,7 @@ int netdev_unregister(struct netdev *netdev)
             break;
         }
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     if (netdev_default == RT_NULL)
     {
@@ -219,19 +220,19 @@ struct netdev *netdev_get_first_by_flags(uint16_t flags)
         return RT_NULL;
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
 
     for (node = &(netdev_list->list); node; node = rt_slist_next(node))
     {
         netdev = rt_slist_entry(node, struct netdev, list);
         if (netdev && (netdev->flags & flags) != 0)
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_spinlock, level);
             return netdev;
         }
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return RT_NULL;
 }
@@ -256,19 +257,19 @@ struct netdev *netdev_get_by_ipaddr(ip_addr_t *ip_addr)
         return RT_NULL;
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
 
     for (node = &(netdev_list->list); node; node = rt_slist_next(node))
     {
         netdev = rt_slist_entry(node, struct netdev, list);
         if (netdev && ip_addr_cmp(&(netdev->ip_addr), ip_addr))
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_spinlock, level);
             return netdev;
         }
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return RT_NULL;
 }
@@ -293,19 +294,19 @@ struct netdev *netdev_get_by_name(const char *name)
         return RT_NULL;
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
 
     for (node = &(netdev_list->list); node; node = rt_slist_next(node))
     {
         netdev = rt_slist_entry(node, struct netdev, list);
         if (netdev && (rt_strncmp(netdev->name, name, rt_strlen(netdev->name) < RT_NAME_MAX ? rt_strlen(netdev->name) : RT_NAME_MAX) == 0))
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_spinlock, level);
             return netdev;
         }
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return RT_NULL;
 }
@@ -332,7 +333,7 @@ struct netdev *netdev_get_by_family(int family)
         return RT_NULL;
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
 
     for (node = &(netdev_list->list); node; node = rt_slist_next(node))
     {
@@ -340,7 +341,7 @@ struct netdev *netdev_get_by_family(int family)
         pf = (struct sal_proto_family *) netdev->sal_user_data;
         if (pf && pf->skt_ops && pf->family == family && netdev_is_up(netdev))
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_spinlock, level);
             return netdev;
         }
     }
@@ -351,12 +352,12 @@ struct netdev *netdev_get_by_family(int family)
         pf = (struct sal_proto_family *) netdev->sal_user_data;
         if (pf && pf->skt_ops && pf->sec_family == family && netdev_is_up(netdev))
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_spinlock, level);
             return netdev;
         }
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return RT_NULL;
 }

+ 4 - 2
components/net/sal/impl/af_inet_lwip.c

@@ -79,6 +79,8 @@ struct lwip_sock {
 };
 #endif /* LWIP_VERSION >= 0x20100ff */
 
+static RT_DEFINE_SPINLOCK(_spinlock);
+
 extern struct lwip_sock *lwip_tryget_socket(int s);
 
 static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
@@ -262,7 +264,7 @@ static int inet_poll(struct dfs_file *file, struct rt_pollreq *req)
 
         rt_poll_add(&sock->wait_head, req);
 
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&_spinlock);
 
 #if LWIP_VERSION >= 0x20100ff
         if ((void*)(sock->lastdata.pbuf) || sock->rcvevent)
@@ -282,7 +284,7 @@ static int inet_poll(struct dfs_file *file, struct rt_pollreq *req)
             /* clean error event */
             sock->errevent = 0;
         }
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&_spinlock, level);
     }
 
     return mask;

+ 8 - 7
components/utilities/resource/resource_id.c

@@ -6,6 +6,7 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-08-25     RT-Thread    First version
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 #include <rthw.h>
 #include <rtthread.h>
@@ -19,6 +20,7 @@ void resource_id_init(resource_id_t *mgr, int size, void **res)
         mgr->_res = res;
         mgr->noused = 0;
         mgr->_free = RT_NULL;
+        rt_spin_lock_init(&(mgr->spinlock));
     }
 }
 
@@ -27,21 +29,21 @@ int resource_id_get(resource_id_t *mgr)
     rt_base_t level;
     void **cur;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(mgr->spinlock));
     if (mgr->_free)
     {
         cur = mgr->_free;
         mgr->_free = (void **)*mgr->_free;
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(mgr->spinlock), level);
         return cur - mgr->_res;
     }
     else if (mgr->noused < mgr->size)
     {
         cur = &mgr->_res[mgr->noused++];
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(mgr->spinlock), level);
         return cur - mgr->_res;
     }
-    rt_hw_interrupt_enable(level);
+
     return -1;
 }
 
@@ -52,11 +54,10 @@ void resource_id_put(resource_id_t *mgr, int no)
 
     if (no >= 0 && no < mgr->size)
     {
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&(mgr->spinlock));
         cur = &mgr->_res[no];
         *cur = (void *)mgr->_free;
         mgr->_free = cur;
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(mgr->spinlock), level);
     }
 }
-

+ 3 - 1
components/utilities/resource/resource_id.h

@@ -6,6 +6,7 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-08-25     RT-Thread    First version
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #ifndef  RESOURCE_ID_H__
@@ -14,7 +15,7 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-#define RESOURCE_ID_INIT(size, pool)  {size, pool, 0, RT_NULL}
+#define RESOURCE_ID_INIT(size, pool)  {size, pool, 0, RT_NULL, RT_SPINLOCK_INIT}
 
 typedef struct
 {
@@ -22,6 +23,7 @@ typedef struct
     void **_res;
     int noused;
     void **_free;
+    struct rt_spinlock spinlock;
 } resource_id_t;
 
 void resource_id_init(resource_id_t *mgr, int size, void **res);
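
With the lock folded into the manager, a pool is now self-contained; a short usage sketch (resource_table, my_obj and try_alloc are hypothetical names):

    static void *resource_table[8];
    static resource_id_t mgr = RESOURCE_ID_INIT(8, resource_table);

    void try_alloc(void *my_obj)
    {
        int no = resource_id_get(&mgr);   /* -1 once the pool is exhausted */
        if (no >= 0)
        {
            resource_table[no] = my_obj;
            /* ... use slot no ... */
            resource_id_put(&mgr, no);    /* slot returns to the free list */
        }
    }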

+ 12 - 11
components/utilities/ulog/ulog.c

@@ -138,6 +138,7 @@ static const char * const color_output_info[] =
 
 /* ulog local object */
 static struct rt_ulog ulog = { 0 };
+static RT_DEFINE_SPINLOCK(_spinlock);
 
 rt_size_t ulog_strcpy(rt_size_t cur_len, char *dst, const char *src)
 {
@@ -197,7 +198,7 @@ static void output_unlock(void)
     else
     {
 #ifdef ULOG_USING_ISR_LOG
-        rt_hw_interrupt_enable(ulog.output_locker_isr_lvl);
+        rt_spin_unlock_irqrestore(&_spinlock, ulog.output_locker_isr_lvl);
 #endif
     }
 }
@@ -218,7 +219,7 @@ static void output_lock(void)
     else
     {
 #ifdef ULOG_USING_ISR_LOG
-        ulog.output_locker_isr_lvl = rt_hw_interrupt_disable();
+        ulog.output_locker_isr_lvl = rt_spin_lock_irqsave(&_spinlock);
 #endif
     }
 }
@@ -1273,9 +1274,9 @@ rt_err_t ulog_backend_register(ulog_backend_t backend, const char *name, rt_bool
     backend->out_level = LOG_FILTER_LVL_ALL;
     rt_strncpy(backend->name, name, RT_NAME_MAX);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     rt_slist_append(&ulog.backend_list, &backend->list);
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return RT_EOK;
 }
@@ -1292,9 +1293,9 @@ rt_err_t ulog_backend_unregister(ulog_backend_t backend)
         backend->deinit(backend);
     }
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     rt_slist_remove(&ulog.backend_list, &backend->list);
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return RT_EOK;
 }
@@ -1304,9 +1305,9 @@ rt_err_t ulog_backend_set_filter(ulog_backend_t backend, ulog_backend_filter_t f
     rt_base_t level;
     RT_ASSERT(backend);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     backend->filter = filter;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 
     return RT_EOK;
 }
@@ -1319,18 +1320,18 @@ ulog_backend_t ulog_backend_find(const char *name)
 
     RT_ASSERT(ulog.init_ok);
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_spinlock);
     for (node = rt_slist_first(&ulog.backend_list); node; node = rt_slist_next(node))
     {
         backend = rt_slist_entry(node, struct ulog_backend, list);
         if (rt_strncmp(backend->name, name, RT_NAME_MAX) == 0)
         {
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&_spinlock, level);
             return backend;
         }
     }
 
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
     return RT_NULL;
 }
 

+ 1 - 0
examples/utest/testcases/kernel/atomic_tc.c

@@ -7,6 +7,7 @@
  * Date           Author       Notes
  * 2022-07-27     flybreak     the first version
  * 2023-03-21     WangShun     add atomic test
+ * 2023-09-15     xqyjlj       change stack size in cpu64
  */
 
 #include <rtthread.h>

+ 2 - 0
examples/utest/testcases/kernel/event_tc.c

@@ -6,12 +6,14 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-08-15     liukang     the first version
+ * 2023-09-15     xqyjlj       change stack size in cpu64
  */
 
 #include <rtthread.h>
 #include "utest.h"
 #include <stdlib.h>
 
+#define THREAD_STACKSIZE UTEST_THR_STACK_SIZE
 #define EVENT_FLAG3 (1 << 3)
 #define EVENT_FLAG5 (1 << 5)
 

+ 3 - 0
examples/utest/testcases/kernel/mailbox_tc.c

@@ -6,12 +6,15 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-09-08     liukang     the first version
+ * 2023-09-15     xqyjlj       change stack size in cpu64
  */
 
 #include <rtthread.h>
 #include "utest.h"
 #include <stdlib.h>
 
+#define THREAD_STACKSIZE UTEST_THR_STACK_SIZE
+
 static struct rt_mailbox test_static_mb;
 static char mb_pool[128];
 

+ 9 - 0
examples/utest/testcases/kernel/messagequeue_tc.c

@@ -6,11 +6,15 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-08-28     Sherman      the first version
+ * 2023-09-15     xqyjlj       change stack size in cpu64
+ *                             fix test under SMP
  */
 
 #include <rtthread.h>
 #include "utest.h"
 
+#define THREAD_STACKSIZE UTEST_THR_STACK_SIZE
+
 #define MSG_SIZE    4
 #define MAX_MSGS    5
 
@@ -202,6 +206,11 @@ static rt_err_t utest_tc_init(void)
     if(ret != RT_EOK)
         return -RT_ERROR;
 
+#ifdef RT_USING_SMP
+    rt_thread_control(&mq_send_thread, RT_THREAD_CTRL_BIND_CPU, (void *)0);
+    rt_thread_control(&mq_recv_thread, RT_THREAD_CTRL_BIND_CPU, (void *)0);
+#endif
+
     ret = rt_event_init(&finish_e, "finish", RT_IPC_FLAG_FIFO);
     if(ret != RT_EOK)
             return -RT_ERROR;

+ 13 - 6
examples/utest/testcases/kernel/mutex_tc.c

@@ -6,12 +6,19 @@
  * Change Logs:
  * Date           Author       Notes
  * 2021-09-01     luckyzjq     the first version
+ * 2023-09-15     xqyjlj       change stack size in cpu64
  */
 
 #include <rtthread.h>
 #include <stdlib.h>
 #include "utest.h"
 
+#ifdef ARCH_CPU_64BIT
+#define THREAD_STACKSIZE 4096
+#else
+#define THREAD_STACKSIZE 1024
+#endif
+
 static struct rt_mutex static_mutex;
 
 #ifdef RT_USING_HEAP
@@ -84,7 +91,7 @@ static void test_static_mutex_take(void)
     rt_thread_t tid = rt_thread_create("mutex_th",
                                        static_mutex_take_entry,
                                        &static_mutex,
-                                       2048,
+                                       THREAD_STACKSIZE,
                                        10,
                                        10);
     if (RT_NULL == tid)
@@ -145,7 +152,7 @@ static void test_static_mutex_release(void)
     rt_thread_t tid = rt_thread_create("mutex_th",
                                        static_mutex_release_entry,
                                        &static_mutex,
-                                       2048,
+                                       THREAD_STACKSIZE,
                                        10,
                                        10);
     if (RT_NULL == tid)
@@ -200,7 +207,7 @@ static void test_static_mutex_trytake(void)
     rt_thread_t tid = rt_thread_create("mutex_th",
                                        static_mutex_trytake_entry,
                                        &static_mutex,
-                                       2048,
+                                       THREAD_STACKSIZE,
                                        10,
                                        10);
     if (RT_NULL == tid)
@@ -397,7 +404,7 @@ static void test_dynamic_mutex_take(void)
     rt_thread_t tid = rt_thread_create("mutex_th",
                                        dynamic_mutex_take_entry,
                                        dynamic_mutex,
-                                       2048,
+                                       THREAD_STACKSIZE,
                                        10,
                                        10);
     if (RT_NULL == tid)
@@ -458,7 +465,7 @@ static void test_dynamic_mutex_release(void)
     rt_thread_t tid = rt_thread_create("mutex_th",
                                        dynamic_mutex_release_entry,
                                        dynamic_mutex,
-                                       2048,
+                                       THREAD_STACKSIZE,
                                        10,
                                        10);
     if (RT_NULL == tid)
@@ -513,7 +520,7 @@ static void test_dynamic_mutex_trytake(void)
     rt_thread_t tid = rt_thread_create("mutex_th",
                                        dynamic_mutex_trytake_entry,
                                        dynamic_mutex,
-                                       2048,
+                                       THREAD_STACKSIZE,
                                        10,
                                        10);
     if (RT_NULL == tid)

+ 0 - 7
include/rtatomic.h

@@ -27,13 +27,6 @@ void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr);
 rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr);
 rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *expected, rt_atomic_t desired);
 
-/* To detect stdatomic */
-#if !defined(RT_USING_HW_ATOMIC) && !defined(RT_USING_STDC_ATOMIC)
-#if defined(__GNUC__) && defined(RT_USING_LIBC) && !defined(__STDC_NO_ATOMICS__)
-#define RT_USING_STDC_ATOMIC
-#endif /* __GNUC__ && .. */
-#endif /* !RT_USING_HW_ATOMIC && !RT_USING_STDC_ATOMIC */
-
 #if defined(RT_USING_HW_ATOMIC)
 #define rt_atomic_load(ptr) rt_hw_atomic_load(ptr)
 #define rt_atomic_store(ptr, v) rt_hw_atomic_store(ptr, v)

+ 81 - 37
include/rtdef.h

@@ -49,6 +49,7 @@
  * 2022-12-20     Meco Man     add const name for rt_object
  * 2023-04-01     Chushicheng  change version number to v5.0.1
  * 2023-05-20     Bernard      add stdc atomic detection.
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2023-09-17     Meco Man     add RT_USING_LIBC_ISO_ONLY macro
  * 2023-10-10     Chushicheng  change version number to v5.1.0
  * 2023-10-11     zmshahaha    move specific devices related and driver to components/drivers
@@ -146,22 +147,16 @@ typedef rt_base_t                       rt_flag_t;      /**< Type for flags */
 typedef rt_ubase_t                      rt_dev_t;       /**< Type for device */
 typedef rt_base_t                       rt_off_t;       /**< Type for offset */
 
-#if !defined(__cplusplus)
+#ifdef __cplusplus
+    typedef rt_base_t rt_atomic_t;
+#else
 #if defined(RT_USING_HW_ATOMIC)
     typedef rt_base_t rt_atomic_t;
 #elif defined(RT_USING_STDC_ATOMIC)
     #include <stdatomic.h>
     typedef atomic_size_t rt_atomic_t;
 #else
-
-    /* To detect std atomic */
-    #if defined(RT_USING_LIBC) && defined(__GNUC__) && !defined(__STDC_NO_ATOMICS__)
-        #include <stdatomic.h>
-        typedef atomic_size_t rt_atomic_t;
-    #else
-        typedef rt_base_t rt_atomic_t;
-    #endif /* __GNUC__ && !__STDC_NO_ATOMICS__ */
-
+    typedef rt_base_t rt_atomic_t;
 #endif /* RT_USING_STDC_ATOMIC */
 #endif /* __cplusplus */
 
@@ -179,10 +174,12 @@ typedef rt_base_t                       rt_off_t;       /**< Type for offset */
 #define RT_UINT8_MAX                    UINT8_MAX       /**< Maximum number of UINT8 */
 #define RT_UINT16_MAX                   UINT16_MAX      /**< Maximum number of UINT16 */
 #define RT_UINT32_MAX                   UINT32_MAX      /**< Maximum number of UINT32 */
+#define RT_UINT64_MAX                   UINT64_MAX      /**< Maximum number of UINT64 */
 #else
 #define RT_UINT8_MAX                    0xff            /**< Maximum number of UINT8 */
 #define RT_UINT16_MAX                   0xffff          /**< Maximum number of UINT16 */
 #define RT_UINT32_MAX                   0xffffffff      /**< Maximum number of UINT32 */
+#define RT_UINT64_MAX                   0xffffffffffffffff /**< Maximum number of UINT64 */
 #endif /* RT_USING_LIBC */
 
 #define RT_TICK_MAX                     RT_UINT32_MAX   /**< Maximum number of tick */
@@ -492,6 +489,34 @@ struct rt_slist_node
 };
 typedef struct rt_slist_node rt_slist_t;                /**< Type for single list. */
 
+#ifdef RT_USING_SMP
+#include <cpuport.h> /* for spinlock from arch */
+
+struct rt_spinlock
+{
+    rt_hw_spinlock_t lock;
+#if defined(RT_DEBUGING_SPINLOCK)
+    void *owner;
+    void *pc;
+#endif /* RT_DEBUGING_SPINLOCK */
+};
+typedef struct rt_spinlock rt_spinlock_t;
+
+#ifndef RT_SPINLOCK_INIT
+#define RT_SPINLOCK_INIT {{0}} /* default */
+#endif /* RT_SPINLOCK_INIT */
+
+#else
+typedef rt_ubase_t rt_spinlock_t;
+struct rt_spinlock
+{
+    rt_spinlock_t lock;
+};
+#define RT_SPINLOCK_INIT {0}
+#endif /* RT_USING_SMP */
+
+#define RT_DEFINE_SPINLOCK(x)  struct rt_spinlock x = RT_SPINLOCK_INIT
+
 /**
  * @addtogroup KernelObject
  */
@@ -521,7 +546,7 @@ struct rt_object
 #endif /* RT_USING_MODULE */
 
 #ifdef RT_USING_SMART
-    int         lwp_ref_count;                           /**< ref count for lwp */
+    rt_atomic_t lwp_ref_count;                           /**< ref count for lwp */
 #endif /* RT_USING_SMART */
 
     rt_list_t   list;                                    /**< list node of kernel object */
@@ -574,6 +599,7 @@ struct rt_object_information
     enum rt_object_class_type type;                     /**< object class type */
     rt_list_t                 object_list;              /**< object list */
     rt_size_t                 object_size;              /**< object size */
+    struct rt_spinlock        spinlock;
 };
 
 /**
@@ -757,23 +783,25 @@ typedef struct rt_cpu_usage_stats *rt_cpu_usage_stats_t;
  */
 struct rt_cpu
 {
-    struct rt_thread *current_thread;
-    struct rt_thread *idle_thread;
-    rt_uint16_t irq_nest;
-    rt_uint8_t  irq_switch_flag;
+    struct rt_thread            *current_thread;
+    struct rt_thread            *idle_thread;
+    rt_atomic_t                 irq_nest;
+    rt_uint8_t                  irq_switch_flag;
 
-    rt_uint8_t current_priority;
-    rt_list_t priority_table[RT_THREAD_PRIORITY_MAX];
+    rt_uint8_t                  current_priority;
+    rt_list_t                   priority_table[RT_THREAD_PRIORITY_MAX];
 #if RT_THREAD_PRIORITY_MAX > 32
-    rt_uint32_t priority_group;
-    rt_uint8_t ready_table[32];
+    rt_uint32_t                 priority_group;
+    rt_uint8_t                  ready_table[32];
 #else
-    rt_uint32_t priority_group;
+    rt_uint32_t                 priority_group;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
 
-    rt_tick_t tick;
+    rt_atomic_t                 tick;
+
+    struct rt_spinlock          spinlock;
 #ifdef RT_USING_SMART
-    struct rt_cpu_usage_stats cpu_stat;
+    struct rt_cpu_usage_stats   cpu_stat;
 #endif
 };
 typedef struct rt_cpu *rt_cpu_t;
@@ -865,6 +893,7 @@ struct rt_thread
 {
     struct rt_object            parent;
     rt_list_t                   tlist;                  /**< the thread list */
+    rt_list_t                   tlist_schedule;         /**< the thread list in scheduler */
 
     /* stack point and entry */
     void                        *sp;                    /**< stack point */
@@ -882,9 +911,8 @@ struct rt_thread
     rt_uint8_t                  bind_cpu;               /**< thread is bind to cpu */
     rt_uint8_t                  oncpu;                  /**< process on cpu */
 
-    rt_uint16_t                 scheduler_lock_nest;    /**< scheduler lock count */
-    rt_int16_t                  cpus_lock_nest;         /**< cpus lock count */
-    rt_uint16_t                 critical_lock_nest;     /**< critical lock count */
+    rt_atomic_t                 cpus_lock_nest;         /**< cpus lock count */
+    rt_atomic_t                 critical_lock_nest;     /**< critical lock count */
 #endif /*RT_USING_SMP*/
 
     /* priority */
@@ -919,8 +947,8 @@ struct rt_thread
     void                        *si_list;               /**< the signal infor list */
 #endif /* RT_USING_SIGNALS */
 
-    rt_ubase_t                  init_tick;              /**< thread's initialized tick */
-    rt_ubase_t                  remaining_tick;         /**< remaining tick */
+    rt_atomic_t                 init_tick;              /**< thread's initialized tick */
+    rt_atomic_t                 remaining_tick;         /**< remaining tick */
 
 #ifdef RT_USING_CPU_USAGE
     rt_uint64_t                 duration_tick;          /**< cpu usage tick */
@@ -949,8 +977,10 @@ struct rt_thread
     struct lwp_thread_signal    signal;                 /**< lwp signal for user-space thread */
     struct rt_user_context      user_ctx;               /**< user space context */
     struct rt_wakeup            wakeup;                 /**< wakeup data */
-    int                         exit_request;
-    int                         tid;
+    int                         exit_request;           /**< pending exit request of thread */
+    int                         tid;                    /**< thread ID used by process */
+    int                         tid_ref_count;          /**< reference of tid */
+    void                        *susp_recycler;         /**< suspended recycler on this thread */
 
     rt_uint64_t                 user_time;
     rt_uint64_t                 system_time;
@@ -967,6 +997,8 @@ struct rt_thread
     int                         *clear_child_tid;
 #endif /* ARCH_MM_MMU */
 #endif /* RT_USING_SMART */
+    rt_atomic_t                 ref_count;
+    struct rt_spinlock          spinlock;
     rt_ubase_t                  user_data;              /**< private user data beyond this thread */
 };
 typedef struct rt_thread *rt_thread_t;
@@ -977,6 +1009,11 @@ typedef struct rt_thread *rt_thread_t;
 
 /**@}*/
 
+#define rt_atomic_inc(v)                rt_atomic_add((v), 1)
+#define rt_atomic_dec(v)                rt_atomic_sub((v), 1)
+#define rt_get_thread_struct(object)    do { rt_atomic_inc(&(object)->ref_count); } while(0)
+#define rt_put_thread_struct(object)    do { rt_atomic_dec(&(object)->ref_count); } while(0)
+
 /**
  * @addtogroup IPC
  */
@@ -1016,6 +1053,7 @@ struct rt_semaphore
 
     rt_uint16_t          value;                         /**< value of semaphore. */
     rt_uint16_t          reserved;                      /**< reserved field */
+    struct rt_spinlock   spinlock;
 };
 typedef struct rt_semaphore *rt_sem_t;
 #endif /* RT_USING_SEMAPHORE */
@@ -1035,6 +1073,7 @@ struct rt_mutex
 
     struct rt_thread    *owner;                         /**< current owner of mutex */
     rt_list_t            taken_list;                    /**< the object list taken by thread */
+    struct rt_spinlock   spinlock;
 };
 typedef struct rt_mutex *rt_mutex_t;
 #endif /* RT_USING_MUTEX */
@@ -1055,6 +1094,7 @@ struct rt_event
     struct rt_ipc_object parent;                        /**< inherit from ipc_object */
 
     rt_uint32_t          set;                           /**< event set */
+    struct rt_spinlock   spinlock;
 };
 typedef struct rt_event *rt_event_t;
 #endif /* RT_USING_EVENT */
@@ -1076,6 +1116,7 @@ struct rt_mailbox
     rt_uint16_t          out_offset;                    /**< output offset of the message buffer */
 
     rt_list_t            suspend_sender_thread;         /**< sender thread suspended on this mailbox */
+    struct rt_spinlock   spinlock;
 };
 typedef struct rt_mailbox *rt_mailbox_t;
 #endif /* RT_USING_MAILBOX */
@@ -1100,6 +1141,7 @@ struct rt_messagequeue
     void                *msg_queue_free;                /**< pointer indicated the free node of queue */
 
     rt_list_t            suspend_sender_thread;         /**< sender thread suspended on this message queue */
+    struct rt_spinlock   spinlock;
 };
 typedef struct rt_messagequeue *rt_mq_t;
 #endif /* RT_USING_MESSAGEQUEUE */
@@ -1189,18 +1231,19 @@ struct rt_memheap
  */
 struct rt_mempool
 {
-    struct rt_object parent;                            /**< inherit from rt_object */
+    struct rt_object    parent;                            /**< inherit from rt_object */
 
-    void            *start_address;                     /**< memory pool start */
-    rt_size_t        size;                              /**< size of memory pool */
+    void                *start_address;                     /**< memory pool start */
+    rt_size_t           size;                              /**< size of memory pool */
 
-    rt_size_t        block_size;                        /**< size of memory blocks */
-    rt_uint8_t      *block_list;                        /**< memory blocks list */
+    rt_size_t           block_size;                        /**< size of memory blocks */
+    rt_uint8_t          *block_list;                        /**< memory blocks list */
 
-    rt_size_t        block_total_count;                 /**< numbers of memory block */
-    rt_size_t        block_free_count;                  /**< numbers of free memory block */
+    rt_size_t           block_total_count;                 /**< numbers of memory block */
+    rt_size_t           block_free_count;                  /**< numbers of free memory block */
 
-    rt_list_t        suspend_thread;                    /**< threads pended on this resource */
+    rt_list_t           suspend_thread;                    /**< threads pended on this resource */
+    struct rt_spinlock  spinlock;
 };
 typedef struct rt_mempool *rt_mp_t;
 #endif /* RT_USING_MEMPOOL */
@@ -1327,6 +1370,7 @@ struct rt_wqueue
 {
     rt_uint32_t flag;
     rt_list_t waiting_list;
+    struct rt_spinlock spinlock;
 };
 typedef struct rt_wqueue rt_wqueue_t;
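
Since struct rt_spinlock now lives in rtdef.h for both SMP and UP builds, locks can be defined statically or embedded in objects with the same two idioms everywhere; a sketch (my_dev is a hypothetical structure):

    static RT_DEFINE_SPINLOCK(_lock);      /* file scope, statically initialized */

    struct my_dev
    {
        struct rt_spinlock lock;           /* embedded, like the IPC objects above */
    };

    void my_dev_init(struct my_dev *dev)
    {
        rt_spin_lock_init(&dev->lock);     /* runtime init; a no-op on UP builds */
    }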
 

+ 14 - 15
include/rthw.h

@@ -13,6 +13,7 @@
  * 2018-11-17     Jesven       add rt_hw_spinlock_t
  *                             add smp support
  * 2019-05-18     Bernard      add empty definition for not enable cache case
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2023-10-16     Shell        Support a new backtrace framework
  */
 
@@ -111,6 +112,9 @@ struct rt_irq_desc
 #ifdef RT_USING_INTERRUPT_INFO
     char             name[RT_NAME_MAX];
     rt_uint32_t      counter;
+#ifdef RT_USING_SMP
+    rt_ubase_t       cpu_counter[RT_CPUS_NR];
+#endif
 #endif
 };
 
@@ -129,11 +133,18 @@ rt_isr_handler_t rt_hw_interrupt_install(int              vector,
 rt_base_t rt_hw_local_irq_disable();
 void rt_hw_local_irq_enable(rt_base_t level);
 
+rt_base_t rt_cpus_lock(void);
+void rt_cpus_unlock(rt_base_t level);
+
 #define rt_hw_interrupt_disable rt_cpus_lock
 #define rt_hw_interrupt_enable rt_cpus_unlock
 #else
 rt_base_t rt_hw_interrupt_disable(void);
 void rt_hw_interrupt_enable(rt_base_t level);
+
+#define rt_hw_local_irq_disable rt_hw_interrupt_disable
+#define rt_hw_local_irq_enable rt_hw_interrupt_enable
+
 #endif /*RT_USING_SMP*/
 rt_bool_t rt_hw_interrupt_is_disabled(void);
 
@@ -187,25 +198,18 @@ void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask);
 
 #ifdef RT_USING_SMP
 
-struct rt_spinlock
-{
-    rt_hw_spinlock_t lock;
-};
-
 void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
 void rt_hw_spin_unlock(rt_hw_spinlock_t *lock);
 
 extern rt_hw_spinlock_t _cpus_lock;
-extern rt_hw_spinlock_t _rt_critical_lock;
 
 #define __RT_HW_SPIN_LOCK_INITIALIZER(lockname) {0}
 
 #define __RT_HW_SPIN_LOCK_UNLOCKED(lockname)    \
     (rt_hw_spinlock_t) __RT_HW_SPIN_LOCK_INITIALIZER(lockname)
 
-#define RT_DEFINE_SPINLOCK(x)  rt_hw_spinlock_t x = __RT_HW_SPIN_LOCK_UNLOCKED(x)
-#define RT_DECLARE_SPINLOCK(x)
+#define RT_DEFINE_HW_SPINLOCK(x)  rt_hw_spinlock_t x = __RT_HW_SPIN_LOCK_UNLOCKED(x)
 
 /**
  * boot secondary cpu
@@ -218,17 +222,12 @@ void rt_hw_secondary_cpu_up(void);
 void rt_hw_secondary_cpu_idle_exec(void);
 #else
 
-#define RT_DEFINE_SPINLOCK(x)    rt_ubase_t x
-#define RT_DECLARE_SPINLOCK(x)
+#define RT_DEFINE_HW_SPINLOCK(x)    rt_ubase_t x
 
 #define rt_hw_spin_lock(lock)     *(lock) = rt_hw_interrupt_disable()
 #define rt_hw_spin_unlock(lock)   rt_hw_interrupt_enable(*(lock))
 
-typedef rt_ubase_t rt_spinlock_t;
-struct rt_spinlock
-{
-    rt_spinlock_t lock;
-};
+
 #endif
 
 #ifndef RT_USING_CACHE

+ 5 - 14
include/rtthread.h

@@ -553,11 +553,11 @@ void rt_spin_unlock(struct rt_spinlock *lock);
 rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
 void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);
 #else
-#define rt_spin_lock_init(lock)                 /* nothing */
-#define rt_spin_lock(lock)                      rt_enter_critical()
-#define rt_spin_unlock(lock)                    rt_exit_critical()
-#define rt_spin_lock_irqsave(lock)              rt_hw_interrupt_disable()
-#define rt_spin_unlock_irqrestore(lock, level)  rt_hw_interrupt_enable(level)
+#define rt_spin_lock_init(lock)                  { RT_UNUSED(lock);                                 }
+#define rt_spin_lock(lock)                       { RT_UNUSED(lock);                                 }
+#define rt_spin_unlock(lock)                     { RT_UNUSED(lock);                                 }
+#define rt_spin_lock_irqsave(lock)              ({ RT_UNUSED(lock); rt_hw_interrupt_disable();      })
+#define rt_spin_unlock_irqrestore(lock, level)   { RT_UNUSED(lock); rt_hw_interrupt_enable(level);  }
 #endif /* RT_USING_SMP */
 
 /**@}*/
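
Note that the UP fallback for rt_spin_lock_irqsave is a GNU statement expression, ({ ... }), which evaluates to its last expression; that is what keeps the macro usable as an rvalue even though the lock argument is unused. A sketch, assuming a non-SMP build (lock is any struct rt_spinlock):

    rt_base_t level = rt_spin_lock_irqsave(&lock);  /* expands to rt_hw_interrupt_disable() */
    /* ... critical section ... */
    rt_spin_unlock_irqrestore(&lock, level);        /* expands to rt_hw_interrupt_enable(level) */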
@@ -742,14 +742,11 @@ if (!(EX))                                                                    \
 #define RT_DEBUG_NOT_IN_INTERRUPT                                             \
 do                                                                            \
 {                                                                             \
-    rt_base_t level;                                                          \
-    level = rt_hw_interrupt_disable();                                        \
     if (rt_interrupt_get_nest() != 0)                                         \
     {                                                                         \
         rt_kprintf("Function[%s] shall not be used in ISR\n", __FUNCTION__);  \
         RT_ASSERT(0)                                                          \
     }                                                                         \
-    rt_hw_interrupt_enable(level);                                            \
 }                                                                             \
 while (0)
 
@@ -760,8 +757,6 @@ while (0)
 #define RT_DEBUG_IN_THREAD_CONTEXT                                            \
 do                                                                            \
 {                                                                             \
-    rt_base_t level;                                                          \
-    level = rt_hw_interrupt_disable();                                        \
     if (rt_thread_self() == RT_NULL)                                          \
     {                                                                         \
         rt_kprintf("Function[%s] shall not be used before scheduler start\n", \
@@ -769,7 +764,6 @@ do                                                                            \
         RT_ASSERT(0)                                                          \
     }                                                                         \
     RT_DEBUG_NOT_IN_INTERRUPT;                                                \
-    rt_hw_interrupt_enable(level);                                            \
 }                                                                             \
 while (0)
 
@@ -785,9 +779,7 @@ do                                                                            \
     if (need_check)                                                           \
     {                                                                         \
         rt_bool_t interrupt_disabled;                                         \
-        rt_base_t level;                                                      \
         interrupt_disabled = rt_hw_interrupt_is_disabled();                   \
-        level = rt_hw_interrupt_disable();                                    \
         if (rt_critical_level() != 0)                                         \
         {                                                                     \
             rt_kprintf("Function[%s]: scheduler is not available\n",          \
@@ -801,7 +793,6 @@ do                                                                            \
             RT_ASSERT(0)                                                      \
         }                                                                     \
         RT_DEBUG_IN_THREAD_CONTEXT;                                           \
-        rt_hw_interrupt_enable(level);                                        \
     }                                                                         \
 }                                                                             \
 while (0)

+ 1 - 0
libcpu/Kconfig

@@ -192,6 +192,7 @@ config ARCH_ARMV8
     bool
     select ARCH_ARM
     select ARCH_ARM_MMU
+    select RT_USING_CPU_FFS
 
 config ARCH_MIPS
     bool

+ 29 - 5
libcpu/aarch64/common/context_gcc.S

@@ -9,10 +9,14 @@
  * 2023-06-24     WangXiaoyao  Support backtrace for user thread
  */
 
+#ifndef __ASSEMBLY__
+#define __ASSEMBLY__
+#endif
+
 #include "rtconfig.h"
 #include "asm-generic.h"
-
 #include "asm-fpu.h"
+#include "armv8.h"
 
 .text
 .weak rt_hw_cpu_id_set
@@ -279,12 +283,30 @@ START_POINT_END(_thread_start)
 1:
 .endm
 
+.macro RESTORE_USER_CTX, ctx
+    LDR     X1, [\ctx, #CONTEXT_OFFSET_SPSR_EL1]
+    AND     X1, X1, 0x1f
+    CMP     X1, XZR
+
+    BNE     1f
+    BL      lwp_uthread_ctx_restore
+1:
+.endm
+
 #ifdef RT_USING_SMP
 #define rt_hw_interrupt_disable rt_hw_local_irq_disable
 #define rt_hw_interrupt_enable rt_hw_local_irq_enable
 #endif
 
 .text
+
+.global rt_hw_interrupt_is_disabled
+rt_hw_interrupt_is_disabled:
+    MRS     X0, DAIF
+    TST     X0, #0xc0
+    CSET    X0, NE
+    RET
+
 /*
  * rt_base_t rt_hw_interrupt_disable();
  */
@@ -387,8 +409,7 @@ rt_hw_context_switch_interrupt:
 vector_fiq:
     B       .
 
-.globl vector_irq
-vector_irq:
+START_POINT(vector_irq)
     SAVE_CONTEXT
     STP     X0, X1, [SP, #-0x10]!   /* X0 is thread sp */
 
@@ -399,13 +420,15 @@ vector_irq:
 #endif
     BL      rt_hw_trap_irq
 #ifdef RT_USING_LWP
-    BL      lwp_uthread_ctx_restore
+    LDP     X0, X1, [SP]
+    RESTORE_USER_CTX X0
 #endif
     BL      rt_interrupt_leave
 
     LDP     X0, X1, [SP], #0x10
     BL      rt_scheduler_do_irq_switch
     B       rt_hw_context_switch_exit
+START_POINT_END(vector_irq)
 
 .global rt_hw_context_switch_exit
 rt_hw_context_switch_exit:
@@ -540,7 +563,8 @@ START_POINT(vector_exception)
 
     BL      rt_hw_trap_exception
 #ifdef RT_USING_LWP
-    BL      lwp_uthread_ctx_restore
+    LDP     X0, X1, [SP]
+    RESTORE_USER_CTX X0
 #endif
 
     LDP     X0, X1, [SP], #0x10

+ 48 - 35
libcpu/aarch64/common/cpu.c

@@ -59,52 +59,65 @@ rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
 };
 #endif /* RT_USING_SMART */
 
-void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+typedef rt_hw_spinlock_t arch_spinlock_t;
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-    lock->slock = 0;
+    unsigned int tmp;
+
+    asm volatile(
+    "   sevl\n"
+    "1: wfe\n"
+    "2: ldaxr   %w0, %1\n"
+    "   cbnz    %w0, 1b\n"
+    "   stxr    %w0, %w2, %1\n"
+    "   cbnz    %w0, 2b\n"
+    : "=&r" (tmp), "+Q" (lock->lock)
+    : "r" (1)
+    : "cc", "memory");
 }
 
-#define TICKET_SHIFT 16
-void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
     unsigned int tmp;
-    struct __arch_tickets lockval, newval;
 
     asm volatile(
-        /* Atomically increment the next ticket. */
-        "   prfm    pstl1strm, %3\n"
-        "1: ldaxr   %w0, %3\n"
-        "   add %w1, %w0, %w5\n"
-        "   stxr    %w2, %w1, %3\n"
-        "   cbnz    %w2, 1b\n"
-        /* Did we get the lock? */
-        "   eor %w1, %w0, %w0, ror #16\n"
-        "   cbz %w1, 3f\n"
-        /*
-         * No: spin on the owner. Send a local event to avoid missing an
-         * unlock before the exclusive load.
-         */
-        "   sevl\n"
-        "2: wfe\n"
-        "   ldaxrh  %w2, %4\n"
-        "   eor %w1, %w2, %w0, lsr #16\n"
-        "   cbnz    %w1, 2b\n"
-        /* We got the lock. Critical section starts here. */
-        "3:"
-        : "=&r"(lockval), "=&r"(newval), "=&r"(tmp), "+Q"(*lock)
-        : "Q"(lock->tickets.owner), "I"(1 << TICKET_SHIFT)
-        : "memory");
-    rt_hw_dmb();
+    "  ldaxr   %w0, %1\n"
+    "  cbnz    %w0, 1f\n"
+    "  stxr    %w0, %w2, %1\n"
+    "1:\n"
+    : "=&r" (tmp), "+Q" (lock->lock)
+    : "r" (1)
+    : "cc", "memory");
+
+    return !tmp;
 }
 
-void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-    rt_hw_dmb();
     asm volatile(
-        "   stlrh   %w1, %0\n"
-        : "=Q"(lock->tickets.owner)
-        : "r"(lock->tickets.owner + 1)
-        : "memory");
+    " stlr    %w1, %0\n"
+    : "=Q" (lock->lock) : "r" (0) : "memory");
+}
+
+void rt_hw_spin_lock_init(arch_spinlock_t *lock)
+{
+    lock->lock = 0;
+}
+
+void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
+{
+    arch_spin_lock(lock);
+}
+
+void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
+{
+    arch_spin_unlock(lock);
+}
+
+rt_bool_t rt_hw_spin_trylock(rt_hw_spinlock_t *lock)
+{
+    return arch_spin_trylock(lock);
 }
 
 static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
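
The ticket lock is replaced by a plain exclusive-monitor lock: ldaxr/stxr form an atomic test-and-set with acquire semantics, wfe parks a waiting core until the unlocking stlr clears its exclusive monitor and wakes it, and the new trylock never waits. A usage sketch for the trylock (opportunistic_path is hypothetical; assumes RT_USING_SMP):

    static RT_DEFINE_HW_SPINLOCK(_hwlock);

    void opportunistic_path(void)
    {
        if (rt_hw_spin_trylock(&_hwlock))   /* RT_TRUE on success, no spinning */
        {
            /* ... short critical section ... */
            rt_hw_spin_unlock(&_hwlock);
        }
        /* else: defer the work instead of busy-waiting */
    }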

+ 2 - 6
libcpu/aarch64/common/cpuport.h

@@ -14,12 +14,8 @@
 #include <rtdef.h>
 
 #ifdef RT_USING_SMP
-typedef union {
-    unsigned long slock;
-    struct __arch_tickets {
-        unsigned short owner;
-        unsigned short next;
-    } tickets;
+typedef struct {
+    volatile unsigned int lock;
 } rt_hw_spinlock_t;
 #endif
 

+ 47 - 2
libcpu/aarch64/common/interrupt.c

@@ -38,7 +38,7 @@ extern void *system_vectors;
 #ifdef RT_USING_SMP
 #define rt_interrupt_nest rt_cpu_self()->irq_nest
 #else
-extern volatile rt_uint8_t rt_interrupt_nest;
+extern volatile rt_atomic_t rt_interrupt_nest;
 #endif
 
 #ifdef SOC_BCM283x
@@ -85,7 +85,7 @@ void rt_hw_interrupt_init(void)
     }
 
     /* init interrupt nest, and context in thread sp */
-    rt_interrupt_nest = 0;
+    rt_atomic_store(&rt_interrupt_nest, 0);
     rt_interrupt_from_thread = 0;
     rt_interrupt_to_thread = 0;
     rt_thread_switch_interrupt_flag = 0;
@@ -415,3 +415,48 @@ void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler)
     rt_hw_interrupt_install(ipi_vector, ipi_isr_handler, 0, "IPI_HANDLER");
 }
 #endif
+
+#if defined(FINSH_USING_MSH) && defined(RT_USING_INTERRUPT_INFO)
+int list_isr(void)
+{
+    int idx;
+
+    rt_kprintf("%-*.*s nr   handler            param              counter         ", RT_NAME_MAX, RT_NAME_MAX, "irq");
+#ifdef RT_USING_SMP
+    for (int i = 0; i < RT_CPUS_NR; i++)
+    {
+        rt_kprintf(" cpu%2d  ", i);
+    }
+#endif
+    rt_kprintf("\n");
+    for (int i = 0; i < RT_NAME_MAX; i++)
+    {
+        rt_kprintf("-");
+    }
+    rt_kprintf(" ---- ------------------ ------------------ ----------------");
+#ifdef RT_USING_SMP
+    for (int i = 0; i < RT_CPUS_NR; i++)
+    {
+        rt_kprintf(" -------");
+    }
+#endif
+    rt_kprintf("\n");
+    for (idx = 0; idx < MAX_HANDLERS; idx++)
+    {
+        if (isr_table[idx].handler != RT_NULL)
+        {
+            rt_kprintf("%*.s %4d %p %p %16d", RT_NAME_MAX, isr_table[idx].name, idx, isr_table[idx].handler,
+                       isr_table[idx].param, isr_table[idx].counter);
+#ifdef RT_USING_SMP
+            for (int i = 0; i < RT_CPUS_NR; i++)
+                 rt_kprintf(" %7lu", isr_table[idx].cpu_counter[i]);
+#endif
+            rt_kprintf("\n");
+        }
+    }
+    return 0;
+}
+
+#include "finsh.h"
+MSH_CMD_EXPORT(list_isr, list isr)
+#endif

+ 3 - 0
libcpu/aarch64/common/trap.c

@@ -274,6 +274,9 @@ void rt_hw_trap_irq(void)
     isr_func = isr_table[ir_self].handler;
 #ifdef RT_USING_INTERRUPT_INFO
     isr_table[ir_self].counter++;
+#ifdef RT_USING_SMP
+    isr_table[ir_self].cpu_counter[rt_hw_cpu_id()]++;
+#endif
 #endif
     if (isr_func)
     {

+ 7 - 0
src/Kconfig

@@ -219,6 +219,13 @@ menuconfig RT_USING_DEBUG
             bool "Enable page leaking tracer"
             depends on ARCH_MM_MMU
             default n
+
+        if RT_USING_SMP
+            config RT_DEBUGING_SPINLOCK
+            bool "Enable spinlock debugging"
+            default n
+        endif
+
     endif
 
 menu "Inter-Thread communication"

+ 16 - 20
src/clock.c

@@ -15,16 +15,18 @@
  * 2018-11-22     Jesven       add per cpu tick
  * 2020-12-29     Meco Man     implement rt_tick_get_millisecond()
  * 2021-06-01     Meco Man     add critical section projection for rt_tick_increase()
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2023-10-16     RiceChen     fix: only the main core detection rt_timer_check(), in SMP mode
  */
 
 #include <rthw.h>
 #include <rtthread.h>
+#include <rtatomic.h>
 
 #ifdef RT_USING_SMP
 #define rt_tick rt_cpu_index(0)->tick
 #else
-static volatile rt_tick_t rt_tick = 0;
+static volatile rt_atomic_t rt_tick = 0;
 #endif /* RT_USING_SMP */
 
 #ifndef __on_rt_tick_hook
@@ -67,7 +69,7 @@ void rt_tick_sethook(void (*hook)(void))
 rt_tick_t rt_tick_get(void)
 {
     /* return the global tick */
-    return rt_tick;
+    return (rt_tick_t)rt_atomic_load(&(rt_tick));
 }
 RTM_EXPORT(rt_tick_get);
 
@@ -78,11 +80,7 @@ RTM_EXPORT(rt_tick_get);
  */
 void rt_tick_set(rt_tick_t tick)
 {
-    rt_base_t level;
-
-    level = rt_hw_interrupt_disable();
-    rt_tick = tick;
-    rt_hw_interrupt_enable(level);
+    rt_atomic_store(&(rt_tick), tick);
 }
 
 /**
@@ -93,34 +91,32 @@ void rt_tick_increase(void)
 {
     struct rt_thread *thread;
     rt_base_t level;
+    rt_atomic_t oldval = 0;
 
     RT_OBJECT_HOOK_CALL(rt_tick_hook, ());
-
-    level = rt_hw_interrupt_disable();
-
     /* increase the global tick */
 #ifdef RT_USING_SMP
-    rt_cpu_self()->tick ++;
+    rt_atomic_add(&(rt_cpu_self()->tick), 1);
 #else
-    ++ rt_tick;
+    rt_atomic_add(&(rt_tick), 1);
 #endif /* RT_USING_SMP */
 
     /* check time slice */
     thread = rt_thread_self();
-
-    -- thread->remaining_tick;
-    if (thread->remaining_tick == 0)
+    rt_get_thread_struct(thread);
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
+    rt_atomic_sub(&(thread->remaining_tick), 1);
+    if (rt_atomic_compare_exchange_strong(&(thread->remaining_tick), &oldval, thread->init_tick))
     {
-        /* change to initialized tick */
-        thread->remaining_tick = thread->init_tick;
         thread->stat |= RT_THREAD_STAT_YIELD;
-
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+        rt_put_thread_struct(thread);
         rt_schedule();
     }
     else
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+        rt_put_thread_struct(thread);
     }
 
     /* check timer */

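The rewritten rt_tick_increase() replaces the IRQ-off window with an atomic decrement followed by a compare-and-exchange: only the path that observes the counter at exactly zero reloads the time slice and marks the thread for yield. A self-contained C11 sketch of that pattern (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* returns true exactly once per slice expiry, reloading the slice
     * atomically in the same step */
    static bool slice_expired(atomic_long *remaining, long init_tick)
    {
        long expected = 0;

        atomic_fetch_sub_explicit(remaining, 1, memory_order_relaxed);
        return atomic_compare_exchange_strong_explicit(
            remaining, &expected, init_tick,
            memory_order_acq_rel, memory_order_acquire);
    }
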
+ 5 - 1
src/components.c

@@ -15,6 +15,7 @@
  *                             in some IDEs.
  * 2015-07-29     Arda.Fu      Add support to use RT_USING_USER_MAIN with IAR
  * 2018-11-22     Jesven       Add secondary cpu boot up
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -235,7 +236,10 @@ void rt_application_init(void)
  */
 int rtthread_startup(void)
 {
-    rt_hw_interrupt_disable();
+#ifdef RT_USING_SMP
+    rt_hw_spin_lock_init(&_cpus_lock);
+#endif
+    rt_hw_local_irq_disable();
 
     /* board level initialization
      * NOTE: please initialize heap inside board initialization.

+ 56 - 65
src/cpu.c

@@ -6,6 +6,7 @@
  * Change Logs:
  * Date           Author       Notes
  * 2018-10-30     Bernard      The first version
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 #include <rthw.h>
 #include <rtthread.h>
@@ -17,57 +18,19 @@
 #ifdef RT_USING_SMP
 static struct rt_cpu _cpus[RT_CPUS_NR];
 rt_hw_spinlock_t _cpus_lock;
+#if defined(RT_DEBUGING_SPINLOCK)
+void *_cpus_lock_owner = 0;
+void *_cpus_lock_pc = 0;
 
-/*
- * @brief   disable scheduler
- */
-static void _cpu_preempt_disable(void)
-{
-    rt_base_t level;
-    struct rt_thread *current_thread;
-
-    /* disable interrupt */
-    level = rt_hw_local_irq_disable();
+#define __OWNER_MAGIC ((void *)0xdeadbeaf)
 
-    current_thread = rt_thread_self();
-    if (!current_thread)
-    {
-        rt_hw_local_irq_enable(level);
-        return;
-    }
-
-    /* lock scheduler for local cpu */
-    current_thread->scheduler_lock_nest ++;
-
-    /* enable interrupt */
-    rt_hw_local_irq_enable(level);
-}
-
-/*
- * @brief   enable scheduler
- */
-static void _cpu_preempt_enable(void)
-{
-    rt_base_t level;
-    struct rt_thread *current_thread;
-
-    /* disable interrupt */
-    level = rt_hw_local_irq_disable();
-
-    current_thread = rt_thread_self();
-    if (!current_thread)
-    {
-        rt_hw_local_irq_enable(level);
-        return;
-    }
-
-    /* unlock scheduler for local cpu */
-    current_thread->scheduler_lock_nest --;
+#if defined(__GNUC__)
+#define __GET_RETURN_ADDRESS __builtin_return_address(0)
+#else
+#define __GET_RETURN_ADDRESS RT_NULL
+#endif
 
-    rt_schedule();
-    /* enable interrupt */
-    rt_hw_local_irq_enable(level);
-}
+#endif /* RT_DEBUGING_SPINLOCK */
 
 /**
  * @brief   Initialize a static spinlock object.
@@ -90,8 +53,14 @@ RTM_EXPORT(rt_spin_lock_init)
  */
 void rt_spin_lock(struct rt_spinlock *lock)
 {
-    _cpu_preempt_disable();
     rt_hw_spin_lock(&lock->lock);
+#if defined(RT_DEBUGING_SPINLOCK)
+    if (rt_cpu_self() != RT_NULL)
+    {
+        lock->owner = rt_cpu_self()->current_thread;
+    }
+    lock->pc = __GET_RETURN_ADDRESS;
+#endif /* RT_DEBUGING_SPINLOCK */
 }
 RTM_EXPORT(rt_spin_lock)
 
@@ -103,7 +72,10 @@ RTM_EXPORT(rt_spin_lock)
 void rt_spin_unlock(struct rt_spinlock *lock)
 {
     rt_hw_spin_unlock(&lock->lock);
-    _cpu_preempt_enable();
+#if defined(RT_DEBUGING_SPINLOCK)
+    lock->owner = __OWNER_MAGIC;
+    lock->pc = RT_NULL;
+#endif /* RT_DEBUGING_SPINLOCK */
 }
 RTM_EXPORT(rt_spin_unlock)
 
@@ -121,11 +93,15 @@ rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
 {
     unsigned long level;
 
-    _cpu_preempt_disable();
-
     level = rt_hw_local_irq_disable();
     rt_hw_spin_lock(&lock->lock);
-
+#if defined(RT_DEBUGING_SPINLOCK)
+    if (rt_cpu_self() != RT_NULL)
+    {
+        lock->owner = rt_cpu_self()->current_thread;
+        lock->pc = __GET_RETURN_ADDRESS;
+    }
+#endif /* RT_DEBUGING_SPINLOCK */
     return level;
 }
 RTM_EXPORT(rt_spin_lock_irqsave)
@@ -139,10 +115,12 @@ RTM_EXPORT(rt_spin_lock_irqsave)
  */
 void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
 {
+#if defined(RT_DEBUGING_SPINLOCK)
+    lock->owner = __OWNER_MAGIC;
+    lock->pc = RT_NULL;
+#endif /* RT_DEBUGING_SPINLOCK */
     rt_hw_spin_unlock(&lock->lock);
     rt_hw_local_irq_enable(level);
-
-    _cpu_preempt_enable();
 }
 RTM_EXPORT(rt_spin_unlock_irqrestore)
 
@@ -183,13 +161,16 @@ rt_base_t rt_cpus_lock(void)
     pcpu = rt_cpu_self();
     if (pcpu->current_thread != RT_NULL)
     {
-        register rt_ubase_t lock_nest = pcpu->current_thread->cpus_lock_nest;
+        register rt_ubase_t lock_nest = rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest));
 
-        pcpu->current_thread->cpus_lock_nest++;
+        rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
         if (lock_nest == 0)
         {
-            pcpu->current_thread->scheduler_lock_nest++;
             rt_hw_spin_lock(&_cpus_lock);
+#if defined(RT_DEBUGING_SPINLOCK)
+            _cpus_lock_owner = pcpu->current_thread;
+            _cpus_lock_pc = __GET_RETURN_ADDRESS;
+#endif
         }
     }
 
@@ -208,12 +189,15 @@ void rt_cpus_unlock(rt_base_t level)
 
     if (pcpu->current_thread != RT_NULL)
     {
-        RT_ASSERT(pcpu->current_thread->cpus_lock_nest > 0);
-        pcpu->current_thread->cpus_lock_nest--;
+        RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
+        rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);
 
         if (pcpu->current_thread->cpus_lock_nest == 0)
         {
-            pcpu->current_thread->scheduler_lock_nest--;
+#if defined(RT_DEBUGING_SPINLOCK)
+            _cpus_lock_owner = __OWNER_MAGIC;
+            _cpus_lock_pc = RT_NULL;
+#endif
             rt_hw_spin_unlock(&_cpus_lock);
         }
     }
@@ -235,11 +219,18 @@ void rt_cpus_lock_status_restore(struct rt_thread *thread)
 #if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
     lwp_aspace_switch(thread);
 #endif
-    pcpu->current_thread = thread;
-    if (!thread->cpus_lock_nest)
+    if (pcpu->current_thread != RT_NULL)
     {
-        rt_hw_spin_unlock(&_cpus_lock);
+        rt_spin_unlock(&(pcpu->current_thread->spinlock));
+        if ((pcpu->current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
+        {
+            rt_schedule_insert_thread(pcpu->current_thread);
+        }
+        rt_put_thread_struct(pcpu->current_thread);
+
     }
+    pcpu->current_thread = thread;
+    rt_get_thread_struct(thread);
 }
 RTM_EXPORT(rt_cpus_lock_status_restore);
-#endif
+#endif /* RT_USING_SMP */

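rt_cpus_lock()/rt_cpus_unlock() keep their recursive behaviour, but the per-thread nest count is now maintained with atomics, so only the outermost call touches the hardware spinlock. A compilable sketch of the nesting discipline (stand-in names; the kernel operates on pcpu->current_thread->cpus_lock_nest and _cpus_lock):

    #include <stdatomic.h>

    static void hw_lock(void)   { /* rt_hw_spin_lock(&_cpus_lock) here */ }
    static void hw_unlock(void) { /* rt_hw_spin_unlock(&_cpus_lock) here */ }

    static _Thread_local atomic_ulong cpus_nest;

    static void cpus_lock_sketch(void)
    {
        /* previous value 0 means this is the outermost acquisition */
        if (atomic_fetch_add_explicit(&cpus_nest, 1, memory_order_relaxed) == 0)
            hw_lock();
    }

    static void cpus_unlock_sketch(void)
    {
        /* previous value 1 means the nest just returned to zero */
        if (atomic_fetch_sub_explicit(&cpus_nest, 1, memory_order_relaxed) == 1)
            hw_unlock();
    }
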
+ 20 - 19
src/idle.c

@@ -16,6 +16,7 @@
  * 2018-11-22     Jesven       add per cpu idle task
  *                             combine the code of primary and secondary cpu
  * 2021-11-15     THEWON       Remove duplicate work between idle and _thread_exit
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -42,7 +43,8 @@
 #define _CPUS_NR                RT_CPUS_NR
 
 static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct);
-
+static struct rt_mutex _defunct_mutex;
+static rt_atomic_t _idle_inited = 0;
 static struct rt_thread idle_thread[_CPUS_NR];
 rt_align(RT_ALIGN_SIZE)
 static rt_uint8_t idle_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];
@@ -78,12 +80,8 @@ static void (*idle_hook_list[RT_IDLE_HOOK_LIST_SIZE])(void);
 rt_err_t rt_thread_idle_sethook(void (*hook)(void))
 {
     rt_size_t i;
-    rt_base_t level;
     rt_err_t ret = -RT_EFULL;
 
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
-
     for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
     {
         if (idle_hook_list[i] == RT_NULL)
@@ -93,9 +91,6 @@ rt_err_t rt_thread_idle_sethook(void (*hook)(void))
             break;
         }
     }
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
     return ret;
 }
 
@@ -110,12 +105,8 @@ rt_err_t rt_thread_idle_sethook(void (*hook)(void))
 rt_err_t rt_thread_idle_delhook(void (*hook)(void))
 {
     rt_size_t i;
-    rt_base_t level;
     rt_err_t ret = -RT_ENOSYS;
 
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
-
     for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
     {
         if (idle_hook_list[i] == hook)
@@ -125,9 +116,6 @@ rt_err_t rt_thread_idle_delhook(void (*hook)(void))
             break;
         }
     }
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
     return ret;
 }
 
@@ -142,7 +130,13 @@ rt_err_t rt_thread_idle_delhook(void (*hook)(void))
  */
 void rt_thread_defunct_enqueue(rt_thread_t thread)
 {
+    if (rt_atomic_load(&_idle_inited) == 0)
+    {
+        return;
+    }
+    rt_mutex_take(&_defunct_mutex, RT_WAITING_FOREVER);
     rt_list_insert_after(&_rt_thread_defunct, &thread->tlist);
+    rt_mutex_release(&_defunct_mutex);
 #ifdef RT_USING_SMP
     rt_sem_release(&system_sem);
 #endif
@@ -158,8 +152,7 @@ rt_thread_t rt_thread_defunct_dequeue(void)
     rt_list_t *l = &_rt_thread_defunct;
 
 #ifdef RT_USING_SMP
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    rt_mutex_take(&_defunct_mutex, RT_WAITING_FOREVER);
     if (l->next != l)
     {
         thread = rt_list_entry(l->next,
@@ -167,7 +160,8 @@ rt_thread_t rt_thread_defunct_dequeue(void)
                 tlist);
         rt_list_remove(&(thread->tlist));
     }
-    rt_hw_interrupt_enable(level);
+    rt_mutex_release(&_defunct_mutex);
+    RT_UNUSED(level);
 #else
     if (l->next != l)
     {
@@ -204,6 +198,11 @@ static void rt_defunct_execute(void)
         {
             break;
         }
+
+        while (rt_atomic_load(&(thread->ref_count)))
+        {
+            rt_thread_delay(5);
+        }
 #ifdef RT_USING_MODULE
         module = (struct rt_dlmodule*)thread->parent.module_id;
         if (module)
@@ -341,7 +340,7 @@ void rt_thread_idle_init(void)
 #ifdef RT_USING_SMP
     RT_ASSERT(RT_THREAD_PRIORITY_MAX > 2);
 
-    rt_sem_init(&system_sem, "defunct", 1, RT_IPC_FLAG_FIFO);
+    rt_sem_init(&system_sem, "defunct", 0, RT_IPC_FLAG_FIFO);
 
     /* create defunct thread */
     rt_thread_init(&rt_system_thread,
@@ -355,6 +354,8 @@ void rt_thread_idle_init(void)
     /* startup */
     rt_thread_startup(&rt_system_thread);
 #endif
+    rt_mutex_init(&_defunct_mutex, "defunct_mutex", RT_IPC_FLAG_FIFO);
+    rt_atomic_store(&(_idle_inited), 1);
 }
 
 /**

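Two related changes here: the defunct list is now serialized with a mutex instead of a global IRQ-off window, and rt_defunct_execute() refuses to reclaim a dead thread while any other CPU still holds a reference to it. A sketch of that reference-count guard (illustrative names; the kernel pairs this with rt_get_thread_struct/rt_put_thread_struct and sleeps via rt_thread_delay(5)):

    #include <stdatomic.h>

    struct dead_thread {
        atomic_int ref_count;          /* incremented by each holder */
    };

    static void reap(struct dead_thread *t)
    {
        /* wait for the last holder to drop its reference */
        while (atomic_load_explicit(&t->ref_count, memory_order_acquire) != 0)
            ;                          /* the kernel sleeps here instead */
        /* now safe to free the stack and control block */
    }
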
The file diff is too large, so it has been suppressed
+ 126 - 140
src/ipc.c


+ 8 - 22
src/irq.c

@@ -12,6 +12,7 @@
  * 2021-08-15     Supperthomas fix the comment
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to irq.c
  * 2022-07-04     Yunjie       fix RT_DEBUG_LOG
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -71,7 +72,7 @@ void rt_interrupt_leave_sethook(void (*hook)(void))
 #ifdef RT_USING_SMP
 #define rt_interrupt_nest rt_cpu_self()->irq_nest
 #else
-volatile rt_uint8_t rt_interrupt_nest = 0;
+volatile rt_atomic_t rt_interrupt_nest = 0;
 #endif /* RT_USING_SMP */
 
 
@@ -84,15 +85,10 @@ volatile rt_uint8_t rt_interrupt_nest = 0;
  */
 rt_weak void rt_interrupt_enter(void)
 {
-    rt_base_t level;
-
-    level = rt_hw_interrupt_disable();
-    rt_interrupt_nest ++;
+    rt_atomic_add(&(rt_interrupt_nest), 1);
     RT_OBJECT_HOOK_CALL(rt_interrupt_enter_hook,());
-    rt_hw_interrupt_enable(level);
-
     LOG_D("irq has come..., irq current nest:%d",
-          (rt_int32_t)rt_interrupt_nest);
+          (rt_int32_t)rt_atomic_load(&(rt_interrupt_nest)));
 }
 RTM_EXPORT(rt_interrupt_enter);
 
@@ -106,15 +102,11 @@ RTM_EXPORT(rt_interrupt_enter);
  */
 rt_weak void rt_interrupt_leave(void)
 {
-    rt_base_t level;
-
     LOG_D("irq is going to leave, irq current nest:%d",
-                 (rt_int32_t)rt_interrupt_nest);
-
-    level = rt_hw_interrupt_disable();
+                 (rt_int32_t)rt_atomic_load(&(rt_interrupt_nest)));
     RT_OBJECT_HOOK_CALL(rt_interrupt_leave_hook,());
-    rt_interrupt_nest --;
-    rt_hw_interrupt_enable(level);
+    rt_atomic_sub(&(rt_interrupt_nest), 1);
+
 }
 RTM_EXPORT(rt_interrupt_leave);
 
@@ -129,13 +121,7 @@ RTM_EXPORT(rt_interrupt_leave);
  */
 rt_weak rt_uint8_t rt_interrupt_get_nest(void)
 {
-    rt_uint8_t ret;
-    rt_base_t level;
-
-    level = rt_hw_interrupt_disable();
-    ret = rt_interrupt_nest;
-    rt_hw_interrupt_enable(level);
-    return ret;
+    return rt_atomic_load(&rt_interrupt_nest);
 }
 RTM_EXPORT(rt_interrupt_get_nest);
 
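The nest counter becomes lock-free: enter/leave are single atomic adds and the getter is a plain atomic load, so no interrupt-disable window remains on this path. The equivalent pattern in portable C11 (a sketch; the kernel's rt_atomic_* are assumed to map onto these operations):

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_uint irq_nest;

    static void irq_enter_sketch(void)
    {
        atomic_fetch_add_explicit(&irq_nest, 1, memory_order_relaxed);
    }

    static void irq_leave_sketch(void)
    {
        atomic_fetch_sub_explicit(&irq_nest, 1, memory_order_relaxed);
    }

    static uint8_t irq_nest_sketch(void)
    {
        return (uint8_t)atomic_load_explicit(&irq_nest, memory_order_relaxed);
    }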

+ 15 - 27
src/mempool.c

@@ -15,6 +15,7 @@
  * 2011-01-24     Bernard      add object allocation check.
  * 2012-03-22     Bernard      fix align issue in rt_mp_init and rt_mp_create.
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to mempool.c
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -131,6 +132,7 @@ rt_err_t rt_mp_init(struct rt_mempool *mp,
         RT_NULL;
 
     mp->block_list = block_ptr;
+    rt_spin_lock_init(&(mp->spinlock));
 
     return RT_EOK;
 }
@@ -153,11 +155,10 @@ rt_err_t rt_mp_detach(struct rt_mempool *mp)
     RT_ASSERT(rt_object_get_type(&mp->parent) == RT_Object_Class_MemPool);
     RT_ASSERT(rt_object_is_systemobject(&mp->parent));
 
+    level = rt_spin_lock_irqsave(&(mp->spinlock));
     /* wake up all suspended threads */
     while (!rt_list_isempty(&(mp->suspend_thread)))
     {
-        /* disable interrupt */
-        level = rt_hw_interrupt_disable();
 
         /* get next suspend thread */
         thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
@@ -170,13 +171,11 @@ rt_err_t rt_mp_detach(struct rt_mempool *mp)
          * suspend list
          */
         rt_thread_resume(thread);
-
-        /* enable interrupt */
-        rt_hw_interrupt_enable(level);
     }
 
     /* detach object */
     rt_object_detach(&(mp->parent));
+    rt_spin_unlock_irqrestore(&(mp->spinlock), level);
 
     return RT_EOK;
 }
@@ -249,6 +248,7 @@ rt_mp_t rt_mp_create(const char *name,
         = RT_NULL;
 
     mp->block_list = block_ptr;
+    rt_spin_lock_init(&(mp->spinlock));
 
     return mp;
 }
@@ -273,12 +273,10 @@ rt_err_t rt_mp_delete(rt_mp_t mp)
     RT_ASSERT(rt_object_get_type(&mp->parent) == RT_Object_Class_MemPool);
     RT_ASSERT(rt_object_is_systemobject(&mp->parent) == RT_FALSE);
 
+    level = rt_spin_lock_irqsave(&(mp->spinlock));
     /* wake up all suspended threads */
     while (!rt_list_isempty(&(mp->suspend_thread)))
     {
-        /* disable interrupt */
-        level = rt_hw_interrupt_disable();
-
         /* get next suspend thread */
         thread = rt_list_entry(mp->suspend_thread.next, struct rt_thread, tlist);
         /* set error code to -RT_ERROR */
@@ -290,9 +288,6 @@ rt_err_t rt_mp_delete(rt_mp_t mp)
          * suspend list
          */
         rt_thread_resume(thread);
-
-        /* enable interrupt */
-        rt_hw_interrupt_enable(level);
     }
 
     /* release allocated room */
@@ -300,6 +295,7 @@ rt_err_t rt_mp_delete(rt_mp_t mp)
 
     /* detach object */
     rt_object_delete(&(mp->parent));
+    rt_spin_unlock_irqrestore(&(mp->spinlock), level);
 
     return RT_EOK;
 }
@@ -329,16 +325,14 @@ void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
     /* get current thread */
     thread = rt_thread_self();
 
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(mp->spinlock));
 
     while (mp->block_free_count == 0)
     {
         /* memory block is unavailable. */
         if (time == 0)
         {
-            /* enable interrupt */
-            rt_hw_interrupt_enable(level);
+            rt_spin_unlock_irqrestore(&(mp->spinlock), level);
 
             rt_set_errno(-RT_ETIMEOUT);
 
@@ -366,7 +360,7 @@ void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
         }
 
         /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(mp->spinlock), level);
 
         /* do a schedule */
         rt_schedule();
@@ -380,8 +374,7 @@ void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
             if (time < 0)
                 time = 0;
         }
-        /* disable interrupt */
-        level = rt_hw_interrupt_disable();
+        level = rt_spin_lock_irqsave(&(mp->spinlock));
     }
 
     /* memory block is available. decrease the free block counter */
@@ -397,8 +390,7 @@ void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
     /* point to memory pool */
     *(rt_uint8_t **)block_ptr = (rt_uint8_t *)mp;
 
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(mp->spinlock), level);
 
     RT_OBJECT_HOOK_CALL(rt_mp_alloc_hook,
                         (mp, (rt_uint8_t *)(block_ptr + sizeof(rt_uint8_t *))));
@@ -428,8 +420,7 @@ void rt_mp_free(void *block)
 
     RT_OBJECT_HOOK_CALL(rt_mp_free_hook, (mp, block));
 
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(mp->spinlock));
 
     /* increase the free block count */
     mp->block_free_count ++;
@@ -451,17 +442,14 @@ void rt_mp_free(void *block)
         /* resume thread */
         rt_thread_resume(thread);
 
-        /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(mp->spinlock), level);
 
         /* do a schedule */
         rt_schedule();
 
         return;
     }
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(mp->spinlock), level);
 }
 RTM_EXPORT(rt_mp_free);
 
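The pattern repeated throughout this file: the global rt_hw_interrupt_disable()/enable() pair becomes a per-pool spinlock taken with IRQs saved, so contention is scoped to one mempool rather than the whole system. The shape of the critical section in portable C11 (hypothetical stand-ins for the RT-Thread lock calls):

    #include <stdatomic.h>

    struct pool {
        atomic_flag lock;              /* stands in for mp->spinlock */
        int         free_blocks;
    };

    static void pool_take_block(struct pool *p)
    {
        /* level = rt_spin_lock_irqsave(&mp->spinlock) in the kernel */
        while (atomic_flag_test_and_set_explicit(&p->lock, memory_order_acquire))
            ;
        p->free_blocks--;              /* the protected work */
        atomic_flag_clear_explicit(&p->lock, memory_order_release);
        /* rt_spin_unlock_irqrestore(&mp->spinlock, level) in the kernel */
    }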

+ 46 - 50
src/object.c

@@ -14,6 +14,7 @@
  * 2017-12-10     Bernard      Add object_info enum.
  * 2018-01-25     Bernard      Fix the object find issue when enable MODULE.
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to object.c
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rtthread.h>
@@ -86,53 +87,53 @@ enum rt_object_info_type
 static struct rt_object_information _object_container[RT_Object_Info_Unknown] =
 {
     /* initialize object container - thread */
-    {RT_Object_Class_Thread, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Thread), sizeof(struct rt_thread)},
+    {RT_Object_Class_Thread, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Thread), sizeof(struct rt_thread), RT_SPINLOCK_INIT},
 #ifdef RT_USING_SEMAPHORE
     /* initialize object container - semaphore */
-    {RT_Object_Class_Semaphore, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Semaphore), sizeof(struct rt_semaphore)},
+    {RT_Object_Class_Semaphore, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Semaphore), sizeof(struct rt_semaphore), RT_SPINLOCK_INIT},
 #endif
 #ifdef RT_USING_MUTEX
     /* initialize object container - mutex */
-    {RT_Object_Class_Mutex, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Mutex), sizeof(struct rt_mutex)},
+    {RT_Object_Class_Mutex, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Mutex), sizeof(struct rt_mutex), RT_SPINLOCK_INIT},
 #endif
 #ifdef RT_USING_EVENT
     /* initialize object container - event */
-    {RT_Object_Class_Event, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Event), sizeof(struct rt_event)},
+    {RT_Object_Class_Event, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Event), sizeof(struct rt_event), RT_SPINLOCK_INIT},
 #endif
 #ifdef RT_USING_MAILBOX
     /* initialize object container - mailbox */
-    {RT_Object_Class_MailBox, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MailBox), sizeof(struct rt_mailbox)},
+    {RT_Object_Class_MailBox, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MailBox), sizeof(struct rt_mailbox), RT_SPINLOCK_INIT},
 #endif
 #ifdef RT_USING_MESSAGEQUEUE
     /* initialize object container - message queue */
-    {RT_Object_Class_MessageQueue, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MessageQueue), sizeof(struct rt_messagequeue)},
+    {RT_Object_Class_MessageQueue, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MessageQueue), sizeof(struct rt_messagequeue), RT_SPINLOCK_INIT},
 #endif
 #ifdef RT_USING_MEMHEAP
     /* initialize object container - memory heap */
-    {RT_Object_Class_MemHeap, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MemHeap), sizeof(struct rt_memheap)},
+    {RT_Object_Class_MemHeap, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MemHeap), sizeof(struct rt_memheap), RT_SPINLOCK_INIT},
 #endif
 #ifdef RT_USING_MEMPOOL
     /* initialize object container - memory pool */
-    {RT_Object_Class_MemPool, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MemPool), sizeof(struct rt_mempool)},
+    {RT_Object_Class_MemPool, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_MemPool), sizeof(struct rt_mempool), RT_SPINLOCK_INIT},
 #endif
 #ifdef RT_USING_DEVICE
     /* initialize object container - device */
-    {RT_Object_Class_Device, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Device), sizeof(struct rt_device)},
+    {RT_Object_Class_Device, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Device), sizeof(struct rt_device), RT_SPINLOCK_INIT},
 #endif
     /* initialize object container - timer */
-    {RT_Object_Class_Timer, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Timer), sizeof(struct rt_timer)},
+    {RT_Object_Class_Timer, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Timer), sizeof(struct rt_timer), RT_SPINLOCK_INIT},
 #ifdef RT_USING_MODULE
     /* initialize object container - module */
-    {RT_Object_Class_Module, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Module), sizeof(struct rt_dlmodule)},
+    {RT_Object_Class_Module, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Module), sizeof(struct rt_dlmodule), RT_SPINLOCK_INIT},
 #endif
 #ifdef RT_USING_HEAP
     /* initialize object container - small memory */
-    {RT_Object_Class_Memory, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Memory), sizeof(struct rt_memory)},
+    {RT_Object_Class_Memory, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Memory), sizeof(struct rt_memory), RT_SPINLOCK_INIT},
 #endif
 #ifdef RT_USING_SMART
     /* initialize object container - module */
-    {RT_Object_Class_Channel, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Channel), sizeof(struct rt_channel)},
-    {RT_Object_Class_Custom, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Custom), sizeof(struct rt_custom_object)},
+    {RT_Object_Class_Channel, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Channel), sizeof(struct rt_channel), RT_SPINLOCK_INIT},
+    {RT_Object_Class_Custom, _OBJ_CONTAINER_LIST_INIT(RT_Object_Info_Custom), sizeof(struct rt_custom_object), RT_SPINLOCK_INIT},
 #endif
 };
 
@@ -257,6 +258,8 @@ rt_object_get_information(enum rt_object_class_type type)
 {
     int index;
 
+    type = type & ~RT_Object_Class_Static;
+
     for (index = 0; index < RT_Object_Info_Unknown; index ++)
         if (_object_container[index].type == type) return &_object_container[index];
 
@@ -282,13 +285,12 @@ int rt_object_get_length(enum rt_object_class_type type)
     information = rt_object_get_information((enum rt_object_class_type)type);
     if (information == RT_NULL) return 0;
 
-    level = rt_hw_interrupt_disable();
-    /* get the count of objects */
+    level = rt_spin_lock_irqsave(&(information->spinlock));
     rt_list_for_each(node, &(information->object_list))
     {
         count ++;
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(information->spinlock), level);
 
     return count;
 }
@@ -321,7 +323,7 @@ int rt_object_get_pointers(enum rt_object_class_type type, rt_object_t *pointers
     information = rt_object_get_information((enum rt_object_class_type)type);
     if (information == RT_NULL) return 0;
 
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(information->spinlock));
     /* retrieve pointer of object */
     rt_list_for_each(node, &(information->object_list))
     {
@@ -332,7 +334,7 @@ int rt_object_get_pointers(enum rt_object_class_type type, rt_object_t *pointers
 
         if (index >= maxlen) break;
     }
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(information->spinlock), level);
 
     return index;
 }
@@ -369,7 +371,7 @@ void rt_object_init(struct rt_object         *object,
     /* check object type to avoid re-initialization */
 
     /* enter critical */
-    rt_enter_critical();
+    level = rt_spin_lock_irqsave(&(information->spinlock));
     /* try to find object */
     for (node  = information->object_list.next;
             node != &(information->object_list);
@@ -381,7 +383,7 @@ void rt_object_init(struct rt_object         *object,
         RT_ASSERT(obj != object);
     }
     /* leave critical */
-    rt_exit_critical();
+    rt_spin_unlock_irqrestore(&(information->spinlock), level);
 #endif
 
     /* initialize object's parameters */
@@ -395,8 +397,7 @@ void rt_object_init(struct rt_object         *object,
 
     RT_OBJECT_HOOK_CALL(rt_object_attach_hook, (object));
 
-    /* lock interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(information->spinlock));
 
 #ifdef RT_USING_MODULE
     if (module)
@@ -410,9 +411,7 @@ void rt_object_init(struct rt_object         *object,
         /* insert object into information object list */
         rt_list_insert_after(&(information->object_list), &(object->list));
     }
-
-    /* unlock interrupt */
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(information->spinlock), level);
 }
 
 /**
@@ -424,23 +423,22 @@ void rt_object_init(struct rt_object         *object,
 void rt_object_detach(rt_object_t object)
 {
     rt_base_t level;
+    struct rt_object_information *information;
 
     /* object check */
     RT_ASSERT(object != RT_NULL);
 
     RT_OBJECT_HOOK_CALL(rt_object_detach_hook, (object));
 
-    /* reset object type */
-    object->type = 0;
-
-    /* lock interrupt */
-    level = rt_hw_interrupt_disable();
+    information = rt_object_get_information(object->type);
+    RT_ASSERT(information != RT_NULL);
 
+    level = rt_spin_lock_irqsave(&(information->spinlock));
     /* remove from old list */
     rt_list_remove(&(object->list));
+    rt_spin_unlock_irqrestore(&(information->spinlock), level);
 
-    /* unlock interrupt */
-    rt_hw_interrupt_enable(level);
+    object->type = 0;
 }
 
 #ifdef RT_USING_HEAP
@@ -494,8 +492,7 @@ rt_object_t rt_object_allocate(enum rt_object_class_type type, const char *name)
 
     RT_OBJECT_HOOK_CALL(rt_object_attach_hook, (object));
 
-    /* lock interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(information->spinlock));
 
 #ifdef RT_USING_MODULE
     if (module)
@@ -509,11 +506,8 @@ rt_object_t rt_object_allocate(enum rt_object_class_type type, const char *name)
         /* insert object into information object list */
         rt_list_insert_after(&(information->object_list), &(object->list));
     }
+    rt_spin_unlock_irqrestore(&(information->spinlock), level);
 
-    /* unlock interrupt */
-    rt_hw_interrupt_enable(level);
-
-    /* return object */
     return object;
 }
 
@@ -525,6 +519,7 @@ rt_object_t rt_object_allocate(enum rt_object_class_type type, const char *name)
 void rt_object_delete(rt_object_t object)
 {
     rt_base_t level;
+    struct rt_object_information *information;
 
     /* object check */
     RT_ASSERT(object != RT_NULL);
@@ -532,17 +527,19 @@ void rt_object_delete(rt_object_t object)
 
     RT_OBJECT_HOOK_CALL(rt_object_detach_hook, (object));
 
-    /* reset object type */
-    object->type = RT_Object_Class_Null;
 
-    /* lock interrupt */
-    level = rt_hw_interrupt_disable();
+    information = rt_object_get_information(object->type);
+    RT_ASSERT(information != RT_NULL);
+
+    level = rt_spin_lock_irqsave(&(information->spinlock));
 
     /* remove from old list */
     rt_list_remove(&(object->list));
 
-    /* unlock interrupt */
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(information->spinlock), level);
+
+    /* reset object type */
+    object->type = RT_Object_Class_Null;
 
     /* free the memory of object */
     RT_KERNEL_FREE(object);
@@ -604,6 +601,7 @@ rt_object_t rt_object_find(const char *name, rt_uint8_t type)
     struct rt_object *object = RT_NULL;
     struct rt_list_node *node = RT_NULL;
     struct rt_object_information *information = RT_NULL;
+    rt_base_t level;
 
     information = rt_object_get_information((enum rt_object_class_type)type);
 
@@ -614,7 +612,7 @@ rt_object_t rt_object_find(const char *name, rt_uint8_t type)
     RT_DEBUG_NOT_IN_INTERRUPT;
 
     /* enter critical */
-    rt_enter_critical();
+    level = rt_spin_lock_irqsave(&(information->spinlock));
 
     /* try to find object */
     rt_list_for_each(node, &(information->object_list))
@@ -622,15 +620,13 @@ rt_object_t rt_object_find(const char *name, rt_uint8_t type)
         object = rt_list_entry(node, struct rt_object, list);
         if (rt_strncmp(object->name, name, RT_NAME_MAX) == 0)
         {
-            /* leave critical */
-            rt_exit_critical();
+            rt_spin_unlock_irqrestore(&(information->spinlock), level);
 
             return object;
         }
     }
 
-    /* leave critical */
-    rt_exit_critical();
+    rt_spin_unlock_irqrestore(&(information->spinlock), level);
 
     return RT_NULL;
 }

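Each object container now embeds a statically initialized spinlock (RT_SPINLOCK_INIT), so lookups and insertions in one object class no longer serialize against every other class or against the scheduler. The rt_object_find() shape after the change, as a self-contained C11 sketch (all names illustrative):

    #include <stdatomic.h>
    #include <string.h>

    struct obj { const char *name; struct obj *next; };

    struct container {
        atomic_flag lock;              /* per-class spinlock */
        struct obj *head;
    };

    static struct obj *find(struct container *c, const char *name)
    {
        struct obj *o;

        while (atomic_flag_test_and_set_explicit(&c->lock, memory_order_acquire))
            ;                          /* rt_spin_lock_irqsave in the kernel */
        for (o = c->head; o != NULL; o = o->next)
        {
            if (strcmp(o->name, name) == 0)
                break;                 /* found: fall through to unlock */
        }
        atomic_flag_clear_explicit(&c->lock, memory_order_release);
        return o;
    }
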
+ 286 - 218
src/scheduler_mp.c

@@ -29,6 +29,7 @@
  *                             new task directly
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to scheduler.c
  * 2023-03-27     rose_man     Split into scheduler upc and scheduler_mp.c
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rtthread.h>
@@ -39,6 +40,7 @@
 #include <rtdbg.h>
 
 rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
+static RT_DEFINE_SPINLOCK(_spinlock);
 rt_uint32_t rt_thread_ready_priority_group;
 #if RT_THREAD_PRIORITY_MAX > 32
 /* Maximum priority level, 256 */
@@ -118,7 +120,8 @@ static void _scheduler_stack_check(struct rt_thread *thread)
 
         rt_kprintf("thread:%s stack overflow\n", thread->parent.name);
 
-        level = rt_hw_interrupt_disable();
+        level = rt_hw_local_irq_disable();
+        rt_spin_lock(&_spinlock);
         while (level);
     }
 #ifdef ARCH_CPU_STACK_GROWS_UPWARD
@@ -163,14 +166,14 @@ static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *high
         *highest_prio = highest_ready_priority;
         highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                   struct rt_thread,
-                                  tlist);
+                                  tlist_schedule);
     }
     else
     {
         *highest_prio = local_highest_ready_priority;
         highest_priority_thread = rt_list_entry(pcpu->priority_table[local_highest_ready_priority].next,
                                   struct rt_thread,
-                                  tlist);
+                                  tlist_schedule);
     }
 
     return highest_priority_thread;
@@ -208,6 +211,8 @@ void rt_system_scheduler_init(void)
 #if RT_THREAD_PRIORITY_MAX > 32
         rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table));
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
+
+        rt_spin_lock_init(&(pcpu->spinlock));
     }
 
     /* initialize ready priority group */
@@ -219,28 +224,6 @@ void rt_system_scheduler_init(void)
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
 }
 
-/**
- * @brief This function will startup the scheduler. It will select one thread
- *        with the highest priority level, then switch to it.
- */
-void rt_system_scheduler_start(void)
-{
-    struct rt_thread *to_thread;
-    rt_ubase_t highest_ready_priority;
-
-    to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
-
-    to_thread->oncpu = rt_hw_cpu_id();
-
-    rt_schedule_remove_thread(to_thread);
-    to_thread->stat = RT_THREAD_RUNNING;
-
-    /* switch to new thread */
-    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);
-
-    /* never come back */
-}
-
 /**
  * @addtogroup Thread
  * @cond
@@ -262,6 +245,199 @@ void rt_scheduler_ipi_handler(int vector, void *param)
     rt_schedule();
 }
 
+static void _rt_schedule_insert_thread(struct rt_thread *thread, rt_bool_t is_lock)
+{
+    int cpu_id;
+    int bind_cpu;
+    rt_uint32_t cpu_mask;
+
+    RT_ASSERT(thread != RT_NULL);
+
+    /* disable interrupt */
+    if(is_lock)
+    {
+        rt_spin_lock(&(thread->spinlock));
+    }
+
+    if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
+    {
+        if(is_lock)
+        {
+            rt_spin_unlock(&(thread->spinlock));
+        }
+        return;
+    }
+
+    /* it should be RUNNING thread */
+    if (thread->oncpu != RT_CPU_DETACHED)
+    {
+        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
+        if(is_lock)
+        {
+            rt_spin_unlock(&(thread->spinlock));
+        }
+        return;
+    }
+
+    /* READY thread, insert to ready queue */
+    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
+
+    cpu_id   = rt_hw_cpu_id();
+    bind_cpu = thread->bind_cpu;
+
+    /* insert thread to ready list */
+    if (bind_cpu == RT_CPUS_NR)
+    {
+#if RT_THREAD_PRIORITY_MAX > 32
+        rt_thread_ready_table[thread->number] |= thread->high_mask;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+        rt_thread_ready_priority_group |= thread->number_mask;
+
+        /* no time slice left (YIELD): insert the thread at the tail of the ready list */
+        if((thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
+        {
+            rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
+                                &(thread->tlist_schedule));
+        }
+        /* time slices remain: insert the thread at the head so it runs first next time */
+        else
+        {
+            rt_list_insert_after(&(rt_thread_priority_table[thread->current_priority]),
+                                &(thread->tlist_schedule));
+        }
+        if(is_lock)
+        {
+            rt_spin_unlock(&(thread->spinlock));
+        }
+
+        cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
+        rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
+    }
+    else
+    {
+        struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);
+
+        if(is_lock)
+        {
+            rt_spin_lock(&(pcpu->spinlock));
+        }
+#if RT_THREAD_PRIORITY_MAX > 32
+        pcpu->ready_table[thread->number] |= thread->high_mask;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+        pcpu->priority_group |= thread->number_mask;
+
+        /* no time slice left (YIELD): insert the thread at the tail of the ready list */
+        if((thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
+        {
+            rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
+                                &(thread->tlist_schedule));
+        }
+        /* time slices remain: insert the thread at the head so it runs first next time */
+        else
+        {
+            rt_list_insert_after(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
+                                &(thread->tlist_schedule));
+        }
+
+        if(is_lock)
+        {
+            rt_spin_unlock(&(pcpu->spinlock));
+            rt_spin_unlock(&(thread->spinlock));
+        }
+
+        if (cpu_id != bind_cpu)
+        {
+            cpu_mask = 1 << bind_cpu;
+            rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
+        }
+    }
+
+    LOG_D("insert thread[%.*s], the priority: %d",
+          RT_NAME_MAX, thread->parent.name, thread->current_priority);
+}
+
+static void _rt_schedule_remove_thread(struct rt_thread *thread, rt_bool_t is_lock)
+{
+
+    RT_ASSERT(thread != RT_NULL);
+
+    LOG_D("remove thread[%.*s], the priority: %d",
+          RT_NAME_MAX, thread->parent.name,
+          thread->current_priority);
+
+    /* remove thread from ready list */
+    rt_list_remove(&(thread->tlist_schedule));
+    if (thread->bind_cpu == RT_CPUS_NR)
+    {
+        if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
+        {
+#if RT_THREAD_PRIORITY_MAX > 32
+            rt_thread_ready_table[thread->number] &= ~thread->high_mask;
+            if (rt_thread_ready_table[thread->number] == 0)
+            {
+                rt_thread_ready_priority_group &= ~thread->number_mask;
+            }
+#else
+            rt_thread_ready_priority_group &= ~thread->number_mask;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+        }
+    }
+    else
+    {
+        struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu);
+        if(is_lock)
+        {
+            rt_spin_lock(&(pcpu->spinlock));
+        }
+        if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority])))
+        {
+#if RT_THREAD_PRIORITY_MAX > 32
+            pcpu->ready_table[thread->number] &= ~thread->high_mask;
+            if (pcpu->ready_table[thread->number] == 0)
+            {
+                pcpu->priority_group &= ~thread->number_mask;
+            }
+#else
+            pcpu->priority_group &= ~thread->number_mask;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+        }
+        if(is_lock)
+        {
+            rt_spin_unlock(&(pcpu->spinlock));
+        }
+    }
+}
+
+/**
+ * @brief This function will startup the scheduler. It will select one thread
+ *        with the highest priority level, then switch to it.
+ */
+void rt_system_scheduler_start(void)
+{
+    struct rt_thread *to_thread;
+    rt_ubase_t highest_ready_priority;
+
+    rt_hw_local_irq_disable();
+    rt_spin_lock(&_spinlock);
+
+    to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
+    rt_spin_lock(&to_thread->spinlock);
+    to_thread->oncpu = rt_hw_cpu_id();
+
+    _rt_schedule_remove_thread(to_thread, RT_TRUE);
+    to_thread->stat = RT_THREAD_RUNNING;
+
+    rt_spin_unlock(&to_thread->spinlock);
+    rt_spin_unlock(&_spinlock);
+
+    rt_hw_spin_unlock(&_cpus_lock);
+
+    /* switch to new thread */
+    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);
+
+    /* never come back */
+}
+
 /**
  * @brief This function will perform one scheduling. It will select one thread
  *        with the highest priority level in global ready queue or local ready queue,
@@ -269,24 +445,30 @@ void rt_scheduler_ipi_handler(int vector, void *param)
  */
 void rt_schedule(void)
 {
-    rt_base_t level;
+    rt_base_t        level;
     struct rt_thread *to_thread;
     struct rt_thread *current_thread;
     struct rt_cpu    *pcpu;
-    int cpu_id;
+    int              cpu_id;
+    rt_bool_t        need_unlock = RT_TRUE;
 
     /* disable interrupt */
-    level  = rt_hw_interrupt_disable();
+    level  = rt_hw_local_irq_disable();
+
+    rt_spin_lock(&_spinlock);
 
     cpu_id = rt_hw_cpu_id();
     pcpu   = rt_cpu_index(cpu_id);
+    rt_spin_lock(&pcpu->spinlock);
     current_thread = pcpu->current_thread;
 
     /* whether do switch in interrupt */
-    if (pcpu->irq_nest)
+    if (rt_atomic_load(&(pcpu->irq_nest)))
     {
         pcpu->irq_switch_flag = 1;
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock(&pcpu->spinlock);
+        rt_spin_unlock(&_spinlock);
+        rt_hw_local_irq_enable(level);
         goto __exit;
     }
 
@@ -306,7 +488,8 @@ void rt_schedule(void)
     }
 #endif /* RT_USING_SIGNALS */
 
-    if (current_thread->scheduler_lock_nest == 1) /* whether lock scheduler */
+    rt_spin_lock(&(current_thread->spinlock));
+    if (rt_atomic_load(&(current_thread->critical_lock_nest)) == 0) /* whether lock scheduler */
     {
         rt_ubase_t highest_ready_priority;
 
@@ -328,15 +511,20 @@ void rt_schedule(void)
                     }
                     else
                     {
-                        rt_schedule_insert_thread(current_thread);
+                        _rt_schedule_insert_thread(current_thread, RT_FALSE);
                     }
                 }
                 else
                 {
-                    rt_schedule_insert_thread(current_thread);
+                    _rt_schedule_insert_thread(current_thread, RT_FALSE);
                 }
                 current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
             }
+
+            if (to_thread != current_thread)
+            {
+                rt_spin_lock(&(to_thread->spinlock));
+            }
             to_thread->oncpu = cpu_id;
             if (to_thread != current_thread)
             {
@@ -345,14 +533,14 @@ void rt_schedule(void)
 
                 RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));
 
-                rt_schedule_remove_thread(to_thread);
+                _rt_schedule_remove_thread(to_thread, RT_FALSE);
                 to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);
 
                 /* switch to new thread */
                 LOG_D("[%d]switch to priority#%d "
                          "thread:%.*s(sp:0x%08x), "
                          "from thread:%.*s(sp: 0x%08x)",
-                         pcpu->irq_nest, highest_ready_priority,
+                         rt_atomic_load(&(pcpu->irq_nest)), highest_ready_priority,
                          RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
                          RT_NAME_MAX, current_thread->parent.name, current_thread->sp);
 
@@ -362,32 +550,42 @@ void rt_schedule(void)
 
                 RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));
 
+                rt_spin_unlock(&(to_thread->spinlock));
+                rt_spin_unlock(&pcpu->spinlock);
+                rt_spin_unlock(&_spinlock);
+
+                need_unlock = RT_FALSE;
                 rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
                         (rt_ubase_t)&to_thread->sp, to_thread);
             }
         }
     }
 
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
+    if(need_unlock)
+    {
+        rt_spin_unlock(&(current_thread->spinlock));
+        rt_spin_unlock(&pcpu->spinlock);
+        rt_spin_unlock(&_spinlock);
+    }
+    rt_hw_local_irq_enable(level);
 
 #ifdef RT_USING_SIGNALS
     /* check stat of thread for signal */
-    level = rt_hw_interrupt_disable();
+    rt_spin_lock(&(current_thread->spinlock));
     if (current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
     {
         extern void rt_thread_handle_sig(rt_bool_t clean_state);
 
         current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
 
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock(&(current_thread->spinlock));
 
         /* check signal status */
         rt_thread_handle_sig(RT_TRUE);
     }
     else
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock(&(current_thread->spinlock));
     }
 #endif /* RT_USING_SIGNALS */
 
@@ -402,16 +600,18 @@ __exit:
  */
 void rt_scheduler_do_irq_switch(void *context)
 {
-    int cpu_id;
-    rt_base_t level;
-    struct rt_cpu* pcpu;
+    int              cpu_id;
+    rt_base_t        level;
+    struct rt_cpu    *pcpu;
     struct rt_thread *to_thread;
     struct rt_thread *current_thread;
+    rt_bool_t        need_unlock = RT_TRUE;
 
-    level = rt_hw_interrupt_disable();
-
+    level  = rt_hw_local_irq_disable();
+    rt_spin_lock(&_spinlock);
     cpu_id = rt_hw_cpu_id();
     pcpu   = rt_cpu_index(cpu_id);
+    rt_spin_lock(&pcpu->spinlock);
     current_thread = pcpu->current_thread;
 
 #ifdef RT_USING_SIGNALS
@@ -432,11 +632,14 @@ void rt_scheduler_do_irq_switch(void *context)
 
     if (pcpu->irq_switch_flag == 0)
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock(&pcpu->spinlock);
+        rt_spin_unlock(&_spinlock);
+        rt_hw_local_irq_enable(level);
         return;
     }
-
-    if (current_thread->scheduler_lock_nest == 1 && pcpu->irq_nest == 0)
+    rt_spin_lock(&(current_thread->spinlock));
+    if (rt_atomic_load(&(current_thread->critical_lock_nest)) == 0 &&
+        rt_atomic_load(&(pcpu->irq_nest)) == 0)
     {
         rt_ubase_t highest_ready_priority;
 
@@ -461,15 +664,20 @@ void rt_scheduler_do_irq_switch(void *context)
                     }
                     else
                     {
-                        rt_schedule_insert_thread(current_thread);
+                        _rt_schedule_insert_thread(current_thread, RT_FALSE);
                     }
                 }
                 else
                 {
-                    rt_schedule_insert_thread(current_thread);
+                    _rt_schedule_insert_thread(current_thread, RT_FALSE);
                 }
                 current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
             }
+
+            if (to_thread != current_thread)
+            {
+                rt_spin_lock(&(to_thread->spinlock));
+            }
             to_thread->oncpu = cpu_id;
             if (to_thread != current_thread)
             {
@@ -479,7 +687,7 @@ void rt_scheduler_do_irq_switch(void *context)
 
                 RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));
 
-                rt_schedule_remove_thread(to_thread);
+                _rt_schedule_remove_thread(to_thread, RT_FALSE);
                 to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);
 
 #ifdef RT_USING_OVERFLOW_CHECK
@@ -487,18 +695,27 @@ void rt_scheduler_do_irq_switch(void *context)
 #endif /* RT_USING_OVERFLOW_CHECK */
                 LOG_D("switch in interrupt");
 
-                RT_ASSERT(current_thread->cpus_lock_nest > 0);
-                current_thread->cpus_lock_nest--;
-                current_thread->scheduler_lock_nest--;
-
                 RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));
 
+                rt_spin_unlock(&(to_thread->spinlock));
+                rt_spin_unlock(&pcpu->spinlock);
+                rt_spin_unlock(&_spinlock);
+
+                need_unlock = RT_FALSE;
                 rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
                         (rt_ubase_t)&to_thread->sp, to_thread);
             }
         }
     }
-    rt_hw_interrupt_enable(level);
+
+    if(need_unlock)
+    {
+        rt_spin_unlock(&(current_thread->spinlock));
+        rt_spin_unlock(&pcpu->spinlock);
+        rt_spin_unlock(&_spinlock);
+    }
+
+    rt_hw_local_irq_enable(level);
 }
 
 /**
@@ -511,88 +728,10 @@ void rt_scheduler_do_irq_switch(void *context)
  */
 void rt_schedule_insert_thread(struct rt_thread *thread)
 {
-    int cpu_id;
-    int bind_cpu;
-    rt_uint32_t cpu_mask;
     rt_base_t level;
-
-    RT_ASSERT(thread != RT_NULL);
-
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
-
-    /* it should be RUNNING thread */
-    if (thread->oncpu != RT_CPU_DETACHED)
-    {
-        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
-        goto __exit;
-    }
-
-    /* READY thread, insert to ready queue */
-    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
-
-    cpu_id   = rt_hw_cpu_id();
-    bind_cpu = thread->bind_cpu ;
-
-    /* insert thread to ready list */
-    if (bind_cpu == RT_CPUS_NR)
-    {
-#if RT_THREAD_PRIORITY_MAX > 32
-        rt_thread_ready_table[thread->number] |= thread->high_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-        rt_thread_ready_priority_group |= thread->number_mask;
-
-        /* there is no time slices left(YIELD), inserting thread before ready list*/
-        if((thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
-        {
-            rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
-                                &(thread->tlist));
-        }
-        /* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
-        else
-        {
-            rt_list_insert_after(&(rt_thread_priority_table[thread->current_priority]),
-                                &(thread->tlist));
-        }
-
-        cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
-        rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
-    }
-    else
-    {
-        struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);
-
-#if RT_THREAD_PRIORITY_MAX > 32
-        pcpu->ready_table[thread->number] |= thread->high_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-        pcpu->priority_group |= thread->number_mask;
-
-        /* there is no time slices left(YIELD), inserting thread before ready list*/
-        if((thread->stat & RT_THREAD_STAT_YIELD_MASK) != 0)
-        {
-            rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
-                                &(thread->tlist));
-        }
-        /* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
-        else
-        {
-            rt_list_insert_after(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
-                                &(thread->tlist));
-        }
-
-        if (cpu_id != bind_cpu)
-        {
-            cpu_mask = 1 << bind_cpu;
-            rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
-        }
-    }
-
-    LOG_D("insert thread[%.*s], the priority: %d",
-          RT_NAME_MAX, thread->parent.name, thread->current_priority);
-
-__exit:
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
+    level = rt_spin_lock_irqsave(&_spinlock);
+    _rt_schedule_insert_thread(thread, RT_TRUE);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 }
 
 /**
@@ -605,53 +744,11 @@ __exit:
 void rt_schedule_remove_thread(struct rt_thread *thread)
 {
     rt_base_t level;
-
-    RT_ASSERT(thread != RT_NULL);
-
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
-
-    LOG_D("remove thread[%.*s], the priority: %d",
-          RT_NAME_MAX, thread->parent.name,
-          thread->current_priority);
-
-    /* remove thread from ready list */
-    rt_list_remove(&(thread->tlist));
-    if (thread->bind_cpu == RT_CPUS_NR)
-    {
-        if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
-        {
-#if RT_THREAD_PRIORITY_MAX > 32
-            rt_thread_ready_table[thread->number] &= ~thread->high_mask;
-            if (rt_thread_ready_table[thread->number] == 0)
-            {
-                rt_thread_ready_priority_group &= ~thread->number_mask;
-            }
-#else
-            rt_thread_ready_priority_group &= ~thread->number_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-        }
-    }
-    else
-    {
-        struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu);
-
-        if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority])))
-        {
-#if RT_THREAD_PRIORITY_MAX > 32
-            pcpu->ready_table[thread->number] &= ~thread->high_mask;
-            if (pcpu->ready_table[thread->number] == 0)
-            {
-                pcpu->priority_group &= ~thread->number_mask;
-            }
-#else
-            pcpu->priority_group &= ~thread->number_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-        }
-    }
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
+    level = rt_spin_lock_irqsave(&_spinlock);
+    rt_spin_lock(&thread->spinlock);
+    _rt_schedule_remove_thread(thread, RT_TRUE);
+    rt_spin_unlock(&thread->spinlock);
+    rt_spin_unlock_irqrestore(&_spinlock, level);
 }
 
 /**
@@ -672,26 +769,8 @@ void rt_enter_critical(void)
         return;
     }
 
-    /*
-     * the maximal number of nest is RT_UINT16_MAX, which is big
-     * enough and does not check here
-     */
-
-    {
-        rt_uint16_t lock_nest = current_thread->cpus_lock_nest;
-        current_thread->cpus_lock_nest++;
-        RT_ASSERT(current_thread->cpus_lock_nest != 0);
-        if (lock_nest == 0)
-        {
-            current_thread->scheduler_lock_nest ++;
-            rt_hw_spin_lock(&_cpus_lock);
-        }
-    }
     /* critical for local cpu */
-    current_thread->critical_lock_nest ++;
-
-    /* lock scheduler for local cpu */
-    current_thread->scheduler_lock_nest ++;
+    rt_atomic_add(&(current_thread->critical_lock_nest), 1);
 
     /* enable interrupt */
     rt_hw_local_irq_enable(level);
@@ -716,21 +795,11 @@ void rt_exit_critical(void)
         return;
     }
 
-    current_thread->scheduler_lock_nest --;
+    rt_atomic_sub(&(current_thread->critical_lock_nest), 1);
 
-    current_thread->critical_lock_nest --;
-
-    RT_ASSERT(current_thread->cpus_lock_nest > 0);
-    current_thread->cpus_lock_nest--;
-    if (current_thread->cpus_lock_nest == 0)
+    if (rt_atomic_load(&(current_thread->critical_lock_nest)) <= 0)
     {
-        current_thread->scheduler_lock_nest --;
-        rt_hw_spin_unlock(&_cpus_lock);
-    }
-
-    if (current_thread->scheduler_lock_nest <= 0)
-    {
-        current_thread->scheduler_lock_nest = 0;
+        rt_atomic_store(&(current_thread->critical_lock_nest), 0);
         /* enable interrupt */
         rt_hw_local_irq_enable(level);
 
@@ -752,8 +821,7 @@ RTM_EXPORT(rt_exit_critical);
 rt_uint16_t rt_critical_level(void)
 {
     struct rt_thread *current_thread = rt_cpu_self()->current_thread;
-
-    return current_thread->critical_lock_nest;
+    return rt_atomic_load(&(current_thread->critical_lock_nest));
 }
 RTM_EXPORT(rt_critical_level);
 
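After this rewrite, rt_enter_critical()/rt_exit_critical() reduce to a single per-thread atomic nest counter; the cpus_lock/scheduler_lock bookkeeping is gone, and rescheduling is considered only when the nest count returns to zero. The control flow as a minimal sketch (illustrative names; the kernel counter is thread->critical_lock_nest):

    #include <stdatomic.h>

    static _Thread_local atomic_long critical_nest;

    static void enter_critical_sketch(void)
    {
        atomic_fetch_add_explicit(&critical_nest, 1, memory_order_relaxed);
    }

    static void exit_critical_sketch(void)
    {
        /* previous value 1: the outermost critical section just ended */
        if (atomic_fetch_sub_explicit(&critical_nest, 1, memory_order_relaxed) == 1)
        {
            /* the kernel re-enables local IRQs and calls rt_schedule() here */
        }
    }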

+ 0 - 11
src/signal.c

@@ -53,17 +53,6 @@ static void _signal_entry(void *parameter)
     rt_thread_handle_sig(RT_FALSE);
 
 #ifdef RT_USING_SMP
-    {
-        struct rt_cpu* pcpu = rt_cpu_self();
-
-        RT_ASSERT(pcpu->current_thread->cpus_lock_nest > 0);
-        pcpu->current_thread->cpus_lock_nest--;
-        if (pcpu->current_thread->cpus_lock_nest == 0)
-        {
-            pcpu->current_thread->scheduler_lock_nest--;
-        }
-
-    }
 #else
     /* return to thread */
     tid->sp = tid->sig_ret;

+ 71 - 97
src/thread.c

@@ -32,6 +32,7 @@
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to thread.c
  * 2022-01-24     THEWON       let rt_thread_sleep return thread->error when using signal
  * 2022-10-15     Bernard      add nested mutex feature
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rthw.h>
@@ -98,26 +99,17 @@ static void _thread_exit(void)
     rt_base_t level;
 
     /* get current thread */
+    LOG_D("line:%d thread:%s exit\n",__LINE__,rt_thread_self()->parent.name);
     thread = rt_thread_self();
-
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
-
-    /* remove from schedule */
-    rt_schedule_remove_thread(thread);
-
-    /* remove it from timer list */
+    rt_get_thread_struct(thread);
+    rt_thread_defunct_enqueue(thread);
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
     rt_timer_detach(&thread->thread_timer);
-
-    /* change stat */
-    thread->stat = RT_THREAD_CLOSE;
-
     /* insert to defunct thread list */
-    rt_thread_defunct_enqueue(thread);
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
+    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+    LOG_D("line:%d thread:%s exit\n",__LINE__,rt_thread_self()->parent.name);
+    rt_put_thread_struct(thread);
+    thread->stat = RT_THREAD_CLOSE;
     /* switch to next task */
     rt_schedule();
 }
@@ -139,22 +131,15 @@ static void _thread_timeout(void *parameter)
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
-
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
-
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
     /* set error number */
     thread->error = -RT_ETIMEOUT;
 
     /* remove from suspend list */
     rt_list_remove(&(thread->tlist));
-
+    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
     /* insert to schedule ready list */
     rt_schedule_insert_thread(thread);
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
     /* do schedule */
     rt_schedule();
 }
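
_thread_timeout now follows a narrower discipline: per-thread state (the error code, the suspend-list unlink) changes under thread->spinlock, while the scheduler calls run with the lock released. The skeleton of that pattern, for reference (illustrative, not the kernel source):

```c
#include <rthw.h>
#include <rtthread.h>

static void timeout_sketch(rt_thread_t thread)
{
    rt_base_t level;

    level = rt_spin_lock_irqsave(&(thread->spinlock));
    thread->error = -RT_ETIMEOUT;        /* per-thread state: locked */
    rt_list_remove(&(thread->tlist));    /* off the suspend list */
    rt_spin_unlock_irqrestore(&(thread->spinlock), level);

    rt_schedule_insert_thread(thread);   /* scheduler takes its own lock */
    rt_schedule();
}
```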
@@ -170,6 +155,7 @@ static rt_err_t _thread_init(struct rt_thread *thread,
 {
     /* init thread list */
     rt_list_init(&(thread->tlist));
+    rt_list_init(&(thread->tlist_schedule));
 
 #ifdef RT_USING_SMART
     thread->wakeup.func = RT_NULL;
@@ -217,8 +203,8 @@ static rt_err_t _thread_init(struct rt_thread *thread,
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
 
     /* tick init */
-    thread->init_tick      = tick;
-    thread->remaining_tick = tick;
+    rt_atomic_store(&thread->init_tick, tick);
+    rt_atomic_store(&thread->remaining_tick, tick);
 
     /* error and flags */
     thread->error = RT_EOK;
@@ -230,9 +216,8 @@ static rt_err_t _thread_init(struct rt_thread *thread,
     thread->oncpu = RT_CPU_DETACHED;
 
     /* lock init */
-    thread->scheduler_lock_nest = 0;
-    thread->cpus_lock_nest = 0;
-    thread->critical_lock_nest = 0;
+    rt_atomic_store(&thread->cpus_lock_nest, 0);
+    rt_atomic_store(&thread->critical_lock_nest, 0);
 #endif /* RT_USING_SMP */
 
     /* initialize cleanup function and user data */
@@ -260,7 +245,9 @@ static rt_err_t _thread_init(struct rt_thread *thread,
 #endif /* RT_USING_SIGNALS */
 
 #ifdef RT_USING_SMART
+    thread->tid_ref_count = 0;
     thread->lwp = RT_NULL;
+    thread->susp_recycler = RT_NULL;
     rt_list_init(&(thread->sibling));
 
     /* lwp thread-signal init */
@@ -286,6 +273,8 @@ static rt_err_t _thread_init(struct rt_thread *thread,
 #ifdef RT_USING_MODULE
     thread->parent.module_id = 0;
 #endif /* RT_USING_MODULE */
+    rt_atomic_store(&thread->ref_count, 0);
+    rt_spin_lock_init(&thread->spinlock);
 
     RT_OBJECT_HOOK_CALL(rt_thread_inited_hook, (thread));
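
_thread_init now seeds every lock-free field explicitly. Pulled together in one place, the new initialization amounts to the following (the field names are from this hunk; the helper itself is hypothetical):

```c
#include <rthw.h>
#include <rtthread.h>

static void thread_init_sync_fields(rt_thread_t thread, rt_tick_t tick)
{
    rt_atomic_store(&thread->init_tick, tick);
    rt_atomic_store(&thread->remaining_tick, tick);
#ifdef RT_USING_SMP
    rt_atomic_store(&thread->cpus_lock_nest, 0);
    rt_atomic_store(&thread->critical_lock_nest, 0);
#endif /* RT_USING_SMP */
    rt_atomic_store(&thread->ref_count, 0);
    rt_spin_lock_init(&thread->spinlock);
}
```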
 
@@ -439,7 +428,7 @@ rt_err_t rt_thread_detach(rt_thread_t thread)
     }
 
     /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
 
     /* release thread timer */
     rt_timer_detach(&(thread->thread_timer));
@@ -456,13 +445,11 @@ rt_err_t rt_thread_detach(rt_thread_t thread)
         thread->pending_object = RT_NULL;
     }
 #endif
+    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
 
     /* insert to defunct thread list */
     rt_thread_defunct_enqueue(thread);
 
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
     return RT_EOK;
 }
 RTM_EXPORT(rt_thread_detach);
@@ -550,9 +537,7 @@ rt_err_t rt_thread_delete(rt_thread_t thread)
         /* remove from schedule */
         rt_schedule_remove_thread(thread);
     }
-
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
 
     /* release thread timer */
     rt_timer_detach(&(thread->thread_timer));
@@ -569,13 +554,10 @@ rt_err_t rt_thread_delete(rt_thread_t thread)
         thread->pending_object = RT_NULL;
     }
 #endif
-
+    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
     /* insert to defunct thread list */
     rt_thread_defunct_enqueue(thread);
 
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
     return RT_EOK;
 }
 RTM_EXPORT(rt_thread_delete);
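
rt_thread_detach and rt_thread_delete now share the same shape: mutate the thread under its spinlock, release the lock, then enqueue on the defunct list, which does its own locking. A condensed, illustrative restatement:

```c
#include <rthw.h>
#include <rtthread.h>

/* Sketch of the shared teardown tail; the pending_object cleanup from
 * the real functions is omitted for brevity. */
static void thread_teardown_sketch(rt_thread_t thread)
{
    rt_base_t level;

    level = rt_spin_lock_irqsave(&(thread->spinlock));
    rt_timer_detach(&(thread->thread_timer));   /* stop its timer first */
    rt_spin_unlock_irqrestore(&(thread->spinlock), level);

    /* the defunct queue locks internally, so enqueue outside our lock */
    rt_thread_defunct_enqueue(thread);
}
```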
@@ -595,10 +577,10 @@ rt_err_t rt_thread_yield(void)
     rt_base_t level;
 
     thread = rt_thread_self();
-    level = rt_hw_interrupt_disable();
-    thread->remaining_tick = thread->init_tick;
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
+    rt_atomic_store(&thread->remaining_tick, thread->init_tick);
     thread->stat |= RT_THREAD_STAT_YIELD;
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
     rt_schedule();
 
     return RT_EOK;
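
For reference, a typical caller of rt_thread_yield; the worker loop is illustrative:

```c
#include <rtthread.h>

/* A worker that yields between work items so same-priority peers can
 * interleave; rt_thread_yield() resets its tick budget as shown above. */
static void worker_entry(void *param)
{
    (void)param;
    while (1)
    {
        /* ... process one unit of work ... */
        rt_thread_yield();
    }
}
```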
@@ -616,7 +598,7 @@ RTM_EXPORT(rt_thread_yield);
  */
 rt_err_t rt_thread_sleep(rt_tick_t tick)
 {
-    rt_base_t level;
+    rt_base_t level, level_local;
     struct rt_thread *thread;
     int err;
 
@@ -632,16 +614,12 @@ rt_err_t rt_thread_sleep(rt_tick_t tick)
 
     /* current context checking */
     RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
-
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
-
     /* reset thread error */
     thread->error = RT_EOK;
-
+    level_local = rt_hw_local_irq_disable();
     /* suspend thread */
     err = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
-
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
     /* reset the timeout of thread timer and start it */
     if (err == RT_EOK)
     {
@@ -649,8 +627,8 @@ rt_err_t rt_thread_sleep(rt_tick_t tick)
         rt_timer_start(&(thread->thread_timer));
 
         /* enable interrupt */
-        rt_hw_interrupt_enable(level);
-
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+        rt_hw_local_irq_enable(level_local);
         thread->error = -RT_EINTR;
 
         rt_schedule();
@@ -661,7 +639,8 @@ rt_err_t rt_thread_sleep(rt_tick_t tick)
     }
     else
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
+        rt_hw_local_irq_enable(level_local);
     }
 
     return err;
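
rt_thread_sleep now layers two mechanisms: local IRQs stay disabled across the suspend and the timer arming, while thread->spinlock covers only the timer manipulation itself. A condensed sketch of that shape (the -RT_EINTR bookkeeping is trimmed; illustrative only):

```c
#include <rthw.h>
#include <rtthread.h>

static rt_err_t sleep_sketch(rt_thread_t thread, rt_tick_t tick)
{
    rt_base_t level, level_local;
    rt_err_t err;

    level_local = rt_hw_local_irq_disable();   /* no local preemption */
    err = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);

    level = rt_spin_lock_irqsave(&(thread->spinlock));
    if (err == RT_EOK)
    {
        rt_timer_control(&(thread->thread_timer),
                         RT_TIMER_CTRL_SET_TIME, &tick);
        rt_timer_start(&(thread->thread_timer));
    }
    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
    rt_hw_local_irq_enable(level_local);

    if (err == RT_EOK)
    {
        rt_schedule();                         /* actually go to sleep */
    }
    return err;
}
```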
@@ -705,7 +684,7 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
 
     /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
 
     /* reset thread error */
     thread->error = RT_EOK;
@@ -717,16 +696,14 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
 
         *tick += inc_tick;
         left_tick = *tick - cur_tick;
-
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
         /* suspend thread */
         rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
-
+        level = rt_spin_lock_irqsave(&(thread->spinlock));
         /* reset the timeout of thread timer and start it */
         rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME, &left_tick);
         rt_timer_start(&(thread->thread_timer));
-
-        /* enable interrupt */
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
 
         rt_schedule();
 
@@ -739,7 +716,7 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     else
     {
         *tick = cur_tick;
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
     }
 
     return thread->error;
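
rt_thread_delay_until keeps a drift-free period because the caller owns the reference tick and it is advanced by inc_tick each round. Typical usage (the 100-tick period is an arbitrary example value):

```c
#include <rtthread.h>

static void periodic_entry(void *param)
{
    rt_tick_t next = rt_tick_get();

    (void)param;
    while (1)
    {
        /* ... periodic work: sample a sensor, kick a watchdog ... */
        rt_thread_delay_until(&next, 100);  /* wake every 100 ticks */
    }
}
```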
@@ -774,7 +751,6 @@ static void rt_thread_cpu_bind(rt_thread_t thread, int cpu)
         cpu = RT_CPUS_NR;
     }
 
-    level = rt_hw_interrupt_disable();
     if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
     {
         /* unbind */
@@ -791,6 +767,7 @@ static void rt_thread_cpu_bind(rt_thread_t thread, int cpu)
     }
     else
     {
+        level = rt_spin_lock_irqsave(&(thread->spinlock));
         thread->bind_cpu = cpu;
         if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
         {
@@ -807,7 +784,9 @@ static void rt_thread_cpu_bind(rt_thread_t thread, int cpu)
                         /* bind to other cpu */
                         rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << cpu);
                         /* self cpu need reschedule */
+                        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
                         rt_schedule();
+                        level = rt_spin_lock_irqsave(&(thread->spinlock));
                     }
                     /* else do nothing */
                 }
@@ -819,8 +798,8 @@ static void rt_thread_cpu_bind(rt_thread_t thread, int cpu)
             }
             /* else do nothing */
         }
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
     }
-    rt_hw_interrupt_enable(level);
 }
 #endif
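
The unlock/relock around rt_schedule() in the bind path above is the rule, not an accident: a thread must never be switched out while holding a spinlock, or the next thread would run with that lock still held. The pattern in isolation (illustrative):

```c
#include <rthw.h>
#include <rtthread.h>

static void reschedule_under_lock(rt_thread_t thread)
{
    rt_base_t level;

    level = rt_spin_lock_irqsave(&(thread->spinlock));
    /* ... state changes that make a reschedule necessary ... */
    rt_spin_unlock_irqrestore(&(thread->spinlock), level);

    rt_schedule();                    /* safe: no spinlock held */

    level = rt_spin_lock_irqsave(&(thread->spinlock));
    /* ... finish the remaining updates under the lock ... */
    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
}
```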
 
@@ -856,15 +835,12 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
     {
         case RT_THREAD_CTRL_CHANGE_PRIORITY:
         {
-            /* disable interrupt */
-            level = rt_hw_interrupt_disable();
-
             /* for ready thread, change queue */
             if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
             {
                 /* remove thread from schedule queue first */
                 rt_schedule_remove_thread(thread);
-
+                level = rt_spin_lock_irqsave(&(thread->spinlock));
                 /* change thread priority */
                 thread->current_priority = *(rt_uint8_t *)arg;
 
@@ -876,12 +852,14 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
     #else
                 thread->number_mask = 1 << thread->current_priority;
     #endif /* RT_THREAD_PRIORITY_MAX > 32 */
-
+                thread->stat = RT_THREAD_INIT;
+                rt_spin_unlock_irqrestore(&(thread->spinlock), level);
                 /* insert thread to schedule queue again */
                 rt_schedule_insert_thread(thread);
             }
             else
             {
+                level = rt_spin_lock_irqsave(&(thread->spinlock));
                 thread->current_priority = *(rt_uint8_t *)arg;
 
                 /* recalculate priority attribute */
@@ -892,10 +870,8 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
     #else
                 thread->number_mask = 1 << thread->current_priority;
     #endif /* RT_THREAD_PRIORITY_MAX > 32 */
+                rt_spin_unlock_irqrestore(&(thread->spinlock), level);
             }
-
-            /* enable interrupt */
-            rt_hw_interrupt_enable(level);
             break;
         }
 
@@ -927,10 +903,10 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
         {
             rt_uint8_t cpu;
 
-        cpu = (rt_uint8_t)(size_t)arg;
-        rt_thread_cpu_bind(thread, cpu);
-        break;
-    }
+            cpu = (rt_uint8_t)(size_t)arg;
+            rt_thread_cpu_bind(thread, cpu);
+            break;
+        }
 #endif /*RT_USING_SMP*/
     default:
         break;
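
Usage of the control interface is unchanged by this patch; a priority change, for example, still goes through RT_THREAD_CTRL_CHANGE_PRIORITY (the value 10 is an arbitrary example):

```c
#include <rtthread.h>

static void bump_priority(rt_thread_t thread)
{
    rt_uint8_t prio = 10;

    rt_thread_control(thread, RT_THREAD_CTRL_CHANGE_PRIORITY, &prio);
}
```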
@@ -995,15 +971,16 @@ rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag)
 
     LOG_D("thread suspend:  %s", thread->parent.name);
 
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
+
     stat = thread->stat & RT_THREAD_STAT_MASK;
     if ((stat != RT_THREAD_READY) && (stat != RT_THREAD_RUNNING))
     {
         LOG_D("thread suspend: thread disorder, 0x%2x", thread->stat);
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
         return -RT_ERROR;
     }
 
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
     if (stat == RT_THREAD_RUNNING)
     {
         /* not suspend running status thread on other core */
@@ -1013,20 +990,20 @@ rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag)
     if (lwp_thread_signal_suspend_check(thread, suspend_flag) == 0)
     {
         /* not to suspend */
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(&(thread->spinlock), level);
         return -RT_EINTR;
     }
 #endif
 
-    /* change thread stat */
+    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
     rt_schedule_remove_thread(thread);
+    level = rt_spin_lock_irqsave(&(thread->spinlock));
+
     rt_thread_set_suspend_state(thread, suspend_flag);
 
     /* stop thread timer anyway */
     rt_timer_stop(&(thread->thread_timer));
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
 
     RT_OBJECT_HOOK_CALL(rt_thread_suspend_hook, (thread));
     return RT_EOK;
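
The suspend path drops thread->spinlock around rt_schedule_remove_thread() because the scheduler layer takes its own lock; nesting the two in the opposite order on another CPU could deadlock. The resulting shape, condensed (illustrative):

```c
#include <rthw.h>
#include <rtthread.h>

static void suspend_sketch(rt_thread_t thread, int suspend_flag)
{
    rt_base_t level;

    level = rt_spin_lock_irqsave(&(thread->spinlock));
    /* ... validate thread->stat under the lock ... */
    rt_spin_unlock_irqrestore(&(thread->spinlock), level);

    rt_schedule_remove_thread(thread);   /* scheduler's own locking */

    level = rt_spin_lock_irqsave(&(thread->spinlock));
    rt_thread_set_suspend_state(thread, suspend_flag);
    rt_timer_stop(&(thread->thread_timer));
    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
}
```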
@@ -1065,8 +1042,7 @@ rt_err_t rt_thread_resume(rt_thread_t thread)
         return -RT_ERROR;
     }
 
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&(thread->spinlock)); /* TODO: need lock for cpu */
 
     /* remove from suspend list */
     rt_list_remove(&(thread->tlist));
@@ -1077,9 +1053,7 @@ rt_err_t rt_thread_resume(rt_thread_t thread)
     thread->wakeup.func = RT_NULL;
 #endif
 
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
+    rt_spin_unlock_irqrestore(&(thread->spinlock), level);
     /* insert to schedule ready list */
     rt_schedule_insert_thread(thread);
 
@@ -1100,22 +1074,22 @@ rt_err_t rt_thread_wakeup(rt_thread_t thread)
 {
     register rt_base_t temp;
     rt_err_t ret;
+    rt_wakeup_func_t func = RT_NULL;
 
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
-    /* disable interrupt */
-    temp = rt_hw_interrupt_disable();
-    if (thread->wakeup.func)
+    temp = rt_spin_lock_irqsave(&(thread->spinlock));
+    func = thread->wakeup.func;
+    thread->wakeup.func = RT_NULL;
+    rt_spin_unlock_irqrestore(&(thread->spinlock), temp);
+    if (func)
     {
-        ret = thread->wakeup.func(thread->wakeup.user_data, thread);
-        thread->wakeup.func = RT_NULL;
+        ret = func(thread->wakeup.user_data, thread);
     }
     else
     {
         ret = rt_thread_resume(thread);
     }
-
-    rt_hw_interrupt_enable(temp);
     return ret;
 }
 RTM_EXPORT(rt_thread_wakeup);
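
rt_thread_wakeup now snapshots and clears the callback under the lock, then invokes it after unlocking, so the callback may itself take thread locks and can fire at most once per arming. The pattern in isolation (illustrative):

```c
#include <rthw.h>
#include <rtthread.h>

static rt_err_t wakeup_sketch(rt_thread_t thread)
{
    rt_wakeup_func_t func;
    rt_base_t level;

    level = rt_spin_lock_irqsave(&(thread->spinlock));
    func = thread->wakeup.func;
    thread->wakeup.func = RT_NULL;        /* claim it exactly once */
    rt_spin_unlock_irqrestore(&(thread->spinlock), level);

    if (func)
    {
        return func(thread->wakeup.user_data, thread);
    }
    return rt_thread_resume(thread);
}
```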
@@ -1127,10 +1101,10 @@ void rt_thread_wakeup_set(struct rt_thread *thread, rt_wakeup_func_t func, void*
     RT_ASSERT(thread != RT_NULL);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
 
-    temp = rt_hw_interrupt_disable();
+    temp = rt_spin_lock_irqsave(&(thread->spinlock));
     thread->wakeup.func = func;
     thread->wakeup.user_data = user_data;
-    rt_hw_interrupt_enable(temp);
+    rt_spin_unlock_irqrestore(&(thread->spinlock), temp);
 }
 RTM_EXPORT(rt_thread_wakeup_set);
 #endif

+ 92 - 73
src/timer.c

@@ -19,6 +19,7 @@
  * 2021-08-15     supperthomas add the comment
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to timer.c
  * 2022-04-19     Stanley      Correct descriptions
+ * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  */
 
 #include <rtthread.h>
@@ -30,6 +31,7 @@
 
 /* hard timer list */
 static rt_list_t _timer_list[RT_TIMER_SKIP_LIST_LEVEL];
+static struct rt_spinlock _hard_spinlock;
 
 #ifdef RT_USING_TIMER_SOFT
 
@@ -48,6 +50,7 @@ static rt_list_t _timer_list[RT_TIMER_SKIP_LIST_LEVEL];
 static rt_uint8_t _soft_timer_status = RT_SOFT_TIMER_IDLE;
 /* soft timer list */
 static rt_list_t _soft_timer_list[RT_TIMER_SKIP_LIST_LEVEL];
+static struct rt_spinlock _soft_spinlock;
 static struct rt_thread _timer_thread;
 rt_align(RT_ALIGN_SIZE)
 static rt_uint8_t _timer_thread_stack[RT_TIMER_THREAD_STACK_SIZE];
@@ -161,26 +164,14 @@ static void _timer_init(rt_timer_t timer,
 static rt_err_t _timer_list_next_timeout(rt_list_t timer_list[], rt_tick_t *timeout_tick)
 {
     struct rt_timer *timer;
-    rt_base_t level;
-
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
 
     if (!rt_list_isempty(&timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
     {
         timer = rt_list_entry(timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1].next,
                               struct rt_timer, row[RT_TIMER_SKIP_LIST_LEVEL - 1]);
         *timeout_tick = timer->timeout_tick;
-
-        /* enable interrupt */
-        rt_hw_interrupt_enable(level);
-
         return RT_EOK;
     }
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
     return -RT_ERROR;
 }
 
@@ -294,22 +285,31 @@ RTM_EXPORT(rt_timer_init);
 rt_err_t rt_timer_detach(rt_timer_t timer)
 {
     rt_base_t level;
+    struct rt_spinlock *spinlock;
 
     /* parameter check */
     RT_ASSERT(timer != RT_NULL);
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
     RT_ASSERT(rt_object_is_systemobject(&timer->parent));
 
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+#ifdef RT_USING_TIMER_SOFT
+    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
+    {
+        spinlock = &_soft_spinlock;
+    }
+    else
+#endif /* RT_USING_TIMER_SOFT */
+    {
+        spinlock = &_hard_spinlock;
+    }
+
+    level = rt_spin_lock_irqsave(spinlock);
 
     _timer_remove(timer);
     /* stop timer */
     timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
 
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
+    rt_spin_unlock_irqrestore(spinlock, level);
     rt_object_detach(&(timer->parent));
 
     return RT_EOK;
@@ -378,22 +378,30 @@ RTM_EXPORT(rt_timer_create);
 rt_err_t rt_timer_delete(rt_timer_t timer)
 {
     rt_base_t level;
+    struct rt_spinlock *spinlock;
 
     /* parameter check */
     RT_ASSERT(timer != RT_NULL);
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
     RT_ASSERT(rt_object_is_systemobject(&timer->parent) == RT_FALSE);
 
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+#ifdef RT_USING_TIMER_SOFT
+    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
+    {
+        spinlock = &_soft_spinlock;
+    }
+    else
+#endif /* RT_USING_TIMER_SOFT */
+    {
+        spinlock = &_hard_spinlock;
+    }
+
+    level = rt_spin_lock_irqsave(spinlock);
 
     _timer_remove(timer);
     /* stop timer */
     timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
+    rt_spin_unlock_irqrestore(spinlock, level);
     rt_object_delete(&(timer->parent));
 
     return RT_EOK;
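
The soft/hard lock selection is now repeated verbatim in rt_timer_detach, rt_timer_delete, rt_timer_stop and (reordered) rt_timer_start. Inside timer.c, where _soft_spinlock and _hard_spinlock are visible, it could be factored into a helper along these lines; the helper name is hypothetical:

```c
#include <rtthread.h>

static struct rt_spinlock *timer_spinlock_of(rt_timer_t timer)
{
#ifdef RT_USING_TIMER_SOFT
    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
    {
        return &_soft_spinlock;
    }
#endif /* RT_USING_TIMER_SOFT */
    return &_hard_spinlock;
}
```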
@@ -413,41 +421,42 @@ rt_err_t rt_timer_start(rt_timer_t timer)
     unsigned int row_lvl;
     rt_list_t *timer_list;
     rt_base_t level;
-    rt_bool_t need_schedule;
     rt_list_t *row_head[RT_TIMER_SKIP_LIST_LEVEL];
     unsigned int tst_nr;
     static unsigned int random_nr;
+    struct rt_spinlock *spinlock;
 
     /* parameter check */
     RT_ASSERT(timer != RT_NULL);
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
 
-    need_schedule = RT_FALSE;
-
-    /* stop timer firstly */
-    level = rt_hw_interrupt_disable();
-    /* remove timer from list */
-    _timer_remove(timer);
-    /* change status of timer */
-    timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
-
-    RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(timer->parent)));
-
-    timer->timeout_tick = rt_tick_get() + timer->init_tick;
 
 #ifdef RT_USING_TIMER_SOFT
     if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
     {
         /* insert timer to soft timer list */
         timer_list = _soft_timer_list;
+        spinlock = &_soft_spinlock;
     }
     else
 #endif /* RT_USING_TIMER_SOFT */
     {
         /* insert timer to system timer list */
         timer_list = _timer_list;
+        spinlock = &_hard_spinlock;
     }
 
+    /* stop timer firstly */
+    level = rt_spin_lock_irqsave(spinlock);
+    /* remove timer from list */
+    _timer_remove(timer);
+    /* change status of timer */
+    timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
+
+    RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(timer->parent)));
+
+    timer->timeout_tick = rt_tick_get() + timer->init_tick;
+
     row_head[0]  = &timer_list[0];
     for (row_lvl = 0; row_lvl < RT_TIMER_SKIP_LIST_LEVEL; row_lvl++)
     {
@@ -510,18 +519,12 @@ rt_err_t rt_timer_start(rt_timer_t timer)
         {
             /* resume timer thread to check soft timer */
             rt_thread_resume(&_timer_thread);
-            need_schedule = RT_TRUE;
         }
     }
 #endif /* RT_USING_TIMER_SOFT */
 
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(spinlock, level);
 
-    if (need_schedule)
-    {
-        rt_schedule();
-    }
 
     return RT_EOK;
 }
@@ -537,28 +540,35 @@ RTM_EXPORT(rt_timer_start);
 rt_err_t rt_timer_stop(rt_timer_t timer)
 {
     rt_base_t level;
-
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    struct rt_spinlock *spinlock;
 
     /* timer check */
     RT_ASSERT(timer != RT_NULL);
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
 
+#ifdef RT_USING_TIMER_SOFT
+    if (timer->parent.flag & RT_TIMER_FLAG_SOFT_TIMER)
+    {
+        spinlock = &_soft_spinlock;
+    }
+    else
+#endif /* RT_USING_TIMER_SOFT */
+    {
+        spinlock = &_hard_spinlock;
+    }
+    level = rt_spin_lock_irqsave(spinlock);
+
     if (!(timer->parent.flag & RT_TIMER_FLAG_ACTIVATED))
     {
-        rt_hw_interrupt_enable(level);
+        rt_spin_unlock_irqrestore(spinlock, level);
         return -RT_ERROR;
     }
-
     RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(timer->parent)));
 
     _timer_remove(timer);
     /* change status */
     timer->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
+    rt_spin_unlock_irqrestore(spinlock, level);
 
     return RT_EOK;
 }
@@ -575,13 +585,10 @@ RTM_EXPORT(rt_timer_stop);
  */
 rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg)
 {
-    rt_base_t level;
-
     /* parameter check */
     RT_ASSERT(timer != RT_NULL);
     RT_ASSERT(rt_object_get_type(&timer->parent) == RT_Object_Class_Timer);
 
-    level = rt_hw_interrupt_disable();
     switch (cmd)
     {
     case RT_TIMER_CTRL_GET_TIME:
@@ -636,7 +643,6 @@ rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg)
     default:
         break;
     }
-    rt_hw_interrupt_enable(level);
 
     return RT_EOK;
 }
@@ -655,14 +661,20 @@ void rt_timer_check(void)
     rt_base_t level;
     rt_list_t list;
 
+#ifdef RT_USING_SMP
+    if (rt_hw_cpu_id() != 0)
+    {
+        return;
+    }
+#endif
+
     rt_list_init(&list);
 
     LOG_D("timer check enter");
 
     current_tick = rt_tick_get();
 
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_hard_spinlock);
 
     while (!rt_list_isempty(&_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
     {
@@ -685,6 +697,7 @@ void rt_timer_check(void)
             }
             /* add timer to temporary list  */
             rt_list_insert_after(&list, &(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
+            rt_spin_unlock_irqrestore(&_hard_spinlock, level);
             /* call timeout function */
             t->timeout_func(t->parameter);
 
@@ -693,7 +706,7 @@ void rt_timer_check(void)
 
             RT_OBJECT_HOOK_CALL(rt_timer_exit_hook, (t));
             LOG_D("current tick: %d", current_tick);
-
+            level = rt_spin_lock_irqsave(&_hard_spinlock);
             /* Check whether the timer object is detached or started again */
             if (rt_list_isempty(&list))
             {
@@ -710,10 +723,7 @@ void rt_timer_check(void)
         }
         else break;
     }
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
-
+    rt_spin_unlock_irqrestore(&_hard_spinlock, level);
     LOG_D("timer check leave");
 }
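
rt_timer_check keeps the classic unlock-around-callback discipline: an expired timer is parked on a stack-local list, the list lock is dropped while user code runs, then re-taken before the scan continues. The skeleton of that loop (illustrative; the real function also handles periodic re-arming and restarted timers):

```c
#include <rthw.h>
#include <rtthread.h>

static void check_sketch(struct rt_spinlock *lock, rt_list_t *expired)
{
    rt_base_t level = rt_spin_lock_irqsave(lock);

    while (!rt_list_isempty(expired))
    {
        rt_timer_t t = rt_list_entry(expired->next, struct rt_timer,
                                     row[RT_TIMER_SKIP_LIST_LEVEL - 1]);

        rt_list_remove(&(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
        rt_spin_unlock_irqrestore(lock, level);

        t->timeout_func(t->parameter);       /* user code: lock dropped */

        level = rt_spin_lock_irqsave(lock);  /* re-take, re-validate */
    }
    rt_spin_unlock_irqrestore(lock, level);
}
```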
 
@@ -724,8 +734,13 @@ void rt_timer_check(void)
  */
 rt_tick_t rt_timer_next_timeout_tick(void)
 {
+    rt_base_t level;
     rt_tick_t next_timeout = RT_TICK_MAX;
+
+    level = rt_spin_lock_irqsave(&_hard_spinlock);
     _timer_list_next_timeout(_timer_list, &next_timeout);
+    rt_spin_unlock_irqrestore(&_hard_spinlock, level);
+
     return next_timeout;
 }
 
@@ -742,11 +757,8 @@ void rt_soft_timer_check(void)
     rt_list_t list;
 
     rt_list_init(&list);
-
     LOG_D("software timer check enter");
-
-    /* disable interrupt */
-    level = rt_hw_interrupt_disable();
+    level = rt_spin_lock_irqsave(&_soft_spinlock);
 
     while (!rt_list_isempty(&_soft_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
     {
@@ -773,8 +785,8 @@ void rt_soft_timer_check(void)
             rt_list_insert_after(&list, &(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
 
             _soft_timer_status = RT_SOFT_TIMER_BUSY;
-            /* enable interrupt */
-            rt_hw_interrupt_enable(level);
+
+            rt_spin_unlock_irqrestore(&_soft_spinlock, level);
 
             /* call timeout function */
             t->timeout_func(t->parameter);
@@ -782,8 +794,7 @@ void rt_soft_timer_check(void)
             RT_OBJECT_HOOK_CALL(rt_timer_exit_hook, (t));
             LOG_D("current tick: %d", current_tick);
 
-            /* disable interrupt */
-            level = rt_hw_interrupt_disable();
+            level = rt_spin_lock_irqsave(&_soft_spinlock);
 
             _soft_timer_status = RT_SOFT_TIMER_IDLE;
             /* Check whether the timer object is detached or started again */
@@ -802,8 +813,8 @@ void rt_soft_timer_check(void)
         }
         else break; /* not check anymore */
     }
-    /* enable interrupt */
-    rt_hw_interrupt_enable(level);
+
+    rt_spin_unlock_irqrestore(&_soft_spinlock, level);
 
     LOG_D("software timer check leave");
 }
@@ -815,12 +826,18 @@ void rt_soft_timer_check(void)
  */
 static void _timer_thread_entry(void *parameter)
 {
+    rt_err_t ret = -RT_ERROR;
     rt_tick_t next_timeout;
+    rt_base_t level;
 
     while (1)
     {
         /* get the next timeout tick */
-        if (_timer_list_next_timeout(_soft_timer_list, &next_timeout) != RT_EOK)
+        level = rt_spin_lock_irqsave(&_soft_spinlock);
+        ret = _timer_list_next_timeout(_soft_timer_list, &next_timeout);
+        rt_spin_unlock_irqrestore(&_soft_spinlock, level);
+
+        if (ret != RT_EOK)
         {
             /* no software timer exist, suspend self. */
             rt_thread_suspend_with_flag(rt_thread_self(), RT_UNINTERRUPTIBLE);
@@ -860,6 +877,7 @@ void rt_system_timer_init(void)
     {
         rt_list_init(_timer_list + i);
     }
+    rt_spin_lock_init(&_hard_spinlock);
 }
 
 /**
@@ -878,6 +896,7 @@ void rt_system_timer_thread_init(void)
     {
         rt_list_init(_soft_timer_list + i);
     }
+    rt_spin_lock_init(&_soft_spinlock);
 
     /* start software timer thread */
     rt_thread_init(&_timer_thread,

Too many files changed in this diff, so some files are not shown.