
[kernel] fixup of data races when accessing per-CPU objects (#8475)

Signed-off-by: Shell <smokewood@qq.com>
Shell committed 1 year ago
commit cc157baf23
7 changed files with 44 additions and 9 deletions
  1. include/rtthread.h (+10 -0)
  2. libcpu/aarch64/common/context_gcc.S (+9 -6)
  3. src/clock.c (+2 -0)
  4. src/cpu.c (+1 -0)
  5. src/irq.c (+8 -1)
  6. src/scheduler_mp.c (+12 -2)
  7. src/timer.c (+2 -0)

+ 10 - 0
include/rtthread.h

@@ -835,6 +835,16 @@ while (0)
 #define RT_DEBUG_SCHEDULER_AVAILABLE(need_check)
 #endif /* RT_DEBUGING_CONTEXT */
 
+rt_inline rt_bool_t rt_in_thread_context(void)
+{
+    return rt_thread_self() != RT_NULL && rt_interrupt_get_nest() == 0;
+}
+
+rt_inline rt_bool_t rt_scheduler_is_available(void)
+{
+    return !rt_hw_interrupt_is_disabled() && rt_critical_level() == 0 && rt_in_thread_context();
+}
+
 /**@}*/
 
 #ifdef __cplusplus

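The two helpers above encode the commit's central invariant: per-CPU objects may only be touched while the scheduler cannot move the caller to another core. A minimal usage sketch, not part of the patch and assuming an SMP build (RT_USING_SMP); the per-CPU array and function name are illustrative:

    #include <rthw.h>
    #include <rtthread.h>

    /* hypothetical per-CPU statistic, one slot per core */
    static rt_uint32_t per_cpu_hits[RT_CPUS_NR];

    static void count_local_hit(void)
    {
        rt_base_t level;

        /* mask local IRQs so the thread cannot be preempted or migrated
         * while it touches its own core's slot */
        level = rt_hw_local_irq_disable();

        /* with IRQs masked, rt_scheduler_is_available() is false, which is
         * exactly what rt_cpu_self() now asserts (see src/cpu.c below) */
        RT_ASSERT(!rt_scheduler_is_available());
        per_cpu_hits[rt_hw_cpu_id()]++;

        rt_hw_local_irq_enable(level);
    }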
+ 9 - 6
libcpu/aarch64/common/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2024, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -7,6 +7,7 @@
  * Date           Author       Notes
  * 2021-05-18     Jesven       the first version
  * 2023-06-24     WangXiaoyao  Support backtrace for user thread
+ * 2024-01-06     Shell        Fix barrier on irq_disable/enable
  */
 
 #ifndef __ASSEMBLY__
@@ -36,8 +37,8 @@ int rt_hw_cpu_id(void)
 .weak rt_hw_cpu_id
 .type rt_hw_cpu_id, @function
 rt_hw_cpu_id:
-   mrs x0, tpidr_el1           /* MPIDR_EL1: Multi-Processor Affinity Register */
-   ret
+    mrs x0, tpidr_el1
+    ret
 
 /*
 void rt_hw_set_process_id(size_t id)
@@ -314,7 +315,8 @@ rt_hw_interrupt_is_disabled:
 rt_hw_interrupt_disable:
     MRS     X0, DAIF
     MSR     DAIFSet, #3
-    DSB     SY
+    DSB     NSH
+    ISB
     RET
 
 /*
@@ -322,7 +324,8 @@ rt_hw_interrupt_disable:
  */
 .globl rt_hw_interrupt_enable
 rt_hw_interrupt_enable:
-    DSB     SY
+    ISB
+    DSB     NSH
     AND     X0, X0, #0xc0
     MRS     X1, DAIF
     BIC     X1, X1, #0xc0
@@ -580,4 +583,4 @@ START_POINT(vector_serror)
     STP     X0, X1, [SP, #-0x10]!
     BL      rt_hw_trap_serror
     b .
-START_POINT_END(vector_exception)
+START_POINT_END(vector_serror)

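The barrier change replaces the full-system DSB SY with DSB NSH plus an ISB, keeping the synchronization local to the core that is masking or unmasking its own IRQs. A sketch of the save/restore caller pattern these routines serve; the function name is illustrative and nothing beyond the standard rt_hw_interrupt_disable()/rt_hw_interrupt_enable() API is assumed:

    #include <rthw.h>
    #include <rtthread.h>

    /* illustrative function; any code using the save/restore pattern works */
    static void touch_state_shared_with_local_isr(void)
    {
        rt_base_t level;

        /* save the current DAIF bits and mask IRQs on this core */
        level = rt_hw_interrupt_disable();

        /* protected region: state shared with local interrupt handlers
         * cannot be observed half-updated by an ISR on this core */

        /* restore the DAIF bits saved in 'level' */
        rt_hw_interrupt_enable(level);
    }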
+ 2 - 0
src/clock.c

@@ -89,6 +89,8 @@ void rt_tick_increase(void)
     rt_base_t level;
     rt_atomic_t oldval = 0;
 
+    RT_ASSERT(rt_interrupt_get_nest() > 0);
+
     RT_OBJECT_HOOK_CALL(rt_tick_hook, ());
     /* increase the global tick */
 #ifdef RT_USING_SMP

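The new assertion makes the calling context explicit: rt_tick_increase() is only valid inside an interrupt, where the scheduler cannot run on the same core. A sketch of a typical BSP tick handler; the ISR name is illustrative, not from this patch:

    #include <rtthread.h>

    /* hypothetical board-level system tick handler */
    void bsp_systick_isr(void)
    {
        /* raises rt_interrupt_nest, so the new assert in
         * rt_tick_increase() holds */
        rt_interrupt_enter();

        rt_tick_increase();

        rt_interrupt_leave();
    }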
+ 1 - 0
src/cpu.c

@@ -136,6 +136,7 @@ RTM_EXPORT(rt_spin_unlock_irqrestore)
  */
 struct rt_cpu *rt_cpu_self(void)
 {
+    RT_ASSERT(!rt_scheduler_is_available());
     return &_cpus[rt_hw_cpu_id()];
 }
 

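The assertion documents rt_cpu_self()'s contract: the caller must already have made the scheduler unavailable (IRQs masked, interrupt context, or a critical section), otherwise the returned pointer can belong to a core the thread has since been migrated away from. A sketch of one way to satisfy the contract using the public critical-section API; the function name is illustrative:

    #include <rtthread.h>

    /* illustrative reader of the current core's rt_cpu structure */
    static void inspect_current_cpu(void)
    {
        struct rt_cpu *pcpu;

        /* a critical section makes rt_scheduler_is_available() false,
         * so the new assertion in rt_cpu_self() holds */
        rt_enter_critical();

        pcpu = rt_cpu_self();
        /* ... safely read or update per-core fields of pcpu here ... */
        (void)pcpu;

        rt_exit_critical();
    }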
+ 8 - 1
src/irq.c

@@ -13,6 +13,7 @@
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to irq.c
  * 2022-07-04     Yunjie       fix RT_DEBUG_LOG
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2024-01-05     Shell        Fixup of data racing in rt_interrupt_get_nest
  */
 
 #include <rthw.h>
@@ -114,7 +115,13 @@ RTM_EXPORT(rt_interrupt_leave);
  */
 rt_weak rt_uint8_t rt_interrupt_get_nest(void)
 {
-    return rt_atomic_load(&rt_interrupt_nest);
+    rt_uint8_t ret;
+    rt_base_t level;
+
+    level = rt_hw_local_irq_disable();
+    ret = rt_atomic_load(&rt_interrupt_nest);
+    rt_hw_local_irq_enable(level);
+    return ret;
 }
 RTM_EXPORT(rt_interrupt_get_nest);
 

+ 12 - 2
src/scheduler_mp.c

@@ -31,6 +31,7 @@
  * 2023-03-27     rose_man     Split into scheduler upc and scheduler_mp.c
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2023-12-10     xqyjlj       use rt_hw_spinlock
+ * 2024-01-05     Shell        Fixup of data racing in rt_critical_level
  */
 
 #include <rtthread.h>
@@ -848,8 +849,17 @@ RTM_EXPORT(rt_exit_critical);
  */
 rt_uint16_t rt_critical_level(void)
 {
-    struct rt_thread *current_thread = rt_cpu_self()->current_thread;
-    return rt_atomic_load(&(current_thread->critical_lock_nest));
+    rt_base_t level;
+    rt_uint16_t critical_lvl;
+    struct rt_thread *current_thread;
+
+    level = rt_hw_local_irq_disable();
+
+    current_thread = rt_cpu_self()->current_thread;
+    critical_lvl = rt_atomic_load(&(current_thread->critical_lock_nest));
+
+    rt_hw_local_irq_enable(level);
+    return critical_lvl;
 }
 RTM_EXPORT(rt_critical_level);
 

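Like rt_interrupt_get_nest(), rt_critical_level() now masks local IRQs while it samples per-CPU state, so the returned value was read on a single, stable core. A hedged sketch of one consumer, a debug guard before an operation that may block, in the spirit of the RT_DEBUG_SCHEDULER_AVAILABLE check shown in include/rtthread.h above; the surrounding function is illustrative:

    #include <rtthread.h>

    /* hypothetical helper that must only run where sleeping is legal */
    static void wait_for_resource(void)
    {
        /* blocking is only safe outside ISRs and critical sections */
        RT_ASSERT(rt_interrupt_get_nest() == 0);
        RT_ASSERT(rt_critical_level() == 0);

        rt_thread_mdelay(10);    /* e.g. poll every 10 ms */
    }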
+ 2 - 0
src/timer.c

@@ -658,6 +658,8 @@ void rt_timer_check(void)
     rt_base_t level;
     rt_list_t list;
 
+    RT_ASSERT(rt_interrupt_get_nest() > 0);
+
 #ifdef RT_USING_SMP
     if (rt_hw_cpu_id() != 0)
     {