Browse Source

[src] add rt_hw_cpu_id() wrapper API (#8894)

* [src] add rt_hw_cpu_id() wrapper API

rt_hw_cpu_id() is an unsafe API which should not be used directly by most
code. It's error-prone because it must be used in the proper context;
otherwise it can lead to errors and unpredictable behavior.

This patch adds a wrapper API for rt_hw_cpu_id() to address this risk.
It includes the context-checking functionality and provides a safer
alternative for obtaining CPU IDs, ensuring that it is used correctly
within the appropriate context.

Signed-off-by: Shell <smokewood@qq.com>

* fixup UMP

* update API & comment

* ci: cpp_check

---------

Signed-off-by: Shell <smokewood@qq.com>
Shell 1 year ago
parent
commit
6977cf9101
7 changed files with 50 additions and 13 deletions
  1. 26 3
      include/rtthread.h
  2. 1 1
      src/clock.c
  3. 17 0
      src/cpu_mp.c
  4. 2 6
      src/idle.c
  5. 2 2
      src/signal.c
  6. 1 1
      src/timer.c
  7. 1 0
      tools/ci/cpp_check.py

+ 26 - 3
include/rtthread.h

@@ -676,11 +676,19 @@ void rt_interrupt_leave(void);
 
 rt_base_t rt_cpus_lock(void);
 void rt_cpus_unlock(rt_base_t level);
+void rt_cpus_lock_status_restore(struct rt_thread *thread);
 
 struct rt_cpu *rt_cpu_self(void);
 struct rt_cpu *rt_cpu_index(int index);
 
-void rt_cpus_lock_status_restore(struct rt_thread *thread);
+#ifdef RT_USING_DEBUG
+    rt_base_t rt_cpu_get_id(void);
+#else /* !RT_USING_DEBUG */
+    #define rt_cpu_get_id rt_hw_cpu_id
+#endif /* RT_USING_DEBUG */
+
+#else /* !RT_USING_SMP */
+#define rt_cpu_get_id()  (0)
 
 #endif /* RT_USING_SMP */
 
@@ -781,7 +789,6 @@ while (0)
  *     1) the scheduler has been started.
  *     2) not in interrupt context.
  *     3) scheduler is not locked.
- *     4) interrupt is not disabled.
  */
 #define RT_DEBUG_SCHEDULER_AVAILABLE(need_check)                              \
 do                                                                            \
@@ -809,11 +816,27 @@ rt_inline rt_bool_t rt_in_thread_context(void)
     return rt_thread_self() != RT_NULL && rt_interrupt_get_nest() == 0;
 }
 
+/* is scheduler available */
 rt_inline rt_bool_t rt_scheduler_is_available(void)
 {
-    return !rt_hw_interrupt_is_disabled() && rt_critical_level() == 0 && rt_in_thread_context();
+    return rt_critical_level() == 0 && rt_in_thread_context();
+}
+
+#ifdef RT_USING_SMP
+/* is thread bound to a core */
+rt_inline rt_bool_t rt_sched_thread_is_binding(rt_thread_t thread)
+{
+    if (thread == RT_NULL)
+    {
+        thread = rt_thread_self();
+    }
+    return !thread || RT_SCHED_CTX(thread).bind_cpu != RT_CPUS_NR;
 }
 
+#else
+#define rt_sched_thread_is_binding(thread) (RT_TRUE)
+#endif
+
 /**@}*/
 
 #ifdef __cplusplus

+ 1 - 1
src/clock.c

@@ -101,7 +101,7 @@ void rt_tick_increase(void)
 
     /* check timer */
 #ifdef RT_USING_SMP
-    if (rt_hw_cpu_id() != 0)
+    if (rt_cpu_get_id() != 0)
     {
         return;
     }

+ 17 - 0
src/cpu_mp.c

@@ -216,3 +216,20 @@ void rt_cpus_lock_status_restore(struct rt_thread *thread)
     rt_sched_post_ctx_switch(thread);
 }
 RTM_EXPORT(rt_cpus_lock_status_restore);
+
+/* A safe API with a debugging feature, suitable to be called from most code */
+
+/**
+ * @brief Get logical CPU ID
+ *
+ * @return logical CPU ID
+ */
+rt_base_t rt_cpu_get_id(void)
+{
+
+    RT_ASSERT(rt_sched_thread_is_binding(RT_NULL) ||
+              rt_hw_interrupt_is_disabled() ||
+              !rt_scheduler_is_available());
+
+    return rt_hw_cpu_id();
+}

+ 2 - 6
src/idle.c

@@ -261,7 +261,7 @@ static void idle_thread_entry(void *parameter)
 {
     RT_UNUSED(parameter);
 #ifdef RT_USING_SMP
-    if (rt_hw_cpu_id() != 0)
+    if (rt_cpu_get_id() != 0)
     {
         while (1)
         {
@@ -380,11 +380,7 @@ void rt_thread_idle_init(void)
  */
 rt_thread_t rt_thread_idle_gethandler(void)
 {
-#ifdef RT_USING_SMP
-    int id = rt_hw_cpu_id();
-#else
-    int id = 0;
-#endif /* RT_USING_SMP */
+    int id = rt_cpu_get_id();
 
     return (rt_thread_t)(&idle_thread[id]);
 }

+ 2 - 2
src/signal.c

@@ -142,7 +142,7 @@ static void _signal_deliver(rt_thread_t tid)
                 int cpu_id;
 
                 cpu_id = RT_SCHED_CTX(tid).oncpu;
-                if ((cpu_id != RT_CPU_DETACHED) && (cpu_id != rt_hw_cpu_id()))
+                if ((cpu_id != RT_CPU_DETACHED) && (cpu_id != rt_cpu_get_id()))
                 {
                     rt_uint32_t cpu_mask;
 
@@ -181,7 +181,7 @@ void *rt_signal_check(void* context)
 
     level = rt_spin_lock_irqsave(&_thread_signal_lock);
 
-    cpu_id = rt_hw_cpu_id();
+    cpu_id = rt_cpu_get_id();
     pcpu   = rt_cpu_index(cpu_id);
     current_thread = pcpu->current_thread;
 

+ 1 - 1
src/timer.c

@@ -684,7 +684,7 @@ void rt_timer_check(void)
 
 #ifdef RT_USING_SMP
     /* Running on core 0 only */
-    if (rt_hw_cpu_id() != 0)
+    if (rt_cpu_get_id() != 0)
     {
         rt_spin_unlock_irqrestore(&_htimer_lock, level);
         return;

+ 1 - 0
tools/ci/cpp_check.py

@@ -27,6 +27,7 @@ class CPPCheck:
                 [
                     'cppcheck',
                     '-DRT_ASSERT(x)=',
+                    '-DRTM_EXPORT(x)=',
                     '-Drt_list_for_each_entry(a,b,c)=a=(void*)b;',
                     '-I include',
                     '-I thread/components/finsh',