@@ -676,11 +676,19 @@ void rt_interrupt_leave(void);
 
 rt_base_t rt_cpus_lock(void);
 void rt_cpus_unlock(rt_base_t level);
+void rt_cpus_lock_status_restore(struct rt_thread *thread);
 
 struct rt_cpu *rt_cpu_self(void);
 struct rt_cpu *rt_cpu_index(int index);
 
-void rt_cpus_lock_status_restore(struct rt_thread *thread);
+#ifdef RT_USING_DEBUG
+rt_base_t rt_cpu_get_id(void);
+#else /* !RT_USING_DEBUG */
+#define rt_cpu_get_id rt_hw_cpu_id
+#endif /* RT_USING_DEBUG */
+
+#else /* !RT_USING_SMP */
+#define rt_cpu_get_id() (0)
 
 #endif /* RT_USING_SMP */
 
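Besides moving the `rt_cpus_lock_status_restore()` declaration next to the other cpus-lock calls, this hunk gives callers a single `rt_cpu_get_id()` spelling that compiles in every configuration: a real function under `RT_USING_DEBUG` (presumably so it can validate the calling context), a plain alias for `rt_hw_cpu_id` in SMP release builds, and the constant `0` when `RT_USING_SMP` is off. A minimal sketch of code relying on that, assuming only the declarations above (the counter and helper name are invented for illustration):

#include <rtthread.h>
#include <rthw.h>

/* One slot per core; RT_CPUS_NR falls back to 1 on UP builds. */
static rt_uint32_t wakeups[RT_CPUS_NR];

/* Hypothetical helper: count an event against the core it happened on. */
static void count_wakeup(void)
{
    /* rt_cpu_get_id() is a checked function (RT_USING_DEBUG), an alias
     * for rt_hw_cpu_id (SMP release), or the literal 0 (single core),
     * so this line compiles unchanged in all three configurations. */
    wakeups[rt_cpu_get_id()]++;
}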
@@ -781,7 +789,6 @@ while (0)
  * 1) the scheduler has been started.
  * 2) not in interrupt context.
  * 3) scheduler is not locked.
- * 4) interrupt is not disabled.
  */
 #define RT_DEBUG_SCHEDULER_AVAILABLE(need_check) \
 do \
@@ -809,11 +816,27 @@ rt_inline rt_bool_t rt_in_thread_context(void)
     return rt_thread_self() != RT_NULL && rt_interrupt_get_nest() == 0;
 }
 
+/* is the scheduler available */
 rt_inline rt_bool_t rt_scheduler_is_available(void)
 {
-    return !rt_hw_interrupt_is_disabled() && rt_critical_level() == 0 && rt_in_thread_context();
+    return rt_critical_level() == 0 && rt_in_thread_context();
+}
+
+#ifdef RT_USING_SMP
+/* is the thread bound to a core */
+rt_inline rt_bool_t rt_sched_thread_is_binding(rt_thread_t thread)
+{
+    if (thread == RT_NULL)
+    {
+        thread = rt_thread_self();
+    }
+    return !thread || RT_SCHED_CTX(thread).bind_cpu != RT_CPUS_NR;
 }
 
+#else
+#define rt_sched_thread_is_binding(thread) (RT_TRUE)
+#endif
+
 /**@}*/
 
 #ifdef __cplusplus
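The `rt_scheduler_is_available()` change mirrors the comment hunk above: with condition 4 gone, the predicate no longer consults `rt_hw_interrupt_is_disabled()`, so only the scheduler lock depth (`rt_critical_level()`) and thread context decide availability. A short sketch of the two new predicates in use, assuming the inlines above; the helper and its messages are invented for illustration:

#include <rtthread.h>

/* Hypothetical diagnostic: report what the scheduler would allow here. */
static void sched_report(void)
{
    if (rt_scheduler_is_available())
    {
        /* Started, not in an ISR, and rt_critical_level() == 0, so a
         * voluntary context switch would be legal at this point. */
        rt_kprintf("reschedule: allowed\n");
    }

    /* RT_NULL asks about the current thread; the UP fallback macro
     * always reports RT_TRUE since there is only one core to bind to. */
    if (rt_sched_thread_is_binding(RT_NULL))
    {
        rt_kprintf("current thread: pinned to a core\n");
    }
}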