rtsched.h

/*
 * Copyright (c) 2023-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-19     Shell        Separate scheduling statements from rt_thread_t
 *                             to rt_sched_thread_ctx. Add definitions of scheduler.
 */
#ifndef __RT_SCHED_H__
#define __RT_SCHED_H__

#include "rttypes.h"
#include "rtcompiler.h"

#ifdef __cplusplus
extern "C" {
#endif

struct rt_thread;

typedef rt_uint8_t rt_sched_thread_status_t;

#ifdef RT_USING_SCHED_THREAD_CTX

/**
 * Scheduler-private status bound to a thread. Callers should never access
 * these members directly.
 */
struct rt_sched_thread_priv
{
    rt_tick_t   init_tick;          /**< thread's initialized tick */
    rt_tick_t   remaining_tick;     /**< remaining tick */

    /* priority */
    rt_uint8_t  current_priority;   /**< current priority */
    rt_uint8_t  init_priority;      /**< initialized priority */
#if RT_THREAD_PRIORITY_MAX > 32
    rt_uint8_t  number;             /**< priority low number */
    rt_uint8_t  high_mask;          /**< priority high mask */
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    rt_uint32_t number_mask;        /**< priority number mask */
};

/**
 * Scheduler public status bound to a thread. The caller must hold the
 * scheduler lock before accessing any of its members.
 */
struct rt_sched_thread_ctx
{
    rt_list_t   thread_list_node;       /**< node in thread list */

    rt_uint8_t  stat;                   /**< thread status */
    rt_uint8_t  sched_flag_locked:1;    /**< calling thread has the scheduler locked */
    rt_uint8_t  sched_flag_ttmr_set:1;  /**< thread timer is started */
#ifdef ARCH_USING_HW_THREAD_SELF
    rt_uint8_t  critical_switch_flag:1; /**< critical switch pending */
#endif /* ARCH_USING_HW_THREAD_SELF */

#ifdef RT_USING_SMP
    rt_uint8_t  bind_cpu;               /**< CPU the thread is bound to */
    rt_uint8_t  oncpu;                  /**< CPU the thread is currently on */

    rt_base_t   critical_lock_nest;     /**< critical lock count */
#endif /* RT_USING_SMP */

    struct rt_sched_thread_priv sched_thread_priv; /**< private context of scheduler */
};

#define RT_SCHED_THREAD_CTX struct rt_sched_thread_ctx sched_thread_ctx;

#define RT_SCHED_PRIV(thread) ((thread)->sched_thread_ctx.sched_thread_priv)
#define RT_SCHED_CTX(thread)  ((thread)->sched_thread_ctx)
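
/*
 * A minimal embedding sketch (hedged; struct rt_thread lives in rtdef.h and is
 * assumed to place RT_SCHED_THREAD_CTX among its members):
 *
 *     struct rt_thread
 *     {
 *         ...
 *         RT_SCHED_THREAD_CTX     // expands to: struct rt_sched_thread_ctx sched_thread_ctx;
 *         ...
 *     };
 *
 *     // RT_SCHED_CTX(th)  then resolves to th->sched_thread_ctx
 *     // RT_SCHED_PRIV(th) then resolves to th->sched_thread_ctx.sched_thread_priv
 */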

/**
 * Convert a list node taken from RT_SCHED_CTX(thread).thread_list_node back
 * to the owning thread pointer.
 */
#define RT_THREAD_LIST_NODE_ENTRY(node)                                       \
    rt_container_of(                                                          \
        rt_list_entry((node), struct rt_sched_thread_ctx, thread_list_node),  \
        struct rt_thread, sched_thread_ctx)
#define RT_THREAD_LIST_NODE(thread) (RT_SCHED_CTX(thread).thread_list_node)
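
/*
 * A minimal usage sketch for the conversion above (hedged; `ready_list` is a
 * hypothetical rt_list_t of threads, and the caller is assumed to hold the
 * scheduler lock while walking it):
 *
 *     rt_list_t *node;
 *     struct rt_thread *thread;
 *
 *     rt_list_for_each(node, &ready_list)
 *     {
 *         thread = RT_THREAD_LIST_NODE_ENTRY(node);
 *         // inspect RT_SCHED_CTX(thread).stat, its priority, ...
 *     }
 */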

#else /* !defined(RT_USING_SCHED_THREAD_CTX) */

#if RT_THREAD_PRIORITY_MAX > 32
#define _RT_SCHED_THREAD_CTX_PRIO_EXT                                   \
    rt_uint8_t  number;             /**< priority low number */         \
    rt_uint8_t  high_mask;          /**< priority high mask */

#else /* !(RT_THREAD_PRIORITY_MAX > 32) */

#define _RT_SCHED_THREAD_CTX_PRIO_EXT
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

#define RT_SCHED_THREAD_CTX                                             \
    rt_list_t   tlist;              /**< node in thread list */         \
    rt_uint8_t  stat;               /**< thread status */               \
    rt_uint8_t  sched_flag_locked:1;                                    \
                                    /**< calling thread has the scheduler locked */ \
    rt_uint8_t  sched_flag_ttmr_set:1; /**< thread timer is started */  \
    rt_tick_t   init_tick;          /**< thread's initialized tick */   \
    rt_tick_t   remaining_tick;     /**< remaining tick */              \
    rt_uint8_t  current_priority;   /**< current priority */            \
    rt_uint8_t  init_priority;      /**< initialized priority */        \
    _RT_SCHED_THREAD_CTX_PRIO_EXT                                       \
    rt_uint32_t number_mask;        /**< priority number mask */

#define RT_SCHED_PRIV(thread) (*thread)
#define RT_SCHED_CTX(thread)  (*thread)

/**
 * Convert a list node taken from RT_SCHED_CTX(thread).tlist back to the
 * owning thread pointer.
 */
#define RT_THREAD_LIST_NODE_ENTRY(node) rt_list_entry((node), struct rt_thread, tlist)
#define RT_THREAD_LIST_NODE(thread)     (RT_SCHED_CTX(thread).tlist)

#endif /* RT_USING_SCHED_THREAD_CTX */

/**
 * System Scheduler Locking
 */
typedef rt_ubase_t rt_sched_lock_level_t;

rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl);
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level);
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level);

rt_bool_t rt_sched_is_locked(void);

#ifdef RT_USING_SMP
#define RT_SCHED_DEBUG_IS_LOCKED    do { RT_ASSERT(rt_sched_is_locked()); } while (0)
#define RT_SCHED_DEBUG_IS_UNLOCKED  do { RT_ASSERT(!rt_sched_is_locked()); } while (0)
#else /* !RT_USING_SMP */
#define RT_SCHED_DEBUG_IS_LOCKED
#define RT_SCHED_DEBUG_IS_UNLOCKED
#endif /* RT_USING_SMP */
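
/*
 * A minimal locking sketch (hedged; kernel-internal callers only). The level
 * value written by rt_sched_lock() must be handed back to the matching unlock:
 *
 *     rt_sched_lock_level_t level;
 *
 *     rt_sched_lock(&level);
 *     RT_SCHED_DEBUG_IS_LOCKED;
 *     // ... touch scheduler-protected state, e.g. RT_SCHED_CTX(thread) ...
 *     rt_sched_unlock(level);        // or rt_sched_unlock_n_resched(level)
 *                                    // to also request a reschedule
 */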

/**
 * NOTE: Users should NEVER call these APIs directly. Use the rt_thread_* or
 * IPC methods instead.
 */
#if defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__)

/* thread initialization and startup routines */
void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
void rt_sched_thread_startup(struct rt_thread *thread);

/* scheduler related routines */
void rt_sched_post_ctx_switch(struct rt_thread *thread);
rt_err_t rt_sched_tick_increase(void);

/* thread status operations */
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread);
rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread);
rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread);
rt_err_t rt_sched_thread_yield(struct rt_thread *thread);
rt_err_t rt_sched_thread_close(struct rt_thread *thread);
rt_err_t rt_sched_thread_ready(struct rt_thread *thread);
rt_err_t rt_sched_thread_suspend(struct rt_thread *thread, rt_sched_lock_level_t level);
rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority);
rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu);
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread);
rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread);
rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread);
void rt_sched_insert_thread(struct rt_thread *thread);
void rt_sched_remove_thread(struct rt_thread *thread);

struct rt_thread *rt_sched_thread_self(void);

#endif /* defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__) */

#ifdef __cplusplus
}
#endif

#endif /* __RT_SCHED_H__ */