/*
 * Copyright (c) 2023-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-19     Shell        Separate scheduling statements from rt_thread_t
 *                             to rt_sched_thread_ctx. Add definitions of scheduler.
 */
#ifndef __RT_SCHED_H__
#define __RT_SCHED_H__

#include "rttypes.h"
#include "rtcompiler.h"

struct rt_thread;

typedef rt_uint8_t rt_sched_thread_status_t;

#ifdef RT_USING_SCHED_THREAD_CTX

/**
 * Scheduler private status bound to a thread. Callers should never access
 * these members directly.
 */
struct rt_sched_thread_priv
{
    rt_tick_t   init_tick;          /**< thread's initialized tick */
    rt_tick_t   remaining_tick;     /**< remaining tick */

    /* priority */
    rt_uint8_t  current_priority;   /**< current priority */
    rt_uint8_t  init_priority;      /**< initialized priority */
#if RT_THREAD_PRIORITY_MAX > 32
    rt_uint8_t  number;             /**< priority low number */
    rt_uint8_t  high_mask;          /**< priority high mask */
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    rt_uint32_t number_mask;        /**< priority number mask */
};
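
/*
 * Illustrative sketch (not part of this header): the priority bitmap fields
 * above are typically filled in by the scheduler roughly as follows. The exact
 * derivation is an assumption here; the authoritative code is in the scheduler
 * source.
 *
 *   #if RT_THREAD_PRIORITY_MAX > 32
 *       priv->number      = priority >> 3;           // index of the 8-priority group
 *       priv->number_mask = 1UL << priv->number;     // bit in the group-level bitmap
 *       priv->high_mask   = 1 << (priority & 0x07);  // bit within the group
 *   #else
 *       priv->number_mask = 1UL << priority;         // single-level bitmap
 *   #endif
 */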

/**
 * Scheduler public status bound to a thread. Callers must hold the scheduler
 * lock before accessing any of its members.
 */
struct rt_sched_thread_ctx
{
    rt_list_t   thread_list_node;       /**< node in thread list */

    rt_uint8_t  stat;                   /**< thread status */
    rt_uint8_t  sched_flag_locked:1;    /**< calling thread holds the scheduler lock */
    rt_uint8_t  sched_flag_ttmr_set:1;  /**< thread timer is started */

#ifdef RT_USING_SMP
    rt_uint8_t  bind_cpu;               /**< CPU the thread is bound to */
    rt_uint8_t  oncpu;                  /**< CPU the thread is running on */
    rt_base_t   critical_lock_nest;     /**< critical lock count */
#endif

    struct rt_sched_thread_priv sched_thread_priv; /**< private context of scheduler */
};

#define RT_SCHED_THREAD_CTX struct rt_sched_thread_ctx sched_thread_ctx

#define RT_SCHED_PRIV(thread) ((thread)->sched_thread_ctx.sched_thread_priv)
#define RT_SCHED_CTX(thread)  ((thread)->sched_thread_ctx)
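
/*
 * Usage sketch (illustrative only): RT_SCHED_THREAD_CTX is meant to be placed
 * inside struct rt_thread, and the accessor macros above resolve the scheduler
 * fields through that member. The struct layout shown here is an assumption.
 *
 *   struct rt_thread
 *   {
 *       ...
 *       RT_SCHED_THREAD_CTX;                   // scheduler-owned context member(s)
 *       ...
 *   };
 *
 *   RT_SCHED_CTX(thread).stat                  // public: scheduler lock required
 *   RT_SCHED_PRIV(thread).current_priority     // private to the scheduler
 */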

/**
 * Convert a list node in the container RT_SCHED_CTX(thread).thread_list_node
 * to a thread pointer.
 */
#define RT_THREAD_LIST_NODE_ENTRY(node)                                       \
    rt_container_of(                                                          \
        rt_list_entry((node), struct rt_sched_thread_ctx, thread_list_node),  \
        struct rt_thread, sched_thread_ctx)
#define RT_THREAD_LIST_NODE(thread) (RT_SCHED_CTX(thread).thread_list_node)

#else /* !defined(RT_USING_SCHED_THREAD_CTX) */

#if RT_THREAD_PRIORITY_MAX > 32
#define _RT_SCHED_THREAD_CTX_PRIO_EXT                                         \
    rt_uint8_t  number;             /**< priority low number */               \
    rt_uint8_t  high_mask;          /**< priority high mask */
#else /* !(RT_THREAD_PRIORITY_MAX > 32) */
#define _RT_SCHED_THREAD_CTX_PRIO_EXT
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

#define RT_SCHED_THREAD_CTX                                                   \
    rt_list_t   tlist;              /**< node in thread list */               \
    rt_uint8_t  stat;               /**< thread status */                     \
    rt_uint8_t  sched_flag_locked:1;                                          \
                        /**< calling thread holds the scheduler lock */       \
    rt_uint8_t  sched_flag_ttmr_set:1; /**< thread timer is started */        \
    rt_tick_t   init_tick;          /**< thread's initialized tick */         \
    rt_tick_t   remaining_tick;     /**< remaining tick */                    \
    rt_uint8_t  current_priority;   /**< current priority */                  \
    rt_uint8_t  init_priority;      /**< initialized priority */              \
    _RT_SCHED_THREAD_CTX_PRIO_EXT;                                            \
    rt_uint32_t number_mask;        /**< priority number mask */

#define RT_SCHED_PRIV(thread) (*thread)
#define RT_SCHED_CTX(thread)  (*thread)

/**
 * Convert a list node in the container RT_SCHED_CTX(thread).tlist
 * to a thread pointer.
 */
#define RT_THREAD_LIST_NODE_ENTRY(node) rt_list_entry((node), struct rt_thread, tlist)
#define RT_THREAD_LIST_NODE(thread)     (RT_SCHED_CTX(thread).tlist)

#endif /* RT_USING_SCHED_THREAD_CTX */
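
/*
 * Usage sketch (illustrative only; `head` is a hypothetical rt_list_t list
 * head, e.g. one entry of a ready-queue table, walked with the scheduler
 * lock held):
 *
 *   rt_list_t *node;
 *   struct rt_thread *thread;
 *
 *   for (node = head->next; node != head; node = node->next)
 *   {
 *       thread = RT_THREAD_LIST_NODE_ENTRY(node);
 *       // ... inspect RT_SCHED_CTX(thread) here ...
 *   }
 */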

/**
 * System Scheduler Locking
 */
typedef rt_ubase_t rt_sched_lock_level_t;

rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl);
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level);
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level);

rt_bool_t rt_sched_is_locked(void);

#ifdef RT_USING_SMP
#define RT_SCHED_DEBUG_IS_LOCKED    do { RT_ASSERT(rt_sched_is_locked()); } while (0)
#define RT_SCHED_DEBUG_IS_UNLOCKED  do { RT_ASSERT(!rt_sched_is_locked()); } while (0)
#else /* !RT_USING_SMP */
#define RT_SCHED_DEBUG_IS_LOCKED
#define RT_SCHED_DEBUG_IS_UNLOCKED
#endif /* RT_USING_SMP */
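
/*
 * Locking sketch (illustrative): the level written by rt_sched_lock() must be
 * passed back to the matching unlock call.
 *
 *   rt_sched_lock_level_t slvl;
 *
 *   rt_sched_lock(&slvl);
 *   // ... touch scheduler-protected thread state ...
 *   rt_sched_unlock(slvl);              // or rt_sched_unlock_n_resched(slvl)
 *                                       //    to also request a reschedule
 */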

/**
 * NOTE: users should NEVER use these APIs directly. See rt_thread_.* or IPC
 * methods instead.
 */
#if defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__)

/* thread initialization and startup routine */
void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
void rt_sched_thread_startup(struct rt_thread *thread);

/* scheduler related routine */
void rt_sched_post_ctx_switch(struct rt_thread *thread);
rt_err_t rt_sched_tick_increase(void);

/* thread status operation */
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread);
rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread);
rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread);
rt_err_t rt_sched_thread_yield(struct rt_thread *thread);
rt_err_t rt_sched_thread_close(struct rt_thread *thread);
rt_err_t rt_sched_thread_ready(struct rt_thread *thread);
rt_err_t rt_sched_thread_suspend(struct rt_thread *thread, rt_sched_lock_level_t level);
rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority);
rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu);
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread);
rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread);
rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread);

void rt_sched_insert_thread(struct rt_thread *thread);
void rt_sched_remove_thread(struct rt_thread *thread);
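
/*
 * Kernel-internal sketch (illustrative; the exact call sequence is an
 * assumption, shown only to indicate that the lock level obtained from
 * rt_sched_lock() is the value passed down to rt_sched_thread_suspend()):
 *
 *   rt_sched_lock_level_t slvl;
 *
 *   rt_sched_lock(&slvl);
 *   rt_sched_thread_suspend(thread, slvl);
 *   rt_sched_unlock(slvl);
 */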

#endif /* defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__) */

#endif /* __RT_SCHED_H__ */