/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * (scheduler_comm.c) Common API of scheduling routines.
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-18     Shell        Separate scheduling related codes from thread.c, scheduler_.*
 */
#define DBG_TAG "kernel.sched"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <rtthread.h>
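
/**
 * @brief Initialize the scheduling context of a thread: mark it as
 *        RT_THREAD_INIT, leave it unbound from any CPU on SMP, and set up
 *        its private fields (time-slice budget and priority).
 *
 * @param thread the thread to initialize
 * @param tick the initial time slice, in OS ticks
 * @param priority the initial priority of the thread
 */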
void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
{
    /* setup thread status */
    RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;

#ifdef RT_USING_SMP
    /* not bound to any CPU */
    RT_SCHED_CTX(thread).bind_cpu = RT_CPUS_NR;
    RT_SCHED_CTX(thread).oncpu = RT_CPU_DETACHED;
#endif /* RT_USING_SMP */

    rt_sched_thread_init_priv(thread, tick, priority);
}
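
/**
 * @brief Mark the thread timer of the target thread as activated. Caller
 *        must hold the scheduler lock.
 *
 * @param thread the target thread
 * @return RT_EOK
 */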
rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    RT_SCHED_CTX(thread).sched_flag_ttmr_set = 1;
    return RT_EOK;
}
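
/**
 * @brief Stop the thread timer of the target thread if it was activated,
 *        clearing the timer flag regardless of the result. Caller must hold
 *        the scheduler lock.
 *
 * @param thread the target thread
 * @return the error code of rt_timer_stop(), or RT_EOK if the timer was
 *         not activated
 */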
rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
{
    rt_err_t error;
    RT_SCHED_DEBUG_IS_LOCKED;

    if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
    {
        error = rt_timer_stop(&thread->thread_timer);

        /* clear the timer flag whether or not the stop succeeded */
        RT_SCHED_CTX(thread).sched_flag_ttmr_set = 0;
    }
    else
    {
        error = RT_EOK;
    }
    return error;
}
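
/**
 * @brief Get the current scheduling status of the target thread. Caller
 *        must hold the scheduler lock.
 *
 * @param thread the target thread
 * @return the status bits masked by RT_THREAD_STAT_MASK
 */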
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK;
}
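
/**
 * @brief Get the current priority of the target thread. Caller must hold
 *        the scheduler lock.
 *
 * @param thread the target thread
 * @return the current priority
 */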
rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return RT_SCHED_PRIV(thread).current_priority;
}
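
/**
 * @brief Get the initial priority of the target thread.
 *
 * @param thread the target thread
 * @return the initial priority
 */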
rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread)
{
    /* read-only field, so the lock is unnecessary */
    return RT_SCHED_PRIV(thread).init_priority;
}

/**
 * @note Caller must hold the scheduler lock
 */
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return (RT_SCHED_CTX(thread).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK;
}
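
/**
 * @brief Mark the target thread as closed. Caller must hold the scheduler
 *        lock.
 *
 * @param thread the target thread
 * @return RT_EOK
 */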
rt_err_t rt_sched_thread_close(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    RT_SCHED_CTX(thread).stat = RT_THREAD_CLOSE;
    return RT_EOK;
}
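
/**
 * @brief Reload the time slice of the target thread and mark it as
 *        yielded, so the scheduler can pick another ready thread. Caller
 *        must hold the scheduler lock.
 *
 * @param thread the target thread
 * @return RT_EOK
 */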
rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;

    RT_SCHED_PRIV(thread).remaining_tick = RT_SCHED_PRIV(thread).init_tick;
    RT_SCHED_CTX(thread).stat |= RT_THREAD_STAT_YIELD;

    return RT_EOK;
}
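
/**
 * @brief Move a suspended thread to the ready queue. The thread timer is
 *        stopped first if it was activated; on failure the thread is left
 *        untouched, since a timeout ISR is probably resuming it already.
 *        Caller must hold the scheduler lock.
 *
 * @param thread the target thread
 * @return RT_EOK on success; -RT_EINVAL if the thread is not suspended;
 *         otherwise the error code of stopping the thread timer
 */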
rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
{
    rt_err_t error;

    RT_SCHED_DEBUG_IS_LOCKED;

    if (!rt_sched_thread_is_suspended(thread))
    {
        /* not suspended, cannot proceed; possibly lost a race with another resumer */
        error = -RT_EINVAL;
    }
    else
    {
        if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
        {
            /**
             * Quiet the timeout timer first if it is set, and do not continue
             * on failure: a failure probably means a timeout ISR is racing to
             * resume the thread before us.
             */
            error = rt_sched_thread_timer_stop(thread);
        }
        else
        {
            error = RT_EOK;
        }

        if (!error)
        {
            /* remove from the suspend list */
            rt_list_remove(&RT_THREAD_LIST_NODE(thread));

#ifdef RT_USING_SMART
            thread->wakeup_handle.func = RT_NULL;
#endif

            /* insert into the ready queue */
            rt_sched_insert_thread(thread);
        }
    }
    return error;
}
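
/**
 * @brief Consume one tick of the current thread's time slice. When the
 *        slice is exhausted, the thread yields and a rescheduling is
 *        requested. Typically called from the system tick interrupt.
 *
 * @return RT_EOK
 */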
rt_err_t rt_sched_tick_increase(void)
{
    struct rt_thread *thread;
    rt_sched_lock_level_t slvl;

    thread = rt_thread_self();

    rt_sched_lock(&slvl);

    RT_SCHED_PRIV(thread).remaining_tick--;
    if (RT_SCHED_PRIV(thread).remaining_tick)
    {
        rt_sched_unlock(slvl);
    }
    else
    {
        rt_sched_thread_yield(thread);

        /* request a rescheduling even though we are probably in an ISR */
        rt_sched_unlock_n_resched(slvl);
    }

    return RT_EOK;
}

/**
 * @brief Update the priority of the target thread. Caller must hold the
 *        scheduler lock.
 *
 * @param thread the target thread
 * @param priority the new priority, which must be lower than
 *        RT_THREAD_PRIORITY_MAX
 * @return RT_EOK
 */
rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority)
{
    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
    RT_SCHED_DEBUG_IS_LOCKED;

    /* for a ready thread, re-queue it; otherwise simply update the priority */
    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
    {
        /* remove the thread from the ready queue first */
        rt_sched_remove_thread(thread);

        /* change the thread priority */
        RT_SCHED_PRIV(thread).current_priority = priority;

        /* recalculate the priority attribute */
#if RT_THREAD_PRIORITY_MAX > 32
        RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;             /* 5 bit */
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
        RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3 bit */
#else
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;

        /* insert the thread into the ready queue again */
        rt_sched_insert_thread(thread);
    }
    else
    {
        RT_SCHED_PRIV(thread).current_priority = priority;

        /* recalculate the priority attribute */
#if RT_THREAD_PRIORITY_MAX > 32
        RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;             /* 5 bit */
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
        RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3 bit */
#else
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    }

    return RT_EOK;
}
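
/*
 * Usage sketch (illustrative only, not part of the original file): most of
 * the APIs above assume the caller holds the scheduler lock, e.g.
 *
 *     rt_sched_lock_level_t slvl;
 *
 *     rt_sched_lock(&slvl);
 *     if (rt_sched_thread_is_suspended(thread))
 *     {
 *         rt_sched_thread_ready(thread);
 *     }
 *     rt_sched_unlock(slvl);
 */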