  1. /*
  2. * Copyright (c) 2006-2025 RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * (scheduler_comm.c) Common API of scheduling routines.
  7. *
  8. * Change Logs:
  9. * Date Author Notes
  10. * 2024-01-18 Shell Separate scheduling related codes from thread.c, scheduler_.*
  11. * 2025-09-01 Rbb666 Add thread stack overflow hook.
  12. */
  13. #define DBG_TAG "kernel.sched"
  14. #define DBG_LVL DBG_INFO
  15. #include <rtdbg.h>
  16. #include <rtthread.h>
  17. /**
  18. * @brief Initialize thread scheduling context
  19. *
  20. * @param thread The thread to be initialized
  21. * @param tick Initial time slice value for the thread
  22. * @param priority Initial priority of the thread
  23. *
  24. * @details This function performs the following initialization:
  25. * - Sets thread status to INIT
  26. * - For SMP systems:
  27. * * Sets bind CPU to none (RT_CPUS_NR)
  28. * * Marks CPU as detached (RT_CPU_DETACHED)
  29. * - Calls rt_sched_thread_init_priv() for private scheduling data initialization
  30. */
  31. void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
  32. {
  33. /* setup thread status */
  34. RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;
  35. #ifdef RT_USING_SMP
  36. /* not bind on any cpu */
  37. RT_SCHED_CTX(thread).bind_cpu = RT_CPUS_NR;
  38. RT_SCHED_CTX(thread).oncpu = RT_CPU_DETACHED;
  39. #endif /* RT_USING_SMP */
  40. rt_sched_thread_init_priv(thread, tick, priority);
  41. }
  42. /**
  43. * @brief Start the thread timer for scheduling
  44. *
  45. * @param thread The thread whose timer needs to be started
  46. *
  47. * @return rt_err_t Always returns RT_EOK on success
  48. *
  49. * @details This function:
  50. * - Requires scheduler lock to be held.
  51. * - Sets the thread's timer flag (sched_flag_ttmr_set) to indicate timer is active
  52. */
  53. rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread)
  54. {
  55. RT_SCHED_DEBUG_IS_LOCKED;
  56. RT_SCHED_CTX(thread).sched_flag_ttmr_set = 1;
  57. return RT_EOK;
  58. }
  59. /**
  60. * @brief Stop the thread timer for scheduling
  61. *
  62. * @param thread The thread whose timer needs to be stopped
  63. *
  64. * @return rt_err_t
  65. * - RT_EOK if timer was successfully stopped or not active
  66. * - Other error codes from rt_timer_stop() if stop operation failed
  67. */
  68. rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
  69. {
  70. rt_err_t error;
  71. RT_SCHED_DEBUG_IS_LOCKED;
  72. if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
  73. {
  74. error = rt_timer_stop(&thread->thread_timer);
  75. /* mask out timer flag no matter stop success or not */
  76. RT_SCHED_CTX(thread).sched_flag_ttmr_set = 0;
  77. }
  78. else
  79. {
  80. error = RT_EOK;
  81. }
  82. return error;
  83. }
  84. /**
  85. * @brief Get the current status of a thread
  86. *
  87. * @param thread The thread to get status from
  88. *
  89. * @return rt_uint8_t The thread status masked with RT_THREAD_STAT_MASK
  90. *
  91. * @details This function:
  92. * - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
  93. * - Returns the thread's status field masked with RT_THREAD_STAT_MASK
  94. */
  95. rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
  96. {
  97. RT_SCHED_DEBUG_IS_LOCKED;
  98. return RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK;
  99. }
  100. /**
  101. * @brief Get the current priority of a thread
  102. *
  103. * @param thread The thread to get priority from
  104. *
  105. * @return rt_uint8_t The current priority value of the thread
  106. *
  107. * @details This function:
  108. * - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
  109. * - Returns the thread's current priority field from its private scheduling data
  110. */
  111. rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread)
  112. {
  113. RT_SCHED_DEBUG_IS_LOCKED;
  114. return RT_SCHED_PRIV(thread).current_priority;
  115. }
  116. /**
  117. * @brief Get the initial priority of a thread
  118. *
  119. * @param thread The thread to get priority from
  120. *
  121. * @return rt_uint8_t The initial priority value of the thread
  122. *
  123. * @details This function:
  124. * - Returns the thread's initial priority field from its private scheduling data
  125. * - Does not require scheduler lock as it accesses read-only fields
  126. */
  127. rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread)
  128. {
  129. /* read only fields, so lock is unnecessary */
  130. return RT_SCHED_PRIV(thread).init_priority;
  131. }
  132. /**
  133. * @brief Check if a thread is in suspended state
  134. *
  135. * @param thread The thread to check
  136. *
  137. * @return rt_uint8_t
  138. * - 1 if thread is suspended (matches RT_THREAD_SUSPEND_MASK)
  139. * - 0 otherwise
  140. *
  141. * @details This function:
  142. * - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
  143. * - Checks thread's status field against RT_THREAD_SUSPEND_MASK
  144. *
  145. * @note Caller must hold the scheduler lock before calling this function
  146. */
  147. rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
  148. {
  149. RT_SCHED_DEBUG_IS_LOCKED;
  150. return (RT_SCHED_CTX(thread).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK;
  151. }
  152. /**
  153. * @brief Close a thread by setting its status to CLOSED
  154. *
  155. * @param thread The thread to be closed
  156. * @return rt_err_t Always returns RT_EOK on success
  157. *
  158. * @details This function:
  159. * - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
  160. * - Sets the thread's status to RT_THREAD_CLOSE
  161. *
  162. * @note Must be called with scheduler lock held
  163. */
  164. rt_err_t rt_sched_thread_close(struct rt_thread *thread)
  165. {
  166. RT_SCHED_DEBUG_IS_LOCKED;
  167. RT_SCHED_CTX(thread).stat = RT_THREAD_CLOSE;
  168. return RT_EOK;
  169. }
  170. /**
  171. * @brief Yield the current thread's remaining time slice
  172. *
  173. * @param thread The thread to yield
  174. * @return rt_err_t Always returns RT_EOK on success
  175. *
  176. * @details This function:
  177. * - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
  178. * - Resets the thread's remaining tick count to its initial value
  179. * - Sets the thread's status to YIELD state
  180. *
  181. * @note Must be called with scheduler lock held
  182. */
  183. rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
  184. {
  185. RT_SCHED_DEBUG_IS_LOCKED;
  186. RT_SCHED_PRIV(thread).remaining_tick = RT_SCHED_PRIV(thread).init_tick;
  187. RT_SCHED_CTX(thread).stat |= RT_THREAD_STAT_YIELD;
  188. return RT_EOK;
  189. }
  190. /**
  191. * @brief Make a suspended thread ready for scheduling
  192. *
  193. * @param thread The thread to be made ready
  194. *
  195. * @return rt_err_t
  196. * - RT_EOK if operation succeeded
  197. * - -RT_EINVAL if thread is not suspended
  198. * - Other error codes from rt_sched_thread_timer_stop() if timer stop failed
  199. *
  200. * @details This function:
  201. * - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
  202. * - Checks if thread is suspended (returns -RT_EINVAL if not)
  203. * - Stops thread timer if active
  204. * - Removes thread from suspend list
  205. * - Clears wakeup handler (if RT_USING_SMART is defined)
  206. * - Inserts thread into ready queue
  207. *
  208. * @note Must be called with scheduler lock held
  209. * May fail due to racing conditions with timeout ISR
  210. */
  211. rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
  212. {
  213. rt_err_t error;
  214. RT_SCHED_DEBUG_IS_LOCKED;
  215. if (!rt_sched_thread_is_suspended(thread))
  216. {
  217. /* failed to proceed, and that's possibly due to a racing condition */
  218. error = -RT_EINVAL;
  219. }
  220. else
  221. {
  222. if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
  223. {
  224. /**
  225. * Quiet timeout timer first if set. and don't continue if we
  226. * failed, because it probably means that a timeout ISR racing to
  227. * resume thread before us.
  228. */
  229. error = rt_sched_thread_timer_stop(thread);
  230. }
  231. else
  232. {
  233. error = RT_EOK;
  234. }
  235. if (!error)
  236. {
  237. /* remove from suspend list */
  238. rt_list_remove(&RT_THREAD_LIST_NODE(thread));
  239. #ifdef RT_USING_SMART
  240. thread->wakeup_handle.func = RT_NULL;
  241. #endif
  242. /* insert to schedule ready list and remove from susp list */
  243. rt_sched_insert_thread(thread);
  244. }
  245. }
  246. return error;
  247. }
  248. /**
  249. * @brief Increase the system tick and update thread's remaining time slice
  250. *
  251. * @param tick The number of ticks to increase
  252. * @return rt_err_t Always returns RT_EOK
  253. *
  254. * @details This function:
  255. * - Gets the current thread
  256. * - Locks the scheduler
  257. * - Decreases the thread's remaining tick count by the specified amount
  258. * - If remaining ticks reach zero:
  259. * * Calls rt_sched_thread_yield() to yield the thread
  260. * * Requests a reschedule with rt_sched_unlock_n_resched()
  261. * - Otherwise simply unlocks the scheduler
  262. *
  263. * @note This function is typically called from timer interrupt context
  264. * It handles both SMP and non-SMP cases
  265. */
  266. rt_err_t rt_sched_tick_increase(rt_tick_t tick)
  267. {
  268. struct rt_thread *thread;
  269. rt_sched_lock_level_t slvl;
  270. thread = rt_thread_self();
  271. rt_sched_lock(&slvl);
  272. if(RT_SCHED_PRIV(thread).remaining_tick > tick)
  273. {
  274. RT_SCHED_PRIV(thread).remaining_tick -= tick;
  275. }
  276. else
  277. {
  278. RT_SCHED_PRIV(thread).remaining_tick = 0;
  279. }
  280. if (RT_SCHED_PRIV(thread).remaining_tick)
  281. {
  282. rt_sched_unlock(slvl);
  283. }
  284. else
  285. {
  286. rt_sched_thread_yield(thread);
  287. /* request a rescheduling even though we are probably in an ISR */
  288. rt_sched_unlock_n_resched(slvl);
  289. }
  290. return RT_EOK;
  291. }
  292. /**
  293. * @brief Update thread priority and adjust scheduling attributes
  294. *
  295. * @param thread The thread to update priority for
  296. * @param priority New priority value to set
  297. * @param update_init_prio Flag to determine if initial priority should also be updated
  298. * @return rt_err_t Always returns RT_EOK on success
  299. *
  300. * @details This function:
  301. * - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
  302. * - For ready threads:
  303. * * Removes from ready queue
  304. * * Updates priority values
  305. * * Recalculates priority attributes (number, mask, etc.)
  306. * * Reinserts into ready queue with new priority
  307. * - For non-ready threads:
  308. * * Only updates priority values and attributes
  309. * - Handles both 32-bit and >32-bit priority systems
  310. *
  311. * @note Must be called with scheduler lock held
  312. * Thread status must be valid before calling
  313. */
  314. static rt_err_t _rt_sched_update_priority(struct rt_thread *thread, rt_uint8_t priority, rt_bool_t update_init_prio)
  315. {
  316. RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
  317. RT_SCHED_DEBUG_IS_LOCKED;
  318. /* for ready thread, change queue; otherwise simply update the priority */
  319. if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
  320. {
  321. /* remove thread from schedule queue first */
  322. rt_sched_remove_thread(thread);
  323. /* change thread priority */
  324. if (update_init_prio)
  325. {
  326. RT_SCHED_PRIV(thread).init_priority = priority;
  327. }
  328. RT_SCHED_PRIV(thread).current_priority = priority;
  329. /* recalculate priority attribute */
  330. #if RT_THREAD_PRIORITY_MAX > 32
  331. RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3; /* 5bit */
  332. RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
  333. RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
  334. #else
  335. RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
  336. #endif /* RT_THREAD_PRIORITY_MAX > 32 */
  337. RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;
  338. /* insert thread to schedule queue again */
  339. rt_sched_insert_thread(thread);
  340. }
  341. else
  342. {
  343. if (update_init_prio)
  344. {
  345. RT_SCHED_PRIV(thread).init_priority = priority;
  346. }
  347. RT_SCHED_PRIV(thread).current_priority = priority;
  348. /* recalculate priority attribute */
  349. #if RT_THREAD_PRIORITY_MAX > 32
  350. RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3; /* 5bit */
  351. RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
  352. RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
  353. #else
  354. RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
  355. #endif /* RT_THREAD_PRIORITY_MAX > 32 */
  356. }
  357. return RT_EOK;
  358. }
  359. /**
  360. * @brief Update priority of the target thread
  361. */
  362. rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority)
  363. {
  364. return _rt_sched_update_priority(thread, priority, RT_FALSE);
  365. }
  366. /**
  367. * @brief Reset priority of the target thread
  368. */
  369. rt_err_t rt_sched_thread_reset_priority(struct rt_thread *thread, rt_uint8_t priority)
  370. {
  371. return _rt_sched_update_priority(thread, priority, RT_TRUE);
  372. }
#ifdef RT_USING_OVERFLOW_CHECK
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)

/* User-installed callback invoked when rt_scheduler_stack_check() detects a
 * stack overflow; RT_NULL (the zero-initialized default) means no hook. */
static rt_err_t (*rt_stack_overflow_hook)(struct rt_thread *thread);

/**
 * @brief Set a hook function to be called when stack overflow is detected.
 *
 * @param hook The function pointer to be called when stack overflow is
 *             detected. Pass RT_NULL to disable the hook. The hook should
 *             return RT_EOK if the overflow is handled; any other return
 *             value makes the system halt in an infinite loop.
 *
 * @note The hook function must be simple and never block or suspend — it
 *       runs in whatever context the stack check is performed in. Typical
 *       uses are error logging, recovery, or graceful shutdown.
 *
 * @details Hook behavior, as consumed by rt_scheduler_stack_check():
 *          - return RT_EOK: execution continues after overflow handling
 *          - return anything else: system enters an infinite loop (halt)
 *
 * @see rt_scheduler_stack_check()
 */
void rt_scheduler_stack_overflow_sethook(rt_err_t (*hook)(struct rt_thread *thread))
{
    rt_stack_overflow_hook = hook;
}
#endif /* RT_USING_HOOK */
  400. /**
  401. * @brief Check thread stack for overflow or near-overflow conditions
  402. *
  403. * @param thread The thread to check stack for
  404. *
  405. * @details This function performs the following checks:
  406. * - For SMART mode without MMU: skips check if SP is in user data section
  407. * - Without hardware stack guard:
  408. * * For upward-growing stacks: checks magic number at top and SP range
  409. * * For downward-growing stacks: checks magic number at bottom and SP range
  410. * * Triggers error and infinite loop on overflow
  411. * - Additional warnings when stack pointer is near boundaries
  412. */
  413. void rt_scheduler_stack_check(struct rt_thread *thread)
  414. {
  415. RT_ASSERT(thread != RT_NULL);
  416. #ifdef RT_USING_SMART
  417. #ifndef ARCH_MM_MMU
  418. struct rt_lwp *lwp = thread ? (struct rt_lwp *)thread->lwp : 0;
  419. /* if stack pointer locate in user data section skip stack check. */
  420. if (lwp && ((rt_uint32_t)thread->sp > (rt_uint32_t)lwp->data_entry &&
  421. (rt_uint32_t)thread->sp <= (rt_uint32_t)lwp->data_entry + (rt_uint32_t)lwp->data_size))
  422. {
  423. return;
  424. }
  425. #endif /* not defined ARCH_MM_MMU */
  426. #endif /* RT_USING_SMART */
  427. #ifndef RT_USING_HW_STACK_GUARD
  428. #ifdef ARCH_CPU_STACK_GROWS_UPWARD
  429. if (*((rt_uint8_t *)((rt_uintptr_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
  430. #else
  431. if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
  432. #endif /* ARCH_CPU_STACK_GROWS_UPWARD */
  433. (rt_uintptr_t)thread->sp <= (rt_uintptr_t)thread->stack_addr ||
  434. (rt_uintptr_t)thread->sp >
  435. (rt_uintptr_t)thread->stack_addr + (rt_uintptr_t)thread->stack_size)
  436. {
  437. rt_base_t dummy = 1;
  438. rt_err_t hook_result = -RT_ERROR;
  439. LOG_E("thread:%s stack overflow\n", thread->parent.name);
  440. #if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
  441. if (rt_stack_overflow_hook != RT_NULL)
  442. {
  443. hook_result = rt_stack_overflow_hook(thread);
  444. }
  445. #endif /* RT_USING_HOOK */
  446. /* If hook handled the overflow successfully, don't enter infinite loop */
  447. if (hook_result != RT_EOK)
  448. {
  449. while (dummy);
  450. }
  451. }
  452. #endif /* RT_USING_HW_STACK_GUARD */
  453. #ifdef ARCH_CPU_STACK_GROWS_UPWARD
  454. #ifndef RT_USING_HW_STACK_GUARD
  455. else if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
  456. #else
  457. if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
  458. #endif
  459. {
  460. LOG_W("warning: %s stack is close to the top of stack address.\n",
  461. thread->parent.name);
  462. }
  463. #else
  464. #ifndef RT_USING_HW_STACK_GUARD
  465. else if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
  466. #else
  467. if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
  468. #endif
  469. {
  470. LOG_W("warning: %s stack is close to end of stack address.\n",
  471. thread->parent.name);
  472. }
  473. #endif /* ARCH_CPU_STACK_GROWS_UPWARD */
  474. }
  475. #endif /* RT_USING_OVERFLOW_CHECK */