scheduler_mp.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-17     Bernard      the first version
 * 2006-04-28     Bernard      fix the scheduler algorithm
 * 2006-04-30     Bernard      add SCHEDULER_DEBUG
 * 2006-05-27     Bernard      fix the scheduler algorithm for same priority
 *                             thread schedule
 * 2006-06-04     Bernard      rewrite the scheduler algorithm
 * 2006-08-03     Bernard      add hook support
 * 2006-09-05     Bernard      add 32 priority level support
 * 2006-09-24     Bernard      add rt_system_scheduler_start function
 * 2009-09-16     Bernard      fix _rt_scheduler_stack_check
 * 2010-04-11     yi.qiu       add module feature
 * 2010-07-13     Bernard      fix the maximal number of rt_scheduler_lock_nest
 *                             issue found by kuronca
 * 2010-12-13     Bernard      add defunct list initialization even if heap is not used
 * 2011-05-10     Bernard      clean scheduler debug log
 * 2013-12-21     Grissiom     add rt_critical_level
 * 2018-11-22     Jesven       remove the current task from ready queue
 *                             add per cpu ready queue
 *                             add _scheduler_get_highest_priority_thread to find highest priority task
 *                             rt_schedule_insert_thread won't insert current task to ready queue
 *                             in smp version, rt_hw_context_switch_interrupt maybe switch to
 *                             new task directly
 * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to scheduler.c
 * 2023-03-27     rose_man     Split into scheduler_up.c and scheduler_mp.c
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 * 2023-12-10     xqyjlj       use rt_hw_spinlock
 * 2024-01-05     Shell        Fixup of data racing in rt_critical_level
 * 2024-01-18     Shell        support rt_sched_thread of scheduling status for better mt protection
 * 2024-01-18     Shell        support rt_hw_thread_self to improve overall performance
 */
#include <rtthread.h>
#include <rthw.h>

#define DBG_TAG "kernel.scheduler"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
static struct rt_spinlock _mp_scheduler_lock;

#define SCHEDULER_LOCK_FLAG(percpu) ((percpu)->sched_lock_flag)

#define SCHEDULER_ENTER_CRITICAL(curthr)                        \
    do                                                          \
    {                                                           \
        if (curthr) RT_SCHED_CTX(curthr).critical_lock_nest++;  \
    } while (0)

#define SCHEDULER_EXIT_CRITICAL(curthr)                         \
    do                                                          \
    {                                                           \
        if (curthr) RT_SCHED_CTX(curthr).critical_lock_nest--;  \
    } while (0)

#define SCHEDULER_CONTEXT_LOCK(percpu)                          \
    do                                                          \
    {                                                           \
        RT_ASSERT(SCHEDULER_LOCK_FLAG(percpu) == 0);            \
        _fast_spin_lock(&_mp_scheduler_lock);                   \
        SCHEDULER_LOCK_FLAG(percpu) = 1;                        \
    } while (0)

#define SCHEDULER_CONTEXT_UNLOCK(percpu)                        \
    do                                                          \
    {                                                           \
        RT_ASSERT(SCHEDULER_LOCK_FLAG(percpu) == 1);            \
        SCHEDULER_LOCK_FLAG(percpu) = 0;                        \
        _fast_spin_unlock(&_mp_scheduler_lock);                 \
    } while (0)

#define SCHEDULER_LOCK(level)                                   \
    do                                                          \
    {                                                           \
        rt_thread_t _curthr;                                    \
        struct rt_cpu *_percpu;                                 \
        level = rt_hw_local_irq_disable();                      \
        _percpu = rt_cpu_self();                                \
        _curthr = _percpu->current_thread;                      \
        SCHEDULER_ENTER_CRITICAL(_curthr);                      \
        SCHEDULER_CONTEXT_LOCK(_percpu);                        \
    } while (0)

#define SCHEDULER_UNLOCK(level)                                 \
    do                                                          \
    {                                                           \
        rt_thread_t _curthr;                                    \
        struct rt_cpu *_percpu;                                 \
        _percpu = rt_cpu_self();                                \
        _curthr = _percpu->current_thread;                      \
        SCHEDULER_CONTEXT_UNLOCK(_percpu);                      \
        SCHEDULER_EXIT_CRITICAL(_curthr);                       \
        rt_hw_local_irq_enable(level);                          \
    } while (0)

#ifdef ARCH_USING_HW_THREAD_SELF
#define IS_CRITICAL_SWITCH_PEND(pcpu, curthr)  (RT_SCHED_CTX(curthr).critical_switch_flag)
#define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag = 1)
#define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag = 0)
#else /* !ARCH_USING_HW_THREAD_SELF */
#define IS_CRITICAL_SWITCH_PEND(pcpu, curthr)  ((pcpu)->critical_switch_flag)
#define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag = 1)
#define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag = 0)
#endif /* ARCH_USING_HW_THREAD_SELF */

static rt_uint32_t rt_thread_ready_priority_group;
#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256 */
static rt_uint8_t rt_thread_ready_table[32];
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
/**
 * Used only in the scheduler to optimize control flow, where the critical
 * region is already guaranteed.
 */
rt_inline void _fast_spin_lock(struct rt_spinlock *lock)
{
    rt_hw_spin_lock(&lock->lock);

    RT_SPIN_LOCK_DEBUG(lock);
}

rt_inline void _fast_spin_unlock(struct rt_spinlock *lock)
{
    rt_base_t critical_level;
    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);

    /* for the scenario of sched, we don't check critical level */
    RT_UNUSED(critical_level);

    rt_hw_spin_unlock(&lock->lock);
}
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * @brief This function will set a hook function, which will be invoked when thread
 *        switch happens.
 *
 * @param hook is the hook function.
 */
void rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
{
    rt_scheduler_hook = hook;
}

/**
 * @brief This function will set a hook function, which will be invoked when context
 *        switch happens.
 *
 * @param hook is the hook function.
 */
void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid))
{
    rt_scheduler_switch_hook = hook;
}

/**@}*/
#endif /* RT_USING_HOOK */
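
/*
 * Illustrative usage (editorial note, not part of the original source): a
 * minimal sketch of registering a scheduler hook to trace thread switches.
 * The hook name `trace_switch_hook` is hypothetical; it assumes RT_USING_HOOK
 * and RT_HOOK_USING_FUNC_PTR are enabled in rtconfig.h.
 *
 *     static void trace_switch_hook(struct rt_thread *from, struct rt_thread *to)
 *     {
 *         rt_kprintf("switch: %s -> %s\n", from->parent.name, to->parent.name);
 *     }
 *
 *     void trace_init(void)
 *     {
 *         rt_scheduler_sethook(trace_switch_hook);
 *     }
 *
 * Keep such a hook short: it runs inside the scheduler with the scheduler
 * lock held and interrupts masked.
 */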
#if RT_THREAD_PRIORITY_MAX > 32

rt_inline rt_base_t _get_global_highest_ready_prio(void)
{
    rt_ubase_t number;
    rt_ubase_t highest_ready_priority;

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    if (number != -1)
    {
        highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
    }
    else
    {
        highest_ready_priority = -1;
    }
    return highest_ready_priority;
}

rt_inline rt_base_t _get_local_highest_ready_prio(struct rt_cpu* pcpu)
{
    rt_ubase_t number;
    rt_ubase_t local_highest_ready_priority;

    number = __rt_ffs(pcpu->priority_group) - 1;
    if (number != -1)
    {
        local_highest_ready_priority = (number << 3) + __rt_ffs(pcpu->ready_table[number]) - 1;
    }
    else
    {
        local_highest_ready_priority = -1;
    }
    return local_highest_ready_priority;
}

#else /* if RT_THREAD_PRIORITY_MAX <= 32 */

rt_inline rt_base_t _get_global_highest_ready_prio(void)
{
    return __rt_ffs(rt_thread_ready_priority_group) - 1;
}

rt_inline rt_base_t _get_local_highest_ready_prio(struct rt_cpu* pcpu)
{
    return __rt_ffs(pcpu->priority_group) - 1;
}

#endif /* RT_THREAD_PRIORITY_MAX > 32 */
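
/*
 * Editorial note (illustrative, not part of the original source): with more
 * than 32 priority levels the lookup is two-level. Bit N of
 * rt_thread_ready_priority_group means "some priority in group [N*8, N*8+7]
 * is ready", and rt_thread_ready_table[N] holds one bit per priority inside
 * that group. A worked example, assuming __rt_ffs() returns the 1-based index
 * of the lowest set bit (0 if none):
 *
 *     priority 21 becomes ready:
 *         group = 21 >> 3 = 2, bit inside group = 21 & 7 = 5
 *         rt_thread_ready_priority_group |= 1 << 2;
 *         rt_thread_ready_table[2]       |= 1 << 5;
 *
 *     lookup:
 *         number = __rt_ffs(rt_thread_ready_priority_group) - 1 = 2
 *         prio   = (2 << 3) + __rt_ffs(rt_thread_ready_table[2]) - 1 = 16 + 5 = 21
 */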
/*
 * get the highest priority thread in ready queue
 */
static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    struct rt_thread *highest_priority_thread;
    rt_ubase_t highest_ready_priority, local_highest_ready_priority;
    struct rt_cpu* pcpu = rt_cpu_self();

    highest_ready_priority = _get_global_highest_ready_prio();
    local_highest_ready_priority = _get_local_highest_ready_prio(pcpu);

    /* get highest ready priority thread */
    if (highest_ready_priority < local_highest_ready_priority)
    {
        *highest_prio = highest_ready_priority;

        highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(
            rt_thread_priority_table[highest_ready_priority].next);
    }
    else
    {
        *highest_prio = local_highest_ready_priority;
        if (local_highest_ready_priority != -1)
        {
            highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(
                pcpu->priority_table[local_highest_ready_priority].next);
        }
        else
        {
            highest_priority_thread = RT_NULL;
        }
    }

    RT_ASSERT(!highest_priority_thread ||
              rt_object_get_type(&highest_priority_thread->parent) == RT_Object_Class_Thread);
    return highest_priority_thread;
}
/**
 * @brief set READY and insert thread to ready queue
 *
 * @note caller must hold the `_mp_scheduler_lock` lock
 */
static void _sched_insert_thread_locked(struct rt_thread *thread)
{
    int cpu_id;
    int bind_cpu;
    rt_uint32_t cpu_mask;

    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
    {
        /* already in ready queue */
        return ;
    }
    else if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
    {
        /**
         * only the YIELD -> READY and SUSPEND -> READY transitions are allowed
         * by this API. However, this is a RUNNING thread. So here we reset its
         * status and let it go.
         */
        RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
        return ;
    }

    /* READY thread, insert to ready queue */
    RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);

    cpu_id   = rt_hw_cpu_id();
    bind_cpu = RT_SCHED_CTX(thread).bind_cpu;

    /* insert thread to ready list */
    if (bind_cpu == RT_CPUS_NR)
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;

        /* no time slice left (YIELD), insert the thread at the tail of the ready list */
        if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
        {
            rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
                                  &RT_THREAD_LIST_NODE(thread));
        }
        /* time slices remain, insert the thread at the head of the ready list so it is scheduled first next time */
        else
        {
            rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
                                 &RT_THREAD_LIST_NODE(thread));
        }

        cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
        rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);

#if RT_THREAD_PRIORITY_MAX > 32
        pcpu->ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        pcpu->priority_group |= RT_SCHED_PRIV(thread).number_mask;

        /* no time slice left (YIELD), insert the thread at the tail of the ready list */
        if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
        {
            rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[RT_SCHED_PRIV(thread).current_priority]),
                                  &RT_THREAD_LIST_NODE(thread));
        }
        /* time slices remain, insert the thread at the head of the ready list so it is scheduled first next time */
        else
        {
            rt_list_insert_after(&(rt_cpu_index(bind_cpu)->priority_table[RT_SCHED_PRIV(thread).current_priority]),
                                 &RT_THREAD_LIST_NODE(thread));
        }

        if (cpu_id != bind_cpu)
        {
            cpu_mask = 1 << bind_cpu;
            rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
        }
    }

    LOG_D("insert thread[%.*s], the priority: %d",
          RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
}
/* remove thread from ready queue */
static void _sched_remove_thread_locked(struct rt_thread *thread)
{
    LOG_D("%s [%.*s], the priority: %d", __func__,
          RT_NAME_MAX, thread->parent.name,
          RT_SCHED_PRIV(thread).current_priority);

    /* remove thread from ready list */
    rt_list_remove(&RT_THREAD_LIST_NODE(thread));

    if (RT_SCHED_CTX(thread).bind_cpu == RT_CPUS_NR)
    {
        if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
            if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
            {
                rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
            }
#else
            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        }
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(RT_SCHED_CTX(thread).bind_cpu);

        if (rt_list_isempty(&(pcpu->priority_table[RT_SCHED_PRIV(thread).current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            pcpu->ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
            if (pcpu->ready_table[RT_SCHED_PRIV(thread).number] == 0)
            {
                pcpu->priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
            }
#else
            pcpu->priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        }
    }
}
/**
 * @brief This function will initialize the system scheduler.
 */
void rt_system_scheduler_init(void)
{
    int cpu;
    rt_base_t offset;

    LOG_D("start scheduler: max priority 0x%02x",
          RT_THREAD_PRIORITY_MAX);

    rt_spin_lock_init(&_mp_scheduler_lock);

    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
    {
        rt_list_init(&rt_thread_priority_table[offset]);
    }

    for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
    {
        struct rt_cpu *pcpu = rt_cpu_index(cpu);
        for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
        {
            rt_list_init(&pcpu->priority_table[offset]);
        }

        pcpu->irq_switch_flag = 0;
        pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1;
        pcpu->current_thread = RT_NULL;
        pcpu->priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
        rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table));
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

#ifdef RT_USING_SMART
        rt_spin_lock_init(&(pcpu->spinlock));
#endif
    }

    /* initialize ready priority group */
    rt_thread_ready_priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
    /* initialize ready table */
    rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
}
/**
 * @brief This function will start the scheduler. It selects the thread with the
 *        highest priority level and then switches to it.
 */
void rt_system_scheduler_start(void)
{
    struct rt_thread *to_thread;
    rt_ubase_t highest_ready_priority;

    /**
     * legacy rt_cpus_lock. Some BSP code still uses it for its critical region.
     * Since the scheduler never touches it, we just release it on entry.
     */
    rt_hw_spin_unlock(&_cpus_lock);

    /* ISR will corrupt the coherency of running frame */
    rt_hw_local_irq_disable();

    /**
     * for accessing the scheduler context. Note that we don't have a
     * current_thread at this point
     */
    _fast_spin_lock(&_mp_scheduler_lock);

    /* get the thread to schedule to */
    to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
    RT_ASSERT(to_thread);

    /* to_thread is picked to run on the current core, so remove it from the ready queue */
    _sched_remove_thread_locked(to_thread);

    /* dedicate the current core to `to_thread` */
    RT_SCHED_CTX(to_thread).oncpu = rt_hw_cpu_id();
    RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;

    LOG_D("[cpu#%d] switch to priority#%d thread:%.*s(sp:0x%08x)",
          rt_hw_cpu_id(), RT_SCHED_PRIV(to_thread).current_priority,
          RT_NAME_MAX, to_thread->parent.name, to_thread->sp);

    _fast_spin_unlock(&_mp_scheduler_lock);

    /* switch to new thread */
    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);

    /* never come back */
}
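
/*
 * Editorial note (illustrative, not part of the original source): a simplified
 * sketch of how these two entry points are ordered during boot. The surrounding
 * steps are placeholders; the real sequence lives in the system startup code
 * (rtthread_startup and the BSP).
 *
 *     rt_hw_local_irq_disable();       // no scheduling during early boot
 *     rt_system_scheduler_init();      // empty ready queues, reset per-cpu state
 *     ...create idle/main threads...   // rt_thread_create() / rt_thread_startup()
 *     rt_system_scheduler_start();     // picks the highest-priority ready thread
 *                                      // and never returns to the caller
 */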
/**
 * @addtogroup Thread
 * @cond
 */

/**@{*/

/**
 * @brief This function will handle the IPI interrupt and perform a scheduling in the system.
 *
 * @param vector is the number of the IPI interrupt for system scheduling.
 *
 * @param param is not used, and can be set to RT_NULL.
 *
 * @note this function should be invoked or registered as an ISR in the BSP.
 */
void rt_scheduler_ipi_handler(int vector, void *param)
{
    rt_schedule();
}
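
/*
 * Illustrative usage (editorial note, not part of the original source): a
 * hedged sketch of how a BSP might register this handler on a secondary core.
 * The exact install/unmask API differs between architecture ports; the calls
 * below are assumptions for illustration only.
 *
 *     void bsp_secondary_cpu_init(void)
 *     {
 *         rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
 *         rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
 *     }
 */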
/**
 * @brief Lock the system scheduler
 *
 * @param plvl pointer to the object where the lock level is stored
 *
 * @return rt_err_t RT_EOK
 */
rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
{
    rt_base_t level;
    if (!plvl)
        return -RT_EINVAL;

    SCHEDULER_LOCK(level);
    *plvl = level;

    return RT_EOK;
}

/**
 * @brief Unlock the system scheduler
 * @note this will not cause the scheduler to do a reschedule
 *
 * @param level the lock level of the previous call to rt_sched_lock()
 *
 * @return rt_err_t RT_EOK
 */
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
{
    SCHEDULER_UNLOCK(level);

    return RT_EOK;
}
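
/*
 * Illustrative usage (editorial note, not part of the original source): the
 * lock/unlock pair protects a short read-modify-write on scheduler-managed
 * state. The rt_sched_unlock_n_resched() variant defined further below is used
 * instead when the caller may have made a higher-priority thread ready and
 * wants an immediate reschedule on unlock.
 *
 *     rt_sched_lock_level_t slvl;
 *
 *     rt_sched_lock(&slvl);
 *     ...touch RT_SCHED_CTX()/RT_SCHED_PRIV() fields or the ready queues...
 *     rt_sched_unlock(slvl);
 */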
rt_bool_t rt_sched_is_locked(void)
{
    rt_bool_t rc;
    rt_base_t level;
    struct rt_cpu *pcpu;

    level = rt_hw_local_irq_disable();
    pcpu = rt_cpu_self();

    /* get lock stat which is a boolean value */
    rc = pcpu->sched_lock_flag;

    rt_hw_local_irq_enable(level);
    return rc;
}
/**
 * @brief Pick the highest runnable thread, and pass the control to it
 *
 * @note caller should hold the scheduler context lock. lock will be released
 *       before return from this routine
 */
static rt_thread_t _prepare_context_switch_locked(int cpu_id,
                                                  struct rt_cpu *pcpu,
                                                  rt_thread_t current_thread)
{
    rt_thread_t to_thread = RT_NULL;
    rt_ubase_t highest_ready_priority;

    /* quickly check if any other ready threads queuing */
    if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
    {
        /* pick the highest ready thread */
        to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);

        /* detach current thread from percpu scheduling context */
        RT_SCHED_CTX(current_thread).oncpu = RT_CPU_DETACHED;

        /* check if current thread should be put to ready queue, or scheduled again */
        if ((RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
        {
            /* check if current thread can run on the current core again */
            if (RT_SCHED_CTX(current_thread).bind_cpu == RT_CPUS_NR
                || RT_SCHED_CTX(current_thread).bind_cpu == cpu_id)
            {
                /* if current_thread is the highest runnable thread */
                if (RT_SCHED_PRIV(current_thread).current_priority < highest_ready_priority)
                {
                    to_thread = current_thread;
                }
                /* or no higher-priority thread exists and it still has remaining ticks */
                else if (RT_SCHED_PRIV(current_thread).current_priority == highest_ready_priority &&
                         (RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
                {
                    to_thread = current_thread;
                }
                /* otherwise give out the core */
                else
                {
                    _sched_insert_thread_locked(current_thread);
                }
            }
            else
            {
                /* put current_thread to ready queue of another core */
                _sched_insert_thread_locked(current_thread);
            }

            /* consume the yield flags after scheduling */
            RT_SCHED_CTX(current_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
        }

        /**
         * Now the destination thread is determined and the core is passed to it.
         * Although the percpu scheduling context is not updated here, the cpu
         * stays locked until all the scheduling work is done, so any observer
         * that properly synchronizes (takes the SCHEDULER_LOCK) can safely treat
         * this thread as the running thread on this core.
         */
        RT_SCHED_CTX(to_thread).oncpu = cpu_id;

        /* check if context switch is required */
        if (to_thread != current_thread)
        {
            pcpu->current_priority = (rt_uint8_t)highest_ready_priority;

            RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));

            /* remove to_thread from ready queue and update its status to RUNNING */
            _sched_remove_thread_locked(to_thread);
            RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);

            RT_SCHEDULER_STACK_CHECK(to_thread);

            RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));
        }
        else
        {
            /* current thread is still the best runnable thread */
            to_thread = RT_NULL;
        }
    }
    else
    {
        /* no ready threads */
        to_thread = RT_NULL;
    }

    return to_thread;
}
#ifdef RT_USING_SIGNALS
static void _sched_thread_preprocess_signal(struct rt_thread *current_thread)
{
    /* should process signal? */
    if (rt_sched_thread_is_suspended(current_thread))
    {
        /* if current_thread signal is in pending */
        if ((RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL_PENDING)
        {
#ifdef RT_USING_SMART
            rt_thread_wakeup(current_thread);
#else
            rt_thread_resume(current_thread);
#endif
        }
    }
}

static void _sched_thread_process_signal(struct rt_thread *current_thread)
{
    rt_base_t level;
    SCHEDULER_LOCK(level);

    /* check stat of thread for signal */
    if (RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
    {
        extern void rt_thread_handle_sig(rt_bool_t clean_state);

        RT_SCHED_CTX(current_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;

        SCHEDULER_UNLOCK(level);

        /* check signal status */
        rt_thread_handle_sig(RT_TRUE);
    }
    else
    {
        SCHEDULER_UNLOCK(level);
    }

    /* lock is released above */
}

#define SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, curthr)      \
    do                                                    \
    {                                                     \
        SCHEDULER_CONTEXT_LOCK(pcpu);                     \
        _sched_thread_preprocess_signal(curthr);          \
        SCHEDULER_CONTEXT_UNLOCK(pcpu);                   \
    } while (0)
#define SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(curthr)     \
    _sched_thread_preprocess_signal(curthr)
#define SCHED_THREAD_PROCESS_SIGNAL(curthr) _sched_thread_process_signal(curthr)

#else /* ! RT_USING_SIGNALS */

#define SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, curthr)
#define SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(curthr)
#define SCHED_THREAD_PROCESS_SIGNAL(curthr)
#endif /* RT_USING_SIGNALS */
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
{
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;
    struct rt_cpu *pcpu;
    int cpu_id;
    rt_err_t error = RT_EOK;

    cpu_id = rt_hw_cpu_id();
    pcpu = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

    if (!current_thread)
    {
        /* scheduler is unavailable yet */
        SCHEDULER_CONTEXT_UNLOCK(pcpu);
        SCHEDULER_EXIT_CRITICAL(current_thread);
        rt_hw_local_irq_enable(level);
        return -RT_EBUSY;
    }

    /* defer the switch if we are in interrupt context */
    if (rt_atomic_load(&(pcpu->irq_nest)))
    {
        pcpu->irq_switch_flag = 1;
        SCHEDULER_CONTEXT_UNLOCK(pcpu);
        SCHEDULER_EXIT_CRITICAL(current_thread);
        rt_hw_local_irq_enable(level);
        return -RT_ESCHEDISR;
    }

    /* prepare current_thread for processing if signals existed */
    SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(current_thread);

    /* whether the caller had already locked the local scheduler */
    if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
    {
        /* leaving critical region of global context since we can't schedule */
        SCHEDULER_CONTEXT_UNLOCK(pcpu);

        SET_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
        error = -RT_ESCHEDLOCKED;

        SCHEDULER_EXIT_CRITICAL(current_thread);
    }
    else
    {
        /* flush critical switch flag since a scheduling is done */
        CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);

        /* pick the highest runnable thread, and pass the control to it */
        to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
        if (to_thread)
        {
            /* switch to new thread */
            LOG_D("[cpu#%d] UNLOCK switch to priority#%d "
                  "thread:%.*s(sp:0x%08x), "
                  "from thread:%.*s(sp: 0x%08x)",
                  cpu_id, RT_SCHED_PRIV(to_thread).current_priority,
                  RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
                  RT_NAME_MAX, current_thread->parent.name, current_thread->sp);

            rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
                                 (rt_ubase_t)&to_thread->sp, to_thread);
        }
        else
        {
            SCHEDULER_CONTEXT_UNLOCK(pcpu);
            SCHEDULER_EXIT_CRITICAL(current_thread);
        }
    }

    /* leaving critical region of percpu scheduling context */
    rt_hw_local_irq_enable(level);

    /* process signals on thread if any existed */
    SCHED_THREAD_PROCESS_SIGNAL(current_thread);

    return error;
}
/**
 * @brief This function will perform one scheduling. It will select one thread
 *        with the highest priority level in the global ready queue or the local
 *        ready queue, then switch to it.
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;
    struct rt_cpu *pcpu;
    int cpu_id;

    /* enter critical region of percpu scheduling context */
    level = rt_hw_local_irq_disable();

    /* get percpu scheduling context */
    cpu_id = rt_hw_cpu_id();
    pcpu = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

    /* defer the switch if we are in interrupt context */
    if (rt_atomic_load(&(pcpu->irq_nest)))
    {
        pcpu->irq_switch_flag = 1;
        rt_hw_local_irq_enable(level);
        return ; /* -RT_ESCHEDISR */
    }

    /* forbid any recursive entries of schedule() */
    SCHEDULER_ENTER_CRITICAL(current_thread);

    /* prepare current_thread for processing if signals existed */
    SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, current_thread);

    /* whether the caller had already locked the local scheduler */
    if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
    {
        SET_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
        SCHEDULER_EXIT_CRITICAL(current_thread);

        /* -RT_ESCHEDLOCKED */
    }
    else
    {
        /* flush critical switch flag since a scheduling is done */
        CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
        pcpu->irq_switch_flag = 0;

        /**
         * take the context lock before we do the real scheduling work. The
         * context lock will be released before this scheduling procedure returns.
         */
        SCHEDULER_CONTEXT_LOCK(pcpu);

        /* pick the highest runnable thread, and pass the control to it */
        to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);

        if (to_thread)
        {
            LOG_D("[cpu#%d] switch to priority#%d "
                  "thread:%.*s(sp:0x%08x), "
                  "from thread:%.*s(sp: 0x%08x)",
                  cpu_id, RT_SCHED_PRIV(to_thread).current_priority,
                  RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
                  RT_NAME_MAX, current_thread->parent.name, current_thread->sp);

            rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
                                 (rt_ubase_t)&to_thread->sp, to_thread);
        }
        else
        {
            /* current thread continues to take the core */
            SCHEDULER_CONTEXT_UNLOCK(pcpu);
            SCHEDULER_EXIT_CRITICAL(current_thread);
        }
    }

    /* leaving critical region of percpu scheduling context */
    rt_hw_local_irq_enable(level);

    /* process signals on thread if any existed */
    SCHED_THREAD_PROCESS_SIGNAL(current_thread);
}
/**
 * @brief This function checks whether a scheduling is needed after IRQ context
 *        switching. If yes, it will select one thread with the highest priority
 *        level, and then switch to it.
 */
void rt_scheduler_do_irq_switch(void *context)
{
    int cpu_id;
    rt_base_t level;
    struct rt_cpu *pcpu;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;

    level = rt_hw_local_irq_disable();

    cpu_id = rt_hw_cpu_id();
    pcpu = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

    /* forbid any recursive entries of schedule() */
    SCHEDULER_ENTER_CRITICAL(current_thread);

    SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, current_thread);

    /* any pending scheduling existed? */
    if (pcpu->irq_switch_flag == 0)
    {
        /* if no, just continue execution of current_thread */
        SCHEDULER_EXIT_CRITICAL(current_thread);
        rt_hw_local_irq_enable(level);
        return;
    }

    /* whether the caller had already locked the local scheduler */
    if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
    {
        SET_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
        SCHEDULER_EXIT_CRITICAL(current_thread);
    }
    else if (rt_atomic_load(&(pcpu->irq_nest)) == 0)
    {
        /* flush critical & irq switch flag since a scheduling is done */
        CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
        pcpu->irq_switch_flag = 0;

        SCHEDULER_CONTEXT_LOCK(pcpu);

        /* pick the highest runnable thread, and pass the control to it */
        to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
        if (to_thread)
        {
            LOG_D("[cpu#%d] IRQ switch to priority#%d "
                  "thread:%.*s(sp:0x%08x), "
                  "from thread:%.*s(sp: 0x%08x)",
                  cpu_id, RT_SCHED_PRIV(to_thread).current_priority,
                  RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
                  RT_NAME_MAX, current_thread->parent.name, current_thread->sp);

            rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
                                           (rt_ubase_t)&to_thread->sp, to_thread);
        }
        else
        {
            /* current thread continues to take the core */
            SCHEDULER_CONTEXT_UNLOCK(pcpu);
            SCHEDULER_EXIT_CRITICAL(current_thread);
        }
    }
    else
    {
        SCHEDULER_EXIT_CRITICAL(current_thread);
    }

    /* leaving critical region of percpu scheduling context */
    rt_hw_local_irq_enable(level);
}
/**
 * @brief This function will insert a thread into the system ready queue. The state of
 *        the thread will be set to READY and the thread will be removed from the suspend queue.
 *
 * @param thread is the thread to be inserted.
 *
 * @note Please do not invoke this function in user application.
 *       Caller must hold the scheduler lock
 */
void rt_sched_insert_thread(struct rt_thread *thread)
{
    RT_ASSERT(thread != RT_NULL);
    RT_SCHED_DEBUG_IS_LOCKED;

    /* set READY and insert thread to ready queue */
    _sched_insert_thread_locked(thread);
}

/**
 * @brief This function will remove a thread from the system ready queue.
 *
 * @param thread is the thread to be removed.
 *
 * @note Please do not invoke this function in user application.
 */
void rt_sched_remove_thread(struct rt_thread *thread)
{
    RT_ASSERT(thread != RT_NULL);
    RT_SCHED_DEBUG_IS_LOCKED;

    /* remove thread from scheduler ready list */
    _sched_remove_thread_locked(thread);
    RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND_UNINTERRUPTIBLE;
}
/* thread status initialization and setting up on startup */
void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
{
    rt_list_init(&RT_THREAD_LIST_NODE(thread));

    /* priority init */
    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
    RT_SCHED_PRIV(thread).init_priority    = priority;
    RT_SCHED_PRIV(thread).current_priority = priority;

    /* don't add to scheduler queue as init thread */
    RT_SCHED_PRIV(thread).number_mask = 0;
#if RT_THREAD_PRIORITY_MAX > 32
    RT_SCHED_PRIV(thread).number = 0;
    RT_SCHED_PRIV(thread).high_mask = 0;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

    /* tick init */
    RT_SCHED_PRIV(thread).init_tick = tick;
    RT_SCHED_PRIV(thread).remaining_tick = tick;

#ifdef RT_USING_SMP
    /* lock init */
    RT_SCHED_CTX(thread).critical_lock_nest = 0;
#endif /* RT_USING_SMP */
}

/* Normally, there isn't anyone racing with us so this operation is lockless */
void rt_sched_thread_startup(struct rt_thread *thread)
{
#if RT_THREAD_PRIORITY_MAX > 32
    RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;              /* 5 bit */
    RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).number;
    RT_SCHED_PRIV(thread).high_mask = 1L << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3 bit */
#else
    RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

    /* change thread stat, so we can resume it */
    RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND;
}

/**
 * @brief Update the scheduling status of a thread. This operation is treated as
 *        atomic with the update of SP. Since the local irq is disabled, it's okay
 *        to assume that the stack will not be modified meanwhile.
 */
void rt_sched_post_ctx_switch(struct rt_thread *thread)
{
    struct rt_cpu* pcpu = rt_cpu_self();
    rt_thread_t from_thread = pcpu->current_thread;

    RT_ASSERT(rt_hw_interrupt_is_disabled());

    if (from_thread)
    {
        RT_ASSERT(RT_SCHED_CTX(from_thread).critical_lock_nest == 1);

        /* release the scheduler lock since we are done with critical region */
        RT_SCHED_CTX(from_thread).critical_lock_nest = 0;
        SCHEDULER_CONTEXT_UNLOCK(pcpu);
    }
    /* safe to access since irq is masked out */
    pcpu->current_thread = thread;
#ifdef ARCH_USING_HW_THREAD_SELF
    rt_hw_thread_set_self(thread);
#endif /* ARCH_USING_HW_THREAD_SELF */
}
#ifdef RT_DEBUGING_CRITICAL

static volatile int _critical_error_occurred = 0;

void rt_exit_critical_safe(rt_base_t critical_level)
{
    struct rt_cpu *pcpu = rt_cpu_self();
    rt_thread_t current_thread = pcpu->current_thread;
    if (current_thread && !_critical_error_occurred)
    {
        if (critical_level != RT_SCHED_CTX(current_thread).critical_lock_nest)
        {
            int dummy = 1;
            _critical_error_occurred = 1;

            rt_kprintf("%s: un-compatible critical level\n" \
                       "\tCurrent %d\n\tCaller %d\n",
                       __func__, RT_SCHED_CTX(current_thread).critical_lock_nest,
                       critical_level);
            rt_backtrace();

            while (dummy) ;
        }
    }
    rt_exit_critical();
}

#else /* !RT_DEBUGING_CRITICAL */

void rt_exit_critical_safe(rt_base_t critical_level)
{
    RT_UNUSED(critical_level);
    return rt_exit_critical();
}

#endif /* RT_DEBUGING_CRITICAL */
RTM_EXPORT(rt_exit_critical_safe);
#ifdef ARCH_USING_HW_THREAD_SELF
#define FREE_THREAD_SELF(lvl)

#else /* !ARCH_USING_HW_THREAD_SELF */
#define FREE_THREAD_SELF(lvl)              \
    do                                     \
    {                                      \
        rt_hw_local_irq_enable(lvl);       \
    } while (0)

#endif /* ARCH_USING_HW_THREAD_SELF */
/**
 * @brief This function will lock the thread scheduler.
 */
rt_base_t rt_enter_critical(void)
{
    rt_base_t critical_level;
    struct rt_thread *current_thread;

#ifndef ARCH_USING_HW_THREAD_SELF
    rt_base_t level;
    struct rt_cpu *pcpu;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    pcpu = rt_cpu_self();
    current_thread = pcpu->current_thread;

#else /* !ARCH_USING_HW_THREAD_SELF */
    current_thread = rt_hw_thread_self();

#endif /* ARCH_USING_HW_THREAD_SELF */

    if (!current_thread)
    {
        FREE_THREAD_SELF(level);
        /* scheduler unavailable */
        return -RT_EINVAL;
    }

    /* critical for local cpu */
    RT_SCHED_CTX(current_thread).critical_lock_nest++;
    critical_level = RT_SCHED_CTX(current_thread).critical_lock_nest;

    FREE_THREAD_SELF(level);

    return critical_level;
}
RTM_EXPORT(rt_enter_critical);

/**
 * @brief This function will unlock the thread scheduler.
 */
void rt_exit_critical(void)
{
    struct rt_thread *current_thread;
    rt_bool_t need_resched;

#ifndef ARCH_USING_HW_THREAD_SELF
    rt_base_t level;
    struct rt_cpu *pcpu;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    pcpu = rt_cpu_self();
    current_thread = pcpu->current_thread;

#else /* !ARCH_USING_HW_THREAD_SELF */
    current_thread = rt_hw_thread_self();

#endif /* ARCH_USING_HW_THREAD_SELF */

    if (!current_thread)
    {
        FREE_THREAD_SELF(level);
        return;
    }

    /* the necessary memory barrier is done on irq_(dis|en)able */
    RT_SCHED_CTX(current_thread).critical_lock_nest--;

    /* may need a rescheduling */
    if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
    {
        /* is there any scheduling request unfinished? */
        need_resched = IS_CRITICAL_SWITCH_PEND(pcpu, current_thread);
        CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);

        FREE_THREAD_SELF(level);

        if (need_resched)
            rt_schedule();
    }
    else
    {
        /* each exit_critical is strictly corresponding to an enter_critical */
        RT_ASSERT(RT_SCHED_CTX(current_thread).critical_lock_nest > 0);

        FREE_THREAD_SELF(level);
    }
}
RTM_EXPORT(rt_exit_critical);
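
/*
 * Illustrative usage (editorial note, not part of the original source): a
 * minimal sketch of a scheduler-critical region. Preemption on the local core
 * is disabled between the calls; interrupts stay enabled, and a reschedule
 * requested inside the region is deferred until the outermost
 * rt_exit_critical(). Pairing the level returned by rt_enter_critical() with
 * rt_exit_critical_safe() catches unbalanced nesting when RT_DEBUGING_CRITICAL
 * is enabled.
 *
 *     rt_base_t level = rt_enter_critical();
 *     ...update data shared with other threads on this core...
 *     rt_exit_critical_safe(level);
 */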
/**
 * @brief Get the scheduler lock level.
 *
 * @return the level of the scheduler lock. 0 means unlocked.
 */
rt_uint16_t rt_critical_level(void)
{
    rt_base_t level;
    rt_uint16_t critical_lvl;
    struct rt_thread *current_thread;

    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;

    if (current_thread)
    {
        /* the necessary memory barrier is done on irq_(dis|en)able */
        critical_lvl = RT_SCHED_CTX(current_thread).critical_lock_nest;
    }
    else
    {
        critical_lvl = 0;
    }

    rt_hw_local_irq_enable(level);
    return critical_lvl;
}
RTM_EXPORT(rt_critical_level);
rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
{
    rt_sched_lock_level_t slvl;
    rt_uint8_t thread_stat;

    RT_SCHED_DEBUG_IS_UNLOCKED;

    if (cpu >= RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }

    rt_sched_lock(&slvl);

    thread_stat = rt_sched_thread_get_stat(thread);

    if (thread_stat == RT_THREAD_READY)
    {
        /* unbind */
        /* remove from old ready queue */
        rt_sched_remove_thread(thread);
        /* change thread bind cpu */
        RT_SCHED_CTX(thread).bind_cpu = cpu;
        /* add to new ready queue */
        rt_sched_insert_thread(thread);

        if (rt_thread_self() != RT_NULL)
        {
            rt_sched_unlock_n_resched(slvl);
        }
        else
        {
            rt_sched_unlock(slvl);
        }
    }
    else
    {
        RT_SCHED_CTX(thread).bind_cpu = cpu;
        if (thread_stat == RT_THREAD_RUNNING)
        {
            /* thread is running on a cpu */
            int current_cpu = rt_hw_cpu_id();

            if (cpu != RT_CPUS_NR)
            {
                if (RT_SCHED_CTX(thread).oncpu == current_cpu)
                {
                    /* current thread on current cpu */
                    if (cpu != current_cpu)
                    {
                        /* bind to other cpu */
                        rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << cpu);
                        /* self cpu needs a reschedule */
                        rt_sched_unlock_n_resched(slvl);
                    }
                    else
                    {
                        /* else do nothing */
                        rt_sched_unlock(slvl);
                    }
                }
                else
                {
                    /* not running on this cpu, but the destination cpu can be itself */
                    rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << RT_SCHED_CTX(thread).oncpu);
                    rt_sched_unlock(slvl);
                }
            }
            else
            {
                /* else do nothing */
                rt_sched_unlock(slvl);
            }
        }
        else
        {
            rt_sched_unlock(slvl);
        }
    }

    return RT_EOK;
}
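
/*
 * Illustrative usage (editorial note, not part of the original source):
 * pinning a worker thread to core 1, or releasing the binding again by
 * passing RT_CPUS_NR. `worker` is a hypothetical thread handle; application
 * code normally goes through rt_thread_control(thread,
 * RT_THREAD_CTRL_BIND_CPU, ...) rather than calling this directly.
 *
 *     rt_sched_thread_bind_cpu(worker, 1);            // run only on cpu#1
 *     rt_sched_thread_bind_cpu(worker, RT_CPUS_NR);   // allow any cpu again
 */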
/**@}*/
/**@endcond*/