
/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-17     Bernard      the first version
 * 2006-04-28     Bernard      fix the scheduler algorithm
 * 2006-04-30     Bernard      add SCHEDULER_DEBUG
 * 2006-05-27     Bernard      fix the scheduler algorithm for same priority
 *                             thread schedule
 * 2006-06-04     Bernard      rewrite the scheduler algorithm
 * 2006-08-03     Bernard      add hook support
 * 2006-09-05     Bernard      add 32 priority level support
 * 2006-09-24     Bernard      add rt_system_scheduler_start function
 * 2009-09-16     Bernard      fix _rt_scheduler_stack_check
 * 2010-04-11     yi.qiu       add module feature
 * 2010-07-13     Bernard      fix the maximal number of rt_scheduler_lock_nest
 *                             issue found by kuronca
 * 2010-12-13     Bernard      add defunct list initialization even if heap is not used
 * 2011-05-10     Bernard      clean scheduler debug log
 * 2013-12-21     Grissiom     add rt_critical_level
 * 2018-11-22     Jesven       remove the current task from ready queue
 *                             add per cpu ready queue
 *                             add _get_highest_priority_thread to find highest priority task
 *                             rt_schedule_insert_thread won't insert current task to ready queue
 *                             in the smp version, rt_hw_context_switch_interrupt may switch to
 *                             a new task directly
 *
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef RT_USING_SMP
rt_hw_spinlock_t _rt_critical_lock;
#endif /*RT_USING_SMP*/

rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
rt_uint32_t rt_thread_ready_priority_group;
#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256 */
rt_uint8_t rt_thread_ready_table[32];
#endif

#ifndef RT_USING_SMP
extern volatile rt_uint8_t rt_interrupt_nest;
static rt_int16_t rt_scheduler_lock_nest;
struct rt_thread *rt_current_thread;
rt_uint8_t rt_current_priority;
#endif /*RT_USING_SMP*/

rt_list_t rt_thread_defunct;

#ifdef RT_USING_HOOK
static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * This function will set a hook function, which will be invoked when a thread
 * switch happens.
 *
 * @param hook the hook function
 */
void
rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
{
    rt_scheduler_hook = hook;
}
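
/*
 * Illustrative usage sketch (not part of the kernel): an application built
 * with RT_USING_HOOK can register a hook to trace context switches.  The
 * hook body below is an assumption for demonstration only.
 *
 *     static void my_switch_hook(struct rt_thread *from, struct rt_thread *to)
 *     {
 *         rt_kprintf("switch: %s -> %s\n", from->name, to->name);
 *     }
 *
 *     rt_scheduler_sethook(my_switch_hook);   // e.g. during application init
 */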
/**@}*/
#endif

#ifdef RT_USING_OVERFLOW_CHECK
static void _rt_scheduler_stack_check(struct rt_thread *thread)
{
    RT_ASSERT(thread != RT_NULL);

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
    if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif
        (rt_ubase_t)thread->sp <= (rt_ubase_t)thread->stack_addr ||
        (rt_ubase_t)thread->sp >
        (rt_ubase_t)thread->stack_addr + (rt_ubase_t)thread->stack_size)
    {
        rt_ubase_t level;

        rt_kprintf("thread:%s stack overflow\n", thread->name);
#ifdef RT_USING_FINSH
        {
            extern long list_thread(void);
            list_thread();
        }
#endif
        level = rt_hw_interrupt_disable();
        while (level);
    }
#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
    {
        rt_kprintf("warning: %s stack is close to the top of stack address.\n",
                   thread->name);
    }
#else
    else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
    {
        rt_kprintf("warning: %s stack is close to end of stack address.\n",
                   thread->name);
    }
#endif
}
#endif
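
/*
 * Note on the check above: thread stacks are filled with the magic byte '#'
 * when the thread is initialized (see rt_thread_init()/rt_thread_create() in
 * thread.c), so a clobbered sentinel byte, or an sp outside the stack range,
 * indicates an overflow.  A minimal configuration sketch, assuming the usual
 * rtconfig.h configuration file:
 *
 *     // in rtconfig.h
 *     #define RT_USING_OVERFLOW_CHECK
 */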
/*
 * get the highest priority thread in ready queue
 */
#ifdef RT_USING_SMP
static struct rt_thread* _get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    register struct rt_thread *highest_priority_thread;
    register rt_ubase_t highest_ready_priority, local_highest_ready_priority;
    struct rt_cpu* pcpu = rt_cpu_self();

#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t number;

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
    number = __rt_ffs(pcpu->priority_group) - 1;
    local_highest_ready_priority = (number << 3) + __rt_ffs(pcpu->ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
    local_highest_ready_priority = __rt_ffs(pcpu->priority_group) - 1;
#endif

    /* get highest ready priority thread */
    if (highest_ready_priority < local_highest_ready_priority)
    {
        *highest_prio = highest_ready_priority;
        highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                                struct rt_thread,
                                                tlist);
    }
    else
    {
        *highest_prio = local_highest_ready_priority;
        highest_priority_thread = rt_list_entry(pcpu->priority_table[local_highest_ready_priority].next,
                                                struct rt_thread,
                                                tlist);
    }

    return highest_priority_thread;
}
#else
static struct rt_thread* _get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    register struct rt_thread *highest_priority_thread;
    register rt_ubase_t highest_ready_priority;

#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t number;

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#endif

    /* get highest ready priority thread */
    highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                            struct rt_thread,
                                            tlist);

    *highest_prio = highest_ready_priority;

    return highest_priority_thread;
}
#endif
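
/*
 * Worked example of the lookup above (illustrative, for RT_THREAD_PRIORITY_MAX > 32):
 * a ready thread at priority 41 sets bit (41 >> 3) = 5 in
 * rt_thread_ready_priority_group and bit (41 & 0x07) = 1 in
 * rt_thread_ready_table[5].  Assuming no higher-priority (lower-numbered)
 * thread is ready, the search recovers it as
 *
 *     number                 = __rt_ffs(rt_thread_ready_priority_group) - 1;          // 5
 *     highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[5]) - 1;
 *                            = (5 << 3) + 2 - 1 = 41
 */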
/**
 * @ingroup SystemInit
 * This function will initialize the system scheduler
 */
void rt_system_scheduler_init(void)
{
#ifdef RT_USING_SMP
    int cpu;
#endif /*RT_USING_SMP*/
    register rt_base_t offset;

#ifndef RT_USING_SMP
    rt_scheduler_lock_nest = 0;
#endif /*RT_USING_SMP*/

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("start scheduler: max priority 0x%02x\n",
                                      RT_THREAD_PRIORITY_MAX));

    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
    {
        rt_list_init(&rt_thread_priority_table[offset]);
    }
#ifdef RT_USING_SMP
    for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
    {
        struct rt_cpu *pcpu = rt_cpu_index(cpu);
        for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
        {
            rt_list_init(&pcpu->priority_table[offset]);
        }

        pcpu->irq_switch_flag = 0;
        pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1;
        pcpu->current_thread = RT_NULL;
        pcpu->priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
        rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table));
#endif
    }
#endif /*RT_USING_SMP*/

    /* initialize ready priority group */
    rt_thread_ready_priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
    /* initialize ready table */
    rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif

    /* initialize thread defunct */
    rt_list_init(&rt_thread_defunct);
}

/**
 * @ingroup SystemInit
 * This function will start up the scheduler. It will select the thread
 * with the highest priority level, then switch to it.
 */
void rt_system_scheduler_start(void)
{
    register struct rt_thread *to_thread;
    rt_ubase_t highest_ready_priority;

    to_thread = _get_highest_priority_thread(&highest_ready_priority);

#ifdef RT_USING_SMP
    to_thread->oncpu = rt_hw_cpu_id();
#else
    rt_current_thread = to_thread;
#endif /*RT_USING_SMP*/

    rt_schedule_remove_thread(to_thread);
    to_thread->stat = RT_THREAD_RUNNING;

    /* switch to new thread */
#ifdef RT_USING_SMP
    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);
#else
    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp);
#endif /*RT_USING_SMP*/

    /* never come back */
}
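
/*
 * Illustrative startup sketch (not part of this file): in a typical BSP the
 * sequence inside rtthread_startup() is roughly the one below.  The thread
 * name, entry, stack size, priority and tick values are assumptions for
 * demonstration only.
 *
 *     rt_system_scheduler_init();                 // empty ready queues, clear bitmaps
 *
 *     rt_thread_t tid = rt_thread_create("init",  // create and start the first thread
 *                                        init_entry, RT_NULL,
 *                                        2048, 10, 20);
 *     rt_thread_startup(tid);                     // thread becomes READY
 *
 *     rt_system_scheduler_start();                // switch to it; does not return
 */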
/**
 * @addtogroup Thread
 */

/**@{*/

#ifdef RT_USING_SMP
/**
 * This function will handle the IPI interrupt and do a scheduling in the system;
 *
 * @param vector the number of the IPI interrupt for system scheduling
 * @param param  unused, pass RT_NULL
 *
 * NOTE: this function should be invoked or registered as an ISR in the BSP.
 */
void rt_scheduler_ipi_handler(int vector, void *param)
{
    rt_schedule();
}
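
/*
 * Illustrative registration sketch: on an SMP BSP the scheduling IPI is
 * typically wired up during CPU bring-up.  rt_hw_interrupt_install() and
 * rt_hw_interrupt_umask() are BSP-provided interfaces; their availability
 * and the exact wiring point are assumptions here.
 *
 *     rt_hw_interrupt_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler, RT_NULL, "ipi_sched");
 *     rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
 */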
/**
 * This function will perform one scheduling. It will select one thread
 * with the highest priority level in the global ready queue or the local
 * ready queue, then switch to it.
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;
    struct rt_cpu    *pcpu;
    int cpu_id;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    cpu_id = rt_hw_cpu_id();
    pcpu   = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

    /* whether do switch in interrupt */
    if (pcpu->irq_nest)
    {
        pcpu->irq_switch_flag = 1;
        rt_hw_interrupt_enable(level);
        goto __exit;
    }

#ifdef RT_USING_SIGNALS
    if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND)
    {
        /* if current_thread signal is in pending */
        if ((current_thread->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL_PENDING)
        {
            rt_thread_resume(current_thread);
        }
    }
#endif

    if (current_thread->scheduler_lock_nest == 1) /* whether lock scheduler */
    {
        rt_ubase_t highest_ready_priority;

        if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
        {
            to_thread = _get_highest_priority_thread(&highest_ready_priority);
            current_thread->oncpu = RT_CPU_DETACHED;
            if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = current_thread;
                }
                else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
                {
                    to_thread = current_thread;
                }
                else
                {
                    current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
                    rt_schedule_insert_thread(current_thread);
                }
            }
            to_thread->oncpu = cpu_id;
            if (to_thread != current_thread)
            {
                /* if the destination thread is not the same as current thread */
                pcpu->current_priority = (rt_uint8_t)highest_ready_priority;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

                /* switch to new thread */
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                             ("[%d]switch to priority#%d "
                              "thread:%.*s(sp:0x%08x), "
                              "from thread:%.*s(sp: 0x%08x)\n",
                              pcpu->irq_nest, highest_ready_priority,
                              RT_NAME_MAX, to_thread->name, to_thread->sp,
                              RT_NAME_MAX, current_thread->name, current_thread->sp));

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif

                rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
                                     (rt_ubase_t)&to_thread->sp, to_thread);
            }
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
    /* check stat of thread for signal */
    level = rt_hw_interrupt_disable();
    if (current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
    {
        extern void rt_thread_handle_sig(rt_bool_t clean_state);

        current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;

        rt_hw_interrupt_enable(level);

        /* check signal status */
        rt_thread_handle_sig(RT_TRUE);
    }
    else
    {
        rt_hw_interrupt_enable(level);
    }
#endif

__exit:
    return ;
}
#else
/**
 * This function will perform one schedule. It will select one thread
 * with the highest priority level, then switch to it.
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *from_thread;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* check the scheduler is enabled or not */
    if (rt_scheduler_lock_nest == 0)
    {
        rt_ubase_t highest_ready_priority;

        if (rt_thread_ready_priority_group != 0)
        {
            /* need_insert_from_thread: need to insert from_thread to ready queue */
            int need_insert_from_thread = 0;

            to_thread = _get_highest_priority_thread(&highest_ready_priority);

            if ((rt_current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (rt_current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = rt_current_thread;
                }
                else if (rt_current_thread->current_priority == highest_ready_priority && (rt_current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
                {
                    to_thread = rt_current_thread;
                }
                else
                {
                    rt_current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
                    need_insert_from_thread = 1;
                }
            }

            if (to_thread != rt_current_thread)
            {
                /* if the destination thread is not the same as current thread */
                rt_current_priority = (rt_uint8_t)highest_ready_priority;
                from_thread         = rt_current_thread;
                rt_current_thread   = to_thread;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));

                if (need_insert_from_thread)
                {
                    rt_schedule_insert_thread(from_thread);
                }

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

                /* switch to new thread */
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                             ("[%d]switch to priority#%d "
                              "thread:%.*s(sp:0x%08x), "
                              "from thread:%.*s(sp: 0x%08x)\n",
                              rt_interrupt_nest, highest_ready_priority,
                              RT_NAME_MAX, to_thread->name, to_thread->sp,
                              RT_NAME_MAX, from_thread->name, from_thread->sp));

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif

                if (rt_interrupt_nest == 0)
                {
                    extern void rt_thread_handle_sig(rt_bool_t clean_state);

                    rt_hw_context_switch((rt_ubase_t)&from_thread->sp,
                                         (rt_ubase_t)&to_thread->sp);

                    /* enable interrupt */
                    rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
                    /* check stat of thread for signal */
                    level = rt_hw_interrupt_disable();
                    if (rt_current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
                    {
                        extern void rt_thread_handle_sig(rt_bool_t clean_state);

                        rt_current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;

                        rt_hw_interrupt_enable(level);

                        /* check signal status */
                        rt_thread_handle_sig(RT_TRUE);
                    }
                    else
                    {
                        rt_hw_interrupt_enable(level);
                    }
#endif
                    goto __exit;
                }
                else
                {
                    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));

                    rt_hw_context_switch_interrupt((rt_ubase_t)&from_thread->sp,
                                                   (rt_ubase_t)&to_thread->sp);
                }
            }
            else
            {
                rt_schedule_remove_thread(rt_current_thread);
                rt_current_thread->stat = RT_THREAD_RUNNING | (rt_current_thread->stat & ~RT_THREAD_STAT_MASK);
            }
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

__exit:
    return;
}
#endif /*RT_USING_SMP*/
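
/*
 * Illustrative note on the YIELD path above: rt_thread_yield() (defined in
 * thread.c) marks the running thread with the yield flag and then calls
 * rt_schedule(), so a ready peer of the same priority at the head of the
 * list gets the CPU and the yielding thread is re-inserted at the tail.
 * A minimal round-robin sketch between two equal-priority threads;
 * do_some_work() is a placeholder, only rt_thread_yield() is a real API:
 *
 *     while (1)
 *     {
 *         do_some_work();
 *         rt_thread_yield();   // hand the CPU to the other ready thread of the same priority
 *     }
 */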
/**
 * This function checks if a scheduling is needed after IRQ context. If yes,
 * it will select one thread with the highest priority level, and then switch
 * to it.
 */
#ifdef RT_USING_SMP
void rt_scheduler_do_irq_switch(void *context)
{
    int cpu_id;
    rt_base_t level;
    struct rt_cpu* pcpu;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;

    level = rt_hw_interrupt_disable();

    cpu_id = rt_hw_cpu_id();
    pcpu   = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

#ifdef RT_USING_SIGNALS
    if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND)
    {
        /* if current_thread signal is in pending */
        if ((current_thread->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL_PENDING)
        {
            rt_thread_resume(current_thread);
        }
    }
#endif

    if (pcpu->irq_switch_flag == 0)
    {
        rt_hw_interrupt_enable(level);
        return;
    }

    if (current_thread->scheduler_lock_nest == 1 && pcpu->irq_nest == 0)
    {
        rt_ubase_t highest_ready_priority;

        /* clear irq switch flag */
        pcpu->irq_switch_flag = 0;

        if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
        {
            to_thread = _get_highest_priority_thread(&highest_ready_priority);
            current_thread->oncpu = RT_CPU_DETACHED;
            if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = current_thread;
                }
                else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
                {
                    to_thread = current_thread;
                }
                else
                {
                    current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
                    rt_schedule_insert_thread(current_thread);
                }
            }
            to_thread->oncpu = cpu_id;
            if (to_thread != current_thread)
            {
                /* if the destination thread is not the same as current thread */
                pcpu->current_priority = (rt_uint8_t)highest_ready_priority;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));

                current_thread->cpus_lock_nest--;
                current_thread->scheduler_lock_nest--;

                rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
                                               (rt_ubase_t)&to_thread->sp, to_thread);
            }
        }
    }
    rt_hw_interrupt_enable(level);
}
#endif /*RT_USING_SMP*/
/*
 * This function will insert a thread to the system ready queue. The state of
 * the thread will be set to READY and the thread will be removed from the
 * suspend queue.
 *
 * @param thread the thread to be inserted
 * @note Please do not invoke this function in user application.
 */
#ifdef RT_USING_SMP
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    int cpu_id;
    int bind_cpu;
    rt_uint32_t cpu_mask;
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* it should be RUNNING thread */
    if (thread->oncpu != RT_CPU_DETACHED)
    {
        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
        goto __exit;
    }

    /* READY thread, insert to ready queue */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);

    cpu_id   = rt_hw_cpu_id();
    bind_cpu = thread->bind_cpu;

    /* insert thread to ready list */
    if (bind_cpu == RT_CPUS_NR)
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif
        rt_thread_ready_priority_group |= thread->number_mask;

        rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                              &(thread->tlist));
        cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
        rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);

#if RT_THREAD_PRIORITY_MAX > 32
        pcpu->ready_table[thread->number] |= thread->high_mask;
#endif
        pcpu->priority_group |= thread->number_mask;

        rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
                              &(thread->tlist));

        if (cpu_id != bind_cpu)
        {
            cpu_mask = 1 << bind_cpu;
            rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
        }
    }

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));

__exit:
    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#else
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    register rt_base_t temp;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    temp = rt_hw_interrupt_disable();

    /* it's current thread, it should be RUNNING thread */
    if (thread == rt_current_thread)
    {
        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
        goto __exit;
    }

    /* READY thread, insert to ready queue */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
    /* insert thread to ready list */
    rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                          &(thread->tlist));

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));

    /* set priority mask */
#if RT_THREAD_PRIORITY_MAX > 32
    rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif
    rt_thread_ready_priority_group |= thread->number_mask;

__exit:
    /* enable interrupt */
    rt_hw_interrupt_enable(temp);
}
#endif /*RT_USING_SMP*/
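
/*
 * Illustrative note on the bitmap fields used above: number, number_mask and
 * high_mask are pre-computed when the thread's priority is set (see
 * rt_thread_init()/rt_thread_control() in thread.c).  A sketch of the
 * mapping for RT_THREAD_PRIORITY_MAX > 32, not a copy of that code:
 *
 *     thread->number      = thread->current_priority >> 3;            // index into the ready table
 *     thread->number_mask = 1L << thread->number;                     // bit in rt_thread_ready_priority_group
 *     thread->high_mask   = 1L << (thread->current_priority & 0x07);  // bit in rt_thread_ready_table[number]
 *
 * For RT_THREAD_PRIORITY_MAX <= 32 only number_mask is used, with
 * number_mask = 1L << current_priority.
 */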
/*
 * This function will remove a thread from the system ready queue.
 *
 * @param thread the thread to be removed
 *
 * @note Please do not invoke this function in user application.
 */
#ifdef RT_USING_SMP
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));

    /* remove thread from ready list */
    rt_list_remove(&(thread->tlist));
    if (thread->bind_cpu == RT_CPUS_NR)
    {
        if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            rt_thread_ready_table[thread->number] &= ~thread->high_mask;
            if (rt_thread_ready_table[thread->number] == 0)
            {
                rt_thread_ready_priority_group &= ~thread->number_mask;
            }
#else
            rt_thread_ready_priority_group &= ~thread->number_mask;
#endif
        }
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu);

        if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            pcpu->ready_table[thread->number] &= ~thread->high_mask;
            if (pcpu->ready_table[thread->number] == 0)
            {
                pcpu->priority_group &= ~thread->number_mask;
            }
#else
            pcpu->priority_group &= ~thread->number_mask;
#endif
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#else
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));

    /* remove thread from ready list */
    rt_list_remove(&(thread->tlist));
    if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] &= ~thread->high_mask;
        if (rt_thread_ready_table[thread->number] == 0)
        {
            rt_thread_ready_priority_group &= ~thread->number_mask;
        }
#else
        rt_thread_ready_priority_group &= ~thread->number_mask;
#endif
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#endif /*RT_USING_SMP*/
/**
 * This function will lock the thread scheduler.
 */
#ifdef RT_USING_SMP
void rt_enter_critical(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return ;
    }

    /*
     * the maximal number of nest is RT_UINT16_MAX, which is big
     * enough, so it is not checked here
     */

    /* lock scheduler for all cpus */
    if (current_thread->critical_lock_nest == 0)
    {
        rt_hw_spin_lock(&_rt_critical_lock);
    }

    /* critical for local cpu */
    current_thread->critical_lock_nest ++;

    /* lock scheduler for local cpu */
    current_thread->scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}
#else
void rt_enter_critical(void)
{
    register rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /*
     * the maximal number of nest is RT_UINT16_MAX, which is big
     * enough, so it is not checked here
     */
    rt_scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#endif /*RT_USING_SMP*/
RTM_EXPORT(rt_enter_critical);
/**
 * This function will unlock the thread scheduler.
 */
#ifdef RT_USING_SMP
void rt_exit_critical(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return ;
    }

    current_thread->scheduler_lock_nest --;

    current_thread->critical_lock_nest --;

    if (current_thread->critical_lock_nest == 0)
    {
        rt_hw_spin_unlock(&_rt_critical_lock);
    }

    if (current_thread->scheduler_lock_nest <= 0)
    {
        current_thread->scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_local_irq_enable(level);

        rt_schedule();
    }
    else
    {
        /* enable interrupt */
        rt_hw_local_irq_enable(level);
    }
}
#else
void rt_exit_critical(void)
{
    register rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    rt_scheduler_lock_nest --;
    if (rt_scheduler_lock_nest <= 0)
    {
        rt_scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        if (rt_current_thread)
        {
            /* if scheduler is started, do a schedule */
            rt_schedule();
        }
    }
    else
    {
        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
}
#endif /*RT_USING_SMP*/
RTM_EXPORT(rt_exit_critical);
/**
 * Get the scheduler lock level.
 *
 * @return the level of the scheduler lock. 0 means unlocked.
 */
rt_uint16_t rt_critical_level(void)
{
#ifdef RT_USING_SMP
    struct rt_thread *current_thread = rt_cpu_self()->current_thread;

    return current_thread->critical_lock_nest;
#else
    return rt_scheduler_lock_nest;
#endif /*RT_USING_SMP*/
}
RTM_EXPORT(rt_critical_level);
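
/*
 * Illustrative usage sketch of the scheduler lock (not part of the kernel):
 * shared_counter is an assumed application variable for demonstration only.
 *
 *     rt_enter_critical();            // no thread switch can occur from here on
 *     shared_counter++;               // touch data shared with other threads
 *     RT_ASSERT(rt_critical_level() > 0);
 *     rt_exit_critical();             // may trigger rt_schedule() when nesting drops to 0
 *
 * Note that rt_enter_critical() only disables preemption by other threads;
 * interrupts stay enabled, so data shared with ISRs still needs
 * rt_hw_interrupt_disable()/rt_hw_interrupt_enable().
 */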
/**@}*/