idle.c 9.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394
  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-23 Bernard the first version
  9. * 2010-11-10 Bernard add cleanup callback function in thread exit.
  10. * 2012-12-29 Bernard fix compiling warning.
  11. * 2013-12-21 Grissiom let rt_thread_idle_excute loop until there is no
  12. * dead thread.
  13. * 2016-08-09 ArdaFu add method to get the handler of the idle thread.
  14. * 2018-02-07 Bernard lock scheduler to protect tid->cleanup.
  15. * 2018-07-14 armink add idle hook list
  16. * 2018-11-22 Jesven add per cpu idle task
  17. * combine the code of primary and secondary cpu
  18. * 2021-11-15 THEWON Remove duplicate work between idle and _thread_exit
  19. * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
  20. * 2023-11-07 xqyjlj fix thread exit
  21. * 2023-12-10 xqyjlj add _hook_spinlock
  22. */
#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_MODULE
#include <dlmodule.h>
#endif /* RT_USING_MODULE */

/* generic hook support implies idle-hook support */
#ifdef RT_USING_HOOK
#ifndef RT_USING_IDLE_HOOK
#define RT_USING_IDLE_HOOK
#endif /* RT_USING_IDLE_HOOK */
#endif /* RT_USING_HOOK */

/* idle stack must be larger when hooks or the heap cleanup path may run */
#ifndef IDLE_THREAD_STACK_SIZE
#if defined (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP)
#define IDLE_THREAD_STACK_SIZE 256
#else
#define IDLE_THREAD_STACK_SIZE 128
#endif /* (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP) */
#endif /* IDLE_THREAD_STACK_SIZE */

#define _CPUS_NR RT_CPUS_NR

/* queue of dead (defunct) threads awaiting cleanup, and its guard lock */
static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct);
static struct rt_spinlock _defunct_spinlock;

/* one statically allocated idle thread (and stack) per CPU */
static struct rt_thread idle_thread[_CPUS_NR];
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t idle_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];

#ifdef RT_USING_SMP
#ifndef SYSTEM_THREAD_STACK_SIZE
#define SYSTEM_THREAD_STACK_SIZE IDLE_THREAD_STACK_SIZE
#endif
/* on SMP, a dedicated "tsystem" thread reaps defunct threads; system_sem
 * counts pending defunct entries and wakes it */
static struct rt_thread rt_system_thread;
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t rt_system_stack[SYSTEM_THREAD_STACK_SIZE];
static struct rt_semaphore system_sem;
#endif

#ifdef RT_USING_IDLE_HOOK
#ifndef RT_IDLE_HOOK_LIST_SIZE
#define RT_IDLE_HOOK_LIST_SIZE 4
#endif /* RT_IDLE_HOOK_LIST_SIZE */

/* fixed-size table of idle hooks; RT_NULL marks a free slot */
static void (*idle_hook_list[RT_IDLE_HOOK_LIST_SIZE])(void);
static struct rt_spinlock _hook_spinlock;
  61. /**
  62. * @brief This function sets a hook function to idle thread loop. When the system performs
  63. * idle loop, this hook function should be invoked.
  64. *
  65. * @param hook the specified hook function.
  66. *
  67. * @return RT_EOK: set OK.
  68. * -RT_EFULL: hook list is full.
  69. *
  70. * @note the hook function must be simple and never be blocked or suspend.
  71. */
  72. rt_err_t rt_thread_idle_sethook(void (*hook)(void))
  73. {
  74. rt_size_t i;
  75. rt_err_t ret = -RT_EFULL;
  76. rt_base_t level;
  77. level = rt_spin_lock_irqsave(&_hook_spinlock);
  78. for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
  79. {
  80. if (idle_hook_list[i] == RT_NULL)
  81. {
  82. idle_hook_list[i] = hook;
  83. ret = RT_EOK;
  84. break;
  85. }
  86. }
  87. rt_spin_unlock_irqrestore(&_hook_spinlock, level);
  88. return ret;
  89. }
  90. /**
  91. * @brief delete the idle hook on hook list.
  92. *
  93. * @param hook the specified hook function.
  94. *
  95. * @return RT_EOK: delete OK.
  96. * -RT_ENOSYS: hook was not found.
  97. */
  98. rt_err_t rt_thread_idle_delhook(void (*hook)(void))
  99. {
  100. rt_size_t i;
  101. rt_err_t ret = -RT_ENOSYS;
  102. rt_base_t level;
  103. level = rt_spin_lock_irqsave(&_hook_spinlock);
  104. for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
  105. {
  106. if (idle_hook_list[i] == hook)
  107. {
  108. idle_hook_list[i] = RT_NULL;
  109. ret = RT_EOK;
  110. break;
  111. }
  112. }
  113. rt_spin_unlock_irqrestore(&_hook_spinlock, level);
  114. return ret;
  115. }
  116. #endif /* RT_USING_IDLE_HOOK */
  117. /**
  118. * @brief Enqueue a thread to defunct queue.
  119. *
  120. * @param thread the thread to be enqueued.
  121. *
  122. * @note It must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable
  123. */
  124. void rt_thread_defunct_enqueue(rt_thread_t thread)
  125. {
  126. rt_base_t level;
  127. level = rt_spin_lock_irqsave(&_defunct_spinlock);
  128. rt_list_insert_after(&_rt_thread_defunct, &thread->tlist);
  129. rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
  130. #ifdef RT_USING_SMP
  131. rt_sem_release(&system_sem);
  132. #endif
  133. }
/**
 * @brief Dequeue a thread from defunct queue.
 *
 * @return the first defunct thread, or RT_NULL when the queue is empty.
 */
rt_thread_t rt_thread_defunct_dequeue(void)
{
    rt_base_t level;
    rt_thread_t thread = RT_NULL;
    rt_list_t *l = &_rt_thread_defunct;

#ifdef RT_USING_SMP
    /* SMP: the emptiness check and the unlink must be one atomic step,
     * since other CPUs may enqueue/dequeue concurrently */
    level = rt_spin_lock_irqsave(&_defunct_spinlock);
    if (l->next != l)
    {
        thread = rt_list_entry(l->next,
                struct rt_thread,
                tlist);
        rt_list_remove(&(thread->tlist));
    }
    rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
#else
    /* UP: only this path ever removes entries, so the emptiness check may
     * run outside the critical section; only the actual unlink needs
     * interrupts masked to exclude concurrent enqueues from ISRs.
     * NOTE(review): this relies on a single dequeuer — confirm if that
     * invariant ever changes. */
    if (l->next != l)
    {
        thread = rt_list_entry(l->next,
                struct rt_thread,
                tlist);
        level = rt_hw_interrupt_disable();
        rt_list_remove(&(thread->tlist));
        rt_hw_interrupt_enable(level);
    }
#endif
    return thread;
}
/**
 * @brief This function will perform system background job when system idle.
 *
 * Drains the defunct queue: for each dead thread it waits for the reference
 * count to drop, tears down module/signal state, runs the cleanup callback,
 * and finally detaches or deletes the thread object (freeing its stack for
 * heap-allocated threads).
 */
static void rt_defunct_execute(void)
{
    /* Loop until there is no dead thread. So one call to rt_defunct_execute
     * will do all the cleanups. */
    while (1)
    {
        rt_thread_t thread;
        rt_bool_t object_is_systemobject;
        void (*cleanup)(struct rt_thread *tid);

#ifdef RT_USING_MODULE
        struct rt_dlmodule *module = RT_NULL;
#endif
        /* get defunct thread */
        thread = rt_thread_defunct_dequeue();
        if (thread == RT_NULL)
        {
            break;
        }

        /* spin-wait (with delay) until no one else still references the
         * dead thread; freeing it earlier would be use-after-free */
        while (rt_atomic_load(&(thread->ref_count)))
        {
            rt_thread_delay(5);
        }

#ifdef RT_USING_MODULE
        /* a thread owned by a dynamic module takes the module down with it */
        module = (struct rt_dlmodule*)thread->parent.module_id;
        if (module)
        {
            dlmodule_destroy(module);
        }
#endif
#ifdef RT_USING_SIGNALS
        rt_thread_free_sig(thread);
#endif
        /* store the point of "thread->cleanup" avoid to lose */
        cleanup = thread->cleanup;

        /* if it's a system object, not delete it */
        object_is_systemobject = rt_object_is_systemobject((rt_object_t)thread);
        if (object_is_systemobject == RT_TRUE)
        {
            /* detach this object */
            rt_object_detach((rt_object_t)thread);
        }

        /* invoke thread cleanup */
        if (cleanup != RT_NULL)
        {
            cleanup(thread);
        }

#ifdef RT_USING_HEAP
#ifdef RT_USING_MEM_PROTECTION
        if (thread->mem_regions != RT_NULL)
        {
            RT_KERNEL_FREE(thread->mem_regions);
        }
#endif
        /* if need free, delete it */
        if (object_is_systemobject == RT_FALSE)
        {
            /* release thread's stack */
#ifdef RT_USING_HW_STACK_GUARD
            RT_KERNEL_FREE(thread->stack_buf);
#else
            RT_KERNEL_FREE(thread->stack_addr);
#endif
            /* delete thread object */
            rt_object_delete((rt_object_t)thread);
        }
#endif
    }
}
  236. static void idle_thread_entry(void *parameter)
  237. {
  238. #ifdef RT_USING_SMP
  239. if (rt_hw_cpu_id() != 0)
  240. {
  241. while (1)
  242. {
  243. rt_hw_secondary_cpu_idle_exec();
  244. }
  245. }
  246. #endif /* RT_USING_SMP */
  247. while (1)
  248. {
  249. #ifdef RT_USING_IDLE_HOOK
  250. rt_size_t i;
  251. void (*idle_hook)(void);
  252. for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
  253. {
  254. idle_hook = idle_hook_list[i];
  255. if (idle_hook != RT_NULL)
  256. {
  257. idle_hook();
  258. }
  259. }
  260. #endif /* RT_USING_IDLE_HOOK */
  261. #ifndef RT_USING_SMP
  262. rt_defunct_execute();
  263. #endif /* RT_USING_SMP */
  264. #ifdef RT_USING_PM
  265. void rt_system_power_manager(void);
  266. rt_system_power_manager();
  267. #endif /* RT_USING_PM */
  268. }
  269. }
  270. #ifdef RT_USING_SMP
  271. static void rt_thread_system_entry(void *parameter)
  272. {
  273. while (1)
  274. {
  275. int ret= rt_sem_take(&system_sem, RT_WAITING_FOREVER);
  276. if (ret != RT_EOK)
  277. {
  278. RT_ASSERT(0);
  279. }
  280. rt_defunct_execute();
  281. }
  282. }
  283. #endif
  284. /**
  285. * @brief This function will initialize idle thread, then start it.
  286. *
  287. * @note this function must be invoked when system init.
  288. */
  289. void rt_thread_idle_init(void)
  290. {
  291. rt_ubase_t i;
  292. #if RT_NAME_MAX > 0
  293. char idle_thread_name[RT_NAME_MAX];
  294. #endif /* RT_NAME_MAX > 0 */
  295. for (i = 0; i < _CPUS_NR; i++)
  296. {
  297. #if RT_NAME_MAX > 0
  298. rt_snprintf(idle_thread_name, RT_NAME_MAX, "tidle%d", i);
  299. #endif /* RT_NAME_MAX > 0 */
  300. rt_thread_init(&idle_thread[i],
  301. #if RT_NAME_MAX > 0
  302. idle_thread_name,
  303. #else
  304. "tidle",
  305. #endif /* RT_NAME_MAX > 0 */
  306. idle_thread_entry,
  307. RT_NULL,
  308. &idle_thread_stack[i][0],
  309. sizeof(idle_thread_stack[i]),
  310. RT_THREAD_PRIORITY_MAX - 1,
  311. 32);
  312. #ifdef RT_USING_SMP
  313. rt_thread_control(&idle_thread[i], RT_THREAD_CTRL_BIND_CPU, (void*)i);
  314. rt_cpu_index(i)->idle_thread = &idle_thread[i];
  315. #endif /* RT_USING_SMP */
  316. /* startup */
  317. rt_thread_startup(&idle_thread[i]);
  318. }
  319. #ifdef RT_USING_SMP
  320. RT_ASSERT(RT_THREAD_PRIORITY_MAX > 2);
  321. rt_spin_lock_init(&_defunct_spinlock);
  322. rt_spin_lock_init(&_hook_spinlock);
  323. rt_sem_init(&system_sem, "defunct", 0, RT_IPC_FLAG_FIFO);
  324. /* create defunct thread */
  325. rt_thread_init(&rt_system_thread,
  326. "tsystem",
  327. rt_thread_system_entry,
  328. RT_NULL,
  329. rt_system_stack,
  330. sizeof(rt_system_stack),
  331. RT_THREAD_PRIORITY_MAX - 2,
  332. 32);
  333. /* startup */
  334. rt_thread_startup(&rt_system_thread);
  335. #endif
  336. }
  337. /**
  338. * @brief This function will get the handler of the idle thread.
  339. */
  340. rt_thread_t rt_thread_idle_gethandler(void)
  341. {
  342. #ifdef RT_USING_SMP
  343. int id = rt_hw_cpu_id();
  344. #else
  345. int id = 0;
  346. #endif /* RT_USING_SMP */
  347. return (rt_thread_t)(&idle_thread[id]);
  348. }