idle.c 9.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383
  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-23 Bernard the first version
  9. * 2010-11-10 Bernard add cleanup callback function in thread exit.
  10. * 2012-12-29 Bernard fix compiling warning.
  11. * 2013-12-21 Grissiom let rt_thread_idle_excute loop until there is no
  12. * dead thread.
  13. * 2016-08-09 ArdaFu add method to get the handler of the idle thread.
  14. * 2018-02-07 Bernard lock scheduler to protect tid->cleanup.
  15. * 2018-07-14 armink add idle hook list
  16. * 2018-11-22 Jesven add per cpu idle task
  17. * combine the code of primary and secondary cpu
  18. * 2021-11-15 THEWON Remove duplicate work between idle and _thread_exit
  19. * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
  20. */
#include <rthw.h>
#include <rtthread.h>
#ifdef RT_USING_MODULE
#include <dlmodule.h>
#endif /* RT_USING_MODULE */

/* RT_USING_HOOK implies idle-hook support. */
#ifdef RT_USING_HOOK
#ifndef RT_USING_IDLE_HOOK
#define RT_USING_IDLE_HOOK
#endif /* RT_USING_IDLE_HOOK */
#endif /* RT_USING_HOOK */

/* Idle thread stack size: hook and heap-cleanup builds need more room. */
#ifndef IDLE_THREAD_STACK_SIZE
#if defined (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP)
#define IDLE_THREAD_STACK_SIZE 256
#else
#define IDLE_THREAD_STACK_SIZE 128
#endif /* (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP) */
#endif /* IDLE_THREAD_STACK_SIZE */

#define _CPUS_NR RT_CPUS_NR

/* Queue of threads that have exited but have not yet been reclaimed. */
static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct);
/* Serializes access to the defunct queue. */
static struct rt_mutex _defunct_mutex;
/* Set to 1 once rt_thread_idle_init() has completed. */
static rt_atomic_t _idle_inited = 0;

/* One statically-allocated idle thread (and stack) per CPU. */
static struct rt_thread idle_thread[_CPUS_NR];
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t idle_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];

#ifdef RT_USING_SMP
#ifndef SYSTEM_THREAD_STACK_SIZE
#define SYSTEM_THREAD_STACK_SIZE IDLE_THREAD_STACK_SIZE
#endif
/* In SMP builds a dedicated "tsystem" thread performs defunct cleanup. */
static struct rt_thread rt_system_thread;
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t rt_system_stack[SYSTEM_THREAD_STACK_SIZE];
/* Signalled each time a thread is enqueued on the defunct queue. */
static struct rt_semaphore system_sem;
#endif

#ifdef RT_USING_IDLE_HOOK
#ifndef RT_IDLE_HOOK_LIST_SIZE
#define RT_IDLE_HOOK_LIST_SIZE 4
#endif /* RT_IDLE_HOOK_LIST_SIZE */
/* Fixed-size table of idle hooks; RT_NULL marks a free slot. */
static void (*idle_hook_list[RT_IDLE_HOOK_LIST_SIZE])(void);
  59. /**
  60. * @brief This function sets a hook function to idle thread loop. When the system performs
  61. * idle loop, this hook function should be invoked.
  62. *
  63. * @param hook the specified hook function.
  64. *
  65. * @return RT_EOK: set OK.
  66. * -RT_EFULL: hook list is full.
  67. *
  68. * @note the hook function must be simple and never be blocked or suspend.
  69. */
  70. rt_err_t rt_thread_idle_sethook(void (*hook)(void))
  71. {
  72. rt_size_t i;
  73. rt_err_t ret = -RT_EFULL;
  74. for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
  75. {
  76. if (idle_hook_list[i] == RT_NULL)
  77. {
  78. idle_hook_list[i] = hook;
  79. ret = RT_EOK;
  80. break;
  81. }
  82. }
  83. return ret;
  84. }
  85. /**
  86. * @brief delete the idle hook on hook list.
  87. *
  88. * @param hook the specified hook function.
  89. *
  90. * @return RT_EOK: delete OK.
  91. * -RT_ENOSYS: hook was not found.
  92. */
  93. rt_err_t rt_thread_idle_delhook(void (*hook)(void))
  94. {
  95. rt_size_t i;
  96. rt_err_t ret = -RT_ENOSYS;
  97. for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
  98. {
  99. if (idle_hook_list[i] == hook)
  100. {
  101. idle_hook_list[i] = RT_NULL;
  102. ret = RT_EOK;
  103. break;
  104. }
  105. }
  106. return ret;
  107. }
  108. #endif /* RT_USING_IDLE_HOOK */
  109. /**
  110. * @brief Enqueue a thread to defunct queue.
  111. *
  112. * @param thread the thread to be enqueued.
  113. *
  114. * @note It must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable
  115. */
  116. void rt_thread_defunct_enqueue(rt_thread_t thread)
  117. {
  118. if (rt_atomic_load(&_idle_inited) == 0)
  119. {
  120. return;
  121. }
  122. rt_mutex_take(&_defunct_mutex, RT_WAITING_FOREVER);
  123. rt_list_insert_after(&_rt_thread_defunct, &thread->tlist);
  124. rt_mutex_release(&_defunct_mutex);
  125. #ifdef RT_USING_SMP
  126. rt_sem_release(&system_sem);
  127. #endif
  128. }
  129. /**
  130. * @brief Dequeue a thread from defunct queue.
  131. */
  132. rt_thread_t rt_thread_defunct_dequeue(void)
  133. {
  134. rt_base_t level;
  135. rt_thread_t thread = RT_NULL;
  136. rt_list_t *l = &_rt_thread_defunct;
  137. #ifdef RT_USING_SMP
  138. rt_mutex_take(&_defunct_mutex, RT_WAITING_FOREVER);
  139. if (l->next != l)
  140. {
  141. thread = rt_list_entry(l->next,
  142. struct rt_thread,
  143. tlist);
  144. rt_list_remove(&(thread->tlist));
  145. }
  146. rt_mutex_release(&_defunct_mutex);
  147. RT_UNUSED(level);
  148. #else
  149. if (l->next != l)
  150. {
  151. thread = rt_list_entry(l->next,
  152. struct rt_thread,
  153. tlist);
  154. level = rt_hw_interrupt_disable();
  155. rt_list_remove(&(thread->tlist));
  156. rt_hw_interrupt_enable(level);
  157. }
  158. #endif
  159. return thread;
  160. }
/**
 * @brief Reclaim every thread on the defunct queue. One call performs all
 *        pending cleanups: it dequeues dead threads until the queue is empty,
 *        waits for their reference counts to drop, then detaches or deletes
 *        each thread object and frees its resources.
 */
static void rt_defunct_execute(void)
{
    /* Loop until there is no dead thread. So one call to rt_defunct_execute
     * will do all the cleanups. */
    while (1)
    {
        rt_thread_t thread;
        rt_bool_t object_is_systemobject;
        void (*cleanup)(struct rt_thread *tid);
#ifdef RT_USING_MODULE
        struct rt_dlmodule *module = RT_NULL;
#endif
        /* get defunct thread */
        thread = rt_thread_defunct_dequeue();
        if (thread == RT_NULL)
        {
            break;
        }

        /* Spin (with a delay) until no one else holds a reference to the
         * dead thread; freeing it earlier would leave dangling users. */
        while (rt_atomic_load(&(thread->ref_count)))
        {
            rt_thread_delay(5);
        }

#ifdef RT_USING_MODULE
        /* If the thread belonged to a dynamic module, destroy the module. */
        module = (struct rt_dlmodule *)thread->parent.module_id;
        if (module)
        {
            dlmodule_destroy(module);
        }
#endif
#ifdef RT_USING_SIGNALS
        /* Release any pending signal resources of the dead thread. */
        rt_thread_free_sig(thread);
#endif

        /* store the point of "thread->cleanup" avoid to lose it after the
         * object is detached/deleted below */
        cleanup = thread->cleanup;

        /* if it's a system object, not delete it */
        object_is_systemobject = rt_object_is_systemobject((rt_object_t)thread);
        if (object_is_systemobject == RT_TRUE)
        {
            /* detach this object */
            rt_object_detach((rt_object_t)thread);
        }

        /* invoke thread cleanup */
        if (cleanup != RT_NULL)
        {
            cleanup(thread);
        }

#ifdef RT_USING_HEAP
#ifdef RT_USING_MEM_PROTECTION
        if (thread->mem_regions != RT_NULL)
        {
            RT_KERNEL_FREE(thread->mem_regions);
        }
#endif
        /* if need free, delete it */
        if (object_is_systemobject == RT_FALSE)
        {
            /* release thread's stack */
#ifdef RT_USING_HW_STACK_GUARD
            RT_KERNEL_FREE(thread->stack_buf);
#else
            RT_KERNEL_FREE(thread->stack_addr);
#endif
            /* delete thread object */
            rt_object_delete((rt_object_t)thread);
        }
#endif
    }
}
  232. static void idle_thread_entry(void *parameter)
  233. {
  234. #ifdef RT_USING_SMP
  235. if (rt_hw_cpu_id() != 0)
  236. {
  237. while (1)
  238. {
  239. rt_hw_secondary_cpu_idle_exec();
  240. }
  241. }
  242. #endif /* RT_USING_SMP */
  243. while (1)
  244. {
  245. #ifdef RT_USING_IDLE_HOOK
  246. rt_size_t i;
  247. void (*idle_hook)(void);
  248. for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
  249. {
  250. idle_hook = idle_hook_list[i];
  251. if (idle_hook != RT_NULL)
  252. {
  253. idle_hook();
  254. }
  255. }
  256. #endif /* RT_USING_IDLE_HOOK */
  257. #ifndef RT_USING_SMP
  258. rt_defunct_execute();
  259. #endif /* RT_USING_SMP */
  260. #ifdef RT_USING_PM
  261. void rt_system_power_manager(void);
  262. rt_system_power_manager();
  263. #endif /* RT_USING_PM */
  264. }
  265. }
  266. #ifdef RT_USING_SMP
  267. static void rt_thread_system_entry(void *parameter)
  268. {
  269. while (1)
  270. {
  271. int ret= rt_sem_take(&system_sem, RT_WAITING_FOREVER);
  272. if (ret != RT_EOK)
  273. {
  274. RT_ASSERT(0);
  275. }
  276. rt_defunct_execute();
  277. }
  278. }
  279. #endif
  280. /**
  281. * @brief This function will initialize idle thread, then start it.
  282. *
  283. * @note this function must be invoked when system init.
  284. */
  285. void rt_thread_idle_init(void)
  286. {
  287. rt_ubase_t i;
  288. #if RT_NAME_MAX > 0
  289. char idle_thread_name[RT_NAME_MAX];
  290. #endif /* RT_NAME_MAX > 0 */
  291. for (i = 0; i < _CPUS_NR; i++)
  292. {
  293. #if RT_NAME_MAX > 0
  294. rt_snprintf(idle_thread_name, RT_NAME_MAX, "tidle%d", i);
  295. #endif /* RT_NAME_MAX > 0 */
  296. rt_thread_init(&idle_thread[i],
  297. #if RT_NAME_MAX > 0
  298. idle_thread_name,
  299. #else
  300. "tidle",
  301. #endif /* RT_NAME_MAX > 0 */
  302. idle_thread_entry,
  303. RT_NULL,
  304. &idle_thread_stack[i][0],
  305. sizeof(idle_thread_stack[i]),
  306. RT_THREAD_PRIORITY_MAX - 1,
  307. 32);
  308. #ifdef RT_USING_SMP
  309. rt_thread_control(&idle_thread[i], RT_THREAD_CTRL_BIND_CPU, (void*)i);
  310. rt_cpu_index(i)->idle_thread = &idle_thread[i];
  311. #endif /* RT_USING_SMP */
  312. /* startup */
  313. rt_thread_startup(&idle_thread[i]);
  314. }
  315. #ifdef RT_USING_SMP
  316. RT_ASSERT(RT_THREAD_PRIORITY_MAX > 2);
  317. rt_sem_init(&system_sem, "defunct", 0, RT_IPC_FLAG_FIFO);
  318. /* create defunct thread */
  319. rt_thread_init(&rt_system_thread,
  320. "tsystem",
  321. rt_thread_system_entry,
  322. RT_NULL,
  323. rt_system_stack,
  324. sizeof(rt_system_stack),
  325. RT_THREAD_PRIORITY_MAX - 2,
  326. 32);
  327. /* startup */
  328. rt_thread_startup(&rt_system_thread);
  329. #endif
  330. rt_mutex_init(&_defunct_mutex, "defunct_mutex", RT_IPC_FLAG_FIFO);
  331. rt_atomic_store(&(_idle_inited), 1);
  332. }
  333. /**
  334. * @brief This function will get the handler of the idle thread.
  335. */
  336. rt_thread_t rt_thread_idle_gethandler(void)
  337. {
  338. #ifdef RT_USING_SMP
  339. int id = rt_hw_cpu_id();
  340. #else
  341. int id = 0;
  342. #endif /* RT_USING_SMP */
  343. return (rt_thread_t)(&idle_thread[id]);
  344. }