/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 * 2023-06-26     shell        clear ref to parent on waitpid()
 *                             Remove recycling of lwp on waitpid() and leave it to defunct routine
 * 2023-07-27     shell        Move the detach of children process on parent exit to lwp_terminate.
 *                             Make lwp_from_pid locked by caller to avoid possible use-after-free
 *                             error
 * 2023-10-27     shell        Format codes of sys_exit(). Fix the data racing where lock is missed
 *                             Add reference on pid/tid, so the resource is not freed while using.
 *                             Add support for waitpid(options=WNOHANG)
 * 2023-11-16     xqyjlj       Fix the case where pid is 0
 * 2023-11-17     xqyjlj       add process group and session support
 * 2023-11-24     shell        Support of waitpid(options=WNOTRACED|WCONTINUED);
 *                             Reimplement the waitpid with a wait queue method, and fixup problem
 *                             with waitpid(pid=-1)/waitpid(pid=-pgid)/waitpid(pid=0) that only one
 *                             process can be traced while waiter suspend
 * 2024-01-25     shell        porting to new sched API
 */

/* includes scheduler related API */
#define __RT_IPC_SOURCE__

/* for waitpid, we are compatible to GNU extension */
#define _GNU_SOURCE

#define DBG_TAG "lwp.pid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "lwp_internal.h"

#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */
#include <stdatomic.h>

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#ifdef RT_USING_DFS_PROCFS
#include "proc.h"
#include "procfs.h"
#endif
#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);
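
/*
 * Note on PID_CT_ASSERT: when the condition `x` is false, `2 * (x) - 1`
 * evaluates to -1 and the array declaration is ill-formed, so the build
 * fails at compile time; when `x` is true the array has size 1 and the
 * struct is harmless.
 */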

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;
static struct rt_mutex pid_mtx;

int lwp_pid_init(void)
{
    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
    return 0;
}
void lwp_pid_lock_take(void)
{
    LWP_DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* this should never fail */
    RT_ASSERT(rc == RT_EOK);
    RT_UNUSED(rc);
}

void lwp_pid_lock_release(void)
{
    /* this should never fail */
    if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
        RT_ASSERT(0);
}
struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
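
/*
 * Allocate a pid slot with the pid lock held. A slot comes either from the
 * free list (recycled by lwp_pid_put_locked()) or, while unused slots remain,
 * from the statically allocated lwp_pid_ary[]. The pid value itself is
 * searched circularly, starting just after the last allocated pid, so
 * recently released values are not reused immediately. Returns 0 when no
 * pid is available.
 */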
static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    return pid;
}
static void lwp_pid_put_locked(pid_t pid)
{
    struct lwp_avl_struct *p;

    if (pid == 0)
    {
        return;
    }

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
}
#ifdef RT_USING_DFS_PROCFS
rt_inline void _free_proc_dentry(rt_lwp_t lwp)
{
    char pid_str[64] = {0};

    rt_snprintf(pid_str, 64, "%d", lwp->pid);
    pid_str[63] = 0;
    proc_remove_dentry(pid_str, 0);
}
#else
#define _free_proc_dentry(lwp)
#endif
void lwp_pid_put(struct rt_lwp *lwp)
{
    _free_proc_dentry(lwp);

    lwp_pid_lock_take();
    lwp_pid_put_locked(lwp->pid);
    lwp_pid_lock_release();

    /* reset the pid field */
    lwp->pid = 0;
    /* drop the reference held by the pid tree */
    lwp_ref_dec(lwp);
}
static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
{
    struct lwp_avl_struct *p;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
        lwp_ref_inc(lwp);

#ifdef RT_USING_DFS_PROCFS
        if (pid)
        {
            proc_pid(pid);
        }
#endif
    }
}
static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}
void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}
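
/*
 * Each process tracks the kernel objects it owns in a per-lwp AVL tree keyed
 * by the object pointer, guarded by object_mutex. An object may be shared by
 * several processes (e.g. after lwp_user_object_dup()), so it also carries a
 * per-object lwp_ref_count; the object is only deleted when the last owning
 * process drops it in _object_node_delete().
 */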
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_atomic_add(&object->lwp_ref_count, 1);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}
rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}
static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
rt_lwp_t lwp_create(rt_base_t flags)
{
    pid_t pid;
    rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));

    if (new_lwp)
    {
        /* minimal setup of lwp object */
        new_lwp->ref = 1;
#ifdef RT_USING_SMP
        new_lwp->bind_cpu = RT_CPUS_NR;
#endif
        new_lwp->exe_file = RT_NULL;
        rt_list_init(&new_lwp->t_grp);
        rt_list_init(&new_lwp->pgrp_node);
        rt_list_init(&new_lwp->timer);
        lwp_user_object_lock_init(new_lwp);
        rt_wqueue_init(&new_lwp->wait_queue);
        rt_wqueue_init(&new_lwp->waitpid_waiters);
        lwp_signal_init(&new_lwp->signal);
        rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);

        if (flags & LWP_CREATE_FLAG_NOTRACE_EXEC)
            new_lwp->did_exec = RT_TRUE;

        /* lwp with pid */
        if (flags & LWP_CREATE_FLAG_ALLOC_PID)
        {
            lwp_pid_lock_take();
            pid = lwp_pid_get_locked();
            if (pid == 0)
            {
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("%s: pid slots are exhausted", __func__);
            }
            else
            {
                new_lwp->pid = pid;
                lwp_pid_set_lwp_locked(pid, new_lwp);
            }
            lwp_pid_lock_release();
        }
        rt_memset(&new_lwp->rt_rusage, 0, sizeof(new_lwp->rt_rusage));

        if (flags & LWP_CREATE_FLAG_INIT_USPACE)
        {
            rt_err_t error = lwp_user_space_init(new_lwp, 0);
            if (error)
            {
                lwp_pid_put(new_lwp);
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("%s: failed to initialize user space", __func__);
            }
        }
    }
    LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
    return new_lwp;
}
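
/*
 * Usage sketch (illustrative only, not part of this file): a typical caller
 * allocates a process with a pid and a fresh user address space; the object
 * starts with one reference, which is balanced with lwp_ref_dec():
 *
 *     rt_lwp_t lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID |
 *                               LWP_CREATE_FLAG_INIT_USPACE);
 *     if (lwp)
 *     {
 *         ... load the executable and start the main thread ...
 *     }
 */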
/** when the reference count drops to 0, an lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
    rt_processgroup_t group = RT_NULL;

    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when the reference count is cleared
     *
     * Note: Critical Section
     * - lwp (RW. there is no other writer/reader competing with lwp_free,
     *   since all references are cleared)
     */
    LOG_D("lwp free: %p", lwp);

    rt_free(lwp->exe_file);
    group = lwp_pgrp_find(lwp_pgid_get_byprocess(lwp));
    if (group)
        lwp_pgrp_remove(group, lwp);

    LWP_LOCK(lwp);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = 0;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free the data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free the text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    LWP_UNLOCK(lwp);
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /**
     * the pid must have been released before entering lwp_free(),
     * otherwise this is a data race
     */
    RT_ASSERT(lwp->pid == 0);
    rt_free(lwp);
}
rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
    LWP_LOCK(lwp);
    lwp->rt_rusage.ru_stime.tv_sec += thread->system_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_stime.tv_usec += thread->system_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    lwp->rt_rusage.ru_utime.tv_sec += thread->user_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_utime.tv_usec += thread->user_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    rt_list_remove(&thread->sibling);
    LWP_UNLOCK(lwp);

    lwp_futex_exit_robust_list(thread);

    /**
     * Note: the tid tree always holds a reference to the thread, hence the
     * tid must be released before the cleanup of the thread
     */
    lwp_tid_put(thread->tid);
    thread->tid = 0;

    rt_thread_delete(thread);
    rt_schedule();
    while (1) ;
}
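
/*
 * Implements the child-tid-clearing half of the set_tid_address() protocol
 * (Linux's CLONE_CHILD_CLEARTID semantics): on thread exit, zero the
 * user-space tid slot and futex-wake one waiter so that a user-space join
 * (e.g. pthread_join()) can proceed.
 */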
rt_inline void _clear_child_tid(rt_thread_t thread)
{
    if (thread->clear_child_tid)
    {
        int t = 0;
        int *clear_child_tid = thread->clear_child_tid;

        thread->clear_child_tid = RT_NULL;
        lwp_put_to_user(clear_child_tid, &t, sizeof t);
        sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
    }
}
void lwp_exit(rt_lwp_t lwp, lwp_status_t status)
{
    rt_thread_t thread;

    if (!lwp)
    {
        LOG_W("%s: lwp should not be null", __func__);
        return;
    }

    thread = rt_thread_self();
    RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
    LOG_D("process(lwp.pid=%d) exit", lwp->pid);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    /**
     * Brief: only one thread should call exit_group(),
     * but we cannot ensure that at run-time
     */
    lwp->lwp_status = status;
    LWP_UNLOCK(lwp);

    lwp_terminate(lwp);
#else
    rt_thread_t main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (main_thread == thread)
    {
        rt_thread_t sub_thread;
        rt_list_t *list;

        lwp_terminate(lwp);

        /* delete all subthreads */
        while ((list = thread->sibling.prev) != &lwp->t_grp)
        {
            sub_thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_list_remove(&sub_thread->sibling);
            rt_thread_delete(sub_thread);
        }
        lwp->lwp_ret = status;
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}
void lwp_thread_exit(rt_thread_t thread, int status)
{
    rt_thread_t header_thr;
    struct rt_lwp *lwp;

    LOG_D("%s", __func__);

    RT_ASSERT(thread == rt_thread_self());
    lwp = (struct rt_lwp *)thread->lwp;
    RT_ASSERT(lwp != RT_NULL);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
    {
        /**
         * if the last thread exits, treat it as a normal process exit.
         * This is reasonable since a trap event exits through lwp_exit()
         */
        lwp->lwp_status = LWP_CREATE_STAT_EXIT(status);
        LWP_UNLOCK(lwp);

        lwp_terminate(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}
/**
 * @note the reference is not for synchronization, but for the release of
 * resources; the synchronization is done through the lwp & pid locks
 */
int lwp_ref_inc(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, 1);
    LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);
    return ref;
}

int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    if (ref == 1)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* the reference must stay a positive integer */
        RT_ASSERT(ref > 1);
    }
    return ref;
}
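
/*
 * pid -> lwp lookups. Per the 2023-07-27 changelog entry above, the caller
 * is responsible for holding the pid lock (lwp_pid_lock_take()) across the
 * call and across any use of the returned pointer, to avoid a possible
 * use-after-free.
 */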
struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid)
{
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }
    return lwp;
}

struct rt_lwp* lwp_from_pid_locked(pid_t pid)
{
    struct rt_lwp* lwp;

    lwp = pid ? lwp_from_pid_raw_locked(pid) : lwp_self();
    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}
char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    lwp_pid_lock_release();

    return process_name;
}
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_sched_lock_level_t slvl;

    lwp_pid_lock_take();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                rt_sched_lock(&slvl);
                if (rt_sched_thread_get_stat(main_thread) != RT_THREAD_CLOSE)
                {
                    pid = lwp->pid;
                }
                rt_sched_unlock(slvl);
            }
        }
    }
    lwp_pid_lock_release();
    return pid;
}
int lwp_getpid(void)
{
    rt_lwp_t lwp = lwp_self();
    return lwp ? lwp->pid : 1;
    // return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}
rt_inline void _update_ru(struct rt_lwp *child, struct rt_lwp *self_lwp, struct rusage *uru)
{
    struct rusage rt_rusage;

    if (uru != RT_NULL)
    {
        rt_rusage.ru_stime.tv_sec = child->rt_rusage.ru_stime.tv_sec;
        rt_rusage.ru_stime.tv_usec = child->rt_rusage.ru_stime.tv_usec;
        rt_rusage.ru_utime.tv_sec = child->rt_rusage.ru_utime.tv_sec;
        rt_rusage.ru_utime.tv_usec = child->rt_rusage.ru_utime.tv_usec;
        lwp_data_put(self_lwp, uru, &rt_rusage, sizeof(*uru));
    }
}
/* do the statistical summary and reap the child if necessary */
static rt_err_t _stats_and_reap_child(rt_lwp_t child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *ustatus,
                                      int options, struct rusage *uru)
{
    int lwp_stat = child->lwp_status;

    /* report statistical data to the process */
    _update_ru(child, self_lwp, uru);

    if (child->terminated && !(options & WNOWAIT))
    {
        /** Reap the child process if it has exited */
        LOG_D("func %s: child detached", __func__);
        lwp_pid_put(child);
        lwp_children_unregister(self_lwp, child);
    }

    if (ustatus)
        lwp_data_put(self_lwp, ustatus, &lwp_stat, sizeof(*ustatus));

    return RT_EOK;
}
#define HAS_CHILD_BUT_NO_EVT (-1024)

/* check if the process has already terminated */
static sysret_t _query_event_from_lwp(rt_lwp_t child, rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                      int options, int *status)
{
    sysret_t rc;

    LWP_LOCK(child);
    if (child->terminated)
    {
        rc = child->pid;
    }
    else if ((options & WSTOPPED) && child->jobctl_stopped && !child->wait_reap_stp)
    {
        child->wait_reap_stp = 1;
        rc = child->pid;
    }
    else
    {
        rc = HAS_CHILD_BUT_NO_EVT;
    }
    LWP_UNLOCK(child);

    LOG_D("%s(child_pid=%d ('%s'), stopped=%d) => %d", __func__, child->pid, child->cmd, child->jobctl_stopped, rc);
    return rc;
}
/* verify that the process is a child, and reap it */
static pid_t _verify_child_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                    pid_t wait_pid, int options, int *ustatus,
                                    struct rusage *uru)
{
    sysret_t rc;
    struct rt_lwp *child;

    /* check if pid refers to a valid child */
    lwp_pid_lock_take();
    child = lwp_from_pid_locked(wait_pid);
    if (!child)
        rc = -EINVAL;
    else if (child->parent != self_lwp)
        rc = -ESRCH;
    else
        rc = wait_pid;
    lwp_pid_lock_release();

    if (rc > 0)
    {
        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
        {
            _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
        }
    }
    return rc;
}
/* try to reap any child */
static pid_t _reap_any_child_pid(rt_thread_t cur_thr, rt_lwp_t self_lwp, pid_t pair_pgid,
                                 int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc = -ECHILD;
    struct rt_lwp *child;

    LWP_LOCK(self_lwp);
    child = self_lwp->first_child;

    /* find an exited child, if any */
    while (child)
    {
        if (pair_pgid && child->pgid != pair_pgid)
        {
            /* advance before skipping, otherwise this loops forever */
            child = child->sibling;
            continue;
        }

        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
            break;

        child = child->sibling;
    }
    LWP_UNLOCK(self_lwp);

    if (rc > 0)
    {
        _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
    }
    return rc;
}
rt_err_t lwp_waitpid_kick(rt_lwp_t parent, rt_lwp_t self_lwp)
{
    /* the waker provides the message mainly through its lwp_status */
    rt_wqueue_wakeup(&parent->waitpid_waiters, self_lwp);
    return RT_EOK;
}

struct waitpid_handle {
    struct rt_wqueue_node wq_node;
    int options;
    rt_lwp_t waker_lwp;
};
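
/*
 * Wait-queue filter invoked in the waker's context. It checks that the waker
 * matches the waiter's pid selector (pid > 0, -1, 0, or -pgid, mirroring the
 * waitpid() convention) and that the event class is one the waiter asked for
 * (WEXITED / WSTOPPED / WCONTINUED). Returning 0 accepts the event and wakes
 * the waiter; any other value leaves the waiter suspended.
 */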
/* setup the IPC message and notify the parent */
static int _waitq_filter(struct rt_wqueue_node *wait_node, void *key)
{
    int can_accept_evt = 0;
    rt_thread_t waiter = wait_node->polling_thread;
    pid_t destiny = (pid_t)wait_node->key;
    rt_lwp_t waker_lwp = key;
    struct waitpid_handle *handle;
    rt_ubase_t options;

    handle = rt_container_of(wait_node, struct waitpid_handle, wq_node);

    RT_ASSERT(waiter != RT_NULL);
    options = handle->options;

    /* filter out if the waker is not the one */
    if (destiny > 0)
    {
        /**
         * in the immediate-return path of waitpid, we have already checked
         * that pid is one of the children of the waiting thread
         */
        can_accept_evt = waker_lwp->pid == destiny;
    }
    else if (destiny == -1)
    {
        can_accept_evt = waker_lwp->parent == waiter->lwp;
    }
    else
    {
        /* destiny == 0 || destiny == -pgid */
        pid_t waiter_pgid;

        if (destiny == 0)
        {
            waiter_pgid = lwp_pgid_get_byprocess(waiter->lwp);
        }
        else
        {
            waiter_pgid = -destiny;
        }
        can_accept_evt = waiter_pgid == lwp_pgid_get_byprocess(waker_lwp);
    }

    /* filter out if the event is not desired */
    if (can_accept_evt)
    {
        if ((options & WEXITED) && waker_lwp->terminated)
            can_accept_evt = 1;
        else if ((options & WSTOPPED) && WIFSTOPPED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else if ((options & WCONTINUED) && WIFCONTINUED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else
            can_accept_evt = 0;
    }

    /* setup the message for the waiter if accepted */
    if (can_accept_evt)
        handle->waker_lwp = waker_lwp;

    /* 0 if the event is accepted, otherwise discard it */
    return !can_accept_evt;
}
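
/*
 * Note on ordering: the waiter marks itself suspended and joins the wait
 * queue inside a single rt_enter_critical()/rt_exit_critical() section,
 * before calling rt_schedule(). This prevents it from being preempted
 * between suspending itself and enqueueing, which would risk a lost wakeup
 * from a concurrent lwp_waitpid_kick().
 */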
/* the waiter cleans up the IPC message and waits for the desired event here */
static rt_err_t _wait_for_event(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                struct waitpid_handle *handle, pid_t destiny)
{
    rt_err_t ret;

    /* current context checking */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    handle->wq_node.polling_thread = cur_thr;
    handle->wq_node.key = destiny;
    handle->wq_node.wakeup = _waitq_filter;
    handle->wq_node.wqueue = &self_lwp->waitpid_waiters;
    rt_list_init(&handle->wq_node.list);

    cur_thr->error = RT_EOK;

    LOG_D("%s(self_lwp=%d) wait for event", __func__, self_lwp->pid);

    rt_enter_critical();
    ret = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
    if (ret == RT_EOK)
    {
        rt_wqueue_add(handle->wq_node.wqueue, &handle->wq_node);
        rt_exit_critical();

        rt_schedule();

        ret = cur_thr->error;

        /**
         * the error of cur_thr is a positive value, but some legacy
         * implementations use a negative one, so normalize it here to
         * avoid errors
         */
        ret = ret > 0 ? -ret : ret;

        /**
         * we don't actually rely on this, but we clean it up since the
         * wakeup API sets it up during operation, and leaving it would
         * cause a messy condition
         */
        handle->wq_node.wqueue->flag = RT_WQ_FLAG_CLEAN;
        rt_wqueue_remove(&handle->wq_node);
    }
    else
    {
        /* failed to suspend, return immediately with the failure */
        rt_exit_critical();
    }
    return ret;
}
/* wait for the IPC event and do the cleanup if necessary */
static sysret_t _wait_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp, const pid_t pid,
                               int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc;
    struct waitpid_handle handle;
    rt_lwp_t waker;

    /* wait for SIGCHLD or other async events */
    handle.options = options;
    handle.waker_lwp = 0;
    rc = _wait_for_event(cur_thr, self_lwp, &handle, pid);

    waker = handle.waker_lwp;
    if (waker != RT_NULL)
    {
        rc = waker->pid;

        /* check whether any process exited */
        LOG_D("%s: woken up by lwp=%d", __func__, waker->pid);

        _stats_and_reap_child(waker, cur_thr, self_lwp, ustatus, options, uru);
    }
    /**
     * else if (rc != RT_EOK):
     * unable to suspend, or woken up unexpectedly
     * -> return the failure
     */
    return rc;
}
pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru)
{
    pid_t rc = -1;
    struct rt_thread *cur_thr;
    struct rt_lwp *self_lwp;

    cur_thr = rt_thread_self();
    self_lwp = lwp_self();

    if (!cur_thr || !self_lwp)
    {
        rc = -EINVAL;
    }
    else
    {
        /* check if we are able to reap the desired child immediately */
        if (pid > 0)
        {
            /* if pid is a child, then try to reap it */
            rc = _verify_child_and_reap(cur_thr, self_lwp, pid, options, status, ru);
        }
        else if (pid == -1)
        {
            /* any terminated child */
            rc = _reap_any_child_pid(cur_thr, self_lwp, 0, options, status, ru);
        }
        else
        {
            /**
             * (pid < -1 || pid == 0)
             * any terminated child with a matching pgid
             */
            pid_t pair_pgid;

            if (pid == 0)
            {
                pair_pgid = lwp_pgid_get_byprocess(self_lwp);
            }
            else
            {
                pair_pgid = -pid;
            }
            rc = _reap_any_child_pid(cur_thr, self_lwp, pair_pgid, options, status, ru);
        }

        if (rc == HAS_CHILD_BUT_NO_EVT)
        {
            if (!(options & WNOHANG))
            {
                /* otherwise, arrange a suspend and wait for the async event */
                options |= WEXITED;
                rc = _wait_and_reap(cur_thr, self_lwp, pid, options, status, ru);
            }
            else
            {
                /**
                 * POSIX.1: If waitpid() was invoked with WNOHANG set in
                 * options, it has at least one child process specified by
                 * pid for which status is not available, and status is not
                 * available for any process specified by pid, 0 is returned
                 */
                rc = 0;
            }
        }
        else
        {
            RT_ASSERT(rc != 0);
        }
    }

    LOG_D("waitpid() => %d, *status=0x%x", rc, status ? *status : 0);
    return rc;
}
pid_t waitpid(pid_t pid, int *status, int options)
{
    return lwp_waitpid(pid, status, options, RT_NULL);
}
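
/*
 * Usage sketch (illustrative only, not part of this file): a shell-like
 * parent can poll for any terminated child without blocking, or block until
 * a specific child exits:
 *
 *     int status;
 *     pid_t reaped = waitpid(-1, &status, WNOHANG);   // 0: no event yet
 *     pid_t child  = waitpid(pid, &status, 0);        // block on `pid`
 *     if (child > 0 && WIFEXITED(status))
 *         LOG_I("child %d exited with %d", child, WEXITSTATUS(status));
 */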
#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}
static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
        rt_kprintf("%3d %3d ", RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
    else
        rt_kprintf("N/A %3d ", RT_SCHED_PRIV(thread).current_priority);
#else
    rt_kprintf("%3d ", RT_SCHED_PRIV(thread).current_priority);
#endif /*RT_USING_SMP*/

    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               RT_SCHED_PRIV(thread).remaining_tick,
               thread->error);
#endif
    rt_kprintf(" %-.*s\n", rt_strlen(thread->parent.name), thread->parent.name);
}
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error %-*.s\n", 4, "PID", 4, "TID", maxlen, item_title, maxlen, "cmd");
    object_split(4); rt_kprintf(" "); object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- -------- ---------- -----"); rt_kprintf(" "); object_split(maxlen); rt_kprintf("\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", 4, "TID", maxlen, item_title, maxlen, "cmd");
    object_split(4); rt_kprintf(" "); object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- -------- ---------- -----"); rt_kprintf(" "); object_split(maxlen); rt_kprintf("\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_spin_lock_irqsave(&thread->spinlock);
                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_spin_unlock_irqrestore(&thread->spinlock, level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_spin_unlock_irqrestore(&thread->spinlock, level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %4d %-*.*s ", lwp_to_pid(lwp), thread->tid, maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);
static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = SIGKILL;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_pid_lock_take();
    lwp_signal_kill(lwp_from_pid_raw_locked(pid), sig, SI_USER, 0);
    lwp_pid_lock_release();
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);
static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_pid_lock_take();
        lwp_signal_kill(lwp_from_pid_raw_locked(pid), SIGKILL, SI_USER, 0);
        lwp_pid_lock_release();
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif /* RT_USING_FINSH */
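
/*
 * The exit request is consumed with a compare-and-swap from TRIGGERED to
 * IN_PROCESS, so even if it is polled from several places, only one caller
 * observes the transition and performs the exit path.
 */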
int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();
    rt_size_t expected = LWP_EXIT_REQUEST_TRIGGERED;

    if (!thread->lwp)
    {
        return 0;
    }

    return atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                          LWP_EXIT_REQUEST_IN_PROCESS);
}
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
static void _resr_cleanup(struct rt_lwp *lwp);

void lwp_terminate(struct rt_lwp *lwp)
{
    if (!lwp)
    {
        /* not supported for kernel threads */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    LWP_LOCK(lwp);

    if (!lwp->terminated)
    {
        /* stop the receiving of signals */
        lwp->terminated = RT_TRUE;
        LWP_UNLOCK(lwp);

        _wait_sibling_exit(lwp, rt_thread_self());
        _resr_cleanup(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
    rt_sched_lock_level_t slvl;
    rt_list_t *list;
    rt_thread_t thread;
    rt_size_t expected = LWP_EXIT_REQUEST_NONE;

    /* broadcast the exit request to sibling threads */
    LWP_LOCK(lwp);
    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        thread = rt_list_entry(list, struct rt_thread, sibling);

        atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                       LWP_EXIT_REQUEST_TRIGGERED);

        rt_sched_lock(&slvl);
        /* don't release, otherwise the thread may have been freed */
        if (rt_sched_thread_is_suspended(thread))
        {
            thread->error = RT_EINTR;
            rt_sched_unlock(slvl);
            rt_thread_wakeup(thread);
        }
        else
        {
            rt_sched_unlock(slvl);
        }
    }
    LWP_UNLOCK(lwp);

    while (1)
    {
        int subthread_is_terminated;

        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - the sibling list of the lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_sched_lock_level_t slvl;
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check whether all subthreads are still in init state */
            for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                rt_sched_lock(&slvl);
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
                {
                    rt_sched_unlock(slvl);
                    all_subthread_in_init = 0;
                    break;
                }
                else
                {
                    rt_sched_unlock(slvl);
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthreads */
                while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the
                     *   thread control block, it must ensure that no one
                     *   else can access the thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}
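
/*
 * Report the death of this process to its parent: derive the SIGCHLD si_code
 * from the exit status (CLD_KILLED/CLD_DUMPED for a fatal signal, CLD_EXITED
 * otherwise), wake any thread blocked in waitpid(), and queue a SIGCHLD whose
 * extension carries the exit code and the CPU times of the exiting thread.
 */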
static void _notify_parent(rt_lwp_t lwp)
{
    int si_code;
    int signo_or_exitcode;
    lwp_siginfo_ext_t ext;
    lwp_status_t lwp_status = lwp->lwp_status;
    rt_lwp_t parent = lwp->parent;

    if (WIFSIGNALED(lwp_status))
    {
        si_code = (lwp_status & LWP_COREDUMP_FLAG) ? CLD_DUMPED : CLD_KILLED;
        signo_or_exitcode = WTERMSIG(lwp_status);
    }
    else
    {
        si_code = CLD_EXITED;
        signo_or_exitcode = WEXITSTATUS(lwp->lwp_status);
    }

    lwp_waitpid_kick(parent, lwp);

    ext = rt_malloc(sizeof(struct lwp_siginfo));
    if (ext)
    {
        rt_thread_t cur_thr = rt_thread_self();

        ext->sigchld.status = signo_or_exitcode;
        ext->sigchld.stime = cur_thr->system_time;
        ext->sigchld.utime = cur_thr->user_time;
    }
    lwp_signal_kill(parent, SIGCHLD, si_code, ext);
}
static void _resr_cleanup(struct rt_lwp *lwp)
{
    lwp_jobctrl_on_exit(lwp);

    LWP_LOCK(lwp);
    lwp_signal_detach(&lwp->signal);

    /**
     * @brief Detach children from lwp
     *
     * @note Critical Section
     * - the lwp (RW. Release lwp)
     * - the pid resource manager (RW. Release the pid)
     */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;

        /** @note safe since the slist node has been released */
        LWP_UNLOCK(lwp);

        LWP_LOCK(child);
        if (child->terminated)
        {
            lwp_pid_put(child);
        }
        else
        {
            child->sibling = RT_NULL;
            /* info: this may cause an orphan lwp */
            child->parent = RT_NULL;
        }
        LWP_UNLOCK(child);
        lwp_ref_dec(child);
        lwp_ref_dec(lwp);

        LWP_LOCK(lwp);
    }
    LWP_UNLOCK(lwp);

    /**
     * @brief Wakeup the parent if it's waiting for this lwp; otherwise a
     * signal will be sent to the parent
     *
     * @note Critical Section
     * - the parent lwp (RW.)
     */
    LWP_LOCK(lwp);
    if (lwp->parent &&
        !lwp_sigismember(&lwp->parent->signal.sig_action_nocldwait, SIGCHLD))
    {
        /* if we successfully raced to set lwp->terminated before the parent detached */
        LWP_UNLOCK(lwp);

        /**
         * Note: a child cannot detach itself and must wait for the parent to
         * take care of it
         */
        _notify_parent(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);

        /**
         * if the process is an orphan, it has no parent to do the recycling.
         * Otherwise, its parent has set up a flag to mask out the recycling
         * event
         */
        lwp_pid_put(lwp);
    }

    LWP_LOCK(lwp);
    if (lwp->fdt.fds != RT_NULL)
    {
        struct dfs_file **fds;

        /* auto clean fds */
        __exit_files(lwp);
        fds = lwp->fdt.fds;
        lwp->fdt.fds = RT_NULL;
        LWP_UNLOCK(lwp);

        rt_free(fds);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
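
/*
 * Affinity helpers. Note: in RT-Thread, binding a thread to RT_CPUS_NR (one
 * past the last valid CPU index) is the conventional way to leave it unbound,
 * which is presumably why lwp_setaffinity() clamps out-of-range values to
 * RT_CPUS_NR instead of rejecting them.
 */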
static int _lwp_setaffinity(int tid, int cpu)
{
    rt_thread_t thread;
    int ret = -1;

    thread = lwp_tid_get_thread_and_inc_ref(tid);
    if (thread)
    {
#ifdef RT_USING_SMP
        rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_ubase_t)cpu);
#endif
        ret = 0;
    }
    lwp_tid_dec_ref(thread);
    return ret;
}

int lwp_setaffinity(int tid, int cpu)
{
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    ret = _lwp_setaffinity(tid, cpu);
    return ret;
}
#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif