/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 * 2023-06-26     shell        clear ref to parent on waitpid()
 *                             Remove recycling of lwp on waitpid() and leave it to defunct routine
 * 2023-07-27     shell        Move the detach of children process on parent exit to lwp_terminate.
 *                             Make lwp_from_pid locked by caller to avoid possible use-after-free
 *                             error
 * 2023-10-27     shell        Format codes of sys_exit(). Fix the data racing where lock is missed
 *                             Add reference on pid/tid, so the resource is not freed while using.
 *                             Add support for waitpid(options=WNOHANG)
 * 2023-11-16     xqyjlj       Fix the case where pid is 0
 * 2023-11-17     xqyjlj       add process group and session support
 * 2023-11-24     shell        Support of waitpid(options=WNOTRACED|WCONTINUED);
 *                             Reimplement the waitpid with a wait queue method, and fixup problem
 *                             with waitpid(pid=-1)/waitpid(pid=-pgid)/waitpid(pid=0) that only one
 *                             process can be traced while waiter suspend
 * 2024-01-25     shell        porting to new sched API
 */
/* includes scheduler related API */
#define __RT_IPC_SOURCE__

/* for waitpid, we are compatible to GNU extension */
#define _GNU_SOURCE

#define DBG_TAG "lwp.pid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "lwp_internal.h"

#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */
#include <stdatomic.h>

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#ifdef RT_USING_DFS_PROCFS
#include "proc.h"
#include "procfs.h"
#endif

#define PID_MAX 10000
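
/*
 * Compile-time assertion: the array length 2 * (x) - 1 evaluates to 1 when
 * the condition x holds and to -1 when it does not, so a false condition
 * fails to compile. The two checks below keep RT_LWP_MAX_NR strictly
 * between 1 and PID_MAX.
 */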
#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);
static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;
static struct rt_mutex pid_mtx;
static struct rt_wqueue _pid_emptyq;

int lwp_pid_init(void)
{
    rt_wqueue_init(&_pid_emptyq);
    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
    return 0;
}
int lwp_pid_wait_for_empty(int wait_flags, rt_tick_t to)
{
    int error;

    if (wait_flags == RT_INTERRUPTIBLE)
    {
        error = rt_wqueue_wait_interruptible(&_pid_emptyq, 0, to);
    }
    else
    {
        error = rt_wqueue_wait_killable(&_pid_emptyq, 0, to);
    }
    return error;
}
void lwp_pid_lock_take(void)
{
    LWP_DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* should never fail */
    RT_ASSERT(rc == RT_EOK);
    RT_UNUSED(rc);
}

void lwp_pid_lock_release(void)
{
    /* should never fail */
    if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
        RT_ASSERT(0);
}
struct pid_foreach_param
{
    int (*cb)(pid_t pid, void *data);
    void *data;
};

static int _before_cb(struct lwp_avl_struct *node, void *data)
{
    struct pid_foreach_param *param = data;
    pid_t pid = node->avl_key;
    return param->cb(pid, param->data);
}

int lwp_pid_for_each(int (*cb)(pid_t pid, void *data), void *data)
{
    int error;
    struct pid_foreach_param buf =
    {
        .cb = cb,
        .data = data,
    };

    lwp_pid_lock_take();
    error = lwp_avl_traversal(lwp_pid_root, _before_cb, &buf);
    lwp_pid_lock_release();

    return error;
}
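
/*
 * Usage sketch for lwp_pid_for_each(): the callback is invoked once per
 * allocated pid while the pid lock is held; return 0 to keep walking, and
 * the callback's result is propagated back through lwp_avl_traversal()
 * and lwp_pid_for_each(). The callback below is hypothetical, only for
 * illustration:
 *
 *     static int _dump_one_pid(pid_t pid, void *data)
 *     {
 *         rt_kprintf("pid %d\n", pid);
 *         return 0; // non-zero would be handed back to the caller
 *     }
 *
 *     lwp_pid_for_each(_dump_one_pid, RT_NULL);
 */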
struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
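
/*
 * Allocate a pid under the pid lock. An AVL node is taken from the free
 * list first, then from the never-used tail of lwp_pid_ary[]. The search
 * for a free pid value starts just above the last allocation
 * (current_pid + 1) and wraps around to 1, which avoids immediate reuse
 * of recently freed pid values. Since RT_LWP_MAX_NR < PID_MAX is asserted
 * above, a free value always exists whenever a node is available.
 * Returns 0 on failure.
 */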
static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    return pid;
}
static void lwp_pid_put_locked(pid_t pid)
{
    struct lwp_avl_struct *p;

    if (pid == 0)
    {
        return;
    }

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
}
#ifdef RT_USING_DFS_PROCFS
rt_inline void _free_proc_dentry(rt_lwp_t lwp)
{
    char pid_str[64] = {0};

    rt_snprintf(pid_str, 64, "%d", lwp->pid);
    pid_str[63] = 0;
    proc_remove_dentry(pid_str, 0);
}
#else
#define _free_proc_dentry(lwp)
#endif
void lwp_pid_put(struct rt_lwp *lwp)
{
    _free_proc_dentry(lwp);

    lwp_pid_lock_take();
    lwp_pid_put_locked(lwp->pid);
    if (lwp_pid_root == AVL_EMPTY)
    {
        rt_wqueue_wakeup_all(&_pid_emptyq, RT_NULL);
        /* keep holding the pid lock to refuse any new pid allocation from now on */
    }
    else
    {
        lwp_pid_lock_release();
    }

    /* reset pid field */
    lwp->pid = 0;
    /* clear reference */
    lwp_ref_dec(lwp);
}
static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
{
    struct lwp_avl_struct *p;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
        lwp_ref_inc(lwp);

#ifdef RT_USING_DFS_PROCFS
        if (pid)
        {
            proc_pid(pid);
        }
#endif
    }
}
static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}
void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_atomic_add(&object->lwp_ref_count, 1);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
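
/*
 * Usage sketch: register a kernel object into a process's ownership tree
 * so it is reclaimed by lwp_user_object_clear() when the process exits.
 * The semaphore here is only an illustration:
 *
 *     rt_sem_t sem = rt_sem_create("usem", 0, RT_IPC_FLAG_PRIO);
 *     if (sem && lwp_user_object_add(lwp, (rt_object_t)sem) != 0)
 *     {
 *         rt_sem_delete(sem); // already tracked, or out of memory
 *     }
 *
 * lwp_user_object_add() returns 0 on success and -1 when the object is
 * already in the tree or a tree node cannot be allocated.
 */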
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}
rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
rt_lwp_t lwp_create(rt_base_t flags)
{
    pid_t pid;
    rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));

    if (new_lwp)
    {
        /* minimal setup of lwp object */
        new_lwp->ref = 1;
#ifdef RT_USING_SMP
        new_lwp->bind_cpu = RT_CPUS_NR;
#endif
        new_lwp->exe_file = RT_NULL;
        rt_list_init(&new_lwp->t_grp);
        rt_list_init(&new_lwp->pgrp_node);
        rt_list_init(&new_lwp->timer);
        lwp_user_object_lock_init(new_lwp);
        rt_wqueue_init(&new_lwp->wait_queue);
        rt_wqueue_init(&new_lwp->waitpid_waiters);
        lwp_signal_init(&new_lwp->signal);
        rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);

        if (flags & LWP_CREATE_FLAG_NOTRACE_EXEC)
            new_lwp->did_exec = RT_TRUE;

        /* lwp with pid */
        if (flags & LWP_CREATE_FLAG_ALLOC_PID)
        {
            lwp_pid_lock_take();
            pid = lwp_pid_get_locked();
            if (pid == 0)
            {
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("%s: pid slot is full", __func__);
            }
            else
            {
                new_lwp->pid = pid;
                lwp_pid_set_lwp_locked(pid, new_lwp);
            }
            lwp_pid_lock_release();
        }

        /* new_lwp may have been dropped above if the pid allocation failed */
        if (new_lwp)
        {
            rt_memset(&new_lwp->rt_rusage, 0, sizeof(new_lwp->rt_rusage));

            if (flags & LWP_CREATE_FLAG_INIT_USPACE)
            {
                rt_err_t error = lwp_user_space_init(new_lwp, 0);
                if (error)
                {
                    lwp_pid_put(new_lwp);
                    lwp_user_object_lock_destroy(new_lwp);
                    rt_free(new_lwp);
                    new_lwp = RT_NULL;
                    LOG_E("%s: failed to initialize user space", __func__);
                }
            }
        }
    }
    LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
    return new_lwp;
}
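
/*
 * Usage sketch: create a process object with a fresh pid and an empty user
 * address space, then drop the creation reference. The flag combination
 * and the cleanup path are illustrative only:
 *
 *     rt_lwp_t lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID | LWP_CREATE_FLAG_INIT_USPACE);
 *     if (lwp)
 *     {
 *         ... // load an executable, set up the main thread, etc.
 *         lwp_ref_dec(lwp); // release the creation reference (ref starts at 1)
 *     }
 */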
/** when the reference count drops to 0, the lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
    rt_processgroup_t group = RT_NULL;

    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when the reference count is cleared
     *
     * Note: Critical Section
     * - lwp (RW. there is no other writer/reader competing with lwp_free,
     *   since all the references are cleared)
     */
    LOG_D("lwp free: %p", lwp);

    rt_free(lwp->exe_file);
    group = lwp_pgrp_find(lwp_pgid_get_byprocess(lwp));
    if (group)
        lwp_pgrp_remove(group, lwp);

    LWP_LOCK(lwp);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = 0;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    LWP_UNLOCK(lwp);
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /**
     * the pid must have been released before entering lwp_free(),
     * otherwise this is a data race
     */
    RT_ASSERT(lwp->pid == 0);
    rt_free(lwp);
}
rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
    LWP_LOCK(lwp);
    lwp->rt_rusage.ru_stime.tv_sec += thread->system_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_stime.tv_usec += thread->system_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    lwp->rt_rusage.ru_utime.tv_sec += thread->user_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_utime.tv_usec += thread->user_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    rt_list_remove(&thread->sibling);
    LWP_UNLOCK(lwp);

    lwp_futex_exit_robust_list(thread);

    /**
     * Note: the tid tree always holds a reference to the thread, hence the
     * tid must be released before the cleanup of the thread
     */
    lwp_tid_put(thread->tid);
    thread->tid = 0;

    rt_thread_delete(thread);
    rt_schedule();
    while (1) ;
}
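
/*
 * Implements the CLONE_CHILD_CLEARTID half of the futex-based thread-join
 * protocol: on thread exit, zero the user-space tid word that was
 * registered through clone()/set_tid_address(), then wake one waiter
 * blocked on that word with FUTEX_WAKE, so a user-space pthread_join()
 * can return.
 */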
rt_inline void _clear_child_tid(rt_thread_t thread)
{
    if (thread->clear_child_tid)
    {
        int t = 0;
        int *clear_child_tid = thread->clear_child_tid;

        thread->clear_child_tid = RT_NULL;
        lwp_put_to_user(clear_child_tid, &t, sizeof t);
        sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
    }
}
void lwp_exit(rt_lwp_t lwp, lwp_status_t status)
{
    rt_thread_t thread;

    if (!lwp)
    {
        LOG_W("%s: lwp should not be null", __func__);
        return ;
    }

    thread = rt_thread_self();
    RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
    LOG_D("process(lwp.pid=%d) exit", lwp->pid);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    /**
     * Brief: only one thread should call exit_group(),
     * but we cannot ensure that at run-time
     */
    lwp->lwp_status = status;
    LWP_UNLOCK(lwp);

    lwp_terminate(lwp);
#else
    rt_thread_t main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (main_thread == thread)
    {
        rt_thread_t sub_thread;
        rt_list_t *list;

        lwp_terminate(lwp);

        /* delete all subthreads */
        while ((list = thread->sibling.prev) != &lwp->t_grp)
        {
            sub_thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_list_remove(&sub_thread->sibling);
            rt_thread_delete(sub_thread);
        }
        lwp->lwp_ret = status;
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}
void lwp_thread_exit(rt_thread_t thread, int status)
{
    rt_thread_t header_thr;
    struct rt_lwp *lwp;

    LOG_D("%s", __func__);

    RT_ASSERT(thread == rt_thread_self());
    lwp = (struct rt_lwp *)thread->lwp;
    RT_ASSERT(lwp != RT_NULL);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
    {
        /**
         * if the last thread exits, treat it as a normal process exit.
         * This is reasonable since a trap event exits through lwp_exit()
         */
        lwp->lwp_status = LWP_CREATE_STAT_EXIT(status);
        LWP_UNLOCK(lwp);

        lwp_terminate(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}
/**
 * @note the reference is not for synchronization, but for the release of
 * resources; the synchronization is done through the lwp & pid locks
 * (see the usage sketch following lwp_ref_dec() below)
 */
int lwp_ref_inc(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, 1);
    LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);

    return ref;
}

int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    if (ref == 1)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* reference must be a positive integer */
        RT_ASSERT(ref > 1);
    }

    return ref;
}
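
/*
 * Usage sketch of the reference protocol: take a reference while holding
 * the pid lock, after which the lwp stays valid even with the lock dropped
 * until the matching lwp_ref_dec():
 *
 *     struct rt_lwp *lwp;
 *
 *     lwp_pid_lock_take();
 *     lwp = lwp_from_pid_locked(pid);
 *     if (lwp)
 *         lwp_ref_inc(lwp);
 *     lwp_pid_lock_release();
 *
 *     if (lwp)
 *     {
 *         ... // safe to use lwp here
 *         lwp_ref_dec(lwp);
 *     }
 */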
struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid)
{
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }

    return lwp;
}

struct rt_lwp* lwp_from_pid_locked(pid_t pid)
{
    struct rt_lwp* lwp;
    lwp = pid ? lwp_from_pid_raw_locked(pid) : lwp_self();
    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}
char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    lwp_pid_lock_release();

    return process_name;
}
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_sched_lock_level_t slvl;

    lwp_pid_lock_take();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->exe_file, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                rt_sched_lock(&slvl);
                if (!(rt_sched_thread_get_stat(main_thread) == RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
                rt_sched_unlock(slvl);
            }
        }
    }
    lwp_pid_lock_release();
    return pid;
}
int lwp_getpid(void)
{
    rt_lwp_t lwp = lwp_self();
    return lwp ? lwp->pid : 1;
    // return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}
rt_inline void _update_ru(struct rt_lwp *child, struct rt_lwp *self_lwp, struct rusage *uru)
{
    struct rusage rt_rusage;

    if (uru != RT_NULL)
    {
        rt_rusage.ru_stime.tv_sec = child->rt_rusage.ru_stime.tv_sec;
        rt_rusage.ru_stime.tv_usec = child->rt_rusage.ru_stime.tv_usec;
        rt_rusage.ru_utime.tv_sec = child->rt_rusage.ru_utime.tv_sec;
        rt_rusage.ru_utime.tv_usec = child->rt_rusage.ru_utime.tv_usec;
        lwp_data_put(self_lwp, uru, &rt_rusage, sizeof(*uru));
    }
}
/* do the statistical summary and reap the child if necessary */
static rt_err_t _stats_and_reap_child(rt_lwp_t child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *ustatus,
                                      int options, struct rusage *uru)
{
    int lwp_stat = child->lwp_status;

    /* report statistical data to the process */
    _update_ru(child, self_lwp, uru);

    if (child->terminated && !(options & WNOWAIT))
    {
        /* reap the child process if it has exited */
        LOG_D("func %s: child detached", __func__);

        lwp_pid_put(child);
        lwp_children_unregister(self_lwp, child);
    }

    if (ustatus)
        lwp_data_put(self_lwp, ustatus, &lwp_stat, sizeof(*ustatus));

    return RT_EOK;
}
#define HAS_CHILD_BUT_NO_EVT (-1024)

/* check if the process has already terminated */
static sysret_t _query_event_from_lwp(rt_lwp_t child, rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                      int options, int *status)
{
    sysret_t rc;

    LWP_LOCK(child);
    if (child->terminated)
    {
        rc = child->pid;
    }
    else if ((options & WSTOPPED) && child->jobctl_stopped && !child->wait_reap_stp)
    {
        child->wait_reap_stp = 1;
        rc = child->pid;
    }
    else
    {
        rc = HAS_CHILD_BUT_NO_EVT;
    }
    LWP_UNLOCK(child);

    LOG_D("%s(child_pid=%d ('%s'), stopped=%d) => %d", __func__, child->pid, child->cmd, child->jobctl_stopped, rc);
    return rc;
}
/* verify that the process is a child, and reap it */
static pid_t _verify_child_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                    pid_t wait_pid, int options, int *ustatus,
                                    struct rusage *uru)
{
    sysret_t rc;
    struct rt_lwp *child;

    /* check if pid refers to a valid child */
    lwp_pid_lock_take();
    child = lwp_from_pid_locked(wait_pid);
    if (!child)
        rc = -EINVAL;
    else if (child->parent != self_lwp)
        rc = -ESRCH;
    else
        rc = wait_pid;
    lwp_pid_lock_release();

    if (rc > 0)
    {
        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
        {
            _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
        }
    }
    return rc;
}
/* try to reap any child */
static pid_t _reap_any_child_pid(rt_thread_t cur_thr, rt_lwp_t self_lwp, pid_t pair_pgid,
                                 int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc = -ECHILD;
    struct rt_lwp *child;

    LWP_LOCK(self_lwp);
    child = self_lwp->first_child;

    /* find an exited child, if any */
    while (child)
    {
        if (pair_pgid && child->pgid != pair_pgid)
        {
            /* advance before skipping, otherwise this loops forever */
            child = child->sibling;
            continue;
        }

        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
            break;

        child = child->sibling;
    }
    LWP_UNLOCK(self_lwp);

    if (rc > 0)
    {
        _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
    }
    return rc;
}
rt_err_t lwp_waitpid_kick(rt_lwp_t parent, rt_lwp_t self_lwp)
{
    /* the waker provides the message mainly through its lwp_status */
    rt_wqueue_wakeup(&parent->waitpid_waiters, self_lwp);
    return RT_EOK;
}

struct waitpid_handle {
    struct rt_wqueue_node wq_node;
    int options;
    rt_lwp_t waker_lwp;
};
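
/*
 * The wq_node.key field carries the waitpid() "destiny" in the POSIX pid
 * encoding: a value > 0 names one child, -1 accepts any child, 0 accepts
 * any child in the waiter's process group, and a value < -1 accepts any
 * child in the process group -destiny. _waitq_filter() below decodes it.
 */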
/* set up the IPC message and notify the parent */
static int _waitq_filter(struct rt_wqueue_node *wait_node, void *key)
{
    int can_accept_evt = 0;
    rt_thread_t waiter = wait_node->polling_thread;
    pid_t destiny = (pid_t)wait_node->key;
    rt_lwp_t waker_lwp = key;
    struct waitpid_handle *handle;
    rt_ubase_t options;

    handle = rt_container_of(wait_node, struct waitpid_handle, wq_node);

    RT_ASSERT(waiter != RT_NULL);
    options = handle->options;

    /* filter out if the waker is not the desired one */
    if (destiny > 0)
    {
        /**
         * in the waitpid immediate-return routine, we have already checked
         * that pid is one of the child processes of the waiting thread
         */
        can_accept_evt = waker_lwp->pid == destiny;
    }
    else if (destiny == -1)
    {
        can_accept_evt = waker_lwp->parent == waiter->lwp;
    }
    else
    {
        /* destiny == 0 || destiny == -pgid */
        pid_t waiter_pgid;
        if (destiny == 0)
        {
            waiter_pgid = lwp_pgid_get_byprocess(waiter->lwp);
        }
        else
        {
            waiter_pgid = -destiny;
        }
        can_accept_evt = waiter_pgid == lwp_pgid_get_byprocess(waker_lwp);
    }

    /* filter out if the event is not desired */
    if (can_accept_evt)
    {
        if ((options & WEXITED) && waker_lwp->terminated)
            can_accept_evt = 1;
        else if ((options & WSTOPPED) && WIFSTOPPED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else if ((options & WCONTINUED) && WIFCONTINUED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else
            can_accept_evt = 0;
    }

    /* setup message for the waiter if accepted */
    if (can_accept_evt)
        handle->waker_lwp = waker_lwp;

    /* 0 if the event is accepted, otherwise discard */
    return !can_accept_evt;
}
/* the waiter cleans up the IPC message and waits for the desired event here */
static rt_err_t _wait_for_event(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                struct waitpid_handle *handle, pid_t destiny)
{
    rt_err_t ret;

    /* current context checking */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    handle->wq_node.polling_thread = cur_thr;
    handle->wq_node.key = destiny;
    handle->wq_node.wakeup = _waitq_filter;
    handle->wq_node.wqueue = &self_lwp->waitpid_waiters;
    rt_list_init(&handle->wq_node.list);

    cur_thr->error = RT_EOK;

    LOG_D("%s(self_lwp=%d) wait for event", __func__, self_lwp->pid);

    rt_enter_critical();
    ret = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
    if (ret == RT_EOK)
    {
        rt_wqueue_add(handle->wq_node.wqueue, &handle->wq_node);
        rt_exit_critical();

        rt_schedule();

        ret = cur_thr->error;

        /**
         * the cur_thr error is a positive value, but some legacy
         * implementations use a negative one. So we check to avoid errors
         */
        ret = ret > 0 ? -ret : ret;

        /**
         * we don't actually rely on this, but we clean it up since the
         * wakeup API sets it during the operation, and leaving it would
         * cause some messy conditions
         */
        handle->wq_node.wqueue->flag = RT_WQ_FLAG_CLEAN;
        rt_wqueue_remove(&handle->wq_node);
    }
    else
    {
        /* failed to suspend, return immediately with failure */
        rt_exit_critical();
    }

    return ret;
}
/* wait for the IPC event and do the cleanup if necessary */
static sysret_t _wait_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp, const pid_t pid,
                               int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc;
    struct waitpid_handle handle;
    rt_lwp_t waker;

    /* wait for SIGCHLD or other async events */
    handle.options = options;
    handle.waker_lwp = 0;
    rc = _wait_for_event(cur_thr, self_lwp, &handle, pid);

    waker = handle.waker_lwp;
    if (waker != RT_NULL)
    {
        rc = waker->pid;

        /* check out if any process exited */
        LOG_D("%s: woken up by lwp=%d", __func__, waker->pid);

        _stats_and_reap_child(waker, cur_thr, self_lwp, ustatus, options, uru);
    }
    /**
     * else if (rc != RT_EOK)
     *     unable to do a suspend, or woken up unexpectedly
     *     -> then return a failure
     */

    return rc;
}
pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru)
{
    pid_t rc = -1;
    struct rt_thread *cur_thr;
    struct rt_lwp *self_lwp;

    cur_thr = rt_thread_self();
    self_lwp = lwp_self();

    if (!cur_thr || !self_lwp)
    {
        rc = -EINVAL;
    }
    else
    {
        /* check if the desired child can be reaped immediately */
        if (pid > 0)
        {
            /* if pid is a child then try to reap it */
            rc = _verify_child_and_reap(cur_thr, self_lwp, pid, options, status, ru);
        }
        else if (pid == -1)
        {
            /* any terminated child */
            rc = _reap_any_child_pid(cur_thr, self_lwp, 0, options, status, ru);
        }
        else
        {
            /**
             * (pid < -1 || pid == 0)
             * any terminated child with a matching pgid
             */
            pid_t pair_pgid;

            if (pid == 0)
            {
                pair_pgid = lwp_pgid_get_byprocess(self_lwp);
            }
            else
            {
                pair_pgid = -pid;
            }
            rc = _reap_any_child_pid(cur_thr, self_lwp, pair_pgid, options, status, ru);
        }

        if (rc == HAS_CHILD_BUT_NO_EVT)
        {
            if (!(options & WNOHANG))
            {
                /* otherwise, arrange a suspend and wait for the async event */
                options |= WEXITED;
                rc = _wait_and_reap(cur_thr, self_lwp, pid, options, status, ru);
            }
            else
            {
                /**
                 * POSIX.1: If waitpid() was invoked with WNOHANG set in
                 * options, it has at least one child process specified by
                 * pid for which status is not available, and status is not
                 * available for any process specified by pid, 0 is returned
                 */
                rc = 0;
            }
        }
        else
        {
            RT_ASSERT(rc != 0);
        }
    }

    LOG_D("waitpid() => %d, *status=0x%x", rc, status ? *status : 0);
    return rc;
}
pid_t waitpid(pid_t pid, int *status, int options)
{
    return lwp_waitpid(pid, status, options, RT_NULL);
}
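
/*
 * Usage sketch: a non-blocking reap loop built on the WNOHANG semantics
 * documented above. A return of 0 means children exist but none has an
 * event yet; a negative value (e.g. -ECHILD) means there is nothing left
 * to wait for:
 *
 *     int status;
 *     pid_t who;
 *
 *     while ((who = waitpid(-1, &status, WNOHANG)) > 0)
 *     {
 *         if (WIFEXITED(status))
 *             rt_kprintf("child %d exited with %d\n", who, WEXITSTATUS(status));
 *     }
 */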
#ifdef RT_USING_FINSH
/* copied from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
        rt_kprintf("%3d %3d ", RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
    else
        rt_kprintf("N/A %3d ", RT_SCHED_PRIV(thread).current_priority);
#else
    rt_kprintf("%3d ", RT_SCHED_PRIV(thread).current_priority);
#endif /*RT_USING_SMP*/

    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               RT_SCHED_PRIV(thread).remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               RT_SCHED_PRIV(thread).remaining_tick,
               thread->error);
#endif
    rt_kprintf(" %-.*s\n", rt_strlen(thread->parent.name), thread->parent.name);
}
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error %-*.s\n", 4, "PID", 4, "TID", maxlen, item_title, maxlen, "cmd");
    object_split(4); rt_kprintf(" "); object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- -------- ---------- -----"); rt_kprintf(" "); object_split(maxlen); rt_kprintf("\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", 4, "TID", maxlen, item_title, maxlen, "cmd");
    object_split(4); rt_kprintf(" "); object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- -------- ---------- -----"); rt_kprintf(" "); object_split(maxlen); rt_kprintf("\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_spin_lock_irqsave(&thread->spinlock);
                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_spin_unlock_irqrestore(&thread->spinlock, level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_spin_unlock_irqrestore(&thread->spinlock, level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %4d %-*.*s ", lwp_to_pid(lwp), thread->tid, maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);
static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = SIGKILL;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_pid_lock_take();
    lwp_signal_kill(lwp_from_pid_raw_locked(pid), sig, SI_USER, 0);
    lwp_pid_lock_release();
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_pid_lock_take();
        lwp_signal_kill(lwp_from_pid_raw_locked(pid), SIGKILL, SI_USER, 0);
        lwp_pid_lock_release();
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif
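
/*
 * Poll the asynchronous exit request for the calling thread. The CAS moves
 * the flag from TRIGGERED to IN_PROCESS, so only the first poll after a
 * request observes it; kernel threads (no lwp attached) always read 0.
 */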
int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();
    rt_size_t expected = LWP_EXIT_REQUEST_TRIGGERED;

    if (!thread->lwp)
    {
        return 0;
    }

    return atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                          LWP_EXIT_REQUEST_IN_PROCESS);
}
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
static void _resr_cleanup(struct rt_lwp *lwp);

void lwp_terminate(struct rt_lwp *lwp)
{
    if (!lwp)
    {
        /* kernel threads are not supported */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    LWP_LOCK(lwp);

    if (!lwp->terminated)
    {
        /* stop the receiving of signals */
        lwp->terminated = RT_TRUE;
        LWP_UNLOCK(lwp);

        _wait_sibling_exit(lwp, rt_thread_self());
        _resr_cleanup(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
    rt_sched_lock_level_t slvl;
    rt_list_t *list;
    rt_thread_t thread;
    rt_size_t expected = LWP_EXIT_REQUEST_NONE;

    /* broadcast exit request for sibling threads */
    LWP_LOCK(lwp);
    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        thread = rt_list_entry(list, struct rt_thread, sibling);

        atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                       LWP_EXIT_REQUEST_TRIGGERED);

        rt_sched_lock(&slvl);
        /* don't release, otherwise the thread may have been freed */
        if (rt_sched_thread_is_suspended(thread))
        {
            thread->error = RT_EINTR;
            rt_sched_unlock(slvl);
            rt_thread_wakeup(thread);
        }
        else
        {
            rt_sched_unlock(slvl);
        }
    }
    LWP_UNLOCK(lwp);

    while (1)
    {
        int subthread_is_terminated;

        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - sibling list of lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_sched_lock_level_t slvl;
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check that all the subthreads are in init state */
            for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                rt_sched_lock(&slvl);
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
                {
                    rt_sched_unlock(slvl);
                    all_subthread_in_init = 0;
                    break;
                }
                else
                {
                    rt_sched_unlock(slvl);
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthreads */
                while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the
                     *   thread control block, it must ensure that no one
                     *   else can access the thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}
static void _notify_parent(rt_lwp_t lwp)
{
    int si_code;
    int signo_or_exitcode;
    lwp_siginfo_ext_t ext;
    lwp_status_t lwp_status = lwp->lwp_status;
    rt_lwp_t parent = lwp->parent;

    if (WIFSIGNALED(lwp_status))
    {
        si_code = (lwp_status & LWP_COREDUMP_FLAG) ? CLD_DUMPED : CLD_KILLED;
        signo_or_exitcode = WTERMSIG(lwp_status);
    }
    else
    {
        si_code = CLD_EXITED;
        signo_or_exitcode = WEXITSTATUS(lwp->lwp_status);
    }

    lwp_waitpid_kick(parent, lwp);

    ext = rt_malloc(sizeof(struct lwp_siginfo));
    if (ext)
    {
        rt_thread_t cur_thr = rt_thread_self();

        ext->sigchld.status = signo_or_exitcode;
        ext->sigchld.stime = cur_thr->system_time;
        ext->sigchld.utime = cur_thr->user_time;
    }
    lwp_signal_kill(parent, SIGCHLD, si_code, ext);
}
static void _resr_cleanup(struct rt_lwp *lwp)
{
    int need_cleanup_pid = RT_FALSE;

    lwp_jobctrl_on_exit(lwp);

    LWP_LOCK(lwp);
    lwp_signal_detach(&lwp->signal);

    /**
     * @brief Detach children from lwp
     *
     * @note Critical Section
     * - the lwp (RW. Release lwp)
     * - the pid resource manager (RW. Release the pid)
     */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;

        /** @note safe since the slist node is released */
        LWP_UNLOCK(lwp);

        LWP_LOCK(child);
        if (child->terminated)
        {
            lwp_pid_put(child);
        }
        else
        {
            child->sibling = RT_NULL;
            /* info: this may cause an orphan lwp */
            child->parent = RT_NULL;
        }
        LWP_UNLOCK(child);
        lwp_ref_dec(child);
        lwp_ref_dec(lwp);

        LWP_LOCK(lwp);
    }
    LWP_UNLOCK(lwp);

    /**
     * @brief Wake up the parent if it is waiting for this lwp; otherwise a
     * signal will be sent to the parent
     *
     * @note Critical Section
     * - the parent lwp (RW.)
     */
    LWP_LOCK(lwp);
    if (lwp->parent &&
        !lwp_sigismember(&lwp->parent->signal.sig_action_nocldwait, SIGCHLD))
    {
        /* if we successfully raced to set lwp->terminated before the parent detached */
        LWP_UNLOCK(lwp);

        /**
         * Note: children cannot detach themselves and must wait for the
         * parent to take care of them
         */
        _notify_parent(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);

        /**
         * if the process is an orphan, it doesn't have a parent to do the
         * recycling. Otherwise, its parent has set up a flag to mask out
         * the recycling event
         */
        need_cleanup_pid = RT_TRUE;
    }

    LWP_LOCK(lwp);
    if (lwp->fdt.fds != RT_NULL)
    {
        struct dfs_file **fds;

        /* auto clean fds */
        __exit_files(lwp);
        fds = lwp->fdt.fds;
        lwp->fdt.fds = RT_NULL;
        LWP_UNLOCK(lwp);

        rt_free(fds);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }

    if (need_cleanup_pid)
    {
        lwp_pid_put(lwp);
    }
}
static int _lwp_setaffinity(int tid, int cpu)
{
    rt_thread_t thread;
    int ret = -1;

    thread = lwp_tid_get_thread_and_inc_ref(tid);
    if (thread)
    {
#ifdef RT_USING_SMP
        rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_ubase_t)cpu);
#endif
        ret = 0;
    }
    lwp_tid_dec_ref(thread);
    return ret;
}

int lwp_setaffinity(int tid, int cpu)
{
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    ret = _lwp_setaffinity(tid, cpu);
    return ret;
}
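
/*
 * Usage sketch: bind a thread to CPU core 1, then unbind it by passing
 * RT_CPUS_NR (out-of-range values are clamped to RT_CPUS_NR, which the
 * scheduler treats as "not bound", matching the bind_cpu default set in
 * lwp_create()). The tid value 10 is only an illustration:
 *
 *     lwp_setaffinity(10, 1);          // bind to core 1
 *     lwp_setaffinity(10, RT_CPUS_NR); // detach from any fixed core
 */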
#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif