lwp_pid.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2019-10-16 zhangjun first version
  9. * 2021-02-20 lizhirui fix warning
  10. * 2023-06-26 shell clear ref to parent on waitpid()
  11. * Remove recycling of lwp on waitpid() and leave it to defunct routine
  12. * 2023-07-27 shell Move the detach of children process on parent exit to lwp_terminate.
  13. * Make lwp_from_pid locked by caller to avoid possible use-after-free
  14. * error
  15. */
  16. #include <rthw.h>
  17. #include <rtthread.h>
  18. #define DBG_TAG "lwp.pid"
  19. #define DBG_LVL DBG_INFO
  20. #include <rtdbg.h>
  21. #include <dfs_file.h>
  22. #include <unistd.h>
  23. #include <stdio.h> /* rename() */
  24. #include <sys/stat.h>
  25. #include <sys/statfs.h> /* statfs() */
  26. #include "lwp_internal.h"
  27. #include "tty.h"
  28. #ifdef ARCH_MM_MMU
  29. #include "lwp_user_mm.h"
  30. #endif
#define PID_MAX 10000

/* Compile-time assertion: the array size 2*(x)-1 is negative when the
 * condition x is false, which makes the struct declaration a build error. */
#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);

/* static pool of AVL nodes, one per possible lwp */
static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
/* singly-linked free list of recycled pid nodes (chained through avl_right) */
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
/* number of nodes handed out from lwp_pid_ary so far */
static int lwp_pid_ary_alloced = 0;
/* AVL tree of pids currently in use, keyed by pid value */
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
/* last pid allocated; the next search starts just after it */
static pid_t current_pid = 0;
/* protects all of the pid bookkeeping above */
static struct rt_mutex pid_mtx;
/* One-time initialization of the global pid mutex. Returns 0. */
int lwp_pid_init(void)
{
    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
    return 0;
}
/* Acquire the global pid lock; blocks until available. */
void lwp_pid_lock_take(void)
{
    DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* should never failed */
    RT_ASSERT(rc == RT_EOK);
}
  54. void lwp_pid_lock_release(void)
  55. {
  56. /* should never failed */
  57. if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
  58. RT_ASSERT(0);
  59. }
/* Expose the static pid node pool (used by listings such as list_process). */
struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
/**
 * Allocate a free pid and insert its node into the pid tree.
 * Caller must hold the pid lock.
 *
 * A node comes from the recycled free list first, otherwise from the
 * unconsumed tail of the static pool. The pid value is chosen by scanning
 * upward from the last allocated pid, wrapping back to 1, so recently
 * released pids are not reused immediately.
 *
 * @return the new pid, or 0 if the node pool is exhausted.
 */
static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    p = lwp_pid_free_head;
    if (p)
    {
        /* free list is chained through avl_right */
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        /* carve a fresh node out of the static pool */
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        /* search upward from the last pid handed out */
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            /* wrap around: search from 1 up to the last pid */
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        /* RT_LWP_MAX_NR < PID_MAX (asserted above), so a free pid exists */
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    return pid;
}
  107. static void lwp_pid_put_locked(pid_t pid)
  108. {
  109. struct lwp_avl_struct *p;
  110. if (pid == 0)
  111. {
  112. return;
  113. }
  114. p = lwp_avl_find(pid, lwp_pid_root);
  115. if (p)
  116. {
  117. p->data = RT_NULL;
  118. lwp_avl_remove(p, &lwp_pid_root);
  119. p->avl_right = lwp_pid_free_head;
  120. lwp_pid_free_head = p;
  121. }
  122. }
  123. void lwp_pid_put(struct rt_lwp *lwp)
  124. {
  125. lwp_pid_lock_take();
  126. lwp_pid_put_locked(lwp->pid);
  127. lwp_pid_lock_release();
  128. /* reset pid field */
  129. lwp->pid = 0;
  130. }
  131. static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
  132. {
  133. struct lwp_avl_struct *p;
  134. p = lwp_avl_find(pid, lwp_pid_root);
  135. if (p)
  136. {
  137. p->data = lwp;
  138. }
  139. }
  140. static void __exit_files(struct rt_lwp *lwp)
  141. {
  142. int fd = lwp->fdt.maxfd - 1;
  143. while (fd >= 0)
  144. {
  145. struct dfs_file *d;
  146. d = lwp->fdt.fds[fd];
  147. if (d)
  148. {
  149. dfs_file_close(d);
  150. fdt_fd_release(&lwp->fdt, fd);
  151. }
  152. fd--;
  153. }
  154. }
/* Initialize the per-process mutex guarding the kernel-object tree. */
void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}
/* Tear down the per-process object mutex (counterpart of ..._lock_init). */
void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}
  163. void lwp_user_object_lock(struct rt_lwp *lwp)
  164. {
  165. if (lwp)
  166. {
  167. rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
  168. }
  169. else
  170. {
  171. RT_ASSERT(0);
  172. }
  173. }
  174. void lwp_user_object_unlock(struct rt_lwp *lwp)
  175. {
  176. if (lwp)
  177. {
  178. rt_mutex_release(&lwp->object_mutex);
  179. }
  180. else
  181. {
  182. RT_ASSERT(0);
  183. }
  184. }
/**
 * Track a kernel object in the process's object tree and take one
 * per-process reference on it.
 *
 * @return 0 on success; -1 if arguments are NULL, the object is already
 *         tracked, or node allocation fails.
 */
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        /* the object address itself is the AVL key */
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_atomic_add(&object->lwp_ref_count, 1);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
/**
 * Remove one tracked-object node from lwp's tree and drop the per-process
 * reference. When the last reference goes, the kernel object itself is
 * destroyed according to its type. Caller must hold the object lock.
 *
 * @return result of the type-specific delete, 0 if references remain,
 *         -1 on invalid arguments or unknown object type.
 */
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }

    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    /* the tracking node is always removed, even if delete failed above */
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}
  256. rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
  257. {
  258. rt_err_t ret = -1;
  259. if (lwp && object)
  260. {
  261. struct lwp_avl_struct *node;
  262. lwp_user_object_lock(lwp);
  263. node = lwp_avl_find((avl_key_t)object, lwp->object_root);
  264. ret = _object_node_delete(lwp, node);
  265. lwp_user_object_unlock(lwp);
  266. }
  267. return ret;
  268. }
  269. void lwp_user_object_clear(struct rt_lwp *lwp)
  270. {
  271. struct lwp_avl_struct *node;
  272. lwp_user_object_lock(lwp);
  273. while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
  274. {
  275. _object_node_delete(lwp, node);
  276. }
  277. lwp_user_object_unlock(lwp);
  278. }
  279. static int _object_dup(struct lwp_avl_struct *node, void *arg)
  280. {
  281. rt_object_t object;
  282. struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;
  283. object = (rt_object_t)node->avl_key;
  284. lwp_user_object_add(dst_lwp, object);
  285. return 0;
  286. }
/* Copy every tracked object of src_lwp into dst_lwp (e.g. on fork),
 * taking an additional per-process reference on each. */
void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
  293. rt_lwp_t lwp_create(rt_base_t flags)
  294. {
  295. pid_t pid;
  296. rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));
  297. if (new_lwp)
  298. {
  299. /* minimal setup of lwp object */
  300. new_lwp->session = -1;
  301. new_lwp->ref = 1;
  302. rt_list_init(&new_lwp->wait_list);
  303. rt_list_init(&new_lwp->t_grp);
  304. rt_list_init(&new_lwp->timer);
  305. lwp_user_object_lock_init(new_lwp);
  306. rt_wqueue_init(&new_lwp->wait_queue);
  307. lwp_signal_init(&new_lwp->signal);
  308. rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);
  309. /* lwp with pid */
  310. if (flags & LWP_CREATE_FLAG_ALLOC_PID)
  311. {
  312. lwp_pid_lock_take();
  313. pid = lwp_pid_get_locked();
  314. if (pid == 0)
  315. {
  316. lwp_user_object_lock_destroy(new_lwp);
  317. rt_free(new_lwp);
  318. new_lwp = RT_NULL;
  319. LOG_E("pid slot fulled!\n");
  320. }
  321. else
  322. {
  323. new_lwp->pid = pid;
  324. lwp_pid_set_lwp_locked(pid, new_lwp);
  325. }
  326. lwp_pid_lock_release();
  327. }
  328. }
  329. LOG_D("%s(pid=%d) => %p", __func__, new_lwp->pid, new_lwp);
  330. return new_lwp;
  331. }
/** when reference is 0, a lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when reference is cleared
     *
     * Note: Critical Section
     * - lwp (RW. there is no other writer/reader compete with lwp_free, since
     *   all the reference is clear)
     */
    LOG_D("lwp free: %p\n", lwp);

    /* free the argument block saved at exec time (MMU builds reclaim it
     * together with the user address space below) */
    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    if (lwp->fdt.fds != RT_NULL)
    {
        /* auto clean fds */
        __exit_files(lwp);
        rt_free(lwp->fdt.fds);
        lwp->fdt.fds = RT_NULL;
    }

    /* drop every kernel object the process still references, then the lock */
    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* nobody may hold the lwp lock at this point */
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    /* for children: reap exited children, orphan the live ones */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;
        if (child->terminated)
        {
            /* child already exited: recycle its pid and control block */
            lwp_pid_put(child);
            rt_free(child);
        }
        else
        {
            /** Note: safe since the slist node is release */
            child->sibling = RT_NULL;
            /* Note: this may cause an orphan lwp */
            child->parent = RT_NULL;
        }
    }

    /* a foreground process restores the terminal state on exit */
    if (!lwp->background)
    {
        struct termios *old_stdin_termios = get_old_termios();
        struct rt_lwp *old_lwp = NULL;

        if (lwp->session == -1)
        {
            tcsetattr(1, 0, old_stdin_termios);
        }
        if (lwp->tty != RT_NULL)
        {
            rt_mutex_take(&lwp->tty->lock, RT_WAITING_FOREVER);
            if (lwp->tty->foreground == lwp)
            {
                /* promote the next process on the tty stack to foreground */
                old_lwp = tty_pop(&lwp->tty->head, RT_NULL);
                lwp->tty->foreground = old_lwp;
            }
            else
            {
                tty_pop(&lwp->tty->head, lwp);
            }
            rt_mutex_release(&lwp->tty->lock);
            lwp->tty = RT_NULL;
        }
    }

    /* for parent */
    if (lwp->parent)
    {
        struct rt_thread *thread;

        if (!rt_list_isempty(&lwp->wait_list))
        {
            /* a thread is blocked in waitpid(): hand it the exit code and
             * let it finish the recycling -- do NOT free the lwp here */
            thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
            thread->error = RT_EOK;
            thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
            rt_thread_resume(thread);
            return;
        }
        else
        {
            /* unlink this lwp from the parent's singly-linked child list */
            struct rt_lwp **it = &lwp->parent->first_child;

            while (*it != lwp)
            {
                it = &(*it)->sibling;
            }
            *it = lwp->sibling;
        }
    }

    lwp_pid_put(lwp);
    rt_free(lwp);
}
  467. /** @note the reference is not for synchronization, but for the release of resource. the synchronization is done through lwp & pid lock */
  468. int lwp_ref_inc(struct rt_lwp *lwp)
  469. {
  470. int ref;
  471. ref = rt_atomic_add(&lwp->ref, 1);
  472. LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);
  473. return ref;
  474. }
/**
 * Drop one reference on lwp; frees the process when the last one goes.
 *
 * @return the reference count *before* the decrement (so 1 means this call
 *         released the final reference).
 */
int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    if (ref == 1)
    {
        /* this was the last reference */
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            /* notify the gdb server channel that the debuggee is gone */
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* reference must be a positive integer */
        RT_ASSERT(ref > 1);
    }

    return ref;
}
  502. struct rt_lwp* lwp_from_pid_locked(pid_t pid)
  503. {
  504. struct lwp_avl_struct *p;
  505. struct rt_lwp *lwp = RT_NULL;
  506. p = lwp_avl_find(pid, lwp_pid_root);
  507. if (p)
  508. {
  509. lwp = (struct rt_lwp *)p->data;
  510. }
  511. return lwp;
  512. }
  513. pid_t lwp_to_pid(struct rt_lwp* lwp)
  514. {
  515. if (!lwp)
  516. {
  517. return 0;
  518. }
  519. return lwp->pid;
  520. }
  521. char* lwp_pid2name(int32_t pid)
  522. {
  523. struct rt_lwp *lwp;
  524. char* process_name = RT_NULL;
  525. lwp_pid_lock_take();
  526. lwp = lwp_from_pid_locked(pid);
  527. if (lwp)
  528. {
  529. process_name = strrchr(lwp->cmd, '/');
  530. process_name = process_name? process_name + 1: lwp->cmd;
  531. }
  532. lwp_pid_lock_release();
  533. return process_name;
  534. }
/**
 * Look up a process id by executable name.
 *
 * Scans the whole pid node pool, matching `name` against the basename of
 * each lwp's command (first RT_NAME_MAX chars). A process whose main
 * thread is already closing is skipped. The scan does not stop at the
 * first hit, so with duplicate names the highest pool slot wins.
 *
 * @return the matching pid, or 0 if no live process matches.
 */
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            /* compare against the basename of the command */
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name? process_name + 1: lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                /* main thread is the last node of the thread group list */
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                if (!(main_thread->stat & RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
            }
        }
    }
    lwp_pid_lock_release();
    return pid;
}
/* Return the pid of the calling thread's process.
 * NOTE(review): assumes the caller is a user thread (thread->lwp != NULL);
 * a kernel thread would dereference NULL here -- confirm callers. */
int lwp_getpid(void)
{
    return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}
/**
 * @brief Wait for a child lwp to terminate. Do the essential recycling. Setup
 * status code for user
 *
 * @param child    the child to wait on (may be RT_NULL)
 * @param cur_thr  the calling thread; suspended if the child is still alive
 * @param self_lwp the parent process
 * @param status   user-space address receiving the exit status (may be NULL)
 * @param options  waitpid() options (not used in this routine)
 *
 * @return the child's pid on success, or a negative error code
 */
static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *status,
                                      int options)
{
    sysret_t error;
    int lwp_stat;
    int terminated;

    if (!child)
    {
        error = -RT_ERROR;
    }
    else
    {
        /**
         * Note: Critical Section
         * - child lwp (RW. This will modify its parent if valid)
         */
        LWP_LOCK(child);
        if (child->terminated)
        {
            /* child already exited: no need to sleep */
            error = child->pid;
        }
        else if (rt_list_isempty(&child->wait_list))
        {
            /**
             * Note: only one thread can wait on wait_list.
             * dont reschedule before mutex unlock
             */
            rt_enter_critical();

            error = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
            if (error == 0)
            {
                rt_list_insert_before(&child->wait_list, &(cur_thr->tlist));
                LWP_UNLOCK(child);

                rt_exit_critical();
                rt_schedule();

                /* woken: either the child exited or we were interrupted */
                if (child->terminated)
                    error = child->pid;
                else
                    error = -RT_EINTR;
            }
            else
                rt_exit_critical();
        }
        else
            /* another thread is already waiting on this child */
            error = -RT_EINTR;

        lwp_stat = child->lwp_ret;
        terminated = child->terminated;
        /* if the child terminated, lwp_free already consumed the lock path;
         * only unlock when it is still alive */
        if (!terminated)
            LWP_UNLOCK(child);

        if (error > 0)
        {
            if (terminated)
            {
                /** Reap the child process if it's exited */
                lwp_children_unregister(self_lwp, child);
                child->parent = RT_NULL;
                lwp_pid_put(child);
            }
            if (status)
                lwp_data_put(self_lwp, status, &lwp_stat, sizeof(*status));
        }
    }

    return error;
}
  636. pid_t waitpid(pid_t pid, int *status, int options) __attribute__((alias("lwp_waitpid")));
  637. pid_t lwp_waitpid(const pid_t pid, int *status, int options)
  638. {
  639. pid_t rc = -1;
  640. struct rt_thread *thread;
  641. struct rt_lwp *child;
  642. struct rt_lwp *self_lwp;
  643. thread = rt_thread_self();
  644. self_lwp = lwp_self();
  645. if (!self_lwp)
  646. {
  647. rc = -RT_EINVAL;
  648. }
  649. else
  650. {
  651. if (pid > 0)
  652. {
  653. lwp_pid_lock_take();
  654. child = lwp_from_pid_locked(pid);
  655. if (child->parent != self_lwp)
  656. rc = -RT_ERROR;
  657. else
  658. rc = RT_EOK;
  659. lwp_pid_lock_release();
  660. if (rc == RT_EOK)
  661. rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
  662. }
  663. else if (pid == -1)
  664. {
  665. LWP_LOCK(self_lwp);
  666. child = self_lwp->first_child;
  667. LWP_UNLOCK(self_lwp);
  668. RT_ASSERT(!child || child->parent == self_lwp);
  669. rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
  670. }
  671. else
  672. {
  673. /* not supported yet */
  674. rc = -RT_EINVAL;
  675. }
  676. }
  677. if (rc > 0)
  678. {
  679. LOG_D("%s: recycle child id %ld (status=0x%x)", __func__, (long)rc, status ? *status : 0);
  680. }
  681. else
  682. {
  683. RT_ASSERT(rc != 0);
  684. LOG_D("%s: wait failed with code %ld", __func__, (long)rc);
  685. }
  686. return rc;
  687. }
  688. #ifdef RT_USING_FINSH
  689. /* copy from components/finsh/cmd.c */
  690. static void object_split(int len)
  691. {
  692. while (len--)
  693. {
  694. rt_kprintf("-");
  695. }
  696. }
/* Print one row of the list_process table for `thread`: name, cpu (SMP),
 * priority, state, stack usage and error code. `maxlen` is the width of
 * the name column. */
static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (thread->oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->oncpu, thread->current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#endif /*RT_USING_SMP*/

    stat = (thread->stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY) rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT) rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE) rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

    /* estimate peak stack usage by scanning for the '#' fill pattern from
     * the far end of the stack */
#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#')ptr--;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#')ptr++;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#endif
}
/* msh command: print every thread in the system grouped by process.
 * Kernel threads are listed first (PID column shows "kernel"), then each
 * lwp's thread group with its pid and command. */
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;
#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4);rt_kprintf(" ");object_split(maxlen);rt_kprintf(" ");object_split(maxlen);rt_kprintf(" ");
    rt_kprintf( "--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4);rt_kprintf(" ");object_split(maxlen);rt_kprintf(" ");object_split(maxlen);rt_kprintf(" ");
    rt_kprintf( "--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /*RT_USING_SMP*/

    /* pass 1: kernel threads (those without an lwp) */
    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);

            if (index > 0)
            {
                for (index = 0; index <count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    /** FIXME: take the rt_thread_t lock */
                    level = rt_hw_interrupt_disable();
                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_hw_interrupt_enable(level);
                        continue;
                    }

                    /* snapshot the thread so it can be printed after
                     * interrupts are re-enabled */
                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_hw_interrupt_enable(level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    /* pass 2: user threads, grouped per process */
    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);
  806. static void cmd_kill(int argc, char** argv)
  807. {
  808. int pid;
  809. int sig = SIGKILL;
  810. if (argc < 2)
  811. {
  812. rt_kprintf("kill pid or kill pid -s signal\n");
  813. return;
  814. }
  815. pid = atoi(argv[1]);
  816. if (argc >= 4)
  817. {
  818. if (argv[2][0] == '-' && argv[2][1] == 's')
  819. {
  820. sig = atoi(argv[3]);
  821. }
  822. }
  823. lwp_pid_lock_take();
  824. lwp_signal_kill(lwp_from_pid_locked(pid), sig, SI_USER, 0);
  825. lwp_pid_lock_release();
  826. }
  827. MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);
  828. static void cmd_killall(int argc, char** argv)
  829. {
  830. int pid;
  831. if (argc < 2)
  832. {
  833. rt_kprintf("killall processes_name\n");
  834. return;
  835. }
  836. while((pid = lwp_name2pid(argv[1])) > 0)
  837. {
  838. lwp_pid_lock_take();
  839. lwp_signal_kill(lwp_from_pid_locked(pid), SIGKILL, SI_USER, 0);
  840. lwp_pid_lock_release();
  841. rt_thread_mdelay(100);
  842. }
  843. }
  844. MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
  845. #endif
  846. int lwp_check_exit_request(void)
  847. {
  848. rt_thread_t thread = rt_thread_self();
  849. if (!thread->lwp)
  850. {
  851. return 0;
  852. }
  853. if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
  854. {
  855. thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
  856. return 1;
  857. }
  858. return 0;
  859. }
  860. static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
  861. {
  862. int found = 0;
  863. rt_base_t level;
  864. rt_list_t *list;
  865. /** FIXME: take the rt_thread_t lock */
  866. level = rt_hw_interrupt_disable();
  867. list = lwp->t_grp.next;
  868. while (list != &lwp->t_grp)
  869. {
  870. rt_thread_t iter_thread;
  871. iter_thread = rt_list_entry(list, struct rt_thread, sibling);
  872. if (thread == iter_thread)
  873. {
  874. found = 1;
  875. break;
  876. }
  877. list = list->next;
  878. }
  879. rt_hw_interrupt_enable(level);
  880. return found;
  881. }
/* Ask one sibling thread of the current process to exit and wait until it
 * has actually left the thread group. Refuses the main thread and threads
 * belonging to other processes. */
void lwp_request_thread_exit(rt_thread_t thread_to_exit)
{
    rt_thread_t main_thread;
    rt_base_t level;
    rt_list_t *list;
    struct rt_lwp *lwp;

    lwp = lwp_self();

    if ((!thread_to_exit) || (!lwp))
    {
        return;
    }

    /* FIXME: take the rt_thread_t lock */
    level = rt_hw_interrupt_disable();

    /* the main thread is the last node of the thread group list */
    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread_to_exit == main_thread)
    {
        goto finish;
    }
    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
    {
        /* target thread belongs to a different process */
        goto finish;
    }

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread != thread_to_exit)
        {
            continue;
        }

        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            /* interrupt the blocked thread so it can notice the request */
            thread->error = -RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        break;
    }

    /* wait until the thread has removed itself from the group.
     * NOTE(review): this polls with interrupts disabled at `level`;
     * rt_thread_mdelay() reschedules, so other threads still run. */
    while (found_thread(lwp, thread_to_exit))
    {
        rt_thread_mdelay(10);
    }

finish:
    rt_hw_interrupt_enable(level);
    return;
}
/* Mark the process terminated and broadcast an exit request to all of its
 * threads. Idempotent: a second call sees lwp->terminated set and does
 * nothing. */
void lwp_terminate(struct rt_lwp *lwp)
{
    rt_list_t *list;

    if (!lwp)
    {
        /* kernel thread not support */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    LWP_LOCK(lwp);

    if (!lwp->terminated)
    {
        /* stop the receiving of signals */
        lwp->terminated = RT_TRUE;

        /* broadcast exit request for sibling threads */
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
            {
                thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
            }
            if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
            {
                /* NOTE(review): positive RT_EINTR here, while
                 * lwp_request_thread_exit uses -RT_EINTR -- confirm which
                 * sign waiters actually test for */
                thread->error = RT_EINTR;
                rt_hw_dsb();
                rt_thread_wakeup(thread);
            }
        }
    }
    LWP_UNLOCK(lwp);
}
/* Called by the main thread of a terminating process: block (polling every
 * 10 ms) until every sibling thread has left the thread group. Subthreads
 * still in INIT state are deleted directly since they never ran. */
void lwp_wait_subthread_exit(void)
{
    struct rt_lwp *lwp;
    rt_thread_t thread;
    rt_thread_t main_thread;

    lwp = lwp_self();
    if (!lwp)
    {
        return;
    }

    thread = rt_thread_self();
    /* the main thread is the last node of the thread group list */
    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread != main_thread)
    {
        /* only the main thread performs this wait */
        return;
    }

    while (1)
    {
        int subthread_is_terminated;
        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - sibling list of lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        /* the main thread is alone when its sibling link loops to t_grp */
        subthread_is_terminated = (int)(thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check all subthread is in init state */
            for (list = thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
                {
                    all_subthread_in_init = 0;
                    break;
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthread */
                while ((list = thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the thread
                     *   control block, it must ensure no one else can access
                     *   thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}
/* Record the cpu binding on the lwp and (SMP builds) rebind every thread
 * of process `pid` to `cpu`. Returns 0 on success, -1 if pid is unknown. */
static int _lwp_setaffinity(pid_t pid, int cpu)
{
    struct rt_lwp *lwp;
    int ret = -1;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
#ifdef RT_USING_SMP
        rt_list_t *list;

        lwp->bind_cpu = cpu;
        /* apply the binding to every thread in the group */
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu);
        }
#endif
        ret = 0;
    }
    lwp_pid_lock_release();
    return ret;
}
  1059. int lwp_setaffinity(pid_t pid, int cpu)
  1060. {
  1061. int ret;
  1062. #ifdef RT_USING_SMP
  1063. if (cpu < 0 || cpu > RT_CPUS_NR)
  1064. {
  1065. cpu = RT_CPUS_NR;
  1066. }
  1067. #endif
  1068. ret = _lwp_setaffinity(pid, cpu);
  1069. return ret;
  1070. }
  1071. #ifdef RT_USING_SMP
  1072. static void cmd_cpu_bind(int argc, char** argv)
  1073. {
  1074. int pid;
  1075. int cpu;
  1076. if (argc < 3)
  1077. {
  1078. rt_kprintf("Useage: cpu_bind pid cpu\n");
  1079. return;
  1080. }
  1081. pid = atoi(argv[1]);
  1082. cpu = atoi(argv[2]);
  1083. lwp_setaffinity((pid_t)pid, cpu);
  1084. }
  1085. MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
  1086. #endif