lwp_pid.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 * 2023-06-26     shell        clear ref to parent on waitpid()
 *                             Remove recycling of lwp on waitpid() and leave it to defunct routine
 */
#include <rthw.h>
#include <rtthread.h>

#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */

#include "lwp.h"
#include "lwp_pid.h"
#include "lwp_signal.h"
#include "tty.h"

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#define DBG_TAG    "LWP_PID"
#define DBG_LVL    DBG_INFO
#include <rtdbg.h>
#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;

struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
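
/*
 * Allocate a free PID.
 *
 * An AVL node is taken from the free list (or from the static lwp_pid_ary
 * slab while it is not exhausted), then the PID space is scanned starting
 * right after the most recently assigned PID and wrapping around to 1, so
 * that PIDs are not reused immediately. Returns 0 when no slot is left.
 */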
static pid_t lwp_pid_get(void)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    level = rt_hw_interrupt_disable();
    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    rt_hw_interrupt_enable(level);

    return pid;
}
static void lwp_pid_put(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    if (pid == 0)
    {
        return;
    }

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
    rt_hw_interrupt_enable(level);
}
static void lwp_pid_set_lwp(pid_t pid, struct rt_lwp *lwp)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
    }
    rt_hw_interrupt_enable(level);
}
static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}
void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}
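
/*
 * Track a kernel object in the per-process object tree.
 *
 * Each process keeps an AVL tree keyed by the object pointer; adding an
 * object bumps its lwp_ref_count so that it is only destroyed once the
 * last referencing process drops it. Returns 0 on success, -1 otherwise.
 */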
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_base_t level;

                level = rt_hw_interrupt_disable();
                object->lwp_ref_count++;
                rt_hw_interrupt_enable(level);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}
rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}
static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
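
/*
 * Create an empty lwp (process) object.
 *
 * The structure is zeroed, its lists, wait queue, object lock and signal
 * state are initialised, and a fresh PID is assigned. Returns RT_NULL if
 * allocation fails or the PID space is exhausted.
 */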
struct rt_lwp* lwp_new(void)
{
    pid_t pid;
    rt_base_t level;
    struct rt_lwp* lwp = RT_NULL;

    lwp = (struct rt_lwp *)rt_malloc(sizeof(struct rt_lwp));
    if (lwp == RT_NULL)
    {
        return lwp;
    }
    memset(lwp, 0, sizeof(*lwp));
    //lwp->tgroup_leader = RT_NULL;
    rt_list_init(&lwp->wait_list);
    lwp->leader = 0;
    lwp->session = -1;
    lwp->tty = RT_NULL;
    rt_list_init(&lwp->t_grp);
    rt_list_init(&lwp->timer);
    lwp_user_object_lock_init(lwp);
    lwp->address_search_head = RT_NULL;
    rt_wqueue_init(&lwp->wait_queue);
    lwp->ref = 1;
    lwp_signal_init(&lwp->signal);

    level = rt_hw_interrupt_disable();
    pid = lwp_pid_get();
    if (pid == 0)
    {
        lwp_user_object_lock_destroy(lwp);
        rt_free(lwp);
        lwp = RT_NULL;
        LOG_E("pid slot is full!\n");
        goto out;
    }
    lwp->pid = pid;
    lwp_pid_set_lwp(pid, lwp);

#ifdef LWP_ENABLE_ASID
    lwp->generation = 0;
    lwp->asid = 0;
#endif

out:
    rt_hw_interrupt_enable(level);
    return lwp;
}
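
/*
 * Tear down an lwp: close its remaining file descriptors, drop the kernel
 * objects it tracks, free its argument, data and text memory, unmap the
 * user address space, reap finished children and detach live ones, and
 * restore the controlling tty for foreground processes. If no parent is
 * blocked in waitpid(), the PID and the structure are released here;
 * otherwise the waiting thread is woken up with the exit code.
 */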
void lwp_free(struct rt_lwp* lwp)
{
    rt_base_t level;

    if (lwp == RT_NULL)
    {
        return;
    }

    LOG_D("lwp free: %p\n", lwp);
    level = rt_hw_interrupt_disable();
    lwp->finish = 1;
    rt_hw_interrupt_enable(level);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    if (lwp->fdt.fds != RT_NULL)
    {
        /* auto clean fds */
        __exit_files(lwp);
        rt_free(lwp->fdt.fds);
        lwp->fdt.fds = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif

    level = rt_hw_interrupt_disable();
    /* for children */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;
        if (child->finish)
        {
            lwp_pid_put(lwp_to_pid(child));
            rt_hw_interrupt_enable(level);
            rt_free(child);
            level = rt_hw_interrupt_disable();
        }
        else
        {
            child->sibling = RT_NULL;
            child->parent = RT_NULL;
        }
    }
    rt_hw_interrupt_enable(level);

    if (!lwp->background)
    {
        struct termios *old_stdin_termios = get_old_termios();
        struct rt_lwp *old_lwp = NULL;

        if (lwp->session == -1)
        {
            tcsetattr(1, 0, old_stdin_termios);
        }
        level = rt_hw_interrupt_disable();
        if (lwp->tty != RT_NULL)
        {
            rt_mutex_take(&lwp->tty->lock, RT_WAITING_FOREVER);
            old_lwp = tty_pop(&lwp->tty->head, RT_NULL);
            rt_mutex_release(&lwp->tty->lock);
            if (lwp->tty->foreground == lwp)
            {
                lwp->tty->foreground = old_lwp;
                lwp->tty = RT_NULL;
            }
        }
    }
    else
    {
        level = rt_hw_interrupt_disable();
    }

    /* for parent */
    if (lwp->parent)
    {
        struct rt_thread *thread;

        if (!rt_list_isempty(&lwp->wait_list))
        {
            thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
            thread->error = RT_EOK;
            thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
            rt_thread_resume(thread);
            rt_hw_interrupt_enable(level);
            return;
        }
        else
        {
            struct rt_lwp **it = &lwp->parent->first_child;

            while (*it != lwp)
            {
                it = &(*it)->sibling;
            }
            *it = lwp->sibling;
        }
    }

    timer_list_free(&lwp->timer);
    lwp_pid_put(lwp_to_pid(lwp));
    rt_hw_interrupt_enable(level);
    rt_free(lwp);
}
int lwp_ref_inc(struct rt_lwp *lwp)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    lwp->ref++;
    rt_hw_interrupt_enable(level);

    return 0;
}
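
/*
 * Drop one reference to the lwp. When the count reaches zero, an empty
 * message is sent to the gdb server channel if the process was being
 * debugged, its signal state is detached and lwp_free() is called.
 * Returns 0 when the lwp was released, -1 while references remain.
 */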
int lwp_ref_dec(struct rt_lwp *lwp)
{
    rt_base_t level;
    int ref = -1;

    level = rt_hw_interrupt_disable();
    if (lwp->ref)
    {
        lwp->ref--;
        ref = lwp->ref;
    }
    rt_hw_interrupt_enable(level);

    if (!ref)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }
        lwp_signal_detach(&lwp->signal);
#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);

        return 0;
    }
    return -1;
}
struct rt_lwp* lwp_from_pid(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }
    rt_hw_interrupt_enable(level);

    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}

char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp = lwp_from_pid(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    return process_name;
}
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                if (!(main_thread->stat & RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
            }
        }
    }
    rt_hw_interrupt_enable(level);
    return pid;
}

int lwp_getpid(void)
{
    return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}
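
/*
 * Wait for the child identified by pid to exit.
 *
 * Only the direct parent may wait. If the child has already finished, its
 * status is collected immediately; otherwise the caller is suspended
 * (uninterruptibly) on the child's wait list until lwp_free() wakes it up.
 * On success the exit code is stored in *status, the child is unlinked
 * from the parent's child list, its parent pointer is cleared and the pid
 * is returned; -1 is returned on error. The options argument is ignored.
 */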
pid_t waitpid(pid_t pid, int *status, int options)
{
    pid_t ret = -1;
    rt_base_t level;
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    struct rt_lwp *lwp_self;

    level = rt_hw_interrupt_disable();
    lwp = lwp_from_pid(pid);
    if (!lwp)
    {
        goto quit;
    }

    lwp_self = (struct rt_lwp *)rt_thread_self()->lwp;
    if (!lwp_self)
    {
        goto quit;
    }
    if (lwp->parent != lwp_self)
    {
        goto quit;
    }

    if (lwp->finish)
    {
        ret = pid;
    }
    else
    {
        if (!rt_list_isempty(&lwp->wait_list))
        {
            goto quit;
        }
        thread = rt_thread_self();
        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
        rt_list_insert_before(&lwp->wait_list, &(thread->tlist));
        rt_schedule();
        if (thread->error == RT_EOK)
        {
            ret = pid;
        }
    }

    if (ret != -1)
    {
        /* delete from sibling list of its parent */
        struct rt_lwp **lwp_node;

        *status = lwp->lwp_ret;
        lwp_node = &lwp_self->first_child;
        while (*lwp_node != lwp)
        {
            RT_ASSERT(*lwp_node != RT_NULL);
            lwp_node = &(*lwp_node)->sibling;
        }
        (*lwp_node) = lwp->sibling;
        lwp->parent = RT_NULL;
    }

quit:
    rt_hw_interrupt_enable(level);
    return ret;
}
#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (thread->oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->oncpu, thread->current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#endif /*RT_USING_SMP*/

    stat = (thread->stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#endif
}
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_hw_interrupt_disable();
                    if ((thread->parent.type & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_hw_interrupt_enable(level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_hw_interrupt_enable(level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);
static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = 0;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_signal_kill(lwp_from_pid(pid), sig, SI_USER, 0);
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_signal_kill(lwp_from_pid(pid), SIGKILL, SI_USER, 0);
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif
int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();

    if (!thread->lwp)
    {
        return 0;
    }

    if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
    {
        thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
        return 1;
    }
    return 0;
}

static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
{
    int found = 0;
    rt_base_t level;
    rt_list_t *list;

    level = rt_hw_interrupt_disable();
    list = lwp->t_grp.next;
    while (list != &lwp->t_grp)
    {
        rt_thread_t iter_thread;

        iter_thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread == iter_thread)
        {
            found = 1;
            break;
        }
        list = list->next;
    }
    rt_hw_interrupt_enable(level);
    return found;
}
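
/*
 * Ask one sibling thread of the current process to exit and wait until it
 * has actually left the thread group. The main thread cannot be asked to
 * exit this way, and threads belonging to another process are ignored.
 * Suspended targets are woken up with -RT_EINTR so that they can observe
 * the request.
 */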
void lwp_request_thread_exit(rt_thread_t thread_to_exit)
{
    rt_thread_t main_thread;
    rt_base_t level;
    rt_list_t *list;
    struct rt_lwp *lwp;

    lwp = lwp_self();

    if ((!thread_to_exit) || (!lwp))
    {
        return;
    }

    level = rt_hw_interrupt_disable();

    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread_to_exit == main_thread)
    {
        goto finish;
    }
    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
    {
        goto finish;
    }

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread != thread_to_exit)
        {
            continue;
        }

        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = -RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        break;
    }

    while (found_thread(lwp, thread_to_exit))
    {
        rt_thread_mdelay(10);
    }

finish:
    rt_hw_interrupt_enable(level);
    return;
}
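
/*
 * Mark a process as terminated and broadcast an exit request to every
 * thread in its group; suspended threads are woken up so that they can
 * notice the request. Calling this for a kernel thread (lwp == NULL) is
 * a no-op.
 */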
void lwp_terminate(struct rt_lwp *lwp)
{
    rt_base_t level;
    rt_list_t *list;

    if (!lwp)
    {
        /* kernel thread not support */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    level = rt_hw_interrupt_disable();

    /* stop the receiving of signals */
    if (!lwp->terminated)
    {
        lwp->terminated = RT_TRUE;

        /* broadcast exit request for sibling threads */
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
            {
                thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
            }
            if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
            {
                thread->error = RT_EINTR;
                rt_hw_dsb();
                rt_thread_wakeup(thread);
            }
        }
    }
    rt_hw_interrupt_enable(level);
}
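
/*
 * Called from the main thread of a process to wait until all of its
 * sub-threads are gone. Sub-threads that are still in the INIT state are
 * deleted directly; otherwise the main thread polls every 10 ms until the
 * thread group only contains itself. Returns immediately when called from
 * a sub-thread or a kernel thread.
 */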
void lwp_wait_subthread_exit(void)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    rt_thread_t thread;
    rt_thread_t main_thread;

    lwp = lwp_self();
    if (!lwp)
    {
        return;
    }

    thread = rt_thread_self();
    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread != main_thread)
    {
        return;
    }

    while (1)
    {
        int subthread_is_terminated;

        LOG_D("%s: wait for subthread exiting", __func__);

        level = rt_hw_interrupt_disable();
        subthread_is_terminated = (int)(thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check all subthread is in init state */
            for (list = thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
                {
                    all_subthread_in_init = 0;
                    break;
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthread */
                while ((list = thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        rt_hw_interrupt_enable(level);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}
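
/*
 * Bind every thread of the process identified by pid to the given CPU.
 * An out-of-range CPU index is clamped to RT_CPUS_NR. Returns 0 on
 * success and -1 if no such process exists.
 */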
static int _lwp_setaffinity(pid_t pid, int cpu)
{
    struct rt_lwp *lwp;
    int ret = -1;

    lwp = lwp_from_pid(pid);
    if (lwp)
    {
#ifdef RT_USING_SMP
        rt_list_t *list;

        lwp->bind_cpu = cpu;
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu);
        }
#endif
        ret = 0;
    }
    return ret;
}

int lwp_setaffinity(pid_t pid, int cpu)
{
    rt_base_t level;
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    level = rt_hw_interrupt_disable();
    ret = _lwp_setaffinity(pid, cpu);
    rt_hw_interrupt_enable(level);
    return ret;
}

#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif