lwp_pid.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 * 2023-06-26     shell        clear ref to parent on waitpid()
 *                             Remove recycling of lwp on waitpid() and leave it to defunct routine
 */

#include <rthw.h>
#include <rtthread.h>

#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */

#include "lwp.h"
#include "lwp_pid.h"
#include "tty.h"

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#define DBG_TAG "LWP_PID"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;

struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}

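/*
 * Allocate a PID: reuse a node from the free list, or claim a fresh slot
 * from the static array; then search upward from current_pid + 1 (wrapping
 * back to 1) for a key that is not yet in the PID AVL tree. Returns 0 when
 * no node is available. Runs with interrupts disabled.
 */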
static pid_t lwp_pid_get(void)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    level = rt_hw_interrupt_disable();
    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    rt_hw_interrupt_enable(level);

    return pid;
}

static void lwp_pid_put(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
    rt_hw_interrupt_enable(level);
}

static void lwp_pid_set_lwp(pid_t pid, struct rt_lwp *lwp)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
    }
    rt_hw_interrupt_enable(level);
}

static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}

void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}

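/*
 * Record a kernel object in the process object tree and take one
 * per-process reference on it. Returns 0 on success, -1 if the object is
 * already tracked or the tracking node cannot be allocated.
 */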
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_base_t level;

                level = rt_hw_interrupt_disable();
                object->lwp_ref_count++;
                rt_hw_interrupt_enable(level);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

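/*
 * Drop one per-process reference on a tracked object. When the count
 * reaches zero the underlying kernel object is deleted according to its
 * type; the tracking node is always removed from the tree and freed.
 */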
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}

rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}

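/*
 * Allocate and zero a new lwp control block, initialize its lists, object
 * lock and wait queue, and assign it a PID. Returns RT_NULL if memory or a
 * PID slot cannot be obtained.
 */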
struct rt_lwp* lwp_new(void)
{
    pid_t pid;
    rt_base_t level;
    struct rt_lwp* lwp = RT_NULL;

    lwp = (struct rt_lwp *)rt_malloc(sizeof(struct rt_lwp));
    if (lwp == RT_NULL)
    {
        return lwp;
    }
    rt_memset(lwp, 0, sizeof(*lwp));
    //lwp->tgroup_leader = RT_NULL;
    rt_list_init(&lwp->wait_list);
    lwp->leader = 0;
    lwp->session = -1;
    lwp->tty = RT_NULL;
    rt_list_init(&lwp->t_grp);
    lwp_user_object_lock_init(lwp);
    lwp->address_search_head = RT_NULL;
    rt_wqueue_init(&lwp->wait_queue);
    lwp->ref = 1;

    level = rt_hw_interrupt_disable();
    pid = lwp_pid_get();
    if (pid == 0)
    {
        lwp_user_object_lock_destroy(lwp);
        rt_free(lwp);
        lwp = RT_NULL;
        LOG_E("pid slot full!\n");
        goto out;
    }
    lwp->pid = pid;
    lwp_pid_set_lwp(pid, lwp);

#ifdef LWP_ENABLE_ASID
    lwp->generation = 0;
    lwp->asid = 0;
#endif

out:
    rt_hw_interrupt_enable(level);
    return lwp;
}

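/*
 * Release every resource owned by a process: open file descriptors,
 * tracked kernel objects, data/text sections and the user address space.
 * Finished children are reaped, live children are detached, and the tty
 * foreground is handed back if this process held it. If a parent is
 * blocked in waitpid() it is woken and the control block is left for it;
 * otherwise the PID is returned and the lwp is freed here.
 */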
void lwp_free(struct rt_lwp* lwp)
{
    rt_base_t level;

    if (lwp == RT_NULL)
    {
        return;
    }
    LOG_D("lwp free: %p\n", lwp);

    level = rt_hw_interrupt_disable();
    lwp->finish = 1;
    rt_hw_interrupt_enable(level);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    if (lwp->fdt.fds != RT_NULL)
    {
        /* auto clean fds */
        __exit_files(lwp);
        rt_free(lwp->fdt.fds);
        lwp->fdt.fds = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif

    level = rt_hw_interrupt_disable();
    /* for children */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;
        if (child->finish)
        {
            lwp_pid_put(lwp_to_pid(child));
            rt_hw_interrupt_enable(level);
            rt_free(child);
            level = rt_hw_interrupt_disable();
        }
        else
        {
            child->sibling = RT_NULL;
            child->parent = RT_NULL;
        }
    }
    rt_hw_interrupt_enable(level);

    if (!lwp->background)
    {
        struct termios *old_stdin_termios = get_old_termios();
        struct rt_lwp *old_lwp = NULL;

        if (lwp->session == -1)
        {
            tcsetattr(1, 0, old_stdin_termios);
        }
        level = rt_hw_interrupt_disable();
        if (lwp->tty != RT_NULL)
        {
            rt_mutex_take(&lwp->tty->lock, RT_WAITING_FOREVER);
            old_lwp = tty_pop(&lwp->tty->head, RT_NULL);
            rt_mutex_release(&lwp->tty->lock);
            if (lwp->tty->foreground == lwp)
            {
                lwp->tty->foreground = old_lwp;
                lwp->tty = RT_NULL;
            }
        }
    }
    else
    {
        level = rt_hw_interrupt_disable();
    }

    /* for parent */
    {
        if (lwp->parent)
        {
            struct rt_thread *thread;

            if (!rt_list_isempty(&lwp->wait_list))
            {
                thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
                thread->error = RT_EOK;
                thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
                rt_thread_resume(thread);
                rt_hw_interrupt_enable(level);
                return;
            }
            else
            {
                struct rt_lwp **it = &lwp->parent->first_child;

                while (*it != lwp)
                {
                    it = &(*it)->sibling;
                }
                *it = lwp->sibling;
            }
        }
        lwp_pid_put(lwp_to_pid(lwp));
        rt_hw_interrupt_enable(level);
        rt_free(lwp);
    }
}

int lwp_ref_inc(struct rt_lwp *lwp)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    lwp->ref++;
    rt_hw_interrupt_enable(level);

    return 0;
}

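/*
 * Drop one reference to the process. When the count reaches zero the gdb
 * server is notified (if the process was being debugged), shared memory is
 * released on non-MMU builds, and lwp_free() is called. Returns 0 when the
 * process was released, -1 while references remain.
 */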
int lwp_ref_dec(struct rt_lwp *lwp)
{
    rt_base_t level;
    int ref = -1;

    level = rt_hw_interrupt_disable();
    if (lwp->ref)
    {
        lwp->ref--;
        ref = lwp->ref;
    }
    rt_hw_interrupt_enable(level);
    if (!ref)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }
#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);

        return 0;
    }

    return -1;
}

struct rt_lwp* lwp_from_pid(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }
    rt_hw_interrupt_enable(level);

    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}

char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp = lwp_from_pid(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    return process_name;
}

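/*
 * Find a process by executable name (the basename of its cmd). Returns the
 * PID if the main thread is still alive, or 0 if no match is found.
 */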
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                if (!(main_thread->stat & RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
            }
        }
    }
    rt_hw_interrupt_enable(level);
    return pid;
}

int lwp_getpid(void)
{
    return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}

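/*
 * Wait for the child identified by pid to exit. Returns -1 on failure (pid
 * is not a child of the caller, or another waiter already exists). A child
 * that has already finished is reaped immediately; otherwise the caller
 * suspends on the child's wait_list until lwp_free() wakes it. On success
 * *status receives the child's exit code and the child is unlinked from
 * the caller's child list.
 */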
pid_t waitpid(pid_t pid, int *status, int options)
{
    pid_t ret = -1;
    rt_base_t level;
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    struct rt_lwp *lwp_self;

    level = rt_hw_interrupt_disable();
    lwp = lwp_from_pid(pid);
    if (!lwp)
    {
        goto quit;
    }

    lwp_self = (struct rt_lwp *)rt_thread_self()->lwp;
    if (!lwp_self)
    {
        goto quit;
    }
    if (lwp->parent != lwp_self)
    {
        goto quit;
    }

    if (lwp->finish)
    {
        ret = pid;
    }
    else
    {
        if (!rt_list_isempty(&lwp->wait_list))
        {
            goto quit;
        }
        thread = rt_thread_self();
        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
        rt_list_insert_before(&lwp->wait_list, &(thread->tlist));
        rt_schedule();
        if (thread->error == RT_EOK)
        {
            ret = pid;
        }
    }

    if (ret != -1)
    {
        /* delete from sibling list of its parent */
        struct rt_lwp **lwp_node;

        *status = lwp->lwp_ret;
        lwp_node = &lwp_self->first_child;
        while (*lwp_node != lwp)
        {
            RT_ASSERT(*lwp_node != RT_NULL);
            lwp_node = &(*lwp_node)->sibling;
        }
        (*lwp_node) = lwp->sibling;
        lwp->parent = RT_NULL;
    }

quit:
    rt_hw_interrupt_enable(level);
    return ret;
}

#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (thread->oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->oncpu, thread->current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#endif /*RT_USING_SMP*/

    stat = (thread->stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#endif
}

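/*
 * 'list_process' shell command: print the kernel threads first, then every
 * thread of each process together with its PID and command line.
 */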
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_hw_interrupt_disable();
                    if ((thread->parent.type & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_hw_interrupt_enable(level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_hw_interrupt_enable(level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);

static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = 0;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_kill(pid, sig);
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_kill(pid, SIGKILL);
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif

int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();

    if (!thread->lwp)
    {
        return 0;
    }

    if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
    {
        thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
        return 1;
    }
    return 0;
}

static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
{
    int found = 0;
    rt_base_t level;
    rt_list_t *list;

    level = rt_hw_interrupt_disable();
    list = lwp->t_grp.next;
    while (list != &lwp->t_grp)
    {
        rt_thread_t iter_thread;

        iter_thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread == iter_thread)
        {
            found = 1;
            break;
        }
        list = list->next;
    }
    rt_hw_interrupt_enable(level);
    return found;
}

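/*
 * Ask a sub-thread of the current process to exit and poll (10 ms steps)
 * until it has left the thread group. Requests against the main thread or
 * against threads of another process are ignored.
 */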
void lwp_request_thread_exit(rt_thread_t thread_to_exit)
{
    rt_thread_t main_thread;
    rt_base_t level;
    rt_list_t *list;
    struct rt_lwp *lwp;

    lwp = lwp_self();
    if ((!thread_to_exit) || (!lwp))
    {
        return;
    }

    level = rt_hw_interrupt_disable();

    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread_to_exit == main_thread)
    {
        goto finish;
    }
    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
    {
        goto finish;
    }

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread != thread_to_exit)
        {
            continue;
        }
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = -RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        break;
    }

    while (found_thread(lwp, thread_to_exit))
    {
        rt_thread_mdelay(10);
    }

finish:
    rt_hw_interrupt_enable(level);
    return;
}

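/*
 * Post an exit request to every thread of the process and wake up any
 * thread that is currently suspended.
 */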
void lwp_terminate(struct rt_lwp *lwp)
{
    rt_base_t level;
    rt_list_t *list;

    if (!lwp)
    {
        /* kernel thread not support */
        return;
    }

    level = rt_hw_interrupt_disable();

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
    }
    rt_hw_interrupt_enable(level);
}

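/*
 * Main-thread helper: loop until all sub-threads of the current process
 * have left the thread group, deleting any that are still in the INIT
 * state and sleeping 10 ms between checks. Returns immediately when called
 * from a non-main thread.
 */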
void lwp_wait_subthread_exit(void)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    rt_thread_t thread;
    rt_thread_t main_thread;

    lwp = lwp_self();
    if (!lwp)
    {
        return;
    }

    thread = rt_thread_self();
    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread != main_thread)
    {
        return;
    }

    while (1)
    {
        int subthread_is_terminated;

        level = rt_hw_interrupt_disable();
        subthread_is_terminated = (int)(thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check all subthread is in init state */
            for (list = thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
                {
                    all_subthread_in_init = 0;
                    break;
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthread */
                while ((list = thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        rt_hw_interrupt_enable(level);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}

static int _lwp_setaffinity(pid_t pid, int cpu)
{
    struct rt_lwp *lwp;
    int ret = -1;

    lwp = lwp_from_pid(pid);
    if (lwp)
    {
#ifdef RT_USING_SMP
        rt_list_t *list;

        lwp->bind_cpu = cpu;
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu);
        }
#endif
        ret = 0;
    }
    return ret;
}

int lwp_setaffinity(pid_t pid, int cpu)
{
    rt_base_t level;
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    level = rt_hw_interrupt_disable();
    ret = _lwp_setaffinity(pid, cpu);
    rt_hw_interrupt_enable(level);
    return ret;
}

#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif