lwp_pid.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 */

#include <rthw.h>
#include <rtthread.h>
#include <dfs_posix.h>

#include "lwp.h"
#include "lwp_pid.h"
#include "tty.h"

#ifdef RT_USING_USERSPACE
#include "lwp_user_mm.h"
#endif

#define DBG_TAG    "LWP_PID"
#define DBG_LVL    DBG_INFO
#include <rtdbg.h>

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;

struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
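
/*
 * Allocate a PID slot: reuse a node from the free list when possible,
 * otherwise take the next unused entry of the static array, then pick an
 * id that is not present in the PID AVL tree (searching upward from the
 * last allocated id and wrapping around). Returns 0 when no slot is left.
 */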
static pid_t lwp_pid_get(void)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    level = rt_hw_interrupt_disable();
    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    rt_hw_interrupt_enable(level);

    return pid;
}

static void lwp_pid_put(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
    rt_hw_interrupt_enable(level);
}

static void lwp_pid_set_lwp(pid_t pid, struct rt_lwp *lwp)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
    }
    rt_hw_interrupt_enable(level);
}
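
/* Close and release every file descriptor still open in the process. */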
static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_fd *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}

void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}
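
/*
 * Track a kernel object in the process object tree (keyed by the object
 * address) and bump its per-lwp reference count. Returns 0 on success,
 * -1 if the object is already tracked or the node allocation fails.
 */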
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_base_t level;

                level = rt_hw_interrupt_disable();
                object->lwp_ref_count++;
                rt_hw_interrupt_enable(level);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
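
/*
 * Drop one reference to a tracked kernel object and remove its tree node.
 * When the last reference goes away the object itself is deleted according
 * to its type.
 */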
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}

rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
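
/*
 * Allocate and initialize a new lwp structure: grab a free PID, zero the
 * structure, set up the wait list, thread group list, object lock and wait
 * queue, and start with a reference count of 1. Returns RT_NULL on failure.
 */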
struct rt_lwp* lwp_new(void)
{
    pid_t pid;
    rt_base_t level;
    struct rt_lwp* lwp = RT_NULL;

    level = rt_hw_interrupt_disable();
    pid = lwp_pid_get();
    if (pid == 0)
    {
        LOG_E("pid slot full!\n");
        goto out;
    }
    lwp = (struct rt_lwp *)rt_malloc(sizeof(struct rt_lwp));
    if (lwp == RT_NULL)
    {
        lwp_pid_put(pid);
        LOG_E("no memory for lwp struct!\n");
        goto out;
    }
    rt_memset(lwp, 0, sizeof(*lwp));
    rt_list_init(&lwp->wait_list);
    lwp->pid = pid;
    lwp->leader = 0;
    lwp->session = -1;
    lwp->tty = RT_NULL;
    //lwp->tgroup_leader = RT_NULL;
    lwp_pid_set_lwp(pid, lwp);
    rt_list_init(&lwp->t_grp);
    lwp_user_object_lock_init(lwp);
    lwp->address_search_head = RT_NULL;
    rt_wqueue_init(&lwp->wait_queue);
    lwp->ref = 1;

out:
    rt_hw_interrupt_enable(level);
    return lwp;
}
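
/*
 * Release everything owned by an lwp: argument buffer, file descriptors,
 * tracked kernel objects, data/text sections and the user address space;
 * detach or free children, notify a parent blocked in waitpid() if there is
 * one, and finally return the PID and the lwp structure itself.
 */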
void lwp_free(struct rt_lwp* lwp)
{
    rt_base_t level;

    if (lwp == RT_NULL)
    {
        return;
    }

    LOG_D("lwp free: %p\n", lwp);
    level = rt_hw_interrupt_disable();
    lwp->finish = 1;

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }
    if (lwp->fdt.fds != RT_NULL)
    {
        /* auto clean fds */
        __exit_files(lwp);
        rt_free(lwp->fdt.fds);
        lwp->fdt.fds = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef RT_USING_USERSPACE
    lwp_unmap_user_space(lwp);
#endif

    /* for children */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;
        if (child->finish)
        {
            lwp_pid_put(lwp_to_pid(child));
            rt_free(child);
        }
        else
        {
            child->sibling = RT_NULL;
            child->parent = RT_NULL;
        }
    }

    /* for parent */
    {
        struct termios *old_stdin_termios = get_old_termios();
        struct rt_lwp *self_lwp = (struct rt_lwp *)lwp_self();

        if (lwp->session == -1)
        {
            tcsetattr(1, 0, old_stdin_termios);
        }
        if (lwp->tty != RT_NULL)
        {
            if (lwp->tty->foreground == lwp)
            {
                lwp->tty->foreground = self_lwp;
                lwp->tty = RT_NULL;
            }
        }
        if (lwp->parent)
        {
            struct rt_thread *thread;

            if (!rt_list_isempty(&lwp->wait_list))
            {
                /* a thread is blocked in waitpid(): wake it up and let it reap the lwp */
                thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
                thread->error = RT_EOK;
                thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
                rt_thread_resume(thread);
                rt_hw_interrupt_enable(level);
                return;
            }
            else
            {
                /* unlink this lwp from the parent's child list */
                struct rt_lwp **it = &lwp->parent->first_child;

                while (*it != lwp)
                {
                    it = &(*it)->sibling;
                }
                *it = lwp->sibling;
            }
        }
        lwp_pid_put(lwp_to_pid(lwp));
        rt_free(lwp);
    }
    rt_hw_interrupt_enable(level);
}

void lwp_ref_inc(struct rt_lwp *lwp)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    lwp->ref++;
    rt_hw_interrupt_enable(level);
}
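
/*
 * Drop one reference to the lwp; when the count reaches zero, notify the
 * GDB server channel if the process was being debugged and free the lwp.
 */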
void lwp_ref_dec(struct rt_lwp *lwp)
{
    rt_base_t level;
    int ref;

    level = rt_hw_interrupt_disable();
    if (lwp->ref)
    {
        lwp->ref--;
        ref = lwp->ref;
        if (!ref)
        {
            struct rt_channel_msg msg;

            if (lwp->debug)
            {
                memset(&msg, 0, sizeof msg);
                rt_raw_channel_send(gdb_server_channel(), &msg);
            }
#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
            lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
            lwp_free(lwp);
        }
    }
    rt_hw_interrupt_enable(level);
}

struct rt_lwp* lwp_from_pid(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }
    rt_hw_interrupt_enable(level);
    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}

char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp = lwp_from_pid(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name? process_name + 1: lwp->cmd;
    }
    return process_name;
}
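
/*
 * Look up a process by name (the basename of its command line) and return
 * its PID, or 0 when no running process matches.
 */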
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name? process_name + 1: lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                if (!(main_thread->stat & RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
            }
        }
    }
    rt_hw_interrupt_enable(level);
    return pid;
}

int lwp_getpid(void)
{
    return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}
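
/*
 * Wait for a child process to finish. Only the direct parent may wait; if
 * the child has not finished yet the caller blocks on the child's wait list
 * until lwp_free() wakes it up, then the exit status is collected and the
 * child's PID and lwp structure are released.
 */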
pid_t waitpid(pid_t pid, int *status, int options)
{
    pid_t ret = -1;
    rt_base_t level;
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    struct rt_lwp *lwp_self;

    level = rt_hw_interrupt_disable();
    lwp = lwp_from_pid(pid);
    if (!lwp)
    {
        goto quit;
    }

    lwp_self = (struct rt_lwp *)rt_thread_self()->lwp;
    if (!lwp_self)
    {
        goto quit;
    }
    if (lwp->parent != lwp_self)
    {
        goto quit;
    }

    if (lwp->finish)
    {
        ret = pid;
    }
    else
    {
        if (!rt_list_isempty(&lwp->wait_list))
        {
            goto quit;
        }
        thread = rt_thread_self();
        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
        rt_list_insert_before(&lwp->wait_list, &(thread->tlist));
        rt_schedule();
        if (thread->error == RT_EOK)
        {
            ret = pid;
        }
    }

    if (ret != -1)
    {
        struct rt_lwp **lwp_node;

        *status = lwp->lwp_ret;
        lwp_node = &lwp_self->first_child;
        while (*lwp_node != lwp)
        {
            RT_ASSERT(*lwp_node != RT_NULL);
            lwp_node = &(*lwp_node)->sibling;
        }
        (*lwp_node) = lwp->sibling;
        lwp_pid_put(pid);
        rt_free(lwp);
    }

quit:
    rt_hw_interrupt_enable(level);
    return ret;
}

#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (thread->oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->name, thread->oncpu, thread->current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->name, thread->current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->name, thread->current_priority);
#endif /*RT_USING_SMP*/

    stat = (thread->stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#endif
}
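
/*
 * 'list_process' shell command: print one line per kernel thread and per
 * user process thread, including PID, command name and the usual thread
 * statistics.
 */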
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;
#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_hw_interrupt_disable();
                    if ((thread->type & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_hw_interrupt_enable(level);
                        continue;
                    }
                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_hw_interrupt_enable(level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);

static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = 0;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_kill(pid, sig);
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    /* lwp_name2pid() returns 0 when no process matches, so stop on 0 */
    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_kill(pid, 0);
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif

int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();

    if (!thread->lwp)
    {
        return 0;
    }

    if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
    {
        thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
        return 1;
    }
    return 0;
}

static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
{
    int found = 0;
    rt_base_t level;
    rt_list_t *list;

    level = rt_hw_interrupt_disable();
    list = lwp->t_grp.next;
    while (list != &lwp->t_grp)
    {
        rt_thread_t iter_thread;

        iter_thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread == iter_thread)
        {
            found = 1;
            break;
        }
        list = list->next;
    }
    rt_hw_interrupt_enable(level);
    return found;
}
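
/*
 * Ask a sub-thread of the current process to exit and wait (polling) until
 * it has actually left the thread group. Requests against the main thread
 * or against threads of another process are ignored.
 */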
void lwp_request_thread_exit(rt_thread_t thread_to_exit)
{
    rt_thread_t main_thread;
    rt_base_t level;
    rt_list_t *list;
    struct rt_lwp *lwp;

    lwp = lwp_self();
    if ((!thread_to_exit) || (!lwp))
    {
        return;
    }

    level = rt_hw_interrupt_disable();

    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread_to_exit == main_thread)
    {
        goto finish;
    }
    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
    {
        goto finish;
    }

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread != thread_to_exit)
        {
            continue;
        }
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = -RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        break;
    }

    while (found_thread(lwp, thread_to_exit))
    {
        rt_thread_mdelay(10);
    }

finish:
    rt_hw_interrupt_enable(level);
    return;
}
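
/*
 * Post an exit request to every thread of the process and wake up any
 * thread that is currently suspended so it can observe the request.
 */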
void lwp_terminate(struct rt_lwp *lwp)
{
    rt_base_t level;
    rt_list_t *list;

    if (!lwp)
    {
        /* kernel thread not support */
        return;
    }

    level = rt_hw_interrupt_disable();

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
    }
    rt_hw_interrupt_enable(level);
}
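
/*
 * Called from the main thread of a process: loop until every sub-thread has
 * left the thread group, deleting sub-threads that are still in the init
 * state (never started) so they cannot block the exit.
 */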
void lwp_wait_subthread_exit(void)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    rt_thread_t thread;
    rt_thread_t main_thread;

    lwp = lwp_self();
    if (!lwp)
    {
        return;
    }

    thread = rt_thread_self();
    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread != main_thread)
    {
        return;
    }

    while (1)
    {
        int subthread_is_terminated;

        level = rt_hw_interrupt_disable();
        subthread_is_terminated = (int)(thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check all subthread is in init state */
            for (list = thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
                {
                    all_subthread_in_init = 0;
                    break;
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthread */
                while ((list = thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        rt_hw_interrupt_enable(level);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}