lwp_pid.c 26 KB

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 */
#include <rthw.h>
#include <rtthread.h>

#include <dfs_posix.h>

#include "lwp.h"
#include "lwp_pid.h"
#include "tty.h"

#ifdef RT_USING_USERSPACE
#include "lwp_user_mm.h"
#endif

#define DBG_TAG    "LWP_PID"
#define DBG_LVL    DBG_INFO
#include <rtdbg.h>

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;
struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
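
/*
 * Allocate a free PID.
 * A free AVL node is taken from the free list (or from the static array while
 * it is not yet exhausted), then the PID space [1, PID_MAX) is scanned starting
 * just above the last allocated PID and wrapping around, so PIDs are not reused
 * immediately. Returns 0 when no PID slot is available.
 */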
static pid_t lwp_pid_get(void)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    level = rt_hw_interrupt_disable();
    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    rt_hw_interrupt_enable(level);
    return pid;
}
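
/*
 * Release a PID: detach its node from the PID AVL tree and push the node back
 * onto the free list (avl_right is reused as the free-list link).
 */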
static void lwp_pid_put(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
    rt_hw_interrupt_enable(level);
}
static void lwp_pid_set_lwp(pid_t pid, struct rt_lwp *lwp)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
    }
    rt_hw_interrupt_enable(level);
}
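
/* Close every file descriptor that is still open in the process's fd table,
 * walking from the highest fd downwards. */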
static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_fd *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}
void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}
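
/*
 * Track a kernel object owned by the process. The object pointer is used as
 * the AVL key and the object's lwp_ref_count is incremented once per owning
 * process. Returns 0 on success, -1 if the object is already tracked or the
 * tracking node cannot be allocated.
 */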
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_base_t level;

                level = rt_hw_interrupt_disable();
                object->lwp_ref_count++;
                rt_hw_interrupt_enable(level);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
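
/*
 * Drop one reference to a tracked kernel object and remove its tracking node.
 * When the last reference is gone the object itself is deleted according to
 * its type. Callers hold the process object lock.
 */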
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}
rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}
static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
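
/*
 * Allocate and initialize a new process control block, assign it a PID and
 * register it in the PID tree. Returns RT_NULL if memory allocation fails or
 * no PID is available.
 */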
struct rt_lwp* lwp_new(void)
{
    pid_t pid;
    rt_base_t level;
    struct rt_lwp* lwp = RT_NULL;

    lwp = (struct rt_lwp *)rt_malloc(sizeof(struct rt_lwp));
    if (lwp == RT_NULL)
    {
        return lwp;
    }
    rt_memset(lwp, 0, sizeof(*lwp));
    //lwp->tgroup_leader = RT_NULL;
    rt_list_init(&lwp->wait_list);
    lwp->leader = 0;
    lwp->session = -1;
    lwp->tty = RT_NULL;
    rt_list_init(&lwp->t_grp);
    lwp_user_object_lock_init(lwp);
    lwp->address_search_head = RT_NULL;
    rt_wqueue_init(&lwp->wait_queue);
    lwp->ref = 1;

    level = rt_hw_interrupt_disable();
    pid = lwp_pid_get();
    if (pid == 0)
    {
        lwp_user_object_lock_destroy(lwp);
        rt_free(lwp);
        lwp = RT_NULL;
        LOG_E("pid slot is full!\n");
        goto out;
    }
    lwp->pid = pid;
    lwp_pid_set_lwp(pid, lwp);

out:
    rt_hw_interrupt_enable(level);
    return lwp;
}
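
/*
 * Tear down a process: close its remaining files, release tracked kernel
 * objects, free its data/text sections and user address space, detach or free
 * its children, hand the foreground tty back, and either wake a parent waiting
 * in waitpid() or release the PID and the control block directly.
 */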
void lwp_free(struct rt_lwp* lwp)
{
    rt_base_t level;

    if (lwp == RT_NULL)
    {
        return;
    }
    LOG_D("lwp free: %p\n", lwp);

    level = rt_hw_interrupt_disable();
    lwp->finish = 1;
    rt_hw_interrupt_enable(level);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    if (lwp->fdt.fds != RT_NULL)
    {
        /* auto clean fds */
        __exit_files(lwp);
        rt_free(lwp->fdt.fds);
        lwp->fdt.fds = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }
#ifdef RT_USING_USERSPACE
    lwp_unmap_user_space(lwp);
#endif

    level = rt_hw_interrupt_disable();
    /* for children */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;
        if (child->finish)
        {
            lwp_pid_put(lwp_to_pid(child));
            rt_hw_interrupt_enable(level);
            rt_free(child);
            level = rt_hw_interrupt_disable();
        }
        else
        {
            child->sibling = RT_NULL;
            child->parent = RT_NULL;
        }
    }
    rt_hw_interrupt_enable(level);

    /* for parent */
    {
        struct termios *old_stdin_termios = get_old_termios();
        struct rt_lwp *self_lwp = (struct rt_lwp *)lwp_self();

        if (lwp->session == -1)
        {
            tcsetattr(1, 0, old_stdin_termios);
        }
        level = rt_hw_interrupt_disable();
        if (lwp->tty != RT_NULL)
        {
            if (lwp->tty->foreground == lwp)
            {
                lwp->tty->foreground = self_lwp;
                lwp->tty = RT_NULL;
            }
        }
        if (lwp->parent)
        {
            struct rt_thread *thread;

            if (!rt_list_isempty(&lwp->wait_list))
            {
                thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
                thread->error = RT_EOK;
                thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
                rt_thread_resume(thread);
                rt_hw_interrupt_enable(level);
                return;
            }
            else
            {
                struct rt_lwp **it = &lwp->parent->first_child;

                while (*it != lwp)
                {
                    it = &(*it)->sibling;
                }
                *it = lwp->sibling;
            }
        }
        lwp_pid_put(lwp_to_pid(lwp));
        rt_hw_interrupt_enable(level);
        rt_free(lwp);
    }
}
void lwp_ref_inc(struct rt_lwp *lwp)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    lwp->ref++;
    rt_hw_interrupt_enable(level);
}
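
/*
 * Drop one reference to the process. When the last reference is released,
 * notify the GDB server channel if the process was being debugged, free any
 * shared memory it held (no-MMU builds), and finally free the process itself.
 */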
void lwp_ref_dec(struct rt_lwp *lwp)
{
    rt_base_t level;
    int ref = -1;

    level = rt_hw_interrupt_disable();
    if (lwp->ref)
    {
        lwp->ref--;
        ref = lwp->ref;
    }
    rt_hw_interrupt_enable(level);
    if (!ref)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }
#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
}
struct rt_lwp* lwp_from_pid(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }
    rt_hw_interrupt_enable(level);
    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}
char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp = lwp_from_pid(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    return process_name;
}
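
/*
 * Look up a process by the basename of its command line. Only processes whose
 * main thread has not been closed are matched; returns 0 if no match is found.
 */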
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                if (!(main_thread->stat & RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
            }
        }
    }
    rt_hw_interrupt_enable(level);
    return pid;
}
int lwp_getpid(void)
{
    return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}
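
/*
 * Wait for a direct child to finish. If the child has already finished, its
 * exit code is collected immediately; otherwise the caller is suspended on the
 * child's wait_list until lwp_free() wakes it. On success the child is
 * unlinked from the parent, its PID released and its control block freed, and
 * the child's PID is returned; on failure -1 is returned. The 'options'
 * argument is currently not evaluated.
 */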
pid_t waitpid(pid_t pid, int *status, int options)
{
    pid_t ret = -1;
    rt_base_t level;
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    struct rt_lwp *lwp_self;

    level = rt_hw_interrupt_disable();
    lwp = lwp_from_pid(pid);
    if (!lwp)
    {
        goto quit;
    }

    lwp_self = (struct rt_lwp *)rt_thread_self()->lwp;
    if (!lwp_self)
    {
        goto quit;
    }
    if (lwp->parent != lwp_self)
    {
        goto quit;
    }

    if (lwp->finish)
    {
        ret = pid;
    }
    else
    {
        if (!rt_list_isempty(&lwp->wait_list))
        {
            goto quit;
        }
        thread = rt_thread_self();
        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
        rt_list_insert_before(&lwp->wait_list, &(thread->tlist));
        rt_schedule();
        if (thread->error == RT_EOK)
        {
            ret = pid;
        }
    }

    if (ret != -1)
    {
        struct rt_lwp **lwp_node;

        *status = lwp->lwp_ret;
        lwp_node = &lwp_self->first_child;
        while (*lwp_node != lwp)
        {
            RT_ASSERT(*lwp_node != RT_NULL);
            lwp_node = &(*lwp_node)->sibling;
        }
        (*lwp_node) = lwp->sibling;
        lwp_pid_put(pid);
        rt_free(lwp);
    }

quit:
    rt_hw_interrupt_enable(level);
    return ret;
}
#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (thread->oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->name, thread->oncpu, thread->current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->name, thread->current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->name, thread->current_priority);
#endif /*RT_USING_SMP*/

    stat = (thread->stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready  ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init   ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close  ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#endif
}
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status     sp      stack size max used left tick  error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ----------  ------  ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status     sp      stack size max used left tick  error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ----------  ------  ---------- ---\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_hw_interrupt_disable();
                    if ((thread->type & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_hw_interrupt_enable(level);
                        continue;
                    }
                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_hw_interrupt_enable(level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf("     %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);
static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = 0;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_kill(pid, sig);
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);
static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    /* lwp_name2pid() returns 0 when no process matches, so loop while a match exists */
    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_kill(pid, 0);
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif
int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();

    if (!thread->lwp)
    {
        return 0;
    }

    if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
    {
        thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
        return 1;
    }
    return 0;
}
static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
{
    int found = 0;
    rt_base_t level;
    rt_list_t *list;

    level = rt_hw_interrupt_disable();
    list = lwp->t_grp.next;
    while (list != &lwp->t_grp)
    {
        rt_thread_t iter_thread;

        iter_thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread == iter_thread)
        {
            found = 1;
            break;
        }
        list = list->next;
    }
    rt_hw_interrupt_enable(level);
    return found;
}
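
/*
 * Ask one sub-thread of the current process to exit and wait until it has left
 * the thread group. The request is ignored for the main thread and for threads
 * that belong to another process. A suspended target is woken with -RT_EINTR
 * so it can observe the request.
 */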
void lwp_request_thread_exit(rt_thread_t thread_to_exit)
{
    rt_thread_t main_thread;
    rt_base_t level;
    rt_list_t *list;
    struct rt_lwp *lwp;

    lwp = lwp_self();
    if ((!thread_to_exit) || (!lwp))
    {
        return;
    }

    level = rt_hw_interrupt_disable();

    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread_to_exit == main_thread)
    {
        goto finish;
    }
    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
    {
        goto finish;
    }

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread != thread_to_exit)
        {
            continue;
        }
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = -RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        break;
    }

    while (found_thread(lwp, thread_to_exit))
    {
        rt_thread_mdelay(10);
    }

finish:
    rt_hw_interrupt_enable(level);
    return;
}
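
/*
 * Post an exit request to every thread of the given process and wake any
 * suspended ones, so the whole process starts shutting down. Kernel threads
 * (lwp == NULL) are not supported.
 */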
void lwp_terminate(struct rt_lwp *lwp)
{
    rt_base_t level;
    rt_list_t *list;

    if (!lwp)
    {
        /* kernel thread not support */
        return;
    }

    level = rt_hw_interrupt_disable();

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
    }
    rt_hw_interrupt_enable(level);
}
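
/*
 * Called by the main thread of a process: loop until every sub-thread has left
 * the thread group. Sub-threads that are still in the INIT state (never
 * started) are deleted directly; otherwise the main thread polls every 10 ms.
 */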
void lwp_wait_subthread_exit(void)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    rt_thread_t thread;
    rt_thread_t main_thread;

    lwp = lwp_self();
    if (!lwp)
    {
        return;
    }
    thread = rt_thread_self();
    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread != main_thread)
    {
        return;
    }

    while (1)
    {
        int subthread_is_terminated;

        level = rt_hw_interrupt_disable();
        subthread_is_terminated = (int)(thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check all subthread is in init state */
            for (list = thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
                {
                    all_subthread_in_init = 0;
                    break;
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthread */
                while ((list = thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        rt_hw_interrupt_enable(level);
        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}
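
/*
 * Bind a process to a CPU: record the binding in the lwp and apply
 * RT_THREAD_CTRL_BIND_CPU to every thread in its thread group.
 * lwp_setaffinity() clamps an out-of-range CPU index to RT_CPUS_NR and runs
 * the operation with interrupts disabled.
 */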
static int _lwp_setaffinity(pid_t pid, int cpu)
{
    struct rt_lwp *lwp;
    int ret = -1;

    lwp = lwp_from_pid(pid);
    if (lwp)
    {
#ifdef RT_USING_SMP
        rt_list_t *list;

        lwp->bind_cpu = cpu;
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu);
        }
#endif
        ret = 0;
    }
    return ret;
}

int lwp_setaffinity(pid_t pid, int cpu)
{
    rt_base_t level;
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    level = rt_hw_interrupt_disable();
    ret = _lwp_setaffinity(pid, cpu);
    rt_hw_interrupt_enable(level);
    return ret;
}
#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, bind a process to a cpu);
#endif