lwp_pid.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author     Notes
 * 2019-10-16     zhangjun   first version
 * 2021-02-20     lizhirui   fix warning
 */

#include <rthw.h>
#include <rtthread.h>

#include <dfs_posix.h>

#include "lwp.h"
#include "lwp_pid.h"
#include "tty.h"

#ifdef RT_USING_USERSPACE
#include "lwp_user_mm.h"
#endif

#define DBG_TAG "LWP_PID"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);
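
/*
 * PID bookkeeping: lwp_pid_ary provides the fixed pool of AVL nodes,
 * lwp_pid_free_head chains released nodes for reuse, lwp_pid_root is the
 * AVL tree of PIDs currently in use (node data points to the owning
 * struct rt_lwp), and current_pid remembers the last PID handed out.
 */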
static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;

struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
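
/*
 * Allocate an unused PID. A node is taken from the free list when one is
 * available, otherwise a fresh slot is consumed from lwp_pid_ary. The search
 * for a free number starts just above the most recently issued PID and wraps
 * around to 1. Returns 0 when all PID slots are exhausted.
 */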
static pid_t lwp_pid_get(void)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    level = rt_hw_interrupt_disable();
    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    rt_hw_interrupt_enable(level);
    return pid;
}
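
/*
 * Return a PID to the allocator: detach its node from the PID AVL tree and
 * push it onto the free list for reuse.
 */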
static void lwp_pid_put(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
    rt_hw_interrupt_enable(level);
}

static void lwp_pid_set_lwp(pid_t pid, struct rt_lwp *lwp)
{
    rt_base_t level;
    struct lwp_avl_struct *p;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
    }
    rt_hw_interrupt_enable(level);
}
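
/*
 * Close every file descriptor still open in the process, walking the fd
 * table from the highest index down and releasing each slot.
 */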
static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_fd *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}

void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}
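
/*
 * Track a kernel object owned by this process. The object pointer is used as
 * the key of an AVL node under lwp->object_root and the object's
 * lwp_ref_count is bumped, so it can be reclaimed automatically when the
 * process exits. Returns 0 on success, -1 if the object is already tracked
 * or allocation fails.
 */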
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_base_t level;

                level = rt_hw_interrupt_disable();
                object->lwp_ref_count++;
                rt_hw_interrupt_enable(level);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
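
/*
 * Drop one process reference on a tracked kernel object. When the reference
 * count reaches zero the object itself is deleted according to its type; the
 * AVL node is removed and freed in every case.
 */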
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}

rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
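
/*
 * Allocate and zero a new process control block, initialise its lists, lock,
 * wait queue and reference count, then bind a freshly allocated PID to it.
 * Returns RT_NULL when memory or PID slots are exhausted.
 */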
struct rt_lwp* lwp_new(void)
{
    pid_t pid;
    rt_base_t level;
    struct rt_lwp* lwp = RT_NULL;

    lwp = (struct rt_lwp *)rt_malloc(sizeof(struct rt_lwp));
    if (lwp == RT_NULL)
    {
        return lwp;
    }
    rt_memset(lwp, 0, sizeof(*lwp));
    //lwp->tgroup_leader = RT_NULL;
    rt_list_init(&lwp->wait_list);
    lwp->leader = 0;
    lwp->session = -1;
    lwp->tty = RT_NULL;
    rt_list_init(&lwp->t_grp);
    lwp_user_object_lock_init(lwp);
    lwp->address_search_head = RT_NULL;
    rt_wqueue_init(&lwp->wait_queue);
    lwp->ref = 1;

    level = rt_hw_interrupt_disable();
    pid = lwp_pid_get();
    if (pid == 0)
    {
        rt_free(lwp);
        lwp = RT_NULL;
        LOG_E("pid slot is full!\n");
        goto out;
    }
    lwp->pid = pid;
    lwp_pid_set_lwp(pid, lwp);

out:
    rt_hw_interrupt_enable(level);
    return lwp;
}
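
/*
 * Tear down a process control block: release its argument block, open file
 * descriptors, tracked kernel objects and memory sections, detach or free
 * its children, restore the terminal, and either wake a parent blocked in
 * waitpid() or release the PID and free the struct here.
 */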
void lwp_free(struct rt_lwp* lwp)
{
    rt_base_t level;

    if (lwp == RT_NULL)
    {
        return;
    }
    LOG_D("lwp free: %p\n", lwp);

    level = rt_hw_interrupt_disable();
    lwp->finish = 1;
    rt_hw_interrupt_enable(level);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    if (lwp->fdt.fds != RT_NULL)
    {
        /* auto clean fds */
        __exit_files(lwp);
        rt_free(lwp->fdt.fds);
        lwp->fdt.fds = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef RT_USING_USERSPACE
    lwp_unmap_user_space(lwp);
#endif

    level = rt_hw_interrupt_disable();
    /* for children */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;
        if (child->finish)
        {
            lwp_pid_put(lwp_to_pid(child));
            rt_hw_interrupt_enable(level);
            rt_free(child);
            level = rt_hw_interrupt_disable();
        }
        else
        {
            child->sibling = RT_NULL;
            child->parent = RT_NULL;
        }
    }
    rt_hw_interrupt_enable(level);

    /* for parent */
    {
        struct termios *old_stdin_termios = get_old_termios();
        struct rt_lwp *self_lwp = (struct rt_lwp *)lwp_self();

        if (lwp->session == -1)
        {
            tcsetattr(1, 0, old_stdin_termios);
        }
        level = rt_hw_interrupt_disable();
        if (lwp->tty != RT_NULL)
        {
            if (lwp->tty->foreground == lwp)
            {
                lwp->tty->foreground = self_lwp;
                lwp->tty = RT_NULL;
            }
        }
        if (lwp->parent)
        {
            struct rt_thread *thread;

            if (!rt_list_isempty(&lwp->wait_list))
            {
                thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
                thread->error = RT_EOK;
                thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
                rt_thread_resume(thread);
                rt_hw_interrupt_enable(level);
                return;
            }
            else
            {
                struct rt_lwp **it = &lwp->parent->first_child;

                while (*it != lwp)
                {
                    it = &(*it)->sibling;
                }
                *it = lwp->sibling;
            }
        }
        lwp_pid_put(lwp_to_pid(lwp));
        rt_hw_interrupt_enable(level);
        rt_free(lwp);
    }
}
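
/*
 * Reference counting for the process control block. lwp_ref_dec() notifies
 * the gdb server channel when a debugged process drops its last reference,
 * and finally releases the process through lwp_free().
 */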
void lwp_ref_inc(struct rt_lwp *lwp)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    lwp->ref++;
    rt_hw_interrupt_enable(level);
}

void lwp_ref_dec(struct rt_lwp *lwp)
{
    rt_base_t level;
    int ref = -1;

    level = rt_hw_interrupt_disable();
    if (lwp->ref)
    {
        lwp->ref--;
        ref = lwp->ref;
    }
    rt_hw_interrupt_enable(level);
    if (!ref)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }
#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
}

struct rt_lwp* lwp_from_pid(pid_t pid)
{
    rt_base_t level;
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    level = rt_hw_interrupt_disable();
    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }
    rt_hw_interrupt_enable(level);
    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}

char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp = lwp_from_pid(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name? process_name + 1: lwp->cmd;
    }
    return process_name;
}
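
/*
 * Look up a process by the base name of its command line. Scans every PID
 * slot, compares the name, and returns the PID of a match whose main thread
 * is not closing; returns 0 when no running process has that name.
 */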
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name? process_name + 1: lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                if (!(main_thread->stat & RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
            }
        }
    }
    rt_hw_interrupt_enable(level);
    return pid;
}

int lwp_getpid(void)
{
    return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}
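
/*
 * Wait for a child process to exit. Only the direct parent may wait; if the
 * child has already finished its exit code is collected immediately,
 * otherwise the caller suspends on the child's wait_list until lwp_free()
 * resumes it. On success the child is unlinked from the parent, its PID slot
 * released and its control block freed; returns the child's PID, or -1 on
 * error. The options argument is currently unused.
 */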
pid_t waitpid(pid_t pid, int *status, int options)
{
    pid_t ret = -1;
    rt_base_t level;
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    struct rt_lwp *lwp_self;

    level = rt_hw_interrupt_disable();
    lwp = lwp_from_pid(pid);
    if (!lwp)
    {
        goto quit;
    }
    lwp_self = (struct rt_lwp *)rt_thread_self()->lwp;
    if (!lwp_self)
    {
        goto quit;
    }
    if (lwp->parent != lwp_self)
    {
        goto quit;
    }

    if (lwp->finish)
    {
        ret = pid;
    }
    else
    {
        if (!rt_list_isempty(&lwp->wait_list))
        {
            goto quit;
        }
        thread = rt_thread_self();
        rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
        rt_list_insert_before(&lwp->wait_list, &(thread->tlist));
        rt_schedule();
        if (thread->error == RT_EOK)
        {
            ret = pid;
        }
    }

    if (ret != -1)
    {
        struct rt_lwp **lwp_node;

        *status = lwp->lwp_ret;
        lwp_node = &lwp_self->first_child;
        while (*lwp_node != lwp)
        {
            RT_ASSERT(*lwp_node != RT_NULL);
            lwp_node = &(*lwp_node)->sibling;
        }
        (*lwp_node) = lwp->sibling;
        lwp_pid_put(pid);
        rt_free(lwp);
    }

quit:
    rt_hw_interrupt_enable(level);
    return ret;
}

#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (thread->oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->name, thread->oncpu, thread->current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->name, thread->current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->name, thread->current_priority);
#endif /*RT_USING_SMP*/

    stat = (thread->stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#endif
}
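
/*
 * 'list_process' shell command: prints one line per thread. Pure kernel
 * threads are listed first under the name "kernel", followed by the threads
 * of each user process together with its PID and command line.
 */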
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_hw_interrupt_disable();
                    if ((thread->type & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_hw_interrupt_enable(level);
                        continue;
                    }
                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_hw_interrupt_enable(level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);

static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = 0;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }
    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_kill(pid, sig);
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }
    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_kill(pid, 0);
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif

int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();

    if (!thread->lwp)
    {
        return 0;
    }
    if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
    {
        thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
        return 1;
    }
    return 0;
}

static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
{
    int found = 0;
    rt_base_t level;
    rt_list_t *list;

    level = rt_hw_interrupt_disable();
    list = lwp->t_grp.next;
    while (list != &lwp->t_grp)
    {
        rt_thread_t iter_thread;

        iter_thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread == iter_thread)
        {
            found = 1;
            break;
        }
        list = list->next;
    }
    rt_hw_interrupt_enable(level);
    return found;
}
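
/*
 * Ask one sibling thread of the current process to exit. The target gets an
 * exit request flag, is woken up if it is suspended, and the caller then
 * polls until the thread has actually left the thread group. Requests
 * against the main thread or threads of other processes are ignored.
 */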
void lwp_request_thread_exit(rt_thread_t thread_to_exit)
{
    rt_thread_t main_thread;
    rt_base_t level;
    rt_list_t *list;
    struct rt_lwp *lwp;

    lwp = lwp_self();
    if ((!thread_to_exit) || (!lwp))
    {
        return;
    }

    level = rt_hw_interrupt_disable();

    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread_to_exit == main_thread)
    {
        goto finish;
    }
    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
    {
        goto finish;
    }

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread != thread_to_exit)
        {
            continue;
        }
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = -RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        break;
    }

    while (found_thread(lwp, thread_to_exit))
    {
        rt_thread_mdelay(10);
    }

finish:
    rt_hw_interrupt_enable(level);
    return;
}
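
/*
 * Post an exit request to every thread in the process and wake any that are
 * suspended, so the whole process starts shutting down.
 */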
void lwp_terminate(struct rt_lwp *lwp)
{
    rt_base_t level;
    rt_list_t *list;

    if (!lwp)
    {
        /* kernel thread not support */
        return;
    }

    level = rt_hw_interrupt_disable();

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
    }
    rt_hw_interrupt_enable(level);
}
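
/*
 * Called from the main thread of a process: spin (with a short delay) until
 * every sub-thread has left the thread group. Sub-threads that are still in
 * the init state are deleted directly since they never started running.
 */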
void lwp_wait_subthread_exit(void)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    rt_thread_t thread;
    rt_thread_t main_thread;

    lwp = lwp_self();
    if (!lwp)
    {
        return;
    }
    thread = rt_thread_self();
    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread != main_thread)
    {
        return;
    }

    while (1)
    {
        int subthread_is_terminated;

        level = rt_hw_interrupt_disable();
        subthread_is_terminated = (int)(thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check all subthread is in init state */
            for (list = thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
                {
                    all_subthread_in_init = 0;
                    break;
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthread */
                while ((list = thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        rt_hw_interrupt_enable(level);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}

static int _lwp_setaffinity(pid_t pid, int cpu)
{
    struct rt_lwp *lwp;
    int ret = -1;

    lwp = lwp_from_pid(pid);
    if (lwp)
    {
#ifdef RT_USING_SMP
        rt_list_t *list;

        lwp->bind_cpu = cpu;
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu);
        }
#endif
        ret = 0;
    }
    return ret;
}

int lwp_setaffinity(pid_t pid, int cpu)
{
    rt_base_t level;
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    level = rt_hw_interrupt_disable();
    ret = _lwp_setaffinity(pid, cpu);
    rt_hw_interrupt_enable(level);
    return ret;
}

#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif