/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 * 2023-06-26     shell        clear ref to parent on waitpid();
 *                             remove recycling of lwp on waitpid() and leave it to defunct routine
 * 2023-07-27     shell        move the detach of children process on parent exit to lwp_terminate;
 *                             make lwp_from_pid locked by caller to avoid possible use-after-free error
 * 2023-10-27     shell        format codes of sys_exit(); fix the data racing where lock is missed;
 *                             add reference on pid/tid, so the resource is not freed while using
 */
#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "lwp.pid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */

#include "lwp_internal.h"
#include "tty.h"

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;
static struct rt_mutex pid_mtx;
int lwp_pid_init(void)
{
    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
    return 0;
}
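/* Take/release the global pid lock: every access to the pid AVL tree and the
 * pid free list below is serialized through pid_mtx. */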
void lwp_pid_lock_take(void)
{
    DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* should never fail */
    RT_ASSERT(rc == RT_EOK);
}

void lwp_pid_lock_release(void)
{
    /* should never fail */
    if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
        RT_ASSERT(0);
}
struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
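/**
 * @brief Allocate a pid and its AVL slot; the caller must hold the pid lock.
 *
 * A slot comes from the free list if possible, otherwise from the unused tail
 * of lwp_pid_ary. The pid value is searched linearly starting right after the
 * last allocated pid, wrapping around to 1, so values are not reused
 * immediately. Returns 0 when all slots are in use.
 */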
static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    return pid;
}
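/* Return a pid to the allocator (caller holds the pid lock): the node is
 * removed from the pid tree and pushed onto the free list, reusing its
 * avl_right field as the list link. */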
static void lwp_pid_put_locked(pid_t pid)
{
    struct lwp_avl_struct *p;

    if (pid == 0)
    {
        return;
    }

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
}

void lwp_pid_put(struct rt_lwp *lwp)
{
    lwp_pid_lock_take();
    lwp_pid_put_locked(lwp->pid);
    lwp_pid_lock_release();

    /* reset pid field */
    lwp->pid = 0;
    /* clear reference */
    lwp_ref_dec(lwp);
}
static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
{
    struct lwp_avl_struct *p;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
        lwp_ref_inc(lwp);
    }
}
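/* Close every file descriptor still open in the process' fd table, walking
 * from the highest fd downwards. */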
static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}
void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}
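/**
 * @brief Track a kernel object in the per-process object tree.
 *
 * The tree is an AVL map keyed by the object pointer. Each insertion bumps
 * object->lwp_ref_count, so an object shared between processes is only
 * destroyed when the last tracking process deletes it (see
 * _object_node_delete() below).
 */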
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_atomic_add(&object->lwp_ref_count, 1);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
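/* Drop one process reference on a tracked object; when the count reaches
 * zero the object itself is destroyed according to its type. The AVL node is
 * removed and freed in every case. */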
static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}

rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
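/**
 * @brief Create a minimally initialized lwp object with one reference.
 *
 * If LWP_CREATE_FLAG_ALLOC_PID is set, a pid is also allocated and the lwp is
 * published in the pid tree, which takes a second reference via
 * lwp_pid_set_lwp_locked(). Returns RT_NULL on allocation failure or when the
 * pid space is exhausted.
 */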
rt_lwp_t lwp_create(rt_base_t flags)
{
    pid_t pid;
    rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));

    if (new_lwp)
    {
        /* minimal setup of lwp object */
        new_lwp->session = -1;
        new_lwp->ref = 1;
#ifdef RT_USING_SMP
        new_lwp->bind_cpu = RT_CPUS_NR;
#endif
        rt_list_init(&new_lwp->wait_list);
        rt_list_init(&new_lwp->t_grp);
        rt_list_init(&new_lwp->timer);
        lwp_user_object_lock_init(new_lwp);
        rt_wqueue_init(&new_lwp->wait_queue);
        lwp_signal_init(&new_lwp->signal);
        rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);

        /* lwp with pid */
        if (flags & LWP_CREATE_FLAG_ALLOC_PID)
        {
            lwp_pid_lock_take();
            pid = lwp_pid_get_locked();
            if (pid == 0)
            {
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("pid slots are exhausted!\n");
            }
            else
            {
                new_lwp->pid = pid;
                lwp_pid_set_lwp_locked(pid, new_lwp);
            }
            lwp_pid_lock_release();
        }
    }

    LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
    return new_lwp;
}
/** when reference is 0, a lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when its reference count is cleared
     *
     * Note: Critical Section
     * - lwp (RW. there is no other writer/reader competing with lwp_free,
     *   since all the references are cleared)
     */
    LOG_D("lwp free: %p", lwp);

    LWP_LOCK(lwp);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    LWP_UNLOCK(lwp);
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /**
     * pid must have been released before entering lwp_free(),
     * otherwise this is a data race
     */
    RT_ASSERT(lwp->pid == 0);
    rt_free(lwp);
}
rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
    /**
     * Note: the tid tree always holds a reference to the thread, hence the
     * tid must be released before the cleanup of the thread
     */
    lwp_tid_put(thread->tid);
    thread->tid = 0;

    LWP_LOCK(lwp);
    rt_list_remove(&thread->sibling);
    LWP_UNLOCK(lwp);

    rt_thread_delete(thread);
    rt_schedule();
    while (1) ;
}

rt_inline void _clear_child_tid(rt_thread_t thread)
{
    if (thread->clear_child_tid)
    {
        int t = 0;
        int *clear_child_tid = thread->clear_child_tid;

        thread->clear_child_tid = RT_NULL;
        lwp_put_to_user(clear_child_tid, &t, sizeof t);
        sys_futex(clear_child_tid, FUTEX_WAKE | FUTEX_PRIVATE, 1, RT_NULL, RT_NULL, 0);
    }
}
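/**
 * @brief Terminate the calling process with the given exit status.
 *
 * Must run in the context of a thread belonging to lwp. On MMU builds the
 * status is recorded in lwp->lwp_ret and lwp_terminate() tears the process
 * down; the calling thread itself is destroyed last in _thread_exit().
 */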
void lwp_exit(rt_lwp_t lwp, rt_base_t status)
{
    rt_thread_t thread;

    if (!lwp)
    {
        LOG_W("%s: lwp should not be null", __func__);
        return;
    }

    thread = rt_thread_self();
    RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
    LOG_D("process(lwp.pid=%d) exit", lwp->pid);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    /**
     * Brief: only one thread should call exit_group(),
     * but we cannot ensure that at run-time
     */
    lwp->lwp_ret = LWP_CREATE_STAT(status);
    LWP_UNLOCK(lwp);

    lwp_terminate(lwp);
#else
    rt_thread_t main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (main_thread == thread)
    {
        rt_thread_t sub_thread;
        rt_list_t *list;

        lwp_terminate(lwp);

        /* delete all subthreads */
        while ((list = thread->sibling.prev) != &lwp->t_grp)
        {
            sub_thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_list_remove(&sub_thread->sibling);
            rt_thread_delete(sub_thread);
        }
        lwp->lwp_ret = LWP_CREATE_STAT(status);
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}
void lwp_thread_exit(rt_thread_t thread, rt_base_t status)
{
    rt_thread_t header_thr;
    struct rt_lwp *lwp;

    LOG_D("%s", __func__);

    RT_ASSERT(thread == rt_thread_self());
    lwp = (struct rt_lwp *)thread->lwp;
    RT_ASSERT(lwp != RT_NULL);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
    {
        lwp->lwp_ret = LWP_CREATE_STAT(status);
        LWP_UNLOCK(lwp);

        lwp_terminate(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}
static void _pop_tty(rt_lwp_t lwp)
{
    if (!lwp->background)
    {
        struct termios *old_stdin_termios = get_old_termios();
        struct rt_lwp *old_lwp = NULL;

        if (lwp->session == -1)
        {
            tcsetattr(1, 0, old_stdin_termios);
        }
        if (lwp->tty != RT_NULL)
        {
            rt_mutex_take(&lwp->tty->lock, RT_WAITING_FOREVER);
            if (lwp->tty->foreground == lwp)
            {
                old_lwp = tty_pop(&lwp->tty->head, RT_NULL);
                lwp->tty->foreground = old_lwp;
            }
            else
            {
                tty_pop(&lwp->tty->head, lwp);
            }
            rt_mutex_release(&lwp->tty->lock);

            LWP_LOCK(lwp);
            lwp->tty = RT_NULL;
            LWP_UNLOCK(lwp);
        }
    }
}
/** @note the reference is not for synchronization, but for the release of
 * resource. the synchronization is done through lwp & pid lock */
int lwp_ref_inc(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, 1);
    LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);
    return ref;
}

int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    if (ref == 1)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* reference must be a positive integer */
        RT_ASSERT(ref > 1);
    }
    return ref;
}
struct rt_lwp* lwp_from_pid_locked(pid_t pid)
{
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }
    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}
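/* Map a pid to the process' basename (the part of lwp->cmd after the last
 * '/'). The returned pointer aliases lwp->cmd, so it is only valid while the
 * lwp stays alive. */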
char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    lwp_pid_lock_release();
    return process_name;
}
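/* Reverse lookup: scan all pid slots and return the pid of the process whose
 * basename matches, skipping processes whose main thread is already closed.
 * Returns 0 if no match is found. */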
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                if (!(main_thread->stat & RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
            }
        }
    }
    lwp_pid_lock_release();
    return pid;
}

int lwp_getpid(void)
{
    return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}
/**
 * @brief Wait for a child lwp to terminate. Do the essential recycling and
 * set up the status code for the user.
 */
static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *status,
                                      int options)
{
    sysret_t error;
    int lwp_stat;
    int terminated;

    if (!child)
    {
        error = -RT_ERROR;
    }
    else
    {
        /**
         * Note: Critical Section
         * - child lwp (RW. This will modify its parent if valid)
         */
        LWP_LOCK(child);
        if (child->terminated)
        {
            error = child->pid;
        }
        else if (rt_list_isempty(&child->wait_list))
        {
            /**
             * Note: only one thread can wait on wait_list.
             * don't reschedule before the mutex is unlocked
             */
            rt_enter_critical();

            error = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
            if (error == 0)
            {
                rt_list_insert_before(&child->wait_list, &(cur_thr->tlist));
                LWP_UNLOCK(child);

                rt_set_errno(RT_EINTR);
                rt_exit_critical();
                rt_schedule();

                /**
                 * Since the parent is holding a reference to the child, this
                 * lock will not be freed before the parent drops that
                 * reference.
                 */
                LWP_LOCK(child);
                error = rt_get_errno();
                if (error == RT_EINTR)
                {
                    error = -EINTR;
                }
                else if (error != RT_EOK)
                {
                    LOG_W("%s: unexpected error code %ld", __func__, error);
                }
                else
                {
                    error = child->pid;
                }
            }
            else
                rt_exit_critical();
        }
        else
            error = -RT_EINTR;

        lwp_stat = child->lwp_ret;
        terminated = child->terminated;
        LWP_UNLOCK(child);

        if (error > 0)
        {
            if (terminated)
            {
                LOG_D("func %s: child detached", __func__);
                /** Reap the child process if it's exited */
                lwp_pid_put(child);
                lwp_children_unregister(self_lwp, child);
            }
            if (status)
                lwp_data_put(self_lwp, status, &lwp_stat, sizeof(*status));
        }
    }

    return error;
}
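/**
 * @brief POSIX-like waitpid(): wait for the child selected by pid (or any
 * child when pid == -1), reap it if it has terminated, and return its pid on
 * success or a negative error code. Other pid selectors (0 and < -1, i.e.
 * process groups) are not supported yet.
 */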
pid_t waitpid(pid_t pid, int *status, int options) __attribute__((alias("lwp_waitpid")));

pid_t lwp_waitpid(const pid_t pid, int *status, int options)
{
    pid_t rc = -1;
    struct rt_thread *thread;
    struct rt_lwp *child;
    struct rt_lwp *self_lwp;

    thread = rt_thread_self();
    self_lwp = lwp_self();

    if (!self_lwp)
    {
        rc = -RT_EINVAL;
    }
    else
    {
        if (pid > 0)
        {
            lwp_pid_lock_take();
            child = lwp_from_pid_locked(pid);
            if (!child || child->parent != self_lwp)
                rc = -RT_ERROR;
            else
                rc = RT_EOK;
            lwp_pid_lock_release();

            if (rc == RT_EOK)
                rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
        }
        else if (pid == -1)
        {
            LWP_LOCK(self_lwp);
            child = self_lwp->first_child;
            LWP_UNLOCK(self_lwp);
            RT_ASSERT(!child || child->parent == self_lwp);

            rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
        }
        else
        {
            /* not supported yet */
            rc = -RT_EINVAL;
        }
    }

    if (rc > 0)
    {
        LOG_D("%s: recycle child id %ld (status=0x%x)", __func__, (long)rc, status ? *status : 0);
    }
    else
    {
        RT_ASSERT(rc != 0);
        LOG_D("%s: wait failed with code %ld", __func__, (long)rc);
    }

    return rc;
}
#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (thread->oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->oncpu, thread->current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#endif /* RT_USING_SMP */

    stat = (thread->stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#endif
}
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /* RT_USING_SMP */

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_spin_lock_irqsave(&thread->spinlock);
                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_spin_unlock_irqrestore(&thread->spinlock, level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_spin_unlock_irqrestore(&thread->spinlock, level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);
static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = SIGKILL;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_pid_lock_take();
    lwp_signal_kill(lwp_from_pid_locked(pid), sig, SI_USER, 0);
    lwp_pid_lock_release();
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_pid_lock_take();
        lwp_signal_kill(lwp_from_pid_locked(pid), SIGKILL, SI_USER, 0);
        lwp_pid_lock_release();
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif /* RT_USING_FINSH */
int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();

    if (!thread->lwp)
    {
        return 0;
    }

    if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
    {
        thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
        return 1;
    }
    return 0;
}
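/* Check whether a thread is still linked on the lwp's thread-group list; used
 * below to poll for the target thread's disappearance. */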
static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
{
    int found = 0;
    rt_base_t level;
    rt_list_t *list;

    level = rt_spin_lock_irqsave(&thread->spinlock);
    list = lwp->t_grp.next;
    while (list != &lwp->t_grp)
    {
        rt_thread_t iter_thread;

        iter_thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread == iter_thread)
        {
            found = 1;
            break;
        }
        list = list->next;
    }
    rt_spin_unlock_irqrestore(&thread->spinlock, level);
    return found;
}
void lwp_request_thread_exit(rt_thread_t thread_to_exit)
{
    rt_thread_t main_thread;
    rt_base_t level;
    rt_list_t *list;
    struct rt_lwp *lwp;

    lwp = lwp_self();

    if ((!thread_to_exit) || (!lwp))
    {
        return;
    }

    level = rt_spin_lock_irqsave(&thread_to_exit->spinlock);

    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread_to_exit == main_thread)
    {
        goto finish;
    }
    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
    {
        goto finish;
    }

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread != thread_to_exit)
        {
            continue;
        }

        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = -RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        break;
    }

    while (found_thread(lwp, thread_to_exit))
    {
        rt_thread_mdelay(10);
    }

finish:
    rt_spin_unlock_irqrestore(&thread_to_exit->spinlock, level);
    return;
}
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
static void _resr_cleanup(struct rt_lwp *lwp);

void lwp_terminate(struct rt_lwp *lwp)
{
    if (!lwp)
    {
        /* kernel threads are not supported */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    LWP_LOCK(lwp);

    if (!lwp->terminated)
    {
        /* stop the receiving of signals */
        lwp->terminated = RT_TRUE;
        LWP_UNLOCK(lwp);

        _wait_sibling_exit(lwp, rt_thread_self());
        _resr_cleanup(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
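/* Broadcast the exit request to every sibling thread and wake up suspended
 * ones, then poll until the calling thread is the only member left on the
 * thread-group list. Siblings still in INIT state are deleted directly. */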
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
    rt_base_t level;
    rt_list_t *list;
    rt_thread_t thread;

    /* broadcast exit request for sibling threads */
    LWP_LOCK(lwp);
    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        thread = rt_list_entry(list, struct rt_thread, sibling);
        level = rt_spin_lock_irqsave(&thread->spinlock);
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        rt_spin_unlock_irqrestore(&thread->spinlock, level);

        level = rt_spin_lock_irqsave(&thread->spinlock);
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = RT_EINTR;
            rt_spin_unlock_irqrestore(&thread->spinlock, level);

            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        else
        {
            rt_spin_unlock_irqrestore(&thread->spinlock, level);
        }
    }
    LWP_UNLOCK(lwp);

    while (1)
    {
        int subthread_is_terminated;
        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - sibling list of lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check whether all subthreads are still in init state */
            for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
                {
                    all_subthread_in_init = 0;
                    break;
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthreads */
                while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the thread
                     *   control block, it must ensure no one else can access
                     *   the thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}
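/* Release the resources of a terminating process: detach the signal context,
 * reap or orphan the children, leave the tty, wake up (or signal) the parent,
 * and close the fd table. */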
static void _resr_cleanup(struct rt_lwp *lwp)
{
    LWP_LOCK(lwp);
    lwp_signal_detach(&lwp->signal);

    /**
     * @brief Detach children from lwp
     *
     * @note Critical Section
     * - the lwp (RW. Release lwp)
     * - the pid resource manager (RW. Release the pid)
     */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;

        /** @note safe since the slist node is released */
        LWP_UNLOCK(lwp);

        LWP_LOCK(child);
        if (child->terminated)
        {
            lwp_pid_put(child);
        }
        else
        {
            child->sibling = RT_NULL;
            /* info: this may cause an orphan lwp */
            child->parent = RT_NULL;
        }
        LWP_UNLOCK(child);

        lwp_ref_dec(child);
        lwp_ref_dec(lwp);

        LWP_LOCK(lwp);
    }
    LWP_UNLOCK(lwp);

    _pop_tty(lwp);

    /**
     * @brief Wake up the parent if it's waiting for this lwp; otherwise a
     * SIGCHLD signal is sent to the parent
     *
     * @note Critical Section
     * - the parent lwp (RW.)
     */
    LWP_LOCK(lwp);
    if (lwp->parent)
    {
        struct rt_thread *thread;

        LWP_UNLOCK(lwp);
        if (!rt_list_isempty(&lwp->wait_list))
        {
            thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
            thread->error = RT_EOK;
            thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
            rt_thread_resume(thread);
        }
        else
        {
            /* a child cannot detach itself and must wait for the parent to take care of it */
            lwp_signal_kill(lwp->parent, SIGCHLD, CLD_EXITED, 0);
        }
    }
    else
    {
        LWP_UNLOCK(lwp);

        /* INFO: an orphan has no parent to reap its pid */
        lwp_pid_put(lwp);
    }

    LWP_LOCK(lwp);
    if (lwp->fdt.fds != RT_NULL)
    {
        struct dfs_file **fds;

        /* auto clean fds */
        __exit_files(lwp);
        fds = lwp->fdt.fds;
        lwp->fdt.fds = RT_NULL;
        LWP_UNLOCK(lwp);

        rt_free(fds);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
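/* Bind every thread of the selected process (pid == 0 means the caller) to
 * the given cpu. RT_CPUS_NR appears to act as the "unbound" value here, as
 * lwp_setaffinity() below maps out-of-range cpu ids to it. */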
static int _lwp_setaffinity(pid_t pid, int cpu)
{
    struct rt_lwp *lwp;
    int ret = -1;

    lwp_pid_lock_take();
    if (pid == 0)
        lwp = lwp_self();
    else
        lwp = lwp_from_pid_locked(pid);

    if (lwp)
    {
#ifdef RT_USING_SMP
        rt_list_t *list;

        lwp->bind_cpu = cpu;
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu);
        }
#endif
        ret = 0;
    }
    lwp_pid_lock_release();
    return ret;
}

int lwp_setaffinity(pid_t pid, int cpu)
{
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    ret = _lwp_setaffinity(pid, cpu);
    return ret;
}
#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif