lwp_pid.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 * 2023-06-26     shell        clear ref to parent on waitpid()
 *                             Remove recycling of lwp on waitpid() and leave it to defunct routine
 * 2023-07-27     shell        Move the detach of children process on parent exit to lwp_terminate.
 *                             Make lwp_from_pid locked by caller to avoid possible use-after-free
 *                             error
 * 2023-10-27     shell        Format codes of sys_exit(). Fix the data racing where lock is missed
 *                             Add reference on pid/tid, so the resource is not freed while using.
 */

#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG    "lwp.pid"
#define DBG_LVL    DBG_INFO
#include <rtdbg.h>

#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */

#include "lwp_internal.h"
#include "tty.h"

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;
static struct rt_mutex pid_mtx;
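
/**
 * @brief Initialize the mutex protecting the global PID allocator. Must run
 * once before any process is created, since every allocator path below takes
 * this lock.
 */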
int lwp_pid_init(void)
{
    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
    return 0;
}

void lwp_pid_lock_take(void)
{
    DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* should never fail */
    RT_ASSERT(rc == RT_EOK);
}

void lwp_pid_lock_release(void)
{
    /* should never fail */
    if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
        RT_ASSERT(0);
}

struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
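
/**
 * @brief Allocate a free PID. An AVL node is taken from the free list (or from
 * the never-used tail of lwp_pid_ary); the search for an unused PID value then
 * starts just above the last allocated one and wraps around to 1, so recently
 * released PIDs are not reused immediately. Returns 0 when no slot is left.
 * Caller must hold the PID lock.
 */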
static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }

    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    return pid;
}
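
/**
 * @brief Return a PID to the allocator: detach its node from the in-use AVL
 * tree and push the node onto the free list. Caller must hold the PID lock.
 */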
static void lwp_pid_put_locked(pid_t pid)
{
    struct lwp_avl_struct *p;

    if (pid == 0)
    {
        return;
    }

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
}

void lwp_pid_put(struct rt_lwp *lwp)
{
    lwp_pid_lock_take();
    lwp_pid_put_locked(lwp->pid);
    lwp_pid_lock_release();

    /* reset pid field */
    lwp->pid = 0;
    /* clear reference */
    lwp_ref_dec(lwp);
}

static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
{
    struct lwp_avl_struct *p;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
        lwp_ref_inc(lwp);
    }
}
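
/* Close every file descriptor still open in the fd table, from the highest
 * numbered slot downwards, releasing each slot after the close. */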
static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}

void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}
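
/**
 * @brief Track a kernel object owned by this process. The object is keyed by
 * its address in the per-process AVL tree, and lwp_ref_count counts how many
 * processes share it. Returns 0 on success, -1 on allocation failure or if
 * the object is already tracked.
 */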
int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_atomic_add(&object->lwp_ref_count, 1);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }

    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}

rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
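
/**
 * @brief Allocate and minimally initialize a process object with its reference
 * count set to 1. With LWP_CREATE_FLAG_ALLOC_PID a PID is allocated and bound
 * to the new lwp; RT_NULL is returned if allocation fails or the PID space is
 * exhausted.
 */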
rt_lwp_t lwp_create(rt_base_t flags)
{
    pid_t pid;
    rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));

    if (new_lwp)
    {
        /* minimal setup of lwp object */
        new_lwp->session = -1;
        new_lwp->ref = 1;
        rt_list_init(&new_lwp->wait_list);
        rt_list_init(&new_lwp->t_grp);
        rt_list_init(&new_lwp->timer);
        lwp_user_object_lock_init(new_lwp);
        rt_wqueue_init(&new_lwp->wait_queue);
        lwp_signal_init(&new_lwp->signal);
        rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);

        /* lwp with pid */
        if (flags & LWP_CREATE_FLAG_ALLOC_PID)
        {
            lwp_pid_lock_take();
            pid = lwp_pid_get_locked();
            if (pid == 0)
            {
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("pid slot full!\n");
            }
            else
            {
                new_lwp->pid = pid;
                lwp_pid_set_lwp_locked(pid, new_lwp);
            }
            lwp_pid_lock_release();
        }
    }
    /* guard against new_lwp being RT_NULL on failure */
    LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
    return new_lwp;
}

/** when reference is 0, a lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when reference is cleared
     *
     * Note: Critical Section
     * - lwp (RW. there is no other writer/reader competing with lwp_free,
     *   since all the references are cleared)
     */
    LOG_D("lwp free: %p", lwp);

    LWP_LOCK(lwp);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = 0;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    LWP_UNLOCK(lwp);
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /**
     * pid must have been released before entering lwp_free(),
     * otherwise this is a data race
     */
    RT_ASSERT(lwp->pid == 0);
    rt_free(lwp);
}

rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
    /**
     * Note: the tid tree always holds a reference to the thread, hence the
     * tid must be released before cleanup of the thread
     */
    lwp_tid_put(thread->tid);
    thread->tid = 0;

    LWP_LOCK(lwp);
    rt_list_remove(&thread->sibling);
    LWP_UNLOCK(lwp);

    rt_thread_delete(thread);
    rt_schedule();
    while (1) ;
}

rt_inline void _clear_child_tid(rt_thread_t thread)
{
    if (thread->clear_child_tid)
    {
        int t = 0;
        int *clear_child_tid = thread->clear_child_tid;

        thread->clear_child_tid = RT_NULL;
        lwp_put_to_user(clear_child_tid, &t, sizeof t);
        sys_futex(clear_child_tid, FUTEX_WAKE | FUTEX_PRIVATE, 1, RT_NULL, RT_NULL, 0);
    }
}
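
/**
 * @brief Terminate the whole process with the given exit status. Must be
 * called by a thread belonging to the lwp; it records the status, runs
 * lwp_terminate() to stop the sibling threads, then exits the caller itself.
 */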
void lwp_exit(rt_lwp_t lwp, rt_base_t status)
{
    rt_thread_t thread;

    if (!lwp)
    {
        LOG_W("%s: lwp should not be null", __func__);
        return;
    }

    thread = rt_thread_self();
    RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
    LOG_D("process(lwp.pid=%d) exit", lwp->pid);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    /**
     * Brief: only one thread should call exit_group(),
     * but we cannot ensure that at run time
     */
    lwp->lwp_ret = LWP_CREATE_STAT(status);
    LWP_UNLOCK(lwp);

    lwp_terminate(lwp);
#else
    /* fixed: the stale `tid`/`value` identifiers are renamed to the current
     * parameters, and main_thread is declared locally */
    rt_thread_t main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (main_thread == thread)
    {
        rt_thread_t sub_thread;
        rt_list_t *list;

        lwp_terminate(lwp);

        /* delete all subthreads */
        while ((list = thread->sibling.prev) != &lwp->t_grp)
        {
            sub_thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_list_remove(&sub_thread->sibling);
            rt_thread_delete(sub_thread);
        }
        lwp->lwp_ret = status;
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}

void lwp_thread_exit(rt_thread_t thread, rt_base_t status)
{
    rt_thread_t header_thr;
    struct rt_lwp *lwp;

    LOG_D("%s", __func__);

    RT_ASSERT(thread == rt_thread_self());
    lwp = (struct rt_lwp *)thread->lwp;
    RT_ASSERT(lwp != RT_NULL);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
    {
        lwp->lwp_ret = LWP_CREATE_STAT(status);
        LWP_UNLOCK(lwp);

        lwp_terminate(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}

static void _pop_tty(rt_lwp_t lwp)
{
    if (!lwp->background)
    {
        struct termios *old_stdin_termios = get_old_termios();
        struct rt_lwp *old_lwp = NULL;

        if (lwp->session == -1)
        {
            tcsetattr(1, 0, old_stdin_termios);
        }
        if (lwp->tty != RT_NULL)
        {
            rt_mutex_take(&lwp->tty->lock, RT_WAITING_FOREVER);
            if (lwp->tty->foreground == lwp)
            {
                old_lwp = tty_pop(&lwp->tty->head, RT_NULL);
                lwp->tty->foreground = old_lwp;
            }
            else
            {
                tty_pop(&lwp->tty->head, lwp);
            }
            rt_mutex_release(&lwp->tty->lock);

            LWP_LOCK(lwp);
            lwp->tty = RT_NULL;
            LWP_UNLOCK(lwp);
        }
    }
}

/** @note the reference is not for synchronization, but for the release of
 * resources. the synchronization is done through lwp & pid lock */
int lwp_ref_inc(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, 1);
    LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);
    return ref;
}

int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    if (ref == 1)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* reference must be a positive integer */
        RT_ASSERT(ref > 1);
    }
    return ref;
}

struct rt_lwp* lwp_from_pid_locked(pid_t pid)
{
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }
    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}

char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    lwp_pid_lock_release();

    return process_name;
}
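
/**
 * @brief Look up a live process by executable name (the basename of lwp->cmd)
 * and return its PID, or 0 if no match is found. Processes whose main thread
 * is already closing are skipped.
 */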
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                if (!(main_thread->stat & RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
            }
        }
    }
    lwp_pid_lock_release();
    return pid;
}

int lwp_getpid(void)
{
    return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}

/**
 * @brief Wait for a child lwp to terminate. Do the essential recycling. Set up
 * the status code for the user.
 */
static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *status,
                                      int options)
{
    sysret_t error;
    int lwp_stat;
    int terminated;

    if (!child)
    {
        error = -RT_ERROR;
    }
    else
    {
        /**
         * Note: Critical Section
         * - child lwp (RW. This will modify its parent if valid)
         */
        LWP_LOCK(child);
        if (child->terminated)
        {
            error = child->pid;
        }
        else if (rt_list_isempty(&child->wait_list))
        {
            /**
             * Note: only one thread can wait on wait_list.
             * don't reschedule before mutex unlock
             */
            rt_enter_critical();

            error = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
            if (error == 0)
            {
                rt_list_insert_before(&child->wait_list, &(cur_thr->tlist));
                LWP_UNLOCK(child);

                rt_set_errno(RT_EINTR);
                rt_exit_critical();
                rt_schedule();

                /**
                 * Since the parent holds a reference to the child, this lock
                 * will not be freed before the parent drops that reference.
                 */
                LWP_LOCK(child);
                error = rt_get_errno();
                if (error == RT_EINTR)
                {
                    error = -EINTR;
                }
                else if (error != RT_EOK)
                {
                    LOG_W("%s: unexpected error code %ld", __func__, error);
                }
                else
                {
                    error = child->pid;
                }
            }
            else
                rt_exit_critical();
        }
        else
            error = -RT_EINTR;

        lwp_stat = child->lwp_ret;
        terminated = child->terminated;
        LWP_UNLOCK(child);

        if (error > 0)
        {
            if (terminated)
            {
                LOG_D("func %s: child detached", __func__);
                /** Reap the child process if it's exited */
                lwp_pid_put(child);
                lwp_children_unregister(self_lwp, child);
            }
            if (status)
                lwp_data_put(self_lwp, status, &lwp_stat, sizeof(*status));
        }
    }

    return error;
}
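
/**
 * @brief POSIX-style waitpid(). pid > 0 waits for that specific child;
 * pid == -1 waits on the first child only. Other pid values (process groups)
 * are not supported yet.
 */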
pid_t waitpid(pid_t pid, int *status, int options) __attribute__((alias("lwp_waitpid")));

pid_t lwp_waitpid(const pid_t pid, int *status, int options)
{
    pid_t rc = -1;
    struct rt_thread *thread;
    struct rt_lwp *child;
    struct rt_lwp *self_lwp;

    thread = rt_thread_self();
    self_lwp = lwp_self();

    if (!self_lwp)
    {
        rc = -RT_EINVAL;
    }
    else
    {
        if (pid > 0)
        {
            lwp_pid_lock_take();
            child = lwp_from_pid_locked(pid);
            /* guard against a non-existent pid before checking the parent */
            if (!child || child->parent != self_lwp)
                rc = -RT_ERROR;
            else
                rc = RT_EOK;
            lwp_pid_lock_release();

            if (rc == RT_EOK)
                rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
        }
        else if (pid == -1)
        {
            LWP_LOCK(self_lwp);
            child = self_lwp->first_child;
            LWP_UNLOCK(self_lwp);
            RT_ASSERT(!child || child->parent == self_lwp);

            rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
        }
        else
        {
            /* not supported yet */
            rc = -RT_EINVAL;
        }
    }

    if (rc > 0)
    {
        LOG_D("%s: recycle child id %ld (status=0x%x)", __func__, (long)rc, status ? *status : 0);
    }
    else
    {
        RT_ASSERT(rc != 0);
        LOG_D("%s: wait failed with code %ld", __func__, (long)rc);
    }

    return rc;
}

#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (thread->oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->oncpu, thread->current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#endif /*RT_USING_SMP*/

    stat = (thread->stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
               / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#endif
}

long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_spin_lock_irqsave(&thread->spinlock);
                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_spin_unlock_irqrestore(&thread->spinlock, level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_spin_unlock_irqrestore(&thread->spinlock, level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);

static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = SIGKILL;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_pid_lock_take();
    lwp_signal_kill(lwp_from_pid_locked(pid), sig, SI_USER, 0);
    lwp_pid_lock_release();
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_pid_lock_take();
        lwp_signal_kill(lwp_from_pid_locked(pid), SIGKILL, SI_USER, 0);
        lwp_pid_lock_release();
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif

int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();

    if (!thread->lwp)
    {
        return 0;
    }

    if (thread->exit_request == LWP_EXIT_REQUEST_TRIGGERED)
    {
        thread->exit_request = LWP_EXIT_REQUEST_IN_PROCESS;
        return 1;
    }
    return 0;
}

static int found_thread(struct rt_lwp* lwp, rt_thread_t thread)
{
    int found = 0;
    rt_base_t level;
    rt_list_t *list;

    level = rt_spin_lock_irqsave(&thread->spinlock);
    list = lwp->t_grp.next;
    while (list != &lwp->t_grp)
    {
        rt_thread_t iter_thread;

        iter_thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread == iter_thread)
        {
            found = 1;
            break;
        }
        list = list->next;
    }
    rt_spin_unlock_irqrestore(&thread->spinlock, level);
    return found;
}
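
/**
 * @brief Ask a sibling thread of the current process to exit, then poll until
 * it has left the thread group. The main thread cannot be asked to exit this
 * way, nor can a thread that belongs to another process.
 */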
void lwp_request_thread_exit(rt_thread_t thread_to_exit)
{
    rt_thread_t main_thread;
    rt_base_t level;
    rt_list_t *list;
    struct rt_lwp *lwp;

    lwp = lwp_self();

    if ((!thread_to_exit) || (!lwp))
    {
        return;
    }

    level = rt_spin_lock_irqsave(&thread_to_exit->spinlock);

    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (thread_to_exit == main_thread)
    {
        goto finish;
    }
    if ((struct rt_lwp *)thread_to_exit->lwp != lwp)
    {
        goto finish;
    }

    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        rt_thread_t thread;

        thread = rt_list_entry(list, struct rt_thread, sibling);
        if (thread != thread_to_exit)
        {
            continue;
        }

        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = -RT_EINTR;
            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        break;
    }

    while (found_thread(lwp, thread_to_exit))
    {
        rt_thread_mdelay(10);
    }

finish:
    rt_spin_unlock_irqrestore(&thread_to_exit->spinlock, level);
    return;
}

static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
static void _resr_cleanup(struct rt_lwp *lwp);
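
/**
 * @brief Two-phase process termination: mark the lwp terminated so no more
 * signals are accepted, wait until every sibling thread has exited, then
 * release the process resources.
 */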
void lwp_terminate(struct rt_lwp *lwp)
{
    if (!lwp)
    {
        /* kernel thread is not supported */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    LWP_LOCK(lwp);

    if (!lwp->terminated)
    {
        /* stop the receiving of signals */
        lwp->terminated = RT_TRUE;
        LWP_UNLOCK(lwp);

        _wait_sibling_exit(lwp, rt_thread_self());
        _resr_cleanup(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}

static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
    rt_base_t level;
    rt_list_t *list;
    rt_thread_t thread;

    /* broadcast exit request for sibling threads */
    LWP_LOCK(lwp);
    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        thread = rt_list_entry(list, struct rt_thread, sibling);
        level = rt_spin_lock_irqsave(&thread->spinlock);
        if (thread->exit_request == LWP_EXIT_REQUEST_NONE)
        {
            thread->exit_request = LWP_EXIT_REQUEST_TRIGGERED;
        }
        rt_spin_unlock_irqrestore(&thread->spinlock, level);

        level = rt_spin_lock_irqsave(&thread->spinlock);
        if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            thread->error = RT_EINTR;
            rt_spin_unlock_irqrestore(&thread->spinlock, level);

            rt_hw_dsb();
            rt_thread_wakeup(thread);
        }
        else
        {
            rt_spin_unlock_irqrestore(&thread->spinlock, level);
        }
    }
    LWP_UNLOCK(lwp);

    while (1)
    {
        int subthread_is_terminated;
        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - sibling list of lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check whether all subthreads are in init state */
            for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if ((sub_thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT)
                {
                    all_subthread_in_init = 0;
                    break;
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthreads */
                while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the
                     *   thread control block, it must ensure no one else can
                     *   access the thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}

static void _resr_cleanup(struct rt_lwp *lwp)
{
    LWP_LOCK(lwp);
    lwp_signal_detach(&lwp->signal);

    /**
     * @brief Detach children from lwp
     *
     * @note Critical Section
     * - the lwp (RW. Release lwp)
     * - the pid resource manager (RW. Release the pid)
     */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;

        /** @note safe since the slist node is released */
        LWP_UNLOCK(lwp);
        LWP_LOCK(child);
        child->sibling = RT_NULL;
        /* info: this may cause an orphan lwp */
        child->parent = RT_NULL;
        LWP_UNLOCK(child);
        lwp_ref_dec(child);
        lwp_ref_dec(lwp);

        LWP_LOCK(lwp);
    }
    LWP_UNLOCK(lwp);

    _pop_tty(lwp);

    /**
     * @brief Wake up the parent if it's waiting for this lwp, otherwise a
     * SIGCHLD signal is sent to the parent
     *
     * @note Critical Section
     * - the parent lwp (RW.)
     */
    LWP_LOCK(lwp);
    if (lwp->parent)
    {
        struct rt_thread *thread;

        LWP_UNLOCK(lwp);
        if (!rt_list_isempty(&lwp->wait_list))
        {
            thread = rt_list_entry(lwp->wait_list.next, struct rt_thread, tlist);
            thread->error = RT_EOK;
            thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
            rt_thread_resume(thread);
        }
        else
        {
            /* children cannot detach themselves and must wait for the parent to take care of them */
            lwp_signal_kill(lwp->parent, SIGCHLD, CLD_EXITED, 0);
        }
    }
    else
    {
        LWP_UNLOCK(lwp);

        /* INFO: an orphan has no parent to reap its pid */
        lwp_pid_put(lwp);
    }

    LWP_LOCK(lwp);
    if (lwp->fdt.fds != RT_NULL)
    {
        struct dfs_file **fds;

        /* auto clean fds */
        __exit_files(lwp);
        fds = lwp->fdt.fds;
        lwp->fdt.fds = RT_NULL;
        LWP_UNLOCK(lwp);

        rt_free(fds);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
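
/**
 * @brief Bind every thread of the target process to the given CPU; pid 0
 * refers to the calling process. On builds without RT_USING_SMP the binding
 * loop is compiled out and the call only validates the pid.
 */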
static int _lwp_setaffinity(pid_t pid, int cpu)
{
    struct rt_lwp *lwp;
    int ret = -1;

    lwp_pid_lock_take();
    if (pid == 0)
        lwp = lwp_self();
    else
        lwp = lwp_from_pid_locked(pid);

    if (lwp)
    {
#ifdef RT_USING_SMP
        rt_list_t *list;

        lwp->bind_cpu = cpu;
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu);
        }
#endif
        ret = 0;
    }
    lwp_pid_lock_release();
    return ret;
}

int lwp_setaffinity(pid_t pid, int cpu)
{
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    ret = _lwp_setaffinity(pid, cpu);
    return ret;
}

#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif