lwp_pid.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author    Notes
 * 2019-10-16     zhangjun  first version
 * 2021-02-20     lizhirui  fix warning
 * 2023-06-26     shell     clear ref to parent on waitpid()
 *                          Remove recycling of lwp on waitpid() and leave it to defunct routine
 * 2023-07-27     shell     Move the detach of children process on parent exit to lwp_terminate.
 *                          Make lwp_from_pid locked by caller to avoid possible use-after-free
 *                          error
 * 2023-10-27     shell     Format codes of sys_exit(). Fix the data racing where lock is missed
 *                          Add reference on pid/tid, so the resource is not freed while using.
 * 2024-01-25     shell     porting to new sched API
 */

/* includes scheduler related API */
#define __RT_IPC_SOURCE__

#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "lwp.pid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h>      /* rename() */
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */

#include "lwp_internal.h"
#include "tty.h"

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;
static struct rt_mutex pid_mtx;

int lwp_pid_init(void)
{
    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
    return 0;
}

void lwp_pid_lock_take(void)
{
    LWP_DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* should never fail */
    RT_ASSERT(rc == RT_EOK);
}

void lwp_pid_lock_release(void)
{
    /* should never fail */
    if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
        RT_ASSERT(0);
}

struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}
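/**
 * Allocate a free pid while the caller holds the pid lock: reuse a node from
 * the free list (or take a fresh slot from lwp_pid_ary), then search the AVL
 * tree for an unused pid starting after current_pid and wrapping around to 1.
 * Returns 0 when no pid is available.
 */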
static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }

    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }

    return pid;
}

static void lwp_pid_put_locked(pid_t pid)
{
    struct lwp_avl_struct *p;

    if (pid == 0)
    {
        return;
    }

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
}

void lwp_pid_put(struct rt_lwp *lwp)
{
    lwp_pid_lock_take();
    lwp_pid_put_locked(lwp->pid);
    lwp_pid_lock_release();

    /* reset pid field */
    lwp->pid = 0;
    /* clear reference */
    lwp_ref_dec(lwp);
}

static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
{
    struct lwp_avl_struct *p;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
        lwp_ref_inc(lwp);
    }
}

static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;
        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}
void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}

int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_atomic_add(&object->lwp_ref_count, 1);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }

    return ret;
}

static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }

    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);

    return ret;
}

rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }

    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);

    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
rt_lwp_t lwp_create(rt_base_t flags)
{
    pid_t pid;
    rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));

    if (new_lwp)
    {
        /* minimal setup of lwp object */
        new_lwp->session = -1;
        new_lwp->ref = 1;
#ifdef RT_USING_SMP
        new_lwp->bind_cpu = RT_CPUS_NR;
#endif
        rt_list_init(&new_lwp->wait_list);
        rt_list_init(&new_lwp->t_grp);
        rt_list_init(&new_lwp->timer);
        lwp_user_object_lock_init(new_lwp);
        rt_wqueue_init(&new_lwp->wait_queue);
        lwp_signal_init(&new_lwp->signal);
        rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);

        /* lwp with pid */
        if (flags & LWP_CREATE_FLAG_ALLOC_PID)
        {
            lwp_pid_lock_take();
            pid = lwp_pid_get_locked();
            if (pid == 0)
            {
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("pid slot is full!\n");
            }
            else
            {
                new_lwp->pid = pid;
                lwp_pid_set_lwp_locked(pid, new_lwp);
            }
            lwp_pid_lock_release();
        }
    }

    LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
    return new_lwp;
}

/** when the reference count is 0, an lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when its reference count is cleared
     *
     * Note: Critical Section
     * - lwp (RW. there is no other writer/reader competing with lwp_free,
     *   since all references have been cleared)
     */
    LOG_D("lwp free: %p", lwp);

    LWP_LOCK(lwp);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    LWP_UNLOCK(lwp);
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /**
     * pid must have been released before entering lwp_free(),
     * otherwise this is a data race
     */
    RT_ASSERT(lwp->pid == 0);
    rt_free(lwp);
}
rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
    /**
     * Note: the tid tree always holds a reference to the thread, hence the
     * tid must be released before cleanup of the thread
     */
    lwp_tid_put(thread->tid);
    thread->tid = 0;

    LWP_LOCK(lwp);
    rt_list_remove(&thread->sibling);
    LWP_UNLOCK(lwp);

    rt_thread_delete(thread);
    rt_schedule();
    while (1) ;
}

rt_inline void _clear_child_tid(rt_thread_t thread)
{
    if (thread->clear_child_tid)
    {
        int t = 0;
        int *clear_child_tid = thread->clear_child_tid;

        thread->clear_child_tid = RT_NULL;
        lwp_put_to_user(clear_child_tid, &t, sizeof t);
        sys_futex(clear_child_tid, FUTEX_WAKE | FUTEX_PRIVATE, 1, RT_NULL, RT_NULL, 0);
    }
}

void lwp_exit(rt_lwp_t lwp, rt_base_t status)
{
    rt_thread_t thread;

    if (!lwp)
    {
        LOG_W("%s: lwp should not be null", __func__);
        return;
    }

    thread = rt_thread_self();
    RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
    LOG_D("process(lwp.pid=%d) exit", lwp->pid);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    /**
     * Brief: only one thread should call exit_group(),
     * but we cannot ensure that at run-time
     */
    lwp->lwp_ret = LWP_CREATE_STAT(status);
    LWP_UNLOCK(lwp);

    lwp_terminate(lwp);
#else
    main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (main_thread == tid)
    {
        rt_thread_t sub_thread;
        rt_list_t *list;

        lwp_terminate(lwp);

        /* delete all subthreads */
        while ((list = tid->sibling.prev) != &lwp->t_grp)
        {
            sub_thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_list_remove(&sub_thread->sibling);
            rt_thread_delete(sub_thread);
        }
        lwp->lwp_ret = value;
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}

void lwp_thread_exit(rt_thread_t thread, rt_base_t status)
{
    rt_thread_t header_thr;
    struct rt_lwp *lwp;

    LOG_D("%s", __func__);

    RT_ASSERT(thread == rt_thread_self());
    lwp = (struct rt_lwp *)thread->lwp;
    RT_ASSERT(lwp != RT_NULL);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
    {
        lwp->lwp_ret = LWP_CREATE_STAT(status);
        LWP_UNLOCK(lwp);

        lwp_terminate(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}
static void _pop_tty(rt_lwp_t lwp)
{
    if (!lwp->background)
    {
        struct termios *old_stdin_termios = get_old_termios();
        struct rt_lwp *old_lwp = NULL;

        if (lwp->session == -1)
        {
            tcsetattr(1, 0, old_stdin_termios);
        }
        if (lwp->tty != RT_NULL)
        {
            rt_mutex_take(&lwp->tty->lock, RT_WAITING_FOREVER);
            if (lwp->tty->foreground == lwp)
            {
                old_lwp = tty_pop(&lwp->tty->head, RT_NULL);
                lwp->tty->foreground = old_lwp;
            }
            else
            {
                tty_pop(&lwp->tty->head, lwp);
            }
            rt_mutex_release(&lwp->tty->lock);

            LWP_LOCK(lwp);
            lwp->tty = RT_NULL;
            LWP_UNLOCK(lwp);
        }
    }
}

/**
 * @note the reference is not for synchronization but for releasing the
 *       resource; synchronization is done through the lwp & pid locks
 */
int lwp_ref_inc(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, 1);
    LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);

    return ref;
}

int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    if (ref == 1)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* reference must be a positive integer */
        RT_ASSERT(ref > 1);
    }

    return ref;
}
struct rt_lwp* lwp_from_pid_locked(pid_t pid)
{
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }

    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}

char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    lwp_pid_lock_release();

    return process_name;
}

pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_sched_lock_level_t slvl;

    lwp_pid_lock_take();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                rt_sched_lock(&slvl);
                if (!(rt_sched_thread_get_stat(main_thread) == RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
                rt_sched_unlock(slvl);
            }
        }
    }
    lwp_pid_lock_release();

    return pid;
}

int lwp_getpid(void)
{
    return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}
/**
 * @brief Wait for a child lwp to terminate. Do the essential recycling. Set up
 *        the status code for the user
 */
static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *status,
                                      int options)
{
    sysret_t error;
    int lwp_stat;
    int terminated;

    if (!child)
    {
        error = -RT_ERROR;
    }
    else
    {
        /**
         * Note: Critical Section
         * - child lwp (RW. This will modify its parent if valid)
         */
        LWP_LOCK(child);
        if (child->terminated)
        {
            error = child->pid;
        }
        else if (rt_list_isempty(&child->wait_list))
        {
            /**
             * Note: only one thread can wait on wait_list.
             * don't reschedule before the mutex is unlocked
             */
            rt_enter_critical();

            error = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
            if (error == 0)
            {
                rt_list_insert_before(&child->wait_list, &RT_THREAD_LIST_NODE(cur_thr));
                LWP_UNLOCK(child);

                rt_set_errno(RT_EINTR);
                rt_exit_critical();
                rt_schedule();

                /**
                 * Since the parent holds a reference to the child, this lock
                 * will not be freed before the parent drops that reference.
                 */
                LWP_LOCK(child);
                error = rt_get_errno();

                if (error == RT_EINTR)
                {
                    error = -EINTR;
                }
                else if (error != RT_EOK)
                {
                    LOG_W("%s: unexpected error code %ld", __func__, error);
                }
                else
                {
                    error = child->pid;
                }
            }
            else
                rt_exit_critical();
        }
        else
            error = -RT_EINTR;

        lwp_stat = child->lwp_ret;
        terminated = child->terminated;
        LWP_UNLOCK(child);

        if (error > 0)
        {
            if (terminated)
            {
                LOG_D("func %s: child detached", __func__);
                /** Reap the child process if it has exited */
                lwp_pid_put(child);
                lwp_children_unregister(self_lwp, child);
            }
            if (status)
                lwp_data_put(self_lwp, status, &lwp_stat, sizeof(*status));
        }
    }

    return error;
}
pid_t waitpid(pid_t pid, int *status, int options) __attribute__((alias("lwp_waitpid")));
pid_t lwp_waitpid(const pid_t pid, int *status, int options)
{
    pid_t rc = -1;
    struct rt_thread *thread;
    struct rt_lwp *child;
    struct rt_lwp *self_lwp;

    thread = rt_thread_self();
    self_lwp = lwp_self();

    if (!self_lwp)
    {
        rc = -RT_EINVAL;
    }
    else
    {
        if (pid > 0)
        {
            lwp_pid_lock_take();
            child = lwp_from_pid_locked(pid);
            /* guard against an unknown pid or a process that is not our child */
            if (!child || child->parent != self_lwp)
                rc = -RT_ERROR;
            else
                rc = RT_EOK;
            lwp_pid_lock_release();

            if (rc == RT_EOK)
                rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
        }
        else if (pid == -1)
        {
            LWP_LOCK(self_lwp);
            child = self_lwp->first_child;
            LWP_UNLOCK(self_lwp);
            RT_ASSERT(!child || child->parent == self_lwp);

            rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
        }
        else
        {
            /* not supported yet */
            rc = -RT_EINVAL;
        }
    }

    if (rc > 0)
    {
        LOG_D("%s: recycle child id %ld (status=0x%x)", __func__, (long)rc, status ? *status : 0);
    }
    else
    {
        RT_ASSERT(rc != 0);
        LOG_D("%s: wait failed with code %ld", __func__, (long)rc);
    }

    return rc;
}
#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
#endif /*RT_USING_SMP*/

    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               RT_SCHED_PRIV(thread).remaining_tick,
               thread->error);
#endif
}

long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /*RT_USING_SMP*/

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);

            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_spin_lock_irqsave(&thread->spinlock);
                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_spin_unlock_irqrestore(&thread->spinlock, level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_spin_unlock_irqrestore(&thread->spinlock, level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);

static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = SIGKILL;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_pid_lock_take();
    lwp_signal_kill(lwp_from_pid_locked(pid), sig, SI_USER, 0);
    lwp_pid_lock_release();
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_pid_lock_take();
        lwp_signal_kill(lwp_from_pid_locked(pid), SIGKILL, SI_USER, 0);
        lwp_pid_lock_release();
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif
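/**
 * Check whether an exit request has been posted to the current thread and, if
 * so, atomically move it from the TRIGGERED to the IN_PROCESS state so the
 * request is handled only once. Returns non-zero when the caller must exit;
 * kernel threads (no lwp attached) always return 0.
 */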
int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();
    rt_base_t expected = LWP_EXIT_REQUEST_TRIGGERED;

    if (!thread->lwp)
    {
        return 0;
    }

    return rt_atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                             LWP_EXIT_REQUEST_IN_PROCESS);
}

static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
static void _resr_cleanup(struct rt_lwp *lwp);

void lwp_terminate(struct rt_lwp *lwp)
{
    if (!lwp)
    {
        /* kernel threads are not supported here */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    LWP_LOCK(lwp);

    if (!lwp->terminated)
    {
        /* stop the receiving of signals */
        lwp->terminated = RT_TRUE;
        LWP_UNLOCK(lwp);

        _wait_sibling_exit(lwp, rt_thread_self());
        _resr_cleanup(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}

static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
    rt_sched_lock_level_t slvl;
    rt_list_t *list;
    rt_thread_t thread;
    rt_base_t expected = LWP_EXIT_REQUEST_NONE;

    /* broadcast exit request for sibling threads */
    LWP_LOCK(lwp);
    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        thread = rt_list_entry(list, struct rt_thread, sibling);

        rt_atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                          LWP_EXIT_REQUEST_TRIGGERED);

        rt_sched_lock(&slvl);
        /* don't release, otherwise the thread may have been freed */
        if (rt_sched_thread_is_suspended(thread))
        {
            thread->error = RT_EINTR;
            rt_sched_unlock(slvl);
            rt_thread_wakeup(thread);
        }
        else
        {
            rt_sched_unlock(slvl);
        }
    }
    LWP_UNLOCK(lwp);

    while (1)
    {
        int subthread_is_terminated;
        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - sibling list of lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_sched_lock_level_t slvl;
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check whether all subthreads are in init state */
            for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                rt_sched_lock(&slvl);
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
                {
                    rt_sched_unlock(slvl);
                    all_subthread_in_init = 0;
                    break;
                }
                else
                {
                    rt_sched_unlock(slvl);
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthreads */
                while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the
                     *   thread control block, it must ensure no one else can
                     *   access the thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}
static void _resr_cleanup(struct rt_lwp *lwp)
{
    LWP_LOCK(lwp);
    lwp_signal_detach(&lwp->signal);

    /**
     * @brief Detach children from the lwp
     *
     * @note Critical Section
     * - the lwp (RW. Release lwp)
     * - the pid resource manager (RW. Release the pid)
     */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;

        /** @note safe since the slist node has been released */
        LWP_UNLOCK(lwp);

        LWP_LOCK(child);
        if (child->terminated)
        {
            lwp_pid_put(child);
        }
        else
        {
            child->sibling = RT_NULL;
            /* info: this may cause an orphan lwp */
            child->parent = RT_NULL;
        }
        LWP_UNLOCK(child);
        lwp_ref_dec(child);
        lwp_ref_dec(lwp);

        LWP_LOCK(lwp);
    }
    LWP_UNLOCK(lwp);

    _pop_tty(lwp);

    /**
     * @brief Wake up the parent if it is waiting for this lwp; otherwise a
     *        signal is sent to the parent
     *
     * @note Critical Section
     * - the parent lwp (RW.)
     */
    LWP_LOCK(lwp);
    if (lwp->parent)
    {
        struct rt_thread *thread;

        LWP_UNLOCK(lwp);
        if (!rt_list_isempty(&lwp->wait_list))
        {
            thread = RT_THREAD_LIST_NODE_ENTRY(lwp->wait_list.next);
            thread->error = RT_EOK;
            thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
            rt_thread_resume(thread);
        }
        else
        {
            /* a child cannot detach itself and must wait for the parent to reap it */
            lwp_signal_kill(lwp->parent, SIGCHLD, CLD_EXITED, 0);
        }
    }
    else
    {
        LWP_UNLOCK(lwp);

        /* INFO: an orphan has no parent to reap its pid */
        lwp_pid_put(lwp);
    }

    LWP_LOCK(lwp);
    if (lwp->fdt.fds != RT_NULL)
    {
        struct dfs_file **fds;

        /* auto clean fds */
        __exit_files(lwp);
        fds = lwp->fdt.fds;
        lwp->fdt.fds = RT_NULL;
        LWP_UNLOCK(lwp);

        rt_free(fds);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
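/**
 * Bind every thread of the target process (pid == 0 means the calling
 * process) to the given CPU while the pid lock is held. Returns 0 on
 * success, -1 if the pid cannot be resolved.
 */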
static int _lwp_setaffinity(pid_t pid, int cpu)
{
    struct rt_lwp *lwp;
    int ret = -1;

    lwp_pid_lock_take();
    if (pid == 0)
        lwp = lwp_self();
    else
        lwp = lwp_from_pid_locked(pid);

    if (lwp)
    {
#ifdef RT_USING_SMP
        rt_list_t *list;

        lwp->bind_cpu = cpu;
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu);
        }
#endif
        ret = 0;
    }
    lwp_pid_lock_release();

    return ret;
}

int lwp_setaffinity(pid_t pid, int cpu)
{
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    ret = _lwp_setaffinity(pid, cpu);

    return ret;
}

#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif