lwp_pid.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 * 2023-06-26     shell        clear ref to parent on waitpid();
 *                             remove recycling of lwp on waitpid() and leave it to defunct routine
 * 2023-07-27     shell        move the detach of children process on parent exit to lwp_terminate;
 *                             make lwp_from_pid locked by caller to avoid possible use-after-free
 *                             error
 * 2023-10-27     shell        format codes of sys_exit(); fix the data racing where lock is missed;
 *                             add reference on pid/tid, so the resource is not freed while using;
 *                             add support for waitpid(options=WNOHANG)
 * 2023-11-16     xqyjlj       fix the case where pid is 0
 * 2023-11-17     xqyjlj       add process group and session support
 * 2023-11-24     shell        support waitpid(options=WNOTRACED|WCONTINUED);
 *                             reimplement waitpid with a wait queue, and fix the problem with
 *                             waitpid(pid=-1)/waitpid(pid=-pgid)/waitpid(pid=0) that only one
 *                             process could be traced while the waiter was suspended
 * 2024-01-25     shell        port to new sched API
 */

/* includes scheduler related API */
#define __RT_IPC_SOURCE__

/* for waitpid, we are compatible with the GNU extension */
#define _GNU_SOURCE

#define DBG_TAG "lwp.pid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "lwp_internal.h"

#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h>      /* rename() */
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */
#include <stdatomic.h>

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#ifdef RT_USING_DFS_PROCFS
#include "proc.h"
#include "procfs.h"
#endif

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);
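
/*
 * Explanatory note: the compile-time assertion above relies on array sizing.
 * When the condition `x` is false (0), `2 * (x) - 1` evaluates to -1 and the
 * struct declaration fails to compile; when `x` is true (1), the size is 1
 * and the declaration is harmless. For instance:
 *
 *   PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
 *   // expands to: struct assert_pid_min_nr {char ary[2 * (RT_LWP_MAX_NR > 1) - 1];};
 */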

static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;
static struct rt_mutex pid_mtx;

int lwp_pid_init(void)
{
    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
    return 0;
}

void lwp_pid_lock_take(void)
{
    LWP_DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* should never fail */
    RT_ASSERT(rc == RT_EOK);
}

void lwp_pid_lock_release(void)
{
    /* should never fail */
    if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
        RT_ASSERT(0);
}

struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}

static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }
    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    return pid;
}
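
/*
 * Explanatory note: lwp_pid_get_locked() allocates PIDs round-robin. It
 * probes the AVL tree starting from `current_pid + 1` for the first unused
 * key, wrapping back to 1 when the upper range is exhausted; e.g. with
 * current_pid == 9999 and PID 1 free, the next allocation wraps and returns
 * 1. A return value of 0 means no PID slot is available, which callers must
 * treat as failure.
 */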

static void lwp_pid_put_locked(pid_t pid)
{
    struct lwp_avl_struct *p;

    if (pid == 0)
    {
        return;
    }

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
}

#ifdef RT_USING_DFS_PROCFS
rt_inline void _free_proc_dentry(rt_lwp_t lwp)
{
    char pid_str[64] = {0};

    rt_snprintf(pid_str, 64, "%d", lwp->pid);
    pid_str[63] = 0;
    proc_remove_dentry(pid_str, 0);
}
#else
#define _free_proc_dentry(lwp)
#endif

void lwp_pid_put(struct rt_lwp *lwp)
{
    _free_proc_dentry(lwp);

    lwp_pid_lock_take();
    lwp_pid_put_locked(lwp->pid);
    lwp_pid_lock_release();

    /* reset pid field */
    lwp->pid = 0;
    /* clear reference */
    lwp_ref_dec(lwp);
}

static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
{
    struct lwp_avl_struct *p;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
        lwp_ref_inc(lwp);

#ifdef RT_USING_DFS_PROCFS
        if (pid)
        {
            proc_pid(pid);
        }
#endif
    }
}

static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}

void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}

int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_atomic_add(&object->lwp_ref_count, 1);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}
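
/*
 * Usage sketch (illustrative only, not part of this module): a kernel object
 * created on behalf of a process can be registered so that it is reclaimed
 * automatically on process exit. The semaphore name below is hypothetical.
 *
 *   rt_sem_t sem = rt_sem_create("usr_sem", 0, RT_IPC_FLAG_PRIO);
 *   if (sem)
 *   {
 *       lwp_user_object_add(lwp, (rt_object_t)sem);
 *   }
 *   // lwp_user_object_clear() later reclaims it via rt_sem_delete()
 */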

static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}

rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}

rt_lwp_t lwp_create(rt_base_t flags)
{
    pid_t pid;
    rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));

    if (new_lwp)
    {
        /* minimal setup of lwp object */
        new_lwp->ref = 1;
#ifdef RT_USING_SMP
        new_lwp->bind_cpu = RT_CPUS_NR;
#endif
        new_lwp->exe_file = RT_NULL;
        rt_list_init(&new_lwp->t_grp);
        rt_list_init(&new_lwp->pgrp_node);
        rt_list_init(&new_lwp->timer);
        lwp_user_object_lock_init(new_lwp);
        rt_wqueue_init(&new_lwp->wait_queue);
        rt_wqueue_init(&new_lwp->waitpid_waiters);
        lwp_signal_init(&new_lwp->signal);
        rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);

        if (flags & LWP_CREATE_FLAG_NOTRACE_EXEC)
            new_lwp->did_exec = RT_TRUE;

        /* lwp with pid */
        if (flags & LWP_CREATE_FLAG_ALLOC_PID)
        {
            lwp_pid_lock_take();
            pid = lwp_pid_get_locked();
            if (pid == 0)
            {
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("%s: pid slot is full", __func__);
            }
            else
            {
                new_lwp->pid = pid;
                lwp_pid_set_lwp_locked(pid, new_lwp);
            }
            lwp_pid_lock_release();
        }
        rt_memset(&new_lwp->rt_rusage, 0, sizeof(new_lwp->rt_rusage));

        if (flags & LWP_CREATE_FLAG_INIT_USPACE)
        {
            rt_err_t error = lwp_user_space_init(new_lwp, 0);
            if (error)
            {
                lwp_pid_put(new_lwp);
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("%s: failed to initialize user space", __func__);
            }
        }
    }
    LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
    return new_lwp;
}
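
/*
 * Usage sketch (illustrative only): a typical caller allocates a process
 * object together with a fresh PID and balances the creation reference with
 * lwp_ref_dec() once it no longer needs the handle.
 *
 *   rt_lwp_t lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID);
 *   if (lwp)
 *   {
 *       // ... load the executable, set up the main thread ...
 *       lwp_ref_dec(lwp); // drop the creation reference when done
 *   }
 */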

/** when the reference drops to 0, an lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
    rt_processgroup_t group = RT_NULL;

    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when its reference is cleared
     *
     * Note: Critical Section
     * - lwp (RW. there is no other writer/reader competing with lwp_free,
     *   since all the references are cleared)
     */
    LOG_D("lwp free: %p", lwp);

    rt_free(lwp->exe_file);
    group = lwp_pgrp_find(lwp_pgid_get_byprocess(lwp));
    if (group)
        lwp_pgrp_remove(group, lwp);

    LWP_LOCK(lwp);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = 0;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    LWP_UNLOCK(lwp);
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /**
     * the pid must have been released before entering lwp_free(),
     * otherwise this is a data race
     */
    RT_ASSERT(lwp->pid == 0);
    rt_free(lwp);
}
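
/*
 * Lifecycle note: within this file, lwp_free() is only reached through
 * lwp_ref_dec() below, when the reference count drops from 1 to 0; callers
 * are not expected to invoke it directly on a live process.
 */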

rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
    LWP_LOCK(lwp);
    lwp->rt_rusage.ru_stime.tv_sec += thread->system_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_stime.tv_usec += thread->system_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    lwp->rt_rusage.ru_utime.tv_sec += thread->user_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_utime.tv_usec += thread->user_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    rt_list_remove(&thread->sibling);
    LWP_UNLOCK(lwp);
    lwp_futex_exit_robust_list(thread);

    /**
     * Note: the tid tree always holds a reference to the thread, hence the
     * tid must be released before the cleanup of the thread
     */
    lwp_tid_put(thread->tid);
    thread->tid = 0;

    rt_thread_delete(thread);
    rt_schedule();
    while (1) ;
}

rt_inline void _clear_child_tid(rt_thread_t thread)
{
    if (thread->clear_child_tid)
    {
        int t = 0;
        int *clear_child_tid = thread->clear_child_tid;

        thread->clear_child_tid = RT_NULL;
        lwp_put_to_user(clear_child_tid, &t, sizeof t);
        sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
    }
}

void lwp_exit(rt_lwp_t lwp, lwp_status_t status)
{
    rt_thread_t thread;

    if (!lwp)
    {
        LOG_W("%s: lwp should not be null", __func__);
        return ;
    }

    thread = rt_thread_self();
    RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
    LOG_D("process(lwp.pid=%d) exit", lwp->pid);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    /**
     * Brief: only one thread should call exit_group(),
     * but we cannot ensure that at run time
     */
    lwp->lwp_status = status;
    LWP_UNLOCK(lwp);

    lwp_terminate(lwp);
#else
    /* legacy non-MMU path: the exiting thread compares against the main thread */
    rt_thread_t main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (main_thread == thread)
    {
        rt_thread_t sub_thread;
        rt_list_t *list;

        lwp_terminate(lwp);

        /* delete all sub-threads */
        while ((list = thread->sibling.prev) != &lwp->t_grp)
        {
            sub_thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_list_remove(&sub_thread->sibling);
            rt_thread_delete(sub_thread);
        }
        lwp->lwp_ret = status;
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}

void lwp_thread_exit(rt_thread_t thread, int status)
{
    rt_thread_t header_thr;
    struct rt_lwp *lwp;

    LOG_D("%s", __func__);

    RT_ASSERT(thread == rt_thread_self());
    lwp = (struct rt_lwp *)thread->lwp;
    RT_ASSERT(lwp != RT_NULL);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
    {
        /**
         * if the last thread exits, treat it as a normal process exit.
         * This is reasonable since a trap event exits through lwp_exit()
         */
        lwp->lwp_status = LWP_CREATE_STAT_EXIT(status);
        LWP_UNLOCK(lwp);

        lwp_terminate(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}

/** @note the reference is not for synchronization, but for the release of
 * resources. the synchronization is done through the lwp & pid locks */
int lwp_ref_inc(struct rt_lwp *lwp)
{
    int ref;
    ref = rt_atomic_add(&lwp->ref, 1);
    LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);

    return ref;
}

int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    if (ref == 1)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* reference must be a positive integer */
        RT_ASSERT(ref > 1);
    }

    return ref;
}

struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid)
{
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }

    return lwp;
}

struct rt_lwp* lwp_from_pid_locked(pid_t pid)
{
    struct rt_lwp* lwp;
    lwp = pid ? lwp_from_pid_raw_locked(pid) : lwp_self();
    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}

char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    lwp_pid_lock_release();

    return process_name;
}

pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_sched_lock_level_t slvl;

    lwp_pid_lock_take();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                rt_sched_lock(&slvl);
                if (!(rt_sched_thread_get_stat(main_thread) == RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
                rt_sched_unlock(slvl);
            }
        }
    }
    lwp_pid_lock_release();
    return pid;
}

int lwp_getpid(void)
{
    rt_lwp_t lwp = lwp_self();
    return lwp ? lwp->pid : 1;
    // return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}

rt_inline void _update_ru(struct rt_lwp *child, struct rt_lwp *self_lwp, struct rusage *uru)
{
    struct rusage rt_rusage;

    if (uru != RT_NULL)
    {
        rt_rusage.ru_stime.tv_sec = child->rt_rusage.ru_stime.tv_sec;
        rt_rusage.ru_stime.tv_usec = child->rt_rusage.ru_stime.tv_usec;
        rt_rusage.ru_utime.tv_sec = child->rt_rusage.ru_utime.tv_sec;
        rt_rusage.ru_utime.tv_usec = child->rt_rusage.ru_utime.tv_usec;
        lwp_data_put(self_lwp, uru, &rt_rusage, sizeof(*uru));
    }
}

/* do the statistical summary and reap the child if necessary */
static rt_err_t _stats_and_reap_child(rt_lwp_t child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *ustatus,
                                      int options, struct rusage *uru)
{
    int lwp_stat = child->lwp_status;

    /* report statistical data to the process */
    _update_ru(child, self_lwp, uru);

    if (child->terminated && !(options & WNOWAIT))
    {
        /** reap the child process if it has exited */
        LOG_D("func %s: child detached", __func__);

        lwp_pid_put(child);
        lwp_children_unregister(self_lwp, child);
    }

    if (ustatus)
        lwp_data_put(self_lwp, ustatus, &lwp_stat, sizeof(*ustatus));

    return RT_EOK;
}

#define HAS_CHILD_BUT_NO_EVT (-1024)

/* check if the process has already terminated */
static sysret_t _query_event_from_lwp(rt_lwp_t child, rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                      int options, int *status)
{
    sysret_t rc;

    LWP_LOCK(child);
    if (child->terminated)
    {
        rc = child->pid;
    }
    else if ((options & WSTOPPED) && child->jobctl_stopped && !child->wait_reap_stp)
    {
        child->wait_reap_stp = 1;
        rc = child->pid;
    }
    else
    {
        rc = HAS_CHILD_BUT_NO_EVT;
    }
    LWP_UNLOCK(child);

    LOG_D("%s(child_pid=%d ('%s'), stopped=%d) => %d", __func__, child->pid, child->cmd, child->jobctl_stopped, rc);
    return rc;
}

/* verify that the process is a child, and reap it */
static pid_t _verify_child_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                    pid_t wait_pid, int options, int *ustatus,
                                    struct rusage *uru)
{
    sysret_t rc;
    struct rt_lwp *child;

    /* check if pid references a valid child */
    lwp_pid_lock_take();
    child = lwp_from_pid_locked(wait_pid);
    if (!child)
        rc = -EINVAL;
    else if (child->parent != self_lwp)
        rc = -ESRCH;
    else
        rc = wait_pid;
    lwp_pid_lock_release();

    if (rc > 0)
    {
        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
        {
            _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
        }
    }
    return rc;
}

/* try to reap any child */
static pid_t _reap_any_child_pid(rt_thread_t cur_thr, rt_lwp_t self_lwp, pid_t pair_pgid,
                                 int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc = -ECHILD;
    struct rt_lwp *child;

    LWP_LOCK(self_lwp);
    child = self_lwp->first_child;

    /* find an exited child, if any */
    while (child)
    {
        /* note: test the pgid inline so the iterator always advances */
        if (!pair_pgid || child->pgid == pair_pgid)
        {
            rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
            if (rc > 0)
                break;
        }
        child = child->sibling;
    }
    LWP_UNLOCK(self_lwp);

    if (rc > 0)
    {
        _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
    }
    return rc;
}

rt_err_t lwp_waitpid_kick(rt_lwp_t parent, rt_lwp_t self_lwp)
{
    /* the waker provides the message mainly through its lwp_status */
    rt_wqueue_wakeup(&parent->waitpid_waiters, self_lwp);
    return RT_EOK;
}

struct waitpid_handle {
    struct rt_wqueue_node wq_node;
    int options;
    rt_lwp_t waker_lwp;
};

/* filter the waiter and set up the IPC message if the event is accepted */
static int _waitq_filter(struct rt_wqueue_node *wait_node, void *key)
{
    int can_accept_evt = 0;
    rt_thread_t waiter = wait_node->polling_thread;
    pid_t destiny = (pid_t)wait_node->key;
    rt_lwp_t waker_lwp = key;
    struct waitpid_handle *handle;
    rt_ubase_t options;

    handle = rt_container_of(wait_node, struct waitpid_handle, wq_node);

    RT_ASSERT(waiter != RT_NULL);
    options = handle->options;

    /* filter out if the waker is not the one */
    if (destiny > 0)
    {
        /**
         * in the waitpid immediate-return routine, we have already checked
         * that pid is one of the children of the waiting thread
         */
        can_accept_evt = waker_lwp->pid == destiny;
    }
    else if (destiny == -1)
    {
        can_accept_evt = waker_lwp->parent == waiter->lwp;
    }
    else
    {
        /* destiny == 0 || destiny == -pgid */
        pid_t waiter_pgid;
        if (destiny == 0)
        {
            waiter_pgid = lwp_pgid_get_byprocess(waiter->lwp);
        }
        else
        {
            waiter_pgid = -destiny;
        }
        can_accept_evt = waiter_pgid == lwp_pgid_get_byprocess(waker_lwp);
    }

    /* filter out if the event is not desired */
    if (can_accept_evt)
    {
        if ((options & WEXITED) && waker_lwp->terminated)
            can_accept_evt = 1;
        else if ((options & WSTOPPED) && WIFSTOPPED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else if ((options & WCONTINUED) && WIFCONTINUED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else
            can_accept_evt = 0;
    }

    /* setup the message for the waiter if accepted */
    if (can_accept_evt)
        handle->waker_lwp = waker_lwp;

    /* return 0 if the event is accepted, otherwise discard it */
    return !can_accept_evt;
}
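
/*
 * Filter semantics at a glance (explanatory note), mirroring waitpid(2):
 *
 *   destiny  >  0 : accept only the child with exactly that pid
 *   destiny == -1 : accept any child of the waiter
 *   destiny ==  0 : accept any child in the waiter's process group
 *   destiny <  -1 : accept any child in the process group -destiny
 *
 * An accepted waker must additionally match one of WEXITED, WSTOPPED or
 * WCONTINUED from the waiter's options.
 */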

/* the waiter cleans up the IPC message and waits for the desired event here */
static rt_err_t _wait_for_event(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                struct waitpid_handle *handle, pid_t destiny)
{
    rt_err_t ret;

    /* current context checking */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    handle->wq_node.polling_thread = cur_thr;
    handle->wq_node.key = destiny;
    handle->wq_node.wakeup = _waitq_filter;
    handle->wq_node.wqueue = &self_lwp->waitpid_waiters;
    rt_list_init(&handle->wq_node.list);

    cur_thr->error = RT_EOK;

    LOG_D("%s(self_lwp=%d) wait for event", __func__, self_lwp->pid);

    rt_enter_critical();
    ret = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
    if (ret == RT_EOK)
    {
        rt_wqueue_add(handle->wq_node.wqueue, &handle->wq_node);
        rt_exit_critical();

        rt_schedule();

        ret = cur_thr->error;

        /**
         * the cur_thr error is a positive value, but some legacy
         * implementations use a negative one. So we check to avoid errors
         */
        ret = ret > 0 ? -ret : ret;

        /**
         * we don't rely on this actually, but we clean it up since the
         * wakeup API sets it up during operation, and leaving it would
         * cause a messy condition
         */
        handle->wq_node.wqueue->flag = RT_WQ_FLAG_CLEAN;
        rt_wqueue_remove(&handle->wq_node);
    }
    else
    {
        /* failed to suspend, return immediately with failure */
        rt_exit_critical();
    }

    return ret;
}

/* wait for the IPC event and do the cleanup if necessary */
static sysret_t _wait_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp, const pid_t pid,
                               int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc;
    struct waitpid_handle handle;
    rt_lwp_t waker;

    /* wait for SIGCHLD or other async events */
    handle.options = options;
    handle.waker_lwp = 0;
    rc = _wait_for_event(cur_thr, self_lwp, &handle, pid);

    waker = handle.waker_lwp;
    if (waker != RT_NULL)
    {
        rc = waker->pid;

        /* check out if any process exited */
        LOG_D("%s: woken up by lwp=%d", __func__, waker->pid);
        _stats_and_reap_child(waker, cur_thr, self_lwp, ustatus, options, uru);
    }
    /**
     * else if (rc != RT_EOK)
     *     unable to do a suspend, or woken up unexpectedly
     *     -> then return a failure
     */

    return rc;
}

pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru)
{
    pid_t rc = -1;
    struct rt_thread *cur_thr;
    struct rt_lwp *self_lwp;

    cur_thr = rt_thread_self();
    self_lwp = lwp_self();

    if (!cur_thr || !self_lwp)
    {
        rc = -EINVAL;
    }
    else
    {
        /* check if we are able to reap the desired child immediately */
        if (pid > 0)
        {
            /* if pid is a child, then try to reap it */
            rc = _verify_child_and_reap(cur_thr, self_lwp, pid, options, status, ru);
        }
        else if (pid == -1)
        {
            /* any terminated child */
            rc = _reap_any_child_pid(cur_thr, self_lwp, 0, options, status, ru);
        }
        else
        {
            /**
             * (pid < -1 || pid == 0)
             * any terminated child with a matching pgid
             */
            pid_t pair_pgid;
            if (pid == 0)
            {
                pair_pgid = lwp_pgid_get_byprocess(self_lwp);
            }
            else
            {
                pair_pgid = -pid;
            }
            rc = _reap_any_child_pid(cur_thr, self_lwp, pair_pgid, options, status, ru);
        }

        if (rc == HAS_CHILD_BUT_NO_EVT)
        {
            if (!(options & WNOHANG))
            {
                /* otherwise, arrange a suspend and wait for an async event */
                options |= WEXITED;
                rc = _wait_and_reap(cur_thr, self_lwp, pid, options, status, ru);
            }
            else
            {
                /**
                 * POSIX.1: If waitpid() was invoked with WNOHANG set in
                 * options, it has at least one child process specified by
                 * pid for which status is not available, and status is not
                 * available for any process specified by pid, 0 is returned
                 */
                rc = 0;
            }
        }
        else
        {
            RT_ASSERT(rc != 0);
        }
    }

    LOG_D("waitpid() => %d, *status=0x%x", rc, status ? *status : 0);
    return rc;
}

pid_t waitpid(pid_t pid, int *status, int options)
{
    return lwp_waitpid(pid, status, options, RT_NULL);
}
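
/*
 * Usage sketch (illustrative only): reaping children from a shell-like
 * parent, following the POSIX calling convention implemented above.
 *
 *   int status;
 *   pid_t pid = waitpid(-1, &status, WNOHANG); // poll for any child
 *   if (pid > 0 && WIFEXITED(status))
 *   {
 *       rt_kprintf("child %d exited with %d\n", pid, WEXITSTATUS(status));
 *   }
 *   else if (pid == 0)
 *   {
 *       // children exist, but none has changed state yet
 *   }
 */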

#ifdef RT_USING_FINSH
/* copied from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
#endif /* RT_USING_SMP */

    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready  ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init   ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close  ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;

    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               RT_SCHED_PRIV(thread).remaining_tick,
               thread->error);
#endif
}

long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /* RT_USING_SMP */

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get the thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);

            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_spin_lock_irqsave(&thread->spinlock);
                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_spin_unlock_irqrestore(&thread->spinlock, level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_spin_unlock_irqrestore(&thread->spinlock, level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf("     %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);

static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = SIGKILL;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_pid_lock_take();
    lwp_signal_kill(lwp_from_pid_raw_locked(pid), sig, SI_USER, 0);
    lwp_pid_lock_release();
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_pid_lock_take();
        lwp_signal_kill(lwp_from_pid_raw_locked(pid), SIGKILL, SI_USER, 0);
        lwp_pid_lock_release();
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif

int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();
    rt_size_t expected = LWP_EXIT_REQUEST_TRIGGERED;

    if (!thread->lwp)
    {
        return 0;
    }

    return atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                          LWP_EXIT_REQUEST_IN_PROCESS);
}

static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
static void _resr_cleanup(struct rt_lwp *lwp);

void lwp_terminate(struct rt_lwp *lwp)
{
    if (!lwp)
    {
        /* kernel threads are not supported */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    LWP_LOCK(lwp);

    if (!lwp->terminated)
    {
        /* stop the reception of signals */
        lwp->terminated = RT_TRUE;
        LWP_UNLOCK(lwp);

        _wait_sibling_exit(lwp, rt_thread_self());
        _resr_cleanup(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}

static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
    rt_sched_lock_level_t slvl;
    rt_list_t *list;
    rt_thread_t thread;
    rt_size_t expected = LWP_EXIT_REQUEST_NONE;

    /* broadcast the exit request to sibling threads */
    LWP_LOCK(lwp);
    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        thread = rt_list_entry(list, struct rt_thread, sibling);

        atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                       LWP_EXIT_REQUEST_TRIGGERED);

        rt_sched_lock(&slvl);
        /* don't release it here, otherwise the thread may have been freed */
        if (rt_sched_thread_is_suspended(thread))
        {
            thread->error = RT_EINTR;
            rt_sched_unlock(slvl);
            rt_thread_wakeup(thread);
        }
        else
        {
            rt_sched_unlock(slvl);
        }
    }
    LWP_UNLOCK(lwp);

    while (1)
    {
        int subthread_is_terminated;
        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - sibling list of lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_sched_lock_level_t slvl;
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check whether all sub-threads are in init state */
            for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                rt_sched_lock(&slvl);
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
                {
                    rt_sched_unlock(slvl);
                    all_subthread_in_init = 0;
                    break;
                }
                else
                {
                    rt_sched_unlock(slvl);
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all sub-threads */
                while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the
                     *   thread control block, it must ensure that no one
                     *   else can access the thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}

static void _notify_parent(rt_lwp_t lwp)
{
    int si_code;
    int signo_or_exitcode;
    lwp_siginfo_ext_t ext;
    lwp_status_t lwp_status = lwp->lwp_status;
    rt_lwp_t parent = lwp->parent;

    if (WIFSIGNALED(lwp_status))
    {
        si_code = (lwp_status & LWP_COREDUMP_FLAG) ? CLD_DUMPED : CLD_KILLED;
        signo_or_exitcode = WTERMSIG(lwp_status);
    }
    else
    {
        si_code = CLD_EXITED;
        signo_or_exitcode = WEXITSTATUS(lwp->lwp_status);
    }

    lwp_waitpid_kick(parent, lwp);

    ext = rt_malloc(sizeof(struct lwp_siginfo));
    if (ext)
    {
        rt_thread_t cur_thr = rt_thread_self();
        ext->sigchld.status = signo_or_exitcode;
        ext->sigchld.stime = cur_thr->system_time;
        ext->sigchld.utime = cur_thr->user_time;
    }
    lwp_signal_kill(parent, SIGCHLD, si_code, ext);
}

static void _resr_cleanup(struct rt_lwp *lwp)
{
    lwp_jobctrl_on_exit(lwp);

    LWP_LOCK(lwp);
    lwp_signal_detach(&lwp->signal);

    /**
     * @brief Detach children from lwp
     *
     * @note Critical Section
     * - the lwp (RW. Release lwp)
     * - the pid resource manager (RW. Release the pid)
     */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;

        /** @note safe since the slist node is released */
        LWP_UNLOCK(lwp);

        LWP_LOCK(child);
        if (child->terminated)
        {
            lwp_pid_put(child);
        }
        else
        {
            child->sibling = RT_NULL;
            /* info: this may cause an orphan lwp */
            child->parent = RT_NULL;
        }
        LWP_UNLOCK(child);

        lwp_ref_dec(child);
        lwp_ref_dec(lwp);

        LWP_LOCK(lwp);
    }
    LWP_UNLOCK(lwp);

    /**
     * @brief Wake up the parent if it's waiting for this lwp; otherwise a
     * signal will be sent to the parent
     *
     * @note Critical Section
     * - the parent lwp (RW.)
     */
    LWP_LOCK(lwp);
    if (lwp->parent &&
        !lwp_sigismember(&lwp->parent->signal.sig_action_nocldwait, SIGCHLD))
    {
        /* if we successfully raced to set up lwp->terminated before the parent detached */
        LWP_UNLOCK(lwp);

        /**
         * Note: children cannot detach themselves and must wait for the
         * parent to take care of them
         */
        _notify_parent(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);

        /**
         * if the process is an orphan, it doesn't have a parent to do the
         * recycling. Otherwise, its parent had set up a flag to mask out
         * the recycling event
         */
        lwp_pid_put(lwp);
    }

    LWP_LOCK(lwp);
    if (lwp->fdt.fds != RT_NULL)
    {
        struct dfs_file **fds;

        /* auto-clean fds */
        __exit_files(lwp);
        fds = lwp->fdt.fds;
        lwp->fdt.fds = RT_NULL;
        LWP_UNLOCK(lwp);

        rt_free(fds);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}

static int _lwp_setaffinity(pid_t pid, int cpu)
{
    struct rt_lwp *lwp;
    int ret = -1;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
#ifdef RT_USING_SMP
        rt_list_t *list;

        lwp->bind_cpu = cpu;
        for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
        {
            rt_thread_t thread;

            thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_size_t)cpu);
        }
#endif
        ret = 0;
    }
    lwp_pid_lock_release();
    return ret;
}

int lwp_setaffinity(pid_t pid, int cpu)
{
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    ret = _lwp_setaffinity(pid, cpu);
    return ret;
}

#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif