lwp_pid.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-10-16     zhangjun     first version
 * 2021-02-20     lizhirui     fix warning
 * 2023-06-26     shell        clear ref to parent on waitpid();
 *                             remove recycling of lwp on waitpid() and leave it to defunct routine
 * 2023-07-27     shell        Move the detach of children process on parent exit to lwp_terminate.
 *                             Make lwp_from_pid locked by caller to avoid possible use-after-free
 *                             error
 * 2023-10-27     shell        Format codes of sys_exit(). Fix the data racing where lock is missed
 *                             Add reference on pid/tid, so the resource is not freed while using.
 *                             Add support for waitpid(options=WNOHANG)
 * 2023-11-16     xqyjlj       Fix the case where pid is 0
 * 2023-11-17     xqyjlj       add process group and session support
 * 2023-11-24     shell        Support of waitpid(options=WNOTRACED|WCONTINUED);
 *                             Reimplement the waitpid with a wait queue method, and fixup problem
 *                             with waitpid(pid=-1)/waitpid(pid=-pgid)/waitpid(pid=0) that only one
 *                             process can be traced while waiter suspend
 * 2024-01-25     shell        porting to new sched API
 */

/* includes scheduler related API */
#define __RT_IPC_SOURCE__

/* for waitpid, we are compatible to GNU extension */
#define _GNU_SOURCE

#define DBG_TAG "lwp.pid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "lwp_internal.h"

#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */
#include <stdatomic.h>

#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif

#ifdef RT_USING_DFS_PROCFS
#include "proc.h"
#include "procfs.h"
#endif

#define PID_MAX 10000

#define PID_CT_ASSERT(name, x) \
    struct assert_##name {char ary[2 * (x) - 1];}

PID_CT_ASSERT(pid_min_nr, RT_LWP_MAX_NR > 1);
PID_CT_ASSERT(pid_max_nr, RT_LWP_MAX_NR < PID_MAX);
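/*
 * A minimal sketch of how PID_CT_ASSERT works (illustrative only, not part
 * of the build): the array length 2*(x)-1 evaluates to 1 when the condition
 * x holds and to -1 when it does not, so a false condition fails to compile.
 *
 *   PID_CT_ASSERT(demo_ok,  1 == 1);  expands to char ary[1]  -> compiles
 *   PID_CT_ASSERT(demo_bad, 1 == 2);  expands to char ary[-1] -> compile error
 */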
static struct lwp_avl_struct lwp_pid_ary[RT_LWP_MAX_NR];
static struct lwp_avl_struct *lwp_pid_free_head = RT_NULL;
static int lwp_pid_ary_alloced = 0;
static struct lwp_avl_struct *lwp_pid_root = RT_NULL;
static pid_t current_pid = 0;
static struct rt_mutex pid_mtx;

int lwp_pid_init(void)
{
    rt_mutex_init(&pid_mtx, "pidmtx", RT_IPC_FLAG_PRIO);
    return 0;
}

void lwp_pid_lock_take(void)
{
    LWP_DEF_RETURN_CODE(rc);

    rc = lwp_mutex_take_safe(&pid_mtx, RT_WAITING_FOREVER, 0);
    /* should never fail */
    RT_ASSERT(rc == RT_EOK);
    RT_UNUSED(rc);
}

void lwp_pid_lock_release(void)
{
    /* should never fail */
    if (lwp_mutex_release_safe(&pid_mtx) != RT_EOK)
        RT_ASSERT(0);
}

struct lwp_avl_struct *lwp_get_pid_ary(void)
{
    return lwp_pid_ary;
}

static pid_t lwp_pid_get_locked(void)
{
    struct lwp_avl_struct *p;
    pid_t pid = 0;

    p = lwp_pid_free_head;
    if (p)
    {
        lwp_pid_free_head = (struct lwp_avl_struct *)p->avl_right;
    }
    else if (lwp_pid_ary_alloced < RT_LWP_MAX_NR)
    {
        p = lwp_pid_ary + lwp_pid_ary_alloced;
        lwp_pid_ary_alloced++;
    }

    if (p)
    {
        int found_noused = 0;

        RT_ASSERT(p->data == RT_NULL);
        for (pid = current_pid + 1; pid < PID_MAX; pid++)
        {
            if (!lwp_avl_find(pid, lwp_pid_root))
            {
                found_noused = 1;
                break;
            }
        }
        if (!found_noused)
        {
            for (pid = 1; pid <= current_pid; pid++)
            {
                if (!lwp_avl_find(pid, lwp_pid_root))
                {
                    found_noused = 1;
                    break;
                }
            }
        }
        p->avl_key = pid;
        lwp_avl_insert(p, &lwp_pid_root);
        current_pid = pid;
    }
    return pid;
}
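/*
 * Illustrative walkthrough (assumed values, not part of the build): the
 * allocator scans upward from the last PID handed out and wraps around, so
 * recently-released PIDs are not reused immediately.
 *
 *   current_pid == 5, PIDs 1..5 still in the AVL tree, 6 unused
 *     -> the first loop finds 6; current_pid becomes 6
 *   current_pid == PID_MAX - 1 and only PID 3 was released
 *     -> the first loop finds nothing; the wrap-around loop returns 3
 */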
static void lwp_pid_put_locked(pid_t pid)
{
    struct lwp_avl_struct *p;

    if (pid == 0)
    {
        return;
    }

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = RT_NULL;
        lwp_avl_remove(p, &lwp_pid_root);
        p->avl_right = lwp_pid_free_head;
        lwp_pid_free_head = p;
    }
}

#ifdef RT_USING_DFS_PROCFS
rt_inline void _free_proc_dentry(rt_lwp_t lwp)
{
    char pid_str[64] = {0};

    rt_snprintf(pid_str, 64, "%d", lwp->pid);
    pid_str[63] = 0;
    proc_remove_dentry(pid_str, 0);
}
#else
#define _free_proc_dentry(lwp)
#endif

void lwp_pid_put(struct rt_lwp *lwp)
{
    _free_proc_dentry(lwp);

    lwp_pid_lock_take();
    lwp_pid_put_locked(lwp->pid);
    lwp_pid_lock_release();

    /* reset pid field */
    lwp->pid = 0;
    /* clear reference */
    lwp_ref_dec(lwp);
}

static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
{
    struct lwp_avl_struct *p;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        p->data = lwp;
        lwp_ref_inc(lwp);

#ifdef RT_USING_DFS_PROCFS
        if (pid)
        {
            proc_pid(pid);
        }
#endif
    }
}

static void __exit_files(struct rt_lwp *lwp)
{
    int fd = lwp->fdt.maxfd - 1;

    while (fd >= 0)
    {
        struct dfs_file *d;

        d = lwp->fdt.fds[fd];
        if (d)
        {
            dfs_file_close(d);
            fdt_fd_release(&lwp->fdt, fd);
        }
        fd--;
    }
}
void lwp_user_object_lock_init(struct rt_lwp *lwp)
{
    rt_mutex_init(&lwp->object_mutex, "lwp_obj", RT_IPC_FLAG_PRIO);
}

void lwp_user_object_lock_destroy(struct rt_lwp *lwp)
{
    rt_mutex_detach(&lwp->object_mutex);
}

void lwp_user_object_lock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_take(&lwp->object_mutex, RT_WAITING_FOREVER);
    }
    else
    {
        RT_ASSERT(0);
    }
}

void lwp_user_object_unlock(struct rt_lwp *lwp)
{
    if (lwp)
    {
        rt_mutex_release(&lwp->object_mutex);
    }
    else
    {
        RT_ASSERT(0);
    }
}

int lwp_user_object_add(struct rt_lwp *lwp, rt_object_t object)
{
    int ret = -1;

    if (lwp && object)
    {
        lwp_user_object_lock(lwp);
        if (!lwp_avl_find((avl_key_t)object, lwp->object_root))
        {
            struct lwp_avl_struct *node;

            node = (struct lwp_avl_struct *)rt_malloc(sizeof(struct lwp_avl_struct));
            if (node)
            {
                rt_atomic_add(&object->lwp_ref_count, 1);
                node->avl_key = (avl_key_t)object;
                lwp_avl_insert(node, &lwp->object_root);
                ret = 0;
            }
        }
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

static rt_err_t _object_node_delete(struct rt_lwp *lwp, struct lwp_avl_struct *node)
{
    rt_err_t ret = -1;
    rt_object_t object;

    if (!lwp || !node)
    {
        return ret;
    }
    object = (rt_object_t)node->avl_key;
    object->lwp_ref_count--;
    if (object->lwp_ref_count == 0)
    {
        /* remove from kernel object list */
        switch (object->type)
        {
        case RT_Object_Class_Semaphore:
            ret = rt_sem_delete((rt_sem_t)object);
            break;
        case RT_Object_Class_Mutex:
            ret = rt_mutex_delete((rt_mutex_t)object);
            break;
        case RT_Object_Class_Event:
            ret = rt_event_delete((rt_event_t)object);
            break;
        case RT_Object_Class_MailBox:
            ret = rt_mb_delete((rt_mailbox_t)object);
            break;
        case RT_Object_Class_MessageQueue:
            ret = rt_mq_delete((rt_mq_t)object);
            break;
        case RT_Object_Class_Timer:
            ret = rt_timer_delete((rt_timer_t)object);
            break;
        case RT_Object_Class_Custom:
            ret = rt_custom_object_destroy(object);
            break;
        default:
            LOG_E("input object type(%d) error", object->type);
            break;
        }
    }
    else
    {
        ret = 0;
    }
    lwp_avl_remove(node, &lwp->object_root);
    rt_free(node);
    return ret;
}

rt_err_t lwp_user_object_delete(struct rt_lwp *lwp, rt_object_t object)
{
    rt_err_t ret = -1;

    if (lwp && object)
    {
        struct lwp_avl_struct *node;

        lwp_user_object_lock(lwp);
        node = lwp_avl_find((avl_key_t)object, lwp->object_root);
        ret = _object_node_delete(lwp, node);
        lwp_user_object_unlock(lwp);
    }
    return ret;
}

void lwp_user_object_clear(struct rt_lwp *lwp)
{
    struct lwp_avl_struct *node;

    lwp_user_object_lock(lwp);
    while ((node = lwp_map_find_first(lwp->object_root)) != RT_NULL)
    {
        _object_node_delete(lwp, node);
    }
    lwp_user_object_unlock(lwp);
}

static int _object_dup(struct lwp_avl_struct *node, void *arg)
{
    rt_object_t object;
    struct rt_lwp *dst_lwp = (struct rt_lwp *)arg;

    object = (rt_object_t)node->avl_key;
    lwp_user_object_add(dst_lwp, object);
    return 0;
}

void lwp_user_object_dup(struct rt_lwp *dst_lwp, struct rt_lwp *src_lwp)
{
    lwp_user_object_lock(src_lwp);
    lwp_avl_traversal(src_lwp->object_root, _object_dup, dst_lwp);
    lwp_user_object_unlock(src_lwp);
}
rt_lwp_t lwp_create(rt_base_t flags)
{
    pid_t pid;
    rt_lwp_t new_lwp = rt_calloc(1, sizeof(struct rt_lwp));

    if (new_lwp)
    {
        /* minimal setup of lwp object */
        new_lwp->ref = 1;
#ifdef RT_USING_SMP
        new_lwp->bind_cpu = RT_CPUS_NR;
#endif
        new_lwp->exe_file = RT_NULL;
        rt_list_init(&new_lwp->t_grp);
        rt_list_init(&new_lwp->pgrp_node);
        rt_list_init(&new_lwp->timer);
        lwp_user_object_lock_init(new_lwp);
        rt_wqueue_init(&new_lwp->wait_queue);
        rt_wqueue_init(&new_lwp->waitpid_waiters);
        lwp_signal_init(&new_lwp->signal);
        rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);

        if (flags & LWP_CREATE_FLAG_NOTRACE_EXEC)
            new_lwp->did_exec = RT_TRUE;

        /* lwp with pid */
        if (flags & LWP_CREATE_FLAG_ALLOC_PID)
        {
            lwp_pid_lock_take();
            pid = lwp_pid_get_locked();
            if (pid == 0)
            {
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("%s: pid slots exhausted", __func__);
            }
            else
            {
                new_lwp->pid = pid;
                lwp_pid_set_lwp_locked(pid, new_lwp);
            }
            lwp_pid_lock_release();
        }
        rt_memset(&new_lwp->rt_rusage, 0, sizeof(new_lwp->rt_rusage));

        if (flags & LWP_CREATE_FLAG_INIT_USPACE)
        {
            rt_err_t error = lwp_user_space_init(new_lwp, 0);
            if (error)
            {
                lwp_pid_put(new_lwp);
                lwp_user_object_lock_destroy(new_lwp);
                rt_free(new_lwp);
                new_lwp = RT_NULL;
                LOG_E("%s: failed to initialize user space", __func__);
            }
        }
    }
    LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
    return new_lwp;
}
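/*
 * A minimal usage sketch (illustrative; the caller context is assumed and is
 * not part of this file): create a process object with a fresh pid and an
 * initialized user space, then hand it over to the exec/load path.
 *
 *   rt_lwp_t lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID | LWP_CREATE_FLAG_INIT_USPACE);
 *   if (lwp)
 *   {
 *       ... load an executable and start the main thread ...
 *   }
 */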
/** when reference is 0, a lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
    rt_processgroup_t group = RT_NULL;

    if (lwp == RT_NULL)
    {
        return;
    }

    /**
     * Brief: Recycle the lwp when reference is cleared
     *
     * Note: Critical Section
     * - lwp (RW. there is no other writer/reader competing with lwp_free,
     *   since all the references are cleared)
     */
    LOG_D("lwp free: %p", lwp);

    rt_free(lwp->exe_file);
    group = lwp_pgrp_find(lwp_pgid_get_byprocess(lwp));
    if (group)
        lwp_pgrp_remove(group, lwp);

    LWP_LOCK(lwp);

    if (lwp->args != RT_NULL)
    {
#ifndef ARCH_MM_MMU
        lwp->args_length = RT_NULL;
#ifndef ARCH_MM_MPU
        rt_free(lwp->args);
#endif /* not defined ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->args = RT_NULL;
    }

    lwp_user_object_clear(lwp);
    lwp_user_object_lock_destroy(lwp);

    /* free data section */
    if (lwp->data_entry != RT_NULL)
    {
#ifdef ARCH_MM_MMU
        rt_free_align(lwp->data_entry);
#else
#ifdef ARCH_MM_MPU
        rt_lwp_umap_user(lwp, lwp->text_entry, 0);
        rt_lwp_free_user(lwp, lwp->data_entry, lwp->data_size);
#else
        rt_free_align(lwp->data_entry);
#endif /* ARCH_MM_MPU */
#endif /* ARCH_MM_MMU */
        lwp->data_entry = RT_NULL;
    }

    /* free text section */
    if (lwp->lwp_type == LWP_TYPE_DYN_ADDR)
    {
        if (lwp->text_entry)
        {
            LOG_D("lwp text free: %p", lwp->text_entry);
#ifndef ARCH_MM_MMU
            rt_free((void*)lwp->text_entry);
#endif /* not defined ARCH_MM_MMU */
            lwp->text_entry = RT_NULL;
        }
    }

#ifdef ARCH_MM_MMU
    lwp_unmap_user_space(lwp);
#endif
    timer_list_free(&lwp->timer);

    LWP_UNLOCK(lwp);
    RT_ASSERT(lwp->lwp_lock.owner == RT_NULL);
    rt_mutex_detach(&lwp->lwp_lock);

    /**
     * The pid must have been released before entering lwp_free(),
     * otherwise this is a data race
     */
    RT_ASSERT(lwp->pid == 0);
    rt_free(lwp);
}
rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
    LWP_LOCK(lwp);
    lwp->rt_rusage.ru_stime.tv_sec += thread->system_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_stime.tv_usec += thread->system_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    lwp->rt_rusage.ru_utime.tv_sec += thread->user_time / RT_TICK_PER_SECOND;
    lwp->rt_rusage.ru_utime.tv_usec += thread->user_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
    rt_list_remove(&thread->sibling);
    LWP_UNLOCK(lwp);
    lwp_futex_exit_robust_list(thread);

    /**
     * Note: the tid tree always holds a reference to the thread, hence the
     * tid must be released before the cleanup of the thread
     */
    lwp_tid_put(thread->tid);
    thread->tid = 0;

    rt_thread_delete(thread);
    rt_schedule();
    while (1) ;
}

rt_inline void _clear_child_tid(rt_thread_t thread)
{
    if (thread->clear_child_tid)
    {
        int t = 0;
        int *clear_child_tid = thread->clear_child_tid;

        thread->clear_child_tid = RT_NULL;
        lwp_put_to_user(clear_child_tid, &t, sizeof t);
        sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
    }
}
void lwp_exit(rt_lwp_t lwp, lwp_status_t status)
{
    rt_thread_t thread;

    if (!lwp)
    {
        LOG_W("%s: lwp should not be null", __func__);
        return;
    }

    thread = rt_thread_self();
    RT_ASSERT((struct rt_lwp *)thread->lwp == lwp);
    LOG_D("process(lwp.pid=%d) exit", lwp->pid);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    /**
     * Brief: only one thread should call exit_group(),
     * but we cannot ensure that during run-time
     */
    lwp->lwp_status = status;
    LWP_UNLOCK(lwp);

    lwp_terminate(lwp);
#else
    /* the undeclared identifiers of the legacy branch are resolved against
     * this function's locals: main_thread is derived from the thread group,
     * and the exit value is the status argument */
    rt_thread_t main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (main_thread == thread)
    {
        rt_thread_t sub_thread;
        rt_list_t *list;

        lwp_terminate(lwp);

        /* delete all subthreads */
        while ((list = thread->sibling.prev) != &lwp->t_grp)
        {
            sub_thread = rt_list_entry(list, struct rt_thread, sibling);
            rt_list_remove(&sub_thread->sibling);
            rt_thread_delete(sub_thread);
        }
        lwp->lwp_ret = status;
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}
void lwp_thread_exit(rt_thread_t thread, int status)
{
    rt_thread_t header_thr;
    struct rt_lwp *lwp;

    LOG_D("%s", __func__);

    RT_ASSERT(thread == rt_thread_self());
    lwp = (struct rt_lwp *)thread->lwp;
    RT_ASSERT(lwp != RT_NULL);

#ifdef ARCH_MM_MMU
    _clear_child_tid(thread);

    LWP_LOCK(lwp);
    header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
    if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
    {
        /**
         * if the last thread exits, treat it as a normal process exit.
         * This is reasonable since trap events exit through lwp_exit()
         */
        lwp->lwp_status = LWP_CREATE_STAT_EXIT(status);
        LWP_UNLOCK(lwp);

        lwp_terminate(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
#endif /* ARCH_MM_MMU */

    _thread_exit(lwp, thread);
}
/** @note the reference is not for synchronization, but for the release of resources; synchronization is done through the lwp & pid locks */
int lwp_ref_inc(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, 1);
    LOG_D("%s(%p(%s)): before %d", __func__, lwp, lwp->cmd, ref);
    return ref;
}

int lwp_ref_dec(struct rt_lwp *lwp)
{
    int ref;

    ref = rt_atomic_add(&lwp->ref, -1);
    LOG_D("%s(lwp=%p,lwp->cmd=%s): before ref=%d", __func__, lwp, lwp->cmd, ref);

    if (ref == 1)
    {
        struct rt_channel_msg msg;

        if (lwp->debug)
        {
            memset(&msg, 0, sizeof msg);
            rt_raw_channel_send(gdb_server_channel(), &msg);
        }

#ifndef ARCH_MM_MMU
#ifdef RT_LWP_USING_SHM
        lwp_shm_lwp_free(lwp);
#endif /* RT_LWP_USING_SHM */
#endif /* not defined ARCH_MM_MMU */
        lwp_free(lwp);
    }
    else
    {
        /* reference must be a positive integer */
        RT_ASSERT(ref > 1);
    }
    return ref;
}
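/*
 * Reference lifecycle sketch (illustrative, pieced together from this file;
 * rt_atomic_add returns the value *before* the update, so ref == 1 in
 * lwp_ref_dec() means the count just dropped to zero):
 *
 *   lwp_create()              ref = 1   (creation reference)
 *   lwp_pid_set_lwp_locked()  ref = 2   (pid tree takes a reference)
 *   ...
 *   lwp_pid_put()             ref = 1   (pid tree reference dropped)
 *   lwp_ref_dec()             ref = 0   -> lwp_free() recycles the object
 */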
struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid)
{
    struct lwp_avl_struct *p;
    struct rt_lwp *lwp = RT_NULL;

    p = lwp_avl_find(pid, lwp_pid_root);
    if (p)
    {
        lwp = (struct rt_lwp *)p->data;
    }
    return lwp;
}

struct rt_lwp* lwp_from_pid_locked(pid_t pid)
{
    struct rt_lwp* lwp;

    lwp = pid ? lwp_from_pid_raw_locked(pid) : lwp_self();
    return lwp;
}

pid_t lwp_to_pid(struct rt_lwp* lwp)
{
    if (!lwp)
    {
        return 0;
    }
    return lwp->pid;
}

char* lwp_pid2name(int32_t pid)
{
    struct rt_lwp *lwp;
    char* process_name = RT_NULL;

    lwp_pid_lock_take();
    lwp = lwp_from_pid_locked(pid);
    if (lwp)
    {
        process_name = strrchr(lwp->cmd, '/');
        process_name = process_name ? process_name + 1 : lwp->cmd;
    }
    lwp_pid_lock_release();

    return process_name;
}
pid_t lwp_name2pid(const char *name)
{
    int idx;
    pid_t pid = 0;
    rt_thread_t main_thread;
    char* process_name = RT_NULL;
    rt_sched_lock_level_t slvl;

    lwp_pid_lock_take();
    for (idx = 0; idx < RT_LWP_MAX_NR; idx++)
    {
        /* 0 is reserved */
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[idx].data;

        if (lwp)
        {
            process_name = strrchr(lwp->cmd, '/');
            process_name = process_name ? process_name + 1 : lwp->cmd;
            if (!rt_strncmp(name, process_name, RT_NAME_MAX))
            {
                main_thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
                rt_sched_lock(&slvl);
                if (!(rt_sched_thread_get_stat(main_thread) == RT_THREAD_CLOSE))
                {
                    pid = lwp->pid;
                }
                rt_sched_unlock(slvl);
            }
        }
    }
    lwp_pid_lock_release();
    return pid;
}

int lwp_getpid(void)
{
    rt_lwp_t lwp = lwp_self();
    return lwp ? lwp->pid : 1;
    // return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
}

rt_inline void _update_ru(struct rt_lwp *child, struct rt_lwp *self_lwp, struct rusage *uru)
{
    struct rusage rt_rusage;

    if (uru != RT_NULL)
    {
        rt_rusage.ru_stime.tv_sec = child->rt_rusage.ru_stime.tv_sec;
        rt_rusage.ru_stime.tv_usec = child->rt_rusage.ru_stime.tv_usec;
        rt_rusage.ru_utime.tv_sec = child->rt_rusage.ru_utime.tv_sec;
        rt_rusage.ru_utime.tv_usec = child->rt_rusage.ru_utime.tv_usec;
        lwp_data_put(self_lwp, uru, &rt_rusage, sizeof(*uru));
    }
}
/* do statistical summary and reap the child if necessary */
static rt_err_t _stats_and_reap_child(rt_lwp_t child, rt_thread_t cur_thr,
                                      struct rt_lwp *self_lwp, int *ustatus,
                                      int options, struct rusage *uru)
{
    int lwp_stat = child->lwp_status;

    /* report statistical data to process */
    _update_ru(child, self_lwp, uru);

    if (child->terminated && !(options & WNOWAIT))
    {
        /* reap the child process if it has exited */
        LOG_D("func %s: child detached", __func__);
        lwp_pid_put(child);
        lwp_children_unregister(self_lwp, child);
    }

    if (ustatus)
        lwp_data_put(self_lwp, ustatus, &lwp_stat, sizeof(*ustatus));

    return RT_EOK;
}
#define HAS_CHILD_BUT_NO_EVT (-1024)

/* check if the process has already terminated */
static sysret_t _query_event_from_lwp(rt_lwp_t child, rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                      int options, int *status)
{
    sysret_t rc;

    LWP_LOCK(child);
    if (child->terminated)
    {
        rc = child->pid;
    }
    else if ((options & WSTOPPED) && child->jobctl_stopped && !child->wait_reap_stp)
    {
        child->wait_reap_stp = 1;
        rc = child->pid;
    }
    else
    {
        rc = HAS_CHILD_BUT_NO_EVT;
    }
    LWP_UNLOCK(child);

    LOG_D("%s(child_pid=%d ('%s'), stopped=%d) => %d", __func__, child->pid, child->cmd, child->jobctl_stopped, rc);
    return rc;
}
/* verify that the process is a child, and reap it */
static pid_t _verify_child_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                    pid_t wait_pid, int options, int *ustatus,
                                    struct rusage *uru)
{
    sysret_t rc;
    struct rt_lwp *child;

    /* check if pid references a valid child */
    lwp_pid_lock_take();
    child = lwp_from_pid_locked(wait_pid);
    if (!child)
        rc = -EINVAL;
    else if (child->parent != self_lwp)
        rc = -ESRCH;
    else
        rc = wait_pid;
    lwp_pid_lock_release();

    if (rc > 0)
    {
        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
        {
            _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
        }
    }
    return rc;
}
/* try to reap any child */
static pid_t _reap_any_child_pid(rt_thread_t cur_thr, rt_lwp_t self_lwp, pid_t pair_pgid,
                                 int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc = -ECHILD;
    struct rt_lwp *child;

    LWP_LOCK(self_lwp);
    child = self_lwp->first_child;

    /* find an exited child if any */
    while (child)
    {
        if (pair_pgid && child->pgid != pair_pgid)
        {
            /* advance before skipping, otherwise the loop never makes progress */
            child = child->sibling;
            continue;
        }

        rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
        if (rc > 0)
            break;

        child = child->sibling;
    }
    LWP_UNLOCK(self_lwp);

    if (rc > 0)
    {
        _stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
    }
    return rc;
}
rt_err_t lwp_waitpid_kick(rt_lwp_t parent, rt_lwp_t self_lwp)
{
    /* the waker provides the message mainly through its lwp_status */
    rt_wqueue_wakeup(&parent->waitpid_waiters, self_lwp);
    return RT_EOK;
}

struct waitpid_handle {
    struct rt_wqueue_node wq_node;
    int options;
    rt_lwp_t waker_lwp;
};

/* the IPC message is set up here to notify the parent */
static int _waitq_filter(struct rt_wqueue_node *wait_node, void *key)
{
    int can_accept_evt = 0;
    rt_thread_t waiter = wait_node->polling_thread;
    pid_t destiny = (pid_t)wait_node->key;
    rt_lwp_t waker_lwp = key;
    struct waitpid_handle *handle;
    rt_ubase_t options;

    handle = rt_container_of(wait_node, struct waitpid_handle, wq_node);

    RT_ASSERT(waiter != RT_NULL);
    options = handle->options;

    /* filter out if the waker is not the one */
    if (destiny > 0)
    {
        /**
         * in the waitpid immediate-return routine, we already checked
         * that pid is one of the children of the waiting thread
         */
        can_accept_evt = waker_lwp->pid == destiny;
    }
    else if (destiny == -1)
    {
        can_accept_evt = waker_lwp->parent == waiter->lwp;
    }
    else
    {
        /* destiny == 0 || destiny == -pgid */
        pid_t waiter_pgid;
        if (destiny == 0)
        {
            waiter_pgid = lwp_pgid_get_byprocess(waiter->lwp);
        }
        else
        {
            waiter_pgid = -destiny;
        }
        can_accept_evt = waiter_pgid == lwp_pgid_get_byprocess(waker_lwp);
    }

    /* filter out if the event is not desired */
    if (can_accept_evt)
    {
        if ((options & WEXITED) && waker_lwp->terminated)
            can_accept_evt = 1;
        else if ((options & WSTOPPED) && WIFSTOPPED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else if ((options & WCONTINUED) && WIFCONTINUED(waker_lwp->lwp_status))
            can_accept_evt = 1;
        else
            can_accept_evt = 0;
    }

    /* setup message for waiter if accepted */
    if (can_accept_evt)
        handle->waker_lwp = waker_lwp;

    /* 0 if event is accepted, otherwise discard */
    return !can_accept_evt;
}
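/*
 * Summary of the pid matching above (standard waitpid() semantics):
 *
 *   destiny > 0    wait for exactly that child pid
 *   destiny == -1  wait for any child of the caller
 *   destiny == 0   wait for any child in the caller's process group
 *   destiny < -1   wait for any child in the process group -destiny
 */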
/* the waiter cleans up the IPC message and waits for the desired event here */
static rt_err_t _wait_for_event(rt_thread_t cur_thr, rt_lwp_t self_lwp,
                                struct waitpid_handle *handle, pid_t destiny)
{
    rt_err_t ret;

    /* current context checking */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    handle->wq_node.polling_thread = cur_thr;
    handle->wq_node.key = destiny;
    handle->wq_node.wakeup = _waitq_filter;
    handle->wq_node.wqueue = &self_lwp->waitpid_waiters;
    rt_list_init(&handle->wq_node.list);

    cur_thr->error = RT_EOK;

    LOG_D("%s(self_lwp=%d) wait for event", __func__, self_lwp->pid);

    rt_enter_critical();
    ret = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
    if (ret == RT_EOK)
    {
        rt_wqueue_add(handle->wq_node.wqueue, &handle->wq_node);
        rt_exit_critical();

        rt_schedule();

        ret = cur_thr->error;

        /**
         * cur_thr->error is a positive value, but some legacy implementations
         * use a negative one. So we normalize it to avoid errors
         */
        ret = ret > 0 ? -ret : ret;

        /**
         * we don't rely on this actually, but we clean it up since the wakeup
         * API sets it up during operation, and this would cause some messy
         * conditions
         */
        handle->wq_node.wqueue->flag = RT_WQ_FLAG_CLEAN;
        rt_wqueue_remove(&handle->wq_node);
    }
    else
    {
        /* failed to suspend, return immediately with failure */
        rt_exit_critical();
    }
    return ret;
}
/* wait for an IPC event and do the cleanup if necessary */
static sysret_t _wait_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp, const pid_t pid,
                               int options, int *ustatus, struct rusage *uru)
{
    sysret_t rc;
    struct waitpid_handle handle;
    rt_lwp_t waker;

    /* wait for SIGCHLD or other async events */
    handle.options = options;
    handle.waker_lwp = 0;
    rc = _wait_for_event(cur_thr, self_lwp, &handle, pid);

    waker = handle.waker_lwp;
    if (waker != RT_NULL)
    {
        rc = waker->pid;

        /* check out if any process exited */
        LOG_D("%s: woken up by lwp=%d", __func__, waker->pid);
        _stats_and_reap_child(waker, cur_thr, self_lwp, ustatus, options, uru);
    }
    /**
     * else if (rc != RT_EOK):
     * unable to do a suspend, or woken up unexpectedly
     * -> a failure is returned
     */
    return rc;
}
pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru)
{
    pid_t rc = -1;
    struct rt_thread *cur_thr;
    struct rt_lwp *self_lwp;

    cur_thr = rt_thread_self();
    self_lwp = lwp_self();

    if (!cur_thr || !self_lwp)
    {
        rc = -EINVAL;
    }
    else
    {
        /* check if able to reap the desired child immediately */
        if (pid > 0)
        {
            /* if pid is a child then try to reap it */
            rc = _verify_child_and_reap(cur_thr, self_lwp, pid, options, status, ru);
        }
        else if (pid == -1)
        {
            /* any terminated child */
            rc = _reap_any_child_pid(cur_thr, self_lwp, 0, options, status, ru);
        }
        else
        {
            /**
             * (pid < -1 || pid == 0)
             * any terminated child with matched pgid
             */
            pid_t pair_pgid;
            if (pid == 0)
            {
                pair_pgid = lwp_pgid_get_byprocess(self_lwp);
            }
            else
            {
                pair_pgid = -pid;
            }
            rc = _reap_any_child_pid(cur_thr, self_lwp, pair_pgid, options, status, ru);
        }

        if (rc == HAS_CHILD_BUT_NO_EVT)
        {
            if (!(options & WNOHANG))
            {
                /* otherwise, arrange a suspend and wait for an async event */
                options |= WEXITED;
                rc = _wait_and_reap(cur_thr, self_lwp, pid, options, status, ru);
            }
            else
            {
                /**
                 * POSIX.1: If waitpid() was invoked with WNOHANG set in
                 * options, it has at least one child process specified by
                 * pid for which status is not available, and status is not
                 * available for any process specified by pid, 0 is returned
                 */
                rc = 0;
            }
        }
        else
        {
            RT_ASSERT(rc != 0);
        }
    }

    LOG_D("waitpid() => %d, *status=0x%x", rc, status ? *status : 0);
    return rc;
}

pid_t waitpid(pid_t pid, int *status, int options)
{
    return lwp_waitpid(pid, status, options, RT_NULL);
}
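/*
 * A minimal user-space usage sketch (illustrative; assumes a POSIX-style
 * libc environment on top of these syscalls, not part of this file):
 *
 *   #include <sys/wait.h>
 *
 *   int status;
 *   pid_t pid = waitpid(-1, &status, WNOHANG);
 *   if (pid == 0)
 *       ;  children exist, but none has changed state yet
 *   else if (pid > 0 && WIFEXITED(status))
 *       ;  child `pid` exited with code WEXITSTATUS(status)
 */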
#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
{
    while (len--)
    {
        rt_kprintf("-");
    }
}

static void print_thread_info(struct rt_thread* thread, int maxlen)
{
    rt_uint8_t *ptr;
    rt_uint8_t stat;

#ifdef RT_USING_SMP
    if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
        rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_CTX(thread).oncpu, RT_SCHED_PRIV(thread).current_priority);
    else
        rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
#else
    rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
#endif /* RT_USING_SMP */

    stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
    if (stat == RT_THREAD_READY)        rt_kprintf(" ready ");
    else if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK) rt_kprintf(" suspend");
    else if (stat == RT_THREAD_INIT)    rt_kprintf(" init ");
    else if (stat == RT_THREAD_CLOSE)   rt_kprintf(" close ");
    else if (stat == RT_THREAD_RUNNING) rt_kprintf(" running");

#if defined(ARCH_CPU_STACK_GROWS_UPWARD)
    ptr = (rt_uint8_t *)thread->stack_addr + thread->stack_size;
    while (*ptr == '#') ptr--;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               ((rt_uint32_t)thread->sp - (rt_uint32_t)thread->stack_addr),
               thread->stack_size,
               ((rt_uint32_t)ptr - (rt_uint32_t)thread->stack_addr) * 100 / thread->stack_size,
               thread->remaining_tick,
               thread->error);
#else
    ptr = (rt_uint8_t *)thread->stack_addr;
    while (*ptr == '#') ptr++;
    rt_kprintf(" 0x%08x 0x%08x %02d%% 0x%08x %03d\n",
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)thread->sp),
               thread->stack_size,
               (thread->stack_size + (rt_uint32_t)(rt_size_t)thread->stack_addr - (rt_uint32_t)(rt_size_t)ptr) * 100
                   / thread->stack_size,
               RT_SCHED_PRIV(thread).remaining_tick,
               thread->error);
#endif
}
long list_process(void)
{
    int index;
    int maxlen;
    rt_ubase_t level;
    struct rt_thread *thread;
    struct rt_list_node *node, *list;
    const char *item_title = "thread";
    int count = 0;
    struct rt_thread **threads;

    maxlen = RT_NAME_MAX;

#ifdef RT_USING_SMP
    rt_kprintf("%-*.s %-*.s %-*.s cpu pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- --- ------- ---------- ---------- ------ ---------- ---\n");
#else
    rt_kprintf("%-*.s %-*.s %-*.s pri status sp stack size max used left tick error\n", 4, "PID", maxlen, "CMD", maxlen, item_title);
    object_split(4); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" "); object_split(maxlen); rt_kprintf(" ");
    rt_kprintf("--- ------- ---------- ---------- ------ ---------- ---\n");
#endif /* RT_USING_SMP */

    count = rt_object_get_length(RT_Object_Class_Thread);
    if (count > 0)
    {
        /* get thread pointers */
        threads = (struct rt_thread **)rt_calloc(count, sizeof(struct rt_thread *));
        if (threads)
        {
            index = rt_object_get_pointers(RT_Object_Class_Thread, (rt_object_t *)threads, count);
            if (index > 0)
            {
                for (index = 0; index < count; index++)
                {
                    struct rt_thread th;

                    thread = threads[index];
                    level = rt_spin_lock_irqsave(&thread->spinlock);
                    if ((rt_object_get_type(&thread->parent) & ~RT_Object_Class_Static) != RT_Object_Class_Thread)
                    {
                        rt_spin_unlock_irqrestore(&thread->spinlock, level);
                        continue;
                    }

                    rt_memcpy(&th, thread, sizeof(struct rt_thread));
                    rt_spin_unlock_irqrestore(&thread->spinlock, level);

                    if (th.lwp == RT_NULL)
                    {
                        rt_kprintf(" %-*.*s ", maxlen, RT_NAME_MAX, "kernel");
                        print_thread_info(&th, maxlen);
                    }
                }
            }
            rt_free(threads);
        }
    }

    for (index = 0; index < RT_LWP_MAX_NR; index++)
    {
        struct rt_lwp *lwp = (struct rt_lwp *)lwp_pid_ary[index].data;

        if (lwp)
        {
            list = &lwp->t_grp;
            for (node = list->next; node != list; node = node->next)
            {
                thread = rt_list_entry(node, struct rt_thread, sibling);
                rt_kprintf("%4d %-*.*s ", lwp_to_pid(lwp), maxlen, RT_NAME_MAX, lwp->cmd);
                print_thread_info(thread, maxlen);
            }
        }
    }
    return 0;
}
MSH_CMD_EXPORT(list_process, list process);
static void cmd_kill(int argc, char** argv)
{
    int pid;
    int sig = SIGKILL;

    if (argc < 2)
    {
        rt_kprintf("kill pid or kill pid -s signal\n");
        return;
    }

    pid = atoi(argv[1]);
    if (argc >= 4)
    {
        if (argv[2][0] == '-' && argv[2][1] == 's')
        {
            sig = atoi(argv[3]);
        }
    }
    lwp_pid_lock_take();
    lwp_signal_kill(lwp_from_pid_raw_locked(pid), sig, SI_USER, 0);
    lwp_pid_lock_release();
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);

static void cmd_killall(int argc, char** argv)
{
    int pid;

    if (argc < 2)
    {
        rt_kprintf("killall processes_name\n");
        return;
    }

    while ((pid = lwp_name2pid(argv[1])) > 0)
    {
        lwp_pid_lock_take();
        lwp_signal_kill(lwp_from_pid_raw_locked(pid), SIGKILL, SI_USER, 0);
        lwp_pid_lock_release();
        rt_thread_mdelay(100);
    }
}
MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
#endif
int lwp_check_exit_request(void)
{
    rt_thread_t thread = rt_thread_self();
    rt_size_t expected = LWP_EXIT_REQUEST_TRIGGERED;

    if (!thread->lwp)
    {
        return 0;
    }

    return atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                          LWP_EXIT_REQUEST_IN_PROCESS);
}
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
static void _resr_cleanup(struct rt_lwp *lwp);

void lwp_terminate(struct rt_lwp *lwp)
{
    if (!lwp)
    {
        /* kernel threads are not supported */
        return;
    }

    LOG_D("%s(lwp=%p \"%s\")", __func__, lwp, lwp->cmd);

    LWP_LOCK(lwp);

    if (!lwp->terminated)
    {
        /* stop the receiving of signals */
        lwp->terminated = RT_TRUE;
        LWP_UNLOCK(lwp);

        _wait_sibling_exit(lwp, rt_thread_self());
        _resr_cleanup(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}

static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
    rt_sched_lock_level_t slvl;
    rt_list_t *list;
    rt_thread_t thread;
    rt_size_t expected = LWP_EXIT_REQUEST_NONE;

    /* broadcast exit request for sibling threads */
    LWP_LOCK(lwp);
    for (list = lwp->t_grp.next; list != &lwp->t_grp; list = list->next)
    {
        thread = rt_list_entry(list, struct rt_thread, sibling);

        atomic_compare_exchange_strong(&thread->exit_request, &expected,
                                       LWP_EXIT_REQUEST_TRIGGERED);

        rt_sched_lock(&slvl);
        /* don't release, otherwise the thread may have been freed */
        if (rt_sched_thread_is_suspended(thread))
        {
            thread->error = RT_EINTR;
            rt_sched_unlock(slvl);
            rt_thread_wakeup(thread);
        }
        else
        {
            rt_sched_unlock(slvl);
        }
    }
    LWP_UNLOCK(lwp);

    while (1)
    {
        int subthread_is_terminated;
        LOG_D("%s: wait for subthread exiting", __func__);

        /**
         * Brief: wait for all *running* sibling threads to exit
         *
         * Note: Critical Section
         * - sibling list of lwp (RW. It will clear all siblings finally)
         */
        LWP_LOCK(lwp);
        subthread_is_terminated = (int)(curr_thread->sibling.prev == &lwp->t_grp);
        if (!subthread_is_terminated)
        {
            rt_sched_lock_level_t slvl;
            rt_thread_t sub_thread;
            rt_list_t *list;
            int all_subthread_in_init = 1;

            /* check that all subthreads are in init state */
            for (list = curr_thread->sibling.prev; list != &lwp->t_grp; list = list->prev)
            {
                rt_sched_lock(&slvl);
                sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                if (rt_sched_thread_get_stat(sub_thread) != RT_THREAD_INIT)
                {
                    rt_sched_unlock(slvl);
                    all_subthread_in_init = 0;
                    break;
                }
                else
                {
                    rt_sched_unlock(slvl);
                }
            }
            if (all_subthread_in_init)
            {
                /* delete all subthreads */
                while ((list = curr_thread->sibling.prev) != &lwp->t_grp)
                {
                    sub_thread = rt_list_entry(list, struct rt_thread, sibling);
                    rt_list_remove(&sub_thread->sibling);

                    /**
                     * Note: Critical Section
                     * - thread control block (RW. Since it will free the
                     *   thread control block, it must ensure no one else can
                     *   access the thread any more)
                     */
                    lwp_tid_put(sub_thread->tid);
                    sub_thread->tid = 0;
                    rt_thread_delete(sub_thread);
                }
                subthread_is_terminated = 1;
            }
        }
        LWP_UNLOCK(lwp);

        if (subthread_is_terminated)
        {
            break;
        }
        rt_thread_mdelay(10);
    }
}
static void _notify_parent(rt_lwp_t lwp)
{
    int si_code;
    int signo_or_exitcode;
    lwp_siginfo_ext_t ext;
    lwp_status_t lwp_status = lwp->lwp_status;
    rt_lwp_t parent = lwp->parent;

    if (WIFSIGNALED(lwp_status))
    {
        si_code = (lwp_status & LWP_COREDUMP_FLAG) ? CLD_DUMPED : CLD_KILLED;
        signo_or_exitcode = WTERMSIG(lwp_status);
    }
    else
    {
        si_code = CLD_EXITED;
        signo_or_exitcode = WEXITSTATUS(lwp->lwp_status);
    }

    lwp_waitpid_kick(parent, lwp);

    ext = rt_malloc(sizeof(struct lwp_siginfo));
    if (ext)
    {
        rt_thread_t cur_thr = rt_thread_self();
        ext->sigchld.status = signo_or_exitcode;
        ext->sigchld.stime = cur_thr->system_time;
        ext->sigchld.utime = cur_thr->user_time;
    }
    lwp_signal_kill(parent, SIGCHLD, si_code, ext);
}
static void _resr_cleanup(struct rt_lwp *lwp)
{
    lwp_jobctrl_on_exit(lwp);

    LWP_LOCK(lwp);
    lwp_signal_detach(&lwp->signal);

    /**
     * @brief Detach children from lwp
     *
     * @note Critical Section
     * - the lwp (RW. Release lwp)
     * - the pid resource manager (RW. Release the pid)
     */
    while (lwp->first_child)
    {
        struct rt_lwp *child;

        child = lwp->first_child;
        lwp->first_child = child->sibling;

        /** @note safe since the slist node is released */
        LWP_UNLOCK(lwp);
        LWP_LOCK(child);
        if (child->terminated)
        {
            lwp_pid_put(child);
        }
        else
        {
            child->sibling = RT_NULL;
            /* info: this may cause an orphan lwp */
            child->parent = RT_NULL;
        }
        LWP_UNLOCK(child);
        lwp_ref_dec(child);
        lwp_ref_dec(lwp);

        LWP_LOCK(lwp);
    }
    LWP_UNLOCK(lwp);

    /**
     * @brief Wake up the parent if it is waiting for this lwp, otherwise a
     *        signal will be sent to the parent
     *
     * @note Critical Section
     * - the parent lwp (RW.)
     */
    LWP_LOCK(lwp);
    if (lwp->parent &&
        !lwp_sigismember(&lwp->parent->signal.sig_action_nocldwait, SIGCHLD))
    {
        /* if we successfully raced to set lwp->terminated before the parent detached */
        LWP_UNLOCK(lwp);

        /**
         * Note: children cannot detach themselves and must wait for the
         * parent to take care of them
         */
        _notify_parent(lwp);
    }
    else
    {
        LWP_UNLOCK(lwp);

        /**
         * if the process is an orphan, it doesn't have a parent to do the
         * recycling. Otherwise, its parent has set up a flag to mask out the
         * recycling event
         */
        lwp_pid_put(lwp);
    }

    LWP_LOCK(lwp);
    if (lwp->fdt.fds != RT_NULL)
    {
        struct dfs_file **fds;

        /* auto clean fds */
        __exit_files(lwp);
        fds = lwp->fdt.fds;
        lwp->fdt.fds = RT_NULL;
        LWP_UNLOCK(lwp);

        rt_free(fds);
    }
    else
    {
        LWP_UNLOCK(lwp);
    }
}
static int _lwp_setaffinity(int tid, int cpu)
{
    rt_thread_t thread;
    int ret = -1;

    thread = lwp_tid_get_thread_and_inc_ref(tid);
    if (thread)
    {
#ifdef RT_USING_SMP
        rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)(rt_ubase_t)cpu);
#endif
        ret = 0;
    }
    lwp_tid_dec_ref(thread);
    return ret;
}

int lwp_setaffinity(int tid, int cpu)
{
    int ret;

#ifdef RT_USING_SMP
    if (cpu < 0 || cpu > RT_CPUS_NR)
    {
        cpu = RT_CPUS_NR;
    }
#endif
    ret = _lwp_setaffinity(tid, cpu);
    return ret;
}

#ifdef RT_USING_SMP
static void cmd_cpu_bind(int argc, char** argv)
{
    int pid;
    int cpu;

    if (argc < 3)
    {
        rt_kprintf("Usage: cpu_bind pid cpu\n");
        return;
    }
    pid = atoi(argv[1]);
    cpu = atoi(argv[2]);
    lwp_setaffinity((pid_t)pid, cpu);
}
MSH_CMD_EXPORT_ALIAS(cmd_cpu_bind, cpu_bind, set a process bind to a cpu);
#endif