lwp_signal.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-11-12     Jesven       first version
 * 2023-02-23     Shell        Support sigtimedwait
 * 2023-07-04     Shell        Support siginfo, sigqueue;
 *                             remove lwp_signal_backup/restore() to reduce architecture code;
 *                             update the generation, pending and delivery routines
 * 2023-11-22     Shell        Support for job control signals. Fix up signal catching while
 *                             some of the signals are blocked but no further dequeue is applied.
 *                             Add itimer support
 */

#define __RT_IPC_SOURCE__
#define DBG_TAG "lwp.signal"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <rthw.h>
#include <rtthread.h>
#include <string.h>

#include "lwp_internal.h"
#include "sys/signal.h"
#include "syscall_generic.h"

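/**
 * Allocate a kernel siginfo and fill in signo, code and the optional extension
 * data. The sender's pid/tid are recorded when the creating thread belongs to
 * an lwp; otherwise they are set to 0 (no owning process, e.g. a kernel thread).
 */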
static lwp_siginfo_t siginfo_create(rt_thread_t current, int signo, int code, lwp_siginfo_ext_t ext)
{
    lwp_siginfo_t siginfo;
    struct rt_lwp *self_lwp;
    rt_thread_t self_thr;

    siginfo = rt_malloc(sizeof(*siginfo));
    if (siginfo)
    {
        siginfo->ksiginfo.signo = signo;
        siginfo->ksiginfo.code = code;
        siginfo->ext = ext;

        self_thr = current;
        self_lwp = current->lwp;
        if (self_lwp)
        {
            siginfo->ksiginfo.from_pid = self_lwp->pid;
            siginfo->ksiginfo.from_tid = self_thr->tid;
        }
        else
        {
            siginfo->ksiginfo.from_pid = 0;
            siginfo->ksiginfo.from_tid = 0;
        }
    }

    return siginfo;
}

rt_inline void siginfo_delete(lwp_siginfo_t siginfo)
{
    if (siginfo->ext)
    {
        rt_free(siginfo->ext);
        siginfo->ext = RT_NULL;
    }
    rt_free(siginfo);
}

rt_inline void _sigorsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        dset->sig[3] = set0->sig[3] | set1->sig[3];
        dset->sig[2] = set0->sig[2] | set1->sig[2];
    case 2:
        dset->sig[1] = set0->sig[1] | set1->sig[1];
    case 1:
        dset->sig[0] = set0->sig[0] | set1->sig[0];
    default:
        return;
    }
}

rt_inline void _sigandsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        dset->sig[3] = set0->sig[3] & set1->sig[3];
        dset->sig[2] = set0->sig[2] & set1->sig[2];
    case 2:
        dset->sig[1] = set0->sig[1] & set1->sig[1];
    case 1:
        dset->sig[0] = set0->sig[0] & set1->sig[0];
    default:
        return;
    }
}

rt_inline void _signotsets(lwp_sigset_t *dset, const lwp_sigset_t *set)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        dset->sig[3] = ~set->sig[3];
        dset->sig[2] = ~set->sig[2];
    case 2:
        dset->sig[1] = ~set->sig[1];
    case 1:
        dset->sig[0] = ~set->sig[0];
    default:
        return;
    }
}

rt_inline void _sigaddset(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1;

    if (_LWP_NSIG_WORDS == 1)
    {
        set->sig[0] |= 1UL << sig;
    }
    else
    {
        set->sig[sig / _LWP_NSIG_BPW] |= 1UL << (sig % _LWP_NSIG_BPW);
    }
}

rt_inline void _sigdelset(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1;

    if (_LWP_NSIG_WORDS == 1)
    {
        set->sig[0] &= ~(1UL << sig);
    }
    else
    {
        set->sig[sig / _LWP_NSIG_BPW] &= ~(1UL << (sig % _LWP_NSIG_BPW));
    }
}

rt_inline int _sigisemptyset(lwp_sigset_t *set)
{
    switch (_LWP_NSIG_WORDS)
    {
    case 4:
        return (set->sig[3] | set->sig[2] |
                set->sig[1] | set->sig[0]) == 0;
    case 2:
        return (set->sig[1] | set->sig[0]) == 0;
    case 1:
        return set->sig[0] == 0;
    default:
        return 1;
    }
}

rt_inline int _sigismember(lwp_sigset_t *set, int _sig)
{
    unsigned long sig = _sig - 1;

    if (_LWP_NSIG_WORDS == 1)
    {
        return 1 & (set->sig[0] >> sig);
    }
    else
    {
        return 1 & (set->sig[sig / _LWP_NSIG_BPW] >> (sig % _LWP_NSIG_BPW));
    }
}

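/**
 * Return the lowest-numbered signal that is set in `pending` but not in
 * `mask`, or 0 if no such signal exists. Signal numbers are 1-based.
 */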
rt_inline int _next_signal(lwp_sigset_t *pending, lwp_sigset_t *mask)
{
    unsigned long i, *s, *m, x;
    int sig = 0;

    s = pending->sig;
    m = mask->sig;
    x = *s & ~*m;
    if (x)
    {
        sig = rt_hw_ffz(~x) + 1;
        return sig;
    }

    switch (_LWP_NSIG_WORDS)
    {
    default:
        for (i = 1; i < _LWP_NSIG_WORDS; ++i)
        {
            x = *++s & ~*++m;
            if (!x)
                continue;
            sig = rt_hw_ffz(~x) + i * _LWP_NSIG_BPW + 1;
            break;
        }
        break;

    case 2:
        x = s[1] & ~m[1];
        if (!x)
            break;
        sig = rt_hw_ffz(~x) + _LWP_NSIG_BPW + 1;
        break;

    case 1:
        /* Nothing to do */
        break;
    }

    return sig;
}

#define _SIGQ(tp) (&(tp)->signal.sig_queue)

rt_inline int sigqueue_isempty(lwp_sigqueue_t sigqueue)
{
    return _sigisemptyset(&sigqueue->sigset_pending);
}

rt_inline int sigqueue_ismember(lwp_sigqueue_t sigqueue, int signo)
{
    return _sigismember(&sigqueue->sigset_pending, signo);
}

rt_inline int sigqueue_peek(lwp_sigqueue_t sigqueue, lwp_sigset_t *mask)
{
    return _next_signal(&sigqueue->sigset_pending, mask);
}

rt_inline int sigqueue_examine(lwp_sigqueue_t sigqueue, lwp_sigset_t *pending)
{
    int is_empty = sigqueue_isempty(sigqueue);
    if (!is_empty)
    {
        /* accumulate this queue's pending set into the caller's set */
        _sigorsets(pending, pending, &sigqueue->sigset_pending);
    }
    return is_empty;
}

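/**
 * Enqueue a siginfo and mark its signo pending. The siginfo list is kept
 * sorted by signo in ascending order (FIFO among entries with the same signo)
 * so that sigqueue_dequeue() may stop scanning once it sees a larger signo.
 */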
static void sigqueue_enqueue(lwp_sigqueue_t sigqueue, lwp_siginfo_t siginfo)
{
    lwp_siginfo_t idx;
    rt_bool_t inserted = RT_FALSE;

    rt_list_for_each_entry(idx, &sigqueue->siginfo_list, node)
    {
        if (idx->ksiginfo.signo > siginfo->ksiginfo.signo)
        {
            /* insert before the first entry with a larger signo to keep the list sorted */
            rt_list_insert_before(&idx->node, &siginfo->node);
            inserted = RT_TRUE;
            break;
        }
    }

    if (!inserted)
        rt_list_insert_before(&sigqueue->siginfo_list, &siginfo->node);

    _sigaddset(&sigqueue->sigset_pending, siginfo->ksiginfo.signo);
    return;
}

/**
 * Dequeue a siginfo matching `signo`, which is likely to exist, and test
 * whether any sibling with the same signo remains in the queue.
 */
static lwp_siginfo_t sigqueue_dequeue(lwp_sigqueue_t sigqueue, int signo)
{
    lwp_siginfo_t found;
    lwp_siginfo_t candidate;
    lwp_siginfo_t next;
    rt_bool_t is_empty;

    found = RT_NULL;
    is_empty = RT_TRUE;
    rt_list_for_each_entry_safe(candidate, next, &sigqueue->siginfo_list, node)
    {
        if (candidate->ksiginfo.signo == signo)
        {
            if (found)
            {
                /* already found, a sibling with the same signo remains */
                is_empty = RT_FALSE;
                break;
            }
            else
            {
                /* found the first matching entry */
                found = candidate;
                rt_list_remove(&found->node);
            }
        }
        else if (candidate->ksiginfo.signo > signo)
            break;
    }

    if (found && is_empty)
        _sigdelset(&sigqueue->sigset_pending, signo);

    return found;
}

/**
 * Discard all the queued signals matching `signo` in sigqueue
 */
static void sigqueue_discard(lwp_sigqueue_t sigqueue, int signo)
{
    lwp_siginfo_t queuing_si;

    while (sigqueue_ismember(sigqueue, signo))
    {
        queuing_si = sigqueue_dequeue(sigqueue, signo);
        siginfo_delete(queuing_si);
    }
}

/**
 * Discard all the queuing signals in sigset
 */
static void sigqueue_discard_sigset(lwp_sigqueue_t sigqueue, lwp_sigset_t *sigset)
{
    lwp_siginfo_t queuing_si;
    lwp_sigset_t mask;
    int signo;

    _signotsets(&mask, sigset);
    while ((signo = sigqueue_peek(sigqueue, &mask)) != 0)
    {
        queuing_si = sigqueue_dequeue(sigqueue, signo);
        siginfo_delete(queuing_si);
    }
}

/* assuming that (void *) is compatible with long in width */
RT_STATIC_ASSERT(lp_width_same, sizeof(void *) == sizeof(long));

/** translate kernel lwp siginfo to user siginfo_t */
rt_inline void siginfo_k2u(lwp_siginfo_t ksigi, siginfo_t *usigi)
{
    int signo = ksigi->ksiginfo.signo;
    usigi->si_code = ksigi->ksiginfo.code;
    usigi->si_signo = signo;
    usigi->si_pid = ksigi->ksiginfo.from_pid;

    if (ksigi->ext)
    {
        if (signo == SIGCHLD)
        {
            usigi->si_status = ksigi->ext->sigchld.status;
            usigi->si_utime = ksigi->ext->sigchld.utime;
            usigi->si_stime = ksigi->ext->sigchld.stime;
        }
    }

    /* deprecated field */
    usigi->si_errno = 0;
}

/* must be called in a locked context */
rt_inline lwp_sighandler_t _get_sighandler_locked(struct rt_lwp *lwp, int signo)
{
    return lwp->signal.sig_action[signo - 1];
}

static lwp_sigset_t *_mask_block_fn(rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set)
{
    _sigorsets(new_set, &thread->signal.sigset_mask, sigset);
    return new_set;
}

static lwp_sigset_t *_mask_unblock_fn(rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set)
{
    lwp_sigset_t complement;
    _signotsets(&complement, sigset);
    _sigandsets(new_set, &thread->signal.sigset_mask, &complement);
    return new_set;
}

static lwp_sigset_t *_mask_set_fn(rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set)
{
    memcpy(new_set, sigset, sizeof(*sigset));
    return new_set;
}

static lwp_sigset_t *(*_sig_mask_fn[__LWP_SIG_MASK_CMD_WATERMARK])
    (rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set) = {
        [LWP_SIG_MASK_CMD_BLOCK] = _mask_block_fn,
        [LWP_SIG_MASK_CMD_UNBLOCK] = _mask_unblock_fn,
        [LWP_SIG_MASK_CMD_SET_MASK] = _mask_set_fn,
};

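/**
 * Update the per-thread signal mask according to `how` (block/unblock/set),
 * in the spirit of pthread_sigmask(). SIGKILL and SIGSTOP are always removed
 * from the new mask since they cannot be blocked.
 */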
static void _thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
                                const lwp_sigset_t *sigset, lwp_sigset_t *oset)
{
    lwp_sigset_t new_set;

    /**
     * Note: POSIX wants this API to be capable of querying the current mask
     * by passing NULL in `sigset`
     */
    if (oset)
        memcpy(oset, &thread->signal.sigset_mask, sizeof(lwp_sigset_t));

    if (sigset)
    {
        _sig_mask_fn[how](thread, sigset, &new_set);

        /* remove un-maskable signals from the set */
        _sigdelset(&new_set, SIGKILL);
        _sigdelset(&new_set, SIGSTOP);

        memcpy(&thread->signal.sigset_mask, &new_set, sizeof(lwp_sigset_t));
    }
}

void lwp_sigqueue_clear(lwp_sigqueue_t sigq)
{
    lwp_siginfo_t this, next;
    if (!sigqueue_isempty(sigq))
    {
        rt_list_for_each_entry_safe(this, next, &sigq->siginfo_list, node)
        {
            siginfo_delete(this);
        }
    }
}

static void lwp_signal_notify(rt_slist_t *list_head, lwp_siginfo_t siginfo)
{
    rt_slist_t *node;

    rt_slist_for_each(node, list_head)
    {
        struct rt_lwp_notify *n = rt_slist_entry(node, struct rt_lwp_notify, list_node);
        if (n->notify)
        {
            n->notify(n->signalfd_queue, siginfo->ksiginfo.signo);
        }
    }
}

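/**
 * Initialize the per-process signal state: mark the real timer invalid, clear
 * the dispatch-thread cache and the action tables, and set up an empty signal
 * queue. lwp_signal_detach() is the matching cleanup.
 */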
rt_err_t lwp_signal_init(struct lwp_signal *sig)
{
    rt_err_t rc = RT_EOK;

    sig->real_timer = LWP_SIG_INVALID_TIMER;

    memset(&sig->sig_dispatch_thr, 0, sizeof(sig->sig_dispatch_thr));

    memset(&sig->sig_action, 0, sizeof(sig->sig_action));
    memset(&sig->sig_action_nodefer, 0, sizeof(sig->sig_action_nodefer));
    memset(&sig->sig_action_onstack, 0, sizeof(sig->sig_action_onstack));
    memset(&sig->sig_action_restart, 0, sizeof(sig->sig_action_restart));
    memset(&sig->sig_action_siginfo, 0, sizeof(sig->sig_action_siginfo));
    memset(&sig->sig_action_nocldstop, 0, sizeof(sig->sig_action_nocldstop));
    memset(&sig->sig_action_nocldwait, 0, sizeof(sig->sig_action_nocldwait));
    lwp_sigqueue_init(&sig->sig_queue);
    return rc;
}

rt_err_t lwp_signal_detach(struct lwp_signal *signal)
{
    rt_err_t ret = RT_EOK;

    timer_delete(signal->real_timer);
    lwp_sigqueue_clear(&signal->sig_queue);

    return ret;
}

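/**
 * Check whether a thread can be suspended with `suspend_flag`:
 * - RT_INTERRUPTIBLE: refuse if any unblocked signal (SIGCONT excluded from
 *   the check) is pending on the thread or its process;
 * - RT_KILLABLE: refuse only if SIGKILL is pending;
 * - RT_UNINTERRUPTIBLE: always allowed.
 * Returns 1 if the suspension may proceed, 0 otherwise.
 */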
int lwp_thread_signal_suspend_check(rt_thread_t thread, int suspend_flag)
{
    struct rt_lwp *lwp = (struct rt_lwp *)thread->lwp;
    lwp_sigset_t sigmask = thread->signal.sigset_mask;
    int ret = 0;

    _sigaddset(&sigmask, SIGCONT);

    switch (suspend_flag)
    {
    case RT_INTERRUPTIBLE:
        if (sigqueue_peek(_SIGQ(thread), &sigmask))
        {
            break;
        }
        if (thread->lwp && sigqueue_peek(_SIGQ(lwp), &sigmask))
        {
            break;
        }
        ret = 1;
        break;
    case RT_KILLABLE:
        if (sigqueue_ismember(_SIGQ(thread), SIGKILL))
        {
            break;
        }
        if (thread->lwp && sigqueue_ismember(_SIGQ(lwp), SIGKILL))
        {
            break;
        }
        ret = 1;
        break;
    case RT_UNINTERRUPTIBLE:
        ret = 1;
        break;
    default:
        RT_ASSERT(0);
        break;
    }
    return ret;
}

rt_inline rt_bool_t _is_jobctl_signal(rt_lwp_t lwp, int signo)
{
    lwp_sigset_t jobctl_sigset = lwp_sigset_init(LWP_SIG_JOBCTL_SET);
    return lwp_sigismember(&jobctl_sigset, signo);
}

rt_inline rt_bool_t _is_stop_signal(rt_lwp_t lwp, int signo)
{
    lwp_sigset_t stop_sigset = lwp_sigset_init(LWP_SIG_STOP_SET);
    return lwp_sigismember(&stop_sigset, signo);
}

rt_inline rt_bool_t _need_notify_status_changed(rt_lwp_t lwp, int signo)
{
    RT_ASSERT(lwp_sigismember(&lwp_sigset_init(LWP_SIG_JOBCTL_SET), signo));
    return !lwp_sigismember(&lwp->signal.sig_action_nocldstop, SIGCHLD);
}

/**
 * Wake up the waitpid() waiters if any, and try to generate SIGCHLD if it is
 * not explicitly disabled by the user.
 *
 * TODO: This event is always per-process and doesn't make a whole lot of
 * sense for ptracers, who shouldn't consume the state via wait(2) either,
 * but, for backward compatibility, notify the ptracer of the group leader
 * too unless it's gonna be a duplicate.
 */
static void _notify_parent_and_leader(rt_lwp_t child_lwp, rt_thread_t child_thr, int trig_signo, rt_bool_t is_stop)
{
    int si_code;
    lwp_siginfo_ext_t ext;
    rt_lwp_t parent_lwp = child_lwp->parent;

    if (!parent_lwp)
        return;

    /* prepare the event data for the parent to query */
    if (is_stop)
    {
        si_code = CLD_STOPPED;
        child_lwp->lwp_status = LWP_CREATE_STAT_STOPPED(trig_signo);
    }
    else
    {
        si_code = CLD_CONTINUED;
        child_lwp->lwp_status = LWP_CREATE_STAT_CONTINUED;
    }

    /* wake up waiters on waitpid(2) */
    lwp_waitpid_kick(parent_lwp, child_lwp);

    if (_need_notify_status_changed(parent_lwp, trig_signo))
    {
        ext = rt_malloc(sizeof(struct lwp_siginfo_ext));
        if (ext)
        {
            ext->sigchld.status = trig_signo;

            /* TODO: signal usage is not supported */
            ext->sigchld.stime = child_thr->system_time;
            ext->sigchld.utime = child_thr->user_time;
        }

        /* generate SIGCHLD for the parent */
        lwp_signal_kill(parent_lwp, SIGCHLD, si_code, ext);
    }
}

static int _do_signal_wakeup(rt_thread_t thread, int sig);

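/**
 * Stop the current thread for a job control stop signal. Called with the lwp
 * lock held; the lock is dropped around the suspension and reacquired before
 * returning. The first thread to observe the stop also marks the process as
 * jobctl-stopped and wakes its siblings so they can stop themselves too.
 */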
static rt_err_t _stop_thread_locked(rt_lwp_t self_lwp, rt_thread_t cur_thr, int signo,
                                    lwp_siginfo_t si, lwp_sigqueue_t sq)
{
    rt_err_t error;
    int jobctl_stopped = self_lwp->jobctl_stopped;
    rt_thread_t iter;

    /* race to set up the jobctl stopped flags */
    if (!jobctl_stopped)
    {
        self_lwp->jobctl_stopped = RT_TRUE;
        self_lwp->wait_reap_stp = RT_FALSE;

        rt_list_for_each_entry(iter, &self_lwp->t_grp, sibling)
        {
            if (iter != cur_thr)
                _do_signal_wakeup(iter, signo);
        }
    }

    /**
     * raise the event again so siblings are able to catch it again.
     * `si` will be discarded when SIGCONT is generated
     */
    sigqueue_enqueue(sq, si);

    /* release the lwp lock so we can happily suspend */
    LWP_UNLOCK(self_lwp);

    rt_set_errno(RT_EOK);
    /* after suspension, only SIGKILL and SIGCONT will wake this thread up */
    error = rt_thread_suspend_with_flag(cur_thr, RT_KILLABLE);
    if (error == RT_EOK)
    {
        rt_schedule();
        error = rt_get_errno();
        error = error > 0 ? -error : error;
    }

    if (!jobctl_stopped &&
        (sigqueue_ismember(_SIGQ(self_lwp), SIGCONT) ||
         sigqueue_ismember(_SIGQ(cur_thr), SIGCONT)))
    {
        /**
         * if we are resumed by a SIGCONT and we are the winner of the race,
         * notify the parent of the incoming event
         */
        _notify_parent_and_leader(self_lwp, cur_thr, SIGCONT, RT_FALSE);
    }

    /* reacquire the lock since we released it before */
    LWP_LOCK(self_lwp);
    return error;
}

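/**
 * Run the resolved action for a dequeued signal while holding the lwp lock.
 * For SIG_DFL: SIGCONT just returns to user space with the syscall marked for
 * restart, non-ignorable signals terminate the process, the rest are ignored.
 * SIG_IGN does nothing. A user handler is entered through
 * arch_thread_signal_enter() with the action's mask applied (plus the signal
 * itself unless SA_NODEFER was set).
 */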
static void _catch_signal_locked(rt_lwp_t lwp, rt_thread_t thread, int signo,
                                 lwp_siginfo_t siginfo, lwp_sighandler_t handler,
                                 void *exp_frame)
{
    lwp_sigset_t new_sig_mask;
    lwp_sigset_t save_sig_mask;
    siginfo_t usiginfo;
    siginfo_t *p_usi;

    /* siginfo is needed for the signal action */
    if (_sigismember(&lwp->signal.sig_action_siginfo, signo))
    {
        siginfo_k2u(siginfo, &usiginfo);
        p_usi = &usiginfo;
    }
    else
    {
        p_usi = RT_NULL;
    }

    /**
     * lock is acquired by the caller. Release it so that we can happily go to
     * the signal handler in user space
     */
    LWP_UNLOCK(lwp);

    siginfo_delete(siginfo);

    /* signal default handler */
    if (handler == LWP_SIG_ACT_DFL)
    {
        lwp_sigset_t ign_sigset;

        ign_sigset = lwp_sigset_init(LWP_SIG_IGNORE_SET);
        if (signo == SIGCONT)
        {
            arch_syscall_set_errno(exp_frame, EINTR, ERESTART);
            arch_thread_signal_enter(signo, p_usi, exp_frame, 0, &thread->signal.sigset_mask);
        }
        else if (!lwp_sigismember(&ign_sigset, signo) && !lwp->sig_protected)
        {
            /* for those whose default handler is to terminate the process */
            LOG_D("%s: default handler; and exit", __func__);

            /* TODO: coredump if necessary */
            lwp_exit(lwp, LWP_CREATE_STAT_SIGNALED(signo, 0));
        }

        /**
         * otherwise the default action is to ignore the signal,
         * -> then reacquire the lock and return
         */
    }
    else if (handler == LWP_SIG_ACT_IGN)
    {
        /* do nothing */
    }
    else
    {
        /* copy the blocked signal mask from the registered signal action */
        memcpy(&new_sig_mask, &lwp->signal.sig_action_mask[signo - 1], sizeof(new_sig_mask));

        if (!_sigismember(&lwp->signal.sig_action_nodefer, signo))
            _sigaddset(&new_sig_mask, signo);

        _thread_signal_mask(thread, LWP_SIG_MASK_CMD_BLOCK, &new_sig_mask, &save_sig_mask);

        if (_sigismember(&lwp->signal.sig_action_restart, signo))
        {
            arch_syscall_set_errno(exp_frame, EINTR, ERESTART);
        }

        /**
         * enter the user's signal action
         * Note: p_usi is released before entering the signal action by
         * resetting the kernel sp.
         */
        LOG_D("%s: enter signal handler(signo=%d) at %p", __func__, signo, handler);
        arch_thread_signal_enter(signo, p_usi, exp_frame, handler, &save_sig_mask);

        /* arch_thread_signal_enter() never returns */
        RT_ASSERT(0);
    }

    /* reacquire the lock because we released it before */
    LWP_LOCK(lwp);
}

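/**
 * Entry point on the return-to-user path: pick the next pending, unblocked
 * signal (per-thread queue first, then the process queue) and handle it. A
 * stop signal with the default action suspends the thread via
 * _stop_thread_locked(); once continued, the loop retries so that signals
 * which arrived in the meantime are caught as well.
 */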
void lwp_thread_signal_catch(void *exp_frame)
{
    struct rt_thread *thread;
    struct rt_lwp *lwp;
    lwp_sigqueue_t pending;
    lwp_sigset_t *sig_mask;
    int retry_signal_catch;
    int signo;

    thread = rt_thread_self();
    lwp = (struct rt_lwp *)thread->lwp;

    RT_ASSERT(!!lwp);

    LWP_LOCK(lwp);

    do {
        /* if a stopped process resumes, we will retry to catch the signal */
        retry_signal_catch = 0;
        signo = 0;

        /* try to peek a signal which is pending and not blocked by this thread */
        if (!sigqueue_isempty(_SIGQ(thread)))
        {
            pending = _SIGQ(thread);
            sig_mask = &thread->signal.sigset_mask;
            signo = sigqueue_peek(pending, sig_mask);
        }
        if (!signo && !sigqueue_isempty(_SIGQ(lwp)))
        {
            pending = _SIGQ(lwp);
            sig_mask = &thread->signal.sigset_mask;
            signo = sigqueue_peek(pending, sig_mask);
        }

        if (signo)
        {
            lwp_siginfo_t siginfo;
            lwp_sighandler_t handler;

            LOG_D("%s(signo=%d)", __func__, signo);

            siginfo = sigqueue_dequeue(pending, signo);
            RT_ASSERT(siginfo != RT_NULL);

            handler = _get_sighandler_locked(lwp, signo);

            if (_is_stop_signal(lwp, signo) && handler == LWP_SIG_ACT_DFL)
            {
                /* notify the parent process of the status update */
                _notify_parent_and_leader(lwp, thread, signo, RT_TRUE);

                LOG_D("%s: pid=%d stopped", __func__, lwp->pid);
                _stop_thread_locked(lwp, thread, signo, siginfo, pending);
                LOG_D("%s: pid=%d continued", __func__, lwp->pid);

                /* woken up; retry to catch the signals sent to us in the meantime */
                retry_signal_catch = 1;
            }
            else
            {
                /* do a normal, non-jobctl signal handling */
                _catch_signal_locked(lwp, thread, signo, siginfo, handler, exp_frame);
            }
        }
    } while (retry_signal_catch);

    LWP_UNLOCK(lwp);
}

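/**
 * If the target thread does not block `sig` and is currently suspended in an
 * interruptible (or, for SIGKILL/SIGSTOP, killable) state, mark it with
 * RT_EINTR and wake it up. Returns 1 when a reschedule is needed.
 */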
static int _do_signal_wakeup(rt_thread_t thread, int sig)
{
    int need_schedule;
    rt_sched_lock_level_t slvl;

    if (!_sigismember(&thread->signal.sigset_mask, sig))
    {
        int stat;

        rt_sched_lock(&slvl);
        stat = rt_sched_thread_get_stat(thread);
        if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
        {
            if ((stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
            {
                thread->error = RT_EINTR;
                rt_sched_unlock(slvl);

                rt_thread_wakeup(thread);
                need_schedule = 1;
            }
            else if ((sig == SIGKILL || sig == SIGSTOP) &&
                     ((stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK))
            {
                thread->error = RT_EINTR;
                rt_sched_unlock(slvl);

                rt_thread_wakeup(thread);
                need_schedule = 1;
            }
            else
            {
                rt_sched_unlock(slvl);
                need_schedule = 0;
            }
        }
        else
        {
            rt_sched_unlock(slvl);
            need_schedule = 0;
        }

        RT_SCHED_DEBUG_IS_UNLOCKED;
    }
    else
        need_schedule = 0;

    return need_schedule;
}

/** find a candidate thread to be notified of the arrival */
static rt_thread_t _signal_find_catcher(struct rt_lwp *lwp, int signo)
{
    rt_thread_t catcher = RT_NULL;
    rt_thread_t candidate;

    candidate = lwp->signal.sig_dispatch_thr[signo - 1];
    if (candidate != RT_NULL && !_sigismember(&candidate->signal.sigset_mask, signo))
    {
        catcher = candidate;
    }
    else
    {
        candidate = rt_thread_self();

        /** Note: lwp of current is a const value that can be safely read */
        if (candidate->lwp == lwp &&
            !_sigismember(&candidate->signal.sigset_mask, signo))
        {
            catcher = candidate;
        }
        else
        {
            rt_list_for_each_entry(candidate, &lwp->t_grp, sibling)
            {
                if (!_sigismember(&candidate->signal.sigset_mask, signo))
                {
                    catcher = candidate;
                    break;
                }
            }

            /* fall back to the main thread */
            if (catcher == RT_NULL)
                catcher = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
        }

        /* reset the cached thread to catcher (even if catcher is the main thread) */
        lwp->signal.sig_dispatch_thr[signo - 1] = catcher;
    }
    return catcher;
}

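/**
 * Deliver a siginfo either to the process-wide queue (picking a catcher
 * thread to wake) or to a specific thread's queue. Both return whether a
 * reschedule is needed, as reported by _do_signal_wakeup().
 */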
static int _siginfo_deliver_to_lwp(struct rt_lwp *lwp, lwp_siginfo_t siginfo)
{
    rt_thread_t catcher;

    catcher = _signal_find_catcher(lwp, siginfo->ksiginfo.signo);

    sigqueue_enqueue(&lwp->signal.sig_queue, siginfo);
    return _do_signal_wakeup(catcher, siginfo->ksiginfo.signo);
}

static int _siginfo_deliver_to_thread(rt_thread_t thread, lwp_siginfo_t siginfo)
{
    sigqueue_enqueue(_SIGQ(thread), siginfo);
    return _do_signal_wakeup(thread, siginfo->ksiginfo.signo);
}

rt_inline rt_bool_t _sighandler_is_ignored(struct rt_lwp *lwp, int signo)
{
    rt_bool_t is_ignored;
    lwp_sighandler_t action;
    lwp_sigset_t ign_set = lwp_sigset_init(LWP_SIG_IGNORE_SET);

    action = _get_sighandler_locked(lwp, signo);

    if (action == LWP_SIG_ACT_IGN)
        is_ignored = RT_TRUE;
    else if (action == LWP_SIG_ACT_DFL && _sigismember(&ign_set, signo))
        is_ignored = RT_TRUE;
    else
        is_ignored = RT_FALSE;

    return is_ignored;
}

rt_inline rt_bool_t _sighandler_cannot_caught(struct rt_lwp *lwp, int signo)
{
    return signo == SIGKILL || signo == SIGSTOP;
}

/* called before a job control signal is delivered to the target process/thread */
static void _before_sending_jobctl_signal(int signo, rt_lwp_t target_lwp, lwp_siginfo_t si)
{
    rt_thread_t thr_iter;
    rt_sched_lock_level_t slvl;
    lwp_sigset_t jobctl_sigset = lwp_sigset_init(LWP_SIG_JOBCTL_SET);

    LWP_ASSERT_LOCKED(target_lwp);

    /**
     * dequeue all the pending jobctl signals (including
     * the one we are adding, since we don't want to pend it)
     */
    sigqueue_discard_sigset(_SIGQ(target_lwp), &jobctl_sigset);

    if (signo == SIGCONT)
    {
        target_lwp->jobctl_stopped = RT_FALSE;
        rt_list_for_each_entry(thr_iter, &target_lwp->t_grp, sibling)
        {
            rt_base_t stat;
            sigqueue_discard_sigset(_SIGQ(thr_iter), &jobctl_sigset);

            /**
             * Note: all stopped threads will be resumed
             */
            rt_sched_lock(&slvl);
            stat = rt_sched_thread_get_stat(thr_iter);
            if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK &&
                (stat & RT_SIGNAL_KILL_WAKEUP_MASK) == 0)
            {
                thr_iter->error = RT_EINTR;

                /**
                 * it doesn't matter if we fail to resume the thread, since we
                 * only care about the event passing, not the ordering here
                 */
                rt_sched_unlock(slvl);
                rt_thread_wakeup(thr_iter);
            }
            else
            {
                rt_sched_unlock(slvl);
            }
        }
    }
    else
    {
        rt_list_for_each_entry(thr_iter, &target_lwp->t_grp, sibling)
        {
            sigqueue_discard_sigset(_SIGQ(thr_iter), &jobctl_sigset);
        }
    }
}

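/**
 * Send `signo` with `code` and optional extension `value` to a process,
 * roughly the kernel side of kill()/sigqueue(). signo == 0 only performs the
 * existence/permission check; a terminated process silently consumes the
 * request. For job control signals the pending jobctl state is rewritten
 * first, then the siginfo is queued on the process and signalfd waiters are
 * notified.
 */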
rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, lwp_siginfo_ext_t value)
{
    rt_err_t ret = -1;
    lwp_siginfo_t siginfo;
    rt_bool_t terminated;
    rt_bool_t need_schedule;

    /** must be able to be suspended */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    if (!lwp || signo < 0 || signo > _LWP_NSIG)
    {
        ret = -RT_EINVAL;
    }
    else if (signo == 0)
    {
        /* the process exists and the current process has privileges */
        ret = 0;
    }
    else
    {
        LOG_D("%s(lwp=%p \"%s\",signo=%ld,code=%ld,value=%ld)",
              __func__, lwp, lwp->cmd, signo, code, value);

        need_schedule = RT_FALSE;

        LWP_LOCK(lwp);
        terminated = lwp->terminated;

        /* short-circuit code for an inactive task or ignored signals */
        if (terminated)
        {
            /* no one relies on this, so free the resource */
            if (value)
                rt_free(value);
            ret = 0;
        }
        else
        {
            siginfo = siginfo_create(rt_thread_self(), signo, code, value);

            if (siginfo)
            {
                if (_is_jobctl_signal(lwp, signo))
                    _before_sending_jobctl_signal(signo, lwp, siginfo);

                need_schedule = _siginfo_deliver_to_lwp(lwp, siginfo);
                lwp_signal_notify(&lwp->signalfd_notify_head, siginfo);
                ret = 0;
            }
            else
            {
                LOG_I("%s: siginfo malloc failed", __func__);
                ret = -RT_ENOMEM;
            }
        }

        LWP_UNLOCK(lwp);

        if (need_schedule)
            rt_schedule();
    }
    return ret;
}

static void _signal_action_flag_k2u(int signo, struct lwp_signal *signal, struct lwp_sigaction *act)
{
    long flags = 0;
    if (_sigismember(&signal->sig_action_nodefer, signo))
        flags |= SA_NODEFER;
    if (_sigismember(&signal->sig_action_onstack, signo))
        flags |= SA_ONSTACK;
    if (_sigismember(&signal->sig_action_restart, signo))
        flags |= SA_RESTART;
    if (_sigismember(&signal->sig_action_siginfo, signo))
        flags |= SA_SIGINFO;
    if (_sigismember(&signal->sig_action_nocldstop, signo))
        flags |= SA_NOCLDSTOP;
    if (_sigismember(&signal->sig_action_nocldwait, signo))
        flags |= SA_NOCLDWAIT;

    act->sa_flags = flags;
}

static void _signal_action_flag_u2k(int signo, struct lwp_signal *signal, const struct lwp_sigaction *act)
{
    long flags = act->sa_flags;
    if (flags & SA_NODEFER)
        _sigaddset(&signal->sig_action_nodefer, signo);
    if (flags & SA_ONSTACK)
        _sigaddset(&signal->sig_action_onstack, signo);
    if (flags & SA_RESTART)
        _sigaddset(&signal->sig_action_restart, signo);
    if (flags & SA_SIGINFO)
        _sigaddset(&signal->sig_action_siginfo, signo);

    if (signo == SIGCHLD)
    {
        /* these flags are meaningful only when establishing a handler for SIGCHLD */
        if (flags & SA_NOCLDSTOP)
            _sigaddset(&signal->sig_action_nocldstop, signo);
        if (flags & SA_NOCLDWAIT)
            _sigaddset(&signal->sig_action_nocldwait, signo);
    }

#define _HANDLE_FLAGS (SA_RESTORER | SA_NODEFER | SA_ONSTACK | SA_RESTART | SA_SIGINFO | SA_NOCLDSTOP | SA_NOCLDWAIT)
    if (flags & ~_HANDLE_FLAGS)
        LOG_W("Unhandled flags: 0x%lx", flags & ~_HANDLE_FLAGS);
}

rt_bool_t lwp_sigisign(struct rt_lwp *lwp, int _sig)
{
    unsigned long sig = _sig - 1;

    return lwp->signal.sig_action[sig] == LWP_SIG_ACT_IGN;
}

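/**
 * Kernel backend of sigaction(): query and/or update the action for `signo`.
 * The per-signal flag bitmaps are translated to and from sa_flags, SIGKILL
 * and SIGSTOP cannot be reconfigured, and switching a signal to SIG_IGN
 * discards any instance of it that is already pending, per POSIX.1-2017.
 */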
rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
                           const struct lwp_sigaction *restrict act,
                           struct lwp_sigaction *restrict oact)
{
    lwp_sighandler_t prev_handler;
    lwp_sigqueue_t thread_sigq;
    rt_list_t *thread_list;
    rt_err_t ret = RT_EOK;

    if (lwp)
    {
        /** acquire READ access to lwp */
        LWP_LOCK(lwp);

        if (oact)
        {
            oact->sa_mask = lwp->signal.sig_action_mask[signo - 1];
            oact->__sa_handler._sa_handler = lwp->signal.sig_action[signo - 1];
            oact->sa_restorer = RT_NULL;
            _signal_action_flag_k2u(signo, &lwp->signal, oact);
        }

        if (act)
        {
            /**
             * Note: POSIX.1-2017 requires calls to sigaction() that supply a NULL act
             * argument to succeed, even in the case of signals that cannot be caught or ignored
             */
            if (_sighandler_cannot_caught(lwp, signo))
                ret = -EINVAL;
            else
            {
                prev_handler = _get_sighandler_locked(lwp, signo);
                lwp->signal.sig_action_mask[signo - 1] = act->sa_mask;
                if (act->__sa_handler._sa_handler == SIG_IGN)
                {
                    lwp_sigset_t no_ign_set = lwp_sigset_init(LWP_SIG_NO_IGN_SET);

                    if (!lwp_sigismember(&no_ign_set, signo))
                    {
                        /* except for the unignorable signals, discard them for the process */
                        lwp->signal.sig_action[signo - 1] = LWP_SIG_ACT_IGN;
                    }
                    else
                    {
                        /* POSIX.1: SIG_IGN and SIG_DFL are equivalent for SIGCONT */
                        lwp->signal.sig_action[signo - 1] = LWP_SIG_ACT_DFL;
                    }
                }
                else
                {
                    lwp->signal.sig_action[signo - 1] = act->__sa_handler._sa_handler;
                }

                _signal_action_flag_u2k(signo, &lwp->signal, act);

                /**
                 * Brief: Discard the pending signal if the signal action is set to SIG_IGN
                 *
                 * Note: POSIX.1-2017: Setting a signal action to SIG_IGN for a signal
                 * that is pending shall cause the pending signal to be discarded,
                 * whether or not it is blocked.
                 */
                if (prev_handler != LWP_SIG_ACT_IGN &&
                    _get_sighandler_locked(lwp, signo) == LWP_SIG_ACT_IGN)
                {
                    sigqueue_discard(_SIGQ(lwp), signo);
                    for (thread_list = lwp->t_grp.next;
                         thread_list != &lwp->t_grp;
                         thread_list = thread_list->next)
                    {
                        thread_sigq = _SIGQ(rt_list_entry(thread_list, struct rt_thread, sibling));
                        sigqueue_discard(thread_sigq, signo);
                    }
                }
            }
        }
        LWP_UNLOCK(lwp);
    }
    else
        ret = -EINVAL;

    return ret;
}

rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, lwp_siginfo_ext_t value)
{
    rt_err_t ret = -1;
    struct rt_lwp *lwp;
    lwp_siginfo_t siginfo;
    rt_bool_t need_schedule;

    /** must be able to be suspended */
    RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);

    LOG_D("%s(signo=%d)", __func__, signo);

    if (!thread || signo < 0 || signo >= _LWP_NSIG)
    {
        ret = -RT_EINVAL;
    }
    else if (signo == 0)
    {
        /* the thread exists and the current thread has privileges */
        ret = 0;
    }
    else
    {
        lwp = thread->lwp;
        need_schedule = RT_FALSE;

        RT_ASSERT(lwp);
        LWP_LOCK(lwp);

        if (!lwp)
            ret = -RT_EPERM;
        else if (lwp->terminated || _sighandler_is_ignored(lwp, signo))
            ret = 0;
        else
        {
            siginfo = siginfo_create(rt_thread_self(), signo, code, value);

            if (siginfo)
            {
                need_schedule = _siginfo_deliver_to_thread(thread, siginfo);
                lwp_signal_notify(&lwp->signalfd_notify_head, siginfo);
                ret = 0;
            }
            else
            {
                LOG_I("%s: siginfo malloc failed", __func__);
                ret = -RT_ENOMEM;
            }
        }

        LWP_UNLOCK(lwp);

        if (need_schedule)
            rt_schedule();
    }
    return ret;
}

#ifndef ARCH_MM_MMU
void lwp_thread_sighandler_set(int sig, lwp_sighandler_t func)
{
    rt_base_t level;

    if (sig == 0 || sig > _LWP_NSIG)
        return;
    level = rt_hw_interrupt_disable();
    rt_thread_self()->signal_handler[sig - 1] = func;
    rt_hw_interrupt_enable(level);
}
#endif

rt_err_t lwp_thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
                                const lwp_sigset_t *sigset, lwp_sigset_t *oset)
{
    rt_err_t ret = -1;
    struct rt_lwp *lwp;

    if (thread)
    {
        lwp = (struct rt_lwp *)thread->lwp;

        LWP_LOCK(lwp);

        if (!lwp)
        {
            ret = -RT_EPERM;
        }
        else
        {
            ret = 0;
            _thread_signal_mask(thread, how, sigset, oset);
        }
        LWP_UNLOCK(lwp);
    }
    else
        ret = -RT_EINVAL;

    return ret;
}

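/**
 * Dequeue the next signal that is pending and not in `mask`, preferring the
 * thread's private queue over the process queue, and translate its siginfo
 * for user space. Returns the signal number, or 0 if nothing matched.
 */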
static int _dequeue_signal(rt_thread_t thread, lwp_sigset_t *mask, siginfo_t *usi)
{
    int signo;
    lwp_siginfo_t si;
    struct rt_lwp *lwp;
    lwp_sigset_t *pending;
    lwp_sigqueue_t sigqueue;

    lwp = thread->lwp;
    RT_ASSERT(lwp);

    sigqueue = _SIGQ(thread);
    pending = &sigqueue->sigset_pending;
    signo = _next_signal(pending, mask);
    if (!signo)
    {
        sigqueue = _SIGQ(lwp);
        pending = &sigqueue->sigset_pending;
        signo = _next_signal(pending, mask);
    }

    if (!signo)
        return signo;

    si = sigqueue_dequeue(sigqueue, signo);
    RT_ASSERT(!!si);

    siginfo_k2u(si, usi);
    siginfo_delete(si);

    return signo;
}

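/**
 * Kernel backend of sigtimedwait()/sigwaitinfo(): return immediately if one
 * of the requested signals is already pending; otherwise unblock exactly the
 * requested set and suspend, either until the timeout converted to ticks
 * expires or until a signal interrupts the wait, then retry the dequeue.
 * SIGKILL and SIGSTOP are removed from the requested set up front.
 */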
rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
                                     siginfo_t *usi, struct timespec *timeout)
{
    rt_err_t ret;
    lwp_sigset_t saved_sigset;
    lwp_sigset_t blocked_sigset;
    lwp_sigset_t dontwait_sigset;
    int sig;
    struct rt_lwp *lwp = thread->lwp;

    /**
     * Brief: POSIX
     * If one of the signals in set is already pending for the calling thread,
     * sigwaitinfo() will return immediately
     */

    /* create a mask of signals the user doesn't want or cannot catch */
    _sigdelset(sigset, SIGKILL);
    _sigdelset(sigset, SIGSTOP);
    _signotsets(&dontwait_sigset, sigset);

    LWP_LOCK(lwp);
    sig = _dequeue_signal(thread, &dontwait_sigset, usi);
    LWP_UNLOCK(lwp);
    if (sig)
        return sig;

    /**
     * Brief: POSIX
     * if none of the signals specified by set are pending, sigtimedwait() shall
     * wait for the time interval specified in the timespec structure referenced
     * by timeout.
     *
     * Note: If the pending signal arrives before the thread suspends, the suspend
     * operation will return a failure
     */
    _sigandsets(&blocked_sigset, &thread->signal.sigset_mask, &dontwait_sigset);
    _thread_signal_mask(thread, LWP_SIG_MASK_CMD_SET_MASK, &blocked_sigset, &saved_sigset);
    if (timeout)
    {
        rt_tick_t time;
        time = (timeout->tv_sec * RT_TICK_PER_SECOND) + ((timeout->tv_nsec * RT_TICK_PER_SECOND) / NANOSECOND_PER_SECOND);

        /**
         * Brief: POSIX
         * If the timespec structure pointed to by timeout is zero-valued and
         * if none of the signals specified by set are pending, then
         * sigtimedwait() shall return immediately with an error
         */
        if (time == 0)
        {
            /* restore the original mask before returning immediately */
            _thread_signal_mask(thread, LWP_SIG_MASK_CMD_SET_MASK, &saved_sigset, RT_NULL);
            return -EAGAIN;
        }

        rt_enter_critical();
        ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
        rt_timer_control(&(thread->thread_timer),
                         RT_TIMER_CTRL_SET_TIME,
                         &time);
        rt_timer_start(&(thread->thread_timer));
        rt_exit_critical();
    }
    else
    {
        /* suspend forever until a signal is received */
        ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
    }

    if (ret == RT_EOK)
    {
        rt_schedule();
        /* Is thread->error reliable? */
        if (thread->error == RT_EINTR)
            ret = -EINTR;
        else
            ret = -EAGAIN;
    }
    /* else ret == -EINTR */

    _thread_signal_mask(thread, LWP_SIG_MASK_CMD_SET_MASK, &saved_sigset, RT_NULL);

    LWP_LOCK(lwp);
    sig = _dequeue_signal(thread, &dontwait_sigset, usi);
    LWP_UNLOCK(lwp);

    return sig ? sig : ret;
}

void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *pending)
{
    struct rt_lwp *lwp;
    lwp = thread->lwp;

    if (lwp)
    {
        memset(pending, 0, sizeof(*pending));

        LWP_LOCK(lwp);
        sigqueue_examine(_SIGQ(thread), pending);
        sigqueue_examine(_SIGQ(lwp), pending);
        LWP_UNLOCK(lwp);

        _sigandsets(pending, pending, &thread->signal.sigset_mask);
    }
}

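/**
 * Broadcast helpers: lwp_pgrp_signal_kill() delivers the signal to every
 * process of an already-locked process group, and lwp_signal_kill_all()
 * walks every pid in the system, skipping processes marked sig_protected.
 */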
rt_err_t lwp_pgrp_signal_kill(rt_processgroup_t pgrp, long signo, long code,
                              lwp_siginfo_ext_t value)
{
    struct rt_lwp *lwp;
    rt_err_t rc = 0;

    PGRP_ASSERT_LOCKED(pgrp);

    if (pgrp)
    {
        rt_list_for_each_entry(lwp, &pgrp->process, pgrp_node)
        {
            lwp_signal_kill(lwp, signo, code, value);
        }
    }

    return rc;
}

struct kill_all_param
{
    long signo;
    long code;
    lwp_siginfo_ext_t value;
};

static int _kill_each(pid_t pid, void *data)
{
    struct kill_all_param *param = data;
    rt_lwp_t lwp;
    rt_err_t error;

    lwp = lwp_from_pid_locked(pid);
    if (lwp && !lwp->sig_protected)
    {
        error = lwp_signal_kill(lwp, param->signo, param->code, param->value);
    }
    else
    {
        error = RT_EOK;
    }

    return error;
}

rt_err_t lwp_signal_kill_all(long signo, long code, lwp_siginfo_ext_t value)
{
    struct kill_all_param buf =
    {
        .signo = signo,
        .code = code,
        .value = value,
    };

    return lwp_pid_for_each(_kill_each, &buf);
}