|
@@ -1,21 +1,115 @@
|
|
|
/*
|
|
|
- * Copyright (c) 2006-2020, RT-Thread Development Team
|
|
|
+ * Copyright (c) 2006-2023, RT-Thread Development Team
|
|
|
*
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
*
|
|
|
* Change Logs:
|
|
|
* Date Author Notes
|
|
|
* 2019-11-12 Jesven first version
|
|
|
+ * 2023-02-23 Shell Support sigtimedwait
|
|
|
+ * 2023-07-04 Shell Support siginfo, sigqueue
|
|
|
+ * remove lwp_signal_backup/restore() to reduce architecture-specific code
|
|
|
+ * update the generation, pending and delivery routines
|
|
|
*/
|
|
|
|
|
|
+#define DBG_TAG "LWP_SIGNAL"
|
|
|
+#define DBG_LVL DBG_INFO
|
|
|
+#include <rtdbg.h>
|
|
|
+
|
|
|
#include <rthw.h>
|
|
|
#include <rtthread.h>
|
|
|
+#include <string.h>
|
|
|
|
|
|
#include "lwp.h"
|
|
|
#include "lwp_arch.h"
|
|
|
+#include "lwp_signal.h"
|
|
|
#include "sys/signal.h"
|
|
|
+#include "syscall_generic.h"
|
|
|
+
|
|
|
+static lwp_siginfo_t siginfo_create(int signo, int code, int value)
|
|
|
+{
|
|
|
+ lwp_siginfo_t siginfo;
|
|
|
+ struct rt_lwp *self_lwp;
|
|
|
+ rt_thread_t self_thr;
|
|
|
+
|
|
|
+ siginfo = rt_malloc(sizeof(*siginfo));
|
|
|
+ if (siginfo)
|
|
|
+ {
|
|
|
+ siginfo->ksiginfo.signo = signo;
|
|
|
+ siginfo->ksiginfo.code = code;
|
|
|
+ siginfo->ksiginfo.value = value;
|
|
|
+
|
|
|
+ self_lwp = lwp_self();
|
|
|
+ self_thr = rt_thread_self();
|
|
|
+ if (self_lwp)
|
|
|
+ {
|
|
|
+ siginfo->ksiginfo.from_pid = self_lwp->pid;
|
|
|
+ siginfo->ksiginfo.from_tid = self_thr->tid;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
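+ /* no sending user context: the siginfo was generated from inside the kernel */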
|
|
|
+ siginfo->ksiginfo.from_pid = 0;
|
|
|
+ siginfo->ksiginfo.from_tid = 0;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return siginfo;
|
|
|
+}
|
|
|
+
|
|
|
+rt_inline void siginfo_delete(lwp_siginfo_t siginfo)
|
|
|
+{
|
|
|
+ rt_free(siginfo);
|
|
|
+}
|
|
|
+
|
|
|
+rt_inline void _sigorsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
|
|
|
+{
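+ /* each case intentionally falls through: one machine word of the set is OR-ed per case */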
|
|
|
+ switch (_LWP_NSIG_WORDS)
|
|
|
+ {
|
|
|
+ case 4:
|
|
|
+ dset->sig[3] = set0->sig[3] | set1->sig[3];
|
|
|
+ dset->sig[2] = set0->sig[2] | set1->sig[2];
|
|
|
+ case 2:
|
|
|
+ dset->sig[1] = set0->sig[1] | set1->sig[1];
|
|
|
+ case 1:
|
|
|
+ dset->sig[0] = set0->sig[0] | set1->sig[0];
|
|
|
+ default:
|
|
|
+ return;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+rt_inline void _sigandsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
|
|
|
+{
|
|
|
+ switch (_LWP_NSIG_WORDS)
|
|
|
+ {
|
|
|
+ case 4:
|
|
|
+ dset->sig[3] = set0->sig[3] & set1->sig[3];
|
|
|
+ dset->sig[2] = set0->sig[2] & set1->sig[2];
|
|
|
+ case 2:
|
|
|
+ dset->sig[1] = set0->sig[1] & set1->sig[1];
|
|
|
+ case 1:
|
|
|
+ dset->sig[0] = set0->sig[0] & set1->sig[0];
|
|
|
+ default:
|
|
|
+ return;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+rt_inline void _signotsets(lwp_sigset_t *dset, const lwp_sigset_t *set)
|
|
|
+{
|
|
|
+ switch (_LWP_NSIG_WORDS)
|
|
|
+ {
|
|
|
+ case 4:
|
|
|
+ dset->sig[3] = ~set->sig[3];
|
|
|
+ dset->sig[2] = ~set->sig[2];
|
|
|
+ case 2:
|
|
|
+ dset->sig[1] = ~set->sig[1];
|
|
|
+ case 1:
|
|
|
+ dset->sig[0] = ~set->sig[0];
|
|
|
+ default:
|
|
|
+ return;
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
-rt_inline void lwp_sigaddset(lwp_sigset_t *set, int _sig)
|
|
|
+rt_inline void _sigaddset(lwp_sigset_t *set, int _sig)
|
|
|
{
|
|
|
unsigned long sig = _sig - 1;
|
|
|
|
|
@@ -29,7 +123,7 @@ rt_inline void lwp_sigaddset(lwp_sigset_t *set, int _sig)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-rt_inline void lwp_sigdelset(lwp_sigset_t *set, int _sig)
|
|
|
+rt_inline void _sigdelset(lwp_sigset_t *set, int _sig)
|
|
|
{
|
|
|
unsigned long sig = _sig - 1;
|
|
|
|
|
@@ -43,7 +137,7 @@ rt_inline void lwp_sigdelset(lwp_sigset_t *set, int _sig)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-rt_inline int lwp_sigisemptyset(lwp_sigset_t *set)
|
|
|
+rt_inline int _sigisemptyset(lwp_sigset_t *set)
|
|
|
{
|
|
|
switch (_LWP_NSIG_WORDS)
|
|
|
{
|
|
@@ -59,7 +153,7 @@ rt_inline int lwp_sigisemptyset(lwp_sigset_t *set)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-rt_inline int lwp_sigismember(lwp_sigset_t *set, int _sig)
|
|
|
+rt_inline int _sigismember(lwp_sigset_t *set, int _sig)
|
|
|
{
|
|
|
unsigned long sig = _sig - 1;
|
|
|
|
|
@@ -73,7 +167,7 @@ rt_inline int lwp_sigismember(lwp_sigset_t *set, int _sig)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-rt_inline int next_signal(lwp_sigset_t *pending, lwp_sigset_t *mask)
|
|
|
+rt_inline int _next_signal(lwp_sigset_t *pending, lwp_sigset_t *mask)
|
|
|
{
|
|
|
unsigned long i, *s, *m, x;
|
|
|
int sig = 0;
|
|
@@ -116,7 +210,218 @@ rt_inline int next_signal(lwp_sigset_t *pending, lwp_sigset_t *mask)
|
|
|
return sig;
|
|
|
}
|
|
|
|
|
|
-int lwp_suspend_sigcheck(rt_thread_t thread, int suspend_flag)
|
|
|
+#define _SIGQ(tp) (&(tp)->signal.sig_queue)
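+/* _SIGQ() works for both struct rt_lwp and rt_thread, since each embeds a signal.sig_queue member */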
|
|
|
+
|
|
|
+rt_inline int sigqueue_isempty(lwp_sigqueue_t sigqueue)
|
|
|
+{
|
|
|
+ return _sigisemptyset(&sigqueue->sigset_pending);
|
|
|
+}
|
|
|
+
|
|
|
+rt_inline int sigqueue_ismember(lwp_sigqueue_t sigqueue, int signo)
|
|
|
+{
|
|
|
+ return _sigismember(&sigqueue->sigset_pending, signo);
|
|
|
+}
|
|
|
+
|
|
|
+rt_inline int sigqueue_peek(lwp_sigqueue_t sigqueue, lwp_sigset_t *mask)
|
|
|
+{
|
|
|
+ return _next_signal(&sigqueue->sigset_pending, mask);
|
|
|
+}
|
|
|
+
|
|
|
+rt_inline int sigqueue_examine(lwp_sigqueue_t sigqueue, lwp_sigset_t *pending)
|
|
|
+{
|
|
|
+ int is_empty = sigqueue_isempty(sigqueue);
+ if (!is_empty)
+ {
+ /* merge this queue's pending bits into the caller supplied set */
+ _sigorsets(pending, pending, &sigqueue->sigset_pending);
|
|
|
+ }
|
|
|
+ return is_empty;
|
|
|
+}
|
|
|
+
|
|
|
+static void sigqueue_enqueue(lwp_sigqueue_t sigqueue, lwp_siginfo_t siginfo)
|
|
|
+{
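+ /* keep the list ordered by signal number (ascending); entries with equal signo stay in FIFO order */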
|
|
|
+ lwp_siginfo_t idx;
|
|
|
+ rt_bool_t inserted = RT_FALSE;
|
|
|
+ rt_list_for_each_entry(idx, &sigqueue->siginfo_list, node)
|
|
|
+ {
|
|
|
+ if (idx->ksiginfo.signo > siginfo->ksiginfo.signo)
+ {
+ rt_list_insert_before(&idx->node, &siginfo->node);
|
|
|
+ inserted = RT_TRUE;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if (!inserted)
|
|
|
+ rt_list_insert_before(&sigqueue->siginfo_list, &siginfo->node);
|
|
|
+
|
|
|
+ _sigaddset(&sigqueue->sigset_pending, siginfo->ksiginfo.signo);
|
|
|
+ return;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * dequeue a siginfo matching the given signo, which is expected to exist, and
+ * check whether any sibling entries with the same signo remain
|
|
|
+ */
|
|
|
+static lwp_siginfo_t sigqueue_dequeue(lwp_sigqueue_t sigqueue, int signo)
|
|
|
+{
|
|
|
+ lwp_siginfo_t found;
|
|
|
+ lwp_siginfo_t candidate;
|
|
|
+ lwp_siginfo_t next;
|
|
|
+ rt_bool_t is_empty;
|
|
|
+
|
|
|
+ found = RT_NULL;
|
|
|
+ is_empty = RT_TRUE;
|
|
|
+ rt_list_for_each_entry_safe(candidate, next, &sigqueue->siginfo_list, node)
|
|
|
+ {
|
|
|
+ if (candidate->ksiginfo.signo == signo)
|
|
|
+ {
|
|
|
+ if (found)
|
|
|
+ {
|
|
|
+ /* already found */
|
|
|
+ is_empty = RT_FALSE;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ /* found first */
|
|
|
+ found = candidate;
|
|
|
+ rt_list_remove(&found->node);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ else if (candidate->ksiginfo.signo > signo)
|
|
|
+ break;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (found && is_empty)
|
|
|
+ _sigdelset(&sigqueue->sigset_pending, signo);
|
|
|
+
|
|
|
+ return found;
|
|
|
+}
|
|
|
+
|
|
|
+static void sigqueue_discard(lwp_sigqueue_t sigqueue, int signo)
|
|
|
+{
|
|
|
+ lwp_siginfo_t queuing_si;
|
|
|
+ /* drop every queued instance of signo; other pending signals are left untouched */
+ while (sigqueue_ismember(sigqueue, signo))
|
|
|
+ {
|
|
|
+ queuing_si = sigqueue_dequeue(sigqueue, signo);
|
|
|
+ siginfo_delete(queuing_si);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+/* assuming that (void *) and long have the same width */
|
|
|
+RT_CTASSERT(lp_width_same, sizeof(void *) == sizeof(long));
|
|
|
+
|
|
|
+/** translate lwp siginfo to user siginfo_t */
|
|
|
+rt_inline void siginfo_k2u(lwp_siginfo_t ksigi, siginfo_t *usigi)
|
|
|
+{
|
|
|
+ usigi->si_code = ksigi->ksiginfo.code;
|
|
|
+ usigi->si_signo = ksigi->ksiginfo.signo;
|
|
|
+ usigi->si_value.sival_ptr = (void *)ksigi->ksiginfo.value;
|
|
|
+ usigi->si_pid = ksigi->ksiginfo.from_pid;
|
|
|
+
|
|
|
+ /* si_errno is not carried by the kernel siginfo; report 0 */
|
|
|
+ usigi->si_errno = 0;
|
|
|
+}
|
|
|
+
|
|
|
+/* must be called in a locked context */
|
|
|
+rt_inline lwp_sighandler_t _get_sighandler_locked(struct rt_lwp *lwp, int signo)
|
|
|
+{
|
|
|
+ return lwp->signal.sig_action[signo - 1];
|
|
|
+}
|
|
|
+
|
|
|
+static lwp_sigset_t *_mask_block_fn(rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set)
|
|
|
+{
|
|
|
+ _sigorsets(new_set, &thread->signal.sigset_mask, sigset);
|
|
|
+ return new_set;
|
|
|
+}
|
|
|
+
|
|
|
+static lwp_sigset_t *_mask_unblock_fn(rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set)
|
|
|
+{
|
|
|
+ lwp_sigset_t complement;
|
|
|
+ _signotsets(&complement, sigset);
|
|
|
+ _sigandsets(new_set, &thread->signal.sigset_mask, &complement);
|
|
|
+ return new_set;
|
|
|
+}
|
|
|
+
|
|
|
+static lwp_sigset_t *_mask_set_fn(rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set)
|
|
|
+{
|
|
|
+ memcpy(new_set, sigset, sizeof(*sigset));
|
|
|
+ return new_set;
|
|
|
+}
|
|
|
+
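+/* dispatch table mapping sigprocmask()-style commands (block/unblock/setmask) to mask-update helpers */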
|
|
|
+static lwp_sigset_t *(*_sig_mask_fn[__LWP_SIG_MASK_CMD_WATERMARK])
|
|
|
+ (rt_thread_t thread, const lwp_sigset_t *sigset, lwp_sigset_t *new_set) = {
|
|
|
+ [LWP_SIG_MASK_CMD_BLOCK] = _mask_block_fn,
|
|
|
+ [LWP_SIG_MASK_CMD_UNBLOCK] = _mask_unblock_fn,
|
|
|
+ [LWP_SIG_MASK_CMD_SET_MASK] = _mask_set_fn,
|
|
|
+ };
|
|
|
+
|
|
|
+static void _thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
|
|
|
+ const lwp_sigset_t *sigset, lwp_sigset_t *oset)
|
|
|
+{
|
|
|
+ lwp_sigset_t new_set;
|
|
|
+
|
|
|
+ /**
|
|
|
+ * @note POSIX requires this API to be able to query the current mask
+ * by passing NULL in `sigset`
|
|
|
+ */
|
|
|
+ if (oset)
|
|
|
+ memcpy(oset, &thread->signal.sigset_mask, sizeof(lwp_sigset_t));
|
|
|
+
|
|
|
+ if (sigset)
|
|
|
+ {
|
|
|
+ _sig_mask_fn[how](thread, sigset, &new_set);
|
|
|
+
|
|
|
+ /* remove the unmaskable signals (SIGKILL, SIGSTOP) from the set */
|
|
|
+ _sigdelset(&new_set, SIGKILL);
|
|
|
+ _sigdelset(&new_set, SIGSTOP);
|
|
|
+
|
|
|
+ memcpy(&thread->signal.sigset_mask, &new_set, sizeof(lwp_sigset_t));
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+void lwp_sigqueue_clear(lwp_sigqueue_t sigq)
|
|
|
+{
|
|
|
+ lwp_siginfo_t this, next;
|
|
|
+ if (!sigqueue_isempty(sigq))
|
|
|
+ {
|
|
|
+ rt_list_for_each_entry_safe(this, next, &sigq->siginfo_list, node)
|
|
|
+ {
|
|
|
+ siginfo_delete(this);
|
|
|
+ }
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+rt_err_t lwp_signal_init(struct lwp_signal *sig)
|
|
|
+{
|
|
|
+ rt_err_t rc;
|
|
|
+ rc = rt_mutex_init(&sig->sig_lock, "lwpsig", RT_IPC_FLAG_FIFO);
|
|
|
+ if (rc == RT_EOK)
|
|
|
+ {
|
|
|
+ memset(&sig->sig_dispatch_thr, 0, sizeof(sig->sig_dispatch_thr));
|
|
|
+
|
|
|
+ memset(&sig->sig_action, 0, sizeof(sig->sig_action));
|
|
|
+ memset(&sig->sig_action_nodefer, 0, sizeof(sig->sig_action_nodefer));
|
|
|
+ memset(&sig->sig_action_onstack, 0, sizeof(sig->sig_action_onstack));
|
|
|
+ memset(&sig->sig_action_restart, 0, sizeof(sig->sig_action_restart));
|
|
|
+ memset(&sig->sig_action_siginfo, 0, sizeof(sig->sig_action_siginfo));
|
|
|
+ lwp_sigqueue_init(&sig->sig_queue);
|
|
|
+ }
|
|
|
+ return rc;
|
|
|
+}
|
|
|
+
|
|
|
+rt_err_t lwp_signal_detach(struct lwp_signal *signal)
|
|
|
+{
|
|
|
+ rt_err_t ret;
|
|
|
+
|
|
|
+ lwp_sigqueue_clear(&signal->sig_queue);
|
|
|
+ ret = rt_mutex_detach(&signal->sig_lock);
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+int lwp_thread_signal_suspend_check(rt_thread_t thread, int suspend_flag)
|
|
|
{
|
|
|
struct rt_lwp *lwp = (struct rt_lwp*)thread->lwp;
|
|
|
int ret = 0;
|
|
@@ -124,22 +429,22 @@ int lwp_suspend_sigcheck(rt_thread_t thread, int suspend_flag)
|
|
|
switch (suspend_flag)
|
|
|
{
|
|
|
case RT_INTERRUPTIBLE:
|
|
|
- if (!lwp_sigisemptyset(&thread->signal))
|
|
|
+ if (!sigqueue_isempty(_SIGQ(thread)))
|
|
|
{
|
|
|
break;
|
|
|
}
|
|
|
- if (thread->lwp && !lwp_sigisemptyset(&lwp->signal))
|
|
|
+ if (thread->lwp && !sigqueue_isempty(_SIGQ(lwp)))
|
|
|
{
|
|
|
break;
|
|
|
}
|
|
|
ret = 1;
|
|
|
break;
|
|
|
case RT_KILLABLE:
|
|
|
- if (lwp_sigismember(&thread->signal, SIGKILL))
|
|
|
+ if (sigqueue_ismember(_SIGQ(thread), SIGKILL))
|
|
|
{
|
|
|
break;
|
|
|
}
|
|
|
- if (thread->lwp && lwp_sigismember(&lwp->signal, SIGKILL))
|
|
|
+ if (thread->lwp && sigqueue_ismember(_SIGQ(lwp), SIGKILL))
|
|
|
{
|
|
|
break;
|
|
|
}
|
|
@@ -155,452 +460,573 @@ int lwp_suspend_sigcheck(rt_thread_t thread, int suspend_flag)
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-int lwp_signal_check(void)
|
|
|
+void lwp_thread_signal_catch(void *exp_frame)
|
|
|
{
|
|
|
rt_base_t level;
|
|
|
+ int signo;
|
|
|
struct rt_thread *thread;
|
|
|
struct rt_lwp *lwp;
|
|
|
- uint32_t have_signal = 0;
|
|
|
-
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
+ lwp_siginfo_t siginfo;
|
|
|
+ lwp_sigqueue_t pending;
|
|
|
+ lwp_sigset_t *sig_mask;
|
|
|
+ lwp_sigset_t save_sig_mask;
|
|
|
+ lwp_sigset_t new_sig_mask;
|
|
|
+ lwp_sighandler_t handler;
|
|
|
+ siginfo_t usiginfo;
|
|
|
+ siginfo_t *p_usi;
|
|
|
|
|
|
thread = rt_thread_self();
|
|
|
-
|
|
|
- if (thread->signal_in_process)
|
|
|
- {
|
|
|
- goto out;
|
|
|
- }
|
|
|
-
|
|
|
lwp = (struct rt_lwp*)thread->lwp;
|
|
|
|
|
|
- if (lwp->signal_in_process)
|
|
|
+ RT_ASSERT(!!lwp);
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
+
|
|
|
+ /* check whether a signal is pending; thread-directed signals take precedence over process-wide ones */
|
|
|
+ if (!sigqueue_isempty(_SIGQ(thread)))
|
|
|
{
|
|
|
- goto out;
|
|
|
+ pending = _SIGQ(thread);
|
|
|
+ sig_mask = &thread->signal.sigset_mask;
|
|
|
}
|
|
|
-
|
|
|
- have_signal = !lwp_sigisemptyset(&thread->signal);
|
|
|
- if (have_signal)
|
|
|
+ else if (!sigqueue_isempty(_SIGQ(lwp)))
|
|
|
{
|
|
|
- thread->signal_in_process = 1;
|
|
|
- goto out;
|
|
|
+ pending = _SIGQ(lwp);
|
|
|
+ sig_mask = &thread->signal.sigset_mask;
|
|
|
}
|
|
|
- have_signal = !lwp_sigisemptyset(&lwp->signal);
|
|
|
- if (have_signal)
|
|
|
+ else
|
|
|
{
|
|
|
- lwp->signal_in_process = 1;
|
|
|
+ pending = RT_NULL;
|
|
|
}
|
|
|
-out:
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
- return have_signal;
|
|
|
-}
|
|
|
-
|
|
|
-int lwp_signal_backup(void *user_sp, void *user_pc, void* user_flag)
|
|
|
-{
|
|
|
- rt_base_t level;
|
|
|
- struct rt_thread *thread;
|
|
|
- struct rt_lwp *lwp;
|
|
|
- int signal;
|
|
|
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
- thread = rt_thread_self();
|
|
|
- if (thread->signal_in_process)
|
|
|
+ if (pending)
|
|
|
{
|
|
|
- thread->user_ctx.sp = user_sp;
|
|
|
- thread->user_ctx.pc = user_pc;
|
|
|
- thread->user_ctx.flag = user_flag;
|
|
|
+ /* peek the pending signal */
|
|
|
+ signo = sigqueue_peek(pending, sig_mask);
|
|
|
+ if (signo)
|
|
|
+ {
|
|
|
+ siginfo = sigqueue_dequeue(pending, signo);
|
|
|
+ RT_ASSERT(siginfo != RT_NULL);
|
|
|
+ handler = _get_sighandler_locked(lwp, signo);
|
|
|
|
|
|
- signal = next_signal(&thread->signal, &thread->signal_mask);
|
|
|
- RT_ASSERT(signal != 0);
|
|
|
- lwp_sigaddset(&thread->signal_mask, signal);
|
|
|
- thread->signal_mask_bak = signal;
|
|
|
- lwp_sigdelset(&thread->signal, signal);
|
|
|
- }
|
|
|
- else
|
|
|
- {
|
|
|
- lwp = (struct rt_lwp*)thread->lwp;
|
|
|
- lwp->user_ctx.sp = user_sp;
|
|
|
- lwp->user_ctx.pc = user_pc;
|
|
|
- lwp->user_ctx.flag = user_flag;
|
|
|
+ /* IGN signal will never be queued */
|
|
|
+ RT_ASSERT(handler != LWP_SIG_ACT_IGN);
|
|
|
|
|
|
- signal = next_signal(&lwp->signal, &lwp->signal_mask);
|
|
|
- RT_ASSERT(signal != 0);
|
|
|
- lwp_sigaddset(&lwp->signal_mask, signal);
|
|
|
- lwp->signal_mask_bak = signal;
|
|
|
- lwp_sigdelset(&lwp->signal, signal);
|
|
|
- }
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
- return signal;
|
|
|
-}
|
|
|
+ /* copy the blocked signal mask from the registered signal action */
|
|
|
+ memcpy(&new_sig_mask, &lwp->signal.sig_action_mask[signo - 1], sizeof(new_sig_mask));
|
|
|
|
|
|
-struct rt_user_context *lwp_signal_restore(void)
|
|
|
-{
|
|
|
- rt_base_t level;
|
|
|
- struct rt_thread *thread;
|
|
|
- struct rt_lwp *lwp;
|
|
|
- struct rt_user_context *ctx;
|
|
|
+ if (!_sigismember(&lwp->signal.sig_action_nodefer, signo))
|
|
|
+ _sigaddset(&new_sig_mask, signo);
|
|
|
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
- thread = rt_thread_self();
|
|
|
- if (thread->signal_in_process)
|
|
|
- {
|
|
|
- ctx = &thread->user_ctx;
|
|
|
- thread->signal_in_process = 0;
|
|
|
+ _thread_signal_mask(thread, LWP_SIG_MASK_CMD_BLOCK, &new_sig_mask, &save_sig_mask);
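+ /* save_sig_mask is handed down to the arch layer, presumably so the original mask can be restored when the handler returns */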
|
|
|
|
|
|
- lwp_sigdelset(&thread->signal_mask, thread->signal_mask_bak);
|
|
|
- thread->signal_mask_bak = 0;
|
|
|
+ /* the action was registered with SA_SIGINFO, so a siginfo_t must be passed to the handler */
|
|
|
+ if (_sigismember(&lwp->signal.sig_action_siginfo, signo))
|
|
|
+ {
|
|
|
+ siginfo_k2u(siginfo, &usiginfo);
|
|
|
+ p_usi = &usiginfo;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ p_usi = RT_NULL;
|
|
|
+ }
|
|
|
}
|
|
|
- else
|
|
|
+ rt_hw_interrupt_enable(level);
|
|
|
+
|
|
|
+ if (pending && signo)
|
|
|
{
|
|
|
- lwp = (struct rt_lwp*)thread->lwp;
|
|
|
- ctx = &lwp->user_ctx;
|
|
|
- RT_ASSERT(lwp->signal_in_process != 0);
|
|
|
- lwp->signal_in_process = 0;
|
|
|
+ siginfo_delete(siginfo);
|
|
|
|
|
|
- lwp_sigdelset(&lwp->signal_mask, lwp->signal_mask_bak);
|
|
|
- lwp->signal_mask_bak = 0;
|
|
|
+ /* signal default handler */
|
|
|
+ if (handler == LWP_SIG_ACT_DFL)
|
|
|
+ {
|
|
|
+ LOG_D("%s: default handler; and exit", __func__);
|
|
|
+ sys_exit(0);
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ * enter the user's signal action
+ * @note p_usi is released before entering the signal action, by
+ * resetting the kernel sp
|
|
|
+ */
|
|
|
+ arch_thread_signal_enter(signo, p_usi, exp_frame, handler, &save_sig_mask);
|
|
|
+ /* arch_thread_signal_enter() never returns */
|
|
|
+ RT_ASSERT(0);
|
|
|
}
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
- return ctx;
|
|
|
}
|
|
|
|
|
|
-rt_inline int _lwp_check_ignore(int sig)
|
|
|
+static int _do_signal_wakeup(rt_thread_t thread, int sig)
|
|
|
{
|
|
|
- if (sig == SIGCHLD || sig == SIGCONT)
|
|
|
+ int need_schedule;
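+ /* a suspended thread is only woken if its suspend flag permits it: any signal for an interruptible suspend, SIGKILL for a killable one */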
|
|
|
+ if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
|
|
|
{
|
|
|
- return 1;
|
|
|
+
|
|
|
+ if ((thread->stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
|
|
|
+ {
|
|
|
+ rt_thread_wakeup(thread);
|
|
|
+ need_schedule = 1;
|
|
|
+ }
|
|
|
+ else if ((sig == SIGKILL) && ((thread->stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK))
|
|
|
+ {
|
|
|
+ rt_thread_wakeup(thread);
|
|
|
+ need_schedule = 1;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ need_schedule = 0;
|
|
|
+ }
|
|
|
}
|
|
|
- return 0;
|
|
|
+ else
|
|
|
+ need_schedule = 0;
|
|
|
+
|
|
|
+ return need_schedule;
|
|
|
}
|
|
|
|
|
|
-void sys_exit(int value);
|
|
|
-lwp_sighandler_t lwp_sighandler_get(int sig)
|
|
|
+/** find a candidate to be notified of the arrival */
|
|
|
+static rt_thread_t _signal_find_catcher(struct rt_lwp *lwp, int signo)
|
|
|
{
|
|
|
- lwp_sighandler_t func = RT_NULL;
|
|
|
- struct rt_lwp *lwp;
|
|
|
- rt_thread_t thread;
|
|
|
- rt_base_t level;
|
|
|
+ rt_thread_t catcher = RT_NULL;
|
|
|
+ rt_thread_t candidate;
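+ /* try the cached per-signal dispatch thread first: the thread that caught this signal last time */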
|
|
|
|
|
|
- if (sig == 0 || sig > _LWP_NSIG)
|
|
|
+ candidate = lwp->signal.sig_dispatch_thr[signo - 1];
|
|
|
+ if (candidate != RT_NULL && !_sigismember(&candidate->signal.sigset_mask, signo))
|
|
|
{
|
|
|
- return func;
|
|
|
+ catcher = candidate;
|
|
|
}
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
- thread = rt_thread_self();
|
|
|
-#ifndef ARCH_MM_MMU
|
|
|
- if (thread->signal_in_process)
|
|
|
+ else
|
|
|
{
|
|
|
- func = thread->signal_handler[sig - 1];
|
|
|
- goto out;
|
|
|
- }
|
|
|
-#endif
|
|
|
- lwp = (struct rt_lwp*)thread->lwp;
|
|
|
+ candidate = rt_thread_self();
|
|
|
|
|
|
- func = lwp->signal_handler[sig - 1];
|
|
|
- if (!func)
|
|
|
- {
|
|
|
- if (_lwp_check_ignore(sig))
|
|
|
+ /** @note the lwp field of the current thread never changes, so it can be read without locking */
|
|
|
+ if (candidate->lwp == lwp &&
|
|
|
+ !_sigismember(&candidate->signal.sigset_mask, signo))
|
|
|
{
|
|
|
- goto out;
|
|
|
+ catcher = candidate;
|
|
|
}
|
|
|
- if (lwp->signal_in_process)
|
|
|
+ else
|
|
|
{
|
|
|
- lwp_terminate(lwp);
|
|
|
+ rt_list_for_each_entry(candidate, &lwp->t_grp, sibling)
|
|
|
+ {
|
|
|
+ if (!_sigismember(&candidate->signal.sigset_mask, signo))
|
|
|
+ {
|
|
|
+ catcher = candidate;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /* fall back to main thread */
|
|
|
+ if (catcher == RT_NULL)
|
|
|
+ catcher = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
|
|
|
}
|
|
|
- sys_exit(0);
|
|
|
- }
|
|
|
-out:
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
|
|
|
- if (func == (lwp_sighandler_t)SIG_IGN)
|
|
|
- {
|
|
|
- func = RT_NULL;
|
|
|
+ /* update the cached dispatch thread to the chosen catcher (even if that is the main thread) */
|
|
|
+ lwp->signal.sig_dispatch_thr[signo - 1] = catcher;
|
|
|
}
|
|
|
- return func;
|
|
|
+
|
|
|
+ return catcher;
|
|
|
}
|
|
|
|
|
|
-void lwp_sighandler_set(int sig, lwp_sighandler_t func)
|
|
|
+static int _siginfo_deliver_to_lwp(struct rt_lwp *lwp, lwp_siginfo_t siginfo)
|
|
|
{
|
|
|
- rt_base_t level;
|
|
|
+ rt_thread_t catcher;
|
|
|
|
|
|
- if (sig == 0 || sig > _LWP_NSIG)
|
|
|
- return;
|
|
|
- if (sig == SIGKILL || sig == SIGSTOP)
|
|
|
- return;
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
- ((struct rt_lwp*)rt_thread_self()->lwp)->signal_handler[sig - 1] = func;
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
+ catcher = _signal_find_catcher(lwp, siginfo->ksiginfo.signo);
|
|
|
+
|
|
|
+ sigqueue_enqueue(&lwp->signal.sig_queue, siginfo);
|
|
|
+ return _do_signal_wakeup(catcher, siginfo->ksiginfo.signo);
|
|
|
}
|
|
|
|
|
|
-#ifndef ARCH_MM_MMU
|
|
|
-void lwp_thread_sighandler_set(int sig, lwp_sighandler_t func)
|
|
|
+static int _siginfo_deliver_to_thread(struct rt_lwp *lwp, rt_thread_t thread, lwp_siginfo_t siginfo)
|
|
|
{
|
|
|
- rt_base_t level;
|
|
|
+ sigqueue_enqueue(_SIGQ(thread), siginfo);
|
|
|
+ return _do_signal_wakeup(thread, siginfo->ksiginfo.signo);
|
|
|
+}
|
|
|
|
|
|
- if (sig == 0 || sig > _LWP_NSIG)
|
|
|
- return;
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
- rt_thread_self()->signal_handler[sig - 1] = func;
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
+rt_inline rt_bool_t _sighandler_is_ignored(struct rt_lwp *lwp, int signo)
|
|
|
+{
|
|
|
+ rt_bool_t is_ignored;
|
|
|
+ lwp_sighandler_t action;
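+ /* LWP_SIG_IGNORE_SET holds the signals whose default action is to ignore them */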
|
|
|
+ lwp_sigset_t ign_set = lwp_sigset_init(LWP_SIG_IGNORE_SET);
|
|
|
+
|
|
|
+ action = _get_sighandler_locked(lwp, signo);
|
|
|
+
|
|
|
+ if (action == LWP_SIG_ACT_IGN)
|
|
|
+ is_ignored = RT_TRUE;
|
|
|
+ else if (action == LWP_SIG_ACT_DFL && _sigismember(&ign_set, signo))
|
|
|
+ is_ignored = RT_TRUE;
|
|
|
+ else
|
|
|
+ is_ignored = RT_FALSE;
|
|
|
+
|
|
|
+ return is_ignored;
|
|
|
+}
|
|
|
+
|
|
|
+rt_inline rt_bool_t _sighandler_cannot_caught(struct rt_lwp *lwp, int signo)
|
|
|
+{
|
|
|
+ return signo == SIGKILL || signo == SIGSTOP;
|
|
|
}
|
|
|
-#endif
|
|
|
|
|
|
-int lwp_sigaction(int sig, const struct lwp_sigaction *act,
|
|
|
- struct lwp_sigaction *oact, size_t sigsetsize)
|
|
|
+rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, long value)
|
|
|
{
|
|
|
+ rt_err_t ret = -1;
|
|
|
rt_base_t level;
|
|
|
- struct rt_lwp *lwp;
|
|
|
- int ret = -RT_EINVAL;
|
|
|
- lwp_sigset_t newset;
|
|
|
+ lwp_siginfo_t siginfo;
|
|
|
+ rt_bool_t terminated;
|
|
|
+ rt_bool_t need_schedule;
|
|
|
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
- lwp = (struct rt_lwp*)rt_thread_self()->lwp;
|
|
|
- if (!lwp)
|
|
|
- {
|
|
|
- goto out;
|
|
|
- }
|
|
|
- if (sigsetsize != sizeof(lwp_sigset_t))
|
|
|
- {
|
|
|
- goto out;
|
|
|
- }
|
|
|
- if (!act && !oact)
|
|
|
- {
|
|
|
- goto out;
|
|
|
- }
|
|
|
- if (oact)
|
|
|
+ /** must be able to be suspended */
|
|
|
+ RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
|
|
|
+
|
|
|
+ if (!lwp || signo < 0 || signo >= _LWP_NSIG)
|
|
|
{
|
|
|
- oact->sa_flags = lwp->sa_flags;
|
|
|
- oact->sa_mask = lwp->signal_mask;
|
|
|
- oact->sa_restorer = RT_NULL;
|
|
|
- oact->__sa_handler._sa_handler = lwp->signal_handler[sig - 1];
|
|
|
+ ret = -RT_EINVAL;
|
|
|
}
|
|
|
- if (act)
|
|
|
+ else
|
|
|
{
|
|
|
- lwp->sa_flags = act->sa_flags;
|
|
|
- newset = act->sa_mask;
|
|
|
- lwp_sigdelset(&newset, SIGKILL);
|
|
|
- lwp_sigdelset(&newset, SIGSTOP);
|
|
|
- lwp->signal_mask = newset;
|
|
|
- lwp_sighandler_set(sig, act->__sa_handler._sa_handler);
|
|
|
+ need_schedule = RT_FALSE;
|
|
|
+
|
|
|
+ /* FIXME: acquire READ lock to lwp */
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
+ terminated = lwp->terminated;
|
|
|
+
|
|
|
+ /* short-circuit for a terminated task or an ignored signal */
|
|
|
+ if (terminated || _sighandler_is_ignored(lwp, signo))
|
|
|
+ {
|
|
|
+ ret = 0;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ siginfo = siginfo_create(signo, code, value);
|
|
|
+
|
|
|
+ if (siginfo)
|
|
|
+ {
|
|
|
+ need_schedule = _siginfo_deliver_to_lwp(lwp, siginfo);
|
|
|
+ ret = 0;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ LOG_I("%s: siginfo malloc failed", __func__);
|
|
|
+ ret = -RT_ENOMEM;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ rt_hw_interrupt_enable(level);
|
|
|
+
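+ /* reschedule outside the critical section so a woken catcher thread can run promptly */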
|
|
|
+ if (need_schedule)
|
|
|
+ rt_schedule();
|
|
|
}
|
|
|
- ret = 0;
|
|
|
-out:
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-rt_inline void sigorsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
|
|
|
+static void _signal_action_flag_k2u(int signo, struct lwp_signal *signal, struct lwp_sigaction *act)
|
|
|
{
|
|
|
- switch (_LWP_NSIG_WORDS)
|
|
|
- {
|
|
|
- case 4:
|
|
|
- dset->sig[3] = set0->sig[3] | set1->sig[3];
|
|
|
- dset->sig[2] = set0->sig[2] | set1->sig[2];
|
|
|
- case 2:
|
|
|
- dset->sig[1] = set0->sig[1] | set1->sig[1];
|
|
|
- case 1:
|
|
|
- dset->sig[0] = set0->sig[0] | set1->sig[0];
|
|
|
- default:
|
|
|
- return;
|
|
|
- }
|
|
|
+ long flags = 0;
|
|
|
+ if (_sigismember(&signal->sig_action_nodefer, signo))
|
|
|
+ flags |= SA_NODEFER;
|
|
|
+ if (_sigismember(&signal->sig_action_onstack, signo))
|
|
|
+ flags |= SA_ONSTACK;
|
|
|
+ if (_sigismember(&signal->sig_action_restart, signo))
|
|
|
+ flags |= SA_RESTART;
|
|
|
+ if (_sigismember(&signal->sig_action_siginfo, signo))
|
|
|
+ flags |= SA_SIGINFO;
|
|
|
+
|
|
|
+ act->sa_flags = flags;
|
|
|
}
|
|
|
|
|
|
-rt_inline void sigandsets(lwp_sigset_t *dset, const lwp_sigset_t *set0, const lwp_sigset_t *set1)
|
|
|
+static void _signal_action_flag_u2k(int signo, struct lwp_signal *signal, const struct lwp_sigaction *act)
|
|
|
{
|
|
|
- switch (_LWP_NSIG_WORDS)
|
|
|
- {
|
|
|
- case 4:
|
|
|
- dset->sig[3] = set0->sig[3] & set1->sig[3];
|
|
|
- dset->sig[2] = set0->sig[2] & set1->sig[2];
|
|
|
- case 2:
|
|
|
- dset->sig[1] = set0->sig[1] & set1->sig[1];
|
|
|
- case 1:
|
|
|
- dset->sig[0] = set0->sig[0] & set1->sig[0];
|
|
|
- default:
|
|
|
- return;
|
|
|
- }
|
|
|
+ long flags = act->sa_flags;
|
|
|
+ if (flags & SA_NODEFER)
|
|
|
+ _sigaddset(&signal->sig_action_nodefer, signo);
|
|
|
+ if (flags & SA_ONSTACK)
|
|
|
+ _sigaddset(&signal->sig_action_onstack, signo);
|
|
|
+ if (flags & SA_RESTART)
|
|
|
+ _sigaddset(&signal->sig_action_restart, signo);
|
|
|
+ if (flags & SA_SIGINFO)
|
|
|
+ _sigaddset(&signal->sig_action_siginfo, signo);
|
|
|
}
|
|
|
|
|
|
-int lwp_sigprocmask(int how, const lwp_sigset_t *sigset, lwp_sigset_t *oset)
|
|
|
+rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
|
|
|
+ const struct lwp_sigaction *restrict act,
|
|
|
+ struct lwp_sigaction *restrict oact)
|
|
|
{
|
|
|
- int ret = -1;
|
|
|
+ lwp_sighandler_t prev_handler;
|
|
|
+ lwp_sigqueue_t thread_sigq;
|
|
|
+ rt_list_t *thread_list;
|
|
|
+ rt_err_t ret = RT_EOK;
|
|
|
rt_base_t level;
|
|
|
- struct rt_lwp *lwp;
|
|
|
- struct rt_thread *thread;
|
|
|
- lwp_sigset_t newset;
|
|
|
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
-
|
|
|
- thread = rt_thread_self();
|
|
|
- lwp = (struct rt_lwp*)thread->lwp;
|
|
|
- if (!lwp)
|
|
|
- {
|
|
|
- goto out;
|
|
|
- }
|
|
|
- if (oset)
|
|
|
+ if (lwp)
|
|
|
{
|
|
|
- rt_memcpy(oset, &lwp->signal_mask, sizeof(lwp_sigset_t));
|
|
|
- }
|
|
|
+ /** acquire READ access to lwp */
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
|
|
|
- if (sigset)
|
|
|
- {
|
|
|
- switch (how)
|
|
|
+ if (oact)
|
|
|
{
|
|
|
- case SIG_BLOCK:
|
|
|
- sigorsets(&newset, &lwp->signal_mask, sigset);
|
|
|
- break;
|
|
|
- case SIG_UNBLOCK:
|
|
|
- sigandsets(&newset, &lwp->signal_mask, sigset);
|
|
|
- break;
|
|
|
- case SIG_SETMASK:
|
|
|
- newset = *sigset;
|
|
|
- break;
|
|
|
- default:
|
|
|
+ oact->sa_mask = lwp->signal.sig_action_mask[signo - 1];
|
|
|
+ oact->__sa_handler._sa_handler = lwp->signal.sig_action[signo - 1];
|
|
|
+ oact->sa_restorer = RT_NULL;
|
|
|
+ _signal_action_flag_k2u(signo, &lwp->signal, oact);
|
|
|
+ }
|
|
|
+ if (act)
|
|
|
+ {
|
|
|
+ /**
|
|
|
+ * @note POSIX.1-2017 requires calls to sigaction() that supply a NULL act
|
|
|
+ * argument succeed, even in the case of signals that cannot be caught or ignored
|
|
|
+ */
|
|
|
+ if (_sighandler_cannot_caught(lwp, signo))
|
|
|
ret = -RT_EINVAL;
|
|
|
- goto out;
|
|
|
+ else
|
|
|
+ {
|
|
|
+ prev_handler = _get_sighandler_locked(lwp, signo);
|
|
|
+ lwp->signal.sig_action_mask[signo - 1] = act->sa_mask;
|
|
|
+ if (act->__sa_handler._sa_handler == SIG_IGN)
|
|
|
+ lwp->signal.sig_action[signo - 1] = LWP_SIG_ACT_IGN;
|
|
|
+ else
|
|
|
+ lwp->signal.sig_action[signo - 1] = act->__sa_handler._sa_handler;
|
|
|
+
|
|
|
+ _signal_action_flag_u2k(signo, &lwp->signal, act);
|
|
|
+
|
|
|
+ /**
|
|
|
+ * @brief Discard the pending signal if signal action is set to SIG_IGN
|
|
|
+ *
|
|
|
+ * @note POSIX.1-2017: Setting a signal action to SIG_IGN for a signal
|
|
|
+ * that is pending shall cause the pending signal to be discarded,
|
|
|
+ * whether or not it is blocked.
|
|
|
+ */
|
|
|
+ if (prev_handler != LWP_SIG_ACT_IGN &&
|
|
|
+ _get_sighandler_locked(lwp, signo) == LWP_SIG_ACT_IGN)
|
|
|
+ {
|
|
|
+ sigqueue_discard(_SIGQ(lwp), signo);
|
|
|
+ for (thread_list = lwp->t_grp.next;
|
|
|
+ thread_list != &lwp->t_grp;
|
|
|
+ thread_list = thread_list->next)
|
|
|
+ {
|
|
|
+ thread_sigq = _SIGQ(rt_list_entry(thread_list, struct rt_thread, sibling));
|
|
|
+ sigqueue_discard(thread_sigq, signo);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- lwp_sigdelset(&newset, SIGKILL);
|
|
|
- lwp_sigdelset(&newset, SIGSTOP);
|
|
|
-
|
|
|
- lwp->signal_mask = newset;
|
|
|
+ rt_hw_interrupt_enable(level);
|
|
|
}
|
|
|
- ret = 0;
|
|
|
-out:
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
+ else
|
|
|
+ ret = -RT_EINVAL;
|
|
|
+
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-int lwp_thread_sigprocmask(int how, const lwp_sigset_t *sigset, lwp_sigset_t *oset)
|
|
|
+rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long value)
|
|
|
{
|
|
|
+ rt_err_t ret = -1;
|
|
|
rt_base_t level;
|
|
|
- struct rt_thread *thread;
|
|
|
- lwp_sigset_t newset;
|
|
|
+ struct rt_lwp *lwp;
|
|
|
+ lwp_siginfo_t siginfo;
|
|
|
+ rt_bool_t need_schedule;
|
|
|
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
- thread = rt_thread_self();
|
|
|
+ /** must be able to be suspended */
|
|
|
+ RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
|
|
|
|
|
|
- if (oset)
|
|
|
+ if (!thread || signo < 0 || signo >= _LWP_NSIG)
|
|
|
{
|
|
|
- rt_memcpy(oset, &thread->signal_mask, sizeof(lwp_sigset_t));
|
|
|
+ ret = -RT_EINVAL;
|
|
|
}
|
|
|
-
|
|
|
- if (sigset)
|
|
|
+ else
|
|
|
{
|
|
|
- switch (how)
|
|
|
+ lwp = thread->lwp;
|
|
|
+ need_schedule = RT_FALSE;
|
|
|
+
|
|
|
+ RT_ASSERT(lwp);
|
|
|
+
|
|
|
+ /* FIXME: acquire READ lock to lwp */
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
+
|
|
|
+ if (!lwp)
|
|
|
+ ret = -RT_EPERM;
|
|
|
+ else if (lwp->terminated || _sighandler_is_ignored(lwp, signo))
|
|
|
+ ret = 0;
|
|
|
+ else
|
|
|
{
|
|
|
- case SIG_BLOCK:
|
|
|
- sigorsets(&newset, &thread->signal_mask, sigset);
|
|
|
- break;
|
|
|
- case SIG_UNBLOCK:
|
|
|
- sigandsets(&newset, &thread->signal_mask, sigset);
|
|
|
- break;
|
|
|
- case SIG_SETMASK:
|
|
|
- newset = *sigset;
|
|
|
- break;
|
|
|
- default:
|
|
|
- goto out;
|
|
|
+ siginfo = siginfo_create(signo, code, value);
|
|
|
+
|
|
|
+ if (siginfo)
|
|
|
+ {
|
|
|
+ need_schedule = _siginfo_deliver_to_thread(lwp, thread, siginfo);
|
|
|
+ ret = 0;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ LOG_I("%s: siginfo malloc failed", __func__);
|
|
|
+ ret = -RT_ENOMEM;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- lwp_sigdelset(&newset, SIGKILL);
|
|
|
- lwp_sigdelset(&newset, SIGSTOP);
|
|
|
+ rt_hw_interrupt_enable(level);
|
|
|
|
|
|
- thread->signal_mask = newset;
|
|
|
+ if (need_schedule)
|
|
|
+ rt_schedule();
|
|
|
}
|
|
|
-out:
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+#ifndef ARCH_MM_MMU
|
|
|
+void lwp_thread_sighandler_set(int sig, lwp_sighandler_t func)
|
|
|
+{
|
|
|
+ rt_base_t level;
|
|
|
+
|
|
|
+ if (sig == 0 || sig > _LWP_NSIG)
|
|
|
+ return;
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
+ rt_thread_self()->signal_handler[sig - 1] = func;
|
|
|
rt_hw_interrupt_enable(level);
|
|
|
- return 0;
|
|
|
}
|
|
|
+#endif
|
|
|
|
|
|
-static void _do_signal_wakeup(rt_thread_t thread, int sig)
|
|
|
+rt_err_t lwp_thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
|
|
|
+ const lwp_sigset_t *sigset, lwp_sigset_t *oset)
|
|
|
{
|
|
|
- if ((thread->stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
|
|
|
+ rt_err_t ret = -1;
|
|
|
+ rt_base_t level;
|
|
|
+ struct rt_lwp *lwp;
|
|
|
+
|
|
|
+ if (thread)
|
|
|
{
|
|
|
- int need_schedule = 1;
|
|
|
+ /** FIXME: acquire READ access to rt_thread */
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
|
|
|
- if ((thread->stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
|
|
|
- {
|
|
|
- rt_thread_wakeup(thread);
|
|
|
- }
|
|
|
- else if ((sig == SIGKILL) && ((thread->stat & RT_SIGNAL_KILL_WAKEUP_MASK) != RT_SIGNAL_KILL_WAKEUP_MASK))
|
|
|
+ lwp = (struct rt_lwp*)thread->lwp;
|
|
|
+ if (!lwp)
|
|
|
{
|
|
|
- rt_thread_wakeup(thread);
|
|
|
+ ret = -RT_EPERM;
|
|
|
}
|
|
|
else
|
|
|
{
|
|
|
- need_schedule = 0;
|
|
|
+ ret = 0;
|
|
|
+ _thread_signal_mask(thread, how, sigset, oset);
|
|
|
}
|
|
|
|
|
|
- /* do schedule */
|
|
|
- if (need_schedule)
|
|
|
- {
|
|
|
- rt_schedule();
|
|
|
- }
|
|
|
+ rt_hw_interrupt_enable(level);
|
|
|
}
|
|
|
+ else
|
|
|
+ ret = -RT_EINVAL;
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
-int lwp_kill(pid_t pid, int sig)
|
|
|
+static int _dequeue_signal(rt_thread_t thread, lwp_sigset_t *mask, siginfo_t *usi)
|
|
|
{
|
|
|
- rt_base_t level;
|
|
|
+ int signo;
|
|
|
+ lwp_siginfo_t si;
|
|
|
struct rt_lwp *lwp;
|
|
|
- int ret = -1;
|
|
|
- rt_thread_t thread;
|
|
|
+ lwp_sigqueue_t sigq;
+ lwp_sigset_t *pending;
|
|
|
|
|
|
- if (sig < 0 || sig >= _LWP_NSIG)
|
|
|
- {
|
|
|
- rt_set_errno(EINVAL);
|
|
|
- return ret;
|
|
|
- }
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
- lwp = lwp_from_pid(pid);
|
|
|
- if (!lwp || lwp->finish)
|
|
|
+ sigq = _SIGQ(thread);
+ pending = &sigq->sigset_pending;
|
|
|
+ signo = _next_signal(pending, mask);
|
|
|
+ if (!signo)
|
|
|
{
|
|
|
- rt_set_errno(ESRCH);
|
|
|
- goto out;
|
|
|
+ lwp = thread->lwp;
|
|
|
+ RT_ASSERT(lwp);
|
|
|
+ sigq = _SIGQ(lwp);
+ pending = &sigq->sigset_pending;
|
|
|
+ signo = _next_signal(pending, mask);
|
|
|
}
|
|
|
- if (sig)
|
|
|
- {
|
|
|
- /* check main thread */
|
|
|
- thread = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
|
|
|
- if (!lwp_sigismember(&lwp->signal_mask, sig)) /* if signal masked */
|
|
|
- {
|
|
|
- lwp_sigaddset(&lwp->signal, sig);
|
|
|
- _do_signal_wakeup(thread, sig);
|
|
|
- }
|
|
|
- }
|
|
|
- ret = 0;
|
|
|
-out:
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
- return ret;
|
|
|
+
|
|
|
+ if (!signo)
|
|
|
+ return signo;
|
|
|
+
|
|
|
+ /* dequeue from whichever queue (thread-private or process-wide) the signal was found in */
+ si = sigqueue_dequeue(sigq, signo);
|
|
|
+ RT_ASSERT(!!si);
|
|
|
+
|
|
|
+ siginfo_k2u(si, usi);
|
|
|
+ siginfo_delete(si);
|
|
|
+
|
|
|
+ return signo;
|
|
|
}
|
|
|
|
|
|
-int lwp_thread_kill(rt_thread_t thread, int sig)
|
|
|
+rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
|
|
|
+ siginfo_t *usi, struct timespec *timeout)
|
|
|
{
|
|
|
+ LOG_D("%s", __func__);
|
|
|
+
|
|
|
rt_base_t level;
|
|
|
- int ret = -RT_EINVAL;
|
|
|
+ rt_err_t ret;
|
|
|
+ int sig;
|
|
|
+
|
|
|
+ /**
|
|
|
+ * @brief POSIX
|
|
|
+ * If one of the signals in set is already pending for the calling thread,
|
|
|
+ * sigwaitinfo() will return immediately
|
|
|
+ */
|
|
|
|
|
|
- if (!thread)
|
|
|
+ /* build the mask of signals the user does not want to wait for, or cannot catch */
|
|
|
+ _sigdelset(sigset, SIGKILL);
|
|
|
+ _sigdelset(sigset, SIGSTOP);
|
|
|
+ _signotsets(sigset, sigset);
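+ /* sigset now holds the complement of the wait set, i.e. the blocked-style mask that _dequeue_signal() expects */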
|
|
|
+
|
|
|
+ /* FIXME: acquire READ lock to lwp */
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
+ sig = _dequeue_signal(thread, sigset, usi);
|
|
|
+ rt_hw_interrupt_enable(level);
|
|
|
+ if (sig)
|
|
|
+ return sig;
|
|
|
+
|
|
|
+ /* WARNING: atomicity problem, a signal that becomes pending between the check above and the suspend below could be missed */
|
|
|
+
|
|
|
+ /**
|
|
|
+ * @brief POSIX
|
|
|
+ * if none of the signals specified by set are pending, sigtimedwait() shall
|
|
|
+ * wait for the time interval specified in the timespec structure referenced
|
|
|
+ * by timeout.
|
|
|
+ */
|
|
|
+ if (timeout)
|
|
|
{
|
|
|
- rt_set_errno(ESRCH);
|
|
|
- return ret;
|
|
|
+ /* TODO: validate timeout: no 32-bit tick overflow, nanoseconds within range, ... */
|
|
|
+ rt_uint32_t time;
|
|
|
+ time = rt_timespec_to_tick(timeout);
|
|
|
+
|
|
|
+ /**
|
|
|
+ * @brief POSIX
|
|
|
+ * If the timespec structure pointed to by timeout is zero-valued and
|
|
|
+ * if none of the signals specified by set are pending, then
|
|
|
+ * sigtimedwait() shall return immediately with an error
|
|
|
+ */
|
|
|
+ if (time == 0)
|
|
|
+ return -EAGAIN;
|
|
|
+
|
|
|
+ ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
|
|
|
+ rt_timer_control(&(thread->thread_timer),
|
|
|
+ RT_TIMER_CTRL_SET_TIME,
|
|
|
+ &time);
|
|
|
+ rt_timer_start(&(thread->thread_timer));
|
|
|
+
|
|
|
}
|
|
|
- if (sig < 0 || sig >= _LWP_NSIG)
|
|
|
+ else
|
|
|
{
|
|
|
- rt_set_errno(EINVAL);
|
|
|
- return ret;
|
|
|
+ /* suspend kernel forever until signal was received */
|
|
|
+ ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
|
|
|
}
|
|
|
- level = rt_hw_interrupt_disable();
|
|
|
- if (!thread->lwp)
|
|
|
+
|
|
|
+ if (ret == RT_EOK)
|
|
|
{
|
|
|
- rt_set_errno(EPERM);
|
|
|
- goto out;
|
|
|
+ rt_schedule();
|
|
|
+ ret = -EAGAIN;
|
|
|
}
|
|
|
- if (!lwp_sigismember(&thread->signal_mask, sig)) /* if signal masked */
|
|
|
+ /* else ret == -EINTR */
|
|
|
+
|
|
|
+ /* FIXME: acquire READ lock to lwp */
|
|
|
+ level = rt_hw_interrupt_disable();
|
|
|
+ sig = _dequeue_signal(thread, sigset, usi);
|
|
|
+ rt_hw_interrupt_enable(level);
|
|
|
+
|
|
|
+ return sig ? sig : ret;
|
|
|
+}
|
|
|
+
|
|
|
+void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *pending)
|
|
|
+{
|
|
|
+ struct rt_lwp *lwp;
|
|
|
+ lwp = thread->lwp;
|
|
|
+
|
|
|
+ if (lwp)
|
|
|
{
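+ /* report the union of the thread-private and the process-wide pending sets */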
|
|
|
- lwp_sigaddset(&thread->signal, sig);
|
|
|
- _do_signal_wakeup(thread, sig);
|
|
|
+ memset(pending, 0, sizeof(*pending));
|
|
|
+ sigqueue_examine(_SIGQ(thread), pending);
|
|
|
+ sigqueue_examine(_SIGQ(lwp), pending);
|
|
|
}
|
|
|
- ret = 0;
|
|
|
-out:
|
|
|
- rt_hw_interrupt_enable(level);
|
|
|
- return ret;
|
|
|
}
|