void sigsuspendsetup(struct lwp *l, const sigset_t *ss) { struct proc *p = l->l_proc; /* * When returning from sigsuspend/pselect/pollts, we want * the old mask to be restored after the * signal handler has finished. Thus, we * save it here and mark the sigctx structure * to indicate this. */ mutex_enter(p->p_lock); l->l_sigrestore = 1; l->l_sigoldmask = l->l_sigmask; l->l_sigmask = *ss; sigminusset(&sigcantmask, &l->l_sigmask); /* Check for pending signals when sleeping. */ if (sigispending(l, 0)) { lwp_lock(l); l->l_flag |= LW_PENDSIG; lwp_unlock(l); } mutex_exit(p->p_lock); }
/*
 * smb_proc_intr:
 *
 *	Return EINTR if the given LWP has a signal pending, else 0.
 *	A NULL LWP is treated as "not interrupted".
 */
int
smb_proc_intr(struct lwp *l)
{
	struct proc *p;
	int pending;

	if (l == NULL)
		return 0;
	p = l->l_proc;

	/* sigispending() requires the proc lock to be held. */
	mutex_enter(p->p_lock);
	pending = sigispending(l, 0);
	mutex_exit(p->p_lock);

	return pending ? EINTR : 0;
}
/*
 * sigsuspendteardown:
 *
 *	Undo sigsuspendsetup(): restore the saved signal mask unless a
 *	signal is pending, in which case delivery (and the restore that
 *	goes with it) is left to the signal-delivery path.
 */
void
sigsuspendteardown(struct lwp *l)
{
	struct proc * const p = l->l_proc;

	mutex_enter(p->p_lock);
	if (l->l_sigrestore) {
		if (sigispending(l, 0)) {
			/*
			 * Deliver the pending signal first; keep
			 * l_sigrestore set so the handler return path
			 * restores the old mask.
			 */
			lwp_lock(l);
			l->l_flag |= LW_PENDSIG;
			lwp_unlock(l);
		} else {
			/* Nothing pending: restore the old mask now. */
			l->l_sigrestore = 0;
			l->l_sigmask = l->l_sigoldmask;
		}
	}
	mutex_exit(p->p_lock);
}
/*
 * sigprocmask1:
 *
 *	Manipulate the LWP's signal mask according to `how' (SIG_BLOCK,
 *	SIG_UNBLOCK or SIG_SETMASK).  If oss is non-NULL the previous
 *	mask is returned through it; if nss is NULL the mask is only
 *	read, not modified.  Returns 0 on success or EINVAL for an
 *	unknown `how'.  Caller must hold the proc lock.
 */
int
sigprocmask1(struct lwp *l, int how, const sigset_t *nss, sigset_t *oss)
{
	sigset_t * const mask = &l->l_sigmask;
	bool unblocked;

	KASSERT(mutex_owned(l->l_proc->p_lock));

	if (oss != NULL)
		*oss = *mask;
	if (nss == NULL)
		return 0;

	switch (how) {
	case SIG_BLOCK:
		sigplusset(nss, mask);
		unblocked = false;
		break;
	case SIG_UNBLOCK:
		sigminusset(nss, mask);
		unblocked = true;
		break;
	case SIG_SETMASK:
		*mask = *nss;
		unblocked = true;
		break;
	default:
		return EINVAL;
	}

	/* SIGKILL/SIGSTOP can never be blocked. */
	sigminusset(&sigcantmask, mask);

	if (unblocked && sigispending(l, 0)) {
		/*
		 * A previously blocked signal may now be deliverable;
		 * make sure it is checked on return to user.
		 */
		lwp_lock(l);
		l->l_flag |= LW_PENDSIG;
		lwp_unlock(l);
	}

	return 0;
}
/*
 * sigaction1:
 *
 *	Kernel backend for sigaction(2): fetch and/or install the
 *	disposition for `signum' in l's process.  `tramp'/`vers' describe
 *	the userland signal trampoline ABI.  Returns 0 or EINVAL.
 */
int
sigaction1(struct lwp *l, int signum, const struct sigaction *nsa,
    struct sigaction *osa, const void *tramp, int vers)
{
	struct proc *p;
	struct sigacts *ps;
	sigset_t tset;
	int prop, error;
	ksiginfoq_t kq;
	/* Sticky across calls: once the compat method is known usable. */
	static bool v0v1valid;

	if (signum <= 0 || signum >= NSIG)
		return EINVAL;

	p = l->l_proc;
	error = 0;
	ksiginfo_queue_init(&kq);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy kernel
	 * provided on-stack trampoline.  Conversely, if we are using a
	 * non-0 ABI version, we must have a trampoline.  Only validate the
	 * vers if a new sigaction was supplied and there was an actual
	 * handler specified (not SIG_IGN or SIG_DFL), which don't require
	 * a trampoline.  Emulations use legacy kernel trampolines with
	 * version 0, alternatively check for that too.
	 *
	 * If version < 2, we try to autoload the compat module.  Note
	 * that we interlock with the unload check in compat_modcmd()
	 * using kernconfig_lock.  If the autoload fails, we don't try it
	 * again for this process.
	 */
	if (nsa != NULL && nsa->sa_handler != SIG_IGN &&
	    nsa->sa_handler != SIG_DFL) {
		if (__predict_false(vers < 2)) {
			if (p->p_flag & PK_32)
				v0v1valid = true;
			else if ((p->p_lflag & PL_SIGCOMPAT) == 0) {
				kernconfig_lock();
				if (sendsig_sigcontext_vec == NULL) {
					(void)module_autoload("compat",
					    MODULE_CLASS_ANY);
				}
				if (sendsig_sigcontext_vec != NULL) {
					/*
					 * We need to remember if the
					 * sigcontext method may be useable,
					 * because libc may use it even
					 * if siginfo is available.
					 */
					v0v1valid = true;
				}
				mutex_enter(proc_lock);
				/*
				 * Prevent unload of compat module while
				 * this process remains.
				 */
				p->p_lflag |= PL_SIGCOMPAT;
				mutex_exit(proc_lock);
				kernconfig_unlock();
			}
		}

		switch (vers) {
		case 0:
			/* sigcontext, kernel supplied trampoline. */
			if (tramp != NULL || !v0v1valid) {
				return EINVAL;
			}
			break;
		case 1:
			/* sigcontext, user supplied trampoline. */
			if (tramp == NULL || !v0v1valid) {
				return EINVAL;
			}
			break;
		case 2:
		case 3:
			/* siginfo, user supplied trampoline. */
			if (tramp == NULL) {
				return EINVAL;
			}
			break;
		default:
			return EINVAL;
		}
	}

	mutex_enter(p->p_lock);

	ps = p->p_sigacts;
	/* Report the old disposition if requested. */
	if (osa)
		*osa = SIGACTION_PS(ps, signum);
	if (!nsa)
		goto out;

	prop = sigprop[signum];
	/* Reject unknown flags and signals that cannot be caught/masked. */
	if ((nsa->sa_flags & ~SA_ALLBITS) || (prop & SA_CANTMASK)) {
		error = EINVAL;
		goto out;
	}

	/* Install the new disposition and trampoline descriptor. */
	SIGACTION_PS(ps, signum) = *nsa;
	ps->sa_sigdesc[signum].sd_tramp = tramp;
	ps->sa_sigdesc[signum].sd_vers = vers;
	sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);
	if ((prop & SA_NORESET) != 0)
		SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;

	if (signum == SIGCHLD) {
		if (nsa->sa_flags & SA_NOCLDSTOP)
			p->p_sflag |= PS_NOCLDSTOP;
		else
			p->p_sflag &= ~PS_NOCLDSTOP;
		if (nsa->sa_flags & SA_NOCLDWAIT) {
			/*
			 * Paranoia: since SA_NOCLDWAIT is implemented by
			 * reparenting the dying child to PID 1 (and trust
			 * it to reap the zombie), PID 1 itself is forbidden
			 * to set SA_NOCLDWAIT.
			 */
			if (p->p_pid == 1)
				p->p_flag &= ~PK_NOCLDWAIT;
			else
				p->p_flag |= PK_NOCLDWAIT;
		} else
			p->p_flag &= ~PK_NOCLDWAIT;

		if (nsa->sa_handler == SIG_IGN) {
			/*
			 * Paranoia: same as above.
			 */
			if (p->p_pid == 1)
				p->p_flag &= ~PK_CLDSIGIGN;
			else
				p->p_flag |= PK_CLDSIGIGN;
		} else
			p->p_flag &= ~PK_CLDSIGIGN;
	}

	/* Unless SA_NODEFER, mask the signal during its own handler. */
	if ((nsa->sa_flags & SA_NODEFER) == 0)
		sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
	else
		sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);

	/*
	 * Set bit in p_sigctx.ps_sigignore for signals that are set to
	 * SIG_IGN, and for signals set to SIG_DFL where the default is to
	 * ignore.  However, don't put SIGCONT in p_sigctx.ps_sigignore, as
	 * we have to restart the process.
	 */
	if (nsa->sa_handler == SIG_IGN ||
	    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
		/* Never to be seen again. */
		sigemptyset(&tset);
		sigaddset(&tset, signum);
		sigclearall(p, &tset, &kq);
		if (signum != SIGCONT) {
			/* Easier in psignal */
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		}
		sigdelset(&p->p_sigctx.ps_sigcatch, signum);
	} else {
		sigdelset(&p->p_sigctx.ps_sigignore, signum);
		if (nsa->sa_handler == SIG_DFL)
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		else
			sigaddset(&p->p_sigctx.ps_sigcatch, signum);
	}

	/*
	 * Previously held signals may now have become visible.  Ensure that
	 * we check for them before returning to userspace.
	 */
	if (sigispending(l, 0)) {
		lwp_lock(l);
		l->l_flag |= LW_PENDSIG;
		lwp_unlock(l);
	}
 out:
	mutex_exit(p->p_lock);
	/* Drain any siginfo freed by sigclearall() outside the lock. */
	ksiginfo_queue_drain(&kq);

	return error;
}