/*
 * mfs_start:
 *
 *	Run the I/O service loop for a memory file system.  The mounting
 *	process becomes the in-kernel server: it pulls buffers off the
 *	mfsnode's buffer queue and services them via mfs_doio() until the
 *	file system is shut down.  Returns 0 on normal shutdown, or the
 *	EINTR/ERESTART value from cv_wait_sig() if interrupted by a signal
 *	(NOTE(review): the final return can also be stale 0 after a
 *	successful dounmount path — appears intentional; confirm).
 *
 *	Locking: mfs_lock protects mfs_refcnt, mfs_shutdown and the buffer
 *	queue; it is dropped around mfs_doio() and dounmount(), which may
 *	block or re-enter the file system.
 */
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags)
{
	struct vnode *vp;
	struct mfsnode *mfsp;
	struct proc *p;
	struct buf *bp;
	void *base;
	int sleepreturn = 0, refcnt, error;
	ksiginfoq_t kq;

	/*
	 * Ensure that file system is still mounted when getting mfsnode.
	 * Add a reference to the mfsnode to prevent it disappearing in
	 * this routine.
	 */
	if ((error = vfs_busy(mp, NULL)) != 0)
		return error;
	vp = VFSTOUFS(mp)->um_devvp;
	mfsp = VTOMFS(vp);
	mutex_enter(&mfs_lock);
	mfsp->mfs_refcnt++;
	mutex_exit(&mfs_lock);
	vfs_unbusy(mp, false, NULL);

	/* Base address of the file system's backing memory. */
	base = mfsp->mfs_baseoff;
	mutex_enter(&mfs_lock);
	while (mfsp->mfs_shutdown != 1) {
		/* Drain all queued I/O; mfs_doio() runs unlocked. */
		while ((bp = bufq_get(mfsp->mfs_buflist)) != NULL) {
			mutex_exit(&mfs_lock);
			mfs_doio(bp, base);
			mutex_enter(&mfs_lock);
		}
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, or the filesystem is already in the
		 * process of being unmounted, clear the signal (it has been
		 * "processed"), otherwise we will loop here, as tsleep
		 * will always return EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			mutex_exit(&mfs_lock);
			if (dounmount(mp, 0, curlwp) != 0) {
				/* Unmount failed: eat the pending signals
				 * so cv_wait_sig() can block again. */
				p = curproc;
				ksiginfo_queue_init(&kq);
				mutex_enter(p->p_lock);
				sigclearall(p, NULL, &kq);
				mutex_exit(p->p_lock);
				ksiginfo_queue_drain(&kq);
			}
			sleepreturn = 0;
			mutex_enter(&mfs_lock);
			continue;
		}

		/* Wait for more work or a signal; returns 0 or errno. */
		sleepreturn = cv_wait_sig(&mfsp->mfs_cv, &mfs_lock);
	}
	KASSERT(bufq_peek(mfsp->mfs_buflist) == NULL);

	/* Drop our reference; last one out frees the mfsnode. */
	refcnt = --mfsp->mfs_refcnt;
	mutex_exit(&mfs_lock);

	if (refcnt == 0) {
		bufq_free(mfsp->mfs_buflist);
		cv_destroy(&mfsp->mfs_cv);
		kmem_free(mfsp, sizeof(*mfsp));
	}

	return (sleepreturn);
}
/*
 * sigaction1:
 *
 *	Kernel implementation of sigaction(2).  Optionally returns the
 *	previous action in *osa and, if nsa != NULL, installs a new action
 *	for `signum', validating the signal trampoline (tramp/vers) first.
 *	Returns 0 on success or EINVAL for a bad signal number, bad
 *	trampoline version, or disallowed sa_flags.
 *
 *	p->p_lock protects the sigacts and per-process signal state; the
 *	collected ksiginfo queue is drained after the lock is released.
 */
int
sigaction1(struct lwp *l, int signum, const struct sigaction *nsa,
	struct sigaction *osa, const void *tramp, int vers)
{
	struct proc *p;
	struct sigacts *ps;
	sigset_t tset;
	int prop, error;
	ksiginfoq_t kq;
	static bool v0v1valid;

	/* Reject out-of-range signal numbers up front (no locks held). */
	if (signum <= 0 || signum >= NSIG)
		return EINVAL;

	p = l->l_proc;
	error = 0;
	ksiginfo_queue_init(&kq);

	/*
	 * Trampoline ABI version 0 is reserved for the legacy kernel
	 * provided on-stack trampoline.  Conversely, if we are using a
	 * non-0 ABI version, we must have a trampoline.  Only validate the
	 * vers if a new sigaction was supplied and there was an actual
	 * handler specified (not SIG_IGN or SIG_DFL), which don't require
	 * a trampoline. Emulations use legacy kernel trampolines with
	 * version 0, alternatively check for that too.
	 *
	 * If version < 2, we try to autoload the compat module.  Note
	 * that we interlock with the unload check in compat_modcmd()
	 * using kernconfig_lock.  If the autoload fails, we don't try it
	 * again for this process.
	 */
	if (nsa != NULL && nsa->sa_handler != SIG_IGN &&
	    nsa->sa_handler != SIG_DFL) {
		if (__predict_false(vers < 2)) {
			if (p->p_flag & PK_32)
				v0v1valid = true;
			else if ((p->p_lflag & PL_SIGCOMPAT) == 0) {
				kernconfig_lock();
				if (sendsig_sigcontext_vec == NULL) {
					(void)module_autoload("compat",
					    MODULE_CLASS_ANY);
				}
				if (sendsig_sigcontext_vec != NULL) {
					/*
					 * We need to remember if the
					 * sigcontext method may be useable,
					 * because libc may use it even
					 * if siginfo is available.
					 */
					v0v1valid = true;
				}
				mutex_enter(proc_lock);
				/*
				 * Prevent unload of compat module while
				 * this process remains.
				 */
				p->p_lflag |= PL_SIGCOMPAT;
				mutex_exit(proc_lock);
				kernconfig_unlock();
			}
		}

		switch (vers) {
		case 0:
			/* sigcontext, kernel supplied trampoline. */
			if (tramp != NULL || !v0v1valid) {
				return EINVAL;
			}
			break;
		case 1:
			/* sigcontext, user supplied trampoline. */
			if (tramp == NULL || !v0v1valid) {
				return EINVAL;
			}
			break;
		case 2:
		case 3:
			/* siginfo, user supplied trampoline. */
			if (tramp == NULL) {
				return EINVAL;
			}
			break;
		default:
			return EINVAL;
		}
	}

	mutex_enter(p->p_lock);

	ps = p->p_sigacts;
	if (osa)
		*osa = SIGACTION_PS(ps, signum);
	if (!nsa)
		goto out;

	/* Reject unknown flags and signals that cannot be caught. */
	prop = sigprop[signum];
	if ((nsa->sa_flags & ~SA_ALLBITS) || (prop & SA_CANTMASK)) {
		error = EINVAL;
		goto out;
	}

	/* Install the new action and its trampoline descriptor. */
	SIGACTION_PS(ps, signum) = *nsa;
	ps->sa_sigdesc[signum].sd_tramp = tramp;
	ps->sa_sigdesc[signum].sd_vers = vers;
	sigminusset(&sigcantmask, &SIGACTION_PS(ps, signum).sa_mask);

	/* SIGKILL/SIGSTOP-class signals may never be one-shot. */
	if ((prop & SA_NORESET) != 0)
		SIGACTION_PS(ps, signum).sa_flags &= ~SA_RESETHAND;

	if (signum == SIGCHLD) {
		if (nsa->sa_flags & SA_NOCLDSTOP)
			p->p_sflag |= PS_NOCLDSTOP;
		else
			p->p_sflag &= ~PS_NOCLDSTOP;
		if (nsa->sa_flags & SA_NOCLDWAIT) {
			/*
			 * Paranoia: since SA_NOCLDWAIT is implemented by
			 * reparenting the dying child to PID 1 (and trust
			 * it to reap the zombie), PID 1 itself is forbidden
			 * to set SA_NOCLDWAIT.
			 */
			if (p->p_pid == 1)
				p->p_flag &= ~PK_NOCLDWAIT;
			else
				p->p_flag |= PK_NOCLDWAIT;
		} else
			p->p_flag &= ~PK_NOCLDWAIT;

		if (nsa->sa_handler == SIG_IGN) {
			/*
			 * Paranoia: same as above.
			 */
			if (p->p_pid == 1)
				p->p_flag &= ~PK_CLDSIGIGN;
			else
				p->p_flag |= PK_CLDSIGIGN;
		} else
			p->p_flag &= ~PK_CLDSIGIGN;
	}

	/* Unless SA_NODEFER, block the signal while its handler runs. */
	if ((nsa->sa_flags & SA_NODEFER) == 0)
		sigaddset(&SIGACTION_PS(ps, signum).sa_mask, signum);
	else
		sigdelset(&SIGACTION_PS(ps, signum).sa_mask, signum);

	/*
	 * Set bit in p_sigctx.ps_sigignore for signals that are set to
	 * SIG_IGN, and for signals set to SIG_DFL where the default is to
	 * ignore. However, don't put SIGCONT in p_sigctx.ps_sigignore, as
	 * we have to restart the process.
	 */
	if (nsa->sa_handler == SIG_IGN ||
	    (nsa->sa_handler == SIG_DFL && (prop & SA_IGNORE) != 0)) {
		/* Never to be seen again. */
		sigemptyset(&tset);
		sigaddset(&tset, signum);
		sigclearall(p, &tset, &kq);
		if (signum != SIGCONT) {
			/* Easier in psignal */
			sigaddset(&p->p_sigctx.ps_sigignore, signum);
		}
		sigdelset(&p->p_sigctx.ps_sigcatch, signum);
	} else {
		sigdelset(&p->p_sigctx.ps_sigignore, signum);
		if (nsa->sa_handler == SIG_DFL)
			sigdelset(&p->p_sigctx.ps_sigcatch, signum);
		else
			sigaddset(&p->p_sigctx.ps_sigcatch, signum);
	}

	/*
	 * Previously held signals may now have become visible.  Ensure that
	 * we check for them before returning to userspace.
	 */
	if (sigispending(l, 0)) {
		lwp_lock(l);
		l->l_flag |= LW_PENDSIG;
		lwp_unlock(l);
	}
 out:
	mutex_exit(p->p_lock);
	/* Drain collected siginfo after dropping p_lock (may free memory). */
	ksiginfo_queue_drain(&kq);

	return error;
}