/*
 * darwin_sys_sigprocmask: Darwin-compat sigprocmask(2).  Translates the
 * 13-style (old, int-sized) Darwin sigset to/from the native sigset and
 * calls sigprocmask1().
 *
 * Fix: sigprocmask1() requires the caller to hold p->p_lock (see the
 * KASSERT in setucontext32 and the locking in sys___sigprocmask14); the
 * original called it unlocked.
 */
int
darwin_sys_sigprocmask(struct lwp *l, const struct darwin_sys_sigprocmask_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) how;
		syscallarg(sigset13_t *) set;
		syscallarg(sigset13_t *) oset;
	} */
	struct proc *p = l->l_proc;
	int error;
	sigset13_t kdset;
	sigset_t kbset, kboset;

	if (SCARG(uap, set) != NULL) {
		/* Copy in and widen the caller's 13-style mask. */
		error = copyin(SCARG(uap, set), &kdset, sizeof(kdset));
		if (error != 0)
			return error;
		native_sigset13_to_sigset(&kdset, &kbset);
		mutex_enter(p->p_lock);
		error = sigprocmask1(l, SCARG(uap, how), &kbset, &kboset);
		mutex_exit(p->p_lock);
	} else {
		/* Query only: fetch the current mask. */
		mutex_enter(p->p_lock);
		error = sigprocmask1(l, SCARG(uap, how), NULL, &kboset);
		mutex_exit(p->p_lock);
	}

	if (SCARG(uap, oset) == NULL || error != 0)
		return error;

	/* Narrow the previous mask back to 13-style and copy it out. */
	native_sigset_to_sigset13(&kboset, &kdset);
	return copyout(&kdset, SCARG(uap, oset), sizeof(kdset));
}
/*
 * setucontext32: install a 32-bit ucontext on the given LWP.
 *
 * Restores, in order: the saved signal mask (if _UC_SIGMASK is set),
 * the machine context, the context link, and the alternate-signal-stack
 * flag (if _UC_STACK is set).
 *
 * Caller must hold p->p_lock; the lock is dropped around
 * cpu_setmcontext32() (which may touch pageable user state) and
 * re-acquired before returning.
 */
int
setucontext32(struct lwp *l, const ucontext32_t *ucp)
{
	struct proc *p = l->l_proc;
	int error;

	KASSERT(mutex_owned(p->p_lock));

	if ((ucp->uc_flags & _UC_SIGMASK) != 0) {
		/* Restore the caller-supplied signal mask first. */
		error = sigprocmask1(l, SIG_SETMASK, &ucp->uc_sigmask, NULL);
		if (error != 0)
			return error;
	}

	mutex_exit(p->p_lock);
	error = cpu_setmcontext32(l, &ucp->uc_mcontext, ucp->uc_flags);
	mutex_enter(p->p_lock);
	if (error != 0)
		return (error);

	l->l_ctxlink = (void *)(intptr_t)ucp->uc_link;

	/*
	 * If there was stack information, update whether or not we are
	 * still running on an alternate signal stack.
	 */
	if ((ucp->uc_flags & _UC_STACK) != 0) {
		if (ucp->uc_stack.ss_flags & SS_ONSTACK)
			l->l_sigstk.ss_flags |= SS_ONSTACK;
		else
			l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	}

	return 0;
}
/*
 * sys___sigprocmask14: manipulate the calling LWP's signal mask.
 *
 * Copies in the new mask (when supplied), applies it under p->p_lock via
 * sigprocmask1(), and copies the previous mask out (when requested).
 */
int
sys___sigprocmask14(struct lwp *l, const struct sys___sigprocmask14_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) how;
		syscallarg(const sigset_t *) set;
		syscallarg(sigset_t *) oset;
	} */
	struct proc *p = l->l_proc;
	sigset_t newset, oldset;
	sigset_t *newp = NULL, *oldp = NULL;
	int error;

	if (SCARG(uap, set) != NULL) {
		error = copyin(SCARG(uap, set), &newset, sizeof(newset));
		if (error != 0)
			return error;
		newp = &newset;
	}
	if (SCARG(uap, oset) != NULL)
		oldp = &oldset;

	mutex_enter(p->p_lock);
	error = sigprocmask1(l, SCARG(uap, how), newp, oldp);
	mutex_exit(p->p_lock);

	if (error != 0)
		return error;

	if (oldp != NULL)
		error = copyout(&oldset, SCARG(uap, oset), sizeof(oldset));
	return error;
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
compat_16_sys___sigreturn14(struct lwp *l, const struct compat_16_sys___sigreturn14_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */
	struct sigcontext *scp, context;
	struct trapframe *tf;
	struct proc *p = l->l_proc;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((void *)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore signal context. */
	tf = l->l_md.md_regs;

	/*
	 * Check for security violations: the static (privileged) bits
	 * of the status register must not have changed.
	 */
	if (((context.sc_ssr ^ tf->tf_ssr) & PSL_USERSTATIC) != 0)
		return (EINVAL);

	/* Restore the full user register set from the saved context. */
	tf->tf_ssr = context.sc_ssr;
	tf->tf_r0 = context.sc_r0;
	tf->tf_r1 = context.sc_r1;
	tf->tf_r2 = context.sc_r2;
	tf->tf_r3 = context.sc_r3;
	tf->tf_r4 = context.sc_r4;
	tf->tf_r5 = context.sc_r5;
	tf->tf_r6 = context.sc_r6;
	tf->tf_r7 = context.sc_r7;
	tf->tf_r8 = context.sc_r8;
	tf->tf_r9 = context.sc_r9;
	tf->tf_r10 = context.sc_r10;
	tf->tf_r11 = context.sc_r11;
	tf->tf_r12 = context.sc_r12;
	tf->tf_r13 = context.sc_r13;
	tf->tf_r14 = context.sc_r14;
	tf->tf_spc = context.sc_spc;
	tf->tf_r15 = context.sc_r15;
	tf->tf_pr = context.sc_pr;

	mutex_enter(p->p_lock);

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(l, SIG_SETMASK, &context.sc_mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
/*
 * compat_16_netbsd32___sigreturn14: clean up state after a 32-bit
 * process's signal handler returns.  Restores the register context,
 * signal stack state, and signal mask from the sigcontext left by
 * sendsig.  Returns EJUSTRETURN so the syscall return path does not
 * clobber the restored registers.
 */
int
compat_16_netbsd32___sigreturn14(struct lwp *l, const struct compat_16_netbsd32___sigreturn14_args *uap, register_t *retval)
{
	/* {
		syscallarg(netbsd32_sigcontextp_t) sigcntxp;
	} */
	struct netbsd32_sigcontext *scp, context;
	struct proc *p = l->l_proc;
	struct trapframe *tf;
	int error;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = NETBSD32PTR64(SCARG(uap, sigcntxp));
	if (copyin(scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/*
	 * Check for security violations (segment selectors, eflags, eip
	 * validated by the MD helper).
	 */
	error = check_sigcontext32(l, &context);
	if (error != 0)
		return error;

	/* Restore register context. */
	tf = l->l_md.md_regs;
	tf->tf_ds = context.sc_ds;
	tf->tf_es = context.sc_es;
	/* %fs/%gs need the MD reload path (per-CPU segment handling). */
	cpu_fsgs_reload(l, context.sc_fs, context.sc_gs);
	tf->tf_rflags = context.sc_eflags;
	tf->tf_rdi = context.sc_edi;
	tf->tf_rsi = context.sc_esi;
	tf->tf_rbp = context.sc_ebp;
	tf->tf_rbx = context.sc_ebx;
	tf->tf_rdx = context.sc_edx;
	tf->tf_rcx = context.sc_ecx;
	tf->tf_rax = context.sc_eax;
	tf->tf_rip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_rsp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

	mutex_enter(p->p_lock);

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(l, SIG_SETMASK, &context.sc_mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
/*
 * linux32_sys_rt_sigprocmask: Linux 32-bit rt_sigprocmask(2).
 *
 * Validates the Linux sigset size, maps the Linux "how" value to the
 * native one, converts between Linux and native sigset representations,
 * and applies the change under p->p_lock.
 *
 * Fix: removed the unreachable `break` that followed `return EINVAL` in
 * the switch default case.
 */
int
linux32_sys_rt_sigprocmask(struct lwp *l, const struct linux32_sys_rt_sigprocmask_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) how;
		syscallarg(const linux32_sigsetp_t) set;
		syscallarg(linux32_sigsetp_t) oset;
		syscallarg(netbsd32_size_t) sigsetsize;
	} */
	struct proc *p = l->l_proc;
	linux32_sigset_t nls32, ols32;
	sigset_t ns, os;
	int error;
	int how;

	/* Linux passes the sigset size explicitly; only ours is valid. */
	if (SCARG(uap, sigsetsize) != sizeof(linux32_sigset_t))
		return EINVAL;

	switch (SCARG(uap, how)) {
	case LINUX32_SIG_BLOCK:
		how = SIG_BLOCK;
		break;
	case LINUX32_SIG_UNBLOCK:
		how = SIG_UNBLOCK;
		break;
	case LINUX32_SIG_SETMASK:
		how = SIG_SETMASK;
		break;
	default:
		return EINVAL;
	}

	if (SCARG_P32(uap, set) != NULL) {
		if ((error = copyin(SCARG_P32(uap, set), &nls32,
		    sizeof(nls32))) != 0)
			return error;
		linux32_to_native_sigset(&ns, &nls32);
	}

	mutex_enter(p->p_lock);
	error = sigprocmask1(l, how,
	    SCARG_P32(uap, set) ? &ns : NULL,
	    SCARG_P32(uap, oset) ? &os : NULL);
	mutex_exit(p->p_lock);

	if (error != 0)
		return error;

	if (SCARG_P32(uap, oset) != NULL) {
		/* Convert the previous mask back to Linux form. */
		native_to_linux32_sigset(&ols32, &os);
		if ((error = copyout(&ols32, SCARG_P32(uap, oset),
		    sizeof(ols32))) != 0)
			return error;
	}

	return 0;
}
/*
 * compat_16_sys___sigreturn14 (old-style, pre-newlock2 variant):
 * restore register context, signal stack state, and signal mask from
 * the sigcontext left by sendsig.
 *
 * NOTE(review): this variant stores signal-stack state in
 * p->p_sigctx and passes the proc (not the LWP) to sigprocmask1(),
 * the pre-newlock2 kernel API — it predates the per-LWP l_sigstk
 * used elsewhere in this file.
 */
int
compat_16_sys___sigreturn14(struct lwp *l, void *v, register_t *retval)
{
	struct compat_16_sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sigcontext *scp, context;
	struct reg *regs;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore the register context. */
	regs = l->l_md.md_regs;

	/*
	 * Check for security violations: the static (privileged) bits
	 * of the PSR must not have been modified.
	 */
	if (((context.sc_ps ^ regs->r_psr) & PSL_USERSTATIC) != 0)
		return (EINVAL);

	regs->r_fp  = context.sc_fp;
	regs->r_sp  = context.sc_sp;
	regs->r_pc  = context.sc_pc;
	regs->r_psr = context.sc_ps;
	regs->r_sb  = context.sc_sb;
	regs->r_r7  = context.sc_reg[REG_R7];
	regs->r_r6  = context.sc_reg[REG_R6];
	regs->r_r5  = context.sc_reg[REG_R5];
	regs->r_r4  = context.sc_reg[REG_R4];
	regs->r_r3  = context.sc_reg[REG_R3];
	regs->r_r2  = context.sc_reg[REG_R2];
	regs->r_r1  = context.sc_reg[REG_R1];
	regs->r_r0  = context.sc_reg[REG_R0];

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);

	return(EJUSTRETURN);
}
/*
 * linux_restore_sigcontext (alpha): restore signal-stack state, signal
 * mask, PSL/PC, general registers, user stack pointer, and FP state
 * from a Linux sigcontext.  `mask` is the already-converted native
 * sigset.  Returns EJUSTRETURN on success so syscall return does not
 * clobber the restored registers.
 */
int
linux_restore_sigcontext(struct lwp *l, struct linux_sigcontext context, sigset_t *mask)
{
	struct proc *p = l->l_proc;
	struct pcb *pcb;

	/*
	 * Linux doesn't (yet) have alternate signal stacks.
	 * However, the OSF/1 sigcontext which they use has
	 * an onstack member.  This could be needed in the future.
	 */
	mutex_enter(p->p_lock);
	if (context.sc_onstack & LINUX_SA_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Reset the signal mask */
	(void) sigprocmask1(l, SIG_SETMASK, mask, 0);
	mutex_exit(p->p_lock);

	/*
	 * Check for security violations.
	 * Linux doesn't allow any changes to the PSL.
	 */
	if (context.sc_ps != ALPHA_PSL_USERMODE)
		return(EINVAL);

	l->l_md.md_tf->tf_regs[FRAME_PC] = context.sc_pc;
	l->l_md.md_tf->tf_regs[FRAME_PS] = context.sc_ps;
	regtoframe((struct reg *)context.sc_regs, l->l_md.md_tf);
	alpha_pal_wrusp(context.sc_regs[R_SP]);

	/* Drop FPU ownership so the restored FP state takes effect. */
	if (l == fpcurlwp)
		fpcurlwp = NULL;

	/* Restore fp regs and fpr_cr */
	pcb = lwp_getpcb(l);
	memcpy(&pcb->pcb_fp, (struct fpreg *)context.sc_fpregs,
	    sizeof(struct fpreg));
	/* XXX sc_ownedfp ? */
	/* XXX sc_fp_control ? */

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("linux_rt_sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}
/*
 * linux_sys_rt_sigprocmask: Linux rt_sigprocmask(2) emulation.
 *
 * Validates the Linux sigset size, maps the Linux "how" to the native
 * value, translates the masks between Linux and native form, and
 * applies the change under p->p_lock.
 */
int
linux_sys_rt_sigprocmask(struct lwp *l, const struct linux_sys_rt_sigprocmask_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) how;
		syscallarg(const linux_sigset_t *) set;
		syscallarg(linux_sigset_t *) oset;
		syscallarg(size_t) sigsetsize;
	} */
	struct proc *p = l->l_proc;
	linux_sigset_t lnew, lold;
	sigset_t bnew, bold;
	sigset_t *newp, *oldp;
	int how, error;

	if (SCARG(uap, sigsetsize) != sizeof(linux_sigset_t))
		return (EINVAL);

	switch (SCARG(uap, how)) {
	case LINUX_SIG_BLOCK:
		how = SIG_BLOCK;
		break;
	case LINUX_SIG_UNBLOCK:
		how = SIG_UNBLOCK;
		break;
	case LINUX_SIG_SETMASK:
		how = SIG_SETMASK;
		break;
	default:
		return (EINVAL);
	}

	newp = oldp = NULL;
	if (SCARG(uap, set) != NULL) {
		if ((error = copyin(SCARG(uap, set), &lnew,
		    sizeof(lnew))) != 0)
			return (error);
		linux_to_native_sigset(&bnew, &lnew);
		newp = &bnew;
	}
	if (SCARG(uap, oset) != NULL)
		oldp = &bold;

	mutex_enter(p->p_lock);
	error = sigprocmask1(l, how, newp, oldp);
	mutex_exit(p->p_lock);

	if (error == 0 && oldp != NULL) {
		/* Hand the previous mask back in Linux form. */
		native_to_linux_sigset(&lold, &bold);
		error = copyout(&lold, SCARG(uap, oset), sizeof(lold));
	}

	return (error);
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 */
int
linux_sys_sigreturn(struct lwp *l, const struct linux_sys_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct linux_sigframe *) sf;
	} */
	struct proc *p = l->l_proc;
	struct linux_sigframe *sf, ksf;
	struct frame *f;
	sigset_t mask;
	int i, error;

#ifdef DEBUG_LINUX
	printf("linux_sys_sigreturn()\n");
#endif /* DEBUG_LINUX */

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	sf = SCARG(uap, sf);
	if ((error = copyin(sf, &ksf, sizeof(ksf))) != 0)
		return (error);

	/* Restore the register context from the kernel copy. */
	f = (struct frame *)l->l_md.md_regs;
	for (i=0; i<32; i++)
		f->f_regs[i] = ksf.lsf_sc.lsc_regs[i];
	f->f_regs[_R_MULLO] = ksf.lsf_sc.lsc_mdlo;
	f->f_regs[_R_MULHI] = ksf.lsf_sc.lsc_mdhi;
	f->f_regs[_R_PC] = ksf.lsf_sc.lsc_pc;
	f->f_regs[_R_BADVADDR] = ksf.lsf_sc.lsc_badvaddr;
	f->f_regs[_R_CAUSE] = ksf.lsf_sc.lsc_cause;

	mutex_enter(p->p_lock);

	/*
	 * Restore signal stack.  Linux keeps no onstack flag in the
	 * sigframe, so just clear it.
	 */
	l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	linux_to_native_sigset(&mask, (linux_sigset_t *)&ksf.lsf_mask);
	(void)sigprocmask1(l, SIG_SETMASK, &mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
/*
 * linux_sys_siggetmask: Linux siggetmask(2) — return the current
 * blocked-signal mask, converted to the Linux old (int-sized) sigset
 * format, as the syscall return value.
 *
 * Fix: the converted mask was computed but never stored to *retval,
 * so the caller always received 0 (cf. linux_sys_sigsetmask, which
 * does store its result).
 */
/* ARGSUSED */
int
linux_sys_siggetmask(struct lwp *l, const void *v, register_t *retval)
{
	struct proc *p = l->l_proc;
	sigset_t bss;
	linux_old_sigset_t lss;
	int error;

	/* SIG_SETMASK with a NULL new set only queries the mask. */
	mutex_enter(p->p_lock);
	error = sigprocmask1(l, SIG_SETMASK, 0, &bss);
	mutex_exit(p->p_lock);
	if (error)
		return (error);

	native_to_linux_old_sigset(&lss, &bss);
	*retval = lss;
	return (0);
}
/*
 * linux_sigprocmask1: common worker for the old (int-sized sigset)
 * Linux sigprocmask entry points.  Maps the Linux "how" to the native
 * value, converts the masks, and applies the change under p->p_lock.
 * `set`/`oset` are user-space pointers and may be NULL.
 */
int
linux_sigprocmask1(struct lwp *l, int how, const linux_old_sigset_t *set, linux_old_sigset_t *oset)
{
	struct proc *p = l->l_proc;
	linux_old_sigset_t lnew, lold;
	sigset_t bnew, bold;
	int error;

	switch (how) {
	case LINUX_SIG_BLOCK:
		how = SIG_BLOCK;
		break;
	case LINUX_SIG_UNBLOCK:
		how = SIG_UNBLOCK;
		break;
	case LINUX_SIG_SETMASK:
		how = SIG_SETMASK;
		break;
	default:
		return (EINVAL);
	}

	if (set != NULL) {
		if ((error = copyin(set, &lnew, sizeof(lnew))) != 0)
			return (error);
		linux_old_to_native_sigset(&bnew, &lnew);
	}

	mutex_enter(p->p_lock);
	error = sigprocmask1(l, how,
	    set != NULL ? &bnew : NULL,
	    oset != NULL ? &bold : NULL);
	mutex_exit(p->p_lock);

	if (error == 0 && oset != NULL) {
		/* Return the previous mask in Linux old format. */
		native_to_linux_old_sigset(&lold, &bold);
		error = copyout(&lold, oset, sizeof(lold));
	}

	return (error);
}
/*
 * compat_13_netbsd32_sigprocmask: 32-bit compat sigprocmask with the
 * old int-sized sigset; the old mask is returned in *retval.
 *
 * Fix: sigprocmask1() must be called with p->p_lock held — the
 * otherwise-identical native compat_13_sys_sigprocmask takes the lock,
 * this variant did not.
 */
int
compat_13_netbsd32_sigprocmask(struct lwp *l, const struct compat_13_netbsd32_sigprocmask_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) how;
		syscallarg(int) mask;
	} */
	struct proc *p = l->l_proc;
	sigset13_t ness, oess;
	sigset_t nbss, obss;
	int error;

	/* Widen the old-style mask to a native sigset. */
	ness = SCARG(uap, mask);
	native_sigset13_to_sigset(&ness, &nbss);

	mutex_enter(p->p_lock);
	error = sigprocmask1(l, SCARG(uap, how), &nbss, &obss);
	mutex_exit(p->p_lock);
	if (error)
		return (error);

	/* Narrow the previous mask and return it in *retval. */
	native_sigset_to_sigset13(&obss, &oess);
	*retval = oess;
	return (0);
}
/*
 * The following three functions fiddle with a process' signal mask.
 * Convert the signal masks because of the different signal
 * values for Linux.  The need for this is the reason why
 * they are here, and have not been mapped directly.
 */
int
linux_sys_sigsetmask(struct lwp *l, const struct linux_sys_sigsetmask_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_old_sigset_t) mask;
	} */
	struct proc *p = l->l_proc;
	linux_old_sigset_t lnew, lold;
	sigset_t bnew, bold;
	int error;

	/* Convert the Linux old-style mask to native form. */
	lnew = SCARG(uap, mask);
	linux_old_to_native_sigset(&bnew, &lnew);

	mutex_enter(p->p_lock);
	error = sigprocmask1(l, SIG_SETMASK, &bnew, &bold);
	mutex_exit(p->p_lock);
	if (error)
		return (error);

	/* The previous mask is the return value, in Linux form. */
	native_to_linux_old_sigset(&lold, &bold);
	*retval = lold;
	return (0);
}
/*
 * compat_13_sys_sigreturn (vax): restore processor state, signal
 * stack, and signal mask from an old-style (sigcontext13) context left
 * by sendsig.
 *
 * Fix: the copyin used sizeof(struct sigcontext) — the *new* context
 * size — to fill a struct sigcontext13.  Since the new sigcontext is
 * larger (sigset_t vs. sigset13_t), this overran the on-stack `ksc`,
 * a kernel-stack buffer overflow fed by user-controlled data.  Copy
 * exactly sizeof(ksc).
 */
int
compat_13_sys_sigreturn(struct lwp *l, void *v, register_t *retval)
{
	struct compat_13_sys_sigreturn_args /* {
		syscallarg(struct sigcontext13 *) sigcntxp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct trapframe *scf;
	struct sigcontext13 *ucntx;
	struct sigcontext13 ksc;
	sigset_t mask;

	scf = l->l_addr->u_pcb.framep;
	ucntx = SCARG(uap, sigcntxp);

	if (copyin((caddr_t)ucntx, (caddr_t)&ksc, sizeof(ksc)))
		return EINVAL;

	/*
	 * Verify the PSL: no elevated IPL or interrupt stack, both
	 * current and previous mode must be user, and no compatibility
	 * mode.
	 */
	if ((ksc.sc_ps & (PSL_IPL | PSL_IS)) ||
	    ((ksc.sc_ps & (PSL_U | PSL_PREVU)) != (PSL_U | PSL_PREVU)) ||
	    (ksc.sc_ps & PSL_CM)) {
		return (EINVAL);
	}

	/* Restore signal stack. */
	if (ksc.sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask (widen the 13-style mask first). */
	native_sigset13_to_sigset(&ksc.sc_mask, &mask);
	(void) sigprocmask1(p, SIG_SETMASK, &mask, 0);

	/* Restore register context. */
	scf->fp = ksc.sc_fp;
	scf->ap = ksc.sc_ap;
	scf->pc = ksc.sc_pc;
	scf->sp = ksc.sc_sp;
	scf->psl = ksc.sc_ps;
	return (EJUSTRETURN);
}
/*
 * compat_13_sys_sigprocmask: old-style sigprocmask(2) with the
 * int-sized sigset13_t; the previous mask is returned in *retval.
 */
int
compat_13_sys_sigprocmask(struct lwp *l, const struct compat_13_sys_sigprocmask_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) how;
		syscallarg(int) mask;
	} */
	struct proc *p = l->l_proc;
	sigset13_t oldset13;
	sigset13_t newset13 = SCARG(uap, mask);
	sigset_t newset, oldset;
	int error;

	/* Widen the old-style mask to a native sigset. */
	native_sigset13_to_sigset(&newset13, &newset);

	mutex_enter(p->p_lock);
	error = sigprocmask1(l, SCARG(uap, how), &newset, &oldset);
	mutex_exit(p->p_lock);
	if (error)
		return (error);

	/* Narrow the previous mask and hand it back via *retval. */
	native_sigset_to_sigset13(&oldset, &oldset13);
	*retval = oldset13;
	return (0);
}
/*
 * linux_restore_sigcontext (i386): restore register context,
 * signal-stack state, and signal mask from a Linux sigcontext.
 * Handles both VM86 and protected-mode returns.  `scp` points at an
 * already-copied-in kernel copy of the context.
 */
static int
linux_restore_sigcontext(struct lwp *l, struct linux_sigcontext *scp, register_t *retval)
{
	struct proc *p = l->l_proc;
	struct sigaltstack *sas = &l->l_sigstk;
	struct trapframe *tf;
	sigset_t mask;
	ssize_t ss_gap;

	/* Restore register context. */
	tf = l->l_md.md_regs;
	DPRINTF(("sigreturn enter esp=0x%x eip=0x%x\n", tf->tf_esp,
	    tf->tf_eip));
#ifdef VM86
	if (scp->sc_eflags & PSL_VM) {
		void syscall_vm86(struct trapframe *);

		/* VM86 mode: segment registers live in the vm86 slots. */
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_ds = scp->sc_ds;
		set_vflags(l, scp->sc_eflags);
		p->p_md.md_syscall = syscall_vm86;
	} else
#endif
	{
		/*
		 * Check for security violations.  If we're returning to
		 * protected mode, the CPU will validate the segment registers
		 * automatically and generate a trap on violations.  We handle
		 * the trap, rather than doing all of the checking here.
		 */
		if (((scp->sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
		    !USERMODE(scp->sc_cs, scp->sc_eflags))
			return EINVAL;

		tf->tf_gs = scp->sc_gs;
		tf->tf_fs = scp->sc_fs;
		tf->tf_es = scp->sc_es;
		tf->tf_ds = scp->sc_ds;
#ifdef VM86
		/* Returning from VM86 mode: re-select the syscall path. */
		if (tf->tf_eflags & PSL_VM)
			(*p->p_emul->e_syscall_intern)(p);
#endif
		tf->tf_eflags = scp->sc_eflags;
	}
	tf->tf_edi = scp->sc_edi;
	tf->tf_esi = scp->sc_esi;
	tf->tf_ebp = scp->sc_ebp;
	tf->tf_ebx = scp->sc_ebx;
	tf->tf_edx = scp->sc_edx;
	tf->tf_ecx = scp->sc_ecx;
	tf->tf_eax = scp->sc_eax;
	tf->tf_eip = scp->sc_eip;
	tf->tf_cs = scp->sc_cs;
	tf->tf_esp = scp->sc_esp_at_signal;
	tf->tf_ss = scp->sc_ss;

	/* Restore signal stack. */
	/*
	 * Linux really does it this way; it doesn't have space in sigframe
	 * to save the onstack flag.  Instead, infer it from whether the
	 * signal-time %esp fell inside the alternate stack.
	 */
	mutex_enter(p->p_lock);
	ss_gap = (ssize_t)
	    ((char *)scp->sc_esp_at_signal - (char *)sas->ss_sp);
	if (ss_gap >= 0 && ss_gap < sas->ss_size)
		sas->ss_flags |= SS_ONSTACK;
	else
		sas->ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	linux_old_to_native_sigset(&mask, &scp->sc_mask);
	(void) sigprocmask1(l, SIG_SETMASK, &mask, 0);
	mutex_exit(p->p_lock);

	DPRINTF(("sigreturn exit esp=0x%x eip=0x%x\n", tf->tf_esp,
	    tf->tf_eip));
	return EJUSTRETURN;
}
/*
 * compat_16_sys___sigreturn14 (mips): restore register context, FP
 * state, signal stack, and signal mask from the sigcontext left by
 * sendsig.
 *
 * Fix: after copyin(), the code continued to dereference the *user*
 * pointer `scp` (sc_regs, sc_fpused, sc_fpregs) instead of the kernel
 * copy `ksc`.  Dereferencing a user address directly in the kernel is
 * at best a crash and at worst a TOCTOU hole; all post-copyin accesses
 * now use `ksc`.
 */
/* ARGSUSED */
int
compat_16_sys___sigreturn14(struct lwp *l, const struct compat_16_sys___sigreturn14_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */
	struct sigcontext *scp, ksc;
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;
	struct pcb * const pcb = lwp_getpcb(l);
	int error;

#if !defined(__mips_o32)
	if (p->p_md.md_abi != _MIPS_BSD_API_O32)
		return ENOSYS;
#endif

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %p\n", l->l_proc->p_pid, scp);
#endif
	if ((error = copyin(scp, &ksc, sizeof(ksc))) != 0)
		return (error);

	if ((u_int) ksc.sc_regs[_R_ZERO] != 0xacedbadeU)/* magic number */
		return (EINVAL);

	/* Restore the register context from the kernel copy only. */
	tf->tf_regs[_R_PC] = ksc.sc_pc;
	tf->tf_regs[_R_MULLO] = ksc.mullo;
	tf->tf_regs[_R_MULHI] = ksc.mulhi;
#if defined(__mips_o32)
	memcpy(&tf->tf_regs[1], &ksc.sc_regs[1],
	    sizeof(ksc.sc_regs) - sizeof(ksc.sc_regs[0]));
#else
	for (size_t i = 1; i < __arraycount(ksc.sc_regs); i++)
		tf->tf_regs[i] = ksc.sc_regs[i];
#endif
#if !defined(NOFPU)
	if (ksc.sc_fpused) {
		/* Discard CPU-held FP state so the saved copy wins. */
		fpu_discard();
	}
#endif
	*(struct fpreg *)&pcb->pcb_fpregs = *(struct fpreg *)ksc.sc_fpregs;

	mutex_enter(p->p_lock);

	/* Restore signal stack. */
	if (ksc.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(l, SIG_SETMASK, &ksc.sc_mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
/*
 * compat_13_sys_sigreturn (arm, old proc-based API): restore register
 * context, signal stack, and signal mask from an old-style
 * (sigcontext13) context left by sendsig.
 */
int
compat_13_sys_sigreturn(struct proc *p, void *v, register_t *retval)
{
	struct compat_13_sys_sigreturn_args /* {
		syscallarg(struct sigcontext13 *) sigcntxp;
	} */ *uap = v;
	struct sigcontext13 *scp, context;
	struct trapframe *tf;
	sigset_t mask;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
#ifdef __PROG32
	if ((context.sc_spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (context.sc_spsr & (I32_bit | F32_bit)) != 0)
		return (EINVAL);
#else /* __PROG26 */
	if ((context.sc_pc & R15_MODE) != R15_MODE_USR ||
	    (context.sc_pc & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)) != 0)
		return EINVAL;
#endif

	/* Restore register context. */
	tf = p->p_addr->u_pcb.pcb_tf;
	tf->tf_r0 = context.sc_r0;
	tf->tf_r1 = context.sc_r1;
	tf->tf_r2 = context.sc_r2;
	tf->tf_r3 = context.sc_r3;
	tf->tf_r4 = context.sc_r4;
	tf->tf_r5 = context.sc_r5;
	tf->tf_r6 = context.sc_r6;
	tf->tf_r7 = context.sc_r7;
	tf->tf_r8 = context.sc_r8;
	tf->tf_r9 = context.sc_r9;
	tf->tf_r10 = context.sc_r10;
	tf->tf_r11 = context.sc_r11;
	tf->tf_r12 = context.sc_r12;
	tf->tf_usr_sp = context.sc_usr_sp;
	tf->tf_usr_lr = context.sc_usr_lr;
	tf->tf_svc_lr = context.sc_svc_lr;
	tf->tf_pc = context.sc_pc;
	tf->tf_spsr = context.sc_spsr;

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask (widen the 13-style mask first). */
	native_sigset13_to_sigset(&context.sc_mask, &mask);
	(void) sigprocmask1(p, SIG_SETMASK, &mask, 0);

	return (EJUSTRETURN);
}
/*
 * compat_16_netbsd32___sigreturn14 (sparc64): restore the 32-bit
 * register context, signal stack, and signal mask from the sigcontext
 * left by sendsig.
 *
 * Fix: the statement switching from the user pointer to the kernel
 * copy was mis-encoded as `scp = ≻` (HTML-entity corruption of
 * `scp = &sc;`), which does not compile; restored the correct token.
 */
/* ARGSUSED */
int
compat_16_netbsd32___sigreturn14(struct lwp *l, const struct compat_16_netbsd32___sigreturn14_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */
	struct netbsd32_sigcontext sc, *scp;
	struct trapframe64 *tf;
	struct proc *p = l->l_proc;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(l)) {
#ifdef DEBUG
		printf("netbsd32_sigreturn14: rwindow_save(%p) failed, sending SIGILL\n", p);
		Debugger();
#endif
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
	}
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("netbsd32_sigreturn14: %s[%d], sigcntxp %p\n",
		    p->p_comm, p->p_pid, SCARG(uap, sigcntxp));
		if (sigdebug & SDB_DDB)
			Debugger();
	}
#endif
	scp = (struct netbsd32_sigcontext *)(u_long)SCARG(uap, sigcntxp);
	if ((vaddr_t)scp & 3 || (copyin((void *)scp, &sc, sizeof sc) != 0)) {
#ifdef DEBUG
		printf("netbsd32_sigreturn14: copyin failed: scp=%p\n", scp);
		Debugger();
#endif
		return (EINVAL);
	}
	/* From here on, work only with the kernel copy. */
	scp = &sc;

	tf = l->l_md.md_tf;
	/*
	 * Only the icc bits in the psr are used, so it need not be
	 * verified.  pc and npc must be multiples of 4.  This is all
	 * that is required; if it holds, just do it.
	 */
	if (((sc.sc_pc | sc.sc_npc) & 3) != 0 ||
	    (sc.sc_pc == 0) || (sc.sc_npc == 0))
#ifdef DEBUG
	{
		printf("netbsd32_sigreturn14: pc %p or npc %p invalid\n",
		    sc.sc_pc, sc.sc_npc);
		Debugger();
		return (EINVAL);
	}
#else
		return (EINVAL);
#endif
	/* take only psr ICC field */
	tf->tf_tstate = (int64_t)(tf->tf_tstate & ~TSTATE_CCR) |
	    PSRCC_TO_TSTATE(sc.sc_psr);
	tf->tf_pc = (int64_t)sc.sc_pc;
	tf->tf_npc = (int64_t)sc.sc_npc;
	tf->tf_global[1] = (int64_t)sc.sc_g1;
	tf->tf_out[0] = (int64_t)sc.sc_o0;
	tf->tf_out[6] = (int64_t)sc.sc_sp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("netbsd32_sigreturn14: return trapframe pc=%p sp=%p tstate=%llx\n",
		    (vaddr_t)tf->tf_pc, (vaddr_t)tf->tf_out[6], tf->tf_tstate);
		if (sigdebug & SDB_DDB)
			Debugger();
	}
#endif

	/* Restore signal stack. */
	mutex_enter(p->p_lock);
	if (sc.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(l, SIG_SETMASK, &sc.sc_mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
/*
 * compat_13_sys_sigreturn (i386): restore register context, signal
 * stack, and signal mask from an old-style (sigcontext13) context
 * left by sendsig.  Handles both VM86 and protected-mode returns.
 */
int
compat_13_sys_sigreturn(struct lwp *l, const struct compat_13_sys_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct sigcontext13 *) sigcntxp;
	} */
	struct proc *p = l->l_proc;
	struct sigcontext13 *scp, context;
	struct trapframe *tf;
	sigset_t mask;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((void *)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore register context. */
	tf = l->l_md.md_regs;
#ifdef VM86
	if (context.sc_eflags & PSL_VM) {
		void syscall_vm86(struct trapframe *);

		/* VM86 mode: segment registers live in the vm86 slots. */
		tf->tf_vm86_gs = context.sc_gs;
		tf->tf_vm86_fs = context.sc_fs;
		tf->tf_vm86_es = context.sc_es;
		tf->tf_vm86_ds = context.sc_ds;
		set_vflags(l, context.sc_eflags);
		p->p_md.md_syscall = syscall_vm86;
	} else
#endif
	{
		/*
		 * Check for security violations.  If we're returning to
		 * protected mode, the CPU will validate the segment registers
		 * automatically and generate a trap on violations.  We handle
		 * the trap, rather than doing all of the checking here.
		 */
		if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
		    !USERMODE(context.sc_cs, context.sc_eflags))
			return (EINVAL);

		tf->tf_gs = context.sc_gs;
		tf->tf_fs = context.sc_fs;
		tf->tf_es = context.sc_es;
		tf->tf_ds = context.sc_ds;
		/* Only the user-modifiable eflags bits may change. */
		tf->tf_eflags &= ~PSL_USER;
		tf->tf_eflags |= context.sc_eflags & PSL_USER;
	}
	tf->tf_edi = context.sc_edi;
	tf->tf_esi = context.sc_esi;
	tf->tf_ebp = context.sc_ebp;
	tf->tf_ebx = context.sc_ebx;
	tf->tf_edx = context.sc_edx;
	tf->tf_ecx = context.sc_ecx;
	tf->tf_eax = context.sc_eax;
	tf->tf_eip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_esp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

	mutex_enter(p->p_lock);

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask (widen the 13-style mask first). */
	native_sigset13_to_sigset(&context.sc_mask, &mask);
	(void) sigprocmask1(l, SIG_SETMASK, &mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
/*
 * compat_13_sys_sigreturn (arm, lwp-based variant): restore register
 * context, signal stack, and signal mask from an old-style
 * (sigcontext13) context left by sendsig.
 */
int
compat_13_sys_sigreturn(struct lwp *l, const struct compat_13_sys_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct sigcontext13 *) sigcntxp;
	} */
	struct sigcontext13 *scp, context;
	struct trapframe * const tf = lwp_trapframe(l);
	struct proc * const p = l->l_proc;
	sigset_t mask;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((void *)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	if (!VALID_R15_PSR(context.sc_pc, context.sc_spsr))
		return EINVAL;

	/* Restore register context. */
	tf->tf_r0 = context.sc_r0;
	tf->tf_r1 = context.sc_r1;
	tf->tf_r2 = context.sc_r2;
	tf->tf_r3 = context.sc_r3;
	tf->tf_r4 = context.sc_r4;
	tf->tf_r5 = context.sc_r5;
	tf->tf_r6 = context.sc_r6;
	tf->tf_r7 = context.sc_r7;
	tf->tf_r8 = context.sc_r8;
	tf->tf_r9 = context.sc_r9;
	tf->tf_r10 = context.sc_r10;
	tf->tf_r11 = context.sc_r11;
	tf->tf_r12 = context.sc_r12;
	tf->tf_usr_sp = context.sc_usr_sp;
	tf->tf_usr_lr = context.sc_usr_lr;
	tf->tf_svc_lr = context.sc_svc_lr;
	tf->tf_pc = context.sc_pc;
	tf->tf_spsr = context.sc_spsr;

	mutex_enter(p->p_lock);

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask (widen the 13-style mask first). */
	native_sigset13_to_sigset(&context.sc_mask, &mask);
	(void) sigprocmask1(l, SIG_SETMASK, &mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
/*
 * compat_16_sys___sigreturn14 (hppa, old proc-era API): restore
 * register context, signal stack, and signal mask from the sigcontext
 * left by sendsig.  Optionally bulk-restores a user-supplied trapframe
 * (sc_ap) after overwriting the security-sensitive registers with the
 * kernel's current values.
 */
int
compat_16_sys___sigreturn14(struct lwp *l, void *v, register_t *retval)
{
	struct compat_16_sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sigcontext *scp;
	struct trapframe *tf;
	struct sigcontext tsigc;
	struct sigstate tstate;
	int rf, flags;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %p\n", p->p_pid, scp);
#endif
	/* The context must be word-aligned to be copied in. */
	if ((int)scp & 3)
		return (EINVAL);

	if (copyin(scp, &tsigc, sizeof(tsigc)) != 0)
		return (EFAULT);
	scp = &tsigc;

	/* Make sure the user isn't pulling a fast one on us! */
	/* XXX fredette - until this is done, huge security hole here. */
	/* XXX fredette - requiring that PSL_R be zero will hurt debuggers. */
#define PSW_MBS (PSW_C|PSW_Q|PSW_P|PSW_D|PSW_I)
#define PSW_MBZ (PSW_Y|PSW_Z|PSW_S|PSW_X|PSW_M|PSW_R)
	if ((scp->sc_ps & (PSW_MBS|PSW_MBZ)) != PSW_MBS)
		return (EINVAL);

	/* Restore register context. */
	tf = (struct trapframe *)l->l_md.md_regs;

	/*
	 * Grab pointer to hardware state information.
	 * If zero, the user is probably doing a longjmp.
	 */
	if ((rf = scp->sc_ap) == 0)
		goto restore;

	/*
	 * See if there is anything to do before we go to the
	 * expense of copying in the trapframe
	 */
	flags = fuword((caddr_t)rf);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): sc_ap %x flags %x\n",
		    p->p_pid, rf, flags);
#endif
	/* fuword failed (bogus sc_ap value). */
	if (flags == -1)
		return (EINVAL);

	if (flags == 0 || copyin((caddr_t)rf, &tstate, sizeof(tstate)) != 0)
		goto restore;
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sigreturn(%d): ssp %p usp %x scp %p\n",
		    p->p_pid, &flags, scp->sc_sp, SCARG(uap, sigcntxp));
#endif
	/*
	 * Restore most of the users registers except for those
	 * in the sigcontext; they will be handled below.
	 */
	if (flags & SS_USERREGS) {
		/*
		 * There are more registers that the user can tell
		 * us to bash than registers that, for security
		 * or other reasons, we must protect.  So it's
		 * easier (but not faster), to copy these sensitive
		 * register values into the user-provided frame,
		 * then bulk-copy the user-provided frame into
		 * the process' frame.
		 */
#define SIG_PROTECT(r) tstate.ss_frame.r = tf->r
		/* SRs 5,6,7 must be protected. */
		SIG_PROTECT(tf_sr5);
		SIG_PROTECT(tf_sr6);
		SIG_PROTECT(tf_sr7);

		/* all CRs except CR11 must be protected. */
		SIG_PROTECT(tf_rctr);	/* CR0 */
		/* CRs 1-8 are reserved */
		SIG_PROTECT(tf_pidr1);	/* CR8 */
		SIG_PROTECT(tf_pidr2);	/* CR9 */
		SIG_PROTECT(tf_ccr);	/* CR10 */
		SIG_PROTECT(tf_pidr3);	/* CR12 */
		SIG_PROTECT(tf_pidr4);	/* CR14 */
		SIG_PROTECT(tf_eiem);	/* CR15 */
		/* CR17 is the IISQ head */
		/* CR18 is the IIOQ head */
		SIG_PROTECT(tf_iir);	/* CR19 */
		SIG_PROTECT(tf_isr);	/* CR20 */
		SIG_PROTECT(tf_ior);	/* CR21 */
		/* CR22 is the IPSW */
		SIG_PROTECT(tf_eirr);	/* CR23 */
		SIG_PROTECT(tf_hptm);	/* CR24 */
		SIG_PROTECT(tf_vtop);	/* CR25 */
		/* XXX where are CR26, CR27, CR29, CR31? */
		SIG_PROTECT(tf_cr28);	/* CR28 */
		SIG_PROTECT(tf_cr30);	/* CR30 */
#undef	SIG_PROTECT

		/* The bulk copy. */
		*tf = tstate.ss_frame;
	}

	/*
	 * Restore the original FP context
	 */
	/* XXX fredette */

 restore:
	/*
	 * Restore the user supplied information.
	 * This should be at the last so that the error (EINVAL)
	 * is reported to the sigreturn caller, not to the
	 * jump destination.
	 */
	tf->tf_sp = scp->sc_sp;
	/* XXX should we be doing the space registers? */
	tf->tf_iisq_head = scp->sc_pcsqh;
	tf->tf_iioq_head = scp->sc_pcoqh | HPPA_PC_PRIV_USER;
	tf->tf_iisq_tail = scp->sc_pcsqt;
	tf->tf_iioq_tail = scp->sc_pcoqt | HPPA_PC_PRIV_USER;
	tf->tf_ipsw = scp->sc_ps;

	/* Restore signal stack. */
	if (scp->sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &scp->sc_mask, 0);

#ifdef DEBUG
#if 0 /* XXX FP state */
	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
		printf("sigreturn(%d): copied in FP state (%x) at %p\n",
		    p->p_pid, *(u_int *)&tstate.ss_fpstate,
		    &tstate.ss_fpstate);
#endif
	if ((sigdebug & SDB_FOLLOW) ||
	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}
/*
 * Restore the CPU state saved in a Linux/i386 (32-bit) sigcontext on an
 * amd64 host.  All user-supplied selectors, flags and the PC are validated
 * BEFORE anything is installed in the trapframe, so a signal handler
 * cannot use sigreturn to escalate privileges.  Afterwards the alternate
 * signal stack disposition is re-derived and the saved signal mask is
 * reinstated.
 *
 * Returns 0 on success, EINVAL if the context fails validation.
 * (retval is unused; the caller returns EJUSTRETURN to user space.)
 */
static int
linux32_restore_sigcontext(struct lwp *l, struct linux32_sigcontext *scp,
    register_t *retval)
{
	struct trapframe *tf;
	struct proc *p = l->l_proc;
	struct sigaltstack *sas = &l->l_sigstk;
	struct pcb *pcb;
	sigset_t mask;
	ssize_t ss_gap;
	register_t fssel, gssel;

	/* Restore register context. */
	tf = l->l_md.md_regs;
	pcb = lwp_getpcb(l);
	DPRINTF(("sigreturn enter rsp=0x%lx rip=0x%lx\n",
	    tf->tf_rsp, tf->tf_rip));

	/*
	 * Check for security violations.
	 *
	 * The privileged ("static") RFLAGS bits must be unchanged from the
	 * current trapframe, and %cs/%eflags must describe user mode.
	 */
	if (((scp->sc_eflags ^ tf->tf_rflags) & PSL_USERSTATIC) != 0 ||
	    !USERMODE(scp->sc_cs, scp->sc_eflags))
		return EINVAL;

	/*
	 * %fs and %gs may each be zero, a plain 32-bit user data selector,
	 * or the special per-thread selector -- the latter only when a
	 * thread-local base has been set up (pcb_fs/pcb_gs non-zero).
	 * Anything else is rejected.
	 */
	if (scp->sc_fs != 0 && !VALID_USER_DSEL32(scp->sc_fs) &&
	    !(VALID_USER_FSEL32(scp->sc_fs) && pcb->pcb_fs != 0))
		return EINVAL;

	if (scp->sc_gs != 0 && !VALID_USER_DSEL32(scp->sc_gs) &&
	    !(VALID_USER_GSEL32(scp->sc_gs) && pcb->pcb_gs != 0))
		return EINVAL;

	if (scp->sc_es != 0 && !VALID_USER_DSEL32(scp->sc_es))
		return EINVAL;

	if (!VALID_USER_DSEL32(scp->sc_ds) ||
	    !VALID_USER_DSEL32(scp->sc_ss))
		return EINVAL;

	/* The saved PC must lie inside the 32-bit user address space. */
	if (scp->sc_eip >= VM_MAXUSER_ADDRESS32)
		return EINVAL;

	/* Reload %fs/%gs (and their bases) through the MD helper. */
	gssel = (register_t)scp->sc_gs & 0xffff;
	fssel = (register_t)scp->sc_fs & 0xffff;
	cpu_fsgs_reload(l, fssel, gssel);
	tf->tf_es = (register_t)scp->sc_es & 0xffff;
	tf->tf_ds = (register_t)scp->sc_ds & 0xffff;

	/* Keep the privileged RFLAGS bits, take the user bits from scp. */
	tf->tf_rflags &= ~PSL_USER;
	tf->tf_rflags |= ((register_t)scp->sc_eflags & PSL_USER);

	/* 32-bit register images are zero-extended into the 64-bit frame. */
	tf->tf_rdi = (register_t)scp->sc_edi & 0xffffffff;
	tf->tf_rsi = (register_t)scp->sc_esi & 0xffffffff;
	tf->tf_rbp = (register_t)scp->sc_ebp & 0xffffffff;
	tf->tf_rbx = (register_t)scp->sc_ebx & 0xffffffff;
	tf->tf_rdx = (register_t)scp->sc_edx & 0xffffffff;
	tf->tf_rcx = (register_t)scp->sc_ecx & 0xffffffff;
	tf->tf_rax = (register_t)scp->sc_eax & 0xffffffff;
	tf->tf_rip = (register_t)scp->sc_eip & 0xffffffff;
	tf->tf_cs = (register_t)scp->sc_cs & 0xffff;
	tf->tf_rsp = (register_t)scp->sc_esp_at_signal & 0xffffffff;
	tf->tf_ss = (register_t)scp->sc_ss & 0xffff;

	mutex_enter(p->p_lock);

	/*
	 * Restore signal stack.
	 *
	 * The Linux sigcontext carries no explicit on-stack flag, so it is
	 * inferred: if the signal-time stack pointer falls inside the
	 * registered alternate stack, mark the lwp as running on it.
	 * (ss_gap >= 0 is checked first, so the signed/unsigned comparison
	 * against ss_size is safe.)
	 */
	ss_gap = (ssize_t)
	    ((char *)NETBSD32IPTR64(scp->sc_esp_at_signal)
	    - (char *)sas->ss_sp);
	if (ss_gap >= 0 && ss_gap < sas->ss_size)
		sas->ss_flags |= SS_ONSTACK;
	else
		sas->ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	linux32_old_to_native_sigset(&mask, &scp->sc_mask);
	(void) sigprocmask1(l, SIG_SETMASK, &mask, 0);
	mutex_exit(p->p_lock);

	DPRINTF(("linux32_sigreturn: rip = 0x%lx, rsp = 0x%lx, flags = 0x%lx\n",
	    tf->tf_rip, tf->tf_rsp, tf->tf_rflags));

	return EJUSTRETURN;
}
/*
 * Old-style (COMPAT_13) sigreturn: reinstate the signal mask, the signal
 * stack disposition and the register state that sendsig() saved, and
 * resume the interrupted user context at its previous pc and psl.  The
 * supplied psl is checked carefully so the caller cannot grant itself
 * supervisor privileges or provoke a machine fault.
 */
int
compat_13_sys_sigreturn(struct lwp *l, const struct compat_13_sys_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct sigcontext13 *) sigcntxp;
	} */
	struct proc *p = l->l_proc;
	struct sigcontext13 ksc;
	struct sigcontext13 *ucp;
	struct frame *regs;
	sigset_t bsdmask;

	/*
	 * The trampoline code hands us the context pointer; tracking it
	 * ourselves would be unsafe if the program jumps out of the
	 * signal handler.
	 */
	ucp = SCARG(uap, sigcntxp);

	/* Reject a misaligned context pointer outright. */
	if (((int)ucp & 1) != 0)
		return EINVAL;

	/* Pull the whole context into the kernel before validating it. */
	if (copyin(ucp, &ksc, sizeof(ksc)) != 0)
		return EFAULT;

	/* Make sure the user isn't pulling a fast one on us! */
	if ((ksc.sc_ps & (PSL_MBZ|PSL_IPL|PSL_S)) != 0)
		return EINVAL;

	/* Restore register context. */
	regs = (struct frame *)l->l_md.md_regs;

	/*
	 * We only support restoring the sigcontext13 in this call.
	 * We are not called from the sigcode (per sendsig()), so
	 * we will not have a sigstate to restore.
	 */
	if (ksc.sc_ap != 0)
		return EINVAL;

	/*
	 * Restore the user-supplied information last, so that any
	 * EINVAL above is reported to the sigreturn caller rather
	 * than to the jump destination.
	 */
	regs->f_regs[SP] = ksc.sc_sp;
	regs->f_regs[A6] = ksc.sc_fp;
	regs->f_pc = ksc.sc_pc;
	regs->f_sr = ksc.sc_ps;

	mutex_enter(p->p_lock);

	/* Restore signal stack. */
	if ((ksc.sc_onstack & SS_ONSTACK) != 0)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	native_sigset13_to_sigset(&ksc.sc_mask, &bsdmask);
	(void)sigprocmask1(l, SIG_SETMASK, &bsdmask, 0);
	mutex_exit(p->p_lock);

	return EJUSTRETURN;
}
/*
 * The following needs code review for potential security issues
 */
/*
 * Linux sigreturn(2) emulation for powerpc: copy the Linux sigcontext
 * and its register save area back from the user stack, validate the MSR,
 * and rebuild the trapframe, FP state and signal mask from it.
 */
int
linux_sys_sigreturn(struct lwp *l, const struct linux_sys_sigreturn_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct linux_sigcontext *) scp;
	} */
	struct proc *p = l->l_proc;
	struct linux_sigcontext *scp, context;
	struct linux_sigregs sregs;
	struct linux_pt_regs *lregs;
	struct trapframe *tf;
	sigset_t mask;
	int i;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, scp);

	/*
	 * Get the context from user stack
	 */
	if (copyin(scp, &context, sizeof(*scp)))
		return (EFAULT);

	/*
	 * Restore register context.
	 * context.lregs points at the register save area on the user
	 * stack; fetch the whole linux_sigregs image through it.
	 */
	if (copyin((void *)context.lregs, &sregs, sizeof(sregs)))
		return (EFAULT);
	lregs = (struct linux_pt_regs *)&sregs.lgp_regs;

	tf = trapframe(l);
#ifdef DEBUG_LINUX
	printf("linux_sys_sigreturn: trapframe=0x%lx scp=0x%lx\n",
	    (unsigned long)tf, (unsigned long)scp);
#endif

	/* Refuse an MSR the user is not allowed to run with. */
	if (!PSL_USEROK_P(lregs->lmsr))
		return (EINVAL);

	/* Rebuild the trapframe from the saved Linux register image. */
	for (i = 0; i < 32; i++)
		tf->tf_fixreg[i] = lregs->lgpr[i];
	tf->tf_lr = lregs->llink;
	tf->tf_cr = lregs->lccr;
	tf->tf_xer = lregs->lxer;
	tf->tf_ctr = lregs->lctr;
	tf->tf_srr0 = lregs->lnip;
	tf->tf_srr1 = lregs->lmsr;

	/*
	 * Make sure the fpu state is discarded
	 */
#ifdef PPC_HAVE_FPU
	fpu_discard();
#endif

	/*
	 * Overwrite the pcb's FP register image with the one from the
	 * user context and mark the FPU as in use so it gets reloaded.
	 */
	memcpy(curpcb->pcb_fpu.fpreg, (void *)&sregs.lfp_regs,
	    sizeof(curpcb->pcb_fpu.fpreg));
	fpu_mark_used(curlwp);

	mutex_enter(p->p_lock);

	/*
	 * Restore signal stack.
	 *
	 * XXX cannot find the onstack information in Linux sig context.
	 * Is signal stack really supported on Linux?
	 */
#if 0
	if (sc.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
#endif
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	linux_old_extra_to_native_sigset(&mask, &context.lmask,
	    &context._unused[3]);
	(void) sigprocmask1(l, SIG_SETMASK, &mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
compat_16_sys___sigreturn14(struct lwp *l, const struct compat_16_sys___sigreturn14_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */
	struct proc *p = l->l_proc;
	struct sigcontext *scp;
	struct frame *frame;
	struct sigcontext tsigc;
	struct sigstate tstate;
	int rf, flags;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %p\n", p->p_pid, scp);
#endif
	/* The context must be at least 2-byte aligned. */
	if ((int)scp & 1)
		return EINVAL;

	if (copyin(scp, &tsigc, sizeof(tsigc)) != 0)
		return EFAULT;
	/* From here on, scp refers to the validated kernel copy. */
	scp = &tsigc;

	/* Make sure the user isn't pulling a fast one on us! */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_S)) != 0)
		return EINVAL;

	/* Restore register context. */
	frame = (struct frame *) l->l_md.md_regs;

	/*
	 * Grab pointer to hardware state information.
	 * If zero, the user is probably doing a longjmp.
	 */
	if ((rf = scp->sc_ap) == 0)
		goto restore;

	/*
	 * See if there is anything to do before we go to the
	 * expense of copying in close to 1/2K of data
	 */
	flags = fuword((void *)rf);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): sc_ap %x flags %x\n",
		    p->p_pid, rf, flags);
#endif
	/* fuword failed (bogus sc_ap value). */
	if (flags == -1)
		return EINVAL;

	/* No saved state, or the bulk copyin failed: do minimal restore. */
	if (flags == 0 || copyin((void *)rf, &tstate, sizeof(tstate)) != 0)
		goto restore;
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sigreturn(%d): ssp %p usp %x scp %p ft %d\n",
		    p->p_pid, &flags, scp->sc_sp, SCARG(uap, sigcntxp),
		    (flags & SS_RTEFRAME) ? tstate.ss_frame.f_format : -1);
#endif
	/*
	 * Restore long stack frames.  Note that we do not copy
	 * back the saved SR or PC, they were picked up above from
	 * the sigcontext structure.
	 */
	if (flags & SS_RTEFRAME) {
		register int sz;

		/* grab frame type and validate */
		sz = tstate.ss_frame.f_format;
		if (sz > 15 || (sz = exframesize[sz]) < 0
		    || frame->f_stackadj < sz)
			return EINVAL;
		frame->f_stackadj -= sz;
		frame->f_format = tstate.ss_frame.f_format;
		frame->f_vector = tstate.ss_frame.f_vector;
		memcpy(&frame->F_u, &tstate.ss_frame.F_u, sz);
#ifdef DEBUG
		if (sigdebug & SDB_FOLLOW)
			printf("sigreturn(%d): copy in %d of frame type %d\n",
			    p->p_pid, sz, tstate.ss_frame.f_format);
#endif
	}

	/*
	 * Restore most of the users registers except for A6 and SP
	 * which will be handled below.
	 * (the "- (2 * NBPW)" excludes those last two registers.)
	 */
	if (flags & SS_USERREGS)
		memcpy(frame->f_regs, tstate.ss_frame.f_regs,
		    sizeof(frame->f_regs) - (2 * NBPW));

	/*
	 * Restore the original FP context
	 */
	if (fputype && (flags & SS_FPSTATE))
		m68881_restore(&tstate.ss_fpstate);

 restore:
	/*
	 * Restore the user supplied information.
	 * This should be at the last so that the error (EINVAL)
	 * is reported to the sigreturn caller, not to the
	 * jump destination.
	 */
	frame->f_regs[SP] = scp->sc_sp;
	frame->f_regs[A6] = scp->sc_fp;
	frame->f_pc = scp->sc_pc;
	frame->f_sr = scp->sc_ps;

	mutex_enter(p->p_lock);

	/* Restore signal stack. */
	if (scp->sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(l, SIG_SETMASK, &scp->sc_mask, 0);
	mutex_exit(p->p_lock);

#ifdef DEBUG
	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
		printf("sigreturn(%d): copied in FP state (%x) at %p\n",
		    p->p_pid, *(u_int *)&tstate.ss_fpstate,
		    &tstate.ss_fpstate);
	if ((sigdebug & SDB_FOLLOW) ||
	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return EJUSTRETURN;
}