Example #1: linux_to_native_sigaction() converts a Linux struct sigaction to the native form
/* ...and the new sigaction conversion funcs. */
void
linux_to_native_sigaction(struct sigaction *bsa, const struct linux_sigaction *lsa)
{
	bsa->sa_handler = lsa->linux_sa_handler;
	linux_to_native_sigset(&bsa->sa_mask, &lsa->linux_sa_mask);
	bsa->sa_flags = linux_to_native_sigflags(lsa->linux_sa_flags);
}
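
Every example in this section ultimately calls linux_to_native_sigset() to translate the Linux signal mask into the native sigset_t. Its body is not part of these examples; the following is a minimal sketch of what such a converter typically looks like, assuming a LINUX__NSIG signal count, a linux_sigismember() helper and a linux_to_native_signo[] translation table (these names are assumptions, not taken from the code above).

/*
 * Sketch only: walk every Linux signal number, look up the native
 * number in an assumed translation table, and set it in the native
 * mask.  LINUX__NSIG, linux_sigismember() and linux_to_native_signo[]
 * are assumed helpers/tables.
 */
void
linux_to_native_sigset(sigset_t *bss, const linux_sigset_t *lss)
{
	int i, newsig;

	sigemptyset(bss);
	for (i = 1; i < LINUX__NSIG; i++) {
		if (linux_sigismember(lss, i)) {
			newsig = linux_to_native_signo[i];
			if (newsig)
				sigaddset(bss, newsig);
		}
	}
}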
Example #2: linux_sys_rt_sigreturn() fetches a Linux rt signal frame and restores the converted signal mask
int
linux_sys_rt_sigreturn(struct lwp *l, const struct linux_sys_rt_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct linux_rt_sigframe *) sfp;
	} */
	struct linux_rt_sigframe *sfp, sigframe;
	sigset_t mask;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */

	sfp = SCARG(uap, sfp);

	if (ALIGN(sfp) != (u_int64_t)sfp)
		return (EINVAL);

	/*
	 * Fetch the frame structure.
	 */
	if (copyin((void *)sfp, &sigframe,
			sizeof(struct linux_rt_sigframe)) != 0)
		return (EFAULT);

	/* Grab the signal mask */
	linux_to_native_sigset(&mask, &sigframe.uc.uc_sigmask);

	return (linux_restore_sigcontext(l, sigframe.uc.uc_mcontext, &mask));
}
Example #3: fetchss(), a copyin helper that fetches a Linux sigset and converts it in place
static int
fetchss(const void *u, void *s, size_t len)
{
	int error;
	linux_sigset_t lss;
	
	if ((error = copyin(u, &lss, sizeof(lss))) != 0)
		return error;

	linux_to_native_sigset(s, &lss);
	return 0;
}
Example #4: linux_sys_rt_sigprocmask() translates the how argument and both masks around sigprocmask1()
int
linux_sys_rt_sigprocmask(struct lwp *l, const struct linux_sys_rt_sigprocmask_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) how;
		syscallarg(const linux_sigset_t *) set;
		syscallarg(linux_sigset_t *) oset;
		syscallarg(size_t) sigsetsize;
	} */
	linux_sigset_t nlss, olss, *oset;
	const linux_sigset_t *set;
	struct proc *p = l->l_proc;
	sigset_t nbss, obss;
	int error, how;

	if (SCARG(uap, sigsetsize) != sizeof(linux_sigset_t))
		return (EINVAL);

	switch (SCARG(uap, how)) {
	case LINUX_SIG_BLOCK:
		how = SIG_BLOCK;
		break;
	case LINUX_SIG_UNBLOCK:
		how = SIG_UNBLOCK;
		break;
	case LINUX_SIG_SETMASK:
		how = SIG_SETMASK;
		break;
	default:
		return (EINVAL);
	}

	set = SCARG(uap, set);
	oset = SCARG(uap, oset);

	if (set) {
		error = copyin(set, &nlss, sizeof(nlss));
		if (error)
			return (error);
		linux_to_native_sigset(&nbss, &nlss);
	}
	mutex_enter(p->p_lock);
	error = sigprocmask1(l, how,
	    set ? &nbss : NULL, oset ? &obss : NULL);
	mutex_exit(p->p_lock);
	if (!error && oset) {
		native_to_linux_sigset(&olss, &obss);
		error = copyout(&olss, oset, sizeof(olss));
	}
	return (error);
}
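
Example #4 also depends on the reverse conversion, native_to_linux_sigset(), to hand the previous mask back to the Linux program through oset. A hedged sketch of that direction, mirroring the converter above and assuming NSIG, a native_to_linux_signo[] table and linux_sigemptyset()/linux_sigaddset() helpers (names assumed, not shown in these examples):

/*
 * Sketch only: the inverse of linux_to_native_sigset(), used for the
 * oset copyout above.  NSIG, native_to_linux_signo[],
 * linux_sigemptyset() and linux_sigaddset() are assumed.
 */
void
native_to_linux_sigset(linux_sigset_t *lss, const sigset_t *bss)
{
	int i, newsig;

	linux_sigemptyset(lss);
	for (i = 1; i < NSIG; i++) {
		if (sigismember(bss, i)) {
			newsig = native_to_linux_signo[i];
			if (newsig)
				linux_sigaddset(lss, newsig);
		}
	}
}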
Example #5: linux_old_extra_to_native_sigset() widens an old-style Linux sigset (plus extra words) before converting
void
linux_old_extra_to_native_sigset(sigset_t *bss, const linux_old_sigset_t *lss, const unsigned long *extra)
{
	linux_sigset_t lsnew;

	/* convert old sigset to new sigset */
	linux_sigemptyset(&lsnew);
	lsnew.sig[0] = *lss;
	if (extra)
		memcpy(&lsnew.sig[1], extra,
		    sizeof(linux_sigset_t) - sizeof(linux_old_sigset_t));

	linux_to_native_sigset(bss, &lsnew);
}
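
The sig[0]/sig[1] handling in Example #5 only makes sense for a particular layout of the Linux sigset types: the old (pre-RT) sigset is a single word, and the RT sigset is an array of words with signal 1 in the low bit of sig[0], so the memcpy() of the extra words lands at sig[1]. A sketch of that assumed layout (LINUX__NSIG_WORDS is an assumed constant, not shown above):

/*
 * Assumed layout, for illustration only: the old sigset fits in one
 * word; the RT sigset is an array of words, so the "extra" words
 * start at sig[1].
 */
typedef unsigned long linux_old_sigset_t;

typedef struct {
	unsigned long sig[LINUX__NSIG_WORDS];	/* signal n is bit (n-1) */
} linux_sigset_t;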
Example #6: linux_sys_sigreturn() restores register context, signal stack and signal mask from an old-style frame
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 */
int
linux_sys_sigreturn(struct lwp *l, const struct linux_sys_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct linux_sigframe *) sf;
	} */
	struct proc *p = l->l_proc;
	struct linux_sigframe *sf, ksf;
	struct frame *f;
	sigset_t mask;
	int i, error;

#ifdef DEBUG_LINUX
	printf("linux_sys_sigreturn()\n");
#endif /* DEBUG_LINUX */

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	sf = SCARG(uap, sf);

	if ((error = copyin(sf, &ksf, sizeof(ksf))) != 0)
		return (error);

	/* Restore the register context. */
	f = (struct frame *)l->l_md.md_regs;
	for (i = 0; i < 32; i++)
		f->f_regs[i] = ksf.lsf_sc.lsc_regs[i];
	f->f_regs[_R_MULLO] = ksf.lsf_sc.lsc_mdlo;
	f->f_regs[_R_MULHI] = ksf.lsf_sc.lsc_mdhi;
	f->f_regs[_R_PC] = ksf.lsf_sc.lsc_pc;
	f->f_regs[_R_BADVADDR] = ksf.lsf_sc.lsc_badvaddr;
	f->f_regs[_R_CAUSE] = ksf.lsf_sc.lsc_cause;

	mutex_enter(p->p_lock);

	/* Restore signal stack. */
	l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	linux_to_native_sigset(&mask, (linux_sigset_t *)&ksf.lsf_mask);
	(void)sigprocmask1(l, SIG_SETMASK, &mask, 0);

	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
Example #7: linux_sys_rt_sigsuspend() converts the mask and hands it to sigsuspend1()
int
linux_sys_rt_sigsuspend(struct lwp *l, const struct linux_sys_rt_sigsuspend_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_sigset_t *) unewset;
		syscallarg(size_t) sigsetsize;
	} */
	linux_sigset_t lss;
	sigset_t bss;
	int error;

	if (SCARG(uap, sigsetsize) != sizeof(linux_sigset_t))
		return (EINVAL);

	error = copyin(SCARG(uap, unewset), &lss, sizeof(linux_sigset_t));
	if (error)
		return (error);

	linux_to_native_sigset(&bss, &lss);

	return (sigsuspend1(l, &bss));
}
Example #8: linux_sys_rt_sigreturn() rebuilds a native ucontext_t from the Linux frame and passes it to setucontext()
int
linux_sys_rt_sigreturn(struct lwp *l, const void *v, register_t *retval)
{
    struct linux_ucontext *luctx;
    struct trapframe *tf = l->l_md.md_regs;
    struct linux_sigcontext *lsigctx;
    struct linux_rt_sigframe frame, *fp;
    ucontext_t uctx;
    mcontext_t *mctx;
    int error;

    fp = (struct linux_rt_sigframe *)(tf->tf_regs[_R_SP]);
    if ((error = copyin(fp, &frame, sizeof(frame))) != 0) {
        mutex_enter(l->l_proc->p_lock);
        sigexit(l, SIGILL);
        return error;
    }

    luctx = &frame.lrs_uc;
    lsigctx = &luctx->luc_mcontext;

    bzero(&uctx, sizeof(uctx));
    mctx = (mcontext_t *)&uctx.uc_mcontext;

    /*
     * Set the flags. Linux always has CPU, stack and signal state;
     * FPU is optional. uc_flags is not used to tell what we have.
     */
    uctx.uc_flags = (_UC_SIGMASK|_UC_CPU|_UC_STACK);
    uctx.uc_link = NULL;

    /*
     * Signal set.
     */
    linux_to_native_sigset(&uctx.uc_sigmask, &luctx->luc_sigmask);

    /*
     * CPU state.
     */
    memcpy(mctx->__gregs, lsigctx->lsc_regs, sizeof(lsigctx->lsc_regs));

    /*
     * And the stack.
     */
    uctx.uc_stack.ss_flags = 0;
    if (luctx->luc_stack.ss_flags & LINUX_SS_ONSTACK)
        uctx.uc_stack.ss_flags |= SS_ONSTACK;

    if (luctx->luc_stack.ss_flags & LINUX_SS_DISABLE)
        uctx.uc_stack.ss_flags |= SS_DISABLE;

    uctx.uc_stack.ss_sp = luctx->luc_stack.ss_sp;
    uctx.uc_stack.ss_size = luctx->luc_stack.ss_size;

    /*
     * And let setucontext deal with that.
     */
    mutex_enter(l->l_proc->p_lock);
    error = setucontext(l, &uctx);
    mutex_exit(l->l_proc->p_lock);
    if (error)
        return error;

    return (EJUSTRETURN);
}
Example #9: linux_sys_rt_sigreturn() restores trapframe, FPU state and signal mask from a Linux rt frame
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 *
 * XXX not tested
 */
int
linux_sys_rt_sigreturn(struct lwp *l, const struct linux_sys_rt_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct linux_rt_sigframe *) sfp;
	} */
	struct proc *p = l->l_proc;
	struct linux_rt_sigframe *scp, sigframe;
	struct linux_sigregs sregs;
	struct linux_pt_regs *lregs;
	struct trapframe *tf;
	sigset_t mask;
	int i;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sfp);

	/*
	 * Get the context from user stack
	 */
	if (copyin((void *)scp, &sigframe, sizeof(*scp)))
		return (EFAULT);

	/*
	 *  Restore register context.
	 */
	if (copyin((void *)sigframe.luc.luc_context.lregs,
		   &sregs, sizeof(sregs)))
		return (EFAULT);
	lregs = (struct linux_pt_regs *)&sregs.lgp_regs;

	tf = trapframe(l);
#ifdef DEBUG_LINUX
	printf("linux_sys_rt_sigreturn: trapframe=0x%lx scp=0x%lx\n",
	    (unsigned long)tf, (unsigned long)scp);
#endif

	if (!PSL_USEROK_P(lregs->lmsr))
		return (EINVAL);

	for (i = 0; i < 32; i++)
		tf->tf_fixreg[i] = lregs->lgpr[i];
	tf->tf_lr = lregs->llink;
	tf->tf_cr = lregs->lccr;
	tf->tf_xer = lregs->lxer;
	tf->tf_ctr = lregs->lctr;
	tf->tf_srr0 = lregs->lnip;
	tf->tf_srr1 = lregs->lmsr;

	/*
	 * Make sure the fpu state is discarded
	 */
#ifdef PPC_HAVE_FPU
	fpu_discard();
#endif

	memcpy(curpcb->pcb_fpu.fpreg, (void *)&sregs.lfp_regs,
	       sizeof(curpcb->pcb_fpu.fpreg));

	fpu_mark_used(curlwp);

	mutex_enter(p->p_lock);

	/*
	 * Restore signal stack.
	 *
	 * XXX cannot find the onstack information in Linux sig context.
	 * Is signal stack really supported on Linux?
	 *
	 * It seems to be supported in libc6...
	 */
	/* if (sc.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else */
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/*
	 * Grab the signal mask
	 */
	linux_to_native_sigset(&mask, &sigframe.luc.luc_sigmask);
	(void) sigprocmask1(l, SIG_SETMASK, &mask, 0);

	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
Example #10: linux_sys_rt_sigreturn() rebuilds a native ucontext_t, including fxsave FPU state, and passes it to setucontext()
int
linux_sys_rt_sigreturn(struct lwp *l, const void *v, register_t *retval)
{
	struct linux_ucontext *luctx;
	struct trapframe *tf = l->l_md.md_regs;
	struct linux_sigcontext *lsigctx;
	struct linux__fpstate fpstate;
	struct linux_rt_sigframe frame, *fp;
	ucontext_t uctx;
	mcontext_t *mctx;
	struct fxsave64 *fxarea;
	int error;

	fp = (struct linux_rt_sigframe *)(tf->tf_rsp - 8);
	if ((error = copyin(fp, &frame, sizeof(frame))) != 0) {
		mutex_enter(l->l_proc->p_lock);
		sigexit(l, SIGILL);
		return error;
	}
	luctx = &frame.uc;
	lsigctx = &luctx->luc_mcontext;

	bzero(&uctx, sizeof(uctx));
	mctx = (mcontext_t *)&uctx.uc_mcontext;
	fxarea = (struct fxsave64 *)&mctx->__fpregs;

	/* 
	 * Set the flags. Linux always has CPU, stack and signal state;
	 * FPU is optional. uc_flags is not used to tell what we have.
	 */
	uctx.uc_flags = (_UC_SIGMASK|_UC_CPU|_UC_STACK|_UC_CLRSTACK);
	if (lsigctx->fpstate != NULL)
		uctx.uc_flags |= _UC_FPU;
	uctx.uc_link = NULL;

	/*
	 * Signal set 
	 */
	linux_to_native_sigset(&uctx.uc_sigmask, &luctx->luc_sigmask);

	/*
	 * CPU state
	 */
	mctx->__gregs[_REG_R8] = lsigctx->r8;
	mctx->__gregs[_REG_R9] = lsigctx->r9;
	mctx->__gregs[_REG_R10] = lsigctx->r10;
	mctx->__gregs[_REG_R11] = lsigctx->r11;
	mctx->__gregs[_REG_R12] = lsigctx->r12;
	mctx->__gregs[_REG_R13] = lsigctx->r13;
	mctx->__gregs[_REG_R14] = lsigctx->r14;
	mctx->__gregs[_REG_R15] = lsigctx->r15;
	mctx->__gregs[_REG_RDI] = lsigctx->rdi;
	mctx->__gregs[_REG_RSI] = lsigctx->rsi;
	mctx->__gregs[_REG_RBP] = lsigctx->rbp;
	mctx->__gregs[_REG_RBX] = lsigctx->rbx;
	mctx->__gregs[_REG_RAX] = lsigctx->rax;
	mctx->__gregs[_REG_RDX] = lsigctx->rdx;
	mctx->__gregs[_REG_RCX] = lsigctx->rcx;
	mctx->__gregs[_REG_RIP] = lsigctx->rip;
	mctx->__gregs[_REG_RFLAGS] = lsigctx->eflags;
	mctx->__gregs[_REG_CS] = lsigctx->cs;
	mctx->__gregs[_REG_GS] = lsigctx->gs;
	mctx->__gregs[_REG_FS] = lsigctx->fs;
	mctx->__gregs[_REG_ERR] = lsigctx->err;
	mctx->__gregs[_REG_TRAPNO] = lsigctx->trapno;
	mctx->__gregs[_REG_ES] = tf->tf_es;
	mctx->__gregs[_REG_DS] = tf->tf_ds;
	mctx->__gregs[_REG_RSP] = lsigctx->rsp; /* XXX */
	mctx->__gregs[_REG_SS] = tf->tf_ss;

	/*
	 * FPU state 
	 */
	if (lsigctx->fpstate != NULL) {
		error = copyin(lsigctx->fpstate, &fpstate, sizeof(fpstate));
		if (error != 0) {
			mutex_enter(l->l_proc->p_lock);
			sigexit(l, SIGILL);
			return error;
		}

		fxarea->fx_fcw = fpstate.cwd;
		fxarea->fx_fsw = fpstate.swd;
		fxarea->fx_ftw = fpstate.twd;
		fxarea->fx_fop = fpstate.fop;
		fxarea->fx_rip = fpstate.rip;
		fxarea->fx_rdp = fpstate.rdp;
		fxarea->fx_mxcsr = fpstate.mxcsr;
		fxarea->fx_mxcsr_mask = fpstate.mxcsr_mask;
		memcpy(&fxarea->fx_st, &fpstate.st_space, 
		    sizeof(fxarea->fx_st));
		memcpy(&fxarea->fx_xmm, &fpstate.xmm_space, 
		    sizeof(fxarea->fx_xmm));
	}

	/*
	 * And the stack
	 */
	uctx.uc_stack.ss_flags = 0;
	if (luctx->luc_stack.ss_flags & LINUX_SS_ONSTACK)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;

	if (luctx->luc_stack.ss_flags & LINUX_SS_DISABLE)
		uctx.uc_stack.ss_flags |= SS_DISABLE;

	uctx.uc_stack.ss_sp = luctx->luc_stack.ss_sp;
	uctx.uc_stack.ss_size = luctx->luc_stack.ss_size;

	/*
	 * And let setucontext deal with that.
	 */
	mutex_enter(l->l_proc->p_lock);
	error = setucontext(l, &uctx);
	mutex_exit(l->l_proc->p_lock);
	if (error)
		return error;

	return EJUSTRETURN;
}