int
sys_swapcontext(struct thread *td, struct swapcontext_args *uap)
{
	ucontext_t uc;
	int ret;

	if (uap->oucp == NULL || uap->ucp == NULL)
		ret = EINVAL;
	else {
		bzero(&uc, sizeof(ucontext_t));
		get_mcontext(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
		PROC_LOCK(td->td_proc);
		uc.uc_sigmask = td->td_sigmask;
		PROC_UNLOCK(td->td_proc);
		ret = copyout(&uc, uap->oucp, UC_COPY_SIZE);
		if (ret == 0) {
			ret = copyin(uap->ucp, &uc, UC_COPY_SIZE);
			if (ret == 0) {
				ret = set_mcontext(td, &uc.uc_mcontext);
				if (ret == 0) {
					kern_sigprocmask(td, SIG_SETMASK,
					    &uc.uc_sigmask, NULL, 0);
				}
			}
		}
	}
	return (ret == 0 ? EJUSTRETURN : ret);
}
#ifndef _SYS_SYSPROTO_H_
struct getcontext_args {
	struct __ucontext *ucp;
};

struct setcontext_args {
	const struct __ucontext *ucp;
};

struct swapcontext_args {
	struct __ucontext *oucp;
	const struct __ucontext *ucp;
};
#endif

int
sys_getcontext(struct thread *td, struct getcontext_args *uap)
{
	ucontext_t uc;
	int ret;

	if (uap->ucp == NULL)
		ret = EINVAL;
	else {
		bzero(&uc, sizeof(ucontext_t));
		get_mcontext(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
		PROC_LOCK(td->td_proc);
		uc.uc_sigmask = td->td_sigmask;
		PROC_UNLOCK(td->td_proc);
		ret = copyout(&uc, uap->ucp, UC_COPY_SIZE);
	}
	return (ret);
}
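/*
 * Illustrative sketch, not part of the source above: the userland view of
 * these syscalls through the POSIX wrappers.  setcontext() resumes execution
 * at the getcontext() call site, so a flag is needed to break the loop.
 */
#include <stdio.h>
#include <ucontext.h>

int
main(void)
{
	ucontext_t uc;
	volatile int resumed = 0;	/* volatile: lives across the jump */

	getcontext(&uc);		/* snapshot the current context */
	if (!resumed) {
		resumed = 1;
		printf("first pass through getcontext()\n");
		setcontext(&uc);	/* jump back to the snapshot */
	}
	printf("second pass: resumed via setcontext()\n");
	return (0);
}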
static int
get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
{
	mcontext_t mcp64;
	unsigned i;
	int error;

	error = get_mcontext(td, &mcp64, flags);
	if (error != 0)
		return (error);

	mcp->mc_onstack = mcp64.mc_onstack;
	mcp->mc_pc = mcp64.mc_pc;
	for (i = 0; i < 32; i++)
		mcp->mc_regs[i] = mcp64.mc_regs[i];
	mcp->sr = mcp64.sr;
	mcp->mullo = mcp64.mullo;
	mcp->mulhi = mcp64.mulhi;
	mcp->mc_fpused = mcp64.mc_fpused;
	for (i = 0; i < 33; i++)
		mcp->mc_fpregs[i] = mcp64.mc_fpregs[i];
	mcp->mc_fpc_eir = mcp64.mc_fpc_eir;
	mcp->mc_tls = (int32_t)(intptr_t)mcp64.mc_tls;

	return (0);
}
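/*
 * Standalone illustration, not from the source: the (int32_t) narrowing that
 * get_mcontext32() applies to each 64-bit register value and to mc_tls.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* A sign-extended 32-bit value, as an n64 kernel would hold a
	 * 32-bit process's register. */
	uint64_t reg64 = 0xffffffff80001234ULL;
	int32_t reg32 = (int32_t)reg64;	/* the same truncation as above */

	printf("64-bit %#jx narrows to 32-bit %#x\n",
	    (uintmax_t)reg64, (unsigned)reg32);
	return (0);
}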
int
swapcontext(struct thread *td, struct swapcontext_args *uap)
{
	ucontext_t uc;
	int ret;

	if (uap->oucp == NULL || uap->ucp == NULL)
		ret = EINVAL;
	else {
		bzero(&uc, sizeof(ucontext_t));	/* don't leak kernel stack */
		get_mcontext(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
		PROC_LOCK(td->td_proc);
		uc.uc_sigmask = td->td_sigmask;
		PROC_UNLOCK(td->td_proc);
		ret = copyout(&uc, uap->oucp, UC_COPY_SIZE);
		if (ret == 0) {
			ret = copyin(uap->ucp, &uc, UC_COPY_SIZE);
			if (ret == 0) {
				ret = set_mcontext(td, &uc.uc_mcontext);
				if (ret == 0) {
					SIG_CANTMASK(uc.uc_sigmask);
					PROC_LOCK(td->td_proc);
					td->td_sigmask = uc.uc_sigmask;
					PROC_UNLOCK(td->td_proc);
				}
			}
		}
	}
	return (ret == 0 ? EJUSTRETURN : ret);
}
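/*
 * Illustrative sketch, not from the source: SIG_CANTMASK() enforces in the
 * kernel what sigprocmask(2) shows from userland -- SIGKILL and SIGSTOP can
 * never be blocked, even when the caller asks for a full mask.
 */
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t set, old;

	sigfillset(&set);			/* try to block everything */
	sigprocmask(SIG_SETMASK, &set, NULL);
	sigprocmask(SIG_SETMASK, NULL, &old);	/* read the mask back */

	/* The kernel silently stripped SIGKILL and SIGSTOP. */
	printf("SIGKILL blocked: %d\n", sigismember(&old, SIGKILL));
	printf("SIGSTOP blocked: %d\n", sigismember(&old, SIGSTOP));
	return (0);
}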
/* Initializes a dcontext with the supplied state and calls dispatch */
void
dynamo_start(priv_mcontext_t *mc)
{
    priv_mcontext_t *mcontext;
    dcontext_t *dcontext = get_thread_private_dcontext();

    ASSERT(dcontext != NULL);
    thread_starting(dcontext);

    /* Signal other threads for take over. */
    dynamorio_take_over_threads(dcontext);

    /* Set return address */
    mc->pc = canonicalize_pc_target(dcontext, mc->pc);
    dcontext->next_tag = mc->pc;
    ASSERT(dcontext->next_tag != NULL);

    /* transfer exec state to mcontext */
    mcontext = get_mcontext(dcontext);
    *mcontext = *mc;

    /* clear pc */
    mcontext->pc = 0;

    DOLOG(2, LOG_TOP, {
        byte *cur_esp;
        GET_STACK_PTR(cur_esp);
        LOG(THREAD, LOG_TOP, 2,
            "%s: next_tag="PFX", cur xsp="PFX", mc->xsp="PFX"\n",
            __FUNCTION__, dcontext->next_tag, cur_esp, mc->xsp);
    });

    /* Swap stacks so dispatch is invoked outside the application. */
    mcontext->xsp += XSP_SZ;
    call_switch_stack(dcontext, dcontext->dstack, dispatch,
                      false/*not on initstack*/, true/*return on error*/);
    ASSERT_NOT_REACHED();
}
/*
 * Save the calling context in (oucp) then switch to (ucp).
 *
 * Block all signals while switching contexts.  get_mcontext() returns
 * zero when retrieving a context.
 *
 * When some other thread calls set_mcontext() to resume our thread,
 * the resume point causes get_mcontext() to return non-zero to us.
 * Signals will be blocked and we must restore the signal mask before
 * returning.
 */
int
_swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
{
	int ret;

	ret = _sigprocmask(SIG_BLOCK, &sigset_block_all, &oucp->uc_sigmask);
	if (ret == 0) {
		if (get_mcontext(&oucp->uc_mcontext) == 0) {
			ret = set_mcontext(&ucp->uc_mcontext);
		} else {
			ret = _sigprocmask(SIG_SETMASK, &oucp->uc_sigmask,
			    NULL);
		}
	}
	return (ret);
}
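/*
 * Illustrative sketch, not from the source: the public swapcontext(3) built
 * on this routine is typically used for user-level coroutines through the
 * standard <ucontext.h> API.
 */
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_uc, co_uc;
static char co_stack[64 * 1024];

/* Runs on its own stack; yields back to main once, then returns. */
static void
coroutine(void)
{
	printf("coroutine: first entry\n");
	swapcontext(&co_uc, &main_uc);	/* save self, resume main */
	printf("coroutine: resumed\n");
}

int
main(void)
{
	getcontext(&co_uc);
	co_uc.uc_stack.ss_sp = co_stack;
	co_uc.uc_stack.ss_size = sizeof(co_stack);
	co_uc.uc_link = &main_uc;	/* resume main when coroutine returns */
	makecontext(&co_uc, coroutine, 0);

	swapcontext(&main_uc, &co_uc);	/* run coroutine to its first yield */
	printf("main: coroutine yielded\n");
	swapcontext(&main_uc, &co_uc);	/* resume it until it finishes */
	printf("main: coroutine finished\n");
	return (0);
}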
/* Initializes a dcontext with the supplied state and calls dispatch */
void
dynamo_start(priv_mcontext_t *mc)
{
    priv_mcontext_t *mcontext;
    dcontext_t *dcontext = get_thread_private_dcontext();

    ASSERT(dcontext != NULL);
    thread_starting(dcontext);

    /* Signal other threads for take over. */
    dynamorio_take_over_threads(dcontext);

    /* Set return address */
    dcontext->next_tag = mc->pc;
    ASSERT(dcontext->next_tag != NULL);

    /* transfer exec state to mcontext */
    mcontext = get_mcontext(dcontext);
    *mcontext = *mc;

    /* clear pc */
    mcontext->pc = 0;

    /* Swap stacks so dispatch is invoked outside the application.
     * We begin interpretation at the application return point,
     * and thus we need to look like DR returned -- adjust the app
     * stack to account for the return address.
     */
    mcontext->xsp += XSP_SZ;
    call_switch_stack(dcontext, dcontext->dstack, dispatch,
                      false/*not on initstack*/, true/*return on error*/);
    /* In release builds, this will simply return and continue native
     * execution.  That's better than calling unexpected_return() which
     * goes into an infinite loop.
     */
    ASSERT_NOT_REACHED();
}
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	int code, onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	/* Copy the stack first so the flags set below aren't clobbered. */
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_x[0] = sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;

	tf->tf_elr = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	tf->tf_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
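/*
 * Illustrative sketch, not from the source: the userland counterpart of the
 * frame built above.  A handler installed with SA_SIGINFO | SA_ONSTACK
 * receives the siginfo_t and ucontext_t that sendsig() copies out onto the
 * alternate stack.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
handler(int sig, siginfo_t *si, void *ucp)
{
	/* printf() is not async-signal-safe; fine for a synchronous demo. */
	printf("sig %d, si_code %d, ucontext %s\n", sig, si->si_code,
	    ucp != NULL ? "present" : "missing");
}

int
main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;	/* deliver on the alt stack */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return (0);
}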