/*
 * Save the current user context to *oucp, then load and switch to the
 * context supplied in *ucp.
 */
int
sys_swapcontext(struct thread *td, struct swapcontext_args *uap)
{
	ucontext_t uc;
	int ret;

	if (uap->oucp == NULL || uap->ucp == NULL)
		ret = EINVAL;
	else {
		bzero(&uc, sizeof(ucontext_t));
		get_mcontext(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
		PROC_LOCK(td->td_proc);
		uc.uc_sigmask = td->td_sigmask;
		PROC_UNLOCK(td->td_proc);
		ret = copyout(&uc, uap->oucp, UC_COPY_SIZE);
		if (ret == 0) {
			ret = copyin(uap->ucp, &uc, UC_COPY_SIZE);
			if (ret == 0) {
				ret = set_mcontext(td, &uc.uc_mcontext);
				if (ret == 0) {
					kern_sigprocmask(td, SIG_SETMASK,
					    &uc.uc_sigmask, NULL, 0);
				}
			}
		}
	}
	/*
	 * EJUSTRETURN keeps the syscall return path from overwriting the
	 * registers that set_mcontext() just loaded.
	 */
	return (ret == 0 ? EJUSTRETURN : ret);
}
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	uint64_t sstatus;
	ucontext_t uc;
	int error;

	if (uap == NULL)
		return (EFAULT);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	sstatus = uc.uc_mcontext.mc_gpregs.gp_sstatus;
	if ((sstatus & SSTATUS_PS) != 0 ||
	    (sstatus & SSTATUS_PIE) == 0)
		return (EINVAL);

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	set_fpcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
int
swapcontext(struct thread *td, struct swapcontext_args *uap)
{
	ucontext_t uc;
	int ret;

	if (uap->oucp == NULL || uap->ucp == NULL)
		ret = EINVAL;
	else {
		get_mcontext(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
		PROC_LOCK(td->td_proc);
		uc.uc_sigmask = td->td_sigmask;
		PROC_UNLOCK(td->td_proc);
		ret = copyout(&uc, uap->oucp, UC_COPY_SIZE);
		if (ret == 0) {
			ret = copyin(uap->ucp, &uc, UC_COPY_SIZE);
			if (ret == 0) {
				ret = set_mcontext(td, &uc.uc_mcontext);
				if (ret == 0) {
					SIG_CANTMASK(uc.uc_sigmask);
					PROC_LOCK(td->td_proc);
					td->td_sigmask = uc.uc_sigmask;
					PROC_UNLOCK(td->td_proc);
				}
			}
		}
	}
	return (ret == 0 ? EJUSTRETURN : ret);
}
/*
 * Switch to the target context.  The current signal mask is saved in ucp
 * and all signals are blocked.  The call to set_mcontext() causes the
 * specified context to be switched to (usually resuming as a return from
 * the get_mcontext() procedure).  The current context is thrown away.
 *
 * The target context being resumed is responsible for restoring the
 * signal mask appropriate for the target context.
 */
int
_setcontext(ucontext_t *ucp)
{
	int ret;

	ret = _sigprocmask(SIG_BLOCK, &sigset_block_all, &ucp->uc_sigmask);
	if (ret == 0)
		ret = set_mcontext(&ucp->uc_mcontext);
	return (ret);
}
static int
thr_create_initthr(struct thread *td, void *thunk)
{
	struct thr_create_initthr_args *args;

	/* Copy out the child tid. */
	args = thunk;
	if (args->tid != NULL && suword_lwpid(args->tid, td->td_tid))
		return (EFAULT);

	return (set_mcontext(td, &args->ctx.uc_mcontext));
}
/*
 * Save the calling context in (oucp) then switch to (ucp).
 *
 * Block all signals while switching contexts.  get_mcontext() returns zero
 * when retrieving a context.
 *
 * When some other thread calls set_mcontext() to resume our thread,
 * the resume point causes get_mcontext() to return non-zero to us.
 * Signals will be blocked and we must restore the signal mask before
 * returning.
 */
int
_swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
{
	int ret;

	ret = _sigprocmask(SIG_BLOCK, &sigset_block_all, &oucp->uc_sigmask);
	if (ret == 0) {
		if (get_mcontext(&oucp->uc_mcontext) == 0) {
			ret = set_mcontext(&ucp->uc_mcontext);
		} else {
			ret = _sigprocmask(SIG_SETMASK, &oucp->uc_sigmask,
			    NULL);
		}
	}
	return (ret);
}
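/*
 * Illustrative userland sketch (not part of the sources above): how the
 * save/restore semantics described for _setcontext()/_swapcontext() are
 * typically exercised through the portable ucontext API.  The names
 * coro(), main_ctx, coro_ctx and the 64 KB stack are hypothetical.
 */
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, coro_ctx;
static char coro_stack[64 * 1024];

static void
coro(void)
{
	printf("in coroutine\n");
	/* Save our state in coro_ctx and resume main where it left off. */
	swapcontext(&coro_ctx, &main_ctx);
	printf("coroutine resumed\n");
	/* Returning falls through to uc_link, i.e. back into main. */
}

int
main(void)
{
	getcontext(&coro_ctx);
	coro_ctx.uc_stack.ss_sp = coro_stack;
	coro_ctx.uc_stack.ss_size = sizeof(coro_stack);
	coro_ctx.uc_link = &main_ctx;
	makecontext(&coro_ctx, coro, 0);

	/* Save main's context and switch to the coroutine. */
	swapcontext(&main_ctx, &coro_ctx);
	printf("back in main\n");

	/* Resume the coroutine at the point it saved above. */
	swapcontext(&main_ctx, &coro_ctx);
	printf("done\n");
	return (0);
}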
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc as specified by
 * context left by sendsig.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
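/*
 * Illustrative userland sketch (not from the sources above): the ucontext_t
 * handed to a SA_SIGINFO handler is the very context that sigreturn(2)
 * restores when the handler returns.  The mcontext field names are
 * platform-specific; this sketch assumes FreeBSD/amd64, where the
 * interrupted program counter lives in uc_mcontext.mc_rip.  printf() is
 * used only for illustration and is not async-signal-safe.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

static void
handler(int sig, siginfo_t *si, void *ctx)
{
	ucontext_t *uc = ctx;

	(void)si;
	/* Print the PC the thread will resume at once sigreturn(2) runs. */
	printf("signal %d, resuming at %#lx\n", sig,
	    (unsigned long)uc->uc_mcontext.mc_rip);
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return (0);
}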
int
sys_setcontext(struct thread *td, struct setcontext_args *uap)
{
	ucontext_t uc;
	int ret;

	if (uap->ucp == NULL)
		ret = EINVAL;
	else {
		ret = copyin(uap->ucp, &uc, UC_COPY_SIZE);
		if (ret == 0) {
			ret = set_mcontext(td, &uc.uc_mcontext);
			if (ret == 0) {
				kern_sigprocmask(td, SIG_SETMASK,
				    &uc.uc_sigmask, NULL, 0);
			}
		}
	}
	return (ret == 0 ? EJUSTRETURN : ret);
}
/*
 * Widen a 32-bit machine context into the native 64-bit layout and hand
 * it to set_mcontext().
 */
static int
set_mcontext32(struct thread *td, mcontext32_t *mcp)
{
	mcontext_t mcp64;
	unsigned i;

	mcp64.mc_onstack = mcp->mc_onstack;
	mcp64.mc_pc = mcp->mc_pc;
	for (i = 0; i < 32; i++)
		mcp64.mc_regs[i] = mcp->mc_regs[i];
	mcp64.sr = mcp->sr;
	mcp64.mullo = mcp->mullo;
	mcp64.mulhi = mcp->mulhi;
	mcp64.mc_fpused = mcp->mc_fpused;
	for (i = 0; i < 33; i++)
		mcp64.mc_fpregs[i] = mcp->mc_fpregs[i];
	mcp64.mc_fpc_eir = mcp->mc_fpc_eir;
	mcp64.mc_tls = (void *)(intptr_t)mcp->mc_tls;

	return (set_mcontext(td, &mcp64));
}
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	uint32_t spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/*
	 * Make sure we return to user mode (EL0t) and that the exception
	 * masks (D, A, I, F) have not been set.
	 */
	spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
		return (EINVAL);

	set_mcontext(td, &uc.uc_mcontext);
	set_fpcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
static int
create_thread(struct thread *td, mcontext_t *ctx,
    void (*start_func)(void *), void *arg,
    char *stack_base, size_t stack_size,
    char *tls_base,
    long *child_tid, long *parent_tid,
    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	/* There is a race here, but the check is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	if (rtp != NULL) {
		switch (rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy. */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	PROC_LOCK(td->td_proc);
	error = racct_add(p, RACCT_NTHR, 1);
	PROC_UNLOCK(td->td_proc);
	if (error != 0)
		return (EPROCLIM);
#endif

	/* Initialize our td. */
	newtd = thread_alloc(0);
	if (newtd == NULL) {
		error = ENOMEM;
		goto fail;
	}

	cpu_set_upcall(newtd, td);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * The tid is copied out to two places, one for the child and one
	 * for the parent, because pthread can create a detached thread:
	 * if the parent wants to access the child tid safely, it has to
	 * provide its own storage, since the child may exit quickly and
	 * its memory be freed before the parent can look at it.
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		error = EFAULT;
		goto fail;
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	if (ctx != NULL) { /* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Set up the user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	}

	PROC_LOCK(td->td_proc);
	td->td_proc->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* Let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		    rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* Ignore timesharing class. */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	PROC_LOCK(p);
	racct_sub(p, RACCT_NTHR, 1);
	PROC_UNLOCK(p);
#endif
	return (error);
}
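/*
 * Illustrative userland sketch (not from the sources above): assuming the
 * create_thread() shown here is the kern_thr.c backend for thr_new(2),
 * the usual way to reach it is through libthr's pthread_create(3), which
 * allocates the stack and TLS that create_thread() installs via
 * cpu_set_upcall_kse()/cpu_set_user_tls().
 */
#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	printf("worker running: %s\n", (const char *)arg);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	/* libthr builds a struct thr_param and calls thr_new(2) here. */
	if (pthread_create(&tid, NULL, worker, "hello") != 0)
		return (1);
	pthread_join(tid, NULL);
	return (0);
}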