static int
thr_new_initthr(struct thread *td, void *thunk)
{
	stack_t stack;
	struct thr_param *param;

	/*
	 * Copy out the tid to two places, one for the child and one
	 * for the parent.  Because pthread can create a detached
	 * thread, a parent that wants to safely access the child's
	 * tid must provide its own storage: the child thread may
	 * exit quickly and its memory be freed before the parent
	 * can access it.
	 */
	param = thunk;
	if ((param->child_tid != NULL &&
	    suword_lwpid(param->child_tid, td->td_tid)) ||
	    (param->parent_tid != NULL &&
	    suword_lwpid(param->parent_tid, td->td_tid)))
		return (EFAULT);

	/* Set up our machine context. */
	stack.ss_sp = param->stack_base;
	stack.ss_size = param->stack_size;
	/* Set upcall address to user thread entry function. */
	cpu_set_upcall_kse(td, param->start_func, param->arg, &stack);
	/* Set up the user TLS address and TLS pointer register. */
	return (cpu_set_user_tls(td, param->tls_base));
}
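/*
 * A minimal userland sketch (not from the kernel sources) showing how
 * the two tid copyouts above are consumed.  It assumes the documented
 * thr_new(2) interface; names and error handling are illustrative.
 */
#include <sys/thr.h>
#include <stddef.h>

static long parent_tid_storage;	/* parent-owned; survives an early child exit */

static void
entry(void *arg)
{
	/* ... thread body ... */
	thr_exit(NULL);
}

static int
spawn(char *stk, size_t stksz, char *tls)
{
	struct thr_param p = {
		.start_func = entry,
		.arg = NULL,
		.stack_base = stk,
		.stack_size = stksz,
		.tls_base = tls,
		/*
		 * Parent-supplied storage, so the parent can read the
		 * tid safely even if the child is detached and exits
		 * before the parent looks at it.
		 */
		.parent_tid = &parent_tid_storage,
	};

	return (thr_new(&p, sizeof(p)));
}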
int
cloudabi32_thread_setregs(struct thread *td,
    const cloudabi32_threadattr_t *attr, uint32_t tcb)
{
	stack_t stack;
	uint32_t args[3];
	void *frameptr;
	int error;

	/* Perform standard register initialization. */
	stack.ss_sp = TO_PTR(attr->stack);
	stack.ss_size = attr->stack_size - sizeof(args);
	cpu_set_upcall(td, TO_PTR(attr->entry_point), NULL, &stack);

	/*
	 * Copy the arguments for the thread entry point onto the
	 * stack (args[1] and args[2]).  As at process startup, the
	 * otherwise unused return address slot (args[0]) holds the
	 * TCB; its address on the stack doubles as the TLS base.
	 */
	args[0] = tcb;
	args[1] = td->td_tid;
	args[2] = attr->argument;
	frameptr = (void *)td->td_frame->tf_rsp;
	error = copyout(args, frameptr, sizeof(args));
	if (error != 0)
		return (error);

	return (cpu_set_user_tls(td, frameptr));
}
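/*
 * For reference, the user stack the new 32-bit thread starts on looks
 * roughly like this (a sketch inferred from the copyout above, not a
 * layout quoted from the CloudABI specification):
 *
 *	frameptr + 8:	attr->argument	(second entry-point argument)
 *	frameptr + 4:	td->td_tid	(first entry-point argument)
 *	frameptr + 0:	tcb		(return-address slot; also the
 *					 word the TLS base points at)
 *
 * i.e. the entry point runs as if it had been called as
 * entry_point(tid, argument), with no valid return address.
 */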
static void
cloudabi32_proc_setregs(struct thread *td, struct image_params *imgp,
    unsigned long stack)
{

	ia32_setregs(td, imgp, stack);
	(void)cpu_set_user_tls(td, (void *)stack);
}
static void
cloudabi64_proc_setregs(struct thread *td, struct image_params *imgp,
    unsigned long stack)
{
	struct trapframe *regs;

	exec_setregs(td, imgp, stack);

	/*
	 * The stack now contains a pointer to the TCB and the auxiliary
	 * vector.  Let x0 point to the auxiliary vector, and set
	 * tpidr_el0 to the TCB.
	 */
	regs = td->td_frame;
	regs->tf_x[0] = td->td_retval[0] =
	    stack + roundup(sizeof(cloudabi64_tcb_t), sizeof(register_t));
	(void)cpu_set_user_tls(td, (void *)stack);
}
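/*
 * A sketch of the startup layout implied by the code above (inferred
 * from the assignments, not quoted from the ABI specification):
 *
 *	stack + roundup(sizeof(cloudabi64_tcb_t), sizeof(register_t)):
 *		auxiliary vector; handed to the program in x0.
 *	stack + 0:
 *		cloudabi64_tcb_t; tpidr_el0 is pointed here via
 *		cpu_set_user_tls().
 */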
int
cloudabi_sys_thread_exit(struct thread *td,
    struct cloudabi_sys_thread_exit_args *uap)
{
	struct cloudabi_sys_lock_unlock_args cloudabi_sys_lock_unlock_args = {
		.lock = uap->lock,
		.scope = uap->scope,
	};

	/* Wake up joining thread. */
	cloudabi_sys_lock_unlock(td, &cloudabi_sys_lock_unlock_args);

	/*
	 * Attempt to terminate the thread.  Terminate the process if
	 * it's the last thread.
	 */
	kern_thr_exit(td);
	exit1(td, 0, 0);
	/* NOTREACHED */
}

int
cloudabi_sys_thread_tcb_set(struct thread *td,
    struct cloudabi_sys_thread_tcb_set_args *uap)
{

	return (cpu_set_user_tls(td, uap->tcb));
}

int
cloudabi_sys_thread_yield(struct thread *td,
    struct cloudabi_sys_thread_yield_args *uap)
{

	sched_relinquish(td);
	return (0);
}
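/*
 * The lock passed to thread_exit() is what implements joining.  A
 * plausible userland protocol (a sketch; the actual CloudABI C library
 * may differ in detail): the creator initializes the join lock in the
 * locked state on the child's behalf, the joiner blocks trying to
 * acquire it, and the unlock performed above on exit is what wakes
 * the joiner.
 */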
int
cloudabi64_thread_setregs(struct thread *td,
    const cloudabi64_threadattr_t *attr, uint64_t tcb)
{
	struct trapframe *frame;
	stack_t stack;

	/* Perform standard register initialization. */
	stack.ss_sp = TO_PTR(attr->stack);
	stack.ss_size = attr->stack_size;
	cpu_set_upcall(td, TO_PTR(attr->entry_point), NULL, &stack);

	/*
	 * Pass the thread ID of the new thread and the argument
	 * pointer provided by the parent thread as arguments to the
	 * entry point.
	 */
	frame = td->td_frame;
	frame->tf_x[0] = td->td_tid;
	frame->tf_x[1] = attr->argument;

	/* Set up TLS. */
	return (cpu_set_user_tls(td, (void *)tcb));
}
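/*
 * The register setup above implies an entry point of roughly this
 * shape on the userland side (a hypothetical prototype inferred from
 * x0/x1, not copied from the CloudABI headers):
 *
 *	void entry_point(cloudabi_tid_t tid, void *argument);
 *
 * with tpidr_el0 already pointing at the caller-supplied TCB.
 */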
static int
create_thread(struct thread *td, mcontext_t *ctx,
    void (*start_func)(void *), void *arg,
    char *stack_base, size_t stack_size, char *tls_base,
    long *child_tid, long *parent_tid,
    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	/* Racy, but the check is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	if (rtp != NULL) {
		switch (rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy. */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	PROC_LOCK(td->td_proc);
	error = racct_add(p, RACCT_NTHR, 1);
	PROC_UNLOCK(td->td_proc);
	if (error != 0)
		return (EPROCLIM);
#endif

	/* Initialize our td. */
	newtd = thread_alloc(0);
	if (newtd == NULL) {
		error = ENOMEM;
		goto fail;
	}

	cpu_set_upcall(newtd, td);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * Copy out the tid to two places, one for the child and one
	 * for the parent.  Because pthread can create a detached
	 * thread, a parent that wants to safely access the child's
	 * tid must provide its own storage: the child thread may
	 * exit quickly and its memory be freed before the parent
	 * can access it.
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		error = EFAULT;
		goto fail;
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	if (ctx != NULL) { /* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Set up the user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	}

	PROC_LOCK(td->td_proc);
	td->td_proc->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* Let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		    rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* Ignore timesharing class. */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	PROC_LOCK(p);
	racct_sub(p, RACCT_NTHR, 1);
	PROC_UNLOCK(p);
#endif
	return (error);
}
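/*
 * A userland sketch of the rtp handling above (hypothetical helper,
 * not from the sources): requesting a real-time thread via thr_new(2)
 * requires PRIV_SCHED_SETPOLICY, so an unprivileged caller gets EPERM
 * from the checks in create_thread(), and an out-of-range priority
 * gets EINVAL.
 */
#include <sys/thr.h>
#include <sys/rtprio.h>

static int
spawn_realtime(struct thr_param *p, u_short prio)
{
	struct rtprio rtp = {
		.type = RTP_PRIO_REALTIME,	/* root-only policy */
		.prio = prio,			/* must be <= RTP_PRIO_MAX */
	};

	/* The kernel copies rtp in during the call; it need not persist. */
	p->rtp = &rtp;
	return (thr_new(p, sizeof(*p)));
}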