static int
thr_new_initthr(struct thread *td, void *thunk)
{
	stack_t stack;
	struct thr_param *param;

	/*
	 * Here we copy out the tid to two places, one for the child and
	 * one for the parent, because pthread can create a detached
	 * thread; if the parent wants to safely access the child tid, it
	 * has to provide its own storage, because the child thread may
	 * exit quickly and its memory may be freed before the parent
	 * thread can access it.
	 */
	param = thunk;
	if ((param->child_tid != NULL &&
	    suword_lwpid(param->child_tid, td->td_tid)) ||
	    (param->parent_tid != NULL &&
	    suword_lwpid(param->parent_tid, td->td_tid)))
		return (EFAULT);

	/* Set up our machine context. */
	stack.ss_sp = param->stack_base;
	stack.ss_size = param->stack_size;
	/* Set the upcall address to the user thread entry function. */
	cpu_set_upcall_kse(td, param->start_func, param->arg, &stack);
	/* Set up the user TLS address and TLS pointer register. */
	return (cpu_set_user_tls(td, param->tls_base));
}
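For orientation, here is a hedged sketch of how an initialization callback like this is typically invoked: the thr_new(2) entry point copies in the userland struct thr_param and hands it to a generic thread-creation helper as the opaque thunk. The thread_create() helper name and exact signature are assumptions made for illustration, not taken from the listing above.

	/*
	 * Hedged sketch: how thr_new_initthr() might be wired up.  The
	 * thread_create(td, rtp, initf, thunk) helper is an assumption;
	 * only the param/thunk handoff is the point here.
	 */
	static int
	kern_thr_new(struct thread *td, struct thr_param *param)
	{
		struct rtprio rtp, *rtpp;
		int error;

		rtpp = NULL;
		if (param->rtp != NULL) {
			error = copyin(param->rtp, &rtp, sizeof(struct rtprio));
			if (error != 0)
				return (error);
			rtpp = &rtp;
		}
		/* The param block is passed through as the opaque thunk. */
		return (thread_create(td, rtpp, thr_new_initthr, param));
	}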
int
sys_thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
	int error;

	error = suword_lwpid(uap->id, (unsigned)td->td_tid);
	if (error == -1)
		return (EFAULT);
	return (0);
}
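From userland, the corresponding call is thr_self(2), which stores the calling thread's LWP id into caller-provided storage, mirroring the suword_lwpid() copyout above. A minimal sketch, assuming the standard FreeBSD <sys/thr.h> prototype int thr_self(long *id):

	#include <sys/thr.h>
	#include <stdio.h>

	int
	main(void)
	{
		long tid;

		/* Fails with EFAULT if the pointer is not writable. */
		if (thr_self(&tid) != 0) {
			perror("thr_self");
			return (1);
		}
		printf("thread id: %ld\n", tid);
		return (0);
	}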
static int
thr_create_initthr(struct thread *td, void *thunk)
{
	struct thr_create_initthr_args *args;

	/* Copy out the child tid. */
	args = thunk;
	if (args->tid != NULL && suword_lwpid(args->tid, td->td_tid))
		return (EFAULT);

	return (set_mcontext(td, &args->ctx.uc_mcontext));
}
int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{

	/* Signal userland that it can free the stack. */
	if ((void *)uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	return (kern_thr_exit(td));
}
int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{

#ifdef THRWORKQ
	if (td->td_reuse_stack != NULL) {
		thrworkq_reusestack(td->td_proc, td->td_reuse_stack);
		td->td_reuse_stack = NULL;
	}
#endif

	if ((void *)uap->state != NULL) {
		/* Signal userland that it can free the stack. */
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	return (kern_thr_exit(td));
}
int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
	struct proc *p;

	p = td->td_proc;

	/* Signal userland that it can free the stack. */
	if ((void *)uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	rw_wlock(&tidhash_lock);
	PROC_LOCK(p);

	if (p->p_numthreads != 1) {
		racct_sub(p, RACCT_NTHR, 1);
		LIST_REMOVE(td, td_hash);
		rw_wunlock(&tidhash_lock);
		tdsigcleanup(td);
		PROC_SLOCK(p);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * Ignore attempts to shut down the last thread in the proc.  This
	 * will actually call _exit(2) in the usermode trampoline when
	 * it returns.
	 */
	PROC_UNLOCK(p);
	rw_wunlock(&tidhash_lock);
	return (0);
}
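The state word written and woken above is what lets userland reclaim the exiting thread's stack safely. A hedged sketch of the consumer side (this is not the actual libthr code; it only assumes the documented _umtx_op(2) interface and UMTX_OP_WAIT, which blocks while the word still holds the given value):

	#include <sys/types.h>
	#include <sys/umtx.h>

	/*
	 * Park on the same long word that sys_thr_exit() sets to 1 and
	 * wakes.  Once woken with the word nonzero, the exiting thread is
	 * done touching its stack and the memory can be freed or reused.
	 */
	static void
	wait_for_thread_exit(long *state)
	{
		while (*state == 0)
			_umtx_op(state, UMTX_OP_WAIT, 0, NULL, NULL);
	}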
static int
create_thread(struct thread *td, mcontext_t *ctx,
    void (*start_func)(void *), void *arg,
    char *stack_base, size_t stack_size, char *tls_base,
    long *child_tid, long *parent_tid,
    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	/* There is a race condition here, but it is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	if (rtp != NULL) {
		switch (rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set the scheduler policy. */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	PROC_LOCK(td->td_proc);
	error = racct_add(p, RACCT_NTHR, 1);
	PROC_UNLOCK(td->td_proc);
	if (error != 0)
		return (EPROCLIM);
#endif

	/* Initialize our td. */
	newtd = thread_alloc(0);
	if (newtd == NULL) {
		error = ENOMEM;
		goto fail;
	}

	cpu_set_upcall(newtd, td);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * Here we copy out the tid to two places, one for the child and
	 * one for the parent, because pthread can create a detached
	 * thread; if the parent wants to safely access the child tid, it
	 * has to provide its own storage, because the child thread may
	 * exit quickly and its memory may be freed before the parent
	 * thread can access it.
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		error = EFAULT;
		goto fail;
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	if (ctx != NULL) {
		/* Old way to set the user context. */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set the upcall address to the user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Set up the user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	}

	PROC_LOCK(td->td_proc);
	td->td_proc->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* Let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		    rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* Ignore timesharing class. */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	PROC_LOCK(p);
	racct_sub(p, RACCT_NTHR, 1);
	PROC_UNLOCK(p);
#endif
	return (error);
}
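To see what arrives in create_thread() from userland, here is a minimal, hedged sketch of a thr_new(2) caller, assuming the standard <sys/thr.h> struct thr_param layout and the prototype int thr_new(struct thr_param *, int). It is an illustration only: error handling, TLS setup (tls_base/tls_size), and stack guard pages are omitted, so a real thread library does considerably more.

	#include <sys/types.h>
	#include <sys/thr.h>
	#include <stdlib.h>
	#include <string.h>

	#define	STACK_SIZE	(1024 * 1024)

	static long child_tid, parent_tid;

	static void
	thread_entry(void *arg)
	{
		/* Runs on the new stack; must finish with thr_exit(2). */
		thr_exit(NULL);
	}

	static int
	spawn_thread(void *arg)
	{
		struct thr_param param;

		memset(&param, 0, sizeof(param));
		param.start_func = thread_entry;
		param.arg = arg;
		param.stack_base = malloc(STACK_SIZE);
		param.stack_size = STACK_SIZE;
		/* Both tid copyouts land before the parent can observe exit. */
		param.child_tid = &child_tid;
		param.parent_tid = &parent_tid;
		return (thr_new(&param, sizeof(param)));
	}

Providing parent_tid storage is what makes the double copyout in the kernel worthwhile: even if the new thread runs detached and exits immediately, the parent still has its own copy of the tid.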