int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwpid_t *new_lwp)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	int error;

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, arg, &l2, l->l_class);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	*new_lwp = l2->l_lid;

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((flags & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}
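
/*
 * Illustration only, not part of the source above: a hedged sketch of
 * how an in-kernel caller might drive do_lwp_create().  The wrapper
 * name "example_spawn_lwp" and the "start_arg" cookie are hypothetical;
 * the argument is handed to the emulation's e_startlwp entry point in
 * the new LWP.
 */
static int
example_spawn_lwp(lwp_t *l, void *start_arg)
{
	lwpid_t lid;
	int error;

	/* Detached: nobody has to _lwp_wait() for this LWP. */
	error = do_lwp_create(l, start_arg, LWP_DETACHED, &lid);
	if (error != 0)
		return error;

	/* On success the new LWP's ID has been written to lid. */
	printf("spawned lwp %d in pid %d\n", lid, l->l_proc->p_pid);
	return 0;
}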
/*
 * Fork a kernel thread.  Any process can request this to be done.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	bool inmem;
	int error;
	va_list ap;
	int lc;

	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0)
		return ENOMEM;
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}
	error = lwp_create(&lwp0, &proc0, uaddr, inmem, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		return error;
	}
	uvm_lwp_hold(l);
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		if (l->l_name == NULL) {
			lwp_exit(l);
			return ENOMEM;
		}
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if ((flag & KTHREAD_INTR) != 0) {
		KASSERT((flag & KTHREAD_MPSAFE) != 0);
	}
	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	l->l_priority = pri;
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
			lwp_lock(l);
		}
		l->l_pflag |= LP_BOUND;
		l->l_cpu = ci;
	}
	if ((flag & KTHREAD_INTR) != 0)
		l->l_pflag |= LP_INTR;
	if ((flag & KTHREAD_MPSAFE) == 0)
		l->l_pflag &= ~LP_MPSAFE;

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	if ((flag & KTHREAD_IDLE) == 0) {
		l->l_stat = LSRUN;
		sched_enqueue(l, false);
		lwp_unlock(l);
	} else
		lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);

	/*
	 * The LWP is not created suspended or stopped and cannot be set
	 * into those states later, so must be considered runnable.
	 */
	proc0.p_nrlwps++;
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL)
		*lp = l;

	return (0);
}
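
/*
 * A minimal usage sketch (hypothetical caller, not from the file above):
 * an attach-style routine starting an MP-safe, time-shared worker
 * thread.  "example_softc", "example_worker", "example_start_thread"
 * and the "exworker" name are invented for illustration.
 */
struct example_softc {
	lwp_t	*sc_lwp;
};

static void
example_worker(void *arg)
{
	struct example_softc *sc = arg;

	(void)sc;
	for (;;) {
		/* ... do periodic work on sc ... */
		kpause("exwork", false, hz, NULL);
	}
}

static int
example_start_thread(struct example_softc *sc)
{

	/*
	 * PRI_NONE + KTHREAD_TS selects SCHED_OTHER at MAXPRI_USER in
	 * the code above; KTHREAD_MPSAFE keeps LP_MPSAFE set so the
	 * thread is not tied to the big lock.
	 */
	return kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_TS, NULL,
	    example_worker, sc, &sc->sc_lwp, "exworker");
}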
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

#ifdef KERN_SA
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2,
			    l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}
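
/*
 * The userland counterpart (hedged sketch): the syscall above backs
 * _lwp_create(2).  A caller prepares a ucontext_t with
 * _lwp_makecontext(3) and receives the new LWP's ID through the third
 * argument.  "example_stack", "example_start" and "example_lwp_create"
 * are invented names; the 64 KiB stack size is arbitrary.
 */
#include <lwp.h>
#include <ucontext.h>

static char example_stack[65536];

static void
example_start(void *arg)
{
	/* The new LWP begins execution here. */
	_lwp_exit();
}

static int
example_lwp_create(lwpid_t *lidp)
{
	ucontext_t uc;

	_lwp_makecontext(&uc, example_start, NULL, NULL,
	    example_stack, sizeof(example_stack));
	return _lwp_create(&uc, 0, lidp);
}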
static int
linux_clone_nptl(struct lwp *l, const struct linux_sys_clone_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) tls;
		syscallarg(void *) child_tidptr;
	} */
	struct proc *p;
	struct lwp *l2;
	struct linux_emuldata *led;
	void *parent_tidptr, *tls, *child_tidptr;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	lwpid_t lid;
	int flags, tnprocs, error;

	p = l->l_proc;
	flags = SCARG(uap, flags);
	parent_tidptr = SCARG(uap, parent_tidptr);
	tls = SCARG(uap, tls);
	child_tidptr = SCARG(uap, child_tidptr);

	tnprocs = atomic_inc_uint_nv(&nprocs);
	if (__predict_false(tnprocs >= maxproc) ||
	    kauth_authorize_process(l->l_cred, KAUTH_PROCESS_FORK, p,
	    KAUTH_ARG(tnprocs), NULL, NULL) != 0) {
		atomic_dec_uint(&nprocs);
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, LWP_DETACHED | LWP_PIDLID,
	    SCARG(uap, stack), 0, child_return, NULL, &l2, l->l_class);
	if (__predict_false(error)) {
		DPRINTF(("%s: lwp_create error=%d\n", __func__, error));
		atomic_dec_uint(&nprocs);
		uvm_uarea_free(uaddr);
		return error;
	}
	lid = l2->l_lid;

	/* LINUX_CLONE_CHILD_CLEARTID: clear TID in child's memory on exit() */
	if (flags & LINUX_CLONE_CHILD_CLEARTID) {
		led = l2->l_emuldata;
		led->led_clear_tid = child_tidptr;
	}

	/* LINUX_CLONE_PARENT_SETTID: store child's TID in parent's memory */
	if (flags & LINUX_CLONE_PARENT_SETTID) {
		if ((error = copyout(&lid, parent_tidptr, sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_PARENT_SETTID "
			    "failed (parent_tidptr = %p tid = %d error=%d)\n",
			    __func__, parent_tidptr, lid, error);
	}

	/* LINUX_CLONE_CHILD_SETTID: store child's TID in child's memory */
	if (flags & LINUX_CLONE_CHILD_SETTID) {
		if ((error = copyout(&lid, child_tidptr, sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_CHILD_SETTID "
			    "failed (child_tidptr = %p, tid = %d error=%d)\n",
			    __func__, child_tidptr, lid, error);
	}

	if (flags & LINUX_CLONE_SETTLS) {
		error = LINUX_LWP_SETPRIVATE(l2, tls);
		if (error) {
			DPRINTF(("%s: LINUX_LWP_SETPRIVATE %d\n", __func__,
			    error));
			lwp_exit(l2);
			return error;
		}
	}

	/*
	 * Set the new LWP running, unless the process is stopping,
	 * in which case the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	retval[0] = lid;
	retval[1] = 0;
	return 0;
}
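
/*
 * For orientation (hedged, illustrative): glibc's NPTL pthread_create()
 * reaches the function above through clone(2) with roughly the flag set
 * below, which is what exercises the SETTLS/SETTID/CLEARTID paths.
 * "NPTL_EXAMPLE_FLAGS" is an invented name; the CLONE_* constants are
 * the standard Linux ones (LINUX_CLONE_* on the NetBSD side).
 */
#define NPTL_EXAMPLE_FLAGS						\
	(CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |		\
	 CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |			\
	 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID)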