int
linux_sys_exit(struct lwp *l, const struct linux_sys_exit_args *uap,
    register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 *
 * MPALMOSTSAFE
 */
int
sys_extexit(struct extexit_args *uap)
{
	struct proc *p = curproc;
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr,
		    sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	lwkt_gettoken(&p->p_token);

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (p->p_nthreads > 1) {
			lwp_exit(0);	/* called w/ p_token held */
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	lwkt_reltoken(&p->p_token);	/* safety */
}

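/*
 * Hypothetical userland sketch (not from the source above; the header
 * and exact extexit(2) prototype are assumed): exit only the calling
 * lwp while publishing its status through `addr`, using the
 * action/target split that EXTEXIT_ACTION()/EXTEXIT_WHO() decode in
 * sys_extexit() above.  Assumes EXTEXIT_LWP and EXTEXIT_SETINT compose
 * with bitwise OR.
 */
static volatile int lwp_status;		/* read by surviving lwps */

static void
die_lwp(int status)
{
	/* Kernel copies `status` out to &lwp_status, then exits this lwp. */
	extexit(EXTEXIT_LWP | EXTEXIT_SETINT, status, (void *)&lwp_status);
	/* NOTREACHED unless the arguments are rejected */
}
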
/*
 * Exit the calling lwp
 */
void
syslwp_exit()
{
	proc_t *p = ttoproc(curthread);

	mutex_enter(&p->p_lock);
	lwp_exit();
	/* NOTREACHED */
}

/*
 * Destroy an inactive kthread.  The kthread must be in the LSIDL state.
 */
void
kthread_destroy(lwp_t *l)
{

	KASSERT((l->l_flag & LW_SYSTEM) != 0);
	KASSERT(l->l_stat == LSIDL);
	lwp_exit(l);
}

static void counter(long num) {
  if ( num ) {
    /* spawn another thread to count down from num-1; the original
     * (void*)num-1 relied on non-standard void* arithmetic, so
     * parenthesize the subtraction before the cast */
    lwp_create((lwpfun)counter, (void *)(num - 1), STACKSIZE);
    printf("%ld\n", num);
  } else {
    printf("Bye\n");
  }
  lwp_exit();
}

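/*
 * Hypothetical driver (not part of the original examples; the library's
 * header name is not shown, so no #include is given): kick off the
 * countdown above and hand control to the thread library.  Assumes
 * lwp_start(), one of the symbols in the linking test below, runs the
 * created threads until each has called lwp_exit().
 */
int main(void) {
  lwp_create((lwpfun)counter, (void *)5, STACKSIZE);
  lwp_start();                  /* assumed to return once every lwp exits */
  printf("Bye from main\n");
  return 0;
}
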
static void indentnum(uintptr_t num) {
  /* print a greeting from thread num, yield, then say goodbye.
   * Not terribly interesting, but it is instructive.
   */
  int id;

  id = (int)num;
  printf("Greetings from Thread %d. Yielding...\n", id);
  lwp_yield();
  printf("I (%d) am still alive. Goodbye.\n", id);
  lwp_exit();
}

int main(int argc, char *argv[]){
  if ( argc == -1 ) {
    /* can't happen, but the linker doesn't know it. */
    /* these are all the required external symbols */
    lwp_create(NULL,NULL,0);
    lwp_exit();
    lwp_yield();
    lwp_start();
    lwp_stop();
    lwp_set_scheduler(NULL);
  }
  printf("Linked successfully.\n");
  exit(0);
}

static void indentnum(void *num) {
  /* print the number num num times, indented by 5*num spaces
   * Not terribly interesting, but it is instructive.
   */
  int howfar,i;

  howfar=(int)(intptr_t)num;    /* interpret num as an integer; the
                                 * intermediate cast keeps the pointer
                                 * narrowing well-defined on 64-bit */
  for(i=0;i<howfar;i++){
    printf("%*d\n",howfar*5,howfar);
    lwp_yield();                /* let another have a turn */
  }
  lwp_exit();                   /* bail when done.  This should
                                 * be unnecessary if the stack has
                                 * been properly prepared
                                 */
}

/*
 * Release resources.
 * Enter zombie state.
 * Wake up parent and init processes,
 * and dispose of children.
 */
void
exit(int why, int what)
{
	/*
	 * If proc_exit() fails, then some other lwp in the process
	 * got there first.  We just have to call lwp_exit() to allow
	 * the other lwp to finish exiting the process.  Otherwise we're
	 * restarting init, and should return.
	 */
	if (proc_exit(why, what) != 0) {
		mutex_enter(&curproc->p_lock);
		ASSERT(curproc->p_flag & SEXITLWPS);
		lwp_exit();
		/* NOTREACHED */
	}
}

static void indentnum(uintptr_t num) {
  /* print the number num num times, indented by 5*num spaces
   * Not terribly interesting, but it is instructive.
   */
  int howfar,i;

  howfar=(int)num;              /* interpret num as an integer */
  for(i=0;i<ITERS;i++){
    printf("%*d\n",howfar*5,howfar);
    if ( num == NUMTHREADS && i == ITERS - 3 ) {
      checktids();
      exit(EXIT_SUCCESS);
    }
    lwp_yield();                /* let another have a turn */
  }
  lwp_exit();                   /* bail when done.  This should
                                 * be unnecessary if the stack has
                                 * been properly prepared
                                 */
}

/*
 * Cause a kernel thread to exit.  Assumes the exiting thread is the
 * current context.
 */
void
kthread_exit(int ecode)
{
	const char *name;
	lwp_t *l = curlwp;

	/* We can't do much with the exit code, so just report it. */
	if (ecode != 0) {
		if ((name = l->l_name) == NULL)
			name = "unnamed";
		printf("WARNING: kthread `%s' (%d) exits with status %d\n",
		    name, l->l_lid, ecode);
	}

	/* And exit.. */
	lwp_exit(l);

	/*
	 * XXX Fool the compiler.  Making exit1() __noreturn__ is a can
	 * XXX of worms right now.
	 */
	for (;;)
		;
}

/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flags & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
		    (u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP || dump_stop_usertds) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		if (p->p_flags & P_UPCALLPEND) {
			p->p_flags &= ~P_UPCALLPEND;
			postupcall(lp);
		}
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		lwkt_gettoken(&p->p_token);
		postsig(sig);
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flags & P_SWAPPEDOUT) {
		lwkt_gettoken(&p->p_token);
		get_mplock();
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		rel_mplock();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * In a multi-threaded program it is possible for a thread to change
	 * signal state during a system call which temporarily changes the
	 * signal mask.  In this case postsig() might not be run and we
	 * have to restore the mask ourselves.
	 */
	if (lp->lwp_flags & LWP_OLDMASK) {
		lp->lwp_flags &= ~LWP_OLDMASK;
		lp->lwp_sigmask = lp->lwp_oldsigmask;
		goto recheck;
	}
}

/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

#ifdef KERN_SA
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2,
			    l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}

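/*
 * Hypothetical userland sketch (not from the source above), assuming
 * NetBSD's <lwp.h> interface with _lwp_makecontext(), _lwp_create(),
 * and _lwp_exit(); the names `start` and `spawn` are illustrative.
 * _lwp_create() lands in sys__lwp_create() above, and _lwp_exit() in
 * sys__lwp_exit().
 */
#include <stdlib.h>
#include <ucontext.h>
#include <lwp.h>

#define STACKSZ		(64 * 1024)

static void
start(void *arg)
{
	/* ... thread body ... */
	_lwp_exit();
}

static lwpid_t
spawn(void)
{
	static char stack[STACKSZ];	/* one-shot demo stack */
	ucontext_t uc;
	lwpid_t lid;

	_lwp_makecontext(&uc, start, NULL, NULL, stack, sizeof(stack));
	if (_lwp_create(&uc, 0, &lid) == -1)
		abort();
	return lid;
}
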
/*
 * Fork a kernel thread.  Any process can request this to be done.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	bool inmem;
	int error;
	va_list ap;
	int lc;

	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0)
		return ENOMEM;
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}
	error = lwp_create(&lwp0, &proc0, uaddr, inmem, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		return error;
	}
	uvm_lwp_hold(l);
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		if (l->l_name == NULL) {
			lwp_exit(l);
			return ENOMEM;
		}
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if ((flag & KTHREAD_INTR) != 0) {
		KASSERT((flag & KTHREAD_MPSAFE) != 0);
	}
	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	l->l_priority = pri;
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
			lwp_lock(l);
		}
		l->l_pflag |= LP_BOUND;
		l->l_cpu = ci;
	}
	if ((flag & KTHREAD_INTR) != 0)
		l->l_pflag |= LP_INTR;
	if ((flag & KTHREAD_MPSAFE) == 0)
		l->l_pflag &= ~LP_MPSAFE;

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	if ((flag & KTHREAD_IDLE) == 0) {
		l->l_stat = LSRUN;
		sched_enqueue(l, false);
		lwp_unlock(l);
	} else
		lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);

	/*
	 * The LWP is not created suspended or stopped and cannot be set
	 * into those states later, so must be considered runnable.
	 */
	proc0.p_nrlwps++;
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL)
		*lp = l;
	return (0);
}

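/*
 * Hypothetical caller sketch (not from the source above), using only
 * the kthread_create() and kthread_exit() signatures shown here; the
 * names `example_thread`, `example_start`, and `example_lwp` are
 * illustrative.
 */
static lwp_t *example_lwp;

static void
example_thread(void *arg)
{

	/* ... do work ... */
	kthread_exit(0);	/* never returns; see kthread_exit() above */
}

static int
example_start(void)
{

	/* Default priority, MP-safe, not bound to any CPU. */
	return kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    example_thread, NULL, &example_lwp, "example");
}
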
static int
linux_clone_nptl(struct lwp *l, const struct linux_sys_clone_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) tls;
		syscallarg(void *) child_tidptr;
	} */
	struct proc *p;
	struct lwp *l2;
	struct linux_emuldata *led;
	void *parent_tidptr, *tls, *child_tidptr;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	lwpid_t lid;
	int flags, tnprocs, error;

	p = l->l_proc;
	flags = SCARG(uap, flags);
	parent_tidptr = SCARG(uap, parent_tidptr);
	tls = SCARG(uap, tls);
	child_tidptr = SCARG(uap, child_tidptr);

	tnprocs = atomic_inc_uint_nv(&nprocs);
	if (__predict_false(tnprocs >= maxproc) ||
	    kauth_authorize_process(l->l_cred, KAUTH_PROCESS_FORK, p,
	    KAUTH_ARG(tnprocs), NULL, NULL) != 0) {
		atomic_dec_uint(&nprocs);
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, LWP_DETACHED | LWP_PIDLID,
	    SCARG(uap, stack), 0, child_return, NULL, &l2, l->l_class);
	if (__predict_false(error)) {
		DPRINTF(("%s: lwp_create error=%d\n", __func__, error));
		atomic_dec_uint(&nprocs);
		uvm_uarea_free(uaddr);
		return error;
	}
	lid = l2->l_lid;

	/* LINUX_CLONE_CHILD_CLEARTID: clear TID in child's memory on exit() */
	if (flags & LINUX_CLONE_CHILD_CLEARTID) {
		led = l2->l_emuldata;
		led->led_clear_tid = child_tidptr;
	}

	/* LINUX_CLONE_PARENT_SETTID: store child's TID in parent's memory */
	if (flags & LINUX_CLONE_PARENT_SETTID) {
		if ((error = copyout(&lid, parent_tidptr,
		    sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_PARENT_SETTID "
			    "failed (parent_tidptr = %p tid = %d error=%d)\n",
			    __func__, parent_tidptr, lid, error);
	}

	/* LINUX_CLONE_CHILD_SETTID: store child's TID in child's memory */
	if (flags & LINUX_CLONE_CHILD_SETTID) {
		if ((error = copyout(&lid, child_tidptr,
		    sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_CHILD_SETTID "
			    "failed (child_tidptr = %p, tid = %d error=%d)\n",
			    __func__, child_tidptr, lid, error);
	}

	if (flags & LINUX_CLONE_SETTLS) {
		error = LINUX_LWP_SETPRIVATE(l2, tls);
		if (error) {
			DPRINTF(("%s: LINUX_LWP_SETPRIVATE %d\n", __func__,
			    error));
			lwp_exit(l2);
			return error;
		}
	}

	/*
	 * Set the new LWP running, unless the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	retval[0] = lid;
	retval[1] = 0;
	return 0;
}

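/*
 * Hypothetical Linux userland counterpart (not from the source above):
 * the flag combination a pthread-style caller passes to clone(2), which
 * linux_clone_nptl() emulates.  A sketch only; `stack_top` and
 * `tls_block` must be prepared by the caller, and the names here are
 * illustrative.
 */
#define _GNU_SOURCE
#include <sys/types.h>
#include <sched.h>

static pid_t parent_tid, child_tid;

static int
thread_main(void *arg)
{
	return 0;
}

static int
spawn_thread(void *stack_top, void *tls_block)
{
	int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
	    CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
	    CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;

	/* glibc order: fn, stack, flags, arg, parent_tid, tls, child_tid */
	return clone(thread_main, stack_top, flags, NULL,
	    &parent_tid, tls_block, &child_tid);
}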