void
runtime·newosproc(M *mp, G *gp, void *stk, void (*fn)(void))
{
	UcontextT uc;
	int32 ret;

	if(0) {
		runtime·printf(
			"newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
			stk, mp, gp, fn, mp->id, mp->tls[0], &mp);
	}

	mp->tls[0] = mp->id;	// so 386 asm can find it

	runtime·getcontext(&uc);

	uc.uc_flags = _UC_SIGMASK | _UC_CPU;
	uc.uc_link = nil;
	uc.uc_sigmask = sigset_all;

	runtime·lwp_mcontext_init(&uc.uc_mcontext, stk, mp, gp, fn);

	ret = runtime·lwp_create(&uc, 0, &mp->procid);
	if(ret < 0) {
		runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n",
			runtime·mcount() - 1, -ret);
		runtime·throw("runtime.newosproc");
	}
}
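For comparison, the same kernel primitive is reachable from plain C on NetBSD. A minimal sketch, assuming NetBSD's <lwp.h> interface (_lwp_makecontext(), _lwp_create(), _lwp_wait()); the stack size and start routine are illustrative, not from the source:

/* Sketch only: one userland LWP on NetBSD, mirroring what the Go
 * runtime does above.  Error handling trimmed. */
#include <lwp.h>
#include <ucontext.h>
#include <stdio.h>
#include <stdlib.h>

static void
start(void *arg)
{
	printf("hello from lwp %d, arg=%p\n", _lwp_self(), arg);
	_lwp_exit();
}

int
main(void)
{
	ucontext_t uc;
	lwpid_t lid;
	static char stack[64 * 1024];

	getcontext(&uc);
	/* point the context at our start routine and stack */
	_lwp_makecontext(&uc, start, NULL, NULL, stack, sizeof(stack));
	if (_lwp_create(&uc, 0, &lid) != 0) {
		perror("_lwp_create");
		exit(1);
	}
	_lwp_wait(lid, NULL);	/* join the new LWP */
	return 0;
}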
int
thr_create(void *base, size_t size, void (*func)(), void *args, long flags,
    int *tid)
{
	int i, stat;

	/* find a free slot in the thread table */
	for (i = 0; (i < MAXTHREAD) && (thread_table[i].thread_key >= 0); i++)
		;
	if (i == MAXTHREAD)	/* table full: avoid writing past the end */
		return (-1);
	/* lwp_setstkcache(size, 1); */
	stat = lwp_create(&thread_table[i], func, MINPRIO, flags,
	    lwp_newstk(), 1, args);
	*tid = i;
	return (stat);
}
static void
counter(long num)
{
	if (num) {
		/* spawn the next count-down step as a new LWP */
		lwp_create((lwpfun)counter, (void *)(num - 1), STACKSIZE);
		printf("%ld\n", num);
	} else {
		printf("Bye\n");
	}
	lwp_exit();
}
int
main(int argc, char *argv[])
{
	long i;

	/* spawn a single LWP running indentnum() */
	for (i = 0; i < 1; i++)
		lwp_create((lwpfun)indentnum, (void *)i, INITIALSTACK);

	lwp_start();
	return 0;
}
int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwpid_t *new_lwp)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	int error;

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0,
	    p->p_emul->e_startlwp, arg, &l2, l->l_class);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	*new_lwp = l2->l_lid;

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((flags & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}
/*
 * Initialize the LWP package by spinning off a scheduler thread.
 */
int
ldap_int_thread_initialize( void )
{
	thread_t	tid;
	stkalign_t	*stack;
	int		stackno;

	if (( stack = get_stack( &stackno )) == NULL ) {
		return -1;
	}
	lwp_create( &tid, lwp_scheduler, MINPRIO, 0, stack, 1, stackno );
	return 0;
}
int
main(int argc, char *argv[])
{
	if ( argc == -1 ) {
		/* can't happen, but the linker doesn't know it. */
		/* these are all the required external symbols */
		lwp_create(NULL, NULL, 0);
		lwp_exit();
		lwp_yield();
		lwp_start();
		lwp_stop();
		lwp_set_scheduler(NULL);
	}
	printf("Linked successfully.\n");
	exit(0);
}
int
ldap_pvt_thread_create( ldap_pvt_thread_t *thread,
	int detach,
	void *(*start_routine)( void * ),
	void *arg )
{
	stkalign_t	*stack;
	int		stackno;

	if ( (stack = ldap_int_thread_get_stack( &stackno )) == NULL ) {
		return( -1 );
	}
	return( lwp_create( thread, lwp_create_stack, MINPRIO, 0,
	    stack, 3, start_routine, arg, stackno ) );
}
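A convention worth noting in this and the surrounding SunOS-style LWP snippets: lwp_create() takes the thread ID, start function, priority, flags, and stack, followed by an argument count and then that many arguments forwarded to the start function (3 here: start_routine, arg, and stackno; 1 in the scheduler-initialization case above; 0 in mthread_init below).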
int
main(int argc, char *argv[])
{
	int i, cnt, err;
	snake s[MAXSNAKES];

	err = 0;
	for (i = 1; i < argc; i++) {		/* check options */
		fprintf(stderr, "%s: unknown option\n", argv[i]);
		err++;
	}
	if ( err ) {
		fprintf(stderr, "usage: %s [-z]\n", argv[0]);
		exit(err);
	}

	install_handler(SIGINT,  (sigfun)kill_snake);	/* SIGINT will kill a snake */
	install_handler(SIGQUIT, (sigfun)lwp_stop);	/* SIGQUIT will stop the system */

	start_windowing();			/* start up curses windowing */

	/* Initialize Snakes */
	cnt = 0;
	/* snake new_snake(int y, int x, int len, int dir, int color); */
	s[cnt++] = new_snake( 8, 30, 10, E, 1);	/* each starts a different color */
	s[cnt++] = new_snake(10, 30, 10, E, 2);
	s[cnt++] = new_snake(12, 30, 10, E, 3);
	s[cnt++] = new_snake( 8, 50, 10, W, 4);
	s[cnt++] = new_snake(10, 50, 10, W, 5);
	s[cnt++] = new_snake(12, 50, 10, W, 6);
	s[cnt++] = new_snake( 4, 40, 10, S, 7);

	/* Draw each snake */
	draw_all_snakes();

	/* turn each snake loose as an individual LWP */
	for (i = 0; i < cnt; i++) {
		s[i]->lw_pid = lwp_create((lwpfun)run_hungry_snake,
		    (void *)(s + i), INITIALSTACK);
	}

	lwp_start();			/* returns when the last lwp exits */

	end_windowing();		/* close down curses windowing */

	printf("Goodbye.\n");		/* Say goodbye, Gracie */
	return err;
}
int
main(int argc, char *argv[])
{
	long i;

	srandom(0);			/* so it's repeatable */
	lwp_set_scheduler(Random);

	printf("Launching LWPS\n");
	/* spawn a number of individual LWPs */
	for (i = 1; i <= NUMTHREADS; i++) {
		lwp_create((lwpfun)indentnum, (void *)i, INITIALSTACK);
	}

	lwp_start();			/* returns when the last lwp exits */

	printf("Back from LWPS.\n");
	return 0;
}
int
main(int argc, char *argv[])
{
	long i;

	printf("Creating LWPS\n");
	/* spawn a number of individual LWPs */
	for (i = 1; i <= 5; i++) {
		lwp_create((lwpfun)indentnum, (void *)i, INITIALSTACK);
	}

	printf("Setting the scheduler.\n");
	lwp_set_scheduler(AltRoundRobin);

	printf("Launching LWPS\n");
	lwp_start();			/* returns when the last lwp exits */

	printf("Back from LWPS.\n");
	return 0;
}
void
mthread_init( context *mainctx )
{
	int i;

	for (i = 0; i < MAXTHREAD; i++)
		thread_table[i].thread_key = -1;
	lwp_self(&thread_table[0]);

	mutex_init(&mark_lock, USYNC_THREAD, 0);
	mutex_init(&p_mark_lock, USYNC_THREAD, 0);
	mutex_init(&alloc_lock, USYNC_THREAD, 0);
	mutex_init(&free_thread_lock, USYNC_THREAD, 0);
	mutex_init(&qthread_lock, USYNC_THREAD, 0);
	mutex_init(&qsort_lock, USYNC_THREAD, 0);
	sema_init(&free_thread_sem, 0, USYNC_THREAD, 0);
	rwlock_init(&gc_lock, USYNC_THREAD, 0);

	pod_setmaxpri(2);
	lwp_setstkcache(32*1024*4, MAXTHREAD);
	lwp_create((thread_t *)0, scheduler, 2, 0, lwp_newstk(), 0);
	lwp_setpri(SELF, MINPRIO);
}
/*
 * System call to create an lwp.
 *
 * Notes on the LWP_DETACHED and LWP_DAEMON flags:
 *
 * A detached lwp (LWP_DETACHED) cannot be the specific target of
 * lwp_wait() (it is not joinable), but lwp_wait(0, ...) is required
 * to sleep until all non-daemon detached lwps have terminated before
 * returning EDEADLK because a detached lwp might create a non-detached lwp
 * that could then be returned by lwp_wait(0, ...).  See also lwp_detach().
 *
 * A daemon lwp (LWP_DAEMON) is a detached lwp that has the additional
 * property that it does not affect the termination condition of the
 * process:  The last non-daemon lwp to call lwp_exit() causes the process
 * to exit and lwp_wait(0, ...) does not sleep waiting for daemon lwps
 * to terminate.  See the block comment before lwp_wait().
 */
int
syslwp_create(ucontext_t *ucp, int flags, id_t *new_lwp)
{
	klwp_t *lwp;
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	ucontext_t uc;
#ifdef _SYSCALL32_IMPL
	ucontext32_t uc32;
#endif /* _SYSCALL32_IMPL */
	k_sigset_t sigmask;
	int tid;
	model_t model = get_udatamodel();
	uintptr_t thrptr = 0;

	if (flags & ~(LWP_DAEMON|LWP_DETACHED|LWP_SUSPENDED))
		return (set_errno(EINVAL));

	/*
	 * lwp_create() is disallowed for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (set_errno(ENOTSUP));

	if (model == DATAMODEL_NATIVE) {
		if (copyin(ucp, &uc, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		sigutok(&uc.uc_sigmask, &sigmask);
#if defined(__i386)
		/*
		 * libc stashed thrptr into unused kernel %sp.
		 * See setup_context() in libc.
		 */
		thrptr = (uint32_t)uc.uc_mcontext.gregs[ESP];
#endif
	}
#ifdef _SYSCALL32_IMPL
	else {
		if (copyin(ucp, &uc32, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		sigutok(&uc32.uc_sigmask, &sigmask);
#if defined(__sparc)
		ucontext_32ton(&uc32, &uc, NULL, NULL);
#else	/* __amd64 */
		ucontext_32ton(&uc32, &uc);
		/*
		 * libc stashed thrptr into unused kernel %sp.
		 * See setup_context() in libc.
		 */
		thrptr = (uint32_t)uc32.uc_mcontext.gregs[ESP];
#endif
	}
#endif /* _SYSCALL32_IMPL */

	(void) save_syscall_args();	/* save args for tracing first */

	mutex_enter(&curproc->p_lock);
	pool_barrier_enter();
	mutex_exit(&curproc->p_lock);
	lwp = lwp_create(lwp_rtt, NULL, NULL, curproc, TS_STOPPED,
	    curthread->t_pri, &sigmask, curthread->t_cid, 0);
	mutex_enter(&curproc->p_lock);
	pool_barrier_exit();
	mutex_exit(&curproc->p_lock);
	if (lwp == NULL)
		return (set_errno(EAGAIN));

	lwp_load(lwp, uc.uc_mcontext.gregs, thrptr);

	t = lwptot(lwp);
	/*
	 * Copy the new lwp's lwpid into the caller's specified buffer.
	 */
	if (new_lwp && copyout(&t->t_tid, new_lwp, sizeof (id_t))) {
		/*
		 * caller's buffer is not writable, return
		 * EFAULT, and terminate new lwp.
		 */
		mutex_enter(&p->p_lock);
		t->t_proc_flag |= TP_EXITLWP;
		t->t_sig_check = 1;
		t->t_sysnum = 0;
		t->t_proc_flag &= ~TP_HOLDLWP;
		lwp_create_done(t);
		mutex_exit(&p->p_lock);
		return (set_errno(EFAULT));
	}

	/*
	 * clone callers context, if any.  must be invoked
	 * while -not- holding p_lock.
	 */
	if (curthread->t_ctx)
		lwp_createctx(curthread, t);

	/*
	 * copy current contract templates
	 */
	lwp_ctmpl_copy(lwp, ttolwp(curthread));

	mutex_enter(&p->p_lock);
	/*
	 * Copy the syscall arguments to the new lwp's arg area
	 * for the benefit of debuggers.
	 */
	t->t_sysnum = SYS_lwp_create;
	lwp->lwp_ap = lwp->lwp_arg;
	lwp->lwp_arg[0] = (long)ucp;
	lwp->lwp_arg[1] = (long)flags;
	lwp->lwp_arg[2] = (long)new_lwp;
	lwp->lwp_argsaved = 1;

	if (!(flags & (LWP_DETACHED|LWP_DAEMON)))
		t->t_proc_flag |= TP_TWAIT;
	if (flags & LWP_DAEMON) {
		t->t_proc_flag |= TP_DAEMON;
		p->p_lwpdaemon++;
	}

	tid = (int)t->t_tid;	/* for /proc debuggers */

	/*
	 * We now set the newly-created lwp running.
	 * If it is being created as LWP_SUSPENDED, we leave its
	 * TP_HOLDLWP flag set so it will stop in system call exit.
	 */
	if (!(flags & LWP_SUSPENDED))
		t->t_proc_flag &= ~TP_HOLDLWP;
	lwp_create_done(t);
	mutex_exit(&p->p_lock);

	return (tid);
}
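From userland, the LWP_DETACHED/LWP_DAEMON semantics described in the block comment above surface through Solaris libthread's thr_create(), whose THR_DETACHED and THR_DAEMON flags map onto them. A minimal sketch, assuming <thread.h>; worker() is a hypothetical start routine:

/* Sketch only: spawning a detached daemon LWP via Solaris libthread. */
#include <thread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	printf("daemon lwp running\n");
	return (NULL);
}

int
main(void)
{
	thread_t tid;

	/* THR_DETACHED makes it non-joinable; THR_DAEMON means it does
	 * not keep the process alive, as described above. */
	if (thr_create(NULL, 0, worker, NULL,
	    THR_DETACHED | THR_DAEMON, &tid) != 0)
		return (1);
	/* let any remaining non-daemon threads decide when to exit */
	thr_exit(NULL);
	return (0);
}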
/*
 * General fork call.  Note that another LWP in the process may call exec()
 * or exit() while we are forking.  It's safe to continue here, because
 * neither operation will complete until all LWPs have exited the process.
 */
int
fork1(struct lwp *l1, int flags, int exitsig, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc	*p1, *p2, *parent;
	struct plimit	*p1_lim;
	uid_t		uid;
	struct lwp	*l2;
	int		count;
	vaddr_t		uaddr;
	int		tnprocs;
	int		tracefork;
	int		error = 0;

	p1 = l1->l_proc;
	uid = kauth_cred_getuid(l1->l_cred);
	tnprocs = atomic_inc_uint_nv(&nprocs);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.
	 */
	if (__predict_false(tnprocs >= maxproc))
		error = -1;
	else
		error = kauth_authorize_process(l1->l_cred,
		    KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);

	if (error) {
		static struct timeval lasttfm;
		atomic_dec_uint(&nprocs);
		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc", "increase kern.maxproc or NPROC");
		if (forkfsleep)
			kpause("forkmx", false, forkfsleep, NULL);
		return EAGAIN;
	}

	/*
	 * Enforce limits.
	 */
	count = chgproccnt(uid, 1);
	if (__predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
		if (kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_RLIMIT,
		    p1, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
		    &p1->p_rlimit[RLIMIT_NPROC],
		    KAUTH_ARG(RLIMIT_NPROC)) != 0) {
			(void)chgproccnt(uid, -1);
			atomic_dec_uint(&nprocs);
			if (forkfsleep)
				kpause("forkulim", false, forkfsleep, NULL);
			return EAGAIN;
		}
	}

	/*
	 * Allocate virtual address space for the U-area now, while it
	 * is still easy to abort the fork operation if we're out of
	 * kernel virtual address space.
	 */
	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		(void)chgproccnt(uid, -1);
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	/*
	 * We are now committed to the fork.  From here on, we may
	 * block on resources, but resource allocation may NOT fail.
	 */

	/* Allocate new proc. */
	p2 = proc_alloc();

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p2->p_startzero, 0,
	    (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
	memcpy(&p2->p_startcopy, &p1->p_startcopy,
	    (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));

	TAILQ_INIT(&p2->p_sigpend.sp_info);

	LIST_INIT(&p2->p_lwps);
	LIST_INIT(&p2->p_sigwaiters);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * Inherit flags we want to keep.  The flags related to SIGCHLD
	 * handling are important in order to keep a consistent behaviour
	 * for the child after the fork.  If we are a 32-bit process, the
	 * child will be too.
	 */
	p2->p_flag =
	    p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32);
	p2->p_emul = p1->p_emul;
	p2->p_execsw = p1->p_execsw;

	if (flags & FORK_SYSTEM) {
		/*
		 * Mark it as a system process.  Set P_NOCLDWAIT so that
		 * children are reparented to init(8) when they exit.
		 * init(8) can easily wait them out for us.
		 */
		p2->p_flag |= (PK_SYSTEM | PK_NOCLDWAIT);
	}

	mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
	rw_init(&p2->p_reflock);
	cv_init(&p2->p_waitcv, "wait");
	cv_init(&p2->p_lwpcv, "lwpwait");

	/*
	 * Share a lock between the processes if they are to share signal
	 * state: we must synchronize access to it.
	 */
	if (flags & FORK_SHARESIGS) {
		p2->p_lock = p1->p_lock;
		mutex_obj_hold(p1->p_lock);
	} else
		p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	kauth_proc_fork(p1, p2);

	p2->p_raslist = NULL;
#if defined(__HAVE_RAS)
	ras_fork(p1, p2);
#endif

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	if (flags & FORK_SHAREFILES)
		fd_share(p2);
	else if (flags & FORK_CLEANFILES)
		p2->p_fd = fd_init(NULL);
	else
		p2->p_fd = fd_copy();

	/* XXX racy */
	p2->p_mqueue_cnt = p1->p_mqueue_cnt;

	if (flags & FORK_SHARECWD)
		cwdshare(p2);
	else
		p2->p_cwdi = cwdinit();

	/*
	 * Note: p_limit (rlimit stuff) is copy-on-write, so normally
	 * we just need increase pl_refcnt.
	 */
	p1_lim = p1->p_limit;
	if (!p1_lim->pl_writeable) {
		lim_addref(p1_lim);
		p2->p_limit = p1_lim;
	} else {
		p2->p_limit = lim_copy(p1_lim);
	}

	if (flags & FORK_PPWAIT) {
		/* Mark ourselves as waiting for a child. */
		l1->l_pflag |= LP_VFORKWAIT;
		p2->p_lflag = PL_PPWAIT;
		p2->p_vforklwp = l1;
	} else {
		p2->p_lflag = 0;
	}
	p2->p_sflag = 0;
	p2->p_slflag = 0;
	parent = (flags & FORK_NOWAIT) ? initproc : p1;
	p2->p_pptr = parent;
	p2->p_ppid = parent->p_pid;
	LIST_INIT(&p2->p_children);

	p2->p_aio = NULL;

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		mutex_enter(&ktrace_lock);
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			ktradref(p2);
		mutex_exit(&ktrace_lock);
	}
#endif

	/*
	 * Create signal actions for the child process.
	 */
	p2->p_sigacts = sigactsinit(p1, flags & FORK_SHARESIGS);
	mutex_enter(p1->p_lock);
	p2->p_sflag |=
	    (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
	sched_proc_fork(p1, p2);
	mutex_exit(p1->p_lock);

	p2->p_stflag = p1->p_stflag;

	/*
	 * p_stats.
	 * Copy parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = pstatscopy(p1->p_stats);

	/*
	 * Set up the new process address space.
	 */
	uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) ? true : false);

	/*
	 * Finish creating the child process.
	 * It will return through a different path later.
	 */
	lwp_create(l1, p2, uaddr, (flags & FORK_PPWAIT) ? LWP_VFORK : 0,
	    stack, stacksize, (func != NULL) ? func : child_return, arg, &l2,
	    l1->l_class);

	/*
	 * Inherit l_private from the parent.
	 * Note that we cannot use lwp_setprivate() here since that
	 * also sets the CPU TLS register, which is incorrect if the
	 * process has changed that without letting the kernel know.
	 */
	l2->l_private = l1->l_private;

	/*
	 * If emulation has a process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, l1, flags);

	/*
	 * ...and finally, any other random fork hooks that subsystems
	 * might have registered.
	 */
	doforkhooks(p2, p1);

	SDT_PROBE(proc,,,create, p2, p1, flags, 0, 0);

	/*
	 * It's now safe for the scheduler and other processes to see the
	 * child process.
	 */
	mutex_enter(proc_lock);

	if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
		p2->p_lflag |= PL_CONTROLT;

	LIST_INSERT_HEAD(&parent->p_children, p2, p_sibling);
	p2->p_exitsig = exitsig;	/* signal for parent on exit */

	/*
	 * We don't want to tracefork vfork()ed processes because they
	 * will not receive the SIGTRAP until it is too late.
	 */
	tracefork = (p1->p_slflag & (PSL_TRACEFORK|PSL_TRACED)) ==
	    (PSL_TRACEFORK|PSL_TRACED) && (flags & FORK_PPWAIT) == 0;
	if (tracefork) {
		p2->p_slflag |= PSL_TRACED;
		p2->p_opptr = p2->p_pptr;
		if (p2->p_pptr != p1->p_pptr) {
			struct proc *parent1 = p2->p_pptr;

			if (parent1->p_lock < p2->p_lock) {
				if (!mutex_tryenter(parent1->p_lock)) {
					mutex_exit(p2->p_lock);
					mutex_enter(parent1->p_lock);
				}
			} else if (parent1->p_lock > p2->p_lock) {
				mutex_enter(parent1->p_lock);
			}
			parent1->p_slflag |= PSL_CHTRACED;
			proc_reparent(p2, p1->p_pptr);
			if (parent1->p_lock != p2->p_lock)
				mutex_exit(parent1->p_lock);
		}

		/*
		 * Set ptrace status.
		 */
		p1->p_fpid = p2->p_pid;
		p2->p_fpid = p1->p_pid;
	}

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	LIST_INSERT_HEAD(&allproc, p2, p_list);

	p2->p_trace_enabled = trace_is_enabled(p2);
#ifdef __HAVE_SYSCALL_INTERN
	(*p2->p_emul->e_syscall_intern)(p2);
#endif

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	if (ktrpoint(KTR_EMUL))
		p2->p_traceflag |= KTRFAC_TRC_EMUL;

	/*
	 * Notify any interested parties about the new process.
	 */
	if (!SLIST_EMPTY(&p1->p_klist)) {
		mutex_exit(proc_lock);
		KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
		mutex_enter(proc_lock);
	}

	/*
	 * Make child runnable, set start time, and add to run queue except
	 * if the parent requested the child to start in SSTOP state.
	 */
	mutex_enter(p2->p_lock);

	/*
	 * Start profiling.
	 */
	if ((p2->p_stflag & PST_PROFIL) != 0) {
		mutex_spin_enter(&p2->p_stmutex);
		startprofclock(p2);
		mutex_spin_exit(&p2->p_stmutex);
	}

	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	lwp_lock(l2);
	KASSERT(p2->p_nrlwps == 1);
	if (p2->p_sflag & PS_STOPFORK) {
		struct schedstate_percpu *spc = &l2->l_cpu->ci_schedstate;
		p2->p_nrlwps = 0;
		p2->p_stat = SSTOP;
		p2->p_waited = 0;
		p1->p_nstopchild++;
		l2->l_stat = LSSTOP;
		KASSERT(l2->l_wchan == NULL);
		lwp_unlock_to(l2, spc->spc_lwplock);
	} else {
		p2->p_nrlwps = 1;
		p2->p_stat = SACTIVE;
		l2->l_stat = LSRUN;
		sched_enqueue(l2, false);
		lwp_unlock(l2);
	}

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	mutex_exit(p2->p_lock);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, sleep until it clears LP_VFORKWAIT.
	 */
#if 0
	while (l1->l_pflag & LP_VFORKWAIT) {
		cv_wait(&l1->l_waitcv, proc_lock);
	}
#else
	while (p2->p_lflag & PL_PPWAIT)
		cv_wait(&p1->p_waitcv, proc_lock);
#endif

	/*
	 * Let the parent know that we are tracing its child.
	 */
	if (tracefork) {
		ksiginfo_t ksi;

		KSI_INIT_EMPTY(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_lid = l1->l_lid;
		kpsignal(p1, &ksi, NULL);
	}
	mutex_exit(proc_lock);

	return 0;
}
/*
 * Fork a kernel thread.  Any process can request this to be done.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	bool inmem;
	int error;
	va_list ap;
	int lc;

	inmem = uvm_uarea_alloc(&uaddr);
	if (uaddr == 0)
		return ENOMEM;
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}
	error = lwp_create(&lwp0, &proc0, uaddr, inmem, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		return error;
	}
	uvm_lwp_hold(l);
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		if (l->l_name == NULL) {
			lwp_exit(l);
			return ENOMEM;
		}
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if ((flag & KTHREAD_INTR) != 0) {
		KASSERT((flag & KTHREAD_MPSAFE) != 0);
	}

	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	l->l_priority = pri;
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
			lwp_lock(l);
		}
		l->l_pflag |= LP_BOUND;
		l->l_cpu = ci;
	}
	if ((flag & KTHREAD_INTR) != 0)
		l->l_pflag |= LP_INTR;
	if ((flag & KTHREAD_MPSAFE) == 0)
		l->l_pflag &= ~LP_MPSAFE;

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	if ((flag & KTHREAD_IDLE) == 0) {
		l->l_stat = LSRUN;
		sched_enqueue(l, false);
		lwp_unlock(l);
	} else
		lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);

	/*
	 * The LWP is not created suspended or stopped and cannot be set
	 * into those states later, so must be considered runnable.
	 */
	proc0.p_nrlwps++;
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL)
		*lp = l;

	return (0);
}
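A minimal sketch of how a kernel subsystem might call the interface above, assuming the signature shown; example_thread(), example_lwp, and the "example" names are hypothetical:

/* Sketch only: an MP-safe kernel thread doing periodic work. */
static lwp_t *example_lwp;

static void
example_thread(void *arg)
{
	for (;;) {
		/* ... do periodic work ... */
		kpause("example", false, hz, NULL);
	}
}

static int
example_init(void)
{
	/* PRI_NONE picks the default priority; KTHREAD_MPSAFE means
	 * the thread runs without the big kernel lock. */
	return kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    example_thread, NULL, &example_lwp, "example");
}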
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	bool inmem;
	ucontext_t *newuc;
	int error, lid;

#ifdef KERN_SA
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, inmem,
	    SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (error) {
		uvm_uarea_free(uaddr, curcpu());
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2,
			    l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}
int
main()
{
	/* count down from 100 in a chain of LWPs; see counter() above */
	lwp_create((lwpfun)counter, (void *)100, STACKSIZE);
	lwp_start();
	exit(0);
}
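Assuming the library's default FIFO round-robin scheduling, each counter() instance queues its successor before printing, so this should print 100 down to 1 followed by "Bye"; under a different scheduler (see the Random and AltRoundRobin examples above) the interleaving may differ.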
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	void *(*start_routine) (void *), void *arg)
{
	struct lwp_params create_params;
	void *stack;
	sigset_t sigmask, oldsigmask;
	struct pthread *curthread, *new_thread;
	int ret = 0, locked;

	_thr_check_init();

	/*
	 * Tell libc and others now they need lock to protect their data.
	 */
	if (_thr_isthreaded() == 0 && _thr_setthreaded(1))
		return (EAGAIN);

	curthread = tls_get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	if (attr == NULL || *attr == NULL) {
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	} else {
		new_thread->attr = *(*attr);
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
		/*
		 * scheduling policy and scheduling parameters will be
		 * inherited in following code.
		 */
	}

	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		new_thread->terminated = 1;
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
	    PTHREAD_CANCEL_DEFERRED;
	/*
	 * Check if this thread is to inherit the scheduling
	 * attributes from its parent:
	 */
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/*
		 * Copy the scheduling attributes.  Lock the scheduling
		 * lock to get consistent scheduling parameters.
		 */
		THR_LOCK(curthread);
		new_thread->base_priority = curthread->base_priority;
		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
		THR_UNLOCK(curthread);
	} else {
		/*
		 * Use just the thread priority, leaving the
		 * other scheduling attributes as their
		 * default values:
		 */
		new_thread->base_priority = new_thread->attr.prio;
	}
	new_thread->active_priority = new_thread->base_priority;

	/* Initialize the mutex queue: */
	TAILQ_INIT(&new_thread->mutexq);

	/* Initialise hooks in the thread structure: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->tlflags |= TLFLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);
	/* Return the thread pointer early so the new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE)) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	/* Schedule the new thread. */
	stack = (char *)new_thread->attr.stackaddr_attr +
	    new_thread->attr.stacksize_attr;
	bzero(&create_params, sizeof(create_params));
	create_params.lwp_func = thread_start;
	create_params.lwp_arg = new_thread;
	create_params.lwp_stack = stack;
	create_params.lwp_tid1 = &new_thread->tid;
	/*
	 * A thread created by thr_create() inherits the current thread's
	 * sigmask; however, before the new thread has set itself up
	 * correctly it cannot handle signals, so we mask all signals here.
	 * We do this at the very last moment, so that we don't run
	 * into problems while we have all signals disabled.
	 */
	SIGFILLSET(sigmask);
	__sys_sigprocmask(SIG_SETMASK, &sigmask, &oldsigmask);
	new_thread->sigmask = oldsigmask;
	ret = lwp_create(&create_params);
	__sys_sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	if (ret != 0) {
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->terminated = 1;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX);
		}
		THR_THREAD_UNLOCK(curthread, new_thread);
		THREAD_LIST_LOCK(curthread);
		_thread_active_threads--;
		new_thread->tlflags |= TLFLAGS_DETACHED;
		_thr_ref_delete_unlocked(curthread, new_thread);
		THREAD_LIST_UNLOCK(curthread);
		(*thread) = NULL;
		ret = EAGAIN;
	} else if (locked) {
		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
	return (ret);
}
static int
linux_clone_nptl(struct lwp *l, const struct linux_sys_clone_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) tls;
		syscallarg(void *) child_tidptr;
	} */
	struct proc *p;
	struct lwp *l2;
	struct linux_emuldata *led;
	void *parent_tidptr, *tls, *child_tidptr;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	lwpid_t lid;
	int flags, tnprocs, error;

	p = l->l_proc;
	flags = SCARG(uap, flags);
	parent_tidptr = SCARG(uap, parent_tidptr);
	tls = SCARG(uap, tls);
	child_tidptr = SCARG(uap, child_tidptr);

	tnprocs = atomic_inc_uint_nv(&nprocs);
	if (__predict_false(tnprocs >= maxproc) ||
	    kauth_authorize_process(l->l_cred, KAUTH_PROCESS_FORK, p,
	    KAUTH_ARG(tnprocs), NULL, NULL) != 0) {
		atomic_dec_uint(&nprocs);
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, LWP_DETACHED | LWP_PIDLID,
	    SCARG(uap, stack), 0, child_return, NULL, &l2, l->l_class);
	if (__predict_false(error)) {
		DPRINTF(("%s: lwp_create error=%d\n", __func__, error));
		atomic_dec_uint(&nprocs);
		uvm_uarea_free(uaddr);
		return error;
	}
	lid = l2->l_lid;

	/* LINUX_CLONE_CHILD_CLEARTID: clear TID in child's memory on exit() */
	if (flags & LINUX_CLONE_CHILD_CLEARTID) {
		led = l2->l_emuldata;
		led->led_clear_tid = child_tidptr;
	}

	/* LINUX_CLONE_PARENT_SETTID: store child's TID in parent's memory */
	if (flags & LINUX_CLONE_PARENT_SETTID) {
		if ((error = copyout(&lid, parent_tidptr,
		    sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_PARENT_SETTID "
			    "failed (parent_tidptr = %p tid = %d error=%d)\n",
			    __func__, parent_tidptr, lid, error);
	}

	/* LINUX_CLONE_CHILD_SETTID: store child's TID in child's memory */
	if (flags & LINUX_CLONE_CHILD_SETTID) {
		if ((error = copyout(&lid, child_tidptr,
		    sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_CHILD_SETTID "
			    "failed (child_tidptr = %p, tid = %d error=%d)\n",
			    __func__, child_tidptr, lid, error);
	}

	if (flags & LINUX_CLONE_SETTLS) {
		error = LINUX_LWP_SETPRIVATE(l2, tls);
		if (error) {
			DPRINTF(("%s: LINUX_LWP_SETPRIVATE %d\n", __func__,
			    error));
			lwp_exit(l2);
			return error;
		}
	}

	/*
	 * Set the new LWP running, unless the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	retval[0] = lid;
	retval[1] = 0;
	return 0;
}
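For reference, the userland side being emulated here is glibc's clone(2) wrapper with the NPTL flags handled above. A minimal sketch, assuming glibc's clone() API; child_fn, STACK_SZ, and the flag combination are illustrative:

/* Sketch only: a Linux clone(2) call using the TID flags the
 * emulation above implements. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>

#define STACK_SZ (64 * 1024)

static pid_t parent_tid, child_tid;

static int
child_fn(void *arg)
{
	printf("child running, arg=%p\n", arg);
	return 0;
}

int
main(void)
{
	char *stack = malloc(STACK_SZ);
	pid_t tid;

	/* the child's stack grows down, so pass the top of the region */
	tid = clone(child_fn, stack + STACK_SZ,
	    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID |
	    CLONE_CHILD_CLEARTID | SIGCHLD,
	    NULL, &parent_tid, NULL, &child_tid);
	if (tid == -1) {
		perror("clone");
		return 1;
	}
	waitpid(tid, NULL, 0);	/* SIGCHLD lets us reap it like a child */
	free(stack);
	return 0;
}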