/*
 * Release every resource owned by a terminated thread: its stack, its
 * private KSE/KSEG pair (system-scope threads only), and finally the
 * thread structure itself.
 */
static void
free_thread(struct pthread *curthread, struct pthread *thread)
{
	int system_scope;

	/* Test the scope flag before the attr is torn down with the rest. */
	system_scope = (thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0;
	free_stack(&thread->attr);
	if (system_scope) {
		/* System-scope threads own a dedicated KSE and KSEG. */
		_kseg_free(thread->kseg);
		_kse_free(curthread, thread->kse);
	}
	_thr_free(curthread, thread);
}
/*
 * Garbage-collect terminated threads.
 *
 * Walks the global GC list under THREAD_LIST_LOCK: frees the stack of
 * every thread that has finished running in userland (terminated != 0),
 * and unlinks threads that are both detached and unreferenced onto a
 * local worklist.  The worklist entries are then freed after the list
 * lock has been dropped, so _thr_free() is never called while holding
 * THREAD_LIST_LOCK.  The initial thread is deliberately never freed.
 */
void
_thr_gc(struct pthread *curthread)
{
	struct pthread *td, *td_next;
	TAILQ_HEAD(, pthread) worklist;

	TAILQ_INIT(&worklist);
	THREAD_LIST_LOCK(curthread);

	/* Check the threads waiting for GC. */
	for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
		/* Fetch the successor first: td may be unlinked below. */
		td_next = TAILQ_NEXT(td, gcle);
		if (td->terminated == 0) {
			/* make sure we are not still in userland */
			continue;
		}
		_thr_stack_free(&td->attr);
		if (((td->tlflags & TLFLAGS_DETACHED) != 0) &&
		    (td->refcount == 0)) {
			THR_GCLIST_REMOVE(td);
			/*
			 * The thread has detached and is no longer
			 * referenced.  It is safe to remove all
			 * remnants of the thread.
			 */
			THR_LIST_REMOVE(td);
			TAILQ_INSERT_HEAD(&worklist, td, gcle);
		}
	}
	THREAD_LIST_UNLOCK(curthread);

	/* Free the collected threads outside the thread-list lock. */
	while ((td = TAILQ_FIRST(&worklist)) != NULL) {
		TAILQ_REMOVE(&worklist, td, gcle);
		/*
		 * XXX we don't free initial thread, because there might
		 * have some code referencing initial thread.
		 */
		if (td == _thr_initial) {
			DBG_MSG("Initial thread won't be freed\n");
			continue;
		}
		_thr_free(curthread, td);
	}
}
/*
 * Create a new thread (1:1 kernel-thread implementation).
 *
 * Allocates and initializes a struct pthread, creates its stack and
 * TLS, and asks the kernel for a new thread via thr_new(2).  On the
 * first call, threaded mode is switched on so libc starts locking.
 * The new thread pointer is published through *thread before thr_new()
 * so the child may use it immediately.  Returns 0 on success or an
 * errno value (EAGAIN on resource exhaustion); on failure *thread is
 * cleared and the partially-built thread is handed to the GC.
 *
 * Fix: "memset(¶m, ...)" and "thr_new(¶m, ...)" were encoding-corrupted
 * forms of "&param" (an HTML "&para;" entity mangled the ampersand);
 * restored so the code compiles and zeroes/passes the correct object.
 */
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct thr_param param;
	struct sched_param sched_param;
	struct rtprio rtp;
	sigset_t set, oset;
	cpuset_t *cpusetp;
	int i, cpusetsize, create_suspended, locked, old_stack_prot, ret;

	cpusetp = NULL;
	ret = cpusetsize = 0;
	_thr_check_init();

	/*
	 * Tell libc and others now they need lock to protect their data.
	 */
	if (_thr_isthreaded() == 0) {
		_malloc_first_thread();
		if (_thr_setthreaded(1))
			return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);
	memset(&param, 0, sizeof(param));	/* was mis-encoded "¶m" */

	if (attr == NULL || *attr == NULL)
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	else {
		new_thread->attr = *(*attr);
		/*
		 * Detach the cpuset from the copied attr; affinity is
		 * applied separately after the thread exists.
		 */
		cpusetp = new_thread->attr.cpuset;
		cpusetsize = new_thread->attr.cpusetsize;
		new_thread->attr.cpuset = NULL;
		new_thread->attr.cpusetsize = 0;
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
	}

	new_thread->tid = TID_TERMINATED;

	old_stack_prot = _rtld_get_stack_prot();
	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancel_enable = 1;
	new_thread->cancel_async = 0;
	/* Initialize the mutex queue: */
	for (i = 0; i < TMQ_NITEMS; i++)
		TAILQ_INIT(&new_thread->mq[i]);

	/* Initialise hooks in the thread structure: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;
		create_suspended = 1;
	} else {
		create_suspended = 0;
	}

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->flags |= THR_FLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);

	/*
	 * Handle the race between __pthread_map_stacks_exec and
	 * thread linkage.
	 */
	if (old_stack_prot != _rtld_get_stack_prot())
		_thr_stack_fix_protection(new_thread);

	/* Return thread pointer earlier so that new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE) || cpusetp != NULL) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	param.start_func = (void (*)(void *)) thread_start;
	param.arg = new_thread;
	param.stack_base = new_thread->attr.stackaddr_attr;
	param.stack_size = new_thread->attr.stacksize_attr;
	param.tls_base = (char *)new_thread->tcb;
	param.tls_size = sizeof(struct tcb);
	param.child_tid = &new_thread->tid;
	param.parent_tid = &new_thread->tid;
	param.flags = 0;
	if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM)
		param.flags |= THR_SYSTEM_SCOPE;
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED)
		param.rtp = NULL;
	else {
		sched_param.sched_priority = new_thread->attr.prio;
		_schedparam_to_rtp(new_thread->attr.sched_policy,
		    &sched_param, &rtp);
		param.rtp = &rtp;
	}

	/* Schedule the new thread. */
	if (create_suspended) {
		/*
		 * Block all signals (except SIGTRAP for debuggers) so the
		 * child cannot take a signal before it has suspended.
		 */
		SIGFILLSET(set);
		SIGDELSET(set, SIGTRAP);
		__sys_sigprocmask(SIG_SETMASK, &set, &oset);
		new_thread->sigmask = oset;
		SIGDELSET(new_thread->sigmask, SIGCANCEL);
	}

	ret = thr_new(&param, sizeof(param));	/* was mis-encoded "¶m" */

	if (ret != 0) {
		ret = errno;
		/*
		 * Translate EPROCLIM into well-known POSIX code EAGAIN.
		 */
		if (ret == EPROCLIM)
			ret = EAGAIN;
	}

	if (create_suspended)
		__sys_sigprocmask(SIG_SETMASK, &oset, NULL);

	if (ret != 0) {
		/* Kernel thread creation failed; tear the thread down. */
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->tid = TID_TERMINATED;
		new_thread->flags |= THR_FLAGS_DETACHED;
		new_thread->refcount--;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX, 0);
		}
		_thr_try_gc(curthread, new_thread); /* thread lock released */
		atomic_add_int(&_thread_active_threads, -1);
	} else if (locked) {
		if (cpusetp != NULL) {
			if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
			    TID(new_thread), cpusetsize, cpusetp)) {
				ret = errno;
				/* kill the new thread */
				new_thread->force_exit = 1;
				new_thread->flags |= THR_FLAGS_DETACHED;
				_thr_try_gc(curthread, new_thread);
				    /* thread lock released */
				goto out;
			}
		}

		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
out:
	if (ret)
		(*thread) = 0;
	return (ret);
}
/*
 * Create a new thread on top of kernel LWPs.
 *
 * Allocates a struct pthread, sets up its stack and scheduling
 * attributes (optionally inherited from the creator), links it into
 * the thread list, and starts it with lwp_create().  All signals are
 * masked around lwp_create() so the child cannot take a signal before
 * it has finished initializing; the child inherits the saved mask.
 * Returns 0 on success or EAGAIN on failure; on failure the
 * half-constructed thread is torn down and *thread is set to NULL.
 */
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct lwp_params create_params;
	void *stack;
	sigset_t sigmask, oldsigmask;
	struct pthread *curthread, *new_thread;
	int ret = 0, locked;

	_thr_check_init();

	/*
	 * Tell libc and others now they need lock to protect their data.
	 */
	if (_thr_isthreaded() == 0 && _thr_setthreaded(1))
		return (EAGAIN);

	curthread = tls_get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	if (attr == NULL || *attr == NULL) {
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	} else {
		new_thread->attr = *(*attr);
	}

	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
		/*
		 * scheduling policy and scheduling parameters will be
		 * inherited in following code.
		 */
	}

	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		new_thread->terminated = 1;
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
	    PTHREAD_CANCEL_DEFERRED;
	/*
	 * Check if this thread is to inherit the scheduling
	 * attributes from its parent:
	 */
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/*
		 * Copy the scheduling attributes.  Lock the scheduling
		 * lock to get consistent scheduling parameters.
		 */
		THR_LOCK(curthread);
		new_thread->base_priority = curthread->base_priority;
		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
		THR_UNLOCK(curthread);
	} else {
		/*
		 * Use just the thread priority, leaving the
		 * other scheduling attributes as their
		 * default values:
		 */
		new_thread->base_priority = new_thread->attr.prio;
	}
	new_thread->active_priority = new_thread->base_priority;

	/* Initialize the mutex queue: */
	TAILQ_INIT(&new_thread->mutexq);

	/* Initialise hooks in the thread structure: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->tlflags |= TLFLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);
	/* Return thread pointer earlier so that new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE)) {
		/* Hold the lock until the debugger event is reported. */
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;

	/* Schedule the new thread. */
	/* lwp_stack is the TOP of the stack (stacks grow down here). */
	stack = (char *)new_thread->attr.stackaddr_attr +
	    new_thread->attr.stacksize_attr;
	bzero(&create_params, sizeof(create_params));
	create_params.lwp_func = thread_start;
	create_params.lwp_arg = new_thread;
	create_params.lwp_stack = stack;
	create_params.lwp_tid1 = &new_thread->tid;
	/*
	 * Thread created by lwp_create() inherits the current thread's
	 * sigmask; however, before the new thread has set itself up
	 * correctly it cannot handle signals, so we mask all signals
	 * here.  We do this at the very last moment, so that we don't
	 * run into problems while we have all signals disabled.
	 */
	SIGFILLSET(sigmask);
	__sys_sigprocmask(SIG_SETMASK, &sigmask, &oldsigmask);
	new_thread->sigmask = oldsigmask;
	ret = lwp_create(&create_params);
	__sys_sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	if (ret != 0) {
		/* lwp_create failed: undo the linkage and free the thread. */
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->terminated = 1;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			/* Wake anything waiting on the suspend cycle. */
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX);
		}
		THR_THREAD_UNLOCK(curthread, new_thread);
		THREAD_LIST_LOCK(curthread);
		_thread_active_threads--;
		new_thread->tlflags |= TLFLAGS_DETACHED;
		_thr_ref_delete_unlocked(curthread, new_thread);
		THREAD_LIST_UNLOCK(curthread);
		(*thread) = NULL;
		ret = EAGAIN;
	} else if (locked) {
		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
	return (ret);
}
/*
 * Some notes on new thread creation and first time initializion
 * to enable multi-threading.
 *
 * There are basically two things that need to be done.
 *
 *   1) The internal library variables must be initialized.
 *   2) Upcalls need to be enabled to allow multiple threads
 *      to be run.
 *
 * The first may be done as a result of other pthread functions
 * being called.  When _thr_initial is null, _libpthread_init is
 * called to initialize the internal variables; this also creates
 * or sets the initial thread.  It'd be nice to automatically
 * have _libpthread_init called on program execution so we don't
 * have to have checks throughout the library.
 *
 * The second part is only triggered by the creation of the first
 * thread (other than the initial/main thread).  If the thread
 * being created is a scope system thread, then a new KSE/KSEG
 * pair needs to be allocated.  Also, if upcalls haven't been
 * enabled on the initial thread's KSE, they must be now that
 * there is more than one thread; this could be delayed until
 * the initial KSEG has more than one thread.
 */
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct kse *kse = NULL;
	struct kse_group *kseg = NULL;
	kse_critical_t crit;
	int ret = 0;

	/* Perform first-time library initialization if needed. */
	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	/*
	 * Turn on threaded mode, if failed, it is unnecessary to
	 * do further work.
	 */
	if (_kse_isthreaded() == 0 && _kse_setthreaded(1)) {
		return (EAGAIN);
	}
	curthread = _get_curthread();

	/*
	 * Allocate memory for the thread structure.
	 * Some functions use malloc, so don't put it
	 * in a critical region.
	 */
	if ((new_thread = _thr_alloc(curthread)) == NULL) {
		/* Insufficient memory to create a thread: */
		ret = EAGAIN;
	} else {
		/* Check if default thread attributes are required: */
		if (attr == NULL || *attr == NULL)
			/* Use the default thread attributes: */
			new_thread->attr = _pthread_attr_default;
		else {
			new_thread->attr = *(*attr);
			if ((*attr)->sched_inherit == PTHREAD_INHERIT_SCHED) {
				/* inherit scheduling contention scope */
				if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
					new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
				else
					new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
				/*
				 * scheduling policy and scheduling parameters will be
				 * inherited in following code.
				 */
			}
		}
		/*
		 * Global scope override: _thread_scope_system forces all
		 * threads to system scope (>0) or process scope (<0);
		 * the signal daemon always keeps system scope.
		 */
		if (_thread_scope_system > 0)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else if ((_thread_scope_system < 0) &&
		    (thread != &_thr_sig_daemon))
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
		if (create_stack(&new_thread->attr) != 0) {
			/* Insufficient memory to create a stack: */
			ret = EAGAIN;
			_thr_free(curthread, new_thread);
		}
		else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
		    (((kse = _kse_alloc(curthread, 1)) == NULL)
		    || ((kseg = _kseg_alloc(curthread)) == NULL))) {
			/* Insufficient memory to create a new KSE/KSEG: */
			ret = EAGAIN;
			if (kse != NULL) {
				kse->k_kcb->kcb_kmbx.km_flags |= KMF_DONE;
				_kse_free(curthread, kse);
			}
			free_stack(&new_thread->attr);
			_thr_free(curthread, new_thread);
		}
		else {
			if (kseg != NULL) {
				/* Add the KSE to the KSEG's list of KSEs. */
				TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_kgqe);
				kseg->kg_ksecount = 1;
				kse->k_kseg = kseg;
				kse->k_schedq = &kseg->kg_schedq;
			}
			/*
			 * Write a magic value to the thread structure
			 * to help identify valid ones:
			 */
			new_thread->magic = THR_MAGIC;
			new_thread->slice_usec = -1;
			new_thread->start_routine = start_routine;
			new_thread->arg = arg;
			new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
			    PTHREAD_CANCEL_DEFERRED;
			/* No thread is wanting to join to this one: */
			new_thread->joiner = NULL;
			/*
			 * Initialize the machine context.
			 * Enter a critical region to get consistent context.
			 */
			crit = _kse_critical_enter();
			THR_GETCONTEXT(&new_thread->tcb->tcb_tmbx.tm_context);
			/* Initialize the thread for signals: */
			new_thread->sigmask = curthread->sigmask;
			_kse_critical_leave(crit);
			new_thread->tcb->tcb_tmbx.tm_udata = new_thread;
			new_thread->tcb->tcb_tmbx.tm_context.uc_sigmask =
			    new_thread->sigmask;
			new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size =
			    new_thread->attr.stacksize_attr;
			new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp =
			    new_thread->attr.stackaddr_attr;
			makecontext(&new_thread->tcb->tcb_tmbx.tm_context,
			    (void (*)(void))thread_start, 3, new_thread,
			    start_routine, arg);
			/*
			 * Check if this thread is to inherit the scheduling
			 * attributes from its parent:
			 */
			if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
				/*
				 * Copy the scheduling attributes.
				 * Lock the scheduling lock to get consistent
				 * scheduling parameters.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				new_thread->base_priority =
				    curthread->base_priority &
				    ~THR_SIGNAL_PRIORITY;
				new_thread->attr.prio =
				    curthread->base_priority &
				    ~THR_SIGNAL_PRIORITY;
				new_thread->attr.sched_policy =
				    curthread->attr.sched_policy;
				THR_SCHED_UNLOCK(curthread, curthread);
			} else {
				/*
				 * Use just the thread priority, leaving the
				 * other scheduling attributes as their
				 * default values:
				 */
				new_thread->base_priority =
				    new_thread->attr.prio;
			}
			new_thread->active_priority = new_thread->base_priority;
			new_thread->inherited_priority = 0;

			/* Initialize the mutex queue: */
			TAILQ_INIT(&new_thread->mutexq);

			/* Initialise hooks in the thread structure: */
			new_thread->specific = NULL;
			new_thread->specific_data_count = 0;
			new_thread->cleanup = NULL;
			new_thread->flags = 0;
			new_thread->tlflags = 0;
			new_thread->sigbackout = NULL;
			new_thread->continuation = NULL;
			new_thread->wakeup_time.tv_sec = -1;
			new_thread->lock_switch = 0;
			sigemptyset(&new_thread->sigpend);
			new_thread->check_pending = 0;
			new_thread->locklevel = 0;
			new_thread->rdlock_count = 0;
			/* No alternate signal stack by default. */
			new_thread->sigstk.ss_sp = 0;
			new_thread->sigstk.ss_size = 0;
			new_thread->sigstk.ss_flags = SS_DISABLE;
			new_thread->oldsigmask = NULL;

			if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
				new_thread->state = PS_SUSPENDED;
				new_thread->flags = THR_FLAGS_SUSPENDED;
			}
			else
				new_thread->state = PS_RUNNING;

			/*
			 * System scope threads have their own kse and
			 * kseg.  Process scope threads are all hung
			 * off the main process kseg.
			 */
			if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
				new_thread->kseg = _kse_initial->k_kseg;
				new_thread->kse = _kse_initial;
			}
			else {
				kse->k_curthread = NULL;
				kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
				new_thread->kse = kse;
				new_thread->kseg = kse->k_kseg;
				kse->k_kcb->kcb_kmbx.km_udata = kse;
				kse->k_kcb->kcb_kmbx.km_curthread = NULL;
			}

			/*
			 * Schedule the new thread starting a new KSEG/KSE
			 * pair if necessary.
			 */
			ret = _thr_schedule_add(curthread, new_thread);
			if (ret != 0)
				free_thread(curthread, new_thread);
			else {
				/* Return a pointer to the thread structure: */
				(*thread) = new_thread;
			}
		}
	}

	/* Return the status: */
	return (ret);
}