int GC_pthread_create(pthread_t *new_thread, const pthread_attr_t *attr_in, void * (*thread_execp)(void *), void *arg) { int result; GC_thread t; pthread_t my_new_thread; pthread_attr_t attr; word my_flags = 0; int flag; void * stack = 0; size_t stack_size = 0; int n; struct sched_param schedparam; (void)pthread_attr_init(&attr); if (attr_in != 0) { (void)pthread_attr_getstacksize(attr_in, &stack_size); (void)pthread_attr_getstackaddr(attr_in, &stack); } LOCK(); if (!GC_is_initialized) { GC_init_inner(); } GC_multithreaded++; if (stack == 0) { if (stack_size == 0) stack_size = 1048576; /* ^-- 1 MB (this was GC_min_stack_sz, but that * violates the pthread_create documentation which * says the default value if none is supplied is * 1MB) */ else stack_size += thr_min_stack(); stack = (void *)GC_stack_alloc(&stack_size); if (stack == 0) { GC_multithreaded--; UNLOCK(); errno = ENOMEM; return -1; } } else { my_flags |= CLIENT_OWNS_STACK; } (void)pthread_attr_setstacksize(&attr, stack_size); (void)pthread_attr_setstackaddr(&attr, stack); if (attr_in != 0) { (void)pthread_attr_getscope(attr_in, &n); (void)pthread_attr_setscope(&attr, n); (void)pthread_attr_getschedparam(attr_in, &schedparam); (void)pthread_attr_setschedparam(&attr, &schedparam); (void)pthread_attr_getschedpolicy(attr_in, &n); (void)pthread_attr_setschedpolicy(&attr, n); (void)pthread_attr_getinheritsched(attr_in, &n); (void)pthread_attr_setinheritsched(&attr, n); (void)pthread_attr_getdetachstate(attr_in, &flag); if (flag == PTHREAD_CREATE_DETACHED) { my_flags |= DETACHED; } (void)pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); } /* * thr_create can call malloc(), which if redirected will * attempt to acquire the allocation lock. * Unlock here to prevent deadlock. 
*/ #if 0 #ifdef I386 UNLOCK(); #endif #endif result = pthread_create(&my_new_thread, &attr, thread_execp, arg); #if 0 #ifdef I386 LOCK(); #endif #endif if (result == 0) { t = GC_new_thread(my_new_thread); t -> flags = my_flags; if (!(my_flags & DETACHED)) cond_init(&(t->join_cv), USYNC_THREAD, 0); t -> stack = stack; t -> stack_size = stack_size; if (new_thread != 0) *new_thread = my_new_thread; pthread_cond_signal(&GC_create_cv); } else { if (!(my_flags & CLIENT_OWNS_STACK)) { GC_stack_free(stack, stack_size); } GC_multithreaded--; } UNLOCK(); pthread_attr_destroy(&attr); return(result); }
/*
 * tpool_create -- allocate and register a new thread pool.
 *
 * min_threads / max_threads bound the worker count; linger is the idle
 * timeout for excess workers; attr (optional) supplies thread attributes,
 * which are cloned so the caller may destroy its copy afterwards.
 *
 * Returns the new pool, or NULL with errno set to EINVAL (bad bounds or
 * bad stack attributes), ENOMEM (allocation failure), or the error from
 * cloning the attributes.
 */
tpool_t *
tpool_create(uint_t min_threads, uint_t max_threads, uint_t linger,
    pthread_attr_t *attr)
{
    tpool_t *pool;
    void *stk_addr;
    size_t stk_size;
    size_t min_stk;
    int err;

    /* Sanity-check the worker-count bounds. */
    if (min_threads > max_threads || max_threads < 1) {
        errno = EINVAL;
        return (NULL);
    }

    if (attr != NULL) {
        if (pthread_attr_getstack(attr, &stk_addr, &stk_size) != 0) {
            errno = EINVAL;
            return (NULL);
        }
        /*
         * A caller-supplied stack can back only a single thread, so a
         * pool with an explicit stack address must have max_threads == 1.
         * Any specified stack size must meet the system minimum.
         */
        min_stk = thr_min_stack();
        if (stk_addr != NULL) {
            if (stk_size < min_stk || max_threads != 1) {
                errno = EINVAL;
                return (NULL);
            }
        } else if (stk_size != 0 && stk_size < min_stk) {
            errno = EINVAL;
            return (NULL);
        }
    }

    pool = lmalloc(sizeof (*pool));
    if (pool == NULL) {
        errno = ENOMEM;
        return (NULL);
    }
    (void) mutex_init(&pool->tp_mutex, USYNC_THREAD, NULL);
    (void) cond_init(&pool->tp_busycv, USYNC_THREAD, NULL);
    (void) cond_init(&pool->tp_workcv, USYNC_THREAD, NULL);
    (void) cond_init(&pool->tp_waitcv, USYNC_THREAD, NULL);
    pool->tp_minimum = min_threads;
    pool->tp_maximum = max_threads;
    pool->tp_linger = linger;

    /*
     * Clone the attributes rather than storing the caller's pointer:
     * the pool needs its own pthread_attr_t initialized either from
     * the user-supplied attributes or from the defaults when attr is
     * NULL.
     */
    err = _pthread_attr_clone(&pool->tp_attr, attr);
    if (err) {
        lfree(pool, sizeof (*pool));
        errno = err;
        return (NULL);
    }

    /* All workers run as detached daemon threads. */
    (void) pthread_attr_setdetachstate(&pool->tp_attr,
        PTHREAD_CREATE_DETACHED);
    (void) _pthread_attr_setdaemonstate_np(&pool->tp_attr,
        PTHREAD_CREATE_DAEMON_NP);

    /* Link the pool into the global circular list of all pools. */
    lmutex_lock(&thread_pool_lock);
    if (thread_pools == NULL) {
        pool->tp_forw = pool;
        pool->tp_back = pool;
        thread_pools = pool;
    } else {
        thread_pools->tp_back->tp_forw = pool;
        pool->tp_forw = thread_pools;
        pool->tp_back = thread_pools->tp_back;
        thread_pools->tp_back = pool;
    }
    lmutex_unlock(&thread_pool_lock);

    return (pool);
}