int pthread_detach(pthread_t thread_id) {
  int retval = 0;
  nc_basic_thread_data_t *basic_data = thread_id;
  nc_thread_descriptor_t *detached_tdb;
  /*
   * TODO(gregoryd) - can be optimized using InterlockedExchange
   * once it's available.
   */
  pthread_mutex_lock(&__nc_thread_management_lock);
  detached_tdb = basic_data->tdb;

  if (NULL == detached_tdb) {
    /* The thread has already terminated. */
    nc_release_basic_data_mu(basic_data);
  } else {
    if (!detached_tdb->join_waiting) {
      if (detached_tdb->joinable) {
        detached_tdb->joinable = 0;
      } else {
        /* Already detached. */
        retval = EINVAL;
      }
    } else {
      /* Another thread is already waiting to join - do nothing. */
    }
  }
  pthread_mutex_unlock(&__nc_thread_management_lock);
  return retval;
}
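/*
 * Usage sketch (illustrative only, not part of this library): a worker is
 * created and then detached so that, per the code above, its resources are
 * released when it terminates rather than held for a join. The names
 * "detached_worker" and "detach_example" are hypothetical.
 */
#include <pthread.h>

static void *detached_worker(void *arg) {
  (void) arg;
  return NULL;
}

int detach_example(void) {
  pthread_t tid;
  int err = pthread_create(&tid, NULL, detached_worker, NULL);
  if (err != 0)
    return err;
  /* A second detach of the same thread would return EINVAL, as above. */
  return pthread_detach(tid);
}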
void pthread_exit(void *retval) {
  /* Get all we need from the tdb before releasing it. */
  nc_thread_descriptor_t *tdb = nc_get_tdb();
  nc_thread_memory_block_t *stack_node = tdb->stack_node;
  int32_t *is_used = &stack_node->is_used;
  nc_basic_thread_data_t *basic_data = tdb->basic_data;
  int joinable = tdb->joinable;

  /* Call cleanup handlers. */
  while (NULL != __nc_cleanup_handlers) {
    pthread_cleanup_pop(1);
  }

  /* Call the destruction functions for TSD. */
  __nc_tsd_exit();

  __newlib_thread_exit();
  __nc_futex_thread_exit();

  if (__nc_initial_thread_id != basic_data) {
    pthread_mutex_lock(&__nc_thread_management_lock);
    --__nc_running_threads_counter;
    pthread_mutex_unlock(&__nc_thread_management_lock);
  } else {
    /* This is the main thread - wait for other threads to complete. */
    wait_for_threads();
    exit(0);
  }

  pthread_mutex_lock(&__nc_thread_management_lock);

  basic_data->retval = retval;

  if (joinable) {
    /* If somebody is waiting for this thread, signal. */
    basic_data->status = THREAD_TERMINATED;
    pthread_cond_signal(&basic_data->join_condvar);
  }

  /*
   * We can release TLS+TDB - thread id and its return value are still
   * kept in basic_data.
   */
  nc_release_tls_node(tdb->tls_node, tdb);

  if (!joinable) {
    nc_release_basic_data_mu(basic_data);
  }

  /* Now add the stack to the list but keep it marked as used. */
  nc_free_memory_block_mu(THREAD_STACK_MEMORY, stack_node);

  if (1 == __nc_running_threads_counter) {
    pthread_cond_signal(&__nc_last_thread_cond);
  }

  pthread_mutex_unlock(&__nc_thread_management_lock);

  irt_thread.thread_exit(is_used);
  nc_abort();
}
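/*
 * Usage sketch (illustrative only, not part of this library): the loop over
 * __nc_cleanup_handlers above means a handler that was pushed but never
 * popped still runs when the thread exits. "exit_cleanup" and
 * "exiting_worker" are hypothetical names.
 */
#include <pthread.h>
#include <stdio.h>

static void exit_cleanup(void *msg) {
  puts((const char *) msg);
}

static void *exiting_worker(void *arg) {
  (void) arg;
  pthread_cleanup_push(exit_cleanup, (void *) "cleanup ran");
  pthread_exit(NULL);       /* Runs the handler pushed above. */
  pthread_cleanup_pop(0);   /* Not reached; balances the push. */
  return NULL;
}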
int pthread_join(pthread_t thread_id, void **thread_return) {
  int retval = 0;
  nc_basic_thread_data_t *basic_data = thread_id;

  if (pthread_self() == thread_id) {
    return EDEADLK;
  }

  pthread_mutex_lock(&__nc_thread_management_lock);

  if (basic_data->tdb != NULL) {
    /* The thread is still running. */
    nc_thread_descriptor_t *joined_tdb = basic_data->tdb;
    if (!joined_tdb->joinable || joined_tdb->join_waiting) {
      /* The thread is detached or another thread is waiting to join. */
      retval = EINVAL;
      goto ret;
    }
    joined_tdb->join_waiting = 1;
    /* Wait till the thread terminates. */
    while (THREAD_TERMINATED != basic_data->status) {
      pthread_cond_wait(&basic_data->join_condvar,
                        &__nc_thread_management_lock);
    }
  }
  ANNOTATE_CONDVAR_LOCK_WAIT(&basic_data->join_condvar,
                             &__nc_thread_management_lock);
  /* The thread has already terminated. */

  /* Save the return value. */
  if (thread_return != NULL) {
    *thread_return = basic_data->retval;
  }

  /* Release the resources. */
  nc_release_basic_data_mu(basic_data);
  retval = 0;

ret:
  pthread_mutex_unlock(&__nc_thread_management_lock);
  return retval;
}
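/*
 * Usage sketch (illustrative only, not part of this library): a join
 * retrieves the value stored in basic_data->retval above, and a self-join
 * is rejected up front with EDEADLK. "returning_worker" and "join_example"
 * are hypothetical names.
 */
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static void *returning_worker(void *arg) {
  (void) arg;
  return (void *) 42;
}

int join_example(void) {
  pthread_t tid;
  void *result;
  if (pthread_create(&tid, NULL, returning_worker, NULL) != 0)
    return -1;
  /* Joining yourself fails before any state is touched. */
  assert(pthread_join(pthread_self(), NULL) == EDEADLK);
  if (pthread_join(tid, &result) != 0)
    return -1;
  assert(result == (void *) 42);
  return 0;
}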
int pthread_create(pthread_t *thread_id,
                   const pthread_attr_t *attr,
                   void *(*start_routine)(void *),
                   void *arg) {
  int retval = EAGAIN;
  void *esp;
  /* Declare the variables outside of the while scope. */
  nc_thread_memory_block_t *stack_node = NULL;
  char *thread_stack = NULL;
  nc_thread_descriptor_t *new_tdb = NULL;
  nc_basic_thread_data_t *new_basic_data = NULL;
  nc_thread_memory_block_t *tls_node = NULL;
  size_t stacksize = PTHREAD_STACK_DEFAULT;
  void *new_tp;

  /* TODO(gregoryd) - right now a single lock is used, try to optimize? */
  pthread_mutex_lock(&__nc_thread_management_lock);

  do {
    /* Allocate the combined TLS + TDB block---see tls.h for explanation. */
    tls_node = nc_allocate_memory_block_mu(TLS_AND_TDB_MEMORY,
                                           __nacl_tls_combined_size(TDB_SIZE));
    if (NULL == tls_node)
      break;

    new_tp = __nacl_tls_initialize_memory(nc_memory_block_to_payload(tls_node),
                                          TDB_SIZE);

    new_tdb = (nc_thread_descriptor_t *)
              ((char *) new_tp + __nacl_tp_tdb_offset(TDB_SIZE));

    /*
     * TODO(gregoryd): consider creating a pool of basic_data structs,
     * similar to stack and TLS+TDB (probably when adding the support for
     * variable stack size).
     */
    new_basic_data = malloc(sizeof(*new_basic_data));
    if (NULL == new_basic_data) {
      /*
       * The tdb should be zero initialized.
       * This just re-emphasizes this requirement.
       */
      new_tdb->basic_data = NULL;
      break;
    }

    nc_tdb_init(new_tdb, new_basic_data);
    new_tdb->tls_node = tls_node;

    /*
     * All the required members of the tdb must be initialized before
     * the thread is started and actually before the global lock is released,
     * since another thread can call pthread_join() or pthread_detach().
     */
    new_tdb->start_func = start_routine;
    new_tdb->state = arg;
    if (attr != NULL) {
      new_tdb->joinable = attr->joinable;
      stacksize = attr->stacksize;
    }

    /* Allocate the stack for the thread. */
    stack_node = nc_allocate_memory_block_mu(THREAD_STACK_MEMORY, stacksize);
    if (NULL == stack_node) {
      retval = EAGAIN;
      break;
    }
    thread_stack = align((uint32_t) nc_memory_block_to_payload(stack_node),
                         kStackAlignment);
    new_tdb->stack_node = stack_node;

    retval = 0;
  } while (0);

  if (0 != retval) {
    pthread_mutex_unlock(&__nc_thread_management_lock);
    goto ret;  /* error */
  }

  /*
   * Speculatively increase the thread count. If thread creation
   * fails, we will decrease it back. This way the thread count will
   * never be lower than the actual number of threads, but can briefly
   * be higher than that.
   */
  ++__nc_running_threads_counter;

  /*
   * Save the new thread id. This cannot be done after the syscall,
   * because the child thread could have already finished by that
   * time. If thread creation fails, it will be overridden with -1
   * later.
   */
  *thread_id = new_basic_data;

  pthread_mutex_unlock(&__nc_thread_management_lock);

  /*
   * Calculate the top-of-stack location. The very first location is a
   * zero address of architecture-dependent width, needed to satisfy the
   * normal ABI alignment requirements for the stack. (On some machines
   * this is the dummy return address of the thread-start function.)
   *
   * Both thread_stack and stacksize are multiples of 16.
   */
  esp = (void *) (thread_stack + stacksize - kStackPadBelowAlign);
  memset(esp, 0, kStackPadBelowAlign);

  /* Start the thread. */
  retval = irt_thread.thread_create(FUN_TO_VOID_PTR(nc_thread_starter),
                                    esp, new_tp);
  if (0 != retval) {
    pthread_mutex_lock(&__nc_thread_management_lock);
    /* TODO(gregoryd) : replace with atomic decrement? */
    --__nc_running_threads_counter;
    pthread_mutex_unlock(&__nc_thread_management_lock);
    goto ret;
  }
  assert(0 == retval);

ret:
  if (0 != retval) {
    /* Failed to create a thread. */
    pthread_mutex_lock(&__nc_thread_management_lock);

    nc_release_tls_node(tls_node, new_tdb);
    if (new_basic_data) {
      nc_release_basic_data_mu(new_basic_data);
    }
    if (stack_node) {
      stack_node->is_used = 0;
      nc_free_memory_block_mu(THREAD_STACK_MEMORY, stack_node);
    }

    pthread_mutex_unlock(&__nc_thread_management_lock);
    *thread_id = NACL_PTHREAD_ILLEGAL_THREAD_ID;
  }

  return retval;
}
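/*
 * Usage sketch (illustrative only, not part of this library): the attr
 * branch above reads attr->joinable and attr->stacksize, which the standard
 * pthread_attr_* setters are assumed to populate in this implementation.
 * "create_example" and "stack_worker" are hypothetical names, and 256 KiB
 * is an arbitrary stack size.
 */
#include <pthread.h>

static void *stack_worker(void *arg) {
  (void) arg;
  return NULL;
}

int create_example(void) {
  pthread_t tid;
  pthread_attr_t attr;
  int err;

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
  pthread_attr_setstacksize(&attr, 256 * 1024);

  err = pthread_create(&tid, &attr, stack_worker, NULL);
  pthread_attr_destroy(&attr);
  if (err == 0)
    err = pthread_join(tid, NULL);
  return err;
}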