/* NOTE: this implementation doesn't support an init function that throws a C++ exception
 * or calls fork()
 */
int pthread_once(pthread_once_t* once_control, void (*init_routine)(void)) {
  static_assert(sizeof(atomic_int) == sizeof(pthread_once_t),
                "pthread_once_t should actually be atomic_int in implementation.");

  // We prefer casting to atomic_int instead of declaring pthread_once_t to be atomic_int directly,
  // because the latter would pollute pthread.h and cause an error when compiling libcxx.
  atomic_int* once_control_ptr = reinterpret_cast<atomic_int*>(once_control);

  // First check if the once is already initialized. This will be the common
  // case and we want to make this as fast as possible. Note that this still
  // requires a load_acquire operation here to ensure that all the
  // stores performed by the initialization function are observable on
  // this CPU after we exit.
  int old_value = atomic_load_explicit(once_control_ptr, memory_order_acquire);

  while (true) {
    if (__predict_true(old_value == ONCE_INITIALIZATION_COMPLETE)) {
      return 0;
    }

    // Try to atomically set the initialization-underway flag. This requires a compare-exchange
    // in a loop, and we may need to exit prematurely if the initialization is complete.
    if (!atomic_compare_exchange_weak_explicit(once_control_ptr, &old_value,
                                               ONCE_INITIALIZATION_UNDERWAY,
                                               memory_order_acquire, memory_order_acquire)) {
      continue;
    }

    if (old_value == ONCE_INITIALIZATION_NOT_YET_STARTED) {
      // We got here first, so we handle the initialization.
      (*init_routine)();

      // Do a store_release indicating that initialization is complete.
      atomic_store_explicit(once_control_ptr, ONCE_INITIALIZATION_COMPLETE,
                            memory_order_release);

      // Wake up any waiters.
      __futex_wake_ex(once_control_ptr, 0, INT_MAX);
      return 0;
    }

    // The initialization is underway; wait for it to finish.
    __futex_wait_ex(once_control_ptr, 0, old_value, NULL);
    old_value = atomic_load_explicit(once_control_ptr, memory_order_acquire);
  }
}
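/* Usage sketch (not part of the implementation above): a typical caller of
 * pthread_once for lazy one-time initialization. The names g_once, g_state,
 * init_state() and get_state() are illustrative, not bionic identifiers.
 */
#include <pthread.h>

static pthread_once_t g_once = PTHREAD_ONCE_INIT;
static int g_state;

/* Runs at most once process-wide, no matter how many threads race into
 * get_state(); late arrivals block on the futex until it completes. */
static void init_state(void) {
    g_state = 42;
}

static int get_state(void) {
    pthread_once(&g_once, init_state);
    /* The acquire load inside pthread_once guarantees that the store to
     * g_state is visible here, even on a thread that didn't run
     * init_state() itself. */
    return g_state;
}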
int pthread_mutex_unlock(pthread_mutex_t* mutex_interface) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        __pthread_normal_mutex_unlock(mutex, shared);
        return 0;
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid != atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        return EPERM;
    }

    // If the counter is > 0, we can simply decrement it atomically.
    // Since other threads can mutate the lower state bits (and only the
    // lower state bits), the decrement must be a single atomic subtraction.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(old_state)) {
        // We still own the mutex, so a release fence is not needed.
        atomic_fetch_sub_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE,
                                  memory_order_relaxed);
        return 0;
    }

    // The counter is 0, so we're going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to read
    // the current state, which will be locked_contended if there are waiters
    // to wake.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    atomic_store_explicit(&mutex->owner_tid, 0, memory_order_relaxed);
    const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    old_state = atomic_exchange_explicit(&mutex->state, unlocked, memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(old_state)) {
        __futex_wake_ex(&mutex->state, shared, 1);
    }
    return 0;
}
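/* Usage sketch (not part of bionic): exercises the recursive-counter path in
 * pthread_mutex_unlock() above. The variable names are illustrative. With a
 * RECURSIVE or ERRORCHECK mutex, an unlock attempt by a non-owner thread hits
 * the owner_tid check above and returns EPERM instead of corrupting the state.
 */
#include <assert.h>
#include <pthread.h>

int main(void) {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);

    pthread_mutex_t mutex;
    pthread_mutex_init(&mutex, &attr);
    pthread_mutexattr_destroy(&attr);

    pthread_mutex_lock(&mutex);                 /* take ownership           */
    pthread_mutex_lock(&mutex);                 /* same owner, bump counter */
    assert(pthread_mutex_unlock(&mutex) == 0);  /* atomic_fetch_sub path    */
    assert(pthread_mutex_unlock(&mutex) == 0);  /* full unlock + futex wake */

    pthread_mutex_destroy(&mutex);
    return 0;
}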
/* NOTE: this implementation doesn't support an init function that throws a C++ exception
 * or calls fork()
 */
int pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
{
    volatile pthread_once_t* ocptr = once_control;

    /* PTHREAD_ONCE_INIT is 0, we use the following bit flags
     *
     *   bit 0 set  -> initialization is under way
     *   bit 1 set  -> initialization is complete
     */
#define ONCE_INITIALIZING  (1 << 0)
#define ONCE_COMPLETED     (1 << 1)

    /* First check if the once is already initialized. This will be the common
     * case and we want to make this as fast as possible. Note that this still
     * requires a load_acquire operation here to ensure that all the
     * stores performed by the initialization function are observable on
     * this CPU after we exit.
     */
    if (__likely((*ocptr & ONCE_COMPLETED) != 0)) {
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    for (;;) {
        /* Try to atomically set the INITIALIZING flag.
         * This requires a cmpxchg loop, and we may need
         * to exit prematurely if we detect that
         * COMPLETED is now set.
         */
        int32_t oldval, newval;

        do {
            oldval = *ocptr;
            if ((oldval & ONCE_COMPLETED) != 0)
                break;

            newval = oldval | ONCE_INITIALIZING;
        } while (__bionic_cmpxchg(oldval, newval, ocptr) != 0);

        if ((oldval & ONCE_COMPLETED) != 0) {
            /* We detected that COMPLETED was set while in our loop. */
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        if ((oldval & ONCE_INITIALIZING) == 0) {
            /* We got here first, so we can jump out of the loop to
             * handle the initialization. */
            break;
        }

        /* Another thread is running the initialization and hasn't completed
         * yet, so wait for it, then try again. */
        __futex_wait_ex(ocptr, 0, oldval, NULL);
    }

    /* Call the initialization function. */
    (*init_routine)();

    /* Do a store_release indicating that initialization is complete. */
    ANDROID_MEMBAR_FULL();
    *ocptr = ONCE_COMPLETED;

    /* Wake up any waiters. */
    __futex_wake_ex(ocptr, 0, INT_MAX);
    return 0;
}
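/* Both versions park waiting threads on the once word itself with a futex.
 * The sketch below shows the raw Linux futex(2) calls that wrappers like
 * __futex_wait_ex()/__futex_wake_ex() boil down to; it is a simplified
 * illustration (private futexes, no timeout or error handling), not bionic's
 * actual wrapper code.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sleep while *addr still holds 'expected'. The kernel re-reads *addr
 * atomically with the enqueue, so a wake racing with this call is never
 * lost: if the value already changed, the call returns immediately. */
static void futex_wait(volatile void* addr, int expected) {
    syscall(SYS_futex, addr, FUTEX_WAIT_PRIVATE, expected, NULL, NULL, 0);
}

/* Wake up to 'count' threads blocked on the same word (INT_MAX wakes all,
 * as pthread_once does once initialization completes). */
static void futex_wake(volatile void* addr, int count) {
    syscall(SYS_futex, addr, FUTEX_WAKE_PRIVATE, count, NULL, NULL, 0);
}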