__LIBC_HIDDEN__
int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if (__likely(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_unlock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex? */
    tid = __get_thread()->tid;
    if (tid != MUTEX_OWNER_FROM_BITS(mvalue))
        return EPERM;

    /* If the counter is > 0, we can simply decrement it atomically.
     * Since other threads can mutate the lower state bits (and only the
     * lower state bits), use a cmpxchg to do it.
     */
    if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
        for (;;) {
            int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
            if (__likely(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
                /* success: we still own the mutex, so no memory barrier */
                return 0;
            }
            /* the value changed, so reload and loop */
            mvalue = mutex->value;
        }
    }

    /* The counter is 0, so we're going to unlock the mutex by resetting
     * its value to 'unlocked'. We need to perform a swap in order
     * to read the current state, which will be 2 if there are waiters
     * to awake.
     *
     * TODO: Change this to __bionic_swap_release when we implement it
     * to get rid of the explicit memory barrier below.
     */
    ANDROID_MEMBAR_FULL();  /* RELEASE BARRIER */
    mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);

    /* Wake one waiting thread, if any */
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
        __futex_wake_ex(&mutex->value, shared, 1);
    }
    return 0;
}
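/*
 * For context, a minimal caller-side sketch (not part of bionic) showing the
 * paths through the unlock routine above, using only the public pthread API.
 * The nested lock/unlock pair on a recursive mutex exercises the atomic
 * counter-decrement branch; the final unlock takes the swap-and-wake path;
 * unlocking without ownership hits the EPERM check.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int main(void) {
    pthread_mutexattr_t attr;
    pthread_mutex_t mutex;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex, &attr);
    pthread_mutexattr_destroy(&attr);

    pthread_mutex_lock(&mutex);   /* owner recorded, counter == 0 */
    pthread_mutex_lock(&mutex);   /* counter -> 1 */

    pthread_mutex_unlock(&mutex); /* counter > 0: atomic decrement path */
    pthread_mutex_unlock(&mutex); /* counter == 0: swap to 'unlocked', wake a waiter */

    /* We no longer own the mutex, so this must fail with EPERM. */
    if (pthread_mutex_unlock(&mutex) == EPERM) {
        printf("unlock without ownership rejected\n");
    }

    pthread_mutex_destroy(&mutex);
    return 0;
}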
int pthread_mutex_unlock(pthread_mutex_t* mutex_interface) {
#if !defined(__LP64__)
    // Some apps depend on being able to pass NULL as a mutex and get EINVAL
    // back. Don't need to worry about it for LP64 since the ABI is brand new,
    // but keep compatibility for LP32. http://b/19995172.
    if (mutex_interface == NULL) {
        return EINVAL;
    }
#endif

    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        __pthread_normal_mutex_unlock(mutex, shared);
        return 0;
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid != atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        return EPERM;
    }

    // If the counter is > 0, we can simply decrement it atomically. Only the
    // owner can touch the counter bits, and since the counter is non-zero the
    // subtraction cannot borrow into the lower state bits that other threads
    // may mutate, so a relaxed atomic subtraction is enough.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(old_state)) {
        // We still own the mutex, so a release operation is not needed.
        atomic_fetch_sub_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE,
                                  memory_order_relaxed);
        return 0;
    }

    // The counter is 0, so we're going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to
    // read the current state, which will be locked_contended if there may be
    // waiters to awake.
    // Release ordering is required to make our previous stores visible to the
    // next lock owner.
    atomic_store_explicit(&mutex->owner_tid, 0, memory_order_relaxed);
    const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    old_state = atomic_exchange_explicit(&mutex->state, unlocked, memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(old_state)) {
        __futex_wake_ex(&mutex->state, shared, 1);
    }

    return 0;
}
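/*
 * The fast path above delegates to __pthread_normal_mutex_unlock(), which is
 * not shown in this excerpt. The sketch below is a plausible reconstruction
 * under the same state-bit scheme, not the verbatim bionic source (the name
 * and the MUTEX_STATE_BITS_LOCKED_CONTENDED constant are assumed here): an
 * exchange with release ordering both drops the lock and reports whether the
 * old state was locked_contended, so the futex syscall is only paid when a
 * waiter may be sleeping.
 */
static inline void __pthread_normal_mutex_unlock_sketch(pthread_mutex_internal_t* mutex,
                                                        uint16_t shared) {
    const uint16_t unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // Release the lock; the release ordering pairs with the acquire on the
    // lock side so the next owner sees our writes.
    if (atomic_exchange_explicit(&mutex->state, unlocked,
                                 memory_order_release) == locked_contended) {
        // Someone advertised contention before we released: wake one waiter.
        __futex_wake_ex(&mutex->state, shared, 1);
    }
}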