Example #1
/*
 * Lock a non-recursive mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static __inline__ void
_normal_lock(pthread_mutex_t*  mutex, int shared)
{
    /* convenience shortcuts */
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    /*
     * The common case is an unlocked mutex, so we begin by trying to
     * change the lock's state from 0 (UNLOCKED) to 1 (LOCKED).
     * __bionic_cmpxchg() returns 0 if it made the swap successfully.
     * If the result is nonzero, this lock is already held by another thread.
     */
    if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) != 0) {
        const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2 (CONTENDED). We need to
         * swap in the new state value and then wait until somebody wakes us up.
         *
         * __bionic_swap() returns the previous value.  We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock.  If
         * not, another thread still holds the lock and we wait again.
         *
         * The second argument to the __futex_wait() call is compared
         * against the current value.  If it doesn't match, __futex_wait()
         * returns immediately (otherwise, it sleeps for a time specified
         * by the third argument; 0 means sleep forever).  This ensures
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        while (__bionic_swap(locked_contended, &mutex->value) != unlocked)
            __futex_wait_ex(&mutex->value, shared, locked_contended, 0);
    }
    ANDROID_MEMBAR_FULL();
}
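Example #2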
/*
 * Lock a mutex of type NORMAL.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static inline __always_inline int __pthread_normal_mutex_lock(pthread_mutex_internal_t* mutex,
                                                              uint16_t shared,
                                                              bool use_realtime_clock,
                                                              const timespec* abs_timeout_or_null) {
    if (__predict_true(__pthread_normal_mutex_trylock(mutex, shared) == 0)) {
        return 0;
    }
    int result = check_timespec(abs_timeout_or_null, true);
    if (result != 0) {
        return result;
    }

    ScopedTrace trace("Contending for pthread mutex");

    const uint16_t unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We want to go to sleep until the mutex is available, which requires
    // promoting it to locked_contended. We need to swap in the new state
    // and then wait until somebody wakes us up.
    // An atomic_exchange is used to compete with other threads for the lock.
    // If it returns unlocked, we have acquired the lock; otherwise another
    // thread still holds the lock and we should wait again.
    // If the lock is acquired, an acquire fence is needed to make all memory
    // accesses made by other threads visible to the current CPU.
    while (atomic_exchange_explicit(&mutex->state, locked_contended,
                                    memory_order_acquire) != unlocked) {
        if (__futex_wait_ex(&mutex->state, shared, locked_contended, use_realtime_clock,
                            abs_timeout_or_null) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
    }
    return 0;
}
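The 0/1/2 state machine described in the comments of both functions above can be illustrated outside of bionic. Below is a minimal, self-contained sketch of the same idea written directly against the Linux futex syscall with C11 atomics; the toy_* names and the helper function are invented for illustration and are not bionic APIs.

/*
 * Minimal sketch of the 0/1/2 futex mutex described above (illustrative only,
 * not bionic code): 0 = unlocked, 1 = locked/no waiters, 2 = locked/waiters.
 */
#include <stdatomic.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

typedef struct { atomic_int state; } toy_mutex;

static long toy_futex(atomic_int* word, int op, int value) {
    return syscall(SYS_futex, word, op, value, NULL, NULL, 0);
}

static void toy_lock(toy_mutex* m) {
    int expected = 0;
    /* Fast path: 0 -> 1 (locked, no contention). */
    if (atomic_compare_exchange_strong_explicit(&m->state, &expected, 1,
                                                memory_order_acquire,
                                                memory_order_relaxed)) {
        return;
    }
    /* Slow path: force state 2 and sleep until the exchange returns 0 (unlocked). */
    while (atomic_exchange_explicit(&m->state, 2, memory_order_acquire) != 0) {
        /* Sleeps only while the word still equals 2, so a wake-up cannot be missed. */
        toy_futex(&m->state, FUTEX_WAIT_PRIVATE, 2);
    }
}

static void toy_unlock(toy_mutex* m) {
    /* Only state 2 can have sleepers, so only then pay for a FUTEX_WAKE. */
    if (atomic_exchange_explicit(&m->state, 0, memory_order_release) == 2) {
        toy_futex(&m->state, FUTEX_WAKE_PRIVATE, 1);
    }
}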
Example #3
int sem_timedwait(sem_t* sem, const timespec* abs_timeout) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);

  // POSIX says we need to try to decrement the semaphore
  // before checking the timeout value. Note that if the
  // value is currently 0, __sem_trydec() does nothing.
  if (__sem_trydec(sem_count_ptr) > 0) {
    return 0;
  }

  // Check it as per POSIX.
  int result = check_timespec(abs_timeout, false);
  if (result != 0) {
    errno = result;
    return -1;
  }

  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  while (true) {
    // Try to grab the semaphore. If the value was 0, this will also change it to -1.
    if (__sem_dec(sem_count_ptr) > 0) {
      return 0;
    }

    // Contention detected. Wait for a wakeup event.
    int result = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, true, abs_timeout);

    // Return in case of timeout or interrupt.
    if (result == -ETIMEDOUT || result == -EINTR) {
      errno = -result;
      return -1;
    }
  }
}
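From the caller's side, sem_timedwait() takes an absolute CLOCK_REALTIME deadline, so the usual pattern is clock_gettime() plus an offset. A small usage sketch using only standard POSIX calls (the wait_with_deadline name is illustrative):

#include <errno.h>
#include <semaphore.h>
#include <time.h>

/* Wait up to two seconds for 'sem'; returns 0 on success, -1 on timeout or error. */
static int wait_with_deadline(sem_t* sem) {
  struct timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);  /* sem_timedwait() uses CLOCK_REALTIME. */
  deadline.tv_sec += 2;

  while (sem_timedwait(sem, &deadline) == -1) {
    if (errno == EINTR) continue;            /* Interrupted by a signal: retry. */
    return -1;                               /* ETIMEDOUT or another error. */
  }
  return 0;
}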
Example #4
static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }

  while (true) {
    int result = __pthread_rwlock_tryrdlock(rwlock);
    if (result == 0 || result == EAGAIN) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count++;

    // We rely on the fact that all atomic exchange operations on the same object (here it is
    // rwlock->state) always appear to occur in a single total order. If the pending flag is added
    // before unlocking, the unlocking thread will wake up the waiter. Otherwise, we will see that
    // the state is unlocked and will not wait anymore.
    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_READERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_reader_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      futex_result = __futex_wait_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared,
                                     old_serial, true, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count--;
    if (rwlock->pending_reader_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_READERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}
Example #5
__LIBC_HIDDEN__
int __pthread_cond_timedwait_relative(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* reltime) {
  int old_value = cond->value;

  pthread_mutex_unlock(mutex);
  int status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), old_value, reltime);
  pthread_mutex_lock(mutex);

  if (status == -ETIMEDOUT) {
    return ETIMEDOUT;
  }
  return 0;
}
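Example #6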
static inline __always_inline int __recursive_or_errorcheck_mutex_wait(
                                                      pthread_mutex_internal_t* mutex,
                                                      uint16_t shared,
                                                      uint16_t old_state,
                                                      const timespec* rel_timeout) {
// __futex_wait always waits on a 32-bit value, but state is only 16 bits. For a normal mutex, the
// owner_tid field in the mutex is not used. On 64-bit devices, the __pad field in the mutex is not
// used. But when a recursive or errorcheck mutex is used on 32-bit devices, we need to include the
// owner_tid value in the value argument for __futex_wait; otherwise we may always get an EAGAIN error.

#if defined(__LP64__)
  return __futex_wait_ex(&mutex->state, shared, old_state, rel_timeout);

#else
  // This implementation works only when the layout of pthread_mutex_internal_t matches the
  // expectations below, and it assumes that Android always runs on little-endian devices.
  static_assert(offsetof(pthread_mutex_internal_t, state) == 0, "");
  static_assert(offsetof(pthread_mutex_internal_t, owner_tid) == 2, "");

  uint32_t owner_tid = atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed);
  return __futex_wait_ex(&mutex->state, shared, (owner_tid << 16) | old_state, rel_timeout);
#endif
}
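The 32-bit value passed to __futex_wait on 32-bit devices must match the word the kernel reads at &mutex->state, i.e. the 16-bit state at offset 0 together with the 16-bit owner_tid at offset 2 on a little-endian machine. Below is a stand-alone illustration of that packing; the struct is a hypothetical stand-in that only mirrors the offsets asserted above, not the real pthread_mutex_internal_t.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in mirroring the asserted layout: state at offset 0,
 * owner_tid at offset 2 (little-endian assumed, as in the comment above). */
struct mutex_words {
  uint16_t state;
  uint16_t owner_tid;
};

int main(void) {
  struct mutex_words m = { .state = 0x4002, .owner_tid = 0x1234 };

  uint32_t word;
  memcpy(&word, &m, sizeof(word));  /* What a 32-bit futex compare would read. */

  /* On little-endian, the low half is 'state' and the high half is 'owner_tid',
   * which is exactly (owner_tid << 16) | state. */
  assert(word == (((uint32_t)m.owner_tid << 16) | m.state));
  return 0;
}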
Example #7
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }
  while (true) {
    int result = __pthread_rwlock_trywrlock(rwlock);
    if (result == 0) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_write_lock(old_state)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count++;

    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_WRITERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_writer_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_write_lock(old_state)) {
      futex_result = __futex_wait_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared,
                                     old_serial, true, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count--;
    if (rwlock->pending_writer_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_WRITERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}
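Example #8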
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
                                                                  memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state == 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                                                memory_order_acquire, memory_order_relaxed)) {
        // writer_thread_id is protected by the rwlock and can only be modified by the thread
        // that owns the write lock. Other threads may read it for EDEADLK error checking, so a
        // relaxed atomic operation is sufficient.
        atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wake-ups, the pending_writers increment must be observed before
      // futex_wait by all threads. A seq_cst fence is used here instead of a seq_cst operation,
      // because only a seq_cst fence can ensure sequential consistency for the non-atomic
      // operations in futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}
Example #9
/* NOTE: this implementation doesn't support an init function that throws a C++ exception
 *       or calls fork()
 */
int pthread_once(pthread_once_t* once_control, void (*init_routine)(void)) {
  static_assert(sizeof(atomic_int) == sizeof(pthread_once_t),
                "pthread_once_t should actually be atomic_int in implementation.");

  // We prefer casting to atomic_int instead of declaring pthread_once_t to be atomic_int directly,
  // because the latter pollutes pthread.h and causes an error when compiling libcxx.
  atomic_int* once_control_ptr = reinterpret_cast<atomic_int*>(once_control);

  // First check if the once is already initialized. This will be the common
  // case and we want to make this as fast as possible. Note that this still
  // requires a load_acquire operation here to ensure that all the
  // stores performed by the initialization function are observable on
  // this CPU after we exit.
  int old_value = atomic_load_explicit(once_control_ptr, memory_order_acquire);

  while (true) {
    if (__predict_true(old_value == ONCE_INITIALIZATION_COMPLETE)) {
      return 0;
    }

    // Try to atomically set the initialization underway flag. This requires a compare exchange
    // in a loop, and we may need to exit prematurely if the initialization is complete.
    if (!atomic_compare_exchange_weak_explicit(once_control_ptr, &old_value,
                                               ONCE_INITIALIZATION_UNDERWAY,
                                               memory_order_acquire, memory_order_acquire)) {
      continue;
    }

    if (old_value == ONCE_INITIALIZATION_NOT_YET_STARTED) {
      // We got here first, we can handle the initialization.
      (*init_routine)();

      // Do a store_release indicating that initialization is complete.
      atomic_store_explicit(once_control_ptr, ONCE_INITIALIZATION_COMPLETE, memory_order_release);

      // Wake up any waiters, if any.
      __futex_wake_ex(once_control_ptr, 0, INT_MAX);
      return 0;
    }

    // The initialization is underway, wait for its finish.
    __futex_wait_ex(once_control_ptr, 0, old_value, NULL);
    old_value = atomic_load_explicit(once_control_ptr, memory_order_acquire);
  }
}
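For completeness, the caller side is plain POSIX: every thread funnels through pthread_once() and either returns immediately on the fast path above or sleeps on the futex until the initializer finishes. The names below are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_once_t g_once = PTHREAD_ONCE_INIT;

static void init_once(void) {
  /* Runs exactly once, even if many threads race into library_call(). */
  puts("initializing shared state");
}

void library_call(void) {
  pthread_once(&g_once, init_once);
  /* ... shared state is guaranteed to be initialized here ... */
}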
Example #10
static int __pthread_cond_timedwait(pthread_cond_internal_t* cond, pthread_mutex_t* mutex,
                                    bool use_realtime_clock, const timespec* abs_timeout_or_null) {
  int result = check_timespec(abs_timeout_or_null, true);
  if (result != 0) {
    return result;
  }

  unsigned int old_state = atomic_load_explicit(&cond->state, memory_order_relaxed);
  pthread_mutex_unlock(mutex);
  int status = __futex_wait_ex(&cond->state, cond->process_shared(), old_state,
                               use_realtime_clock, abs_timeout_or_null);
  pthread_mutex_lock(mutex);

  if (status == -ETIMEDOUT) {
    return ETIMEDOUT;
  }
  return 0;
}
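Because the futex wait above can return as soon as cond->state changes (or spuriously), callers of pthread_cond_timedwait() are expected to re-check their predicate in a loop. A standard usage sketch with illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_cond = PTHREAD_COND_INITIALIZER;
static bool            g_ready = false;

/* Wait until g_ready becomes true or 'deadline' (CLOCK_REALTIME) passes.
 * Returns 0 on success or ETIMEDOUT on timeout. */
static int wait_until_ready(const struct timespec* deadline) {
  int result = 0;
  pthread_mutex_lock(&g_lock);
  while (!g_ready && result == 0) {
    /* May wake spuriously or after a stolen wakeup, hence the predicate loop. */
    result = pthread_cond_timedwait(&g_cond, &g_lock, deadline);
  }
  pthread_mutex_unlock(&g_lock);
  return result;
}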
Example #11
int sem_wait(sem_t* sem) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  while (true) {
    if (__sem_dec(sem_count_ptr) > 0) {
      return 0;
    }

    int result = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, false, nullptr);
    if (bionic_get_application_target_sdk_version() >= __ANDROID_API_N__) {
      if (result == -EINTR) {
        errno = EINTR;
        return -1;
      }
    }
  }
}
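As the target-SDK check above implies, applications targeting API level N or later can see sem_wait() fail with EINTR, so portable callers restart the wait. A minimal sketch with an illustrative helper name:

#include <errno.h>
#include <semaphore.h>

/* Restart sem_wait() when it is interrupted by a signal. */
static int wait_restarting(sem_t* sem) {
  int rc;
  do {
    rc = sem_wait(sem);
  } while (rc == -1 && errno == EINTR);
  return rc;
}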
Example #12
static int __pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout, clockid_t clock) {
  timespec ts;

  int mvalue = mutex->value;
  int mtype  = (mvalue & MUTEX_TYPE_MASK);
  int shared = (mvalue & MUTEX_SHARED_MASK);

  // Handle common case first.
  if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // Fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0.
    if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
      ANDROID_MEMBAR_FULL();
      return 0;
    }

    // Loop while needed.
    while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
      if (__timespec_from_absolute(&ts, abs_timeout, clock) < 0) {
        return ETIMEDOUT;
      }
      __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
    }
    ANDROID_MEMBAR_FULL();
    return 0;
  }

  // Do we already own this recursive or error-check mutex?
  pid_t tid = __get_thread()->tid;
  if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
    return _recursive_increment(mutex, mvalue, mtype);
  }

  // The following implements the same loop as pthread_mutex_lock_impl
  // but adds checks to ensure that the operation never exceeds the
  // absolute expiration time.
  mtype |= shared;

  // First try a quick lock.
  if (mvalue == mtype) {
    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
      ANDROID_MEMBAR_FULL();
      return 0;
    }
    mvalue = mutex->value;
  }

  while (true) {
    // If the value is 'unlocked', try to acquire it directly.
    // NOTE: put state to 2 since we know there is contention.
    if (mvalue == mtype) { // Unlocked.
      mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
      if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
        ANDROID_MEMBAR_FULL();
        return 0;
      }
      // The value changed before we could lock it. We need to check
      // the time to avoid livelocks, reload the value, then loop again.
      if (__timespec_from_absolute(&ts, abs_timeout, clock) < 0) {
        return ETIMEDOUT;
      }

      mvalue = mutex->value;
      continue;
    }

    // The value is locked. If 'uncontended', try to switch its state
    // to 'contended' to ensure we get woken up later.
    if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
      int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
      if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
        // This failed because the value changed, reload it.
        mvalue = mutex->value;
      } else {
        // This succeeded, update mvalue.
        mvalue = newval;
      }
    }

    // Check time and update 'ts'.
    if (__timespec_from_absolute(&ts, abs_timeout, clock) < 0) {
      return ETIMEDOUT;
    }

    // Only wait to be woken up if the state is '2', otherwise we'll
    // simply loop right now. This can happen when the second cmpxchg
    // in our loop failed because the mutex was unlocked by another thread.
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
      if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
      mvalue = mutex->value;
    }
  }
  /* NOTREACHED */
}
Example #13
__LIBC_HIDDEN__
int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    mvalue = mutex->value;
    mtype = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle normal case first */
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
        _normal_lock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* Add in shared state to avoid extra 'or' operations below */
    mtype |= shared;

    /* First, if the mutex is unlocked, try to quickly acquire it.
     * In the optimistic case where this works, set the state to 1 to
     * indicate locked with no contention */
    if (mvalue == mtype) {
        int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        if (__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
        /* argh, the value changed, reload before entering the loop */
        mvalue = mutex->value;
    }

    for (;;) {
        int newval;

        /* if the mutex is unlocked, its value should be 'mtype' and
         * we try to acquire it by setting its owner and state atomically.
         * NOTE: We put the state to 2 since we _know_ there is contention
         * when we are in this loop. This ensures all waiters will be
         * woken when the mutex is later unlocked.
         */
        if (mvalue == mtype) {
            newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            /* TODO: Change this to __bionic_cmpxchg_acquire when we
             *        implement it to get rid of the explicit memory
             *        barrier below.
             */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* The mutex is already locked by another thread. If its state is 1,
         * we will change it to 2 to indicate contention. */
        if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            mvalue = newval;
        }

        /* wait until the mutex is unlocked */
        __futex_wait_ex(&mutex->value, shared, mvalue, NULL);

        mvalue = mutex->value;
    }
    /* NOTREACHED */
}
Example #14
__LIBC_HIDDEN__
int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
{
    clockid_t        clock = CLOCK_MONOTONIC;
    struct timespec  abstime;
    struct timespec  ts;
    int               mvalue, mtype, tid, shared;

    /* compute absolute expiration time */
    __timespec_to_relative_msec(&abstime, msecs, clock);

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if ( __likely(mtype == MUTEX_TYPE_BITS_NORMAL) )
    {
        const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
        const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

        /* fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0 */
        if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* loop while needed */
        while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
        }
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* the following implements the same loop as pthread_mutex_lock_impl
     * but adds checks to ensure that the operation never exceeds the
     * absolute expiration time.
     */
    mtype |= shared;

    /* first try a quick lock */
    if (mvalue == mtype) {
        mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        if (__likely(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
        mvalue = mutex->value;
    }

    for (;;) {
        struct timespec ts;

        /* if the value is 'unlocked', try to acquire it directly */
        /* NOTE: put state to 2 since we know there is contention */
        if (mvalue == mtype) { /* unlocked */
            mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
                ANDROID_MEMBAR_FULL();
                return 0;
            }
            /* the value changed before we could lock it. We need to check
             * the time to avoid livelocks, reload the value, then loop again. */
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            mvalue = mutex->value;
            continue;
        }

        /* The value is locked. If 'uncontended', try to switch its state
         * to 'contended' to ensure we get woken up later. */
        if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
            if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
                /* this failed because the value changed, reload it */
                mvalue = mutex->value;
            } else {
                /* this succeeded, update mvalue */
                mvalue = newval;
            }
        }

        /* check time and update 'ts' */
        if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
            return EBUSY;

        /* Only wait to be woken up if the state is '2', otherwise we'll
         * simply loop right now. This can happen when the second cmpxchg
         * in our loop failed because the mutex was unlocked by another
         * thread.
         */
        if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
            if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == -ETIMEDOUT) {
                return EBUSY;
            }
            mvalue = mutex->value;
        }
    }
    /* NOTREACHED */
}
Example #15
/* NOTE: this implementation doesn't support an init function that throws a C++ exception
 *       or calls fork()
 */
int pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
{
    volatile pthread_once_t* ocptr = once_control;

    /* PTHREAD_ONCE_INIT is 0, so we use the following bit flags:
     *
     *   bit 0 set  -> initialization is under way
     *   bit 1 set  -> initialization is complete
     */
#define ONCE_INITIALIZING           (1 << 0)
#define ONCE_COMPLETED              (1 << 1)

    /* First check if the once is already initialized. This will be the common
     * case and we want to make this as fast as possible. Note that this still
     * requires a load_acquire operation here to ensure that all the
     * stores performed by the initialization function are observable on
     * this CPU after we exit.
     */
    if (__likely((*ocptr & ONCE_COMPLETED) != 0)) {
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    for (;;) {
        /* Try to atomically set the INITIALIZING flag.
         * This requires a cmpxchg loop, and we may need
         * to exit prematurely if we detect that
         * COMPLETED is now set.
         */
        int32_t  oldval, newval;

        do {
            oldval = *ocptr;
            if ((oldval & ONCE_COMPLETED) != 0)
                break;

            newval = oldval | ONCE_INITIALIZING;
        } while (__bionic_cmpxchg(oldval, newval, ocptr) != 0);

        if ((oldval & ONCE_COMPLETED) != 0) {
            /* We detected that COMPLETED was set while in our loop */
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        if ((oldval & ONCE_INITIALIZING) == 0) {
            /* We got there first, we can jump out of the loop to
             * handle the initialization */
            break;
        }

        /* Another thread is running the initialization and hasn't completed
         * yet, so wait for it, then try again. */
        __futex_wait_ex(ocptr, 0, oldval, NULL);
    }

    /* call the initialization function. */
    (*init_routine)();

    /* Do a store_release indicating that initialization is complete */
    ANDROID_MEMBAR_FULL();
    *ocptr = ONCE_COMPLETED;

    /* Wake up any waiters, if any */
    __futex_wake_ex(ocptr, 0, INT_MAX);

    return 0;
}