Example #1
static int __pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout, clockid_t clock) {
  timespec ts;

  int mvalue = mutex->value;
  int mtype  = (mvalue & MUTEX_TYPE_MASK);
  int shared = (mvalue & MUTEX_SHARED_MASK);

  // Handle common case first.
  if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // Fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0.
    if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
      ANDROID_MEMBAR_FULL();
      return 0;
    }

    // Loop while needed.
    while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
      if (__timespec_from_absolute(&ts, abs_timeout, clock) < 0) {
        return ETIMEDOUT;
      }
      __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
    }
    ANDROID_MEMBAR_FULL();
    return 0;
  }

  // Do we already own this recursive or error-check mutex?
  pid_t tid = __get_thread()->tid;
  if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
    return _recursive_increment(mutex, mvalue, mtype);
  }

  // The following implements the same loop as pthread_mutex_lock_impl
  // but adds checks to ensure that the operation never exceeds the
  // absolute expiration time.
  mtype |= shared;

  // First try a quick lock.
  if (mvalue == mtype) {
    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
      ANDROID_MEMBAR_FULL();
      return 0;
    }
    mvalue = mutex->value;
  }

  while (true) {
    // If the value is 'unlocked', try to acquire it directly.
    // NOTE: set the state to 2 since we know there is contention.
    if (mvalue == mtype) { // Unlocked.
      mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
      if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
        ANDROID_MEMBAR_FULL();
        return 0;
      }
      // The value changed before we could lock it. We need to check
      // the time to avoid livelocks, reload the value, then loop again.
      if (__timespec_from_absolute(&ts, abs_timeout, clock) < 0) {
        return ETIMEDOUT;
      }

      mvalue = mutex->value;
      continue;
    }

    // The value is locked. If 'uncontended', try to switch its state
    // to 'contended' to ensure we get woken up later.
    if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
      int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
      if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
        // This failed because the value changed, reload it.
        mvalue = mutex->value;
      } else {
        // This succeeded, update mvalue.
        mvalue = newval;
      }
    }

    // Check time and update 'ts'.
    if (__timespec_from_absolute(&ts, abs_timeout, clock) < 0) {
      return ETIMEDOUT;
    }

    // Only wait to be woken up if the state is '2', otherwise we'll
    // simply loop right now. This can happen when the second cmpxchg
    // in our loop failed because the mutex was unlocked by another thread.
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
      if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
      mvalue = mutex->value;
    }
  }
  /* NOTREACHED */
}
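
Both timeout checks in Example #1 go through __timespec_from_absolute, whose
definition is not shown. FUTEX_WAIT takes a relative timeout, so the absolute
deadline has to be converted into "time remaining" on every loop iteration.
A minimal sketch of what such a helper plausibly looks like (the exact bionic
definition may differ; the return-minus-one-on-expiry convention is inferred
from the call sites above):

#include <time.h>

// Sketch: store the time remaining until 'abstime' (measured on 'clock')
// in '*ts'; return -1 if the deadline has already passed.
static int __timespec_from_absolute(timespec* ts, const timespec* abstime, clockid_t clock) {
  clock_gettime(clock, ts);
  ts->tv_sec  = abstime->tv_sec  - ts->tv_sec;
  ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
  if (ts->tv_nsec < 0) {  // borrow one second
    ts->tv_sec--;
    ts->tv_nsec += 1000000000;
  }
  if (ts->tv_nsec < 0 || ts->tv_sec < 0) {
    return -1;  // deadline already expired
  }
  return 0;
}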
Example #2
__LIBC_HIDDEN__
int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
{
    clockid_t        clock = CLOCK_MONOTONIC;
    struct timespec  abstime;
    struct timespec  ts;
    int               mvalue, mtype, tid, shared;

    /* compute absolute expiration time */
    __timespec_to_relative_msec(&abstime, msecs, clock);

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if ( __likely(mtype == MUTEX_TYPE_BITS_NORMAL) )
    {
        const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
        const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

        /* fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0 */
        if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* loop while needed */
        while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
        }
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    /* Do we already own this recursive or error-check mutex? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* the following implements the same loop as pthread_mutex_lock_impl
     * but adds checks to ensure that the operation never exceeds the
     * absolute expiration time.
     */
    mtype |= shared;

    /* first try a quick lock */
    if (mvalue == mtype) {
        mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        if (__likely(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
        mvalue = mutex->value;
    }

    for (;;) {
        /* if the value is 'unlocked', try to acquire it directly */
        /* NOTE: set the state to 2 since we know there is contention */
        if (mvalue == mtype) { /* unlocked */
            mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
                ANDROID_MEMBAR_FULL();
                return 0;
            }
            /* the value changed before we could lock it. We need to check
             * the time to avoid livelocks, reload the value, then loop again. */
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

            mvalue = mutex->value;
            continue;
        }

        /* The value is locked. If 'uncontended', try to switch its state
         * to 'contended' to ensure we get woken up later. */
        if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
            if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
                /* this failed because the value changed, reload it */
                mvalue = mutex->value;
            } else {
                /* this succeeded, update mvalue */
                mvalue = newval;
            }
        }

        /* check time and update 'ts' */
        if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
            return EBUSY;

        /* Only wait to be woken up if the state is '2', otherwise we'll
         * simply loop right now. This can happen when the second cmpxchg
         * in our loop failed because the mutex was unlocked by another
         * thread.
         */
        if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
            /* __futex_wait_ex returns a negative errno on failure, as
             * checked in the __pthread_mutex_timedlock version above */
            if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == -ETIMEDOUT) {
                return EBUSY;
            }
            mvalue = mutex->value;
        }
    }
    /* NOTREACHED */
}
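
Example #2 is the older, non-portable entry point: it takes a relative timeout
in milliseconds and reports expiry as EBUSY rather than ETIMEDOUT. The portable
POSIX equivalent is pthread_mutex_timedlock, which instead takes an absolute
CLOCK_REALTIME deadline. A caller-side sketch of the portable pattern
(lock_with_timeout_ms is an illustrative name, not a real API):

#include <pthread.h>
#include <time.h>

/* Lock 'm', giving up after 'msecs' milliseconds.
 * Returns 0 on success, ETIMEDOUT on expiry. */
static int lock_with_timeout_ms(pthread_mutex_t* m, unsigned msecs) {
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);  /* timedlock deadlines use CLOCK_REALTIME */
    deadline.tv_sec  += msecs / 1000;
    deadline.tv_nsec += (long)(msecs % 1000) * 1000000;
    if (deadline.tv_nsec >= 1000000000) {      /* normalize nanoseconds */
        deadline.tv_sec++;
        deadline.tv_nsec -= 1000000000;
    }
    return pthread_mutex_timedlock(m, &deadline);
}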
Example #3
__LIBC_HIDDEN__
int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    mvalue = mutex->value;
    mtype = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle normal case first */
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
        _normal_lock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* Add in shared state to avoid extra 'or' operations below */
    mtype |= shared;

    /* First, if the mutex is unlocked, try to quickly acquire it.
     * In the optimistic case where this works, set the state to 1 to
     * indicate locked with no contention */
    if (mvalue == mtype) {
        int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        if (__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
        /* argh, the value changed, reload before entering the loop */
        mvalue = mutex->value;
    }

    for (;;) {
        int newval;

        /* if the mutex is unlocked, its value should be 'mtype' and
         * we try to acquire it by setting its owner and state atomically.
         * NOTE: We set the state to 2 since we _know_ there is contention
         * when we are in this loop. This ensures all waiters will be
         * woken up when the mutex is unlocked.
         */
        if (mvalue == mtype) {
            newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            /* TODO: Change this to __bionic_cmpxchg_acquire when we
             *        implement it to get rid of the explicit memory
             *        barrier below.
             */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* the mutex is already locked by another thread, if its state is 1
         * we will change it to 2 to indicate contention. */
        if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            mvalue = newval;
        }

        /* wait until the mutex is unlocked */
        __futex_wait_ex(&mutex->value, shared, mvalue, NULL);

        mvalue = mutex->value;
    }
    /* NOTREACHED */
}
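
The _normal_lock fast path above implements the classic three-state futex
protocol (0 = unlocked, 1 = locked with no waiters, 2 = locked with possible
waiters) described in Drepper's "Futexes Are Tricky". A self-contained sketch
of that protocol with the matching unlock side, using C11 atomics and raw
futex syscalls (all names here are illustrative, not bionic's):

#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

typedef struct { atomic_int state; } toy_mutex;  /* 0, 1 or 2, as above */

static void toy_futex_wait(atomic_int* addr, int expected) {
    syscall(SYS_futex, addr, FUTEX_WAIT_PRIVATE, expected, NULL, NULL, 0);
}
static void toy_futex_wake_one(atomic_int* addr) {
    syscall(SYS_futex, addr, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
}

static void toy_lock(toy_mutex* m) {
    int expected = 0;
    /* Fast path: 0 -> 1. The acquire pairs with the release in toy_unlock. */
    if (atomic_compare_exchange_strong_explicit(&m->state, &expected, 1,
            memory_order_acquire, memory_order_relaxed)) {
        return;
    }
    /* Slow path: force state 2 so the eventual unlocker knows to wake us,
     * then sleep while the lock is held. */
    while (atomic_exchange_explicit(&m->state, 2, memory_order_acquire) != 0) {
        toy_futex_wait(&m->state, 2);
    }
}

static void toy_unlock(toy_mutex* m) {
    /* Only pay for the wake syscall when contention was recorded. */
    if (atomic_exchange_explicit(&m->state, 0, memory_order_release) == 2) {
        toy_futex_wake_one(&m->state);
    }
}

The function below is the modern bionic rewrite of the same timed-lock loop:
it replaces the hand-rolled __bionic_cmpxchg plus ANDROID_MEMBAR_FULL pairing
with C11 atomics and explicit memory orderings, which also resolves the
__bionic_cmpxchg_acquire TODO in the comment above.
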
static int __pthread_mutex_lock_with_timeout(pthread_mutex_internal_t* mutex,
                                             bool use_realtime_clock,
                                             const timespec* abs_timeout_or_null) {
    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
        return __pthread_normal_mutex_lock(mutex, shared, use_realtime_clock, abs_timeout_or_null);
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EDEADLK;
        }
        return __recursive_increment(mutex, old_state);
    }

    const uint16_t unlocked           = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const uint16_t locked_contended   = mtype | shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // First, if the mutex is unlocked, try to quickly acquire it.
    // In the optimistic case where this works, set the state to locked_uncontended.
    if (old_state == unlocked) {
        // If exchanged successfully, an acquire fence is required to make
        // all memory accesses made by other threads visible to the current CPU.
        if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                             locked_uncontended, memory_order_acquire, memory_order_relaxed))) {
            atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
            return 0;
        }
    }

    ScopedTrace trace("Contending for pthread mutex");

    while (true) {
        if (old_state == unlocked) {
            // NOTE: We set the state to locked_contended since we _know_ there
            // is contention when we are in this loop. This ensures all waiters
            // will be woken up when the mutex is unlocked.

            // If exchanged successfully, an acquire fence is required to make
            // all memory accesses made by other threads visible to the current CPU.
            if (__predict_true(atomic_compare_exchange_weak_explicit(&mutex->state,
                                                                     &old_state, locked_contended,
                                                                     memory_order_acquire,
                                                                     memory_order_relaxed))) {
                atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
                return 0;
            }
            continue;
        } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(old_state)) {
            // We should set it to locked_contended before going to sleep. This makes
            // sure waiters will be woken up eventually.

            int new_state = MUTEX_STATE_BITS_FLIP_CONTENTION(old_state);
            if (__predict_false(!atomic_compare_exchange_weak_explicit(&mutex->state,
                                                                       &old_state, new_state,
                                                                       memory_order_relaxed,
                                                                       memory_order_relaxed))) {
                continue;
            }
            old_state = new_state;
        }

        int result = check_timespec(abs_timeout_or_null, true);
        if (result != 0) {
            return result;
        }
        // We are in locked_contended state, sleep until someone wakes us up.
        if (__recursive_or_errorcheck_mutex_wait(mutex, shared, old_state, use_realtime_clock,
                                                 abs_timeout_or_null) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
        old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    }
}
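
One detail worth noting in the rewrite: the one-shot fast path uses
atomic_compare_exchange_strong_explicit, because a spurious failure there would
needlessly send the caller down the contended slow path, while the retry loops
use the weak variant, whose occasional spurious failure only costs one extra
iteration. The general pattern, as an illustrative sketch:

#include <stdatomic.h>
#include <stdbool.h>

static bool try_acquire_once(atomic_uint* state, unsigned unlocked, unsigned locked) {
    unsigned expected = unlocked;
    /* Strong CAS: fails only if *state really differed from 'expected'. */
    return atomic_compare_exchange_strong_explicit(
        state, &expected, locked, memory_order_acquire, memory_order_relaxed);
}

static void acquire_loop(atomic_uint* state, unsigned unlocked, unsigned locked) {
    unsigned expected = unlocked;
    /* Weak CAS: may fail spuriously, which the loop absorbs for free. */
    while (!atomic_compare_exchange_weak_explicit(
            state, &expected, locked, memory_order_acquire, memory_order_relaxed)) {
        expected = unlocked;  /* the CAS stored the observed value here; reset it */
    }
}

Unlike acquire_loop, the real code above does not spin: on failure it flips the
state to contended and parks the thread on a futex until an unlock wakes it.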