Example #1
// This function is used by pthread_cond_broadcast and
// pthread_cond_signal to atomically decrement the counter
// then wake up 'counter' threads.
static int __pthread_cond_pulse(pthread_cond_t* cond, int counter) {
  if (__predict_false(cond == NULL)) {
    return EINVAL;
  }

  long flags = (cond->value & ~COND_COUNTER_MASK);
  while (true) {
    long old_value = cond->value;
    long new_value = ((old_value - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK) | flags;
    if (__bionic_cmpxchg(old_value, new_value, &cond->value) == 0) {
      break;
    }
  }

  // Ensure that all memory accesses previously made by this thread are
  // visible to the woken thread(s).  On the other side, the "wait"
  // code will issue any necessary barriers when locking the mutex.
  //
  // This may not strictly be necessary -- if the caller follows
  // recommended practice and holds the mutex before signaling the cond
  // var, the mutex ops will provide correct semantics.  If they don't
  // hold the mutex, they're subject to race conditions anyway.
  ANDROID_MEMBAR_FULL();

  __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
  return 0;
}
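For context, here is a plausible sketch of how pthread_cond_signal() and pthread_cond_broadcast() could be layered on top of this helper, as the comment above describes: signal wakes at most one waiter, broadcast wakes them all. These wrappers are an illustration, not code copied verbatim from bionic.

int pthread_cond_signal(pthread_cond_t* cond) {
  // Wake at most one thread blocked on the condition variable.
  return __pthread_cond_pulse(cond, 1);
}

int pthread_cond_broadcast(pthread_cond_t* cond) {
  // Wake every thread currently blocked on the condition variable.
  return __pthread_cond_pulse(cond, INT_MAX);
}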
Example #2
/* This function is used by pthread_cond_broadcast and
 * pthread_cond_signal to atomically decrement the counter
 * then wake up 'counter' threads.
 */
static int
__pthread_cond_pulse(pthread_cond_t *cond, int  counter)
{
    long flags;

    if (__unlikely(cond == NULL))
        return EINVAL;

    flags = (cond->value & ~COND_COUNTER_MASK);
    for (;;) {
        long oldval = cond->value;
        long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
                      | flags;
        if (__bionic_cmpxchg(oldval, newval, &cond->value) == 0)
            break;
    }

    /*
     * Ensure that all memory accesses previously made by this thread are
     * visible to the woken thread(s).  On the other side, the "wait"
     * code will issue any necessary barriers when locking the mutex.
     *
     * This may not strictly be necessary -- if the caller follows
     * recommended practice and holds the mutex before signaling the cond
     * var, the mutex ops will provide correct semantics.  If they don't
     * hold the mutex, they're subject to race conditions anyway.
     */
    ANDROID_MEMBAR_FULL();

    __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
    return 0;
}
Example #3
__LIBC_HIDDEN__
int __pthread_cond_timedwait_relative(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* reltime) {
  // Snapshot the condition variable's value; the futex wait below only
  // sleeps if the value is still unchanged, so a signal that arrives after
  // the mutex is released is not lost.
  int old_value = cond->value;

  // Release the mutex, sleep until the value changes or the relative
  // timeout expires, then reacquire the mutex before returning.
  pthread_mutex_unlock(mutex);
  int status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), old_value, reltime);
  pthread_mutex_lock(mutex);

  // Map the kernel's -ETIMEDOUT onto the positive POSIX error code; any
  // other wake-up (signal, broadcast, or spurious) is reported as success.
  if (status == -ETIMEDOUT) {
    return ETIMEDOUT;
  }
  return 0;
}
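The function above takes a timeout relative to now. Below is a minimal sketch of how an absolute CLOCK_REALTIME deadline could be reduced to that relative form before calling it; the wrapper name cond_timedwait_abs() is hypothetical, and bionic's real wrapper may differ.

static int cond_timedwait_abs(pthread_cond_t* cond, pthread_mutex_t* mutex,
                              const struct timespec* abstime) {
  struct timespec now;
  clock_gettime(CLOCK_REALTIME, &now);

  // Convert the absolute deadline into a relative timeout.
  struct timespec rel;
  rel.tv_sec = abstime->tv_sec - now.tv_sec;
  rel.tv_nsec = abstime->tv_nsec - now.tv_nsec;
  if (rel.tv_nsec < 0) {
    rel.tv_sec -= 1;
    rel.tv_nsec += 1000000000;
  }

  // A deadline that has already passed times out immediately.
  if (rel.tv_sec < 0) {
    return ETIMEDOUT;
  }
  return __pthread_cond_timedwait_relative(cond, mutex, &rel);
}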
Example #4
int pthread_condattr_getpshared(const pthread_condattr_t* attr, int* pshared) {
  // Report PTHREAD_PROCESS_SHARED or PTHREAD_PROCESS_PRIVATE depending on
  // whether the attribute's process-shared bit is set.
  *pshared = static_cast<int>(COND_IS_SHARED(*attr));
  return 0;
}
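A short usage sketch of the standard attribute API this getter belongs to; only portable POSIX calls are used here.

pthread_condattr_t attr;
pthread_condattr_init(&attr);
pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);

int pshared = 0;
pthread_condattr_getpshared(&attr, &pshared);   // pshared == PTHREAD_PROCESS_SHARED

pthread_cond_t cond;
pthread_cond_init(&cond, &attr);
pthread_condattr_destroy(&attr);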
Example #5
 // Reads the condition variable's state word with relaxed memory ordering
 // and tests whether its process-shared bit is set.
 bool process_shared() {
   return COND_IS_SHARED(atomic_load_explicit(&state, memory_order_relaxed));
 }
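All of these examples hinge on COND_IS_SHARED(). The following is a hedged sketch of how such a flag test is typically laid out: one low-order bit of the state (or attribute) word marks the object as process-shared. The mask value and the exact macro form are assumptions, not copied from bionic's headers; note that the earlier examples apply the macro to the condition variable itself, while the later ones pass a raw word as shown here.

// Assumed layout: bit 0 of the word marks a process-shared object.
#define COND_SHARED_MASK   0x0001
#define COND_IS_SHARED(v)  (((v) & COND_SHARED_MASK) != 0)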