Example 1
int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  memset(rwlock, 0, sizeof(pthread_rwlock_internal_t));

  if (__predict_false(attr != NULL)) {
    rwlock->pshared = __rwlockattr_getpshared(attr);
    int kind = __rwlockattr_getkind(attr);
    switch (kind) {
      case PTHREAD_RWLOCK_PREFER_READER_NP:
        rwlock->writer_nonrecursive_preferred = false;
        break;
      case PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP:
        rwlock->writer_nonrecursive_preferred = true;
        break;
      default:
        return EINVAL;
    }
    if ((*attr & RWLOCKATTR_RESERVED_MASK) != 0) {
      return EINVAL;
    }
  }

  atomic_init(&rwlock->state, 0);
  rwlock->pending_lock.init(rwlock->pshared);
  return 0;
}
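
As a point of reference, here is a minimal sketch of how this initializer is reached through the public API when a kind attribute is set. It assumes pthread_rwlockattr_setkind_np is available (a non-portable extension) and elides error handling:

#include <pthread.h>

void init_writer_preferred_lock(pthread_rwlock_t* lock) {
  pthread_rwlockattr_t attr;
  pthread_rwlockattr_init(&attr);
  // Maps to writer_nonrecursive_preferred = true in the initializer above.
  pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);

  pthread_rwlock_init(lock, &attr);   // returns 0 on success, EINVAL for an unknown kind
  pthread_rwlockattr_destroy(&attr);
}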
Example 2
int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
  // Avoid slowing down the fast path of wrlock.
  if (__predict_true(__pthread_rwlock_trywrlock(rwlock) == 0)) {
    return 0;
  }
  return __pthread_rwlock_timedwrlock(rwlock, nullptr);
}
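
A hedged usage sketch of the write-lock path above: an uncontended pthread_rwlock_wrlock takes the try-lock fast path, and the lock is released with pthread_rwlock_unlock.

#include <pthread.h>

static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;
static int g_value;

void set_value(int v) {
  pthread_rwlock_wrlock(&g_lock);   // fast path when uncontended, otherwise the timed slow path with no timeout
  g_value = v;                      // exclusive access while the write lock is held
  pthread_rwlock_unlock(&g_lock);
}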
Example 3
int pthread_rwlock_destroy(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (atomic_load_explicit(&rwlock->state, memory_order_relaxed) != 0) {
    return EBUSY;
  }
  return 0;
}
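
A small sketch of the semantics above: in this implementation, destroying a lock whose state is still non-zero fails with EBUSY, so the lock must be fully released first. The assertions are only for illustration.

#include <assert.h>
#include <errno.h>
#include <pthread.h>

void destroy_demo(void) {
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

  pthread_rwlock_rdlock(&lock);
  assert(pthread_rwlock_destroy(&lock) == EBUSY);  // state != 0 while a reader holds the lock

  pthread_rwlock_unlock(&lock);
  assert(pthread_rwlock_destroy(&lock) == 0);      // state == 0, destroy succeeds
}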
Example 4
int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  // Weak CAS retry loop: increment the reader count as long as no writer holds the lock
  // (a negative state means a writer owns it).
  while (old_state >= 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
                             old_state + 1, memory_order_acquire, memory_order_relaxed)) {
  }
  return (old_state >= 0) ? 0 : EBUSY;
}
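
A usage sketch of the non-blocking read path: tryrdlock increments the reader count only while no writer owns the lock, and returns EBUSY instead of sleeping.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;

void try_read(void) {
  int rc = pthread_rwlock_tryrdlock(&g_lock);
  if (rc == 0) {
    // Shared read access; other readers may hold the lock concurrently.
    pthread_rwlock_unlock(&g_lock);
  } else if (rc == EBUSY) {
    printf("a writer holds the lock; skipping the read\n");
  }
}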
Example 5
int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  // Weak CAS retry loop: take exclusive ownership (state -1) only if the lock is completely free.
  while (old_state == 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                                              memory_order_acquire, memory_order_relaxed)) {
  }
  if (old_state == 0) {
    atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
    return 0;
  }
  return EBUSY;
}
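
A similar sketch for the non-blocking write path: the lock is claimed only when its state is 0 (no readers, no writer), and the owning thread id is recorded so pthread_rwlock_unlock can reject callers that do not own the lock.

#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;

int try_update(int* out, int v) {
  if (pthread_rwlock_trywrlock(&g_lock) != 0) {
    return EBUSY;                   // readers or another writer are inside
  }
  *out = v;                         // exclusive section; writer_thread_id is this thread's tid
  pthread_rwlock_unlock(&g_lock);
  return 0;
}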
Example 6
int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
  if (__predict_false(old_state == 0)) {
    return EPERM;
  } else if (old_state == -1) {
    if (atomic_load_explicit(&rwlock->writer_thread_id, memory_order_relaxed) != __get_thread()->tid) {
      return EPERM;
    }
    // We're no longer the owner.
    atomic_store_explicit(&rwlock->writer_thread_id, 0, memory_order_relaxed);
    // Change state from -1 to 0.
    atomic_store_explicit(&rwlock->state, 0, memory_order_release);

  } else { // old_state > 0
    // Reduce state by 1.
    while (old_state > 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
                               old_state - 1, memory_order_release, memory_order_relaxed)) {
    }

    if (old_state <= 0) {
      return EPERM;
    } else if (old_state > 1) {
      return 0;
    }
    // old_state == 1 means this was the last reader unlocking; it has to wake up any waiters.
  }

  // If there are waiters, wake them up.
  // To avoid losing wakeups, every thread must observe the update of state before it reads
  // pending_readers/pending_writers. Using read locking as an example:
  //     read locking thread                        unlocking thread
  //      pending_readers++;                         state = 0;
  //      seq_cst fence                              seq_cst fence
  //      read state for futex_wait                  read pending_readers for futex_wake
  //
  // So when a locking thread and an unlocking thread run in parallel, we cannot end up in a
  // situation where the locking thread reads a negative state and decides to wait while the
  // unlocking thread reads pending_readers as zero and skips the wakeup.
  atomic_thread_fence(memory_order_seq_cst);
  if (__predict_false(atomic_load_explicit(&rwlock->pending_readers, memory_order_relaxed) > 0 ||
                      atomic_load_explicit(&rwlock->pending_writers, memory_order_relaxed) > 0)) {
    __futex_wake_ex(&rwlock->state, rwlock->process_shared(), INT_MAX);
  }
  return 0;
}
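
A short sketch of the error paths above: in this implementation, unlocking a lock that is not held (state == 0) returns EPERM, while the last reader's unlock drops the state to 0 and wakes any waiters.

#include <assert.h>
#include <errno.h>
#include <pthread.h>

void unlock_demo(void) {
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

  assert(pthread_rwlock_unlock(&lock) == EPERM);  // not held: old_state == 0

  pthread_rwlock_rdlock(&lock);
  assert(pthread_rwlock_unlock(&lock) == 0);      // last reader: state goes 1 -> 0, waiters are woken
}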
Example 7
int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
  if (__state_owned_by_writer(old_state)) {
    if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) != __get_thread()->tid) {
      return EPERM;
    }
    atomic_store_explicit(&rwlock->writer_tid, 0, memory_order_relaxed);
    old_state = atomic_fetch_and_explicit(&rwlock->state, ~STATE_OWNED_BY_WRITER_FLAG,
                                          memory_order_release);
    if (!__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }

  } else if (__state_owned_by_readers(old_state)) {
    old_state = atomic_fetch_sub_explicit(&rwlock->state, STATE_READER_COUNT_CHANGE_STEP,
                                          memory_order_release);
    if (!__state_is_last_reader(old_state) || !__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }

  } else {
    return EPERM;
  }

  // Wake up pending readers or writers.
  rwlock->pending_lock.lock();
  if (rwlock->pending_writer_count != 0) {
    rwlock->pending_writer_wakeup_serial++;
    rwlock->pending_lock.unlock();

    __futex_wake_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared, 1);

  } else if (rwlock->pending_reader_count != 0) {
    rwlock->pending_reader_wakeup_serial++;
    rwlock->pending_lock.unlock();

    __futex_wake_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared, INT_MAX);

  } else {
    // This can happen when the waiters have already been woken up by a timeout.
    rwlock->pending_lock.unlock();
  }
  return 0;
}
Example 8
int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (__predict_true(attr == NULL)) {
    rwlock->attr = 0;
  } else {
    switch (*attr) {
      case PTHREAD_PROCESS_SHARED:
      case PTHREAD_PROCESS_PRIVATE:
        rwlock->attr = *attr;
        break;
      default:
        return EINVAL;
    }
  }

  atomic_init(&rwlock->state, 0);
  atomic_init(&rwlock->writer_thread_id, 0);
  atomic_init(&rwlock->pending_readers, 0);
  atomic_init(&rwlock->pending_writers, 0);

  return 0;
}
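
A sketch of how the process-shared attribute stored here is typically used: the rwlock is placed in shared memory and initialized with PTHREAD_PROCESS_SHARED so the futex operations can work across processes. The mmap parameters are only illustrative.

#include <pthread.h>
#include <stddef.h>
#include <sys/mman.h>

pthread_rwlock_t* create_shared_rwlock(void) {
  pthread_rwlock_t* lock = (pthread_rwlock_t*) mmap(NULL, sizeof(pthread_rwlock_t),
                                                    PROT_READ | PROT_WRITE,
                                                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (lock == MAP_FAILED) return NULL;

  pthread_rwlockattr_t attr;
  pthread_rwlockattr_init(&attr);
  pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);  // stored in rwlock->attr above
  pthread_rwlock_init(lock, &attr);
  pthread_rwlockattr_destroy(&attr);
  return lock;                       // shareable with child processes via the mapping
}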
Example 9
int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock_interface) {
  return __pthread_rwlock_trywrlock(__get_internal_rwlock(rwlock_interface));
}
Example 10
int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
}
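
A usage sketch of the timed variant: the timeout is an absolute CLOCK_REALTIME deadline, and ETIMEDOUT is returned if the write lock cannot be acquired before it expires.

#include <errno.h>
#include <pthread.h>
#include <time.h>

int write_lock_with_timeout(pthread_rwlock_t* lock, int seconds) {
  struct timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);   // pthread_rwlock_timedwrlock expects an absolute time
  deadline.tv_sec += seconds;

  return pthread_rwlock_timedwrlock(lock, &deadline);  // 0 on success, ETIMEDOUT on expiry
}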
Example 11
int pthread_rwlock_timedrdlock_monotonic_np(pthread_rwlock_t* rwlock_interface,
                                            const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, false, abs_timeout);
}
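
The _monotonic_np variant is a non-portable extension; the false argument above appears to select the monotonic clock for the wait. A hedged usage sketch with an absolute CLOCK_MONOTONIC deadline:

#include <errno.h>
#include <pthread.h>
#include <time.h>

int read_lock_monotonic_timeout(pthread_rwlock_t* lock, int seconds) {
  struct timespec deadline;
  clock_gettime(CLOCK_MONOTONIC, &deadline);  // deadline is absolute on CLOCK_MONOTONIC
  deadline.tv_sec += seconds;

  return pthread_rwlock_timedrdlock_monotonic_np(lock, &deadline);  // 0 or ETIMEDOUT
}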
Example 12
int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, NULL);
}