Example #1
int
pthread_spin_lock (pthread_spinlock_t *lock)
{
  int val = 0;

  /* We assume that the first try mostly will be successful, thus we use
     atomic_exchange if it is not implemented by a CAS loop (we also assume
     that atomic_exchange can be faster if it succeeds, see
     ATOMIC_EXCHANGE_USES_CAS).  Otherwise, we use a weak CAS and not an
     exchange so we bail out after the first failed attempt to change the
     state.  For the subsequent attempts we use atomic_compare_and_exchange
     after we observe that the lock is not acquired.
     See also comment in pthread_spin_trylock.
     We use acquire MO to synchronize-with the release MO store in
     pthread_spin_unlock, and thus ensure that prior critical sections
     happen-before this critical section.  */
#if ! ATOMIC_EXCHANGE_USES_CAS
  /* Try to acquire the lock with an exchange instruction as this architecture
     has such an instruction and we assume it is faster than a CAS.
     The acquisition succeeds if the lock is not in an acquired state.  */
  if (__glibc_likely (atomic_exchange_acquire (lock, 1) == 0))
    return 0;
#else
  /* Try to acquire the lock with a CAS instruction as this architecture
     has no exchange instruction.  The acquisition succeeds if the lock is not
     acquired.  */
  if (__glibc_likely (atomic_compare_exchange_weak_acquire (lock, &val, 1)))
    return 0;
#endif

  do
    {
      /* The lock is contended and we need to wait.  Going straight back
	 to cmpxchg is not a good idea on many targets as that will force
	 expensive memory synchronizations among processors and penalize other
	 running threads.
	 There is no technical reason for throwing in a CAS every now and then,
	 and so far we have no evidence that it can improve performance.
	 If that were the case, we would have to adjust other spin-waiting
	 loops elsewhere, too!
	 Thus we use relaxed MO reads until we observe the lock to not be
	 acquired anymore.  */
      do
	{
	  /* TODO Back-off.  */

	  atomic_spin_nop ();

	  val = atomic_load_relaxed (lock);
	}
      while (val != 0);

      /* We need acquire memory order here for the same reason as mentioned
	 for the first try to lock the spinlock.  */
    }
  while (!atomic_compare_exchange_weak_acquire (lock, &val, 1));

  return 0;
}
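
The function above is glibc's implementation of acquiring a POSIX spin lock: an exchange-or-CAS fast path followed by a relaxed-load spin loop while the lock is contended. A minimal usage sketch follows, relying only on the standard POSIX spinlock API (pthread_spin_init, pthread_spin_lock, pthread_spin_unlock, pthread_spin_destroy); the counter, the thread count, and the worker function are illustrative names, not part of the source above. Compile with -pthread.

#include <pthread.h>
#include <stdio.h>

/* Illustrative shared state protected by the spin lock.  */
static pthread_spinlock_t lock;
static long counter;

static void *
worker (void *arg)
{
  (void) arg;
  for (int i = 0; i < 100000; i++)
    {
      /* Spins as shown above until the lock is acquired.  */
      pthread_spin_lock (&lock);
      counter++;
      pthread_spin_unlock (&lock);
    }
  return NULL;
}

int
main (void)
{
  pthread_t threads[4];

  /* PTHREAD_PROCESS_PRIVATE: the lock is used only by threads of this
     process.  */
  pthread_spin_init (&lock, PTHREAD_PROCESS_PRIVATE);

  for (int i = 0; i < 4; i++)
    pthread_create (&threads[i], NULL, worker, NULL);
  for (int i = 0; i < 4; i++)
    pthread_join (threads[i], NULL);

  pthread_spin_destroy (&lock);
  printf ("counter = %ld\n", counter);
  return 0;
}

Because every increment happens between pthread_spin_lock and pthread_spin_unlock, the final counter value is deterministic; the spin lock is appropriate here only because the critical section is very short.
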
Example #2
int
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
{
  /* Reject invalid timeouts.  */
  if (__glibc_unlikely (abstime->tv_nsec < 0)
      || __glibc_unlikely (abstime->tv_nsec >= 1000000000))
    return EINVAL;

  /* Repeat until thread terminated.  */
  int tid;
  while ((tid = atomic_load_relaxed (tidp)) != 0)
    {
      /* See exit-thread.h for details.  */
      if (tid == NACL_EXITING_TID)
	/* The thread should now be in the process of exiting, so it will
	   finish quickly enough that the timeout doesn't matter.  If any
	   thread ever stays in this state for long, there is something
	   catastrophically wrong.  */
	atomic_spin_nop ();
      else
	{
	  assert (tid > 0);

	  /* If *FUTEX == TID, wait until woken or timeout.  */
	  int err = __nacl_irt_futex.futex_wait_abs ((volatile int *) tidp,
						     tid, abstime);
	  if (err != 0)
	    {
	      if (__glibc_likely (err == ETIMEDOUT))
		return err;
	      assert (err == EAGAIN);
	    }
	}
    }

  return 0;
}
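
__lll_timedwait_tid above waits until the thread id stored in *tidp becomes 0 (the thread has terminated) or the absolute deadline in abstime expires, using the NaCl IRT futex to block instead of spinning. Below is a minimal caller-side sketch. The wait_for_exit wrapper, the two-second timeout, and the assumption that the deadline is measured against CLOCK_REALTIME are illustrative, not taken from the source.

#include <errno.h>
#include <time.h>

/* Declaration matching the excerpt above; the function itself is internal
   to glibc's NaCl port.  */
extern int __lll_timedwait_tid (int *tidp, const struct timespec *abstime);

/* Hypothetical helper: wait at most two seconds for the thread whose id is
   stored in *TIDP to exit.  Returns 0 on termination, ETIMEDOUT on timeout,
   EINVAL for a malformed deadline.  */
static int
wait_for_exit (int *tidp)
{
  struct timespec abstime;

  /* futex_wait_abs takes an absolute deadline, so build one from the current
     time plus the desired relative timeout (CLOCK_REALTIME assumed here).  */
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;

  return __lll_timedwait_tid (tidp, &abstime);
}
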