Example No. 1
int
attribute_protected
__pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
{
  for (;;)
    {
      int oldval;
      int newval;

      /* Pseudo code:
	 newval = __fork_generation | 1;
	 oldval = *once_control;
	 if ((oldval & 2) == 0)
	   *once_control = newval;
	 Do this atomically.
      */
      do
	{
	  newval = __fork_generation | 1;
	  oldval = *once_control;
	  if (oldval & 2)
	    break;
	} while (atomic_compare_and_exchange_val_acq (once_control, newval, oldval) != oldval);

      /* Check if the initializer has already been done.  */
      if ((oldval & 2) != 0)
	return 0;

      /* Check if another thread already runs the initializer.	*/
      if ((oldval & 1) == 0)
	break;

      /* Check whether the initializer execution was interrupted by a fork.  */
      if (oldval != newval)
	break;

      /* Same generation, some other thread was faster. Wait.  */
      lll_futex_wait (once_control, oldval, LLL_PRIVATE);
    }

  /* This thread is the first here.  Do the initialization.
     Register a cleanup handler so that in case the thread gets
     interrupted the initialization can be restarted.  */
  pthread_cleanup_push (clear_once_control, once_control);

  init_routine ();

  pthread_cleanup_pop (0);

  /* Say that the initialization is done.  */
  *once_control = __fork_generation | 2;

  /* Wake up all other threads.  */
  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);

  return 0;
}
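
The state word above packs three facts into one value: bit 0 means an initialization is in progress, bit 1 means it has completed, and the remaining bits carry the fork generation so that a fork during initialization restarts it. For reference, a minimal caller-side sketch of the public API this implements (init_state and init_once are illustrative names):

#include <pthread.h>
#include <stdio.h>

static pthread_once_t init_state = PTHREAD_ONCE_INIT;

static void
init_once (void)
{
  /* Runs exactly once, even if many threads race to get here.  */
  puts ("initialized");
}

void
use_shared_resource (void)
{
  pthread_once (&init_state, init_once);
  /* The initializer is guaranteed to have finished past this point.  */
}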
Example No. 2
void
__lll_lock_wait (int *futex)
{
  do
    {
      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
      if (oldval != 0)
	lll_futex_wait (futex, 2);
    }
  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
}
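
This is the slow path of the classic three-state futex lock: 0 = unlocked, 1 = locked with no waiters, 2 = locked with possible waiters. A hedged sketch of the fast path that would sit in front of it, written with GCC __atomic builtins instead of glibc's internal lll_lock/lll_unlock macros (an assumption for illustration):

static inline void
lll_lock_sketch (int *futex)
{
  int expected = 0;
  /* Uncontended case: 0 -> 1 and we own the lock without a syscall.  */
  if (!__atomic_compare_exchange_n (futex, &expected, 1, 0,
                                    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    __lll_lock_wait (futex);    /* Contended: block in the futex.  */
}

static inline void
lll_unlock_sketch (int *futex)
{
  /* If the old value was 2, a waiter may be asleep: wake one.  */
  if (__atomic_exchange_n (futex, 0, __ATOMIC_RELEASE) == 2)
    lll_futex_wake (futex, 1);  /* Hypothetical wake wrapper.  */
}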
Example No. 3
int
pthread_spin_lock (pthread_spinlock_t *lock)
{
  /* atomic_exchange usually takes fewer instructions than
     atomic_compare_and_exchange.  On the other hand,
     atomic_compare_and_exchange potentially generates less bus traffic
     when the lock is locked.
     We assume that the first try will usually succeed, so we use
     atomic_exchange.  For the subsequent tries we use
     atomic_compare_and_exchange.  */
  if (atomic_exchange_acq (lock, 1) == 0)
    return 0;

  do
    {
      /* The lock is contended and we need to wait.  Going straight back
	 to cmpxchg is not a good idea on many targets as that will force
	 expensive memory synchronizations among processors and penalize other
	 running threads.
	 On the other hand, we do want to update memory state on the local core
	 once in a while to avoid spinning indefinitely until some event that
	 will happen to update local memory as a side-effect.  */
      if (SPIN_LOCK_READS_BETWEEN_CMPXCHG >= 0)
	{
	  int wait = SPIN_LOCK_READS_BETWEEN_CMPXCHG;

	  while (*lock != 0 && wait > 0)
	    --wait;
	}
      else
	{
	  while (*lock != 0)
	    ;
	}
    }
  while (atomic_compare_and_exchange_val_acq (lock, 1, 0) != 0);

  return 0;
}
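
The exchange-then-spin-on-reads structure above is a test-and-test-and-set lock: the cheap plain reads let the cache line stay shared until it is worth retrying the atomic operation. A short usage sketch with the standard POSIX spin-lock API:

#include <pthread.h>

static pthread_spinlock_t sl;
static long counter;

void
setup (void)
{
  pthread_spin_init (&sl, PTHREAD_PROCESS_PRIVATE);
}

void
bump (void)
{
  pthread_spin_lock (&sl);      /* Keep critical sections short.  */
  ++counter;
  pthread_spin_unlock (&sl);
}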
Example No. 4
int
__lll_timedlock_wait (int *futex, const struct timespec *abstime)
{
  /* Reject invalid timeouts.  */
  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  do
    {
      struct timeval tv;
      struct timespec rt;

      /* Get the current time.  */
      (void) __gettimeofday (&tv, NULL);

      /* Compute relative timeout.  */
      rt.tv_sec = abstime->tv_sec - tv.tv_sec;
      rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
      if (rt.tv_nsec < 0)
	{
	  rt.tv_nsec += 1000000000;
	  --rt.tv_sec;
	}

      /* Already timed out?  */
      if (rt.tv_sec < 0)
	return ETIMEDOUT;

      /* Wait.  */
      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
      if (oldval != 0)
	lll_futex_timed_wait (futex, 2, &rt);
    }
  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);

  return 0;
}
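
The loop recomputes the relative timeout from the caller's absolute CLOCK_REALTIME deadline on every iteration, so a wakeup that fails to take the lock still honors the original deadline. A sketch of how a caller might build such a deadline (deadline_in_ms is an illustrative helper, not part of the library):

#include <time.h>

static void
deadline_in_ms (struct timespec *abstime, long ms)
{
  clock_gettime (CLOCK_REALTIME, abstime);
  abstime->tv_sec += ms / 1000;
  abstime->tv_nsec += (ms % 1000) * 1000000;
  if (abstime->tv_nsec >= 1000000000)
    {
      abstime->tv_nsec -= 1000000000;
      ++abstime->tv_sec;
    }
}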
Example No. 5
   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <sysdep.h>
#include <lowlevellock.h>
#include <sys/time.h>

void
__lll_lock_wait (lll_lock_t *futex, int private)
{
  do
    {
      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
      if (oldval != 0)
	lll_futex_wait (futex, 2, private);
    }
  while (atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0);
}

void
__lll_lock_wait_private (lll_lock_t *futex)
{
  do
    {
      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
      if (oldval != 0)
	lll_futex_wait (futex, 2, LLL_PRIVATE);
    }
  while (atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0);
}
Example No. 6
/* Test various atomic.h macros.  */
static int
do_test (void)
{
  atomic_t mem, expected;
  int ret = 0;

#ifdef atomic_compare_and_exchange_val_acq
  mem = 24;
  if (atomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (atomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (atomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 64;
  if (atomic_exchange_acq (&mem, 31) != 64
      || mem != 31)
    {
      puts ("atomic_exchange_acq test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add_acq (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add_rel (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  atomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("atomic_add test failed");
      ret = 1;
    }

  mem = -1;
  atomic_increment (&mem);
  if (mem != 0)
    {
      puts ("atomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_increment_val (&mem) != 3)
    {
      puts ("atomic_increment_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_increment_and_test (&mem)
      || mem != 1)
    {
      puts ("atomic_increment_and_test test 1 failed");
      ret = 1;
    }

  mem = 35;
  if (atomic_increment_and_test (&mem)
      || mem != 36)
    {
      puts ("atomic_increment_and_test test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_increment_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_increment_and_test test 3 failed");
      ret = 1;
    }

  mem = 17;
  atomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("atomic_decrement test failed");
      ret = 1;
    }

  if (atomic_decrement_val (&mem) != 15)
    {
      puts ("atomic_decrement_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_and_test (&mem)
      || mem != -1)
    {
      puts ("atomic_decrement_and_test test 1 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_decrement_and_test (&mem)
      || mem != 14)
    {
      puts ("atomic_decrement_and_test test 2 failed");
      ret = 1;
    }

  mem = 1;
  if (! atomic_decrement_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_decrement_and_test test 3 failed");
      ret = 1;
    }

  mem = 1;
  if (atomic_decrement_if_positive (&mem) != 1
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_if_positive (&mem) != 0
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_decrement_if_positive (&mem) != -1
      || mem != -1)
    {
      puts ("atomic_decrement_if_positive test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (! atomic_add_negative (&mem, 10)
      || mem != -2)
    {
      puts ("atomic_add_negative test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_add_negative (&mem, 100)
      || mem != 100)
    {
      puts ("atomic_add_negative test 2 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_add_negative (&mem, -10)
      || mem != 5)
    {
      puts ("atomic_add_negative test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (atomic_add_negative (&mem, 14)
      || mem != 2)
    {
      puts ("atomic_add_negative test 4 failed");
      ret = 1;
    }

  mem = 0;
  if (! atomic_add_negative (&mem, -1)
      || mem != -1)
    {
      puts ("atomic_add_negative test 5 failed");
      ret = 1;
    }

  mem = -31;
  if (atomic_add_negative (&mem, 31)
      || mem != 0)
    {
      puts ("atomic_add_negative test 6 failed");
      ret = 1;
    }

  mem = -34;
  if (atomic_add_zero (&mem, 31)
      || mem != -3)
    {
      puts ("atomic_add_zero test 1 failed");
      ret = 1;
    }

  mem = -36;
  if (! atomic_add_zero (&mem, 36)
      || mem != 0)
    {
      puts ("atomic_add_zero test 2 failed");
      ret = 1;
    }

  mem = 113;
  if (atomic_add_zero (&mem, -13)
      || mem != 100)
    {
      puts ("atomic_add_zero test 3 failed");
      ret = 1;
    }

  mem = -18;
  if (atomic_add_zero (&mem, 20)
      || mem != 2)
    {
      puts ("atomic_add_zero test 4 failed");
      ret = 1;
    }

  mem = 10;
  if (atomic_add_zero (&mem, -20)
      || mem != -10)
    {
      puts ("atomic_add_zero test 5 failed");
      ret = 1;
    }

  mem = 10;
  if (! atomic_add_zero (&mem, -10)
      || mem != 0)
    {
      puts ("atomic_add_zero test 6 failed");
      ret = 1;
    }

  mem = 0;
  atomic_bit_set (&mem, 1);
  if (mem != 2)
    {
      puts ("atomic_bit_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  atomic_bit_set (&mem, 3);
  if (mem != 8)
    {
      puts ("atomic_bit_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  atomic_bit_set (&mem, 35);
  if (mem != 0x800000010LL)
    {
      puts ("atomic_bit_set test 3 failed");
      ret = 1;
    }
#endif

  mem = 0;
  if (atomic_bit_test_set (&mem, 1)
      || mem != 2)
    {
      puts ("atomic_bit_test_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  if (! atomic_bit_test_set (&mem, 3)
      || mem != 8)
    {
      puts ("atomic_bit_test_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  if (atomic_bit_test_set (&mem, 35)
      || mem != 0x800000010LL)
    {
      puts ("atomic_bit_test_set test 3 failed");
      ret = 1;
    }

  mem = 0x100000000LL;
  if (! atomic_bit_test_set (&mem, 32)
      || mem != 0x100000000LL)
    {
      puts ("atomic_bit_test_set test 4 failed");
      ret = 1;
    }
#endif

#ifdef catomic_compare_and_exchange_val_acq
  mem = 24;
  if (catomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (catomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (catomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (catomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("catomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  catomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("catomic_add test failed");
      ret = 1;
    }

  mem = -1;
  catomic_increment (&mem);
  if (mem != 0)
    {
      puts ("catomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_increment_val (&mem) != 3)
    {
      puts ("catomic_increment_val test failed");
      ret = 1;
    }

  mem = 17;
  catomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("catomic_decrement test failed");
      ret = 1;
    }

  if (catomic_decrement_val (&mem) != 15)
    {
      puts ("catomic_decrement_val test failed");
      ret = 1;
    }

  /* Tests for C11-like atomics.  */
  mem = 11;
  if (atomic_load_relaxed (&mem) != 11 || atomic_load_acquire (&mem) != 11)
    {
      puts ("atomic_load_{relaxed,acquire} test failed");
      ret = 1;
    }

  atomic_store_relaxed (&mem, 12);
  if (mem != 12)
    {
      puts ("atomic_store_relaxed test failed");
      ret = 1;
    }
  atomic_store_release (&mem, 13);
  if (mem != 13)
    {
      puts ("atomic_store_release test failed");
      ret = 1;
    }

  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_relaxed (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    {
      puts ("atomic_compare_exchange_weak_relaxed test 1 failed");
      ret = 1;
    }
  if (atomic_compare_exchange_weak_relaxed (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    {
      puts ("atomic_compare_exchange_weak_relaxed test 2 failed");
      ret = 1;
    }
  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_acquire (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    {
      puts ("atomic_compare_exchange_weak_acquire test 1 failed");
      ret = 1;
    }
  if (atomic_compare_exchange_weak_acquire (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    {
      puts ("atomic_compare_exchange_weak_acquire test 2 failed");
      ret = 1;
    }
  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_release (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    {
      puts ("atomic_compare_exchange_weak_release test 1 failed");
      ret = 1;
    }
  if (atomic_compare_exchange_weak_release (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    {
      puts ("atomic_compare_exchange_weak_release test 2 failed");
      ret = 1;
    }

  mem = 23;
  if (atomic_exchange_acquire (&mem, 42) != 23 || mem != 42)
    {
      puts ("atomic_exchange_acquire test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_exchange_release (&mem, 42) != 23 || mem != 42)
    {
      puts ("atomic_exchange_release test failed");
      ret = 1;
    }

  mem = 23;
  if (atomic_fetch_add_relaxed (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_relaxed test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_fetch_add_acquire (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_acquire test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_fetch_add_release (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_release test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_fetch_add_acq_rel (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_acq_rel test failed");
      ret = 1;
    }

  mem = 3;
  if (atomic_fetch_and_acquire (&mem, 2) != 3 || mem != 2)
    {
      puts ("atomic_fetch_and_acquire test failed");
      ret = 1;
    }

  mem = 4;
  if (atomic_fetch_or_relaxed (&mem, 2) != 4 || mem != 6)
    {
      puts ("atomic_fetch_or_relaxed test failed");
      ret = 1;
    }
  mem = 4;
  if (atomic_fetch_or_acquire (&mem, 2) != 4 || mem != 6)
    {
      puts ("atomic_fetch_or_acquire test failed");
      ret = 1;
    }

  /* This is a single-threaded test, so we can't test the effects of the
     fences.  */
  atomic_thread_fence_acquire ();
  atomic_thread_fence_release ();
  atomic_thread_fence_seq_cst ();

  return ret;
}
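
The C11-like tests near the end depend on the compare-exchange contract: on success the expected value is left alone, and on failure it is overwritten with the value actually observed. The same contract expressed with standard <stdatomic.h>, as a sketch:

#include <stdatomic.h>
#include <assert.h>

static void
weak_cas_demo (void)
{
  atomic_int mem = 14;
  int expected = 14;

  /* The weak form may fail spuriously, hence the loop: 14 -> 25.  */
  while (!atomic_compare_exchange_weak (&mem, &expected, 25))
    ;                           /* expected is reloaded on each failure */
  assert (atomic_load (&mem) == 25);

  /* Genuine failure: mem is 25, not 14, so expected becomes 25.  */
  expected = 14;
  if (!atomic_compare_exchange_weak (&mem, &expected, 14))
    assert (expected == 25);
}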
Example No. 7
int
pthread_mutex_timedlock (
     pthread_mutex_t *mutex,
     const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime need not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
			    PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  goto out;
	}

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
	goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
	return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
	goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  result = lll_timedlock (mutex->__data.__lock, abstime,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  break;
		}

#ifdef BUSY_WAIT_NOP
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (lll_trylock (mutex->__data.__lock) != 0);

	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id | (oldval & FUTEX_WAITERS);

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);
	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  */
	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
					 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }

	  if (result == ETIMEDOUT || result == EINVAL)
	    goto out;

	  oldval = result;
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      id, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  The timeout value must be a relative value.
	       Convert it.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);

	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1,
				      abstime);
	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
	      {
		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
		  return ETIMEDOUT;

		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
		  {
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
		    /* ESRCH can happen only for non-robust PI mutexes where
		       the owner of the lock died.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
			    || !robust);

		    /* Delay the thread until the timeout is reached.
		       Then return ETIMEDOUT.  */
		    struct timespec reltime;
		    struct timespec now;

		    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
				      &now);
		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		    if (reltime.tv_nsec < 0)
		      {
			reltime.tv_nsec += 1000000000;
			--reltime.tv_sec;
		      }
		    if (reltime.tv_sec >= 0)
		      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
			continue;

		    return ETIMEDOUT;
		  }

		return INTERNAL_SYSCALL_ERRNO (e, __err);
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  */
	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
	}
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		result = EINVAL;
	      failpp:
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return result;
	      }

	    result = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (result)
	      return result;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						     ceilval | 1, ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  {
		    /* Reject invalid timeouts.  */
		    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
		      {
			result = EINVAL;
			goto failpp;
		      }

		    struct timeval tv;
		    struct timespec rt;

		    /* Get the current time.  */
		    (void) gettimeofday (&tv, NULL);

		    /* Compute relative timeout.  */
		    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
		    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
		    if (rt.tv_nsec < 0)
		      {
			rt.tv_nsec += 1000000000;
			--rt.tv_sec;
		      }

		    /* Already timed out?  */
		    if (rt.tv_sec < 0)
		      {
			result = ETIMEDOUT;
			goto failpp;
		      }

		    lll_futex_timed_wait (&mutex->__data.__lock,
					  ceilval | 2, &rt,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  }
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
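
A sketch of a typical caller, handling the ETIMEDOUT result produced above (lock_with_timeout is an illustrative name):

#include <pthread.h>
#include <time.h>
#include <errno.h>

int
lock_with_timeout (pthread_mutex_t *m)
{
  struct timespec abstime;
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;          /* give up after roughly two seconds */

  int err = pthread_mutex_timedlock (m, &abstime);
  if (err == ETIMEDOUT)
    return -1;                  /* could not acquire in time */
  return err;                   /* 0 on success, or e.g. EOWNERDEAD */
}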
Example No. 8
/* Try to get a file descriptor for the shared memory segment
   containing the database.  */
static struct mapped_database *
get_mapping (request_type type, const char *key,
	     struct mapped_database **mappedp)
{
  struct mapped_database *result = NO_MAPPING;
#ifdef SCM_RIGHTS
  const size_t keylen = strlen (key) + 1;
  char resdata[keylen];
  int saved_errno = errno;

  int mapfd = -1;

  /* Send the request.  */
  struct iovec iov[2];
  request_header req;

  int sock = open_socket ();
  if (sock < 0)
    goto out;

  req.version = NSCD_VERSION;
  req.type = type;
  req.key_len = keylen;

  iov[0].iov_base = &req;
  iov[0].iov_len = sizeof (req);
  iov[1].iov_base = (void *) key;
  iov[1].iov_len = keylen;

  if (__builtin_expect (TEMP_FAILURE_RETRY (__writev (sock, iov, 2))
			!= iov[0].iov_len + iov[1].iov_len, 0))
    /* We cannot even write the request.  */
    goto out_close2;

  /* Room for the data sent along with the file descriptor.  We expect
     the key name back.  */
  iov[0].iov_base = resdata;
  iov[0].iov_len = keylen;

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 1,
			.msg_control = buf.bytes,
			.msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));

  /* This access is well-aligned since BUF is correctly aligned for an
     int and CMSG_DATA preserves this alignment.  */
  *(int *) CMSG_DATA (cmsg) = -1;

  msg.msg_controllen = cmsg->cmsg_len;

  if (wait_on_socket (sock) <= 0)
    goto out_close2;

# ifndef MSG_NOSIGNAL
#  define MSG_NOSIGNAL 0
# endif
  if (__builtin_expect (TEMP_FAILURE_RETRY (__recvmsg (sock, &msg,
						       MSG_NOSIGNAL))
			!= keylen, 0))
    goto out_close2;

  mapfd = *(int *) CMSG_DATA (cmsg);

  if (__builtin_expect (CMSG_FIRSTHDR (&msg)->cmsg_len
			!= CMSG_LEN (sizeof (int)), 0))
    goto out_close;

  struct stat64 st;
  if (__builtin_expect (strcmp (resdata, key) != 0, 0)
      || __builtin_expect (fstat64 (mapfd, &st) != 0, 0)
      || __builtin_expect (st.st_size < sizeof (struct database_pers_head), 0))
    goto out_close;

  struct database_pers_head head;
  if (__builtin_expect (TEMP_FAILURE_RETRY (__pread (mapfd, &head,
						     sizeof (head), 0))
			!= sizeof (head), 0))
    goto out_close;

  if (__builtin_expect (head.version != DB_VERSION, 0)
      || __builtin_expect (head.header_size != sizeof (head), 0)
      /* This really should not happen but who knows, maybe the update
	 thread got stuck.  */
      || __builtin_expect (! head.nscd_certainly_running
			   && head.timestamp + MAPPING_TIMEOUT < time (NULL),
			   0))
    goto out_close;

  size_t size = (sizeof (head) + roundup (head.module * sizeof (ref_t), ALIGN)
		 + head.data_size);

  if (__builtin_expect (st.st_size < size, 0))
    goto out_close;

  /* The file is large enough, map it now.  */
  void *mapping = __mmap (NULL, size, PROT_READ, MAP_SHARED, mapfd, 0);
  if (__builtin_expect (mapping != MAP_FAILED, 1))
    {
      /* Allocate a record for the mapping.  */
      struct mapped_database *newp = malloc (sizeof (*newp));
      if (newp == NULL)
	{
	  /* Ugh, after all we went through the memory allocation failed.  */
	  __munmap (mapping, size);
	  goto out_close;
	}

      newp->head = mapping;
      newp->data = ((char *) mapping + head.header_size
		    + roundup (head.module * sizeof (ref_t), ALIGN));
      newp->mapsize = size;
      /* Set counter to 1 to show it is usable.  */
      newp->counter = 1;

      result = newp;
    }

 out_close:
  __close (mapfd);
 out_close2:
  __close (sock);
 out:
  __set_errno (saved_errno);
#endif	/* SCM_RIGHTS */

  struct mapped_database *oldval = *mappedp;
  *mappedp = result;

  if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
    __nscd_unmap (oldval);

  return result;
}
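
get_mapping receives the database file descriptor through an SCM_RIGHTS control message. For context, a hedged sketch of the matching sending side using the standard sendmsg/CMSG macros; this is a generic helper for illustration, not the actual nscd server code:

#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
send_fd (int sock, int fd, const void *data, size_t datalen)
{
  struct iovec iov = { .iov_base = (void *) data, .iov_len = datalen };
  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                        .msg_control = buf.bytes,
                        .msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));
  memcpy (CMSG_DATA (cmsg), &fd, sizeof (int));
  msg.msg_controllen = cmsg->cmsg_len;

  return sendmsg (sock, &msg, 0) < 0 ? -1 : 0;
}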


struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
		    struct locked_map_ptr *mapptr, int *gc_cyclep)
{
  struct mapped_database *cur = mapptr->mapped;
  if (cur == NO_MAPPING)
    return cur;

  int cnt = 0;
  while (atomic_compare_and_exchange_val_acq (&mapptr->lock, 1, 0) != 0)
    {
      // XXX Best number of rounds?
      if (++cnt > 5)
	return NO_MAPPING;

      atomic_delay ();
    }

  cur = mapptr->mapped;

  if (__builtin_expect (cur != NO_MAPPING, 1))
    {
      /* If not mapped or timestamp not updated, request new map.  */
      if (cur == NULL
	  || (cur->head->nscd_certainly_running == 0
	      && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL)))
	cur = get_mapping (type, name, &mapptr->mapped);

      if (__builtin_expect (cur != NO_MAPPING, 1))
	{
	  if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
				0))
	    cur = NO_MAPPING;
	  else
	    atomic_increment (&cur->counter);
	}
    }

  mapptr->lock = 0;

  return cur;
}
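
Note that the lock acquisition above is deliberately bounded: a handful of CAS rounds, then give up and return NO_MAPPING rather than block a lookup. The same pattern as a small C11 sketch (try_lock_bounded is an illustrative name):

#include <stdatomic.h>
#include <stdbool.h>

static bool
try_lock_bounded (atomic_int *lock, int rounds)
{
  for (int i = 0; i < rounds; ++i)
    {
      int expected = 0;
      if (atomic_compare_exchange_strong (lock, &expected, 1))
        return true;            /* got the lock */
      /* A brief pause would go here (atomic_delay in the original).  */
    }
  return false;                 /* caller bails out, as with NO_MAPPING */
}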


const struct datahead *
__nscd_cache_search (request_type type, const char *key, size_t keylen,
		     const struct mapped_database *mapped)
{
  unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;

  ref_t work = mapped->head->array[hash];
  while (work != ENDREF)
    {
      struct hashentry *here = (struct hashentry *) (mapped->data + work);

      if (type == here->type && keylen == here->len
	  && memcmp (key, mapped->data + here->key, keylen) == 0)
	{
	  /* We found the entry.  Increment the appropriate counter.  */
	  const struct datahead *dh
	    = (struct datahead *) (mapped->data + here->packet);

	  /* See whether we must ignore the entry or whether something
	     is wrong because garbage collection is in progress.  */
	  if (dh->usable && ((char *) dh + dh->allocsize
			     <= (char *) mapped->head + mapped->mapsize))
	    return dh;
	}

      work = here->next;
    }

  return NULL;
}
Example No. 9
/* Test various atomic.h macros.  */
static int
do_test (void)
{
  atomic_t mem;
  int ret = 0;

#ifdef atomic_compare_and_exchange_val_acq
  mem = 24;
  if (atomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (atomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (atomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 64;
  if (atomic_exchange_acq (&mem, 31) != 64
      || mem != 31)
    {
      puts ("atomic_exchange_acq test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  atomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("atomic_add test failed");
      ret = 1;
    }

  mem = -1;
  atomic_increment (&mem);
  if (mem != 0)
    {
      puts ("atomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_increment_val (&mem) != 3)
    {
      puts ("atomic_increment_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_increment_and_test (&mem)
      || mem != 1)
    {
      puts ("atomic_increment_and_test test 1 failed");
      ret = 1;
    }

  mem = 35;
  if (atomic_increment_and_test (&mem)
      || mem != 36)
    {
      puts ("atomic_increment_and_test test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_increment_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_increment_and_test test 3 failed");
      ret = 1;
    }

  mem = 17;
  atomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("atomic_decrement test failed");
      ret = 1;
    }

  if (atomic_decrement_val (&mem) != 15)
    {
      puts ("atomic_decrement_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_and_test (&mem)
      || mem != -1)
    {
      puts ("atomic_decrement_and_test test 1 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_decrement_and_test (&mem)
      || mem != 14)
    {
      puts ("atomic_decrement_and_test test 2 failed");
      ret = 1;
    }

  mem = 1;
  if (! atomic_decrement_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_decrement_and_test test 3 failed");
      ret = 1;
    }

  mem = 1;
  if (atomic_decrement_if_positive (&mem) != 1
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_if_positive (&mem) != 0
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_decrement_if_positive (&mem) != -1
      || mem != -1)
    {
      puts ("atomic_decrement_if_positive test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (! atomic_add_negative (&mem, 10)
      || mem != -2)
    {
      puts ("atomic_add_negative test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_add_negative (&mem, 100)
      || mem != 100)
    {
      puts ("atomic_add_negative test 2 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_add_negative (&mem, -10)
      || mem != 5)
    {
      puts ("atomic_add_negative test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (atomic_add_negative (&mem, 14)
      || mem != 2)
    {
      puts ("atomic_add_negative test 4 failed");
      ret = 1;
    }

  mem = 0;
  if (! atomic_add_negative (&mem, -1)
      || mem != -1)
    {
      puts ("atomic_add_negative test 5 failed");
      ret = 1;
    }

  mem = -31;
  if (atomic_add_negative (&mem, 31)
      || mem != 0)
    {
      puts ("atomic_add_negative test 6 failed");
      ret = 1;
    }

  mem = -34;
  if (atomic_add_zero (&mem, 31)
      || mem != -3)
    {
      puts ("atomic_add_zero test 1 failed");
      ret = 1;
    }

  mem = -36;
  if (! atomic_add_zero (&mem, 36)
      || mem != 0)
    {
      puts ("atomic_add_zero test 2 failed");
      ret = 1;
    }

  mem = 113;
  if (atomic_add_zero (&mem, -13)
      || mem != 100)
    {
      puts ("atomic_add_zero test 3 failed");
      ret = 1;
    }

  mem = -18;
  if (atomic_add_zero (&mem, 20)
      || mem != 2)
    {
      puts ("atomic_add_zero test 4 failed");
      ret = 1;
    }

  mem = 10;
  if (atomic_add_zero (&mem, -20)
      || mem != -10)
    {
      puts ("atomic_add_zero test 5 failed");
      ret = 1;
    }

  mem = 10;
  if (! atomic_add_zero (&mem, -10)
      || mem != 0)
    {
      puts ("atomic_add_zero test 6 failed");
      ret = 1;
    }

  mem = 0;
  atomic_bit_set (&mem, 1);
  if (mem != 2)
    {
      puts ("atomic_bit_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  atomic_bit_set (&mem, 3);
  if (mem != 8)
    {
      puts ("atomic_bit_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  atomic_bit_set (&mem, 35);
  if (mem != 0x800000010LL)
    {
      puts ("atomic_bit_set test 3 failed");
      ret = 1;
    }
#endif

  mem = 0;
  if (atomic_bit_test_set (&mem, 1)
      || mem != 2)
    {
      puts ("atomic_bit_test_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  if (! atomic_bit_test_set (&mem, 3)
      || mem != 8)
    {
      puts ("atomic_bit_test_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  if (atomic_bit_test_set (&mem, 35)
      || mem != 0x800000010LL)
    {
      puts ("atomic_bit_test_set test 3 failed");
      ret = 1;
    }

  mem = 0x100000000LL;
  if (! atomic_bit_test_set (&mem, 32)
      || mem != 0x100000000LL)
    {
      puts ("atomic_bit_test_set test 4 failed");
      ret = 1;
    }
#endif

#ifdef catomic_compare_and_exchange_val_acq
  mem = 24;
  if (catomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (catomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (catomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (catomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("catomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  catomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("catomic_add test failed");
      ret = 1;
    }

  mem = -1;
  catomic_increment (&mem);
  if (mem != 0)
    {
      puts ("catomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_increment_val (&mem) != 3)
    {
      puts ("catomic_increment_val test failed");
      ret = 1;
    }

  mem = 17;
  catomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("catomic_decrement test failed");
      ret = 1;
    }

  if (catomic_decrement_val (&mem) != 15)
    {
      puts ("catomic_decrement_val test failed");
      ret = 1;
    }

  return ret;
}
Example No. 10
int
pthread_spin_trylock (pthread_spinlock_t *lock)
{
  return atomic_compare_and_exchange_val_acq (lock, 1, 0) ? EBUSY : 0;
}
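
A usage sketch: with trylock a caller can skip the protected work instead of spinning when the lock is busy (bump_if_free is an illustrative name):

#include <pthread.h>
#include <errno.h>

int
bump_if_free (pthread_spinlock_t *lock, long *counter)
{
  if (pthread_spin_trylock (lock) == EBUSY)
    return 0;                   /* someone else holds it; do nothing */
  ++*counter;
  pthread_spin_unlock (lock);
  return 1;
}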
Example No. 11
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id;
#ifdef NO_INCR
	      newval |= FUTEX_WAITERS;
#else
	      newval |= (oldval & FUTEX_WAITERS);
#endif

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);

	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  If we are not supposed
		 to increment __nusers we actually have to decrement
		 it here.  */
#ifdef NO_INCR
	      --mutex->__data.__nusers;
#endif

	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int newval = id;
#ifdef NO_INCR
	newval |= FUTEX_WAITERS;
#endif
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      newval, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);
	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1, 0);

	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
	      {
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
		/* ESRCH can happen only for non-robust PI mutexes where
		   the owner of the lock died.  */
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

		/* Delay the thread indefinitely.  */
		while (1)
		  pause_not_cancel ();
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  If we are not supposed to
	       increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
	    --mutex->__data.__nusers;
#endif

	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return EINVAL;
	      }

	    int retval = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (retval)
	      return retval;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
						     ceilval | 2,
#else
						     ceilval | 1,
#endif
						     ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
				  PTHREAD_MUTEX_PSHARED (mutex));
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
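
Both robust branches above hand EOWNERDEAD or ENOTRECOVERABLE back to the caller. A sketch of how application code consumes those results with the standard robust-mutex API (robust_lock is an illustrative name):

#include <pthread.h>
#include <errno.h>

int
robust_lock (pthread_mutex_t *m)
{
  int err = pthread_mutex_lock (m);
  if (err == EOWNERDEAD)
    {
      /* The previous owner died holding the mutex; the protected state
         is suspect until repaired and marked consistent again.  */
      /* ... restore invariants here ...  */
      pthread_mutex_consistent (m);
      err = 0;
    }
  return err;                   /* ENOTRECOVERABLE if never repaired */
}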