Code example #1
File: lowlevellock.c  Project: Jaden-J/uClibc
void
__lll_lock_wait_private (int *futex)
{
  /* The lock is already marked contended; sleep right away.  */
  if (*futex == 2)
    lll_futex_wait (futex, 2, LLL_PRIVATE);

  /* Take the lock in the contended state (value 2); as long as the
     previous value was nonzero somebody else held it, so sleep.  */
  while (atomic_exchange_acq (futex, 2) != 0)
    lll_futex_wait (futex, 2, LLL_PRIVATE);
}
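In this lock the futex word encodes three states: 0 = unlocked, 1 = locked with no waiters, 2 = locked with possible waiters; the loop above is only the slow path. Below is a minimal sketch of the fast path and the unlock that pair with it, assuming the glibc-internal primitives used above; the sketch_ names are hypothetical.

static inline void
sketch_lll_lock (int *futex)
{
  /* Uncontended case: take the lock 0 -> 1 without a syscall.  */
  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
    /* Contended: fall back to the wait loop shown above.  */
    __lll_lock_wait_private (futex);
}

static inline void
sketch_lll_unlock (int *futex)
{
  /* Release the lock; an old value of 2 means somebody may sleep.  */
  if (atomic_exchange_rel (futex, 0) > 1)
    lll_futex_wake (futex, 1, LLL_PRIVATE);
}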
Code example #2
File: pthread_once.c  Project: Jaden-J/uClibc
int
attribute_protected
__pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
{
  for (;;)
    {
      int oldval;
      int newval;

      /* Pseudo code:
	 newval = __fork_generation | 1;
	 oldval = *once_control;
	 if ((oldval & 2) == 0)
	   *once_control = newval;
	 Do this atomically.
      */
      do
	{
	  newval = __fork_generation | 1;
	  oldval = *once_control;
	  if (oldval & 2)
	    break;
	} while (atomic_compare_and_exchange_val_acq (once_control, newval, oldval) != oldval);

      /* Check if the initializer has already been done.  */
      if ((oldval & 2) != 0)
	return 0;

      /* Check whether another thread is already running the initializer.  */
      if ((oldval & 1) == 0)
	break;

      /* Check whether the initializer execution was interrupted by a fork.  */
      if (oldval != newval)
	break;

      /* Same generation, some other thread was faster. Wait.  */
      lll_futex_wait (once_control, oldval, LLL_PRIVATE);
    }

  /* This thread is the first here.  Do the initialization.
     Register a cleanup handler so that in case the thread gets
     interrupted the initialization can be restarted.  */
  pthread_cleanup_push (clear_once_control, once_control);

  init_routine ();

  pthread_cleanup_pop (0);

  /* Mark the initialization as done.  */
  *once_control = __fork_generation | 2;

  /* Wake up all other threads.  */
  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);

  return 0;
}
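Here once_control packs three fields: bit 0 says an initializer is running, bit 1 says initialization is done, and the remaining bits hold the fork generation so that a fork() during initialization restarts it. Callers never see any of this; a standard usage sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void
init_once (void)
{
  puts ("runs exactly once, however many threads call it");
}

static void *
worker (void *arg)
{
  /* Threads may race here; one runs init_once, the others block
     on the futex until bit 1 of once_control is set.  */
  pthread_once (&once, init_once);
  return arg;
}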
Code example #3
void lll_lock_wait(OSLowLock* futex) {
	do {
		/* Mark the lock as contended (1 -> 2) before sleeping.  */
		int old = OSCompareAndExchangeInt(futex, 2, 1);

		if (old != 0) {
			/* Somebody still holds the lock; sleep until it changes.  */
			lll_futex_wait(futex, 2);
		}
	}
	/* Retry until we take the free lock ourselves (0 -> 2).  */
	while (OSCompareAndExchangeInt(futex, 2, 0) != 0);
}
Code example #4
File: lowlevellock.c  Project: AubrCool/glibc
void
__lll_lock_wait_private (int *futex)
{
  do
    {
      int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
      if (oldval != 0)
	lll_futex_wait (futex, 2, LLL_PRIVATE);
    }
  while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
}
Code example #5
File: lowlevellock.c  Project: jameshilliard/20-4-4
void
__lll_lock_wait (int *futex)
{
  do
    {
      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
      if (oldval != 0)
	lll_futex_wait (futex, 2);
    }
  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
}
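Examples #4 and #5 differ only in the CAS flavour used in the loop condition: the _val_ form returns the value it found, while the _bool_ form merely reports failure. A sketch of the relationship, assuming glibc's conventions (sketch_cas_bool is a hypothetical name):

static inline int
sketch_cas_bool (int *mem, int newval, int oldval)
{
  /* Nonzero exactly when the CAS did not install newval.  */
  return atomic_compare_and_exchange_val_acq (mem, newval, oldval)
	 != oldval;
}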
Code example #6
File: cancellation.c  Project: aywq2008/omniplay
void
internal_function attribute_hidden
__internal_pthread_disable_asynccancel (int oldtype)
{
  /* If asynchronous cancellation was already enabled before the
     matching enable call, there is nothing to restore.  */
  if (oldtype & CANCELTYPE_BITMASK)
    return;

  struct pthread *self = THREAD_SELF;
  int newval;

  int oldval;
  oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      newval = oldval & ~CANCELTYPE_BITMASK;

      int curval;
      curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval, oldval);
      if (__builtin_expect (curval == oldval, 1))
	break;

      /* Prepare the next round.  */
      oldval = curval;
    }

  /* We cannot return while we are being canceled.  Upon return the
     thread might do things which would have to be undone.  The
     following loop keeps waiting until the cancellation signal is
     delivered.  */
  while (__builtin_expect ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
			   == CANCELING_BITMASK, 0))
    {
      lll_futex_wait (&self->cancelhandling, newval, LLL_PRIVATE);
      newval = THREAD_GETMEM (self, cancelhandling);
    }
}
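The CMPXCHG loop above is the standard read-modify-retry idiom for clearing a flag bit in a word that other threads update concurrently. The same loop in portable C11 atomics, as a minimal sketch rather than glibc's actual implementation:

#include <stdatomic.h>

/* Clear MASK in *WORD, retrying whenever another thread changed the
   word in the meantime.  */
static void
clear_bits_cas_loop (_Atomic int *word, int mask)
{
  int oldval = atomic_load (word);
  while (!atomic_compare_exchange_weak (word, &oldval, oldval & ~mask))
    {
      /* The failed CAS refreshed oldval; just go around again.  */
    }
}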
Code example #7
int
__new_sem_wait (sem_t *sem)
{
  /* First check for cancellation.  */
  CANCELLATION_P (THREAD_SELF);

  int *futex = (int *) sem;
  int err;

  do
    {
      int val;
      if (__atomic_is_v9)
	val = atomic_decrement_if_positive (futex);
      else
	{
	  __sparc32_atomic_do_lock24 (futex + 1);
	  val = *futex;
	  if (val > 0)
	    *futex = val - 1;
	  __sparc32_atomic_do_unlock24 (futex + 1);
	}
      if (val > 0)
	return 0;

      /* Enable asynchronous cancellation.  Required by the standard.  */
      int oldtype = __pthread_enable_asynccancel ();

      err = lll_futex_wait (futex, 0);

      /* Disable asynchronous cancellation.  */
      __pthread_disable_asynccancel (oldtype);
    }
  while (err == 0 || err == -EWOULDBLOCK);

  __set_errno (-err);
  return -1;
}
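The v9 branch relies on atomic_decrement_if_positive: decrement the semaphore count only if it is positive, returning the value that was observed. A portable C11 sketch of that primitive (not the sparc implementation above):

#include <stdatomic.h>

static int
sketch_dec_if_positive (_Atomic int *mem)
{
  int val = atomic_load (mem);
  /* A failed CAS stores the refreshed value back into val, so the
     loop re-checks it; on success val holds the pre-decrement value.  */
  while (val > 0
	 && !atomic_compare_exchange_weak (mem, &val, val - 1))
    ;
  return val;
}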
Code example #8
void
__unregister_atfork (
     void *dso_handle)
{
  /* Check whether there is any entry in the list which we have to
     remove.  It is likely that this is not the case so don't bother
     getting the lock.

     We do not worry about other threads adding entries for this DSO
     at this very moment.  If that happens, it is a race and we can do
     whatever we please.  The program is going to crash anyway.  */
  struct fork_handler *runp = __fork_handlers;
  struct fork_handler *lastp = NULL;

  while (runp != NULL)
    if (runp->dso_handle == dso_handle)
      break;
    else
      {
	lastp = runp;
	runp = runp->next;
      }

  if (runp == NULL)
    /* Nothing to do.  */
    return;

  /* Get the lock to not conflict with additions or deletions.  Note
     that there couldn't have been another thread deleting something.
     The __unregister_atfork function is only called from the
     dlclose() code which itself serializes the operations.  */
  lll_lock (__fork_lock, LLL_PRIVATE);

  /* We have to create a new list with all the entries we don't remove.  */
  struct deleted_handler
  {
    struct fork_handler *handler;
    struct deleted_handler *next;
  } *deleted = NULL;

  /* Remove the entries for the DSO which is unloaded from the list.
     It's a singly linked list, so readers can keep traversing it
     while we unlink entries.  */
  do
    {
    again:
      if (runp->dso_handle == dso_handle)
	{
	  if (lastp == NULL)
	    {
	      /* We have to use an atomic operation here because
		 __linkin_atfork also uses one.  */
	      if (catomic_compare_and_exchange_bool_acq (&__fork_handlers,
							 runp->next, runp)
		  != 0)
		{
		  runp = __fork_handlers;
		  goto again;
		}
	    }
	  else
	    lastp->next = runp->next;

	  /* We cannot overwrite the ->next element now.  Put the deleted
	     entries in a separate list.  */
	  struct deleted_handler *newp = alloca (sizeof (*newp));
	  newp->handler = runp;
	  newp->next = deleted;
	  deleted = newp;
	}
      else
	lastp = runp;

      runp = runp->next;
    }
  while (runp != NULL);

  /* Release the lock.  */
  lll_unlock (__fork_lock, LLL_PRIVATE);

  /* Walk the list of all entries which have to be deleted.  */
  while (deleted != NULL)
    {
      /* Arrange for possible current users to signal us once they are done.  */
      deleted->handler->need_signal = 1;
      /* Make sure this gets written out first.  */
      atomic_write_barrier ();

      /* Decrement the reference counter.  If it does not reach zero
	 wait for the last user.  */
      atomic_decrement (&deleted->handler->refcntr);
      unsigned int val;
      while ((val = deleted->handler->refcntr) != 0)
	lll_futex_wait (&deleted->handler->refcntr, val, LLL_PRIVATE);

      deleted = deleted->next;
    }
}
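The need_signal/refcntr handshake has a matching user side: a thread that walked the handler list drops its reference and, if it was the last user of a deleted entry, wakes the unregistering thread. A sketch modeled on glibc's fork path, not code from this project (atomic_decrement_and_test is assumed to return nonzero when the counter hits zero):

static void
sketch_release_handler (struct fork_handler *runp)
{
  if (atomic_decrement_and_test (&runp->refcntr)
      && runp->need_signal)
    /* Last user out: wake the waiter in __unregister_atfork.  */
    lll_futex_wake (&runp->refcntr, 1, LLL_PRIVATE);
}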
Code example #9
/* Acquire read lock for RWLOCK.  */
int
attribute_protected
__pthread_rwlock_rdlock (
     pthread_rwlock_t *rwlock)
{
  int result = 0;

  /* Make sure we are alone.  */
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  while (1)
    {
      /* Get the rwlock if there is no writer...  */
      if (rwlock->__data.__writer == 0
	  /* ...and if either no writer is waiting or we prefer readers.  */
	  && (!rwlock->__data.__nr_writers_queued
	      || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
	{
	  /* Increment the reader counter.  Avoid overflow.  */
	  if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
	    {
	      /* Overflow on number of readers.	 */
	      --rwlock->__data.__nr_readers;
	      result = EAGAIN;
	    }

	  break;
	}

      /* Make sure we are not holding the rwlock as a writer.  This is
	 a deadlock situation we recognize and report.  */
      if (__builtin_expect (rwlock->__data.__writer
			    == THREAD_GETMEM (THREAD_SELF, tid), 0))
	{
	  result = EDEADLK;
	  break;
	}

      /* Remember that we are a reader.  */
      if (__builtin_expect (++rwlock->__data.__nr_readers_queued == 0, 0))
	{
	  /* Overflow on number of queued readers.  */
	  --rwlock->__data.__nr_readers_queued;
	  result = EAGAIN;
	  break;
	}

      int waitval = rwlock->__data.__readers_wakeup;

      /* Free the lock.  */
      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

      /* Wait for the writer to finish.  */
      lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
		      rwlock->__data.__shared);

      /* Get the lock.  */
      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

      --rwlock->__data.__nr_readers_queued;
    }

  /* We are done, free the lock.  */
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
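Reading __readers_wakeup into waitval before dropping __lock is what prevents a lost wakeup: if a writer bumps the counter in the window before lll_futex_wait runs, the futex value no longer matches waitval and the wait returns immediately. A sketch of the writer-side wakeup that pairs with it, modeled on the same rwlock generation and illustrative only:

static void
sketch_wake_all_readers (pthread_rwlock_t *rwlock)
{
  /* Advance the generation counter, then wake every sleeping reader.  */
  ++rwlock->__data.__readers_wakeup;
  lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
		  rwlock->__data.__shared);
}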
Code example #10
File: pthread_mutex_lock.c  Project: Jaden-J/uClibc
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id;
#ifdef NO_INCR
	      newval |= FUTEX_WAITERS;
#else
	      newval |= (oldval & FUTEX_WAITERS);
#endif

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);

	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  If we are not supposed
		 to increment __nusers we actually have to decrement
		 it here.  */
#ifdef NO_INCR
	      --mutex->__data.__nusers;
#endif

	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int newval = id;
#ifdef NO_INCR
	newval |= FUTEX_WAITERS;
#endif
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      newval, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);
	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1, 0);

	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
	      {
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
		/* ESRCH can happen only for non-robust PI mutexes where
		   the owner of the lock died.  */
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

		/* Delay the thread indefinitely.  */
		while (1)
		  pause_not_cancel ();
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  If we are not supposed to
	       increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
	    --mutex->__data.__nusers;
#endif

	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return EINVAL;
	      }

	    int retval = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (retval)
	      return retval;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
						     ceilval | 2,
#else
						     ceilval | 1,
#endif
						     ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
				  PTHREAD_MUTEX_PSHARED (mutex));
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
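The EOWNERDEAD return in the robust branches is not a failure: the caller now owns the mutex but must repair the state it protects. The standard POSIX pattern for that, where mtx is assumed to be a robust mutex and repair_shared_state is a hypothetical application helper:

#include <errno.h>
#include <pthread.h>

extern pthread_mutex_t mtx;		/* initialized with PTHREAD_MUTEX_ROBUST */
extern void repair_shared_state (void);	/* hypothetical helper */

int
lock_robust (void)
{
  int err = pthread_mutex_lock (&mtx);
  if (err == EOWNERDEAD)
    {
      /* We hold the lock, but the previous owner died mid-update.  */
      repair_shared_state ();
      pthread_mutex_consistent (&mtx);
      err = 0;
    }
  return err;
}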