/*
 * ptw32_mcs_lock_release -- release an MCS lock.
 *
 * Hands the MCS (Mellor-Crummey/Scott) queue lock held via 'node' to the
 * queued successor if one exists, or marks the lock free when the queue
 * is empty.
 *
 * See: 
 * J. M. Mellor-Crummey and M. L. Scott.
 * Algorithms for Scalable Synchronization on Shared-Memory Multiprocessors.
 * ACM Transactions on Computer Systems, 9(1):21-65, Feb. 1991.
 */
INLINE void 
ptw32_mcs_lock_release (ptw32_mcs_local_node_t * node)
{
  ptw32_mcs_lock_t *lock = node->lock;
  /* InterlockedExchangeAdd(..., 0) is an atomic read with a full
   * memory barrier; it publishes/acquires node->next safely. */
  ptw32_mcs_local_node_t *next = (ptw32_mcs_local_node_t *)
    InterlockedExchangeAdd((LPLONG)&node->next, 0); /* MBR fence */

  if (0 == next)
    {
      /* no known successor */

      /* If the lock word still points at our node, nobody else has
       * started to enqueue: CAS it to 0 to mark the lock free. */
      if (node == (ptw32_mcs_local_node_t *)
	  PTW32_INTERLOCKED_COMPARE_EXCHANGE((PTW32_INTERLOCKED_LPLONG)lock,
					     (PTW32_INTERLOCKED_LONG)0,
					     (PTW32_INTERLOCKED_LONG)node))
	{
	  /* no successor, lock is free now */
	  return;
	}
  
      /* CAS failed: a successor is mid-enqueue. Wait for it to link
       * itself in (it raises our nextFlag once node->next is valid),
       * then re-read node->next with a fenced read. */
      ptw32_mcs_flag_wait(&node->nextFlag);
      next = (ptw32_mcs_local_node_t *)
	InterlockedExchangeAdd((LPLONG)&node->next, 0); /* MBR fence */
    }

  /* pass the lock */
  ptw32_mcs_flag_set(&next->readyFlag);
}
/*
 * pthread_barrier_wait -- block until the barrier's full complement of
 * threads has arrived.
 *
 * Returns PTHREAD_BARRIER_SERIAL_THREAD for exactly one (arbitrary)
 * thread per barrier cycle, 0 for the others, or an error number.
 *
 * Two alternating semaphores, indexed by b->iStep, implement successive
 * barrier generations so a fast thread re-entering the barrier uses the
 * other semaphore and cannot disturb threads still leaving this cycle.
 */
int
pthread_barrier_wait (pthread_barrier_t * barrier)
{
  int result;
  int step;
  pthread_barrier_t b;

  if (barrier == NULL || *barrier == (pthread_barrier_t) PTW32_OBJECT_INVALID)
    {
      return EINVAL;
    }

  b = *barrier;
  step = b->iStep;          /* which of the two semaphores is current */

  /* The last thread to arrive sees the height reach zero and releases
   * the others; everyone else waits on the current-step semaphore. */
  if (0 == InterlockedDecrement ((long *) &(b->nCurrentBarrierHeight)))
    {
      /* Must be done before posting the semaphore. */
      b->nCurrentBarrierHeight = b->nInitialBarrierHeight;

      /*
       * There is no race condition between the semaphore wait and post
       * because we are using two alternating semas and all threads have
       * entered barrier_wait and checked nCurrentBarrierHeight before this
       * barrier's sema can be posted. Any threads that have not quite
       * entered sem_wait below when the multiple_post has completed
       * will nevertheless continue through the semaphore (barrier)
       * and will not be left stranded.
       */
      result = (b->nInitialBarrierHeight > 1
		? sem_post_multiple (&(b->semBarrierBreeched[step]),
				     b->nInitialBarrierHeight - 1) : 0);
    }
  else
    {
      /*
       * Use the non-cancelable version of sem_wait().
       */
      result = ptw32_semwait (&(b->semBarrierBreeched[step]));
    }

  /*
   * The first thread across will be the PTHREAD_BARRIER_SERIAL_THREAD.
   * This also sets up the alternate semaphore as the next barrier.
   * Only the single thread whose CAS flips iStep from 'step' to
   * '1 - step' wins the serial-thread return value.
   */
  if (0 == result)
    {
      result = ((PTW32_INTERLOCKED_LONG) step ==
		PTW32_INTERLOCKED_COMPARE_EXCHANGE ((PTW32_INTERLOCKED_LPLONG)
						    & (b->iStep),
						    (PTW32_INTERLOCKED_LONG)
						    (1L - step),
						    (PTW32_INTERLOCKED_LONG)
						    step) ?
		PTHREAD_BARRIER_SERIAL_THREAD : 0);
    }

  return (result);
}
/*
 * pthread_mutex_trylock -- attempt to lock a mutex without blocking.
 *
 * Returns 0 on success, EBUSY if the mutex is held by another thread
 * (or held non-recursively by this thread), or another error number.
 */
int
pthread_mutex_trylock (pthread_mutex_t * mutex)
{
  pthread_mutex_t mx;
  int result = 0;

  /*
   * Let the system deal with invalid pointers.
   */

  /*
   * Quick test for a statically initialised mutex; the check is
   * repeated inside the guarded section of ptw32_mutex_check_need_init()
   * to close the race window.
   */
  if (*mutex >= PTHREAD_ERRORCHECK_MUTEX_INITIALIZER)
    {
      result = ptw32_mutex_check_need_init (mutex);
      if (result != 0)
	{
	  return (result);
	}
    }

  mx = *mutex;

  /* Try to flip lock_idx from 0 (free) to 1 (held, uncontended). */
  if ((LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE (
		 (PTW32_INTERLOCKED_LPLONG) &mx->lock_idx,
		 (PTW32_INTERLOCKED_LONG) 1,
		 (PTW32_INTERLOCKED_LONG) 0) != 0)
    {
      /* Already held: allow recursive re-entry by the owner, otherwise
       * report the mutex busy. */
      if (mx->kind == PTHREAD_MUTEX_RECURSIVE
	  && pthread_equal (mx->ownerThread, pthread_self ()))
	{
	  mx->recursive_count++;
	}
      else
	{
	  result = EBUSY;
	}
    }
  else if (mx->kind != PTHREAD_MUTEX_NORMAL)
    {
      /* Acquired: record ownership for error-checking/recursive kinds. */
      mx->recursive_count = 1;
      mx->ownerThread = pthread_self ();
    }

  return (result);
}
/*
 * ptw32_mcs_flag_set -- notify another thread about an event.
 *
 * If a waiter has already deposited an event handle in *flag, signal
 * that event; otherwise atomically mark the flag as "set" by storing
 * -1. Note that -1 can never be a valid handle value.
 */
INLINE void
ptw32_mcs_flag_set (LONG * flag)
{
  HANDLE waiterEvent;

  /* Atomically: if *flag == 0 store -1; in every case fetch the
   * previous contents. */
  waiterEvent = (HANDLE)PTW32_INTERLOCKED_COMPARE_EXCHANGE(
						(PTW32_INTERLOCKED_LPLONG)flag,
						(PTW32_INTERLOCKED_LONG)-1,
						(PTW32_INTERLOCKED_LONG)0);
  if (waiterEvent != (HANDLE)0)
    {
      /* A waiter had already stored its event handle; wake it. */
      SetEvent(waiterEvent);
    }
}
/*
 * pthread_spin_trylock -- attempt to acquire a spin lock without
 * spinning. Returns 0 on success, EBUSY if held, EINVAL otherwise.
 */
int pthread_spin_trylock (pthread_spinlock_t * lock)
{
  long prior = (long)
    PTW32_INTERLOCKED_COMPARE_EXCHANGE(lock,
				       PTW32_SPIN_LOCKED,
				       PTW32_SPIN_UNLOCKED);

  if (PTW32_SPIN_UNLOCKED == prior)
    {
      /* We flipped UNLOCKED -> LOCKED: lock acquired. */
      return 0;
    }

  if (PTW32_SPIN_LOCKED == prior)
    {
      /* Another thread holds the lock. */
      return EBUSY;
    }

  /* Any other value means the lock object is invalid. */
  return EINVAL;
}
/*
 * ptw32_mcs_flag_wait -- wait for notification from another thread.
 * 
 * Store an event handle in the flag and wait on it if the flag has not been
 * set, and proceed without creating an event otherwise.
 */
INLINE void 
ptw32_mcs_flag_wait (LONG * flag)
{
  /* Fenced read: InterlockedExchangeAdd(..., 0) reads *flag atomically
   * with a full memory barrier. Non-zero means the setter already ran. */
  if (0 == InterlockedExchangeAdd((LPLONG)flag, 0)) /* MBR fence */
    {
      /* the flag is not set. create event. */

      HANDLE e = CreateEvent(NULL, PTW32_FALSE, PTW32_FALSE, NULL);
      /* NOTE(review): a CreateEvent failure (NULL handle) is not checked
       * here; the CAS would then store 0 (a no-op) and the wait would be
       * skipped -- confirm whether event creation is treated as
       * infallible elsewhere in the library. */

      if (0 == PTW32_INTERLOCKED_COMPARE_EXCHANGE(
			                  (PTW32_INTERLOCKED_LPLONG)flag,
			                  (PTW32_INTERLOCKED_LONG)e,
			                  (PTW32_INTERLOCKED_LONG)0))
	{
	  /* stored handle in the flag. wait on it now. */
	  WaitForSingleObject(e, INFINITE);
	}
      /* Either we were signalled, or the setter beat our CAS (flag now
       * -1) and no wait is needed; the event is finished with either way. */

      CloseHandle(e);
    }
}
/*
 * pthread_spin_trylock -- attempt to acquire a spin lock without
 * blocking or spinning.
 *
 * Returns 0 on success, EBUSY if the lock is held, or an error number.
 */
int
pthread_spin_trylock (pthread_spinlock_t * lock)
{
  register pthread_spinlock_t s;
  long prior;

  if (NULL == lock || NULL == *lock)
    {
      return (EINVAL);
    }

  /* A statically initialised spinlock is fully initialised (under a
   * guard section) on first use. */
  if (*lock == PTHREAD_SPINLOCK_INITIALIZER)
    {
      int result = ptw32_spinlock_check_need_init (lock);

      if (result != 0)
	{
	  return (result);
	}
    }

  s = *lock;

  /* Atomically attempt the UNLOCKED -> LOCKED transition and capture
   * the previous state. */
  prior = (long)
    PTW32_INTERLOCKED_COMPARE_EXCHANGE ((PTW32_INTERLOCKED_LPLONG)
					&(s->interlock),
					(PTW32_INTERLOCKED_LONG)
					PTW32_SPIN_LOCKED,
					(PTW32_INTERLOCKED_LONG)
					PTW32_SPIN_UNLOCKED);

  if (PTW32_SPIN_UNLOCKED == prior)
    {
      return 0;			/* acquired */
    }

  if (PTW32_SPIN_LOCKED == prior)
    {
      return EBUSY;		/* held elsewhere */
    }

  if (PTW32_SPIN_USE_MUTEX == prior)
    {
      /* This spinlock is implemented over a mutex; delegate. */
      return pthread_mutex_trylock (&(s->u.mutex));
    }

  return EINVAL;		/* corrupt or uninitialised state */
}
/* (extraction artifact: stray snippet-index text "Exemple #8" / "0"
 * removed -- it was not C code and broke compilation) */
/*
 * pthread_mutex_lock -- lock a mutex, blocking until it is available.
 *
 * lock_idx protocol as used here: 0 = free, 1 = held (uncontended),
 * -1 = held with possible waiters blocked on mx->event.
 * NOTE(review): the -1 "contended" marker presumably tells
 * pthread_mutex_unlock to signal mx->event -- confirm against the
 * unlock implementation, which is not visible in this file.
 *
 * Returns 0 on success, EDEADLK for a non-recursive self-lock on an
 * error-checking mutex, or EINVAL on a failed event wait.
 */
int
pthread_mutex_lock (pthread_mutex_t * mutex)
{
  int result = 0;
  pthread_mutex_t mx;

  /*
   * Let the system deal with invalid pointers.
   */

  /*
   * We do a quick check to see if we need to do more work
   * to initialise a static mutex. We check
   * again inside the guarded section of ptw32_mutex_check_need_init()
   * to avoid race conditions.
   */
  if (*mutex >= PTHREAD_ERRORCHECK_MUTEX_INITIALIZER)
    {
      if ((result = ptw32_mutex_check_need_init (mutex)) != 0)
	{
	  return (result);
	}
    }

  mx = *mutex;

  if (mx->kind == PTHREAD_MUTEX_NORMAL)
    {
      /* Fast path: unconditionally swap in 1; a prior value of 0 means
       * we took the lock with no contention. */
      if ((LONG) PTW32_INTERLOCKED_EXCHANGE(
		   (LPLONG) &mx->lock_idx,
		   (LONG) 1) != 0)
	{
	  /* Contended: mark the lock -1 (waiters present) and block on
	   * the event until a swap returns 0, i.e. the lock was free. */
	  while ((LONG) PTW32_INTERLOCKED_EXCHANGE(
                          (LPLONG) &mx->lock_idx,
			  (LONG) -1) != 0)
	    {
	      if (WAIT_OBJECT_0 != WaitForSingleObject (mx->event, INFINITE))
	        {
	          result = EINVAL;
		  break;
	        }
	    }
	}
    }
  else
    {
      /* Error-checking/recursive kinds must track ownership. */
      pthread_t self = pthread_self();

      if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE(
                   (PTW32_INTERLOCKED_LPLONG) &mx->lock_idx,
		   (PTW32_INTERLOCKED_LONG) 1,
		   (PTW32_INTERLOCKED_LONG) 0) == 0)
	{
	  /* Uncontended acquisition. */
	  mx->recursive_count = 1;
	  mx->ownerThread = self;
	}
      else
	{
	  if (pthread_equal (mx->ownerThread, self))
	    {
	      /* Relock by the current owner: recurse or deadlock error. */
	      if (mx->kind == PTHREAD_MUTEX_RECURSIVE)
		{
		  mx->recursive_count++;
		}
	      else
		{
		  result = EDEADLK;
		}
	    }
	  else
	    {
	      /* Held by another thread: same contended-wait loop as the
	       * NORMAL fast path above. */
	      while ((LONG) PTW32_INTERLOCKED_EXCHANGE(
                              (LPLONG) &mx->lock_idx,
			      (LONG) -1) != 0)
		{
	          if (WAIT_OBJECT_0 != WaitForSingleObject (mx->event, INFINITE))
		    {
	              result = EINVAL;
		      break;
		    }
		}

	      if (0 == result)
		{
		  /* We now own the mutex; record ownership. */
		  mx->recursive_count = 1;
		  mx->ownerThread = self;
		}
	    }
	}
    }

  return (result);
}
/*
 * pthread_spin_destroy -- destroy a spin lock.
 *
 * Returns 0 on success, EINVAL for an invalid or locked lock, or EBUSY
 * if a statically initialised lock was initialised (hence in use) by
 * another thread while we were waiting for the init guard section.
 */
int
pthread_spin_destroy (pthread_spinlock_t * lock)
{
  register pthread_spinlock_t s;
  int result = 0;

  if (lock == NULL || *lock == NULL)
    {
      return EINVAL;
    }

  if ((s = *lock) != PTHREAD_SPINLOCK_INITIALIZER)
    {
      /* Fully initialised lock. */
      if (s->interlock == PTW32_SPIN_USE_MUTEX)
	{
	  /* Mutex-backed variant: destroy the underlying mutex. */
	  result = pthread_mutex_destroy (&(s->u.mutex));
	}
      else if ((PTW32_INTERLOCKED_LONG) PTW32_SPIN_UNLOCKED !=
	       PTW32_INTERLOCKED_COMPARE_EXCHANGE ((PTW32_INTERLOCKED_LPLONG)
						   & (s->interlock),
						   (PTW32_INTERLOCKED_LONG)
						   PTW32_OBJECT_INVALID,
						   (PTW32_INTERLOCKED_LONG)
						   PTW32_SPIN_UNLOCKED))
	{
	  /* CAS failed: the lock was not in the UNLOCKED state, so it
	   * is held (or corrupt) and cannot be destroyed. */
	  result = EINVAL;
	}

      if (0 == result)
	{
	  /*
	   * We are relying on the application to ensure that all other threads
	   * have finished with the spinlock before destroying it.
	   */
	  *lock = NULL;
	  (void) free (s);
	}
    }
  else
    {
      /*
       * See notes in ptw32_spinlock_check_need_init() above also.
       */
      EnterCriticalSection (&ptw32_spinlock_test_init_lock);

      /*
       * Check again.
       */
      if (*lock == PTHREAD_SPINLOCK_INITIALIZER)
	{
	  /*
	   * This is all we need to do to destroy a statically
	   * initialised spinlock that has not yet been used (initialised).
	   * If we get to here, another thread
	   * waiting to initialise this mutex will get an EINVAL.
	   */
	  *lock = NULL;
	}
      else
	{
	  /*
	   * The spinlock has been initialised while we were waiting
	   * so assume it's in use.
	   */
	  result = EBUSY;
	}

      LeaveCriticalSection (&ptw32_spinlock_test_init_lock);
    }

  return (result);
}