/*
 * pthread_spin_unlock
 *
 * Release a spin lock previously acquired by the calling thread.
 *
 * Returns:
 *   0       on success
 *   EINVAL  'lock' (or what it points to) is NULL, or the lock is in
 *           an unrecognised state
 *   EPERM   the lock is not currently held (still statically
 *           initialised, or already unlocked)
 */
int
pthread_spin_unlock (pthread_spinlock_t * lock)
{
  pthread_spinlock_t slock;
  long prev;

  if (lock == NULL || *lock == NULL)
    {
      return (EINVAL);
    }

  slock = *lock;

  if (slock == PTHREAD_SPINLOCK_INITIALIZER)
    {
      /* A statically initialised lock has never been acquired,
       * so there is nothing to release. */
      return EPERM;
    }

  /* Atomically flip LOCKED -> UNLOCKED; 'prev' reports the state the
   * interlock held immediately before the exchange. */
  prev = (long) ptw32_interlocked_compare_exchange (
	   (PTW32_INTERLOCKED_LPLONG) &(slock->interlock),
	   (PTW32_INTERLOCKED_LONG) PTW32_SPIN_UNLOCKED,
	   (PTW32_INTERLOCKED_LONG) PTW32_SPIN_LOCKED);

  if (prev == PTW32_SPIN_LOCKED)
    {
      /* We released it. */
      return 0;
    }

  if (prev == PTW32_SPIN_UNLOCKED)
    {
      /* It was not locked. */
      return EPERM;
    }

  if (prev == PTW32_SPIN_USE_MUTEX)
    {
      /* The lock was created for multi-CPU-count semantics backed by a
       * real mutex; delegate to the mutex implementation. */
      return pthread_mutex_unlock (&(slock->u.mutex));
    }

  return EINVAL;
}
/*
 * NOTE: For speed, these routines don't check if "lock" is valid.
 */
/*
 * pthread_spin_trylock
 *
 * Attempt to acquire a spin lock without blocking.
 *
 * Returns:
 *   0       the lock was acquired
 *   EBUSY   the lock is held by another thread
 *   EINVAL  the lock is in an unrecognised state
 *   other   an error from on-demand initialisation of a statically
 *           initialised lock
 */
int
pthread_spin_trylock (pthread_spinlock_t * lock)
{
  pthread_spinlock_t s;

  if (*lock == PTHREAD_SPINLOCK_INITIALIZER)
    {
      int result;

      /* First use of a statically initialised lock: build the real
       * spinlock now.  The helper re-checks under a guard section to
       * avoid racing initialisers. */
      if ((result = ptw32_spinlock_check_need_init (lock)) != 0)
	{
	  return (result);
	}
    }

  /*
   * FIX: read *lock only AFTER any on-demand initialisation above.
   * Previously 's' was loaded first, so after initialisation it still
   * held the PTHREAD_SPINLOCK_INITIALIZER sentinel and the interlocked
   * access below dereferenced that sentinel instead of the new lock.
   */
  s = *lock;

  switch ((long)
	  ptw32_interlocked_compare_exchange ((PTW32_INTERLOCKED_LPLONG) &(s->interlock),
					      (PTW32_INTERLOCKED_LONG) PTW32_SPIN_LOCKED,
					      (PTW32_INTERLOCKED_LONG) PTW32_SPIN_UNLOCKED))
    {
    case PTW32_SPIN_UNLOCKED:
      /* It was free and is now ours. */
      return 0;
    case PTW32_SPIN_LOCKED:
      /* Someone else holds it. */
      return EBUSY;
    case PTW32_SPIN_USE_MUTEX:
      /* Mutex-backed lock: delegate. */
      return pthread_mutex_trylock (&(s->u.mutex));
    }

  return EINVAL;
}
/*
 * pthread_mutex_trylock
 *
 * Attempt to acquire 'mutex' without blocking.
 *
 * Returns:
 *   0       the mutex was acquired (or re-entered, for a recursive
 *           mutex already owned by this thread)
 *   EINVAL  'mutex' (or what it points to) is NULL
 *   EBUSY   the mutex is held and cannot be re-entered
 *   other   an error from on-demand initialisation
 */
int
pthread_mutex_trylock (pthread_mutex_t * mutex)
{
  pthread_mutex_t mx;
  int result = 0;

  if (mutex == NULL || *mutex == NULL)
    {
      return EINVAL;
    }

  /*
   * Quick test for a statically initialised mutex that still needs its
   * real initialisation.  The helper re-checks inside a guarded section
   * to avoid race conditions between concurrent first users.
   */
  if (*mutex == PTHREAD_MUTEX_INITIALIZER)
    {
      result = ptw32_mutex_check_need_init (mutex);
    }

  mx = *mutex;

  if (result != 0)
    {
      return (result);
    }

  /* Try to claim the lock with a single interlocked compare-exchange
   * on the lock index. */
  if ((PTW32_INTERLOCKED_LONG) PTW32_MUTEX_LOCK_IDX_INIT ==
      ptw32_interlocked_compare_exchange ((PTW32_INTERLOCKED_LPLONG) &mx->lock_idx,
					  (PTW32_INTERLOCKED_LONG) 0,
					  (PTW32_INTERLOCKED_LONG) PTW32_MUTEX_LOCK_IDX_INIT))
    {
      /* Acquired: record ownership.  FAST mutexes use an anonymous
       * owner so no pthread_self() call is needed. */
      mx->recursive_count = 1;
      mx->ownerThread = (mx->kind != PTHREAD_MUTEX_FAST_NP
			 ? pthread_self ()
			 : (pthread_t) PTW32_MUTEX_OWNER_ANONYMOUS);
      return (result);
    }

  /* Already locked.  A recursive mutex that we already own may simply
   * bump its recursion depth. */
  if (mx->kind == PTHREAD_MUTEX_RECURSIVE_NP
      && pthread_equal (mx->ownerThread, pthread_self ()))
    {
      mx->recursive_count++;
      return (result);
    }

  return (EBUSY);
}
/*
 * pthread_barrier_wait
 *
 * Block until the barrier's initial height of threads have all called
 * this routine, then release them together.
 *
 * Returns:
 *   0                              for all released threads but one
 *   PTHREAD_BARRIER_SERIAL_THREAD  for exactly one released thread
 *                                  (the one that wins the iStep CAS)
 *   EINVAL                         'barrier' is NULL or invalid
 *   other                          an error from sem_wait/sem_post_multiple
 *
 * Implementation: two semaphores alternate as the active barrier,
 * selected by b->iStep (0 or 1); see the race-freedom argument in the
 * comments below.
 */
int pthread_barrier_wait(pthread_barrier_t *barrier)
{
  int result;
  int step;
  pthread_barrier_t b;

  if (barrier == NULL || *barrier == (pthread_barrier_t) PTW32_OBJECT_INVALID)
    {
      return EINVAL;
    }

  b = *barrier;
  /* Snapshot the current step; all threads in this "generation" use
   * the same semaphore index even if iStep flips later. */
  step = b->iStep;

  if (0 == InterlockedDecrement((long *) &(b->nCurrentBarrierHeight)))
    {
      /* We are the last thread to arrive: release the others. */
      /* Must be done before posting the semaphore. */
      b->nCurrentBarrierHeight = b->nInitialBarrierHeight;
      /*
       * There is no race condition between the semaphore wait and post
       * because we are using two alternating semas and all threads have
       * entered barrier_wait and checked nCurrentBarrierHeight before this
       * barrier's sema can be posted. Any threads that have not quite
       * entered sem_wait below when the multiple_post has completed
       * will nevertheless continue through the semaphore (barrier)
       * and will not be left stranded.
       */
      result = (b->nInitialBarrierHeight > 1
                ? sem_post_multiple(&(b->semBarrierBreeched[step]),
                                    b->nInitialBarrierHeight - 1)
                : 0);
    }
  else
    {
      BOOL switchCancelState;
      int oldCancelState;
      pthread_t self = pthread_self();

      /*
       * This routine is not a cancelation point, so temporarily
       * prevent sem_wait() from being one.
       * PTHREAD_CANCEL_ASYNCHRONOUS threads can still be canceled.
       */
      switchCancelState = (self->cancelType == PTHREAD_CANCEL_DEFERRED &&
                           0 == pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
                                                       &oldCancelState));

      result = sem_wait(&(b->semBarrierBreeched[step]));

      if (switchCancelState)
        {
          /* Restore the caller's original cancel state. */
          (void) pthread_setcancelstate(oldCancelState, NULL);
        }
    }

  /*
   * The first thread across will be the PTHREAD_BARRIER_SERIAL_THREAD.
   * This also sets up the alternate semaphore as the next barrier.
   */
  if (0 == result)
    {
      /* Exactly one thread wins the step flip (step -> 1 - step) and
       * becomes the serial thread; everyone else sees the CAS fail. */
      result = ((PTW32_INTERLOCKED_LONG) step ==
                ptw32_interlocked_compare_exchange((PTW32_INTERLOCKED_LPLONG) &(b->iStep),
                                                   (PTW32_INTERLOCKED_LONG) (1L - step),
                                                   (PTW32_INTERLOCKED_LONG) step)
                ? PTHREAD_BARRIER_SERIAL_THREAD
                : 0);
    }

  return(result);
}
/*
 * pthread_spin_destroy
 *
 * Destroy a spin lock and release its storage.
 *
 * Returns:
 *   0       on success
 *   EINVAL  'lock' is NULL/invalid, or the lock is not in the
 *           unlocked state
 *   EBUSY   a statically initialised lock was initialised (by another
 *           thread) while we were trying to destroy it
 *
 * The application must guarantee that no other thread is still using
 * the lock when this is called.
 */
int
pthread_spin_destroy (pthread_spinlock_t * lock)
{
  pthread_spinlock_t s;
  int result = 0;

  if (lock == NULL || *lock == NULL)
    {
      return EINVAL;
    }

  s = *lock;

  if (s == PTHREAD_SPINLOCK_INITIALIZER)
    {
      /*
       * Statically initialised and never used.  Serialise against
       * on-demand initialisation (see ptw32_spinlock_check_need_init).
       */
      EnterCriticalSection (&ptw32_spinlock_test_init_lock);

      if (*lock == PTHREAD_SPINLOCK_INITIALIZER)
	{
	  /*
	   * Still unused: invalidating the handle is all that is needed.
	   * Any thread subsequently waiting to initialise this lock
	   * will get an EINVAL.
	   */
	  *lock = NULL;
	}
      else
	{
	  /* Initialised while we waited for the guard — assume in use. */
	  result = EBUSY;
	}

      LeaveCriticalSection (&ptw32_spinlock_test_init_lock);
      return (result);
    }

  /* A real (initialised) spinlock. */
  if (s->interlock == PTW32_SPIN_USE_MUTEX)
    {
      /* Mutex-backed lock: destroy the underlying mutex. */
      result = pthread_mutex_destroy (&(s->u.mutex));
    }
  else if ((PTW32_INTERLOCKED_LONG) PTW32_SPIN_UNLOCKED !=
	   ptw32_interlocked_compare_exchange ((PTW32_INTERLOCKED_LPLONG) &(s->interlock),
					       (PTW32_INTERLOCKED_LONG) PTW32_OBJECT_INVALID,
					       (PTW32_INTERLOCKED_LONG) PTW32_SPIN_UNLOCKED))
    {
      /* Not currently unlocked: refuse to destroy. */
      result = EINVAL;
    }

  if (0 == result)
    {
      /*
       * We rely on the application to ensure that all other threads
       * have finished with the spinlock before destroying it.
       */
      *lock = NULL;
      free (s);
    }

  return (result);
}