/*
 * ptw32_robust_mutex_inherit
 *
 * Called by a thread that is acquiring a robust mutex to learn the fate
 * of the previous owner.  Atomically compare-and-swaps the robust state:
 * a prior value of -1 (set by a thread that terminated while holding the
 * lock) is switched to PTW32_ROBUST_INCONSISTENT and reported as
 * EOWNERDEAD; PTW32_ROBUST_NOTRECOVERABLE is reported as ENOTRECOVERABLE;
 * anything else means the lock is healthy and 0 is returned.
 */
INLINE int
ptw32_robust_mutex_inherit (pthread_mutex_t * mutex)
{
  pthread_mutex_t mtx = *mutex;
  ptw32_robust_node_t* node = mtx->robustNode;
  LONG prior;

  /* The CAS yields the state prior to the exchange. */
  prior = (LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG(
            (PTW32_INTERLOCKED_LONGPTR)&node->stateInconsistent,
            (PTW32_INTERLOCKED_LONG)PTW32_ROBUST_INCONSISTENT,
            (PTW32_INTERLOCKED_LONG)-1 /* The terminating thread sets this */);

  if (prior == -1L)
    {
      /* Former owner died holding the lock. */
      return EOWNERDEAD;
    }

  if (prior == (LONG)PTW32_ROBUST_NOTRECOVERABLE)
    {
      return ENOTRECOVERABLE;
    }

  return 0;
}
/*
 * pthread_mutex_consistent
 *
 * Mark a robust mutex, previously acquired with EOWNERDEAD, as
 * consistent again so it may continue to be used normally.
 *
 * Returns 0 on success, or EINVAL when the mutex pointer is NULL,
 * the mutex is not robust (kind >= 0), or its robust state is not
 * currently PTW32_ROBUST_INCONSISTENT.
 */
int
pthread_mutex_consistent (pthread_mutex_t* mutex)
{
  pthread_mutex_t mx = *mutex;

  /*
   * Let the system deal with invalid pointers.
   */
  if (mx == NULL)
    {
      return EINVAL;
    }

  /* Only robust mutexes (negative kind encoding) can be made consistent. */
  if (mx->kind >= 0)
    {
      return EINVAL;
    }

  /*
   * Atomically flip INCONSISTENT -> CONSISTENT.  The CAS returns the
   * previous state; anything other than INCONSISTENT means this call
   * was not permitted.
   */
  if ((PTW32_INTERLOCKED_LONG)PTW32_ROBUST_INCONSISTENT !=
        PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG(
          (PTW32_INTERLOCKED_LONGPTR)&mx->robustNode->stateInconsistent,
          (PTW32_INTERLOCKED_LONG)PTW32_ROBUST_CONSISTENT,
          (PTW32_INTERLOCKED_LONG)PTW32_ROBUST_INCONSISTENT))
    {
      return EINVAL;
    }

  return 0;
}
/*
 * pthread_spin_lock
 *
 * Acquire 'lock', busy-waiting while another thread holds it.  A
 * statically initialised spinlock is set up on first use.  If the
 * implementation chose to back the spinlock with a mutex
 * (PTW32_SPIN_USE_MUTEX), defer to pthread_mutex_lock instead.
 *
 * Returns 0 on success, EINVAL for a bad or corrupted lock, or the
 * result of the deferred pthread_mutex_lock call.
 */
int
pthread_spin_lock (pthread_spinlock_t * lock)
{
  register pthread_spinlock_t sp;

  if (NULL == lock || NULL == *lock)
    {
      return (EINVAL);
    }

  /* Lazily initialise a statically allocated spinlock. */
  if (*lock == PTHREAD_SPINLOCK_INITIALIZER)
    {
      int rc = ptw32_spinlock_check_need_init (lock);

      if (rc != 0)
	{
	  return (rc);
	}
    }

  sp = *lock;

  /* Spin until the CAS (UNLOCKED -> LOCKED) observes a prior value
   * other than LOCKED, i.e. until we transition it ourselves or the
   * lock turns out not to be interlock-based at all. */
  for (;;)
    {
      if ((PTW32_INTERLOCKED_LONG) PTW32_SPIN_LOCKED !=
	  PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG ((PTW32_INTERLOCKED_LONGPTR) &sp->interlock,
						   (PTW32_INTERLOCKED_LONG) PTW32_SPIN_LOCKED,
						   (PTW32_INTERLOCKED_LONG) PTW32_SPIN_UNLOCKED))
	{
	  break;
	}
    }

  if (sp->interlock == PTW32_SPIN_LOCKED)
    {
      return 0;
    }

  if (sp->interlock == PTW32_SPIN_USE_MUTEX)
    {
      /* Single-CPU fallback: the spinlock is really a mutex. */
      return pthread_mutex_lock (&(sp->u.mutex));
    }

  return EINVAL;
}
/*
 * pthread_mutex_unlock
 *
 * Release 'mutex'.  Handles non-robust kinds (kind >= 0) and robust
 * kinds (kind < 0, decoded with -kind - 1).  lock_idx encoding (as used
 * throughout this file): 0 = unlocked, 1 = locked with no waiters,
 * < 0 = locked with possible waiters — in which case the mutex's event
 * is signalled on release to wake a waiter.
 *
 * Returns 0 on success; EPERM when the calling thread does not own an
 * errorcheck/recursive/robust mutex; EINVAL when SetEvent fails or the
 * mutex is a static initializer other than PTHREAD_MUTEX_INITIALIZER.
 */
int
pthread_mutex_unlock (pthread_mutex_t * mutex)
{
  int result = 0;
  int kind;
  pthread_mutex_t mx;

  /*
   * Let the system deal with invalid pointers.
   */

  mx = *mutex;

  /*
   * If the thread calling us holds the mutex then there is no
   * race condition. If another thread holds the
   * lock then we shouldn't be in here.
   */
  if (mx < PTHREAD_ERRORCHECK_MUTEX_INITIALIZER)
    {
      kind = mx->kind;

      if (kind >= 0)
	{
	  if (kind == PTHREAD_MUTEX_NORMAL)
	    {
	      LONG idx;

	      /* Atomically release; idx is the pre-release lock state. */
	      idx = (LONG) PTW32_INTERLOCKED_EXCHANGE_LONG ((PTW32_INTERLOCKED_LONGPTR)&mx->lock_idx,
							    (PTW32_INTERLOCKED_LONG)0);
	      if (idx != 0)
		{
		  if (idx < 0)
		    {
		      /*
		       * Someone may be waiting on that mutex.
		       */
		      if (SetEvent (mx->event) == 0)
			{
			  result = EINVAL;
			}
		    }
		}
	    }
	  else
	    {
	      /* Errorcheck/recursive: only the owning thread may unlock. */
	      if (pthread_equal (to_pthread(mx->ownerThread), pthread_self()))
		{
		  /* A recursive mutex is only truly released when its
		   * recursion count drops to zero. */
		  if (kind != PTHREAD_MUTEX_RECURSIVE || 0 == --mx->recursive_count)
		    {
		      mx->ownerThread.p = NULL;

		      if ((LONG) PTW32_INTERLOCKED_EXCHANGE_LONG ((PTW32_INTERLOCKED_LONGPTR)&mx->lock_idx,
								  (PTW32_INTERLOCKED_LONG)0) < 0L)
			{
			  /* Someone may be waiting on that mutex */
			  if (SetEvent (mx->event) == 0)
			    {
			      result = EINVAL;
			    }
			}
		    }
		}
	      else
		{
		  result = EPERM;
		}
	    }
	}
      else
	{
	  /* Robust types */
	  pthread_t self = pthread_self();
	  kind = -kind - 1; /* Convert to non-robust range */

	  /*
	   * The thread must own the lock regardless of type if the mutex
	   * is robust.
	   */
	  if (pthread_equal (to_pthread(mx->ownerThread), self))
	    {
	      /* If the state is still INCONSISTENT (the owner never called
	       * pthread_mutex_consistent), unlocking makes the mutex
	       * permanently NOTRECOVERABLE. */
	      PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG((PTW32_INTERLOCKED_LONGPTR) &mx->robustNode->stateInconsistent,
						      (PTW32_INTERLOCKED_LONG)PTW32_ROBUST_NOTRECOVERABLE,
						      (PTW32_INTERLOCKED_LONG)PTW32_ROBUST_INCONSISTENT);
	      if (PTHREAD_MUTEX_NORMAL == kind)
		{
		  /* Remove from this thread's robust currently-held list
		   * before releasing the lock word. */
		  ptw32_robust_mutex_remove(mutex, NULL);

		  if ((LONG) PTW32_INTERLOCKED_EXCHANGE_LONG((PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
							     (PTW32_INTERLOCKED_LONG) 0) < 0)
		    {
		      /*
		       * Someone may be waiting on that mutex.
		       */
		      if (SetEvent (mx->event) == 0)
			{
			  result = EINVAL;
			}
		    }
		}
	      else
		{
		  if (kind != PTHREAD_MUTEX_RECURSIVE || 0 == --mx->recursive_count)
		    {
		      ptw32_robust_mutex_remove(mutex, NULL);

		      if ((LONG) PTW32_INTERLOCKED_EXCHANGE_LONG((PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
								 (PTW32_INTERLOCKED_LONG) 0) < 0)
			{
			  /*
			   * Someone may be waiting on that mutex.
			   */
			  if (SetEvent (mx->event) == 0)
			    {
			      result = EINVAL;
			    }
			}
		    }
		}
	    }
	  else
	    {
	      result = EPERM;
	    }
	}
    }
  else if (mx != PTHREAD_MUTEX_INITIALIZER)
    {
      result = EINVAL;
    }

  return (result);
}
/*
 * pthread_mutex_timedlock
 *
 * Lock 'mutex' as pthread_mutex_lock() would, but give up once the
 * absolute timeout 'abstime' expires.  Handles non-robust kinds
 * (kind >= 0) and robust kinds (kind < 0, decoded with -kind - 1).
 * lock_idx encoding: 0 = unlocked, 1 = locked with no waiters,
 * -1 = locked with possible waiters.
 *
 * Returns:
 *   0                success
 *   EDEADLK          non-recursive relock attempt by the current owner
 *   EOWNERDEAD       robust mutex acquired after its owner terminated
 *   ENOTRECOVERABLE  robust mutex is permanently unusable
 *   other            errors from ptw32_timed_eventwait (e.g. ETIMEDOUT)
 *                    or ptw32_mutex_check_need_init
 */
int
pthread_mutex_timedlock (pthread_mutex_t * mutex, const struct timespec *abstime)
{
  pthread_mutex_t mx;
  int kind;
  int result = 0;

  /*
   * Let the system deal with invalid pointers.
   */

  /*
   * We do a quick check to see if we need to do more work
   * to initialise a static mutex. We check
   * again inside the guarded section of ptw32_mutex_check_need_init()
   * to avoid race conditions.
   */
  if (*mutex >= PTHREAD_ERRORCHECK_MUTEX_INITIALIZER)
    {
      if ((result = ptw32_mutex_check_need_init (mutex)) != 0)
	{
	  return (result);
	}
    }

  mx = *mutex;
  kind = mx->kind;

  if (kind >= 0)
    {
      /* Non-robust types. */
      if (kind == PTHREAD_MUTEX_NORMAL)
	{
	  if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_EXCHANGE_LONG(
		       (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
		       (PTW32_INTERLOCKED_LONG) 1) != 0)
	    {
	      /* Contended: mark the lock "has waiters" (-1) and wait on
	       * the event until the previous value was 0 (free). */
	      while ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_EXCHANGE_LONG(
			   (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
			   (PTW32_INTERLOCKED_LONG) -1) != 0)
		{
		  if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
		    {
		      return result;
		    }
		}
	    }
	}
      else
	{
	  pthread_t self = pthread_self();

	  if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG(
		       (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
		       (PTW32_INTERLOCKED_LONG) 1,
		       (PTW32_INTERLOCKED_LONG) 0) == 0)
	    {
	      /* Uncontended acquisition. */
	      mx->recursive_count = 1;
	      mx->ownerThread = self;
	    }
	  else
	    {
	      if (pthread_equal (mx->ownerThread, self))
		{
		  if (kind == PTHREAD_MUTEX_RECURSIVE)
		    {
		      mx->recursive_count++;
		    }
		  else
		    {
		      /* Errorcheck relock by the owner. */
		      return EDEADLK;
		    }
		}
	      else
		{
		  while ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_EXCHANGE_LONG(
			       (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
			       (PTW32_INTERLOCKED_LONG) -1) != 0)
		    {
		      if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
			{
			  return result;
			}
		    }

		  mx->recursive_count = 1;
		  mx->ownerThread = self;
		}
	    }
	}
    }
  else
    {
      /*
       * Robust types
       * All types record the current owner thread.
       * The mutex is added to a per thread list when ownership is acquired.
       */
      ptw32_robust_state_t* statePtr = &mx->robustNode->stateInconsistent;

      /* Atomic read of the robust state (exchange-add of 0). */
      if ((PTW32_INTERLOCKED_LONG)PTW32_ROBUST_NOTRECOVERABLE ==
	      PTW32_INTERLOCKED_EXCHANGE_ADD_LONG(
		(PTW32_INTERLOCKED_LONGPTR)statePtr,
		(PTW32_INTERLOCKED_LONG)0))
	{
	  result = ENOTRECOVERABLE;
	}
      else
	{
	  pthread_t self = pthread_self();

	  kind = -kind - 1; /* Convert to non-robust range */

	  if (PTHREAD_MUTEX_NORMAL == kind)
	    {
	      if ((PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_EXCHANGE_LONG(
			   (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
			   (PTW32_INTERLOCKED_LONG) 1) != 0)
		{
		  while (0 == (result = ptw32_robust_mutex_inherit(mutex))
			 && (PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_EXCHANGE_LONG(
				  (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
				  (PTW32_INTERLOCKED_LONG) -1) != 0)
		    {
		      if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
			{
			  return result;
			}
		      if ((PTW32_INTERLOCKED_LONG)PTW32_ROBUST_NOTRECOVERABLE ==
			      PTW32_INTERLOCKED_EXCHANGE_ADD_LONG(
				(PTW32_INTERLOCKED_LONGPTR)statePtr,
				(PTW32_INTERLOCKED_LONG)0))
			{
			  /* Unblock the next thread */
			  SetEvent(mx->event);
			  result = ENOTRECOVERABLE;
			  break;
			}
		    }
		}

	      /*
	       * FIX: this registration was previously nested inside the
	       * contended branch above, so an uncontended fast-path
	       * acquisition (first EXCHANGE returned 0, result still 0)
	       * never added the mutex to the robust list and thread-death
	       * cleanup would miss it.  It must run on every successful
	       * acquisition, matching pthread_mutex_trylock.
	       */
	      if (0 == result || EOWNERDEAD == result)
		{
		  /*
		   * Add mutex to the per-thread robust mutex currently-held list.
		   * If the thread terminates, all mutexes in this list will be unlocked.
		   */
		  ptw32_robust_mutex_add(mutex, self);
		}
	    }
	  else
	    {
	      if (0 == (PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG(
			    (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
			    (PTW32_INTERLOCKED_LONG) 1,
			    (PTW32_INTERLOCKED_LONG) 0))
		{
		  mx->recursive_count = 1;
		  /*
		   * Add mutex to the per-thread robust mutex currently-held list.
		   * If the thread terminates, all mutexes in this list will be unlocked.
		   */
		  ptw32_robust_mutex_add(mutex, self);
		}
	      else
		{
		  if (pthread_equal (mx->ownerThread, self))
		    {
		      if (PTHREAD_MUTEX_RECURSIVE == kind)
			{
			  mx->recursive_count++;
			}
		      else
			{
			  return EDEADLK;
			}
		    }
		  else
		    {
		      while (0 == (result = ptw32_robust_mutex_inherit(mutex))
			     && (PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_EXCHANGE_LONG(
				      (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
				      (PTW32_INTERLOCKED_LONG) -1) != 0)
			{
			  if (0 != (result = ptw32_timed_eventwait (mx->event, abstime)))
			    {
			      return result;
			    }
			}

		      if ((PTW32_INTERLOCKED_LONG)PTW32_ROBUST_NOTRECOVERABLE ==
			      PTW32_INTERLOCKED_EXCHANGE_ADD_LONG(
				(PTW32_INTERLOCKED_LONGPTR)statePtr,
				(PTW32_INTERLOCKED_LONG)0))
			{
			  /* Unblock the next thread */
			  SetEvent(mx->event);
			  result = ENOTRECOVERABLE;
			}
		      else if (0 == result || EOWNERDEAD == result)
			{
			  mx->recursive_count = 1;
			  /*
			   * Add mutex to the per-thread robust mutex currently-held list.
			   * If the thread terminates, all mutexes in this list will be unlocked.
			   */
			  ptw32_robust_mutex_add(mutex, self);
			}
		    }
		}
	    }
	}
    }

  return result;
}
/*
 * pthread_mutex_unlock
 *
 * Release 'mutex'.  Non-robust kinds are kind >= 0; robust kinds are
 * kind < 0 and are decoded with -kind - 1.  lock_idx encoding:
 * 0 = unlocked, 1 = locked with no waiters, < 0 = locked with possible
 * waiters, in which case the mutex's event is signalled on release.
 *
 * NOTE(review): this appears to duplicate the pthread_mutex_unlock
 * definition earlier in this view (which additionally wraps ownerThread
 * in to_pthread()); the two presumably come from different versions or
 * files concatenated together — confirm only one is compiled per
 * translation unit.
 *
 * Returns 0 on success, EPERM when the caller does not own an
 * errorcheck/recursive/robust mutex, EINVAL when SetEvent fails or the
 * mutex is a static initializer other than PTHREAD_MUTEX_INITIALIZER.
 */
int
pthread_mutex_unlock (pthread_mutex_t * mutex)
{
  int result = 0;
  int kind;
  pthread_mutex_t mx;

  mx = *mutex;

  if (mx < PTHREAD_ERRORCHECK_MUTEX_INITIALIZER)
    {
      kind = mx->kind;

      if (kind >= 0)
	{
	  if (kind == PTHREAD_MUTEX_NORMAL)
	    {
	      LONG idx;

	      /* Atomically release; idx is the pre-release lock state. */
	      idx = (LONG) PTW32_INTERLOCKED_EXCHANGE_LONG ((PTW32_INTERLOCKED_LONGPTR)&mx->lock_idx,
							    (PTW32_INTERLOCKED_LONG)0);
	      if (idx != 0)
		{
		  if (idx < 0)
		    {
		      /* Someone may be waiting on that mutex. */
		      if (SetEvent (mx->event) == 0)
			{
			  result = EINVAL;
			}
		    }
		}
	    }
	  else
	    {
	      /* Errorcheck/recursive: only the owning thread may unlock. */
	      if (pthread_equal (mx->ownerThread, pthread_self()))
		{
		  /* A recursive mutex is only truly released when its
		   * recursion count drops to zero. */
		  if (kind != PTHREAD_MUTEX_RECURSIVE || 0 == --mx->recursive_count)
		    {
		      mx->ownerThread.p = NULL;

		      if ((LONG) PTW32_INTERLOCKED_EXCHANGE_LONG ((PTW32_INTERLOCKED_LONGPTR)&mx->lock_idx,
								  (PTW32_INTERLOCKED_LONG)0) < 0L)
			{
			  /* Someone may be waiting on that mutex. */
			  if (SetEvent (mx->event) == 0)
			    {
			      result = EINVAL;
			    }
			}
		    }
		}
	      else
		{
		  result = EPERM;
		}
	    }
	}
      else
	{
	  /* Robust types. */
	  pthread_t self = pthread_self();
	  kind = -kind - 1; /* Convert to non-robust range */

	  /* A robust mutex must be owned by the caller regardless of kind. */
	  if (pthread_equal (mx->ownerThread, self))
	    {
	      /* If still INCONSISTENT (never made consistent), unlocking
	       * makes the mutex permanently NOTRECOVERABLE. */
	      PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG((PTW32_INTERLOCKED_LONGPTR) &mx->robustNode->stateInconsistent,
						      (PTW32_INTERLOCKED_LONG)PTW32_ROBUST_NOTRECOVERABLE,
						      (PTW32_INTERLOCKED_LONG)PTW32_ROBUST_INCONSISTENT);
	      if (PTHREAD_MUTEX_NORMAL == kind)
		{
		  /* Remove from this thread's robust currently-held list. */
		  ptw32_robust_mutex_remove(mutex, NULL);

		  if ((LONG) PTW32_INTERLOCKED_EXCHANGE_LONG((PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
							     (PTW32_INTERLOCKED_LONG) 0) < 0)
		    {
		      /* Someone may be waiting on that mutex. */
		      if (SetEvent (mx->event) == 0)
			{
			  result = EINVAL;
			}
		    }
		}
	      else
		{
		  if (kind != PTHREAD_MUTEX_RECURSIVE || 0 == --mx->recursive_count)
		    {
		      ptw32_robust_mutex_remove(mutex, NULL);

		      if ((LONG) PTW32_INTERLOCKED_EXCHANGE_LONG((PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
								 (PTW32_INTERLOCKED_LONG) 0) < 0)
			{
			  /* Someone may be waiting on that mutex. */
			  if (SetEvent (mx->event) == 0)
			    {
			      result = EINVAL;
			    }
			}
		    }
		}
	    }
	  else
	    {
	      result = EPERM;
	    }
	}
    }
  else if (mx != PTHREAD_MUTEX_INITIALIZER)
    {
      result = EINVAL;
    }

  return (result);
}
/*
 * pthread_mutex_trylock
 *
 * Attempt to lock 'mutex' without blocking.  Handles non-robust kinds
 * (kind >= 0) and robust kinds (kind < 0, decoded with -kind - 1).
 *
 * Returns 0 on success, EBUSY when the mutex is held by another thread,
 * EOWNERDEAD when a robust mutex is acquired after its owner died,
 * ENOTRECOVERABLE when a robust mutex is permanently unusable, or an
 * error from ptw32_mutex_check_need_init.
 */
int
pthread_mutex_trylock (pthread_mutex_t * mutex)
{
  pthread_mutex_t mx;
  int kind;
  int result;

  /*
   * Let the system deal with invalid pointers.
   */

  /*
   * We do a quick check to see if we need to do more work
   * to initialise a static mutex. We check
   * again inside the guarded section of ptw32_mutex_check_need_init()
   * to avoid race conditions.
   */
  if (*mutex >= PTHREAD_ERRORCHECK_MUTEX_INITIALIZER)
    {
      result = ptw32_mutex_check_need_init (mutex);
      if (result != 0)
	{
	  return (result);
	}
    }

  mx = *mutex;
  kind = mx->kind;

  if (kind >= 0)
    {
      /* Non-robust path. */
      if (0 == (PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG (
		    (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
		    (PTW32_INTERLOCKED_LONG) 1,
		    (PTW32_INTERLOCKED_LONG) 0))
	{
	  /* Acquired.  Only non-normal kinds track owner/recursion. */
	  if (PTHREAD_MUTEX_NORMAL != kind)
	    {
	      mx->recursive_count = 1;
	      mx->ownerThread = pthread_self ();
	    }
	  return 0;
	}

      /* Already held: recursive relock by the owner is the one success. */
      if (PTHREAD_MUTEX_RECURSIVE == kind
	  && pthread_equal (mx->ownerThread, pthread_self ()))
	{
	  mx->recursive_count++;
	  return 0;
	}

      return EBUSY;
    }

  /*
   * Robust types
   * All types record the current owner thread.
   * The mutex is added to a per thread list when ownership is acquired.
   */
  {
    pthread_t self;
    ptw32_robust_state_t* statePtr = &mx->robustNode->stateInconsistent;

    /* Atomic read of the robust state (exchange-add of 0). */
    if ((PTW32_INTERLOCKED_LONG)PTW32_ROBUST_NOTRECOVERABLE ==
	    PTW32_INTERLOCKED_EXCHANGE_ADD_LONG(
	      (PTW32_INTERLOCKED_LONGPTR)statePtr,
	      (PTW32_INTERLOCKED_LONG)0))
      {
	return ENOTRECOVERABLE;
      }

    self = pthread_self();
    kind = -kind - 1; /* Convert to non-robust range */

    if (0 == (PTW32_INTERLOCKED_LONG) PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG (
		  (PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
		  (PTW32_INTERLOCKED_LONG) 1,
		  (PTW32_INTERLOCKED_LONG) 0))
      {
	/* Acquired: register on the per-thread robust currently-held list
	 * so thread termination releases it. */
	if (PTHREAD_MUTEX_NORMAL != kind)
	  {
	    mx->recursive_count = 1;
	  }
	ptw32_robust_mutex_add(mutex, self);
	return 0;
      }

    if (PTHREAD_MUTEX_RECURSIVE == kind
	&& pthread_equal (mx->ownerThread, pthread_self ()))
      {
	mx->recursive_count++;
	return 0;
      }

    /* Held by another thread: it may have died holding the lock. */
    result = ptw32_robust_mutex_inherit(mutex);
    if (EOWNERDEAD == result)
      {
	mx->recursive_count = 1;
	ptw32_robust_mutex_add(mutex, self);
	return EOWNERDEAD;
      }

    /* 0 means healthy-but-held; anything else (ENOTRECOVERABLE) passes
     * straight through. */
    return (0 == result) ? EBUSY : result;
  }
}