int
pthread_barrier_wait (pthread_barrier_t * barrier)
{
  int result;
  int step;
  pthread_barrier_t b;

  if (barrier == NULL || *barrier == (pthread_barrier_t) PTW32_OBJECT_INVALID)
    {
      return EINVAL;
    }

  b = *barrier;
  step = b->iStep;

  if (0 == InterlockedDecrement ((long *) &(b->nCurrentBarrierHeight)))
    {
      /* Must be done before posting the semaphore. */
      b->nCurrentBarrierHeight = b->nInitialBarrierHeight;

      /*
       * There is no race condition between the semaphore wait and post
       * because we are using two alternating semas and all threads have
       * entered barrier_wait and checked nCurrentBarrierHeight before this
       * barrier's sema can be posted. Any threads that have not quite
       * entered sem_wait below when the multiple_post has completed
       * will nevertheless continue through the semaphore (barrier)
       * and will not be left stranded.
       */
      result = (b->nInitialBarrierHeight > 1
                ? sem_post_multiple (&(b->semBarrierBreeched[step]),
                                     b->nInitialBarrierHeight - 1)
                : 0);
    }
  else
    {
      /*
       * Use the non-cancelable version of sem_wait().
       */
      result = ptw32_semwait (&(b->semBarrierBreeched[step]));
    }

  /*
   * The first thread across will be the PTHREAD_BARRIER_SERIAL_THREAD.
   * This also sets up the alternate semaphore as the next barrier.
   */
  if (0 == result)
    {
      result = ((PTW32_INTERLOCKED_LONG) step ==
                PTW32_INTERLOCKED_COMPARE_EXCHANGE ((PTW32_INTERLOCKED_LPLONG) &(b->iStep),
                                                    (PTW32_INTERLOCKED_LONG) (1L - step),
                                                    (PTW32_INTERLOCKED_LONG) step)
                ? PTHREAD_BARRIER_SERIAL_THREAD : 0);
    }

  return (result);
}

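/*
 * Sketch only: the barrier fields referenced above, inferred from the
 * accesses in this function.  The real structure (ptw32_barrier_t_ in the
 * library's implement.h) may declare additional members or different types.
 */
struct ptw32_barrier_sketch
{
  int nCurrentBarrierHeight;    /* threads still to arrive in this cycle */
  int nInitialBarrierHeight;    /* count passed to pthread_barrier_init() */
  int iStep;                    /* selects which of the two semas is armed */
  sem_t semBarrierBreeched[2];  /* alternating release semaphores */
};
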
int
pthread_barrier_wait (pthread_barrier_t * barrier)
{
  int result;
  pthread_barrier_t b;
  ptw32_mcs_local_node_t node;

  if (barrier == NULL || *barrier == (pthread_barrier_t) PTW32_OBJECT_INVALID)
    {
      return EINVAL;
    }

  ptw32_mcs_lock_acquire(&(*barrier)->lock, &node);

  b = *barrier;

  if (--b->nCurrentBarrierHeight == 0)
    {
      ptw32_mcs_node_transfer(&b->proxynode, &node);
      result = (b->nInitialBarrierHeight > 1
                ? sem_post_multiple (&(b->semBarrierBreeched),
                                     b->nInitialBarrierHeight - 1)
                : 0);
    }
  else
    {
      ptw32_mcs_lock_release(&node);
      result = ptw32_semwait (&(b->semBarrierBreeched));
    }

  if ((PTW32_INTERLOCKED_LONG)PTW32_INTERLOCKED_INCREMENT_LONG((PTW32_INTERLOCKED_LONGPTR)&b->nCurrentBarrierHeight)
      == (PTW32_INTERLOCKED_LONG)b->nInitialBarrierHeight)
    {
      ptw32_mcs_lock_release(&b->proxynode);
      if (0 == result)
        {
          result = PTHREAD_BARRIER_SERIAL_THREAD;
        }
    }

  return (result);
}

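/*
 * Sketch of the MCS queue-lock pattern relied on above; the signatures are
 * inferred from the call sites, not copied from the library headers.  The
 * local node lives on the caller's stack and stays enqueued on the lock for
 * the duration of the critical section.  ptw32_mcs_node_transfer() re-homes
 * that node (above, into b->proxynode) so that a *different* thread can
 * release the lock on the original acquirer's behalf.
 */
void
example_mcs_critical_section (ptw32_mcs_lock_t *lock)
{
  ptw32_mcs_local_node_t node;  /* queue node, valid only while held */

  ptw32_mcs_lock_acquire (lock, &node);
  /* ... protected work ... */
  ptw32_mcs_lock_release (&node);
}
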
int
pthread_barrier_wait (pthread_barrier_t * barrier)
{
  int result;
  pthread_barrier_t b;
  ptw32_mcs_local_node_t node;

  if (barrier == NULL || *barrier == (pthread_barrier_t) PTW32_OBJECT_INVALID)
    {
      return EINVAL;
    }

  ptw32_mcs_lock_acquire(&(*barrier)->lock, &node);

  b = *barrier;

  if (--b->nCurrentBarrierHeight == 0)
    {
      /*
       * We are the last thread to arrive at the barrier before it releases us.
       * Move our MCS local node to the global scope barrier handle so that the
       * last thread out (not necessarily us) can release the lock.
       */
      ptw32_mcs_node_transfer(&b->proxynode, &node);

      /*
       * Any threads that have not quite entered sem_wait below when the
       * multiple_post has completed will nevertheless continue through
       * the semaphore (barrier).
       */
      result = (b->nInitialBarrierHeight > 1
                ? sem_post_multiple (&(b->semBarrierBreeched),
                                     b->nInitialBarrierHeight - 1)
                : 0);
    }
  else
    {
      ptw32_mcs_lock_release(&node);

      /*
       * Use the non-cancelable version of sem_wait().
       *
       * It is possible that all nInitialBarrierHeight-1 threads are
       * at this point when the last thread enters the barrier, resets
       * nCurrentBarrierHeight = nInitialBarrierHeight and leaves.
       * If pthread_barrier_destroy is called at that moment then the
       * barrier will be destroyed along with the semas.
       */
      result = ptw32_semwait (&(b->semBarrierBreeched));
    }

  if ((PTW32_INTERLOCKED_LONG)PTW32_INTERLOCKED_INCREMENT_LONG((PTW32_INTERLOCKED_LONGPTR)&b->nCurrentBarrierHeight)
      == (PTW32_INTERLOCKED_LONG)b->nInitialBarrierHeight)
    {
      /*
       * We are the last thread to cross this barrier
       */
      ptw32_mcs_lock_release(&b->proxynode);
      if (0 == result)
        {
          result = PTHREAD_BARRIER_SERIAL_THREAD;
        }
    }

  return (result);
}

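/*
 * Minimal usage sketch (standard pthreads barrier API, not part of the
 * implementation above): NTHREADS threads meet at the barrier; exactly one
 * of them is returned PTHREAD_BARRIER_SERIAL_THREAD, all others get 0.
 */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t barrier;

static void *
worker (void *arg)
{
  int rc;

  /* ... per-thread phase-1 work ... */
  rc = pthread_barrier_wait (&barrier);
  if (rc == PTHREAD_BARRIER_SERIAL_THREAD)
    printf ("thread %d elected serial thread\n", (int)(size_t) arg);
  else if (rc != 0)
    fprintf (stderr, "barrier wait failed: %d\n", rc);
  /* ... phase-2 work: all phase-1 work is now complete ... */
  return NULL;
}

int
main (void)
{
  pthread_t t[NTHREADS];
  int i;

  pthread_barrier_init (&barrier, NULL, NTHREADS);
  for (i = 0; i < NTHREADS; i++)
    pthread_create (&t[i], NULL, worker, (void *)(size_t) i);
  for (i = 0; i < NTHREADS; i++)
    pthread_join (t[i], NULL);
  pthread_barrier_destroy (&barrier);
  return 0;
}
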
static INLINE int
ptw32_cond_unblock (pthread_cond_t * cond, int unblockAll)
     /*
      * Notes.
      *
      * Does not use the external mutex for synchronisation,
      * therefore semBlockLock is needed.
      * mtxUnblockLock is for LEVEL-2 synch. LEVEL-2 is the
      * state where the external mutex is not necessarily locked by
      * any thread, ie. between cond_wait unlocking and re-acquiring
      * the lock after having been signaled or a timeout or
      * cancellation.
      *
      * Uses the following CV elements:
      *   nWaitersBlocked
      *   nWaitersToUnblock
      *   nWaitersGone
      *   mtxUnblockLock
      *   semBlockLock
      *   semBlockQueue
      */
{
  int result;
  pthread_cond_t cv;
  int nSignalsToIssue;

  if (cond == NULL || *cond == NULL)
    {
      return EINVAL;
    }

  cv = *cond;

  /*
   * No-op if the CV is static and hasn't been initialised yet.
   * Assuming that any race condition is harmless.
   */
  if (cv == PTHREAD_COND_INITIALIZER)
    {
      return 0;
    }

  if ((result = pthread_mutex_lock (&(cv->mtxUnblockLock))) != 0)
    {
      return result;
    }

  if (0 != cv->nWaitersToUnblock)
    {
      if (0 == cv->nWaitersBlocked)
        {
          return pthread_mutex_unlock (&(cv->mtxUnblockLock));
        }
      if (unblockAll)
        {
          cv->nWaitersToUnblock += (nSignalsToIssue = cv->nWaitersBlocked);
          cv->nWaitersBlocked = 0;
        }
      else
        {
          nSignalsToIssue = 1;
          cv->nWaitersToUnblock++;
          cv->nWaitersBlocked--;
        }
    }
  else if (cv->nWaitersBlocked > cv->nWaitersGone)
    {
      /* Use the non-cancellable version of sem_wait() */
      if (ptw32_semwait (&(cv->semBlockLock)) != 0)
        {
          result = errno;
          (void) pthread_mutex_unlock (&(cv->mtxUnblockLock));
          return result;
        }
      if (0 != cv->nWaitersGone)
        {
          cv->nWaitersBlocked -= cv->nWaitersGone;
          cv->nWaitersGone = 0;
        }
      if (unblockAll)
        {
          nSignalsToIssue = cv->nWaitersToUnblock = cv->nWaitersBlocked;
          cv->nWaitersBlocked = 0;
        }
      else
        {
          nSignalsToIssue = cv->nWaitersToUnblock = 1;
          cv->nWaitersBlocked--;
        }
    }
  else
    {
      return pthread_mutex_unlock (&(cv->mtxUnblockLock));
    }

  if ((result = pthread_mutex_unlock (&(cv->mtxUnblockLock))) == 0)
    {
      if (sem_post_multiple (&(cv->semBlockQueue), nSignalsToIssue) != 0)
        {
          result = errno;
        }
    }

  return result;
}                               /* ptw32_cond_unblock */

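/*
 * Plausible public wrappers (a sketch, not copied from the library source):
 * given the unblockAll parameter above, pthread_cond_signal() and
 * pthread_cond_broadcast() reduce to thin calls into ptw32_cond_unblock().
 */
int
pthread_cond_signal (pthread_cond_t * cond)
{
  /* Unblock at most one waiter. */
  return ptw32_cond_unblock (cond, 0);
}

int
pthread_cond_broadcast (pthread_cond_t * cond)
{
  /* Unblock every currently blocked waiter. */
  return ptw32_cond_unblock (cond, 1);
}
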
static void PTW32_CDECL
ptw32_cond_wait_cleanup (void *args)
{
  ptw32_cond_wait_cleanup_args_t *cleanup_args =
    (ptw32_cond_wait_cleanup_args_t *) args;
  pthread_cond_t cv = cleanup_args->cv;
  int *resultPtr = cleanup_args->resultPtr;
  int nSignalsWasLeft;
  int result;

  /*
   * Whether we got here as a result of signal/broadcast or because of
   * timeout on wait or thread cancellation we indicate that we are no
   * longer waiting. The waiter is responsible for adjusting waiters
   * (to)unblock(ed) counts (protected by unblock lock).
   */
  if ((result = pthread_mutex_lock (&(cv->mtxUnblockLock))) != 0)
    {
      *resultPtr = result;
      return;
    }

  if (0 != (nSignalsWasLeft = cv->nWaitersToUnblock))
    {
      --(cv->nWaitersToUnblock);
    }
  else if (INT_MAX / 2 == ++(cv->nWaitersGone))
    {
      /* Use the non-cancellable version of sem_wait() */
      if (ptw32_semwait (&(cv->semBlockLock)) != 0)
        {
          *resultPtr = errno;
          /*
           * This is a fatal error for this CV,
           * so we deliberately don't unlock
           * cv->mtxUnblockLock before returning.
           */
          return;
        }
      cv->nWaitersBlocked -= cv->nWaitersGone;
      if (sem_post (&(cv->semBlockLock)) != 0)
        {
          *resultPtr = errno;
          /*
           * This is a fatal error for this CV,
           * so we deliberately don't unlock
           * cv->mtxUnblockLock before returning.
           */
          return;
        }
      cv->nWaitersGone = 0;
    }

  if ((result = pthread_mutex_unlock (&(cv->mtxUnblockLock))) != 0)
    {
      *resultPtr = result;
      return;
    }

  if (1 == nSignalsWasLeft)
    {
      if (sem_post (&(cv->semBlockLock)) != 0)
        {
          *resultPtr = errno;
          return;
        }
    }

  /*
   * XSH: Upon successful return, the mutex has been locked and is owned
   * by the calling thread.
   */
  if ((result = pthread_mutex_lock (cleanup_args->mutexPtr)) != 0)
    {
      *resultPtr = result;
    }
}                               /* ptw32_cond_wait_cleanup */

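/*
 * Condensed sketch of how the waiter side installs the cleanup handler
 * above; the surrounding bookkeeping is elided and the details here are
 * assumptions, not the exact library code.  The handler runs on normal
 * wakeup, timeout and cancellation alike, which is why it alone adjusts
 * the counters and re-acquires the external mutex.
 */
static int
cond_wait_sketch (pthread_cond_t * cond, pthread_mutex_t * mutex,
                  const struct timespec *abstime)  /* assumed non-NULL here */
{
  int result = 0;
  ptw32_cond_wait_cleanup_args_t cleanup_args;

  /* ... register this thread as a waiter, release the external mutex ... */

  cleanup_args.mutexPtr = mutex;
  cleanup_args.cv = *cond;
  cleanup_args.resultPtr = &result;

  pthread_cleanup_push (ptw32_cond_wait_cleanup, (void *) &cleanup_args);

  /* Block on the CV's queue semaphore (cancellable, honours abstime). */
  if (sem_timedwait (&((*cond)->semBlockQueue), abstime) != 0)
    {
      result = errno;
    }

  pthread_cleanup_pop (1);      /* always run the cleanup handler */

  return result;
}
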
int
pthread_cond_destroy (pthread_cond_t * cond)
     /*
      * ------------------------------------------------------
      * DOCPUBLIC
      *      This function destroys a condition variable
      *
      *
      * PARAMETERS
      *      cond
      *              pointer to an instance of pthread_cond_t
      *
      *
      * DESCRIPTION
      *      This function destroys a condition variable.
      *
      *      NOTES:
      *      1)      A condition variable can be destroyed
      *              immediately after all the threads that
      *              are blocked on it are awakened. e.g.
      *
      *              struct list {
      *                pthread_mutex_t lm;
      *                ...
      *              };
      *
      *              struct elt {
      *                key k;
      *                int busy;
      *                pthread_cond_t notbusy;
      *                ...
      *              };
      *
      *
      *              struct elt *
      *              list_find(struct list *lp, key k)
      *              {
      *                struct elt *ep;
      *
      *                pthread_mutex_lock(&lp->lm);
      *                while (((ep = find_elt(lp, k)) != NULL) && ep->busy)
      *                  pthread_cond_wait(&ep->notbusy, &lp->lm);
      *                if (ep != NULL)
      *                  ep->busy = 1;
      *                pthread_mutex_unlock(&lp->lm);
      *                return(ep);
      *              }
      *
      *              delete_elt(struct list *lp, struct elt *ep)
      *              {
      *                pthread_mutex_lock(&lp->lm);
      *                assert(ep->busy);
      *                ... remove ep from list ...
      *                ep->busy = 0;
      *            (A) pthread_cond_broadcast(&ep->notbusy);
      *                pthread_mutex_unlock(&lp->lm);
      *            (B) pthread_cond_destroy(&ep->notbusy);
      *                free(ep);
      *              }
      *
      *              In this example, the condition variable
      *              and its list element may be freed (line B)
      *              immediately after all threads waiting for
      *              it are awakened (line A), since the mutex
      *              and the code ensure that no other thread
      *              can touch the element to be deleted.
      *
      * RESULTS
      *              0               successfully released condition variable,
      *              EINVAL          'cond' is invalid,
      *              EBUSY           'cond' is in use,
      *
      * ------------------------------------------------------
      */
{
  pthread_cond_t cv;
  int result = 0, result1 = 0, result2 = 0;

  /*
   * Assuming any race condition here is harmless.
   */
  if (cond == NULL || *cond == NULL)
    {
      return EINVAL;
    }

  if (*cond != PTHREAD_COND_INITIALIZER)
    {
      ptw32_mcs_local_node_t node;
      ptw32_mcs_lock_acquire(&ptw32_cond_list_lock, &node);

      cv = *cond;

      /*
       * Close the gate; this will synchronize this thread with
       * all already signaled waiters to let them retract their
       * waiter status - SEE NOTE 1 ABOVE!!!
       */
      if (ptw32_semwait (&(cv->semBlockLock)) != 0) /* Non-cancelable */
        {
          result = PTW32_GET_ERRNO();
        }
      else
        {
          /*
           * !TRY! lock mtxUnblockLock; try will detect busy condition
           * and will not cause a deadlock with respect to concurrent
           * signal/broadcast.
           */
          if ((result = pthread_mutex_trylock (&(cv->mtxUnblockLock))) != 0)
            {
              (void) sem_post (&(cv->semBlockLock));
            }
        }

      if (result != 0)
        {
          ptw32_mcs_lock_release(&node);
          return result;
        }

      /*
       * Check whether cv is still busy (still has waiters)
       */
      if (cv->nWaitersBlocked > cv->nWaitersGone)
        {
          if (sem_post (&(cv->semBlockLock)) != 0)
            {
              result = PTW32_GET_ERRNO();
            }
          result1 = pthread_mutex_unlock (&(cv->mtxUnblockLock));
          result2 = EBUSY;
        }
      else
        {
          /*
           * Now it is safe to destroy
           */
          *cond = NULL;

          if (sem_destroy (&(cv->semBlockLock)) != 0)
            {
              result = PTW32_GET_ERRNO();
            }
          if (sem_destroy (&(cv->semBlockQueue)) != 0)
            {
              result1 = PTW32_GET_ERRNO();
            }
          if ((result2 = pthread_mutex_unlock (&(cv->mtxUnblockLock))) == 0)
            {
              result2 = pthread_mutex_destroy (&(cv->mtxUnblockLock));
            }

          /* Unlink the CV from the list */

          if (ptw32_cond_list_head == cv)
            {
              ptw32_cond_list_head = cv->next;
            }
          else
            {
              cv->prev->next = cv->next;
            }

          if (ptw32_cond_list_tail == cv)
            {
              ptw32_cond_list_tail = cv->prev;
            }
          else
            {
              cv->next->prev = cv->prev;
            }

          (void) free (cv);
        }

      ptw32_mcs_lock_release(&node);
    }
  else
    {
      ptw32_mcs_local_node_t node;
      /*
       * See notes in ptw32_cond_check_need_init() above also.
       */
      ptw32_mcs_lock_acquire(&ptw32_cond_test_init_lock, &node);

      /*
       * Check again.
       */
      if (*cond == PTHREAD_COND_INITIALIZER)
        {
          /*
           * This is all we need to do to destroy a statically
           * initialised cond that has not yet been used (initialised).
           * If we get to here, another thread waiting to initialise
           * this cond will get an EINVAL. That's OK.
           */
          *cond = NULL;
        }
      else
        {
          /*
           * The cv has been initialised while we were waiting
           * so assume it's in use.
           */
          result = EBUSY;
        }

      ptw32_mcs_lock_release(&node);
    }

  return ((result != 0) ? result : ((result1 != 0) ? result1 : result2));
}

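/*
 * Usage sketch: a statically initialised CV that was never waited on or
 * signalled takes the PTHREAD_COND_INITIALIZER fast path above, so destroy
 * simply succeeds; a CV that still has blocked waiters reports EBUSY
 * instead of being torn down.
 */
#include <pthread.h>
#include <assert.h>

static pthread_cond_t never_used = PTHREAD_COND_INITIALIZER;

void
tidy_up (void)
{
  int rc = pthread_cond_destroy (&never_used);
  assert (rc == 0);     /* fast path: *cond is simply set to NULL */
}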