/*
 * Block until b_count threads have reached the barrier.  The thread that
 * completes the barrier wakes every waiter and returns
 * PTHREAD_BARRIER_SERIAL_THREAD; all other threads return 0.  Returns
 * EINVAL for a NULL or uninitialized barrier.
 */
int
_pthread_barrier_wait(pthread_barrier_t *barrier)
{
	struct pthread *self = _get_curthread();
	pthread_barrier_t bp;
	int64_t saved_cycle;
	int result;

	if (barrier == NULL || *barrier == NULL)
		return (EINVAL);

	bp = *barrier;
	THR_UMUTEX_LOCK(self, &bp->b_lock);
	if (++bp->b_waiters == bp->b_count) {
		/* This thread completed the barrier: reset and release. */
		bp->b_waiters = 0;
		bp->b_cycle++;
		_thr_ucond_broadcast(&bp->b_cv);
		THR_UMUTEX_UNLOCK(self, &bp->b_lock);
		result = PTHREAD_BARRIER_SERIAL_THREAD;
	} else {
		saved_cycle = bp->b_cycle;
		bp->b_refcount++;
		for (;;) {
			_thr_ucond_wait(&bp->b_cv, &bp->b_lock, NULL, 0);
			THR_UMUTEX_LOCK(self, &bp->b_lock);
			/* A cycle change proves this was a real release,
			 * not a spurious wakeup. */
			if (saved_cycle != bp->b_cycle)
				break;
		}
		/* Let a pending pthread_barrier_destroy() proceed once the
		 * last waiter has drained out. */
		if (--bp->b_refcount == 0 && bp->b_destroying)
			_thr_ucond_broadcast(&bp->b_cv);
		THR_UMUTEX_UNLOCK(self, &bp->b_lock);
		result = 0;
	}
	return (result);
}
/*
 * Cancellation behavior:
 *   The thread may be canceled at the start of the wait; if it is
 *   canceled, that means it did not consume a wakeup from
 *   pthread_cond_signal().  Otherwise it is not canceled, so thread
 *   cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
/*
 * Kernel-assisted condition wait: detach the mutex from the current
 * thread, sleep in the kernel on the condvar, then reacquire (or
 * reattach) the mutex before returning.
 *
 * cvp     - condition variable to wait on
 * mp      - mutex held by the caller; its recursion count is saved in
 *           'recurse' across the wait and restored on relock
 * abstime - absolute timeout, or NULL for no timeout
 * cancel  - nonzero to open a cancellation window around the kernel wait
 *
 * Returns 0 on success, ETIMEDOUT on timeout, or an error from the mutex
 * relock (PP/ROBUST mutexes may report interesting codes such as EOWNERDEAD).
 */
static int cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	int recurse;
	int error, error2 = 0;

	/* Hand the mutex off so the kernel can unlock it atomically
	 * with putting us to sleep. */
	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0)
		return (error);

	if (cancel) {
		/* Open the cancellation window only for the kernel sleep. */
		_thr_cancel_enter2(curthread, 0);
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
		    (struct umutex *)&mp->m_lock, abstime,
		    CVWAIT_ABSTIME|CVWAIT_CLOCKID);
		_thr_cancel_leave(curthread, 0);
	} else {
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
		    (struct umutex *)&mp->m_lock, abstime,
		    CVWAIT_ABSTIME|CVWAIT_CLOCKID);
	}

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
	if (error == 0) {
		/* Woken normally: the kernel released the mutex, so take
		 * it back and restore the recursion count. */
		error2 = _mutex_cv_lock(mp, recurse);
	} else if (error == EINTR || error == ETIMEDOUT) {
		/* Interrupted or timed out: the mutex was released; relock,
		 * then honor any pending cancellation request. */
		error2 = _mutex_cv_lock(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);

		/* EINTR is not reported to the caller (POSIX: no spurious
		 * EINTR from cond waits). */
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		error2 = _mutex_cv_attach(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
	}
	return (error2 != 0 ? error2 : error);
}
/*
 * Destroy a barrier (process-shared aware variant).  Fails with EBUSY if
 * any thread is waiting on it, or if another destroy is already in
 * progress; otherwise waits for the last released waiter to drain out,
 * then frees the storage (off-page for pshared barriers).
 */
int
_pthread_barrier_destroy(pthread_barrier_t *barrier)
{
	pthread_barrier_t b;
	struct pthread *self;
	int shared;

	if (barrier == NULL || *barrier == NULL)
		return (EINVAL);

	if (*barrier == THR_PSHARED_PTR) {
		b = __thr_pshared_offpage(barrier, 0);
		if (b == NULL) {
			/* No off-page object exists; nothing to tear down. */
			*barrier = NULL;
			return (0);
		}
		shared = 1;
	} else {
		b = *barrier;
		shared = 0;
	}
	self = _get_curthread();
	THR_UMUTEX_LOCK(self, &b->b_lock);
	if (b->b_destroying) {
		/* Another thread is already destroying this barrier. */
		THR_UMUTEX_UNLOCK(self, &b->b_lock);
		return (EBUSY);
	}
	b->b_destroying = 1;
	for (;;) {
		if (b->b_waiters > 0) {
			b->b_destroying = 0;
			THR_UMUTEX_UNLOCK(self, &b->b_lock);
			return (EBUSY);
		}
		if (b->b_refcount == 0)
			break;
		/* Released waiters are still inside barrier_wait();
		 * wait for the last one to signal us. */
		_thr_ucond_wait(&b->b_cv, &b->b_lock, NULL, 0);
		THR_UMUTEX_LOCK(self, &b->b_lock);
	}
	b->b_destroying = 0;
	THR_UMUTEX_UNLOCK(self, &b->b_lock);

	*barrier = NULL;
	if (shared)
		__thr_pshared_destroy(barrier);
	else
		free(b);
	return (0);
}
/*
 * Destroy a barrier.  Fails with EBUSY if threads are waiting on it or a
 * concurrent destroy is in progress; otherwise waits for already-released
 * waiters to drain out of barrier_wait() before freeing the object.
 */
int
_pthread_barrier_destroy(pthread_barrier_t *barrier)
{
	pthread_barrier_t b;
	struct pthread *self;

	if (barrier == NULL || *barrier == NULL)
		return (EINVAL);

	self = _get_curthread();
	b = *barrier;
	THR_UMUTEX_LOCK(self, &b->b_lock);
	if (b->b_destroying) {
		/* Someone else is already tearing this barrier down. */
		THR_UMUTEX_UNLOCK(self, &b->b_lock);
		return (EBUSY);
	}
	b->b_destroying = 1;
	for (;;) {
		if (b->b_waiters > 0) {
			b->b_destroying = 0;
			THR_UMUTEX_UNLOCK(self, &b->b_lock);
			return (EBUSY);
		}
		if (b->b_refcount == 0)
			break;
		/* Wait for released waiters still inside barrier_wait(). */
		_thr_ucond_wait(&b->b_cv, &b->b_lock, NULL, 0);
		THR_UMUTEX_LOCK(self, &b->b_lock);
	}
	b->b_destroying = 0;
	THR_UMUTEX_UNLOCK(self, &b->b_lock);

	*barrier = NULL;
	free(b);
	return (0);
}