/**
 * Implements the indefinite wait.
 *
 * Caller protocol: takes pThis->Mutex, bumps the waiter count, then loops
 * checking pThis->u32State and sleeping on pThis->Cond until the semaphore
 * is signalled or destroyed.
 *
 * @returns See RTSemEventMultiWaitEx.
 * @param   pThis       The semaphore.
 * @param   fFlags      See RTSemEventMultiWaitEx.
 * @param   pSrcPos     The source position, can be NULL.
 */
static int rtSemEventMultiPosixWaitIndefinite(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, PCRTLOCKVALSRCPOS pSrcPos)
{
    /* take mutex */
    int rc = pthread_mutex_lock(&pThis->Mutex);
    AssertMsgReturn(!rc, ("Failed to lock event multi sem %p, rc=%d.\n", pThis, rc), RTErrConvertFromErrno(rc));
    /* Count ourselves as a waiter; every return path below undoes this. */
    ASMAtomicIncU32(&pThis->cWaiters);

    for (;;)
    {
        /* check state.  Any state other than NOT_SIGNALED ends the wait:
           SIGNALED means success, anything else means the semaphore was
           destroyed while we slept. */
        uint32_t const u32State = pThis->u32State;
        if (u32State != EVENTMULTI_STATE_NOT_SIGNALED)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            rc = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc));
            return u32State == EVENTMULTI_STATE_SIGNALED
                 ? VINF_SUCCESS
                 : VERR_SEM_DESTROYED;
        }

        /* wait */
#ifdef RTSEMEVENTMULTI_STRICT
        RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
        if (pThis->fEverHadSignallers)
        {
            /* Lock-validator deadlock detection before blocking. */
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       RT_INDEFINITE_WAIT, RTTHREADSTATE_EVENT_MULTI, true);
            if (RT_FAILURE(rc))
            {
                ASMAtomicDecU32(&pThis->cWaiters);
                pthread_mutex_unlock(&pThis->Mutex);
                return rc;
            }
        }
#else
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
        /** @todo interruptible wait is not implementable... */
        NOREF(fFlags);
        /* pthread_cond_wait releases the mutex while sleeping and re-takes it
           before returning; spurious wakeups are handled by looping. */
        rc = pthread_cond_wait(&pThis->Cond, &pThis->Mutex);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
        if (RT_UNLIKELY(rc))
        {
            AssertMsgFailed(("Failed to wait on event multi sem %p, rc=%d.\n", pThis, rc));
            ASMAtomicDecU32(&pThis->cWaiters);
            int rc2 = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc2, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc2));
            NOREF(rc2);
            return RTErrConvertFromErrno(rc);
        }
    }
}
RTDECL(int) RTSemEventWaitNoResume(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies) { PCRTLOCKVALSRCPOS pSrcPos = NULL; /* * Validate input. */ struct RTSEMEVENTINTERNAL *pThis = hEventSem; AssertPtrReturn(pThis, VERR_INVALID_HANDLE); AssertReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE); /* * Wait for condition. */ #ifdef RTSEMEVENT_STRICT RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) ? RTThreadSelfAutoAdopt() : RTThreadSelf(); if (pThis->fEverHadSignallers) { DWORD rc = WaitForSingleObjectEx(pThis->hev, 0 /*Timeout*/, TRUE /*fAlertable*/); if (rc != WAIT_TIMEOUT || cMillies == 0) return rtSemEventWaitHandleStatus(pThis, rc); int rc9 = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false, cMillies, RTTHREADSTATE_EVENT, true); if (RT_FAILURE(rc9)) return rc9; } #else RTTHREAD hThreadSelf = RTThreadSelf(); #endif RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true); DWORD rc = WaitForSingleObjectEx(pThis->hev, cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies, TRUE /*fAlertable*/); RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT); return rtSemEventWaitHandleStatus(pThis, rc); }
/**
 * Worker for the Linux futex based multiple-release event semaphore wait.
 *
 * pThis->iState protocol (see the Asserts below): 0 = not signalled and no
 * waiters, 1 = not signalled with waiters, -1 = signalled.
 *
 * @returns VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED, VERR_SEM_DESTROYED,
 *          VERR_INVALID_HANDLE or VERR_INVALID_PARAMETER.
 * @param   pThis       The semaphore.
 * @param   fFlags      RTSEMWAIT_FLAGS_XXX.
 * @param   uTimeout    Timeout interpreted according to fFlags.
 * @param   pSrcPos     The source position, can be NULL.
 */
DECLINLINE(int) rtSemEventLnxMultiWait(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, uint64_t uTimeout,
                                       PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);

    /*
     * Quickly check whether it's signaled.
     */
    int32_t iCur = ASMAtomicUoReadS32(&pThis->iState);
    Assert(iCur == 0 || iCur == -1 || iCur == 1);
    if (iCur == -1)
        return VINF_SUCCESS;

    /*
     * Check and convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;          /* NULL => wait without timeout */
    uint64_t u64Deadline = 0; /* shut up gcc */
    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
    {
        /* If the timeout is zero, then we're done. */
        if (!uTimeout)
            return VERR_TIMEOUT;

        /* Convert it to a deadline + interval timespec.  Milliseconds are
           scaled to nanoseconds with saturation to UINT64_MAX on overflow. */
        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
            uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                     ? uTimeout * UINT32_C(1000000)
                     : UINT64_MAX;
        if (uTimeout != UINT64_MAX) /* unofficial way of indicating an indefinite wait */
        {
            if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
                u64Deadline = RTTimeSystemNanoTS() + uTimeout;
            else
            {
                /* Absolute: derive the remaining relative interval. */
                uint64_t u64Now = RTTimeSystemNanoTS();
                if (uTimeout <= u64Now)
                    return VERR_TIMEOUT;
                u64Deadline = uTimeout;
                uTimeout -= u64Now;
            }
            /* Only use a timespec when tv_sec can hold the value (32-bit
               time_t overflow guard); otherwise fall back to no timeout. */
            if (   sizeof(ts.tv_sec) >= sizeof(uint64_t)
                || uTimeout <= UINT64_C(1000000000) * UINT32_MAX)
            {
                ts.tv_nsec = uTimeout % UINT32_C(1000000000);
                ts.tv_sec  = uTimeout / UINT32_C(1000000000);
                pTimeout = &ts;
            }
        }
    }

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENTMULTI_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (unsigned i = 0;; i++)
    {
        /*
         * Start waiting. We only account for there being or having been
         * threads waiting on the semaphore to keep things simple.
         */
        iCur = ASMAtomicUoReadS32(&pThis->iState);
        Assert(iCur == 0 || iCur == -1 || iCur == 1);
        if (    iCur == 1
            ||  ASMAtomicCmpXchgS32(&pThis->iState, 1, 0))
        {
            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64Deadline - RTTimeSystemNanoTS();
                if (i64Diff < 1000)  /* less than 1us left: call it a timeout */
                    return VERR_TIMEOUT;
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
#ifdef RTSEMEVENTMULTI_STRICT
            if (pThis->fEverHadSignallers)
            {
                int rc9 = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                                uTimeout / UINT32_C(1000000), RTTHREADSTATE_EVENT_MULTI, true);
                if (RT_FAILURE(rc9))
                    return rc9;
            }
#endif
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
            /* Sleep while iState == 1.  NOTE(review): the error handling below
               implies the sys_futex wrapper returns -errno on failure —
               confirm against its definition. */
            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 1, pTimeout, NULL, 0);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);

            /* The semaphore may have been destroyed while we slept. */
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
                return VERR_SEM_DESTROYED;
            if (rc == 0)
                return VINF_SUCCESS;

            /*
             * Act on the wakup code.
             */
            if (rc == -ETIMEDOUT)
            {
                /** @todo something is broken here. shows up every now and again in the ata
                 *        code. Should try to run the timeout against RTTimeMilliTS to
                 *        check that it's doing the right thing... */
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == -EWOULDBLOCK)
                /* retry, the value changed. */;
            else if (rc == -EINTR)
            {
                if (fFlags & RTSEMWAIT_FLAGS_NORESUME)
                    return VERR_INTERRUPTED;
                /* else: resume the wait on the next loop iteration. */
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }
        }
        else if (iCur == -1)
            /* Signalled between the quick check and the cmpxchg. */
            return VINF_SUCCESS;
    }
}
/**
 * Worker for the Linux futex based single-release event semaphore wait.
 *
 * pThis->fSignalled is the futex word: 1 = signalled, 0 = not signalled.
 * A successful wait atomically consumes the signal (1 -> 0).
 *
 * @returns VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED (only when
 *          !fAutoResume), VERR_SEM_DESTROYED or VERR_INVALID_HANDLE.
 * @param   hEventSem   The event semaphore handle.
 * @param   cMillies    Timeout in milliseconds, RT_INDEFINITE_WAIT for none.
 * @param   fAutoResume Whether to resume the wait after EINTR.
 */
static int rtSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies, bool fAutoResume)
{
    PCRTLOCKVALSRCPOS pSrcPos = NULL;

    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Quickly check whether it's signaled.
     */
    /** @todo this isn't fair if someone is already waiting on it.  They should
     *        have the first go at it!
     *  (ASMAtomicReadS32(&pThis->cWaiters) == 0 || !cMillies) && ... */
    if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
        return VINF_SUCCESS;

    /*
     * Convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;  /* NULL => no futex timeout */
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        if (!cMillies)
            return VERR_TIMEOUT;
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        /* Absolute deadline used to shrink the relative timeout on retries. */
        u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout = &ts;
    }

    ASMAtomicIncS32(&pThis->cWaiters);

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENT_STRICT
    RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef RTSEMEVENT_STRICT
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       cMillies, RTTHREADSTATE_EVENT, true);
            if (RT_FAILURE(rc))
                break;
        }
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
        /* Sleep while fSignalled == 0.  NOTE(review): error handling below
           implies the sys_futex wrapper returns -errno — confirm. */
        long lrc = sys_futex(&pThis->fSignalled, FUTEX_WAIT, 0, pTimeout, NULL, 0);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
        if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        if (RT_LIKELY(lrc == 0 || lrc == -EWOULDBLOCK))
        {
            /* successful wakeup or fSignalled > 0 in the meantime;
               try consume the signal — another waiter may beat us to it,
               in which case we go back to sleep. */
            if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
                break;
        }
        else if (lrc == -ETIMEDOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
        else if (lrc == -EINTR)
        {
            if (!fAutoResume)
            {
                rc = VERR_INTERRUPTED;
                break;
            }
            /* else: fall through and re-wait for the remaining time. */
        }
        else
        {
            /* this shouldn't happen! */
            AssertMsgFailed(("rc=%ld errno=%d\n", lrc, errno));
            rc = RTErrConvertFromErrno(lrc);
            break;
        }

        /* adjust the relative timeout */
        if (pTimeout)
        {
            int64_t i64Diff = u64End - RTTimeSystemNanoTS();
            if (i64Diff < 1000)  /* less than 1us left: call it a timeout */
            {
                rc = VERR_TIMEOUT;
                break;
            }
            ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
            ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
        }
    }

    ASMAtomicDecS32(&pThis->cWaiters);
    return rc;
}
/**
 * Implements the timed wait.
 *
 * Converts the caller's timeout into an absolute timespec for
 * pthread_cond_timedwait, choosing the clock according to
 * pThis->fMonotonicClock, then runs the same mutex/cond wait protocol as the
 * indefinite variant.
 *
 * @returns See RTSemEventMultiWaitEx
 * @param   pThis       The semaphore.
 * @param   fFlags      See RTSemEventMultiWaitEx.
 * @param   uTimeout    See RTSemEventMultiWaitEx.
 * @param   pSrcPos     The source position, can be NULL.
 */
static int rtSemEventMultiPosixWaitTimed(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, uint64_t uTimeout,
                                         PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Convert uTimeout to a relative value in nano seconds.
     * (Millisecond scaling saturates to UINT64_MAX on overflow.)
     */
    if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
        uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                 ? uTimeout * UINT32_C(1000000)
                 : UINT64_MAX;
    if (uTimeout == UINT64_MAX) /* unofficial way of indicating an indefinite wait */
        return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);

    uint64_t uAbsTimeout = uTimeout;
    if (fFlags & RTSEMWAIT_FLAGS_ABSOLUTE)
    {
        /* Reduce an absolute deadline to the remaining relative interval. */
        uint64_t u64Now = RTTimeSystemNanoTS();
        uTimeout = uTimeout > u64Now ? uTimeout - u64Now : 0;
    }

    if (uTimeout == 0)
        return rtSemEventMultiPosixWaitPoll(pThis);

    /*
     * Get current time and calc end of deadline relative to real time.
     */
    struct timespec ts = {0,0};
    if (!pThis->fMonotonicClock)
    {
#if defined(RT_OS_DARWIN) || defined(RT_OS_HAIKU)
        /* No clock_gettime on these platforms; gettimeofday gives us the
           realtime clock at microsecond resolution. */
        struct timeval tv = {0,0};
        gettimeofday(&tv, NULL);
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
#else
        clock_gettime(CLOCK_REALTIME, &ts);
#endif
        struct timespec tsAdd;
        tsAdd.tv_nsec = uTimeout % UINT32_C(1000000000);
        tsAdd.tv_sec  = uTimeout / UINT32_C(1000000000);
        /* Guard 32-bit time_t against overflow; a deadline too large to
           represent degenerates into an indefinite wait. */
        if (   sizeof(ts.tv_sec) < sizeof(uint64_t)
            && (   uTimeout > UINT64_C(1000000000) * UINT32_MAX
                || (uint64_t)ts.tv_sec + tsAdd.tv_sec >= UINT32_MAX) )
            return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);

        ts.tv_sec  += tsAdd.tv_sec;
        ts.tv_nsec += tsAdd.tv_nsec;
        if (ts.tv_nsec >= 1000000000)
        {
            ts.tv_nsec -= 1000000000;
            ts.tv_sec++;
        }
        /* Note! No need to complete uAbsTimeout for RTSEMWAIT_FLAGS_RELATIVE in this path. */
    }
    else
    {
        /* ASSUMES RTTimeSystemNanoTS() == RTTimeNanoTS() == clock_gettime(CLOCK_MONOTONIC). */
        if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
            uAbsTimeout += RTTimeSystemNanoTS();
        if (   sizeof(ts.tv_sec) < sizeof(uint64_t)
            && uAbsTimeout > UINT64_C(1000000000) * UINT32_MAX)
            return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);
        ts.tv_nsec = uAbsTimeout % UINT32_C(1000000000);
        ts.tv_sec  = uAbsTimeout / UINT32_C(1000000000);
    }

    /*
     * To business!
     */
    /* take mutex */
    int rc = pthread_mutex_lock(&pThis->Mutex);
    AssertMsgReturn(rc == 0, ("rc=%d pThis=%p\n", rc, pThis), RTErrConvertFromErrno(rc));
    NOREF(rc);
    ASMAtomicIncU32(&pThis->cWaiters);

    for (;;)
    {
        /* check state. */
        uint32_t const u32State = pThis->u32State;
        if (u32State != EVENTMULTI_STATE_NOT_SIGNALED)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            rc = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc));
            return u32State == EVENTMULTI_STATE_SIGNALED
                 ? VINF_SUCCESS
                 : VERR_SEM_DESTROYED;
        }

        /* wait */
#ifdef RTSEMEVENTMULTI_STRICT
        RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       uTimeout / UINT32_C(1000000), RTTHREADSTATE_EVENT_MULTI, true);
            if (RT_FAILURE(rc))
            {
                ASMAtomicDecU32(&pThis->cWaiters);
                pthread_mutex_unlock(&pThis->Mutex);
                return rc;
            }
        }
#else
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
        /* ts is an absolute deadline, so resumed waits after EINTR still end
           at the original deadline. */
        rc = pthread_cond_timedwait(&pThis->Cond, &pThis->Mutex, &ts);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
        if (    rc
            && (    rc != EINTR /* according to SuS this function shall not return EINTR, but linux man page says differently. */
                ||  (fFlags & RTSEMWAIT_FLAGS_NORESUME)) )
        {
            AssertMsg(rc == ETIMEDOUT, ("Failed to wait on event multi sem %p, rc=%d.\n", pThis, rc));
            ASMAtomicDecU32(&pThis->cWaiters);
            int rc2 = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc2, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc2));
            NOREF(rc2);
            return RTErrConvertFromErrno(rc);
        }

        /* check the absolute deadline. */
        /* NOTE(review): no code follows the comment above — presumably the
           absolute deadline in ts is considered sufficient; confirm whether
           an explicit uAbsTimeout check was intended here. */
    }
}
DECL_FORCE_INLINE(int) rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos) { /* * Validate handle. */ struct RTSEMRWINTERNAL *pThis = hRWSem; AssertPtrReturn(pThis, VERR_INVALID_HANDLE); AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); RTMSINTERVAL cMilliesInitial = cMillies; uint64_t tsStart = 0; if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0) tsStart = RTTimeNanoTS(); #ifdef RTSEMRW_STRICT RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt(); if (cMillies > 0) { int rc9; if (pThis->hWriter != NIL_RTTHREAD && pThis->hWriter == RTThreadNativeSelf()) rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies); else rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies); if (RT_FAILURE(rc9)) return rc9; } #endif /* * Take critsect. */ int rc = RTCritSectEnter(&pThis->CritSect); if (RT_FAILURE(rc)) { AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc)); return rc; } /* * Check if the state of affairs allows read access. * Do not block further readers if there is a writer waiting, as * that will break/deadlock reader recursion. 
*/ if ( pThis->hWriter == NIL_RTNATIVETHREAD #if 0 && ( !pThis->cWritesWaiting || pThis->cReads) #endif ) { pThis->cReads++; Assert(pThis->cReads > 0); #ifdef RTSEMRW_STRICT RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos); #endif RTCritSectLeave(&pThis->CritSect); return VINF_SUCCESS; } RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner; if (pThis->hWriter == hNativeSelf) { #ifdef RTSEMRW_STRICT int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos); if (RT_FAILURE(rc9)) { RTCritSectLeave(&pThis->CritSect); return rc9; } #endif pThis->cWriterReads++; Assert(pThis->cWriterReads > 0); RTCritSectLeave(&pThis->CritSect); return VINF_SUCCESS; } RTCritSectLeave(&pThis->CritSect); /* * Wait till it's ready for reading. */ if (cMillies == 0) return VERR_TIMEOUT; #ifndef RTSEMRW_STRICT RTTHREAD hThreadSelf = RTThreadSelf(); #endif for (;;) { if (cMillies != RT_INDEFINITE_WAIT) { int64_t tsDelta = RTTimeNanoTS() - tsStart; if (tsDelta >= 1000000) { tsDelta /= 1000000; if ((uint64_t)tsDelta < cMilliesInitial) cMilliesInitial = (RTMSINTERVAL)tsDelta; else cMilliesInitial = 1; } } #ifdef RTSEMRW_STRICT rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true, cMillies, RTTHREADSTATE_RW_READ, false); if (RT_FAILURE(rc)) break; #else RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false); #endif int rcWait; if (fInterruptible) rcWait = rc = RTSemEventMultiWaitNoResume(pThis->ReadEvent, cMillies); else rcWait = rc = RTSemEventMultiWait(pThis->ReadEvent, cMillies); RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ); if (RT_FAILURE(rc) && rc != VERR_TIMEOUT) /* handle timeout below */ { AssertMsgRC(rc, ("RTSemEventMultiWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc)); break; } if (pThis->u32Magic != RTSEMRW_MAGIC) { rc = VERR_SEM_DESTROYED; break; } /* * Re-take critsect and repeat the check we did before the loop. 
*/ rc = RTCritSectEnter(&pThis->CritSect); if (RT_FAILURE(rc)) { AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc)); break; } if ( pThis->hWriter == NIL_RTNATIVETHREAD #if 0 && ( !pThis->cWritesWaiting || pThis->cReads) #endif ) { pThis->cReads++; Assert(pThis->cReads > 0); #ifdef RTSEMRW_STRICT RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos); #endif RTCritSectLeave(&pThis->CritSect); return VINF_SUCCESS; } RTCritSectLeave(&pThis->CritSect); /* * Quit if the wait already timed out. */ if (rcWait == VERR_TIMEOUT) { rc = VERR_TIMEOUT; break; } } /* failed */ return rc; }
/**
 * Worker for the POSIX pthread based single-release event semaphore wait,
 * handling both the indefinite and the timed case.
 *
 * State protocol (under pThis->Mutex): EVENT_STATE_SIGNALED is consumed by
 * the woken waiter (reset to NOT_SIGNALED); EVENT_STATE_UNINITIALIZED means
 * the semaphore was destroyed.
 *
 * @returns VINF_SUCCESS, VERR_TIMEOUT, VERR_SEM_DESTROYED, VERR_INVALID_HANDLE
 *          or an errno-derived status.
 * @param   hEventSem   The event semaphore handle.
 * @param   cMillies    Timeout in milliseconds, RT_INDEFINITE_WAIT for none.
 * @param   fAutoResume Whether to resume the wait after EINTR.
 */
DECL_FORCE_INLINE(int) rtSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies, bool fAutoResume)
{
#ifdef RTSEMEVENT_STRICT
    PCRTLOCKVALSRCPOS pSrcPos = NULL;
#endif

    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    uint32_t u32 = pThis->u32State;
    AssertReturn(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED, VERR_INVALID_HANDLE);

    /*
     * Timed or indefinite wait?
     */
    if (cMillies == RT_INDEFINITE_WAIT)
    {
        /* for fairness, yield before going to sleep. */
        if (    ASMAtomicIncU32(&pThis->cWaiters) > 1
            &&  pThis->u32State == EVENT_STATE_SIGNALED)
            pthread_yield();

        /* take mutex */
        int rc = pthread_mutex_lock(&pThis->Mutex);
        if (rc)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            AssertMsgFailed(("Failed to lock event sem %p, rc=%d.\n", hEventSem, rc));
            return RTErrConvertFromErrno(rc);
        }

        for (;;)
        {
            /* check state. */
            if (pThis->u32State == EVENT_STATE_SIGNALED)
            {
                /* Consume the signal (single-release semantics). */
                ASMAtomicWriteU32(&pThis->u32State, EVENT_STATE_NOT_SIGNALED);
                ASMAtomicDecU32(&pThis->cWaiters);
                rc = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc));
                NOREF(rc);
                return VINF_SUCCESS;
            }
            if (pThis->u32State == EVENT_STATE_UNINITIALIZED)
            {
                rc = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc));
                NOREF(rc);
                return VERR_SEM_DESTROYED;
            }

            /* wait */
#ifdef RTSEMEVENT_STRICT
            RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                                 ? RTThreadSelfAutoAdopt()
                                 : RTThreadSelf();
            if (pThis->fEverHadSignallers)
            {
                rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                           cMillies, RTTHREADSTATE_EVENT, true);
                if (RT_FAILURE(rc))
                {
                    ASMAtomicDecU32(&pThis->cWaiters);
                    pthread_mutex_unlock(&pThis->Mutex);
                    return rc;
                }
            }
#else
            RTTHREAD hThreadSelf = RTThreadSelf();
#endif
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
            /* Spurious wakeups are handled by re-checking the state above. */
            rc = pthread_cond_wait(&pThis->Cond, &pThis->Mutex);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
            if (rc)
            {
                AssertMsgFailed(("Failed to wait on event sem %p, rc=%d.\n", hEventSem, rc));
                ASMAtomicDecU32(&pThis->cWaiters);
                int rc2 = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc2, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc2));
                NOREF(rc2);
                return RTErrConvertFromErrno(rc);
            }
        }
    }
    else
    {
        /*
         * Get current time and calc end of wait time.
         * (pthread_cond_timedwait takes an absolute CLOCK_REALTIME deadline.)
         */
        struct timespec ts = {0,0};
#if defined(RT_OS_DARWIN) || defined(RT_OS_HAIKU)
        struct timeval tv = {0,0};
        gettimeofday(&tv, NULL);
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
#else
        clock_gettime(CLOCK_REALTIME, &ts);
#endif
        if (cMillies != 0)
        {
            ts.tv_nsec += (cMillies % 1000) * 1000000;
            ts.tv_sec  += cMillies / 1000;
            if (ts.tv_nsec >= 1000000000)
            {
                ts.tv_nsec -= 1000000000;
                ts.tv_sec++;
            }
        }

        /* for fairness, yield before going to sleep. */
        if (ASMAtomicIncU32(&pThis->cWaiters) > 1 && cMillies)
            pthread_yield();

        /* take mutex */
        int rc = pthread_mutex_lock(&pThis->Mutex);
        if (rc)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            /* NOTE(review): asserting ETIMEDOUT from pthread_mutex_lock looks
               like a copy-paste from the timedwait path — confirm intent. */
            AssertMsg(rc == ETIMEDOUT, ("Failed to lock event sem %p, rc=%d.\n", hEventSem, rc));
            return RTErrConvertFromErrno(rc);
        }

        for (;;)
        {
            /* check state. */
            if (pThis->u32State == EVENT_STATE_SIGNALED)
            {
                /* Consume the signal (single-release semantics). */
                ASMAtomicWriteU32(&pThis->u32State, EVENT_STATE_NOT_SIGNALED);
                ASMAtomicDecU32(&pThis->cWaiters);
                rc = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc));
                NOREF(rc);
                return VINF_SUCCESS;
            }
            if (pThis->u32State == EVENT_STATE_UNINITIALIZED)
            {
                rc = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc));
                NOREF(rc);
                return VERR_SEM_DESTROYED;
            }

            /* we're done if the timeout is 0. */
            if (!cMillies)
            {
                ASMAtomicDecU32(&pThis->cWaiters);
                rc = pthread_mutex_unlock(&pThis->Mutex);
                return VERR_TIMEOUT;
            }

            /* wait */
#ifdef RTSEMEVENT_STRICT
            RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                                 ? RTThreadSelfAutoAdopt()
                                 : RTThreadSelf();
            if (pThis->fEverHadSignallers)
            {
                rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                           cMillies, RTTHREADSTATE_EVENT, true);
                if (RT_FAILURE(rc))
                {
                    ASMAtomicDecU32(&pThis->cWaiters);
                    pthread_mutex_unlock(&pThis->Mutex);
                    return rc;
                }
            }
#else
            RTTHREAD hThreadSelf = RTThreadSelf();
#endif
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
            /* ts is an absolute deadline, so resumed waits after EINTR still
               end at the original deadline. */
            rc = pthread_cond_timedwait(&pThis->Cond, &pThis->Mutex, &ts);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
            if (rc && (rc != EINTR || !fAutoResume)) /* according to SuS this function shall not return EINTR, but linux man page says differently. */
            {
                AssertMsg(rc == ETIMEDOUT, ("Failed to wait on event sem %p, rc=%d.\n", hEventSem, rc));
                ASMAtomicDecU32(&pThis->cWaiters);
                int rc2 = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc2, ("Failed to unlock event sem %p, rc2=%d.\n", hEventSem, rc2));
                NOREF(rc2);
                return RTErrConvertFromErrno(rc);
            }
        } /* for (;;) */
    }
}