/**
 * Tries to enter a critical section without blocking.
 *
 * @returns VINF_SUCCESS if entered (first time or recursively),
 *          VERR_SEM_NESTED if nesting is disabled and the caller already owns it,
 *          VERR_SEM_BUSY if another thread owns (or is about to own) it,
 *          or a lock validator status in strict builds.
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the caller (lock validator).
 */
DECL_FORCE_INLINE(int) rtCritSectTryEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    /*AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);*/

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Try take the lock. (cLockers is -1 if it's free)
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody is owning it (or will be soon). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                int rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                /* Recursive entry: bump both the global lockers count and our nesting depth. */
                ASMAtomicIncS32(&pCritSect->cLockers);
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        return VERR_SEM_BUSY;
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect   The critical section.
 * @param   hNativeSelf The native handle of this thread.
 * @param   pSrcPos     The source position of the caller (lock validator).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    /* Establish the nesting count before publishing ourselves as the owner. */
    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}
/**
 * Internal worker for RTSemMutexRequestNoResume and its debug companion
 * (Windows implementation).
 *
 * @returns Same as RTSemMutexRequestNoResume.
 * @param   hMutexSem   The mutex handle.
 * @param   cMillies    The number of milliseconds to wait.
 * @param   pSrcPos     The source position of the caller.
 */
DECL_FORCE_INLINE(int) rtSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check for recursive entry.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeOwner;
    ASMAtomicReadHandle(&pThis->hNativeOwner, &hNativeOwner);
    if (hNativeOwner == hNativeSelf)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Lock mutex semaphore.
     * Only register blocking with the thread/validator when we can actually block.
     */
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
    }

    /* Alertable wait so queued user APCs can interrupt us (-> VERR_INTERRUPTED). */
    DWORD rc = WaitForSingleObjectEx(pThis->hMtx, cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies, TRUE /*fAlertable*/);
    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
    switch (rc)
    {
        case WAIT_OBJECT_0:
#ifdef RTSEMMUTEX_STRICT
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
            ASMAtomicWriteHandle(&pThis->hNativeOwner, hNativeSelf);
            ASMAtomicWriteU32(&pThis->cRecursions, 1);
            return VINF_SUCCESS;

        case WAIT_TIMEOUT:          return VERR_TIMEOUT;
        case WAIT_IO_COMPLETION:    return VERR_INTERRUPTED;
        case WAIT_ABANDONED:        return VERR_SEM_OWNER_DIED;
        default:
            AssertMsgFailed(("%u\n", rc));
            /* deliberate fallthrough: treat unknown wait codes like WAIT_FAILED */
        case WAIT_FAILED:
        {
            int rc2 = RTErrConvertFromWin32(GetLastError());
            AssertMsgFailed(("Wait on hMutexSem %p failed, rc=%d lasterr=%d\n", hMutexSem, rc, GetLastError()));
            if (rc2 != VINF_SUCCESS)
                return rc2;

            /* Last error converted to success - shouldn't be possible; report internal error. */
            AssertMsgFailed(("WaitForSingleObject(event) -> rc=%d while converted lasterr=%d\n", rc, rc2));
            return VERR_INTERNAL_ERROR;
        }
    }
}
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fAutoResume, PCRTLOCKVALSRCPOS pSrcPos) { /* * Validate input. */ struct RTSEMMUTEXINTERNAL *pThis = hMutexSem; AssertPtrReturn(pThis, VERR_INVALID_HANDLE); AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE); /* * Check if nested request. */ pthread_t Self = pthread_self(); if ( pThis->Owner == Self && pThis->cNestings > 0) { #ifdef RTSEMMUTEX_STRICT int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos); if (RT_FAILURE(rc9)) return rc9; #endif ASMAtomicIncU32(&pThis->cNestings); return VINF_SUCCESS; } #ifdef RTSEMMUTEX_STRICT RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt(); if (cMillies) { int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorRec, hThreadSelf, pSrcPos, cMillies); if (RT_FAILURE(rc9)) return rc9; } #else RTTHREAD hThreadSelf = RTThreadSelf(); #endif /* * Convert timeout value. */ struct timespec ts; struct timespec *pTimeout = NULL; uint64_t u64End = 0; /* shut up gcc */ if (cMillies != RT_INDEFINITE_WAIT) { ts.tv_sec = cMillies / 1000; ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000); u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000); pTimeout = &ts; } /* * Lock the mutex. * Optimize for the uncontended case (makes 1-2 ns difference). */ if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&pThis->iState, 1, 0))) { for (;;) { int32_t iOld = ASMAtomicXchgS32(&pThis->iState, 2); /* * Was the lock released in the meantime? This is unlikely (but possible) */ if (RT_UNLIKELY(iOld == 0)) break; /* * Go to sleep. 
*/ if (pTimeout && ( pTimeout->tv_sec || pTimeout->tv_nsec )) { #ifdef RTSEMMUTEX_STRICT int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true, cMillies, RTTHREADSTATE_MUTEX, true); if (RT_FAILURE(rc9)) return rc9; #else RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true); #endif } long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 2, pTimeout, NULL, 0); RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX); if (RT_UNLIKELY(pThis->u32Magic != RTSEMMUTEX_MAGIC)) return VERR_SEM_DESTROYED; /* * Act on the wakup code. */ if (rc == -ETIMEDOUT) { Assert(pTimeout); return VERR_TIMEOUT; } if (rc == 0) /* we'll leave the loop now unless another thread is faster */; else if (rc == -EWOULDBLOCK) /* retry with new value. */; else if (rc == -EINTR) { if (!fAutoResume) return VERR_INTERRUPTED; } else { /* this shouldn't happen! */ AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno)); return RTErrConvertFromErrno(rc); } /* adjust the relative timeout */ if (pTimeout) { int64_t i64Diff = u64End - RTTimeSystemNanoTS(); if (i64Diff < 1000) { rc = VERR_TIMEOUT; break; } ts.tv_sec = (uint64_t)i64Diff / UINT32_C(1000000000); ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000); } } /* * When leaving this loop, iState is set to 2. This means that we gained the * lock and there are _possibly_ some waiters. We don't know exactly as another * thread might entered this loop at nearly the same time. Therefore we will * call futex_wakeup once too often (if _no_ other thread entered this loop). * The key problem is the simple futex_wait test for x != y (iState != 2) in * our case). */ } /* * Set the owner and nesting. */ pThis->Owner = Self; ASMAtomicWriteU32(&pThis->cNestings, 1); #ifdef RTSEMMUTEX_STRICT RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true); #endif return VINF_SUCCESS; }
/**
 * Internal worker for acquiring a read-write semaphore for writing
 * (POSIX pthread_rwlock implementation).
 *
 * @returns VINF_SUCCESS, VERR_NOT_IMPLEMENTED (timed waits on Darwin), a lock
 *          validator status, or an errno conversion on pthread failure.
 * @param   hRWSem      The read-write semaphore handle.
 * @param   cMillies    Number of milliseconds to wait or RT_INDEFINITE_WAIT.
 * @param   pSrcPos     The source position of the caller (lock validator).
 */
DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    /*
     * Recursion?
     */
    pthread_t Self = pthread_self();
    pthread_t Writer;
    ATOMIC_GET_PTHREAD_T(&pThis->Writer, &Writer);
    if (Writer == Self)
    {
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        Assert(pThis->cWrites < INT32_MAX);
        pThis->cWrites++;
        return VINF_SUCCESS;
    }

    /*
     * Try lock it.
     * Only register blocking with the thread/validator when we can actually block.
     */
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies)
    {
#ifdef RTSEMRW_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_RW_WRITE, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, true);
#endif
    }

    if (cMillies == RT_INDEFINITE_WAIT)
    {
        /* take rwlock */
        int rc = pthread_rwlock_wrlock(&pThis->RWLock);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (rc)
        {
            AssertMsgFailed(("Failed write lock read-write sem %p, rc=%d.\n", hRWSem, rc));
            return RTErrConvertFromErrno(rc);
        }
    }
    else
    {
#ifdef RT_OS_DARWIN
        AssertMsgFailed(("Not implemented on Darwin yet because of incomplete pthreads API."));
        return VERR_NOT_IMPLEMENTED;

#else /* !RT_OS_DARWIN */
        /*
         * Get current time and calc end of wait time
         * (pthread_rwlock_timedwrlock takes an absolute CLOCK_REALTIME deadline).
         */
        struct timespec ts = {0,0};
        clock_gettime(CLOCK_REALTIME, &ts);
        if (cMillies != 0)
        {
            ts.tv_nsec += (cMillies % 1000) * 1000000;
            ts.tv_sec  += cMillies / 1000;
            if (ts.tv_nsec >= 1000000000)
            {
                ts.tv_nsec -= 1000000000;
                ts.tv_sec++;
            }
        }

        /* take rwlock */
        int rc = pthread_rwlock_timedwrlock(&pThis->RWLock, &ts);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (rc)
        {
            /* NOTE(review): assert text says "read lock" but this is the write path. */
            AssertMsg(rc == ETIMEDOUT, ("Failed read lock read-write sem %p, rc=%d.\n", hRWSem, rc));
            return RTErrConvertFromErrno(rc);
        }
#endif /* !RT_OS_DARWIN */
    }

    ATOMIC_SET_PTHREAD_T(&pThis->Writer, Self);
    pThis->cWrites = 1;
    Assert(!pThis->cReaders);
#ifdef RTSEMRW_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
/**
 * Internal worker for acquiring a mutex semaphore
 * (POSIX pthread_mutex implementation).
 *
 * @returns VINF_SUCCESS, VERR_NOT_IMPLEMENTED (timed waits on Darwin), a lock
 *          validator status, or an errno conversion on pthread failure.
 * @param   hMutexSem   The mutex handle.
 * @param   cMillies    Number of milliseconds to wait or RT_INDEFINITE_WAIT.
 * @param   pSrcPos     The source position of the caller (lock validator).
 */
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
        &&  pThis->cNesting > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNesting);
        return VINF_SUCCESS;
    }

    /*
     * Lock it.
     * Only register blocking with the thread/validator when we can actually block.
     */
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies != 0)
    {
#ifdef RTSEMMUTEX_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
    }

    if (cMillies == RT_INDEFINITE_WAIT)
    {
        /* take mutex */
        int rc = pthread_mutex_lock(&pThis->Mutex);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
        if (rc)
        {
            AssertMsgFailed(("Failed to lock mutex sem %p, rc=%d.\n", hMutexSem, rc)); NOREF(rc);
            return RTErrConvertFromErrno(rc);
        }
    }
    else
    {
#ifdef RT_OS_DARWIN
        AssertMsgFailed(("Not implemented on Darwin yet because of incomplete pthreads API."));
        return VERR_NOT_IMPLEMENTED;

#else /* !RT_OS_DARWIN */
        /*
         * Get current time and calc end of wait time
         * (pthread_mutex_timedlock takes an absolute CLOCK_REALTIME deadline).
         */
        struct timespec ts = {0,0};
        clock_gettime(CLOCK_REALTIME, &ts);
        if (cMillies != 0)
        {
            ts.tv_nsec += (cMillies % 1000) * 1000000;
            ts.tv_sec  += cMillies / 1000;
            if (ts.tv_nsec >= 1000000000)
            {
                ts.tv_nsec -= 1000000000;
                ts.tv_sec++;
            }
        }

        /* take mutex */
        int rc = pthread_mutex_timedlock(&pThis->Mutex, &ts);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
        if (rc)
        {
            AssertMsg(rc == ETIMEDOUT, ("Failed to lock mutex sem %p, rc=%d.\n", hMutexSem, rc)); NOREF(rc);
            return RTErrConvertFromErrno(rc);
        }
#endif /* !RT_OS_DARWIN */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNesting, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
/**
 * Internal worker for entering a critical section, waiting for the owner to
 * release it if necessary.
 *
 * @returns VINF_SUCCESS, VERR_SEM_NESTED if nesting is disabled and the caller
 *          already owns it, VERR_SEM_DESTROYED if destroyed while waiting, or
 *          a lock validator status in strict builds.
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the caller (lock validator).
 */
DECL_FORCE_INLINE(int) rtCritSectEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertPtr(pCritSect);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Who is calling and is the locking order right?
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD hThreadSelf = pCritSect->pValidatorRec
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
    int rc9;
    if (pCritSect->pValidatorRec) /* (bootstrap) */
    {
        rc9 = RTLockValidatorRecExclCheckOrder(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Increment the waiter counter.
     * This becomes 0 when the section is free.
     */
    if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                {
                    /* Back out the increment we did above before failing. */
                    ASMAtomicDecS32(&pCritSect->cLockers);
                    return rc9;
                }
#endif
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }

            AssertBreakpoint(); /* don't do normal assertion here, the logger uses this code too. */
            ASMAtomicDecS32(&pCritSect->cLockers);
            return VERR_SEM_NESTED;
        }

        /*
         * Wait for the current owner to release it.
         */
#ifndef RTCRITSECT_STRICT
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, false);
            if (RT_FAILURE(rc9))
            {
                /* Back out the waiter count before failing the enter. */
                ASMAtomicDecS32(&pCritSect->cLockers);
                return rc9;
            }
#else
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false);
#endif
            /* The leaving owner signals EventSem when there are waiters. */
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos) { /* * Validate handle. */ struct RTSEMRWINTERNAL *pThis = hRWSem; AssertPtrReturn(pThis, VERR_INVALID_HANDLE); AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE); RTMSINTERVAL cMilliesInitial = cMillies; uint64_t tsStart = 0; if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0) tsStart = RTTimeNanoTS(); #ifdef RTSEMRW_STRICT RTTHREAD hThreadSelf = NIL_RTTHREAD; if (cMillies) { hThreadSelf = RTThreadSelfAutoAdopt(); int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies); if (RT_FAILURE(rc9)) return rc9; } #endif /* * Take critsect. */ int rc = RTCritSectEnter(&pThis->CritSect); if (RT_FAILURE(rc)) { AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc)); return rc; } /* * Check if the state of affairs allows write access. */ RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner; if ( !pThis->cReads && ( ( !pThis->cWrites && ( !pThis->cWritesWaiting /* play fair if we can wait */ || !cMillies) ) || pThis->hWriter == hNativeSelf ) ) { /* * Reset the reader event semaphore if necessary. */ if (pThis->fNeedResetReadEvent) { pThis->fNeedResetReadEvent = false; rc = RTSemEventMultiReset(pThis->ReadEvent); AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc)); } pThis->cWrites++; pThis->hWriter = hNativeSelf; #ifdef RTSEMRW_STRICT RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, pThis->cWrites == 1); #endif RTCritSectLeave(&pThis->CritSect); return VINF_SUCCESS; } /* * Signal writer presence. */ if (cMillies != 0) pThis->cWritesWaiting++; RTCritSectLeave(&pThis->CritSect); /* * Wait till it's ready for writing. 
*/ if (cMillies == 0) return VERR_TIMEOUT; #ifndef RTSEMRW_STRICT RTTHREAD hThreadSelf = RTThreadSelf(); #endif for (;;) { if (cMillies != RT_INDEFINITE_WAIT) { int64_t tsDelta = RTTimeNanoTS() - tsStart; if (tsDelta >= 1000000) { tsDelta /= 1000000; if ((uint64_t)tsDelta < cMilliesInitial) cMilliesInitial = (RTMSINTERVAL)tsDelta; else cMilliesInitial = 1; } } #ifdef RTSEMRW_STRICT rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true, cMillies, RTTHREADSTATE_RW_WRITE, false); if (RT_FAILURE(rc)) break; #else RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false); #endif int rcWait; if (fInterruptible) rcWait = rc = RTSemEventWaitNoResume(pThis->WriteEvent, cMillies); else rcWait = rc = RTSemEventWait(pThis->WriteEvent, cMillies); RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE); if (RT_UNLIKELY(RT_FAILURE_NP(rc) && rc != VERR_TIMEOUT)) /* timeouts are handled below */ { AssertMsgRC(rc, ("RTSemEventWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc)); break; } if (RT_UNLIKELY(pThis->u32Magic != RTSEMRW_MAGIC)) { rc = VERR_SEM_DESTROYED; break; } /* * Re-take critsect and repeat the check we did prior to this loop. */ rc = RTCritSectEnter(&pThis->CritSect); if (RT_FAILURE(rc)) { AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc)); break; } if (!pThis->cReads && (!pThis->cWrites || pThis->hWriter == hNativeSelf)) { /* * Reset the reader event semaphore if necessary. */ if (pThis->fNeedResetReadEvent) { pThis->fNeedResetReadEvent = false; rc = RTSemEventMultiReset(pThis->ReadEvent); AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc)); } pThis->cWrites++; pThis->hWriter = hNativeSelf; pThis->cWritesWaiting--; #ifdef RTSEMRW_STRICT RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true); #endif RTCritSectLeave(&pThis->CritSect); return VINF_SUCCESS; } RTCritSectLeave(&pThis->CritSect); /* * Quit if the wait already timed out. 
*/ if (rcWait == VERR_TIMEOUT) { rc = VERR_TIMEOUT; break; } } /* * Timeout/error case, clean up. */ if (pThis->u32Magic == RTSEMRW_MAGIC) { RTCritSectEnter(&pThis->CritSect); /* Adjust this counter, whether we got the critsect or not. */ pThis->cWritesWaiting--; RTCritSectLeave(&pThis->CritSect); } return rc; }