/**
 * Implements the timed wait.
 *
 * @returns See RTSemEventMultiWaitEx.
 * @param   pThis       The semaphore.
 * @param   fFlags      See RTSemEventMultiWaitEx.
 * @param   uTimeout    See RTSemEventMultiWaitEx.
 * @param   pSrcPos     The source position, can be NULL.
 */
static int rtSemEventMultiPosixWaitTimed(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, uint64_t uTimeout,
                                         PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Convert uTimeout to a relative value in nanoseconds.
     */
    if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
        uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                 ? uTimeout * UINT32_C(1000000)
                 : UINT64_MAX;
    if (uTimeout == UINT64_MAX) /* unofficial way of indicating an indefinite wait */
        return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);

    uint64_t uAbsTimeout = uTimeout;
    if (fFlags & RTSEMWAIT_FLAGS_ABSOLUTE)
    {
        uint64_t u64Now = RTTimeSystemNanoTS();
        uTimeout = uTimeout > u64Now ? uTimeout - u64Now : 0;
    }

    if (uTimeout == 0)
        return rtSemEventMultiPosixWaitPoll(pThis);

    /*
     * Get current time and calc end of deadline relative to real time.
     */
    struct timespec ts = {0,0};
    if (!pThis->fMonotonicClock)
    {
#if defined(RT_OS_DARWIN) || defined(RT_OS_HAIKU)
        struct timeval tv = {0,0};
        gettimeofday(&tv, NULL);
        ts.tv_sec  = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
#else
        clock_gettime(CLOCK_REALTIME, &ts);
#endif
        struct timespec tsAdd;
        tsAdd.tv_nsec = uTimeout % UINT32_C(1000000000);
        tsAdd.tv_sec  = uTimeout / UINT32_C(1000000000);
        if (   sizeof(ts.tv_sec) < sizeof(uint64_t)
            && (   uTimeout > UINT64_C(1000000000) * UINT32_MAX
                || (uint64_t)ts.tv_sec + tsAdd.tv_sec >= UINT32_MAX) )
            return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);

        ts.tv_sec  += tsAdd.tv_sec;
        ts.tv_nsec += tsAdd.tv_nsec;
        if (ts.tv_nsec >= 1000000000)
        {
            ts.tv_nsec -= 1000000000;
            ts.tv_sec++;
        }
        /* Note! No need to complete uAbsTimeout for RTSEMWAIT_FLAGS_RELATIVE in this path. */
    }
    else
    {
        /* ASSUMES RTTimeSystemNanoTS() == RTTimeNanoTS() == clock_gettime(CLOCK_MONOTONIC). */
        if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
            uAbsTimeout += RTTimeSystemNanoTS();
        if (   sizeof(ts.tv_sec) < sizeof(uint64_t)
            && uAbsTimeout > UINT64_C(1000000000) * UINT32_MAX)
            return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);
        ts.tv_nsec = uAbsTimeout % UINT32_C(1000000000);
        ts.tv_sec  = uAbsTimeout / UINT32_C(1000000000);
    }

    /*
     * To business!
     */
    /* take mutex */
    int rc = pthread_mutex_lock(&pThis->Mutex);
    AssertMsgReturn(rc == 0, ("rc=%d pThis=%p\n", rc, pThis), RTErrConvertFromErrno(rc)); NOREF(rc);

    ASMAtomicIncU32(&pThis->cWaiters);
    for (;;)
    {
        /* check state. */
        uint32_t const u32State = pThis->u32State;
        if (u32State != EVENTMULTI_STATE_NOT_SIGNALED)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            rc = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc));
            return u32State == EVENTMULTI_STATE_SIGNALED
                 ? VINF_SUCCESS
                 : VERR_SEM_DESTROYED;
        }

        /* wait */
#ifdef RTSEMEVENTMULTI_STRICT
        RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       uTimeout / UINT32_C(1000000), RTTHREADSTATE_EVENT_MULTI, true);
            if (RT_FAILURE(rc))
            {
                ASMAtomicDecU32(&pThis->cWaiters);
                pthread_mutex_unlock(&pThis->Mutex);
                return rc;
            }
        }
#else
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
        rc = pthread_cond_timedwait(&pThis->Cond, &pThis->Mutex, &ts);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
        if (   rc
            && (   rc != EINTR /* according to SuS this function shall not return EINTR, but the Linux man page says differently. */
                || (fFlags & RTSEMWAIT_FLAGS_NORESUME)) )
        {
            AssertMsg(rc == ETIMEDOUT, ("Failed to wait on event multi sem %p, rc=%d.\n", pThis, rc));
            ASMAtomicDecU32(&pThis->cWaiters);
            int rc2 = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc2, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc2)); NOREF(rc2);
            return RTErrConvertFromErrno(rc);
        }

        /* check the absolute deadline. */
    }
}
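
/*
 * Illustrative sketch (not part of the implementation above): how a caller
 * might reach the timed-wait path through the public API named in the
 * doxygen references.  The RTSemEventMultiWaitEx signature and the exact
 * status codes are assumptions based on those references and on the flags
 * (RTSEMWAIT_FLAGS_RELATIVE, _MILLISECS, _NORESUME) used in the code above.
 */
#if 0 /* example only, assumes the usual iprt/semaphore.h declarations */
static int waitUpToOneSecond(RTSEMEVENTMULTI hEventMulti)
{
    /* Relative, millisecond-based wait; NORESUME means an interrupted wait
       is reported instead of being silently restarted. */
    int rc = RTSemEventMultiWaitEx(hEventMulti,
                                   RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS | RTSEMWAIT_FLAGS_NORESUME,
                                   1000 /* ms */);
    if (rc == VERR_TIMEOUT)
        return rc;          /* nobody signalled the event in time */
    return rc;              /* VINF_SUCCESS, VERR_INTERRUPTED, VERR_SEM_DESTROYED, ... */
}
#endif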
/**
 * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis       The event semaphore.
 * @param   fFlags      See RTSemEventWaitEx.
 * @param   uTimeout    See RTSemEventWaitEx.
 * @param   pSrcPos     The source code position of the wait.
 */
DECLINLINE(int) rtR0SemEventNtWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                                   PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    if (!pThis)
        return VERR_INVALID_PARAMETER;
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
    rtR0SemEventNtRetain(pThis);

    /*
     * Convert the timeout to a relative one because KeWaitForSingleObject
     * takes system time instead of interrupt time as input for absolute
     * timeout specifications.  So, we're best off giving it relative time.
     *
     * Lazy bird converts uTimeout to relative nanoseconds and then to Nt time.
     */
    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
    {
        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
            uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                     ? uTimeout * UINT32_C(1000000)
                     : UINT64_MAX;
        if (uTimeout == UINT64_MAX)
            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
        else
        {
            if (fFlags & RTSEMWAIT_FLAGS_ABSOLUTE)
            {
                uint64_t u64Now = RTTimeSystemNanoTS();
                uTimeout = u64Now < uTimeout ? uTimeout - u64Now : 0;
            }
        }
    }

    /*
     * Wait for it.
     * We're assuming interruptible waits should happen at UserMode level.
     */
    NTSTATUS        rcNt;
    BOOLEAN         fInterruptible = !!(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE);
    KPROCESSOR_MODE WaitMode       = fInterruptible ? UserMode : KernelMode;
    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
        rcNt = KeWaitForSingleObject(&pThis->Event, Executive, WaitMode, fInterruptible, NULL);
    else
    {
        LARGE_INTEGER Timeout;
        Timeout.QuadPart = -(int64_t)(uTimeout / 100);
        rcNt = KeWaitForSingleObject(&pThis->Event, Executive, WaitMode, fInterruptible, &Timeout);
    }

    int rc;
    if (pThis->u32Magic == RTSEMEVENT_MAGIC)
    {
        switch (rcNt)
        {
            case STATUS_SUCCESS:
                rc = VINF_SUCCESS;
                break;
            case STATUS_ALERTED:
                rc = VERR_INTERRUPTED;
                break;
            case STATUS_USER_APC:
                rc = VERR_INTERRUPTED;
                break;
            case STATUS_TIMEOUT:
                rc = VERR_TIMEOUT;
                break;
            default:
                AssertMsgFailed(("pThis->u32Magic=%RX32 pThis=%p: wait returned %lx!\n",
                                 pThis->u32Magic, pThis, (long)rcNt));
                rc = VERR_INTERNAL_ERROR_4;
                break;
        }
    }
    else
        rc = VERR_SEM_DESTROYED;

    rtR0SemEventNtRelease(pThis);
    return rc;
}
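
/*
 * Worked example (illustrative only): the relative-timeout conversion the
 * worker above performs for KeWaitForSingleObject.  NT interprets a negative
 * LARGE_INTEGER as a relative interval in 100ns units, which is why the code
 * negates uTimeout / 100.  The helper name is hypothetical; the NT types come
 * from the DDK headers already included by this file.
 */
#if 0 /* example only */
static LARGE_INTEGER exampleNtRelativeTimeout(void)
{
    uint64_t      cNsTimeout = UINT64_C(30) * 1000 * 1000;  /* 30 ms requested in nanoseconds ... */
    LARGE_INTEGER Timeout;
    Timeout.QuadPart = -(int64_t)(cNsTimeout / 100);         /* ... becomes -300000: 300000 * 100ns = 30 ms, relative. */
    return Timeout;
}
#endif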
/**
 * Internal worker for the mutex request (wait) logic.
 *
 * @returns VBox status code.
 * @param   hMutexSem       The mutex handle.
 * @param   cMillies        Number of milliseconds to wait, or RT_INDEFINITE_WAIT.
 * @param   fAutoResume     Whether to resume the wait after an EINTR wakeup.
 * @param   pSrcPos         The source code position of the wait, can be NULL.
 */
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fAutoResume, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
        &&  pThis->cNestings > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

#ifdef RTSEMMUTEX_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies)
    {
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorRec, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif

    /*
     * Convert timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End     = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout   = &ts;
    }

    /*
     * Lock the mutex.
     * Optimize for the uncontended case (makes 1-2 ns difference).
     */
    if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&pThis->iState, 1, 0)))
    {
        for (;;)
        {
            int32_t iOld = ASMAtomicXchgS32(&pThis->iState, 2);

            /*
             * Was the lock released in the meantime? This is unlikely (but possible).
             */
            if (RT_UNLIKELY(iOld == 0))
                break;

            /*
             * Go to sleep.
             */
            if (pTimeout && ( pTimeout->tv_sec || pTimeout->tv_nsec ))
            {
#ifdef RTSEMMUTEX_STRICT
                int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
                if (RT_FAILURE(rc9))
                    return rc9;
#else
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
            }

            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 2, pTimeout, NULL, 0);

            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);

            if (RT_UNLIKELY(pThis->u32Magic != RTSEMMUTEX_MAGIC))
                return VERR_SEM_DESTROYED;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }

            if (rc == 0)
                /* we'll leave the loop now unless another thread is faster */;
            else if (rc == -EWOULDBLOCK)
                /* retry with new value. */;
            else if (rc == -EINTR)
            {
                if (!fAutoResume)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }

            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64End - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                {
                    rc = VERR_TIMEOUT;
                    break;
                }
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
        }

        /*
         * When leaving this loop, iState is set to 2.  This means that we gained the
         * lock and that there are _possibly_ some waiters.  We don't know exactly, as
         * another thread might have entered this loop at nearly the same time.  Therefore
         * we will call futex_wakeup once too often (if _no_ other thread entered this loop).
         * The key problem is the simple futex_wait test for x != y (iState != 2 in our case).
         */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNestings, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
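
/*
 * Minimal sketch of the 0/1/2 futex protocol the worker above is built on:
 * 0 = free, 1 = owned with no waiters, 2 = owned with possible waiters.
 * This is an assumption-laden illustration (Linux, GCC/Clang __atomic
 * builtins, raw syscall instead of IPRT wrappers) rather than the IPRT
 * implementation, which layers timeouts, nesting and lock validation on top.
 * The unlock side only needs FUTEX_WAKE when the state was 2.
 */
#if 0 /* example only */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

static void sketchFutexLock(int32_t volatile *piState)
{
    /* Fast path: 0 -> 1 without any syscall. */
    int32_t iExpected = 0;
    if (__atomic_compare_exchange_n(piState, &iExpected, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        return;

    /* Contended path: mark "possible waiters" and sleep while someone owns it.
       FUTEX_WAIT returns immediately if *piState is no longer 2, so we recheck
       by exchanging again after every wakeup. */
    while (__atomic_exchange_n(piState, 2, __ATOMIC_ACQUIRE) != 0)
        syscall(SYS_futex, piState, FUTEX_WAIT, 2, NULL, NULL, 0);
}

static void sketchFutexUnlock(int32_t volatile *piState)
{
    /* Only wake somebody if there may be waiters (state was 2). */
    if (__atomic_exchange_n(piState, 0, __ATOMIC_RELEASE) == 2)
        syscall(SYS_futex, piState, FUTEX_WAKE, 1, NULL, NULL, 0);
}
#endif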
/**
 * Service request callback function.
 *
 * @returns VBox status code.
 * @param   pSession    The caller's session.
 * @param   uOperation  The operation to perform.
 * @param   u64Arg      64-bit integer argument.
 * @param   pReqHdr     The request header.  Input / Output.  Optional.
 */
DECLEXPORT(int) TSTR0ThreadPreemptionSrvReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
                                                   uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
{
    if (u64Arg)
        return VERR_INVALID_PARAMETER;
    if (!VALID_PTR(pReqHdr))
        return VERR_INVALID_PARAMETER;
    char   *pszErr = (char *)(pReqHdr + 1);
    size_t  cchErr = pReqHdr->cbReq - sizeof(*pReqHdr);
    if (cchErr < 32 || cchErr >= 0x10000)
        return VERR_INVALID_PARAMETER;
    *pszErr = '\0';

    /*
     * The big switch.
     */
    switch (uOperation)
    {
        case TSTR0THREADPREMEPTION_SANITY_OK:
            break;

        case TSTR0THREADPREMEPTION_SANITY_FAILURE:
            RTStrPrintf(pszErr, cchErr, "!42failure42%1024s", "");
            break;

        case TSTR0THREADPREMEPTION_BASIC:
        {
            if (!ASMIntAreEnabled())
                RTStrPrintf(pszErr, cchErr, "!Interrupts disabled");
            else if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns false by default");
            else
            {
                RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&State);
                if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                    RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after RTThreadPreemptDisable");
                else if (!ASMIntAreEnabled())
                    RTStrPrintf(pszErr, cchErr, "!Interrupts disabled");
                RTThreadPreemptRestore(&State);
            }
            break;
        }

        case TSTR0THREADPREMEPTION_IS_PENDING:
        {
            RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&State);
            if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
            {
                if (ASMIntAreEnabled())
                {
                    uint64_t    u64StartTS    = RTTimeNanoTS();
                    uint64_t    u64StartSysTS = RTTimeSystemNanoTS();
                    uint64_t    cLoops        = 0;
                    uint64_t    cNanosSysElapsed;
                    uint64_t    cNanosElapsed;
                    bool        fPending;
                    do
                    {
                        fPending         = RTThreadPreemptIsPending(NIL_RTTHREAD);
                        cNanosElapsed    = RTTimeNanoTS()       - u64StartTS;
                        cNanosSysElapsed = RTTimeSystemNanoTS() - u64StartSysTS;
                        cLoops++;
                    } while (   !fPending
                             && cNanosElapsed    < UINT64_C(2)*1000U*1000U*1000U
                             && cNanosSysElapsed < UINT64_C(2)*1000U*1000U*1000U
                             && cLoops < 100U*_1M);
                    if (!fPending)
                        RTStrPrintf(pszErr, cchErr, "!Preempt not pending after %'llu loops / %'llu ns / %'llu ns (sys)",
                                    cLoops, cNanosElapsed, cNanosSysElapsed);
                    else if (cLoops == 1)
                        RTStrPrintf(pszErr, cchErr, "!cLoops=1\n");
                    else
                        RTStrPrintf(pszErr, cchErr, "RTThreadPreemptIsPending returned true after %'llu loops / %'llu ns / %'llu ns (sys)",
                                    cLoops, cNanosElapsed, cNanosSysElapsed);
                }
                else
                    RTStrPrintf(pszErr, cchErr, "!Interrupts disabled");
            }
            else
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after RTThreadPreemptDisable");
            RTThreadPreemptRestore(&State);
            break;
        }

        case TSTR0THREADPREMEPTION_NESTED:
        {
            bool const fDefault = RTThreadPreemptIsEnabled(NIL_RTTHREAD);
            RTTHREADPREEMPTSTATE State1 = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&State1);
            if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
            {
                RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&State2);
                if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                {
                    RTTHREADPREEMPTSTATE State3 = RTTHREADPREEMPTSTATE_INITIALIZER;
                    RTThreadPreemptDisable(&State3);
                    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                        RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 3rd RTThreadPreemptDisable");

                    RTThreadPreemptRestore(&State3);
                    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD) && !*pszErr)
                        RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 1st RTThreadPreemptRestore");
                }
                else
                    RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 2nd RTThreadPreemptDisable");

                RTThreadPreemptRestore(&State2);
                if (RTThreadPreemptIsEnabled(NIL_RTTHREAD) && !*pszErr)
                    RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 2nd RTThreadPreemptRestore");
            }
            else
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 1st RTThreadPreemptDisable");

            RTThreadPreemptRestore(&State1);
            if (RTThreadPreemptIsEnabled(NIL_RTTHREAD) != fDefault && !*pszErr)
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns false after 3rd RTThreadPreemptRestore");
            break;
        }

        default:
            RTStrPrintf(pszErr, cchErr, "!Unknown test #%d", uOperation);
            break;
    }

    /* The error indicator is the '!' in the message buffer. */
    return VINF_SUCCESS;
}
int main()
{
    /*
     * Init.
     */
    RTTEST hTest;
    RTEXITCODE rcExit = RTTestInitExAndCreate(0, NULL, RTR3INIT_FLAGS_SUPLIB, "tstRTTime", &hTest);
    if (rcExit != RTEXITCODE_SUCCESS)
        return rcExit;
    RTTestBanner(hTest);

    /*
     * RTTimeNanoTS() shall never return something which
     * is less than or equal to the return of the previous call.
     */
    RTTimeSystemNanoTS(); RTTimeNanoTS(); RTThreadYield();
    uint64_t u64RTStartTS = RTTimeNanoTS();
    uint64_t u64OSStartTS = RTTimeSystemNanoTS();

    uint32_t i;
    uint64_t u64Prev = RTTimeNanoTS();
    for (i = 0; i < 100*_1M; i++)
    {
        uint64_t u64 = RTTimeNanoTS();
        if (u64 <= u64Prev)
        {
            /** @todo wrapping detection. */
            RTTestFailed(hTest, "i=%#010x u64=%#llx u64Prev=%#llx (1)\n", i, u64, u64Prev);
            if (RTTestErrorCount(hTest) >= 256)
                break;
            RTThreadYield();
            u64 = RTTimeNanoTS();
        }
        else if (u64 - u64Prev > 1000000000 /* 1sec */)
        {
            RTTestFailed(hTest, "i=%#010x u64=%#llx u64Prev=%#llx delta=%lld\n", i, u64, u64Prev, u64 - u64Prev);
            if (RTTestErrorCount(hTest) >= 256)
                break;
            RTThreadYield();
            u64 = RTTimeNanoTS();
        }
        if (!(i & (_1M*2 - 1)))
        {
            RTTestPrintf(hTest, RTTESTLVL_INFO, "i=%#010x u64=%#llx u64Prev=%#llx delta=%lld\n", i, u64, u64Prev, u64 - u64Prev);
            RTThreadYield();
            u64 = RTTimeNanoTS();
        }
        u64Prev = u64;
    }

    RTTimeSystemNanoTS(); RTTimeNanoTS(); RTThreadYield();
    uint64_t u64RTElapsedTS = RTTimeNanoTS();
    uint64_t u64OSElapsedTS = RTTimeSystemNanoTS();
    u64RTElapsedTS -= u64RTStartTS;
    u64OSElapsedTS -= u64OSStartTS;
    int64_t i64Diff = u64OSElapsedTS >= u64RTElapsedTS ? u64OSElapsedTS - u64RTElapsedTS : u64RTElapsedTS - u64OSElapsedTS;
    if (i64Diff > (int64_t)(u64OSElapsedTS / 1000))
        RTTestFailed(hTest, "total time differs too much! u64OSElapsedTS=%#llx u64RTElapsedTS=%#llx delta=%lld\n",
                     u64OSElapsedTS, u64RTElapsedTS, u64OSElapsedTS - u64RTElapsedTS);
    else
    {
        if (u64OSElapsedTS >= u64RTElapsedTS)
            RTTestValue(hTest, "Total time delta", u64OSElapsedTS - u64RTElapsedTS, RTTESTUNIT_NS);
        else
            RTTestValue(hTest, "Total time delta", u64RTElapsedTS - u64OSElapsedTS, RTTESTUNIT_NS);
        RTTestPrintf(hTest, RTTESTLVL_INFO, "total time difference: u64OSElapsedTS=%#llx u64RTElapsedTS=%#llx delta=%lld\n",
                     u64OSElapsedTS, u64RTElapsedTS, u64OSElapsedTS - u64RTElapsedTS);
    }

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) /** @todo This isn't really x86 or AMD64 specific... */
    RTTestValue(hTest, "RTTimeDbgSteps",      RTTimeDbgSteps(),                          RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgSteps pp",   ((uint64_t)RTTimeDbgSteps() * 1000) / i,   RTTESTUNIT_PP1K);
    RTTestValue(hTest, "RTTimeDbgExpired",    RTTimeDbgExpired(),                        RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgExpired pp", ((uint64_t)RTTimeDbgExpired() * 1000) / i, RTTESTUNIT_PP1K);
    RTTestValue(hTest, "RTTimeDbgBad",        RTTimeDbgBad(),                            RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgBad pp",     ((uint64_t)RTTimeDbgBad() * 1000) / i,     RTTESTUNIT_PP1K);
    RTTestValue(hTest, "RTTimeDbgRaces",      RTTimeDbgRaces(),                          RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgRaces pp",   ((uint64_t)RTTimeDbgRaces() * 1000) / i,   RTTESTUNIT_PP1K);
#endif

    return RTTestSummaryAndDestroy(hTest);
}