RTDECL(uint64_t) RTTimeSystemNanoTS(void)
{
    return RTTimeNanoTS();
}
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    /*
     * Can't wait if there are no requests around.
     */
    if (RT_UNLIKELY(ASMAtomicUoReadS32(&pCtxInt->cRequests) == 0))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec    *pTimeout = NULL;
    struct timespec     Timeout = {0,0};
    uint64_t            StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    /*
     * Loop until we're woken up, hit an error (incl. timeout), or
     * have collected the desired number of requests.
     */
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;
    while (!pCtxInt->fWokenUp)
    {
        LNXKAIOIOEVENT  aPortEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        int             cRequestsToWait = RT_MIN(cReqs, AIO_MAXIMUM_REQUESTS_PER_CONTEXT);

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rc = rtFileAsyncIoLinuxGetEvents(pCtxInt->AioContext, cMinReqs, cRequestsToWait, &aPortEvents[0], pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
        if (RT_FAILURE(rc))
            break;
        uint32_t const cDone = rc;
        rc = VINF_SUCCESS;

        /*
         * Process received events / requests.
         */
        for (uint32_t i = 0; i < cDone; i++)
        {
            /*
             * The iocb is the first element in our request structure,
             * so we can safely cast it directly to the handle (see above).
             */
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aPortEvents[i].pIoCB;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /** @todo aeichner: The rc field contains the result code
             *        like you can find in errno for the normal read/write ops.
             *        But there is a second field called rc2. I don't know the
             *        purpose for it yet. */
            if (RT_UNLIKELY(aPortEvents[i].rc < 0))
                pReqInt->Rc = RTErrConvertFromErrno(-aPortEvents[i].rc); /* Convert to positive value. */
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = aPortEvents[i].rc;
            }

            /* Mark the request as finished. */
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done yet? If not, advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (    pCtxInt->fWokenUp
        &&  RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
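/*
 * A minimal standalone sketch, not part of the IPRT sources: the
 * millisecond-to-timespec conversion and the remaining-timeout recomputation
 * above reappear almost verbatim in the kqueue variant of RTFileAioCtxWait
 * further down. The helper names rtMsToTimespec and rtMsRemaining are
 * hypothetical, not IPRT APIs.
 */
#include <stdint.h>
#include <time.h>

/* Hypothetical helper: convert a millisecond interval to a timespec. */
static void rtMsToTimespec(uint64_t cMillies, struct timespec *pTs)
{
    pTs->tv_sec  = cMillies / 1000;
    pTs->tv_nsec = cMillies % 1000 * 1000000;
}

/* Hypothetical helper: milliseconds left of cMillies after waiting from
   StartNanoTS to NanoTS; 0 means the caller should return VERR_TIMEOUT. */
static uint64_t rtMsRemaining(uint64_t cMillies, uint64_t StartNanoTS, uint64_t NanoTS)
{
    uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
    return cMilliesElapsed >= cMillies ? 0 : cMillies - cMilliesElapsed;
}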
RTDECL(uint64_t) RTTimeMilliTS(void)
{
    return RTTimeNanoTS() / 1000000;
}
int main()
{
    /*
     * Init runtime
     */
    unsigned cErrors = 0;
    int rc = RTR3Init();
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstTimer: RTR3Init() -> %d\n", rc);
        return 1;
    }

    /*
     * Check that the clock is reliable.
     */
    RTPrintf("tstTimer: TESTING - RTTimeNanoTS() for 2sec\n");
    uint64_t uTSMillies = RTTimeMilliTS();
    uint64_t uTSBegin = RTTimeNanoTS();
    uint64_t uTSLast = uTSBegin;
    uint64_t uTSDiff;
    uint64_t cIterations = 0;
    do
    {
        uint64_t uTS = RTTimeNanoTS();
        if (uTS < uTSLast)
        {
            RTPrintf("tstTimer: FAILURE - RTTimeNanoTS() is unreliable. uTS=%RU64 uTSLast=%RU64\n", uTS, uTSLast);
            cErrors++;
        }
        if (++cIterations > (2*1000*1000*1000))
        {
            RTPrintf("tstTimer: FAILURE - RTTimeNanoTS() is unreliable. cIterations=%RU64 uTS=%RU64 uTSBegin=%RU64\n",
                     cIterations, uTS, uTSBegin);
            return 1;
        }
        uTSLast = uTS;
        uTSDiff = uTSLast - uTSBegin;
    } while (uTSDiff < (2*1000*1000*1000));
    uTSMillies = RTTimeMilliTS() - uTSMillies;
    if (uTSMillies >= 2500 || uTSMillies <= 1500)
    {
        RTPrintf("tstTimer: FAILURE - uTSMillies=%RU64 uTSBegin=%RU64 uTSLast=%RU64 uTSDiff=%RU64\n",
                 uTSMillies, uTSBegin, uTSLast, uTSDiff);
        cErrors++;
    }
    if (!cErrors)
        RTPrintf("tstTimer: OK - RTTimeNanoTS()\n");

    /*
     * Tests.
     */
    static struct
    {
        unsigned uMicroInterval;
        unsigned uMilliesWait;
        unsigned cLower;
        unsigned cUpper;
    } aTests[] =
    {
        { 32000, 2000, 0, 0 },
        { 20000, 2000, 0, 0 },
        { 10000, 2000, 0, 0 },
        {  8000, 2000, 0, 0 },
        {  2000, 2000, 0, 0 },
        {  1000, 2000, 0, 0 },
        {   500, 5000, 0, 0 },
        {   200, 5000, 0, 0 },
        {   100, 5000, 0, 0 }
    };

    unsigned i = 0;
    for (i = 0; i < RT_ELEMENTS(aTests); i++)
    {
        aTests[i].cLower = (aTests[i].uMilliesWait*1000 - aTests[i].uMilliesWait*100) / aTests[i].uMicroInterval;
        aTests[i].cUpper = (aTests[i].uMilliesWait*1000 + aTests[i].uMilliesWait*100) / aTests[i].uMicroInterval;
        gu64Norm = aTests[i].uMicroInterval*1000;
        RTPrintf("\n"
                 "tstTimer: TESTING - %d us interval, %d ms wait, expects %d-%d ticks.\n",
                 aTests[i].uMicroInterval, aTests[i].uMilliesWait, aTests[i].cLower, aTests[i].cUpper);

        /*
         * Create a timer with the interval under test.
         */
        gcTicks = 0;
        PRTTIMER pTimer;
        gu64Max = 0;
        gu64Min = UINT64_MAX;
        gu64Prev = 0;
        RT_ZERO(cFrequency);
#ifdef RT_OS_WINDOWS
        if (aTests[i].uMicroInterval < 1000)
            continue;
        rc = RTTimerCreate(&pTimer, aTests[i].uMicroInterval / 1000, TimerCallback, NULL);
#else
        rc = RTTimerCreateEx(&pTimer, aTests[i].uMicroInterval * (uint64_t)1000, 0, TimerCallback, NULL);
#endif
        if (RT_FAILURE(rc))
        {
            RTPrintf("tstTimer: FAILURE - RTTimerCreateEx(,%u*1M,,,) -> %Rrc\n", aTests[i].uMicroInterval, rc);
            cErrors++;
            continue;
        }

        /*
         * Start the timer and actively wait for the requested test period.
         */
        uTSBegin = RTTimeNanoTS();
#ifndef RT_OS_WINDOWS
        rc = RTTimerStart(pTimer, 0);
        if (RT_FAILURE(rc))
        {
            RTPrintf("tstTimer: FAILURE - RTTimerStart(,0) -> %Rrc\n", rc);
            cErrors++;
        }
#endif
        while (RTTimeNanoTS() - uTSBegin < (uint64_t)aTests[i].uMilliesWait * 1000000)
            /* nothing */;

        /* destroy the timer */
        uint64_t uTSEnd = RTTimeNanoTS();
        uTSDiff = uTSEnd - uTSBegin;
        rc = RTTimerDestroy(pTimer);
        if (RT_FAILURE(rc))
        {
            RTPrintf("tstTimer: FAILURE - RTTimerDestroy() -> %d gcTicks=%d\n", rc, gcTicks);
            cErrors++;
        }

        RTPrintf("tstTimer: uTS=%RI64 (%RU64 - %RU64)\n", uTSDiff, uTSBegin, uTSEnd);
        unsigned cTicks = gcTicks;
        RTThreadSleep(aTests[i].uMicroInterval/1000 * 3);
        if (gcTicks != cTicks)
        {
            RTPrintf("tstTimer: FAILURE - RTTimerDestroy() didn't really stop the timer! gcTicks=%d cTicks=%d\n",
                     gcTicks, cTicks);
            cErrors++;
            continue;
        }

        /*
         * Check the number of ticks.
         */
        if (gcTicks < aTests[i].cLower)
        {
            RTPrintf("tstTimer: FAILURE - Too few ticks gcTicks=%d (expected %d-%d)", gcTicks, aTests[i].cLower, aTests[i].cUpper);
            cErrors++;
        }
        else if (gcTicks > aTests[i].cUpper)
        {
            RTPrintf("tstTimer: FAILURE - Too many ticks gcTicks=%d (expected %d-%d)", gcTicks, aTests[i].cLower, aTests[i].cUpper);
            cErrors++;
        }
        else
            RTPrintf("tstTimer: OK - gcTicks=%d", gcTicks);
        RTPrintf(" min=%RU64 max=%RU64\n", gu64Min, gu64Max);

        for (int j = 0; j < (int)RT_ELEMENTS(cFrequency); j++)
        {
            uint32_t len = cFrequency[j] * 70 / gcTicks;
            int32_t  deviation = j - (int)RT_ELEMENTS(cFrequency) / 2;
            uint64_t u64FreqPercent = (uint64_t)cFrequency[j] * 10000 / gcTicks;
            uint64_t u64FreqPercentFrac = u64FreqPercent % 100;
            u64FreqPercent = u64FreqPercent / 100;
            RTPrintf("%+4d%c %6u %3llu.%02llu%% ",
                     deviation, deviation == 0 ? ' ' : '%', cFrequency[j], u64FreqPercent, u64FreqPercentFrac);
            for (unsigned k = 0; k < len; k++)
                RTPrintf("*");
            RTPrintf("\n");
        }
    }

    /*
     * Summary.
     */
    if (!cErrors)
        RTPrintf("tstTimer: SUCCESS\n");
    else
        RTPrintf("tstTimer: FAILURE %d errors\n", cErrors);
    return !!cErrors;
}
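/*
 * A plausible minimal sketch of the callback and globals main() above relies
 * on; the shipped tstTimer.cpp defines them elsewhere, so the histogram size,
 * the percent-based bucket index, and the exact callback signature (see
 * iprt/timer.h) are assumptions, not the original code.
 */
static volatile uint32_t gcTicks;
static uint64_t          gu64Min, gu64Max, gu64Prev, gu64Norm;
static uint32_t          cFrequency[19]; /* deviation histogram, middle bucket = on time */

static DECLCALLBACK(void) TimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    uint64_t u64Now = RTTimeNanoTS();
    gcTicks++;
    if (gu64Prev)
    {
        uint64_t u64Delta = u64Now - gu64Prev;
        if (u64Delta < gu64Min)
            gu64Min = u64Delta;
        if (u64Delta > gu64Max)
            gu64Max = u64Delta;

        /* Bucket by percent deviation from the nominal interval, clamped to the table. */
        int64_t iBucket = (int64_t)(u64Delta * 100 / gu64Norm) - 100 + (int64_t)(RT_ELEMENTS(cFrequency) / 2);
        if (iBucket < 0)
            iBucket = 0;
        else if (iBucket >= (int64_t)RT_ELEMENTS(cFrequency))
            iBucket = RT_ELEMENTS(cFrequency) - 1;
        cFrequency[iBucket]++;
    }
    gu64Prev = u64Now;
    NOREF(pTimer); NOREF(pvUser); NOREF(iTick);
}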
RTDECL(uint64_t) RTTimeSystemMilliTS(void)
{
    return RTTimeNanoTS() / RT_NS_1MS;
}
int main()
{
    /*
     * Init.
     */
    RTTEST hTest;
    RTEXITCODE rcExit = RTTestInitExAndCreate(0, NULL, RTR3INIT_FLAGS_SUPLIB, "tstRTTime", &hTest);
    if (rcExit != RTEXITCODE_SUCCESS)
        return rcExit;
    RTTestBanner(hTest);

    /*
     * RTTimeNanoTS() shall never return something which
     * is less than or equal to the return of the previous call.
     */
    RTTimeSystemNanoTS(); RTTimeNanoTS(); RTThreadYield();
    uint64_t u64RTStartTS = RTTimeNanoTS();
    uint64_t u64OSStartTS = RTTimeSystemNanoTS();

    uint32_t i;
    uint64_t u64Prev = RTTimeNanoTS();
    for (i = 0; i < 100*_1M; i++)
    {
        uint64_t u64 = RTTimeNanoTS();
        if (u64 <= u64Prev)
        {
            /** @todo wrapping detection. */
            RTTestFailed(hTest, "i=%#010x u64=%#llx u64Prev=%#llx (1)\n", i, u64, u64Prev);
            if (RTTestErrorCount(hTest) >= 256)
                break;
            RTThreadYield();
            u64 = RTTimeNanoTS();
        }
        else if (u64 - u64Prev > 1000000000 /* 1sec */)
        {
            RTTestFailed(hTest, "i=%#010x u64=%#llx u64Prev=%#llx delta=%lld\n", i, u64, u64Prev, u64 - u64Prev);
            if (RTTestErrorCount(hTest) >= 256)
                break;
            RTThreadYield();
            u64 = RTTimeNanoTS();
        }
        if (!(i & (_1M*2 - 1)))
        {
            RTTestPrintf(hTest, RTTESTLVL_INFO, "i=%#010x u64=%#llx u64Prev=%#llx delta=%lld\n",
                         i, u64, u64Prev, u64 - u64Prev);
            RTThreadYield();
            u64 = RTTimeNanoTS();
        }
        u64Prev = u64;
    }

    RTTimeSystemNanoTS(); RTTimeNanoTS(); RTThreadYield();
    uint64_t u64RTElapsedTS = RTTimeNanoTS();
    uint64_t u64OSElapsedTS = RTTimeSystemNanoTS();
    u64RTElapsedTS -= u64RTStartTS;
    u64OSElapsedTS -= u64OSStartTS;
    int64_t i64Diff = u64OSElapsedTS >= u64RTElapsedTS
                    ? u64OSElapsedTS - u64RTElapsedTS
                    : u64RTElapsedTS - u64OSElapsedTS;
    if (i64Diff > (int64_t)(u64OSElapsedTS / 1000))
        RTTestFailed(hTest, "total time differs too much! u64OSElapsedTS=%#llx u64RTElapsedTS=%#llx delta=%lld\n",
                     u64OSElapsedTS, u64RTElapsedTS, u64OSElapsedTS - u64RTElapsedTS);
    else
    {
        if (u64OSElapsedTS >= u64RTElapsedTS)
            RTTestValue(hTest, "Total time delta", u64OSElapsedTS - u64RTElapsedTS, RTTESTUNIT_NS);
        else
            RTTestValue(hTest, "Total time delta", u64RTElapsedTS - u64OSElapsedTS, RTTESTUNIT_NS);
        RTTestPrintf(hTest, RTTESTLVL_INFO, "total time difference: u64OSElapsedTS=%#llx u64RTElapsedTS=%#llx delta=%lld\n",
                     u64OSElapsedTS, u64RTElapsedTS, u64OSElapsedTS - u64RTElapsedTS);
    }

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) /** @todo This isn't really x86 or AMD64 specific... */
    RTTestValue(hTest, "RTTimeDbgSteps",      RTTimeDbgSteps(),                          RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgSteps pp",   ((uint64_t)RTTimeDbgSteps() * 1000) / i,   RTTESTUNIT_PP1K);
    RTTestValue(hTest, "RTTimeDbgExpired",    RTTimeDbgExpired(),                        RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgExpired pp", ((uint64_t)RTTimeDbgExpired() * 1000) / i, RTTESTUNIT_PP1K);
    RTTestValue(hTest, "RTTimeDbgBad",        RTTimeDbgBad(),                            RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgBad pp",     ((uint64_t)RTTimeDbgBad() * 1000) / i,     RTTESTUNIT_PP1K);
    RTTestValue(hTest, "RTTimeDbgRaces",      RTTimeDbgRaces(),                          RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgRaces pp",   ((uint64_t)RTTimeDbgRaces() * 1000) / i,   RTTESTUNIT_PP1K);
#endif

    return RTTestSummaryAndDestroy(hTest);
}
/**
 * Receive thread.
 * This is reading stuff from the network.
 */
DECLCALLBACK(int) ReceiveThread(RTTHREAD hThreadSelf, void *pvArg)
{
    uint32_t    cbReceived  = 0;
    uint32_t    cLostFrames = 0;
    uint32_t    iFrame      = UINT32_MAX;
    PMYARGS     pArgs       = (PMYARGS)pvArg;
    NOREF(hThreadSelf);

    for (;;)
    {
        /*
         * Read data.
         */
        while (IntNetRingHasMoreToRead(&pArgs->pBuf->Recv))
        {
            uint8_t     abBuf[16384 + 1024];
            MYFRAMEHDR *pHdr = (MYFRAMEHDR *)&abBuf[0];
            uint32_t    cb   = IntNetRingReadAndSkipFrame(&pArgs->pBuf->Recv, abBuf);

            /* check for termination frame. */
            if (    pHdr->iFrame   == 0xffffdead
                &&  pHdr->auEos[0] == 0xffffdead
                &&  pHdr->auEos[1] == 0xffffdead
                &&  pHdr->auEos[2] == 0xffffdead)
            {
                pArgs->u64End = RTTimeNanoTS();
                RTThreadSleep(10);
                RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                             "receiver thread %.6Rhxs terminating.\n"
                             "  iFrame=%u  cb=%'u  c=%'u  %'uKB/s  %'ufps  cLost=%'u \n",
                             &pArgs->Mac, iFrame, cbReceived, iFrame - cLostFrames,
                             (unsigned)(cbReceived * 1000000000.0 / 1024 / (pArgs->u64End - pArgs->u64Start)),
                             (unsigned)((iFrame - cLostFrames) * 1000000000.0 / (pArgs->u64End - pArgs->u64Start)),
                             cLostFrames);
                return VINF_SUCCESS;
            }

            /* validate frame header */
            if (    pHdr->DstMac.au16[0] != pArgs->Mac.au16[0]
                ||  pHdr->DstMac.au16[1] != pArgs->Mac.au16[1]
                ||  pHdr->DstMac.au16[2] != pArgs->Mac.au16[2]
                ||  pHdr->SrcMac.au16[0] != pArgs->Mac.au16[0]
                ||  pHdr->SrcMac.au16[1] != pArgs->Mac.au16[1]
                ||  pHdr->SrcMac.au16[2] != (pArgs->Mac.au16[2] + 1) % 2)
            {
                RTTestFailed(g_hTest, "receiver thread %.6Rhxs received frame header: %.16Rhxs\n", &pArgs->Mac, abBuf);
            }

            /* frame stuff and stats. */
            int32_t off = pHdr->iFrame - (iFrame + 1);
            if (off)
            {
                if (off > 0)
                {
#ifndef IGNORE_LOST_FRAMES
                    RTTestFailed(g_hTest, "receiver thread %.6Rhxs: iFrame=%#x *puFrame=%#x off=%d\n",
                                 &pArgs->Mac, iFrame, pHdr->iFrame, off);
#endif
                    cLostFrames += off;
                }
                else
                {
                    cLostFrames++;
                    RTTestFailed(g_hTest, "receiver thread %.6Rhxs: iFrame=%#x *puFrame=%#x off=%d\n",
                                 &pArgs->Mac, iFrame, pHdr->iFrame, off);
                }
            }
            iFrame = pHdr->iFrame;
            cbReceived += cb;
        }

        /*
         * Wait for data.
         */
        int rc = IntNetR0IfWait(pArgs->hIf, g_pSession, RT_INDEFINITE_WAIT);
        switch (rc)
        {
            case VERR_INTERRUPTED:
            case VINF_SUCCESS:
                break;
            case VERR_SEM_DESTROYED:
                RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                             "receiver thread %.6Rhxs terminating. iFrame=%u cb=%'u c=%'u cLost=%'u\n",
                             &pArgs->Mac, iFrame, cbReceived, iFrame - cLostFrames, cLostFrames);
                return VINF_SUCCESS;
            default:
                RTTestFailed(g_hTest, "receiver thread %.6Rhxs got odd return value %Rrc! iFrame=%u cb=%'u c=%'u cLost=%'u\n",
                             &pArgs->Mac, rc, iFrame, cbReceived, iFrame - cLostFrames, cLostFrames);
                return rc;
        }
    }
}
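/*
 * A plausible layout for the frame header the receive loop inspects; the real
 * MYFRAMEHDR is defined elsewhere in this testcase, so the field order (wire
 * order: destination MAC first) and sizes here are assumptions inferred from
 * the accesses above, not the original definition.
 */
typedef struct MYFRAMEHDR
{
    RTMAC       DstMac;     /* destination MAC, checked against pArgs->Mac */
    RTMAC       SrcMac;     /* source MAC of the peer interface */
    uint32_t    iFrame;     /* sequence number; 0xffffdead marks termination */
    uint32_t    auEos[3];   /* termination magic, all 0xffffdead on EOS */
} MYFRAMEHDR;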
static void Test4(unsigned cThreads, unsigned cSeconds, unsigned uWritePercent, bool fYield, bool fQuiet)
{
    unsigned i;
    uint64_t acIterations[32];
    RTTHREAD aThreads[RT_ELEMENTS(acIterations)];
    AssertRelease(cThreads <= RT_ELEMENTS(acIterations));

    RTTestSubF(g_hTest, "Test4 - %u threads, %u sec, %u%% writes, %syielding",
               cThreads, cSeconds, uWritePercent, fYield ? "" : "non-");

    /*
     * Init globals.
     */
    g_fYield = fYield;
    g_fQuiet = fQuiet;
    g_fTerminate = false;
    g_uWritePercent = uWritePercent;
    g_cConcurrentWriters = 0;
    g_cConcurrentReaders = 0;
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWCreate(&g_hSemRW), VINF_SUCCESS);

    /*
     * Create the threads and let them block on the semrw.
     */
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWRequestWrite(g_hSemRW, RT_INDEFINITE_WAIT), VINF_SUCCESS);

    for (i = 0; i < cThreads; i++)
    {
        acIterations[i] = 0;
        RTTEST_CHECK_RC_RETV(g_hTest, RTThreadCreateF(&aThreads[i], Test4Thread, &acIterations[i], 0,
                                                      RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "test-%u", i),
                             VINF_SUCCESS);
    }

    /*
     * Do the test run.
     */
    uint32_t cErrorsBefore = RTTestErrorCount(g_hTest);
    uint64_t u64StartTS = RTTimeNanoTS();
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_hSemRW), VINF_SUCCESS);
    RTThreadSleep(cSeconds * 1000);
    ASMAtomicWriteBool(&g_fTerminate, true);
    uint64_t ElapsedNS = RTTimeNanoTS() - u64StartTS;

    /*
     * Clean up the threads and semaphore.
     */
    for (i = 0; i < cThreads; i++)
        RTTEST_CHECK_RC(g_hTest, RTThreadWait(aThreads[i], 5000, NULL), VINF_SUCCESS);

    RTTEST_CHECK_MSG(g_hTest, g_cConcurrentWriters == 0, (g_hTest, "g_cConcurrentWriters=%u at end of test\n", g_cConcurrentWriters));
    RTTEST_CHECK_MSG(g_hTest, g_cConcurrentReaders == 0, (g_hTest, "g_cConcurrentReaders=%u at end of test\n", g_cConcurrentReaders));
    RTTEST_CHECK_RC(g_hTest, RTSemRWDestroy(g_hSemRW), VINF_SUCCESS);
    g_hSemRW = NIL_RTSEMRW;
    if (RTTestErrorCount(g_hTest) != cErrorsBefore)
        RTThreadSleep(100);

    /*
     * Collect and display the results.
     */
    uint64_t cItrTotal = acIterations[0];
    for (i = 1; i < cThreads; i++)
        cItrTotal += acIterations[i];

    uint64_t cItrNormal       = cItrTotal / cThreads;
    uint64_t cItrMinOK        = cItrNormal / 20; /* 5% */
    uint64_t cItrMaxDeviation = 0;
    for (i = 0; i < cThreads; i++)
    {
        uint64_t cItrDelta = RT_ABS((int64_t)(acIterations[i] - cItrNormal));
        if (acIterations[i] < cItrMinOK)
            RTTestFailed(g_hTest, "Thread %u did less than 5%% of the iterations - %llu (it) vs. %llu (5%%) - %llu%%\n",
                         i, acIterations[i], cItrMinOK, cItrDelta * 100 / cItrNormal);
        else if (cItrDelta > cItrNormal / 2)
            RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                         "Warning! Thread %u deviates by more than 50%% - %llu (it) vs. %llu (avg) - %llu%%\n",
                         i, acIterations[i], cItrNormal, cItrDelta * 100 / cItrNormal);
        if (cItrDelta > cItrMaxDeviation)
            cItrMaxDeviation = cItrDelta;
    }

    //RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
    //             "Threads: %u  Total: %llu  Per Sec: %llu  Avg: %llu ns  Max dev: %llu%%\n",
    //             cThreads,
    //             cItrTotal,
    //             cItrTotal / cSeconds,
    //             ElapsedNS / cItrTotal,
    //             cItrMaxDeviation * 100 / cItrNormal
    //             );
    RTTestValue(g_hTest, "Throughput", cItrTotal * UINT32_C(1000000000) / ElapsedNS, RTTESTUNIT_CALLS_PER_SEC);
    RTTestValue(g_hTest, "Max deviation", cItrMaxDeviation * 100 / cItrNormal, RTTESTUNIT_PCT);
}
void tstFileAioTestReadWriteBasic(RTFILE File, bool fWrite, void *pvTestBuf,
                                  size_t cbTestBuf, size_t cbTestFile, uint32_t cMaxReqsInFlight)
{
    /* Allocate request array. */
    RTFILEAIOREQ *paReqs;
    paReqs = (PRTFILEAIOREQ)RTTestGuardedAllocHead(g_hTest, cMaxReqsInFlight * sizeof(RTFILEAIOREQ));
    RTTESTI_CHECK_RETV(paReqs);
    RT_BZERO(paReqs, cMaxReqsInFlight * sizeof(RTFILEAIOREQ));

    /* Allocate array holding pointers to the data buffers. */
    void **papvBuf = (void **)RTTestGuardedAllocHead(g_hTest, cMaxReqsInFlight * sizeof(void *));
    RTTESTI_CHECK_RETV(papvBuf);

    /* Allocate the buffers. */
    for (unsigned i = 0; i < cMaxReqsInFlight; i++)
    {
        RTTESTI_CHECK_RC_OK_RETV(RTTestGuardedAlloc(g_hTest, cbTestBuf, PAGE_SIZE, true /*fHead*/, &papvBuf[i]));
        if (fWrite)
            memcpy(papvBuf[i], pvTestBuf, cbTestBuf);
        else
            RT_BZERO(papvBuf[i], cbTestBuf);
    }

    /* Allocate array holding completed requests. */
    RTFILEAIOREQ *paReqsCompleted;
    paReqsCompleted = (PRTFILEAIOREQ)RTTestGuardedAllocHead(g_hTest, cMaxReqsInFlight * sizeof(RTFILEAIOREQ));
    RTTESTI_CHECK_RETV(paReqsCompleted);
    RT_BZERO(paReqsCompleted, cMaxReqsInFlight * sizeof(RTFILEAIOREQ));

    /* Create a context and associate the file handle with it. */
    RTFILEAIOCTX hAioContext;
    RTTESTI_CHECK_RC_RETV(RTFileAioCtxCreate(&hAioContext, cMaxReqsInFlight, 0 /* fFlags */), VINF_SUCCESS);
    RTTESTI_CHECK_RC_RETV(RTFileAioCtxAssociateWithFile(hAioContext, File), VINF_SUCCESS);

    /* Initialize requests. */
    for (unsigned i = 0; i < cMaxReqsInFlight; i++)
        RTFileAioReqCreate(&paReqs[i]);

    RTFOFF off = 0;
    int cRuns = 0;
    uint64_t NanoTS = RTTimeNanoTS();
    size_t cbLeft = cbTestFile;
    while (cbLeft)
    {
        int rc;
        int cReqs = 0;

        for (unsigned i = 0; i < cMaxReqsInFlight; i++)
        {
            size_t cbTransfer = cbLeft < cbTestBuf ? cbLeft : cbTestBuf;
            if (!cbTransfer)
                break;

            if (fWrite)
                rc = RTFileAioReqPrepareWrite(paReqs[i], File, off, papvBuf[i], cbTransfer, papvBuf[i]);
            else
                rc = RTFileAioReqPrepareRead(paReqs[i], File, off, papvBuf[i], cbTransfer, papvBuf[i]);
            RTTESTI_CHECK_RC(rc, VINF_SUCCESS);

            cbLeft -= cbTransfer;
            off    += cbTransfer;
            cReqs++;
        }

        rc = RTFileAioCtxSubmit(hAioContext, paReqs, cReqs);
        RTTESTI_CHECK_MSG(rc == VINF_SUCCESS, ("Failed to submit tasks after %d runs. rc=%Rrc\n", cRuns, rc));
        if (rc != VINF_SUCCESS)
            break;

        /* Wait */
        uint32_t cCompleted = 0;
        RTTESTI_CHECK_RC(rc = RTFileAioCtxWait(hAioContext, cReqs, RT_INDEFINITE_WAIT,
                                               paReqsCompleted, cMaxReqsInFlight, &cCompleted),
                         VINF_SUCCESS);
        if (rc != VINF_SUCCESS)
            break;

        if (!fWrite)
        {
            for (uint32_t i = 0; i < cCompleted; i++)
            {
                /* Compare that we read the right stuff. */
                void *pvBuf = RTFileAioReqGetUser(paReqsCompleted[i]);
                RTTESTI_CHECK(pvBuf);

                size_t cbTransfered;
                RTTESTI_CHECK_RC(rc = RTFileAioReqGetRC(paReqsCompleted[i], &cbTransfered), VINF_SUCCESS);
                if (rc != VINF_SUCCESS)
                    break;
                RTTESTI_CHECK_MSG(cbTransfered == cbTestBuf, ("cbTransfered=%zd\n", cbTransfered));
                RTTESTI_CHECK_RC_OK(rc = (memcmp(pvBuf, pvTestBuf, cbTestBuf) == 0 ? VINF_SUCCESS : VERR_BAD_EXE_FORMAT));
                if (rc != VINF_SUCCESS)
                    break;
                memset(pvBuf, 0, cbTestBuf);
            }
        }
        cRuns++;
        if (RT_FAILURE(rc))
            break;
    }

    NanoTS = RTTimeNanoTS() - NanoTS;
    uint64_t SpeedKBs = (uint64_t)(cbTestFile / (NanoTS / 1000000000.0) / 1024);
    RTTestValue(g_hTest, "Throughput", SpeedKBs, RTTESTUNIT_KILOBYTES_PER_SEC);

    /* cleanup */
    for (unsigned i = 0; i < cMaxReqsInFlight; i++)
        RTTestGuardedFree(g_hTest, papvBuf[i]);
    RTTestGuardedFree(g_hTest, papvBuf);
    for (unsigned i = 0; i < cMaxReqsInFlight; i++)
        RTTESTI_CHECK_RC(RTFileAioReqDestroy(paReqs[i]), VINF_SUCCESS);
    RTTESTI_CHECK_RC(RTFileAioCtxDestroy(hAioContext), VINF_SUCCESS);
    RTTestGuardedFree(g_hTest, paReqs);
}
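/*
 * A condensed sketch of the RTFileAio lifecycle the test above exercises
 * (create context, create/prepare request, submit, wait, fetch per-request
 * status, destroy). Not from the test itself: error handling is collapsed
 * into asserts and the single-request setup is illustrative only.
 */
static int doOneAsyncWrite(RTFILE hFile, RTFOFF off, void *pvBuf, size_t cbBuf)
{
    RTFILEAIOCTX hCtx;
    int rc = RTFileAioCtxCreate(&hCtx, 1 /*cAioReqsMax*/, 0 /*fFlags*/);
    AssertRCReturn(rc, rc);
    rc = RTFileAioCtxAssociateWithFile(hCtx, hFile);
    AssertRC(rc);

    RTFILEAIOREQ hReq;
    rc = RTFileAioReqCreate(&hReq);
    AssertRC(rc);
    rc = RTFileAioReqPrepareWrite(hReq, hFile, off, pvBuf, cbBuf, NULL /*pvUser*/);
    AssertRC(rc);

    rc = RTFileAioCtxSubmit(hCtx, &hReq, 1);
    AssertRC(rc);

    uint32_t     cCompleted = 0;
    RTFILEAIOREQ hReqCompleted;
    rc = RTFileAioCtxWait(hCtx, 1, RT_INDEFINITE_WAIT, &hReqCompleted, 1, &cCompleted);
    AssertRC(rc);

    size_t cbTransfered = 0;
    rc = RTFileAioReqGetRC(hReqCompleted, &cbTransfered); /* per-request status; must be fetched to avoid leaks */

    RTFileAioReqDestroy(hReq);
    RTFileAioCtxDestroy(hCtx);
    return rc;
}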
/**
 * Initializes the guest object.
 */
HRESULT Guest::init(Console *aParent)
{
    LogFlowThisFunc(("aParent=%p\n", aParent));

    ComAssertRet(aParent, E_INVALIDARG);

    /* Enclose the state transition NotReady->InInit->Ready */
    AutoInitSpan autoInitSpan(this);
    AssertReturn(autoInitSpan.isOk(), E_FAIL);

    unconst(mParent) = aParent;

    /* Confirm a successful initialization when it's the case */
    autoInitSpan.setSucceeded();

    ULONG aMemoryBalloonSize;
    HRESULT hr = mParent->machine()->COMGETTER(MemoryBalloonSize)(&aMemoryBalloonSize);
    if (hr == S_OK) /** @todo r=andy SUCCEEDED? */
        mMemoryBalloonSize = aMemoryBalloonSize;
    else
        mMemoryBalloonSize = 0; /* Default is no ballooning */

    BOOL fPageFusionEnabled;
    hr = mParent->machine()->COMGETTER(PageFusionEnabled)(&fPageFusionEnabled);
    if (hr == S_OK) /** @todo r=andy SUCCEEDED? */
        mfPageFusionEnabled = fPageFusionEnabled;
    else
        mfPageFusionEnabled = false; /* Default is no page fusion */

    mStatUpdateInterval = 0; /* Default is not to report guest statistics at all */
    mCollectVMMStats = false;

    /* Clear statistics. */
    mNetStatRx = mNetStatTx = 0;
    mNetStatLastTs = RTTimeNanoTS();
    for (unsigned i = 0; i < GUESTSTATTYPE_MAX; i++)
        mCurrentGuestStat[i] = 0;
    mVmValidStats = pm::VMSTATMASK_NONE;

    mMagic = GUEST_MAGIC;
    int vrc = RTTimerLRCreate(&mStatTimer, 1000 /* ms */, &Guest::staticUpdateStats, this);
    AssertMsgRC(vrc, ("Failed to create guest statistics update timer (%Rrc)\n", vrc));

#ifdef VBOX_WITH_GUEST_CONTROL
    unconst(mEventSource).createObject();
    Assert(!mEventSource.isNull());
    hr = mEventSource->init(static_cast<IGuest*>(this));
#else
    hr = S_OK;
#endif

    try
    {
#ifdef VBOX_WITH_DRAG_AND_DROP
        m_pGuestDnD = new GuestDnD(this);
        AssertPtr(m_pGuestDnD);
#endif
    }
    catch (std::bad_alloc &)
    {
        hr = E_OUTOFMEMORY;
    }

    return hr;
}
static int Test1(unsigned cThreads, unsigned cSeconds, bool fYield, bool fQuiet)
{
    int rc;
    unsigned i;
    uint64_t g_au64[32];
    RTTHREAD aThreads[RT_ELEMENTS(g_au64)];
    AssertRelease(cThreads <= RT_ELEMENTS(g_au64));

    /*
     * Init globals.
     */
    g_fYield = fYield;
    g_fQuiet = fQuiet;
    g_fTerminate = false;

    rc = RTSemMutexCreate(&g_hMutex);
    if (RT_FAILURE(rc))
        return PrintError("RTSemMutexCreate failed (rc=%Rrc)\n", rc);

    /*
     * Create the threads and let them block on the mutex.
     */
    rc = RTSemMutexRequest(g_hMutex, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc))
        return PrintError("RTSemMutexRequest failed (rc=%Rrc)\n", rc);

    for (i = 0; i < cThreads; i++)
    {
        g_au64[i] = 0;
        rc = RTThreadCreate(&aThreads[i], ThreadTest1, &g_au64[i], 0, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "test");
        if (RT_FAILURE(rc))
            return PrintError("RTThreadCreate failed for thread %u (rc=%Rrc)\n", i, rc);
    }

    if (!fQuiet)
        RTPrintf("tstSemMutex: %u Threads created. Racing them for %u seconds (%s) ...\n",
                 cThreads, cSeconds, g_fYield ? "yielding" : "no yielding");

    uint64_t u64StartTS = RTTimeNanoTS();
    rc = RTSemMutexRelease(g_hMutex);
    if (RT_FAILURE(rc))
        PrintError("RTSemMutexRelease failed (rc=%Rrc)\n", rc);
    RTThreadSleep(cSeconds * 1000);
    ASMAtomicXchgBool(&g_fTerminate, true);
    uint64_t ElapsedNS = RTTimeNanoTS() - u64StartTS;

    for (i = 0; i < cThreads; i++)
    {
        rc = RTThreadWait(aThreads[i], 5000, NULL);
        if (RT_FAILURE(rc))
            PrintError("RTThreadWait failed for thread %u (rc=%Rrc)\n", i, rc);
    }

    rc = RTSemMutexDestroy(g_hMutex);
    if (RT_FAILURE(rc))
        PrintError("RTSemMutexDestroy failed - %Rrc\n", rc);
    g_hMutex = NIL_RTSEMMUTEX;
    if (g_cErrors)
        RTThreadSleep(100);

    /*
     * Collect and display the results.
     */
    uint64_t Total = g_au64[0];
    for (i = 1; i < cThreads; i++)
        Total += g_au64[i];

    uint64_t Normal = Total / cThreads;
    uint64_t MaxDeviation = 0;
    for (i = 0; i < cThreads; i++)
    {
        uint64_t Delta = RT_ABS((int64_t)(g_au64[i] - Normal));
        if (Delta > Normal / 2)
            RTPrintf("tstSemMutex: Warning! Thread %d deviates by more than 50%% - %llu (it) vs. %llu (avg)\n",
                     i, g_au64[i], Normal);
        if (Delta > MaxDeviation)
            MaxDeviation = Delta;
    }

    RTPrintf("tstSemMutex: Threads: %u  Total: %llu  Per Sec: %llu  Avg: %llu ns  Max dev: %llu%%\n",
             cThreads,
             Total,
             Total / cSeconds,
             ElapsedNS / Total,
             MaxDeviation * 100 / Normal);
    return 0;
}
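/*
 * A standalone restatement of the fairness metric both Test1 here and Test4
 * earlier compute inline: the largest per-thread deviation from the mean
 * iteration count, in percent. The helper name is illustrative, not part of
 * either test.
 */
static uint64_t maxDeviationPct(uint64_t const *pacIterations, unsigned cThreads)
{
    uint64_t cTotal = 0;
    for (unsigned i = 0; i < cThreads; i++)
        cTotal += pacIterations[i];
    uint64_t const cAvg = cTotal / cThreads;
    if (!cAvg)
        return 0; /* nothing ran; avoid division by zero */

    uint64_t cMaxDelta = 0;
    for (unsigned i = 0; i < cThreads; i++)
    {
        uint64_t cDelta = pacIterations[i] > cAvg ? pacIterations[i] - cAvg : cAvg - pacIterations[i];
        if (cDelta > cMaxDelta)
            cMaxDelta = cDelta;
    }
    return cMaxDelta * 100 / cAvg;
}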
void Guest::updateStats(uint64_t iTick)
{
    uint64_t cbFreeTotal      = 0;
    uint64_t cbAllocTotal     = 0;
    uint64_t cbBalloonedTotal = 0;
    uint64_t cbSharedTotal    = 0;
    uint64_t cbSharedMem      = 0;
    ULONG    uNetStatRx       = 0;
    ULONG    uNetStatTx       = 0;
    ULONG    aGuestStats[GUESTSTATTYPE_MAX];
    RT_ZERO(aGuestStats);

    AutoWriteLock alock(this COMMA_LOCKVAL_SRC_POS);

    ULONG validStats = mVmValidStats;
    /* Check if we have anything to report */
    if (validStats)
    {
        mVmValidStats = pm::VMSTATMASK_NONE;
        memcpy(aGuestStats, mCurrentGuestStat, sizeof(aGuestStats));
    }
    alock.release();

    /*
     * Calling SessionMachine may take time as the object resides in VBoxSVC
     * process. This is why we took a snapshot of currently collected stats
     * and released the lock.
     */
    Console::SafeVMPtrQuiet ptrVM(mParent);
    if (ptrVM.isOk())
    {
        int rc;

        /*
         * There is no point in collecting VM shared memory if other memory
         * statistics are not available yet. Or is there?
         */
        if (validStats)
        {
            /* Query the missing per-VM memory statistics. */
            uint64_t cbTotalMemIgn, cbPrivateMemIgn, cbZeroMemIgn;
            rc = PGMR3QueryMemoryStats(ptrVM.rawUVM(), &cbTotalMemIgn, &cbPrivateMemIgn, &cbSharedMem, &cbZeroMemIgn);
            if (rc == VINF_SUCCESS)
                validStats |= pm::VMSTATMASK_GUEST_MEMSHARED;
        }

        if (mCollectVMMStats)
        {
            rc = PGMR3QueryGlobalMemoryStats(ptrVM.rawUVM(), &cbAllocTotal, &cbFreeTotal, &cbBalloonedTotal, &cbSharedTotal);
            AssertRC(rc);
            if (rc == VINF_SUCCESS)
                validStats |=   pm::VMSTATMASK_VMM_ALLOC  | pm::VMSTATMASK_VMM_FREE
                              | pm::VMSTATMASK_VMM_BALOON | pm::VMSTATMASK_VMM_SHARED;
        }

        uint64_t uRxPrev = mNetStatRx;
        uint64_t uTxPrev = mNetStatTx;
        mNetStatRx = mNetStatTx = 0;
        rc = STAMR3Enum(ptrVM.rawUVM(), "*/ReceiveBytes|*/TransmitBytes", staticEnumStatsCallback, this);
        AssertRC(rc);

        uint64_t uTsNow = RTTimeNanoTS();
        uint64_t cNsPassed = uTsNow - mNetStatLastTs;
        if (cNsPassed >= 1000)
        {
            mNetStatLastTs = uTsNow;

            uNetStatRx = (ULONG)((mNetStatRx - uRxPrev) * 1000000 / (cNsPassed / 1000)); /* in bytes per second */
            uNetStatTx = (ULONG)((mNetStatTx - uTxPrev) * 1000000 / (cNsPassed / 1000)); /* in bytes per second */
            validStats |= pm::VMSTATMASK_NET_RX | pm::VMSTATMASK_NET_TX;
            LogFlowThisFunc(("Net Rx=%llu Tx=%llu Ts=%llu Delta=%llu\n", mNetStatRx, mNetStatTx, uTsNow, cNsPassed));
        }
        else
        {
            /* Can happen on resume or if we're using a non-monotonic clock
               source for the timer and the time is adjusted. */
            mNetStatRx = uRxPrev;
            mNetStatTx = uTxPrev;
            LogThisFunc(("Net Ts=%llu cNsPassed=%llu - too small interval\n", uTsNow, cNsPassed));
        }
    }

    mParent->reportVmStatistics(validStats,
                                aGuestStats[GUESTSTATTYPE_CPUUSER],
                                aGuestStats[GUESTSTATTYPE_CPUKERNEL],
                                aGuestStats[GUESTSTATTYPE_CPUIDLE],
                                /* Convert the units for RAM usage stats: page (4K) -> 1KB units */
                                mCurrentGuestStat[GUESTSTATTYPE_MEMTOTAL] * (_4K/_1K),
                                mCurrentGuestStat[GUESTSTATTYPE_MEMFREE] * (_4K/_1K),
                                mCurrentGuestStat[GUESTSTATTYPE_MEMBALLOON] * (_4K/_1K),
                                (ULONG)(cbSharedMem / _1K), /* bytes -> KB */
                                mCurrentGuestStat[GUESTSTATTYPE_MEMCACHE] * (_4K/_1K),
                                mCurrentGuestStat[GUESTSTATTYPE_PAGETOTAL] * (_4K/_1K),
                                (ULONG)(cbAllocTotal / _1K), /* bytes -> KB */
                                (ULONG)(cbFreeTotal / _1K),
                                (ULONG)(cbBalloonedTotal / _1K),
                                (ULONG)(cbSharedTotal / _1K),
                                uNetStatRx,
                                uNetStatTx);
}
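/*
 * A minimal restatement of the rate computation above: bytes/second from a
 * byte-counter delta over a nanosecond interval, using the same integer
 * scaling as updateStats(). The helper name is illustrative; the math is
 * delta * 1e6 / (ns / 1e3) == delta * 1e9 / ns, arranged to avoid 64-bit
 * overflow for typical deltas.
 */
static uint32_t bytesPerSecond(uint64_t cbDelta, uint64_t cNsPassed)
{
    if (cNsPassed < 1000)
        return 0; /* interval too small to be meaningful, same guard as above */
    return (uint32_t)(cbDelta * 1000000 / (cNsPassed / 1000));
}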
DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTMSINTERVAL    cMilliesInitial = cMillies;
    uint64_t        tsStart = 0;
    if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0)
        tsStart = RTTimeNanoTS();

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
        return rc;
    }

    /*
     * Check if the state of affairs allows write access.
     */
    RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
    if (    !pThis->cReads
        &&  (   (   !pThis->cWrites
                 && (   !pThis->cWritesWaiting /* play fair if we can wait */
                     || !cMillies)
                )
             || pThis->hWriter == hNativeSelf
            )
       )
    {
        /*
         * Reset the reader event semaphore if necessary.
         */
        if (pThis->fNeedResetReadEvent)
        {
            pThis->fNeedResetReadEvent = false;
            rc = RTSemEventMultiReset(pThis->ReadEvent);
            AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc));
        }

        pThis->cWrites++;
        pThis->hWriter = hNativeSelf;
#ifdef RTSEMRW_STRICT
        RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, pThis->cWrites == 1);
#endif
        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    /*
     * Signal writer presence.
     */
    if (cMillies != 0)
        pThis->cWritesWaiting++;

    RTCritSectLeave(&pThis->CritSect);

    /*
     * Wait till it's ready for writing.
     */
    if (cMillies == 0)
        return VERR_TIMEOUT;

#ifndef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (;;)
    {
        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* Recompute the remaining wait time; store it back into cMillies
               so the waits below actually shrink as time passes. */
            int64_t tsDelta = RTTimeNanoTS() - tsStart;
            if (tsDelta >= 1000000)
            {
                tsDelta /= 1000000;
                if ((uint64_t)tsDelta < cMilliesInitial)
                    cMillies = cMilliesInitial - (RTMSINTERVAL)tsDelta;
                else
                    cMillies = 1;
            }
        }

#ifdef RTSEMRW_STRICT
        rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
                                                 cMillies, RTTHREADSTATE_RW_WRITE, false);
        if (RT_FAILURE(rc))
            break;
#else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#endif
        int rcWait;
        if (fInterruptible)
            rcWait = rc = RTSemEventWaitNoResume(pThis->WriteEvent, cMillies);
        else
            rcWait = rc = RTSemEventWait(pThis->WriteEvent, cMillies);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (RT_UNLIKELY(RT_FAILURE_NP(rc) && rc != VERR_TIMEOUT)) /* timeouts are handled below */
        {
            AssertMsgRC(rc, ("RTSemEventWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (RT_UNLIKELY(pThis->u32Magic != RTSEMRW_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        /*
         * Re-take critsect and repeat the check we did prior to this loop.
         */
        rc = RTCritSectEnter(&pThis->CritSect);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (!pThis->cReads && (!pThis->cWrites || pThis->hWriter == hNativeSelf))
        {
            /*
             * Reset the reader event semaphore if necessary.
             */
            if (pThis->fNeedResetReadEvent)
            {
                pThis->fNeedResetReadEvent = false;
                rc = RTSemEventMultiReset(pThis->ReadEvent);
                AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc));
            }

            pThis->cWrites++;
            pThis->hWriter = hNativeSelf;
            pThis->cWritesWaiting--;
#ifdef RTSEMRW_STRICT
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
            RTCritSectLeave(&pThis->CritSect);
            return VINF_SUCCESS;
        }

        RTCritSectLeave(&pThis->CritSect);

        /*
         * Quit if the wait already timed out.
         */
        if (rcWait == VERR_TIMEOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
    }

    /*
     * Timeout/error case, clean up.
     */
    if (pThis->u32Magic == RTSEMRW_MAGIC)
    {
        RTCritSectEnter(&pThis->CritSect);
        /* Adjust this counter, whether we got the critsect or not. */
        pThis->cWritesWaiting--;
        RTCritSectLeave(&pThis->CritSect);
    }
    return rc;
}
DECL_FORCE_INLINE(int) rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTMSINTERVAL    cMilliesInitial = cMillies;
    uint64_t        tsStart = 0;
    if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0)
        tsStart = RTTimeNanoTS();

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int rc9;
        if (    pThis->hWriter != NIL_RTTHREAD
            &&  pThis->hWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
        return rc;
    }

    /*
     * Check if the state of affairs allows read access.
     * Do not block further readers if there is a writer waiting, as
     * that will break/deadlock reader recursion.
     */
    if (    pThis->hWriter == NIL_RTNATIVETHREAD
#if 0
        &&  (   !pThis->cWritesWaiting
             ||  pThis->cReads)
#endif
       )
    {
        pThis->cReads++;
        Assert(pThis->cReads > 0);
#ifdef RTSEMRW_STRICT
        RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif

        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
    if (pThis->hWriter == hNativeSelf)
    {
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
        if (RT_FAILURE(rc9))
        {
            RTCritSectLeave(&pThis->CritSect);
            return rc9;
        }
#endif

        pThis->cWriterReads++;
        Assert(pThis->cWriterReads > 0);

        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    RTCritSectLeave(&pThis->CritSect);

    /*
     * Wait till it's ready for reading.
     */
    if (cMillies == 0)
        return VERR_TIMEOUT;

#ifndef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (;;)
    {
        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* Recompute the remaining wait time; store it back into cMillies
               so the waits below actually shrink as time passes. */
            int64_t tsDelta = RTTimeNanoTS() - tsStart;
            if (tsDelta >= 1000000)
            {
                tsDelta /= 1000000;
                if ((uint64_t)tsDelta < cMilliesInitial)
                    cMillies = cMilliesInitial - (RTMSINTERVAL)tsDelta;
                else
                    cMillies = 1;
            }
        }

#ifdef RTSEMRW_STRICT
        rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
                                                   cMillies, RTTHREADSTATE_RW_READ, false);
        if (RT_FAILURE(rc))
            break;
#else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#endif
        int rcWait;
        if (fInterruptible)
            rcWait = rc = RTSemEventMultiWaitNoResume(pThis->ReadEvent, cMillies);
        else
            rcWait = rc = RTSemEventMultiWait(pThis->ReadEvent, cMillies);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
        if (RT_FAILURE(rc) && rc != VERR_TIMEOUT) /* handle timeout below */
        {
            AssertMsgRC(rc, ("RTSemEventMultiWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        /*
         * Re-take critsect and repeat the check we did before the loop.
         */
        rc = RTCritSectEnter(&pThis->CritSect);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (    pThis->hWriter == NIL_RTNATIVETHREAD
#if 0
            &&  (   !pThis->cWritesWaiting
                 ||  pThis->cReads)
#endif
           )
        {
            pThis->cReads++;
            Assert(pThis->cReads > 0);
#ifdef RTSEMRW_STRICT
            RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif

            RTCritSectLeave(&pThis->CritSect);
            return VINF_SUCCESS;
        }

        RTCritSectLeave(&pThis->CritSect);

        /*
         * Quit if the wait already timed out.
         */
        if (rcWait == VERR_TIMEOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
    }

    /* failed */
    return rc;
}
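/*
 * A minimal usage sketch of the public RTSemRW API whose slow paths appear
 * above; not from this file. Error handling is elided, the protected data is
 * a stand-in, and the lock must first be created with RTSemRWCreate().
 */
static RTSEMRW  g_hLock = NIL_RTSEMRW;
static unsigned g_uProtectedValue;

static int initLock(void)
{
    return RTSemRWCreate(&g_hLock);
}

static unsigned readValue(void)
{
    RTSemRWRequestRead(g_hLock, RT_INDEFINITE_WAIT);
    unsigned uValue = g_uProtectedValue;
    RTSemRWReleaseRead(g_hLock);
    return uValue;
}

static void writeValue(unsigned uNew)
{
    RTSemRWRequestWrite(g_hLock, RT_INDEFINITE_WAIT);
    g_uProtectedValue = uNew;
    /* A writer may also take the read lock recursively, as the
       pThis->hWriter == hNativeSelf path above allows. */
    RTSemRWRequestRead(g_hLock, RT_INDEFINITE_WAIT);
    RTSemRWReleaseRead(g_hLock);
    RTSemRWReleaseWrite(g_hLock);
}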
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;

    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    if (RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec    *pTimeout = NULL;
    struct timespec     Timeout = {0,0};
    uint64_t            StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    while (    cMinReqs
           &&  RT_SUCCESS_NP(rc))
    {
        struct kevent aKEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        int cRequestsToWait = cMinReqs < AIO_MAXIMUM_REQUESTS_PER_CONTEXT ? cReqs : AIO_MAXIMUM_REQUESTS_PER_CONTEXT;
        int rcBSD;

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rcBSD = kevent(pCtxInt->iKQueue, NULL, 0, aKEvents, cRequestsToWait, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);

        if (RT_UNLIKELY(rcBSD < 0))
        {
            rc = RTErrConvertFromErrno(errno);
            break;
        }

        uint32_t const cDone = rcBSD;

        /* Process received events. */
        for (uint32_t i = 0; i < cDone; i++)
        {
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aKEvents[i].udata;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /*
             * Retrieve the status code here already because the
             * user may omit the RTFileAioReqGetRC() call and
             * we would leak kernel resources then.
             * This will result in errors during submission
             * of other requests as soon as the max_aio_queue_per_proc
             * limit is reached.
             */
            int cbTransfered = aio_return(&pReqInt->AioCB);
            if (cbTransfered < 0)
            {
                pReqInt->Rc = RTErrConvertFromErrno(cbTransfered);
                pReqInt->cbTransfered = 0;
            }
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = cbTransfered;
            }
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done yet? If not, advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (    pCtxInt->fWokenUp
        &&  RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    /*
     * Don't try mess with an offline CPU.
     */
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
             ? VERR_CPU_NOT_FOUND
             : VERR_CPU_OFFLINE;

    /*
     * Use the broadcast IPI routine if there are no more than two CPUs online,
     * or if the current IRQL is unsuitable for KeWaitForSingleObject.
     */
    if (   g_pfnrtKeIpiGenericCall
        && (   RTMpGetOnlineCount() <= 2
            || KeGetCurrentIrql()   > APC_LEVEL) )
        return rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnSpecificBroadcastIpiWrapper, 0);

#if 0 /** @todo untested code. needs some tuning. */
    /*
     * Initialize the argument package and the objects within it.
     * The package is reference counted to avoid unnecessary spinning to
     * synchronize cleanup and prevent stack corruption.
     */
    PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)ExAllocatePoolWithTag(NonPagedPool, sizeof(*pArgs), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;
    pArgs->cRefs                  = 2;
    pArgs->fExecuting             = false;
    pArgs->fDone                  = false;
    pArgs->CallbackArgs.pfnWorker = pfnWorker;
    pArgs->CallbackArgs.pvUser1   = pvUser1;
    pArgs->CallbackArgs.pvUser2   = pvUser2;
    pArgs->CallbackArgs.idCpu     = idCpu;
    pArgs->CallbackArgs.cHits     = 0;
    pArgs->CallbackArgs.cRefs     = 2;
    KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /* not signalled */);
    KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);
    KeSetImportanceDpc(&pArgs->Dpc, HighImportance);
    KeSetTargetProcessorDpc(&pArgs->Dpc, (int)idCpu);

    /*
     * Disable preemption while we check the current processor and insert the DPC.
     */
    KIRQL bOldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &bOldIrql);
    ASMCompilerBarrier(); /* paranoia */

    if (RTMpCpuId() == idCpu)
    {
        /* Just execute the callback on the current CPU. */
        pfnWorker(idCpu, pvUser1, pvUser2);
        KeLowerIrql(bOldIrql);

        ExFreePool(pArgs);
        return VINF_SUCCESS;
    }

    /* Different CPU, so queue it if the CPU is still online. */
    int rc;
    if (RTMpIsCpuOnline(idCpu))
    {
        BOOLEAN fRc = KeInsertQueueDpc(&pArgs->Dpc, 0, 0);
        Assert(fRc);
        KeLowerIrql(bOldIrql);

        uint64_t const nsRealWaitTS = RTTimeNanoTS();

        /*
         * Wait actively for a while in case the CPU/thread responds quickly.
         */
        uint32_t cLoopsLeft = 0x20000;
        while (cLoopsLeft-- > 0)
        {
            if (pArgs->fDone)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
            ASMNopPause();
        }

        /*
         * It didn't respond, so wait on the event object, poking the CPU if it's slow.
         */
        LARGE_INTEGER Timeout;
        Timeout.QuadPart = -10000; /* 1ms */
        NTSTATUS rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
        if (rcNt == STATUS_SUCCESS)
        {
            rtMpNtOnSpecificRelease(pArgs);
            return VINF_SUCCESS;
        }

        /* If it hasn't responded yet, maybe poke it and wait some more. */
        if (rcNt == STATUS_TIMEOUT)
        {
            if (   !pArgs->fExecuting
                && (   g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalSendSoftwareInterrupt
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiW7Plus
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiPreW7))
                RTMpPokeCpu(idCpu);

            Timeout.QuadPart = -1280000; /* 128ms */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt == STATUS_SUCCESS)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
        }

        /*
         * Something weird is happening, try to bail out.
         */
        if (KeRemoveQueueDpc(&pArgs->Dpc))
        {
            ExFreePool(pArgs); /* DPC was still queued, so we can return without further ado. */
            LogRel(("RTMpOnSpecific(%#x): Not processed after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        else
        {
            /* DPC is running, wait a good while for it to complete. */
            LogRel(("RTMpOnSpecific(%#x): Still running after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));

            Timeout.QuadPart = -30*1000*1000*10; /* 30 seconds */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt != STATUS_SUCCESS)
                LogRel(("RTMpOnSpecific(%#x): Giving up on running worker after %llu ns: rcNt=%#x\n",
                        idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        rc = RTErrConvertFromNtStatus(rcNt);
    }
    else
    {
        /* CPU is offline. */
        KeLowerIrql(bOldIrql);
        rc = !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
    }

    rtMpNtOnSpecificRelease(pArgs);
    return rc;

#else
    return rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu);
#endif
}