/**
 * Notify framebuffer of an update.
 *
 * Called on an arbitrary thread; on X11 builds the work is deferred to the
 * main SDL thread via a user event because SDL is not thread-safe.
 *
 * @returns COM status code (always S_OK).
 * @param   x   Update region upper left corner x value.
 * @param   y   Update region upper left corner y value.
 * @param   w   Update region width in pixels.
 * @param   h   Update region height in pixels.
 */
HRESULT SDLFramebuffer::NotifyUpdate(ULONG x, ULONG y, ULONG w, ULONG h)
{
    LogFlow(("SDLFramebuffer::NotifyUpdate: x = %d, y = %d, w = %d, h = %d\n", x, y, w, h));
#ifdef VBOXBFE_WITH_X11
    /*
     * SDL does not allow us to make this call from any other
     * thread. So we have to send an event to the main SDL
     * thread and process it there. For sake of simplicity, we encode
     * all information in the event parameters.
     */
    SDL_Event event;
    event.type = SDL_USEREVENT;
    event.user.type = SDL_USER_EVENT_UPDATERECT;
    /* 16 bit is enough for coordinates: pack (x,y) and (w,h) pairs into the
     * two pointer-sized user data fields. */
    event.user.data1 = (void*)(x << 16 | (y + mTopOffset));
    event.user.data2 = (void*)(w << 16 | h);
    int rc = SDL_PushEvent(&event);
    NOREF(rc);
    AssertMsg(!rc, ("Error: SDL_PushEvent was not successful! SDL error: '%s'\n", SDL_GetError()));
    /* in order to not flood the SDL event queue, yield the CPU */
    RTThreadYield();
#else /* !VBOXBFE_WITH_X11 */
    /* Non-X11: safe to update directly on this thread. */
    update(x, y + mTopOffset, w, h);
#endif /* !VBOXBFE_WITH_X11 */
    return S_OK;
}
/**
 * Request a display resize from the framebuffer.
 *
 * The actual resize is performed asynchronously on the main SDL thread; this
 * method records the new dimensions, posts an SDL user event and tells the
 * caller to wait for the resize-complete notification.
 *
 * @returns COM status code.
 * @param   w         New display width in pixels.
 * @param   h         New display height in pixels.
 * @param   finished  Address of output flag whether the update
 *                    could be fully processed in this call (which
 *                    has to return immediately) or VBox should wait
 *                    for a call to the resize complete API before
 *                    continuing with display updates.
 */
HRESULT SDLFramebuffer::RequestResize(ULONG w, ULONG h, BOOL *finished)
{
    LogFlow(("SDLFramebuffer::RequestResize: w = %d, h = %d\n", w, h));

    /*
     * SDL does not allow us to make this call from any other
     * thread. So we have to send an event to the main SDL
     * thread and tell VBox to wait.
     */
    if (!finished)
    {
        AssertMsgFailed(("RequestResize requires the finished flag!\n"));
        return E_FAIL;
    }

    /* Remember the requested size; the SDL thread reads these. */
    mWidth = w;
    mHeight = h;

    SDL_Event event;
    event.type = SDL_USEREVENT;
    event.user.type = SDL_USER_EVENT_RESIZE;

    int rc = SDL_PushEvent(&event);
    NOREF(rc);
    AssertMsg(!rc, ("Error: SDL_PushEvent was not successful!\n"));

    /* we want this request to be processed quickly, so yield the CPU */
    RTThreadYield();

    /* Resize is asynchronous: caller must wait for completion. */
    *finished = false;

    return S_OK;
}
/**
 * Common worker for the traffic test threads.
 *
 * Crosses the X-roads semaphore (north/south or east/west direction) in a
 * tight loop until g_cSecs seconds have elapsed, occasionally yielding or
 * sleeping to vary the scheduling pattern.
 *
 * @returns VINF_SUCCESS.
 * @param   iThread     The thread index (currently unused).
 * @param   fNS         True to do north/south crossings, false for east/west.
 */
static int tstTrafficThreadCommon(uintptr_t iThread, bool fNS)
{
    for (uint32_t iLoop = 0; RTTimeMilliTS() - g_u64StartMilliTS < g_cSecs*1000; iLoop++)
    {
        /* fudge - note: (iLoop % N) is in [0, N-1], so compare against N-1;
         * comparing against N (as the original code did) could never be true
         * and the yield/sleep never fired. */
        if ((iLoop % 223) == 222)
            RTThreadYield();
        else if ((iLoop % 16127) == 16126)
            RTThreadSleep(1);

        if (fNS)
        {
            RTTEST_CHECK_RC(g_hTest,RTSemXRoadsNSEnter(g_hXRoads), VINF_SUCCESS);
            ASMAtomicIncU32(&g_cNSCrossings);
            RTTEST_CHECK_RC(g_hTest,RTSemXRoadsNSLeave(g_hXRoads), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC(g_hTest,RTSemXRoadsEWEnter(g_hXRoads), VINF_SUCCESS);
            ASMAtomicIncU32(&g_cEWCrossings);
            RTTEST_CHECK_RC(g_hTest,RTSemXRoadsEWLeave(g_hXRoads), VINF_SUCCESS);
        }
    }
    return VINF_SUCCESS;
}
/**
 * Reads a register value.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVBoxSCSI    Pointer to the SCSI state.
 * @param   iRegister    Index of the register to read.
 * @param   pu32Value    Where to store the content of the register.
 */
int vboxscsiReadRegister(PVBOXSCSI pVBoxSCSI, uint8_t iRegister, uint32_t *pu32Value)
{
    uint8_t uVal = 0;

    switch (iRegister)
    {
        case 0: /* Status register: reports only the busy bit here. */
        {
            if (ASMAtomicReadBool(&pVBoxSCSI->fBusy) == true)
            {
                uVal |= VBOX_SCSI_BUSY;
                /* There is an I/O operation in progress.
                 * Yield the execution thread to let the I/O thread make progress.
                 */
                RTThreadYield();
            }
            else
                uVal &= ~VBOX_SCSI_BUSY; /* no-op on a zeroed uVal, kept for symmetry */
            break;
        }
        case 1: /* Data-in register: hands out the buffered data byte by byte. */
        {
            if (pVBoxSCSI->cbBuf > 0)
            {
                AssertMsg(pVBoxSCSI->pBuf, ("pBuf is NULL\n"));
                uVal = pVBoxSCSI->pBuf[pVBoxSCSI->iBuf];
                pVBoxSCSI->iBuf++;
                pVBoxSCSI->cbBuf--;
                if (pVBoxSCSI->cbBuf == 0)
                {
                    /* The guest read the last byte from the data in buffer.
                     * Clear everything and reset command buffer.
                     */
                    RTMemFree(pVBoxSCSI->pBuf);
                    pVBoxSCSI->pBuf = NULL;
                    pVBoxSCSI->cbCDB = 0;
                    pVBoxSCSI->iCDB = 0;
                    pVBoxSCSI->iBuf = 0;
                    pVBoxSCSI->uTargetDevice = 0;
                    pVBoxSCSI->enmState = VBOXSCSISTATE_NO_COMMAND;
                    memset(pVBoxSCSI->aCDB, 0, sizeof(pVBoxSCSI->aCDB));
                }
            }
            break;
        }
        case 2: /* Identify register. */
        {
            uVal = pVBoxSCSI->regIdentify;
            break;
        }
        default:
            AssertMsgFailed(("Invalid register to read from %u\n", iRegister));
    }

    *pu32Value = uVal;

    return VINF_SUCCESS;
}
/**
 * Indicates to the XPCOM thread that it should terminate now.
 *
 * Sets the termination flag and wakes the queue thread via its event
 * semaphore, then yields so the thread gets a chance to notice the flag.
 */
void terminateXPCOMQueueThread(void)
{
    g_fTerminateXPCOMQueueThread = true;
    if (g_EventSemXPCOMQueueThread)
    {
        RTSemEventSignal(g_EventSemXPCOMQueueThread);
        RTThreadYield();
    }
}
/**
 * Common worker for putting the calling (FreeBSD kernel) thread to sleep.
 *
 * @returns IPRT status code: VINF_SUCCESS on normal wakeup, VERR_TIMEOUT on
 *          timeout, VERR_INTERRUPTED if interrupted by a signal.
 * @param   cMillies    Number of milliseconds to sleep; 0 yields the CPU,
 *                      RT_INDEFINITE_WAIT sleeps until explicitly woken.
 */
static int rtR0ThreadFbsdSleepCommon(RTMSINTERVAL cMillies)
{
    int rc;
    int cTicks;

    /*
     * 0 ms sleep -> yield.
     */
    if (!cMillies)
    {
        RTThreadYield();
        return VINF_SUCCESS;
    }

    /*
     * Translate milliseconds into ticks and go to sleep.
     */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        /* Fast paths for the common tick rates, generic (overflow-safe)
         * 64-bit conversion otherwise, saturating at INT_MAX. */
        if (hz == 1000)
            cTicks = cMillies;
        else if (hz == 100)
            cTicks = cMillies / 10;
        else
        {
            int64_t cTicks64 = ((uint64_t)cMillies * hz) / 1000;
            cTicks = (int)cTicks64;
            if (cTicks != cTicks64)
                cTicks = INT_MAX;
        }
    }
    else
        cTicks = 0; /* tsleep: 0 ticks means no timeout. */

    /* requires giant lock! */
    rc = tsleep((void *)RTThreadSleep, /* wait channel: any unique address */
                PZERO | PCATCH,
                "iprtsl",              /* max 6 chars */
                cTicks);
    switch (rc)
    {
        case 0:
            return VINF_SUCCESS;
        case EWOULDBLOCK:
            return VERR_TIMEOUT;
        case EINTR:
        case ERESTART:
            return VERR_INTERRUPTED;
        default:
            AssertMsgFailed(("%d\n", rc));
            return VERR_NO_TRANSLATION;
    }
}
/**
 * Entry point for tstTime-2: measures the overhead of RTTimeNanoTS() and
 * checks it stays in sync with RTTimeSystemNanoTS().
 *
 * @returns 0 on success, 1 on failure (suitable as a process exit code).
 */
int main()
{
    unsigned cErrors = 0;
    int i;
    RTR3InitExeNoArguments(RTR3INIT_FLAGS_SUPLIB);
    RTPrintf("tstTime-2: TESTING...\n");

    /*
     * RTNanoTimeTS() shall never return something which
     * is less or equal to the return of the previous call.
     */

    /* Warm up the timekeeping code and yield so we start on a fresh slice. */
    RTTimeSystemNanoTS(); RTTimeNanoTS(); RTThreadYield();
    uint64_t u64RTStartTS = RTTimeNanoTS();
    uint64_t u64OSStartTS = RTTimeSystemNanoTS();
#define NUMBER_OF_CALLS (100*_1M)

    for (i = 0; i < NUMBER_OF_CALLS; i++)
        RTTimeNanoTS();

    uint64_t u64RTElapsedTS = RTTimeNanoTS();
    uint64_t u64OSElapsedTS = RTTimeSystemNanoTS();
    u64RTElapsedTS -= u64RTStartTS;
    u64OSElapsedTS -= u64OSStartTS;
    /* The two elapsed times must agree to within 0.1% (1/1000). */
    int64_t i64Diff = u64OSElapsedTS >= u64RTElapsedTS ? u64OSElapsedTS - u64RTElapsedTS : u64RTElapsedTS - u64OSElapsedTS;
    if (i64Diff > (int64_t)(u64OSElapsedTS / 1000))
    {
        RTPrintf("tstTime-2: error: total time differs too much! u64OSElapsedTS=%#llx u64RTElapsedTS=%#llx delta=%lld\n",
                 u64OSElapsedTS, u64RTElapsedTS, u64OSElapsedTS - u64RTElapsedTS);
        cErrors++;
    }
    else
        RTPrintf("tstTime-2: total time difference: u64OSElapsedTS=%#llx u64RTElapsedTS=%#llx delta=%lld\n",
                 u64OSElapsedTS, u64RTElapsedTS, u64OSElapsedTS - u64RTElapsedTS);

    RTPrintf("tstTime-2: %u calls to RTTimeNanoTS\n", NUMBER_OF_CALLS);
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) /** @todo This isn't really x86 or AMD64 specific... */
    /* Dump the debug counters as parts-per-thousand of the call count. */
    RTPrintf("tstTime-2: RTTimeDbgSteps   -> %u (%d ppt)\n", RTTimeDbgSteps(),   ((uint64_t)RTTimeDbgSteps() * 1000) / NUMBER_OF_CALLS);
    RTPrintf("tstTime-2: RTTimeDbgExpired -> %u (%d ppt)\n", RTTimeDbgExpired(), ((uint64_t)RTTimeDbgExpired() * 1000) / NUMBER_OF_CALLS);
    RTPrintf("tstTime-2: RTTimeDbgBad     -> %u (%d ppt)\n", RTTimeDbgBad(),     ((uint64_t)RTTimeDbgBad() * 1000) / NUMBER_OF_CALLS);
    RTPrintf("tstTime-2: RTTimeDbgRaces   -> %u (%d ppt)\n", RTTimeDbgRaces(),   ((uint64_t)RTTimeDbgRaces() * 1000) / NUMBER_OF_CALLS);
#endif

    if (!cErrors)
        RTPrintf("tstTime-2: SUCCESS\n");
    else
        RTPrintf("tstTime-2: FAILURE - %d errors\n", cErrors);
    return !!cErrors;
}
/**
 * Wait for a change in the USB devices attached to the host (sysfs variant).
 *
 * @returns IPRT status code from the waiter; VERR_TRY_AGAIN is converted to
 *          VINF_SUCCESS after yielding so the caller simply re-polls.
 * @param   aMillies    Maximum number of milliseconds to wait.
 */
int USBProxyBackendLinux::waitSysfs(RTMSINTERVAL aMillies)
{
#ifdef VBOX_USB_WITH_SYSFS
    int rc = mpWaiter->Wait(aMillies);
    if (rc == VERR_TRY_AGAIN)
    {
        /* Nothing ready yet: yield and report success so the caller retries. */
        RTThreadYield();
        rc = VINF_SUCCESS;
    }
    return rc;
#else /* !VBOX_USB_WITH_SYSFS */
    return USBProxyService::wait(aMillies);
#endif /* !VBOX_USB_WITH_SYSFS */
}
/**
 * Thread for erasing items from a shared list.
 *
 * Removes MTTESTITEMS entries at random positions; spins (yielding) whenever
 * the list is momentarily empty, relying on producer threads to refill it.
 *
 * @returns VINF_SUCCESS.
 * @param   hSelf     The thread handle (unused).
 * @param   pvUser    The provided user data: pointer to the shared test list.
 */
static DECLCALLBACK(int) MtTest6ThreadProc(RTTHREAD hSelf, void *pvUser)
{
    MTTESTLISTTYPE<MTTESTTYPE> *pTestList = (MTTESTLISTTYPE<MTTESTTYPE> *)pvUser;
    /* Try to delete items from random places. */
    for (size_t i = 0; i < MTTESTITEMS; ++i)
    {
        /* Make sure there is at least one item in the list. */
        while (pTestList->isEmpty())
            RTThreadYield();
        /* NOTE(review): size() could drop to 0 between the check above and
         * this call if another eraser runs concurrently — assumes the test
         * harness makes that impossible or tolerable. */
        pTestList->removeAt(RTRandU32Ex(0, (uint32_t)pTestList->size() - 1));
    }
    return VINF_SUCCESS;
}
/**
 * Yield the critical section if someone is waiting on it.
 *
 * When yielding, we'll leave the critical section and try to make sure the
 * other waiting threads get a chance of entering before we reclaim it.
 *
 * @retval  true if yielded.
 * @retval  false if not yielded.
 * @param   pCritSect   The critical section.
 */
VMMR3DECL(bool) PDMR3CritSectYield(PPDMCRITSECT pCritSect)
{
    AssertPtrReturn(pCritSect, false);
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == RTThreadNativeSelf());
    Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));

    /* No recursion allowed here. */
    int32_t const cNestings = pCritSect->s.Core.cNestings;
    AssertReturn(cNestings == 1, false);

    /* cLockers counts waiters+owner; if nobody besides us holds/waits,
     * there is nothing to yield to. */
    int32_t const cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
    if (cLockers < cNestings)
        return false;

#ifdef PDMCRITSECT_STRICT
    /* Preserve the validator source position so the re-enter below is
     * attributed to the original caller. */
    RTLOCKVALSRCPOS const SrcPos = pCritSect->s.Core.pValidatorRec->SrcPos;
#endif
    PDMCritSectLeave(pCritSect);

    /*
     * If we're lucky, then one of the waiters has entered the lock already.
     * We spin a little bit in hope for this to happen so we can avoid the
     * yield detour.
     */
    if (ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0)
    {
        int cLoops = 20;
        while (   cLoops > 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers)  >= 0)
        {
            ASMNopPause();
            cLoops--;
        }
        if (cLoops == 0)
            RTThreadYield();
    }

    /* Reclaim the section before returning; failure here is not tolerated. */
#ifdef PDMCRITSECT_STRICT
    int rc = PDMCritSectEnterDebug(pCritSect, VERR_IGNORED,
                                   SrcPos.uId, SrcPos.pszFile, SrcPos.uLine, SrcPos.pszFunction);
#else
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
#endif
    AssertLogRelRC(rc);
    return true;
}
/**
 * Mutex test thread: repeatedly acquires/releases g_hMutex while checking
 * mutual exclusion via the g_cbConcurrent counter, until g_fTerminate is set.
 *
 * @returns VINF_SUCCESS.
 * @param   ThreadSelf  The thread handle (used in diagnostics).
 * @param   pvUser      Pointer to this thread's iteration counter (uint64_t).
 */
int ThreadTest1(RTTHREAD ThreadSelf, void *pvUser)
{
    uint64_t *pu64 = (uint64_t *)pvUser;
    for (;;)
    {
        int rc = RTSemMutexRequestNoResume(g_hMutex, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc))
        {
            /* Fix: the format string has two specifiers ("%x" and "%Rrc") but
             * only one argument was passed originally - supply the thread
             * handle like the exit message below does. */
            PrintError("%x: RTSemMutexRequestNoResume failed with %Rrc\n", ThreadSelf, rc);
            break;
        }
        if (ASMAtomicIncU32(&g_cbConcurrent) != 1)
        {
            PrintError("g_cbConcurrent=%d after request!\n", g_cbConcurrent);
            break;
        }

        /*
         * Check for fairness: The values of the threads should not differ too much
         */
        (*pu64)++;

        /*
         * Check for correctness: Give other threads a chance. If the implementation is
         * correct, no other thread will be able to enter this lock now.
         */
        if (g_fYield)
            RTThreadYield();
        if (ASMAtomicDecU32(&g_cbConcurrent) != 0)
        {
            PrintError("g_cbConcurrent=%d before release!\n", g_cbConcurrent);
            break;
        }
        rc = RTSemMutexRelease(g_hMutex);
        if (RT_FAILURE(rc))
        {
            /* Same format-string argument fix as above. */
            PrintError("%x: RTSemMutexRelease failed with %Rrc\n", ThreadSelf, rc);
            break;
        }
        if (g_fTerminate)
            break;
    }
    if (!g_fQuiet)
        RTPrintf("tstSemMutex: Thread %08x exited with %lld\n", ThreadSelf, *pu64);
    return VINF_SUCCESS;
}
/**
 * Entry point for tstTime-4: verifies for ~2 seconds that RTTimeSystemNanoTS
 * and RTTimeNanoTS are monotonic and never drift more than 100 ms apart.
 *
 * @returns 0 on success, 1 on failure (process exit code).
 */
int main()
{
    unsigned cErrors = 0;

    RTR3InitExeNoArguments(RTR3INIT_FLAGS_SUPLIB);
    RTPrintf("tstTime-4: TESTING...\n");

    /*
     * Check that RTTimeSystemNanoTS doesn't go backwards and
     * isn't too far from RTTimeNanoTS().
     */
    RTTimeSystemNanoTS(); RTTimeNanoTS(); RTThreadYield(); /* warmup */
    const uint64_t SysStartTS = RTTimeSystemNanoTS();
    const uint64_t GipStartTS = RTTimeNanoTS();
    uint64_t SysPrevTS, GipPrevTS;
    do
    {
        SysPrevTS = RTTimeSystemNanoTS();
        GipPrevTS = RTTimeNanoTS();
        /* Monotonicity relative to the start samples. */
        if (SysPrevTS < SysStartTS)
        {
            cErrors++;
            RTPrintf("tstTime-4: Bad Sys time!\n");
        }
        if (GipPrevTS < GipStartTS)
        {
            cErrors++;
            RTPrintf("tstTime-4: Bad Gip time!\n");
        }
        /* The two clocks must agree to within 100 ms at all times. */
        uint64_t Delta = GipPrevTS > SysPrevTS ? GipPrevTS - SysPrevTS : SysPrevTS - GipPrevTS;
        if (Delta > 100000000ULL /* 100 ms */ )
        {
            cErrors++;
            RTPrintf("tstTime-4: Delta=%llu (GipPrevTS=%llu, SysPrevTS=%llu)!\n", Delta, GipPrevTS, SysPrevTS);
        }

    } while (SysPrevTS - SysStartTS < 2000000000 /* 2s */);

    if (!cErrors)
        RTPrintf("tstTime-4: SUCCESS\n");
    else
        RTPrintf("tstTime-4: FAILURE - %d errors\n", cErrors);
    return !!cErrors;
}
/**
 * Helper to update the display information from the Framebuffer.
 *
 * Busy-waits (yielding) until a framebuffer is attached, then propagates its
 * geometry and buffer address to the display connector of the driver, if the
 * driver has been constructed already.
 */
void Display::updateDisplayData()
{
    /* Spin until another thread attaches the framebuffer; RTThreadYield keeps
     * this from hogging the CPU but it is still a busy-wait. */
    while(!mFramebuffer)
    {
        RTThreadYield();
    }
    Assert(mFramebuffer);
    // the driver might not have been constructed yet
    if (mpDrv)
    {
        mFramebuffer->getAddress ((uintptr_t *)&mpDrv->Connector.pu8Data);
        mFramebuffer->getLineSize ((ULONG*)&mpDrv->Connector.cbScanline);
        mFramebuffer->getBitsPerPixel ((ULONG*)&mpDrv->Connector.cBits);
        mFramebuffer->getWidth ((ULONG*)&mpDrv->Connector.cx);
        mFramebuffer->getHeight ((ULONG*)&mpDrv->Connector.cy);
        /* ~0UL is the "no VRAM" sentinel address: only enable VRAM rendering
         * when the framebuffer supplied a real buffer. */
        mpDrv->pUpPort->pfnSetRenderVRAM (mpDrv->pUpPort,
                                          !!(mpDrv->Connector.pu8Data != (uint8_t*)~0UL));
    }
}
/**
 * Run the main service thread which listens for host state change
 * notifications.
 *
 * @returns iprt status value.  Service will be set to the stopped state on
 *          failure.
 */
int SeamlessMain::run(void)
{
    int rc = VINF_SUCCESS;

    LogRelFlowFunc(("\n"));
    /* This will only exit if something goes wrong. */
    while (RT_SUCCESS(rc) || rc == VERR_INTERRUPTED)
    {
        if (RT_FAILURE(rc))
            /* If we are not stopping, yield the CPU to avoid spinning too
             * hard while retrying.  (Note: this yields rather than sleeps.) */
            RTThreadYield();
        rc = nextStateChangeEvent();
    }
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxClient (seamless): event loop failed with error: %Rrc\n", rc));
        stop();
    }
    return rc;
}
/**
 * Sub test: exercises memory-pool serialization with the given number of
 * concurrent threads.  Creates a pool, starts cThreads workers, signals them
 * to begin, waits for completion, then destroys the pool.
 *
 * @param   cThreads    Number of worker threads to run.
 */
static void tst4Sub(uint32_t cThreads)
{
    RTTestISubF("Serialization - %u threads", cThreads);

    RTMEMPOOL hMemPool;
    RTTESTI_CHECK_RC_RETV(RTMemPoolCreate(&hMemPool, "test 2a"), VINF_SUCCESS);
    g_hMemPool4 = hMemPool;

    PRTTHREAD pahThreads = (PRTTHREAD)RTMemPoolAlloc(hMemPool, cThreads * sizeof(RTTHREAD));
    RTTESTI_CHECK(pahThreads);
    if (pahThreads)
    {
        /* start them. */
        for (uint32_t i = 0; i < cThreads; i++)
        {
            int rc = RTThreadCreateF(&pahThreads[i], tst4Thread, (void *)(uintptr_t)i, 0,
                                     RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE,
                                     "tst4-%u/%u", i, cThreads);
            RTTESTI_CHECK_RC_OK(rc);
            if (RT_FAILURE(rc))
                pahThreads[i] = NIL_RTTHREAD; /* slot must be valid for the loops below */
        }
        RTThreadYield();

        /* kick them off. */
        for (uint32_t i = 0; i < cThreads; i++)
            if (pahThreads[i] != NIL_RTTHREAD)
                RTTESTI_CHECK_RC_OK(RTThreadUserSignal(pahThreads[i]));

        /* wait for them. */
        for (uint32_t i = 0; i < cThreads; i++)
            if (pahThreads[i] != NIL_RTTHREAD)
            {
                int rc = RTThreadWait(pahThreads[i], 2*60*1000, NULL);
                RTTESTI_CHECK_RC_OK(rc);
            }
    }

    RTTESTI_CHECK_RC(RTMemPoolDestroy(hMemPool), VINF_SUCCESS);
}
/**
 * Memory bandwidth benchmark: for every CPU pair (alloc CPU, access CPU),
 * allocates a 32 MB buffer on one CPU and measures write, read and
 * read/write (memcpy) throughput from the other, reporting MB/s values.
 *
 * @param   hTest   The test handle (unused; values reported via RTTestIValueF).
 */
static void doTest(RTTEST hTest)
{
    NOREF(hTest);

    uint32_t iAllocCpu = 0;
    while (iAllocCpu < RTCPUSET_MAX_CPUS)
    {
        const uint32_t cbTestSet   = _1M * 32;
        const uint32_t cIterations = 384;

        /*
         * Change CPU and allocate a chunk of memory.
         */
        RTTESTI_CHECK_RC_OK_RETV(RTThreadSetAffinityToCpu(RTMpCpuIdFromSetIndex(iAllocCpu)));

        void *pvTest = RTMemPageAlloc(cbTestSet); /* may be leaked, who cares */
        RTTESTI_CHECK_RETV(pvTest != NULL);
        memset(pvTest, 0xef, cbTestSet); /* touch every page so it is committed */

        /*
         * Do the tests.
         */
        uint32_t iAccessCpu = 0;
        while (iAccessCpu < RTCPUSET_MAX_CPUS)
        {
            RTTESTI_CHECK_RC_OK_RETV(RTThreadSetAffinityToCpu(RTMpCpuIdFromSetIndex(iAccessCpu)));

            /*
             * The write test.
             */
            RTTimeNanoTS(); RTThreadYield(); /* warm up timer, start on fresh slice */
            uint64_t u64StartTS = RTTimeNanoTS();
            for (uint32_t i = 0; i < cIterations; i++)
            {
                ASMCompilerBarrier(); /* paranoia */
                memset(pvTest, i, cbTestSet);
            }
            uint64_t const cNsElapsedWrite = RTTimeNanoTS() - u64StartTS;
            uint64_t cMBPerSec = (uint64_t)(  ((uint64_t)cIterations * cbTestSet) /* bytes */
                                            / ((long double)cNsElapsedWrite / RT_NS_1SEC_64) /* seconds */
                                            / _1M /* MB */ );
            RTTestIValueF(cMBPerSec, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-write", iAllocCpu, iAccessCpu);

            /*
             * The read test.
             */
            memset(pvTest, 0, cbTestSet);
            RTTimeNanoTS(); RTThreadYield();
            u64StartTS = RTTimeNanoTS();
            for (uint32_t i = 0; i < cIterations; i++)
            {
#if 1
                /* Sum the buffer through volatile reads so the compiler cannot
                 * elide the loop. */
                size_t register u = 0;
                size_t volatile *puCur = (size_t volatile *)pvTest;
                size_t volatile *puEnd = puCur + cbTestSet / sizeof(size_t);
                while (puCur != puEnd)
                    u += *puCur++;
#else
                ASMCompilerBarrier(); /* paranoia */
                void *pvFound = memchr(pvTest, (i & 127) + 1, cbTestSet);
                RTTESTI_CHECK(pvFound == NULL);
#endif
            }
            uint64_t const cNsElapsedRead = RTTimeNanoTS() - u64StartTS;
            cMBPerSec = (uint64_t)(  ((uint64_t)cIterations * cbTestSet) /* bytes */
                                   / ((long double)cNsElapsedRead / RT_NS_1SEC_64) /* seconds */
                                   / _1M /* MB */ );
            RTTestIValueF(cMBPerSec, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-read", iAllocCpu, iAccessCpu);

            /*
             * The read/write test.
             */
            RTTimeNanoTS(); RTThreadYield();
            u64StartTS = RTTimeNanoTS();
            for (uint32_t i = 0; i < cIterations; i++)
            {
                ASMCompilerBarrier(); /* paranoia */
                /* copy second half over first half (non-overlapping) */
                memcpy(pvTest, (uint8_t *)pvTest + cbTestSet / 2, cbTestSet / 2);
            }
            uint64_t const cNsElapsedRW = RTTimeNanoTS() - u64StartTS;
            cMBPerSec = (uint64_t)(  ((uint64_t)cIterations * cbTestSet) /* bytes */
                                   / ((long double)cNsElapsedRW / RT_NS_1SEC_64) /* seconds */
                                   / _1M /* MB */ );
            RTTestIValueF(cMBPerSec, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-read-write", iAllocCpu, iAccessCpu);

            /*
             * Total time.
             */
            RTTestIValueF(cNsElapsedRead + cNsElapsedWrite + cNsElapsedRW, RTTESTUNIT_NS,
                          "cpu%02u-mem%02u-time", iAllocCpu, iAccessCpu);

            /* advance */
            iAccessCpu = getNextCpu(iAccessCpu);
        }

        /*
         * Clean up and advance to the next CPU.
         */
        RTMemPageFree(pvTest, cbTestSet);
        iAllocCpu = getNextCpu(iAllocCpu);
    }
}
/**
 * Display change request monitor thread function.
 *
 * Waits for display-change/mouse-capability host events, jiggles the mouse
 * pointer to wake the X11 video driver, and applies the requested guest
 * resolution either directly (old RandR) or via the xrandr utility.
 *
 * Before entering the loop, we re-read the last request
 * received, and if the first one received inside the
 * loop is identical we ignore it, because it is probably
 * stale.
 *
 * @returns IPRT status code (only returns on fatal event-wait failure).
 * @param   pDisplay    The X11 display connection.
 */
static int runDisplay(Display *pDisplay)
{
    LogRelFlowFunc(("\n"));
    Cursor hClockCursor = XCreateFontCursor(pDisplay, XC_watch);
    Cursor hArrowCursor = XCreateFontCursor(pDisplay, XC_left_ptr);
    int RRMaj, RRMin;
    if (!XRRQueryVersion(pDisplay, &RRMaj, &RRMin))
        RRMin = 0; /* no RandR: fall back to setSize() below */
    const char *pcszXrandr = "xrandr";
    if (RTFileExists("/usr/X11/bin/xrandr"))
        pcszXrandr = "/usr/X11/bin/xrandr";
    int rc = RTThreadCreate(NULL, x11ConnectionMonitor, NULL, 0,
                   RTTHREADTYPE_INFREQUENT_POLLER, 0, "X11 monitor");
    if (RT_FAILURE(rc))
        return rc;
    while (true)
    {
        uint32_t fEvents = 0, cx = 0, cy = 0, cBits = 0, iDisplay = 0;
        rc = VbglR3WaitEvent(  VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST
                             | VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED,
                             RT_INDEFINITE_WAIT, &fEvents);
        if (RT_FAILURE(rc) && rc != VERR_INTERRUPTED)  /* VERR_NO_MEMORY? */
            return rc;
        /* Jiggle the mouse pointer to wake up the driver. */
        XGrabPointer(pDisplay,
                     DefaultRootWindow(pDisplay), true, 0, GrabModeAsync,
                     GrabModeAsync, None, hClockCursor, CurrentTime);
        XFlush(pDisplay);
        XGrabPointer(pDisplay,
                     DefaultRootWindow(pDisplay), true, 0, GrabModeAsync,
                     GrabModeAsync, None, hArrowCursor, CurrentTime);
        XFlush(pDisplay);
        XUngrabPointer(pDisplay, CurrentTime);
        XFlush(pDisplay);
        /* And if it is a size hint, set the new size now that the video
         * driver has had a chance to update its list. */
        if (RT_SUCCESS(rc) && (fEvents & VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST))
        {
            int rc2 = VbglR3GetDisplayChangeRequest(&cx, &cy, &cBits, &iDisplay,
                                                    true);
            /* On failure just yield and go back to waiting rather than
             * busy-looping on the same request. */
            if (RT_FAILURE(rc2))
                RTThreadYield();
            else if (RRMin < 2) /* RandR < 1.2: use the simple resize path */
                setSize(pDisplay, cx, cy);
            else
            {
                /* RandR >= 1.2: push the mode through the xrandr utility. */
                char szCommand[256];
                RTStrPrintf(szCommand, sizeof(szCommand),
                            "%s --output VBOX%u --set VBOX_MODE %dx%d",
                            pcszXrandr, iDisplay, cx, cy);
                system(szCommand);
                RTStrPrintf(szCommand, sizeof(szCommand),
                            "%s --output VBOX%u --preferred",
                            pcszXrandr, iDisplay);
                system(szCommand);
            }
        }
    }
    LogRelFlowFunc(("returning VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
/**
 * Acquire a generic spinlock.
 *
 * Spins on an atomic compare-exchange of fLocked.  For interrupt-safe locks
 * on x86/amd64 the interrupt flag is disabled while the lock is held (the
 * caller's EFLAGS are saved in the lock and restored between spin rounds).
 * If RT_CFG_SPINLOCK_GENERIC_DO_SLEEP is set, the thread yields after that
 * many failed attempts instead of pure busy-waiting.
 *
 * @param   Spinlock    The spinlock handle.
 */
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_GEN_MAGIC,
              ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /* Remember the caller's interrupt state so Release can restore it. */
        uint32_t fIntSaved = ASMGetFlags();
#endif

#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        for (;;)
        {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
#endif
            /* Bounded spin with interrupts disabled ... */
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                    pThis->fIntSaved = fIntSaved;
# endif
                    return;
                }
                ASMNopPause();
            }
            /* ... then re-enable interrupts and yield before trying again. */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMSetFlags(fIntSaved);
#endif
            RTThreadYield();
        }
#else
        for (;;)
        {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
#endif
            if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                pThis->fIntSaved = fIntSaved;
# endif
                return;
            }
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMSetFlags(fIntSaved);
#endif
            ASMNopPause();
        }
#endif
    }
    else
    {
        /* Not interrupt safe: plain spin, optionally yielding periodically. */
#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        for (;;)
        {
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                    return;
                ASMNopPause();
            }
            RTThreadYield();
        }
#else
        while (!ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            ASMNopPause();
#endif
    }
}
/**
 * Wait for data to become available on a local IPC session (Windows named
 * pipe implementation).
 *
 * Peeks the pipe for buffered bytes; if none, starts a zero-byte overlapped
 * read and waits on its event, honouring the timeout.  The session critical
 * section is held except while actually waiting.
 *
 * @returns IPRT status code: VINF_SUCCESS when data is available,
 *          VERR_TIMEOUT on timeout, VERR_CANCELLED if the I/O was cancelled,
 *          or a converted Win32 error.
 * @param   hSession    The session handle.
 * @param   cMillies    Timeout in milliseconds; 0 polls once,
 *                      RT_INDEFINITE_WAIT waits forever.
 */
RTDECL(int) RTLocalIpcSessionWaitForData(RTLOCALIPCSESSION hSession, uint32_t cMillies)
{
    PRTLOCALIPCSESSIONINT pThis = (PRTLOCALIPCSESSIONINT)hSession;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTLOCALIPCSESSION_MAGIC, VERR_INVALID_HANDLE);

    uint64_t const StartMsTS = RTTimeMilliTS();

    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
        return rc;
    for (unsigned iLoop = 0;; iLoop++)
    {
        HANDLE hWait = INVALID_HANDLE_VALUE;
        if (pThis->fIOPending)
            hWait = pThis->OverlappedIO.hEvent; /* reuse the in-flight operation */
        else
        {
            /* Peek at the pipe buffer and see how many bytes it contains. */
            DWORD cbAvailable;
            BOOL fRc = PeekNamedPipe(pThis->hNmPipe, NULL, 0, NULL, &cbAvailable, NULL);
            if (   fRc
                && cbAvailable)
            {
                rc = VINF_SUCCESS;
                break;
            }
            else if (!fRc)
            {
                rc = RTErrConvertFromWin32(GetLastError());
                break;
            }

            /* Start a zero byte read operation that we can wait on. */
            if (cMillies == 0)
            {
                rc = VERR_TIMEOUT; /* pure poll, no data: done */
                break;
            }
            AssertBreakStmt(pThis->cRefs == 1, rc = VERR_WRONG_ORDER);
            fRc = ResetEvent(pThis->OverlappedIO.hEvent); Assert(fRc == TRUE);
            DWORD cbRead = 0;
            if (ReadFile(pThis->hNmPipe, pThis->abBuf, 0, &cbRead, &pThis->OverlappedIO))
            {
                /* Completed synchronously: data is (or just was) available. */
                rc = VINF_SUCCESS;
                if (iLoop > 10)
                    RTThreadYield(); /* be nice if we keep looping */
            }
            else if (GetLastError() == ERROR_IO_PENDING)
            {
                /* Async: take a reference for the pending I/O and wait on it. */
                pThis->cRefs++;
                pThis->fIOPending = true;
                pThis->fZeroByteRead = true;
                hWait = pThis->OverlappedIO.hEvent;
            }
            else
                rc = RTErrConvertFromWin32(GetLastError());
        }
        if (RT_FAILURE(rc))
            break;

        /*
         * Check for timeout.
         */
        DWORD cMsMaxWait = INFINITE;
        if (   cMillies != RT_INDEFINITE_WAIT
            && (   hWait != INVALID_HANDLE_VALUE
                || iLoop > 10)
           )
        {
            uint64_t cElapsed = RTTimeMilliTS() - StartMsTS;
            if (cElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }
            cMsMaxWait = cMillies - (uint32_t)cElapsed;
        }

        /*
         * Wait (outside the critical section so others can use the session).
         */
        if (hWait != INVALID_HANDLE_VALUE)
        {
            RTCritSectLeave(&pThis->CritSect);

            DWORD dwRc = WaitForSingleObject(hWait, cMsMaxWait);
            if (dwRc == WAIT_OBJECT_0)
                rc = VINF_SUCCESS;
            else if (dwRc == WAIT_TIMEOUT)
                rc = VERR_TIMEOUT;
            else if (dwRc == WAIT_ABANDONED)
                rc = VERR_INVALID_HANDLE;
            else
                rc = RTErrConvertFromWin32(GetLastError());

            if (   RT_FAILURE(rc)
                && pThis->u32Magic != RTLOCALIPCSESSION_MAGIC)
                return rc; /* session was destroyed while we waited - do not touch it */

            int rc2 = RTCritSectEnter(&pThis->CritSect);
            AssertRC(rc2);
            if (pThis->fZeroByteRead)
            {
                /* Tear down the zero-byte read: drop the I/O reference, cancel
                 * on failure, and reap the overlapped result. */
                Assert(pThis->cRefs);
                pThis->cRefs--;
                pThis->fIOPending = false;
                if (rc != VINF_SUCCESS)
                {
                    BOOL fRc = CancelIo(pThis->hNmPipe);
                    Assert(fRc == TRUE);
                }
                DWORD cbRead = 0;
                BOOL fRc = GetOverlappedResult(pThis->hNmPipe, &pThis->OverlappedIO, &cbRead, TRUE /*fWait*/);
                if (   !fRc
                    && RT_SUCCESS(rc))
                {
                    DWORD dwRc = GetLastError();
                    if (dwRc == ERROR_OPERATION_ABORTED)
                        rc = VERR_CANCELLED;
                    else
                        rc = RTErrConvertFromWin32(dwRc);
                }
            }

            if (RT_FAILURE(rc))
                break;
        }
    }

    int rc2 = RTCritSectLeave(&pThis->CritSect);
    if (RT_SUCCESS(rc))
        rc = rc2;

    return rc;
}
int main() { RTR3InitExeNoArguments(0); /* * Just a simple testcase. */ RTPrintf("tstOnce: TESTING - smoke...\n"); RTONCE Once1 = RTONCE_INITIALIZER; g_fOnceCB1 = false; int rc = RTOnce(&Once1, Once1CB, (void *)1); if (rc != VINF_SUCCESS) RTPrintf("tstOnce: ERROR - Once1, 1 failed, rc=%Rrc\n", rc); g_fOnceCB1 = false; rc = RTOnce(&Once1, Once1CB, (void *)1); if (rc != VINF_SUCCESS) RTPrintf("tstOnce: ERROR - Once1, 2 failed, rc=%Rrc\n", rc); /* * Throw a bunch of threads up against a init once thing. */ RTPrintf("tstOnce: TESTING - bunch of threads...\n"); /* create the semaphore they'll be waiting on. */ rc = RTSemEventMultiCreate(&g_hEventMulti); if (RT_FAILURE(rc)) { RTPrintf("tstOnce: FATAL ERROR - RTSemEventMultiCreate returned %Rrc\n", rc); return 1; } /* create the threads */ RTTHREAD aThreads[32]; for (unsigned i = 0; i < RT_ELEMENTS(aThreads); i++) { char szName[16]; RTStrPrintf(szName, sizeof(szName), "ONCE2-%d\n", i); rc = RTThreadCreate(&aThreads[i], Once2Thread, NULL, 0, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, szName); if (RT_FAILURE(rc)) { RTPrintf("tstOnce: ERROR - failed to create thread #%d\n", i); g_cErrors++; } } /* kick them off and yield */ rc = RTSemEventMultiSignal(g_hEventMulti); if (RT_FAILURE(rc)) { RTPrintf("tstOnce: FATAL ERROR - RTSemEventMultiSignal returned %Rrc\n", rc); return 1; } RTThreadYield(); /* wait for all of them to finish up, 30 seconds each. */ for (unsigned i = 0; i < RT_ELEMENTS(aThreads); i++) if (aThreads[i] != NIL_RTTHREAD) { int rc2; rc = RTThreadWait(aThreads[i], 30*1000, &rc2); if (RT_FAILURE(rc)) { RTPrintf("tstOnce: ERROR - RTThreadWait on thread #%u returned %Rrc\n", i, rc); g_cErrors++; } else if (RT_FAILURE(rc2)) { RTPrintf("tstOnce: ERROR - Thread #%u returned %Rrc\n", i, rc2); g_cErrors++; } } /* * Summary. */ if (!g_cErrors) RTPrintf("tstOnce: SUCCESS\n"); else RTPrintf("tstOnce: FAILURE - %d errors\n", g_cErrors); return !!g_cErrors; }
/**
 * Thread method to wait for XPCOM events and notify the SDL thread.
 *
 * Blocks in select() on the event-queue socket; when XPCOM events arrive it
 * posts an SDL user event (throttled so the SDL queue cannot overflow) and
 * waits for the main thread to drain the XPCOM queue.
 *
 * @returns Error code
 * @param   hThreadSelf     Thread handle (unused).
 * @param   pvUser          User specific parameter, the file descriptor
 *                          of the event queue socket.
 */
DECLCALLBACK(int) xpcomEventThread(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    int eqFD = (intptr_t)pvUser;
    unsigned cErrors = 0;
    int rc;

    /* Wait with the processing till the main thread needs it. */
    RTSemEventWait(g_EventSemXPCOMQueueThread, 2500);

    do
    {
        fd_set fdset;
        FD_ZERO(&fdset);
        FD_SET(eqFD, &fdset);
        int n = select(eqFD + 1, &fdset, NULL, NULL, NULL);

        /* are there any events to process? */
        if ((n > 0) && !g_fTerminateXPCOMQueueThread)
        {
            /*
             * Wait until all XPCOM events are processed. 1s just for sanity.
             */
            int iWait = 1000;

            /*
             * Don't post an event if there is a pending XPCOM event to prevent an
             * overflow of the SDL event queue.
             */
            if (g_s32XPCOMEventsPending < 1)
            {
                /*
                 * Post the event and wait for it to be processed. If we don't wait,
                 * we'll flood the queue on SMP systems and when the main thread is busy.
                 * In the event of a push error, we'll yield the timeslice and retry.
                 */
                SDL_Event event = {0};
                event.type = SDL_USEREVENT;
                event.user.type = SDL_USER_EVENT_XPCOM_EVENTQUEUE;
                rc = SDL_PushEvent(&event);
                if (!rc)
                {
                    /* success */
                    ASMAtomicIncS32(&g_s32XPCOMEventsPending);
                    cErrors = 0;
                }
                else
                {
                    /* failure */
                    cErrors++;
                    if (!RTThreadYield())
                        RTThreadSleep(2); /* yield did nothing: sleep instead */
                    /* back off harder after repeated failures, capped at 50 ms */
                    iWait = (cErrors >= 10) ? RT_MIN(cErrors - 8, 50) : 0;
                }
            }
            else
                Log2(("not enqueueing SDL XPCOM event (%d)\n", g_s32XPCOMEventsPending));

            if (iWait)
                RTSemEventWait(g_EventSemXPCOMQueueThread, iWait);
        }
    } while (!g_fTerminateXPCOMQueueThread);
    return VINF_SUCCESS;
}
/**
 * Common worker for RTPoll and RTPollNoResume.
 *
 * Calls poll(2) on the set's file descriptors and translates the first
 * signalled entry's revents into RTPOLL_EVT_* flags.
 *
 * @returns IPRT status code: VINF_SUCCESS with *pfEvents / *pid set,
 *          VERR_TIMEOUT, VERR_DEADLOCK for an indefinite wait on an empty
 *          set, VERR_INTERRUPTED in a should-not-happen fallback, or a
 *          converted errno.
 * @param   pThis       The poll set.
 * @param   cMillies    Timeout; RT_INDEFINITE_WAIT (or >= INT_MAX) maps to
 *                      poll's -1 "wait forever".
 * @param   pfEvents    Where to return the event mask (optional).
 * @param   pid         Where to return the handle id (optional).
 */
static int rtPollNoResumeWorker(RTPOLLSETINTERNAL *pThis, RTMSINTERVAL cMillies, uint32_t *pfEvents, uint32_t *pid)
{
    /* Indefinitely polling nothing would block forever. */
    if (RT_UNLIKELY(pThis->cHandles == 0 && cMillies == RT_INDEFINITE_WAIT))
        return VERR_DEADLOCK;

    /* clear the revents. */
    uint32_t i = pThis->cHandles;
    while (i-- > 0)
        pThis->paPollFds[i].revents = 0;

    int rc = poll(&pThis->paPollFds[0], pThis->cHandles,
                  cMillies == RT_INDEFINITE_WAIT || cMillies >= INT_MAX
                  ? -1
                  : (int)cMillies);
    if (rc == 0)
        return VERR_TIMEOUT;
    if (rc < 0)
        return RTErrConvertFromErrno(errno);

    /* Report the first handle with pending events. */
    for (i = 0; i < pThis->cHandles; i++)
        if (pThis->paPollFds[i].revents)
        {
            if (pfEvents)
            {
                *pfEvents = 0;
                /* Anything readable-ish maps to RTPOLL_EVT_READ. */
                if (pThis->paPollFds[i].revents & (POLLIN
#ifdef POLLRDNORM
                                                   | POLLRDNORM     /* just in case */
#endif
#ifdef POLLRDBAND
                                                   | POLLRDBAND    /* ditto */
#endif
#ifdef POLLPRI
                                                   | POLLPRI       /* ditto */
#endif
#ifdef POLLMSG
                                                   | POLLMSG       /* ditto */
#endif
#ifdef POLLWRITE
                                                   | POLLWRITE       /* ditto */
#endif
#ifdef POLLEXTEND
                                                   | POLLEXTEND    /* ditto */
#endif
                                                   )
                    )
                    *pfEvents |= RTPOLL_EVT_READ;

                /* Writable variants map to RTPOLL_EVT_WRITE. */
                if (pThis->paPollFds[i].revents & (POLLOUT
#ifdef POLLWRNORM
                                                   | POLLWRNORM    /* just in case */
#endif
#ifdef POLLWRBAND
                                                   | POLLWRBAND    /* ditto */
#endif
                                                   )
                    )
                    *pfEvents |= RTPOLL_EVT_WRITE;

                /* Errors, hangups and bad fds map to RTPOLL_EVT_ERROR. */
                if (pThis->paPollFds[i].revents & (POLLERR | POLLHUP | POLLNVAL
#ifdef POLLRDHUP
                                                   | POLLRDHUP
#endif
                                                   )
                    )
                    *pfEvents |= RTPOLL_EVT_ERROR;
            }

            if (pid)
                *pid = pThis->paHandles[i].id;
            return VINF_SUCCESS;
        }

    /* poll claimed rc > 0 ready fds but none had revents set - should be
     * impossible; yield and tell the caller to retry. */
    AssertFailed();
    RTThreadYield();
    return VERR_INTERRUPTED;
}
/**
 * Do the bi-directional transfer test.
 *
 * Spawns a sender and a receiver thread per interface, waits for the senders,
 * drains the receive rings, reports throughput, then reaps all threads and
 * dumps the ring statistics.
 *
 * @param   pThis       The test state with the two interfaces/buffers.
 * @param   cbFrame     Frame size to use for the transfers.
 */
static void tstBidirectionalTransfer(PTSTSTATE pThis, uint32_t cbFrame)
{
    /* Interface 0: MAC 8086:0:0. */
    MYARGS Args0;
    RT_ZERO(Args0);
    Args0.hIf         = pThis->hIf0;
    Args0.pBuf        = pThis->pBuf0;
    Args0.Mac.au16[0] = 0x8086;
    Args0.Mac.au16[1] = 0;
    Args0.Mac.au16[2] = 0;
    Args0.cbFrame     = cbFrame;

    /* Interface 1: MAC 8086:0:1. */
    MYARGS Args1;
    RT_ZERO(Args1);
    Args1.hIf         = pThis->hIf1;
    Args1.pBuf        = pThis->pBuf1;
    Args1.Mac.au16[0] = 0x8086;
    Args1.Mac.au16[1] = 0;
    Args1.Mac.au16[2] = 1;
    Args1.cbFrame     = cbFrame;

    RTTHREAD ThreadRecv0 = NIL_RTTHREAD;
    RTTHREAD ThreadRecv1 = NIL_RTTHREAD;
    RTTHREAD ThreadSend0 = NIL_RTTHREAD;
    RTTHREAD ThreadSend1 = NIL_RTTHREAD;
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadRecv0, ReceiveThread, &Args0, 0, RTTHREADTYPE_IO,        RTTHREADFLAGS_WAITABLE, "RECV0"));
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadRecv1, ReceiveThread, &Args1, 0, RTTHREADTYPE_IO,        RTTHREADFLAGS_WAITABLE, "RECV1"));
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadSend0, SendThread,    &Args0, 0, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE, "SEND0"));
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadSend1, SendThread,    &Args1, 0, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE, "SEND1"));

    /* Wait for the senders first; receivers are reaped after the rings drain. */
    int rc2 = VINF_SUCCESS;
    int rc;
    RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadSend0, 5*60*1000, &rc2));
    if (RT_SUCCESS(rc))
    {
        RTTESTI_CHECK_RC_OK(rc2);
        ThreadSend0 = NIL_RTTHREAD;
        RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadSend1, 5*60*1000, RT_SUCCESS(rc2) ? &rc2 : NULL));
        if (RT_SUCCESS(rc))
        {
            ThreadSend1 = NIL_RTTHREAD;
            RTTESTI_CHECK_RC_OK(rc2);
        }
    }
    if (RTTestErrorCount(g_hTest) == 0)
    {
        /*
         * Wait a bit for the receivers to finish up.
         */
        unsigned cYields = 100000;
        while (   (   IntNetRingHasMoreToRead(&pThis->pBuf0->Recv)
                   || IntNetRingHasMoreToRead(&pThis->pBuf1->Recv))
               && cYields-- > 0)
            RTThreadYield();

        /*
         * Compute and report the throughput over the combined window.
         */
        uint64_t u64Elapsed = RT_MAX(Args0.u64End, Args1.u64End) - RT_MIN(Args0.u64Start, Args1.u64Start);
        uint64_t u64Speed = (uint64_t)((2 * g_cbTransfer / 1024) / (u64Elapsed / 1000000000.0));
        RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                     "transferred %u bytes in %'RU64 ns (%'RU64 KB/s)\n",
                     2 * g_cbTransfer, u64Elapsed, u64Speed);

        /*
         * Wait for the threads to finish up...
         */
        RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadRecv0, 5000, &rc2));
        if (RT_SUCCESS(rc))
        {
            RTTESTI_CHECK_RC_OK(rc2);
            ThreadRecv0 = NIL_RTTHREAD;
        }

        RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadRecv1, 5000, &rc2));
        if (RT_SUCCESS(rc))
        {
            RTTESTI_CHECK_RC_OK(rc2);
            ThreadRecv1 = NIL_RTTHREAD;
        }
    }

    /*
     * Give them a chance to complete...
     * (RTThreadWait on NIL_RTTHREAD handles simply fails; already-reaped
     * threads were set to NIL above.)
     */
    RTThreadWait(ThreadRecv0, 5000, NULL);
    RTThreadWait(ThreadRecv1, 5000, NULL);
    RTThreadWait(ThreadSend0, 5000, NULL);
    RTThreadWait(ThreadSend1, 5000, NULL);

    /*
     * Display statistics.
     */
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf0: Yields-OK=%llu Yields-NOK=%llu Lost=%llu Bad=%llu\n",
                 pThis->pBuf0->cStatYieldsOk.c,
                 pThis->pBuf0->cStatYieldsNok.c,
                 pThis->pBuf0->cStatLost.c,
                 pThis->pBuf0->cStatBadFrames.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf0.Recv: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf0->Recv.cStatFrames,
                 pThis->pBuf0->Recv.cbStatWritten.c,
                 pThis->pBuf0->Recv.cOverflows.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf0.Send: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf0->Send.cStatFrames,
                 pThis->pBuf0->Send.cbStatWritten.c,
                 pThis->pBuf0->Send.cOverflows.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf1: Yields-OK=%llu Yields-NOK=%llu Lost=%llu Bad=%llu\n",
                 pThis->pBuf1->cStatYieldsOk.c,
                 pThis->pBuf1->cStatYieldsNok.c,
                 pThis->pBuf1->cStatLost.c,
                 pThis->pBuf1->cStatBadFrames.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf1.Recv: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf1->Recv.cStatFrames,
                 pThis->pBuf1->Recv.cbStatWritten.c,
                 pThis->pBuf1->Recv.cOverflows.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf1.Send: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf1->Send.cStatFrames,
                 pThis->pBuf1->Send.cbStatWritten.c,
                 pThis->pBuf1->Send.cOverflows.c);
}
/**
 * Stops monitoring a VBoxSVC process.
 *
 * Queues a "remove" todo entry for the user's watcher thread, first removing
 * any duplicate entries for the same user.  If a pending add cancels out the
 * remove, the operation completes immediately.  If the todo queue is full,
 * backs off and retries.
 *
 * @param   pUserData   The user which chosen VBoxSVC should be watched.
 * @param   pid         The VBoxSVC PID.
 */
void VirtualBoxSDS::i_stopWatching(VBoxSDSPerUserData *pUserData, RTPROCESS pid)
{
    /*
     * Add a remove order in the watcher's todo queue.
     */
    RTCritSectEnter(&m_WatcherCritSect);
    for (uint32_t iRound = 0; ; iRound++)
    {
        uint32_t const iWatcher = pUserData->m_iWatcher;
        if (iWatcher < m_cWatchers)
        {
            VBoxSDSWatcher *pWatcher = m_papWatchers[pUserData->m_iWatcher];
            if (!pWatcher->fShutdown)
            {
                /*
                 * Remove duplicate todo entries.
                 */
                bool     fAddIt = true;
                uint32_t iTodo  = pWatcher->cTodos;
                while (iTodo-- > 0)
                    if (pWatcher->aTodos[iTodo].Data.pUserData == pUserData)
                    {
                        /* A pending remove (hProcess == NULL) means we still need
                         * our own entry; a pending add cancels the remove out. */
                        if (pWatcher->aTodos[iTodo].hProcess == NULL)
                            fAddIt = true;
                        else
                        {
                            fAddIt = false;
                            CloseHandle(pWatcher->aTodos[iTodo].hProcess);
                        }
                        /* Compact the queue over the removed entry. */
                        uint32_t const cTodos   = --pWatcher->cTodos;
                        uint32_t const cToShift = cTodos - iTodo;
                        if (cToShift > 0)
                            memmove(&pWatcher->aTodos[iTodo], &pWatcher->aTodos[iTodo + 1],
                                    sizeof(pWatcher->aTodos[0]) * cToShift);
                        pWatcher->aTodos[cTodos].hProcess = NULL;
                        pWatcher->aTodos[cTodos].Data.setNull();
                    }

                /*
                 * Did we just eliminate the add, cancelling out this operation?
                 */
                if (!fAddIt)
                {
                    pUserData->m_iWatcher = UINT32_MAX;
                    pUserData->m_iTheChosenOneRevision++;
                    i_decrementClientCount();
                    RTCritSectLeave(&m_WatcherCritSect);
                    RTThreadYield();
                    return;
                }

                /*
                 * No we didn't. So, try append a removal item.
                 */
                iTodo = pWatcher->cTodos;
                if (iTodo < RT_ELEMENTS(pWatcher->aTodos))
                {
                    pWatcher->aTodos[iTodo].hProcess       = NULL; /* NULL marks a remove order */
                    pWatcher->aTodos[iTodo].Data.pUserData = pUserData;
                    pWatcher->aTodos[iTodo].Data.pid       = pid;
                    pWatcher->aTodos[iTodo].Data.iRevision = pUserData->m_iTheChosenOneRevision++;
                    pWatcher->cTodos = iTodo + 1;
                    SetEvent(pWatcher->aHandles[0]); /* wake the watcher thread */
                    pUserData->m_iWatcher = UINT32_MAX;
                    i_decrementClientCount();
                    RTCritSectLeave(&m_WatcherCritSect);
                    RTThreadYield();
                    return;
                }
            }
            else
            {
                LogRel(("i_stopWatching: Watcher #%u has shut down.\n", iWatcher));
                break;
            }

            /*
             * Todo queue is full.  Sleep a little and let the watcher process it.
             */
            LogRel(("i_stopWatching: Watcher #%u todo queue is full! (round #%u)\n", iWatcher, iRound));
            uint32_t const iTheChosenOneRevision = pUserData->m_iTheChosenOneRevision;
            SetEvent(pWatcher->aHandles[0]);
            RTCritSectLeave(&m_WatcherCritSect);
            RTThreadSleep(1 + (iRound & 127)); /* bounded, slowly growing backoff */
            RTCritSectEnter(&m_WatcherCritSect);
            AssertLogRelMsgBreak(pUserData->m_iTheChosenOneRevision == iTheChosenOneRevision,
                                 ("Impossible! m_iTheChosenOneRevision changed %#x -> %#x!\n",
                                  iTheChosenOneRevision, pUserData->m_iTheChosenOneRevision));
        }
        else
        {
            AssertLogRelMsg(pUserData->m_iWatcher == UINT32_MAX,
                            ("Impossible! iWatcher=%d m_cWatcher=%u\n", iWatcher, m_cWatchers));
            break;
        }
    }
    RTCritSectLeave(&m_WatcherCritSect);
}
/**
 * Test thread for the read/write critical section stress test.
 *
 * Each iteration randomly takes either the exclusive (write) or shared (read)
 * lock — with random recursion depth — and validates the global concurrency
 * counters: at most one writer, and no readers while writing.  The per-thread
 * iteration counter (@a pvUser) is used by the caller to judge fairness.
 *
 * @returns VINF_SUCCESS.
 * @param   ThreadSelf  The thread handle; used to seed the PRNG.
 * @param   pvUser      Pointer to a uint64_t iteration counter for this thread.
 */
static DECLCALLBACK(int) Test4Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    /* Use randomization to get a little more variation of the sync pattern.
       We use a pseudo random generator here so that we don't end up testing
       the speed of the /dev/urandom implementation, but rather the read-write
       semaphores. */
    int rc;
    RTRAND hRand;
    RTTEST_CHECK_RC_OK_RET(g_hTest, rc = RTRandAdvCreateParkMiller(&hRand), rc);
    RTTEST_CHECK_RC_OK_RET(g_hTest, rc = RTRandAdvSeed(hRand, (uintptr_t)ThreadSelf), rc);
    unsigned c100 = RTRandAdvU32Ex(hRand, 0, 99); /* rolling 0..99 counter deciding read vs. write below */

    uint64_t *pcItr = (uint64_t *)pvUser;
    bool fWrite;
    for (;;)
    {
        unsigned readrec = RTRandAdvU32Ex(hRand, 0, 3);
        unsigned writerec = RTRandAdvU32Ex(hRand, 0, 3);
        /* Don't overdo recursion testing. */
        if (readrec > 1)
            readrec--;
        if (writerec > 1)
            writerec--;

        fWrite = (c100 < g_uWritePercent);
        if (fWrite)
        {
            /* Take the write lock 1 + writerec times (recursion). */
            for (unsigned i = 0; i <= writerec; i++)
            {
                rc = RTCritSectRwEnterExcl(&g_CritSectRw);
                if (RT_FAILURE(rc))
                {
                    RTTestFailed(g_hTest, "Write recursion %u on %s failed with rc=%Rrc", i, RTThreadSelfName(), rc);
                    break;
                }
            }
            if (RT_FAILURE(rc))
                break;
            /* Holding the write lock: we must be the only writer and there must be no readers. */
            if (ASMAtomicIncU32(&g_cConcurrentWriters) != 1)
            {
                RTTestFailed(g_hTest, "g_cConcurrentWriters=%u on %s after write locking it",
                             g_cConcurrentWriters, RTThreadSelfName());
                break;
            }
            if (g_cConcurrentReaders != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentReaders=%u on %s after write locking it",
                             g_cConcurrentReaders, RTThreadSelfName());
                break;
            }
        }
        else
        {
            rc = RTCritSectRwEnterShared(&g_CritSectRw);
            if (RT_FAILURE(rc))
            {
                RTTestFailed(g_hTest, "Read locking on %s failed with rc=%Rrc", RTThreadSelfName(), rc);
                break;
            }
            /* Holding a read lock: there must be no writer. */
            ASMAtomicIncU32(&g_cConcurrentReaders);
            if (g_cConcurrentWriters != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentWriters=%u on %s after read locking it",
                             g_cConcurrentWriters, RTThreadSelfName());
                break;
            }
        }

        /* Recursive shared entry on top of whichever lock we hold (tests
           read-recursion, and read-within-write when fWrite is set). */
        for (unsigned i = 0; i < readrec; i++)
        {
            rc = RTCritSectRwEnterShared(&g_CritSectRw);
            if (RT_FAILURE(rc))
            {
                RTTestFailed(g_hTest, "Read recursion %u on %s failed with rc=%Rrc", i,
                             RTThreadSelfName(), rc);
                break;
            }
        }
        if (RT_FAILURE(rc))
            break;

        /*
         * Check for fairness: The values of the threads should not differ too much
         */
        (*pcItr)++;

        /*
         * Check for correctness: Give other threads a chance. If the implementation is
         * correct, no other thread will be able to enter this lock now.
         */
        if (g_fYield)
            RTThreadYield();

        /* Unwind the recursive shared entries. */
        for (unsigned i = 0; i < readrec; i++)
        {
            rc = RTCritSectRwLeaveShared(&g_CritSectRw);
            if (RT_FAILURE(rc))
            {
                RTTestFailed(g_hTest, "Read release %u on %s failed with rc=%Rrc", i,
                             RTThreadSelfName(), rc);
                break;
            }
        }
        if (RT_FAILURE(rc))
            break;

        if (fWrite)
        {
            /* Re-validate the counters just before releasing the write lock. */
            if (ASMAtomicDecU32(&g_cConcurrentWriters) != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentWriters=%u on %s before write release",
                             g_cConcurrentWriters, RTThreadSelfName());
                break;
            }
            if (g_cConcurrentReaders != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentReaders=%u on %s before write release",
                             g_cConcurrentReaders, RTThreadSelfName());
                break;
            }
            for (unsigned i = 0; i <= writerec; i++)
            {
                rc = RTCritSectRwLeaveExcl(&g_CritSectRw);
                if (RT_FAILURE(rc))
                {
                    RTTestFailed(g_hTest, "Write release %u on %s failed with rc=%Rrc", i,
                                 RTThreadSelfName(), rc);
                    break;
                }
            }
        }
        else
        {
            if (g_cConcurrentWriters != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentWriters=%u on %s before read release",
                             g_cConcurrentWriters, RTThreadSelfName());
                break;
            }
            ASMAtomicDecU32(&g_cConcurrentReaders);
            rc = RTCritSectRwLeaveShared(&g_CritSectRw);
            if (RT_FAILURE(rc))
            {
                RTTestFailed(g_hTest, "Read release on %s failed with rc=%Rrc", RTThreadSelfName(), rc);
                break;
            }
        }

        if (g_fTerminate)
            break;

        c100++;
        c100 %= 100;
    }
    if (!g_fQuiet)
        RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "Thread %s exited with %lld\n", RTThreadSelfName(), *pcItr);
    RTRandAdvDestroy(hRand);
    return VINF_SUCCESS;
}
/**
 * rtR3Init worker.
 *
 * Performs the one-time IPRT ring-3 initialization sequence: native init,
 * locale and PID setup, the thread database, optionally the support library
 * (GIP), program path / argument conversion, program start timestamps, and
 * finally the irreversible bits (atfork/atexit hooks, SIGCHLD handler,
 * alignment checks, final native init).  Order is significant throughout.
 *
 * @returns IPRT status code (VINF_SUCCESS on success).
 * @param   fFlags          RTR3INIT_FLAGS_XXX controlling what to initialize.
 * @param   cArgs           Argument count, passed on to rtR3InitArgv.
 * @param   papszArgs       Pointer to the argument vector (in/out), passed on
 *                          to rtR3InitArgv.
 * @param   pszProgramPath  The program path, NULL if to be deduced.
 */
static int rtR3InitBody(uint32_t fFlags, int cArgs, char ***papszArgs, const char *pszProgramPath)
{
    /*
     * Early native initialization.
     */
    int rc = rtR3InitNativeFirst(fFlags);
    AssertMsgRCReturn(rc, ("rtR3InitNativeFirst failed with %Rrc\n", rc), rc);

    /*
     * Disable error popups.
     */
#if defined(RT_OS_OS2) /** @todo move to private code. */
    DosError(FERR_DISABLEHARDERR);
#endif

    /*
     * Init C runtime locale before we do anything that may end up converting
     * paths or we'll end up using the "C" locale for path conversion.
     */
    setlocale(LC_CTYPE, "");

    /*
     * The Process ID.
     */
#ifdef _MSC_VER
    g_ProcessSelf = _getpid(); /* crappy ansi compiler */
#else
    g_ProcessSelf = getpid();
#endif

    /*
     * Save the init flags.
     */
    g_fInitFlags |= fFlags;

#if !defined(IN_GUEST) && !defined(RT_NO_GIP)
# ifdef VBOX
    /*
     * This MUST be done as the very first thing, before any file is opened.
     * The log is opened on demand, but the first log entries may be caused
     * by rtThreadInit() below.
     */
    const char *pszDisableHostCache = getenv("VBOX_DISABLE_HOST_DISK_CACHE");
    if (    pszDisableHostCache != NULL
        &&  *pszDisableHostCache
        &&  strcmp(pszDisableHostCache, "0") != 0)
    {
        RTFileSetForceFlags(RTFILE_O_WRITE, RTFILE_O_WRITE_THROUGH, 0);
        RTFileSetForceFlags(RTFILE_O_READWRITE, RTFILE_O_WRITE_THROUGH, 0);
    }
# endif /* VBOX */
#endif /* !IN_GUEST && !RT_NO_GIP */

    /*
     * Init the thread database and adopt the caller thread as 'main'.
     * This must be done before everything else or else we'll call into threading
     * without having initialized TLS entries and suchlike.
     */
    rc = rtThreadInit();
    AssertMsgRCReturn(rc, ("Failed to initialize threads, rc=%Rrc!\n", rc), rc);

#if !defined(IN_GUEST) && !defined(RT_NO_GIP)
    if (fFlags & RTR3INIT_FLAGS_SUPLIB)
    {
        /*
         * Init GIP first.
         * (The more time for updates before real use, the better.)
         */
        rc = SUPR3Init(NULL);
        AssertMsgRCReturn(rc, ("Failed to initialize the support library, rc=%Rrc!\n", rc), rc);
    }
#endif

    /*
     * The executable path, name and directory.  Convert arguments.
     */
    rc = rtR3InitProgramPath(pszProgramPath);
    AssertLogRelMsgRCReturn(rc, ("Failed to get executable directory path, rc=%Rrc!\n", rc), rc);

    rc = rtR3InitArgv(fFlags, cArgs, papszArgs);
    AssertLogRelMsgRCReturn(rc, ("Failed to convert the arguments, rc=%Rrc!\n", rc), rc);

#if !defined(IN_GUEST) && !defined(RT_NO_GIP)
    /*
     * The threading is initialized, so we can safely sleep a bit if GIP
     * needs some time to update itself.
     */
    if ((fFlags & RTR3INIT_FLAGS_SUPLIB) && g_pSUPGlobalInfoPage)
    {
        RTThreadSleep(20);
        RTTimeNanoTS();
    }
#endif

    /*
     * Init the program start TSes.
     * Do that here to be sure that the GIP time was properly updated the 1st time.
     */
    g_u64ProgramStartNanoTS = RTTimeNanoTS();
    g_u64ProgramStartMicroTS = g_u64ProgramStartNanoTS / 1000;
    g_u64ProgramStartMilliTS = g_u64ProgramStartNanoTS / 1000000;

    /*
     * The remainder cannot easily be undone, so it has to go last.
     */

    /* Fork and exit callbacks. */
#if !defined(RT_OS_WINDOWS) && !defined(RT_OS_OS2)
    rc = pthread_atfork(NULL, NULL, rtR3ForkChildCallback);
    AssertMsg(rc == 0, ("%d\n", rc));
#endif
    atexit(rtR3ExitCallback);

#ifdef IPRT_USE_SIG_CHILD_DUMMY
    /*
     * SIGCHLD must not be ignored (that's default), otherwise posix compliant waitpid
     * implementations won't work right.
     */
    for (;;)
    {
        struct sigaction saOld;
        rc = sigaction(SIGCHLD, 0, &saOld);
        AssertMsg(rc == 0, ("%d/%d\n", rc, errno));
        if (    rc != 0
            ||  (saOld.sa_flags & SA_SIGINFO)
            || (    saOld.sa_handler != SIG_IGN
                &&  saOld.sa_handler != SIG_DFL)
           )
            break;

        /* Try install dummy handler. */
        struct sigaction saNew = saOld;
        saNew.sa_flags   = SA_NOCLDSTOP | SA_RESTART;
        saNew.sa_handler = rtR3SigChildHandler;
        rc = sigemptyset(&saNew.sa_mask);
        AssertMsg(rc == 0, ("%d/%d\n", rc, errno));
        struct sigaction saOld2;
        rc = sigaction(SIGCHLD, &saNew, &saOld2);
        AssertMsg(rc == 0, ("%d/%d\n", rc, errno));
        if (    rc != 0
            ||  (   saOld2.sa_handler == saOld.sa_handler
                 && !(saOld2.sa_flags & SA_SIGINFO)) )
            break;

        /* Race during dynamic load, restore and try again... */
        sigaction(SIGCHLD, &saOld2, NULL);
        RTThreadYield();
    }
#endif /* IPRT_USE_SIG_CHILD_DUMMY */

#ifdef IPRT_WITH_ALIGNMENT_CHECKS
    /*
     * Enable alignment checks.
     */
    const char *pszAlignmentChecks = getenv("IPRT_ALIGNMENT_CHECKS");
    g_fRTAlignmentChecks = pszAlignmentChecks != NULL
                        && pszAlignmentChecks[0] == '1'
                        && pszAlignmentChecks[1] == '\0';
    if (g_fRTAlignmentChecks)
        IPRT_ALIGNMENT_CHECKS_ENABLE();
#endif

    /*
     * Final native initialization.
     */
    rc = rtR3InitNativeFinal(fFlags);
    AssertMsgRCReturn(rc, ("rtR3InitNativeFinal failed with %Rrc\n", rc), rc);

    return VINF_SUCCESS;
}
/**
 * The old halt loop.
 *
 * Runs the timer queues and blocks/yields/spins until an external force flag
 * or one of the VCPU force flags in @a fMask becomes pending.  The wait
 * strategy is chosen from the time left to the next timer event: spin for
 * very short waits, yield for short ones, and block on the event semaphore
 * (with a millisecond timeout) for longer ones.
 *
 * @returns VBox status code (VINF_SUCCESS, or a fatal wait error).
 * @param   pUVCpu  Pointer to the user-mode VCPU structure.
 * @param   fMask   VMCPU_FF_* mask of force flags that should end the halt.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps set an FF.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;
        uint64_t u64NanoTS; /* nanoseconds until the next timer event */
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            /* Under 50us: not worth the scheduler round-trip, just spin. */
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS <  870000) /* this is a bit speculative... works fine on linux. */
            {
                /* Under ~870us: a single yield is cheaper than blocking. */
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedYield   = RTTimeNanoTS();
                RTThreadYield(); /* this is the best we can do here */
                uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
            }
            else if (u64NanoTS < 2000000)
            {
                /* Under 2ms: block for the minimum 1ms timeout. */
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            else
            {
                /* Longer waits: block for most of the interval, capped at 15ms
                   so timers still get serviced reasonably often. */
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        /* A timeout simply means we slept the full period; anything else failing is fatal. */
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
/**
 * Entry point for the ring-0 thread preemption test.
 *
 * Loads the tstR0ThreadPreemption.r0 service module and drives it through a
 * series of SUPR3CallR0Service() requests: sanity checks, basic preemption
 * API tests, a poll loop for pending preemption, and nested
 * RTThreadPreemptDisable tests.  The ring-0 side reports results via the
 * request's szMsg field: a leading '!' marks a failure message.
 */
int main(int argc, char **argv)
{
#ifndef VBOX
    RTPrintf("tstSup: SKIPPED\n");
    return 0;
#else
    /*
     * Init.
     */
    RTTEST hTest;
    int rc = RTTestInitAndCreate("tstR0ThreadPreemption", &hTest);
    if (rc)
        return rc;
    RTTestBanner(hTest);

    PSUPDRVSESSION pSession;
    rc = SUPR3Init(&pSession);
    if (RT_FAILURE(rc))
    {
        RTTestFailed(hTest, "SUPR3Init failed with rc=%Rrc\n", rc);
        return RTTestSummaryAndDestroy(hTest);
    }

    /* The .r0 module lives next to this executable. */
    char szPath[RTPATH_MAX];
    rc = RTPathExecDir(szPath, sizeof(szPath));
    if (RT_SUCCESS(rc))
        rc = RTPathAppend(szPath, sizeof(szPath), "tstR0ThreadPreemption.r0");
    if (RT_FAILURE(rc))
    {
        RTTestFailed(hTest, "Failed constructing .r0 filename (rc=%Rrc)", rc);
        return RTTestSummaryAndDestroy(hTest);
    }

    void *pvImageBase;
    rc = SUPR3LoadServiceModule(szPath, "tstR0ThreadPreemption",
                                "TSTR0ThreadPreemptionSrvReqHandler",
                                &pvImageBase);
    if (RT_FAILURE(rc))
    {
        RTTestFailed(hTest, "SUPR3LoadServiceModule(%s,,,) failed with rc=%Rrc\n", szPath, rc);
        return RTTestSummaryAndDestroy(hTest);
    }

    /* test request — header plus a message buffer the R0 side writes into */
    struct
    {
        SUPR0SERVICEREQHDR Hdr;
        char szMsg[256];
    } Req;

    /*
     * Sanity checks.
     */
    RTTestSub(hTest, "Sanity");
    Req.Hdr.u32Magic = SUPR0SERVICEREQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.szMsg[0] = '\0';
    RTTESTI_CHECK_RC(rc = SUPR3CallR0Service("tstR0ThreadPreemption", sizeof("tstR0ThreadPreemption") - 1,
                                             TSTR0THREADPREMEPTION_SANITY_OK, 0, &Req.Hdr), VINF_SUCCESS);
    if (RT_FAILURE(rc))
        return RTTestSummaryAndDestroy(hTest);
    /* The OK sanity request must leave the message buffer empty. */
    RTTESTI_CHECK_MSG(Req.szMsg[0] == '\0', ("%s", Req.szMsg));
    if (Req.szMsg[0] != '\0')
        return RTTestSummaryAndDestroy(hTest);

    Req.Hdr.u32Magic = SUPR0SERVICEREQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.szMsg[0] = '\0';
    RTTESTI_CHECK_RC(rc = SUPR3CallR0Service("tstR0ThreadPreemption", sizeof("tstR0ThreadPreemption") - 1,
                                             TSTR0THREADPREMEPTION_SANITY_FAILURE, 0, &Req.Hdr), VINF_SUCCESS);
    if (RT_FAILURE(rc))
        return RTTestSummaryAndDestroy(hTest);
    /* The failure sanity request must produce the canned failure message. */
    RTTESTI_CHECK_MSG(!strncmp(Req.szMsg, "!42failure42", sizeof("!42failure42") - 1), ("%s", Req.szMsg));
    if (strncmp(Req.szMsg, "!42failure42", sizeof("!42failure42") - 1))
        return RTTestSummaryAndDestroy(hTest);

    /*
     * Basic tests, bail out on failure.
     */
    RTTestSub(hTest, "Basics");
    Req.Hdr.u32Magic = SUPR0SERVICEREQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.szMsg[0] = '\0';
    RTTESTI_CHECK_RC(rc = SUPR3CallR0Service("tstR0ThreadPreemption", sizeof("tstR0ThreadPreemption") - 1,
                                             TSTR0THREADPREMEPTION_BASIC, 0, &Req.Hdr), VINF_SUCCESS);
    if (RT_FAILURE(rc))
        return RTTestSummaryAndDestroy(hTest);
    if (Req.szMsg[0] == '!')
    {
        RTTestIFailed("%s", &Req.szMsg[1]);
        return RTTestSummaryAndDestroy(hTest);
    }
    if (Req.szMsg[0])
        RTTestIPrintf(RTTESTLVL_ALWAYS, "%s", Req.szMsg);

    /*
     * Stay in ring-0 until preemption is pending.
     */
    RTThreadSleep(250); /** @todo fix GIP initialization? */
    RTTestSub(hTest, "Pending Preemption");
    /* Retry (up to 64 times) while the R0 loop completes on its very first
       iteration, i.e. before the preemption-pending state could be observed. */
    for (int i = 0; ; i++)
    {
        Req.Hdr.u32Magic = SUPR0SERVICEREQHDR_MAGIC;
        Req.Hdr.cbReq = sizeof(Req);
        Req.szMsg[0] = '\0';
        RTTESTI_CHECK_RC(rc = SUPR3CallR0Service("tstR0ThreadPreemption", sizeof("tstR0ThreadPreemption") - 1,
                                                 TSTR0THREADPREMEPTION_IS_PENDING, 0, &Req.Hdr), VINF_SUCCESS);
        if (    strcmp(Req.szMsg, "cLoops=1\n")
            ||  i >= 64)
        {
            if (Req.szMsg[0] == '!')
                RTTestIFailed("%s", &Req.szMsg[1]);
            else if (Req.szMsg[0])
                RTTestIPrintf(RTTESTLVL_ALWAYS, "%s", Req.szMsg);
            break;
        }
        if ((i % 3) == 0)
            RTThreadYield();
    }

    /*
     * Test nested RTThreadPreemptDisable calls.
     */
    RTTestSub(hTest, "Nested");
    Req.Hdr.u32Magic = SUPR0SERVICEREQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.szMsg[0] = '\0';
    RTTESTI_CHECK_RC(rc = SUPR3CallR0Service("tstR0ThreadPreemption", sizeof("tstR0ThreadPreemption") - 1,
                                             TSTR0THREADPREMEPTION_NESTED, 0, &Req.Hdr), VINF_SUCCESS);
    if (Req.szMsg[0] == '!')
        RTTestIFailed("%s", &Req.szMsg[1]);
    else if (Req.szMsg[0])
        RTTestIPrintf(RTTESTLVL_ALWAYS, "%s", Req.szMsg);

    /*
     * Done.
     */
    return RTTestSummaryAndDestroy(hTest);
#endif
}
/**
 * Entry point for the RTTimeNanoTS() test.
 *
 * Verifies over 100M iterations that RTTimeNanoTS() is strictly monotonic
 * and never jumps forward by more than a second, then compares the total
 * elapsed time against RTTimeSystemNanoTS() (tolerance: 0.1%%), and finally
 * reports the internal RTTimeDbg* diagnostic counters.
 */
int main()
{
    /*
     * Init.
     */
    RTTEST hTest;
    RTEXITCODE rcExit = RTTestInitExAndCreate(0, NULL, RTR3INIT_FLAGS_SUPLIB, "tstRTTime", &hTest);
    if (rcExit != RTEXITCODE_SUCCESS)
        return rcExit;
    RTTestBanner(hTest);

    /*
     * RTNanoTimeTS() shall never return something which
     * is less or equal to the return of the previous call.
     */

    /* Warm up both clocks and yield so we start on a fresh timeslice. */
    RTTimeSystemNanoTS(); RTTimeNanoTS(); RTThreadYield();
    uint64_t u64RTStartTS = RTTimeNanoTS();
    uint64_t u64OSStartTS = RTTimeSystemNanoTS();

    uint32_t i;
    uint64_t u64Prev = RTTimeNanoTS();
    for (i = 0; i < 100*_1M; i++)
    {
        uint64_t u64 = RTTimeNanoTS();
        if (u64 <= u64Prev)
        {
            /** @todo wrapping detection. */
            RTTestFailed(hTest, "i=%#010x u64=%#llx u64Prev=%#llx (1)\n", i, u64, u64Prev);
            if (RTTestErrorCount(hTest) >= 256)
                break;
            RTThreadYield();
            u64 = RTTimeNanoTS();
        }
        else if (u64 - u64Prev > 1000000000 /* 1sec */)
        {
            RTTestFailed(hTest, "i=%#010x u64=%#llx u64Prev=%#llx delta=%lld\n", i,
                         u64, u64Prev, u64 - u64Prev);
            if (RTTestErrorCount(hTest) >= 256)
                break;
            RTThreadYield();
            u64 = RTTimeNanoTS();
        }
        /* Progress report every 2M iterations. */
        if (!(i & (_1M*2 - 1)))
        {
            RTTestPrintf(hTest, RTTESTLVL_INFO, "i=%#010x u64=%#llx u64Prev=%#llx delta=%lld\n", i,
                         u64, u64Prev, u64 - u64Prev);
            RTThreadYield();
            u64 = RTTimeNanoTS();
        }
        u64Prev = u64;
    }

    /* Take matching end timestamps the same way as the start ones. */
    RTTimeSystemNanoTS(); RTTimeNanoTS(); RTThreadYield();
    uint64_t u64RTElapsedTS = RTTimeNanoTS();
    uint64_t u64OSElapsedTS = RTTimeSystemNanoTS();
    u64RTElapsedTS -= u64RTStartTS;
    u64OSElapsedTS -= u64OSStartTS;
    /* The two clocks must agree to within 0.1% of the OS-measured elapsed time. */
    int64_t i64Diff = u64OSElapsedTS >= u64RTElapsedTS ? u64OSElapsedTS - u64RTElapsedTS : u64RTElapsedTS - u64OSElapsedTS;
    if (i64Diff > (int64_t)(u64OSElapsedTS / 1000))
        RTTestFailed(hTest, "total time differs too much! u64OSElapsedTS=%#llx u64RTElapsedTS=%#llx delta=%lld\n",
                     u64OSElapsedTS, u64RTElapsedTS, u64OSElapsedTS - u64RTElapsedTS);
    else
    {
        if (u64OSElapsedTS >= u64RTElapsedTS)
            RTTestValue(hTest, "Total time delta", u64OSElapsedTS - u64RTElapsedTS, RTTESTUNIT_NS);
        else
            RTTestValue(hTest, "Total time delta", u64RTElapsedTS - u64OSElapsedTS, RTTESTUNIT_NS);
        RTTestPrintf(hTest, RTTESTLVL_INFO, "total time difference: u64OSElapsedTS=%#llx u64RTElapsedTS=%#llx delta=%lld\n",
                     u64OSElapsedTS, u64RTElapsedTS, u64OSElapsedTS - u64RTElapsedTS);
    }

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) /** @todo This isn't really x86 or AMD64 specific... */
    /* Report the RTTimeNanoTS implementation's internal diagnostic counters,
       both as absolute occurrences and per-thousand-iterations rates. */
    RTTestValue(hTest, "RTTimeDbgSteps",      RTTimeDbgSteps(),                           RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgSteps pp",   ((uint64_t)RTTimeDbgSteps() * 1000) / i,    RTTESTUNIT_PP1K);
    RTTestValue(hTest, "RTTimeDbgExpired",    RTTimeDbgExpired(),                         RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgExpired pp", ((uint64_t)RTTimeDbgExpired() * 1000) / i,  RTTESTUNIT_PP1K);
    RTTestValue(hTest, "RTTimeDbgBad",        RTTimeDbgBad(),                             RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgBad pp",     ((uint64_t)RTTimeDbgBad() * 1000) / i,      RTTESTUNIT_PP1K);
    RTTestValue(hTest, "RTTimeDbgRaces",      RTTimeDbgRaces(),                           RTTESTUNIT_OCCURRENCES);
    RTTestValue(hTest, "RTTimeDbgRaces pp",   ((uint64_t)RTTimeDbgRaces() * 1000) / i,    RTTESTUNIT_PP1K);
#endif

    return RTTestSummaryAndDestroy(hTest);
}
/**
 * Starts monitoring a VBoxSVC process.
 *
 * Tries to queue the process on an existing watcher thread with spare handle
 * capacity; if none has room, allocates and starts a new watcher with the
 * process pre-queued in handle slot #1.
 *
 * @param   pUserData   The user which chosen VBoxSVC should be watched.
 * @param   hProcess    Handle to the VBoxSVC process.  Consumed.
 *                      (Ownership passes to the watcher on success; closed
 *                      here on failure.)
 * @param   pid         The VBoxSVC PID.
 * @returns Success indicator.
 */
bool VirtualBoxSDS::i_watchIt(VBoxSDSPerUserData *pUserData, HANDLE hProcess, RTPROCESS pid)
{
    RTCritSectEnter(&m_WatcherCritSect);

    /*
     * Find a watcher with capacity left over (we save 8 entries for removals).
     */
    for (uint32_t i = 0; i < m_cWatchers; i++)
    {
        VBoxSDSWatcher *pWatcher = m_papWatchers[i];
        if (   pWatcher->cHandlesEffective < RT_ELEMENTS(pWatcher->aHandles)
            && !pWatcher->fShutdown)
        {
            uint32_t iTodo = pWatcher->cTodos;
            if (iTodo + 8 < RT_ELEMENTS(pWatcher->aTodos))
            {
                /* Queue the add order; hProcess ownership passes to the watcher. */
                pWatcher->aTodos[iTodo].hProcess = hProcess;
                pWatcher->aTodos[iTodo].Data.pUserData = pUserData;
                pWatcher->aTodos[iTodo].Data.iRevision = ++pUserData->m_iTheChosenOneRevision;
                pWatcher->aTodos[iTodo].Data.pid = pid;
                pWatcher->cTodos = iTodo + 1;

                pUserData->m_iWatcher = pWatcher->iWatcher;
                pUserData->i_retain();

                /* Wake the watcher thread so it picks up the new todo entry. */
                BOOL fRc = SetEvent(pWatcher->aHandles[0]);
                AssertLogRelMsg(fRc, ("SetEvent(%p) failed: %u\n", pWatcher->aHandles[0], GetLastError()));
                LogRel(("i_watchIt: Added %p/%p to watcher #%u: %RTbool\n", pUserData, hProcess, pWatcher->iWatcher, fRc));

                i_incrementClientCount();
                RTCritSectLeave(&m_WatcherCritSect);
                RTThreadYield();
                return true;
            }
        }
    }

    /*
     * No watcher with capacity was found, so create a new one with
     * the user/handle prequeued.
     */
    void *pvNew = RTMemRealloc(m_papWatchers, sizeof(m_papWatchers[0]) * (m_cWatchers + 1));
    if (pvNew)
    {
        m_papWatchers = (VBoxSDSWatcher **)pvNew;
        VBoxSDSWatcher *pWatcher = (VBoxSDSWatcher *)RTMemAllocZ(sizeof(*pWatcher));
        if (pWatcher)
        {
            for (uint32_t i = 0; i < RT_ELEMENTS(pWatcher->aData); i++)
                pWatcher->aData[i].setNull();
            for (uint32_t i = 0; i < RT_ELEMENTS(pWatcher->aTodos); i++)
                pWatcher->aTodos[i].Data.setNull();
            pWatcher->pVBoxSDS = this;
            pWatcher->iWatcher = m_cWatchers;
            pWatcher->cRefs = 2; /* one for the watcher thread, one for us — TODO confirm against i_watcherThreadProc */
            /* Slot #0 is the wakeup event, slot #1 the first watched process. */
            pWatcher->cHandlesEffective = 2;
            pWatcher->cHandles = 2;
            pWatcher->aHandles[0] = CreateEventW(NULL, FALSE /*fManualReset*/, FALSE /*fInitialState*/, NULL);
            if (pWatcher->aHandles[0])
            {
                /* Add incoming VBoxSVC process in slot #1: */
                pWatcher->aHandles[1] = hProcess;
                pWatcher->aData[1].pid = pid;
                pWatcher->aData[1].pUserData = pUserData;
                pWatcher->aData[1].iRevision = ++pUserData->m_iTheChosenOneRevision;
                pUserData->i_retain();
                pUserData->m_iWatcher = pWatcher->iWatcher;

                /* Start the thread and we're good. */
                m_papWatchers[m_cWatchers++] = pWatcher;
                int rc = RTThreadCreateF(&pWatcher->hThread, i_watcherThreadProc, pWatcher, 0, RTTHREADTYPE_MAIN_WORKER,
                                         RTTHREADFLAGS_WAITABLE, "watcher%u", pWatcher->iWatcher);
                if (RT_SUCCESS(rc))
                {
                    LogRel(("i_watchIt: Created new watcher #%u for %p/%p\n", m_cWatchers, pUserData, hProcess));

                    i_incrementClientCount();
                    RTCritSectLeave(&m_WatcherCritSect);
                    return true;
                }

                /* Thread creation failed: undo the registration done above. */
                LogRel(("i_watchIt: Error starting watcher thread: %Rrc\n", rc));
                m_papWatchers[--m_cWatchers] = NULL;

                pUserData->m_iWatcher = UINT32_MAX;
                pUserData->i_release();
                CloseHandle(pWatcher->aHandles[0]);
            }
            else
                LogRel(("i_watchIt: CreateEventW failed: %u\n", GetLastError()));
            RTMemFree(pWatcher);
        }
        else
            LogRel(("i_watchIt: failed to allocate watcher structure!\n"));
    }
    else
        LogRel(("i_watchIt: Failed to grow watcher array to %u entries!\n", m_cWatchers + 1));

    RTCritSectLeave(&m_WatcherCritSect);
    /* We consume hProcess even on failure. */
    CloseHandle(hProcess);
    return false;
}