/**
 * Thread procedure for the extra client-watcher subworkers (Windows only).
 *
 * Each subworker repeatedly waits on its own slice of the wait-handle array
 * and then blocks until the main worker thread signals it to go again.
 *
 * @returns VINF_SUCCESS.
 * @param   hThreadSelf     This thread's handle.
 * @param   pvUser          Pointer to the PerSubworker structure of this thread.
 */
/*static*/ DECLCALLBACK(int) VirtualBox::ClientWatcher::subworkerThread(RTTHREAD hThreadSelf, void *pvUser)
{
    VirtualBox::ClientWatcher::PerSubworker *pSubworker = (VirtualBox::ClientWatcher::PerSubworker *)pvUser;
    VirtualBox::ClientWatcher               *pThis      = pSubworker->pSelf;

    for (;;)
    {
        if (pThis->mfTerminate)
            break;

        /* Clear the user event semaphore up front so we cannot miss the
           wake-up that the main worker posts for the next round. */
        int vrc = RTThreadUserReset(pSubworker->hThread);
        AssertLogRelMsg(RT_SUCCESS(vrc), ("RTThreadUserReset [iSubworker=%#u] -> %Rrc", pSubworker->iSubworker, vrc));

        /* Perform one round of waiting on our section of the handle array. */
        pThis->subworkerWait(pSubworker, pThis->mcMsWait);

        /* Block until the main worker hands out the next job (or we're told
           to terminate); tolerate spurious VERR_INTERRUPTED wake-ups. */
        for (;;)
        {
            vrc = RTThreadUserWaitNoResume(hThreadSelf, RT_INDEFINITE_WAIT);
            Assert(vrc == VINF_SUCCESS || vrc == VERR_INTERRUPTED);
            if (vrc == VINF_SUCCESS || pThis->mfTerminate)
                break;
        }
    }
    return VINF_SUCCESS;
}
/**
 * Does the waiting on a section of the handle array.
 *
 * The first handle of each section is the shared update-request event; when a
 * section wakes up for any other reason, that event is signalled so all the
 * sibling subworkers wake up too.  The last subworker to finish pokes the
 * main worker thread.
 *
 * @param   pSubworker      Pointer to the calling thread's data.
 * @param   cMsWait         Number of milliseconds to wait.
 */
void VirtualBox::ClientWatcher::subworkerWait(VirtualBox::ClientWatcher::PerSubworker *pSubworker, uint32_t cMsWait)
{
    /*
     * Work out which section of the handle array belongs to us and how many
     * handles it holds (the last section is usually shorter).
     */
    uint32_t idxHandle = pSubworker->iSubworker * CW_MAX_HANDLES_PER_THREAD;
    uint32_t cHandles;
    if (idxHandle + CW_MAX_HANDLES_PER_THREAD <= mcWaitHandles)
        cHandles = CW_MAX_HANDLES_PER_THREAD;
    else
    {
        cHandles = mcWaitHandles - idxHandle;
        AssertStmt(idxHandle < mcWaitHandles, cHandles = 1);
    }

    /* Slot zero of every section is the shared update-request event. */
    Assert(mahWaitHandles[idxHandle] == mUpdateReq);

    /* Do the actual waiting and publish the result for the main worker. */
    DWORD const dwWaitStatus = ::WaitForMultipleObjects(cHandles, &mahWaitHandles[idxHandle], FALSE /*fWaitAll*/, cMsWait);
    pSubworker->dwWait = dwWaitStatus;

    /*
     * Unless the update-request event itself woke us, signal it now so every
     * other subworker section wakes up as well.
     */
    if (dwWaitStatus != WAIT_OBJECT_0)
    {
        BOOL fRc = SetEvent(mUpdateReq);
        Assert(fRc);
        NOREF(fRc);
    }

    /*
     * The last subworker to get here signals the main worker thread.
     */
    if (ASMAtomicDecU32(&mcActiveSubworkers) == 0)
    {
        int vrc = RTThreadUserSignal(maSubworkers[0].hThread);
        AssertLogRelMsg(RT_SUCCESS(vrc), ("RTThreadUserSignal -> %Rrc\n", vrc));
    }
}
/**
 * Relocates the RC address space.
 *
 * Takes a snapshot of all modules mapped into the DBGF_AS_RC alias, unlinks
 * them all, and relinks each at its old address shifted by @a offDelta.  The
 * unlink-all-then-relink-all order matters: old and new mappings may overlap.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   offDelta    The relocation delta.
 */
void dbgfR3AsRelocate(PUVM pUVM, RTGCUINTPTR offDelta)
{
    /*
     * We will relocate the raw-mode context modules by offDelta if they have
     * been injected into the DBGF_AS_RC map.
     */
    if (   pUVM->dbgf.s.afAsAliasPopuplated[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC)]
        && offDelta != 0)
    {
        RTDBGAS hAs = pUVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(DBGF_AS_RC)];

        /* Take a snapshot of the modules as we might have overlapping
           addresses between the previous and new mapping. */
        RTDbgAsLockExcl(hAs);
        uint32_t cModules = RTDbgAsModuleCount(hAs);
        if (cModules > 0 && cModules < _4K)     /* _4K is a sanity cap on the temp allocation. */
        {
            struct DBGFASRELOCENTRY
            {
                RTDBGMOD hDbgMod;   /* Module handle (referenced; released after relink). */
                RTRCPTR  uOldAddr;  /* Flat RC address the module was mapped at. */
            } *paEntries = (struct DBGFASRELOCENTRY *)RTMemTmpAllocZ(sizeof(paEntries[0]) * cModules);
            if (paEntries)
            {
                /* Snapshot: record each module handle and its current flat mapping. */
                for (uint32_t i = 0; i < cModules; i++)
                {
                    paEntries[i].hDbgMod = RTDbgAsModuleByIndex(hAs, i);
                    AssertLogRelMsg(paEntries[i].hDbgMod != NIL_RTDBGMOD, ("iModule=%#x\n", i));

                    RTDBGASMAPINFO  aMappings[1] = { { 0, 0 } };
                    uint32_t        cMappings = 1;
                    int rc = RTDbgAsModuleQueryMapByIndex(hAs, i, &aMappings[0], &cMappings, 0 /*fFlags*/);
                    /* Only whole-module (NIL segment) single mappings are expected here. */
                    if (RT_SUCCESS(rc) && cMappings == 1 && aMappings[0].iSeg == NIL_RTDBGSEGIDX)
                        paEntries[i].uOldAddr = (RTRCPTR)aMappings[0].Address;
                    else
                        AssertLogRelMsgFailed(("iModule=%#x rc=%Rrc cMappings=%#x.\n", i, rc, cMappings));
                }

                /* Unlink them all before relinking anything (overlap safety). */
                for (uint32_t i = 0; i < cModules; i++)
                {
                    int rc = RTDbgAsModuleUnlink(hAs, paEntries[i].hDbgMod);
                    AssertLogRelMsg(RT_SUCCESS(rc), ("iModule=%#x rc=%Rrc hDbgMod=%p\n",
                                                     i, rc, paEntries[i].hDbgMod));
                }

                /* Link them at the new locations and drop the snapshot references. */
                for (uint32_t i = 0; i < cModules; i++)
                {
                    RTRCPTR uNewAddr = paEntries[i].uOldAddr + offDelta;
                    int rc = RTDbgAsModuleLink(hAs, paEntries[i].hDbgMod, uNewAddr, RTDBGASLINK_FLAGS_REPLACE);
                    AssertLogRelMsg(RT_SUCCESS(rc), ("iModule=%#x rc=%Rrc hDbgMod=%p %RRv -> %RRv\n",
                                                     i, rc, paEntries[i].hDbgMod, paEntries[i].uOldAddr, uNewAddr));
                    RTDbgModRelease(paEntries[i].hDbgMod);
                }

                RTMemTmpFree(paEntries);
            }
            else
                AssertLogRelMsgFailed(("No memory for %#x modules.\n", cModules));
        }
        else
            AssertLogRelMsgFailed(("cModules=%#x\n", cModules));
        RTDbgAsUnlockExcl(hAs);
    }
}
/**
 * Thread wrapper for the teleportation source operation.
 *
 * Runs Console::teleporterSrc, then performs all the cleanup: closes the TCP
 * connection, completes the progress object, and fixes up the machine/VM
 * state depending on success or failure.
 *
 * @returns VINF_SUCCESS (return code is ignored by the thread framework).
 * @param   hThread     The calling thread (unused).
 * @param   pvUser      Pointer to the TeleporterStateSrc; deleted on exit.
 */
Console::teleporterSrcThreadWrapper(RTTHREAD hThread, void *pvUser)
{
    TeleporterStateSrc *pState = (TeleporterStateSrc *)pvUser;

    /*
     * Console::teleporterSrc does the work, we just grab onto the VM handle
     * and do the cleanups afterwards.
     */
    SafeVMPtr ptrVM(pState->mptrConsole);
    HRESULT hrc = ptrVM.rc();
    if (SUCCEEDED(hrc))
        hrc = pState->mptrConsole->teleporterSrc(pState);

    /* Close the connection ASAP on so that the other side can complete. */
    if (pState->mhSocket != NIL_RTSOCKET)
    {
        RTTcpClientClose(pState->mhSocket);
        pState->mhSocket = NIL_RTSOCKET;
    }

    /* Aaarg! setMachineState trashes error info on Windows, so we have to
       complete things here on failure instead of right before cleanup. */
    if (FAILED(hrc))
        pState->mptrProgress->notifyComplete(hrc);

    /* We can no longer be canceled (success), or it doesn't matter any longer (failure). */
    pState->mptrProgress->setCancelCallback(NULL, NULL);

    /*
     * Write lock the console before resetting mptrCancelableProgress and
     * fixing the state.
     */
    AutoWriteLock autoLock(pState->mptrConsole COMMA_LOCKVAL_SRC_POS);
    pState->mptrConsole->mptrCancelableProgress.setNull();

    VMSTATE const        enmVMState      = VMR3GetStateU(pState->mpUVM);
    MachineState_T const enmMachineState = pState->mptrConsole->mMachineState;
    if (SUCCEEDED(hrc))
    {
        /*
         * Automatically shut down the VM on success.
         *
         * Note! We have to release the VM caller object or we'll deadlock in
         *       powerDown.
         */
        AssertLogRelMsg(enmVMState == VMSTATE_SUSPENDED, ("%s\n", VMR3GetStateName(enmVMState)));
        AssertLogRelMsg(enmMachineState == MachineState_TeleportingPausedVM,
                        ("%s\n", Global::stringifyMachineState(enmMachineState)));

        ptrVM.release();

        pState->mptrConsole->mVMIsAlreadyPoweringOff = true; /* (Make sure we stick in the TeleportingPausedVM state.) */
        hrc = pState->mptrConsole->powerDown();
        pState->mptrConsole->mVMIsAlreadyPoweringOff = false;

        pState->mptrProgress->notifyComplete(hrc);
    }
    else
    {
        /*
         * Work the state machinery on failure.
         *
         * If the state is no longer 'Teleporting*', some other operation has
         * canceled us and there is nothing we need to do here.  In all other
         * cases, we've failed one way or another.
         */
        if (   enmMachineState == MachineState_Teleporting
            || enmMachineState == MachineState_TeleportingPausedVM)
        {
            if (pState->mfUnlockedMedia)
            {
                /* Try re-take the media locks we released for teleportation,
                   retrying for up to 2 seconds. */
                ErrorInfoKeeper Oak;
                HRESULT hrc2 = pState->mptrConsole->mControl->LockMedia();
                if (FAILED(hrc2))
                {
                    uint64_t StartMS = RTTimeMilliTS();
                    do
                    {
                        RTThreadSleep(2);
                        hrc2 = pState->mptrConsole->mControl->LockMedia();
                    } while (   FAILED(hrc2)
                             && RTTimeMilliTS() - StartMS < 2000);
                }
                if (SUCCEEDED(hrc2))
                    pState->mfUnlockedMedia = false; /* Fixed: the media is locked again, so clear the
                                                        flag (was erroneously re-set to true, which made
                                                        the switch below always fake a guru meditation). */
                else
                    LogRel(("FATAL ERROR: Failed to re-take the media locks. hrc2=%Rhrc\n", hrc2));
            }

            switch (enmVMState)
            {
                case VMSTATE_RUNNING:
                case VMSTATE_RUNNING_LS:
                case VMSTATE_DEBUGGING:
                case VMSTATE_DEBUGGING_LS:
                case VMSTATE_POWERING_OFF:
                case VMSTATE_POWERING_OFF_LS:
                case VMSTATE_RESETTING:
                case VMSTATE_RESETTING_LS:
                    Assert(!pState->mfSuspendedByUs);
                    Assert(!pState->mfUnlockedMedia);
                    pState->mptrConsole->setMachineState(MachineState_Running);
                    break;

                case VMSTATE_GURU_MEDITATION:
                case VMSTATE_GURU_MEDITATION_LS:
                    pState->mptrConsole->setMachineState(MachineState_Stuck);
                    break;

                case VMSTATE_FATAL_ERROR:
                case VMSTATE_FATAL_ERROR_LS:
                    pState->mptrConsole->setMachineState(MachineState_Paused);
                    break;

                default:
                    AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
                    /* fall thru */
                case VMSTATE_SUSPENDED:
                case VMSTATE_SUSPENDED_LS:
                case VMSTATE_SUSPENDING:
                case VMSTATE_SUSPENDING_LS:
                case VMSTATE_SUSPENDING_EXT_LS:
                    if (!pState->mfUnlockedMedia)
                    {
                        pState->mptrConsole->setMachineState(MachineState_Paused);
                        if (pState->mfSuspendedByUs)
                        {
                            /* We suspended the VM for teleporting; resume it
                               without holding the console lock. */
                            autoLock.release();
                            int rc = VMR3Resume(VMR3GetVM(pState->mpUVM));
                            AssertLogRelMsgRC(rc, ("VMR3Resume -> %Rrc\n", rc));
                            autoLock.acquire();
                        }
                    }
                    else
                    {
                        /* Faking a guru meditation is the best I can think of doing here... */
                        pState->mptrConsole->setMachineState(MachineState_Stuck);
                    }
                    break;
            }
        }
    }
    autoLock.release();

    /*
     * Cleanup.
     */
    Assert(pState->mhSocket == NIL_RTSOCKET);
    delete pState;

    return VINF_SUCCESS; /* ignored */
}
/**
 * Stops monitoring a VBoxSVC process.
 *
 * Queues a removal todo on the watcher thread currently assigned to
 * @a pUserData.  A still-pending "add" todo for the same user cancels out the
 * removal entirely.  If the todo queue is full, the critical section is
 * dropped and we sleep briefly before retrying.
 *
 * @param   pUserData   The user which chosen VBoxSVC should be watched.
 * @param   pid         The VBoxSVC PID.
 */
void VirtualBoxSDS::i_stopWatching(VBoxSDSPerUserData *pUserData, RTPROCESS pid)
{
    /*
     * Add a remove order in the watcher's todo queue.
     */
    RTCritSectEnter(&m_WatcherCritSect);
    for (uint32_t iRound = 0; ; iRound++)
    {
        uint32_t const iWatcher = pUserData->m_iWatcher;
        if (iWatcher < m_cWatchers)
        {
            VBoxSDSWatcher *pWatcher = m_papWatchers[pUserData->m_iWatcher];
            if (!pWatcher->fShutdown)
            {
                /*
                 * Remove duplicate todo entries for this user.  A pending entry
                 * with a non-NULL hProcess is an unprocessed "add": dropping it
                 * cancels out this removal (fAddIt = false) and we must close
                 * the process handle it owned.  Each removed slot is compacted
                 * out of the array and the freed tail slot is cleared.
                 */
                bool fAddIt = true;
                uint32_t iTodo = pWatcher->cTodos;
                while (iTodo-- > 0)
                    if (pWatcher->aTodos[iTodo].Data.pUserData == pUserData)
                    {
                        if (pWatcher->aTodos[iTodo].hProcess == NULL)
                            fAddIt = true;
                        else
                        {
                            fAddIt = false;
                            CloseHandle(pWatcher->aTodos[iTodo].hProcess);
                        }
                        uint32_t const cTodos   = --pWatcher->cTodos;
                        uint32_t const cToShift = cTodos - iTodo;
                        if (cToShift > 0)
                            memmove(&pWatcher->aTodos[iTodo], &pWatcher->aTodos[iTodo + 1],
                                    sizeof(pWatcher->aTodos[0]) * cToShift);
                        pWatcher->aTodos[cTodos].hProcess = NULL;
                        pWatcher->aTodos[cTodos].Data.setNull();
                    }

                /*
                 * Did we just eliminate the pending add and thereby cancel out
                 * this whole operation?
                 */
                if (!fAddIt)
                {
                    pUserData->m_iWatcher = UINT32_MAX;
                    pUserData->m_iTheChosenOneRevision++;
                    i_decrementClientCount();

                    RTCritSectLeave(&m_WatcherCritSect);
                    RTThreadYield();
                    return;
                }

                /*
                 * No, we didn't.  So, try to append a removal item.
                 */
                iTodo = pWatcher->cTodos;
                if (iTodo < RT_ELEMENTS(pWatcher->aTodos))
                {
                    pWatcher->aTodos[iTodo].hProcess       = NULL; /* NULL hProcess marks a removal todo. */
                    pWatcher->aTodos[iTodo].Data.pUserData = pUserData;
                    pWatcher->aTodos[iTodo].Data.pid       = pid;
                    pWatcher->aTodos[iTodo].Data.iRevision = pUserData->m_iTheChosenOneRevision++;
                    pWatcher->cTodos = iTodo + 1;
                    SetEvent(pWatcher->aHandles[0]); /* Wake the watcher thread to process the todo. */

                    pUserData->m_iWatcher = UINT32_MAX;
                    i_decrementClientCount();

                    RTCritSectLeave(&m_WatcherCritSect);
                    RTThreadYield();
                    return;
                }
            }
            else
            {
                LogRel(("i_stopWatching: Watcher #%u has shut down.\n", iWatcher));
                break;
            }

            /*
             * Todo queue is full.  Sleep a little and let the watcher process it.
             * We drop the critsect while sleeping; the revision check below
             * asserts nobody reassigned the user's chosen one in the meantime.
             */
            LogRel(("i_stopWatching: Watcher #%u todo queue is full! (round #%u)\n", iWatcher, iRound));
            uint32_t const iTheChosenOneRevision = pUserData->m_iTheChosenOneRevision;
            SetEvent(pWatcher->aHandles[0]);
            RTCritSectLeave(&m_WatcherCritSect);

            RTThreadSleep(1 + (iRound & 127));

            RTCritSectEnter(&m_WatcherCritSect);
            AssertLogRelMsgBreak(pUserData->m_iTheChosenOneRevision == iTheChosenOneRevision,
                                 ("Impossible! m_iTheChosenOneRevision changed %#x -> %#x!\n",
                                  iTheChosenOneRevision, pUserData->m_iTheChosenOneRevision));
        }
        else
        {
            /* Not assigned to any watcher (must be UINT32_MAX) — nothing to do. */
            AssertLogRelMsg(pUserData->m_iWatcher == UINT32_MAX,
                            ("Impossible! iWatcher=%d m_cWatcher=%u\n", iWatcher, m_cWatchers));
            break;
        }
    }
    RTCritSectLeave(&m_WatcherCritSect);
}
/**
 * Starts monitoring a VBoxSVC process.
 *
 * Tries to queue an "add" todo on an existing watcher thread with spare
 * handle capacity; if none has room, a brand new watcher (with the process
 * pre-queued in handle slot #1) is allocated and its thread started.
 *
 * @param   pUserData   The user which chosen VBoxSVC should be watched.
 * @param   hProcess    Handle to the VBoxSVC process.  Consumed (either
 *                      handed to a watcher or closed on failure).
 * @param   pid         The VBoxSVC PID.
 * @returns Success indicator.
 */
bool VirtualBoxSDS::i_watchIt(VBoxSDSPerUserData *pUserData, HANDLE hProcess, RTPROCESS pid)
{
    RTCritSectEnter(&m_WatcherCritSect);

    /*
     * Find a watcher with capacity left over (we save 8 entries for removals).
     */
    for (uint32_t i = 0; i < m_cWatchers; i++)
    {
        VBoxSDSWatcher *pWatcher = m_papWatchers[i];
        if (   pWatcher->cHandlesEffective < RT_ELEMENTS(pWatcher->aHandles)
            && !pWatcher->fShutdown)
        {
            uint32_t iTodo = pWatcher->cTodos;
            if (iTodo + 8 < RT_ELEMENTS(pWatcher->aTodos))
            {
                /* Queue the add; hProcess ownership passes to the todo entry. */
                pWatcher->aTodos[iTodo].hProcess       = hProcess;
                pWatcher->aTodos[iTodo].Data.pUserData = pUserData;
                pWatcher->aTodos[iTodo].Data.iRevision = ++pUserData->m_iTheChosenOneRevision;
                pWatcher->aTodos[iTodo].Data.pid       = pid;
                pWatcher->cTodos = iTodo + 1;

                pUserData->m_iWatcher = pWatcher->iWatcher;
                pUserData->i_retain(); /* Watcher keeps a reference to the user data. */

                /* Wake the watcher thread so it picks up the todo. */
                BOOL fRc = SetEvent(pWatcher->aHandles[0]);
                AssertLogRelMsg(fRc, ("SetEvent(%p) failed: %u\n", pWatcher->aHandles[0], GetLastError()));
                LogRel(("i_watchIt: Added %p/%p to watcher #%u: %RTbool\n",
                        pUserData, hProcess, pWatcher->iWatcher, fRc));

                i_incrementClientCount();
                RTCritSectLeave(&m_WatcherCritSect);
                RTThreadYield();
                return true;
            }
        }
    }

    /*
     * No watcher with capacity was found, so create a new one with
     * the user/handle prequeued.
     */
    void *pvNew = RTMemRealloc(m_papWatchers, sizeof(m_papWatchers[0]) * (m_cWatchers + 1));
    if (pvNew)
    {
        m_papWatchers = (VBoxSDSWatcher **)pvNew;
        VBoxSDSWatcher *pWatcher = (VBoxSDSWatcher *)RTMemAllocZ(sizeof(*pWatcher));
        if (pWatcher)
        {
            /* Initialize all data entries to the null state. */
            for (uint32_t i = 0; i < RT_ELEMENTS(pWatcher->aData); i++)
                pWatcher->aData[i].setNull();
            for (uint32_t i = 0; i < RT_ELEMENTS(pWatcher->aTodos); i++)
                pWatcher->aTodos[i].Data.setNull();
            pWatcher->pVBoxSDS          = this;
            pWatcher->iWatcher          = m_cWatchers;
            pWatcher->cRefs             = 2; /* One for us, one for the watcher thread. */
            pWatcher->cHandlesEffective = 2; /* Slot #0 = wakeup event, slot #1 = the process. */
            pWatcher->cHandles          = 2;

            /* Slot #0 is the auto-reset wakeup event the todo producers signal. */
            pWatcher->aHandles[0] = CreateEventW(NULL, FALSE /*fManualReset*/, FALSE /*fInitialState*/, NULL);
            if (pWatcher->aHandles[0])
            {
                /* Add incoming VBoxSVC process in slot #1: */
                pWatcher->aHandles[1]     = hProcess;
                pWatcher->aData[1].pid    = pid;
                pWatcher->aData[1].pUserData = pUserData;
                pWatcher->aData[1].iRevision = ++pUserData->m_iTheChosenOneRevision;
                pUserData->i_retain();
                pUserData->m_iWatcher = pWatcher->iWatcher;

                /* Start the thread and we're good. */
                m_papWatchers[m_cWatchers++] = pWatcher;
                int rc = RTThreadCreateF(&pWatcher->hThread, i_watcherThreadProc, pWatcher, 0, RTTHREADTYPE_MAIN_WORKER,
                                         RTTHREADFLAGS_WAITABLE, "watcher%u", pWatcher->iWatcher);
                if (RT_SUCCESS(rc))
                {
                    LogRel(("i_watchIt: Created new watcher #%u for %p/%p\n", m_cWatchers, pUserData, hProcess));

                    i_incrementClientCount();
                    RTCritSectLeave(&m_WatcherCritSect);
                    return true;
                }

                /* Thread creation failed: roll back everything done above. */
                LogRel(("i_watchIt: Error starting watcher thread: %Rrc\n", rc));
                m_papWatchers[--m_cWatchers] = NULL;

                pUserData->m_iWatcher = UINT32_MAX;
                pUserData->i_release();
                CloseHandle(pWatcher->aHandles[0]);
            }
            else
                LogRel(("i_watchIt: CreateEventW failed: %u\n", GetLastError()));
            RTMemFree(pWatcher);
        }
        else
            LogRel(("i_watchIt: failed to allocate watcher structure!\n"));
    }
    else
        LogRel(("i_watchIt: Failed to grow watcher array to %u entries!\n", m_cWatchers + 1));

    RTCritSectLeave(&m_WatcherCritSect);
    CloseHandle(hProcess); /* We consume the handle even on failure. */
    return false;
}
/**
 * Main client-watcher thread procedure.
 *
 * Watches direct session clients (via their session mutexes/tokens) and
 * spawning VM processes, calling i_checkForDeath / i_checkForSpawnFailure
 * when something happens and rebuilding the watch sets on update requests.
 * The waiting mechanism is platform specific: Windows uses subworker threads
 * over WaitForMultipleObjects sections, OS/2 uses a muxwait semaphore, and
 * the SysV/generic variants poll with an adaptive timeout.
 *
 * @returns 0.
 * @param   hThreadSelf     This thread's handle.
 * @param   pvUser          Pointer to the ClientWatcher instance.
 */
/*static*/ DECLCALLBACK(int) VirtualBox::ClientWatcher::worker(RTTHREAD hThreadSelf, void *pvUser)
{
    LogFlowFuncEnter();
    NOREF(hThreadSelf);

    VirtualBox::ClientWatcher *that = (VirtualBox::ClientWatcher *)pvUser;
    Assert(that);

    typedef std::vector<ComObjPtr<Machine> > MachineVector;
    typedef std::vector<ComObjPtr<SessionMachine> > SessionMachineVector;

    SessionMachineVector machines;       /* Machines with open/closing direct sessions. */
    MachineVector spawnedMachines;       /* Machines with a session process being spawned. */
    size_t cnt = 0;                      /* Current count of entries in 'machines'. */
    size_t cntSpawned = 0;               /* Current count of entries in 'spawnedMachines'. */

    VirtualBoxBase::initializeComForThread();

#if defined(RT_OS_WINDOWS)
    int vrc;

    /* Initialize all the subworker data. */
    that->maSubworkers[0].hThread = hThreadSelf; /* Slot 0 is this (main) worker thread. */
    for (uint32_t iSubworker = 1; iSubworker < RT_ELEMENTS(that->maSubworkers); iSubworker++)
        that->maSubworkers[iSubworker].hThread = NIL_RTTHREAD;
    for (uint32_t iSubworker = 0; iSubworker < RT_ELEMENTS(that->maSubworkers); iSubworker++)
    {
        that->maSubworkers[iSubworker].pSelf = that;
        that->maSubworkers[iSubworker].iSubworker = iSubworker;
    }

    do
    {
        /* VirtualBox has been early uninitialized, terminate. */
        AutoCaller autoCaller(that->mVirtualBox);
        if (!autoCaller.isOk())
            break;

        bool fPidRace = false;     /* We poll if the PID of a spawning session hasn't been established yet. */
        bool fRecentDeath = false; /* We slowly poll if a session has recently been closed to do reaping. */
        for (;;)
        {
            /* release the caller to let uninit() ever proceed */
            autoCaller.release();

            /* Kick off the waiting: lazily create/signal one subworker per
               CW_MAX_HANDLES_PER_THREAD section of the handle array. */
            uint32_t const cSubworkers = (that->mcWaitHandles + CW_MAX_HANDLES_PER_THREAD - 1) / CW_MAX_HANDLES_PER_THREAD;
            uint32_t const cMsWait = fPidRace ? 500 : fRecentDeath ? 5000 : INFINITE;
            LogFlowFunc(("UPDATE: Waiting. %u handles, %u subworkers, %u ms wait\n",
                         that->mcWaitHandles, cSubworkers, cMsWait));
            that->mcMsWait = cMsWait;
            ASMAtomicWriteU32(&that->mcActiveSubworkers, cSubworkers);
            RTThreadUserReset(hThreadSelf);

            for (uint32_t iSubworker = 1; iSubworker < cSubworkers; iSubworker++)
            {
                if (that->maSubworkers[iSubworker].hThread != NIL_RTTHREAD)
                {
                    vrc = RTThreadUserSignal(that->maSubworkers[iSubworker].hThread);
                    AssertLogRelMsg(RT_SUCCESS(vrc), ("RTThreadUserSignal -> %Rrc\n", vrc));
                }
                else
                {
                    vrc = RTThreadCreateF(&that->maSubworkers[iSubworker].hThread,
                                          VirtualBox::ClientWatcher::subworkerThread, &that->maSubworkers[iSubworker],
                                          _128K, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "Watcher%u", iSubworker);
                    AssertLogRelMsgStmt(RT_SUCCESS(vrc), ("%Rrc iSubworker=%u\n", vrc, iSubworker),
                                        that->maSubworkers[iSubworker].hThread = NIL_RTTHREAD);
                }
                if (RT_FAILURE(vrc))
                    /* Couldn't delegate; do this section's wait ourselves (1 ms). */
                    that->subworkerWait(&that->maSubworkers[iSubworker], 1);
            }

            /* Wait ourselves. */
            that->subworkerWait(&that->maSubworkers[0], cMsWait);

            /* Make sure all waiters are done waiting. */
            BOOL fRc = SetEvent(that->mUpdateReq);
            Assert(fRc); NOREF(fRc);
            vrc = RTThreadUserWait(hThreadSelf, RT_INDEFINITE_WAIT);
            AssertLogRelMsg(RT_SUCCESS(vrc), ("RTThreadUserWait -> %Rrc\n", vrc));
            Assert(that->mcActiveSubworkers == 0);

            /* Consume pending update request before proceeding with processing the wait results. */
            fRc = ResetEvent(that->mUpdateReq);
            Assert(fRc);

            bool update = ASMAtomicXchgBool(&that->mfUpdateReq, false);
            if (update)
                LogFlowFunc(("UPDATE: Update request pending\n"));
            update |= fPidRace;

            /* Process the wait results. */
            autoCaller.add();
            if (!autoCaller.isOk())
                break;
            fRecentDeath = false;
            for (uint32_t iSubworker = 0; iSubworker < cSubworkers; iSubworker++)
            {
                DWORD dwWait = that->maSubworkers[iSubworker].dwWait;
                LogFlowFunc(("UPDATE: subworker #%u: dwWait=%#x\n", iSubworker, dwWait));
                /* Index 0 of each section is the update-request event; only
                   indices 1..CW_MAX_HANDLES_PER_THREAD-1 map to machines. */
                if (   (dwWait > WAIT_OBJECT_0    && dwWait < WAIT_OBJECT_0    + CW_MAX_HANDLES_PER_THREAD)
                    || (dwWait > WAIT_ABANDONED_0 && dwWait < WAIT_ABANDONED_0 + CW_MAX_HANDLES_PER_THREAD))
                {
                    uint32_t idxHandle = iSubworker * CW_MAX_HANDLES_PER_THREAD;
                    if (dwWait > WAIT_OBJECT_0 && dwWait < WAIT_OBJECT_0 + CW_MAX_HANDLES_PER_THREAD)
                        idxHandle += dwWait - WAIT_OBJECT_0;
                    else
                        idxHandle += dwWait - WAIT_ABANDONED_0;

                    /* Subtract the per-section update-request slots to get the
                       logical machine index (direct sessions first, then spawned). */
                    uint32_t const idxMachine = idxHandle - (iSubworker + 1);
                    if (idxMachine < cnt)
                    {
                        /* Machine mutex is released or abandond due to client process termination. */
                        LogFlowFunc(("UPDATE: Calling i_checkForDeath on idxMachine=%u (idxHandle=%u) dwWait=%#x\n",
                                     idxMachine, idxHandle, dwWait));
                        fRecentDeath |= (machines[idxMachine])->i_checkForDeath();
                    }
                    else if (idxMachine < cnt + cntSpawned)
                    {
                        /* Spawned VM process has terminated normally. */
                        Assert(dwWait < WAIT_ABANDONED_0);
                        LogFlowFunc(("UPDATE: Calling i_checkForSpawnFailure on idxMachine=%u/%u idxHandle=%u dwWait=%#x\n",
                                     idxMachine, idxMachine - cnt, idxHandle, dwWait));
                        fRecentDeath |= (spawnedMachines[idxMachine - cnt])->i_checkForSpawnFailure();
                    }
                    else
                        AssertFailed();
                    update = true;
                }
                else
                    Assert(dwWait == WAIT_OBJECT_0 || dwWait == WAIT_TIMEOUT);
            }

            if (update)
            {
                LogFlowFunc(("UPDATE: Update pending (cnt=%u cntSpawned=%u)...\n", cnt, cntSpawned));

                /* close old process handles */
                that->winResetHandleArray((uint32_t)cntSpawned);

                // get reference to the machines list in VirtualBox
                VirtualBox::MachinesOList &allMachines = that->mVirtualBox->i_getMachinesList();

                // lock the machines list for reading
                AutoReadLock thatLock(allMachines.getLockHandle() COMMA_LOCKVAL_SRC_POS);

                /* obtain a new set of opened machines */
                cnt = 0;
                machines.clear();
                uint32_t idxHandle = 0;
                for (MachinesOList::iterator it = allMachines.begin(); it != allMachines.end(); ++it)
                {
                    AssertMsgBreak(idxHandle < CW_MAX_CLIENTS, ("CW_MAX_CLIENTS reached"));

                    ComObjPtr<SessionMachine> sm;
                    if ((*it)->i_isSessionOpenOrClosing(sm))
                    {
                        AutoCaller smCaller(sm);
                        if (smCaller.isOk())
                        {
                            AutoReadLock smLock(sm COMMA_LOCKVAL_SRC_POS);
                            Machine::ClientToken *ct = sm->i_getClientToken();
                            if (ct)
                            {
                                HANDLE ipcSem = ct->getToken();
                                machines.push_back(sm);
                                /* Reserve slot 0 of every section for the update-request event. */
                                if (!(idxHandle % CW_MAX_HANDLES_PER_THREAD))
                                    idxHandle++;
                                that->mahWaitHandles[idxHandle++] = ipcSem;
                                ++cnt;
                            }
                        }
                    }
                }
                LogFlowFunc(("UPDATE: direct session count = %d\n", cnt));

                /* obtain a new set of spawned machines */
                fPidRace = false;
                cntSpawned = 0;
                spawnedMachines.clear();
                for (MachinesOList::iterator it = allMachines.begin(); it != allMachines.end(); ++it)
                {
                    AssertMsgBreak(idxHandle < CW_MAX_CLIENTS, ("CW_MAX_CLIENTS reached"));

                    if ((*it)->i_isSessionSpawning())
                    {
                        ULONG pid;
                        HRESULT hrc = (*it)->COMGETTER(SessionPID)(&pid);
                        if (SUCCEEDED(hrc))
                        {
                            if (pid != NIL_RTPROCESS)
                            {
                                HANDLE hProc = OpenProcess(SYNCHRONIZE, FALSE, pid);
                                AssertMsg(hProc != NULL, ("OpenProcess (pid=%d) failed with %d\n", pid, GetLastError()));
                                if (hProc != NULL)
                                {
                                    spawnedMachines.push_back(*it);
                                    /* Reserve slot 0 of every section for the update-request event. */
                                    if (!(idxHandle % CW_MAX_HANDLES_PER_THREAD))
                                        idxHandle++;
                                    that->mahWaitHandles[idxHandle++] = hProc;
                                    ++cntSpawned;
                                }
                            }
                            else
                                fPidRace = true; /* PID not known yet -> short-poll next round. */
                        }
                    }
                }
                LogFlowFunc(("UPDATE: spawned session count = %d\n", cntSpawned));

                /* Update mcWaitHandles and make sure there is at least one handle to wait on. */
                that->mcWaitHandles = RT_MAX(idxHandle, 1);

                // machines lock unwinds here
            }
            else
                LogFlowFunc(("UPDATE: No update pending.\n"));

            /* reap child processes */
            that->reapProcesses();

        } /* for ever (well, till autoCaller fails). */

    } while (0);

    /* Terminate subworker threads. */
    ASMAtomicWriteBool(&that->mfTerminate, true);
    for (uint32_t iSubworker = 1; iSubworker < RT_ELEMENTS(that->maSubworkers); iSubworker++)
        if (that->maSubworkers[iSubworker].hThread != NIL_RTTHREAD)
            RTThreadUserSignal(that->maSubworkers[iSubworker].hThread);
    for (uint32_t iSubworker = 1; iSubworker < RT_ELEMENTS(that->maSubworkers); iSubworker++)
        if (that->maSubworkers[iSubworker].hThread != NIL_RTTHREAD)
        {
            vrc = RTThreadWait(that->maSubworkers[iSubworker].hThread, RT_MS_1MIN, NULL /*prc*/);
            if (RT_SUCCESS(vrc))
                that->maSubworkers[iSubworker].hThread = NIL_RTTHREAD;
            else
                AssertLogRelMsgFailed(("RTThreadWait -> %Rrc\n", vrc));
        }

    /* close old process handles */
    that->winResetHandleArray((uint32_t)cntSpawned);

    /* release sets of machines if any */
    machines.clear();
    spawnedMachines.clear();

    ::CoUninitialize();

#elif defined(RT_OS_OS2)
    /* according to PMREF, 64 is the maximum for the muxwait list */
    SEMRECORD handles[64];

    HMUX muxSem = NULLHANDLE;

    do
    {
        AutoCaller autoCaller(that->mVirtualBox);
        /* VirtualBox has been early uninitialized, terminate */
        if (!autoCaller.isOk())
            break;

        for (;;)
        {
            /* release the caller to let uninit() ever proceed */
            autoCaller.release();

            int vrc = RTSemEventWait(that->mUpdateReq, 500);

            /* Restore the caller before using VirtualBox. If it fails, this
             * means VirtualBox is being uninitialized and we must terminate. */
            autoCaller.add();
            if (!autoCaller.isOk())
                break;

            bool update = false;
            bool updateSpawned = false;

            if (RT_SUCCESS(vrc))
            {
                /* update event is signaled */
                update = true;
                updateSpawned = true;
            }
            else
            {
                AssertMsg(vrc == VERR_TIMEOUT || vrc == VERR_INTERRUPTED,
                          ("RTSemEventWait returned %Rrc\n", vrc));

                /* are there any mutexes? */
                if (cnt > 0)
                {
                    /* figure out what's going on with machines */
                    unsigned long semId = 0;
                    APIRET arc = ::DosWaitMuxWaitSem(muxSem, SEM_IMMEDIATE_RETURN, &semId);

                    if (arc == NO_ERROR)
                    {
                        /* machine mutex is normally released */
                        Assert(semId >= 0 && semId < cnt);
                        if (semId >= 0 && semId < cnt)
                        {
#if 0//def DEBUG
                            {
                                AutoReadLock machineLock(machines[semId] COMMA_LOCKVAL_SRC_POS);
                                LogFlowFunc(("released mutex: machine='%ls'\n",
                                             machines[semId]->name().raw()));
                            }
#endif
                            machines[semId]->i_checkForDeath();
                        }
                        update = true;
                    }
                    else if (arc == ERROR_SEM_OWNER_DIED)
                    {
                        /* machine mutex is abandoned due to client process
                         * termination; find which mutex is in the Owner Died
                         * state */
                        for (size_t i = 0; i < cnt; ++i)
                        {
                            PID pid; TID tid;
                            unsigned long reqCnt;
                            arc = DosQueryMutexSem((HMTX)handles[i].hsemCur, &pid, &tid, &reqCnt);
                            if (arc == ERROR_SEM_OWNER_DIED)
                            {
                                /* close the dead mutex as asked by PMREF */
                                ::DosCloseMutexSem((HMTX)handles[i].hsemCur);

                                Assert(i >= 0 && i < cnt);
                                if (i >= 0 && i < cnt)
                                {
#if 0//def DEBUG
                                    {
                                        AutoReadLock machineLock(machines[semId] COMMA_LOCKVAL_SRC_POS);
                                        LogFlowFunc(("mutex owner dead: machine='%ls'\n",
                                                     machines[i]->name().raw()));
                                    }
#endif
                                    machines[i]->i_checkForDeath();
                                }
                            }
                        }
                        update = true;
                    }
                    else
                        AssertMsg(arc == ERROR_INTERRUPT || arc == ERROR_TIMEOUT,
                                  ("DosWaitMuxWaitSem returned %d\n", arc));
                }

                /* are there any spawning sessions? */
                if (cntSpawned > 0)
                {
                    for (size_t i = 0; i < cntSpawned; ++i)
                        updateSpawned |= (spawnedMachines[i])->i_checkForSpawnFailure();
                }
            }

            if (update || updateSpawned)
            {
                // get reference to the machines list in VirtualBox
                VirtualBox::MachinesOList &allMachines = that->mVirtualBox->i_getMachinesList();

                // lock the machines list for reading
                AutoReadLock thatLock(allMachines.getLockHandle() COMMA_LOCKVAL_SRC_POS);

                if (update)
                {
                    /* close the old muxsem */
                    if (muxSem != NULLHANDLE)
                        ::DosCloseMuxWaitSem(muxSem);

                    /* obtain a new set of opened machines */
                    cnt = 0;
                    machines.clear();

                    for (MachinesOList::iterator it = allMachines.begin(); it != allMachines.end(); ++it)
                    {
                        /// @todo handle situations with more than 64 objects
                        AssertMsg(cnt <= 64 /* according to PMREF */,
                                  ("maximum of 64 mutex semaphores reached (%d)", cnt));

                        ComObjPtr<SessionMachine> sm;
                        if ((*it)->i_isSessionOpenOrClosing(sm))
                        {
                            AutoCaller smCaller(sm);
                            if (smCaller.isOk())
                            {
                                AutoReadLock smLock(sm COMMA_LOCKVAL_SRC_POS);
                                ClientToken *ct = sm->i_getClientToken();
                                if (ct)
                                {
                                    HMTX ipcSem = ct->getToken();
                                    machines.push_back(sm);
                                    handles[cnt].hsemCur = (HSEM)ipcSem;
                                    handles[cnt].ulUser = cnt;
                                    ++cnt;
                                }
                            }
                        }
                    }

                    LogFlowFunc(("UPDATE: direct session count = %d\n", cnt));

                    if (cnt > 0)
                    {
                        /* create a new muxsem */
                        APIRET arc = ::DosCreateMuxWaitSem(NULL, &muxSem, cnt, handles, DCMW_WAIT_ANY);
                        AssertMsg(arc == NO_ERROR,
                                  ("DosCreateMuxWaitSem returned %d\n", arc));
                        NOREF(arc);
                    }
                }

                if (updateSpawned)
                {
                    /* obtain a new set of spawned machines */
                    spawnedMachines.clear();

                    for (MachinesOList::iterator it = allMachines.begin(); it != allMachines.end(); ++it)
                    {
                        if ((*it)->i_isSessionSpawning())
                            spawnedMachines.push_back(*it);
                    }

                    cntSpawned = spawnedMachines.size();
                    LogFlowFunc(("UPDATE: spawned session count = %d\n", cntSpawned));
                }
            }

            /* reap child processes */
            that->reapProcesses();

        } /* for ever (well, till autoCaller fails). */

    } while (0);

    /* close the muxsem */
    if (muxSem != NULLHANDLE)
        ::DosCloseMuxWaitSem(muxSem);

    /* release sets of machines if any */
    machines.clear();
    spawnedMachines.clear();

#elif defined(VBOX_WITH_SYS_V_IPC_SESSION_WATCHER)

    bool update = false;
    bool updateSpawned = false;

    do
    {
        AutoCaller autoCaller(that->mVirtualBox);
        if (!autoCaller.isOk())
            break;

        do
        {
            /* release the caller to let uninit() ever proceed */
            autoCaller.release();

            /* determine wait timeout adaptively: after updating information
             * relevant to the client watcher, check a few times more
             * frequently. This ensures good reaction time when the signalling
             * has to be done a bit before the actual change for technical
             * reasons, and saves CPU cycles when no activities are expected. */
            RTMSINTERVAL cMillies;
            {
                /* Atomically decrement the adapt counter towards zero; it
                   indexes into the timeout step table. */
                uint8_t uOld, uNew;
                do
                {
                    uOld = ASMAtomicUoReadU8(&that->mUpdateAdaptCtr);
                    uNew = uOld ? uOld - 1 : uOld;
                } while (!ASMAtomicCmpXchgU8(&that->mUpdateAdaptCtr, uNew, uOld));
                Assert(uOld <= RT_ELEMENTS(s_aUpdateTimeoutSteps) - 1);
                cMillies = s_aUpdateTimeoutSteps[uOld];
            }

            int rc = RTSemEventWait(that->mUpdateReq, cMillies);

            /*
             * Restore the caller before using VirtualBox. If it fails, this
             * means VirtualBox is being uninitialized and we must terminate.
             */
            autoCaller.add();
            if (!autoCaller.isOk())
                break;

            if (RT_SUCCESS(rc) || update || updateSpawned)
            {
                /* RT_SUCCESS(rc) means an update event is signaled */

                // get reference to the machines list in VirtualBox
                VirtualBox::MachinesOList &allMachines = that->mVirtualBox->i_getMachinesList();

                // lock the machines list for reading
                AutoReadLock thatLock(allMachines.getLockHandle() COMMA_LOCKVAL_SRC_POS);

                if (RT_SUCCESS(rc) || update)
                {
                    /* obtain a new set of opened machines */
                    machines.clear();

                    for (MachinesOList::iterator it = allMachines.begin(); it != allMachines.end(); ++it)
                    {
                        ComObjPtr<SessionMachine> sm;
                        if ((*it)->i_isSessionOpenOrClosing(sm))
                            machines.push_back(sm);
                    }

                    cnt = machines.size();
                    LogFlowFunc(("UPDATE: direct session count = %d\n", cnt));
                }

                if (RT_SUCCESS(rc) || updateSpawned)
                {
                    /* obtain a new set of spawned machines */
                    spawnedMachines.clear();

                    for (MachinesOList::iterator it = allMachines.begin(); it != allMachines.end(); ++it)
                    {
                        if ((*it)->i_isSessionSpawning())
                            spawnedMachines.push_back(*it);
                    }

                    cntSpawned = spawnedMachines.size();
                    LogFlowFunc(("UPDATE: spawned session count = %d\n", cntSpawned));
                }

                // machines lock unwinds here
            }

            update = false;
            for (size_t i = 0; i < cnt; ++i)
                update |= (machines[i])->i_checkForDeath();

            updateSpawned = false;
            for (size_t i = 0; i < cntSpawned; ++i)
                updateSpawned |= (spawnedMachines[i])->i_checkForSpawnFailure();

            /* reap child processes */
            that->reapProcesses();

        } while (true);

    } while (0);

    /* release sets of machines if any */
    machines.clear();
    spawnedMachines.clear();

#elif defined(VBOX_WITH_GENERIC_SESSION_WATCHER)

    bool updateSpawned = false;

    do
    {
        AutoCaller autoCaller(that->mVirtualBox);
        if (!autoCaller.isOk())
            break;

        do
        {
            /* release the caller to let uninit() ever proceed */
            autoCaller.release();

            /* determine wait timeout adaptively: after updating information
             * relevant to the client watcher, check a few times more
             * frequently. This ensures good reaction time when the signalling
             * has to be done a bit before the actual change for technical
             * reasons, and saves CPU cycles when no activities are expected. */
            RTMSINTERVAL cMillies;
            {
                /* Atomically decrement the adapt counter towards zero; it
                   indexes into the timeout step table. */
                uint8_t uOld, uNew;
                do
                {
                    uOld = ASMAtomicUoReadU8(&that->mUpdateAdaptCtr);
                    uNew = uOld ? (uint8_t)(uOld - 1) : uOld;
                } while (!ASMAtomicCmpXchgU8(&that->mUpdateAdaptCtr, uNew, uOld));
                Assert(uOld <= RT_ELEMENTS(s_aUpdateTimeoutSteps) - 1);
                cMillies = s_aUpdateTimeoutSteps[uOld];
            }

            int rc = RTSemEventWait(that->mUpdateReq, cMillies);

            /*
             * Restore the caller before using VirtualBox. If it fails, this
             * means VirtualBox is being uninitialized and we must terminate.
             */
            autoCaller.add();
            if (!autoCaller.isOk())
                break;

            /** @todo this quite big effort for catching machines in spawning
             * state which can't be caught by the token mechanism (as the token
             * can't be in the other process yet) could be eliminated if the
             * reaping is made smarter, having cross-reference information
             * from the pid to the corresponding machine object. Both cases do
             * more or less the same thing anyway. */
            if (RT_SUCCESS(rc) || updateSpawned)
            {
                /* RT_SUCCESS(rc) means an update event is signaled */

                // get reference to the machines list in VirtualBox
                VirtualBox::MachinesOList &allMachines = that->mVirtualBox->i_getMachinesList();

                // lock the machines list for reading
                AutoReadLock thatLock(allMachines.getLockHandle() COMMA_LOCKVAL_SRC_POS);

                if (RT_SUCCESS(rc) || updateSpawned)
                {
                    /* obtain a new set of spawned machines */
                    spawnedMachines.clear();

                    for (MachinesOList::iterator it = allMachines.begin(); it != allMachines.end(); ++it)
                    {
                        if ((*it)->i_isSessionSpawning())
                            spawnedMachines.push_back(*it);
                    }

                    cntSpawned = spawnedMachines.size();
                    LogFlowFunc(("UPDATE: spawned session count = %d\n", cntSpawned));
                }

                NOREF(cnt);
                // machines lock unwinds here
            }

            updateSpawned = false;
            for (size_t i = 0; i < cntSpawned; ++i)
                updateSpawned |= (spawnedMachines[i])->i_checkForSpawnFailure();

            /* reap child processes */
            that->reapProcesses();

        } while (true);

    } while (0);

    /* release sets of machines if any */
    machines.clear();
    spawnedMachines.clear();

#else
# error "Port me!"
#endif

    VirtualBoxBase::uninitializeComForThread();

    LogFlowFuncLeave();
    return 0;
}