/**
 * Destroys an allocated page.
 *
 * @param   pPage   Pointer to the page to be destroyed.
 * @remarks This function expects the page in @c pPage to be shared locked.
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need to exclusive lock the pages before freeing them. If upgrading the shared lock
     * to exclusive fails, drop the page lock and look it up from the hash. Record the page
     * offset before we drop the page lock as we cannot touch any page_t members once the
     * lock is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;   /* saved before any unlock - see comment above */
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        /* Upgrade failed: release the shared lock and re-acquire the page exclusively via the hash. */
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the pages as PG_NORELOC we should only get back
         * the exact page always (it cannot have been relocated meanwhile).
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}
/**
 * Releases a spin mutex acquired by RTSemSpinMutexRequest/RTSemSpinMutexTryRequest.
 *
 * @returns VINF_SUCCESS on success, VERR_NOT_OWNER if the caller does not own
 *          the mutex (detected by the hOwner compare-exchange below).
 * @param   hSpinMtx    The spin mutex handle.
 */
RTDECL(int) RTSemSpinMutexRelease(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    /* NOTE(review): receives the result of a signed atomic decrement; presumably never
       negative here because the caller holds the mutex (cLockers >= 1) - confirm. */
    uint32_t                cLockers;
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Get the saved state and try release the semaphore.
     * The compare-exchange on hOwner both verifies ownership and clears it atomically.
     */
    State = pThis->SavedState;
    ASMCompilerBarrier();
    ASMAtomicCmpXchgHandle(&pThis->hOwner, NIL_RTNATIVETHREAD, hSelf, fRc);
    AssertMsgReturn(fRc,
                    ("hOwner=%p hSelf=%p cLockers=%d\n", pThis->hOwner, hSelf, pThis->cLockers),
                    VERR_NOT_OWNER);

    /* Decrement the locker count *before* restoring the pre-lock CPU state,
       then wake one waiter if any remain. */
    cLockers = ASMAtomicDecS32(&pThis->cLockers);
    rtSemSpinMutexLeave(&State);
    if (cLockers > 0)
    {
        int rc = RTSemEventSignal(pThis->hEventSem);
        AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
    }
    return VINF_SUCCESS;
}
/**
 * Returns the physical address for a page.
 *
 * @param   pPage   Pointer to the page.
 *
 * @returns The physical address for the page.
 */
static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
{
    AssertPtr(pPage);
    pfn_t PageFrameNum = page_pptonum(pPage);
    /* Fixed: the format string referenced %p but the pPage argument was missing,
       which is undefined behavior when the assertion message is formatted. */
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
    return (uint64_t)PageFrameNum << PAGE_SHIFT;
}
/**
 * \#DB (Debug event) handler for the hypervisor code.
 *
 * This is mostly the same as TRPMGCTrap01Handler, but we skip the PGM auto
 * mapping set as well as the default trap exit path since they are both really
 * bad ideas in this context.
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes are passed execution to host context.
 *
 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
 * @param   pRegFrame   Pointer to the register frame for the trap.
 * @internal
 */
DECLASM(int) TRPMGCHyperTrap01Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
{
    /* Read DR6 first and clear it so a nested #DB cannot see stale status bits. */
    RTGCUINTREG uDr6  = ASMGetAndClearDR6();
    PVM         pVM   = TRPMCPU_2_VM(pTrpmCpu);
    PVMCPU      pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);

    LogFlow(("TRPMGCHyper01: cs:eip=%04x:%08x uDr6=%RTreg\n", pRegFrame->cs.Sel, pRegFrame->eip, uDr6));

    /*
     * We currently don't make use of the X86_DR7_GD bit, but
     * there might come a time when we do.
     */
    AssertReleaseMsgReturn((uDr6 & X86_DR6_BD) != X86_DR6_BD,
                           ("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n",
                            ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6),
                           VERR_NOT_IMPLEMENTED);
    AssertReleaseMsg(!(uDr6 & X86_DR6_BT), ("X86_DR6_BT is impossible!\n"));

    /*
     * Now leave the rest to the DBGF.
     */
    int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6);
    /* A guest trap result makes no sense for a hypervisor #DB; convert to an internal error. */
    AssertStmt(rc != VINF_EM_RAW_GUEST_TRAP, rc = VERR_TRPM_IPE_1);

    Log6(("TRPMGCHyper01: %Rrc (%04x:%08x %RTreg)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip, uDr6));
    return rc;
}
/**
 * Notification wrapper that updates CPU states and invokes our notification
 * callbacks.
 *
 * @param   idCpu       The CPU Id.
 * @param   pvUser1     Pointer to the notifier_block (unused).
 * @param   pvUser2     The notification event.
 * @remarks This can be invoked in interrupt context.
 */
static DECLCALLBACK(void) rtMpNotificationLinuxOnCurrentCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned long ulNativeEvent = *(unsigned long *)pvUser2;
    NOREF(pvUser1);

    AssertRelease(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertReleaseMsg(idCpu == RTMpCpuId(),  /* ASSUMES iCpu == RTCPUID */
                     ("idCpu=%u RTMpCpuId=%d ApicId=%d\n", idCpu, RTMpCpuId(), ASMGetApicId() ));

    /*
     * Map the Linux hotplug event onto the IPRT online/offline notification.
     * The *_FROZEN variants (suspend/resume paths) are treated like the plain
     * events when the kernel defines them.
     */
    switch (ulNativeEvent)
    {
        /*
         * Pick up online events or failures to go offline.
         */
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
# endif
        case CPU_ONLINE:
# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
# endif
            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
            break;

        /*
         * Pick the earliest possible offline notification.
         */
# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
            break;
# endif
    }
}
/** * Load virtualized flags. * * This function is called from CPUMRawEnter(). It doesn't have to update the * IF and IOPL eflags bits, the caller will enforce those to set and 0 respectively. * * @param pVM Pointer to the VM. * @param pCtxCore The cpu context core. * @see pg_raw */ VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore) { bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip); Assert(!HMIsEnabled(pVM)); /* * Currently we don't bother to check whether PATM is enabled or not. * For all cases where it isn't, IOPL will be safe and IF will be set. */ register uint32_t efl = pCtxCore->eflags.u32; CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK; AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem)); AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%RRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip)); efl &= ~PATM_VIRTUAL_FLAGS_MASK; efl |= X86_EFL_IF; pCtxCore->eflags.u32 = efl; #ifdef IN_RING3 #ifdef PATM_EMULATE_SYSENTER PCPUMCTX pCtx; /* Check if the sysenter handler has changed. 
*/ pCtx = CPUMQueryGuestCtxPtr(pVM); if ( pCtx->SysEnter.cs != 0 && pCtx->SysEnter.eip != 0 ) { if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip) { pVM->patm.s.pfnSysEnterPatchGC = 0; pVM->patm.s.pfnSysEnterGC = 0; Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip)); pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip); if (pVM->patm.s.pfnSysEnterPatchGC == 0) { rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32); if (rc == VINF_SUCCESS) { pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip); pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip; Assert(pVM->patm.s.pfnSysEnterPatchGC); } } else pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip; } } else { pVM->patm.s.pfnSysEnterPatchGC = 0; pVM->patm.s.pfnSysEnterGC = 0; } #endif #endif }
/** * Register the main drivers. * * @returns VBox status code. * @param pCallbacks Pointer to the callback table. * @param u32Version VBox version number. */ extern "C" DECLEXPORT(int) VBoxDriversRegister(PCPDMDRVREGCB pCallbacks, uint32_t u32Version) { LogFlow(("VBoxDriversRegister: u32Version=%#x\n", u32Version)); AssertReleaseMsg(u32Version == VBOX_VERSION, ("u32Version=%#x VBOX_VERSION=%#x\n", u32Version, VBOX_VERSION)); int rc = pCallbacks->pfnRegister(pCallbacks, &Mouse::DrvReg); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &Keyboard::DrvReg); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &Display::DrvReg); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &VMMDev::DrvReg); if (RT_FAILURE(rc)) return rc; #ifdef VBOX_WITH_PDM_AUDIO_DRIVER rc = pCallbacks->pfnRegister(pCallbacks, &AudioVRDE::DrvReg); #else rc = pCallbacks->pfnRegister(pCallbacks, &AudioSniffer::DrvReg); #endif if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &Nvram::DrvReg); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &EmWebcam::DrvReg); if (RT_FAILURE(rc)) return rc; #ifdef VBOX_WITH_USB_CARDREADER rc = pCallbacks->pfnRegister(pCallbacks, &UsbCardReader::DrvReg); if (RT_FAILURE(rc)) return rc; #endif rc = pCallbacks->pfnRegister(pCallbacks, &Console::DrvStatusReg); if (RT_FAILURE(rc)) return rc; #ifdef VBOX_WITH_PCI_PASSTHROUGH rc = pCallbacks->pfnRegister(pCallbacks, &PCIRawDev::DrvReg); if (RT_FAILURE(rc)) return rc; #endif return VINF_SUCCESS; }
/**
 * Insert the per thread data structure into the tree.
 *
 * This can be called from both the thread it self and the parent,
 * thus it must handle insertion failures in a nice manner.
 *
 * @param   pThread         Pointer to thread structure allocated by rtThreadAlloc().
 * @param   NativeThread    The native thread id.
 */
DECLHIDDEN(void) rtThreadInsert(PRTTHREADINT pThread, RTNATIVETHREAD NativeThread)
{
    Assert(pThread);
    Assert(pThread->u32Magic == RTTHREADINT_MAGIC);

    {
        RT_THREAD_LOCK_RW();

        /*
         * Do not insert a terminated thread.
         *
         * This may happen if the thread finishes before the RTThreadCreate call
         * gets this far. Since the OS may quickly reuse the native thread ID
         * it should not be reinserted at this point.
         */
        if (rtThreadGetState(pThread) != RTTHREADSTATE_TERMINATED)
        {
            /*
             * Before inserting we must check if there is a thread with this id
             * in the tree already. We're racing parent and child on insert here
             * so that the handle is valid in both ends when they return / start.
             *
             * If it's not ourself we find, it's a dead alien thread and we will
             * unlink it from the tree. Alien threads will be released at this point.
             */
            PRTTHREADINT pThreadOther = (PRTTHREADINT)RTAvlPVGet(&g_ThreadTree, (void *)NativeThread);
            if (pThreadOther != pThread)
            {
                bool fRc;
                /* remove dead alien if any */
                if (pThreadOther)
                {
                    AssertMsg(pThreadOther->fIntFlags & RTTHREADINT_FLAGS_ALIEN,
                              ("%p:%s; %p:%s\n", pThread, pThread->szName, pThreadOther, pThreadOther->szName));
                    /* NOTE(review): the IN_TREE bit is cleared on pThread here, not on the
                       pThreadOther being removed - looks suspicious, confirm intent. */
                    ASMAtomicBitClear(&pThread->fIntFlags, RTTHREADINT_FLAG_IN_TREE_BIT);
                    rtThreadRemoveLocked(pThreadOther);
                    if (pThreadOther->fIntFlags & RTTHREADINT_FLAGS_ALIEN)
                        rtThreadRelease(pThreadOther);
                }

                /* insert the thread */
                ASMAtomicWritePtr(&pThread->Core.Key, (void *)NativeThread);
                fRc = RTAvlPVInsert(&g_ThreadTree, &pThread->Core);
                ASMAtomicOrU32(&pThread->fIntFlags, RTTHREADINT_FLAG_IN_TREE);
                if (fRc)
                    ASMAtomicIncU32(&g_cThreadInTree);

                AssertReleaseMsg(fRc, ("Lock problem? %p (%RTnthrd) %s\n", pThread, NativeThread, pThread->szName));
                NOREF(fRc);
            }
        }

        RT_THREAD_UNLOCK_RW();
    }
}
/**
 * Wrapper which unpacks the params and calls thread function.
 *
 * @returns The thread's exit code cast to void* (also passed to pthread_exit).
 * @param   pvArgs      Pointer to the PRTTHREADINT for this thread.
 */
static void *rtThreadNativeMain(void *pvArgs)
{
    PRTTHREADINT  pThread = (PRTTHREADINT)pvArgs;
    pthread_t     Self    = pthread_self();
    /* The pthread_t must round-trip through RTNATIVETHREAD (an integer type). */
    Assert((uintptr_t)Self == (RTNATIVETHREAD)Self && (uintptr_t)Self != NIL_RTNATIVETHREAD);

#if defined(RT_OS_LINUX)
    /*
     * Set the TID.
     */
    pThread->tid = syscall(__NR_gettid);
    ASMMemoryFence();
#endif

    /*
     * Block SIGALRM - required for timer-posix.cpp.
     * This is done to limit harm done by OSes which doesn't do special SIGALRM scheduling.
     * It will not help much if someone creates threads directly using pthread_create. :/
     */
    sigset_t SigSet;
    sigemptyset(&SigSet);
    sigaddset(&SigSet, SIGALRM);
    sigprocmask(SIG_BLOCK, &SigSet, NULL);
#ifdef RTTHREAD_POSIX_WITH_POKE
    /* Let the poke signal interrupt blocking syscalls on this thread. */
    if (g_iSigPokeThread != -1)
        siginterrupt(g_iSigPokeThread, 1);
#endif

    /*
     * Set the TLS entry and, if possible, the thread name.
     */
    int rc = pthread_setspecific(g_SelfKey, pThread);
    AssertReleaseMsg(!rc, ("failed to set self TLS. rc=%d thread '%s'\n", rc, pThread->szName));

#ifdef IPRT_MAY_HAVE_PTHREAD_SET_NAME_NP
    if (g_pfnThreadSetName)
# ifdef RT_OS_DARWIN
        /* Darwin's setname only works on the calling thread, so no thread argument. */
        g_pfnThreadSetName(pThread->szName);
# else
        g_pfnThreadSetName(Self, pThread->szName);
# endif
#endif

    /*
     * Call common main.
     */
    rc = rtThreadMain(pThread, (uintptr_t)Self, &pThread->szName[0]);

    /* Clear the TLS before exiting so late TLS destructors don't see a stale pointer. */
    pthread_setspecific(g_SelfKey, NULL);
    pthread_exit((void *)(intptr_t)rc);
    return (void *)(intptr_t)rc;
}
/**
 * Register builtin devices.
 *
 * @returns VBox status code.
 * @param   pCallbacks      Pointer to the callback table.
 * @param   u32Version      VBox version number.
 */
extern "C" DECLEXPORT(int) VBoxDevicesRegister(PPDMDEVREGCB pCallbacks, uint32_t u32Version)
{
    LogFlow(("VBoxDevicesRegister: u32Version=%#x\n", u32Version));
    AssertReleaseMsg(u32Version == VBOX_VERSION, ("u32Version=%#x VBOX_VERSION=%#x\n", u32Version, VBOX_VERSION));

    /* Register each device; the chain short-circuits on the first failure. */
    int vrc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceAPIC);
    if (RT_SUCCESS(vrc))
        vrc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceIOAPIC);
    if (RT_SUCCESS(vrc))
        vrc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceLPC);

    if (RT_FAILURE(vrc))
        return vrc;
    return VINF_SUCCESS;
}
/**
 * Leaves a critical section entered with RTCritSectEnter.
 *
 * @returns VINF_SUCCESS (also for NOP sections), or a lock-validator failure code.
 * @param   pCritSect   The critical section. Caller must be the current owner.
 */
RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
{
    /*
     * Assert sanity and check for NOP.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Assert ownership and so on.
     */
    Assert(pCritSect->cNestings > 0);
    Assert(pCritSect->cLockers >= 0);
    Assert(pCritSect->NativeThreadOwner == RTThreadNativeSelf());

#ifdef RTCRITSECT_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(pCritSect->pValidatorRec, pCritSect->cNestings == 1);
    if (RT_FAILURE(rc9))
        return rc9;
#endif

    /*
     * Decrement nestings, if <= 0 when we'll release the critsec.
     */
    pCritSect->cNestings--;
    if (pCritSect->cNestings > 0)
        /* Still nested: just drop one locker reference, keep ownership. */
        ASMAtomicDecS32(&pCritSect->cLockers);
    else
    {
        /*
         * Set owner to zero.
         * Decrement waiters, if >= 0 then we have to wake one of them up.
         */
        ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
        {
            int rc = RTSemEventSignal(pCritSect->EventSem);
            AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
        }
    }
    return VINF_SUCCESS;
}
/** * Returns the physical address for a virtual address. * * @param pv The virtual address. * * @returns The physical address corresponding to @a pv. */ static uint64_t rtR0MemObjSolVirtToPhys(void *pv) { struct hat *pHat = NULL; pfn_t PageFrameNum = 0; uintptr_t uVirtAddr = (uintptr_t)pv; if (SOL_IS_KRNL_ADDR(pv)) pHat = kas.a_hat; else { proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf(); AssertRelease(pProcess); pHat = pProcess->p_as->a_hat; } PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK)); AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv)); return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK)); }
/** @copydoc RTRANDINT::pfnGetBytes */
static DECLCALLBACK(void) rtRandAdvPosixGetBytes(PRTRANDINT pThis, uint8_t *pb, size_t cb)
{
    /* Read random bytes from the already-open device; a single read may be short. */
    ssize_t cbRead = read(pThis->u.File.hFile, pb, cb);
    if ((size_t)cbRead != cb)
    {
        /*
         * Retry short reads, advancing past what we already got.
         * Negative cbRead (read error) leaves the buffer position untouched.
         * NOTE(review): %zu is used for the signed cbRead in the message below
         * (%zd would match ssize_t) - presumably harmless with the IPRT
         * formatter, confirm.
         */
        ssize_t cTries = RT_MIN(cb, 256);
        do
        {
            if (cbRead > 0)
            {
                cb -= cbRead;
                pb += cbRead;
            }
            cbRead = read(pThis->u.File.hFile, pb, cb);
        } while (   (size_t)cbRead != cb
                 && cTries-- > 0);
        AssertReleaseMsg((size_t)cbRead == cb, ("%zu != %zu, cTries=%zd errno=%d\n", cbRead, cb, cTries, errno));
    }
}
/** @interface_method_impl{PDMDEVHLPR0,pfnPCIPhysWrite} */
static DECLCALLBACK(int) pdmR0DevHlp_PCIPhysWrite(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);

#ifndef PDM_DO_NOT_RESPECT_PCI_BM_BIT
    /*
     * Just check the busmaster setting here and forward the request to the generic write helper.
     */
    PPCIDEVICE pPciDev = pDevIns->Internal.s.pPciDeviceR0;
    AssertReleaseMsg(pPciDev, ("No PCI device registered!\n"));

    if (!PCIDevIsBusmaster(pPciDev))
    {
        /* Fixed: log prefix said pdmRCDevHlp_ although this is the R0 helper. */
        Log(("pdmR0DevHlp_PCIPhysWrite: caller=%p/%d: returns %Rrc - Not bus master! GCPhys=%RGp cbWrite=%#zx\n",
             pDevIns, pDevIns->iInstance, VERR_PDM_NOT_PCI_BUS_MASTER, GCPhys, cbWrite));
        return VERR_PDM_NOT_PCI_BUS_MASTER;
    }
#endif

    return pDevIns->pHlpR0->pfnPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
}
/** @copydoc RTRANDINT::pfnGetBytes */
static DECLCALLBACK(void) rtRandAdvPosixGetBytes(PRTRANDINT pThis, uint8_t *pb, size_t cb)
{
    /* Read random bytes from the already-open device; a single read may be short. */
    ssize_t cbRead = read(pThis->u.File.hFile, pb, cb);
    if ((size_t)cbRead != cb)
    {
        /* S10 has been observed returning 1040 bytes at the time from
           /dev/urandom.  Which means we need to do than 256 rounds to reach
           668171 bytes if that's what demanded by the caller (like
           tstRTMemWipe.cpp). */
        ssize_t cTries = RT_MAX(256, cb / 64);
        do
        {
            /* Advance past what the previous (short) read delivered; a
               negative cbRead (error) leaves the buffer position untouched. */
            if (cbRead > 0)
            {
                cb -= cbRead;
                pb += cbRead;
            }
            cbRead = read(pThis->u.File.hFile, pb, cb);
        } while (   (size_t)cbRead != cb
                 && cTries-- > 0);
        AssertReleaseMsg((size_t)cbRead == cb, ("%zu != %zu, cTries=%zd errno=%d\n", cbRead, cb, cTries, errno));
    }
}
/**
 * Transforms the guest CPU state to raw-ring mode.
 *
 * This function will change the any of the cs and ss register with DPL=0 to DPL=1.
 *
 * Used by emInterpretIret() after the new state has been loaded.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtxCore    The context core (for trap usage).
 * @see     @ref pg_raw
 * @remarks Will be probably obsoleted by #5653 (it will leave and reenter raw
 *          mode instead, I think).
 */
VMMDECL(void) CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    /*
     * Are we in Ring-0?
     */
    if (    pCtxCore->ss.Sel
        && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
        && !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Set CPL to Ring-1.
         */
        pCtxCore->ss.Sel |= 1;
        if (    pCtxCore->cs.Sel
            && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
            pCtxCore->cs.Sel |= 1;
    }
    else
    {
        /* Optionally push ring-1 guests further down to ring-2 when raw ring-1 is enabled. */
        if (   EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM))
            && !pCtxCore->eflags.Bits.u1VM
            && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
        {
            /* Set CPL to Ring-2. */
            pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
            if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
                pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
        }
    }

    /*
     * Assert sanity.
     */
    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
    AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));

    pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
}
/**
 * Handles the actual drop event (mouse button released) for a host-to-guest
 * drag and drop operation: asks the guest which format it wants, converts the
 * host MIME data accordingly and streams it to the guest.
 *
 * @returns The Qt drop action the guest settled on (Qt::IgnoreAction on
 *          refusal or error).
 * @param   screenID        Guest screen the drop happened on.
 * @param   x               Drop x position (guest screen coordinates).
 * @param   y               Drop y position (guest screen coordinates).
 * @param   proposedAction  Action proposed by the host.
 * @param   possibleActions All actions the host allows.
 * @param   pMimeData       The host-side MIME data being dragged.
 */
Qt::DropAction UIDnDHandler::dragDrop(ulong screenID, int x, int y,
                                      Qt::DropAction proposedAction,
                                      Qt::DropActions possibleActions,
                                      const QMimeData *pMimeData)
{
    LogFlowFunc(("enmOpMode=%RU32, screenID=%RU32, x=%d, y=%d, action=%ld\n",
                 m_enmOpMode, screenID, x, y, toVBoxDnDAction(proposedAction)));

    if (m_enmOpMode != DNDMODE_HOSTTOGUEST)
        return Qt::IgnoreAction;

    /* The format the guest requests. */
    QString strFormat;
    /* Ask the guest for dropping data. */
    KDnDAction enmResult = m_dndTarget.Drop(screenID, x, y,
                                            toVBoxDnDAction(proposedAction),
                                            toVBoxDnDActions(possibleActions),
                                            pMimeData->formats().toVector(), strFormat);

    /* Has the guest accepted the drop event? */
    if (   m_dndTarget.isOk()
        && enmResult != KDnDAction_Ignore)
    {
        LogFlowFunc(("strFormat=%s ...\n", strFormat.toUtf8().constData()));

        QByteArray arrBytes;

        /*
         * Does the host support the format requested by the guest?
         * Lookup the format in the MIME data object.
         */
        AssertPtr(pMimeData);
        if (pMimeData->formats().indexOf(strFormat) >= 0)
        {
            arrBytes = pMimeData->data(strFormat);
            Assert(!arrBytes.isEmpty());
        }
        /*
         * The host does not support the format requested by the guest.
         * This can happen if the host wants to send plain text, for example, but
         * the guest requested something else, e.g. an URI list.
         *
         * In that case dictate the guest to use a fixed format from the host,
         * so instead returning the requested URI list, return the original
         * data format from the host. The guest has to try to deal with that then.
         */
        else
        {
            LogRel3(("DnD: Guest requested a different format '%s'\n", strFormat.toUtf8().constData()));
            LogRel3(("DnD: The host offered:\n"));
#if 0
            for (QStringList::iterator itFmt  = pMimeData->formats().begin();
                                       itFmt != pMimeData->formats().end(); itFmt++)
            {
                QString strTemp = *itFmt;
                LogRel3(("DnD: \t%s\n", strTemp.toUtf8().constData()));
            }
#endif
            if (pMimeData->hasText())
            {
                LogRel3(("DnD: Converting data to text ...\n"));
                arrBytes  = pMimeData->text().toUtf8();
                strFormat = "text/plain;charset=utf-8";
            }
            else
            {
                LogRel(("DnD: Error: Could not convert host format to guest format\n"));
                enmResult = KDnDAction_Ignore;
            }
        }

        if (arrBytes.size()) /* Anything to send? */
        {
            /* Convert data to a vector. */
            QVector<uint8_t> vecData(arrBytes.size()); /** @todo Can this throw or anything? */
            AssertReleaseMsg(vecData.size() == arrBytes.size(), ("Drag and drop format buffer size does not match"));
            memcpy(vecData.data(), arrBytes.constData(), arrBytes.size());

            /* Send data to the guest. */
            LogRel3(("DnD: Host is sending %d bytes of data as '%s'\n", vecData.size(), strFormat.toUtf8().constData()));
            CProgress progress = m_dndTarget.SendData(screenID, strFormat, vecData);

            if (m_dndTarget.isOk())
            {
                /* Block with a modal progress dialog while the transfer runs. */
                msgCenter().showModalProgressDialog(progress,
                                                    tr("Dropping data ..."), ":/progress_dnd_hg_90px.png",
                                                    m_pParent);

                LogFlowFunc(("Transfer fCompleted=%RTbool, fCanceled=%RTbool, hr=%Rhrc\n",
                             progress.GetCompleted(), progress.GetCanceled(), progress.GetResultCode()));

                BOOL fCanceled = progress.GetCanceled();
                if (   !fCanceled
                    && (   !progress.isOk()
                        ||  progress.GetResultCode() != 0))
                {
                    msgCenter().cannotDropDataToGuest(progress, m_pParent);
                    enmResult = KDnDAction_Ignore;
                }
            }
            else
            {
                msgCenter().cannotDropDataToGuest(m_dndTarget, m_pParent);
                enmResult = KDnDAction_Ignore;
            }
        }
        else /* Error. */
            enmResult = KDnDAction_Ignore;
    }

    /*
     * Since the mouse button has been released this in any case marks
     * the end of the current transfer direction. So reset the current
     * mode as well here.
     */
    setOpMode(DNDMODE_UNKNOWN);

    return toQtDnDAction(enmResult);
}
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 ** @note Only here we are allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @param   rawRC       Raw mode return code
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    /* Merge the virtualized flag bits saved in the GC state back into the real EFLAGS. */
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtxCore->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC),
                     ("Inconsistent state at %RRv rc=%Rrc\n", pCtxCore->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC),
                     ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));

#ifdef IN_RING3
    /* Try to switch execution back from patch code to the original guest code. */
    if (   (efl & X86_EFL_IF)
        && fPatchCode)
    {
        if (   rawRC < VINF_PATM_LEAVE_RC_FIRST
            || rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtxCore->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0; /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtxCore->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else  /* !IN_RING3 */
    AssertMsgFailed(("!IN_RING3"));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        /* Propagate a pending interrupt-inhibit window back to EM when we leave patch code. */
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtxCore->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtxCore->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
/**
 * Free memory allocated using MMHyperAlloc().
 * The caller validates the parameters of this request.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pv      The memory to free.
 * @remark  Try avoid free hyper memory.
 */
static int mmHyperFreeInternal(PVM pVM, void *pv)
{
    Log2(("MMHyperFree: pv=%p\n", pv));
    if (!pv)
        return VINF_SUCCESS;
    AssertMsgReturn(RT_ALIGN_P(pv, MMHYPER_HEAP_ALIGN_MIN) == pv,
                    ("Invalid pointer %p!\n", pv),
                    VERR_INVALID_POINTER);

    /*
     * Get the heap and stats.
     * Validate the chunk at the same time.
     */
    PMMHYPERCHUNK pChunk = (PMMHYPERCHUNK)((PMMHYPERCHUNK)pv - 1);

    AssertMsgReturn(    (uintptr_t)pChunk + pChunk->offNext >= (uintptr_t)pChunk
                    ||  RT_ALIGN_32(pChunk->offNext, MMHYPER_HEAP_ALIGN_MIN) != pChunk->offNext,
                    ("%p: offNext=%#RX32\n", pv, pChunk->offNext),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(MMHYPERCHUNK_ISUSED(pChunk),
                    ("%p: Not used!\n", pv),
                    VERR_INVALID_POINTER);

    int32_t offPrev = MMHYPERCHUNK_GET_OFFPREV(pChunk);
    AssertMsgReturn(    (uintptr_t)pChunk + offPrev <= (uintptr_t)pChunk
                    && !((uint32_t)-offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)),
                    ("%p: offPrev=%#RX32!\n", pv, offPrev),
                    VERR_INVALID_POINTER);

    /* statistics */
#ifdef VBOX_WITH_STATISTICS
    PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pChunk + pChunk->offStat);
    AssertMsgReturn(    RT_ALIGN_P(pStat, MMHYPER_HEAP_ALIGN_MIN) == (void *)pStat
                    &&  pChunk->offStat,
                    ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
                    VERR_INVALID_POINTER);
#else
    AssertMsgReturn(!pChunk->offStat,
                    ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
                    VERR_INVALID_POINTER);
#endif

    /* The heap structure. */
    PMMHYPERHEAP pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
    AssertMsgReturn(    !((uintptr_t)pHeap & PAGE_OFFSET_MASK)
                    &&  pChunk->offHeap,
                    ("%p: pHeap=%#x offHeap=%RX32\n", pv, pHeap->u32Magic, pChunk->offHeap),
                    VERR_INVALID_POINTER);
    AssertMsgReturn(pHeap->u32Magic == MMHYPERHEAP_MAGIC,
                    ("%p: u32Magic=%#x\n", pv, pHeap->u32Magic),
                    VERR_INVALID_POINTER);
    Assert(pHeap == pVM->mm.s.CTX_SUFF(pHyperHeap));

    /* Some more verifications using additional info from pHeap. */
    AssertMsgReturn((uintptr_t)pChunk + offPrev >= (uintptr_t)pHeap->CTX_SUFF(pbHeap),
                    ("%p: offPrev=%#RX32!\n", pv, offPrev),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(pChunk->offNext < pHeap->cbHeap,
                    ("%p: offNext=%#RX32!\n", pv, pChunk->offNext),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(   (uintptr_t)pv - (uintptr_t)pHeap->CTX_SUFF(pbHeap) <= pHeap->offPageAligned,
                    ("Invalid pointer %p! (heap: %p-%p)\n", pv, pHeap->CTX_SUFF(pbHeap),
                     (char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned),
                    VERR_INVALID_POINTER);

#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif

#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
    /* calc block size. */
    const uint32_t cbChunk = pChunk->offNext
        ? pChunk->offNext
        : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
#endif
#ifdef MMHYPER_HEAP_FREE_POISON
    /* poison the block */
    memset(pChunk + 1, MMHYPER_HEAP_FREE_POISON, cbChunk - sizeof(*pChunk));
#endif

#ifdef MMHYPER_HEAP_FREE_DELAY
# ifdef MMHYPER_HEAP_FREE_POISON
    /*
     * Check poison.
     */
    unsigned i = RT_ELEMENTS(pHeap->aDelayedFrees);
    while (i-- > 0)
        if (pHeap->aDelayedFrees[i].offChunk)
        {
            PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[i].offChunk);
            const size_t cb = pCur->offNext
                ? pCur->offNext - sizeof(*pCur)
                : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pCur - sizeof(*pCur);
            uint8_t *pab = (uint8_t *)(pCur + 1);
            for (unsigned off = 0; off < cb; off++)
                AssertReleaseMsg(pab[off] == 0xCB,
                                 ("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
                                  pHeap->aDelayedFrees[i].uCaller, cb, off, RT_MIN(cb - off, 32), &pab[off]));
        }
# endif /* MMHYPER_HEAP_FREE_POISON */

    /*
     * Delayed freeing.
     */
    int rc = VINF_SUCCESS;
    if (pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk)
    {
        PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
        rc = mmHyperFree(pHeap, pChunkFree);
    }
    pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk = (uintptr_t)pChunk - (uintptr_t)pHeap;
    pHeap->aDelayedFrees[pHeap->iDelayedFree].uCaller = (uintptr_t)ASMReturnAddress();
    pHeap->iDelayedFree = (pHeap->iDelayedFree + 1) % RT_ELEMENTS(pHeap->aDelayedFrees);

#else  /* !MMHYPER_HEAP_FREE_POISON */
    /*
     * Call the worker.
     */
    int rc = mmHyperFree(pHeap, pChunk);
#endif /* !MMHYPER_HEAP_FREE_POISON */

    /*
     * Update statistics.
     */
#ifdef VBOX_WITH_STATISTICS
    pStat->cFrees++;
    if (RT_SUCCESS(rc))
    {
        pStat->cbFreed        += cbChunk;
        pStat->cbCurAllocated -= cbChunk;
    }
    else
        pStat->cFailures++;
#endif

    return rc;
}
/**
 * Register builtin drivers.
 *
 * @returns VBox status code.
 * @param   pCallbacks      Pointer to the callback table.
 * @param   u32Version      VBox version number.
 */
extern "C" DECLEXPORT(int) VBoxDriversRegister(PCPDMDRVREGCB pCallbacks, uint32_t u32Version)
{
    LogFlow(("VBoxDriversRegister: u32Version=%#x\n", u32Version));
    AssertReleaseMsg(u32Version == VBOX_VERSION, ("u32Version=%#x VBOX_VERSION=%#x\n", u32Version, VBOX_VERSION));

    /* Input queues. */
    int rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvMouseQueue);
    if (RT_FAILURE(rc))
        return rc;
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvKeyboardQueue);
    if (RT_FAILURE(rc))
        return rc;

    /* Storage drivers. */
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvVD);
    if (RT_FAILURE(rc))
        return rc;
#if defined(RT_OS_DARWIN) || defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS) || defined(RT_OS_FREEBSD)
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostDVD);
    if (RT_FAILURE(rc))
        return rc;
#endif
#if defined(RT_OS_LINUX) || defined(RT_OS_WINDOWS)
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostFloppy);
    if (RT_FAILURE(rc))
        return rc;
#endif

    /* Network drivers. */
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvNAT);
    if (RT_FAILURE(rc))
        return rc;
#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostInterface);
    if (RT_FAILURE(rc))
        return rc;
#endif
#ifdef VBOX_WITH_UDPTUNNEL
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvUDPTunnel);
    if (RT_FAILURE(rc))
        return rc;
#endif
#ifdef VBOX_WITH_VDE
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvVDE);
    if (RT_FAILURE(rc))
        return rc;
#endif
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvIntNet);
    if (RT_FAILURE(rc))
        return rc;
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvDedicatedNic);
    if (RT_FAILURE(rc))
        return rc;
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvNetSniffer);
    if (RT_FAILURE(rc))
        return rc;
#ifdef VBOX_WITH_NETSHAPER
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvNetShaper);
    if (RT_FAILURE(rc))
        return rc;
#endif

    /* Audio drivers. */
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvAUDIO);
    if (RT_FAILURE(rc))
        return rc;
#ifdef VBOX_WITH_AUDIO_DEBUG
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostDebugAudio);
    if (RT_FAILURE(rc))
        return rc;
#endif
#ifdef VBOX_WITH_AUDIO_VALIDATIONKIT
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostValidationKitAudio);
    if (RT_FAILURE(rc))
        return rc;
#endif
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostNullAudio);
    if (RT_FAILURE(rc))
        return rc;
#if defined(RT_OS_WINDOWS)
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostDSound);
    if (RT_FAILURE(rc))
        return rc;
#endif
#if defined(RT_OS_DARWIN)
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostCoreAudio);
    if (RT_FAILURE(rc))
        return rc;
#endif
#ifdef VBOX_WITH_AUDIO_ALSA
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostALSAAudio);
    if (RT_FAILURE(rc))
        return rc;
#endif
#ifdef VBOX_WITH_AUDIO_OSS
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostOSSAudio);
    if (RT_FAILURE(rc))
        return rc;
#endif
#ifdef VBOX_WITH_AUDIO_PULSE
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostPulseAudio);
    if (RT_FAILURE(rc))
        return rc;
#endif

    /* ACPI drivers. */
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvACPI);
    if (RT_FAILURE(rc))
        return rc;
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvAcpiCpu);
    if (RT_FAILURE(rc))
        return rc;

    /* USB drivers. */
#ifdef VBOX_WITH_VUSB
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvVUSBRootHub);
    if (RT_FAILURE(rc))
        return rc;
#endif
#ifdef VBOX_WITH_USB_VIDEO_IMPL
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostWebcam);
    if (RT_FAILURE(rc))
        return rc;
#endif

    /* Character / stream drivers. */
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvNamedPipe);
    if (RT_FAILURE(rc))
        return rc;
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvTCP);
    if (RT_FAILURE(rc))
        return rc;
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvUDP);
    if (RT_FAILURE(rc))
        return rc;
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvRawFile);
    if (RT_FAILURE(rc))
        return rc;
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvChar);
    if (RT_FAILURE(rc))
        return rc;
#if defined(RT_OS_LINUX) || defined(VBOX_WITH_WIN_PARPORT_SUP)
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostParallel);
    if (RT_FAILURE(rc))
        return rc;
#endif
#if defined(RT_OS_DARWIN) || defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS) || defined(RT_OS_FREEBSD)
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvHostSerial);
    if (RT_FAILURE(rc))
        return rc;
#endif

    /* Storage filter / misc drivers. */
#ifdef VBOX_WITH_SCSI
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvSCSI);
    if (RT_FAILURE(rc))
        return rc;
#endif
#ifdef VBOX_WITH_DRV_DISK_INTEGRITY
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvDiskIntegrity);
    if (RT_FAILURE(rc))
        return rc;
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvRamDisk);
    if (RT_FAILURE(rc))
        return rc;
#endif
#ifdef VBOX_WITH_PCI_PASSTHROUGH_IMPL
    rc = pCallbacks->pfnRegister(pCallbacks, &g_DrvPciRaw);
    if (RT_FAILURE(rc))
        return rc;
#endif

    return VINF_SUCCESS;
}
/** * Register builtin devices. * * @returns VBox status code. * @param pCallbacks Pointer to the callback table. * @param u32Version VBox version number. */ extern "C" DECLEXPORT(int) VBoxDevicesRegister(PPDMDEVREGCB pCallbacks, uint32_t u32Version) { LogFlow(("VBoxDevicesRegister: u32Version=%#x\n", u32Version)); AssertReleaseMsg(u32Version == VBOX_VERSION, ("u32Version=%#x VBOX_VERSION=%#x\n", u32Version, VBOX_VERSION)); int rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePCI); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePciIch9); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePcArch); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePcBios); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceIOAPIC); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePS2KeyboardMouse); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePIIX3IDE); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceI8254); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceI8259); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceHPET); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceSmc); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceFlash); if (RT_FAILURE(rc)) return rc; #ifdef VBOX_WITH_EFI rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceEFI); if (RT_FAILURE(rc)) return rc; #endif rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceMC146818); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceVga); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceVMMDev); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePCNet); if (RT_FAILURE(rc)) return rc; #ifdef 
VBOX_WITH_E1000 rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceE1000); if (RT_FAILURE(rc)) return rc; #endif #ifdef VBOX_WITH_VIRTIO rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceVirtioNet); if (RT_FAILURE(rc)) return rc; #endif #ifdef VBOX_WITH_INIP rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceINIP); if (RT_FAILURE(rc)) return rc; #endif rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceICHAC97); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceSB16); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceHDA); if (RT_FAILURE(rc)) return rc; #ifdef VBOX_WITH_VUSB rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceOHCI); if (RT_FAILURE(rc)) return rc; #endif #ifdef VBOX_WITH_EHCI_IMPL rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceEHCI); if (RT_FAILURE(rc)) return rc; #endif #ifdef VBOX_WITH_XHCI_IMPL rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceXHCI); if (RT_FAILURE(rc)) return rc; #endif #ifdef VBOX_ACPI rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceACPI); if (RT_FAILURE(rc)) return rc; #endif rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceDMA); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceFloppyController); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceSerialPort); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceOxPcie958); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceParallelPort); if (RT_FAILURE(rc)) return rc; #ifdef VBOX_WITH_AHCI rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceAHCI); if (RT_FAILURE(rc)) return rc; #endif #ifdef VBOX_WITH_BUSLOGIC rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceBusLogic); if (RT_FAILURE(rc)) return rc; #endif rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePCIBridge); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePciIch9Bridge); if (RT_FAILURE(rc)) return rc; 
#ifdef VBOX_WITH_LSILOGIC rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceLsiLogicSCSI); if (RT_FAILURE(rc)) return rc; rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceLsiLogicSAS); if (RT_FAILURE(rc)) return rc; #endif #ifdef VBOX_WITH_NVME_IMPL rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceNVMe); if (RT_FAILURE(rc)) return rc; #endif #ifdef VBOX_WITH_PCI_PASSTHROUGH_IMPL rc = pCallbacks->pfnRegister(pCallbacks, &g_DevicePciRaw); if (RT_FAILURE(rc)) return rc; #endif rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceGIMDev); if (RT_FAILURE(rc)) return rc; #ifdef VBOX_WITH_NEW_LPC_DEVICE rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceLPC); if (RT_FAILURE(rc)) return rc; #endif #ifdef VBOX_WITH_VIRTUALKD rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceVirtualKD); if (RT_FAILURE(rc)) return rc; #endif return VINF_SUCCESS; }
/**
 * The emulation thread main function, with Virtual CPU ID for debugging.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
 */
int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
{
    PUVM pUVM = pUVCpu->pUVM;
    int  rc;

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    /* Associate this thread with its UVMCPU so per-EMT lookups work. */
    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtInit)
        pUVM->pVmm2UserMethods->pfnNotifyEmtInit(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    /*
     * The request loop.
     */
    rc = VINF_SUCCESS;
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
    for (;;)
    {
        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            /*
             * Only the first VCPU may initialize the VM during early init
             * and must therefore service all VMCPUID_ANY requests.
             * See also VMR3Create
             */
            if (    (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
                &&  pUVCpu->idCpu == 0)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }
        }
        else
        {
            /*
             * Pending requests which needs servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            /* Rendezvous takes precedence over everything else below. */
            if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
            {
                rc = VMMR3EmtRendezvousFF(pVM, &pVM->aCpus[idCpu]);
                Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                /* NOTE(review): TESTANDCLEAR above already cleared the flag;
                   this second clear looks redundant — confirm before removing. */
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially resume
         * or start the VM, in that case we'll get a change in VM status
         * indicating that we're now running.
         */
        if (    RT_SUCCESS(rc)
            &&  pUVM->pVM)
        {
            PVM    pVM   = pUVM->pVM;
            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            if (    pVM->enmVMState == VMSTATE_RUNNING
                &&  VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
            {
                rc = EMR3ExecuteVM(pVM, pVCpu);
                Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
                if (EMGetState(pVCpu) == EMSTATE_GURU_MEDITATION)
                    vmR3SetGuruMeditation(pVM);
            }
        }

    } /* forever */

    /*
     * Cleanup and exit.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    /* EMT(0) is responsible for tearing down the VM structure on exit. */
    if (   idCpu == 0
        && pUVM->pVM)
    {
        PVM pVM = pUVM->pVM;
        vmR3SetTerminated(pVM);
        pUVM->pVM = NULL;

        /** @todo SMP: This isn't 100% safe. We should wait for the other
         *        threads to finish before destroy the VM. */
        int rc2 = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertLogRelRC(rc2);
    }

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtTerm)
        pUVM->pVmm2UserMethods->pfnNotifyEmtTerm(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    /* Mark this EMT as gone before the thread handle becomes invalid. */
    pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}