/**
 * Create a queue with a device owner.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the VM.
 * @param   pDevIns             Device instance.
 * @param   cbItem              Size a queue item.
 * @param   cItems              Number of items in the queue.
 * @param   cMilliesInterval    Number of milliseconds between polling the queue.
 *                              If 0 then the emulation thread will be notified whenever an item arrives.
 * @param   pfnCallback         The consumer function.
 * @param   fRZEnabled          Set if the queue must be usable from RC/R0.
 * @param   pszName             The queue name. Unique. Not copied.
 * @param   ppQueue             Where to store the queue handle on success.
 * @thread  Emulation thread only.
 */
VMMR3_INT_DECL(int) PDMR3QueueCreateDevice(PVM pVM, PPDMDEVINS pDevIns, size_t cbItem, uint32_t cItems,
                                           uint32_t cMilliesInterval, PFNPDMQUEUEDEV pfnCallback, bool fRZEnabled,
                                           const char *pszName, PPDMQUEUE *ppQueue)
{
    LogFlow(("PDMR3QueueCreateDevice: pDevIns=%p cbItem=%d cItems=%d cMilliesInterval=%d pfnCallback=%p fRZEnabled=%RTbool pszName=%s\n",
             pDevIns, cbItem, cItems, cMilliesInterval, pfnCallback, fRZEnabled, pszName));

    /*
     * Validate input.
     */
    VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
    if (!pfnCallback)
    {
        AssertMsgFailed(("No consumer callback!\n"));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Create the queue.
     */
    PPDMQUEUE pQueue;
    int rc = pdmR3QueueCreate(pVM, cbItem, cItems, cMilliesInterval, fRZEnabled, pszName, &pQueue);
    if (RT_SUCCESS(rc))
    {
        pQueue->enmType = PDMQUEUETYPE_DEV;
        pQueue->u.Dev.pDevIns = pDevIns;
        pQueue->u.Dev.pfnCallback = pfnCallback;
        *ppQueue = pQueue;
        /* Fix: pQueue was missing from the argument list, so the format string
           had six conversions but only five arguments (misaligned varargs). */
        Log(("PDM: Created device queue %p; cbItem=%d cItems=%d cMillies=%d pfnCallback=%p pDevIns=%p\n",
             pQueue, cbItem, cItems, cMilliesInterval, pfnCallback, pDevIns));
    }
    return rc;
}
/**
 * Create a queue with a driver owner.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the VM.
 * @param   pDrvIns             Driver instance.
 * @param   cbItem              Size a queue item.
 * @param   cItems              Number of items in the queue.
 * @param   cMilliesInterval    Number of milliseconds between polling the queue.
 *                              If 0 then the emulation thread will be notified whenever an item arrives.
 * @param   pfnCallback         The consumer function.
 * @param   pszName             The queue name. Unique. Not copied.
 * @param   ppQueue             Where to store the queue handle on success.
 * @thread  Emulation thread only.
 */
VMMR3_INT_DECL(int) PDMR3QueueCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, size_t cbItem, uint32_t cItems,
                                           uint32_t cMilliesInterval, PFNPDMQUEUEDRV pfnCallback,
                                           const char *pszName, PPDMQUEUE *ppQueue)
{
    LogFlow(("PDMR3QueueCreateDriver: pDrvIns=%p cbItem=%d cItems=%d cMilliesInterval=%d pfnCallback=%p pszName=%s\n",
             pDrvIns, cbItem, cItems, cMilliesInterval, pfnCallback, pszName));

    /*
     * Validate input.
     */
    VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);

    /*
     * Create the queue.
     */
    PPDMQUEUE pQueue;
    int rc = pdmR3QueueCreate(pVM, cbItem, cItems, cMilliesInterval, false, pszName, &pQueue);
    if (RT_SUCCESS(rc))
    {
        pQueue->enmType = PDMQUEUETYPE_DRV;
        pQueue->u.Drv.pDrvIns = pDrvIns;
        pQueue->u.Drv.pfnCallback = pfnCallback;
        *ppQueue = pQueue;
        /* Fix: pQueue was missing from the argument list, so the format string
           had six conversions but only five arguments (misaligned varargs). */
        Log(("PDM: Created driver queue %p; cbItem=%d cItems=%d cMillies=%d pfnCallback=%p pDrvIns=%p\n",
             pQueue, cbItem, cItems, cMilliesInterval, pfnCallback, pDrvIns));
    }
    return rc;
}
/**
 * Disables all host calls, except certain fatal ones.
 *
 * Increments the per-VCPU cCallRing3Disabled counter; while it is non-zero,
 * VMMRZCallRing3 rejects everything but VMMCALLRING3_VM_R0_ASSERTION (see that
 * function in this file).  Must be paired with VMMRZCallRing3Enable.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  EMT.
 */
VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
#if defined(LOG_ENABLED) && defined(IN_RING0)
    RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
#endif

    /* Nesting is expected to stay shallow; 16 is an arbitrary sanity bound. */
    Assert(pVCpu->vmm.s.cCallRing3Disabled < 16);
    if (ASMAtomicUoIncU32(&pVCpu->vmm.s.cCallRing3Disabled) == 1)
    {
        /** @todo it might make more sense to just disable logging here, then we
         * won't flush away important bits... but that goes both ways really. */
        /* On the 0 -> 1 transition, suppress logger flushing for this context
           (presumably because flushing would itself require a ring-3 call —
           NOTE(review): confirm against the logger implementation). */
#ifdef IN_RC
        pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = true;
#else
# ifdef LOG_ENABLED
        if (pVCpu->vmm.s.pR0LoggerR0)
            pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
# endif
#endif
    }

#if defined(LOG_ENABLED) && defined(IN_RING0)
    ASMSetFlags(fFlags);
#endif
}
/**
 * Disassembles the instruction at RIP and if it's a hypercall
 * instruction, performs the hypercall.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   pcbInstr    Where to store the disassembled instruction length.
 *                      Optional, can be NULL.
 *
 * @todo This interface should disappear when IEM/REM execution engines
 *       handle VMCALL/VMMCALL instructions to call into GIM when
 *       required. See @bugref{7270#c168}.
 */
VMM_INT_DECL(VBOXSTRICTRC) GIMExecHypercallInstr(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t *pcbInstr)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT(pVCpu);

    if (RT_UNLIKELY(!GIMIsEnabled(pVM)))
        return VERR_GIM_NOT_ENABLED;

    /* Disassemble the current instruction; bail out if that fails. */
    DISCPUSTATE DisState;
    unsigned    cbDisasmed;
    int rcDis = EMInterpretDisasCurrent(pVM, pVCpu, &DisState, &cbDisasmed);
    if (RT_FAILURE(rcDis))
    {
        Log(("GIM: GIMExecHypercallInstr: Failed to disassemble CS:RIP=%04x:%08RX64. rc=%Rrc\n", pCtx->cs.Sel, pCtx->rip, rcDis));
        return rcDis;
    }

    if (pcbInstr)
        *pcbInstr = (uint8_t)cbDisasmed;

    /* Dispatch to the configured GIM provider. */
    switch (pVM->gim.s.enmProviderId)
    {
        case GIMPROVIDERID_HYPERV:
            return gimHvExecHypercallInstr(pVCpu, pCtx, &DisState);

        case GIMPROVIDERID_KVM:
            return gimKvmExecHypercallInstr(pVCpu, pCtx, &DisState);

        default:
            AssertMsgFailed(("GIMExecHypercallInstr: for provider %u not available/implemented\n", pVM->gim.s.enmProviderId));
            return VERR_GIM_HYPERCALLS_NOT_AVAILABLE;
    }
}
/**
 * Call this to single step programmatically.
 *
 * You must pass down the return code to the EM loop! That's
 * where the actual single stepping take place (at least in the
 * current implementation).
 *
 * @returns VINF_EM_DBG_STEP
 *
 * @param   pVCpu   Pointer to the VMCPU.
 *
 * @thread  VCpu EMT
 */
VMMR3DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* Arm the raw single-stepping flag; EM acts on the returned status code. */
    pVCpu->dbgf.s.fSingleSteppingRaw = true;
    return VINF_EM_DBG_STEP;
}
/**
 * Create a queue with an external owner.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the VM.
 * @param   cbItem              Size a queue item.
 * @param   cItems              Number of items in the queue.
 * @param   cMilliesInterval    Number of milliseconds between polling the queue.
 *                              If 0 then the emulation thread will be notified whenever an item arrives.
 * @param   pfnCallback         The consumer function.
 * @param   pvUser              The user argument to the consumer function.
 * @param   pszName             The queue name. Unique. Not copied.
 * @param   ppQueue             Where to store the queue handle on success.
 * @thread  Emulation thread only.
 */
VMMR3_INT_DECL(int) PDMR3QueueCreateExternal(PVM pVM, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval,
                                             PFNPDMQUEUEEXT pfnCallback, void *pvUser,
                                             const char *pszName, PPDMQUEUE *ppQueue)
{
    LogFlow(("PDMR3QueueCreateExternal: cbItem=%d cItems=%d cMilliesInterval=%d pfnCallback=%p pszName=%s\n",
             cbItem, cItems, cMilliesInterval, pfnCallback, pszName));

    /*
     * Validate input.
     */
    VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);

    /*
     * Create the queue.
     */
    PPDMQUEUE pQueue;
    int rc = pdmR3QueueCreate(pVM, cbItem, cItems, cMilliesInterval, false, pszName, &pQueue);
    if (RT_SUCCESS(rc))
    {
        pQueue->enmType = PDMQUEUETYPE_EXTERNAL;
        pQueue->u.Ext.pvUser = pvUser;
        pQueue->u.Ext.pfnCallback = pfnCallback;
        *ppQueue = pQueue;
        /* Fix: pQueue was missing from the argument list, so the format string
           had six conversions but only five arguments (misaligned varargs). */
        Log(("PDM: Created external queue %p; cbItem=%d cItems=%d cMillies=%d pfnCallback=%p pvUser=%p\n",
             pQueue, cbItem, cItems, cMilliesInterval, pfnCallback, pvUser));
    }
    return rc;
}
/**
 * Sets the last seen CPU timestamp counter.
 *
 * Only moves the last-seen value forward; an older tick than the one
 * currently recorded is ignored.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   u64LastSeenTick The last seen timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick)
{
    VMCPU_ASSERT_EMT(pVCpu);

    LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
    /* Ratchet: never let the last-seen TSC go backwards. */
    if (u64LastSeenTick > pVCpu->tm.s.u64TSCLastSeen)
        pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
    return VINF_SUCCESS;
}
/**
 * Worker for DBGFR3SelQueryInfo that calls into SELM.
 *
 * Queries either the guest descriptor tables (via SELMR3GetSelectorInfo) or,
 * when DBGFSELQI_FLAGS_DT_SHADOW is given, the raw-mode shadow tables (not
 * available when HM is enabled).
 *
 * @returns VBox status code.
 * @param   pUVM        The user-mode VM handle.
 * @param   idCpu       The ID of the CPU context to query on.
 * @param   Sel         The selector to look up.
 * @param   fFlags      DBGFSELQI_FLAGS_XXX.
 * @param   pSelInfo    Where to store the selector information.
 */
static DECLCALLBACK(int) dbgfR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * Make the query.
     */
    int rc;
    if (!(fFlags & DBGFSELQI_FLAGS_DT_SHADOW))
    {
        PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
        VMCPU_ASSERT_EMT(pVCpu); /* must run on the target CPU's EMT. */
        rc = SELMR3GetSelectorInfo(pVM, pVCpu, Sel, pSelInfo);

        /*
         * 64-bit mode HACKS for making data and stack selectors wide open when
         * queried. This is voodoo magic.
         */
        if (fFlags & DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)
        {
            /* Expand 64-bit data and stack selectors. The check is a bit bogus... */
            /* Case 1: a plain long-mode selector (no gate/hyper/invalid/etc. bits)
               that isn't already wide open — flatten base and limit. */
            if (   RT_SUCCESS(rc)
                && (pSelInfo->fFlags & (  DBGFSELINFO_FLAGS_LONG_MODE | DBGFSELINFO_FLAGS_REAL_MODE | DBGFSELINFO_FLAGS_PROT_MODE
                                        | DBGFSELINFO_FLAGS_GATE | DBGFSELINFO_FLAGS_HYPER
                                        | DBGFSELINFO_FLAGS_INVALID | DBGFSELINFO_FLAGS_NOT_PRESENT))
                     == DBGFSELINFO_FLAGS_LONG_MODE
                && pSelInfo->cbLimit != ~(RTGCPTR)0
                && CPUMIsGuestIn64BitCode(pVCpu) )
            {
                pSelInfo->GCPtrBase = 0;
                pSelInfo->cbLimit   = ~(RTGCPTR)0;
            }
            /* Case 2: null selector in 64-bit mode — synthesize a wide-open,
               present, long-mode descriptor and declare success. */
            else if (   Sel == 0
                     && CPUMIsGuestIn64BitCode(pVCpu))
            {
                pSelInfo->GCPtrBase = 0;
                pSelInfo->cbLimit   = ~(RTGCPTR)0;
                pSelInfo->Sel       = 0;
                pSelInfo->SelGate   = 0;
                pSelInfo->fFlags    = DBGFSELINFO_FLAGS_LONG_MODE;
                pSelInfo->u.Raw64.Gen.u1Present  = 1;
                pSelInfo->u.Raw64.Gen.u1Long     = 1;
                pSelInfo->u.Raw64.Gen.u1DescType = 1;
                rc = VINF_SUCCESS;
            }
        }
    }
    else
    {
        /* Shadow tables only exist in raw mode; refuse when HM is active. */
        if (HMIsEnabled(pVM))
            rc = VERR_INVALID_STATE;
        else
            rc = SELMR3GetShadowSelectorInfo(pVM, Sel, pSelInfo);
    }
    return rc;
}
/** * Calls the ring-3 host code. * * @returns VBox status code of the ring-3 call. * @retval VERR_VMM_RING3_CALL_DISABLED if called at the wrong time. This must * be passed up the stack, or if that isn't possible then VMMRZCallRing3 * needs to change it into an assertion. * * * @param pVM The cross context VM structure. * @param pVCpu The cross context virtual CPU structure of the calling EMT. * @param enmOperation The operation. * @param uArg The argument to the operation. */ VMMRZDECL(int) VMMRZCallRing3(PVM pVM, PVMCPU pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg) { VMCPU_ASSERT_EMT(pVCpu); /* * Check if calling ring-3 has been disabled and only let let fatal calls thru. */ if (RT_UNLIKELY( pVCpu->vmm.s.cCallRing3Disabled != 0 && enmOperation != VMMCALLRING3_VM_R0_ASSERTION)) { #ifndef IN_RING0 /* * In most cases, it's sufficient to return a status code which * will then be propagated up the code usually encountering several * AssertRC invocations along the way. Hitting one of those is more * helpful than stopping here. * * However, some doesn't check the status code because they are called * from void functions, and for these we'll turn this into a ring-0 * assertion host call. */ if (enmOperation != VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS) return VERR_VMM_RING3_CALL_DISABLED; #endif #ifdef IN_RC RTStrPrintf(g_szRTAssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), "VMMRZCallRing3: enmOperation=%d uArg=%#llx idCpu=%#x\n", enmOperation, uArg, pVCpu->idCpu); #endif RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), "VMMRZCallRing3: enmOperation=%d uArg=%#llx idCpu=%#x\n", enmOperation, uArg, pVCpu->idCpu); enmOperation = VMMCALLRING3_VM_R0_ASSERTION; } /* * The normal path. */ /** @todo profile this! 
*/ pVCpu->vmm.s.enmCallRing3Operation = enmOperation; pVCpu->vmm.s.u64CallRing3Arg = uArg; pVCpu->vmm.s.rcCallRing3 = VERR_VMM_RING3_CALL_NO_RC; #ifdef IN_RC pVM->vmm.s.pfnRCToHost(VINF_VMM_CALL_HOST); #else int rc; if (pVCpu->vmm.s.pfnCallRing3CallbackR0) { rc = pVCpu->vmm.s.pfnCallRing3CallbackR0(pVCpu, enmOperation, pVCpu->vmm.s.pvCallRing3CallbackUserR0); if (RT_FAILURE(rc)) return rc; } rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST); if (RT_FAILURE(rc)) return rc; #endif return pVCpu->vmm.s.rcCallRing3; }
/**
 * Invokes the read-MSR handler for the GIM provider configured for the VM.
 *
 * @returns Strict VBox status code like CPUMQueryGuestMsr.
 * @retval  VINF_CPUM_R3_MSR_READ
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idMsr       The MSR to read.
 * @param   pRange      The range this MSR belongs to.
 * @param   puValue     Where to store the MSR value read.
 */
VMM_INT_DECL(VBOXSTRICTRC) GIMReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
{
    Assert(pVCpu);
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(GIMIsEnabled(pVM));
    VMCPU_ASSERT_EMT(pVCpu);

    /* Hyper-V is the only provider with an MSR-read handler; anything
       else raises #GP(0) in the guest. */
    if (pVM->gim.s.enmProviderId == GIMPROVIDERID_HYPERV)
        return GIMHvReadMsr(pVCpu, idMsr, pRange, puValue);

    AssertMsgFailed(("GIMReadMsr: for unknown provider %u idMsr=%#RX32 -> #GP(0)", pVM->gim.s.enmProviderId, idMsr));
    return VERR_CPUM_RAISE_GP_0;
}
/**
 * Implements a GIM hypercall with the provider configured for the VM.
 *
 * @returns VBox status code.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest-CPU context.
 */
VMM_INT_DECL(int) GIMHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT(pVCpu);

    if (RT_UNLIKELY(!GIMIsEnabled(pVM)))
        return VERR_GIM_NOT_ENABLED;

    /* Only Hyper-V has a hypercall handler here; any other provider is an
       internal processing error. */
    if (pVM->gim.s.enmProviderId == GIMPROVIDERID_HYPERV)
        return GIMHvHypercall(pVCpu, pCtx);

    AssertMsgFailed(("GIMHypercall: for unknown provider %u\n", pVM->gim.s.enmProviderId));
    return VERR_GIM_IPE_3;
}
/**
 * Sets the current CPU timestamp counter.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u64Tick     The new timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);

    /*
     * Easiest done with the TSC stopped: clear the ticking flag, overwrite
     * the counters, and let tmCpuTickResume redo the offset calculations if
     * we interrupted a running TSC.  (No tmCpuTickPause is needed here since
     * u64TSC gets overwritten anyway.)
     */
    bool const fWasTicking = pVCpu->tm.s.fTSCTicking;
    pVCpu->tm.s.fTSCTicking   = false;
    pVCpu->tm.s.u64TSCLastSeen = u64Tick;
    pVCpu->tm.s.u64TSC         = u64Tick;
    if (fWasTicking)
        tmCpuTickResume(pVM, pVCpu);
    /** @todo Try help synchronizing it better among the virtual CPUs? */
    return VINF_SUCCESS;
}
/**
 * Implements a GIM hypercall with the provider configured for the VM.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
 *          failed).
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
 * @retval  VERR_GIM_HYPERCALLS_NOT_AVAILABLE hypercalls unavailable.
 * @retval  VERR_GIM_NOT_ENABLED GIM is not enabled (shouldn't really happen)
 * @retval  VERR_GIM_HYPERCALL_MEMORY_READ_FAILED hypercall failed while reading
 *          memory.
 * @retval  VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED hypercall failed while
 *          writing memory.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @thread  EMT.
 */
VMM_INT_DECL(VBOXSTRICTRC) GIMHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT(pVCpu);

    if (RT_UNLIKELY(!GIMIsEnabled(pVM)))
        return VERR_GIM_NOT_ENABLED;

    /* Dispatch on the configured provider. */
    GIMPROVIDERID const enmProvider = pVM->gim.s.enmProviderId;
    if (enmProvider == GIMPROVIDERID_HYPERV)
        return gimHvHypercall(pVCpu, pCtx);
    if (enmProvider == GIMPROVIDERID_KVM)
        return gimKvmHypercall(pVCpu, pCtx);

    AssertMsgFailed(("GIMHypercall: for provider %u not available/implemented\n", enmProvider));
    return VERR_GIM_HYPERCALLS_NOT_AVAILABLE;
}
/**
 * Counterpart of VMMRZCallRing3Disable(); re-enables host calls.
 *
 * Decrements the per-VCPU cCallRing3Disabled counter; when it reaches zero,
 * logger flushing is re-enabled for this context and VMMRZCallRing3 accepts
 * calls again.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  EMT.
 */
VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
#if defined(LOG_ENABLED) && defined(IN_RING0)
    RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
#endif

    /* Must be balanced with a prior VMMRZCallRing3Disable call. */
    Assert(pVCpu->vmm.s.cCallRing3Disabled > 0);
    if (ASMAtomicUoDecU32(&pVCpu->vmm.s.cCallRing3Disabled) == 0)
    {
        /* Last nested enable: allow logger flushing again. */
#ifdef IN_RC
        pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = false;
#else
# ifdef LOG_ENABLED
        if (pVCpu->vmm.s.pR0LoggerR0)
            pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
# endif
#endif
    }

#if defined(LOG_ENABLED) && defined(IN_RING0)
    ASMSetFlags(fFlags);
#endif
}
/**
 * Called on the EMT for the VCpu.
 *
 * Thin wrapper around PGMGstGetPage; exists only because FlatPtr cannot be
 * passed thru VMR3ReqCall directly.
 *
 * @returns VBox status code.
 * @param   pVCpu       The virtual CPU handle.
 * @param   pAddress    The address.
 * @param   pGCPhys     Where to return the physical address.
 */
static DECLCALLBACK(int) dbgfR3AddrToPhysOnVCpu(PVMCPU pVCpu, PDBGFADDRESS pAddress, PRTGCPHYS pGCPhys)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return PGMGstGetPage(pVCpu, pAddress->FlatPtr, NULL /* flags not wanted */, pGCPhys);
}
/**
 * Gets the last seen CPU timestamp counter of the guest.
 *
 * Plain accessor for the value maintained by TMCpuTickSetLastSeen.
 *
 * @returns the last seen TSC.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    uint64_t const u64LastSeen = pVCpu->tm.s.u64TSCLastSeen;
    return u64LastSeen;
}
/**
 * Checks whether its possible to call host context or not.
 *
 * @returns true if it's safe, false if it isn't.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMRZDECL(bool) VMMRZCallRing3IsEnabled(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* Enabled exactly when no Disable calls are outstanding. */
    uint32_t const cDisabled = pVCpu->vmm.s.cCallRing3Disabled;
    Assert(cDisabled <= 16);
    return cDisabled == 0;
}