/** @interface_method_impl{PDMAPICHLPR0,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmR0ApicHlp_ClearInterruptFF(PPDMDEVINS pDevIns, PDMAPICIRQ enmType, VMCPUID idCpu)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR0;
    AssertReturnVoid(idCpu < pVM->cCpus);   /* Validate idCpu before using it to index aCpus. */
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    LogFlow(("pdmR0ApicHlp_ClearInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_APIC %d -> 0\n",
             pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));

    /* Note: NMI/SMI can't be cleared. */
    switch (enmType)
    {
        case PDMAPICIRQ_HARDWARE:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            break;
        case PDMAPICIRQ_EXTINT:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
            break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }
}
/** @interface_method_impl{PDMAPICHLPR3,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmR3ApicHlp_ClearInterruptFF(PPDMDEVINS pDevIns, PDMAPICIRQ enmType, VMCPUID idCpu)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR3;
    AssertReturnVoid(idCpu < pVM->cCpus);   /* Validate idCpu before using it to index aCpus. */
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    LogFlow(("pdmR3ApicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 0\n",
             pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));

    /* Note: NMI/SMI can't be cleared. */
    switch (enmType)
    {
        case PDMAPICIRQ_UPDATE_PENDING:
            VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC);
            break;
        case PDMAPICIRQ_HARDWARE:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            break;
        case PDMAPICIRQ_EXTINT:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
            break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }

#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptClear(pVM, pVCpu);
#endif
}
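
#if 0 /* Illustrative only: a minimal sketch of how an APIC device implementation
         might drive the Set/ClearInterruptFF helpers above through its ring-3
         helper table.  The function name 'apicSketchUpdateIrq' and the way the
         helper table pointer is passed in are hypothetical, not part of the
         interfaces shown in this file. */
static void apicSketchUpdateIrq(PPDMDEVINS pDevIns, PCPDMAPICHLPR3 pApicHlp, VMCPUID idCpu, bool fPending)
{
    if (fPending)
        pApicHlp->pfnSetInterruptFF(pDevIns, PDMAPICIRQ_HARDWARE, idCpu);   /* raise the FF */
    else
        pApicHlp->pfnClearInterruptFF(pDevIns, PDMAPICIRQ_HARDWARE, idCpu); /* lower it again */
}
#endif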
/**
 * Gets the pending interrupt.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pu8Interrupt    Where to store the interrupt on success.
 */
VMMDECL(int) PDMGetInterrupt(PVMCPU pVCpu, uint8_t *pu8Interrupt)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    pdmLock(pVM);

    /*
     * The local APIC has a higher priority than the PIC.
     */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pDevIns));
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt));
        uint32_t uTagSrc;
        int i = pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, &uTagSrc);
        AssertMsg(i <= 255 && i >= 0, ("i=%d\n", i));
        if (i >= 0)
        {
            pdmUnlock(pVM);
            *pu8Interrupt = (uint8_t)i;
            VBOXVMM_PDM_IRQ_GET(pVCpu, RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc), i);
            return VINF_SUCCESS;
        }
    }

    /*
     * Check the PIC.
     */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
        Assert(pVM->pdm.s.Pic.CTX_SUFF(pDevIns));
        Assert(pVM->pdm.s.Pic.CTX_SUFF(pfnGetInterrupt));
        uint32_t uTagSrc;
        int i = pVM->pdm.s.Pic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Pic.CTX_SUFF(pDevIns), &uTagSrc);
        AssertMsg(i <= 255 && i >= 0, ("i=%d\n", i));
        if (i >= 0)
        {
            pdmUnlock(pVM);
            *pu8Interrupt = (uint8_t)i;
            VBOXVMM_PDM_IRQ_GET(pVCpu, RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc), i);
            return VINF_SUCCESS;
        }
    }

    /** @todo Figure out exactly why we can get here without anything being set. (REM) */
    pdmUnlock(pVM);
    return VERR_NO_DATA;
}
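
#if 0 /* Illustrative only: the typical consumer pattern for PDMGetInterrupt,
         modelled on the trpmGCExitTrap() code further down.  The wrapper name
         'emSketchDispatchPendingInterrupt' is hypothetical. */
static int emSketchDispatchPendingInterrupt(PVMCPU pVCpu)
{
    /* Only dispatch when an interrupt FF is pending and delivery isn't inhibited. */
    if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
        && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        uint8_t u8Interrupt;
        int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);  /* APIC is queried first, then the PIC */
        if (RT_SUCCESS(rc))
            rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
        return rc;
    }
    return VERR_NO_DATA;
}
#endif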
/** @interface_method_impl{PDMPICHLPR3,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmR3PicHlp_ClearInterruptFF(PPDMDEVINS pDevIns)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR3;
    PVMCPU pVCpu = &pVM->aCpus[0];  /* for PIC we always deliver to CPU 0, MP use APIC */

    if (pVM->pdm.s.Apic.pfnLocalInterruptR3)
    {
        /* Lower the LAPIC's LINT0 line instead of signaling the CPU directly. */
        LogFlow(("pdmR3PicHlp_ClearInterruptFF: caller='%s'/%d: Clearing local interrupt on LAPIC\n",
                 pDevIns->pReg->szName, pDevIns->iInstance));
        /** @todo 'rcRZ' propagation to pfnLocalInterrupt from caller. */
        pVM->pdm.s.Apic.pfnLocalInterruptR3(pVM->pdm.s.Apic.pDevInsR3, pVCpu, 0 /* u8Pin */, 0 /* u8Level */,
                                            VINF_SUCCESS /* rcRZ */);
        return;
    }

    LogFlow(("pdmR3PicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n",
             pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);

#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptClear(pVM, pVCpu);
#endif
}
/**
 * Steps hardware accelerated mode.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
static int emR3HwAccStep(PVM pVM, PVMCPU pVCpu)
{
    Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC);

    int      rc;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));

    /*
     * Check vital forced actions, but ignore pending interrupts and timers.
     */
    if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    {
        rc = emR3HwaccmForcedActions(pVM, pVCpu, pCtx);
        if (rc != VINF_SUCCESS)
            return rc;
    }

    /*
     * Set flags for single stepping.
     */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);

    /*
     * Single step.
     * We do not start timers or anything; if anything, we should only run for a few nanoseconds.
     */
    do
    {
        rc = VMMR3HwAccRunGC(pVM, pVCpu);
    } while (   rc == VINF_SUCCESS
             || rc == VINF_EM_RAW_INTERRUPT);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);

    /*
     * Make sure the trap flag is cleared.
     * (Too bad if the guest is trying to single step too.)
     */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);

    /*
     * Deal with the return codes.
     */
    rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
    rc = emR3HwaccmHandleRC(pVM, pVCpu, pCtx, rc);
    return rc;
}
/**
 * Process the critical sections (both types) queued for ring-3 'leave'.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) PDMCritSectBothFF(PVMCPU pVCpu)
{
    uint32_t i;
    Assert(   pVCpu->pdm.s.cQueuedCritSectLeaves > 0
           || pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves > 0
           || pVCpu->pdm.s.cQueuedCritSectRwExclLeaves > 0);

    /* Shared R/W leaves. */
    i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves;
    pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves = 0;
    while (i-- > 0)
    {
# ifdef IN_RING3
        PPDMCRITSECTRW pCritSectRw = pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i];
# else
        PPDMCRITSECTRW pCritSectRw = (PPDMCRITSECTRW)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM),
                                                                   pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i]);
# endif
        pdmCritSectRwLeaveSharedQueued(pCritSectRw);
        LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw));
    }

    /* Then the exclusive R/W leaves. */
    i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves;
    pVCpu->pdm.s.cQueuedCritSectRwExclLeaves = 0;
    while (i-- > 0)
    {
# ifdef IN_RING3
        PPDMCRITSECTRW pCritSectRw = pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i];
# else
        PPDMCRITSECTRW pCritSectRw = (PPDMCRITSECTRW)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM),
                                                                   pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i]);
# endif
        pdmCritSectRwLeaveExclQueued(pCritSectRw);
        LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw));
    }

    /* Finally, the normal leaves. */
    i = pVCpu->pdm.s.cQueuedCritSectLeaves;
    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    while (i-- > 0)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM),
                                                             pVCpu->pdm.s.apQueuedCritSectLeaves[i]);
# endif
        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM),
                                                             pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif
        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
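
#if 0 /* Illustrative only: a simplified sketch of the deferral path inside
         PDMCritSectLeave that feeds the queues drained above when a leave
         cannot be completed in ring-0/RC.  Locking details and statistics are
         omitted; 'pdmSketchQueueCritSectLeave' is a hypothetical name and the
         field names follow PDMCritSectBothFF above. */
static void pdmSketchQueueCritSectLeave(PVMCPU pVCpu, PPDMCRITSECT pCritSect)
{
    /* Remember the section (as a ring-3 pointer) ... */
    uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
    pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVCpu->CTX_SUFF(pVM), pCritSect);

    /* ... and raise the FFs so PDMCritSectFF/PDMCritSectBothFF runs on the way to ring-3. */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
}
#endif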
/** @interface_method_impl{PDMPICHLPR0,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmR0PicHlp_ClearInterruptFF(PPDMDEVINS pDevIns)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR0;

    if (pVM->pdm.s.Apic.pfnLocalInterruptR0)
    {
        /* Lower the LAPIC's LINT0 line instead of signaling the CPU directly. */
        LogFlow(("pdmR0PicHlp_ClearInterruptFF: caller=%p/%d: Clearing local interrupt on LAPIC\n",
                 pDevIns, pDevIns->iInstance));
        pVM->pdm.s.Apic.pfnLocalInterruptR0(pVM->pdm.s.Apic.pDevInsR0, 0 /* u8Pin */, 0 /* u8Level */);
        return;
    }

    PVMCPU pVCpu = &pVM->aCpus[0];  /* for PIC we always deliver to CPU 0, MP use APIC */

    LogFlow(("pdmR0PicHlp_ClearInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n",
             pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
}
/**
 * Executes an instruction in HM mode if we can.
 *
 * This is somewhat comparable to REMR3EmulateInstruction.
 *
 * @returns VBox strict status code.
 * @retval  VINF_EM_DBG_STEPPED on success.
 * @retval  VINF_EM_RESCHEDULE if we cannot execute guest instructions in
 *          HM right now.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure for the calling EMT.
 * @param   fFlags  Combinations of EM_ONE_INS_FLAGS_XXX.
 * @thread  EMT.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) EMR3HmSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));

    if (!HMR3CanExecuteGuest(pVM, pCtx))
        return VINF_EM_RESCHEDULE;

    uint64_t const uOldRip = pCtx->rip;
    for (;;)
    {
        /*
         * Service necessary FFs before going into HM.
         */
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            VBOXSTRICTRC rcStrict = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rcStrict != VINF_SUCCESS)
            {
                Log(("EMR3HmSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                return rcStrict;
            }
        }

        /*
         * Go execute it.
         */
        bool fOld = HMSetSingleInstruction(pVM, pVCpu, true);
        VBOXSTRICTRC rcStrict = VMMR3HmRunGC(pVM, pVCpu);
        HMSetSingleInstruction(pVM, pVCpu, fOld);
        LogFlow(("EMR3HmSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

        /*
         * Handle high priority FFs and informational status codes.  We don't do
         * normal FF processing; the caller or the next call can deal with them.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        {
            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
            LogFlow(("EMR3HmSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        if (   rcStrict != VINF_SUCCESS
            && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
        {
            rcStrict = emR3HmHandleRC(pVM, pVCpu, pCtx, VBOXSTRICTRC_TODO(rcStrict));
            Log(("EMR3HmSingleInstruction: emR3HmHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        /*
         * Done?
         */
        if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
            || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
            || pCtx->rip != uOldRip)
        {
            if (rcStrict == VINF_SUCCESS && pCtx->rip != uOldRip)
                rcStrict = VINF_EM_DBG_STEPPED;
            Log(("EMR3HmSingleInstruction: returns %Rrc (rip %llx -> %llx)\n",
                 VBOXSTRICTRC_VAL(rcStrict), uOldRip, pCtx->rip));
            return rcStrict;
        }
    }
}
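
#if 0 /* Illustrative only: how a debugger event loop might use
         EMR3HmSingleInstruction to implement a single step.  'dbgfSketchStep'
         is a hypothetical helper, not an existing DBGF function. */
static int dbgfSketchStep(PVM pVM, PVMCPU pVCpu)
{
    /* Require a RIP change so instructions that fault and restart aren't
       reported as a completed step. */
    VBOXSTRICTRC rcStrict = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
    if (rcStrict == VINF_EM_DBG_STEPPED)
        return VINF_SUCCESS;                /* exactly one instruction retired */
    if (rcStrict == VINF_EM_RESCHEDULE)
        return VERR_EM_CANNOT_EXEC_GUEST;   /* fall back to another execution engine */
    return VBOXSTRICTRC_VAL(rcStrict);      /* propagate everything else */
}
#endif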
/**
 * Executes hardware accelerated raw code. (Intel VT-x & AMD-V)
 *
 * This function contains the raw-mode version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
    int      rc   = VERR_IPE_UNINITIALIZED_STATUS;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);

#ifdef EM_NOTIFY_HM
    HMR3NotifyScheduled(pVCpu);
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);

        /* Check if a forced reschedule is pending. */
        if (HMR3IsRescheduleRequired(pVM, pCtx))
        {
            rc = VINF_EM_RESCHEDULE;
            break;
        }

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rc != VINF_SUCCESS)
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVCpu))
            Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n",
                 pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs.Sel, (RTGCPTR)pCtx->rip));

        uint32_t cpl = CPUMGetGuestCPL(pVCpu);
        if (pVM->cCpus == 1)
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n",
                     cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL,
                     (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n",
                     cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL,
                     (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
        else
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n",
                     cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF,
                     pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n",
                     cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF,
                     pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);

        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
            rc = VMMR3HmRunGC(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }

        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            if (   rc != VINF_SUCCESS
                && rc != VINF_EM_RESCHEDULE_HM)
            {
                *pfFFDone = true;
                break;
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
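
#if 0 /* Illustrative only: a heavily simplified sketch of how the outer loop in
         EMR3ExecuteVM() might drive the inner loop above.  'emR3SketchRunLoop'
         is a hypothetical name; the real outer loop handles many more states
         and status codes. */
static int emR3SketchRunLoop(PVM pVM, PVMCPU pVCpu)
{
    int  rc      = VINF_SUCCESS;
    bool fFFDone = false;
    for (;;)
    {
        /* Run guest code in HM until the inner loop yields a scheduling rc. */
        rc = emR3HmExecute(pVM, pVCpu, &fFFDone);

        /* Service remaining forced actions unless the inner loop already did. */
        if (   !fFFDone
            && (   VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
                || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK)))
            rc = emR3ForcedActions(pVM, pVCpu, rc);

        /* A rescheduling rc re-enters the engine; anything else exits the loop. */
        if (rc != VINF_EM_RESCHEDULE_HM)
            return rc;
    }
}
#endif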
RT_C_DECLS_END


/**
 * Exits the trap, called when exiting a trap handler.
 *
 * Will reset the trap if it's not a guest trap or the trap
 * is already handled. Will process resume guest FFs.
 *
 * @returns rc, can be adjusted if its VINF_SUCCESS or something really bad
 *          happened.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rc          The VBox status code to return.
 * @param   pRegFrame   Pointer to the register frame for the trap.
 *
 * @remarks This must not be used for hypervisor traps, only guest traps.
 */
static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame)
{
    uint32_t uOldActiveVector = pVCpu->trpm.s.uActiveVector;
    NOREF(uOldActiveVector);

    /* Reset trap? */
    if (    rc != VINF_EM_RAW_GUEST_TRAP
        &&  rc != VINF_EM_RAW_RING_SWITCH_INT)
        pVCpu->trpm.s.uActiveVector = UINT32_MAX;

#ifdef VBOX_HIGH_RES_TIMERS_HACK
    /*
     * We should poll the timers occasionally.
     * We must *NOT* do this too frequently as it adds a significant overhead
     * and it'll kill us if the trap load is high. (See @bugref{1354}.)
     * (The heuristic is not very intelligent, we should really check trap
     * frequency etc. here, but alas, we lack any such information atm.)
     */
    static unsigned s_iTimerPoll = 0;
    if (rc == VINF_SUCCESS)
    {
        if (!(++s_iTimerPoll & 0xf))
        {
            TMTimerPollVoid(pVM, pVCpu);
            Log2(("TMTimerPoll at %08RX32 - VM_FF_TM_VIRTUAL_SYNC=%d VMCPU_FF_TIMER=%d\n", pRegFrame->eip,
                  VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)));
        }
    }
    else
        s_iTimerPoll = 0;
#endif

    /* Clear pending inhibit interrupt state if required. (necessary for dispatching interrupts later on) */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        Log2(("VMCPU_FF_INHIBIT_INTERRUPTS at %08RX32 successor %RGv\n", pRegFrame->eip, EMGetInhibitInterruptsPC(pVCpu)));
        if (pRegFrame->eip != EMGetInhibitInterruptsPC(pVCpu))
        {
            /** @note we intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
             *  Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
             *  force a world switch again, possibly allowing a guest interrupt to be dispatched in the process. This could
             *  break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
             */
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }
    }

    /*
     * Pending resume-guest-FF?
     * Or pending (A)PIC interrupt? Windows XP will crash if we delay APIC interrupts.
     */
    if (    rc == VINF_SUCCESS
        &&  (   VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_DMA)
             || VMCPU_FF_ISPENDING(pVCpu,   VMCPU_FF_TIMER | VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                          | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                          | VMCPU_FF_PDM_CRITSECT)
            )
       )
    {
        /* The out of memory condition naturally outranks the others. */
        if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)))
            rc = VINF_EM_NO_MEMORY;
        /* Pending Ring-3 action. */
        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_PDM_CRITSECT))
        {
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
            rc = VINF_EM_RAW_TO_R3;
        }
        /* Pending timer action. */
        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
            rc = VINF_EM_RAW_TIMER_PENDING;
        /* The Virtual Sync clock has stopped. */
        else if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
            rc = VINF_EM_RAW_TO_R3;
        /* DMA work pending? */
        else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
            rc = VINF_EM_RAW_TO_R3;
        /* Pending request packets might contain actions that need immediate
           attention, such as pending hardware interrupts. */
        else if (   VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
                 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
            rc = VINF_EM_PENDING_REQUEST;
        /* Pending interrupt: dispatch it. */
        else if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
                 && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                 &&  PATMAreInterruptsEnabledByCtxCore(pVM, pRegFrame)
                )
        {
            uint8_t u8Interrupt;
            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
            Log(("trpmGCExitTrap: u8Interrupt=%d (%#x) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
            AssertFatalMsgRC(rc, ("PDMGetInterrupt failed with %Rrc\n", rc));
            rc = TRPMForwardTrap(pVCpu, pRegFrame, (uint32_t)u8Interrupt, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_HARDWARE_INT, uOldActiveVector);
            /* can't return if successful */
            Assert(rc != VINF_SUCCESS);

            /* Stop the profile counter that was started in TRPMGCHandlersA.asm */
            Assert(uOldActiveVector <= 16);
            STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[uOldActiveVector], a);

            /* Assert the trap and go to the recompiler to dispatch it. */
            TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);

            STAM_PROFILE_ADV_START(&pVM->trpm.s.aStatGCTraps[uOldActiveVector], a);
            rc = VINF_EM_RAW_INTERRUPT_PENDING;
        }
        /*
         * Try sync CR3?
         */
        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        {
#if 1
            PGMRZDynMapReleaseAutoSet(pVCpu);
            PGMRZDynMapStartAutoSet(pVCpu);
            rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu),
                            VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
#else
            rc = VINF_PGM_SYNC_CR3;
#endif
        }
    }

    AssertMsg(     rc != VINF_SUCCESS
              ||   (   pRegFrame->eflags.Bits.u1IF
                    && (   pRegFrame->eflags.Bits.u2IOPL < (unsigned)(pRegFrame->ss.Sel & X86_SEL_RPL)
                        || pRegFrame->eflags.Bits.u1VM))
              , ("rc=%Rrc\neflags=%RX32 ss=%RTsel IOPL=%d\n", rc, pRegFrame->eflags.u32, pRegFrame->ss.Sel, pRegFrame->eflags.Bits.u2IOPL));
    PGMRZDynMapReleaseAutoSet(pVCpu);
    return rc;
}
/* execute the switch. */
VMMR3DECL(int) VMMDoHwAccmTest(PVM pVM)
{
    uint32_t i;
    int      rc;
    PCPUMCTX pHyperCtx, pGuestCtx;
    RTGCPHYS CR3Phys = 0x0; /* fake address */
    PVMCPU   pVCpu = &pVM->aCpus[0];

    if (!HWACCMR3IsAllowed(pVM))
    {
        RTPrintf("VMM: Hardware accelerated test not available!\n");
        return VERR_ACCESS_DENIED;
    }

    /*
     * These forced actions are not necessary for the test and trigger breakpoints too.
     */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);

    /* Enable mapping of the hypervisor into the shadow page table. */
    uint32_t cb;
    rc = PGMR3MappingsSize(pVM, &cb);
    AssertRCReturn(rc, rc);

    /* Pretend the mappings are now fixed to force a refresh of the reserved PDEs. */
    rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
    AssertRCReturn(rc, rc);

    pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

    pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
    pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFSXR | X86_CR4_OSXMMEEXCPT;
    PGMChangeMode(pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
    PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
    VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
    VM_FF_CLEAR(pVM, VM_FF_REQUEST);

    /*
     * Setup stack for calling VMMGCEntry().
     */
    RTRCPTR RCPtrEP;
    rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
    if (RT_SUCCESS(rc))
    {
        RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);

        pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

        /* Fill in hidden selector registers for the hypervisor state. */
        SYNC_SEL(pHyperCtx, cs);
        SYNC_SEL(pHyperCtx, ds);
        SYNC_SEL(pHyperCtx, es);
        SYNC_SEL(pHyperCtx, fs);
        SYNC_SEL(pHyperCtx, gs);
        SYNC_SEL(pHyperCtx, ss);
        SYNC_SEL(pHyperCtx, tr);

        /*
         * Profile switching.
         */
        RTPrintf("VMM: profiling switcher...\n");
        Log(("VMM: profiling switcher...\n"));
        uint64_t TickMin   = ~0;
        uint64_t tsBegin   = RTTimeNanoTS();
        uint64_t TickStart = ASMReadTSC();
        for (i = 0; i < 1000000; i++)
        {
            CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
            CPUMPushHyper(pVCpu, 0);
            CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HWACCM_NOP);
            CPUMPushHyper(pVCpu, pVM->pVMRC);
            CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR));    /* stack frame size */
            CPUMPushHyper(pVCpu, RCPtrEP);                /* what to call */

            pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
            pGuestCtx = CPUMQueryGuestCtxPtr(pVCpu);

            /* Copy the hypervisor context to make sure we have a valid guest context. */
            *pGuestCtx = *pHyperCtx;
            pGuestCtx->cr3 = CR3Phys;

            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
            VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);

            uint64_t TickThisStart = ASMReadTSC();
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, 0);
            uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: R0 returned fatal %Rrc in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            if (TickThisElapsed < TickMin)
                TickMin = TickThisElapsed;
        }
        uint64_t TickEnd = ASMReadTSC();
        uint64_t tsEnd   = RTTimeNanoTS();

        uint64_t Elapsed      = tsEnd - tsBegin;
        uint64_t PerIteration = Elapsed / (uint64_t)i;
        uint64_t cTicksElapsed      = TickEnd - TickStart;
        uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;

        RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
        Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));

        rc = VINF_SUCCESS;
    }
    else
        AssertMsgFailed(("Failed to resolve VMMGC.gc::VMMGCEntry(), rc=%Rrc\n", rc));

    return rc;
}
/* execute the switch. */
VMMR3DECL(int) VMMDoTest(PVM pVM)
{
#if 1
    PVMCPU pVCpu = &pVM->aCpus[0];

#ifdef NO_SUPCALLR0VMM
    RTPrintf("NO_SUPCALLR0VMM\n");
    return VINF_SUCCESS;
#endif

    /*
     * Setup stack for calling VMMGCEntry().
     */
    RTRCPTR RCPtrEP;
    int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
    if (RT_SUCCESS(rc))
    {
        RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);

        /*
         * Test various crashes which we must be able to recover from.
         */
        vmmR3DoTrapTest(pVM, 0x3, 0, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3");
        vmmR3DoTrapTest(pVM, 0x3, 1, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3 WP");

#if defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
        vmmR3DoTrapTest(pVM, 0x8, 0, VERR_TRPM_PANIC,      0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG]");
        SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
        bool f;
        rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
#if !defined(DEBUG_bird)
        if (RT_SUCCESS(rc) && f)
#endif
        {
            /* see triple fault warnings in SELM and VMMGC.cpp. */
            vmmR3DoTrapTest(pVM, 0x8, 1, VERR_TRPM_PANIC,      0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG] WP");
            SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
        }
#endif

        vmmR3DoTrapTest(pVM, 0xd, 0, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP");
        ///@todo find a better \#GP case, on intel ltr will \#PF (busy update?) and not \#GP.
        //vmmR3DoTrapTest(pVM, 0xd, 1, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP WP");

        vmmR3DoTrapTest(pVM, 0xe, 0, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL)");
        vmmR3DoTrapTest(pVM, 0xe, 1, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL) WP");
        vmmR3DoTrapTest(pVM, 0xe, 2, VINF_SUCCESS,         0x00000000, NULL,                       "#PF w/Tmp Handler");
        /* This test is no longer relevant as fs and gs are loaded with NULL
           selectors and we will always return to HC if a #GP occurs while
           returning to guest code.
        vmmR3DoTrapTest(pVM, 0xe, 4, VINF_SUCCESS,         0x00000000, NULL,                       "#PF w/Tmp Handler and bad fs");
        */

        /*
         * Set a debug register and perform a context switch.
         */
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: Nop test failed, rc=%Rrc not VINF_SUCCESS\n", rc);
            return rc;
        }

        /* a harmless breakpoint */
        RTPrintf("VMM: testing hardware bp at 0x10000 (not hit)\n");
        DBGFADDRESS Addr;
        DBGFR3AddrFromFlat(pVM, &Addr, 0x10000);
        RTUINT iBp0;
        rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
        AssertReleaseRC(rc);
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: DR0=0x10000 test failed with rc=%Rrc!\n", rc);
            return rc;
        }

        /* a bad one at VMMGCEntry */
        RTPrintf("VMM: testing hardware bp at VMMGCEntry (hit)\n");
        DBGFR3AddrFromFlat(pVM, &Addr, RCPtrEP);
        RTUINT iBp1;
        rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
        AssertReleaseRC(rc);
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
        {
            RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Rrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
            return rc;
        }

        /* resume the breakpoint */
        RTPrintf("VMM: resuming hyper after breakpoint\n");
        CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
        rc = VMMR3ResumeHyper(pVM, pVCpu);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: failed to resume on hyper breakpoint, rc=%Rrc = KNOWN BUG\n", rc); /** @todo fix VMMR3ResumeHyper */
            return rc;
        }

        /* engage the breakpoint again and try single stepping. */
        RTPrintf("VMM: testing hardware bp at VMMGCEntry + stepping\n");
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
        {
            RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Rrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
            return rc;
        }

        RTGCUINTREG OldPc = CPUMGetHyperEIP(pVCpu);
        RTPrintf("%RGr=>", OldPc);
        unsigned i;
        for (i = 0; i < 8; i++)
        {
            CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
            rc = VMMR3ResumeHyper(pVM, pVCpu);
            if (rc != VINF_EM_DBG_HYPER_STEPPED)
            {
                RTPrintf("\nVMM: failed to step on hyper breakpoint, rc=%Rrc\n", rc);
                return rc;
            }
            RTGCUINTREG Pc = CPUMGetHyperEIP(pVCpu);
            RTPrintf("%RGr=>", Pc);
            if (Pc == OldPc)
            {
                RTPrintf("\nVMM: step failed, PC: %RGr -> %RGr\n", OldPc, Pc);
                return VERR_GENERAL_FAILURE;
            }
            OldPc = Pc;
        }
        RTPrintf("ok\n");

        /* done, clear it */
        if (    RT_FAILURE(DBGFR3BpClear(pVM, iBp0))
            ||  RT_FAILURE(DBGFR3BpClear(pVM, iBp1)))
        {
            RTPrintf("VMM: Failed to clear breakpoints!\n");
            return VERR_GENERAL_FAILURE;
        }
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: NOP failed, rc=%Rrc\n", rc);
            return rc;
        }

        /*
         * Interrupt masking.
         */
        RTPrintf("VMM: interrupt masking...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
        for (i = 0; i < 10000; i++)
        {
            uint64_t StartTick = ASMReadTSC();
            rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_INTERRUPT_MASKING, 0);
            if (rc != VINF_SUCCESS)
            {
                RTPrintf("VMM: Interrupt masking failed: rc=%Rrc\n", rc);
                return rc;
            }
            uint64_t Ticks = ASMReadTSC() - StartTick;
            if (Ticks < (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000))
                RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000);
        }

        /*
         * Interrupt forwarding.
         */
        CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
        CPUMPushHyper(pVCpu, 0);
        CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HYPER_INTERRUPT);
        CPUMPushHyper(pVCpu, pVM->pVMRC);
        CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR));    /* stack frame size */
        CPUMPushHyper(pVCpu, RCPtrEP);                /* what to call */
        Log(("trampoline=%x\n", pVM->vmm.s.pfnCallTrampolineRC));

        /*
         * Switch and do da thing.
         */
        RTPrintf("VMM: interrupt forwarding...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
        i = 0;
        uint64_t tsBegin   = RTTimeNanoTS();
        uint64_t TickStart = ASMReadTSC();
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
        do
        {
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVCpu->vmm.s.iLastGZRc;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            i++;
            if (!(i % 32))
                Log(("VMM: iteration %d, esi=%08x edi=%08x ebx=%08x\n",
                     i, CPUMGetHyperESI(pVCpu), CPUMGetHyperEDI(pVCpu), CPUMGetHyperEBX(pVCpu)));
        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);

        uint64_t TickEnd = ASMReadTSC();
        uint64_t tsEnd   = RTTimeNanoTS();

        uint64_t Elapsed      = tsEnd - tsBegin;
        uint64_t PerIteration = Elapsed / (uint64_t)i;
        uint64_t cTicksElapsed      = TickEnd - TickStart;
        uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;

        RTPrintf("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration);
        Log(("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration));

        /*
         * These forced actions are not necessary for the test and trigger breakpoints too.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);

        /*
         * Profile switching.
         */
        RTPrintf("VMM: profiling switcher...\n");
        Log(("VMM: profiling switcher...\n"));
        uint64_t TickMin = ~0;
        tsBegin   = RTTimeNanoTS();
        TickStart = ASMReadTSC();
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
        for (i = 0; i < 1000000; i++)
        {
            CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
            CPUMPushHyper(pVCpu, 0);
            CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_NOP);
            CPUMPushHyper(pVCpu, pVM->pVMRC);
            CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR));    /* stack frame size */
            CPUMPushHyper(pVCpu, RCPtrEP);                /* what to call */

            uint64_t TickThisStart = ASMReadTSC();
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVCpu->vmm.s.iLastGZRc;
            uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            if (TickThisElapsed < TickMin)
                TickMin = TickThisElapsed;
        }
        TickEnd = ASMReadTSC();
        tsEnd   = RTTimeNanoTS();

        Elapsed      = tsEnd - tsBegin;
        PerIteration = Elapsed / (uint64_t)i;
        cTicksElapsed      = TickEnd - TickStart;
        cTicksPerIteration = cTicksElapsed / (uint64_t)i;

        RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
        Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));

        rc = VINF_SUCCESS;
    }
    else
        AssertMsgFailed(("Failed to resolve VMMGC.gc::VMMGCEntry(), rc=%Rrc\n", rc));
#endif
    return rc;
}