RT_C_DECLS_END


/**
 * Exits the trap, called when exiting a trap handler.
 *
 * Will reset the trap if it's not a guest trap or the trap
 * is already handled. Will process resume guest FFs.
 *
 * @returns rc, can be adjusted if its VINF_SUCCESS or something really bad
 *          happened.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rc          The VBox status code to return.
 * @param   pRegFrame   Pointer to the register frame for the trap.
 *
 * @remarks This must not be used for hypervisor traps, only guest traps.
 */
static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame)
{
    uint32_t uOldActiveVector = pVCpu->trpm.s.uActiveVector;
    NOREF(uOldActiveVector);

    /* Reset trap?  Keep the vector around for guest traps / ring switch ints
       so the caller (and the interrupt forwarding below) can still see it. */
    if (    rc != VINF_EM_RAW_GUEST_TRAP
        &&  rc != VINF_EM_RAW_RING_SWITCH_INT)
        pVCpu->trpm.s.uActiveVector = UINT32_MAX;

#ifdef VBOX_HIGH_RES_TIMERS_HACK
    /*
     * We should poll the timers occasionally.
     * We must *NOT* do this too frequently as it adds a significant overhead
     * and it'll kill us if the trap load is high. (See @bugref{1354}.)
     * (The heuristic is not very intelligent, we should really check trap
     * frequency etc. here, but alas, we lack any such information atm.)
     */
    static unsigned s_iTimerPoll = 0;
    if (rc == VINF_SUCCESS)
    {
        /* Only poll every 16th successful exit to keep the overhead down. */
        if (!(++s_iTimerPoll & 0xf))
        {
            TMTimerPollVoid(pVM, pVCpu);
            /* Note: second label fixed; it used to repeat VM_FF_TM_VIRTUAL_SYNC
               although the value logged is the VMCPU_FF_TIMER state. */
            Log2(("TMTimerPoll at %08RX32 - VM_FF_TM_VIRTUAL_SYNC=%d VMCPU_FF_TIMER=%d\n", pRegFrame->eip,
                  VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)));
        }
    }
    else
        s_iTimerPoll = 0;
#endif

    /* Clear pending inhibit interrupt state if required. (necessary for dispatching interrupts later on) */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        Log2(("VM_FF_INHIBIT_INTERRUPTS at %08RX32 successor %RGv\n", pRegFrame->eip, EMGetInhibitInterruptsPC(pVCpu)));
        if (pRegFrame->eip != EMGetInhibitInterruptsPC(pVCpu))
        {
            /** @note we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
             *  Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
             *  force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
             *  break the guest. Sounds very unlikely, but such timing sensitive problem are not as rare as you might think.
             */
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }
    }

    /*
     * Pending resume-guest-FF?
     * Or pending (A)PIC interrupt? Windows XP will crash if we delay APIC interrupts.
     */
    if (    rc == VINF_SUCCESS
        &&  (   VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_DMA)
             || VMCPU_FF_ISPENDING(pVCpu,   VMCPU_FF_TIMER | VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                          | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                          | VMCPU_FF_PDM_CRITSECT)
            )
       )
    {
        /* The out of memory condition naturally outranks the others. */
        if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)))
            rc = VINF_EM_NO_MEMORY;
        /* Pending Ring-3 action. */
        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_PDM_CRITSECT))
        {
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
            rc = VINF_EM_RAW_TO_R3;
        }
        /* Pending timer action. */
        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
            rc = VINF_EM_RAW_TIMER_PENDING;
        /* The Virtual Sync clock has stopped. */
        else if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
            rc = VINF_EM_RAW_TO_R3;
        /* DMA work pending? */
        else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
            rc = VINF_EM_RAW_TO_R3;
        /* Pending request packets might contain actions that need immediate
           attention, such as pending hardware interrupts. */
        else if (   VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
                 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
            rc = VINF_EM_PENDING_REQUEST;
        /* Pending interrupt: dispatch it. */
        else if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
                 && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                 &&  PATMAreInterruptsEnabledByCtxCore(pVM, pRegFrame)
                )
        {
            uint8_t u8Interrupt;
            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
            Log(("trpmGCExitTrap: u8Interrupt=%d (%#x) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
            AssertFatalMsgRC(rc, ("PDMGetInterrupt failed with %Rrc\n", rc));
            rc = TRPMForwardTrap(pVCpu, pRegFrame, (uint32_t)u8Interrupt, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_HARDWARE_INT, uOldActiveVector);
            /* can't return if successful */
            Assert(rc != VINF_SUCCESS);

            /* Stop the profile counter that was started in TRPMGCHandlersA.asm */
            Assert(uOldActiveVector <= 16);
            STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[uOldActiveVector], a);

            /* Assert the trap and go to the recompiler to dispatch it. */
            TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);

            STAM_PROFILE_ADV_START(&pVM->trpm.s.aStatGCTraps[uOldActiveVector], a);
            rc = VINF_EM_RAW_INTERRUPT_PENDING;
        }
        /*
         * Try sync CR3?
         */
        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        {
#if 1
            PGMRZDynMapReleaseAutoSet(pVCpu);
            PGMRZDynMapStartAutoSet(pVCpu);
            rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
#else
            rc = VINF_PGM_SYNC_CR3;
#endif
        }
    }

    /* On success, the guest must be resumable: IF set and either IOPL below
       CPL (so STI/CLI traps to us) or V86 mode. */
    AssertMsg(     rc != VINF_SUCCESS
              ||  (   pRegFrame->eflags.Bits.u1IF
                   && (   pRegFrame->eflags.Bits.u2IOPL < (unsigned)(pRegFrame->ss.Sel & X86_SEL_RPL)
                       || pRegFrame->eflags.Bits.u1VM))
              , ("rc=%Rrc\neflags=%RX32 ss=%RTsel IOPL=%d\n", rc, pRegFrame->eflags.u32, pRegFrame->ss.Sel, pRegFrame->eflags.Bits.u2IOPL));
    PGMRZDynMapReleaseAutoSet(pVCpu);
    return rc;
}
/**
 * EMT worker for DBGFR3PagingDumpEx.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   idCpu           The current CPU ID.
 * @param   fFlags          The flags, DBGFPGDMP_FLAGS_XXX.  Valid.
 * @param   pcr3            The CR3 to use (unless we're getting the current
 *                          state, see @a fFlags).
 * @param   pu64FirstAddr   The first address.
 * @param   pu64LastAddr    The last address.
 * @param   cMaxDepth       The depth.
 * @param   pHlp            The output callbacks.
 */
static DECLCALLBACK(int) dbgfR3PagingDumpEx(PVM pVM, VMCPUID idCpu, uint32_t fFlags, uint64_t *pcr3, uint64_t *pu64FirstAddr,
                                            uint64_t *pu64LastAddr, uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /*
     * Both contexts requested?  Recurse once for each with the other
     * context flag masked off; a shadow failure takes precedence.
     */
    if (   (fFlags & DBGFPGDMP_FLAGS_GUEST)
        && (fFlags & DBGFPGDMP_FLAGS_SHADOW))
    {
        int rcShw = dbgfR3PagingDumpEx(pVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_GUEST,  pcr3, pu64FirstAddr, pu64LastAddr,
                                       cMaxDepth, pHlp);
        int rcGst = dbgfR3PagingDumpEx(pVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_SHADOW, pcr3, pu64FirstAddr, pu64LastAddr,
                                       cMaxDepth, pHlp);
        return RT_FAILURE(rcShw) ? rcShw : rcGst;
    }

    /*
     * Fetch the current CR3 and/or translate the current paging mode into
     * DBGFPGDMP_FLAGS_XXX bits when the caller asked for the current state.
     */
    uint64_t u64Cr3 = *pcr3;
    if (fFlags & (DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE))
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        {
            /* Shadow context: hypervisor CR3 and the shadow paging mode. */
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
            {
                fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetShadowMode(pVCpu));
                if (fFlags & DBGFPGDMP_FLAGS_NP)
                {
                    /* Nested paging: the second level uses the host's mode. */
                    fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetHostMode(pVM));
                    if (HC_ARCH_BITS == 32 && CPUMIsGuestInLongMode(pVCpu))
                        fFlags |= DBGFPGDMP_FLAGS_LME;
                }
            }
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                u64Cr3 = PGMGetHyperCR3(pVCpu);
        }
        else
        {
            /* Guest context: lift the mode bits straight out of CR4/EFER;
               the flag values are defined to match the hardware bits. */
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
            {
                AssertCompile(DBGFPGDMP_FLAGS_PSE == X86_CR4_PSE);
                AssertCompile(DBGFPGDMP_FLAGS_PAE == X86_CR4_PAE);
                AssertCompile(DBGFPGDMP_FLAGS_LME == MSR_K6_EFER_LME);
                AssertCompile(DBGFPGDMP_FLAGS_NXE == MSR_K6_EFER_NXE);
                fFlags |= CPUMGetGuestCR4(pVCpu)  & (X86_CR4_PSE | X86_CR4_PAE);
                fFlags |= CPUMGetGuestEFER(pVCpu) & (MSR_K6_EFER_LME | MSR_K6_EFER_NXE);
            }
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                u64Cr3 = CPUMGetGuestCR3(pVCpu);
        }
    }
    fFlags &= ~(DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_CURRENT_CR3);

    /*
     * PGM does the heavy lifting.
     */
    if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        return PGMR3DumpHierarchyShw(pVM, u64Cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
    return PGMR3DumpHierarchyGst(pVM, u64Cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
}