/** * Called by TRPM and CPUM assembly code to make sure the guest state is * ready for execution. * * @param pVM The VM handle. */ DECLASM(void) CPUMRCAssertPreExecutionSanity(PVM pVM) { /* * Check some important assumptions before resuming guest execution. */ PVMCPU pVCpu = VMMGetCpu0(pVM); PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest; uint8_t const uRawCpl = CPUMGetGuestCPL(pVCpu); uint32_t const u32EFlags = CPUMRawGetEFlags(pVCpu); bool const fPatch = PATMIsPatchGCAddr(pVM, pCtx->eip); AssertMsg(pCtx->eflags.Bits.u1IF, ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : "")); AssertMsg(pCtx->eflags.Bits.u2IOPL < RT_MAX(uRawCpl, 1U), ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : "")); if (!(u32EFlags & X86_EFL_VM)) { AssertMsg((u32EFlags & X86_EFL_IF) || fPatch,("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : "")); AssertMsg((pCtx->cs.Sel & X86_SEL_RPL) > 0, ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : "")); AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) > 0, ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : "")); } AssertMsg(CPUMIsGuestInRawMode(pVCpu), ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? 
" patch" : "")); //Log2(("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : "")); }
/**
 * \#GP (General Protection Fault) handler.
 *
 * Dispatches the fault based on what the faulting instruction turns out to
 * be: RDTSC fast path, I/O port access, privileged (ring-0) instructions,
 * ring-3 faults, and finally V86-mode code.
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes are passed execution to host context.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
 * @param   pRegFrame   Pointer to the register frame for the trap.
 */
static int trpmGCTrap0dHandler(PVM pVM, PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
{
    LogFlow(("trpmGCTrap0dHandler: cs:eip=%RTsel:%08RX32 uErr=%RGv\n", pRegFrame->cs.Sel, pRegFrame->eip, pTrpmCpu->uActiveErrorCode));
    PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);

    /*
     * Convert and validate CS.
     */
    STAM_PROFILE_START(&pVM->trpm.s.StatTrap0dDisasm, a);
    RTGCPTR PC;
    uint32_t cBits;
    int rc = SELMValidateAndConvertCSAddrGCTrap(pVCpu, pRegFrame->eflags, pRegFrame->ss.Sel, pRegFrame->cs.Sel,
                                                pRegFrame->rip, &PC, &cBits);
    if (RT_FAILURE(rc))
    {
        /* CS couldn't be resolved to a flat address; let the recompiler sort it out. */
        Log(("trpmGCTrap0dHandler: Failed to convert %RTsel:%RX32 (cpl=%d) - rc=%Rrc !!\n",
             pRegFrame->cs.Sel, pRegFrame->eip, pRegFrame->ss.Sel & X86_SEL_RPL, rc));
        STAM_PROFILE_STOP(&pVM->trpm.s.StatTrap0dDisasm, a);
        return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
    }

    /*
     * Disassemble the instruction.
     */
    DISCPUSTATE Cpu;
    uint32_t cbOp;
    rc = EMInterpretDisasOneEx(pVM, pVCpu, PC, pRegFrame, &Cpu, &cbOp);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("DISCoreOneEx failed to PC=%RGv rc=%Rrc\n", PC, rc));
        STAM_PROFILE_STOP(&pVM->trpm.s.StatTrap0dDisasm, a);
        return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
    }
    STAM_PROFILE_STOP(&pVM->trpm.s.StatTrap0dDisasm, a);

    /*
     * Optimize RDTSC traps.
     * Some guests (like Solaris) are using RDTSC all over the place and
     * will end up trapping a *lot* because of that.
     *
     * Note: it's no longer safe to access the instruction opcode directly due to possible stale code TLB entries
     */
    if (Cpu.pCurInstr->uOpcode == OP_RDTSC)
        return trpmGCTrap0dHandlerRdTsc(pVM, pVCpu, pRegFrame);

    /*
     * Deal with I/O port access.
     * Only instruction-level #GPs (error code 0) can be port I/O faults;
     * a non-zero error code means a segment/descriptor was involved.
     */
    if (   pVCpu->trpm.s.uActiveErrorCode == 0
        && (Cpu.pCurInstr->fOpType & DISOPTYPE_PORTIO))
    {
        VBOXSTRICTRC rcStrict = IOMRCIOPortHandler(pVM, pRegFrame, &Cpu);
        if (IOM_SUCCESS(rcStrict))
            pRegFrame->rip += cbOp;    /* Skip the handled I/O instruction. */
        rc = VBOXSTRICTRC_TODO(rcStrict);
        return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
    }

    /*
     * Deal with Ring-0 (privileged instructions)
     * (Guest ring-0 runs at RPL 1 in raw mode, hence the <= 1 check.)
     */
    if (    (pRegFrame->ss.Sel & X86_SEL_RPL) <= 1
        &&  !pRegFrame->eflags.Bits.u1VM)
        return trpmGCTrap0dHandlerRing0(pVM, pVCpu, pRegFrame, &Cpu, PC);

    /*
     * Deal with Ring-3 GPs.
     */
    if (!pRegFrame->eflags.Bits.u1VM)
        return trpmGCTrap0dHandlerRing3(pVM, pVCpu, pRegFrame, &Cpu, PC);

    /*
     * Deal with v86 code.
     *
     * We always set IOPL to zero which makes e.g. pushf fault in V86
     * mode. The guest might use IOPL=3 and therefore not expect a #GP.
     * Simply fall back to the recompiler to emulate this instruction if
     * that's the case. To get the correct IOPL we must use
     * CPUMRawGetEFlags (the frame's EFLAGS holds the forced value).
     */
    X86EFLAGS eflags;
    eflags.u32 = CPUMRawGetEFlags(pVCpu); /* Get the correct value. */
    Log3(("TRPM #GP V86: cs:eip=%04x:%08x IOPL=%d efl=%08x\n", pRegFrame->cs.Sel, pRegFrame->eip, eflags.Bits.u2IOPL, eflags.u));
    if (eflags.Bits.u2IOPL != 3)
    {
        Assert(eflags.Bits.u2IOPL == 0);

        /* Guest really should see the #GP: forward it. */
        rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xD, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xd);
        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
        return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
    }
    /* Guest IOPL is 3; the fault is an artifact of our forced IOPL=0 — emulate. */
    return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
}
/**
 * \#GP (General Protection Fault) handler for Ring-3.
 *
 * Handles ring-switching instructions (INT3/INT/SYSCALL/SYSENTER/INTO/BOUND)
 * that trap because the shadow IDT/MSRs force a #GP, plus virtualized
 * TSC/PMC reads and IOPL-sensitive STI/CLI. Anything else is a genuine
 * guest fault and gets reflected back.
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes are passed execution to host context.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pRegFrame   Pointer to the register frame for the trap.
 * @param   pCpu        The opcode info.
 * @param   PC          The program counter corresponding to cs:eip in pRegFrame.
 */
static int trpmGCTrap0dHandlerRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, RTGCPTR PC)
{
    int rc;
    Assert(!pRegFrame->eflags.Bits.u1VM);

    switch (pCpu->pCurInstr->uOpcode)
    {
        /*
         * INT3 and INT xx are ring-switching.
         * (The shadow IDT will have set the entries to DPL=0, that's why we're here.)
         */
        case OP_INT3:
            /*
             * Little hack to make the code below not fail:
             * pretend INT3 was decoded as "INT 3" with an imm8 operand.
             */
            pCpu->Param1.fUse  = DISUSE_IMMEDIATE8;
            pCpu->Param1.uValue = 3;
            /* fall thru */
        case OP_INT:
        {
            Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE8);
            rc = TRPMForwardTrap(pVCpu, pRegFrame, (uint32_t)pCpu->Param1.uValue, pCpu->cbInstr,
                                 TRPM_TRAP_NO_ERRORCODE, TRPM_SOFTWARE_INT, 0xd);
            if (RT_SUCCESS(rc) && rc != VINF_EM_RAW_GUEST_TRAP)
                return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);

            /* Could not forward directly; extract the target vector from the
               #GP error code (selector part = IDT vector * 8) and let the
               host context do the ring switch. */
            pVCpu->trpm.s.uActiveVector = (pVCpu->trpm.s.uActiveErrorCode & X86_TRAP_ERR_SEL_MASK) >> X86_TRAP_ERR_SEL_SHIFT;
            pVCpu->trpm.s.enmActiveType = TRPM_SOFTWARE_INT;
            return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_RING_SWITCH_INT, pRegFrame);
        }

        /*
         * SYSCALL, SYSENTER, INTO and BOUND are also ring-switchers.
         */
        case OP_SYSCALL:
        case OP_SYSENTER:
#ifdef PATM_EMULATE_SYSENTER
            rc = PATMSysCall(pVM, pRegFrame, pCpu);
            if (rc == VINF_SUCCESS)
                return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);
            /* else no break; */
#endif
        case OP_BOUND:
        case OP_INTO:
            pVCpu->trpm.s.uActiveVector = UINT32_MAX;
            return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_RING_SWITCH, pRegFrame);

        /*
         * Handle virtualized TSC & PMC reads, just in case.
         */
        case OP_RDTSC:
        case OP_RDPMC:
        {
            rc = EMInterpretInstructionDisasState(pVCpu, pCpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR);
            if (rc == VERR_EM_INTERPRETER)
                rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
            return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
        }

        /*
         * STI and CLI are I/O privileged, i.e. they #GP when IOPL < CPL.
         * If the guest's real IOPL permits them, reschedule to the
         * recompiler to execute the instruction; otherwise it is a
         * genuine guest #GP(0).
         */
        case OP_STI:
        case OP_CLI:
        {
            uint32_t efl = CPUMRawGetEFlags(pVCpu); /* real guest IOPL, not the forced one */
            if (X86_EFL_GET_IOPL(efl) >= (unsigned)(pRegFrame->ss.Sel & X86_SEL_RPL))
            {
                LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> REM\n"));
                return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RESCHEDULE_REM, pRegFrame);
            }
            LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> #GP(0)\n"));
            break;
        }
    }

    /*
     * A genuine guest fault.
     */
    return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_GUEST_TRAP, pRegFrame);
}