Example #1
/**
 * \#PF (Page Fault) handler.
 *
 * Calls PGM which does the actual handling.
 *
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes mean execution is passed back to the host context.
 *
 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
 * @param   pRegFrame   Pointer to the register frame for the trap.
 * @internal
 */
DECLASM(int) TRPMGCTrap0eHandler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
{
    PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);

    LogFlow(("TRPMGC0e: %04x:%08x err=%x cr2=%08x\n", pRegFrame->cs.Sel, pRegFrame->eip, (uint32_t)pVCpu->trpm.s.uActiveErrorCode, (uint32_t)pVCpu->trpm.s.uActiveCR2));

    /*
     * This is all PGM stuff.
     */
    PGMRZDynMapStartAutoSet(pVCpu);
    int rc = PGMTrap0eHandler(pVCpu, pVCpu->trpm.s.uActiveErrorCode, pRegFrame, (RTGCPTR)pVCpu->trpm.s.uActiveCR2);
    switch (rc)
    {
        case VINF_EM_RAW_EMULATE_INSTR:
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
                rc = VINF_PATCH_EMULATE_INSTR;
            break;

        case VINF_EM_RAW_GUEST_TRAP:
            if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
            {
                PGMRZDynMapReleaseAutoSet(pVCpu);
                return VINF_PATM_PATCH_TRAP_PF;
            }

            rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xE, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xe);
            Assert(rc == VINF_EM_RAW_GUEST_TRAP);
            break;

        case VINF_EM_RAW_INTERRUPT_PENDING:
            Assert(TRPMHasTrap(pVCpu));
            /* no break; */
        case VINF_IOM_R3_MMIO_READ:
        case VINF_IOM_R3_MMIO_WRITE:
        case VINF_IOM_R3_MMIO_READ_WRITE:
        case VINF_PATM_HC_MMIO_PATCH_READ:
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
        case VINF_SUCCESS:
        case VINF_EM_RAW_TO_R3:
        case VINF_EM_PENDING_REQUEST:
        case VINF_EM_RAW_TIMER_PENDING:
        case VINF_EM_NO_MEMORY:
        case VINF_CSAM_PENDING_ACTION:
        case VINF_PGM_SYNC_CR3: /** @todo Check this with Sander. */
            break;

        default:
            AssertMsg(PATMIsPatchGCAddr(pVM, pRegFrame->eip) == false, ("Patch address for return code %d. eip=%08x\n", rc, pRegFrame->eip));
            break;
    }
    rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
    Log6(("TRPMGC0e: %Rrc (%04x:%08x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
    return rc;
}
Example #2
/**
 * Load virtualized flags.
 *
 * This function is called from CPUMRawEnter(). It doesn't have to update the
 * IF and IOPL eflags bits; the caller forces IF to be set and IOPL to be 0.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @see     pg_raw
 */
VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
    Assert(!HMIsEnabled(pVM));

    /*
     * Currently we don't bother to check whether PATM is enabled or not.
     * For all cases where it isn't, IOPL will be safe and IF will be set.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));

    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%RRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));

    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtxCore->eflags.u32 = efl;

#ifdef IN_RING3
#ifdef PATM_EMULATE_SYSENTER
    PCPUMCTX pCtx;

    /* Check if the sysenter handler has changed. */
    pCtx = CPUMQueryGuestCtxPtr(pVM);
    if (   pCtx->SysEnter.cs  != 0
        && pCtx->SysEnter.eip != 0
       )
    {
        if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
        {
            pVM->patm.s.pfnSysEnterPatchGC = 0;
            pVM->patm.s.pfnSysEnterGC = 0;

            Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
            pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
            if (pVM->patm.s.pfnSysEnterPatchGC == 0)
            {
                int rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
                if (rc == VINF_SUCCESS)
                {
                    pVM->patm.s.pfnSysEnterPatchGC  = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
                    pVM->patm.s.pfnSysEnterGC       = (RTRCPTR)pCtx->SysEnter.eip;
                    Assert(pVM->patm.s.pfnSysEnterPatchGC);
                }
            }
            else
                pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
        }
    }
    else
    {
        pVM->patm.s.pfnSysEnterPatchGC = 0;
        pVM->patm.s.pfnSysEnterGC = 0;
    }
#endif
#endif
}
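A minimal standalone sketch of the flag-virtualization cycle that PATMRawEnter performs together with PATMRawLeave (Example #8 below): on entry the guest's virtualized eflags bits are parked in a shadow field and IF is forced on for raw-mode execution; on exit the shadow bits are merged back. The type, macro and function names below are illustrative, not the real PATM state.

#include <stdint.h>

#define SKETCH_EFL_IF              UINT32_C(0x00000200)  /* x86 interrupt flag bit */
#define SKETCH_VIRTUAL_FLAGS_MASK  SKETCH_EFL_IF         /* bits we virtualize (illustrative) */

typedef struct SKETCHGCSTATE
{
    uint32_t uVMFlags;  /* shadow copy of the virtualized guest flags */
} SKETCHGCSTATE;

/* Entry: stash the guest's virtualized bits, then force IF on so raw-mode code runs with interrupts enabled. */
static void sketchRawEnter(SKETCHGCSTATE *pState, uint32_t *pEfl)
{
    pState->uVMFlags = *pEfl & SKETCH_VIRTUAL_FLAGS_MASK;
    *pEfl = (*pEfl & ~SKETCH_VIRTUAL_FLAGS_MASK) | SKETCH_EFL_IF;
}

/* Exit: merge the shadowed bits back so the context reflects the guest's own IF again. */
static void sketchRawLeave(SKETCHGCSTATE *pState, uint32_t *pEfl)
{
    *pEfl = (*pEfl & ~SKETCH_VIRTUAL_FLAGS_MASK) | (pState->uVMFlags & SKETCH_VIRTUAL_FLAGS_MASK);
    pState->uVMFlags = SKETCH_EFL_IF;  /* reset the shadow to "interrupts enabled" */
}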
Example #3
/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    CPU context
 */
VMM_INT_DECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    if (PATMIsEnabled(pVM))
    {
        if (PATMIsPatchGCAddr(pVM, pCtxCore->eip))
            return false;
    }
    return !!(pCtxCore->eflags.u32 & X86_EFL_IF);
}
Example #4
/**
 * \#GP (General Protection Fault) handler.
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes mean execution is passed back to the host context.
 *
 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
 * @param   pRegFrame   Pointer to the register frame for the trap.
 * @internal
 */
DECLASM(int) TRPMGCTrap0dHandler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
{
    PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);

    LogFlow(("TRPMGC0d: %04x:%08x err=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, (uint32_t)pVCpu->trpm.s.uActiveErrorCode));

    PGMRZDynMapStartAutoSet(pVCpu);
    int rc = trpmGCTrap0dHandler(pVM, pTrpmCpu, pRegFrame);
    switch (rc)
    {
        case VINF_EM_RAW_GUEST_TRAP:
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
                rc = VINF_PATM_PATCH_TRAP_GP;
            break;

        case VINF_EM_RAW_INTERRUPT_PENDING:
            Assert(TRPMHasTrap(pVCpu));
            /* no break; */
        case VINF_PGM_SYNC_CR3: /** @todo Check this with Sander. */
        case VINF_EM_RAW_EMULATE_INSTR:
        case VINF_IOM_R3_IOPORT_READ:
        case VINF_IOM_R3_IOPORT_WRITE:
        case VINF_IOM_R3_MMIO_WRITE:
        case VINF_IOM_R3_MMIO_READ:
        case VINF_IOM_R3_MMIO_READ_WRITE:
        case VINF_PATM_PATCH_INT3:
        case VINF_EM_NO_MEMORY:
        case VINF_EM_RAW_TO_R3:
        case VINF_EM_RAW_TIMER_PENDING:
        case VINF_EM_PENDING_REQUEST:
        case VINF_EM_HALT:
        case VINF_SUCCESS:
            break;

        default:
            AssertMsg(PATMIsPatchGCAddr(pVM, pRegFrame->eip) == false, ("return code %d\n", rc));
            break;
    }
    Log6(("TRPMGC0d: %Rrc (%04x:%08x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
    return rc;
}
Example #5
/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtx        The guest CPU context.
 * @todo CPUM should wrap this, EM.cpp shouldn't call us.
 */
VMM_INT_DECL(bool) PATMAreInterruptsEnabledByCtx(PVM pVM, PCPUMCTX pCtx)
{
    if (PATMIsEnabled(pVM))
    {
        Assert(!HMIsEnabled(pVM));
        if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            return false;
    }
    return !!(pCtx->eflags.u32 & X86_EFL_IF);
}
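A hedged usage sketch for the helpers above: code that wants to deliver a pending interrupt should ask PATM instead of testing EFLAGS.IF directly, because IF is forced to 1 while patch code runs. The wrapper below is hypothetical.

/* Hypothetical caller (assumes the usual VMM headers are included): deliver a
 * pending interrupt only when the guest would actually accept it.  The helper
 * also rejects the case where EIP currently points into PATM patch memory. */
static bool sketchCanDeliverInterrupt(PVM pVM, PCPUMCTX pCtx)
{
    return PATMAreInterruptsEnabledByCtx(pVM, pCtx);
}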
Example #6
/**
 * Adds a branch pair to the lookup cache of the particular branch instruction.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the VM.
 * @param   pJumpTableGC        Pointer to branch instruction lookup cache
 * @param   pBranchTarget       Original branch target
 * @param   pRelBranchPatch     Relative duplicated function address
 */
int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pJumpTableGC), VERR_INVALID_PARAMETER);

#ifdef IN_RC
    pJumpTable = (PPATCHJUMPTABLE) pJumpTableGC;
#else
    pJumpTable = (PPATCHJUMPTABLE) (pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        uint32_t i;

        for (i = 0; i < pJumpTable->nrSlots; i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC    = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        Assert((pJumpTable->nrSlots & 1) == 0);

        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC    = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}
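The replacement branch above treats the jump table as a small round-robin cache: masking ulInsertPos with nrSlots-1 wraps it without a division, which only behaves as a wrap when the slot count is a power of two. A tiny standalone sketch of that indexing, with illustrative names:

#include <stdint.h>
#include <assert.h>

/* Round-robin slot advance: with a power-of-two slot count, (pos + 1) & (cSlots - 1)
 * wraps back to 0 after the last slot.  E.g. with cSlots = 8: 0,1,...,7,0,1,... */
static uint32_t sketchNextInsertPos(uint32_t uInsertPos, uint32_t cSlots)
{
    assert(cSlots != 0 && (cSlots & (cSlots - 1)) == 0);  /* must be a power of two */
    return (uInsertPos + 1) & (cSlots - 1);
}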
Example #7
/**
 * Called by TRPM and CPUM assembly code to make sure the guest state is
 * ready for execution.
 *
 * @param   pVM                 The VM handle.
 */
DECLASM(void) CPUMRCAssertPreExecutionSanity(PVM pVM)
{
    /*
     * Check some important assumptions before resuming guest execution.
     */
    PVMCPU         pVCpu     = VMMGetCpu0(pVM);
    PCCPUMCTX      pCtx      = &pVCpu->cpum.s.Guest;
    uint8_t  const uRawCpl   = CPUMGetGuestCPL(pVCpu);
    uint32_t const u32EFlags = CPUMRawGetEFlags(pVCpu);
    bool     const fPatch    = PATMIsPatchGCAddr(pVM, pCtx->eip);
    AssertMsg(pCtx->eflags.Bits.u1IF,                ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    AssertMsg(pCtx->eflags.Bits.u2IOPL < RT_MAX(uRawCpl, 1U),
                                                     ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    if (!(u32EFlags & X86_EFL_VM))
    {
        AssertMsg((u32EFlags & X86_EFL_IF) || fPatch,("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
        AssertMsg((pCtx->cs.Sel & X86_SEL_RPL) > 0,  ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
        AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) > 0,  ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    }
    AssertMsg(CPUMIsGuestInRawMode(pVCpu),           ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    //Log2(("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
}
Example #8
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 * @note This is the only place where we are allowed to switch back to guest code (without a special reason such as a trap in patch code)!
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @param   rawRC       Raw mode return code
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtxCore->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC), ("Inconsistent state at %RRv rc=%Rrc\n", pCtxCore->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));

#ifdef IN_RING3
    if (    (efl & X86_EFL_IF)
        &&  fPatchCode
       )
    {
        if (    rawRC < VINF_PATM_LEAVE_RC_FIRST
            ||  rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1)            /* consistent patch instruction state */
            {
                PATMTRANSSTATE  enmState;
                RTRCPTR         pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtxCore->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;   /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n",  pCtxCore->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n",  pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else /* !IN_RING3 */
    AssertMsgFailed(("!IN_RING3"));
#endif  /* !IN_RING3 */

    if (!fPatchCode)
    {
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtxCore->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtxCore->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
Example #9
/**
 * \#GP (General Protection Fault) handler for Ring-0 privileged instructions.
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes mean execution is passed back to the host context.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pRegFrame   Pointer to the register frame for the trap.
 * @param   pCpu        The opcode info.
 * @param   PC          The program counter corresponding to cs:eip in pRegFrame.
 */
static int trpmGCTrap0dHandlerRing0(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, RTGCPTR PC)
{
    int     rc;

    /*
     * Try handle it here, if not return to HC and emulate/interpret it there.
     */
    switch (pCpu->pCurInstr->uOpcode)
    {
        case OP_INT3:
            /*
             * Little hack to make the code below not fail
             */
            pCpu->Param1.fUse  = DISUSE_IMMEDIATE8;
            pCpu->Param1.uValue = 3;
            /* fallthru */
        case OP_INT:
        {
            Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE8);
            Assert(!(PATMIsPatchGCAddr(pVM, PC)));
            if (pCpu->Param1.uValue == 3)
            {
                /* Int 3 replacement patch? */
                if (PATMRCHandleInt3PatchTrap(pVM, pRegFrame) == VINF_SUCCESS)
                {
                    AssertFailed();
                    return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);
                }
            }
            rc = TRPMForwardTrap(pVCpu, pRegFrame, (uint32_t)pCpu->Param1.uValue, pCpu->cbInstr, TRPM_TRAP_NO_ERRORCODE, TRPM_SOFTWARE_INT, 0xd);
            if (RT_SUCCESS(rc) && rc != VINF_EM_RAW_GUEST_TRAP)
                return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);

            pVCpu->trpm.s.uActiveVector = (pVCpu->trpm.s.uActiveErrorCode & X86_TRAP_ERR_SEL_MASK) >> X86_TRAP_ERR_SEL_SHIFT;
            pVCpu->trpm.s.enmActiveType = TRPM_SOFTWARE_INT;
            return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_RING_SWITCH_INT, pRegFrame);
        }

#ifdef PATM_EMULATE_SYSENTER
        case OP_SYSEXIT:
        case OP_SYSRET:
            rc = PATMSysCall(pVM, pRegFrame, pCpu);
            return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
#endif

        case OP_HLT:
            /* If it's in patch code, defer to ring-3. */
            if (PATMIsPatchGCAddr(pVM, PC))
                break;

            pRegFrame->eip += pCpu->cbInstr;
            return trpmGCExitTrap(pVM, pVCpu, VINF_EM_HALT, pRegFrame);


        /*
         * These instructions are used by PATM and CSAM for finding
         * dangerous non-trapping instructions. Thus, since all
         * scanning and patching is done in ring-3 we'll have to
         * return to ring-3 on the first encounter of these instructions.
         */
        case OP_MOV_CR:
        case OP_MOV_DR:
            /* We can safely emulate control/debug register move instructions in patched code. */
            if (    !PATMIsPatchGCAddr(pVM, PC)
                &&  !CSAMIsKnownDangerousInstr(pVM, PC))
                break;
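            /* else: in patch code or a known dangerous instruction; fall thru and interpret it below. */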
        case OP_INVLPG:
        case OP_LLDT:
        case OP_STI:
        case OP_RDTSC:  /* just in case */
        case OP_RDPMC:
        case OP_CLTS:
        case OP_WBINVD: /* nop */
        case OP_RDMSR:
        case OP_WRMSR:
        {
            rc = EMInterpretInstructionDisasState(pVCpu, pCpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR);
            if (rc == VERR_EM_INTERPRETER)
                rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
            return trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
        }
    }

    return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EXCEPTION_PRIVILEGED, pRegFrame);
}
Example #10
/**
 * Trap handler for illegal opcode fault (\#UD).
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap,
 *          other codes mean execution is passed back to the host context.
 *
 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
 * @param   pRegFrame   Pointer to the register frame for the trap.
 * @internal
 */
DECLASM(int) TRPMGCTrap06Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
{
    LogFlow(("TRPMGC06: %04x:%08x efl=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, pRegFrame->eflags.u32));
    PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
    int     rc;
    PGMRZDynMapStartAutoSet(pVCpu);

    if (CPUMGetGuestCPL(pVCpu) == 0)
    {
        /*
         * Decode the instruction.
         */
        RTGCPTR PC;
        rc = SELMValidateAndConvertCSAddr(pVCpu, pRegFrame->eflags, pRegFrame->ss.Sel, pRegFrame->cs.Sel, &pRegFrame->cs,
                                          pRegFrame->rip, &PC);
        if (RT_FAILURE(rc))
        {
            Log(("TRPMGCTrap06Handler: Failed to convert %RTsel:%RX32 (cpl=%d) - rc=%Rrc !!\n", pRegFrame->cs.Sel, pRegFrame->eip, pRegFrame->ss.Sel & X86_SEL_RPL, rc));
            rc = trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_GUEST_TRAP, pRegFrame);
            Log6(("TRPMGC06: %Rrc (%04x:%08x) (SELM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
            return rc;
        }

        DISCPUSTATE Cpu;
        uint32_t    cbOp;
        rc = EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)PC, pRegFrame, &Cpu, &cbOp);
        if (RT_FAILURE(rc))
        {
            rc = trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
            Log6(("TRPMGC06: %Rrc (%04x:%08x) (EM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
            return rc;
        }

        /*
         * UD2 in a patch?
         * Note! PATMGCHandleIllegalInstrTrap doesn't always return.
         */
        if (    Cpu.pCurInstr->uOpcode == OP_ILLUD2
            &&  PATMIsPatchGCAddr(pVM, pRegFrame->eip))
        {
            LogFlow(("TRPMGCTrap06Handler: -> PATMRCHandleIllegalInstrTrap\n"));
            rc = PATMRCHandleIllegalInstrTrap(pVM, pRegFrame);
            /** @todo  These tests are completely unnecessary, should just follow the
             *         flow and return at the end of the function. */
            if (    rc == VINF_SUCCESS
                ||  rc == VINF_EM_RAW_EMULATE_INSTR
                ||  rc == VINF_PATM_DUPLICATE_FUNCTION
                ||  rc == VINF_PATM_PENDING_IRQ_AFTER_IRET
                ||  rc == VINF_EM_RESCHEDULE)
            {
                rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
                Log6(("TRPMGC06: %Rrc (%04x:%08x) (PATM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
                return rc;
            }
        }
        /*
         * Speed up dtrace and don't entrust invalid lock sequences to the recompiler.
         */
        else if (Cpu.fPrefix & DISPREFIX_LOCK)
        {
            Log(("TRPMGCTrap06Handler: pc=%08x op=%d\n", pRegFrame->eip, Cpu.pCurInstr->uOpcode));
#ifdef DTRACE_EXPERIMENT /** @todo fix/remove/permanent-enable this when DIS/PATM handles invalid lock sequences. */
            Assert(!PATMIsPatchGCAddr(pVM, pRegFrame->eip));
            rc = TRPMForwardTrap(pVCpu, pRegFrame, 0x6, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_TRAP, 0x6);
            Assert(rc == VINF_EM_RAW_GUEST_TRAP);
#else
            rc = VINF_EM_RAW_EMULATE_INSTR;
#endif
        }
        /*
         * Handle MONITOR - it causes an #UD exception instead of #GP when not executed in ring 0.
         */
        else if (Cpu.pCurInstr->uOpcode == OP_MONITOR)
        {
            LogFlow(("TRPMGCTrap06Handler: -> EMInterpretInstructionCPU\n"));
            rc = EMInterpretInstructionDisasState(pVCpu, &Cpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR);
        }
        /* Never generate a raw trap here; it might be an instruction that requires emulation. */
        else
        {
            LogFlow(("TRPMGCTrap06Handler: -> VINF_EM_RAW_EMULATE_INSTR\n"));
            rc = VINF_EM_RAW_EMULATE_INSTR;
        }
    }
    else
    {
        LogFlow(("TRPMGCTrap06Handler: -> TRPMForwardTrap\n"));
        rc = TRPMForwardTrap(pVCpu, pRegFrame, 0x6, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_TRAP, 0x6);
        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
    }

    rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
    Log6(("TRPMGC06: %Rrc (%04x:%08x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
    return rc;
}
Example #11
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));

    /*
     * Check if it's a dummy write that doesn't change anything.
     */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
    AssertRC(rc);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try avoid needing to go to ring-3 (same as csamRCCodePageWritePfHandler).
     */
    uint32_t     const cpl            = CPUMGetGuestCPL(pVCpu);
    bool         const fPatchCode     = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE       pPATMGCState   = PATMGetGCState(pVM);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompiler's translation block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, then we
     * must *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv (cpl=%d)\n", GCPtr, cpl));

    /*
     * If user code is modifying one of our monitored pages, then we can safely
     * write to it as it's no longer being used for supervisor code.
     */
    if (cpl != 3)
    {
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */, (RTRCPTR)GCPtr, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return rcStrict;
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases.  So,
     * take down both alternatives.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages]  = (RTRCPTR)GCPtr;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtr));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}
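The "dummy write" short-circuit at the top of csamCodePageWriteHandler first checks that the whole write stays on a single page (so comparing against pvPtr is valid) and then compares the new bytes against the current contents. A minimal standalone version of that test, assuming a 4 KiB page size and using illustrative names:

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#define SKETCH_PAGE_SIZE        4096u
#define SKETCH_PAGE_ADDRESS(a)  ((uintptr_t)(a) & ~(uintptr_t)(SKETCH_PAGE_SIZE - 1))

/* True when the write stays within one page and would not change the current
 * contents, i.e. a write monitor can let it through without further work. */
static bool sketchIsDummyWrite(const void *pvDst, const void *pvSrc, size_t cb)
{
    return cb > 0
        && SKETCH_PAGE_ADDRESS(pvDst) == SKETCH_PAGE_ADDRESS((uintptr_t)pvDst + cb - 1)
        && memcmp(pvDst, pvSrc, cb) == 0;
}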
Example #12
/**
 * \#PF Handler callback for virtual access handler ranges. (CSAM self-modifying
 * code monitor)
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode   CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) CSAMGCCodePageWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    PPATMGCSTATE pPATMGCState;
    bool         fPatchCode = PATMIsPatchGCAddr(pVM, pRegFrame->eip);
    int          rc;
    PVMCPU       pVCpu = VMMGetCpu0(pVM);
    NOREF(uErrorCode);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);

#ifdef VBOX_WITH_REM
    /* Flush the recompiler's translation block cache as the guest seems to be modifying instructions. */
    REMFlushTBs(pVM);
#endif

    pPATMGCState = PATMGetGCState(pVM);
    Assert(pPATMGCState);

    Assert(pPATMGCState->fPIF || fPatchCode);
    /** When patch code is executing instructions that must complete, then we must *never* interrupt it. */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("CSAMGCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", pRegFrame->eip));
        /** @note There are cases when pages previously used for code are now used for stack; patch generated code will fault (pushf).
         *  Just make the page r/w and continue.
         */
        /*
         * Make this particular page R/W.
         */
        rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
        AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
        ASMInvalidatePage((void *)(uintptr_t)pvFault);
        return VINF_SUCCESS;
    }

    uint32_t cpl;

    if (pRegFrame->eflags.Bits.u1VM)
        cpl = 3;
    else
        cpl = (pRegFrame->ss.Sel & X86_SEL_RPL);

    Log(("CSAMGCCodePageWriteHandler: code page write at %RGv original address %RGv (cpl=%d)\n", pvFault, (RTGCUINTPTR)pvRange + offRange, cpl));

    /* If user code is modifying one of our monitored pages, then we can safely make it r/w as it's no longer being used for supervisor code. */
    if (cpl != 3)
    {
        rc = PATMRCHandleWriteToPatchPage(pVM, pRegFrame, (RTRCPTR)((RTRCUINTPTR)pvRange + offRange), 4 /** @todo */);
        if (rc == VINF_SUCCESS)
            return rc;
        if (rc == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rc == VERR_PATCH_NOT_FOUND);
    }

    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);

    /* Note that pvFault might be a different address in case of aliases. So use pvRange + offset instead. */
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages] = (RTRCPTR)((RTRCUINTPTR)pvRange + offRange);
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)((RTRCUINTPTR)pvRange + offRange);
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Make this particular page R/W. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
     */
    Log(("CSAMGCCodePageWriteHandler: enabled r/w for page %RGv\n", pvFault));
    rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
    AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
    ASMInvalidatePage((void *)(uintptr_t)pvFault);

    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_SUCCESS;
}