Example #1
/**
 * Load virtualized flags.
 *
 * This function is called from CPUMRawEnter(). It doesn't have to update the
 * IF and IOPL eflags bits; the caller will force them to 1 and 0 respectively.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @see     pg_raw
 */
VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
    Assert(!HMIsEnabled(pVM));

    /*
     * Currently we don't bother to check whether PATM is enabled or not.
     * For all cases where it isn't, IOPL will be safe and IF will be set.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv)\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));

    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%RRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));

    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtxCore->eflags.u32 = efl;

#ifdef IN_RING3
#ifdef PATM_EMULATE_SYSENTER
    PCPUMCTX pCtx;

    /* Check if the sysenter handler has changed. */
    pCtx = CPUMQueryGuestCtxPtr(pVM);
    if (   pCtx->SysEnter.cs  != 0
        && pCtx->SysEnter.eip != 0
       )
    {
        if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
        {
            pVM->patm.s.pfnSysEnterPatchGC = 0;
            pVM->patm.s.pfnSysEnterGC = 0;

            Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
            pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
            if (pVM->patm.s.pfnSysEnterPatchGC == 0)
            {
                int rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
                if (rc == VINF_SUCCESS)
                {
                    pVM->patm.s.pfnSysEnterPatchGC  = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
                    pVM->patm.s.pfnSysEnterGC       = (RTRCPTR)pCtx->SysEnter.eip;
                    Assert(pVM->patm.s.pfnSysEnterPatchGC);
                }
            }
            else
                pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
        }
    }
    else
    {
        pVM->patm.s.pfnSysEnterPatchGC = 0;
        pVM->patm.s.pfnSysEnterGC = 0;
    }
#endif
#endif
}
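
The doc comment above leaves the final IF and IOPL values to the caller (CPUMRawEnter()). A minimal sketch of that contract, using only identifiers already shown above; the helper name rawModeEnterSketch() is hypothetical and not part of the VirtualBox sources, and X86_EFL_IOPL is assumed to be the usual two-bit IOPL mask:

/* Hypothetical caller sketch: save the guest's virtualized flags via
 * PATMRawEnter(), then enforce the bits the comment says the caller owns. */
static void rawModeEnterSketch(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    /* Save and virtualize the guest flags first. */
    PATMRawEnter(pVM, pCtxCore);

    /* Per the contract above, the caller forces IF = 1 and IOPL = 0 in the
     * flags that will actually be loaded for raw-mode execution. */
    pCtxCore->eflags.u32 |= X86_EFL_IF;
    pCtxCore->eflags.u32 &= ~X86_EFL_IOPL;
}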
Example #2
/**
 * Check if this page needs to be analysed by CSAM.
 *
 * This function should only be called for supervisor pages and
 * only when CSAM is enabled. Leaving these selection criteria
 * to the caller simplifies the interface (PTE passing).
 *
 * Note that the page has not yet been synced, so the TLB trick
 * (which wasn't ever active anyway) cannot be applied.
 *
 * @returns true if the page should be marked not present because
 *          CSAM needs to scan it.
 * @returns false if the page was already scanned.
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       GC pointer of page
 */
VMM_INT_DECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr)
{
    if (!CSAMIsEnabled(pVM))
        return false;

    if (CSAMIsPageScanned(pVM, (RTRCPTR)GCPtr))
    {
        /* Already checked! */
        STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrKnownPages), 1);
        return false;
    }
    STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrPageNP), 1);
    return true;
}
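
A hypothetical usage sketch (not from the VirtualBox sources): a shadow page-table sync path could use the result to decide whether to leave the supervisor page not present, so that the first access traps and gives CSAM a chance to scan it. The helper name csamSketchPagePresent() and the fPresent flag are illustrative only:

static bool csamSketchPagePresent(PVM pVM, RTRCUINTPTR GCPtrPage)
{
    bool fPresent = true;
    if (CSAMDoesPageNeedScanning(pVM, GCPtrPage))
        fPresent = false;   /* keep it not present; the resulting #PF lets CSAM scan the page */
    return fPresent;
}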
Example #3
/**
 * Mark a page as scanned/not scanned
 *
 * @note We always mark the page as scanned, even if we haven't completely done so.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPage       GC page address (not necessarily aligned)
 * @param   fScanned    Mark as scanned or not scanned
 *
 */
VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
{
    int pgdir, bit;
    uintptr_t page;

#ifdef LOG_ENABLED
    if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
       Log(("CSAMMarkPage %RRv\n", pPage));
#endif

    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    if (!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
    {
        STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
        int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
        if (RT_FAILURE(rc))
        {
            Log(("MMHyperAlloc failed with %Rrc\n", rc));
            return rc;
        }
#ifdef IN_RC
        pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void*))pVM->csam.s.pPDBitmapGC[pgdir]);
        if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
        {
            Log(("MMHyperHC2GC failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
            return rc;
        }
#else
        pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
        if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
        {
            Log(("MMHyperHC2GC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
            return rc;
        }
#endif
    }
    if (fScanned)
        ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
    else
        ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);

    return VINF_SUCCESS;
}
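
For reference, a sketch of the same pgdir/bit split used by CSAMMarkPage(), assuming the standard x86 page constants (X86_PAGE_4M_SHIFT = 22, X86_PAGE_4K_SHIFT = 12); csamSketchPageIndex() is an illustrative helper, not part of the CSAM API:

static void csamSketchPageIndex(uintptr_t uPage, int *pPgDir, int *pBit)
{
    /* One bitmap chunk per 4 MB region, one bit per 4 KB page within it.
     * Example: uPage = 0xC0401000 -> pgdir = 0x301, bit = 1. */
    *pPgDir = uPage >> X86_PAGE_4M_SHIFT;
    *pBit   = (uPage & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;
}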
Example #4
/**
 * Check if the instruction is patched as a duplicated function
 *
 * @returns Patch record, or NULL if the instruction is not patched as a duplicated/callable function.
 * @param   pVM         Pointer to the VM.
 * @param   pInstrGC    Guest context pointer to the instruction
 *
 */
PPATMPATCHREC patmQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC)
{
    PPATMPATCHREC pRec;

    AssertCompile(sizeof(AVLOU32KEY) == sizeof(pInstrGC));
    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
    if (    pRec
        && (pRec->patch.uState == PATCH_ENABLED)
        && (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
       )
        return pRec;
    return NULL;
}
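
A hypothetical caller sketch (illustrative only, not taken from the PATM sources): before redirecting a call instruction to a duplicated function, check whether the target already has an enabled, callable patch. The helper name patmSketchTargetHasFunctionPatch() is an assumption:

static bool patmSketchTargetHasFunctionPatch(PVM pVM, RTRCPTR pTargetGC)
{
    PPATMPATCHREC pRec = patmQueryFunctionPatch(pVM, pTargetGC);
    if (pRec)
    {
        Log(("Call target %RRv already has a callable patch (state=%d)\n", pTargetGC, pRec->patch.uState));
        return true;
    }
    return false;   /* no duplicated-function patch for this target */
}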
Example #5
/**
 * Checks if the int 3 was caused by a patched instruction
 *
 * @returns true if the int 3 was caused by a patched instruction, false otherwise.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pInstrGC    Instruction pointer
 * @param   pOpcode     Original instruction opcode (out, optional)
 * @param   pSize       Original instruction size (out, optional)
 */
VMM_INT_DECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
{
    PPATMPATCHREC pRec;

    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
    if (    pRec
        && (pRec->patch.uState == PATCH_ENABLED)
        && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
       )
    {
        if (pOpcode) *pOpcode = pRec->patch.opcode;
        if (pSize)   *pSize   = pRec->patch.cbPrivInstr;
        return true;
    }
    return false;
}
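
A hypothetical trap handler sketch (not from the PATM sources): on an int 3 trap, PATMIsInt3Patch() distinguishes a breakpoint that PATM planted over a replaced instruction from one the guest executed itself. patmSketchHandleInt3() and its surrounding logic are illustrative:

static bool patmSketchHandleInt3(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    uint32_t uOpcode = 0;
    uint32_t cbInstr = 0;
    if (PATMIsInt3Patch(pVM, (RTRCPTR)pCtxCore->eip, &uOpcode, &cbInstr))
    {
        /* PATM replaced the original instruction; a real handler would emulate
         * uOpcode and advance eip by cbInstr here. */
        return true;
    }
    /* Genuine guest breakpoint; reflect the trap to the guest. */
    return false;
}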
Example #6
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 * @note This is the only place where we are allowed to switch back to guest code without a special reason such as a trap in patch code!
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @param   rawRC       Raw mode return code
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtxCore->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC), ("Inconsistent state at %RRv rc=%Rrc\n", pCtxCore->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));

#ifdef IN_RING3
    if (    (efl & X86_EFL_IF)
        &&  fPatchCode
       )
    {
        if (    rawRC < VINF_PATM_LEAVE_RC_FIRST
            ||  rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1)            /* consistent patch instruction state */
            {
                PATMTRANSSTATE  enmState;
                RTRCPTR         pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtxCore->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;   /* Reset this pointer; this is safe, as otherwise the state would have been PATMTRANS_INHIBITIRQ. */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n",  pCtxCore->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n",  pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else /* !IN_RING3 */
    AssertMsgFailed(("!IN_RING3"));
#endif  /* !IN_RING3 */

    if (!fPatchCode)
    {
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtxCore->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtxCore->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
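
Putting the two entry points together, a hypothetical sketch of how the enter/leave pair brackets a raw-mode execution run; rawModeRunSketch() and the elided execution step are illustrative, not part of the VirtualBox sources:

static void rawModeRunSketch(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    PATMRawEnter(pVM, pCtxCore);          /* save and virtualize IF/IOPL */

    int rawRC = VINF_SUCCESS;             /* stands in for the actual raw-mode execution */
    /* ... run guest code, ending up with a raw-mode return code in rawRC ... */

    PATMRawLeave(pVM, pCtxCore, rawRC);   /* restore flags, possibly switch back to guest code */
}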