/**
 * Lazily populates the specified address space.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   hAlias      The alias handle (DBGF_AS_XXX).
 */
static void dbgfR3AsLazyPopulate(PUVM pUVM, RTDBGAS hAlias)
{
    DBGF_AS_DB_LOCK_WRITE(pUVM);
    uintptr_t iAlias = DBGF_AS_ALIAS_2_INDEX(hAlias);
    if (!pUVM->dbgf.s.afAsAliasPopuplated[iAlias])
    {
        RTDBGAS hDbgAs = pUVM->dbgf.s.ahAsAliases[iAlias];
        if (hAlias == DBGF_AS_R0 && pUVM->pVM)
            PDMR3LdrEnumModules(pUVM->pVM, dbgfR3AsLazyPopulateR0Callback, hDbgAs);
        else if (hAlias == DBGF_AS_RC && pUVM->pVM && !HMIsEnabled(pUVM->pVM))
        {
            LogRel(("DBGF: Lazy init of RC address space\n"));
            PDMR3LdrEnumModules(pUVM->pVM, dbgfR3AsLazyPopulateRCCallback, hDbgAs);
#ifdef VBOX_WITH_RAW_MODE
            PATMR3DbgPopulateAddrSpace(pUVM->pVM, hDbgAs);
#endif
        }
        else if (hAlias == DBGF_AS_PHYS && pUVM->pVM)
        {
            /** @todo Lazy load pc and vga bios symbols or the EFI stuff. */
        }

        pUVM->dbgf.s.afAsAliasPopuplated[iAlias] = true;
    }
    DBGF_AS_DB_UNLOCK_WRITE(pUVM);
}
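A short usage sketch for context: the lazy population above is triggered the
first time an alias handle is resolved. Assuming DBGFR3AsResolveAndRetain is
the public entry point that lands in this worker (an assumption based on the
code above), a caller would look roughly like this:

    /* Hedged sketch: first resolve of an alias populates it lazily. */
    RTDBGAS hAs = DBGFR3AsResolveAndRetain(pUVM, DBGF_AS_R0);
    if (hAs != NIL_RTDBGAS)
    {
        /* ... perform symbol lookups against hAs ... */
        RTDbgAsRelease(hAs);
    }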
Example #2
File: MMAll.cpp  Project: miguelinux/vbox
/**
 * Calculate the host context ring-0 address of an offset into the HMA memory chunk.
 *
 * @returns the host context ring-0 address.
 * @param   pVM         The cross context VM structure.
 * @param   pLookup     The HMA lookup record.
 * @param   off         The offset into the HMA memory chunk.
 */
DECLINLINE(RTR0PTR) mmHyperLookupCalcR0(PVM pVM, PMMLOOKUPHYPER pLookup, uint32_t off)
{
    switch (pLookup->enmType)
    {
        case MMLOOKUPHYPERTYPE_LOCKED:
            if (pLookup->u.Locked.pvR0)
                return (RTR0PTR)((RTR0UINTPTR)pLookup->u.Locked.pvR0 + off);
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
            AssertMsg(!HMIsEnabled(pVM), ("%s\n", R3STRING(pLookup->pszDesc)));
#else
            AssertMsgFailed(("%s\n", R3STRING(pLookup->pszDesc))); NOREF(pVM);
#endif
            return NIL_RTR0PTR;

        case MMLOOKUPHYPERTYPE_HCPHYS:
            if (pLookup->u.HCPhys.pvR0)
                return (RTR0PTR)((RTR0UINTPTR)pLookup->u.HCPhys.pvR0 + off);
            AssertMsgFailed(("%s\n", R3STRING(pLookup->pszDesc)));
            return NIL_RTR0PTR;

        default:
            AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
            return NIL_RTR0PTR;
    }
}
Example #3
/**
 * Register an access handler for a virtual range.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   enmType         Handler type. Any of the PGMVIRTHANDLERTYPE_* enums.
 * @param   GCPtr           Start address.
 * @param   GCPtrLast       Last address (inclusive).
 * @param   pfnInvalidateR3 The R3 invalidate callback (can be 0)
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pszHandlerRC    The RC handler symbol name.
 * @param   pszModRC        The RC handler module.
 * @param   pszDesc         Pointer to description string. This must not be freed.
 */
VMMR3DECL(int) PGMR3HandlerVirtualRegister(PVM pVM, PGMVIRTHANDLERTYPE enmType, RTGCPTR GCPtr, RTGCPTR GCPtrLast,
                                           PFNPGMR3VIRTINVALIDATE pfnInvalidateR3,
                                           PFNPGMR3VIRTHANDLER pfnHandlerR3,
                                           const char *pszHandlerRC, const char *pszModRC,
                                           const char *pszDesc)
{
    LogFlow(("PGMR3HandlerVirtualRegisterEx: enmType=%d GCPtr=%RGv GCPtrLast=%RGv pszHandlerRC=%p:{%s} pszModRC=%p:{%s} pszDesc=%s\n",
             enmType, GCPtr, GCPtrLast, pszHandlerRC, pszHandlerRC, pszModRC, pszModRC, pszDesc));

    /* Not supported/relevant for VT-x and AMD-V. */
    if (HMIsEnabled(pVM))
        return VERR_NOT_IMPLEMENTED;

    /*
     * Validate input.
     */
    if (!pszModRC)
        pszModRC = VMMGC_MAIN_MODULE_NAME;
    if (!pszModRC || !*pszModRC || !pszHandlerRC || !*pszHandlerRC)
    {
        AssertMsgFailed(("pfnHandlerGC or/and pszModRC is missing\n"));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Resolve the GC handler.
     */
    RTRCPTR pfnHandlerRC;
    int rc = PDMR3LdrGetSymbolRCLazy(pVM, pszModRC, NULL /*pszSearchPath*/, pszHandlerRC, &pfnHandlerRC);
    if (RT_SUCCESS(rc))
        return PGMR3HandlerVirtualRegisterEx(pVM, enmType, GCPtr, GCPtrLast, pfnInvalidateR3, pfnHandlerR3, pfnHandlerRC, pszDesc);

    AssertMsgFailed(("Failed to resolve %s.%s, rc=%Rrc.\n", pszModRC, pszHandlerRC, rc));
    return rc;
}
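For context, a hedged registration sketch. The page address, R3 callback and
RC symbol name are hypothetical, and PGMVIRTHANDLERTYPE_WRITE is assumed to be
one of the PGMVIRTHANDLERTYPE_* values mentioned above:

    /* Hypothetical sketch: watch writes to a single guest page (raw-mode
       only; under HM this returns VERR_NOT_IMPLEMENTED as shown above). */
    int rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE,
                                         GCPtrPage,                 /* hypothetical start */
                                         GCPtrPage + PAGE_SIZE - 1, /* last address, inclusive */
                                         NULL,                      /* no invalidate callback */
                                         myVirtWriteHandlerR3,      /* hypothetical R3 handler */
                                         "MyVirtWriteHandlerRC",    /* hypothetical RC symbol */
                                         NULL,                      /* default RC module */
                                         "my write range");
    AssertRC(rc);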
Example #4
File: PATMAll.cpp  Project: jeppeter/vbox
/**
 * Updates the EFLAGS.
 * This is a worker for CPUMRawSetEFlags().
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtx        The guest cpu context.
 * @param   efl         The new EFLAGS value.
 */
VMM_INT_DECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTX pCtx, uint32_t efl)
{
    Assert(!HMIsEnabled(pVM));
    pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtx->eflags.u32 = efl;
}
Example #5
File: PATMAll.cpp  Project: jeppeter/vbox
/**
 * Get the EFLAGS.
 * This is a worker for CPUMRawGetEFlags().
 *
 * @returns The eflags.
 * @param   pVM         The cross context VM structure.
 * @param   pCtx        The guest cpu context.
 */
VMM_INT_DECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTX pCtx)
{
    Assert(!HMIsEnabled(pVM));
    uint32_t efl = pCtx->eflags.u32;
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
    return efl;
}
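The two workers above are inverses: the virtualized bits are parked in
uVMFlags by the setter and merged back by the getter. A standalone analog in
plain C, using just X86_EFL_IF (bit 9) as a stand-in for the full
PATM_VIRTUAL_FLAGS_MASK (an assumption for the sketch):

    #include <assert.h>
    #include <stdint.h>

    #define EFL_IF 0x00000200u      /* X86_EFL_IF, bit 9 */

    static uint32_t g_uVMFlags;     /* stands in for pGCState->uVMFlags */

    static void rawSetEFlags(uint32_t *puLive, uint32_t efl)
    {
        g_uVMFlags = efl & EFL_IF;  /* park the guest's virtualized bits */
        efl &= ~EFL_IF;
        efl |= EFL_IF;              /* force IF on in the live value */
        *puLive = efl;
    }

    static uint32_t rawGetEFlags(uint32_t uLive)
    {
        return (uLive & ~EFL_IF) | (g_uVMFlags & EFL_IF);
    }

    int main(void)
    {
        uint32_t uLive, efl = 0x00000046u;  /* guest eflags with IF clear */
        rawSetEFlags(&uLive, efl);
        assert(uLive & EFL_IF);             /* hardware always sees IF set */
        assert(rawGetEFlags(uLive) == efl); /* the guest view round-trips */
        return 0;
    }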
/**
 * Notifies VMM that paravirtualized hypercalls are now disabled.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMM_INT_DECL(void) VMMHypercallsDisable(PVMCPU pVCpu)
{
    /* If there is anything to do for raw-mode, do it here. */
#ifndef IN_RC
    if (HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        HMHypercallsDisable(pVCpu);
#endif
}
Example #7
/**
 * Load virtualized flags.
 *
 * This function is called from CPUMRawEnter(). It doesn't have to update the
 * IF and IOPL eflags bits; the caller will force IF to be set and IOPL to 0.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @see     pg_raw
 */
VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
    Assert(!HMIsEnabled(pVM));

    /*
     * Currently we don't bother to check whether PATM is enabled or not.
     * For all cases where it isn't, IOPL will be safe and IF will be set.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));

    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%RRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));

    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtxCore->eflags.u32 = efl;

#ifdef IN_RING3
#ifdef PATM_EMULATE_SYSENTER
    PCPUMCTX pCtx;
    int      rc;

    /* Check if the sysenter handler has changed. */
    pCtx = CPUMQueryGuestCtxPtr(pVM);
    if (   pCtx->SysEnter.cs  != 0
        && pCtx->SysEnter.eip != 0
       )
    {
        if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
        {
            pVM->patm.s.pfnSysEnterPatchGC = 0;
            pVM->patm.s.pfnSysEnterGC = 0;

            Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
            pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
            if (pVM->patm.s.pfnSysEnterPatchGC == 0)
            {
                rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
                if (rc == VINF_SUCCESS)
                {
                    pVM->patm.s.pfnSysEnterPatchGC  = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
                    pVM->patm.s.pfnSysEnterGC       = (RTRCPTR)pCtx->SysEnter.eip;
                    Assert(pVM->patm.s.pfnSysEnterPatchGC);
                }
            }
            else
                pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
        }
    }
    else
    {
        pVM->patm.s.pfnSysEnterPatchGC = 0;
        pVM->patm.s.pfnSysEnterGC = 0;
    }
#endif
#endif
}
Example #8
File: DBGFMem.cpp  Project: jeppeter/vbox
/**
 * Worker for DBGFR3SelQueryInfo that calls into SELM.
 */
static DECLCALLBACK(int) dbgfR3SelQueryInfo(PUVM pUVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * Make the query.
     */
    int rc;
    if (!(fFlags & DBGFSELQI_FLAGS_DT_SHADOW))
    {
        PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
        VMCPU_ASSERT_EMT(pVCpu);
        rc = SELMR3GetSelectorInfo(pVM, pVCpu, Sel, pSelInfo);

        /*
         * 64-bit mode HACKS for making data and stack selectors wide open when
         * queried. This is voodoo magic.
         */
        if (fFlags & DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)
        {
            /* Expand 64-bit data and stack selectors. The check is a bit bogus... */
            if (    RT_SUCCESS(rc)
                &&  (pSelInfo->fFlags & (  DBGFSELINFO_FLAGS_LONG_MODE | DBGFSELINFO_FLAGS_REAL_MODE | DBGFSELINFO_FLAGS_PROT_MODE
                                         | DBGFSELINFO_FLAGS_GATE      | DBGFSELINFO_FLAGS_HYPER
                                         | DBGFSELINFO_FLAGS_INVALID   | DBGFSELINFO_FLAGS_NOT_PRESENT))
                     == DBGFSELINFO_FLAGS_LONG_MODE
                &&  pSelInfo->cbLimit != ~(RTGCPTR)0
                &&  CPUMIsGuestIn64BitCode(pVCpu) )
            {
                pSelInfo->GCPtrBase = 0;
                pSelInfo->cbLimit   = ~(RTGCPTR)0;
            }
            else if (   Sel == 0
                     && CPUMIsGuestIn64BitCode(pVCpu))
            {
                pSelInfo->GCPtrBase = 0;
                pSelInfo->cbLimit   = ~(RTGCPTR)0;
                pSelInfo->Sel       = 0;
                pSelInfo->SelGate   = 0;
                pSelInfo->fFlags    = DBGFSELINFO_FLAGS_LONG_MODE;
                pSelInfo->u.Raw64.Gen.u1Present  = 1;
                pSelInfo->u.Raw64.Gen.u1Long     = 1;
                pSelInfo->u.Raw64.Gen.u1DescType = 1;
                rc = VINF_SUCCESS;
            }
        }
    }
    else
    {
        if (HMIsEnabled(pVM))
            rc = VERR_INVALID_STATE;
        else
            rc = SELMR3GetShadowSelectorInfo(pVM, Sel, pSelInfo);
    }
    return rc;
}
Example #9
File: PATMAll.cpp  Project: jeppeter/vbox
/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtx        The guest CPU context.
 * @todo CPUM should wrap this, EM.cpp shouldn't call us.
 */
VMM_INT_DECL(bool) PATMAreInterruptsEnabledByCtx(PVM pVM, PCPUMCTX pCtx)
{
    if (PATMIsEnabled(pVM))
    {
        Assert(!HMIsEnabled(pVM));
        if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            return false;
    }
    return !!(pCtx->eflags.u32 & X86_EFL_IF);
}
Example #10
File: PATMAll.cpp  Project: jeppeter/vbox
/**
 * Set parameters for pending MMIO patch operation
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      MMIO physical address.
 * @param   pCachedData RC pointer to cached data.
 */
VMM_INT_DECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData)
{
    if (!HMIsEnabled(pVM))
    {
        pVM->patm.s.mmio.GCPhys = GCPhys;
        pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData;
    }

    return VINF_SUCCESS;
}
Example #11
/**
 * Remember a possible code page for later inspection
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       GC pointer of page
 */
VMM_INT_DECL(void) CSAMMarkPossibleCodePage(PVM pVM, RTRCPTR GCPtr)
{
    Assert(!HMIsEnabled(pVM));
    if (pVM->csam.s.cPossibleCodePages < RT_ELEMENTS(pVM->csam.s.pvPossibleCodePage))
    {
        pVM->csam.s.pvPossibleCodePage[pVM->csam.s.cPossibleCodePages++] = (RTRCPTR)GCPtr;
        VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_PENDING_ACTION);
    }
    return;
}
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address.
 * @param   pVM     Pointer to the VM.
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @remarks Don't use when in long mode.
 */
VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
{
    Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM)));    /* DON'T USE! */
    Assert(!HMIsEnabled(pVM));

    /** @todo check the limit. */
    X86DESC    Desc;
    if (!(Sel & X86_SEL_LDT))
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
}

/**
 * Finalizes Hyper-V GIM provider initialization, resolving the hypercall
 * handler symbols for the RC (raw-mode only) and R0 contexts.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) gimR3HvInitFinalize(PVM pVM)
{
    pVM->gim.s.pfnHypercallR3 = &GIMHvHypercall;
    if (!HMIsEnabled(pVM))
    {
        int rc = PDMR3LdrGetSymbolRC(pVM, NULL /* pszModule */, GIMHV_HYPERCALL, &pVM->gim.s.pfnHypercallRC);
        AssertRCReturn(rc, rc);
    }
    int rc = PDMR3LdrGetSymbolR0(pVM, NULL /* pszModule */, GIMHV_HYPERCALL, &pVM->gim.s.pfnHypercallR0);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
Example #14
/**
 * Mark a page as scanned/not scanned
 *
 * @note: we always mark it as scanned, even if we haven't completely done so
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPage       GC page address (not necessarily aligned)
 * @param   fScanned    Mark as scanned or not scanned
 *
 */
VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
{
    int pgdir, bit;
    uintptr_t page;

#ifdef LOG_ENABLED
    if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
       Log(("CSAMMarkPage %RRv\n", pPage));
#endif

    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    if(!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
    {
        STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
        int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
        if (RT_FAILURE(rc))
        {
            Log(("MMHyperAlloc failed with %Rrc\n", rc));
            return rc;
        }
#ifdef IN_RC
        pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void*))pVM->csam.s.pPDBitmapGC[pgdir]);
        if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
        {
            Log(("MMHyperHC2GC failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
            return rc;
        }
#else
        pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
        if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
        {
            Log(("MMHyperHC2GC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
            return rc;
        }
#endif
    }
    if(fScanned)
        ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
    else
        ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);

    return VINF_SUCCESS;
}
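The pgdir/bit arithmetic above is easy to verify in isolation. A standalone
sketch, assuming the standard x86 constants (X86_PAGE_4M_SHIFT = 22,
X86_PAGE_4K_SHIFT = 12, X86_PAGE_4M_OFFSET_MASK = 0x3fffff):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t page  = 0x80123456;                     /* example guest address */
        int       pgdir = (int)(page >> 22);              /* which 4MB chunk (bitmap index) */
        int       bit   = (int)((page & 0x3fffff) >> 12); /* which 4KB page inside the chunk */

        printf("pgdir=%d bit=%d\n", pgdir, bit);          /* prints: pgdir=512 bit=291 */
        return 0;
    }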
Example #15
File: PATMAll.cpp  Project: jeppeter/vbox
/**
 * Reads patch code.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PATCH_NOT_FOUND if the request is entirely outside the patch
 *          code.
 *
 * @param   pVM            The cross context VM structure.
 * @param   GCPtrPatchCode  The patch address to start reading at.
 * @param   pvDst           Where to return the patch code.
 * @param   cbToRead        Number of bytes to read.
 * @param   pcbRead         Where to return the actual number of bytes we've
 *                          read. Optional.
 */
VMM_INT_DECL(int) PATMReadPatchCode(PVM pVM, RTGCPTR GCPtrPatchCode, void *pvDst, size_t cbToRead, size_t *pcbRead)
{
    /* Shortcut. */
    if (!PATMIsEnabled(pVM))
        return VERR_PATCH_NOT_FOUND;
    Assert(!HMIsEnabled(pVM));

    /*
     * Check patch code and patch helper code.  We assume the requested bytes
     * are not in either.
     */
    RTGCPTR offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pPatchMemGC;
    if (offPatchCode >= pVM->patm.s.cbPatchMem)
    {
        offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pbPatchHelpersRC;
        if (offPatchCode >= pVM->patm.s.cbPatchHelpers)
            return VERR_PATCH_NOT_FOUND;

        /*
         * Patch helper memory.
         */
        uint32_t cbMaxRead = pVM->patm.s.cbPatchHelpers - (uint32_t)offPatchCode;
        if (cbToRead > cbMaxRead)
            cbToRead = cbMaxRead;
#ifdef IN_RC
        memcpy(pvDst, pVM->patm.s.pbPatchHelpersRC + (uint32_t)offPatchCode, cbToRead);
#else
        memcpy(pvDst, pVM->patm.s.pbPatchHelpersR3 + (uint32_t)offPatchCode, cbToRead);
#endif
    }
    else
    {
        /*
         * Patch memory.
         */
        uint32_t cbMaxRead = pVM->patm.s.cbPatchMem - (uint32_t)offPatchCode;
        if (cbToRead > cbMaxRead)
            cbToRead = cbMaxRead;
#ifdef IN_RC
        memcpy(pvDst, pVM->patm.s.pPatchMemGC + (uint32_t)offPatchCode, cbToRead);
#else
        memcpy(pvDst, pVM->patm.s.pPatchMemHC + (uint32_t)offPatchCode, cbToRead);
#endif
    }

    if (pcbRead)
        *pcbRead = cbToRead;
    return VINF_SUCCESS;
}
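A hedged caller sketch; the buffer and the GCPtrPatch variable are
hypothetical:

    /* Hypothetical caller: copy up to 16 bytes of patch code. */
    uint8_t abInstr[16];
    size_t  cbRead = 0;
    int rc = PATMReadPatchCode(pVM, GCPtrPatch, abInstr, sizeof(abInstr), &cbRead);
    if (RT_SUCCESS(rc))
        Log(("Read %u patch byte(s) at %RGv\n", (unsigned)cbRead, GCPtrPatch));
    else
        Assert(rc == VERR_PATCH_NOT_FOUND); /* entirely outside patch memory */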
Example #16
/**
 * Check if this page was previously scanned by CSAM
 *
 * @returns true -> scanned, false -> not scanned
 * @param   pVM         Pointer to the VM.
 * @param   pPage       GC page address
 */
VMM_INT_DECL(bool) CSAMIsPageScanned(PVM pVM, RTRCPTR pPage)
{
    int pgdir, bit;
    uintptr_t page;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    return pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir] && ASMBitTest((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
}
Example #17
/**
 * Check if this page needs to be analysed by CSAM.
 *
 * This function should only be called for supervisor pages and
 * only when CSAM is enabled. Leaving these selection criteria
 * to the caller simplifies the interface (PTE passing).
 *
 * Note that the page has not yet been synced, so the TLB trick
 * (which wasn't ever active anyway) cannot be applied.
 *
 * @returns true if the page should be marked not present because
 *          CSAM wants to scan it.
 * @returns false if the page was already scanned.
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       GC pointer of page
 */
VMM_INT_DECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr)
{
    if (!CSAMIsEnabled(pVM))
        return false;
    Assert(!HMIsEnabled(pVM));

    if(CSAMIsPageScanned(pVM, (RTRCPTR)GCPtr))
    {
        /* Already checked! */
        STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrKnownPages), 1);
        return false;
    }
    STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrPageNP), 1);
    return true;
}
Example #18
/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   enmType         Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pvUserR3        User argument to the R3 handler.
 * @param   pszModR0        The R0 handler module. NULL means the default R0 module.
 * @param   pszHandlerR0    The R0 handler symbol name.
 * @param   pvUserR0        User argument to the R0 handler.
 * @param   pszModRC        The RC handler module. NULL means the default RC
 *                          module.
 * @param   pszHandlerRC    The RC handler symbol name.
 * @param   pvUserRC        User argument to the RC handler. Values less than
 *                          0x10000 will not be relocated.
 * @param   pszDesc         Pointer to description string. This must not be freed.
 */
VMMR3DECL(int) PGMR3HandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                            PFNPGMR3PHYSHANDLER pfnHandlerR3, void *pvUserR3,
                                            const char *pszModR0, const char *pszHandlerR0, RTR0PTR pvUserR0,
                                            const char *pszModRC, const char *pszHandlerRC, RTRCPTR pvUserRC, const char *pszDesc)
{
    LogFlow(("PGMR3HandlerPhysicalRegister: enmType=%d GCPhys=%RGp GCPhysLast=%RGp pfnHandlerR3=%RHv pvUserHC=%RHv pszModR0=%s pszHandlerR0=%s pvUserR0=%RHv pszModRC=%s pszHandlerRC=%s pvUser=%RRv pszDesc=%s\n",
             enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pszModR0, pszHandlerR0, pvUserR0, pszModRC, pszHandlerRC, pvUserRC, pszDesc));

    /*
     * Validate input.
     */
    if (!pszModRC)
        pszModRC = VMMGC_MAIN_MODULE_NAME;
    if (!pszModR0)
        pszModR0 = VMMR0_MAIN_MODULE_NAME;
    if (!pszHandlerR0)
        pszHandlerR0 = "pgmPhysHandlerRedirectToHC";
    if (!pszHandlerRC)
        pszHandlerRC = "pgmPhysHandlerRedirectToHC";
    AssertPtrReturn(pfnHandlerR3, VERR_INVALID_POINTER);
    AssertPtrReturn(pszHandlerR0, VERR_INVALID_POINTER);
    AssertPtrReturn(pszHandlerRC, VERR_INVALID_POINTER);

    /*
     * Resolve the R0 handler.
     */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0 = NIL_RTR0PTR;
    int rc = VINF_SUCCESS;
    rc = PDMR3LdrGetSymbolR0Lazy(pVM, pszModR0, NULL /*pszSearchPath*/, pszHandlerR0, &pfnHandlerR0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Resolve the GC handler.
         */
        RTRCPTR pfnHandlerRC = NIL_RTRCPTR;
        if (!HMIsEnabled(pVM))
            rc = PDMR3LdrGetSymbolRCLazy(pVM, pszModRC, NULL /*pszSearchPath*/, pszHandlerRC, &pfnHandlerRC);
        if (RT_SUCCESS(rc))
            return PGMHandlerPhysicalRegisterEx(pVM, enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3,
                                                pfnHandlerR0, pvUserR0, pfnHandlerRC, pvUserRC, pszDesc);

        AssertMsgFailed(("Failed to resolve %s.%s, rc=%Rrc.\n", pszModRC, pszHandlerRC, rc));
    }
    else
        AssertMsgFailed(("Failed to resolve %s.%s, rc=%Rrc.\n", pszModR0, pszHandlerR0, rc));

    return rc;
}
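Again a hedged sketch; the device pointer and R3 handler are hypothetical.
Passing NULL for the R0/RC module and symbol names selects the
pgmPhysHandlerRedirectToHC defaults substituted above:

    /* Hypothetical sketch: cover one page of MMIO with the default
       R0/RC redirect handlers. */
    int rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_MMIO,
                                          GCPhysMmio,                 /* hypothetical start */
                                          GCPhysMmio + PAGE_SIZE - 1, /* last address, inclusive */
                                          myMmioHandlerR3, pMyDevice, /* hypothetical R3 handler */
                                          NULL, NULL, NIL_RTR0PTR,    /* default R0 handler */
                                          NULL, NULL, NIL_RTRCPTR,    /* default RC handler */
                                          "My MMIO range");
    AssertRC(rc);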
Example #19
File: PATMAll.cpp  Project: jeppeter/vbox
/**
 * Checks if the int 3 was caused by a patched instruction
 *
 * @returns true if the int 3 was caused by a patched instruction, false if not.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pInstrGC    Instruction pointer
 * @param   pOpcode     Original instruction opcode (out, optional)
 * @param   pSize       Original instruction size (out, optional)
 */
VMM_INT_DECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
{
    PPATMPATCHREC pRec;
    Assert(!HMIsEnabled(pVM));

    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
    if (    pRec
        && (pRec->patch.uState == PATCH_ENABLED)
        && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
       )
    {
        if (pOpcode) *pOpcode = pRec->patch.opcode;
        if (pSize)   *pSize   = pRec->patch.cbPrivInstr;
        return true;
    }
    return false;
}
Example #20
/** @interface_method_impl{PDMPICHLPR3,pfnGetRCHelpers} */
static DECLCALLBACK(PCPDMPICHLPRC) pdmR3PicHlp_GetRCHelpers(PPDMDEVINS pDevIns)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR3;
    VM_ASSERT_EMT(pVM);

    RTRCPTR pRCHelpers = NIL_RTRCPTR;
    if (!HMIsEnabled(pVM))
    {
        int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_pdmRCPicHlp", &pRCHelpers);
        AssertReleaseRC(rc);
        AssertRelease(pRCHelpers);
    }

    LogFlow(("pdmR3PicHlp_GetRCHelpers: caller='%s'/%d: returns %RRv\n",
             pDevIns->pReg->szName, pDevIns->iInstance, pRCHelpers));
    return pRCHelpers;
}
Example #21
/**
 * Check if this page needs to be analysed by CSAM
 *
 * @returns VBox status code
 * @param   pVM         Pointer to the VM.
 * @param   pvFault     Fault address
 */
VMM_INT_DECL(int) CSAMExecFault(PVM pVM, RTRCPTR pvFault)
{
    Assert(!HMIsEnabled(pVM));
    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;

    LogFlow(("CSAMGCExecFault: for page %08X scanned=%d\n", pvFault, CSAMIsPageScanned(pVM, pvFault)));

    if (CSAMIsPageScanned(pVM, pvFault))
    {
        // Already checked!
        STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesGC, 1);
        return VINF_SUCCESS;
    }

    STAM_COUNTER_ADD(&pVM->csam.s.StatNrTraps, 1);
    VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_SCAN_PAGE);
    return VINF_CSAM_PENDING_ACTION;
}
Example #22
/**
 * Check if we've scanned this instruction before. If true, then we can emulate
 * it instead of returning to ring 3.
 *
 * Using a simple array here as there are generally few mov crx instructions and
 * tree lookup is likely to be more expensive (it would also have to be offset based).
 *
 * @returns boolean
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       GC pointer of page table entry
 */
VMM_INT_DECL(bool) CSAMIsKnownDangerousInstr(PVM pVM, RTRCUINTPTR GCPtr)
{
    Assert(!HMIsEnabled(pVM));

    for (uint32_t i=0;i<pVM->csam.s.cDangerousInstr;i++)
    {
        if (pVM->csam.s.aDangerousInstr[i] == (RTRCPTR)GCPtr)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheHit);
            return true;
        }
    }
    /* Record that we're about to process it in ring 3. */
    pVM->csam.s.aDangerousInstr[pVM->csam.s.iDangerousInstr++] = (RTRCPTR)GCPtr;
    pVM->csam.s.iDangerousInstr &= CSAM_MAX_DANGR_INSTR_MASK;

    if (++pVM->csam.s.cDangerousInstr > CSAM_MAX_DANGR_INSTR)
        pVM->csam.s.cDangerousInstr = CSAM_MAX_DANGR_INSTR;

    STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheMiss);
    return false;
}
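The cache management above (masked write cursor plus saturating count) can be
lifted out into a standalone analog. A sketch with an assumed size, since
CSAM_MAX_DANGR_INSTR itself is not shown in this snippet:

    #include <stdbool.h>
    #include <stdint.h>

    #define CACHE_SIZE 16               /* assumed; must be a power of two */
    #define CACHE_MASK (CACHE_SIZE - 1)

    static uint32_t g_aCache[CACHE_SIZE];
    static uint32_t g_iCursor;          /* masked write cursor, wraps around */
    static uint32_t g_cEntries;         /* saturating element count */

    static bool cacheSeenBefore(uint32_t uKey)
    {
        for (uint32_t i = 0; i < g_cEntries; i++)
            if (g_aCache[i] == uKey)
                return true;            /* hit: safe to emulate directly */

        g_aCache[g_iCursor++] = uKey;   /* miss: remember for next time */
        g_iCursor &= CACHE_MASK;
        if (++g_cEntries > CACHE_SIZE)
            g_cEntries = CACHE_SIZE;    /* saturate once the array is full */
        return false;
    }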
Example #23
/**
 * Turn on code scanning
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(int) CSAMEnableScanning(PVM pVM)
{
    AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
    pVM->fCSAMEnabled = true;
    return VINF_SUCCESS;
}
Example #24
File: VMMTests.cpp  Project: apaka/vbox
/* execute the switch. */
VMMR3DECL(int) VMMDoHmTest(PVM pVM)
{
    uint32_t i;
    int      rc;
    PCPUMCTX pHyperCtx, pGuestCtx;
    RTGCPHYS CR3Phys = 0x0; /* fake address */
    PVMCPU   pVCpu = &pVM->aCpus[0];

    if (!HMIsEnabled(pVM))
    {
        RTPrintf("VMM: Hardware accelerated test not available!\n");
        return VERR_ACCESS_DENIED;
    }

#ifdef VBOX_WITH_RAW_MODE
    /*
     * These forced actions are not necessary for the test and trigger breakpoints too.
     */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
#endif

    /* Enable mapping of the hypervisor into the shadow page table. */
    uint32_t cb;
    rc = PGMR3MappingsSize(pVM, &cb);
    AssertRCReturn(rc, rc);

    /* Pretend the mappings are now fixed; to force a refresh of the reserved PDEs. */
    rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
    AssertRCReturn(rc, rc);

    pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

    pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
    pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFSXR | X86_CR4_OSXMMEEXCPT;
    PGMChangeMode(pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
    PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
    VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
    VM_FF_CLEAR(pVM, VM_FF_REQUEST);

    /*
     * Setup stack for calling VMMGCEntry().
     */
    RTRCPTR RCPtrEP;
    rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
    if (RT_SUCCESS(rc))
    {
        RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);

        pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

        /* Fill in hidden selector registers for the hypervisor state. */
        SYNC_SEL(pHyperCtx, cs);
        SYNC_SEL(pHyperCtx, ds);
        SYNC_SEL(pHyperCtx, es);
        SYNC_SEL(pHyperCtx, fs);
        SYNC_SEL(pHyperCtx, gs);
        SYNC_SEL(pHyperCtx, ss);
        SYNC_SEL(pHyperCtx, tr);

        /*
         * Profile switching.
         */
        RTPrintf("VMM: profiling switcher...\n");
        Log(("VMM: profiling switcher...\n"));
        uint64_t TickMin = ~0;
        uint64_t tsBegin = RTTimeNanoTS();
        uint64_t TickStart = ASMReadTSC();
        for (i = 0; i < 1000000; i++)
        {
            CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
            CPUMPushHyper(pVCpu, 0);
            CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HM_NOP);
            CPUMPushHyper(pVCpu, pVM->pVMRC);
            CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR));    /* stack frame size */
            CPUMPushHyper(pVCpu, RCPtrEP);                /* what to call */

            pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
            pGuestCtx = CPUMQueryGuestCtxPtr(pVCpu);

            /* Copy the hypervisor context to make sure we have a valid guest context. */
            *pGuestCtx = *pHyperCtx;
            pGuestCtx->cr3 = CR3Phys;

            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
            VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);

            uint64_t TickThisStart = ASMReadTSC();
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, 0);
            uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: R0 returned fatal %Rrc in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            if (TickThisElapsed < TickMin)
                TickMin = TickThisElapsed;
        }
        uint64_t TickEnd = ASMReadTSC();
        uint64_t tsEnd = RTTimeNanoTS();

        uint64_t Elapsed = tsEnd - tsBegin;
        uint64_t PerIteration = Elapsed / (uint64_t)i;
        uint64_t cTicksElapsed = TickEnd - TickStart;
        uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;

        RTPrintf("VMM: %8d cycles     in %11llu ns (%11lld ticks),  %10llu ns/iteration (%11lld ticks)  Min %11lld ticks\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
        Log(("VMM: %8d cycles     in %11llu ns (%11lld ticks),  %10llu ns/iteration (%11lld ticks)  Min %11lld ticks\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));

        rc = VINF_SUCCESS;
    }
    else
        AssertMsgFailed(("Failed to resolved VMMGC.gc::VMMGCEntry(), rc=%Rrc\n", rc));

    return rc;
}
Example #25
File: PATMAll.cpp  Project: jeppeter/vbox
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 * @note Only here we are allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtx        The cpu context.
 * @param   rawRC       Raw mode return code
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTX pCtx, int rawRC)
{
    Assert(!HMIsEnabled(pVM));
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtx->eip);

    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    uint32_t efl = pCtx->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtx->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC), ("Inconsistent state at %RRv rc=%Rrc\n", pCtx->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtx->eip, rawRC));

#ifdef IN_RING3
    if (   (efl & X86_EFL_IF)
        && fPatchCode)
    {
        if (   rawRC < VINF_PATM_LEAVE_RC_FIRST
            || rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1)            /* consistent patch instruction state */
            {
                PATMTRANSSTATE  enmState;
                RTRCPTR         pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtx->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtx->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;   /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n",  pCtx->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n",  pCtx->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else  /* !IN_RING3 */
    /*
     * When leaving raw-mode state while IN_RC, it's generally for interpreting
     * a single original guest instruction.
     */
    AssertMsg(!fPatchCode, ("eip=%RRv\n", pCtx->eip));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtx->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtx->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
Example #26
File: PATMAll.cpp  Project: jeppeter/vbox
/**
 * Returns the guest context (RC) pointer to the PATM GC state structure.
 *
 * @returns RC pointer to the PATM GC state, or NIL_RTRCPTR if HM is enabled.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(RCPTRTYPE(PPATMGCSTATE)) PATMGetGCState(PVM pVM)
{
    AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
    return pVM->patm.s.pGCStateGC;
}
Example #27
/**
 * VMMR3Init worker that initiates the switcher code (aka core code).
 *
 * This is core per VM code which might need fixups and/or for ease of use are
 * put on linear contiguous backing.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int vmmR3SwitcherInit(PVM pVM)
{
#if !defined(VBOX_WITH_RAW_MODE) && (HC_ARCH_BITS == 64)
    return VINF_SUCCESS;
#else

    /*
     * Calc the size.
     */
    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
    unsigned cbCoreCode = 0;
    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
    {
        pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
        if (pSwitcher)
        {
            AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
            cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
        }
    }

    /*
     * Allocate contiguous pages for switchers and deal with
     * conflicts in the intermediate mapping of the code.
     */
    pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
    pVM->vmm.s.pvCoreCodeR3 = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    int rc = VERR_NO_MEMORY;
    if (pVM->vmm.s.pvCoreCodeR3)
    {
        rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
        if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
        {
            /* try more allocations - Solaris, Linux.  */
            const unsigned cTries = 8234;
            struct VMMInitBadTry
            {
                RTR0PTR  pvR0;
                void    *pvR3;
                RTHCPHYS HCPhys;
                RTUINT   cb;
            } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
            AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
            unsigned i = 0;
            do
            {
                paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                i++;
                pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
                pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
                pVM->vmm.s.pvCoreCodeR3 = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
                if (!pVM->vmm.s.pvCoreCodeR3)
                    break;
                rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
            } while (   rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
                     && i < cTries - 1);

            /* cleanup */
            if (RT_FAILURE(rc))
            {
                paBadTries[i].pvR3   = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0   = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                paBadTries[i].cb     = pVM->vmm.s.cbCoreCode;
                i++;
                LogRel(("VMM: Failed to allocated and map core code: rc=%Rrc\n", rc));
            }
            while (i-- > 0)
            {
                LogRel(("VMM: Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%RHp\n",
                        i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
                SUPR3ContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
            }
            RTMemTmpFree(paBadTries);
        }
    }
Example #28
File: PATMAll.cpp  Project: jeppeter/vbox
/**
 * Emulate sysenter, sysexit and syscall instructions
 *
 * @returns VBox status
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtx        The relevant guest cpu context.
 * @param   pCpu        Disassembly state.
 */
VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTX pCtx, PDISCPUSTATE pCpu)
{
    Assert(CPUMQueryGuestCtxPtr(VMMGetCpu0(pVM)) == pCtx);
    AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);

    if (pCpu->pCurInstr->uOpcode == OP_SYSENTER)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  pCtx->eflags.Bits.u1VM
            ||  (pCtx->cs.Sel & X86_SEL_RPL) != 3
            ||  pVM->patm.s.pfnSysEnterPatchGC == 0
            ||  pVM->patm.s.pfnSysEnterGC != (RTRCPTR)(RTRCUINTPTR)pCtx->SysEnter.eip
            ||  !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %RRv to %RRv\n", pCtx->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
        /** @note The Intel manual suggests that the OS is responsible for this. */
        pCtx->cs.Sel      = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
        pCtx->eip         = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pCtx->ss.Sel      = pCtx->cs.Sel + 8;     /* SysEnter.cs + 8 */
        pCtx->esp         = pCtx->SysEnter.esp;
        pCtx->eflags.u32 &= ~(X86_EFL_VM | X86_EFL_RF);
        pCtx->eflags.u32 |= X86_EFL_IF;

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSEXIT)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  (pCtx->cs.Sel & X86_SEL_RPL) != 1
            ||  pCtx->eflags.Bits.u1VM
            ||  !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %RRv to %RRv\n", pCtx->eip, pCtx->edx));

        pCtx->cs.Sel      = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pCtx->eip         = pCtx->edx;
        pCtx->ss.Sel      = pCtx->cs.Sel + 8;  /* SysEnter.cs + 24 */
        pCtx->esp         = pCtx->ecx;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else
    if (pCpu->pCurInstr->uOpcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    return VINF_EM_RAW_RING_SWITCH;
}
/**
 * Populate DBGF_AS_RC with PATM symbols.
 *
 * Called by dbgfR3AsLazyPopulate when DBGF_AS_RC or DBGF_AS_RC_AND_GC_GLOBAL is
 * accessed for the first time.
 *
 * @param   pVM                 The cross context VM structure.
 * @param   hDbgAs              The DBGF_AS_RC address space handle.
 */
VMMR3_INT_DECL(void) PATMR3DbgPopulateAddrSpace(PVM pVM, RTDBGAS hDbgAs)
{
    AssertReturnVoid(!HMIsEnabled(pVM));

    /*
     * Add a fake debug module for the PATMGCSTATE structure.
     */
    RTDBGMOD hDbgMod;
    int rc = RTDbgModCreate(&hDbgMod, "patmgcstate", sizeof(PATMGCSTATE), 0 /*fFlags*/);
    if (RT_SUCCESS(rc))
    {
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uVMFlags,                  "uVMFlags");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uPendingAction,            "uPendingAction");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uPatchCalls,               "uPatchCalls");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uScratch,                  "uScratch");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uIretEFlags,               "uIretEFlags");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uIretCS,                   "uIretCS");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, uIretEIP,                  "uIretEIP");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Psp,                       "Psp");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, fPIF,                      "fPIF");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, GCPtrInhibitInterrupts,    "GCPtrInhibitInterrupts");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, GCCallPatchTargetAddr,     "GCCallPatchTargetAddr");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, GCCallReturnAddr,          "GCCallReturnAddr");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uEAX,              "Restore.uEAX");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uECX,              "Restore.uECX");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uEDI,              "Restore.uEDI");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.eFlags,            "Restore.eFlags");
        ADD_MEMBER(hDbgMod, PATMGCSTATE, Restore.uFlags,            "Restore.uFlags");

        rc = RTDbgAsModuleLink(hDbgAs, hDbgMod, pVM->patm.s.pGCStateGC, 0 /*fFlags*/);
        AssertLogRelRC(rc);
        RTDbgModRelease(hDbgMod);
    }

    /*
     * Add something for the stats so we get some kind of symbols for
     * references to them while disassembling patches.
     */
    rc = RTDbgModCreate(&hDbgMod, "patmstats", PATM_STAT_MEMSIZE, 0 /*fFlags*/);
    if (RT_SUCCESS(rc))
    {
        ADD_FUNC(hDbgMod, pVM->patm.s.pStatsGC, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, "PATMMemStatsStart");

        rc = RTDbgAsModuleLink(hDbgAs, hDbgMod, pVM->patm.s.pStatsGC, 0 /*fFlags*/);
        AssertLogRelRC(rc);
        RTDbgModRelease(hDbgMod);
    }

    /*
     * Add a fake debug module for the patches and stack.
     */
    rc = RTDbgModCreate(&hDbgMod, "patches", pVM->patm.s.cbPatchMem + PATM_STACK_TOTAL_SIZE + PAGE_SIZE, 0 /*fFlags*/);
    if (RT_SUCCESS(rc))
    {
        pVM->patm.s.hDbgModPatchMem = hDbgMod;
        patmR3DbgAddPatches(pVM, hDbgMod);

        rc = RTDbgAsModuleLink(hDbgAs, hDbgMod, pVM->patm.s.pPatchMemGC, 0 /*fFlags*/);
        AssertLogRelRC(rc);
    }
}
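The ADD_MEMBER and ADD_FUNC helpers are not included in this snippet. A
plausible shape for ADD_MEMBER, assuming it wraps RTDbgModSymbolAdd with the
member's offset and size (an assumption, not the verified definition):

    /* Assumed sketch of the helper macro, not the verified original. */
    #define ADD_MEMBER(a_hDbgMod, a_Struct, a_Member, a_pszName) \
        do { \
            int rcAdd = RTDbgModSymbolAdd(a_hDbgMod, a_pszName, 0 /*iSeg*/, \
                                          RT_OFFSETOF(a_Struct, a_Member), \
                                          RT_SIZEOFMEMB(a_Struct, a_Member), \
                                          0 /*fFlags*/, NULL /*piOrdinal*/); \
            AssertRC(rcAdd); \
        } while (0)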
Example #30
/**
 * Initializes the KVM GIM provider.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMMR3_INT_DECL(int) gimR3KvmInit(PVM pVM)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pVM->gim.s.enmProviderId == GIMPROVIDERID_KVM, VERR_INTERNAL_ERROR_5);

    int rc;
    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;

    /*
     * Determine interface capabilities based on the version.
     */
    if (!pVM->gim.s.u32Version)
    {
        /* Basic features. */
        pKvm->uBaseFeat = 0
                        | GIM_KVM_BASE_FEAT_CLOCK_OLD
                        //| GIM_KVM_BASE_FEAT_NOP_IO_DELAY
                        //|  GIM_KVM_BASE_FEAT_MMU_OP
                        | GIM_KVM_BASE_FEAT_CLOCK
                        //| GIM_KVM_BASE_FEAT_ASYNC_PF
                        //| GIM_KVM_BASE_FEAT_STEAL_TIME
                        //| GIM_KVM_BASE_FEAT_PV_EOI
                        | GIM_KVM_BASE_FEAT_PV_UNHALT
                        ;
        /* Rest of the features are determined in gimR3KvmInitCompleted(). */
    }

    /*
     * Expose HVP (Hypervisor Present) bit to the guest.
     */
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);

    /*
     * Modify the standard hypervisor leaves for KVM.
     */
    CPUMCPUIDLEAF HyperLeaf;
    RT_ZERO(HyperLeaf);
    HyperLeaf.uLeaf        = UINT32_C(0x40000000);
    HyperLeaf.uEax         = UINT32_C(0x40000001); /* Minimum value for KVM is 0x40000001. */
    HyperLeaf.uEbx         = 0x4B4D564B;           /* 'KVMK' */
    HyperLeaf.uEcx         = 0x564B4D56;           /* 'VMKV' */
    HyperLeaf.uEdx         = 0x0000004D;           /* 'M000' */
    rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Add KVM specific leaves.
     */
    HyperLeaf.uLeaf        = UINT32_C(0x40000001);
    HyperLeaf.uEax         = pKvm->uBaseFeat;
    HyperLeaf.uEbx         = 0;                    /* Reserved */
    HyperLeaf.uEcx         = 0;                    /* Reserved */
    HyperLeaf.uEdx         = 0;                    /* Reserved */
    rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Insert all MSR ranges of KVM.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(g_aMsrRanges_Kvm); i++)
    {
        rc = CPUMR3MsrRangesInsert(pVM, &g_aMsrRanges_Kvm[i]);
        AssertLogRelRCReturn(rc, rc);
    }

    /*
     * Setup hypercall and #UD handling.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        VMMHypercallsEnable(&pVM->aCpus[i]);

    if (ASMIsAmdCpu())
    {
        pKvm->fTrapXcptUD   = true;
        pKvm->uOpCodeNative = OP_VMMCALL;
    }
    else
    {
        Assert(ASMIsIntelCpu() || ASMIsViaCentaurCpu());
        pKvm->fTrapXcptUD   = false;
        pKvm->uOpCodeNative = OP_VMCALL;
    }

    /* We always need to trap VMCALL/VMMCALL hypercall using #UDs for raw-mode VMs. */
    if (!HMIsEnabled(pVM))
        pKvm->fTrapXcptUD = true;

    return VINF_SUCCESS;
}
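To see the leaves registered above from the guest's side, here is a
self-contained sketch (GCC/Clang on x86, run inside the VM) that reads the
0x40000000 signature leaf; with this provider active it should print the KVM
signature assembled from the EBX/ECX/EDX values set above:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned uEax, uEbx, uEcx, uEdx;
        char     szSig[13];

        /* Hypervisor leaves live at 0x40000000; __cpuid() queries unconditionally. */
        __cpuid(0x40000000, uEax, uEbx, uEcx, uEdx);
        memcpy(szSig + 0, &uEbx, 4);
        memcpy(szSig + 4, &uEcx, 4);
        memcpy(szSig + 8, &uEdx, 4);
        szSig[12] = '\0';

        printf("max hv leaf=%#x signature=%s\n", uEax, szSig);
        return 0;
    }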