/**
 * Records an invlpg instruction for replaying upon REM entry.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPtrPage   The address of the page being invalidated.
 */
VMMDECL(void) REMNotifyInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    /*
     * Try to take the REM lock and push the address onto the array.
     */
    if (   pVM->rem.s.cInvalidatedPages < RT_ELEMENTS(pVM->rem.s.aGCPtrInvalidatedPages)
        && EMRemTryLock(pVM) == VINF_SUCCESS)
    {
        uint32_t iPage = pVM->rem.s.cInvalidatedPages;
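        /* Re-check the count now that the lock is held; another EMT may have
           pushed entries between the lock-free check above and EMRemTryLock. */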
        if (iPage < RT_ELEMENTS(pVM->rem.s.aGCPtrInvalidatedPages))
        {
            ASMAtomicWriteU32(&pVM->rem.s.cInvalidatedPages, iPage + 1);
            pVM->rem.s.aGCPtrInvalidatedPages[iPage] = GCPtrPage;

            EMRemUnlock(pVM);
            return;
        }

        CPUMSetChangedFlags(VMMGetCpu(pVM), CPUM_CHANGED_GLOBAL_TLB_FLUSH); /** @todo this array should be per-cpu technically speaking. */
        ASMAtomicWriteU32(&pVM->rem.s.cInvalidatedPages, 0); /** @todo leave this alone? Optimize this code? */

        EMRemUnlock(pVM);
    }
    else
    {
        /* Fallback: Simply tell the recompiler to flush its TLB. */
        CPUMSetChangedFlags(VMMGetCpu(pVM), CPUM_CHANGED_GLOBAL_TLB_FLUSH);
        ASMAtomicWriteU32(&pVM->rem.s.cInvalidatedPages, 0); /** @todo leave this alone?! Optimize this code? */
    }

    return;
}
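
/*
 * The recorded addresses are consumed when the recompiler is entered: a replay
 * routine walks the array, invalidates each page in the recompiler's TLB and
 * clears the counter. A minimal sketch of that consumer side is shown below;
 * remTlbFlushPage() is a hypothetical stand-in for the recompiler's real
 * invalidation hook, not an actual API.
 */
static void remReplayInvalidatedPages(PVM pVM, PVMCPU pVCpu)
{
    /* Runs on REM entry with the REM lock held, so the array is stable. */
    uint32_t const cPages = ASMAtomicUoReadU32(&pVM->rem.s.cInvalidatedPages);
    Assert(cPages <= RT_ELEMENTS(pVM->rem.s.aGCPtrInvalidatedPages));
    for (uint32_t iPage = 0; iPage < cPages; iPage++)
        remTlbFlushPage(pVCpu, pVM->rem.s.aGCPtrInvalidatedPages[iPage]); /* hypothetical hook */
    ASMAtomicWriteU32(&pVM->rem.s.cInvalidatedPages, 0); /* array drained */
}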
#ifdef LOG_ENABLED
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
    NOREF(rcRC);

#ifdef LOG_ENABLED
    /*
     * Log it.
     */
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp));
    if (pszPrefix)
    {
        DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", pszPrefix);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
    }
#endif

    /*
     * Use IEM and fall back on REM if the functionality is missing.
     * Once IEM gets mature enough, nothing should ever fall back.
     */
    STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
    VBOXSTRICTRC rcStrict;
    uint32_t     idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
    {
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
    }
    else
    {
        RT_UNTRUSTED_VALIDATED_FENCE();
        rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
        LogFlow(("emR3HmExecuteInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
    }
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);

    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
#ifdef VBOX_WITH_REM
        STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
        EMRemLock(pVM);
        /* Flush the recompiler TLB if the VCPU has changed. */
        if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
        pVM->em.s.idLastRemCpu = pVCpu->idCpu;

        rcStrict = REMR3EmulateInstruction(pVM, pVCpu);
        EMRemUnlock(pVM);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, b);
#else  /* !VBOX_WITH_REM */
        NOREF(pVM);
#endif /* !VBOX_WITH_REM */
    }

    return VBOXSTRICTRC_TODO(rcStrict);
}
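
/*
 * The idxContinueExitRec handling above follows the usual pattern for
 * speculation-safe indexing with an untrusted value: copy the volatile index
 * once, bounds-check the copy, and only dereference after a validation fence.
 * Reduced to its essentials, as a sketch using the same structures and macros:
 */
static PCEMEXITREC emSketchLookupExitRec(PVMCPU pVCpu)
{
    uint32_t const idx = pVCpu->em.s.idxContinueExitRec; /* single copy; never re-read */
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    if (idx >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
        return NULL;                                     /* out of range: no record */
    RT_UNTRUSTED_VALIDATED_FENCE();                      /* block speculative OOB access */
    return &pVCpu->em.s.aExitRecords[idx];
}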
#ifdef LOG_ENABLED
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
#endif
    int      rc;
    NOREF(rcRC);

#ifdef LOG_ENABLED
    /*
     * Log it.
     */
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
    if (pszPrefix)
    {
        DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
    }
#endif

    /*
     * Use IEM and fall back on REM if the functionality is missing.
     * Once IEM gets mature enough, nothing should ever fall back.
     */
    STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
    rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);

    if (   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
#ifdef VBOX_WITH_REM
        STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
        EMRemLock(pVM);
        /* Flush the recompiler TLB if the VCPU has changed. */
        if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
        pVM->em.s.idLastRemCpu = pVCpu->idCpu;

        rc = REMR3EmulateInstruction(pVM, pVCpu);
        EMRemUnlock(pVM);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, b);
#else  /* !VBOX_WITH_REM */
        NOREF(pVM);
#endif /* !VBOX_WITH_REM */
    }

#ifdef EM_NOTIFY_HM
    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
        HMR3NotifyEmulated(pVCpu);
#endif
    return rc;
}
#ifdef LOG_ENABLED
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
#endif
    int      rc;
    NOREF(rcRC);

    /*
     * The simple solution is to use the recompiler.
     * The better solution is to disassemble the current instruction and
     * try to handle as many of them as possible without using REM.
     */

#ifdef LOG_ENABLED
    /*
     * Disassemble the instruction if requested.
     */
    if (pszPrefix)
    {
        DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
        DBGFR3DisasInstrCurrentLog(pVCpu, pszPrefix);
    }
#endif /* LOG_ENABLED */

#if 0
    /* Try our own instruction emulator before falling back to the recompiler. */
    DISCPUSTATE Cpu;
    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
    if (RT_SUCCESS(rc))
    {
        switch (Cpu.pCurInstr->uOpcode)
        {
        /** @todo we can do more now */
        case OP_MOV:
        case OP_AND:
        case OP_OR:
        case OP_XOR:
        case OP_POP:
        case OP_INC:
        case OP_DEC:
        case OP_XCHG:
            STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
            rc = EMInterpretInstructionCpuUpdtPC(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0);
            if (RT_SUCCESS(rc))
            {
#ifdef EM_NOTIFY_HWACCM
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
                    HWACCMR3NotifyEmulated(pVCpu);
#endif
                STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
                return rc;
            }
            if (rc != VERR_EM_INTERPRETER)
                AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
            break;
        }
    }
#endif /* 0 */
    STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
#ifdef VBOX_WITH_REM
    EMRemLock(pVM);
    /* Flush the recompiler TLB if the VCPU has changed. */
    if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    pVM->em.s.idLastRemCpu = pVCpu->idCpu;

    rc = REMR3EmulateInstruction(pVM, pVCpu);
    EMRemUnlock(pVM);
#else
    rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);

#ifdef EM_NOTIFY_HWACCM
    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
        HWACCMR3NotifyEmulated(pVCpu);
#endif
    return rc;
}
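
/*
 * The REM lock + per-VCPU TLB flush preamble recurs in each worker above. A
 * hypothetical helper capturing it might look like this (a sketch only; the
 * real code inlines the sequence at every call site):
 */
DECLINLINE(void) emSketchRemLockAndSync(PVM pVM, PVMCPU pVCpu)
{
    EMRemLock(pVM);
    /* Flush the recompiler TLB when a different VCPU was the last to use REM. */
    if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    pVM->em.s.idLastRemCpu = pVCpu->idCpu;
}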
/**
 * Check a registered module for shared page changes.
 *
 * The PGM lock shall be taken prior to calling this method.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The cross context VM structure.
 * @param   pGVM                Pointer to the GVM instance data.
 * @param   idCpu               The ID of the calling virtual CPU.
 * @param   pModule             Global module description.
 * @param   paRegionsGCPtrs     Array parallel to pModule->aRegions with the
 *                              addresses of the regions in the calling
 *                              process.
 */
VMMR0DECL(int) PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, PCRTGCPTR64 paRegionsGCPtrs)
{
#ifdef VBOX_BUGREF_9217
    PVMCPU              pVCpu         = &pGVM->aCpus[idCpu];
#else
    PVMCPU              pVCpu         = &pVM->aCpus[idCpu];
#endif
    int                 rc            = VINF_SUCCESS;
    bool                fFlushTLBs    = false;
    bool                fFlushRemTLBs = false;
    GMMSHAREDPAGEDESC   PageDesc;

    Log(("PGMR0SharedModuleCheck: check %s %s base=%RGv size=%x\n", pModule->szName, pModule->szVersion, pModule->Core.Key, pModule->cbModule));

    PGM_LOCK_ASSERT_OWNER(pVM);     /* This cannot fail as we grab the lock in pgmR3SharedModuleRegRendezvous before calling into ring-0. */

    /*
     * Check every region of the shared module.
     */
    for (uint32_t idxRegion = 0; idxRegion < pModule->cRegions; idxRegion++)
    {
        RTGCPTR  GCPtrPage  = paRegionsGCPtrs[idxRegion] & ~(RTGCPTR)PAGE_OFFSET_MASK;
        uint32_t cbLeft     = pModule->aRegions[idxRegion].cb; Assert(!(cbLeft & PAGE_OFFSET_MASK));
        uint32_t idxPage    = 0;

        while (cbLeft)
        {
            /** @todo inefficient to fetch each guest page like this... */
            RTGCPHYS GCPhys;
            uint64_t fFlags;
            rc = PGMGstGetPage(pVCpu, GCPtrPage, &fFlags, &GCPhys);
            if (    rc == VINF_SUCCESS
                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
            {
                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage));
                if (    pPage
                    &&  PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                    &&  PGM_PAGE_GET_READ_LOCKS(pPage) == 0
                    &&  PGM_PAGE_GET_WRITE_LOCKS(pPage) == 0 )
                {
                    PageDesc.idPage = PGM_PAGE_GET_PAGEID(pPage);
                    PageDesc.HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
                    PageDesc.GCPhys = GCPhys;

                    rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
                    if (RT_FAILURE(rc))
                        break;

                    /*
                     * Any change for this page?
                     */
                    if (PageDesc.idPage != NIL_GMM_PAGEID)
                    {
                        Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);

                        Log(("PGMR0SharedModuleCheck: shared page gst virt=%RGv phys=%RGp host %RHp->%RHp\n",
                             GCPtrPage, PageDesc.GCPhys, PGM_PAGE_GET_HCPHYS(pPage), PageDesc.HCPhys));

                        /* Page was either replaced by an existing shared
                           version of it or converted into a read-only shared
                           page, so, clear all references. */
                        bool fFlush = false;
                        rc = pgmPoolTrackUpdateGCPhys(pVM, PageDesc.GCPhys, pPage, true /* clear the entries */, &fFlush);
                        Assert(   rc == VINF_SUCCESS
                               || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
                                   && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)));
                        if (rc == VINF_SUCCESS)
                            fFlushTLBs |= fFlush;
                        fFlushRemTLBs = true;

                        if (PageDesc.HCPhys != PGM_PAGE_GET_HCPHYS(pPage))
                        {
                            /* Update the physical address and page id now. */
                            PGM_PAGE_SET_HCPHYS(pVM, pPage, PageDesc.HCPhys);
                            PGM_PAGE_SET_PAGEID(pVM, pPage, PageDesc.idPage);

                            /* Invalidate page map TLB entry for this page too. */
                            pgmPhysInvalidatePageMapTLBEntry(pVM, PageDesc.GCPhys);
                            pVM->pgm.s.cReusedSharedPages++;
                        }
                        /* else: nothing changed (== this page is now a shared
                           page), so no need to flush anything. */

                        pVM->pgm.s.cSharedPages++;
                        pVM->pgm.s.cPrivatePages--;
                        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_SHARED);

#ifdef VBOX_STRICT /* checksum hack */
                        pPage->s.u2Unused0 = PageDesc.u32StrictChecksum        & 3;
                        //pPage->s.u2Unused1 = (PageDesc.u32StrictChecksum >> 8) & 3;
#endif
                    }
                }
            }
            else
            {
                Assert(    rc == VINF_SUCCESS
                       ||  rc == VERR_PAGE_NOT_PRESENT
                       ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
                       ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
                       ||  rc == VERR_PAGE_TABLE_NOT_PRESENT);
                rc = VINF_SUCCESS;  /* ignore error */
            }

            idxPage++;
            GCPtrPage += PAGE_SIZE;
            cbLeft    -= PAGE_SIZE;
        }
    }

    /*
     * Do TLB flushing if necessary.
     */
    if (fFlushTLBs)
        PGM_INVL_ALL_VCPU_TLBS(pVM);

    if (fFlushRemTLBs)
#ifdef VBOX_BUGREF_9217
        for (VMCPUID idCurCpu = 0; idCurCpu < pGVM->cCpus; idCurCpu++)
            CPUMSetChangedFlags(&pGVM->aCpus[idCurCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
#else
        for (VMCPUID idCurCpu = 0; idCurCpu < pVM->cCpus; idCurCpu++)
            CPUMSetChangedFlags(&pVM->aCpus[idCurCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
#endif

    return rc;
}
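
/*
 * The paRegionsGCPtrs array is prepared by the ring-3 caller before the
 * rendezvous dives into ring-0. A sketch of that preparation is shown below,
 * assuming (purely for illustration) that the regions are laid out
 * back-to-back from the module base address in Core.Key; the real caller
 * takes the per-region addresses from the guest's module report.
 */
static void pgmSketchFillRegionAddrs(PGMMSHAREDMODULE pModule, PRTGCPTR64 paRegionsGCPtrs)
{
    RTGCPTR64 GCPtr = pModule->Core.Key; /* module base in the calling process */
    for (uint32_t idxRegion = 0; idxRegion < pModule->cRegions; idxRegion++)
    {
        paRegionsGCPtrs[idxRegion] = GCPtr;
        GCPtr += pModule->aRegions[idxRegion].cb;
    }
}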