Example #1
/**
 * Read guest memory.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   idCpu       The ID of the CPU context to read memory from.
 * @param   pAddress    Where to start reading.
 * @param   pvBuf       Where to store the data we've read.
 * @param   cbRead      The number of bytes to read.
 */
static DECLCALLBACK(int) dbgfR3MemRead(PUVM pUVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
{
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    Assert(idCpu == VMMGetCpuId(pVM));

    /*
     * Validate the input we use, PGM does the rest.
     */
    if (!DBGFR3AddrIsValid(pUVM, pAddress))
        return VERR_INVALID_POINTER;
    if (!VALID_PTR(pvBuf))
        return VERR_INVALID_POINTER;

    /*
     * HMA is special
     */
    int rc;
    if (DBGFADDRESS_IS_HMA(pAddress))
    {
        if (DBGFADDRESS_IS_PHYS(pAddress))
            rc = VERR_INVALID_POINTER;
        else
            rc = MMR3HyperReadGCVirt(pVM, pvBuf, pAddress->FlatPtr, cbRead);
    }
    else
    {
        /*
         * Select DBGF worker by addressing mode.
         */
        PVMCPU  pVCpu   = VMMGetCpuById(pVM, idCpu);
        PGMMODE enmMode = PGMGetGuestMode(pVCpu);
        if (    enmMode == PGMMODE_REAL
            ||  enmMode == PGMMODE_PROTECTED
            ||  DBGFADDRESS_IS_PHYS(pAddress) )
            rc = PGMPhysSimpleReadGCPhys(pVM, pvBuf, pAddress->FlatPtr, cbRead);
        else
        {
#if GC_ARCH_BITS > 32
            if (    (   pAddress->FlatPtr >= _4G
                     || pAddress->FlatPtr + cbRead > _4G)
                &&  enmMode != PGMMODE_AMD64
                &&  enmMode != PGMMODE_AMD64_NX)
                return VERR_PAGE_TABLE_NOT_PRESENT;
#endif
            rc = PGMPhysSimpleReadGCPtr(pVCpu, pvBuf, pAddress->FlatPtr, cbRead);
        }
    }
    return rc;
}
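
As a usage sketch (not part of the original listing): this worker is the EMT-side backend of the public DBGFR3MemRead API, so a caller would normally build a DBGFADDRESS and go through that API rather than call the worker directly. The flat guest address and buffer size below are arbitrary illustration values.

#include <VBox/vmm/dbgf.h>

/* Usage sketch (illustration only): read 256 bytes of guest memory through
 * the public DBGF API, which dispatches to dbgfR3MemRead on the EMT.
 * The flat guest address 0x1000 is an arbitrary example value. */
static int readGuestBytesExample(PUVM pUVM)
{
    uint8_t     abBuf[256];
    DBGFADDRESS Addr;
    DBGFR3AddrFromFlat(pUVM, &Addr, 0x1000);
    return DBGFR3MemRead(pUVM, 0 /* idCpu */, &Addr, abBuf, sizeof(abBuf));
}
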
Example #2
/**
 * Patch OpenBSD interrupt handler prefix
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pCpu        Disassembly state of instruction.
 * @param   pInstrGC    GC Instruction pointer for instruction
 * @param   pInstrHC    HC Instruction pointer for instruction
 * @param   pPatchRec   Patch structure
 *
 */
int PATMPatchOpenBSDHandlerPrefix(PVM pVM, PDISCPUSTATE pCpu, RTGCPTR32 pInstrGC, uint8_t *pInstrHC, PPATMPATCHREC pPatchRec)
{
    uint8_t   uTemp[16];
    int       rc;

    Assert(sizeof(uTemp) > RT_MAX(sizeof(uFnOpenBSDHandlerPrefix1), sizeof(uFnOpenBSDHandlerPrefix2)));

    /* Guest OS specific patch; check heuristics first */

    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), uTemp, pInstrGC, RT_MAX(sizeof(uFnOpenBSDHandlerPrefix1), sizeof(uFnOpenBSDHandlerPrefix2)));
    if (    RT_FAILURE(rc)
        || (    memcmp(uFnOpenBSDHandlerPrefix1, uTemp, sizeof(uFnOpenBSDHandlerPrefix1))
            &&  memcmp(uFnOpenBSDHandlerPrefix2, uTemp, sizeof(uFnOpenBSDHandlerPrefix2))))
    {
        return VERR_PATCHING_REFUSED;
    }
    /* Found it; patch the push cs */
    pPatchRec->patch.flags &= ~(PATMFL_GUEST_SPECIFIC);  /* prevent a breakpoint from being triggered */
    return patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, pCpu, &pPatchRec->patch);
}
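
The heuristic above amounts to reading a few guest bytes and comparing them against known byte signatures. A minimal, self-contained sketch of that pattern follows; the signature bytes are invented placeholders, not the real contents of uFnOpenBSDHandlerPrefix1/2.

#include <stdint.h>
#include <string.h>

/* Placeholder signatures; the real uFnOpenBSDHandlerPrefix1/2 arrays hold the
 * actual OpenBSD interrupt handler prologue bytes. */
static const uint8_t s_abSig1[] = { 0x0e, 0x90 };
static const uint8_t s_abSig2[] = { 0x0e, 0x66, 0x90 };

/* Returns 1 if the read-back guest bytes start with either known signature. */
static int matchesKnownPrefix(const uint8_t *pbCode, size_t cbCode)
{
    return (cbCode >= sizeof(s_abSig1) && !memcmp(pbCode, s_abSig1, sizeof(s_abSig1)))
        || (cbCode >= sizeof(s_abSig2) && !memcmp(pbCode, s_abSig2, sizeof(s_abSig2)));
}
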
Example #3
/**
 * Synchronizes one GDT entry (guest -> shadow).
 *
 * @returns VBox strict status code (appropriate for trap handling and GC
 *          return).
 * @retval  VINF_SUCCESS
 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
 * @retval  VINF_SELM_SYNC_GDT
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtx        CPU context for the current CPU.
 * @param   iGDTEntry   The GDT entry to sync.
 *
 * @remarks Caller checks that this isn't the LDT entry!
 */
static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
{
    Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));

    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
    if (    iGDTEntry >= SELM_GDT_ELEMENTS
        ||  offEntry  >  GdtrGuest.cbGdt)
        return VINF_SUCCESS; /* ignore */

    /*
     * Read the guest descriptor.
     */
    X86DESC Desc;
    int rc = MMGCRamRead(pVM, &Desc, (uint8_t *)(uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
    if (RT_FAILURE(rc))
    {
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, (uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
        if (RT_FAILURE(rc))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
            /* return VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
            return VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
        }
    }

    /*
     * Check for conflicts.
     */
    RTSEL   Sel = iGDTEntry << X86_SEL_SHIFT;
    Assert(   !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK_OFF_RPL));
    if (    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == Sel
        ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == Sel
        ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == Sel
        ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == Sel
        ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == Sel)
    {
        if (Desc.Gen.u1Present)
        {
            Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            return VINF_SELM_SYNC_GDT;  /** @todo this status code is ignored, unfortunately. */
        }
        Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc));

        /* Note: we can't continue below or else we'll change the shadow descriptor!! */
        /* When the guest makes the selector present, then we'll do a GDT sync. */
        return VINF_SUCCESS;
    }

    /*
     * Convert the guest selector to a shadow selector and update the shadow GDT.
     */
    selmGuestToShadowDesc(pVM, &Desc);
    PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
    //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF ));
    //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc)), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF ));
    *pShwDescr = Desc;

    /*
     * Detect and mark stale registers.
     */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    PCPUMSELREG  paSReg   = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
        {
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsSRegStale32(&paSReg[iSReg], &Desc, iSReg))
                {
                    Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE;
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
                    /* rcStrict = VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
                    rcStrict = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
                }
                else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE)
                {
                    Log(("GDT write to selector in %s register %04X (no longer stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE;
                }
                else
                    Log(("GDT write to selector in %s register %04X (no important change)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
            }
            else
                Log(("GDT write to selector in %s register %04X (out of sync)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
        }
    }

    /** @todo Detect stale LDTR as well? */

    return rcStrict;
}
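
Two small calculations above are easy to gloss over: a GDT index becomes a byte offset by multiplying with the 8-byte descriptor size, and becomes a selector value by shifting it into the index field at bit 3 (X86_SEL_SHIFT). A standalone sketch, assuming the standard x86 constants:

#include <stdint.h>

#define GDT_DESC_SIZE   8u  /* sizeof(X86DESC): every GDT entry is 8 bytes       */
#define SEL_INDEX_SHIFT 3   /* X86_SEL_SHIFT: the selector index starts at bit 3 */

/* Byte offset of GDT entry i, as checked against GdtrGuest.cbGdt above. */
static inline uint32_t gdtEntryOffset(uint32_t iEntry)
{
    return iEntry * GDT_DESC_SIZE;
}

/* Selector value (TI=0, RPL=0) referring to GDT entry i, as compared
 * against the hypervisor selectors above. */
static inline uint16_t gdtEntrySelector(uint32_t iEntry)
{
    return (uint16_t)(iEntry << SEL_INDEX_SHIFT);
}
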
Example #4
/**
 * Check Windows XP sysenter heuristics and install patch
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pInstrGC    GC Instruction pointer for sysenter
 * @param   pPatchRec   Patch structure
 *
 */
int PATMPatchSysenterXP(PVM pVM, RTGCPTR32 pInstrGC, PPATMPATCHREC pPatchRec)
{
    PPATCHINFO  pPatch = &pPatchRec->patch;
    uint8_t     uTemp[16];
    RTGCPTR32   lpfnKiFastSystemCall, lpfnKiIntSystemCall = 0; /* (initializing it to shut up warning.) */
    int         rc, i;
    PVMCPU      pVCpu = VMMGetCpu0(pVM);

    Assert(sizeof(uTemp) > sizeof(uFnKiIntSystemCall));
    Assert(sizeof(uTemp) > sizeof(uFnKiFastSystemCall));

    /* Guest OS specific patch; check heuristics first */

    /* check the epilog of KiFastSystemCall */
    lpfnKiFastSystemCall = pInstrGC - 2;
    rc = PGMPhysSimpleReadGCPtr(pVCpu, uTemp, lpfnKiFastSystemCall, sizeof(uFnKiFastSystemCall));
    if (    RT_FAILURE(rc)
        ||  memcmp(uFnKiFastSystemCall, uTemp, sizeof(uFnKiFastSystemCall)))
    {
        return VERR_PATCHING_REFUSED;
    }

    /* Now search for KiIntSystemCall */
    for (i = 0; i < 64; i++)
    {
        rc = PGMPhysSimpleReadGCPtr(pVCpu, uTemp, pInstrGC + i, sizeof(uFnKiIntSystemCall));
        if (RT_FAILURE(rc))
        {
            break;
        }
        if (!memcmp(uFnKiIntSystemCall, uTemp, sizeof(uFnKiIntSystemCall)))
        {
            lpfnKiIntSystemCall = pInstrGC + i;
            /* Found it! */
            break;
        }
    }
    if (i == 64)
    {
        Log(("KiIntSystemCall not found!!\n"));
        return VERR_PATCHING_REFUSED;
    }

    if (PAGE_ADDRESS(lpfnKiFastSystemCall) != PAGE_ADDRESS(lpfnKiIntSystemCall))
    {
        Log(("KiFastSystemCall and KiIntSystemCall not in the same page!!\n"));
        return VERR_PATCHING_REFUSED;
    }

    // make a copy of the guest code bytes that will be overwritten
    rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aPrivInstr, pPatch->pPrivInstrGC, SIZEOF_NEARJUMP32);
    AssertRC(rc);

    /* Now we simply jump from the fast version to the 'old and slow' system call */
    uTemp[0] = 0xE9;
    *(RTGCPTR32 *)&uTemp[1] = lpfnKiIntSystemCall - (pInstrGC + SIZEOF_NEARJUMP32);
    rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, pInstrGC, uTemp, SIZEOF_NEARJUMP32);
    if (RT_FAILURE(rc))
    {
        Log(("PGMPhysSimpleDirtyWriteGCPtr failed with rc=%Rrc!!\n", rc));
        return VERR_PATCHING_REFUSED;
    }

#ifdef LOG_ENABLED
    Log(("Sysenter Patch code ----------------------------------------------------------\n"));
    PATMP2GLOOKUPREC cacheRec;
    RT_ZERO(cacheRec);
    cacheRec.pPatch = pPatch;

    patmr3DisasmCodeStream(pVM, pInstrGC, pInstrGC, patmr3DisasmCallback, &cacheRec);
    /* Free leftover lock if any. */
    if (cacheRec.Lock.pvMap)
        PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
    Log(("Sysenter Patch code ends -----------------------------------------------------\n"));
#endif

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;
}
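
The patch body rewrites the first SIZEOF_NEARJUMP32 (5) bytes at pInstrGC into an E9 near jump whose rel32 displacement is measured from the end of the jump instruction. A standalone sketch of that encoding, independent of the PATM types:

#include <stdint.h>
#include <string.h>

#define NEARJUMP32_SIZE 5u  /* 0xE9 opcode + 32-bit displacement */

/* Encode "JMP rel32" at guest address uSrc so that it lands on uDst.
 * The displacement is relative to the first byte after the 5-byte jump,
 * matching the lpfnKiIntSystemCall - (pInstrGC + SIZEOF_NEARJUMP32) math above. */
static void encodeNearJump32(uint8_t *pbPatch, uint32_t uSrc, uint32_t uDst)
{
    uint32_t uDisp = uDst - (uSrc + NEARJUMP32_SIZE);
    pbPatch[0] = 0xE9;
    memcpy(&pbPatch[1], &uDisp, sizeof(uDisp));
}
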