Example #1
VMMR3DECL(int)      IEMR3Init(PVM pVM)
{
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        pVCpu->iem.s.offVM    = -RT_OFFSETOF(VM, aCpus[idCpu].iem.s);
        pVCpu->iem.s.offVMCpu = -RT_OFFSETOF(VMCPU, iem.s);
        pVCpu->iem.s.pCtxR3   = CPUMQueryGuestCtxPtr(pVCpu);
        pVCpu->iem.s.pCtxR0   = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
        pVCpu->iem.s.pCtxRC   = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);

        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions,               STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Instructions interpreted",          "/IEM/CPU%u/cInstructions", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits,             STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Potential exists",                  "/IEM/CPU%u/cPotentialExits", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented,    STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_ASPECT_NOT_IMPLEMENTED",   "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented,     STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_INSTR_NOT_IMPLEMENTED",    "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses,             STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Informational statuses returned",   "/IEM/CPU%u/cRetInfStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses,             STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Error statuses returned",           "/IEM/CPU%u/cRetErrStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten,                   STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                        "Approx bytes written",              "/IEM/CPU%u/cbWritten", idCpu);
    }
    return VINF_SUCCESS;
}
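The negative RT_OFFSETOF values stored in offVM and offVMCpu let IEM code recover the owning VM/VMCPU structure from a pointer to the embedded IEM state alone. Below is a minimal standalone sketch of that technique using the standard offsetof macro; the Outer/Inner names are hypothetical illustration types, not VirtualBox ones.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Inner { int32_t offOuter; int payload; } Inner;
typedef struct Outer { int id; Inner inner; } Outer;

/* Recover the Outer pointer by applying the stored (negative) offset. */
static Outer *innerToOuter(Inner *pInner)
{
    return (Outer *)((uint8_t *)pInner + pInner->offOuter);
}

int main(void)
{
    Outer o = { 42, { 0, 0 } };
    o.inner.offOuter = -(int32_t)offsetof(Outer, inner); /* like -RT_OFFSETOF(VM, aCpus[i].iem.s) */
    printf("id=%d\n", innerToOuter(&o.inner)->id);       /* prints id=42 */
    return 0;
}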
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
selmGuestGDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    NOREF(enmAccessType);
    Log(("selmGuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
    NOREF(GCPtr);
    NOREF(cbBuf);
    NOREF(pvPtr);
    NOREF(pvBuf);
    NOREF(enmOrigin);
    NOREF(pvUser);

#  ifdef IN_RING3
    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#  else  /* IN_RC: */
    /*
     * Execute the write, doing necessary pre and post shadow GDT checks.
     */
    PCPUMCTX pCtx        = CPUMQueryGuestCtxPtr(pVCpu);
    uint32_t offGuestGdt = GCPtr - pCtx->gdtr.pGdt;
    selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
    memcpy(pvPtr, pvBuf, cbBuf);
    VBOXSTRICTRC rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
    else
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    return rcStrict;
#  endif
}
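In ring-3 the handler above only raises VMCPU_FF_SELM_SYNC_GDT and returns VINF_PGM_HANDLER_DO_DEFAULT, deferring the expensive shadow-GDT resync to the EMT loop. A standalone sketch of this set-flag-now, do-work-later pattern, with hypothetical flag and function names, using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

#define MY_FF_SYNC_GDT  (1u << 0)   /* hypothetical force-action bit */

static _Atomic unsigned g_fForceFlags;

static void handlerFastPath(void)            /* e.g. a write handler */
{
    atomic_fetch_or(&g_fForceFlags, MY_FF_SYNC_GDT);   /* just note the work */
}

static void mainLoopSlowPath(void)           /* e.g. the EMT main loop */
{
    unsigned fFlags = atomic_exchange(&g_fForceFlags, 0);
    if (fFlags & MY_FF_SYNC_GDT)
        puts("resyncing shadow GDT");        /* the deferred, expensive part */
}

int main(void)
{
    handlerFastPath();
    mainLoopSlowPath();   /* prints: resyncing shadow GDT */
    return 0;
}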
Example #3
/**
 * Load virtualized flags.
 *
 * This function is called from CPUMRawEnter(). It doesn't have to update the
 * IF and IOPL eflags bits; the caller enforces those to 1 and 0 respectively.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @see     pg_raw
 */
VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
    Assert(!HMIsEnabled(pVM));

    /*
     * Currently we don't bother to check whether PATM is enabled or not.
     * For all cases where it isn't, IOPL will be safe and IF will be set.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip),
              ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv)\n",
               pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC,
               pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));

    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode,
                     ("fPIF=%d eip=%RRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));

    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtxCore->eflags.u32 = efl;

#ifdef IN_RING3
#ifdef PATM_EMULATE_SYSENTER
    PCPUMCTX pCtx;

    /* Check if the sysenter handler has changed. */
    pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
    if (   pCtx->SysEnter.cs  != 0
        && pCtx->SysEnter.eip != 0
       )
    {
        if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
        {
            pVM->patm.s.pfnSysEnterPatchGC = 0;
            pVM->patm.s.pfnSysEnterGC = 0;

            Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
            pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
            if (pVM->patm.s.pfnSysEnterPatchGC == 0)
            {
                int rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
                if (rc == VINF_SUCCESS)
                {
                    pVM->patm.s.pfnSysEnterPatchGC  = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
                    pVM->patm.s.pfnSysEnterGC       = (RTRCPTR)pCtx->SysEnter.eip;
                    Assert(pVM->patm.s.pfnSysEnterPatchGC);
                }
            }
            else
                pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
        }
    }
    else
    {
        pVM->patm.s.pfnSysEnterPatchGC = 0;
        pVM->patm.s.pfnSysEnterGC = 0;
    }
#endif
#endif
}
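PATMRawEnter splits EFLAGS in two: the bits covered by PATM_VIRTUAL_FLAGS_MASK are saved into the patch GC state, while the EFLAGS value the CPU actually runs with gets IF forced to 1. A standalone sketch of that split and the inverse merge on exit; the mask value and names are hypothetical simplifications:

#include <stdint.h>
#include <stdio.h>

#define EFL_IF     (1u << 9)    /* interrupt flag, as in X86_EFL_IF */
#define VIRT_MASK  (EFL_IF)     /* hypothetical PATM_VIRTUAL_FLAGS_MASK */

static uint32_t g_uVMFlags;     /* stands in for pGCState->uVMFlags */

static uint32_t rawEnter(uint32_t efl)       /* guest EFLAGS -> raw EFLAGS */
{
    g_uVMFlags = efl & VIRT_MASK;            /* remember the guest's view */
    return (efl & ~VIRT_MASK) | EFL_IF;      /* run with IF forced on */
}

static uint32_t rawLeave(uint32_t efl)       /* raw EFLAGS -> guest EFLAGS */
{
    return (efl & ~VIRT_MASK) | g_uVMFlags;  /* restore the virtualized bits */
}

int main(void)
{
    uint32_t guestEfl = 0x2;                 /* guest runs with IF clear */
    uint32_t rawEfl   = rawEnter(guestEfl);
    printf("raw=%#x guest=%#x\n", rawEfl, rawLeave(rawEfl)); /* raw has IF, guest doesn't */
    return 0;
}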
Example #4
/**
 * Initializes the interpreted execution manager.
 *
 * This must be called after CPUM as we're querying information from CPUM about
 * the guest and host CPUs.
 *
 * @returns VBox status code.
 * @param   pVM                 The cross context VM structure.
 */
VMMR3DECL(int)      IEMR3Init(PVM pVM)
{
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        pVCpu->iem.s.offVM    = -RT_OFFSETOF(VM, aCpus[idCpu].iem.s);
        pVCpu->iem.s.offVMCpu = -RT_OFFSETOF(VMCPU, iem.s);
        pVCpu->iem.s.pCtxR3   = CPUMQueryGuestCtxPtr(pVCpu);
        pVCpu->iem.s.pCtxR0   = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
        pVCpu->iem.s.pCtxRC   = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);

        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions,             STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Instructions interpreted",          "/IEM/CPU%u/cInstructions", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits,           STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Potential exits",                   "/IEM/CPU%u/cPotentialExits", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented,  STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_ASPECT_NOT_IMPLEMENTED",   "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented,   STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_INSTR_NOT_IMPLEMENTED",    "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses,           STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Informational statuses returned",   "/IEM/CPU%u/cRetInfStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses,           STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Error statuses returned",           "/IEM/CPU%u/cRetErrStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten,                 STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                        "Approx bytes written",              "/IEM/CPU%u/cbWritten", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit,            STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);

        /*
         * Host and guest CPU information.
         */
        if (idCpu == 0)
        {
            pVCpu->iem.s.enmCpuVendor             = CPUMGetGuestCpuVendor(pVM);
            pVCpu->iem.s.enmHostCpuVendor         = CPUMGetHostCpuVendor(pVM);
        }
        else
        {
            pVCpu->iem.s.enmCpuVendor             = pVM->aCpus[0].iem.s.enmCpuVendor;
            pVCpu->iem.s.enmHostCpuVendor         = pVM->aCpus[0].iem.s.enmHostCpuVendor;
        }

        /*
         * Mark all buffers free.
         */
        uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
        while (iMemMap-- > 0)
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    }
    return VINF_SUCCESS;
}
Example #5
/**
 * Set the APIC base.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   u64Base         The new base.
 */
VMMDECL(int) PDMApicSetBase(PVMCPU pVCpu, uint64_t u64Base)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns))
    {
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnSetBase));
        pdmLock(pVM);
        pVM->pdm.s.Apic.CTX_SUFF(pfnSetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, u64Base);

        /* Update CPUM's copy of the APIC base. */
        PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
        Assert(pCtx);
        pCtx->msrApicBase = pVM->pdm.s.Apic.CTX_SUFF(pfnGetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu);

        pdmUnlock(pVM);
        return VINF_SUCCESS;
    }
    return VERR_PDM_NO_APIC_INSTANCE;
}
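PDMApicSetBase follows a common PDM shape: bail out if no device instance is registered, take the PDM lock, call through the device's function table, refresh cached state, unlock. A standalone sketch of that optional-backend dispatch pattern with hypothetical names (a pthread mutex standing in for pdmLock):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Backend { void (*pfnSetBase)(uint64_t u64Base); } Backend;

static void apicSetBaseImpl(uint64_t u64Base) { printf("base=%#llx\n", (unsigned long long)u64Base); }

static Backend        *g_pBackend;       /* NULL until a device registers */
static Backend         g_Apic = { apicSetBaseImpl };
static pthread_mutex_t g_Lock = PTHREAD_MUTEX_INITIALIZER;

static int setBase(uint64_t u64Base)
{
    if (!g_pBackend)
        return -1;                        /* like VERR_PDM_NO_APIC_INSTANCE */
    pthread_mutex_lock(&g_Lock);          /* like pdmLock(pVM) */
    g_pBackend->pfnSetBase(u64Base);      /* call through the function table */
    pthread_mutex_unlock(&g_Lock);
    return 0;                             /* like VINF_SUCCESS */
}

int main(void)
{
    g_pBackend = &g_Apic;
    return setBase(UINT64_C(0xfee00000)) == 0 ? 0 : 1;
}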
Example #6
/**
 * Breakpoint was hit somewhere.
 * Figure out which breakpoint it is and notify the debugger.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   enmEvent    DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
 */
VMMR3DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
{
    int rc = dbgfR3EventPrologue(pVM, enmEvent);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Send the event and process the reply communication.
     */
    /** @todo SMP */
    PVMCPU pVCpu = VMMGetCpu0(pVM);

    pVM->dbgf.s.DbgEvent.enmType = enmEvent;
    RTUINT iBp = pVM->dbgf.s.DbgEvent.u.Bp.iBp = pVCpu->dbgf.s.iActiveBp;
    pVCpu->dbgf.s.iActiveBp = ~0U;
    if (iBp != ~0U)
        pVM->dbgf.s.DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
    else
    {
        /* REM breakpoints have to be searched for. */
#if 0   /** @todo get flat PC api! */
        uint32_t eip = CPUMGetGuestEIP(pVM);
#else
        /** @todo SMP support!! */
        PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
        RTGCPTR  eip = pCtx->rip + pCtx->cs.u64Base;
#endif
        for (iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aBreakpoints); iBp++)
            if (    pVM->dbgf.s.aBreakpoints[iBp].enmType == DBGFBPTYPE_REM
                &&  pVM->dbgf.s.aBreakpoints[iBp].GCPtr == eip)
            {
                pVM->dbgf.s.DbgEvent.u.Bp.iBp = iBp;
                break;
            }
        AssertMsg(pVM->dbgf.s.DbgEvent.u.Bp.iBp != ~0U, ("eip=%RGv\n", eip));
        pVM->dbgf.s.DbgEvent.enmCtx = DBGFEVENTCTX_REM;
    }
    return dbgfR3SendEvent(pVM);
}
Example #7
/**
 * Emulate sysenter, sysexit and syscall instructions.
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRegFrame   The relevant core context.
 * @param   pCpu        Disassembly context.
 */
VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu0(pVM));

    if (pCpu->pCurInstr->uOpcode == OP_SYSENTER)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  pRegFrame->eflags.Bits.u1VM
            ||  (pRegFrame->cs.Sel & X86_SEL_RPL) != 3
            ||  pVM->patm.s.pfnSysEnterPatchGC == 0
            ||  pVM->patm.s.pfnSysEnterGC != (RTRCPTR)(RTRCUINTPTR)pCtx->SysEnter.eip
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %RRv to %RRv\n", pRegFrame->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
        /** @note The Intel manual suggests that the OS is responsible for this. */
        pRegFrame->cs.Sel      = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
        pRegFrame->eip         = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pRegFrame->ss.Sel      = pRegFrame->cs.Sel + 8;     /* SysEnter.cs + 8 */
        pRegFrame->esp         = pCtx->SysEnter.esp;
        pRegFrame->eflags.u32 &= ~(X86_EFL_VM | X86_EFL_RF);
        pRegFrame->eflags.u32 |= X86_EFL_IF;

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSEXIT)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  (pRegFrame->cs.Sel & X86_SEL_RPL) != 1
            ||  pRegFrame->eflags.Bits.u1VM
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %RRv to %RRv\n", pRegFrame->eip, pRegFrame->edx));

        pRegFrame->cs.Sel      = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pRegFrame->eip         = pRegFrame->edx;
        pRegFrame->ss.Sel      = pRegFrame->cs.Sel + 8;  /* SysEnter.cs + 24 */
        pRegFrame->esp         = pRegFrame->ecx;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else if (pCpu->pCurInstr->uOpcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    return VINF_EM_RAW_RING_SWITCH;
}
Example #8
/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(bool) PATMAreInterruptsEnabled(PVM pVM)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));

    return PATMAreInterruptsEnabledByCtxCore(pVM, CPUMCTX2CORE(pCtx));
}
/**
 * Worker function for dbgfR3CoreWrite() which does the writing.
 *
 * @returns VBox status code
 * @param   pVM                 Pointer to the VM.
 * @param   hFile               The file to write to.  Caller closes this.
 */
static int dbgfR3CoreWriteWorker(PVM pVM, RTFILE hFile)
{
    /*
     * Collect core information.
     */
    uint32_t const cu32MemRanges = dbgfR3GetRamRangeCount(pVM);
    uint16_t const cMemRanges    = cu32MemRanges < UINT16_MAX - 1 ? cu32MemRanges : UINT16_MAX - 1; /* One PT_NOTE Program header */
    uint16_t const cProgHdrs     = cMemRanges + 1;

    DBGFCOREDESCRIPTOR CoreDescriptor;
    RT_ZERO(CoreDescriptor);
    CoreDescriptor.u32Magic           = DBGFCORE_MAGIC;
    CoreDescriptor.u32FmtVersion      = DBGFCORE_FMT_VERSION;
    CoreDescriptor.cbSelf             = sizeof(CoreDescriptor);
    CoreDescriptor.u32VBoxVersion     = VBOX_FULL_VERSION;
    CoreDescriptor.u32VBoxRevision    = VMMGetSvnRev();
    CoreDescriptor.cCpus              = pVM->cCpus;

    Log((DBGFLOG_NAME ": CoreDescriptor Version=%u Revision=%u\n", CoreDescriptor.u32VBoxVersion, CoreDescriptor.u32VBoxRevision));

    /*
     * Compute the file layout (see pg_dbgf_vmcore).
     */
    uint64_t const offElfHdr          = RTFileTell(hFile);
    uint64_t const offNoteSection     = offElfHdr         + sizeof(Elf64_Ehdr);
    uint64_t const offLoadSections    = offNoteSection    + sizeof(Elf64_Phdr);
    uint64_t const cbLoadSections     = cMemRanges * sizeof(Elf64_Phdr);
    uint64_t const offCoreDescriptor  = offLoadSections   + cbLoadSections;
    uint64_t const cbCoreDescriptor   = Elf64NoteSectionSize(g_pcszCoreVBoxCore, sizeof(CoreDescriptor));
    uint64_t const offCpuDumps        = offCoreDescriptor + cbCoreDescriptor;
    uint64_t const cbCpuDumps         = pVM->cCpus * Elf64NoteSectionSize(g_pcszCoreVBoxCpu, sizeof(DBGFCORECPU));
    uint64_t const offMemory          = offCpuDumps       + cbCpuDumps;

    uint64_t const offNoteSectionData = offCoreDescriptor;
    uint64_t const cbNoteSectionData  = cbCoreDescriptor + cbCpuDumps;

    /*
     * Write ELF header.
     */
    int rc = Elf64WriteElfHdr(hFile, cProgHdrs, 0 /* cSecHdrs */);
    if (RT_FAILURE(rc))
    {
        LogRel((DBGFLOG_NAME ": Elf64WriteElfHdr failed. rc=%Rrc\n", rc));
        return rc;
    }

    /*
     * Write PT_NOTE program header.
     */
    Assert(RTFileTell(hFile) == offNoteSection);
    rc = Elf64WriteProgHdr(hFile, PT_NOTE, PF_R,
                           offNoteSectionData,  /* file offset to contents */
                           cbNoteSectionData,   /* size in core file */
                           cbNoteSectionData,   /* size in memory */
                           0);                  /* physical address */
    if (RT_FAILURE(rc))
    {
        LogRel((DBGFLOG_NAME ": Elf64WritreProgHdr failed for PT_NOTE. rc=%Rrc\n", rc));
        return rc;
    }

    /*
     * Write PT_LOAD program header for each memory range.
     */
    Assert(RTFileTell(hFile) == offLoadSections);
    uint64_t offMemRange = offMemory;
    for (uint16_t iRange = 0; iRange < cMemRanges; iRange++)
    {
        RTGCPHYS    GCPhysStart;
        RTGCPHYS    GCPhysEnd;
        bool        fIsMmio;
        rc = PGMR3PhysGetRange(pVM, iRange, &GCPhysStart, &GCPhysEnd, NULL /* pszDesc */, &fIsMmio);
        if (RT_FAILURE(rc))
        {
            LogRel((DBGFLOG_NAME ": PGMR3PhysGetRange failed for iRange(%u) rc=%Rrc\n", iRange, rc));
            return rc;
        }

        uint64_t cbMemRange  = GCPhysEnd - GCPhysStart + 1;
        uint64_t cbFileRange = fIsMmio ? 0 : cbMemRange;

        Log((DBGFLOG_NAME ": PGMR3PhysGetRange iRange=%u GCPhysStart=%#x GCPhysEnd=%#x cbMemRange=%u\n",
             iRange, GCPhysStart, GCPhysEnd, cbMemRange));

        rc = Elf64WriteProgHdr(hFile, PT_LOAD, PF_R,
                               offMemRange,                         /* file offset to contents */
                               cbFileRange,                         /* size in core file */
                               cbMemRange,                          /* size in memory */
                               GCPhysStart);                        /* physical address */
        if (RT_FAILURE(rc))
        {
            LogRel((DBGFLOG_NAME ": Elf64WriteProgHdr failed for memory range(%u) cbFileRange=%u cbMemRange=%u rc=%Rrc\n",
                    iRange, cbFileRange, cbMemRange, rc));
            return rc;
        }

        offMemRange += cbFileRange;
    }

    /*
     * Write the Core descriptor note header and data.
     */
    Assert(RTFileTell(hFile) == offCoreDescriptor);
    rc = Elf64WriteNoteHdr(hFile, NT_VBOXCORE, g_pcszCoreVBoxCore, &CoreDescriptor, sizeof(CoreDescriptor));
    if (RT_FAILURE(rc))
    {
        LogRel((DBGFLOG_NAME ": Elf64WriteNoteHdr failed for Note '%s' rc=%Rrc\n", g_pcszCoreVBoxCore, rc));
        return rc;
    }

    /*
     * Write the CPU context note headers and data.
     */
    Assert(RTFileTell(hFile) == offCpuDumps);
    PDBGFCORECPU pDbgfCoreCpu = (PDBGFCORECPU)RTMemAlloc(sizeof(*pDbgfCoreCpu));
    if (RT_UNLIKELY(!pDbgfCoreCpu))
    {
        LogRel((DBGFLOG_NAME ": failed to alloc %u bytes for DBGFCORECPU\n", sizeof(*pDbgfCoreCpu)));
        return VERR_NO_MEMORY;
    }

    for (uint32_t iCpu = 0; iCpu < pVM->cCpus; iCpu++)
    {
        PVMCPU      pVCpu = &pVM->aCpus[iCpu];
        PCPUMCTX    pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
        if (RT_UNLIKELY(!pCtx))
        {
            LogRel((DBGFLOG_NAME ": CPUMQueryGuestCtxPtr failed for vCPU[%u]\n", iCpu));
            RTMemFree(pDbgfCoreCpu);
            return VERR_INVALID_POINTER;
        }

        RT_BZERO(pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
        dbgfR3GetCoreCpu(pCtx, pDbgfCoreCpu);
        rc = Elf64WriteNoteHdr(hFile, NT_VBOXCPU, g_pcszCoreVBoxCpu, pDbgfCoreCpu, sizeof(*pDbgfCoreCpu));
        if (RT_FAILURE(rc))
        {
            LogRel((DBGFLOG_NAME ": Elf64WriteNoteHdr failed for vCPU[%u] rc=%Rrc\n", iCpu, rc));
            RTMemFree(pDbgfCoreCpu);
            return rc;
        }
    }
    RTMemFree(pDbgfCoreCpu);
    pDbgfCoreCpu = NULL;

    /*
     * Write memory ranges.
     */
    Assert(RTFileTell(hFile) == offMemory);
    for (uint16_t iRange = 0; iRange < cMemRanges; iRange++)
    {
        RTGCPHYS GCPhysStart;
        RTGCPHYS GCPhysEnd;
        bool     fIsMmio;
        rc = PGMR3PhysGetRange(pVM, iRange, &GCPhysStart, &GCPhysEnd, NULL /* pszDesc */, &fIsMmio);
        if (RT_FAILURE(rc))
        {
            LogRel((DBGFLOG_NAME ": PGMR3PhysGetRange(2) failed for iRange(%u) rc=%Rrc\n", iRange, rc));
            return rc;
        }

        if (fIsMmio)
            continue;

        /*
         * Write this memory range page by page.
         *
         * The read function may fail on reserved pages; we write these as
         * zero pages for now (would be nice to have the VGA bits there
         * though).
         */
        uint64_t cbMemRange  = GCPhysEnd - GCPhysStart + 1;
        uint64_t cPages      = cbMemRange >> PAGE_SHIFT;
        for (uint64_t iPage = 0; iPage < cPages; iPage++)
        {
            uint8_t abPage[PAGE_SIZE];
            rc = PGMPhysSimpleReadGCPhys(pVM, abPage, GCPhysStart + (iPage << PAGE_SHIFT),  sizeof(abPage));
            if (RT_FAILURE(rc))
            {
                if (rc != VERR_PGM_PHYS_PAGE_RESERVED)
                    LogRel((DBGFLOG_NAME ": PGMPhysRead failed for iRange=%u iPage=%u. rc=%Rrc. Ignoring...\n", iRange, iPage, rc));
                RT_ZERO(abPage);
            }

            rc = RTFileWrite(hFile, abPage, sizeof(abPage), NULL /* all */);
            if (RT_FAILURE(rc))
            {
                LogRel((DBGFLOG_NAME ": RTFileWrite failed. iRange=%u iPage=%u rc=%Rrc\n", iRange, iPage, rc));
                return rc;
            }
        }
    }

    return rc;
}
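A point worth noting about dbgfR3CoreWriteWorker: every file offset is computed up front, so the Assert(RTFileTell(hFile) == ...) checks can verify the writer never drifts from the planned layout. A standalone sketch of the same offset arithmetic with hypothetical stub sizes (the real code uses sizeof(Elf64_Ehdr), sizeof(Elf64_Phdr) and the note-size helper):

#include <stdint.h>
#include <stdio.h>

/* Stub sizes: sizeof(Elf64_Ehdr) is 64 and sizeof(Elf64_Phdr) is 56;
   the note sizes below are made-up placeholders. */
#define CB_EHDR   64
#define CB_PHDR   56
#define CB_DESC  128   /* hypothetical note size for the core descriptor */
#define CB_CPU   512   /* hypothetical note size per CPU dump */

int main(void)
{
    uint16_t cMemRanges = 3, cCpus = 2;

    uint64_t offNote   = CB_EHDR;                        /* one PT_NOTE header */
    uint64_t offLoads  = offNote  + CB_PHDR;             /* then cMemRanges PT_LOADs */
    uint64_t offDesc   = offLoads + cMemRanges * CB_PHDR;
    uint64_t offCpus   = offDesc  + CB_DESC;
    uint64_t offMemory = offCpus  + cCpus * CB_CPU;

    printf("note@%llu loads@%llu desc@%llu cpus@%llu mem@%llu\n",
           (unsigned long long)offNote, (unsigned long long)offLoads,
           (unsigned long long)offDesc, (unsigned long long)offCpus,
           (unsigned long long)offMemory);
    return 0;
}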
Example #10
/* Execute the switch. */
VMMR3DECL(int) VMMDoHwAccmTest(PVM pVM)
{
    uint32_t i;
    int      rc;
    PCPUMCTX pHyperCtx, pGuestCtx;
    RTGCPHYS CR3Phys = 0x0; /* fake address */
    PVMCPU   pVCpu = &pVM->aCpus[0];

    if (!HWACCMR3IsAllowed(pVM))
    {
        RTPrintf("VMM: Hardware accelerated test not available!\n");
        return VERR_ACCESS_DENIED;
    }

    /*
     * These forced actions are not necessary for the test and trigger breakpoints too.
     */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);

    /* Enable mapping of the hypervisor into the shadow page table. */
    uint32_t cb;
    rc = PGMR3MappingsSize(pVM, &cb);
    AssertRCReturn(rc, rc);

    /* Pretend the mappings are now fixed to force a refresh of the reserved PDEs. */
    rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
    AssertRCReturn(rc, rc);

    pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

    pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
    pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT;
    PGMChangeMode(pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
    PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
    VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
    VM_FF_CLEAR(pVM, VM_FF_REQUEST);

    /*
     * Setup stack for calling VMMGCEntry().
     */
    RTRCPTR RCPtrEP;
    rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
    if (RT_SUCCESS(rc))
    {
        RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);

        pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

        /* Fill in hidden selector registers for the hypervisor state. */
        SYNC_SEL(pHyperCtx, cs);
        SYNC_SEL(pHyperCtx, ds);
        SYNC_SEL(pHyperCtx, es);
        SYNC_SEL(pHyperCtx, fs);
        SYNC_SEL(pHyperCtx, gs);
        SYNC_SEL(pHyperCtx, ss);
        SYNC_SEL(pHyperCtx, tr);

        /*
         * Profile switching.
         */
        RTPrintf("VMM: profiling switcher...\n");
        Log(("VMM: profiling switcher...\n"));
        uint64_t TickMin = UINT64_MAX;
        uint64_t tsBegin = RTTimeNanoTS();
        uint64_t TickStart = ASMReadTSC();
        for (i = 0; i < 1000000; i++)
        {
            CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
            CPUMPushHyper(pVCpu, 0);
            CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HWACCM_NOP);
            CPUMPushHyper(pVCpu, pVM->pVMRC);
            CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR));    /* stack frame size */
            CPUMPushHyper(pVCpu, RCPtrEP);                /* what to call */

            pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
            pGuestCtx = CPUMQueryGuestCtxPtr(pVCpu);

            /* Copy the hypervisor context to make sure we have a valid guest context. */
            *pGuestCtx = *pHyperCtx;
            pGuestCtx->cr3 = CR3Phys;

            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
            VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);

            uint64_t TickThisStart = ASMReadTSC();
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, 0);
            uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: R0 returned fatal %Rrc in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            if (TickThisElapsed < TickMin)
                TickMin = TickThisElapsed;
        }
        uint64_t TickEnd = ASMReadTSC();
        uint64_t tsEnd = RTTimeNanoTS();

        uint64_t Elapsed = tsEnd - tsBegin;
        uint64_t PerIteration = Elapsed / (uint64_t)i;
        uint64_t cTicksElapsed = TickEnd - TickStart;
        uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;

        RTPrintf("VMM: %8d cycles     in %11llu ns (%11lld ticks),  %10llu ns/iteration (%11lld ticks)  Min %11lld ticks\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
        Log(("VMM: %8d cycles     in %11llu ns (%11lld ticks),  %10llu ns/iteration (%11lld ticks)  Min %11lld ticks\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));

        rc = VINF_SUCCESS;
    }
    else
        AssertMsgFailed(("Failed to resolved VMMGC.gc::VMMGCEntry(), rc=%Rrc\n", rc));

    return rc;
}
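The profiling loop above reports both the average and the minimum per-iteration tick count; the minimum is the more stable figure for a fixed-cost world switch because it filters out interrupts and preemption. A standalone sketch of the same measurement pattern using POSIX clock_gettime instead of ASMReadTSC:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t nowNs(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * UINT64_C(1000000000) + ts.tv_nsec;
}

static void workUnderTest(void) { /* stands in for the R0 round trip */ }

int main(void)
{
    enum { cIterations = 1000000 };
    uint64_t nsMin   = UINT64_MAX;
    uint64_t nsBegin = nowNs();
    for (unsigned i = 0; i < cIterations; i++)
    {
        uint64_t nsThis = nowNs();
        workUnderTest();
        uint64_t nsElapsed = nowNs() - nsThis;
        if (nsElapsed < nsMin)
            nsMin = nsElapsed;              /* the minimum filters out preemption spikes */
    }
    uint64_t nsTotal = nowNs() - nsBegin;
    printf("%u iterations, %llu ns total, %llu ns/iteration avg, min %llu ns\n",
           (unsigned)cIterations, (unsigned long long)nsTotal,
           (unsigned long long)(nsTotal / cIterations), (unsigned long long)nsMin);
    return 0;
}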
Example #11
/**
 * Initializes the interpreted execution manager.
 *
 * This must be called after CPUM as we're querying information from CPUM about
 * the guest and host CPUs.
 *
 * @returns VBox status code.
 * @param   pVM                The cross context VM structure.
 */
VMMR3DECL(int)      IEMR3Init(PVM pVM)
{
    uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
    uint64_t const uInitialTlbPhysRev  = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        pVCpu->iem.s.pCtxR3 = CPUMQueryGuestCtxPtr(pVCpu);
        pVCpu->iem.s.pCtxR0 = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
        pVCpu->iem.s.pCtxRC = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);

        pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
        pVCpu->iem.s.CodeTlb.uTlbPhysRev  = pVCpu->iem.s.DataTlb.uTlbPhysRev  = uInitialTlbPhysRev;

        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions,               STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Instructions interpreted",                     "/IEM/CPU%u/cInstructions", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps,                  STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Number of longjmp calls",                      "/IEM/CPU%u/cLongJumps", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits,             STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Potential exits",                              "/IEM/CPU%u/cPotentialExits", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented,    STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_ASPECT_NOT_IMPLEMENTED",              "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented,     STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_INSTR_NOT_IMPLEMENTED",               "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses,             STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Informational statuses returned",              "/IEM/CPU%u/cRetInfStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses,             STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Error statuses returned",                      "/IEM/CPU%u/cRetErrStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten,                   STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                        "Approx bytes written",                         "/IEM/CPU%u/cbWritten", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit,              STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);

#ifdef VBOX_WITH_STATISTICS
        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbHits,            STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Code TLB hits",                            "/IEM/CPU%u/CodeTlb-Hits", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbHits,            STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB hits",                            "/IEM/CPU%u/DataTlb-Hits", idCpu);
#endif
        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses,          STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Code TLB misses",                          "/IEM/CPU%u/CodeTlb-Misses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision,        STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Code TLB revision",                        "/IEM/CPU%u/CodeTlb-Revision", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Code TLB physical revision",               "/IEM/CPU%u/CodeTlb-PhysRev", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowReadPath,    STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Code TLB slow read path",                  "/IEM/CPU%u/CodeTlb-SlowReads", idCpu);

        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbMisses,          STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB misses",                          "/IEM/CPU%u/DataTlb-Misses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision,        STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Data TLB revision",                        "/IEM/CPU%u/DataTlb-Revision", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Data TLB physical revision",               "/IEM/CPU%u/DataTlb-PhysRev", idCpu);

#if defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
        /* Allocate instruction statistics and register them. */
        pVCpu->iem.s.pStatsR3 = (PIEMINSTRSTATS)MMR3HeapAllocZ(pVM, MM_TAG_IEM, sizeof(IEMINSTRSTATS));
        AssertLogRelReturn(pVCpu->iem.s.pStatsR3, VERR_NO_MEMORY);
        int rc = MMHyperAlloc(pVM, sizeof(IEMINSTRSTATS), sizeof(uint64_t), MM_TAG_IEM, (void **)&pVCpu->iem.s.pStatsCCR3);
        AssertLogRelRCReturn(rc, rc);
        pVCpu->iem.s.pStatsR0 = MMHyperR3ToR0(pVM, pVCpu->iem.s.pStatsCCR3);
        pVCpu->iem.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->iem.s.pStatsCCR3);
# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
            STAMR3RegisterF(pVM, &pVCpu->iem.s.pStatsCCR3->a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
                            STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
            STAMR3RegisterF(pVM, &pVCpu->iem.s.pStatsR3->a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
                            STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
# include "IEMInstructionStatisticsTmpl.h"
# undef IEM_DO_INSTR_STAT
#endif

        /*
         * Host and guest CPU information.
         */
        if (idCpu == 0)
        {
            pVCpu->iem.s.enmCpuVendor             = CPUMGetGuestCpuVendor(pVM);
            pVCpu->iem.s.enmHostCpuVendor         = CPUMGetHostCpuVendor(pVM);
#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
            switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
            {
                case kCpumMicroarch_Intel_8086:     pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
                case kCpumMicroarch_Intel_80186:    pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
                case kCpumMicroarch_Intel_80286:    pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
                case kCpumMicroarch_Intel_80386:    pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
                case kCpumMicroarch_Intel_80486:    pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
                case kCpumMicroarch_Intel_P5:       pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
                case kCpumMicroarch_Intel_P6:       pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
                case kCpumMicroarch_NEC_V20:        pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
                case kCpumMicroarch_NEC_V30:        pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
                default:                            pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
            }
            LogRel(("IEM: TargetCpu=%s, Microarch=%s\n", iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMR3MicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch)));
#endif
        }
        else
        {
            pVCpu->iem.s.enmCpuVendor             = pVM->aCpus[0].iem.s.enmCpuVendor;
            pVCpu->iem.s.enmHostCpuVendor         = pVM->aCpus[0].iem.s.enmHostCpuVendor;
#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
            pVCpu->iem.s.uTargetCpu               = pVM->aCpus[0].iem.s.uTargetCpu;
#endif
        }

        /*
         * Mark all buffers free.
         */
        uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
        while (iMemMap-- > 0)
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    }
    return VINF_SUCCESS;
}
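The TLB revision seeds in IEMR3Init start a couple hundred increments below the wrap-around point so the revision-overflow path gets exercised early in every VM's life. The revision scheme itself buys O(1) invalidation: entries are stamped with the current revision when inserted, and a flush simply bumps the counter. A standalone sketch with hypothetical sizes (the real code folds the revision into the entry tag):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_SIZE       8            /* hypothetical; the real TLBs are larger */
#define REVISION_INCR  UINT64_C(1)  /* stands in for IEMTLB_REVISION_INCR */

typedef struct TlbEntry { uint64_t uTag; uint64_t uValue; } TlbEntry;
typedef struct Tlb { uint64_t uRevision; TlbEntry aEntries[TLB_SIZE]; } Tlb;

static void tlbInsert(Tlb *pTlb, unsigned i, uint64_t uValue)
{
    pTlb->aEntries[i].uTag   = pTlb->uRevision;  /* stamp with current revision */
    pTlb->aEntries[i].uValue = uValue;
}

static bool tlbLookup(Tlb *pTlb, unsigned i, uint64_t *puValue)
{
    if (pTlb->aEntries[i].uTag != pTlb->uRevision)
        return false;                            /* stale: flushed since insert */
    *puValue = pTlb->aEntries[i].uValue;
    return true;
}

static void tlbFlush(Tlb *pTlb)
{
    pTlb->uRevision += REVISION_INCR;            /* O(1): no entry is touched */
}

int main(void)
{
    Tlb tlb = { UINT64_C(0) - REVISION_INCR * 200 /* like IEMR3Init's seed */, { { 0, 0 } } };
    uint64_t u;
    tlbInsert(&tlb, 0, 0x1234);
    printf("hit=%d\n", tlbLookup(&tlb, 0, &u));  /* hit=1 */
    tlbFlush(&tlb);
    printf("hit=%d\n", tlbLookup(&tlb, 0, &u));  /* hit=0 */
    return 0;
}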
/**
 * \#DB (Debug event) handler.
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap;
 *          other codes mean execution is passed back to the host context.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pRegFrame       Pointer to the register frame for the trap.
 * @param   uDr6            The DR6 hypervisor register value.
 * @param   fAltStepping    Alternative stepping indicator.
 */
VMMRZ_INT_DECL(int) DBGFRZTrap01Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCUINTREG uDr6, bool fAltStepping)
{
#ifdef IN_RC
    const bool fInHyper = !(pRegFrame->ss.Sel & X86_SEL_RPL) && !pRegFrame->eflags.Bits.u1VM;
#else
    NOREF(pRegFrame);
    const bool fInHyper = false;
#endif

    /** @todo Intel docs say that X86_DR6_BS has the highest priority... */
    /*
     * A breakpoint?
     */
    AssertCompile(X86_DR6_B0 == 1 && X86_DR6_B1 == 2 && X86_DR6_B2 == 4 && X86_DR6_B3 == 8);
    if (   (uDr6 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3))
        && pVM->dbgf.s.cEnabledHwBreakpoints > 0)
    {
        for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
        {
            if (    ((uint32_t)uDr6 & RT_BIT_32(iBp))
                &&  pVM->dbgf.s.aHwBreakpoints[iBp].enmType == DBGFBPTYPE_REG)
            {
                pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aHwBreakpoints[iBp].iBp;
                pVCpu->dbgf.s.fSingleSteppingRaw = false;
                LogFlow(("DBGFRZTrap03Handler: hit hw breakpoint %d at %04x:%RGv\n",
                         pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pRegFrame->cs.Sel, pRegFrame->rip));

                return fInHyper ? VINF_EM_DBG_HYPER_BREAKPOINT : VINF_EM_DBG_BREAKPOINT;
            }
        }
    }

    /*
     * Single step?
     * Are we single stepping or is it the guest?
     */
    if (    (uDr6 & X86_DR6_BS)
        &&  (fInHyper || pVCpu->dbgf.s.fSingleSteppingRaw || fAltStepping))
    {
        pVCpu->dbgf.s.fSingleSteppingRaw = false;
        LogFlow(("DBGFRZTrap01Handler: single step at %04x:%RGv\n", pRegFrame->cs.Sel, pRegFrame->rip));
        return fInHyper ? VINF_EM_DBG_HYPER_STEPPED : VINF_EM_DBG_STEPPED;
    }

#ifdef IN_RC
    /*
     * Either an ICEBP in hypervisor code or a guest related debug exception
     * of sorts.
     */
    if (RT_UNLIKELY(fInHyper))
    {
        /*
         * Is this a guest debug event that was delayed past a ring transition?
         *
         * Since we do not allow sysenter/syscall in raw-mode, the only
         * non-trap/fault type transitions that can occur are through interrupt gates.
         * Of those, only INT3 (#BP) has a DPL other than 0 with a CS.RPL of 0.
         * See bugref:9171 and bs3-cpu-weird-1 for more details.
         *
         * We need to reconstruct the guest register state from the hypervisor one
         * here, so here is the layout of the IRET frame on the stack:
         *    20:[8] GS          (V86 only)
         *    1C:[7] FS          (V86 only)
         *    18:[6] DS          (V86 only)
         *    14:[5] ES          (V86 only)
         *    10:[4] SS
         *    0c:[3] ESP
         *    08:[2] EFLAGS
         *    04:[1] CS
         *    00:[0] EIP
         */
        if (pRegFrame->rip == (uintptr_t)TRPMRCHandlerAsmTrap03)
        {
            uint32_t const *pu32Stack = (uint32_t const *)pRegFrame->esp;
            if (   (pu32Stack[2] & X86_EFL_VM)
                || (pu32Stack[1] & X86_SEL_RPL))
            {
                LogFlow(("DBGFRZTrap01Handler: Detected guest #DB delayed past ring transition %04x:%RX32 %#x\n",
                         pu32Stack[1] & 0xffff, pu32Stack[0], pu32Stack[2]));
                PCPUMCTX pGstCtx = CPUMQueryGuestCtxPtr(pVCpu);
                pGstCtx->rip      = pu32Stack[0];
                pGstCtx->cs.Sel   = pu32Stack[1];
                pGstCtx->eflags.u = pu32Stack[2];
                pGstCtx->rsp      = pu32Stack[3];
                pGstCtx->ss.Sel   = pu32Stack[4];
                if (pu32Stack[2] & X86_EFL_VM)
                {
                    pGstCtx->es.Sel = pu32Stack[5];
                    pGstCtx->ds.Sel = pu32Stack[6];
                    pGstCtx->fs.Sel = pu32Stack[7];
                    pGstCtx->gs.Sel = pu32Stack[8];
                }
                else
                {
                    pGstCtx->es.Sel = pRegFrame->es.Sel;
                    pGstCtx->ds.Sel = pRegFrame->ds.Sel;
                    pGstCtx->fs.Sel = pRegFrame->fs.Sel;
                    pGstCtx->gs.Sel = pRegFrame->gs.Sel;
                }
                pGstCtx->rax      = pRegFrame->rax;
                pGstCtx->rcx      = pRegFrame->rcx;
                pGstCtx->rdx      = pRegFrame->rdx;
                pGstCtx->rbx      = pRegFrame->rbx;
                pGstCtx->rsi      = pRegFrame->rsi;
                pGstCtx->rdi      = pRegFrame->rdi;
                pGstCtx->rbp      = pRegFrame->rbp;

                /*
                 * We should assert a #BP followed by a #DB here, but TRPM cannot
                 * do that.  So, we'll just assert the #BP and ignore the #DB, even
                 * if that isn't strictly correct.
                 */
                TRPMResetTrap(pVCpu);
                TRPMAssertTrap(pVCpu, X86_XCPT_BP, TRPM_SOFTWARE_INT);
                return VINF_EM_RAW_GUEST_TRAP;
            }
        }

        LogFlow(("DBGFRZTrap01Handler: Unknown bp at %04x:%RGv\n", pRegFrame->cs.Sel, pRegFrame->rip));
        return VERR_DBGF_HYPER_DB_XCPT;
    }
#endif

    LogFlow(("DBGFRZTrap01Handler: guest debug event %#x at %04x:%RGv!\n", (uint32_t)uDr6, pRegFrame->cs.Sel, pRegFrame->rip));
    return VINF_EM_RAW_GUEST_TRAP;
}
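DBGFRZTrap01Handler depends on DR6 bits 0-3 (B0-B3) lining up one-to-one with the four hardware breakpoint registers, which is exactly what the AssertCompile verifies. A standalone sketch of the same DR6 decode, with simplified constants:

#include <stdint.h>
#include <stdio.h>

#define DR6_B0  UINT32_C(0x01)   /* as X86_DR6_B0..B3: bits 0..3 */
#define DR6_BS  UINT32_C(0x4000) /* single-step, as X86_DR6_BS */

int main(void)
{
    uint32_t uDr6 = DR6_B0 << 2 | DR6_BS;    /* breakpoint 2 hit + single step */

    for (unsigned iBp = 0; iBp < 4; iBp++)   /* B0..B3 map to breakpoints 0..3 */
        if (uDr6 & (UINT32_C(1) << iBp))
            printf("hw breakpoint %u hit\n", iBp);
    if (uDr6 & DR6_BS)
        printf("single step\n");
    return 0;
}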