Example #1
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address.
 * @param   pVM     Pointer to the VM.
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @remarks Don't use when in long mode.
 */
VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
{
    Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM)));    /* DON'T USE! */

    /** @todo check the limit. */
    X86DESC    Desc;
    if (!(Sel & X86_SEL_LDT))
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    else
    {
        /* LDT selector: read the descriptor from the shadow LDT mapping.
           (pvLdt, offLdtHyper and the X86DESC_BASE form are assumptions here.) */
        PX86DESC paLDT = (PX86DESC)((uint8_t *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    /* Flat address = segment base from the descriptor + the offset part. */
    return (RTGCPTR)(X86DESC_BASE(&Desc) + Addr);
}
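
The conversion itself is just "descriptor base + offset". A minimal, self-contained
sketch of that arithmetic, using a hypothetical EXAMPLEDESC struct in place of the
VBox X86DESC type and ignoring limit/access checks exactly like the function above:

#include <stdint.h>

/* Hypothetical descriptor layout: only the base fields matter for this sketch. */
typedef struct EXAMPLEDESC
{
    uint16_t u16BaseLow;    /* base bits  0..15 */
    uint8_t  u8BaseHigh1;   /* base bits 16..23 */
    uint8_t  u8BaseHigh2;   /* base bits 24..31 */
} EXAMPLEDESC;

/* Selector:offset -> flat address: assemble the 32-bit base and add the offset. */
static uint32_t exampleToFlat(const EXAMPLEDESC *pDesc, uint32_t offAddr)
{
    uint32_t uBase = (uint32_t)pDesc->u16BaseLow
                   | ((uint32_t)pDesc->u8BaseHigh1 << 16)
                   | ((uint32_t)pDesc->u8BaseHigh2 << 24);
    return uBase + offAddr;
}
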
/**
 * EMT worker for DBGFR3PagingDumpEx.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   idCpu           The current CPU ID.
 * @param   fFlags          The flags, DBGFPGDMP_FLAGS_XXX.  Already validated by the caller.
 * @param   pcr3            The CR3 to use (unless we're getting the current
 *                          state, see @a fFlags).
 * @param   pu64FirstAddr   The first address.
 * @param   pu64LastAddr    The last address.
 * @param   cMaxDepth       The maximum paging hierarchy depth to dump.
 * @param   pHlp            The output callbacks.
 */
static DECLCALLBACK(int) dbgfR3PagingDumpEx(PVM pVM, VMCPUID idCpu, uint32_t fFlags, uint64_t *pcr3,
                                            uint64_t *pu64FirstAddr, uint64_t *pu64LastAddr,
                                            uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /*
     * Implement dumping both contexts by means of recursion.
     */
    if ((fFlags & (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW)) == (DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_SHADOW))
    {
        int rc1 = dbgfR3PagingDumpEx(pVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_GUEST,
                                     pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
        int rc2 = dbgfR3PagingDumpEx(pVM, idCpu, fFlags & ~DBGFPGDMP_FLAGS_SHADOW,
                                     pcr3, pu64FirstAddr, pu64LastAddr, cMaxDepth, pHlp);
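        /* Return the first failure status, otherwise whatever the second dump returned. */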
        return RT_FAILURE(rc1) ? rc1 : rc2;
    }

    /*
     * Get the current CR3/mode if required.
     */
    uint64_t cr3 = *pcr3;
    if (fFlags & (DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE))
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        {
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                cr3 = PGMGetHyperCR3(pVCpu);
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
            {
                fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetShadowMode(pVCpu));
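                /* Nested paging: the tables being dumped follow the host paging
                   mode; a 32-bit host running a long-mode guest still needs LME. */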
                if (fFlags & DBGFPGDMP_FLAGS_NP)
                {
                    fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetHostMode(pVM));
                    if (HC_ARCH_BITS == 32 && CPUMIsGuestInLongMode(pVCpu))
                        fFlags |= DBGFPGDMP_FLAGS_LME;
                }
            }
        }
        else
        {
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
                cr3 = CPUMGetGuestCR3(pVCpu);
            if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
            {
                AssertCompile(DBGFPGDMP_FLAGS_PSE == X86_CR4_PSE);      AssertCompile(DBGFPGDMP_FLAGS_PAE == X86_CR4_PAE);
                fFlags |= CPUMGetGuestCR4(pVCpu)  & (X86_CR4_PSE | X86_CR4_PAE);
                AssertCompile(DBGFPGDMP_FLAGS_LME == MSR_K6_EFER_LME);  AssertCompile(DBGFPGDMP_FLAGS_NXE == MSR_K6_EFER_NXE);
                fFlags |= CPUMGetGuestEFER(pVCpu) & (MSR_K6_EFER_LME | MSR_K6_EFER_NXE);
            }
        }
    }
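    /* Any current-CR3/mode requests have been resolved above; strip them before passing the flags to PGM. */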
    fFlags &= ~(DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_CURRENT_CR3);

    /*
     * Call PGM to do the real work.
     */
    int rc;
    if (fFlags & DBGFPGDMP_FLAGS_SHADOW)
        rc = PGMR3DumpHierarchyShw(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
    else
        rc = PGMR3DumpHierarchyGst(pVM, cr3, fFlags, *pu64FirstAddr, *pu64LastAddr, cMaxDepth, pHlp);
    return rc;
}
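
The "dump both contexts by recursion" dispatch at the top of the worker is a general
pattern: when a flag word requests two mutually exclusive views, call yourself once
per view with the other bit cleared and combine the two status codes. A standalone
sketch of just that pattern, with hypothetical names that are not part of the VBox API:

#include <stdio.h>

#define DUMP_F_GUEST   0x1u
#define DUMP_F_SHADOW  0x2u

/* Handles one view per call; the "both" case is reduced to two single-view calls. */
static int exampleDump(unsigned fFlags)
{
    if ((fFlags & (DUMP_F_GUEST | DUMP_F_SHADOW)) == (DUMP_F_GUEST | DUMP_F_SHADOW))
    {
        int rc1 = exampleDump(fFlags & ~DUMP_F_GUEST);   /* shadow only */
        int rc2 = exampleDump(fFlags & ~DUMP_F_SHADOW);  /* guest only  */
        return rc1 != 0 ? rc1 : rc2;                     /* first failure wins */
    }

    printf("dumping the %s tables\n", (fFlags & DUMP_F_SHADOW) ? "shadow" : "guest");
    return 0;
}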