Example #1
/**
 * @callback_method_impl{FNDBGCCMD, The '.pgmcheckduppages' command.}
 */
DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    unsigned cBallooned = 0;
    unsigned cShared    = 0;
    unsigned cZero      = 0;
    unsigned cUnique    = 0;
    unsigned cDuplicate = 0;
    unsigned cAllocZero = 0;
    unsigned cPages     = 0;
    NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
    PVM      pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    pgmLock(pVM);

    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
    {
        PPGMPAGE    pPage  = &pRam->aPages[0];
        RTGCPHYS    GCPhys = pRam->GCPhys;
        uint32_t    cLeft  = pRam->cb >> PAGE_SHIFT;
        while (cLeft-- > 0)
        {
            if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
            {
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ZERO:
                        cZero++;
                        break;

                    case PGM_PAGE_STATE_BALLOONED:
                        cBallooned++;
                        break;

                    case PGM_PAGE_STATE_SHARED:
                        cShared++;
                        break;

                    case PGM_PAGE_STATE_ALLOCATED:
                    case PGM_PAGE_STATE_WRITE_MONITORED:
                    {
                        /* Check if the page was allocated, but completely zero. */
                        PGMPAGEMAPLOCK PgMpLck;
                        const void    *pvPage;
                        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
                        if (    RT_SUCCESS(rc)
                            &&  ASMMemIsZeroPage(pvPage))
                            cAllocZero++;
                        else if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
                            cDuplicate++;
                        else
                            cUnique++;
                        if (RT_SUCCESS(rc))
                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                        break;
                    }

                    default:
                        AssertFailed();
                        break;
                }
            }

            /* next */
            pPage++;
            GCPhys += PAGE_SIZE;
            cPages++;
            /* Give some feedback every 128 processed pages (512 KB with 4 KiB pages). */
            if ((cPages & 0x7f) == 0)
                pCmdHlp->pfnPrintf(pCmdHlp, NULL, ".");
        }
    }
    pgmUnlock(pVM);

    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "\nNumber of zero pages      %08x (%d MB)\n", cZero, cZero / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of alloczero pages %08x (%d MB)\n", cAllocZero, cAllocZero / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of ballooned pages %08x (%d MB)\n", cBallooned, cBallooned / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of shared pages    %08x (%d MB)\n", cShared, cShared / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of unique pages    %08x (%d MB)\n", cUnique, cUnique / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of duplicate pages %08x (%d MB)\n", cDuplicate, cDuplicate / 256);
    return VINF_SUCCESS;
}
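For reference, the "alloczero" bucket above counts pages that have a backing page allocated but contain only zero bytes, and the "/ 256" in the summary converts 4 KiB page counts to megabytes (256 pages per MB). Below is a minimal sketch of the zero-test that ASMMemIsZeroPage performs; it is illustrative only (the real IPRT routine is hand-optimized) and assumes a page-aligned 4 KiB page.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative equivalent of the ASMMemIsZeroPage() check used above:
   returns true when every byte of a 4 KiB, page-aligned page is zero.
   This sketch only shows the semantics, not the optimized implementation. */
static bool isZeroPage(const void *pvPage)
{
    const uint64_t *pu64 = (const uint64_t *)pvPage;
    for (size_t i = 0; i < 4096 / sizeof(uint64_t); i++)
        if (pu64[i] != 0)
            return false;
    return true;
}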
Example #2
/**
 * Check a registered module for shared page changes.
 *
 * The PGM lock shall be taken prior to calling this method.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 Pointer to the VM.
 * @param   pGVM                Pointer to the GVM instance data.
 * @param   idCpu               The ID of the calling virtual CPU.
 * @param   pModule             Global module description.
 * @param   paRegionsGCPtrs     Array parallel to pModule->aRegions with the
 *                              addresses of the regions in the calling
 *                              process.
 */
VMMR0DECL(int) PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, PCRTGCPTR64 paRegionsGCPtrs)
{
    PVMCPU              pVCpu         = &pVM->aCpus[idCpu];
    int                 rc            = VINF_SUCCESS;
    bool                fFlushTLBs    = false;
    bool                fFlushRemTLBs = false;
    GMMSHAREDPAGEDESC   PageDesc;

    Log(("PGMR0SharedModuleCheck: check %s %s base=%RGv size=%x\n", pModule->szName, pModule->szVersion, pModule->Core.Key, pModule->cbModule));

    PGM_LOCK_ASSERT_OWNER(pVM);     /* This cannot fail as we grab the lock in pgmR3SharedModuleRegRendezvous before calling into ring-0. */

    /*
     * Check every region of the shared module.
     */
    for (uint32_t idxRegion = 0; idxRegion < pModule->cRegions; idxRegion++)
    {
        RTGCPTR  GCPtrPage  = paRegionsGCPtrs[idxRegion] & ~(RTGCPTR)PAGE_OFFSET_MASK;
        uint32_t cbLeft     = pModule->aRegions[idxRegion].cb; Assert(!(cbLeft & PAGE_OFFSET_MASK));
        uint32_t idxPage    = 0;

        while (cbLeft)
        {
            /** @todo inefficient to fetch each guest page like this... */
            RTGCPHYS GCPhys;
            uint64_t fFlags;
            rc = PGMGstGetPage(pVCpu, GCPtrPage, &fFlags, &GCPhys);
            if (    rc == VINF_SUCCESS
                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
            {
                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage));
                if (    pPage
                    &&  PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                    &&  PGM_PAGE_GET_READ_LOCKS(pPage) == 0
                    &&  PGM_PAGE_GET_WRITE_LOCKS(pPage) == 0 )
                {
                    PageDesc.idPage = PGM_PAGE_GET_PAGEID(pPage);
                    PageDesc.HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
                    PageDesc.GCPhys = GCPhys;

                    rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
                    if (RT_FAILURE(rc))
                        break;

                    /*
                     * Any change for this page?
                     */
                    if (PageDesc.idPage != NIL_GMM_PAGEID)
                    {
                        Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);

                        Log(("PGMR0SharedModuleCheck: shared page gst virt=%RGv phys=%RGp host %RHp->%RHp\n",
                             GCPtrPage, PageDesc.GCPhys, PGM_PAGE_GET_HCPHYS(pPage), PageDesc.HCPhys));

                        /* Page was either replaced by an existing shared
                           version of it or converted into a read-only shared
                           page, so, clear all references. */
                        bool fFlush = false;
                        rc = pgmPoolTrackUpdateGCPhys(pVM, PageDesc.GCPhys, pPage, true /* clear the entries */, &fFlush);
                        Assert(   rc == VINF_SUCCESS
                               || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
                                   && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)));
                        if (rc == VINF_SUCCESS)
                            fFlushTLBs |= fFlush;
                        fFlushRemTLBs = true;

                        if (PageDesc.HCPhys != PGM_PAGE_GET_HCPHYS(pPage))
                        {
                            /* Update the physical address and page id now. */
                            PGM_PAGE_SET_HCPHYS(pVM, pPage, PageDesc.HCPhys);
                            PGM_PAGE_SET_PAGEID(pVM, pPage, PageDesc.idPage);

                            /* Invalidate page map TLB entry for this page too. */
                            pgmPhysInvalidatePageMapTLBEntry(pVM, PageDesc.GCPhys);
                            pVM->pgm.s.cReusedSharedPages++;
                        }
                        /* else: nothing changed (== this page is now a shared
                           page), so no need to flush anything. */

                        pVM->pgm.s.cSharedPages++;
                        pVM->pgm.s.cPrivatePages--;
                        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_SHARED);

# ifdef VBOX_STRICT /* check sum hack */
                        pPage->s.u2Unused0 = PageDesc.u32StrictChecksum        & 3;
                        pPage->s.u2Unused1 = (PageDesc.u32StrictChecksum >> 8) & 3;
# endif
                    }
                }
            }
            else
            {
                Assert(    rc == VINF_SUCCESS
                       ||  rc == VERR_PAGE_NOT_PRESENT
                       ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
                       ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
                       ||  rc == VERR_PAGE_TABLE_NOT_PRESENT);
                rc = VINF_SUCCESS; /* ignore error */
            }

            idxPage++;
            GCPtrPage += PAGE_SIZE;
            cbLeft    -= PAGE_SIZE;
        }
    }

    /*
     * Do TLB flushing if necessary.
     */
    if (fFlushTLBs)
        PGM_INVL_ALL_VCPU_TLBS(pVM);

    /* Flush the recompiler TLBs as well, via the CPUM changed flags. */
    if (fFlushRemTLBs)
        for (VMCPUID idCurCpu = 0; idCurCpu < pVM->cCpus; idCurCpu++)
            CPUMSetChangedFlags(&pVM->aCpus[idCurCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);

    return rc;
}
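The paRegionsGCPtrs parameter is an array parallel to pModule->aRegions holding the addresses of the regions in the calling process. A minimal sketch of how a caller might build it is shown below; the 16-entry bound and the GCRegionAddr field access on the module's aRegions are assumptions for illustration, not taken from the listing above. The alternative revision that follows takes the region descriptors and their count directly instead of a parallel address array.

    /* Illustrative only: building the parallel array of region addresses before
       calling PGMR0SharedModuleCheck.  The array bound (16) and the assumption
       that pModule->aRegions[i].GCRegionAddr holds the guest address are for
       illustration; the real caller lives on the GMM side of the shared-module code. */
    RTGCPTR64 aGCPtrs[16];
    Assert(pModule->cRegions <= RT_ELEMENTS(aGCPtrs));
    for (uint32_t i = 0; i < pModule->cRegions; i++)
        aGCPtrs[i] = pModule->aRegions[i].GCRegionAddr; /* same index as pModule->aRegions */

    int rc2 = PGMR0SharedModuleCheck(pVM, pGVM, idCpu, pModule, &aGCPtrs[0]);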
/**
 * Check a registered module for shared page changes.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pGVM        Pointer to the GVM instance data.
 * @param   idCpu       The ID of the calling virtual CPU.
 * @param   pModule     Global module description.
 * @param   cRegions    Number of regions in pRegions.
 * @param   pRegions    Array of region descriptors.
 */
VMMR0DECL(int) PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, uint32_t cRegions, PGMMSHAREDREGIONDESC pRegions)
{
    int                rc = VINF_SUCCESS;
    GMMSHAREDPAGEDESC  PageDesc;
    bool               fFlushTLBs = false;
    PVMCPU             pVCpu = &pVM->aCpus[idCpu];

    Log(("PGMR0SharedModuleCheck: check %s %s base=%RGv size=%x\n", pModule->szName, pModule->szVersion, pModule->Core.Key, pModule->cbModule));

    PGM_LOCK_ASSERT_OWNER(pVM);     /* This cannot fail as we grab the lock in pgmR3SharedModuleRegRendezvous before calling into ring-0. */

    /* Check every region of the shared module. */
    for (unsigned idxRegion = 0; idxRegion < cRegions; idxRegion++)
    {
        Assert((pRegions[idxRegion].cbRegion & 0xfff) == 0);
        Assert((pRegions[idxRegion].GCRegionAddr & 0xfff) == 0);

        RTGCPTR  GCRegion = pRegions[idxRegion].GCRegionAddr;
        unsigned cbRegion = pRegions[idxRegion].cbRegion & ~0xfff;
        unsigned idxPage  = 0;

        while (cbRegion)
        {
            RTGCPHYS GCPhys;
            uint64_t fFlags;

            /** @todo inefficient to fetch each guest page like this... */
            rc = PGMGstGetPage(pVCpu, GCRegion, &fFlags, &GCPhys);
            if (    rc == VINF_SUCCESS
                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
            {
                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage));
                if (    pPage
                    &&  PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
                {
                    PageDesc.uHCPhysPageId = PGM_PAGE_GET_PAGEID(pPage);
                    PageDesc.HCPhys        = PGM_PAGE_GET_HCPHYS(pPage);
                    PageDesc.GCPhys        = GCPhys;

                    rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
                    if (rc == VINF_SUCCESS)
                    {
                        /* Any change for this page? */
                        if (PageDesc.uHCPhysPageId != NIL_GMM_PAGEID)
                        {
                            Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);

                            Log(("PGMR0SharedModuleCheck: shared page gc virt=%RGv phys %RGp host %RHp->%RHp\n", pRegions[idxRegion].GCRegionAddr + idxPage * PAGE_SIZE, PageDesc.GCPhys, PGM_PAGE_GET_HCPHYS(pPage), PageDesc.HCPhys));
                            if (PageDesc.HCPhys != PGM_PAGE_GET_HCPHYS(pPage))
                            {
                                bool fFlush = false;

                                /* Page was replaced by an existing shared version of it; clear all references first. */
                                rc = pgmPoolTrackUpdateGCPhys(pVM, PageDesc.GCPhys, pPage, true /* clear the entries */, &fFlush);
                                Assert(rc == VINF_SUCCESS || (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3) && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)));
                                if (rc == VINF_SUCCESS)
                                    fFlushTLBs |= fFlush;

                                /* Update the physical address and page id now. */
                                PGM_PAGE_SET_HCPHYS(pVM, pPage, PageDesc.HCPhys);
                                PGM_PAGE_SET_PAGEID(pVM, pPage, PageDesc.uHCPhysPageId);

                                /* Invalidate page map TLB entry for this page too. */
                                pgmPhysInvalidatePageMapTLBEntry(pVM, PageDesc.GCPhys);
                                pVM->pgm.s.cReusedSharedPages++;
                            }
                            /* else nothing changed (== this page is now a shared page), so no need to flush anything. */

                            pVM->pgm.s.cSharedPages++;
                            pVM->pgm.s.cPrivatePages--;
                            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_SHARED);
                        }
                    }
                    else
                        break;
                }
            }
            else
            {
                Assert(    rc == VINF_SUCCESS
                       ||  rc == VERR_PAGE_NOT_PRESENT
                       ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
                       ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
                       ||  rc == VERR_PAGE_TABLE_NOT_PRESENT);
                rc = VINF_SUCCESS;  /* ignore error */
            }

            idxPage++;
            GCRegion += PAGE_SIZE;
            cbRegion -= PAGE_SIZE;
        }
    }

    if (fFlushTLBs)
        PGM_INVL_ALL_VCPU_TLBS(pVM);

    return rc;
}
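For completeness, here is a sketch of invoking the explicit-region variant. The descriptor field names GCRegionAddr and cbRegion match their use in the loop above; the two regions, their addresses, and their sizes are made-up example values, not data from the listing.

    /* Illustrative only: calling the explicit-region variant with two page-aligned,
       page-multiple regions.  The addresses and sizes are example data; GCRegionAddr
       and cbRegion are the fields the loop above reads. */
    GMMSHAREDREGIONDESC aRegions[2];
    aRegions[0].GCRegionAddr = UINT64_C(0x00007ff600010000);   /* page aligned */
    aRegions[0].cbRegion     = 4 * PAGE_SIZE;
    aRegions[1].GCRegionAddr = UINT64_C(0x00007ff600020000);
    aRegions[1].cbRegion     = 2 * PAGE_SIZE;

    int rc2 = PGMR0SharedModuleCheck(pVM, pGVM, idCpu, pModule, RT_ELEMENTS(aRegions), &aRegions[0]);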