/**
 * Query the state of a page in a shared module
 *
 * Debug-only helper for the page fusion testcase: resolves a guest virtual
 * address and reports whether the backing physical page is shared.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   GCPtrPage       Page address.
 * @param   pfShared        Shared status (out).
 * @param   pfPageFlags     Page flags (out).
 */
VMMR3DECL(int) PGMR3SharedModuleGetPageState(PVM pVM, RTGCPTR GCPtrPage, bool *pfShared, uint64_t *pfPageFlags)
{
    /* Debug only API for the page fusion testcase. */
    RTGCPHYS    GCPhysGst;
    uint64_t    fGstFlags;

    pgmLock(pVM);

    int rc = PGMGstGetPage(VMMGetCpu(pVM), GCPtrPage, &fGstFlags, &GCPhysGst);
    if (rc == VINF_SUCCESS)
    {
        /* The address resolved; look up the physical page and report its state. */
        PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysGst);
        if (!pPage)
            rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
        else
        {
            *pfShared    = PGM_PAGE_IS_SHARED(pPage);
            *pfPageFlags = fGstFlags;
        }
    }
    else if (   rc == VERR_PAGE_NOT_PRESENT
             || rc == VERR_PAGE_TABLE_NOT_PRESENT
             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT)
    {
        /* Not mapped at any paging level: report "not shared / no flags" as success. */
        *pfShared    = false;
        *pfPageFlags = 0;
        rc = VINF_SUCCESS;
    }
    /* else: pass any other status code straight through to the caller. */

    pgmUnlock(pVM);
    return rc;
}
/**
 * Check a registered module for shared page changes.
 *
 * The PGM lock shall be taken prior to calling this method.
 *
 * @returns The following VBox status codes.
 *
 * @param   pVM                 Pointer to the VM.
 * @param   pGVM                Pointer to the GVM instance data.
 * @param   idCpu               The ID of the calling virtual CPU.
 * @param   pModule             Global module description.
 * @param   paRegionsGCPtrs     Array parallel to pModules->aRegions with the
 *                              addresses of the regions in the calling
 *                              process.
 */
VMMR0DECL(int) PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, PCRTGCPTR64 paRegionsGCPtrs)
{
    PVMCPU              pVCpu         = &pVM->aCpus[idCpu];
    int                 rc            = VINF_SUCCESS;
    bool                fFlushTLBs    = false;  /* accumulated "pool tracking asked for a TLB flush" */
    bool                fFlushRemTLBs = false;  /* set whenever a page's backing/state changed */
    GMMSHAREDPAGEDESC   PageDesc;

    Log(("PGMR0SharedModuleCheck: check %s %s base=%RGv size=%x\n", pModule->szName, pModule->szVersion, pModule->Core.Key, pModule->cbModule));

    PGM_LOCK_ASSERT_OWNER(pVM); /* This cannot fail as we grab the lock in pgmR3SharedModuleRegRendezvous before calling into ring-0. */

    /*
     * Check every region of the shared module.
     */
    for (uint32_t idxRegion = 0; idxRegion < pModule->cRegions; idxRegion++)
    {
        /* Walk the region page by page, starting at the page-aligned base. */
        RTGCPTR  GCPtrPage = paRegionsGCPtrs[idxRegion] & ~(RTGCPTR)PAGE_OFFSET_MASK;
        uint32_t cbLeft    = pModule->aRegions[idxRegion].cb;
        Assert(!(cbLeft & PAGE_OFFSET_MASK));
        uint32_t idxPage   = 0;

        while (cbLeft)
        {
            /** @todo inefficient to fetch each guest page like this... */
            RTGCPHYS GCPhys;
            uint64_t fFlags;
            rc = PGMGstGetPage(pVCpu, GCPtrPage, &fFlags, &GCPhys);
            if (    rc == VINF_SUCCESS
                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
            {
                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage));
                /* Only consider fully allocated, unlocked pages as sharing candidates. */
                if (    pPage
                    &&  PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                    &&  PGM_PAGE_GET_READ_LOCKS(pPage) == 0
                    &&  PGM_PAGE_GET_WRITE_LOCKS(pPage) == 0 )
                {
                    PageDesc.idPage = PGM_PAGE_GET_PAGEID(pPage);
                    PageDesc.HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
                    PageDesc.GCPhys = GCPhys;

                    /* Ask GMM whether this page matches the registered module content. */
                    rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
                    if (RT_FAILURE(rc))
                        break;

                    /*
                     * Any change for this page?
                     */
                    if (PageDesc.idPage != NIL_GMM_PAGEID)
                    {
                        Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);

                        Log(("PGMR0SharedModuleCheck: shared page gst virt=%RGv phys=%RGp host %RHp->%RHp\n",
                             GCPtrPage, PageDesc.GCPhys, PGM_PAGE_GET_HCPHYS(pPage), PageDesc.HCPhys));

                        /* Page was either replaced by an existing shared
                           version of it or converted into a read-only shared
                           page, so, clear all references. */
                        bool fFlush = false;
                        rc = pgmPoolTrackUpdateGCPhys(pVM, PageDesc.GCPhys, pPage, true /* clear the entries */, &fFlush);
                        Assert(    rc == VINF_SUCCESS
                               || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
                                   && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)));
                        if (rc == VINF_SUCCESS)
                            fFlushTLBs |= fFlush;
                        fFlushRemTLBs = true;

                        if (PageDesc.HCPhys != PGM_PAGE_GET_HCPHYS(pPage))
                        {
                            /* Update the physical address and page id now. */
                            PGM_PAGE_SET_HCPHYS(pVM, pPage, PageDesc.HCPhys);
                            PGM_PAGE_SET_PAGEID(pVM, pPage, PageDesc.idPage);

                            /* Invalidate page map TLB entry for this page too. */
                            pgmPhysInvalidatePageMapTLBEntry(pVM, PageDesc.GCPhys);
                            pVM->pgm.s.cReusedSharedPages++;
                        }
                        /* else: nothing changed (== this page is now a shared
                           page), so no need to flush anything. */

                        /* Keep the private/shared accounting in sync with the new page state. */
                        pVM->pgm.s.cSharedPages++;
                        pVM->pgm.s.cPrivatePages--;
                        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_SHARED);

# ifdef VBOX_STRICT /* check sum hack */
                        pPage->s.u2Unused0 = PageDesc.u32StrictChecksum & 3;
                        pPage->s.u2Unused1 = (PageDesc.u32StrictChecksum >> 8) & 3;
# endif
                    }
                }
            }
/**
 * Called on the EMT for the VCpu.
 *
 * Resolves a flat guest address to its guest physical address via the
 * guest page tables.
 *
 * @returns VBox status code.
 * @param   pVCpu       The virtual CPU handle.
 * @param   pAddress    The address.
 * @param   pGCPhys     Where to return the physical address.
 */
static DECLCALLBACK(int) dbgfR3AddrToPhysOnVCpu(PVMCPU pVCpu, PDBGFADDRESS pAddress, PRTGCPHYS pGCPhys)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* Thin wrapper only: FlatPtr cannot be passed thru VMR3ReqCall directly. */
    int rc = PGMGstGetPage(pVCpu, pAddress->FlatPtr, NULL /* no flags wanted */, pGCPhys);
    return rc;
}
/**
 * Check a registered module for shared page changes
 *
 * @returns The following VBox status codes.
 *
 * @param   pVM         The VM handle.
 * @param   pGVM        Pointer to the GVM instance data.
 * @param   idCpu       VCPU id
 * @param   pModule     Module description
 * @param   cRegions    Number of regions
 * @param   pRegions    Region array
 */
VMMR0DECL(int) PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, uint32_t cRegions, PGMMSHAREDREGIONDESC pRegions)
{
    int                 rc         = VINF_SUCCESS;
    GMMSHAREDPAGEDESC   PageDesc;
    bool                fFlushTLBs = false;  /* set when pool tracking requests a TLB flush */
    PVMCPU              pVCpu      = &pVM->aCpus[idCpu];

    Log(("PGMR0SharedModuleCheck: check %s %s base=%RGv size=%x\n", pModule->szName, pModule->szVersion, pModule->Core.Key, pModule->cbModule));

    PGM_LOCK_ASSERT_OWNER(pVM); /* This cannot fail as we grab the lock in pgmR3SharedModuleRegRendezvous before calling into ring-0. */

    /* Check every region of the shared module. */
    for (unsigned idxRegion = 0; idxRegion < cRegions; idxRegion++)
    {
        /* Regions are expected to be page aligned and page sized. */
        Assert((pRegions[idxRegion].cbRegion & 0xfff) == 0);
        Assert((pRegions[idxRegion].GCRegionAddr & 0xfff) == 0);

        RTGCPTR  GCRegion = pRegions[idxRegion].GCRegionAddr;
        unsigned cbRegion = pRegions[idxRegion].cbRegion & ~0xfff;
        unsigned idxPage  = 0;

        while (cbRegion)
        {
            RTGCPHYS GCPhys;
            uint64_t fFlags;

            /** @todo inefficient to fetch each guest page like this... */
            rc = PGMGstGetPage(pVCpu, GCRegion, &fFlags, &GCPhys);
            if (    rc == VINF_SUCCESS
                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
            {
                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage));
                /* Only fully allocated pages are candidates for sharing. */
                if (    pPage
                    &&  PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
                {
                    PageDesc.uHCPhysPageId = PGM_PAGE_GET_PAGEID(pPage);
                    PageDesc.HCPhys        = PGM_PAGE_GET_HCPHYS(pPage);
                    PageDesc.GCPhys        = GCPhys;

                    /* Ask GMM whether this page matches the registered module content. */
                    rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
                    if (rc == VINF_SUCCESS)
                    {
                        /* Any change for this page?
                         */
                        if (PageDesc.uHCPhysPageId != NIL_GMM_PAGEID)
                        {
                            Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);

                            Log(("PGMR0SharedModuleCheck: shared page gc virt=%RGv phys %RGp host %RHp->%RHp\n",
                                 pRegions[idxRegion].GCRegionAddr + idxPage * PAGE_SIZE, PageDesc.GCPhys, PGM_PAGE_GET_HCPHYS(pPage), PageDesc.HCPhys));

                            if (PageDesc.HCPhys != PGM_PAGE_GET_HCPHYS(pPage))
                            {
                                bool fFlush = false;

                                /* Page was replaced by an existing shared version of it; clear all references first. */
                                rc = pgmPoolTrackUpdateGCPhys(pVM, PageDesc.GCPhys, pPage, true /* clear the entries */, &fFlush);
                                Assert(rc == VINF_SUCCESS || (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3) && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)));
                                if (rc == VINF_SUCCESS)
                                    fFlushTLBs |= fFlush;

                                /* Update the physical address and page id now. */
                                PGM_PAGE_SET_HCPHYS(pVM, pPage, PageDesc.HCPhys);
                                PGM_PAGE_SET_PAGEID(pVM, pPage, PageDesc.uHCPhysPageId);

                                /* Invalidate page map TLB entry for this page too. */
                                pgmPhysInvalidatePageMapTLBEntry(pVM, PageDesc.GCPhys);
                                pVM->pgm.s.cReusedSharedPages++;
                            }
                            /* else nothing changed (== this page is now a shared page), so no need to flush anything. */

                            /* Keep the private/shared accounting in sync with the new page state. */
                            pVM->pgm.s.cSharedPages++;
                            pVM->pgm.s.cPrivatePages--;
                            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_SHARED);
                        }
                    }
                    else
                        break;  /* abort this region on a GMM check failure; rc is returned below */
                }
            }
            else
            {
                /* Not-present at any paging level is expected and ignored. */
                Assert(    rc == VINF_SUCCESS
                       ||  rc == VERR_PAGE_NOT_PRESENT
                       ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
                       ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
                       ||  rc == VERR_PAGE_TABLE_NOT_PRESENT);
                rc = VINF_SUCCESS; /* ignore error */
            }

            idxPage++;
            GCRegion += PAGE_SIZE;
            cbRegion -= PAGE_SIZE;
        }
    }

    if (fFlushTLBs)
        PGM_INVL_ALL_VCPU_TLBS(pVM);

    return rc;
}