/**
 * Once-only rendezvous callback for checking/registering shared modules.
 *
 * @returns VBox strict status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU of the calling EMT.
 * @param   pvUser  Pointer to a VMCPUID with the requester's ID.
 */
static DECLCALLBACK(VBOXSTRICTRC) pgmR3SharedModuleRegRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    VMCPUID const idReqCpu = *(VMCPUID *)pvUser;

    /* Only the VCPU that issued the original request does the work, so that
       we are guaranteed to be in the right cr3 context; everyone else just
       returns success. */
    if (pVCpu->idCpu != idReqCpu)
    {
        Assert(pVM->cCpus > 1);
        return VINF_SUCCESS;
    }

    /* Flush all pending handy page operations before changing any shared
       page assignments. */
    int rc = PGMR3PhysAllocateHandyPages(pVM);
    AssertRC(rc);

    /*
     * Lock it here as we can't deal with busy locks in this ring-0 path.
     */
    LogFlow(("pgmR3SharedModuleRegRendezvous: start (%d)\n", pVM->pgm.s.cSharedPages));

    pgmLock(pVM);
    pgmR3PhysAssertSharedPageChecksums(pVM);
    rc = GMMR3CheckSharedModules(pVM);
    pgmR3PhysAssertSharedPageChecksums(pVM);
    pgmUnlock(pVM);

    AssertLogRelRC(rc);

    LogFlow(("pgmR3SharedModuleRegRendezvous: done (%d)\n", pVM->pgm.s.cSharedPages));
    return rc;
}
/**
 * Updates the physical page access handlers.
 *
 * @param   pVM     Pointer to the VM.
 * @remark  Only used when restoring a saved state.
 */
void pgmR3HandlerPhysicalUpdateAll(PVM pVM)
{
    LogFlow(("pgmHandlerPhysicalUpdateAll:\n"));

    /*
     * Two passes over the handler tree under the PGM lock: first clear all
     * handlers, then set them again.
     * (the right -> left on the setting pass is just bird speculating on cache hits)
     */
    pgmLock(pVM);
    RTAvlroGCPhysDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, true /*fFromLeft*/,
                           pgmR3HandlerPhysicalOneClear, pVM);
    RTAvlroGCPhysDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, false /*fFromLeft*/,
                           pgmR3HandlerPhysicalOneSet, pVM);
    pgmUnlock(pVM);
}
/**
 * The '.pgmsharedmodules' command.
 *
 * @returns VBox status.
 * @param   pCmd        Pointer to the command descriptor (as registered).
 * @param   pCmdHlp     Pointer to command helper functions.
 * @param   pVM         Pointer to the current VM (if any).
 * @param   paArgs      Pointer to (readonly) array of arguments.
 * @param   cArgs       Number of arguments in the array.
 */
DECLCALLBACK(int) pgmR3CmdShowSharedModules(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    NOREF(pCmd);
    NOREF(paArgs);
    NOREF(cArgs);

    pgmLock(pVM);

    /* Walk the global registration table and dump each live entry with its regions. */
    for (unsigned iModule = 0; iModule < RT_ELEMENTS(g_apSharedModules); iModule++)
    {
        if (!g_apSharedModules[iModule])
            continue;

        pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Shared module %s (%s):\n",
                           g_apSharedModules[iModule]->szName, g_apSharedModules[iModule]->szVersion);
        for (unsigned iRegion = 0; iRegion < g_apSharedModules[iModule]->cRegions; iRegion++)
            pCmdHlp->pfnPrintf(pCmdHlp, NULL, "--- Region %d: base %RGv size %x\n", iRegion,
                               g_apSharedModules[iModule]->aRegions[iRegion].GCRegionAddr,
                               g_apSharedModules[iModule]->aRegions[iRegion].cbRegion);
    }

    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
/**
 * Query the state of a page in a shared module
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   GCPtrPage       Page address.
 * @param   pfShared        Shared status (out).
 * @param   pfPageFlags     Page flags (out).
 */
VMMR3DECL(int) PGMR3SharedModuleGetPageState(PVM pVM, RTGCPTR GCPtrPage, bool *pfShared, uint64_t *pfPageFlags)
{
    /* Debug only API for the page fusion testcase. */
    RTGCPHYS GCPhysPage;
    uint64_t fGstFlags;

    pgmLock(pVM);

    int rc = PGMGstGetPage(VMMGetCpu(pVM), GCPtrPage, &fGstFlags, &GCPhysPage);
    switch (rc)
    {
        case VINF_SUCCESS:
        {
            /* Mapping exists; look up the physical page to report its shared state. */
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysPage);
            if (!pPage)
                rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            else
            {
                *pfShared    = PGM_PAGE_IS_SHARED(pPage);
                *pfPageFlags = fGstFlags;
            }
            break;
        }

        case VERR_PAGE_NOT_PRESENT:
        case VERR_PAGE_TABLE_NOT_PRESENT:
        case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
        case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
            /* No guest mapping: report "not shared / no flags" as success. */
            *pfShared    = false;
            *pfPageFlags = 0;
            rc = VINF_SUCCESS;
            break;

        default:
            break;
    }

    pgmUnlock(pVM);
    return rc;
}
/**
 * @callback_method_impl{FNDBGCCMD, The '.pgmcheckduppages' command.}
 *
 * Walks every RAM range and classifies each RAM page as zero, ballooned,
 * shared, allocated-but-all-zero, duplicate, or unique, then prints a
 * summary (page counts and MB, assuming 4 KB pages: count / 256 = MB).
 */
DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    unsigned cBallooned = 0;
    unsigned cShared = 0;
    unsigned cZero = 0;
    unsigned cUnique = 0;
    unsigned cDuplicate = 0;
    unsigned cAllocZero = 0;
    unsigned cPages = 0;
    NOREF(pCmd); NOREF(paArgs); NOREF(cArgs);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    pgmLock(pVM);

    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
    {
        PPGMPAGE pPage  = &pRam->aPages[0];
        RTGCPHYS GCPhys = pRam->GCPhys;
        uint32_t cLeft  = pRam->cb >> PAGE_SHIFT;
        while (cLeft-- > 0)
        {
            /* Only RAM pages are classified; MMIO/ROM/etc. types are skipped. */
            if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
            {
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ZERO:
                        cZero++;
                        break;

                    case PGM_PAGE_STATE_BALLOONED:
                        cBallooned++;
                        break;

                    case PGM_PAGE_STATE_SHARED:
                        cShared++;
                        break;

                    case PGM_PAGE_STATE_ALLOCATED:
                    case PGM_PAGE_STATE_WRITE_MONITORED:
                    {
                        /* Check if the page was allocated, but completely zero. */
                        PGMPAGEMAPLOCK PgMpLck;
                        const void *pvPage;
                        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
                        if (   RT_SUCCESS(rc)
                            && ASMMemIsZeroPage(pvPage))
                            cAllocZero++;
                        /* NOTE(review): if the mapping above failed, the page is still
                           run through the duplicate check below -- presumably a
                           best-effort fallback; confirm that is intended. */
                        else if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
                            cDuplicate++;
                        else
                            cUnique++;
                        if (RT_SUCCESS(rc))
                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                        break;
                    }

                    default:
                        AssertFailed();
                        break;
                }
            }

            /* next */
            pPage++;
            GCPhys += PAGE_SIZE;
            cPages++;

            /* Print a progress dot every 128 pages (512 KB with 4 KB pages);
               the old comment said "every megabyte", but 0x7f masks 7 bits,
               i.e. 128 pages, not 256. */
            if ((cPages & 0x7f) == 0)
                pCmdHlp->pfnPrintf(pCmdHlp, NULL, ".");
        }
    }
    pgmUnlock(pVM);

    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "\nNumber of zero pages %08x (%d MB)\n", cZero, cZero / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of alloczero pages %08x (%d MB)\n", cAllocZero, cAllocZero / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of ballooned pages %08x (%d MB)\n", cBallooned, cBallooned / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of shared pages %08x (%d MB)\n", cShared, cShared / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of unique pages %08x (%d MB)\n", cUnique, cUnique / 256);
    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Number of duplicate pages %08x (%d MB)\n", cDuplicate, cDuplicate / 256);
    return VINF_SUCCESS;
}