/**
 * Process hardware accelerated (HM) mode specific forced actions.
 *
 * This function is called when any FFs in VM_FF_HIGH_PRIORITY_PRE_RAW_MASK
 * are pending.
 *
 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
 *          EM statuses.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 */
static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Sync page directory.
     */
    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (RT_FAILURE(rc))
            return rc;

        Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));

        /* Prefetch pages for EIP and ESP. */
        /** @todo This is rather expensive. Should investigate if it really helps at all. */
        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
        if (rc == VINF_SUCCESS)
            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
        if (rc != VINF_SUCCESS)
        {
            if (rc != VINF_PGM_SYNC_CR3)
            {
                AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
                return rc;
            }
            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
            if (RT_FAILURE(rc))
                return rc;
        }
        /** @todo maybe prefetch the supervisor stack page as well */
        Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
    }

    /*
     * Allocate handy pages (just in case the above actions have consumed some pages).
     */
    if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check whether we're out of memory now.
     *
     * This may stem from some of the above actions or operations that have been
     * executed since we ran FFs. Allocating handy pages, for instance, must
     * always be followed by this check.
     */
    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
        return VINF_EM_NO_MEMORY;

    return VINF_SUCCESS;
}
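/*
 * Illustrative sketch, not part of the original sources: a minimal caller
 * shape for emR3HwaccmForcedActions. The helper name and the idea of gating
 * right here are hypothetical; the FF mask names and the callee signature
 * come from these sources.
 */
static int emR3SketchCheckHighPriorityFFs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /* Only pay for forced-action processing when a relevant FF is pending. */
    if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        return emR3HwaccmForcedActions(pVM, pVCpu, pCtx);
    return VINF_SUCCESS;
}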
/**
 * Gets the pending interrupt.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pu8Interrupt    Where to store the interrupt on success.
 */
VMMDECL(int) PDMGetInterrupt(PVMCPU pVCpu, uint8_t *pu8Interrupt)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    pdmLock(pVM);

    /*
     * The local APIC has a higher priority than the PIC.
     */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pDevIns));
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt));
        uint32_t uTagSrc;
        int i = pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), &uTagSrc);
        AssertMsg(i <= 255 && i >= 0, ("i=%d\n", i));
        if (i >= 0)
        {
            pdmUnlock(pVM);
            *pu8Interrupt = (uint8_t)i;
            VBOXVMM_PDM_IRQ_GET(pVCpu, RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc), i);
            return VINF_SUCCESS;
        }
    }

    /*
     * Check the PIC.
     */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
        Assert(pVM->pdm.s.Pic.CTX_SUFF(pDevIns));
        Assert(pVM->pdm.s.Pic.CTX_SUFF(pfnGetInterrupt));
        uint32_t uTagSrc;
        int i = pVM->pdm.s.Pic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Pic.CTX_SUFF(pDevIns), &uTagSrc);
        AssertMsg(i <= 255 && i >= 0, ("i=%d\n", i));
        if (i >= 0)
        {
            pdmUnlock(pVM);
            *pu8Interrupt = (uint8_t)i;
            VBOXVMM_PDM_IRQ_GET(pVCpu, RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc), i);
            return VINF_SUCCESS;
        }
    }

    /** @todo Figure out exactly why we can get here without anything being set. (REM) */
    pdmUnlock(pVM);
    return VERR_NO_DATA;
}
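/*
 * Usage sketch, not part of the original sources: a minimal consumer of
 * PDMGetInterrupt, modeled on the TRPM exit path later in this section.
 * The helper name is hypothetical; PDMGetInterrupt and TRPMAssertTrap are
 * used with the signatures that appear elsewhere in these sources.
 */
static int pdmSketchDispatchPendingInterrupt(PVMCPU pVCpu)
{
    uint8_t u8Interrupt;
    int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    if (RT_SUCCESS(rc))
        rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT); /* queue it for dispatching */
    return rc;
}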
/** @interface_method_impl{PDMPICHLPR3,pfnSetInterruptFF} */
static DECLCALLBACK(void) pdmR3PicHlp_SetInterruptFF(PPDMDEVINS pDevIns)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR3;

    if (pVM->pdm.s.Apic.pfnLocalInterruptR3)
    {
        LogFlow(("pdmR3PicHlp_SetInterruptFF: caller='%s'/%d: Setting local interrupt on LAPIC\n",
                 pDevIns->pReg->szName, pDevIns->iInstance));
        /* Raise the LAPIC's LINT0 line instead of signaling the CPU directly. */
        pVM->pdm.s.Apic.pfnLocalInterruptR3(pVM->pdm.s.Apic.pDevInsR3, 0, 1);
        return;
    }

    PVMCPU pVCpu = &pVM->aCpus[0];  /* The PIC always delivers to CPU 0; MP configurations use the APIC. */

    LogFlow(("pdmR3PicHlp_SetInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_PIC %d -> 1\n",
             pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptSet(pVM, pVCpu);
#endif
    VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM | VMNOTIFYFF_FLAGS_POKE);
}
/** @interface_method_impl{PDMAPICHLPRC,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmRCApicHlp_ClearInterruptFF(PPDMDEVINS pDevIns, PDMAPICIRQ enmType, VMCPUID idCpu)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMRC;
    AssertReturnVoid(idCpu < pVM->cCpus); /* Validate idCpu before indexing aCpus. */
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    LogFlow(("pdmRCApicHlp_ClearInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 0\n",
             pDevIns, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));

    /* Note: NMI/SMI can't be cleared. */
    switch (enmType)
    {
        case PDMAPICIRQ_HARDWARE:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            break;
        case PDMAPICIRQ_EXTINT:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
            break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }
}
/** @interface_method_impl{PDMAPICHLPR3,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmR3ApicHlp_ClearInterruptFF(PPDMDEVINS pDevIns, PDMAPICIRQ enmType, VMCPUID idCpu)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR3;
    AssertReturnVoid(idCpu < pVM->cCpus); /* Validate idCpu before indexing aCpus. */
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    LogFlow(("pdmR3ApicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 0\n",
             pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));

    /* Note: NMI/SMI can't be cleared. */
    switch (enmType)
    {
        case PDMAPICIRQ_HARDWARE:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            break;
        case PDMAPICIRQ_EXTINT:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
            break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }
#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptClear(pVM, pVCpu);
#endif
}
/** @interface_method_impl{PDMAPICHLPR3,pfnSetInterruptFF} */
static DECLCALLBACK(void) pdmR3ApicHlp_SetInterruptFF(PPDMDEVINS pDevIns, PDMAPICIRQ enmType, VMCPUID idCpu)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR3;
    AssertReturnVoid(idCpu < pVM->cCpus); /* Validate idCpu before indexing aCpus. */
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    LogFlow(("pdmR3ApicHlp_SetInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 1\n",
             pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));

    switch (enmType)
    {
        case PDMAPICIRQ_HARDWARE:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            break;
        case PDMAPICIRQ_NMI:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
            break;
        case PDMAPICIRQ_SMI:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI);
            break;
        case PDMAPICIRQ_EXTINT:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
            break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }
#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptSet(pVM, pVCpu);
#endif
    VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM | VMNOTIFYFF_FLAGS_POKE);
}
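/*
 * Illustrative sketch, not part of the original sources: how an APIC device
 * instance might raise an NMI on a target VCPU through the helper table
 * implemented above, rather than touching VMCPU FFs directly. The helper
 * name and the pApicHlp parameter are hypothetical; pfnSetInterruptFF and
 * PDMAPICIRQ_NMI come from these sources.
 */
static void pdmSketchRaiseNmi(PCPDMAPICHLPR3 pApicHlp, PPDMDEVINS pDevIns, VMCPUID idCpu)
{
    /* Ends up in pdmR3ApicHlp_SetInterruptFF, setting VMCPU_FF_INTERRUPT_NMI. */
    pApicHlp->pfnSetInterruptFF(pDevIns, PDMAPICIRQ_NMI, idCpu);
}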
/** @interface_method_impl{PDMPICHLPGC,pfnSetInterruptFF} */
static DECLCALLBACK(void) pdmRCPicHlp_SetInterruptFF(PPDMDEVINS pDevIns)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMRC;

    if (pVM->pdm.s.Apic.pfnLocalInterruptRC)
    {
        LogFlow(("pdmRCPicHlp_SetInterruptFF: caller=%p/%d: Setting local interrupt on LAPIC\n",
                 pDevIns, pDevIns->iInstance));
        /* Raise the LAPIC's LINT0 line instead of signaling the CPU directly. */
        pVM->pdm.s.Apic.pfnLocalInterruptRC(pVM->pdm.s.Apic.pDevInsRC, 0, 1);
        return;
    }

    PVMCPU pVCpu = &pVM->aCpus[0];  /* The PIC always delivers to CPU 0; MP configurations use the APIC. */

    LogFlow(("pdmRCPicHlp_SetInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_PIC %d -> 1\n",
             pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
}
/** @interface_method_impl{PDMPICHLPR3,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmR3PicHlp_ClearInterruptFF(PPDMDEVINS pDevIns)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR3;
    PVMCPU pVCpu = &pVM->aCpus[0];  /* The PIC always delivers to CPU 0; MP configurations use the APIC. */

    if (pVM->pdm.s.Apic.pfnLocalInterruptR3)
    {
        LogFlow(("pdmR3PicHlp_ClearInterruptFF: caller='%s'/%d: Clearing local interrupt on LAPIC\n",
                 pDevIns->pReg->szName, pDevIns->iInstance));
        /* Lower the LAPIC's LINT0 line instead of signaling the CPU directly. */
        pVM->pdm.s.Apic.pfnLocalInterruptR3(pVM->pdm.s.Apic.pDevInsR3, 0, 0);
        return;
    }

    LogFlow(("pdmR3PicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n",
             pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptClear(pVM, pVCpu);
#endif
}
RT_C_DECLS_END


/**
 * Exits the trap, called when exiting a trap handler.
 *
 * Will reset the trap if it's not a guest trap or the trap
 * is already handled. Will process resume-guest FFs.
 *
 * @returns rc, can be adjusted if it's VINF_SUCCESS or something really bad
 *          happened.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rc          The VBox status code to return.
 * @param   pRegFrame   Pointer to the register frame for the trap.
 *
 * @remarks This must not be used for hypervisor traps, only guest traps.
 */
static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame)
{
    uint32_t uOldActiveVector = pVCpu->trpm.s.uActiveVector;
    NOREF(uOldActiveVector);

    /* Reset trap? */
    if (    rc != VINF_EM_RAW_GUEST_TRAP
        &&  rc != VINF_EM_RAW_RING_SWITCH_INT)
        pVCpu->trpm.s.uActiveVector = UINT32_MAX;

#ifdef VBOX_HIGH_RES_TIMERS_HACK
    /*
     * We should poll the timers occasionally.
     * We must *NOT* do this too frequently as it adds a significant overhead
     * and it'll kill us if the trap load is high. (See @bugref{1354}.)
     * (The heuristic is not very intelligent, we should really check trap
     * frequency etc. here, but alas, we lack any such information atm.)
     */
    static unsigned s_iTimerPoll = 0;
    if (rc == VINF_SUCCESS)
    {
        if (!(++s_iTimerPoll & 0xf))
        {
            TMTimerPollVoid(pVM, pVCpu);
            Log2(("TMTimerPoll at %08RX32 - VM_FF_TM_VIRTUAL_SYNC=%d VMCPU_FF_TIMER=%d\n", pRegFrame->eip,
                  VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)));
        }
    }
    else
        s_iTimerPoll = 0;
#endif

    /* Clear the pending interrupt-inhibition state if required (necessary for dispatching interrupts later on). */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        Log2(("VM_FF_INHIBIT_INTERRUPTS at %08RX32 successor %RGv\n", pRegFrame->eip, EMGetInhibitInterruptsPC(pVCpu)));
        if (pRegFrame->eip != EMGetInhibitInterruptsPC(pVCpu))
        {
            /** @note We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
             *        Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
             *        force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
             *        break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
             */
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }
    }

    /*
     * Pending resume-guest-FF?
     * Or pending (A)PIC interrupt? Windows XP will crash if we delay APIC interrupts.
     */
    if (    rc == VINF_SUCCESS
        &&  (   VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_DMA)
             || VMCPU_FF_ISPENDING(pVCpu,   VMCPU_FF_TIMER
                                          | VMCPU_FF_TO_R3
                                          | VMCPU_FF_INTERRUPT_APIC
                                          | VMCPU_FF_INTERRUPT_PIC
                                          | VMCPU_FF_REQUEST
                                          | VMCPU_FF_PGM_SYNC_CR3
                                          | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                          | VMCPU_FF_PDM_CRITSECT)
            )
       )
    {
        /* The out of memory condition naturally outranks the others. */
        if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)))
            rc = VINF_EM_NO_MEMORY;
        /* Pending Ring-3 action. */
        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_PDM_CRITSECT))
        {
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
            rc = VINF_EM_RAW_TO_R3;
        }
        /* Pending timer action. */
        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
            rc = VINF_EM_RAW_TIMER_PENDING;
        /* The Virtual Sync clock has stopped. */
        else if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
            rc = VINF_EM_RAW_TO_R3;
        /* DMA work pending? */
        else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
            rc = VINF_EM_RAW_TO_R3;
        /* Pending request packets might contain actions that need immediate
           attention, such as pending hardware interrupts. */
        else if (   VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
                 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
            rc = VINF_EM_PENDING_REQUEST;
        /* Pending interrupt: dispatch it. */
        else if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
                 && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                 &&  PATMAreInterruptsEnabledByCtxCore(pVM, pRegFrame)
                )
        {
            uint8_t u8Interrupt;
            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
            Log(("trpmGCExitTrap: u8Interrupt=%d (%#x) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
            AssertFatalMsgRC(rc, ("PDMGetInterrupt failed with %Rrc\n", rc));
            rc = TRPMForwardTrap(pVCpu, pRegFrame, (uint32_t)u8Interrupt, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_HARDWARE_INT, uOldActiveVector);
            /* can't return if successful */
            Assert(rc != VINF_SUCCESS);

            /* Stop the profile counter that was started in TRPMGCHandlersA.asm */
            Assert(uOldActiveVector <= 16);
            STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[uOldActiveVector], a);

            /* Assert the trap and go to the recompiler to dispatch it. */
            TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);

            STAM_PROFILE_ADV_START(&pVM->trpm.s.aStatGCTraps[uOldActiveVector], a);
            rc = VINF_EM_RAW_INTERRUPT_PENDING;
        }
        /*
         * Try sync CR3?
         */
        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        {
#if 1
            PGMRZDynMapReleaseAutoSet(pVCpu);
            PGMRZDynMapStartAutoSet(pVCpu);
            rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
#else
            rc = VINF_PGM_SYNC_CR3;
#endif
        }
    }

    AssertMsg(   rc != VINF_SUCCESS
              || (   pRegFrame->eflags.Bits.u1IF
                  && (   pRegFrame->eflags.Bits.u2IOPL < (unsigned)(pRegFrame->ss.Sel & X86_SEL_RPL)
                      || pRegFrame->eflags.Bits.u1VM))
              , ("rc=%Rrc\neflags=%RX32 ss=%RTsel IOPL=%d\n", rc, pRegFrame->eflags.u32, pRegFrame->ss.Sel, pRegFrame->eflags.Bits.u2IOPL));
    PGMRZDynMapReleaseAutoSet(pVCpu);
    return rc;
}
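/*
 * Illustrative counterpart sketch, not part of the original sources: the
 * interrupt-inhibition window cleared in trpmGCExitTrap above is opened by
 * the instruction emulator after an STI or MOV SS. The helper name and the
 * cbInstr parameter are hypothetical; EMSetInhibitInterruptsPC is assumed to
 * be the setter matching the EMGetInhibitInterruptsPC accessor used above.
 */
static void trpmSketchOpenInhibitWindow(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t cbInstr)
{
    /* Record the successor EIP and raise the FF; trpmGCExitTrap clears the
       FF once execution has moved past that EIP. */
    EMSetInhibitInterruptsPC(pVCpu, pRegFrame->eip + cbInstr);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
}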
/**
 * Check a registered module for shared page changes.
 *
 * The PGM lock shall be taken prior to calling this method.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 Pointer to the VM.
 * @param   pGVM                Pointer to the GVM instance data.
 * @param   idCpu               The ID of the calling virtual CPU.
 * @param   pModule             Global module description.
 * @param   paRegionsGCPtrs     Array parallel to pModule->aRegions with the
 *                              addresses of the regions in the calling
 *                              process.
 */
VMMR0DECL(int) PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, PCRTGCPTR64 paRegionsGCPtrs)
{
    PVMCPU              pVCpu         = &pVM->aCpus[idCpu];
    int                 rc            = VINF_SUCCESS;
    bool                fFlushTLBs    = false;
    bool                fFlushRemTLBs = false;
    GMMSHAREDPAGEDESC   PageDesc;

    Log(("PGMR0SharedModuleCheck: check %s %s base=%RGv size=%x\n", pModule->szName, pModule->szVersion, pModule->Core.Key, pModule->cbModule));

    PGM_LOCK_ASSERT_OWNER(pVM); /* This cannot fail as we grab the lock in pgmR3SharedModuleRegRendezvous before calling into ring-0. */

    /*
     * Check every region of the shared module.
     */
    for (uint32_t idxRegion = 0; idxRegion < pModule->cRegions; idxRegion++)
    {
        RTGCPTR  GCPtrPage = paRegionsGCPtrs[idxRegion] & ~(RTGCPTR)PAGE_OFFSET_MASK;
        uint32_t cbLeft    = pModule->aRegions[idxRegion].cb;
        Assert(!(cbLeft & PAGE_OFFSET_MASK));

        uint32_t idxPage = 0;
        while (cbLeft)
        {
            /** @todo inefficient to fetch each guest page like this... */
            RTGCPHYS GCPhys;
            uint64_t fFlags;
            rc = PGMGstGetPage(pVCpu, GCPtrPage, &fFlags, &GCPhys);
            if (    rc == VINF_SUCCESS
                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
            {
                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage));
                if (    pPage
                    &&  PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                    &&  PGM_PAGE_GET_READ_LOCKS(pPage) == 0
                    &&  PGM_PAGE_GET_WRITE_LOCKS(pPage) == 0 )
                {
                    PageDesc.idPage = PGM_PAGE_GET_PAGEID(pPage);
                    PageDesc.HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
                    PageDesc.GCPhys = GCPhys;

                    rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
                    if (RT_FAILURE(rc))
                        break;

                    /*
                     * Any change for this page?
                     */
                    if (PageDesc.idPage != NIL_GMM_PAGEID)
                    {
                        Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);

                        Log(("PGMR0SharedModuleCheck: shared page gst virt=%RGv phys=%RGp host %RHp->%RHp\n",
                             GCPtrPage, PageDesc.GCPhys, PGM_PAGE_GET_HCPHYS(pPage), PageDesc.HCPhys));

                        /* Page was either replaced by an existing shared
                           version of it or converted into a read-only shared
                           page, so, clear all references. */
                        bool fFlush = false;
                        rc = pgmPoolTrackUpdateGCPhys(pVM, PageDesc.GCPhys, pPage, true /* clear the entries */, &fFlush);
                        Assert(    rc == VINF_SUCCESS
                               || (   VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
                                   && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)));
                        if (rc == VINF_SUCCESS)
                            fFlushTLBs |= fFlush;
                        fFlushRemTLBs = true;

                        if (PageDesc.HCPhys != PGM_PAGE_GET_HCPHYS(pPage))
                        {
                            /* Update the physical address and page id now. */
                            PGM_PAGE_SET_HCPHYS(pVM, pPage, PageDesc.HCPhys);
                            PGM_PAGE_SET_PAGEID(pVM, pPage, PageDesc.idPage);

                            /* Invalidate page map TLB entry for this page too. */
                            pgmPhysInvalidatePageMapTLBEntry(pVM, PageDesc.GCPhys);
                            pVM->pgm.s.cReusedSharedPages++;
                        }
                        /* else: nothing changed (== this page is now a shared
                           page), so no need to flush anything. */

                        pVM->pgm.s.cSharedPages++;
                        pVM->pgm.s.cPrivatePages--;
                        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_SHARED);

#ifdef VBOX_STRICT /* check sum hack */
                        pPage->s.u2Unused0 = PageDesc.u32StrictChecksum        & 3;
                        pPage->s.u2Unused1 = (PageDesc.u32StrictChecksum >> 8) & 3;
#endif
                    }
                }
            }
            else
            {
                Assert(    rc == VINF_SUCCESS
                       ||  rc == VERR_PAGE_NOT_PRESENT
                       ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
                       ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
                       ||  rc == VERR_PAGE_TABLE_NOT_PRESENT);
                rc = VINF_SUCCESS; /* ignore error */
            }

            idxPage++;
            GCPtrPage += PAGE_SIZE;
            cbLeft    -= PAGE_SIZE;
        }
    }

    /*
     * Do TLB flushing if necessary. (The loop tail and this epilogue mirror
     * the older variant of this function below; the recompiler flush via
     * CPUM_CHANGED_GLOBAL_TLB_FLUSH is an assumed reconstruction.)
     */
    if (fFlushTLBs)
        PGM_INVL_ALL_VCPU_TLBS(pVM);
    if (fFlushRemTLBs)
        for (VMCPUID idCurCpu = 0; idCurCpu < pGVM->cCpus; idCurCpu++)
            CPUMSetChangedFlags(&pVM->aCpus[idCurCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);

    return rc;
}
/**
 * Check a registered module for shared page changes.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pGVM        Pointer to the GVM instance data.
 * @param   idCpu       The ID of the calling virtual CPU.
 * @param   pModule     Module description.
 * @param   cRegions    Number of regions.
 * @param   pRegions    Region array.
 */
VMMR0DECL(int) PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, uint32_t cRegions,
                                      PGMMSHAREDREGIONDESC pRegions)
{
    int                 rc = VINF_SUCCESS;
    GMMSHAREDPAGEDESC   PageDesc;
    bool                fFlushTLBs = false;
    PVMCPU              pVCpu = &pVM->aCpus[idCpu];

    Log(("PGMR0SharedModuleCheck: check %s %s base=%RGv size=%x\n", pModule->szName, pModule->szVersion, pModule->Core.Key, pModule->cbModule));

    PGM_LOCK_ASSERT_OWNER(pVM); /* This cannot fail as we grab the lock in pgmR3SharedModuleRegRendezvous before calling into ring-0. */

    /* Check every region of the shared module. */
    for (unsigned idxRegion = 0; idxRegion < cRegions; idxRegion++)
    {
        Assert((pRegions[idxRegion].cbRegion & 0xfff) == 0);
        Assert((pRegions[idxRegion].GCRegionAddr & 0xfff) == 0);

        RTGCPTR  GCRegion = pRegions[idxRegion].GCRegionAddr;
        unsigned cbRegion = pRegions[idxRegion].cbRegion & ~0xfff;
        unsigned idxPage  = 0;

        while (cbRegion)
        {
            RTGCPHYS GCPhys;
            uint64_t fFlags;

            /** @todo inefficient to fetch each guest page like this... */
            rc = PGMGstGetPage(pVCpu, GCRegion, &fFlags, &GCPhys);
            if (    rc == VINF_SUCCESS
                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
            {
                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage));
                if (    pPage
                    &&  PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
                {
                    PageDesc.uHCPhysPageId = PGM_PAGE_GET_PAGEID(pPage);
                    PageDesc.HCPhys        = PGM_PAGE_GET_HCPHYS(pPage);
                    PageDesc.GCPhys        = GCPhys;

                    rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
                    if (rc == VINF_SUCCESS)
                    {
                        /* Any change for this page? */
                        if (PageDesc.uHCPhysPageId != NIL_GMM_PAGEID)
                        {
                            Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);

                            Log(("PGMR0SharedModuleCheck: shared page gc virt=%RGv phys %RGp host %RHp->%RHp\n",
                                 pRegions[idxRegion].GCRegionAddr + idxPage * PAGE_SIZE, PageDesc.GCPhys, PGM_PAGE_GET_HCPHYS(pPage), PageDesc.HCPhys));

                            if (PageDesc.HCPhys != PGM_PAGE_GET_HCPHYS(pPage))
                            {
                                bool fFlush = false;

                                /* Page was replaced by an existing shared version of it; clear all references first. */
                                rc = pgmPoolTrackUpdateGCPhys(pVM, PageDesc.GCPhys, pPage, true /* clear the entries */, &fFlush);
                                Assert(    rc == VINF_SUCCESS
                                       || (   VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
                                           && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)));
                                if (rc == VINF_SUCCESS)
                                    fFlushTLBs |= fFlush;

                                /* Update the physical address and page id now. */
                                PGM_PAGE_SET_HCPHYS(pVM, pPage, PageDesc.HCPhys);
                                PGM_PAGE_SET_PAGEID(pVM, pPage, PageDesc.uHCPhysPageId);

                                /* Invalidate page map TLB entry for this page too. */
                                pgmPhysInvalidatePageMapTLBEntry(pVM, PageDesc.GCPhys);
                                pVM->pgm.s.cReusedSharedPages++;
                            }
                            /* else nothing changed (== this page is now a shared page), so no need to flush anything. */

                            pVM->pgm.s.cSharedPages++;
                            pVM->pgm.s.cPrivatePages--;
                            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_SHARED);
                        }
                    }
                    else
                        break;
                }
            }
            else
            {
                Assert(    rc == VINF_SUCCESS
                       ||  rc == VERR_PAGE_NOT_PRESENT
                       ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT
                       ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
                       ||  rc == VERR_PAGE_TABLE_NOT_PRESENT);
                rc = VINF_SUCCESS; /* ignore error */
            }

            idxPage++;
            GCRegion += PAGE_SIZE;
            cbRegion -= PAGE_SIZE;
        }
    }

    if (fFlushTLBs)
        PGM_INVL_ALL_VCPU_TLBS(pVM);

    return rc;
}
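/*
 * Illustrative caller sketch, not part of the original sources: both variants
 * above assert lock ownership (PGM_LOCK_ASSERT_OWNER), so any caller must
 * enter with the PGM lock held. The wrapper name is hypothetical, and taking
 * the lock right here (rather than in the ring-3 rendezvous mentioned in the
 * comments above) is an assumption for illustration; pgmLock/pgmUnlock are
 * the internal PGM lock routines.
 */
static int pgmSketchSharedModuleCheckLocked(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule,
                                            uint32_t cRegions, PGMMSHAREDREGIONDESC pRegions)
{
    pgmLock(pVM);
    int rc = PGMR0SharedModuleCheck(pVM, pGVM, idCpu, pModule, cRegions, pRegions);
    pgmUnlock(pVM);
    return rc;
}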