/**
 * Process HM specific forced actions.
 *
 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK
 * are pending.
 *
 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
 *          EM statuses.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 */
static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Sync page directory.
     */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (RT_FAILURE(rc))
            return rc;

#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif

        /* Prefetch pages for EIP and ESP. */
        /** @todo This is rather expensive. Should investigate if it really helps at all. */
        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
        if (rc == VINF_SUCCESS)
            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
        if (rc != VINF_SUCCESS)
        {
            if (rc != VINF_PGM_SYNC_CR3)
            {
                AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
                return rc;
            }
            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
            if (RT_FAILURE(rc))
                return rc;
        }
        /** @todo maybe prefetch the supervisor stack page as well */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
    }

    /*
     * Allocate handy pages (just in case the above actions have consumed some pages).
     */
    if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check whether we're out of memory now.
     *
     * This may stem from some of the above actions or from operations that have been
     * executed since we last ran the FFs. The handy page allocation, for instance, must
     * always be followed by this check.
     */
    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
        return VINF_EM_NO_MEMORY;

    return VINF_SUCCESS;
}
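/*
 * A minimal caller sketch, not the actual EM execution loop: it assumes pVM, pVCpu and
 * pCtx are already set up by the loop, and that the usual VM_FF_HIGH_PRIORITY_PRE_RAW_MASK /
 * VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK masks are the ones to check. The helper name is
 * hypothetical; it only illustrates when emR3HmForcedActions above would be invoked.
 */
static int emR3HmHandlePendingPreExecFFs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) /* hypothetical helper */
{
    if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        return emR3HmForcedActions(pVM, pVCpu, pCtx);
    return VINF_SUCCESS;
}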
/**
 * Gets the pending interrupt.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pu8Interrupt    Where to store the interrupt on success.
 */
VMMDECL(int) PDMGetInterrupt(PVMCPU pVCpu, uint8_t *pu8Interrupt)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pdmLock(pVM);

    /*
     * The local APIC has a higher priority than the PIC.
     */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pDevIns));
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt));
        uint32_t uTagSrc;
        int i = pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, &uTagSrc);
        AssertMsg(i <= 255 && i >= 0, ("i=%d\n", i));
        if (i >= 0)
        {
            pdmUnlock(pVM);
            *pu8Interrupt = (uint8_t)i;
            VBOXVMM_PDM_IRQ_GET(pVCpu, RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc), i);
            return VINF_SUCCESS;
        }
    }

    /*
     * Check the PIC.
     */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
        Assert(pVM->pdm.s.Pic.CTX_SUFF(pDevIns));
        Assert(pVM->pdm.s.Pic.CTX_SUFF(pfnGetInterrupt));
        uint32_t uTagSrc;
        int i = pVM->pdm.s.Pic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Pic.CTX_SUFF(pDevIns), &uTagSrc);
        AssertMsg(i <= 255 && i >= 0, ("i=%d\n", i));
        if (i >= 0)
        {
            pdmUnlock(pVM);
            *pu8Interrupt = (uint8_t)i;
            VBOXVMM_PDM_IRQ_GET(pVCpu, RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc), i);
            return VINF_SUCCESS;
        }
    }

    /** @todo Figure out exactly why we can get here without anything being set. (REM) */
    pdmUnlock(pVM);
    return VERR_NO_DATA;
}
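/*
 * A minimal usage sketch, not the actual EM/TRPM injection code: it assumes the caller runs
 * on the EMT of pVCpu and that guest interrupts are deliverable at this point. The helper
 * name is hypothetical; the TRPMAssertTrap call merely illustrates one way a fetched vector
 * could be queued for injection.
 */
static int examplePdmInjectPendingInterrupt(PVMCPU pVCpu) /* hypothetical helper */
{
    uint8_t u8Interrupt;
    int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    if (RT_SUCCESS(rc))
        rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT); /* queue the vector for injection */
    return rc;
}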
/** @interface_method_impl{PDMPICHLPR3,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmR3PicHlp_ClearInterruptFF(PPDMDEVINS pDevIns)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM    pVM   = pDevIns->Internal.s.pVMR3;
    PVMCPU pVCpu = &pVM->aCpus[0];  /* for PIC we always deliver to CPU 0, MP use APIC */

    if (pVM->pdm.s.Apic.pfnLocalInterruptR3)
    {
        /* Lower the LAPIC's LINT0 line instead of signaling the CPU directly. */
        LogFlow(("pdmR3PicHlp_ClearInterruptFF: caller='%s'/%d: Clearing local interrupt on LAPIC\n",
                 pDevIns->pReg->szName, pDevIns->iInstance));
        /** @todo 'rcRZ' propagation to pfnLocalInterrupt from caller. */
        pVM->pdm.s.Apic.pfnLocalInterruptR3(pVM->pdm.s.Apic.pDevInsR3, pVCpu, 0 /* u8Pin */, 0 /* u8Level */,
                                            VINF_SUCCESS /* rcRZ */);
        return;
    }

    LogFlow(("pdmR3PicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n",
             pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptClear(pVM, pVCpu);
#endif
}
/** @interface_method_impl{PDMAPICHLPR0,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmR0ApicHlp_ClearInterruptFF(PPDMDEVINS pDevIns, PDMAPICIRQ enmType, VMCPUID idCpu)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM    pVM   = pDevIns->Internal.s.pVMR0;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    AssertReturnVoid(idCpu < pVM->cCpus);

    LogFlow(("pdmR0ApicHlp_ClearInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 0\n",
             pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));

    /* Note: NMI/SMI can't be cleared. */
    switch (enmType)
    {
        case PDMAPICIRQ_HARDWARE:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            break;
        case PDMAPICIRQ_EXTINT:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
            break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }
}
/** @interface_method_impl{PDMAPICHLPR3,pfnClearInterruptFF} */
static DECLCALLBACK(void) pdmR3ApicHlp_ClearInterruptFF(PPDMDEVINS pDevIns, PDMAPICIRQ enmType, VMCPUID idCpu)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM    pVM   = pDevIns->Internal.s.pVMR3;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    AssertReturnVoid(idCpu < pVM->cCpus);

    LogFlow(("pdmR3ApicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 0\n",
             pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));

    /* Note: NMI/SMI can't be cleared. */
    switch (enmType)
    {
        case PDMAPICIRQ_UPDATE_PENDING:
            VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC);
            break;
        case PDMAPICIRQ_HARDWARE:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            break;
        case PDMAPICIRQ_EXTINT:
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
            break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }

#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptClear(pVM, pVCpu);
#endif
}
/** @interface_method_impl{PDMPICHLPR3,pfnSetInterruptFF} */
static DECLCALLBACK(void) pdmR3PicHlp_SetInterruptFF(PPDMDEVINS pDevIns)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR3;

    if (pVM->pdm.s.Apic.pfnLocalInterruptR3)
    {
        LogFlow(("pdmR3PicHlp_SetInterruptFF: caller='%s'/%d: Setting local interrupt on LAPIC\n",
                 pDevIns->pReg->szName, pDevIns->iInstance));
        /* Raise the LAPIC's LINT0 line instead of signaling the CPU directly. */
        pVM->pdm.s.Apic.pfnLocalInterruptR3(pVM->pdm.s.Apic.pDevInsR3, 0, 1);
        return;
    }

    PVMCPU pVCpu = &pVM->aCpus[0];  /* for PIC we always deliver to CPU 0, MP use APIC */

    LogFlow(("pdmR3PicHlp_SetInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_PIC %d -> 1\n",
             pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptSet(pVM, pVCpu);
#endif
    VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM | VMNOTIFYFF_FLAGS_POKE);
}
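/*
 * A hypothetical device-side sketch showing how an i8259 device model could signal a pending
 * interrupt through its ring-3 helper table. The helper function name is invented for
 * illustration and does not reflect the actual DevPIC structure layout or call sites.
 */
static void examplePicSignalPendingIrq(PCPDMPICHLPR3 pPicHlpR3, PPDMDEVINS pDevIns) /* hypothetical */
{
    /* Ends up in pdmR3PicHlp_SetInterruptFF above, which either raises LINT0 on the
       LAPIC or sets VMCPU_FF_INTERRUPT_PIC on VCPU 0 and notifies it. */
    pPicHlpR3->pfnSetInterruptFF(pDevIns);
}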
/** @interface_method_impl{PDMAPICHLPR3,pfnSetInterruptFF} */
static DECLCALLBACK(void) pdmR3ApicHlp_SetInterruptFF(PPDMDEVINS pDevIns, PDMAPICIRQ enmType, VMCPUID idCpu)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM    pVM   = pDevIns->Internal.s.pVMR3;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    AssertReturnVoid(idCpu < pVM->cCpus);

    LogFlow(("pdmR3ApicHlp_SetInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 1\n",
             pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));

    switch (enmType)
    {
        case PDMAPICIRQ_HARDWARE:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            break;
        case PDMAPICIRQ_NMI:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
            break;
        case PDMAPICIRQ_SMI:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI);
            break;
        case PDMAPICIRQ_EXTINT:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
            break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }

#ifdef VBOX_WITH_REM
    REMR3NotifyInterruptSet(pVM, pVCpu);
#endif
    VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM | VMNOTIFYFF_FLAGS_POKE);
}
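/*
 * A hypothetical device-side sketch for the APIC helper: how an APIC device model could flag
 * a pending hardware interrupt for a given VCPU. The helper function name is invented for
 * illustration and is not the actual APIC device code.
 */
static void exampleApicSignalHardwareIrq(PCPDMAPICHLPR3 pApicHlpR3, PPDMDEVINS pDevIns, VMCPUID idCpu) /* hypothetical */
{
    /* Ends up in pdmR3ApicHlp_SetInterruptFF above, setting VMCPU_FF_INTERRUPT_APIC and
       notifying the target EMT via VMR3NotifyCpuFFU. */
    pApicHlpR3->pfnSetInterruptFF(pDevIns, PDMAPICIRQ_HARDWARE, idCpu);
}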
/** @interface_method_impl{PDMAPICHLPR0,pfnSetInterruptFF} */
static DECLCALLBACK(void) pdmR0ApicHlp_SetInterruptFF(PPDMDEVINS pDevIns, PDMAPICIRQ enmType, VMCPUID idCpu)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM    pVM   = pDevIns->Internal.s.pVMR0;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    AssertReturnVoid(idCpu < pVM->cCpus);

    LogFlow(("pdmR0ApicHlp_SetInterruptFF: CPU%d=caller=%p/%d: VM_FF_INTERRUPT %d -> 1 (CPU%d)\n",
             VMMGetCpuId(pVM), pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC), idCpu));

    switch (enmType)
    {
        case PDMAPICIRQ_UPDATE_PENDING:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_UPDATE_APIC);
            break;
        case PDMAPICIRQ_HARDWARE:
#ifdef VBOX_WITH_NEW_APIC
            VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
#endif
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            break;
        case PDMAPICIRQ_NMI:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
            break;
        case PDMAPICIRQ_SMI:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI);
            break;
        case PDMAPICIRQ_EXTINT:
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
            break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }

    /* We need to wake up the target CPU. */
    if (
#ifdef VBOX_WITH_NEW_APIC
        /* We are already on EMT if enmType is PDMAPICIRQ_HARDWARE. Don't bother with poking! */
           enmType != PDMAPICIRQ_HARDWARE
        &&
#endif
           VMMGetCpuId(pVM) != idCpu)
    {
        switch (VMCPU_GET_STATE(pVCpu))
        {
            case VMCPUSTATE_STARTED_EXEC:
                GVMMR0SchedPokeEx(pVM, pVCpu->idCpu, false /* don't take the used lock */);
                break;

            case VMCPUSTATE_STARTED_HALTED:
                GVMMR0SchedWakeUpEx(pVM, pVCpu->idCpu, false /* don't take the used lock */);
                break;

            default:
                break; /* nothing to do in other states. */
        }
    }
}
/** @interface_method_impl{PDMPICHLPR0,pfnSetInterruptFF} */
static DECLCALLBACK(void) pdmR0PicHlp_SetInterruptFF(PPDMDEVINS pDevIns)
{
    PDMDEV_ASSERT_DEVINS(pDevIns);
    PVM pVM = pDevIns->Internal.s.pVMR0;

    if (pVM->pdm.s.Apic.pfnLocalInterruptR0)
    {
        LogFlow(("pdmR0PicHlp_SetInterruptFF: caller='%p'/%d: Setting local interrupt on LAPIC\n",
                 pDevIns, pDevIns->iInstance));
        /* Raise the LAPIC's LINT0 line instead of signaling the CPU directly. */
        pVM->pdm.s.Apic.pfnLocalInterruptR0(pVM->pdm.s.Apic.pDevInsR0, 0, 1);
        return;
    }

    PVMCPU pVCpu = &pVM->aCpus[0];  /* for PIC we always deliver to CPU 0, MP use APIC */

    LogFlow(("pdmR0PicHlp_SetInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_PIC %d -> 1\n",
             pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
}
/**
 * Check a registered module for shared page changes.
 *
 * The PGM lock shall be taken prior to calling this method.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 Pointer to the VM.
 * @param   pGVM                Pointer to the GVM instance data.
 * @param   idCpu               The ID of the calling virtual CPU.
 * @param   pModule             Global module description.
 * @param   paRegionsGCPtrs     Array parallel to pModule->aRegions with the
 *                              addresses of the regions in the calling
 *                              process.
 */
VMMR0DECL(int) PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule,
                                      PCRTGCPTR64 paRegionsGCPtrs)
{
    PVMCPU            pVCpu         = &pVM->aCpus[idCpu];
    int               rc            = VINF_SUCCESS;
    bool              fFlushTLBs    = false;
    bool              fFlushRemTLBs = false;
    GMMSHAREDPAGEDESC PageDesc;

    Log(("PGMR0SharedModuleCheck: check %s %s base=%RGv size=%x\n",
         pModule->szName, pModule->szVersion, pModule->Core.Key, pModule->cbModule));

    PGM_LOCK_ASSERT_OWNER(pVM); /* This cannot fail as we grab the lock in pgmR3SharedModuleRegRendezvous before calling into ring-0. */

    /*
     * Check every region of the shared module.
     */
    for (uint32_t idxRegion = 0; idxRegion < pModule->cRegions; idxRegion++)
    {
        RTGCPTR  GCPtrPage = paRegionsGCPtrs[idxRegion] & ~(RTGCPTR)PAGE_OFFSET_MASK;
        uint32_t cbLeft    = pModule->aRegions[idxRegion].cb;
        Assert(!(cbLeft & PAGE_OFFSET_MASK));
        uint32_t idxPage   = 0;

        while (cbLeft)
        {
            /** @todo inefficient to fetch each guest page like this... */
            RTGCPHYS GCPhys;
            uint64_t fFlags;
            rc = PGMGstGetPage(pVCpu, GCPtrPage, &fFlags, &GCPhys);
            if (    rc == VINF_SUCCESS
                &&  !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */
            {
                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage));
                if (    pPage
                    &&  PGM_PAGE_GET_STATE(pPage)       == PGM_PAGE_STATE_ALLOCATED
                    &&  PGM_PAGE_GET_READ_LOCKS(pPage)  == 0
                    &&  PGM_PAGE_GET_WRITE_LOCKS(pPage) == 0)
                {
                    PageDesc.idPage = PGM_PAGE_GET_PAGEID(pPage);
                    PageDesc.HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
                    PageDesc.GCPhys = GCPhys;

                    rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc);
                    if (RT_FAILURE(rc))
                        break;

                    /*
                     * Any change for this page?
                     */
                    if (PageDesc.idPage != NIL_GMM_PAGEID)
                    {
                        Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);

                        Log(("PGMR0SharedModuleCheck: shared page gst virt=%RGv phys=%RGp host %RHp->%RHp\n",
                             GCPtrPage, PageDesc.GCPhys, PGM_PAGE_GET_HCPHYS(pPage), PageDesc.HCPhys));

                        /* Page was either replaced by an existing shared version of it or converted
                           into a read-only shared page, so, clear all references. */
                        bool fFlush = false;
                        rc = pgmPoolTrackUpdateGCPhys(pVM, PageDesc.GCPhys, pPage, true /* clear the entries */, &fFlush);
                        Assert(    rc == VINF_SUCCESS
                               ||  (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
                                    && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)));
                        if (rc == VINF_SUCCESS)
                            fFlushTLBs |= fFlush;
                        fFlushRemTLBs = true;

                        if (PageDesc.HCPhys != PGM_PAGE_GET_HCPHYS(pPage))
                        {
                            /* Update the physical address and page id now. */
                            PGM_PAGE_SET_HCPHYS(pVM, pPage, PageDesc.HCPhys);
                            PGM_PAGE_SET_PAGEID(pVM, pPage, PageDesc.idPage);

                            /* Invalidate page map TLB entry for this page too. */
                            pgmPhysInvalidatePageMapTLBEntry(pVM, PageDesc.GCPhys);
                            pVM->pgm.s.cReusedSharedPages++;
                        }
                        /* else: nothing changed (== this page is now a shared page),
                           so no need to flush anything. */

                        pVM->pgm.s.cSharedPages++;
                        pVM->pgm.s.cPrivatePages--;
                        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_SHARED);

# ifdef VBOX_STRICT /* check sum hack */
                        pPage->s.u2Unused0 = PageDesc.u32StrictChecksum & 3;
                        pPage->s.u2Unused1 = (PageDesc.u32StrictChecksum >> 8) & 3;
# endif
                    }
                }
            }