/**
 * Flush pending queues.
 * This is a forced action callback.
 *
 * @param   pVM     Pointer to the VM.
 * @thread  Emulation thread only.
 */
VMMR3_INT_DECL(void) PDMR3QueueFlushAll(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    LogFlow(("PDMR3QueuesFlush:\n"));

    /*
     * Only let one EMT flushing queues at any one time to preserve the order
     * and to avoid wasting time. The FF is always cleared here, because it's
     * only used to get someones attention. Queue inserts occurring during the
     * flush are caught using the pending bit.
     *
     * Note! We must check the force action and pending flags after clearing
     *       the active bit!
     */
    VM_FF_CLEAR(pVM, VM_FF_PDM_QUEUES);
    /* If the ACTIVE bit was already set, another EMT is flushing; our insert
       will have raised the PENDING bit / FF, so it is safe to just return. */
    while (!ASMAtomicBitTestAndSet(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_ACTIVE_BIT))
    {
        /* Clear PENDING before scanning so inserts during the scan are noticed below. */
        ASMAtomicBitClear(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT);

        /* Flush every forced queue that has pending items in any context (R3/R0/RC). */
        for (PPDMQUEUE pCur = pVM->pUVM->pdm.s.pQueuesForced; pCur; pCur = pCur->pNext)
            if (   pCur->pPendingR3
                || pCur->pPendingR0
                || pCur->pPendingRC)
                pdmR3QueueFlush(pCur);

        ASMAtomicBitClear(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_ACTIVE_BIT);

        /* We're done if there were no inserts while we were busy. */
        if (   !ASMBitTest(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT)
            && !VM_FF_ISPENDING(pVM, VM_FF_PDM_QUEUES))
            break;
        /* Something was inserted: clear the FF and go around again. */
        VM_FF_CLEAR(pVM, VM_FF_PDM_QUEUES);
    }
}
/**
 * Sets the VMM Debug Command variable.
 *
 * Atomically exchanges the command and maintains the VM_FF_DBGF force action
 * flag to match: set (plus a global FF notification) for a real command,
 * cleared when the command slot is being emptied.
 *
 * @returns Previous command.
 * @param   pVM     Pointer to the VM.
 * @param   enmCmd  The command.
 */
DECLINLINE(DBGFCMD) dbgfR3SetCmd(PVM pVM, DBGFCMD enmCmd)
{
    DBGFCMD enmOldCmd;
    if (enmCmd != DBGFCMD_NO_COMMAND)
    {
        Log2(("DBGF: Setting command to %d\n", enmCmd));
        /* The slot must be free before a new command is deposited. */
        AssertMsg(pVM->dbgf.s.enmVMMCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmVMMCmd=%d\n", enmCmd, pVM->dbgf.s.enmVMMCmd));
        enmOldCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pVM->dbgf.s.enmVMMCmd, enmCmd);
        VM_FF_SET(pVM, VM_FF_DBGF);
        VMR3NotifyGlobalFFU(pVM->pUVM, 0 /* didn't notify REM */);
    }
    else
    {
        Log2(("DBGF: Setting command to %d (DBGFCMD_NO_COMMAND)\n", enmCmd));
        enmOldCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pVM->dbgf.s.enmVMMCmd, enmCmd);
        VM_FF_CLEAR(pVM, VM_FF_DBGF);
    }
    return enmOldCmd;
}
/**
 * The emulation thread main function, with Virtual CPU ID for debugging.
 *
 * Runs the EMT request loop: before the VM structure exists it only services
 * requests and termination; afterwards it dispatches rendezvous, requests,
 * debugger and reset force actions, and enters EMR3ExecuteVM() whenever the
 * VM state says we should be running.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
 */
int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
{
    PUVM    pUVM = pUVCpu->pUVM;
    int     rc;

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    /* Make this EMT findable via TLS. */
    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtInit)
        pUVM->pVmm2UserMethods->pfnNotifyEmtInit(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    /*
     * The request loop.
     */
    rc = VINF_SUCCESS;
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
    for (;;)
    {
        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            /*
             * Only the first VCPU may initialize the VM during early init
             * and must therefore service all VMCPUID_ANY requests.
             * See also VMR3Create
             */
            if (   (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
                && pUVCpu->idCpu == 0)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore),
                     pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore),
                     pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }
        }
        else
        {
            /*
             * Pending requests which needs servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
            {
                rc = VMMR3EmtRendezvousFF(pVM, &pVM->aCpus[idCpu]);
                Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n",
                     rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n",
                     rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n",
                     pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n",
                     rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 *
                 * Note: VM_FF_TESTANDCLEAR has already cleared VM_FF_RESET;
                 * clearing it again after VMR3Reset() would silently drop a
                 * reset requested while the reset was being carried out.
                 */
                rc = VMR3Reset(pVM);
                Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n",
                     rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially resume
         * or start the VM, in that case we'll get a change in VM status
         * indicating that we're now running.
         */
        if (    RT_SUCCESS(rc)
            &&  pUVM->pVM)
        {
            PVM     pVM   = pUVM->pVM;
            PVMCPU  pVCpu = &pVM->aCpus[idCpu];
            if (    pVM->enmVMState == VMSTATE_RUNNING
                &&  VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
            {
                rc = EMR3ExecuteVM(pVM, pVCpu);
                Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
                if (EMGetState(pVCpu) == EMSTATE_GURU_MEDITATION)
                    vmR3SetGuruMeditation(pVM);
            }
        }

    } /* forever */

    /*
     * Cleanup and exit.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (   idCpu == 0
        && pUVM->pVM)
    {
        PVM pVM = pUVM->pVM;
        vmR3SetTerminated(pVM);
        pUVM->pVM = NULL;

        /** @todo SMP: This isn't 100% safe. We should wait for the other
         *        threads to finish before destroy the VM. */
        int rc2 = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertLogRelRC(rc2);
    }

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtTerm)
        pUVM->pVmm2UserMethods->pfnNotifyEmtTerm(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
/**
 * Profiles the HWACCM (VT-x/AMD-V) world switch by repeatedly calling the
 * raw-mode context entry point VMMGCEntry() with the NOP testcase via
 * VMMR0_DO_HWACC_RUN, and prints per-iteration timing statistics.
 *
 * @returns VBox status code (VERR_ACCESS_DENIED when HWACCM isn't available).
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(int) VMMDoHwAccmTest(PVM pVM)
{
    uint32_t i;
    int      rc;
    PCPUMCTX pHyperCtx, pGuestCtx;
    RTGCPHYS CR3Phys = 0x0; /* fake address */
    PVMCPU   pVCpu = &pVM->aCpus[0];

    if (!HWACCMR3IsAllowed(pVM))
    {
        RTPrintf("VMM: Hardware accelerated test not available!\n");
        return VERR_ACCESS_DENIED;
    }

    /*
     * These forced actions are not necessary for the test and trigger breakpoints too.
     */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);

    /* Enable mapping of the hypervisor into the shadow page table. */
    uint32_t cb;
    rc = PGMR3MappingsSize(pVM, &cb);
    AssertRCReturn(rc, rc);

    /* Pretend the mappings are now fixed; to force a refresh of the reserved PDEs. */
    rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
    AssertRCReturn(rc, rc);

    pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

    /* Minimal paged protected-mode control registers for the hypervisor context. */
    pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
    pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFSXR | X86_CR4_OSXMMEEXCPT;
    PGMChangeMode(pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
    PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
    VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
    VM_FF_CLEAR(pVM, VM_FF_REQUEST);

    /*
     * Setup stack for calling VMMGCEntry().
     */
    RTRCPTR RCPtrEP;
    rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
    if (RT_SUCCESS(rc))
    {
        RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);

        pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);

        /* Fill in hidden selector registers for the hypervisor state. */
        SYNC_SEL(pHyperCtx, cs);
        SYNC_SEL(pHyperCtx, ds);
        SYNC_SEL(pHyperCtx, es);
        SYNC_SEL(pHyperCtx, fs);
        SYNC_SEL(pHyperCtx, gs);
        SYNC_SEL(pHyperCtx, ss);
        SYNC_SEL(pHyperCtx, tr);

        /*
         * Profile switching.
         */
        RTPrintf("VMM: profiling switcher...\n");
        Log(("VMM: profiling switcher...\n"));
        uint64_t TickMin   = UINT64_MAX; /* clearer than '~0', same all-ones value */
        uint64_t tsBegin   = RTTimeNanoTS();
        uint64_t TickStart = ASMReadTSC();
        for (i = 0; i < 1000000; i++)
        {
            /* Set up a fresh call frame: trampoline -> VMMGCEntry(pVM, NOP testcase, 0). */
            CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
            CPUMPushHyper(pVCpu, 0);
            CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HWACCM_NOP);
            CPUMPushHyper(pVCpu, pVM->pVMRC);
            CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR));  /* stack frame size */
            CPUMPushHyper(pVCpu, RCPtrEP);              /* what to call */

            pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
            pGuestCtx = CPUMQueryGuestCtxPtr(pVCpu);

            /* Copy the hypervisor context to make sure we have a valid guest context. */
            *pGuestCtx = *pHyperCtx;
            pGuestCtx->cr3 = CR3Phys;

            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
            VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);

            uint64_t TickThisStart = ASMReadTSC();
            rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, 0);
            uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
            if (RT_FAILURE(rc))
            {
                Log(("VMM: R0 returned fatal %Rrc in iteration %d\n", rc, i));
                VMMR3FatalDump(pVM, pVCpu, rc);
                return rc;
            }
            if (TickThisElapsed < TickMin)
                TickMin = TickThisElapsed;
        }
        uint64_t TickEnd = ASMReadTSC();
        uint64_t tsEnd   = RTTimeNanoTS();

        uint64_t Elapsed            = tsEnd - tsBegin;
        uint64_t PerIteration       = Elapsed / (uint64_t)i;
        uint64_t cTicksElapsed      = TickEnd - TickStart;
        uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;

        RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
                 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
        Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
             i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));

        rc = VINF_SUCCESS;
    }
    else
        AssertMsgFailed(("Failed to resolve VMMGC.gc::VMMGCEntry(), rc=%Rrc\n", rc));

    return rc;
}