/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.fTSCTicking = true;

        /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
         *        unpaused before the virtual time and stopped after it.) */
        /* Recompute the offset between the raw TSC source and the guest TSC
           (u64TSC holds the value captured when the TSC was paused), so the
           guest resumes at the value it last saw. */
        switch (pVM->tm.s.enmTSCMode)
        {
            case TMTSCMODE_REAL_TSC_OFFSET:
                pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
                break;
            case TMTSCMODE_VIRT_TSC_EMULATED:
            case TMTSCMODE_DYNAMIC:
                pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                         - pVCpu->tm.s.u64TSC;
                break;
            case TMTSCMODE_NATIVE_API:
                pVCpu->tm.s.offTSCRawSrc = 0; /** @todo ?? */
                /* Looks like this is only used by weird modes and MSR TSC writes.  We cannot support either on NEM/win. */
                break;
            default:
                AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
        }
        return VINF_SUCCESS;
    }
    /* Resuming an already-ticking TSC indicates a caller bug. */
    AssertFailed();
    return VERR_TM_TSC_ALREADY_TICKING;
}
/**
 * Read the current CPU timestamp counter.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    /* While paused, the TSC simply reads as the value captured at pause time. */
    if (RT_UNLIKELY(!pVCpu->tm.s.fTSCTicking))
        return pVCpu->tm.s.u64TSC;

    PVM      pVM     = pVCpu->CTX_SUFF(pVM);
    uint64_t uTscRaw = pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
                     ? SUPReadTsc()
                     : tmCpuTickGetRawVirtual(pVM, fCheckTimers);
    uint64_t uTsc    = uTscRaw - pVCpu->tm.s.offTSCRawSrc;

    /* Always return a value higher than what the guest has already seen. */
    if (RT_LIKELY(uTsc > pVCpu->tm.s.u64TSCLastSeen))
        pVCpu->tm.s.u64TSCLastSeen = uTsc;
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
        pVCpu->tm.s.u64TSCLastSeen += 64; /** @todo choose a good increment here */
        uTsc = pVCpu->tm.s.u64TSCLastSeen;
    }
    return uTsc;
}
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
        return VINF_SUCCESS;

    /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
    pVCpu->tm.s.fTSCTicking = true;

    uint32_t const cCpusTicking = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
    AssertMsgReturn(cCpusTicking <= pVM->cCpus, ("%u vs %u\n", cCpusTicking, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
    if (cCpusTicking != 1)
    {
        /* All other VCPUs (if any): apply the offset computed by the first one. */
        pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
    }
    else
    {
        /* The first VCPU to resume.  When resuming, use the TSC value of the
           last stopped VCPU to avoid the TSC going back. */
        uint64_t const offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

        uint64_t const uRawNow = pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
                               ? SUPReadTsc()
                               : tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */);
        pVCpu->tm.s.offTSCRawSrc = uRawNow - pVM->tm.s.u64LastPausedTSC;

        /* Calculate the offset for other VCPUs to use. */
        pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
    }
    return VINF_SUCCESS;
}
/**
 * Read the current CPU timestamp counter.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;
    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        /* Select the raw tick source according to the configured TSC mode. */
        switch (pVM->tm.s.enmTSCMode)
        {
            case TMTSCMODE_REAL_TSC_OFFSET:
                u64 = SUPReadTsc();
                break;
            case TMTSCMODE_VIRT_TSC_EMULATED:
            case TMTSCMODE_DYNAMIC:
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
                break;
#ifndef IN_RC
            case TMTSCMODE_NATIVE_API:
            {
                u64 = 0;
                /* Query the tick from the NEM backend; on failure the assert
                   macro makes us return the host TSC as a fallback value. */
                int rcNem = NEMHCQueryCpuTick(pVCpu, &u64, NULL);
                AssertLogRelRCReturn(rcNem, SUPReadTsc());
                break;
            }
#endif
            default:
                AssertFailedBreakStmt(u64 = SUPReadTsc());
        }
        /* Convert the raw source value into the guest-visible TSC. */
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64; /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC; /* paused: report the value captured at pause time */
    return u64;
}
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 1)
        {
            /* The first VCPU to resume. */
            uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

            STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

            /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
            switch (pVM->tm.s.enmTSCMode)
            {
                case TMTSCMODE_REAL_TSC_OFFSET:
                    pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
                    break;
                case TMTSCMODE_VIRT_TSC_EMULATED:
                case TMTSCMODE_DYNAMIC:
                    pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                             - pVM->tm.s.u64LastPausedTSC;
                    break;
                case TMTSCMODE_NATIVE_API:
                {
#ifndef IN_RC
                    /* The NEM backend manages the TSC itself; hand it the paused
                       value and keep a zero offset on our side. */
                    int rc = NEMHCResumeCpuTickOnAll(pVM, pVCpu, pVM->tm.s.u64LastPausedTSC);
                    AssertRCReturn(rc, rc);
                    pVCpu->tm.s.offTSCRawSrc = offTSCRawSrcOld = 0;
#else
                    /* NEM is not available in raw-mode context. */
                    AssertFailedReturn(VERR_INTERNAL_ERROR_2);
#endif
                    break;
                }
                default:
                    AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
            }

            /* Calculate the offset addendum for other VCPUs to use. */
            pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
        }
        else
        {
            /* All other VCPUs (if any): apply the addendum computed by the first one. */
            pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
        }
    }
    return VINF_SUCCESS;
}
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
{
    /* Resuming an already-ticking TSC indicates a caller bug. */
    if (pVCpu->tm.s.fTSCTicking)
    {
        AssertFailed();
        return VERR_TM_TSC_ALREADY_TICKING;
    }

    pVCpu->tm.s.fTSCTicking = true;

    /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
     *        unpaused before the virtual time and stopped after it.) */
    /* Recompute the raw-source offset so the guest continues from the value
       (u64TSC) captured when the TSC was paused. */
    uint64_t const uRawNow = pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
                           ? SUPReadTsc()
                           : tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */);
    pVCpu->tm.s.offTSCRawSrc = uRawNow - pVCpu->tm.s.u64TSC;
    return VINF_SUCCESS;
}