/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   poffRealTsc     Where to return the offset to add to the current
 *                          host CPU's TSC; only valid when *pfOffsettedTsc is
 *                          set to true.
 * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickCanUseRealTSC().
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc,
                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));

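    /* Tell the caller whether a paravirtualized TSC is enabled (e.g. by a GIM
       provider such as Hyper-V); TSC reads are handled differently then. */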
    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * Same logic as the TMTSCMODE_REAL_TSC_OFFSET case in TMCpuTickCanUseRealTSC.
     */
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's so weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc     = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc     = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        *pfOffsettedTsc  = true;
        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
    }

    /*
     * Same logic as the TMTSCMODE_DYNAMIC case in TMCpuTickCanUseRealTSC.
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t cNsToDeadline;
        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
        /* Convert the virtual sync time (in ns) into guest TSC ticks; the
           scaling is an identity when the TSC ticks at TMCLOCK_FREQ_VIRTUAL. */
        uint64_t u64Now = pVM->tm.s.cTSCTicksPerSecond != TMCLOCK_FREQ_VIRTUAL
                        ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                        : u64NowVirtSync;
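        /* Worked example (made-up numbers): with cTSCTicksPerSecond = 2500000000
           (2.5 GHz) and u64NowVirtSync = 1000000000 ns, the guest TSC reads
           1000000000 * 2500000000 / 1000000000 = 2500000000 ticks. */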
        u64Now -= pVCpu->tm.s.offTSCRawSrc;
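        /* Both the subtraction below and the hardware TSC offsetting wrap
           modulo 2^64, so a guest TSC lagging the host simply yields a huge
           unsigned offset that wraps back to the right value; offsetting is
           only reported usable when it cannot take the guest TSC backwards. */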
        *poffRealTsc     = u64Now - ASMReadTSC();
        *pfOffsettedTsc  = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
        return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    *pfOffsettedTsc  = false;
    *poffRealTsc     = 0;
    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
}
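
/*
 * Illustrative sketch (not part of the original sources): roughly how a
 * hardware-assisted execution loop might consume the function above.  The
 * name hmR0SketchSetupTscAndTimer and the VMCS/VMCB remarks are assumptions
 * made for illustration; the real VT-x / AMD-V glue lives in the HM code.
 */
#if 0 /* example only */
static uint64_t hmR0SketchSetupTscAndTimer(PVM pVM, PVMCPU pVCpu)
{
    uint64_t offRealTsc       = 0;
    bool     fOffsettedTsc    = false;
    bool     fParavirtTsc     = false;
    uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &offRealTsc,
                                                                 &fOffsettedTsc, &fParavirtTsc);
    if (fOffsettedTsc)
    {
        /* Let the guest read host TSC + offRealTsc, e.g. via the VT-x
           TSC-offset VMCS field or the AMD-V VMCB TSC offset. */
    }
    else
    {
        /* Offsetting was refused: intercept RDTSC/RDTSCP and emulate them. */
    }
    /* A paravirtualized TSC (fParavirtTsc) would need extra handling, e.g.
       keeping a Hyper-V reference TSC page up to date; out of scope here. */
    NOREF(fParavirtTsc);

    /* Arm a preemption/timeout mechanism so the guest exits no later than
       cTicksToDeadline host CPU ticks from now. */
    return cTicksToDeadline;
}
#endif
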
/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true if AMD-V / VT-x can use an offsetted hardware TSC, false if
 *          RDTSC/RDTSCP must be intercepted instead.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   poffRealTsc     Where to return the offset to add to the current
 *                          host CPU's TSC; only valid when true is returned.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickGetDeadlineAndTscOffset().
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * In real TSC mode it's easy: we just need the delta & offTSCRawSrc, and
     * the CPU will add them to RDTSC and RDTSCP at runtime.
     *
     * In tmCpuTickGetInternal we do:
     *          SUPReadTsc() - pVCpu->tm.s.offTSCRawSrc;
     * Where SUPReadTsc() does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta;
     * Which means tmCpuTickGetInternal actually does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta - pVCpu->tm.s.offTSCRawSrc;
     * So, the offset to be ADDED to RDTSC[P] is:
     *          offRealTsc = -(pGipCpu->i64TscDelta + pVCpu->tm.s.offTSCRawSrc)
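     *
     * Worked example (made-up numbers): with ASMReadTSC() = 1000000,
     * i64TscDelta = 100 and offTSCRawSrc = 500000, tmCpuTickGetInternal
     * returns 1000000 - 100 - 500000 = 499900, while the hardware computes
     * 1000000 + offRealTsc = 1000000 - 500100 = 499900, the very same value.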
     */
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's so weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        return true;
    }

    /*
     * We require:
     *     1. A fixed TSC (this is checked at init time).
     *     2. That the TSC is ticking (we shouldn't be here if it isn't).
     *     3. Either that we're using the real TSC as time source, or
     *          a) we don't have any lag to catch up, and
     *          b) the virtual sync clock hasn't been halted by an expired timer, and
     *          c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                        - pVCpu->tm.s.offTSCRawSrc;
        /** @todo When we start collecting statistics on how much time we spend executing
         * guest code before exiting, we should check this against the next virtual sync
         * timer timeout.  If it's lower than the average run length, we should trap RDTSC
         * to increase the chance that we'll get interrupted right after the timer expired. */
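        /* Refuse the offsetting when it would take the guest TSC backwards
           past the value it has already seen (u64TSCLastSeen). */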
        if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
        {
            *poffRealTsc = u64Now - ASMReadTSC();
            return true;    /** @todo count this? */
        }
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    return false;
}