/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and for
 * performing the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET    OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
            if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                &&  iCpuSelf != iCpu)
                KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
/**
 * The slave DPC callback for an omni timer.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
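/*
 * A hedged setup sketch (not the actual RTTimerCreateEx body): the master/slave
 * callbacks above assume that each sub-timer's NtDpc was initialized with the
 * right routine and bound to its CPU when the timer was created.  The helper
 * name below is hypothetical; only the NT DPC APIs and the pTimer/aSubTimers
 * fields already used above are taken as given.
 */
static void rtTimerNtOmniInitDpcsSketch(PRTTIMER pTimer, RTCPUID idMasterCpu)
{
    int const iMasterCpu = RTMpCpuIdToSetIndex(idMasterCpu);
    for (unsigned iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
    {
        PRTTIMERNTSUBTIMER pSubTimer = &pTimer->aSubTimers[iCpu];
        pSubTimer->pParent = pTimer;

        /* The master CPU gets the callback that queues the slave DPCs. */
        if ((int)iCpu == iMasterCpu)
            KeInitializeDpc(&pSubTimer->NtDpc, rtTimerNtOmniMasterCallback, pSubTimer);
        else
            KeInitializeDpc(&pSubTimer->NtDpc, rtTimerNtOmniSlaveCallback, pSubTimer);

        /* Bind each DPC to its CPU so the callout runs there. */
        KeSetImportanceDpc(&pSubTimer->NtDpc, HighImportance);
        KeSetTargetProcessorDpc(&pSubTimer->NtDpc, (CCHAR)iCpu);
    }
}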
/**
 * Internal worker for getting the GIP CPU array index for the calling CPU.
 *
 * @returns Index into SUPGLOBALINFOPAGE::aCPUs or UINT16_MAX.
 * @param   pGip    The GIP.
 */
DECLINLINE(uint16_t) supGetGipCpuIndex(PSUPGLOBALINFOPAGE pGip)
{
    uint16_t iGipCpu;
#ifdef IN_RING3
    if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
    {
        /* Storing the IDTR is normally very fast. */
        uint16_t cbLim = ASMGetIdtrLimit();
        uint16_t iCpuSet = cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8);
        iCpuSet  &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu   = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
    {
        /* RDTSCP gives us what we need and more. */
        uint32_t iCpuSet;
        ASMReadTscWithAux(&iCpuSet);
        iCpuSet  &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu   = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else
    {
        /* Get APIC ID via the slow CPUID instruction. */
        uint8_t idApic = ASMGetApicId();
        iGipCpu = pGip->aiCpuFromApicId[idApic];
    }
#elif defined(IN_RING0)
    /* Ring-0: Use RTMpCpuId() with interrupts disabled to avoid host OS assertions about unsafe CPU number usage. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags();
    int         iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (RT_LIKELY((unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    ASMSetFlags(uFlags);

#elif defined(IN_RC)
    /* Raw-mode context: We can get the host CPU set index via VMCPU. */
    uint32_t    iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
    if (RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
#else
# error "IN_RING3, IN_RC or IN_RING0 must be defined!"
#endif
    return iGipCpu;
}
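/*
 * A hedged usage sketch for supGetGipCpuIndex: resolve the calling CPU's GIP
 * entry and read its current frequency estimate, falling back to the global
 * value when the lookup fails.  The helper name is hypothetical.
 */
DECLINLINE(uint64_t) supGetCallingCpuHzSketch(PSUPGLOBALINFOPAGE pGip)
{
    uint16_t iGipCpu = supGetGipCpuIndex(pGip);
    if (RT_LIKELY(iGipCpu < pGip->cCpus))
        return pGip->aCPUs[iGipCpu].u64CpuHz;
    return pGip->u64CpuHz;
}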
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_ACTIVE;
    if (   pTimer->fSpecificCpu
        && !RTMpIsCpuOnline(pTimer->idCpu))
        return VERR_CPU_OFFLINE;

    /*
     * Start the timer.
     */
    PKDPC pMasterDpc = pTimer->fOmniTimer
                     ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
                     : &pTimer->aSubTimers[0].NtDpc;

#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;
#endif

    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -1;

    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        pTimer->aSubTimers[iCpu].iTick = 0;
    ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
    ASMAtomicWriteBool(&pTimer->fSuspended, false);
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + u64First / 100;
    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
#else
    KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
#endif
    return VINF_SUCCESS;
}
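/*
 * A hedged ring-0 usage sketch for the function above: create an omni timer
 * that fires on every online CPU once a millisecond and start it right away.
 * The callback body and helper names are placeholders; the create/destroy
 * calls are assumed to be the usual IPRT timer API (RTTimerCreateEx,
 * RTTIMER_FLAGS_CPU_ALL, RTTimerDestroy).
 */
static DECLCALLBACK(void) myOmniTickSketch(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    NOREF(pTimer); NOREF(pvUser); NOREF(iTick); /* per-CPU work goes here */
}

static int myStartOmniTimerSketch(PRTTIMER *ppTimer)
{
    int rc = RTTimerCreateEx(ppTimer, RT_NS_1MS, RTTIMER_FLAGS_CPU_ALL, myOmniTickSketch, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
    {
        rc = RTTimerStart(*ppTimer, 0 /*u64First: fire as soon as possible*/);
        if (RT_FAILURE(rc))
            RTTimerDestroy(*ppTimer);
    }
    return rc;
}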
RTR3DECL(int) RTThreadSetAffinityToCpu(RTCPUID idCpu)
{
    int rc;
    if (idCpu == NIL_RTCPUID)
        rc = RTThreadSetAffinity(NULL);
    else
    {
        int iCpu = RTMpCpuIdToSetIndex(idCpu);
        if (iCpu >= 0)
        {
            RTCPUSET CpuSet;
            RTCpuSetEmpty(&CpuSet);
            RTCpuSetAddByIndex(&CpuSet, iCpu);
            rc = RTThreadSetAffinity(&CpuSet);
        }
        else
            rc = VERR_CPU_NOT_FOUND;
    }
    return rc;
}
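/*
 * A hedged ring-3 usage sketch for RTThreadSetAffinityToCpu: pin the calling
 * thread to the CPU it is currently on, do some per-CPU work, then restore
 * the default affinity via NIL_RTCPUID (which maps to RTThreadSetAffinity(NULL)
 * above).  The helper name and the worker placeholder are hypothetical.
 */
static int myPinnedWorkSketch(void)
{
    int rc = RTThreadSetAffinityToCpu(RTMpCpuId());
    if (RT_SUCCESS(rc))
    {
        /* ... per-CPU work here ... */
        rc = RTThreadSetAffinityToCpu(NIL_RTCPUID); /* restore default affinity */
    }
    return rc;
}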
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_ACTIVE;
    if (   pTimer->fSpecificCpu
        && !RTMpIsCpuOnline(pTimer->idCpu))
        return VERR_CPU_OFFLINE;

    /*
     * Start the timer.
     */
    PKDPC pMasterDpc = pTimer->fOmniTimer
                     ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
                     : &pTimer->aSubTimers[0].NtDpc;

    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;

    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -1;

    ASMAtomicWriteBool(&pTimer->fSuspended, false);
    KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
    return VINF_SUCCESS;
}
/**
 * Get the timestamp frequency.
 *
 * @returns Number of ticks per second.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
{
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
        && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC)
    {
#ifdef IN_RING3
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
#elif defined(IN_RING0)
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, RTMpCpuIdToSetIndex(RTMpCpuId()));
#else
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, VMMGetCpu(pVM)->iHostCpuSet);
#endif
        if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
            return cTSCTicksPerSecond;
    }
    return pVM->tm.s.cTSCTicksPerSecond;
}
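/*
 * A hedged helper sketch built on TMCpuTicksPerSecond: convert an elapsed
 * number of (virtual) TSC ticks into nanoseconds using plain 64-bit
 * arithmetic.  The helper name is hypothetical, and the intermediate multiply
 * assumes the remainder times RT_NS_1SEC fits in 64 bits (it does for
 * realistic CPU frequencies).
 */
DECLINLINE(uint64_t) tmCpuTicksToNanoSketch(PVM pVM, uint64_t cTicks)
{
    uint64_t const cTicksPerSec = TMCpuTicksPerSecond(pVM);
    return (cTicks / cTicksPerSec) * RT_NS_1SEC
         + (cTicks % cTicksPerSec) * RT_NS_1SEC / cTicksPerSec;
}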
/**
 * The slow case for SUPReadTsc where we need to apply deltas.
 *
 * Must only be called when deltas are applicable, so please do not call it
 * directly.
 *
 * @returns TSC with delta applied.
 * @param   pGip        Pointer to the GIP.
 *
 * @remarks May be called with interrupts disabled in ring-0!  This is why the
 *          ring-0 code doesn't attempt to figure the delta.
 *
 * @internal
 */
SUPDECL(uint64_t) SUPReadTscWithDelta(PSUPGLOBALINFOPAGE pGip)
{
    uint64_t            uTsc;
    uint16_t            iGipCpu;
    AssertCompile(RT_IS_POWER_OF_TWO(RTCPUSET_MAX_CPUS));
    AssertCompile(RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx) >= RTCPUSET_MAX_CPUS);
    Assert(pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_PRACTICALLY_ZERO);

    /*
     * Read the TSC and get the corresponding aCPUs index.
     */
#ifdef IN_RING3
    if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
    {
        /* RDTSCP gives us all we need, no loops/cli. */
        uint32_t iCpuSet;
        uTsc      = ASMReadTscWithAux(&iCpuSet);
        iCpuSet  &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu   = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
    {
        /* Storing the IDTR is normally very quick, but we need to loop. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint16_t cbLim = ASMGetIdtrLimit();
            uTsc = ASMReadTSC();
            if (RT_LIKELY(ASMGetIdtrLimit() == cbLim))
            {
                uint16_t iCpuSet = cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8);
                iCpuSet &= RTCPUSET_MAX_CPUS - 1;
                iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
    else
    {
        /* Get APIC ID via the slow CPUID instruction, requires looping. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint8_t idApic = ASMGetApicId();
            uTsc = ASMReadTSC();
            if (RT_LIKELY(ASMGetApicId() == idApic))
            {
                iGipCpu = pGip->aiCpuFromApicId[idApic];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
#elif defined(IN_RING0)
    /* Ring-0: Use RTMpCpuId(), no loops. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags();
    int         iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (RT_LIKELY((unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);

#elif defined(IN_RC)
    /* Raw-mode context: We can get the host CPU set index via VMCPU, no loops. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags(); /* Interrupts are already disabled, but play it safe. */
    uint32_t    iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
    if (RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);
#else
# error "IN_RING3, IN_RC or IN_RING0 must be defined!"
#endif

    /*
     * If the delta is valid, apply it.
     */
    if (RT_LIKELY(iGipCpu < pGip->cCpus))
    {
        int64_t iTscDelta = pGip->aCPUs[iGipCpu].i64TSCDelta;
        if (RT_LIKELY(iTscDelta != INT64_MAX))
            return uTsc - iTscDelta;

# ifdef IN_RING3
        /*
         * The delta needs calculating, call supdrv to get the TSC.
         */
        int rc = SUPR3ReadTsc(&uTsc, NULL);
        if (RT_SUCCESS(rc))
            return uTsc;
        AssertMsgFailed(("SUPR3ReadTsc -> %Rrc\n", rc));
        uTsc = ASMReadTSC();
# endif /* IN_RING3 */
    }

    /*
     * This shouldn't happen, especially not in ring-3 and raw-mode context.
     * But if it does, return something that's half useful.
     */
    AssertMsgFailed(("iGipCpu=%d (%#x) cCpus=%d fGetGipCpu=%#x\n", iGipCpu, iGipCpu, pGip->cCpus, pGip->fGetGipCpu));
    return uTsc;
}
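/*
 * A hedged sketch of the fast path that sits in front of SUPReadTscWithDelta
 * (the real SUPReadTsc inline lives in sup.h; treat this as an approximation):
 * only take the slow, delta-applying path when the GIP says TSC deltas are
 * actually relevant, matching the assertion at the top of the function above.
 */
DECLINLINE(uint64_t) supReadTscSketch(void)
{
    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
    if (RT_LIKELY(pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO))
        return ASMReadTSC();
    return SUPReadTscWithDelta(pGip);
}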
/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and for
 * performing the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET    OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

            uint64_t iTick = ++pSubTimer->iTick;
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc);
            pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        }
        else
        {
            /*
             * Single-shot timers get complicated w.r.t. fSuspended maintenance.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
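/*
 * A hedged sketch of the manual re-arm step referenced above
 * (rtTimerNtRearmInternval); the real implementation may differ in detail.
 * The idea, assuming uNtStartTime is kept in NT 100ns interrupt-time units
 * as in RTTimerStart above: compute the absolute interrupt time of the next
 * tick, convert it to a relative due time, and re-queue the one-shot NT timer
 * with the master DPC.
 */
static void rtTimerNtRearmSketch(PRTTIMER pTimer, uint64_t iTick, PKDPC pMasterDpc)
{
    uint64_t const uNtNext  = pTimer->uNtStartTime + iTick * pTimer->u64NanoInterval / 100;
    int64_t  const cNtDelta = (int64_t)(uNtNext - rtTimerNtQueryInterruptTime());
    LARGE_INTEGER  DueTime;
    DueTime.QuadPart = cNtDelta > 0 ? -cNtDelta : -1; /* relative NT time, never zero */
    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0 /*no period*/, pMasterDpc);
}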