/** * smp_rendezvous action callback. * * This will perform the timer callback if we're on the right CPU. * * @param pvTimer The timer. */ static void rtTimerFreeBSDIpiAction(void *pvTimer) { PRTTIMER pTimer = (PRTTIMER)pvTimer; if ( pTimer->iCpu == RTTIMER_FLAGS_CPU_MASK || (u_int)pTimer->iCpu == curcpu) pTimer->pfnTimer(pTimer, pTimer->pvUser, pTimer->iTick); }
/**
 * The FreeBSD callout callback.
 *
 * Advances the tick counter, re-arms the callout for interval timers (or
 * stops it and flags suspension for one-shots), then performs the user
 * callback on the appropriate CPU (via smp_rendezvous when bound elsewhere).
 *
 * @param   pvTimer     Pointer to our RTTIMER instance.
 */
static void rtTimerFreeBSDCallback(void *pvTimer)
{
    PRTTIMER pTimer = (PRTTIMER)pvTimer;

    /* calculate and set the next timeout */
    pTimer->iTick++;
    if (!pTimer->u64NanoInterval)
    {
        /* One-shot: mark suspended so it won't fire again. */
        pTimer->fSuspended = true;
        callout_stop(&pTimer->Callout);
    }
    else
    {
        struct timeval tv;
        const uint64_t u64NanoTS = RTTimeNanoTS();

        /* Schedule relative to the original start time so intervals don't
           drift; if we're already behind, push the deadline out by half the
           system timer granularity instead. */
        pTimer->u64NextTS = pTimer->u64StartTS + pTimer->iTick * pTimer->u64NanoInterval;
        if (pTimer->u64NextTS < u64NanoTS)
            pTimer->u64NextTS = u64NanoTS + RTTimerGetSystemGranularity() / 2;

        /* NOTE(review): tvtohz() interprets the timeval as a RELATIVE delay,
           but u64NextTS is an absolute RTTimeNanoTS() value - confirm this is
           intentional (one would expect u64NextTS - u64NanoTS here). */
        tv.tv_sec = pTimer->u64NextTS / 1000000000;
        tv.tv_usec = (pTimer->u64NextTS % 1000000000) / 1000;
        callout_reset(&pTimer->Callout, tvtohz(&tv), rtTimerFreeBSDCallback, pTimer);
    }

    /* callback */
    if (    !pTimer->fSpecificCpu
        ||  pTimer->iCpu == curcpu)
        pTimer->pfnTimer(pTimer, pTimer->pvUser, pTimer->iTick);
    else
        /* Wrong CPU: rendezvous so the action runs on the bound CPU. */
        smp_rendezvous(NULL, rtTimerFreeBSDIpiAction, NULL, pvTimer);
}
/**
 * Win32 multimedia timer callback wrapper.
 *
 * Advances the tick counter and forwards the event to the user callback.
 *
 * @param   uTimerID    Identifier of the firing timer event (asserted only).
 * @param   uMsg        Reserved by the winmm API, unused.
 * @param   dwUser      User data - pointer to our RTTIMER instance.
 * @param   dw1         Unused.
 * @param   dw2         Unused.
 */
static void CALLBACK rttimerCallback(UINT uTimerID, UINT uMsg, DWORD_PTR dwUser, DWORD_PTR dw1, DWORD_PTR dw2)
{
    PRTTIMER pThis = (PRTTIMER)(void *)dwUser;
    Assert(pThis->TimerId == uTimerID);

    pThis->iTick++;
    pThis->pfnTimer(pThis, pThis->pvUser, pThis->iTick);

    NOREF(uTimerID);
    NOREF(uMsg);
    NOREF(dw1);
    NOREF(dw2);
}
/**
 * Async (APC) timer callback.
 *
 * Performs the user callback and then re-arms the waitable timer for the
 * next period via SetWaitableTimer.
 *
 * @param   lpArgToCompletionRoutine    Pointer to our timer structure.
 * @param   dwTimerLowValue             Unused.
 * @param   dwTimerHighValue            Unused.
 */
VOID CALLBACK rttimerAPCProc(LPVOID lpArgToCompletionRoutine, DWORD dwTimerLowValue, DWORD dwTimerHighValue)
{
    PRTTIMER pTimer = (PRTTIMER)lpArgToCompletionRoutine;

    /*
     * Check if we're being destroyed.
     */
    if (pTimer->u32Magic != RTTIMER_MAGIC)
        return;

    /*
     * Callback the handler.
     */
    pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pTimer->iTick);

    /*
     * Rearm the timer handler.
     */
#ifdef USE_CATCH_UP
    /* Catch-up mode: advance the absolute deadline by one interval and wait
       only for the remaining time, with a 0.5ms floor.
       NOTE(review): the increment uses 100ns units (* 10000) while the delta
       below mixes in RTTimeNanoTS() nanoseconds - verify the units of llNext
       in this configuration. */
    pTimer->llNext.QuadPart += (int64_t)pTimer->uMilliesInterval * 10000;
    LARGE_INTEGER ll;
    ll.QuadPart = RTTimeNanoTS() - pTimer->llNext.QuadPart;
    if (ll.QuadPart < -500000)
        ll.QuadPart = ll.QuadPart / 100;
    else
        ll.QuadPart = -500000 / 100; /* need to catch up, do a minimum wait of 0.5ms. */
#else
    /* Fixed relative due time (negative = relative in 100ns units). */
    LARGE_INTEGER ll = pTimer->llNext;
#endif
    BOOL frc = SetWaitableTimer(pTimer->hTimer, &ll, 0, rttimerAPCProc, pTimer, FALSE);
    /* Failure is only acceptable if the timer is being torn down. */
    AssertMsg(frc || pTimer->u32Magic != RTTIMER_MAGIC, ("last error %d\n", GetLastError()));
}
/**
 * The slave DPC callback for an omni timer.
 *
 * Queued by the master callback on every other online CPU; performs the
 * per-CPU callout unless the timer is suspended or being destroyed.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    /* DPCs must run at DISPATCH_LEVEL or above, and each sub-timer's slot
       must match the CPU it runs on. */
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        /* One-shot timer: flag it suspended so it won't fire again. */
        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other online CPUs and
 * performing the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     *
     * NOTE(review): unlike rtTimerNtOmniSlaveCallback, this version does not
     * set fSuspended for one-shot timers (!u64NanoInterval) - confirm whether
     * one-shot omni timers are handled elsewhere in this file version.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);
        /* Fan the tick out to every other online CPU via their slave DPCs. */
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
            if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                &&  iCpuSelf != iCpu)
                KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
/**
 * Timer callback function for the non-omni timers.
 *
 * Maintains the active-thread handle (used by stop/destroy to detect a
 * callout in progress), re-arms interval timers before the callout, and
 * flags one-shot timers as suspended.
 *
 * @param   pDpc                Pointer to the DPC.
 * @param   pvUser              Pointer to our internal timer structure.
 * @param   SystemArgument1     Some system argument.
 * @param   SystemArgument2     Some system argument.
 */
static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        /* Publish which thread is inside the callout. */
        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());

        /* One-shot timer: flag it suspended so it won't fire again. */
        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;

        /* Re-arm BEFORE the callout so the next period isn't delayed by it. */
        if (pTimer->u64NanoInterval)
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[0].NtDpc);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);

        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
/**
 * Callback wrapper for single-CPU timers.
 *
 * @param   pvArg   Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
 *          cyclic subsystem here, neither should pfnTimer().
 */
static void rtTimerSolSingleCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!pTimer->fAllCpus);

    /* Make sure one-shots do not fire another time. */
    Assert(   !pTimer->fSuspended
           || pTimer->cNsInterval != 0);

    if (!pTimer->fSuspendedFromTimer)
    {
        /* Make sure we are firing on the right CPU. */
        Assert(   !pTimer->fSpecificCpu
               || pTimer->iCpu == RTMpCpuId());

        /* For one-shot, we may allow the callback to restart them. */
        if (pTimer->cNsInterval == 0)
            pTimer->fSuspendedFromTimer = true;

        /*
         * Perform the callout.  pActiveThread lets stop/destroy detect a
         * callout in progress on this CPU.
         */
        pTimer->u.Single.pActiveThread = curthread;
        uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
        pTimer->u.Single.pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            if (    !pTimer->fIntervalChanged
                ||  RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning.  This comes at a slight cost,
             * which is why we don't do it all the time.
             */
            if (pTimer->u.Single.nsNextTick)
                pTimer->u.Single.nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Single.nsNextTick);
            return;
        }

        /*
         * The timer has been suspended, set expiration time to infinity.
         */
    }
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
/**
 * Callback wrapper for Omni-CPU timers.
 *
 * @param   pvArg   Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
 *          cyclic subsystem here, neither should pfnTimer().
 */
static void rtTimerSolOmniCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(pTimer->fAllCpus);

    if (!pTimer->fSuspendedFromTimer)
    {
        /*
         * Perform the callout.  Each CPU has its own tick counter and
         * active-thread marker in aPerCpu.
         */
        uint32_t const iCpu = CPU->cpu_id;
        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = curthread;
        uint64_t u64Tick = ++pTimer->u.Omni.aPerCpu[iCpu].u64Tick;

        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);

        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            if (    !pTimer->fIntervalChanged
                ||  RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning.  This comes at a slight cost,
             * which is why we don't do it all the time.
             *
             * Note! The cyclic_reprogram call only affects the omni cyclic
             *       component for this CPU.
             */
            if (pTimer->u.Omni.aPerCpu[iCpu].nsNextTick)
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Omni.aPerCpu[iCpu].nsNextTick);
            return;
        }

        /*
         * The timer has been suspended, set expiration time to infinity.
         */
    }
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
/** * Callback wrapper for Omni-CPU and single-CPU timers. * * @param pvArg Opaque pointer to the timer. * * @remarks This will be executed in interrupt context but only at the specified * level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the * cyclic subsystem here, neither should pfnTimer(). */ static void rtTimerSolCallbackWrapper(void *pvArg) { PRTTIMER pTimer = (PRTTIMER)pvArg; AssertPtrReturnVoid(pTimer); if (pTimer->pSingleTimer) { uint64_t u64Tick = ++pTimer->pSingleTimer->u64Tick; pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick); } else if (pTimer->pOmniTimer) { uint64_t u64Tick = ++pTimer->pOmniTimer->au64Ticks[CPU->cpu_id]; pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick); } }
/**
 * DPC callback function for the non-omni timers.
 *
 * Performs the user callout with an incremented tick count, unless the
 * timer has been suspended or is being torn down.
 *
 * @param   pDpc                Pointer to the DPC (unused).
 * @param   pvUser              Pointer to our internal timer structure.
 * @param   SystemArgument1     Some system argument (unused).
 * @param   SystemArgument2     Some system argument (unused).
 */
static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMER pThis = (PRTTIMER)pvUser;
    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);

    AssertPtr(pThis);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
#endif

    /*
     * Skip the callout if we've been suspended or are being destroyed.
     */
    if (ASMAtomicUoReadBool(&pThis->fSuspended))
        return;
    if (pThis->u32Magic != RTTIMER_MAGIC)
        return;

    pThis->aSubTimers[0].iTick++;
    pThis->pfnTimer(pThis, pThis->pvUser, pThis->aSubTimers[0].iTick);
}
/**
 * Timer thread.
 *
 * Boosts its own priority, arms the waitable timer, signals the creator,
 * and then loops: waiting for the timer (or APC delivery), performing the
 * user callback, and re-arming the timer until u32Magic is cleared by the
 * destructor.
 *
 * @returns 0 on normal shutdown, a Windows error code or -1 on failure
 *          (also stored in pTimer->iError for the creator).
 * @param   Thread  Our thread handle, used to signal the creator.
 * @param   pvArg   Pointer to our RTTIMER instance.
 */
static DECLCALLBACK(int) rttimerCallback(RTTHREAD Thread, void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)(void *)pvArg;
    Assert(pTimer->u32Magic == RTTIMER_MAGIC);

    /*
     * Bounce our priority up quite a bit.
     */
    if (    !SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL)
        /*&& !SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST)*/)
    {
        int rc = GetLastError();
        AssertMsgFailed(("Failed to set priority class lasterror %d.\n", rc));
        pTimer->iError = RTErrConvertFromWin32(rc);
        return rc;
    }

    /*
     * Start the waitable timer.
     */
#ifdef USE_CATCH_UP
    /* Catch-up mode tracks an absolute nanosecond deadline. */
    const int64_t NSInterval = (int64_t)pTimer->uMilliesInterval * 1000000;
    pTimer->llNext.QuadPart = RTTimeNanoTS() + NSInterval;
#else
    /* Negative due time = relative, in 100ns units. */
    pTimer->llNext.QuadPart = -(int64_t)pTimer->uMilliesInterval * 10000;
#endif
    LARGE_INTEGER ll;
    ll.QuadPart = -(int64_t)pTimer->uMilliesInterval * 10000;
#ifdef USE_APC
    if (!SetWaitableTimer(pTimer->hTimer, &ll, 0, rttimerAPCProc, pTimer, FALSE))
#else
    if (!SetWaitableTimer(pTimer->hTimer, &ll, 0, NULL, NULL, FALSE))
#endif
    {
        int rc = GetLastError();
        AssertMsgFailed(("Failed to set timer, lasterr %d.\n", rc));
        pTimer->iError = RTErrConvertFromWin32(rc);
        RTThreadUserSignal(Thread);
        return rc;
    }

    /*
     * Wait for the semaphore to be posted.
     */
    RTThreadUserSignal(Thread);
    for (;pTimer->u32Magic == RTTIMER_MAGIC;)
    {
#ifdef USE_APC
        /* Alertable wait: the callout happens in rttimerAPCProc. */
        int rc = WaitForSingleObjectEx(pTimer->hevWait, INFINITE, TRUE);
        if (rc != WAIT_OBJECT_0 && rc != WAIT_IO_COMPLETION)
#else
        int rc = WaitForSingleObjectEx(pTimer->hTimer, INFINITE, FALSE);
        /* Re-check after the wait: the destructor clears the magic. */
        if (pTimer->u32Magic != RTTIMER_MAGIC)
            break;
        if (rc == WAIT_OBJECT_0)
        {
            /*
             * Callback the handler.
             */
            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pTimer->iTick);

            /*
             * Rearm the timer handler.
             */
# ifdef USE_CATCH_UP
            /* Advance the absolute deadline and wait only for the remainder,
               with a 0.5ms minimum when we've fallen behind. */
            pTimer->llNext.QuadPart += NSInterval;
            LARGE_INTEGER ll;
            ll.QuadPart = RTTimeNanoTS() - pTimer->llNext.QuadPart;
            if (ll.QuadPart < -500000)
                ll.QuadPart = ll.QuadPart / 100;
            else
                ll.QuadPart = -500000 / 100; /* need to catch up, do a minimum wait of 0.5ms. */
# else
            LARGE_INTEGER ll = pTimer->llNext;
# endif
            BOOL fRc = SetWaitableTimer(pTimer->hTimer, &ll, 0, NULL, NULL, FALSE);
            AssertMsg(fRc || pTimer->u32Magic != RTTIMER_MAGIC, ("last error %d\n", GetLastError()));
            NOREF(fRc);
        }
        else
#endif
        {
            /*
             * We failed during wait, so just signal the destructor and exit.
             */
            int rc2 = GetLastError();
            RTThreadUserSignal(Thread);
            AssertMsgFailed(("Wait on hTimer failed, rc=%d lasterr=%d\n", rc, rc2));
            NOREF(rc2);
            return -1;
        }
    }

    /*
     * Exit.
     */
    RTThreadUserSignal(Thread);
    return 0;
}
/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * perform the callback on the CPU on which it is called.
 *
 * For one-shot omni timers it also maintains fSuspended via a countdown
 * (cOmniSuspendCountDown) so the timer is only marked suspended once every
 * queued slave DPC has been accounted for.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        /* Publish which thread is inside the callout. */
        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer: queue the slaves, re-arm, then do our callout.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

            uint64_t iTick = ++pSubTimer->iTick;
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc);
            pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        }
        else
        {
            /*
             * Single-shot timers get complicated wrt fSuspended maintenance:
             * count the online CPUs up front, decrement for every slave DPC
             * that could not be queued (already queued and counted), and for
             * ourselves; the CPU that brings the countdown to zero flips
             * fSuspended.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}