/*
 * DTrace probe-enable entry point for the profile provider.
 *
 * For a tick-N probe (PROF_TICK) a single high-level cyclic is installed
 * that fires system-wide every prof_interval nanoseconds.  For a profile-N
 * probe (PROF_PROFILE) an omni-present cyclic is installed instead, whose
 * online/offline callbacks arm a per-CPU cyclic on every CPU.
 *
 * Fix: the original tested prof->prof_kind twice (once to fill in the
 * handler structs, once to register them); the two if/else chains are
 * merged so each registration sits next to its setup and the condition
 * is evaluated only once.
 *
 * Must be called with cpu_lock held (required by cyclic_add/_add_omni).
 */
/*ARGSUSED*/
static void
profile_enable(void *arg, dtrace_id_t id, void *parg)
{
	profile_probe_t *prof = parg;
	cyc_omni_handler_t omni;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(prof->prof_interval != 0);
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (prof->prof_kind == PROF_TICK) {
		hdlr.cyh_func = profile_tick;
		hdlr.cyh_arg = prof;
		hdlr.cyh_level = CY_HIGH_LEVEL;

		when.cyt_interval = prof->prof_interval;
		/* First firing one full interval from now. */
		when.cyt_when = dtrace_gethrtime() + when.cyt_interval;

		prof->prof_cyclic = cyclic_add(&hdlr, &when);
	} else {
		ASSERT(prof->prof_kind == PROF_PROFILE);

		omni.cyo_online = profile_online;
		omni.cyo_offline = profile_offline;
		omni.cyo_arg = prof;

		prof->prof_cyclic = cyclic_add_omni(&omni);
	}
}
static void cyclic_test_002(void) { int error = 0; cyc_omni_handler_t hdlr; cyclic_id_t id; printf("%s: starting\n",__func__); hdlr.cyo_online = cyclic_test_002_online; hdlr.cyo_offline = cyclic_test_002_offline; hdlr.cyo_arg = NULL; nanotime(&test_002_start); mutex_enter(&cpu_lock); id = cyclic_add_omni(&hdlr); mutex_exit(&cpu_lock); DELAY(1200000); mutex_enter(&cpu_lock); cyclic_remove(id); mutex_exit(&cpu_lock); printf("%s: %s\n",__func__, error == 0 ? "passed":"failed"); }
/*
 * DTrace probe-enable entry point for the profile provider (xnu variant;
 * returns int per the Darwin dtrace_pops_t contract).
 *
 * PROF_TICK probes get a single high-level timer cyclic; PROF_PROFILE
 * probes get an omni cyclic whose online/offline callbacks arm a cyclic
 * on every CPU.
 *
 * Fix: the original evaluated prof->prof_kind twice in two separate
 * if/else chains; merged so setup and registration for each kind are
 * adjacent and the condition is tested once.
 *
 * Must be called with cpu_lock held.
 */
/*ARGSUSED*/
static int
profile_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id) /* __APPLE__ */
	profile_probe_t *prof = parg;
	cyc_omni_handler_t omni;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(prof->prof_interval != 0);
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (prof->prof_kind == PROF_TICK) {
		hdlr.cyh_func = profile_tick;
		hdlr.cyh_arg = prof;
		hdlr.cyh_level = CY_HIGH_LEVEL;

		when.cyt_interval = prof->prof_interval;
#if !defined(__APPLE__)
		when.cyt_when = dtrace_gethrtime() + when.cyt_interval;
#else
		/* On Darwin a zero cyt_when means "start now". */
		when.cyt_when = 0;
#endif /* __APPLE__ */

		prof->prof_cyclic = cyclic_timer_add(&hdlr, &when);
	} else {
		ASSERT(prof->prof_kind == PROF_PROFILE);

		omni.cyo_online = profile_online;
		omni.cyo_offline = profile_offline;
		omni.cyo_arg = prof;

		/* cast puns cyclic_id_list_t with cyclic_id_t */
		prof->prof_cyclic = (cyclic_id_t)cyclic_add_omni(&omni);
	}

	return (0);
}
/**
 * Starts a timer previously created with RTTimerCreateEx.
 *
 * For an all-CPU timer an omni cyclic is installed (per-CPU arming happens
 * in the rtTimerSolOmniCpuOnline callback); otherwise a single cyclic is
 * installed and optionally bound to the requested CPU.
 *
 * Fix: the original cleared pTimer->fSuspended up front and then returned
 * early on allocation failure / offline CPU, leaving a timer that never
 * started marked as running (so a retry would get VERR_TIMER_ACTIVE).
 * Every error path now restores fSuspended = true.
 *
 * @param   pTimer      The timer to start.
 * @param   u64First    Nanoseconds until the first firing (single-CPU case).
 * @returns VINF_SUCCESS, VERR_TIMER_ACTIVE, VERR_NOT_SUPPORTED,
 *          VERR_NO_MEMORY or VERR_CPU_OFFLINE.
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    if (!pTimer->fSuspended)
        return VERR_TIMER_ACTIVE;

    /* One-shot timers are not supported by the cyclic system. */
    if (pTimer->interval == 0)
        return VERR_NOT_SUPPORTED;

    pTimer->fSuspended = false;
    if (pTimer->fAllCpu)
    {
        PRTR0OMNITIMERSOL pOmniTimer = RTMemAllocZ(sizeof(RTR0OMNITIMERSOL));
        if (RT_UNLIKELY(!pOmniTimer))
        {
            pTimer->fSuspended = true;  /* fix: don't leave a failed timer marked active */
            return VERR_NO_MEMORY;
        }

        pOmniTimer->au64Ticks = RTMemAllocZ(RTMpGetCount() * sizeof(uint64_t));
        if (RT_UNLIKELY(!pOmniTimer->au64Ticks))
        {
            RTMemFree(pOmniTimer);
            pTimer->fSuspended = true;  /* fix: see above */
            return VERR_NO_MEMORY;
        }

        /*
         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->pOmniTimer = pOmniTimer;
        pOmniTimer->u64When = pTimer->interval + RTTimeNanoTS();

        cyc_omni_handler_t hOmni;
        hOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        hOmni.cyo_offline = NULL;
        hOmni.cyo_arg     = pTimer;

        mutex_enter(&cpu_lock);
        pTimer->hCyclicId = cyclic_add_omni(&hOmni);
        mutex_exit(&cpu_lock);
    }
    else
    {
        int iCpu = SOL_TIMER_ANY_CPU;
        if (pTimer->fSpecificCpu)
        {
            iCpu = pTimer->iCpu;
            if (!RTMpIsCpuOnline(iCpu)) /* ASSUMES: index == cpuid */
            {
                pTimer->fSuspended = true;  /* fix: see above */
                return VERR_CPU_OFFLINE;
            }
        }

        PRTR0SINGLETIMERSOL pSingleTimer = RTMemAllocZ(sizeof(RTR0SINGLETIMERSOL));
        if (RT_UNLIKELY(!pSingleTimer))
        {
            pTimer->fSuspended = true;  /* fix: see above */
            return VERR_NO_MEMORY;
        }

        pTimer->pSingleTimer = pSingleTimer;
        pSingleTimer->hHandler.cyh_func  = rtTimerSolCallbackWrapper;
        pSingleTimer->hHandler.cyh_arg   = pTimer;
        pSingleTimer->hHandler.cyh_level = CY_LOCK_LEVEL;

        mutex_enter(&cpu_lock);
        /* Re-check under cpu_lock: the CPU may have gone offline meanwhile. */
        if (iCpu != SOL_TIMER_ANY_CPU && !cpu_is_online(cpu[iCpu]))
        {
            mutex_exit(&cpu_lock);
            RTMemFree(pSingleTimer);
            pTimer->pSingleTimer = NULL;
            pTimer->fSuspended = true;  /* fix: see above */
            return VERR_CPU_OFFLINE;
        }

        pSingleTimer->hFireTime.cyt_when = u64First + RTTimeNanoTS();
        if (pTimer->interval == 0)
        {
            /*
             * NOTE(review): unreachable — interval == 0 already returned
             * VERR_NOT_SUPPORTED above.  Kept for the static size check.
             */
            /** @todo use gethrtime_max instead of LLONG_MAX? */
            AssertCompileSize(pSingleTimer->hFireTime.cyt_interval, sizeof(long long));
            pSingleTimer->hFireTime.cyt_interval = LLONG_MAX - pSingleTimer->hFireTime.cyt_when;
        }
        else
            pSingleTimer->hFireTime.cyt_interval = pTimer->interval;

        pTimer->hCyclicId = cyclic_add(&pSingleTimer->hHandler, &pSingleTimer->hFireTime);
        if (iCpu != SOL_TIMER_ANY_CPU)
            cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);

        mutex_exit(&cpu_lock);
    }

    return VINF_SUCCESS;
}
/**
 * Starts a timer.
 *
 * All-CPU timers install an omni cyclic (per-CPU arming is done by the
 * rtTimerSolOmniCpuOnline callback, invoked by cyclic_add_omni for every
 * online CPU); single-CPU timers install one cyclic and, when a specific
 * CPU was requested, bind it and then reprogram the real expiry.
 *
 * @param   pTimer      The timer to start.
 * @param   u64First    Nanoseconds until the first firing (0 = use the
 *                      configured interval for the all-CPU case).
 * @returns VINF_SUCCESS, VERR_TIMER_ACTIVE, VERR_INVALID_CONTEXT or
 *          VERR_CPU_OFFLINE.
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * It's not possible to restart a one-shot time from it's callback function,
     * at least not at the moment.
     */
    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);

    mutex_enter(&cpu_lock);

    /*
     * Make sure it's not active already.  If it was suspended from a timer
     * callback function, we need to do some cleanup work here before we can
     * restart the timer.
     */
    if (!pTimer->fSuspended)
    {
        if (!pTimer->fSuspendedFromTimer)
        {
            mutex_exit(&cpu_lock);
            return VERR_TIMER_ACTIVE;
        }
        /* Suspended from within the callback: drop the stale cyclic first. */
        cyclic_remove(pTimer->hCyclicId);
        pTimer->hCyclicId = CYCLIC_NONE;
    }

    pTimer->fSuspended = false;
    pTimer->fSuspendedFromTimer = false;
    pTimer->fIntervalChanged = false;
    if (pTimer->fAllCpus)
    {
        /*
         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->u.Omni.u64When = RTTimeSystemNanoTS() + (u64First ? u64First : pTimer->cNsInterval);

        cyc_omni_handler_t HandlerOmni;
        HandlerOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        HandlerOmni.cyo_offline = NULL;
        HandlerOmni.cyo_arg     = pTimer;

        pTimer->hCyclicId = cyclic_add_omni(&HandlerOmni);
    }
    else
    {
        cyc_handler_t Handler;
        cyc_time_t    FireTime;

        /*
         * Setup a single CPU timer.   If a specific CPU was requested, it
         * must be online or the timer cannot start.
         */
        if (   pTimer->fSpecificCpu
            && !RTMpIsCpuOnline(pTimer->iCpu)) /* ASSUMES: index == cpuid */
        {
            /* Roll back the "running" state set above before bailing. */
            pTimer->fSuspended = true;
            mutex_exit(&cpu_lock);
            return VERR_CPU_OFFLINE;
        }

        Handler.cyh_func  = (cyc_func_t)rtTimerSolSingleCallbackWrapper;
        Handler.cyh_arg   = pTimer;
        Handler.cyh_level = CY_LOCK_LEVEL;

        /*
         * Use a large interval (1 hour) so that we don't get a timer-callback between
         * cyclic_add() and cyclic_bind(). Program the correct interval once cyclic_bind() is done.
         * See @bugref{7691#c20}.
         */
        if (!pTimer->fSpecificCpu)
            FireTime.cyt_when = RTTimeSystemNanoTS() + u64First;
        else
            FireTime.cyt_when = RTTimeSystemNanoTS() + u64First + RT_NS_1HOUR;
        FireTime.cyt_interval = pTimer->cNsInterval != 0
                              ? pTimer->cNsInterval
                              : CY_INFINITY /* Special value, see cyclic_fire(). */;
        pTimer->u.Single.u64Tick = 0;
        pTimer->u.Single.nsNextTick = 0;

        pTimer->hCyclicId = cyclic_add(&Handler, &FireTime);
        if (pTimer->fSpecificCpu)
        {
            cyclic_bind(pTimer->hCyclicId, cpu[pTimer->iCpu], NULL /* cpupart */);
            /* Now that binding is done, program the real first-expiry time. */
            cyclic_reprogram(pTimer->hCyclicId, RTTimeSystemNanoTS() + u64First);
        }
    }

    mutex_exit(&cpu_lock);
    return VINF_SUCCESS;
}