/*
 * Variant 1: coalescing slop is derived from the deadline alone, and the
 * past-deadline adjustment is compiled only for i386/x86_64.
 */
static boolean_t
timer_call_enter_internal(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint32_t		flags)
{
	mpqueue_head_t		*queue;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t		slop = 0;

	s = splclock();

	call->soft_deadline = deadline;
	call->flags = flags;

	/* Apply coalescing slop unless the callout is marked critical. */
	if ((flags & TIMER_CALL_CRITICAL) == 0 &&
	    mach_timer_coalescing_enabled) {
		slop = timer_call_slop(deadline);
		deadline += slop;
	}

#if defined(__i386__) || defined(__x86_64__)
	uint64_t ctime = mach_absolute_time();
	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		past_deadline_timers++;
		past_deadline_deltas += delta;
		/* Track the longest and shortest lateness observed. */
		if (delta > past_deadline_longest)
			past_deadline_longest = delta;
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		/* Push an already-expired deadline slightly into the future. */
		deadline = ctime + past_deadline_timer_adjustment;
		call->soft_deadline = deadline;
	}
#endif
	queue = timer_queue_assign(deadline);

	old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);

	CE(call)->param1 = param1;

	splx(s);

	return (old_queue != NULL);
}
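In this variant, timer_call_slop() sees only the deadline, so any slop it grants must be computed against a clock reading it takes itself. A minimal sketch of one plausible shape for such a function, assuming slop is a fixed power-of-two fraction of the time remaining; the shift amount, the function body, and the "now" parameter (added so the sketch stands alone) are illustrative assumptions, not the kernel's actual implementation:

#include <stdint.h>

/* Illustrative sketch only: grant 1/8 of the remaining interval as
 * coalescing slop, and nothing for deadlines already in the past. */
static uint64_t
timer_call_slop_sketch(uint64_t deadline, uint64_t now)
{
	if (deadline <= now)
		return 0;	/* nothing to coalesce for expired deadlines */

	return (deadline - now) >> 3;
}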
/*
 * Variant 2: slop becomes urgency- and thread-aware, a caller-supplied
 * leeway can widen it, the slop addition saturates at UINT64_MAX, bit 0 of
 * the deadlines encodes rate-limiting, and far-out callouts may be placed
 * on a long-term queue.
 */
static boolean_t
timer_call_enter_internal(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint64_t		leeway,
	uint32_t		flags,
	boolean_t		ratelimited)
{
	mpqueue_head_t		*queue = NULL;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t		slop;
	uint32_t		urgency;

	s = splclock();

	call->soft_deadline = deadline;
	call->flags = flags;

	uint64_t ctime = mach_absolute_time();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_ENTER | DBG_FUNC_START,
	    call,
	    param1, deadline, flags, 0);

	urgency = (flags & TIMER_CALL_URGENCY_MASK);

	boolean_t slop_ratelimited = FALSE;
	slop = timer_call_slop(deadline, ctime, urgency,
	    current_thread(), &slop_ratelimited);

	/* A caller-supplied leeway may widen, but never narrow, the slop. */
	if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop)
		slop = leeway;

	/* Saturate rather than wrap when adding slop near UINT64_MAX. */
	if (UINT64_MAX - deadline <= slop) {
		deadline = UINT64_MAX;
	} else {
		deadline += slop;
	}

	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		past_deadline_timers++;
		past_deadline_deltas += delta;
		/* Track the longest and shortest lateness observed. */
		if (delta > past_deadline_longest)
			past_deadline_longest = delta;
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		deadline = ctime + past_deadline_timer_adjustment;
		call->soft_deadline = deadline;
	}

	/* Bit 0 of the "soft" deadline indicates that
	 * this particular timer call requires rate-limiting
	 * behaviour. Maintain the invariant deadline >= soft_deadline
	 * by setting bit 0 of "deadline".
	 */
	deadline |= 1;
	if (ratelimited || slop_ratelimited) {
		call->soft_deadline |= 1ULL;
	} else {
		call->soft_deadline &= ~0x1ULL;
	}

	call->ttd = call->soft_deadline - ctime;

#if CONFIG_DTRACE
	DTRACE_TMR7(callout__create, timer_call_func_t, CE(call)->func,
	    timer_call_param_t, CE(call)->param0, uint32_t, call->flags,
	    (deadline - call->soft_deadline),
	    (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif

	/* Try the long-term queue first; fall back to a per-CPU queue. */
	if (!ratelimited && !slop_ratelimited) {
		queue = timer_longterm_enqueue_unlocked(call, ctime,
		    deadline, &old_queue);
	}

	if (queue == NULL) {
		queue = timer_queue_assign(deadline);
		old_queue = timer_call_enqueue_deadline_unlocked(call,
		    queue, deadline);
	}

	CE(call)->param1 = param1;
#if TIMER_TRACE
	CE(call)->entry_time = ctime;
#endif

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_ENTER | DBG_FUNC_END,
	    call,
	    (old_queue != NULL), call->soft_deadline, queue->count, 0);

	splx(s);

	return (old_queue != NULL);
}
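The bit-0 scheme above can be exercised in isolation. A self-contained sketch (the function name and main() driver are illustrative, not kernel code) showing that the tagging preserves the invariant deadline >= soft_deadline whenever the hard deadline starts at or above the soft one:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative restatement of the encoding above: bit 0 of the hard
 * deadline is always set, and bit 0 of the soft deadline records
 * whether the callout is rate-limited. Requires *deadline >= *soft
 * on entry. */
static void
tag_deadlines(uint64_t *deadline, uint64_t *soft, bool ratelimited)
{
	*deadline |= 1ULL;
	if (ratelimited)
		*soft |= 1ULL;
	else
		*soft &= ~1ULL;
	/* The invariant the kernel comment calls out. */
	assert(*deadline >= *soft);
}

int
main(void)
{
	uint64_t hard = 1000, soft = 1000;

	tag_deadlines(&hard, &soft, true);	/* hard == 1001, soft == 1001 */
	tag_deadlines(&hard, &soft, false);	/* hard == 1001, soft == 1000 */
	return 0;
}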
/*
 * Variant 3: kernel pointers are unslid in the trace points, rate-limiting
 * moves from bit 0 of the soft deadline into TIMER_CALL_RATELIMITED in the
 * flags, and the soft deadline, time-to-deadline, param1, and flags travel
 * as arguments so the enqueue routines can program the callout under the
 * destination queue's lock.
 */
static boolean_t
timer_call_enter_internal(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint64_t		leeway,
	uint32_t		flags,
	boolean_t		ratelimited)
{
	mpqueue_head_t		*queue = NULL;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t		slop;
	uint32_t		urgency;
	uint64_t		sdeadline, ttd;

	s = splclock();

	sdeadline = deadline;
	uint64_t ctime = mach_absolute_time();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_ENTER | DBG_FUNC_START,
	    VM_KERNEL_UNSLIDE_OR_PERM(call),
	    VM_KERNEL_UNSLIDE_OR_PERM(param1), deadline, flags, 0);

	urgency = (flags & TIMER_CALL_URGENCY_MASK);

	boolean_t slop_ratelimited = FALSE;
	slop = timer_call_slop(deadline, ctime, urgency,
	    current_thread(), &slop_ratelimited);

	/* A caller-supplied leeway may widen, but never narrow, the slop. */
	if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop)
		slop = leeway;

	/* Saturate rather than wrap when adding slop near UINT64_MAX. */
	if (UINT64_MAX - deadline <= slop) {
		deadline = UINT64_MAX;
	} else {
		deadline += slop;
	}

	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		past_deadline_timers++;
		past_deadline_deltas += delta;
		/* Track the longest and shortest lateness observed. */
		if (delta > past_deadline_longest)
			past_deadline_longest = delta;
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		deadline = ctime + past_deadline_timer_adjustment;
		sdeadline = deadline;
	}

	if (ratelimited || slop_ratelimited) {
		flags |= TIMER_CALL_RATELIMITED;
	} else {
		flags &= ~TIMER_CALL_RATELIMITED;
	}

	ttd = sdeadline - ctime;

#if CONFIG_DTRACE
	DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func,
	    timer_call_param_t, TCE(call)->param0, uint32_t, flags,
	    (deadline - sdeadline), (ttd >> 32),
	    (unsigned) (ttd & 0xFFFFFFFF), call);
#endif

	/* Program timer callout parameters under the appropriate per-CPU or
	 * longterm queue lock. The callout may have been previously enqueued
	 * and in-flight on this or another timer queue.
	 */
	if (!ratelimited && !slop_ratelimited) {
		queue = timer_longterm_enqueue_unlocked(call, ctime, deadline,
		    &old_queue, sdeadline, ttd, param1, flags);
	}

	if (queue == NULL) {
		queue = timer_queue_assign(deadline);
		old_queue = timer_call_enqueue_deadline_unlocked(call, queue,
		    deadline, sdeadline, ttd, param1, flags);
	}

#if TIMER_TRACE
	TCE(call)->entry_time = ctime;
#endif

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
	    DECR_TIMER_ENTER | DBG_FUNC_END,
	    VM_KERNEL_UNSLIDE_OR_PERM(call),
	    (old_queue != NULL), deadline, queue->count, 0);

	splx(s);

	return (old_queue != NULL);
}
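Both later variants share the saturating slop addition. Factored into a standalone helper for illustration (the helper name is an assumption, not kernel code), the clamp reads:

#include <stdint.h>

/* Illustrative helper mirroring the clamp in the two later variants:
 * add slop to a deadline, saturating at UINT64_MAX instead of letting
 * the 64-bit addition wrap around. */
static uint64_t
deadline_add_slop(uint64_t deadline, uint64_t slop)
{
	if (UINT64_MAX - deadline <= slop)
		return UINT64_MAX;
	return deadline + slop;
}

Note the design shift in the third variant: rather than writing soft_deadline and flags into the callout before re-queueing it, the values are handed to the enqueue routines and committed while the destination queue's lock is held. Per the source comment, this matters because the callout may already be enqueued and in flight on this or another timer queue.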