void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				CE(call)->queue,
				0x2b, 0);
#endif
			timer_queue_unlock(queue);
			continue;
		}

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		/* and queue it on new */
		new_queue = timer_queue_assign(CE(call)->deadline);
		timer_queue_lock_spin(new_queue);
		timer_call_entry_enqueue_deadline(
			call, new_queue, CE(call)->deadline);
		timer_queue_unlock(new_queue);

		simple_unlock(&call->lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
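The comma operator in the while condition is doing real work here: the loop body drops the queue lock (either to skip a contended entry or to migrate one), and the comma's left operand re-acquires it before every emptiness test, so the final, failing test still leaves the lock held for the unlock after the loop. A minimal user-space sketch of the same idiom, using a pthread mutex as a hypothetical stand-in for timer_queue_lock_spin(), is:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int items_remaining = 3;	/* toy stand-in for the queue contents */

int main(void)
{
	/*
	 * The left operand of the comma (taking the lock) is evaluated
	 * before the emptiness test, so the lock is re-acquired on every
	 * iteration, and the body may freely drop it before looping.
	 */
	while (pthread_mutex_lock(&queue_lock), items_remaining > 0) {
		printf("processing item, %d left\n", items_remaining--);
		pthread_mutex_unlock(&queue_lock);
	}
	/* The failed test leaves the lock held, mirroring the final
	 * timer_queue_unlock(queue) after the kernel loop. */
	pthread_mutex_unlock(&queue_lock);
	return 0;
}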
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t			call,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				CE(call)->queue,
				0x1c, 0);
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
			call->async_dequeue = FALSE;
			entry->queue = NULL;
		} else if (old_queue != queue) {
			timer_call_entry_dequeue(call);
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue == timer_longterm_queue)
			timer_longterm_dequeued_locked(call);
		if (old_queue != queue) {
			timer_queue_unlock(old_queue);
			timer_queue_lock_spin(queue);
		}
	} else {
		timer_queue_lock_spin(queue);
	}

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_queue_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}
boolean_t
timer_call_cancel(
	timer_call_t		call)
{
	mpqueue_head_t		*old_queue;
	spl_t			s;

	s = splclock();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_CANCEL | DBG_FUNC_START,
		VM_KERNEL_UNSLIDE_OR_PERM(call),
		TCE(call)->deadline, call->soft_deadline, call->flags, 0);

	old_queue = timer_call_dequeue_unlocked(call);

	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (!queue_empty(&old_queue->head)) {
			timer_queue_cancel(old_queue, TCE(call)->deadline,
				CE(queue_first(&old_queue->head))->deadline);
			timer_call_t thead = (timer_call_t)queue_first(&old_queue->head);
			old_queue->earliest_soft_deadline =
				thead->flags & TIMER_CALL_RATELIMITED ?
					TCE(thead)->deadline : thead->soft_deadline;
		} else {
			timer_queue_cancel(old_queue, TCE(call)->deadline, UINT64_MAX);
			old_queue->earliest_soft_deadline = UINT64_MAX;
		}
		timer_queue_unlock(old_queue);
	}
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_CANCEL | DBG_FUNC_END,
		VM_KERNEL_UNSLIDE_OR_PERM(call),
		VM_KERNEL_UNSLIDE_OR_PERM(old_queue),
		TCE(call)->deadline - mach_absolute_time(),
		TCE(call)->deadline - TCE(call)->entry_time, 0);
	splx(s);

#if CONFIG_DTRACE
	DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func,
	    timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0,
	    (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

	return (old_queue != NULL);
}
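For context, timer_call_cancel() is normally reached through the public timer_call API. The following kernel-context sketch of the setup/enter/cancel lifecycle is illustrative only: it assumes the timer_call_setup(), timer_call_enter() and clock_interval_to_deadline() declarations from osfmk/kern/timer_call.h and osfmk/kern/clock.h, and the TIMER_CALL_SYS_NORMAL flag, whose exact spellings vary across XNU releases.

#include <kern/timer_call.h>
#include <kern/clock.h>

static timer_call_data_t my_timer;	/* hypothetical: storage for the timer_call */

static void
my_timeout(timer_call_param_t p0, timer_call_param_t p1)
{
	/* runs from interrupt/softint context when the deadline fires */
	(void)p0; (void)p1;
}

static void
arm_and_disarm_example(void)
{
	uint64_t	deadline;
	boolean_t	was_queued;

	/* bind the callout function and param0 to the timer_call */
	timer_call_setup(&my_timer, my_timeout, NULL);

	/* arm it 10 ms from now; enter returns TRUE if it was already pending */
	clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);
	(void) timer_call_enter(&my_timer, deadline, TIMER_CALL_SYS_NORMAL);

	/* cancel returns TRUE only if the call was still queued, i.e. we
	 * won the race against expiry */
	was_queued = timer_call_cancel(&my_timer);
	(void) was_queued;
}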
mpqueue_head_t *
timer_call_dequeue_unlocked(
	timer_call_t		call)
{
	call_entry_t		entry = TCE(call);
	mpqueue_head_t		*old_queue;

	DBG("timer_call_dequeue_unlocked(%p)\n", call);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
#if TIMER_ASSERT
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
		VM_KERNEL_UNSLIDE_OR_PERM(call),
		call->async_dequeue,
		VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
		0, 0);
#endif
	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE_OR_PERM(call),
				call->async_dequeue,
				VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
				0x1c, 0);
			timer_call_dequeue_unlocked_async1++;
#endif
			call->async_dequeue = FALSE;
			entry->queue = NULL;
		} else {
			timer_call_entry_dequeue(call);
		}
		if (old_queue == timer_longterm_queue)
			timer_longterm_dequeued_locked(call);
		timer_queue_unlock(old_queue);
	}
	simple_unlock(&call->lock);
	return (old_queue);
}
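The trylock/async_dequeue dance exists because the two sides take the locks in opposite orders: enqueue, cancel and dequeue_unlocked take the per-call lock first and then the queue lock, while expire and shutdown already hold the queue lock and can only try-lock the call. When the try fails, the queue side marks the entry with async_dequeue and leaves its queue back-pointer alone; the call side later observes the flag under both locks and finishes the removal. A minimal user-space sketch of that handshake follows; the toy_* names are hypothetical and the queue is reduced to a single boolean.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of one timer entry. */
struct toy_call {
	pthread_mutex_t	lock;
	bool		queued;		/* stands in for the queue back-pointer */
	bool		async_dequeue;	/* "logically removed by the queue side" */
};

static pthread_mutex_t toy_queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Expiry side: holds the queue lock, so it may only *try* the call lock. */
static void toy_expire(struct toy_call *c)
{
	pthread_mutex_lock(&toy_queue_lock);
	if (pthread_mutex_trylock(&c->lock) != 0) {
		/* case (2b): a canceller holds the call lock and is waiting
		 * for the queue lock; flag the entry and move on instead of
		 * deadlocking. */
		c->async_dequeue = true;
		pthread_mutex_unlock(&toy_queue_lock);
		return;
	}
	c->queued = false;			/* real dequeue */
	pthread_mutex_unlock(&c->lock);
	pthread_mutex_unlock(&toy_queue_lock);
}

/* Cancel side: takes the call lock first, then the queue lock. */
static void toy_cancel(struct toy_call *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->queued) {
		pthread_mutex_lock(&toy_queue_lock);
		if (c->async_dequeue) {
			/* collision (1c): expiry already pulled it; clear state */
			c->async_dequeue = false;
			c->queued = false;
		} else {
			c->queued = false;	/* normal dequeue */
		}
		pthread_mutex_unlock(&toy_queue_lock);
	}
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct toy_call c = { PTHREAD_MUTEX_INITIALIZER, true, false };

	toy_expire(&c);
	toy_cancel(&c);
	printf("queued=%d async=%d\n", c.queued, c.async_dequeue);
	return 0;
}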
uint64_t
timer_queue_expire_with_options(
	mpqueue_head_t		*queue,
	uint64_t		deadline,
	boolean_t		rescan)
{
	timer_call_t	call = NULL;
	uint32_t	tc_iterations = 0;
	DBG("timer_queue_expire(%p,)\n", queue);

	uint64_t cur_deadline = deadline;
	timer_queue_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		/* Upon processing one or more timer calls, refresh the
		 * deadline to account for time elapsed in the callout
		 */
		if (++tc_iterations > 1)
			cur_deadline = mach_absolute_time();

		if (call == NULL)
			call = TIMER_CALL(queue_first(&queue->head));

		if (call->soft_deadline <= cur_deadline) {
			timer_call_func_t	func;
			timer_call_param_t	param0, param1;

			TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0);
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
				call,
				call->soft_deadline,
				CE(call)->deadline,
				CE(call)->entry_time, 0);

			/* Bit 0 of the "soft" deadline indicates that
			 * this particular timer call is rate-limited
			 * and hence shouldn't be processed before its
			 * hard deadline.
			 */
			if ((call->soft_deadline & 0x1) &&
			    (CE(call)->deadline > cur_deadline)) {
				if (rescan == FALSE)
					break;
			}

			if (!simple_lock_try(&call->lock)) {
				/* case (2b) lock inversion, dequeue and skip */
				timer_queue_expire_lock_skips++;
				timer_call_entry_dequeue_async(call);
				call = NULL;
				continue;
			}

			timer_call_entry_dequeue(call);

			func = CE(call)->func;
			param0 = CE(call)->param0;
			param1 = CE(call)->param1;

			simple_unlock(&call->lock);
			timer_queue_unlock(queue);

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_CALLOUT | DBG_FUNC_START,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);

#if CONFIG_DTRACE
			DTRACE_TMR7(callout__start, timer_call_func_t, func,
			    timer_call_param_t, param0, unsigned, call->flags,
			    0, (call->ttd >> 32),
			    (unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif
			/* Maintain time-to-deadline in per-processor data
			 * structure for thread wakeup deadline statistics.
			 */
			uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
			*ttdp = call->ttd;
			(*func)(param0, param1);
			*ttdp = 0;
#if CONFIG_DTRACE
			DTRACE_TMR4(callout__end, timer_call_func_t, func,
			    param0, param1, call);
#endif

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_CALLOUT | DBG_FUNC_END,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
			call = NULL;
			timer_queue_lock_spin(queue);
		} else {
			if (__probable(rescan == FALSE)) {
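The expiry loop relies on bit 0 of soft_deadline being free to act as the rate-limited tag, which only works if soft deadlines are quantized coarsely enough that the low bit carries no timing information. A small, self-contained sketch of that encoding and of the deferral test follows; all toy_* names are hypothetical and the quantization is assumed, not taken from the kernel.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_RATELIMITED_BIT	0x1ULL

/* Deadlines are assumed to be quantized (at least to even values),
 * leaving bit 0 free to carry the rate-limited tag. */
static uint64_t toy_tag_soft_deadline(uint64_t deadline, bool ratelimited)
{
	uint64_t soft = deadline & ~TOY_RATELIMITED_BIT;	/* clear the flag bit */
	return ratelimited ? (soft | TOY_RATELIMITED_BIT) : soft;
}

int main(void)
{
	uint64_t soft = toy_tag_soft_deadline(1000, true);
	uint64_t hard = 1500, now = 1200;

	/* The expire loop's test: a tagged (rate-limited) entry whose hard
	 * deadline is still in the future is not run early. */
	if ((soft & TOY_RATELIMITED_BIT) && (hard > now))
		printf("defer until the hard deadline\n");
	return 0;
}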
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));

		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE_OR_PERM(call),
				call->async_dequeue,
				VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
				0x2b, 0);
#endif
			timer_queue_unlock(queue);
			continue;
		}

		boolean_t call_local = ((call->flags & TIMER_CALL_LOCAL) != 0);

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		if (call_local == FALSE) {
			/* and queue it on new, discarding LOCAL timers */
			new_queue = timer_queue_assign(TCE(call)->deadline);
			timer_queue_lock_spin(new_queue);
			timer_call_entry_enqueue_deadline(
				call, new_queue, TCE(call)->deadline);
			timer_queue_unlock(new_queue);
		} else {
			timer_queue_shutdown_discarded++;
		}

		/* The only lingering LOCAL timer should be this thread's
		 * quantum expiration timer.
		 */
		assert((call_local == FALSE) ||
		    (TCE(call)->func == thread_quantum_expire));

		simple_unlock(&call->lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t			call,
	mpqueue_head_t			*queue,
	uint64_t			deadline,
	uint64_t			soft_deadline,
	uint64_t			ttd,
	timer_call_param_t		param1,
	uint32_t			callout_flags)
{
	call_entry_t	entry = TCE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);

	old_queue = MPQUEUE(entry->queue);

	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE_OR_PERM(call),
				call->async_dequeue,
				VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
				0x1c, 0);
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
			call->async_dequeue = FALSE;
			entry->queue = NULL;
		} else if (old_queue != queue) {
			timer_call_entry_dequeue(call);
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue == timer_longterm_queue)
			timer_longterm_dequeued_locked(call);
		if (old_queue != queue) {
			timer_queue_unlock(old_queue);
			timer_queue_lock_spin(queue);
		}
	} else {
		timer_queue_lock_spin(queue);
	}

	call->soft_deadline = soft_deadline;
	call->flags = callout_flags;
	TCE(call)->param1 = param1;
	call->ttd = ttd;

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_queue_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}