/*
 * Remove a timer call from whatever queue it is on.
 * Takes the per-call lock first, then the queue lock; if another path
 * already unlinked the entry under the queue lock alone (async_dequeue
 * set), only the flag needs clearing here.
 */
mpqueue_head_t *
timer_call_dequeue_unlocked(
    timer_call_t    call)
{
    call_entry_t    entry = CE(call);
    mpqueue_head_t  *old_queue;

    DBG("timer_call_dequeue_unlocked(%p)\n", call);

    simple_lock(&call->lock);
    old_queue = MPQUEUE(entry->queue);
    if (old_queue != NULL) {
        timer_call_lock_spin(old_queue);
        if (call->async_dequeue) {
            /* collision (1c): null queue pointer and reset flag */
            call->async_dequeue = FALSE;
#if TIMER_ASSERT
            timer_call_dequeue_unlocked_async1++;
#endif
        } else {
            (void)remque(qe(entry));
#if TIMER_ASSERT
            timer_call_dequeue_unlocked_async2++;
#endif
        }
        entry->queue = NULL;
        timer_call_unlock(old_queue);
    }
    simple_unlock(&call->lock);
    return (old_queue);
}
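The fields consulted above are guarded by two locks taken in a fixed order: the per-call lock first, then the queue lock. Stripped of kernel machinery, the handshake looks like the following user-space model (a sketch only; the tnode/tq types and pthread locks are stand-ins for XNU's timer_call/mpqueue types, not its API). The expiry-side half of the protocol appears after the case-2b helper below.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct tq;                              /* stand-in for mpqueue_head_t */

struct tnode {                          /* stand-in for timer_call */
    struct tnode    *prev, *next;       /* doubly-linked queue linkage */
    struct tq       *queue;             /* queue this node believes it is on */
    bool            async_dequeue;      /* set by the lock-constrained unlink */
    pthread_mutex_t lock;               /* per-timer lock, always taken first */
};

struct tq {
    struct tnode    head;               /* circular list head */
    pthread_mutex_t lock;               /* queue lock, always taken second */
};

/* Analogue of timer_call_dequeue_unlocked(): both locks, in order. */
static struct tq *
tnode_dequeue(struct tnode *t)
{
    struct tq *oldq;

    pthread_mutex_lock(&t->lock);
    oldq = t->queue;
    if (oldq != NULL) {
        pthread_mutex_lock(&oldq->lock);
        if (t->async_dequeue) {
            /* collision: the expiry path already unlinked this node */
            t->async_dequeue = false;
        } else {
            t->prev->next = t->next;    /* remque() equivalent */
            t->next->prev = t->prev;
        }
        t->queue = NULL;
        pthread_mutex_unlock(&oldq->lock);
    }
    pthread_mutex_unlock(&t->lock);
    return oldq;
}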
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
    timer_call_t    entry,
    mpqueue_head_t  *queue,
    uint64_t        deadline)
{
    mpqueue_head_t  *old_queue = MPQUEUE(CE(entry)->queue);

    if (!hw_lock_held((hw_lock_t)&entry->lock))
        panic("_call_entry_enqueue_deadline() "
            "entry %p is not locked\n", entry);
    /* XXX More lock pretense: */
    if (!hw_lock_held((hw_lock_t)&queue->lock_data))
        panic("_call_entry_enqueue_deadline() "
            "queue %p is not locked\n", queue);
    /* Don't change the queue, if the timer is scheduled elsewhere */
    if (old_queue != NULL && old_queue != queue)
        panic("_call_entry_enqueue_deadline() "
            "old_queue %p != queue", old_queue);

    call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

    /* For efficiency, track the earliest soft deadline on the queue,
     * so that fuzzy decisions can be made without lock acquisitions.
     */
    queue->earliest_soft_deadline =
        ((timer_call_t)queue_first(&queue->head))->soft_deadline;

    if (old_queue)
        old_queue->count--;
    queue->count++;

    return (old_queue);
}
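The comment about "fuzzy decisions" is the point of the cached field: other code can glance at a queue's most urgent soft deadline without paying for its lock. A hypothetical reader, assuming (as on the kernel's targets) that an aligned 64-bit load is atomic and that a stale value merely wastes or defers one lock acquisition:

/* Hypothetical helper, not in the original source: is this queue even
 * worth locking? A racy read can only be stale; the caller still takes
 * the queue lock before acting on any entry. */
static inline boolean_t
timer_queue_maybe_due(mpqueue_head_t *queue, uint64_t ctime)
{
    return (queue->count > 0) && (queue->earliest_soft_deadline <= ctime);
}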
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
    timer_call_t    entry,
    mpqueue_head_t  *queue,
    uint64_t        deadline)
{
    return MPQUEUE(call_entry_enqueue_deadline(CE(entry),
        QUEUE(queue), deadline));
}
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
    timer_call_t    entry)
{
    mpqueue_head_t  *old_queue = MPQUEUE(CE(entry)->queue);

    call_entry_dequeue(CE(entry));
    old_queue->count--;

    return old_queue;
}
/*
 * Remove timer entry from its queue but don't change the queue pointer
 * and set the async_dequeue flag. This is locking case 2b.
 */
static __inline__ void
timer_call_entry_dequeue_async(
    timer_call_t    entry)
{
    mpqueue_head_t  *old_queue = MPQUEUE(CE(entry)->queue);

    if (old_queue) {
        old_queue->count--;
        (void) remque(qe(entry));
        entry->async_dequeue = TRUE;
    }
    return;
}
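Continuing the user-space model from above, this is the expiry-side half of the handshake: the expiry path owns only the queue lock, and taking the per-call lock here would invert the established order, so it unlinks the node but leaves the back-pointer in place and raises the flag instead.

/* Analogue of timer_call_entry_dequeue_async(), case 2b: caller holds
 * t->queue->lock only. The node is unlinked, but t->queue stays set so
 * that whoever next takes both locks (case 1c above) finishes the job. */
static void
tnode_dequeue_async(struct tnode *t)
{
    if (t->queue != NULL) {
        t->prev->next = t->next;        /* remque() equivalent */
        t->next->prev = t->prev;
        t->async_dequeue = true;
    }
}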
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
    timer_call_t    call,
    mpqueue_head_t  *queue,
    uint64_t        deadline)
{
    call_entry_t    entry = CE(call);
    mpqueue_head_t  *old_queue;

    DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

    simple_lock(&call->lock);
    old_queue = MPQUEUE(entry->queue);
    if (old_queue != NULL) {
        timer_queue_lock_spin(old_queue);
        if (call->async_dequeue) {
            /* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                call,
                call->async_dequeue,
                CE(call)->queue,
                0x1c, 0);
            timer_call_enqueue_deadline_unlocked_async1++;
#endif
            call->async_dequeue = FALSE;
            entry->queue = NULL;
        } else if (old_queue != queue) {
            timer_call_entry_dequeue(call);
#if TIMER_ASSERT
            timer_call_enqueue_deadline_unlocked_async2++;
#endif
        }
        if (old_queue == timer_longterm_queue)
            timer_longterm_dequeued_locked(call);
        if (old_queue != queue) {
            timer_queue_unlock(old_queue);
            timer_queue_lock_spin(queue);
        }
    } else {
        timer_queue_lock_spin(queue);
    }

    timer_call_entry_enqueue_deadline(call, queue, deadline);
    timer_queue_unlock(queue);
    simple_unlock(&call->lock);

    return (old_queue);
}
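One more property of this enqueue path is worth modeling: when a timer migrates between queues, the old queue lock is dropped before the new one is taken, so no thread ever holds two queue locks at once and queue-against-queue deadlock cannot arise; the per-call lock, held throughout, is what protects the node in the unlocked window. In the same user-space sketch (head insertion stands in for deadline-ordered insertion, and the sketch re-inserts even when the queue is unchanged, which the kernel avoids):

static void
tnode_move(struct tnode *t, struct tq *newq)
{
    struct tq *oldq;

    pthread_mutex_lock(&t->lock);
    oldq = t->queue;
    if (oldq != NULL) {
        pthread_mutex_lock(&oldq->lock);
        if (t->async_dequeue) {
            t->async_dequeue = false;   /* expiry already unlinked us */
        } else {
            t->prev->next = t->next;    /* unlink from the old queue */
            t->next->prev = t->prev;
        }
        if (oldq != newq) {
            /* hand-over: never hold two queue locks at once */
            pthread_mutex_unlock(&oldq->lock);
            pthread_mutex_lock(&newq->lock);
        }
    } else {
        pthread_mutex_lock(&newq->lock);
    }
    t->next = newq->head.next;          /* insque() at the head */
    t->prev = &newq->head;
    newq->head.next->prev = t;
    newq->head.next = t;
    t->queue = newq;
    pthread_mutex_unlock(&newq->lock);
    pthread_mutex_unlock(&t->lock);
}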
mpqueue_head_t *
timer_call_dequeue_unlocked(
    timer_call_t    call)
{
    call_entry_t    entry = TCE(call);
    mpqueue_head_t  *old_queue;

    DBG("timer_call_dequeue_unlocked(%p)\n", call);

    simple_lock(&call->lock);
    old_queue = MPQUEUE(entry->queue);
#if TIMER_ASSERT
    TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
        DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
        VM_KERNEL_UNSLIDE_OR_PERM(call),
        call->async_dequeue,
        VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
        0, 0);
#endif
    if (old_queue != NULL) {
        timer_queue_lock_spin(old_queue);
        if (call->async_dequeue) {
            /* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE_OR_PERM(call),
                call->async_dequeue,
                VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
                0x1c, 0);
            timer_call_dequeue_unlocked_async1++;
#endif
            call->async_dequeue = FALSE;
            entry->queue = NULL;
        } else {
            timer_call_entry_dequeue(call);
        }
        if (old_queue == timer_longterm_queue)
            timer_longterm_dequeued_locked(call);
        timer_queue_unlock(old_queue);
    }
    simple_unlock(&call->lock);
    return (old_queue);
}
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
    timer_call_t    call,
    mpqueue_head_t  *queue,
    uint64_t        deadline)
{
    call_entry_t    entry = CE(call);
    mpqueue_head_t  *old_queue;

    DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

    simple_lock(&call->lock);
    old_queue = MPQUEUE(entry->queue);
    if (old_queue != NULL) {
        timer_call_lock_spin(old_queue);
        if (call->async_dequeue) {
            /* collision (1c): null queue pointer and reset flag */
            call->async_dequeue = FALSE;
            entry->queue = NULL;
#if TIMER_ASSERT
            timer_call_enqueue_deadline_unlocked_async1++;
#endif
        } else if (old_queue != queue) {
            (void)remque(qe(entry));
            entry->queue = NULL;
#if TIMER_ASSERT
            timer_call_enqueue_deadline_unlocked_async2++;
#endif
        }
        if (old_queue != queue) {
            timer_call_unlock(old_queue);
            timer_call_lock_spin(queue);
        }
    } else {
        timer_call_lock_spin(queue);
    }

    timer_call_entry_enqueue_deadline(call, queue, deadline);
    timer_call_unlock(queue);
    simple_unlock(&call->lock);

    return (old_queue);
}
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
    timer_call_t    entry,
    mpqueue_head_t  *queue,
    uint64_t        deadline)
{
    mpqueue_head_t  *old_queue = MPQUEUE(CE(entry)->queue);

    call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

    /* For efficiency, track the earliest soft deadline on the queue,
     * so that fuzzy decisions can be made without lock acquisitions.
     */
    queue->earliest_soft_deadline =
        ((timer_call_t)queue_first(&queue->head))->soft_deadline;

    if (old_queue)
        old_queue->count--;
    queue->count++;

    return old_queue;
}
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
    timer_call_t    entry)
{
    mpqueue_head_t  *old_queue = MPQUEUE(CE(entry)->queue);

    if (!hw_lock_held((hw_lock_t)&entry->lock))
        panic("_call_entry_dequeue() "
            "entry %p is not locked\n", entry);
    /*
     * XXX The queue lock is actually a mutex in spin mode
     *     but there's no way to test for it being held
     *     so we pretend it's a spinlock!
     */
    if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
        panic("_call_entry_dequeue() "
            "queue %p is not locked\n", old_queue);

    call_entry_dequeue(CE(entry));

    return (old_queue);
}
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
    timer_call_t    entry,
    mpqueue_head_t  *queue,
    uint64_t        deadline)
{
    mpqueue_head_t  *old_queue = MPQUEUE(CE(entry)->queue);

    if (!hw_lock_held((hw_lock_t)&entry->lock))
        panic("_call_entry_enqueue_deadline() "
            "entry %p is not locked\n", entry);
    /* XXX More lock pretense: */
    if (!hw_lock_held((hw_lock_t)&queue->lock_data))
        panic("_call_entry_enqueue_deadline() "
            "queue %p is not locked\n", queue);
    if (old_queue != NULL && old_queue != queue)
        panic("_call_entry_enqueue_deadline() "
            "old_queue %p != queue", old_queue);

    call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

    return (old_queue);
}
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
    timer_call_t        call,
    mpqueue_head_t      *queue,
    uint64_t            deadline,
    uint64_t            soft_deadline,
    uint64_t            ttd,
    timer_call_param_t  param1,
    uint32_t            callout_flags)
{
    call_entry_t    entry = TCE(call);
    mpqueue_head_t  *old_queue;

    DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

    simple_lock(&call->lock);

    old_queue = MPQUEUE(entry->queue);

    if (old_queue != NULL) {
        timer_queue_lock_spin(old_queue);
        if (call->async_dequeue) {
            /* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE_OR_PERM(call),
                call->async_dequeue,
                VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
                0x1c, 0);
            timer_call_enqueue_deadline_unlocked_async1++;
#endif
            call->async_dequeue = FALSE;
            entry->queue = NULL;
        } else if (old_queue != queue) {
            timer_call_entry_dequeue(call);
#if TIMER_ASSERT
            timer_call_enqueue_deadline_unlocked_async2++;
#endif
        }

        if (old_queue == timer_longterm_queue)
            timer_longterm_dequeued_locked(call);

        if (old_queue != queue) {
            timer_queue_unlock(old_queue);
            timer_queue_lock_spin(queue);
        }
    } else {
        timer_queue_lock_spin(queue);
    }

    call->soft_deadline = soft_deadline;
    call->flags = callout_flags;
    TCE(call)->param1 = param1;
    call->ttd = ttd;

    timer_call_entry_enqueue_deadline(call, queue, deadline);
    timer_queue_unlock(queue);
    simple_unlock(&call->lock);

    return (old_queue);
}
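This revision passes soft_deadline, ttd, param1, and the callout flags into the enqueue itself, so they are updated under the same call-lock/queue-lock critical section as the re-queueing, and the expiry path can never observe a new deadline paired with stale parameters. A sketch of a call site under stated assumptions (the helper's name and the leeway arithmetic are illustrative; the kernel's real entry point is timer_call_enter_internal(), whose details differ):

/* Illustrative caller (a sketch, not the kernel's actual code). The
 * requested deadline is the soft deadline; leeway pads out the hard
 * deadline that determines the timer's position in the queue. */
static void
timer_arm_sketch(timer_call_t call, uint64_t deadline, uint64_t leeway,
    timer_call_param_t param1, uint32_t flags)
{
    uint64_t        ctime = mach_absolute_time();
    uint64_t        soft = deadline;            /* earliest firing time */
    uint64_t        hard = deadline + leeway;   /* latest, after coalescing */
    mpqueue_head_t  *queue = timer_queue_assign(hard);

    (void) timer_call_enqueue_deadline_unlocked(call, queue, hard,
        soft, soft - ctime /* ttd */, param1, flags);
}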
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
    timer_call_t    entry)
{
    return MPQUEUE(call_entry_dequeue(CE(entry)));
}