/*
 *	timer_call_entry_dequeue:
 *
 *	Remove a timer call entry from its queue and
 *	decrement that queue's entry count.
 *
 *	Returns the queue the entry was removed from.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	call_entry_dequeue(CE(entry));
	old_queue->count--;

	return (old_queue);
}
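/*
 * Note: CE() and MPQUEUE() are not defined in this excerpt. They are
 * assumed to be plain casting macros that convert between the timer_call
 * layer and the generic call_entry/queue layer it is built on, roughly
 * as follows (a sketch, not the verbatim definitions):
 */
#define	CE(x)		((call_entry_t)(x))	/* assumed: view a timer_call_t as its embedded call_entry */
#define	MPQUEUE(x)	((mpqueue_head_t *)(x))	/* assumed: view a generic queue head as an mpqueue head */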
/*
 *	_call_dequeue:
 *
 *	Remove an entry from a queue.
 *
 *	Returns TRUE if the entry was on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t	*old_queue;

	old_queue = call_entry_dequeue(call);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}
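/*
 * The wrappers in this section all delegate to call_entry_dequeue(),
 * which is not shown in this excerpt. A minimal sketch of what it is
 * assumed to do: unlink the entry from whatever queue it is on, clear
 * its queue pointer, and hand back the old queue (NULL if the entry
 * was not enqueued). The remque()/qe() helpers are the classic Mach
 * queue primitives; their use here is an assumption.
 */
static __inline__ queue_head_t *
call_entry_dequeue(
	call_entry_t		entry)
{
	queue_head_t	*old_queue = entry->queue;

	if (old_queue != NULL) {
		(void) remque(qe(entry));	/* unlink from the doubly linked queue */
		entry->queue = NULL;		/* mark the entry as idle */
	}

	return (old_queue);
}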
/*
 *	timer_call_entry_dequeue:
 *
 *	Debug variant: panics unless both the entry lock and the
 *	queue lock are held before the entry is removed.
 *
 *	Returns the queue the entry was removed from.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("timer_call_entry_dequeue() "
			"entry %p is not locked\n", entry);
	/*
	 * XXX The queue lock is actually a mutex in spin mode
	 *     but there's no way to test for it being held
	 *     so we pretend it's a spinlock!
	 */
	if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
		panic("timer_call_entry_dequeue() "
			"queue %p is not locked\n", old_queue);

	call_entry_dequeue(CE(entry));

	return (old_queue);
}
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	return MPQUEUE(call_entry_dequeue(CE(entry)));
}
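/*
 * For illustration only: the enqueue side of the counted variant above
 * is assumed to maintain the same per-queue count invariant that the
 * dequeue wrapper relies on when it decrements old_queue->count. The
 * names call_entry_enqueue_tail and QUEUE() are assumptions mirroring
 * the dequeue path, not taken from the code above.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_tail(
	timer_call_t		entry,
	mpqueue_head_t		*queue)
{
	mpqueue_head_t	*old_queue;

	old_queue = MPQUEUE(call_entry_enqueue_tail(CE(entry), QUEUE(queue)));

	if (old_queue != NULL)
		old_queue->count--;	/* moved off its previous queue */
	queue->count++;			/* now accounted for on the new queue */

	return (old_queue);
}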