Example #1
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
			"entry %p is not locked\n", entry);
	/* XXX More lock pretense:  */
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
			"queue %p is not locked\n", queue);
	if (old_queue != NULL && old_queue != queue)
		panic("_call_entry_enqueue_deadline() "
			"old_queue %p != queue", old_queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	/* For efficiency, track the earliest soft deadline on the queue,
	 * so that fuzzy decisions can be made without lock acquisitions.
	 */
	queue->earliest_soft_deadline = ((timer_call_t)queue_first(&queue->head))->soft_deadline;

	if (old_queue)
		old_queue->count--;
	queue->count++;

	return (old_queue);
}
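
The earliest_soft_deadline bookkeeping above exists so that other code can make quick go/no-go decisions without taking the queue lock. A minimal sketch of such a reader, under the same headers as the code above; the helper name is hypothetical, and only the earliest_soft_deadline field comes from the excerpt:

static __inline__ boolean_t
timer_queue_likely_ripe(mpqueue_head_t *queue, uint64_t now)
{
	/* Unlocked, deliberately "fuzzy" read: a stale value trades
	 * precision for skipping the lock acquisition, at worst
	 * triggering an unnecessary locked rescan. */
	return (now >= queue->earliest_soft_deadline);
}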
Example #2
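/* Variant without lock assertions or soft-deadline bookkeeping: a thin
 * wrapper that delegates entirely to the generic call_entry layer. */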
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t			entry,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	return MPQUEUE(call_entry_enqueue_deadline(CE(entry),
						   QUEUE(queue), deadline));
}
Example #3
/*
 *	_delayed_call_enqueue:
 *
 *	Place an entry on the delayed queue,
 *	after existing entries with an earlier
 *	(or identical) deadline.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}
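
For reference, a minimal sketch of the ordered insert the doc comment above describes, written against the osfmk queue primitives (kern/queue.h) and assuming, as the kernel does, that the queue_chain_t link is the first member of struct call_entry. It illustrates the placement policy of call_entry_enqueue_deadline(); it is not the kernel's actual body:

static void
deadline_sorted_insert(queue_t queue, call_entry_t entry, uint64_t deadline)
{
	queue_entry_t	cur = queue_first(queue);

	/* Skip past all entries with an earlier or identical deadline. */
	while (!queue_end(queue, cur) &&
	       ((call_entry_t)cur)->deadline <= deadline)
		cur = queue_next(cur);

	/* insque() links its first argument after its second, so this
	 * places the entry just before the first strictly-later entry
	 * (or at the tail if none exists). */
	insque((queue_entry_t)entry, queue_prev(cur));

	entry->deadline = deadline;
	entry->queue = queue;
}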
Example #4
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t			entry,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	/* For efficiency, track the earliest soft deadline on the queue,
	 * so that fuzzy decisions can be made without lock acquisitions.
	 */
	queue->earliest_soft_deadline = ((timer_call_t)queue_first(&queue->head))->soft_deadline;

	if (old_queue)
		old_queue->count--;
	queue->count++;

	return old_queue;
}
Example #5
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
			"entry %p is not locked\n", entry);
	/* XXX More lock pretense:  */
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
			"queue %p is not locked\n", queue);
	if (old_queue != NULL && old_queue != queue)
		panic("_call_entry_enqueue_deadline() "
			"old_queue %p != queue", old_queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	return (old_queue);
}
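
The asserting variants (#1 and #5) document the calling convention: the entry lock and the destination queue's lock must both already be held, and the entry may only be unqueued or already on that same queue. A hedged caller sketch, assuming the raw hw_lock_lock()/hw_lock_unlock() primitives stand in for whatever higher-level wrappers the real call sites use; real callers moving an entry between queues need more care than shown here:

static mpqueue_head_t *
timer_call_enqueue_locked(timer_call_t entry, mpqueue_head_t *queue,
			  uint64_t deadline)
{
	mpqueue_head_t	*old_queue;

	/* Take both locks, entry first, before touching the queues;
	 * the panics in the asserting variants enforce exactly this
	 * invariant. */
	hw_lock_lock((hw_lock_t)&entry->lock);
	hw_lock_lock((hw_lock_t)&queue->lock_data);

	old_queue = timer_call_entry_enqueue_deadline(entry, queue, deadline);

	hw_lock_unlock((hw_lock_t)&queue->lock_data);
	hw_lock_unlock((hw_lock_t)&entry->lock);

	return (old_queue);
}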