Example #1
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				CE(call)->queue,
				0x2b, 0);
#endif
			timer_queue_unlock(queue);
			continue;
		}

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		/* and queue it on new */
		new_queue = timer_queue_assign(CE(call)->deadline);
		timer_queue_lock_spin(new_queue);
		timer_call_entry_enqueue_deadline(
			call, new_queue, CE(call)->deadline);
		timer_queue_unlock(new_queue);

		simple_unlock(&call->lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
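
The shutdown loop above leans on the comma operator in its while condition to re-take the queue lock at the top of every iteration, so the lock is never held across the per-entry requeue work. A minimal user-space sketch of that drain pattern, using pthreads and a hypothetical node type (illustrative, not xnu code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node	*next;
	int		payload;
};

static pthread_mutex_t	queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node	*queue_head;

static void
drain_queue(void)
{
	struct node	*n;

	/* lock, test, and (if non-empty) pop under the lock ... */
	while (pthread_mutex_lock(&queue_lock), queue_head != NULL) {
		n = queue_head;
		queue_head = n->next;
		pthread_mutex_unlock(&queue_lock);

		/* ... then do the per-item work with the lock dropped */
		printf("drained %d\n", n->payload);
		free(n);
	}
	/* pairs with the final lock taken for the failed emptiness test */
	pthread_mutex_unlock(&queue_lock);
}

int
main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->payload = i;
		n->next = queue_head;
		queue_head = n;
	}
	drain_queue();
	return 0;
}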
Example #2
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t 			call,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				CE(call)->queue,
				0x1c, 0);
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
			call->async_dequeue = FALSE;
			entry->queue = NULL;
		} else if (old_queue != queue) {
			timer_call_entry_dequeue(call);
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue == timer_longterm_queue)
			timer_longterm_dequeued_locked(call);
		if (old_queue != queue) {
			timer_queue_unlock(old_queue);
			timer_queue_lock_spin(queue);
		}
	} else {
		timer_queue_lock_spin(queue);
	}

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_queue_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}
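
The async_dequeue flag above is the handshake between the two possible lock orders: a path that holds only the queue lock (case 2b) unlinks the entry and raises the flag without touching the back-pointer, while this function, which takes the entry lock first (case 1c), later notices the flag, clears it, and nulls the back-pointer instead of unlinking a second time. A condensed sketch of both sides, with hypothetical user-space types:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct entry {
	struct entry	*prev, *next;	/* links within a queue  */
	struct queue	*queue;		/* back-pointer, or NULL */
	pthread_mutex_t	lock;		/* the per-entry lock    */
	bool		async_dequeued;
};

struct queue {
	struct entry	*head;
	pthread_mutex_t	lock;
};

/*
 * Side A (case 2b): queue lock held, entry lock unavailable.
 * Unlink the entry but leave entry->queue intact and raise the flag.
 */
static void
dequeue_async(struct queue *q, struct entry *e)
{
	if (e->prev != NULL)
		e->prev->next = e->next;
	else
		q->head = e->next;
	if (e->next != NULL)
		e->next->prev = e->prev;
	e->async_dequeued = true;	/* e->queue deliberately unchanged */
}

/*
 * Side B (case 1c): entry lock taken first, then its queue lock.
 * If the flag is up, the unlink already happened; just reconcile.
 */
static void
reconcile(struct entry *e)
{
	struct queue	*q = e->queue;

	pthread_mutex_lock(&q->lock);
	if (e->async_dequeued) {
		e->async_dequeued = false;
		e->queue = NULL;	/* safe now: both locks held */
	}
	pthread_mutex_unlock(&q->lock);
}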
Example #3
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t 			call,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): null queue pointer and reset flag */
			call->async_dequeue = FALSE;
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
		} else if (old_queue != queue) {
			(void)remque(qe(entry));
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue != queue) {
			timer_call_unlock(old_queue);
			timer_call_lock_spin(queue);
		}
	} else {
		timer_call_lock_spin(queue);
	}

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_call_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}
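
This older revision open-codes the dequeue with remque(), the classic O(1) unlink from a circular doubly linked queue. For reference, a stand-alone equivalent of that unlink; the qelem layout follows the traditional <search.h> interface, and the NULL poisoning is an illustrative addition:

#include <stddef.h>

struct qelem {
	struct qelem	*q_forw;
	struct qelem	*q_back;
};

/* Unlink e from its circular doubly linked queue in O(1). */
static void
remque_sketch(struct qelem *e)
{
	e->q_back->q_forw = e->q_forw;	/* predecessor now skips e */
	e->q_forw->q_back = e->q_back;	/* successor now skips e   */
	e->q_forw = e->q_back = NULL;	/* poison the stale links  */
}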
Example #4
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));

		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE_OR_PERM(call),
				call->async_dequeue,
				VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
				0x2b, 0);
#endif
			timer_queue_unlock(queue);
			continue;
		}

		boolean_t call_local = ((call->flags & TIMER_CALL_LOCAL) != 0);

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		if (call_local == FALSE) {
			/* and queue it on new, discarding LOCAL timers */
			new_queue = timer_queue_assign(TCE(call)->deadline);
			timer_queue_lock_spin(new_queue);
			timer_call_entry_enqueue_deadline(
				call, new_queue, TCE(call)->deadline);
			timer_queue_unlock(new_queue);
		} else {
			timer_queue_shutdown_discarded++;
		}

		/* The only lingering LOCAL timer should be this thread's
		 * quantum expiration timer.
		 */
		assert((call_local == FALSE) ||
		    (TCE(call)->func == thread_quantum_expire));

		simple_unlock(&call->lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
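
Relative to Example #1, the tracepoints here route pointers through VM_KERNEL_UNSLIDE_OR_PERM so raw slid kernel addresses never land in the trace buffer, and TIMER_CALL_LOCAL timers are discarded rather than requeued. A rough user-space analogue of the pointer-scrubbing idea; the XOR-with-secret scheme is an illustrative assumption, not xnu's actual transform:

#include <stdint.h>

/*
 * Hypothetical per-boot secret; a real kernel would derive this
 * from early random entropy rather than a compile-time constant.
 */
static const uintptr_t	trace_secret = (uintptr_t)0x9e3779b97f4a7c15ull;

/* Map a pointer to a log-stable token that hides the raw address. */
static uintptr_t
obfuscate_for_trace(const void *p)
{
	return (uintptr_t)p ^ trace_secret;
}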
Example #5
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t 			call,
	mpqueue_head_t			*queue,
	uint64_t			deadline,
	uint64_t			soft_deadline,
	uint64_t			ttd,
	timer_call_param_t		param1,
	uint32_t			callout_flags)
{
	call_entry_t	entry = TCE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);

	old_queue = MPQUEUE(entry->queue);

	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE_OR_PERM(call),
				call->async_dequeue,
				VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
				0x1c, 0);
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
			call->async_dequeue = FALSE;
			entry->queue = NULL;
		} else if (old_queue != queue) {
			timer_call_entry_dequeue(call);
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue == timer_longterm_queue)
			timer_longterm_dequeued_locked(call);
		if (old_queue != queue) {
			timer_queue_unlock(old_queue);
			timer_queue_lock_spin(queue);
		}
	} else {
		timer_queue_lock_spin(queue);
	}

	call->soft_deadline = soft_deadline;
	call->flags = callout_flags;
	TCE(call)->param1 = param1;
	call->ttd = ttd;

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_queue_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}
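
This revision also latches the callout's payload (soft deadline, flags, param1, time-to-deadline) while both the entry lock and the target queue lock are held, so no observer can see a half-updated timer. A hedged caller-side sketch of how the extra parameters might be derived when re-arming a timer that tolerates some leeway; the arithmetic is illustrative, not lifted from xnu, and it reuses the entry's existing param1 and flags:

/* Assumes the surrounding kernel definitions; interrupts disabled. */
static mpqueue_head_t *
rearm_with_leeway(timer_call_t call, mpqueue_head_t *queue,
    uint64_t now, uint64_t deadline, uint64_t leeway)
{
	uint64_t soft = deadline;		/* preferred fire point        */
	uint64_t hard = deadline + leeway;	/* latest tolerated fire point */
	uint64_t ttd  = (soft > now) ? soft - now : 0; /* time to deadline */

	return timer_call_enqueue_deadline_unlocked(call, queue, hard,
	    soft, ttd, TCE(call)->param1, call->flags);
}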
Example #6
/*
 * timer_queue_migrate() is called by etimer_queue_migrate()
 * to move timer requests from the local processor (queue_from)
 * to a target processor's queue (queue_to).
 */
int
timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
	timer_call_t	call;
	timer_call_t	head_to;
	int		timers_migrated = 0;

	DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);

	assert(!ml_get_interrupts_enabled());
	assert(queue_from != queue_to);

	if (serverperfmode) {
		/*
		 * if we're running a high end server
		 * avoid migrations... they add latency
		 * and don't save us power under typical
		 * server workloads
		 */
		return -4;
	}

	/*
	 * Take both local (from) and target (to) timer queue locks while
	 * moving the timers from the local queue to the target processor.
	 * We assume that the target is always the boot processor.
	 * But only move if all of the following is true:
	 *  - the target queue is non-empty
	 *  - the local queue is non-empty
	 *  - the local queue's first deadline is later than the target's
	 *  - the local queue contains no non-migrateable "local" call
	 * so that we need not have the target resync.
	 */

	timer_call_lock_spin(queue_to);

	head_to = TIMER_CALL(queue_first(&queue_to->head));
	if (queue_empty(&queue_to->head)) {
		timers_migrated = -1;
		goto abort1;
	}

	timer_call_lock_spin(queue_from);

	if (queue_empty(&queue_from->head)) {
		timers_migrated = -2;
		goto abort2;
	}

	call = TIMER_CALL(queue_first(&queue_from->head));
	if (CE(call)->deadline < CE(head_to)->deadline) {
		timers_migrated = 0;
		goto abort2;
	}

	/* perform scan for non-migratable timers */
	do {
		if (call->flags & TIMER_CALL_LOCAL) {
			timers_migrated = -3;
			goto abort2;
		}
		call = TIMER_CALL(queue_next(qe(call)));
	} while (!queue_end(&queue_from->head, qe(call)));

	/* migration loop itself -- both queues are locked */
	while (!queue_empty(&queue_from->head)) {
		call = TIMER_CALL(queue_first(&queue_from->head));
		if (!simple_lock_try(&call->lock)) {
			/* case (2b) lock order inversion, dequeue only */
			timer_queue_migrate_lock_skips++;
			(void) remque(qe(call));
			call->async_dequeue = TRUE;
			continue;
		}
		timer_call_entry_dequeue(call);
		timer_call_entry_enqueue_deadline(
			call, queue_to, CE(call)->deadline);
		timers_migrated++;
		simple_unlock(&call->lock);
	}

abort2:
	timer_call_unlock(queue_from);
abort1:
	timer_call_unlock(queue_to);

	return timers_migrated;
}
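
timer_queue_migrate() always locks the target queue before the source; since the comment notes the target is always the boot processor's queue, every migrator takes the same lock first and the order is globally consistent. A minimal pthread sketch of that discipline with hypothetical singly linked queues (the real code additionally keeps entries sorted by deadline):

#include <pthread.h>
#include <stddef.h>

struct mnode {
	struct mnode	*next;
};

struct mqueue {
	struct mnode	*head;
	pthread_mutex_t	lock;
};

/*
 * Move every node from `from` to `to`, target lock first. As long
 * as all callers migrate toward one common target, the fixed lock
 * order cannot deadlock.
 */
static int
migrate_all(struct mqueue *from, struct mqueue *to)
{
	int	moved = 0;

	pthread_mutex_lock(&to->lock);
	pthread_mutex_lock(&from->lock);

	while (from->head != NULL) {
		struct mnode *n = from->head;
		from->head = n->next;
		n->next = to->head;	/* prepend: order not preserved */
		to->head = n;
		moved++;
	}

	pthread_mutex_unlock(&from->lock);
	pthread_mutex_unlock(&to->lock);
	return (moved);
}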