Example #1
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				CE(call)->queue,
				0x2b, 0);
#endif
			timer_queue_unlock(queue);
			continue;
		}

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		/* and queue it on new */
		new_queue = timer_queue_assign(CE(call)->deadline);
		timer_queue_lock_spin(new_queue);
		timer_call_entry_enqueue_deadline(
			call, new_queue, CE(call)->deadline);
		timer_queue_unlock(new_queue);

		simple_unlock(&call->lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
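The comma operator in the while condition re-acquires the queue lock before every emptiness test, so the test is always evaluated under the lock while the body is still free to drop it for per-entry work. A minimal user-space sketch of the same idiom, using pthreads in place of the kernel's spinlock and queue primitives (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int entries = 3;			/* stand-in for the timer queue */

static void
drain(void)
{
	/* Comma operator: take the lock, then evaluate the emptiness
	 * test under it, exactly as timer_queue_shutdown() does. */
	while (pthread_mutex_lock(&queue_lock), entries > 0) {
		int left = --entries;		   /* "dequeue" one entry under the lock */
		pthread_mutex_unlock(&queue_lock); /* drop the lock for per-entry work */
		printf("processed one entry, %d left\n", left);
	}
	/* The loop exits with the lock still held, mirroring the
	 * trailing timer_queue_unlock() above. */
	pthread_mutex_unlock(&queue_lock);
}

int
main(void)
{
	drain();
	return 0;
}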
Example #2
static boolean_t 
timer_call_enter_internal(
	timer_call_t 		call,
	timer_call_param_t	param1,
	uint64_t 		deadline,
	uint32_t 		flags)
{
	mpqueue_head_t		*queue;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t 		slop = 0;

	s = splclock();

	call->soft_deadline = deadline;
	call->flags = flags;

	if ((flags & TIMER_CALL_CRITICAL) == 0 &&
	     mach_timer_coalescing_enabled) {
		slop = timer_call_slop(deadline);
		deadline += slop;
	}

#if	defined(__i386__) || defined(__x86_64__)	
	uint64_t ctime = mach_absolute_time();
	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		past_deadline_timers++;
		past_deadline_deltas += delta;
		if (delta > past_deadline_longest)
			past_deadline_longest = delta;
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		deadline = ctime + past_deadline_timer_adjustment;
		call->soft_deadline = deadline;
	}
#endif
	queue = timer_queue_assign(deadline);

	old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);

	CE(call)->param1 = param1;

	splx(s);

	return (old_queue != NULL);
}
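Note that `deadline += slop` here can wrap past UINT64_MAX when the caller passes a deadline near the end of the representable range; Example #3 below adds a saturating guard for exactly this case. Restated as a standalone helper (the function name is illustrative):

#include <stdint.h>

/* Saturating add, equivalent to the guard Example #3 places around
 * "deadline += slop": clamp to UINT64_MAX rather than wrapping. */
static inline uint64_t
deadline_add_slop(uint64_t deadline, uint64_t slop)
{
	if (UINT64_MAX - deadline <= slop)
		return UINT64_MAX;
	return deadline + slop;
}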
Example #3
static boolean_t 
timer_call_enter_internal(
	timer_call_t 		call,
	timer_call_param_t	param1,
	uint64_t 		deadline,
	uint64_t 		leeway,
	uint32_t 		flags,
	boolean_t		ratelimited)
{
	mpqueue_head_t		*queue = NULL;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t 		slop;
	uint32_t		urgency;

	s = splclock();

	call->soft_deadline = deadline;
	call->flags = flags;

	uint64_t ctime = mach_absolute_time();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_ENTER | DBG_FUNC_START,
		call,
		param1, deadline, flags, 0);

	urgency = (flags & TIMER_CALL_URGENCY_MASK);

	boolean_t slop_ratelimited = FALSE;
	slop = timer_call_slop(deadline, ctime, urgency, current_thread(), &slop_ratelimited);

	if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop)
		slop = leeway;

	if (UINT64_MAX - deadline <= slop) {
		deadline = UINT64_MAX;
	} else {
		deadline += slop;
	}

	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		past_deadline_timers++;
		past_deadline_deltas += delta;
		if (delta > past_deadline_longest)
			past_deadline_longest = delta;
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		deadline = ctime + past_deadline_timer_adjustment;
		call->soft_deadline = deadline;
	}

	/* Bit 0 of the "soft" deadline indicates that
	 * this particular timer call requires rate-limiting
	 * behaviour. Maintain the invariant deadline >= soft_deadline by
	 * setting bit 0 of "deadline".
	 */

	deadline |= 1;
	if (ratelimited || slop_ratelimited) {
		call->soft_deadline |= 1ULL;
	} else {
		call->soft_deadline &= ~0x1ULL;
	}

	call->ttd = call->soft_deadline - ctime;

#if CONFIG_DTRACE
	DTRACE_TMR7(callout__create, timer_call_func_t, CE(call)->func,
	    timer_call_param_t, CE(call)->param0, uint32_t, call->flags,
	    (deadline - call->soft_deadline),
	    (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif

	if (!ratelimited && !slop_ratelimited) {
		queue = timer_longterm_enqueue_unlocked(call, ctime, deadline, &old_queue);
	}

	if (queue == NULL) {
		queue = timer_queue_assign(deadline);
		old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);
	}

	CE(call)->param1 = param1;
#if TIMER_TRACE
	CE(call)->entry_time = ctime;
#endif

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_ENTER | DBG_FUNC_END,
		call,
		(old_queue != NULL), call->soft_deadline, queue->count, 0);

	splx(s);

	return (old_queue != NULL);
}
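Example #3 encodes the rate-limited attribute in bit 0 of the soft deadline and unconditionally sets bit 0 of the hard deadline, so that deadline >= soft_deadline still holds after tagging. A standalone sketch of that encode/test convention (helper names are illustrative, not from the source):

#include <stdbool.h>
#include <stdint.h>

/* Bit 0 of the soft deadline marks a rate-limited timer; bit 0 of the
 * hard deadline is always set so deadline >= soft_deadline survives
 * the tagging, as the comment in Example #3 explains. */
static inline uint64_t
tag_soft_deadline(uint64_t soft_deadline, bool ratelimited)
{
	return ratelimited ? (soft_deadline | 1ULL) : (soft_deadline & ~1ULL);
}

static inline bool
soft_deadline_is_ratelimited(uint64_t soft_deadline)
{
	return (soft_deadline & 1ULL) != 0;
}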
Example #4
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));

		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE_OR_PERM(call),
				call->async_dequeue,
				VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
				0x2b, 0);
#endif
			timer_queue_unlock(queue);
			continue;
		}

		boolean_t call_local = ((call->flags & TIMER_CALL_LOCAL) != 0);

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		if (call_local == FALSE) {
			/* and queue it on new, discarding LOCAL timers */
			new_queue = timer_queue_assign(TCE(call)->deadline);
			timer_queue_lock_spin(new_queue);
			timer_call_entry_enqueue_deadline(
				call, new_queue, TCE(call)->deadline);
			timer_queue_unlock(new_queue);
		} else {
			timer_queue_shutdown_discarded++;
		}

		/* The only lingering LOCAL timer should be this thread's
		 * quantum expiration timer.
		 */
		assert((call_local == FALSE) ||
		    (TCE(call)->func == thread_quantum_expire));

		simple_unlock(&call->lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
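The `simple_lock_try()` branch handles the lock-order inversion the source comment labels case (2b): the shutdown loop already holds the queue lock and wants the call lock, so a blocking acquire could deadlock against a path that holds the call lock and is waiting for the queue lock. A minimal user-space sketch of the trylock-or-defer pattern, with pthread stand-ins (all names illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
static bool async_dequeue;	/* mirrors call->async_dequeue */

/* Called with queue_lock held. Returns true if the entry was handled
 * inline; false if the call lock was contended and the entry was only
 * flagged, leaving the real dequeue to the lock's owner (case 2b). */
static bool
try_handle_entry(void)
{
	if (pthread_mutex_trylock(&call_lock) != 0) {
		/* Don't block while holding queue_lock: that could
		 * deadlock against the opposite acquisition order. */
		async_dequeue = true;
		return false;
	}
	/* ... dequeue, then migrate or discard as in the loop above ... */
	pthread_mutex_unlock(&call_lock);
	return true;
}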
Example #5
static boolean_t 
timer_call_enter_internal(
	timer_call_t 		call,
	timer_call_param_t	param1,
	uint64_t 		deadline,
	uint64_t 		leeway,
	uint32_t 		flags,
	boolean_t		ratelimited)
{
	mpqueue_head_t		*queue = NULL;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t 		slop;
	uint32_t		urgency;
	uint64_t		sdeadline, ttd;

	s = splclock();

	sdeadline = deadline;
	uint64_t ctime = mach_absolute_time();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_ENTER | DBG_FUNC_START,
		VM_KERNEL_UNSLIDE_OR_PERM(call),
		VM_KERNEL_UNSLIDE_OR_PERM(param1), deadline, flags, 0);

	urgency = (flags & TIMER_CALL_URGENCY_MASK);

	boolean_t slop_ratelimited = FALSE;
	slop = timer_call_slop(deadline, ctime, urgency, current_thread(), &slop_ratelimited);

	if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop)
		slop = leeway;

	if (UINT64_MAX - deadline <= slop) {
		deadline = UINT64_MAX;
	} else {
		deadline += slop;
	}

	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		past_deadline_timers++;
		past_deadline_deltas += delta;
		if (delta > past_deadline_longest)
			past_deadline_longest = delta;
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		deadline = ctime + past_deadline_timer_adjustment;
		sdeadline = deadline;
	}

	if (ratelimited || slop_ratelimited) {
		flags |= TIMER_CALL_RATELIMITED;
	} else {
		flags &= ~TIMER_CALL_RATELIMITED;
	}

	ttd = sdeadline - ctime;
#if CONFIG_DTRACE
	DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func,
	    timer_call_param_t, TCE(call)->param0, uint32_t, flags,
	    (deadline - sdeadline),
	    (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call);
#endif

	/* Program timer callout parameters under the appropriate per-CPU or
	 * longterm queue lock. The callout may have been previously enqueued
	 * and in-flight on this or another timer queue.
	 */
	if (!ratelimited && !slop_ratelimited) {
		queue = timer_longterm_enqueue_unlocked(call, ctime, deadline, &old_queue, sdeadline, ttd, param1, flags);
	}

	if (queue == NULL) {
		queue = timer_queue_assign(deadline);
		old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline, sdeadline, ttd, param1, flags);
	}

#if TIMER_TRACE
	TCE(call)->entry_time = ctime;
#endif

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_ENTER | DBG_FUNC_END,
		VM_KERNEL_UNSLIDE_OR_PERM(call),
		(old_queue != NULL), deadline, queue->count, 0);

	splx(s);

	return (old_queue != NULL);
}
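The DTRACE_TMR7 probe above passes the 64-bit time-to-deadline as separate high and low 32-bit arguments. The split and its inverse, as a trivial standalone sketch (helper names are illustrative):

#include <stdint.h>

/* Split a 64-bit time-to-deadline into the two 32-bit probe arguments
 * used by the DTRACE_TMR7 call above, and recombine them. */
static inline void
ttd_split(uint64_t ttd, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(ttd >> 32);
	*lo = (uint32_t)(ttd & 0xFFFFFFFF);
}

static inline uint64_t
ttd_join(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}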