Example No. 1
/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec tbef, taft;
	struct bintime bt, bt2;

	cpu_tick_calibrate(1);
	nanotime(&tbef);
	timespec2bintime(ts, &bt);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	nanotime(&taft);
	if (timestepwarnings) {
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
	cpu_tick_calibrate(1);
}
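The step works because the timecounter code maintains the invariant UTC = boottimebin + binuptime(): rewriting the boot-time estimate moves the wall clock without disturbing the monotonic uptime. A minimal userland sketch of that arithmetic, assuming whole seconds in place of struct bintime (all values made up):

#include <stdio.h>

int
main(void)
{
	long long uptime = 5000;		/* binuptime(): seconds since boot */
	long long boottime = 1700000000;	/* old boot-time estimate (UTC) */
	long long new_utc = 1700005100;		/* the time we step to */

	/* Mirrors bt = ts - binuptime(): the new boot-time estimate. */
	long long new_boottime = new_utc - uptime;

	printf("UTC stepped from %lld to %lld (boottime %lld -> %lld)\n",
	    boottime + uptime, new_boottime + uptime,
	    boottime, new_boottime);
	return (0);
}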
Example No. 2
/*
 * Feed-forward clock absolute time. This should be the preferred way to read
 * the feed-forward clock for "wall-clock" type time. The flags allow the
 * caller to compose various flavours of absolute time (e.g. with or without
 * leap seconds taken into account). If valid pointers are provided, the
 * ffcounter value and an upper bound on the clock error associated with the
 * bintime are also returned.
 * NOTE: use ffclock_convert_abs() to defer the conversion of an ffcounter
 * value read earlier.
 */
void
ffclock_abstime(ffcounter *ffcount, struct bintime *bt,
    struct bintime *error_bound, uint32_t flags)
{
	struct ffclock_estimate cest;
	ffcounter ffc;
	ffcounter update_ffcount;
	ffcounter ffdelta_error;

	/* Get counter and corresponding time. */
	if ((flags & FFCLOCK_FAST) == FFCLOCK_FAST)
		ffclock_last_tick(&ffc, bt, flags);
	else {
		ffclock_read_counter(&ffc);
		ffclock_convert_abs(ffc, bt, flags);
	}

	/* Current ffclock estimate, use update_ffcount as generation number. */
	do {
		update_ffcount = ffclock_estimate.update_ffcount;
		bcopy(&ffclock_estimate, &cest, sizeof(struct ffclock_estimate));
	} while (update_ffcount != ffclock_estimate.update_ffcount);

	/*
	 * Leap second adjustment. Total as seen by synchronisation algorithm
	 * since it started. cest.leapsec_next is the ffcounter prediction of
	 * when the next leapsecond occurs.
	 */
	if ((flags & FFCLOCK_LEAPSEC) == FFCLOCK_LEAPSEC) {
		bt->sec -= cest.leapsec_total;
		if (ffc > cest.leapsec_next)
			bt->sec -= cest.leapsec;
	}

	/* Boot time adjustment, for uptime/monotonic clocks. */
	if ((flags & FFCLOCK_UPTIME) == FFCLOCK_UPTIME) {
		bintime_sub(bt, &ffclock_boottime);
	}

	/* Compute error bound if a valid pointer has been passed. */
	if (error_bound) {
		ffdelta_error = ffc - cest.update_ffcount;
		ffclock_convert_diff(ffdelta_error, error_bound);
		/* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s] */
		bintime_mul(error_bound, cest.errb_rate *
		    (uint64_t)18446744073709LL);
		/* 18446744073 = int(2^64 / 1e9), since err_abs in [ns] */
		bintime_addx(error_bound, cest.errb_abs *
		    (uint64_t)18446744073LL);
	}

	if (ffcount)
		*ffcount = ffc;
}
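Two details above deserve a closer look. First, the do/while around bcopy() is a lock-free snapshot read: update_ffcount doubles as a generation number, and the reader retries whenever an update races with the copy. A minimal userland sketch of the same pattern, under hypothetical names (the real kernel additionally depends on the writer's store ordering):

#include <stdint.h>
#include <string.h>

struct estimate {
	uint64_t update_gen;	/* bumped by the writer on every update */
	double period;		/* example payload */
};

static volatile struct estimate g_est;

/* Reader: retry until a copy with a stable generation is obtained. */
static struct estimate
read_estimate(void)
{
	struct estimate snap;
	uint64_t gen;

	do {
		gen = g_est.update_gen;
		memcpy(&snap, (const void *)&g_est, sizeof(snap));
	} while (gen != g_est.update_gen);
	return (snap);
}

int
main(void)
{
	struct estimate e;

	g_est.update_gen = 1;
	g_est.period = 1e-9;
	e = read_estimate();
	return (e.update_gen == 1 ? 0 : 1);
}

Second, the magic constants in the error-bound block are 2^64-scale conversion factors: a bintime fraction counts units of 2^-64 s, so a rate in ps/s scales by int(2^64/1e12) and an absolute error in ns by int(2^64/1e9). A quick round-trip check of the nanosecond factor (values made up):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t ns2frac = 18446744073ULL;	/* int(2^64 / 1e9) */
	uint64_t frac = 500 * ns2frac;		/* 500 ns as a 2^-64 s fraction */

	/* Convert back to nanoseconds via long double. */
	long double ns = (long double)frac * 1e9L / 18446744073709551616.0L;
	printf("round trip: %.3Lf ns\n", ns);	/* ~500.000 */
	return (0);
}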
Example No. 3
static void
cpu_tick_calibrate(int reset)
{
	static uint64_t c_last;
	uint64_t c_this, c_delta;
	static struct bintime  t_last;
	struct bintime t_this, t_delta;
	uint32_t divi;

	if (reset) {
		/* The clock was stepped, abort & reset */
		t_last.sec = 0;
		return;
	}

	/* we don't calibrate fixed rate cputicks */
	if (!cpu_tick_variable)
		return;

	getbinuptime(&t_this);
	c_this = cpu_ticks();
	if (t_last.sec != 0) {
		c_delta = c_this - c_last;
		t_delta = t_this;
		bintime_sub(&t_delta, &t_last);
		/*
		 * Headroom:
		 * 	2^(64-20) / 16[s] =
		 * 	2^(44) / 16[s] =
		 * 	17.592.186.044.416 / 16 =
		 * 	1.099.511.627.776 [Hz]
		 */
		divi = t_delta.sec << 20;
		divi |= t_delta.frac >> (64 - 20);
		c_delta <<= 20;
		c_delta /= divi;
		if (c_delta > cpu_tick_frequency) {
			if (0 && bootverbose)
				printf("cpu_tick increased to %ju Hz\n",
				    c_delta);
			cpu_tick_frequency = c_delta;
		}
	}
	c_last = c_this;
	t_last = t_this;
}
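The headroom comment describes the 20-bit fixed point used for the rate estimate: t_delta is reduced to units of 2^-20 s, c_delta is shifted up by 20 bits, and the quotient comes out in ticks per second. A userland sketch of the same arithmetic with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t c_delta = 2200000000ULL;	/* ticks counted in ~1.05 s */
	uint64_t t_sec = 1;			/* t_delta.sec */
	uint32_t t_frac_top = 52429;		/* top 20 bits of t_delta.frac, ~0.05 s */
	uint32_t divi;

	divi = (uint32_t)(t_sec << 20) | t_frac_top;	/* t_delta in 2^-20 s units */
	c_delta <<= 20;
	c_delta /= divi;
	printf("estimated frequency: %ju Hz\n", (uintmax_t)c_delta);	/* ~2.095 GHz */
	return (0);
}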
Example No. 4
/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %ld.%09ld to %ld.%09ld\n",
		    (long)ts2.tv_sec, ts2.tv_nsec,
		    (long)ts->tv_sec, ts->tv_nsec);
	}
}
Example No. 5
static void
compute_stats(struct ctl_lun_io_stats *cur_stats,
	      struct ctl_lun_io_stats *prev_stats, long double etime,
	      long double *mbsec, long double *kb_per_transfer,
	      long double *transfers_per_second, long double *ms_per_transfer,
	      long double *ms_per_dma, long double *dmas_per_second)
{
	uint64_t total_bytes = 0, total_operations = 0, total_dmas = 0;
	uint32_t port;
	struct bintime total_time_bt, total_dma_bt;
	struct timespec total_time_ts, total_dma_ts;
	int i;

	bzero(&total_time_bt, sizeof(total_time_bt));
	bzero(&total_dma_bt, sizeof(total_dma_bt));
	bzero(&total_time_ts, sizeof(total_time_ts));
	bzero(&total_dma_ts, sizeof(total_dma_ts));
	for (port = 0; port < CTL_MAX_PORTS; port++) {
		for (i = 0; i < CTL_STATS_NUM_TYPES; i++) {
			total_bytes += cur_stats->ports[port].bytes[i];
			total_operations +=
			    cur_stats->ports[port].operations[i];
			total_dmas += cur_stats->ports[port].num_dmas[i];
			bintime_add(&total_time_bt,
			    &cur_stats->ports[port].time[i]);
			bintime_add(&total_dma_bt,
			    &cur_stats->ports[port].dma_time[i]);
			if (prev_stats != NULL) {
				total_bytes -=
				    prev_stats->ports[port].bytes[i];
				total_operations -=
				    prev_stats->ports[port].operations[i];
				total_dmas -=
				    prev_stats->ports[port].num_dmas[i];
				bintime_sub(&total_time_bt,
				    &prev_stats->ports[port].time[i]);
				bintime_sub(&total_dma_bt,
				    &prev_stats->ports[port].dma_time[i]);
			}
		}
	}

	*mbsec = total_bytes;
	*mbsec /= 1024 * 1024;
	if (etime > 0.0)
		*mbsec /= etime;
	else
		*mbsec = 0;
	*kb_per_transfer = total_bytes;
	*kb_per_transfer /= 1024;
	if (total_operations > 0)
		*kb_per_transfer /= total_operations;
	else
		*kb_per_transfer = 0;
	*transfers_per_second = total_operations;
	*dmas_per_second = total_dmas;
	if (etime > 0.0) {
		*transfers_per_second /= etime;
		*dmas_per_second /= etime;
	} else {
		*transfers_per_second = 0;
		*dmas_per_second = 0;
	}

	bintime2timespec(&total_time_bt, &total_time_ts);
	bintime2timespec(&total_dma_bt, &total_dma_ts);
	if (total_operations > 0) {
		/*
		 * Convert the timespec to milliseconds.
		 */
		*ms_per_transfer = total_time_ts.tv_sec * 1000;
		*ms_per_transfer += total_time_ts.tv_nsec / 1000000;
		*ms_per_transfer /= total_operations;
	} else
		*ms_per_transfer = 0;

	if (total_dmas > 0) {
		/*
		 * Convert the timespec to milliseconds.
		 */
		*ms_per_dma = total_dma_ts.tv_sec * 1000;
		*ms_per_dma += total_dma_ts.tv_nsec / 1000000;
		*ms_per_dma /= total_dmas;
	} else
		*ms_per_dma = 0;
}
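compute_stats() reduces the accumulated bintime totals to milliseconds per operation. A self-contained sketch of that final conversion, with struct bintime and the conversion re-declared under hypothetical names (xtimespec, bintime2xtimespec) so it builds outside the kernel; the shift arithmetic follows the standard bintime2timespec(), and floating-point milliseconds are used for clarity:

#include <stdint.h>
#include <stdio.h>

/* Re-declared for illustration only; the real ones live in <sys/time.h>. */
struct bintime {
	int64_t sec;
	uint64_t frac;		/* units of 2^-64 seconds */
};

struct xtimespec {
	int64_t tv_sec;
	long tv_nsec;
};

static void
bintime2xtimespec(const struct bintime *bt, struct xtimespec *ts)
{
	ts->tv_sec = bt->sec;
	/* Scale the top 32 fraction bits by 1e9, keeping the high word. */
	ts->tv_nsec = (long)(((uint64_t)1000000000 *
	    (uint32_t)(bt->frac >> 32)) >> 32);
}

int
main(void)
{
	struct bintime total_time = { 3, 1ULL << 63 };	/* 3.5 s of I/O time */
	struct xtimespec ts;
	uint64_t total_operations = 700;
	long double ms_per_transfer;

	bintime2xtimespec(&total_time, &ts);
	ms_per_transfer = (ts.tv_sec * 1000.0L +
	    ts.tv_nsec / 1000000.0L) / total_operations;
	printf("%.3Lf ms per transfer\n", ms_per_transfer);	/* 5.000 */
	return (0);
}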
Example No. 6
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						mtxcalls++;
						CTR3(KTR_CALLOUT, "callout mtx"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
#ifdef MAXHE_TODO
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#else
				c_func(c_arg);
#endif // MAXHE_TODO
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
			skip:
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (callout_wait) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					callout_wait = 0;
					mtx_unlock_spin(&callout_lock);
					wakeup(&callout_wait);
					mtx_lock_spin(&callout_lock);
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
#ifdef MAXHE_TODO
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif // MAXHE_TODO
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
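softclock() walks a hashed timing wheel: a callout due at absolute tick c_time lives in bucket c_time & callwheelmask, so each tick only the matching bucket is scanned and entries whose c_time differs are skipped. A toy illustration of the hashing, with a made-up wheel size:

#include <stdio.h>

#define CALLWHEEL_SIZE	256			/* must be a power of 2 */
#define CALLWHEEL_MASK	(CALLWHEEL_SIZE - 1)

int
main(void)
{
	int c_time = 1000 + 300;	/* due 300 ticks after tick 1000 */

	printf("callout due at tick %d hashes to bucket %d\n",
	    c_time, c_time & CALLWHEEL_MASK);
	/* softclock at curticks == 1300 scans bucket 1300 & 255 == 20. */
	return (0);
}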
Example No. 7
static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
    int *lockcalls, int *gcalls)
{
	void (*c_func)(void *);
	void *c_arg;
	int c_flags;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_curr = c;
	cc->cc_cancel = 0;
	CC_UNLOCK(cc);
	{
		(*mpcalls)++;
		CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
		    c, c_func, c_arg);
	}
#ifdef DIAGNOSTIC
	binuptime(&bt1);
#endif
	c_func(c_arg);
#ifdef DIAGNOSTIC
	binuptime(&bt2);
	bintime_sub(&bt2, &bt1);
	if (bt2.frac > maxdt) {
		if (lastfunc != c_func || bt2.frac > maxdt * 2) {
			bintime2timespec(&bt2, &ts2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = bt2.frac;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);

	CC_LOCK(cc);
	KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
	cc->cc_curr = NULL;

	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_flags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_flags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}
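The DIAGNOSTIC threshold maxdt above is 2 ms expressed in bintime fraction units of 2^-64 s. A quick userland check of the constant:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t maxdt = 36893488147419102ULL;	/* the constant from above */
	long double secs = (long double)maxdt /
	    18446744073709551616.0L;		/* 2^64 */

	printf("maxdt = %.6Lf s\n", secs);	/* ~0.002000 */
	return (0);
}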
Example No. 8
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				curr_callout = c;
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
					CTR1(KTR_CALLOUT, "callout %p", c_func);
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
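The wakeup_ctr / wakeup_done_ctr pair above is a generation handshake: a drainer takes a cookie under the lock and sleeps until softclock() publishes that generation as done, so late arrivals never sleep forever. A hedged userland sketch of the same handshake, using POSIX primitives in place of the kernel cv/mutex (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;
static int wakeup_ctr, wakeup_done_ctr;

/* Callout side: publish the current generation as done, then broadcast. */
static void *
callout_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&wait_lock);
	wakeup_done_ctr = wakeup_ctr;
	pthread_cond_broadcast(&wait_cv);
	pthread_mutex_unlock(&wait_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t t;
	int cookie;

	pthread_mutex_lock(&wait_lock);
	cookie = ++wakeup_ctr;		/* take a cookie, then wait */
	pthread_create(&t, NULL, callout_side, NULL);
	while (wakeup_done_ctr < cookie)
		pthread_cond_wait(&wait_cv, &wait_lock);
	pthread_mutex_unlock(&wait_lock);
	pthread_join(t, NULL);
	printf("callout generation %d drained\n", cookie);
	return (0);
}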
Example No. 9
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks != ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}