/*
 * Lock the per-CPU callout structure that currently owns the callout.
 * The callout may migrate to another CPU between the unlocked read of
 * c_cpu and the lock acquisition, so re-check the binding under the
 * lock and retry until it is stable.
 */
static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}
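/*
 * For reference: callout_lock() relies on the CC_CPU()/CC_LOCK()
 * macros, which are not shown in this excerpt.  A minimal sketch of
 * the usual FreeBSD-style definitions (an assumption, not this file's
 * actual code):
 *
 *	struct callout_cpu cc_cpu[MAXCPU];
 *	#define CC_CPU(cpu)	(&cc_cpu[(cpu)])
 *	#define CC_SELF()	CC_CPU(curcpu)
 *	#define CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
 *	#define CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
 */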
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c)
			cc->cc_next = BSD_TAILQ_NEXT(c, c_links.tqe);
		BSD_TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask],
		    c, c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}
	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}
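/*
 * callout_cc_add() is called above but not shown in this excerpt.  A
 * minimal sketch, modeled on the stock FreeBSD implementation and
 * consistent with the flag handling and callwheel indexing above
 * (treat it as an assumption, not this file's actual code):
 */
#if 0	/* illustrative sketch only */
static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

	CC_LOCK_ASSERT(cc);

	/* A non-positive timeout still fires, on the next tick. */
	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = func;
	/* Hash the absolute expiry tick into a callwheel bucket. */
	c->c_time = cc->cc_ticks + to_ticks;
	BSD_TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
}
#endif

/*
 * Typical client usage of the interface described above (illustrative
 * only; "softc", "watchdog_timeout" and the mutex are hypothetical):
 *
 *	struct softc {
 *		struct mtx	sc_mtx;
 *		struct callout	sc_callout;
 *	};
 *
 *	static void
 *	watchdog_timeout(void *arg)
 *	{
 *		struct softc *sc = arg;
 *
 *		callout_deactivate(&sc->sc_callout);	// mark serviced
 *		...
 *	}
 *
 *	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
 *	callout_reset(&sc->sc_callout, hz, watchdog_timeout, sc);
 *	...
 *	callout_stop(&sc->sc_callout);	// cancel if still pending
 */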
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = BSD_TAILQ_FIRST(bucket);
		while (c != NULL) {
			depth++;
			if (c->c_time != curticks) {
				c = BSD_TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				cc->cc_next = BSD_TAILQ_NEXT(c, c_links.tqe);
				BSD_TAILQ_REMOVE(bucket, c, c_links.tqe);
				softclock_call_cc(c, cc, &mpcalls,
				    &lockcalls, &gcalls);
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}
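/*
 * For context: the cc_ticks/cc_softticks pair consumed by softclock()
 * is advanced from the hard clock interrupt.  A sketch modeled on
 * FreeBSD's callout_tick() (an assumption; neither it nor cc_cookie is
 * part of this excerpt) shows the producer side: cc_ticks counts
 * hard-clock ticks, cc_softticks is advanced past empty buckets, and
 * the softclock SWI is scheduled only when a bucket has work, which is
 * why the loop above runs while "cc_softticks - 1 != cc_ticks".
 */
#if 0	/* illustrative sketch only */
void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock, bucket;

	need_softclock = 0;
	cc = CC_SELF();
	CC_LOCK(cc);
	cc->cc_ticks++;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!BSD_TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	CC_UNLOCK(cc);
	/* Kick the softclock SWI outside the lock. */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}
#endif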
static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
    int *lockcalls, int *gcalls)
{
	void (*c_func)(void *);
	void *c_arg;
	int c_flags;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_curr = c;
	cc->cc_cancel = 0;
	CC_UNLOCK(cc);
	/*
	 * This version runs every callout as mpsafe: the stock
	 * lock-class handling is stripped, so only *mpcalls is ever
	 * incremented and lockcalls/gcalls stay zero.
	 */
	(*mpcalls)++;
	CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
	    c, c_func, c_arg);
#ifdef DIAGNOSTIC
	binuptime(&bt1);
#endif
	c_func(c_arg);
#ifdef DIAGNOSTIC
	binuptime(&bt2);
	bintime_sub(&bt2, &bt1);
	if (bt2.frac > maxdt) {
		if (lastfunc != c_func || bt2.frac > maxdt * 2) {
			bintime2timespec(&bt2, &ts2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = bt2.frac;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	CC_LOCK(cc);
	KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
	cc->cc_curr = NULL;

	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_flags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_flags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}
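/*
 * Worked example for the maxdt threshold above: a struct bintime
 * fraction counts units of 1/2^64 second, so an interval of s seconds
 * has frac = s * 2^64.  For the 2 ms threshold:
 *
 *	0.002 * 2^64 = 0.002 * 18446744073709551616
 *	             ~= 36893488147419103
 *
 * which, rounded down, is the 36893488147419102LL initializer used
 * for maxdt.
 */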
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks != ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}
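/*
 * The cc_waiting handoff above pairs with a waiter in callout_drain()
 * (_callout_stop_safe() in stock FreeBSD).  A much-simplified sketch
 * of the waiting side (an assumption; the real code also handles lock
 * classes, priority propagation, and migration), using a hypothetical
 * name to avoid clashing with the real API:
 */
#if 0	/* illustrative sketch only */
int
callout_drain_sketch(struct callout *c)
{
	struct callout_cpu *cc;

	cc = callout_lock(c);
	while (cc->cc_curr == c) {
		/* Handler is running; sleep until it signals completion. */
		cc->cc_waiting = 1;
		msleep_spin(&cc->cc_waiting, &cc->cc_lock, "codrain", 0);
	}
	CC_UNLOCK(cc);
	return (0);
}
#endif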