/*
 * callout_halt:
 *
 *	Cancel a pending callout.  If in-flight, block until it completes.
 *	May not be called from a hard interrupt handler.  If the callout
 * 	can take locks, the caller of callout_halt() must not hold any of
 *	those locks, otherwise the two could deadlock.  If 'interlock' is
 *	non-NULL and we must wait for the callout to complete, it will be
 *	released and re-acquired before returning.
 */
bool
callout_halt(callout_t *cs, void *interlock)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	struct lwp *l;
	kmutex_t *lock, *relock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(!cpu_intr_p());

	lock = callout_lock(c);
	relock = NULL;

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	l = curlwp;
	for (;;) {
		cc = c->c_cpu;
		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
			break;
		if (interlock != NULL) {
			/*
			 * Avoid potential scheduler lock order problems by
			 * dropping the interlock without the callout lock
			 * held.
			 */
			mutex_spin_exit(lock);
			mutex_exit(interlock);
			relock = interlock;
			interlock = NULL;
		} else {
			/* XXX Better to do priority inheritance. */
			KASSERT(l->l_wchan == NULL);
			cc->cc_nwait++;
			cc->cc_ev_block.ev_count++;
			l->l_kpriority = true;
			sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
			    &sleep_syncobj);
			sleepq_block(0, false);
		}
		lock = callout_lock(c);
	}

	mutex_spin_exit(lock);
	if (__predict_false(relock != NULL))
		mutex_enter(relock);

	return expired;
}
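
/*
 * Illustrative sketch, not part of the original source: a typical
 * caller passes the mutex that the callout handler itself takes as
 * the interlock, so callout_halt() can drop it while waiting and
 * re-take it before returning.  The softc layout and all example_*
 * names are hypothetical; <sys/callout.h> and <sys/mutex.h> are
 * assumed to be included.
 */
struct example_softc {
	kmutex_t	sc_lock;	/* also taken by the tick handler */
	callout_t	sc_tick;	/* periodic tick callout */
	bool		sc_ticking;	/* true while ticks are wanted */
};

static void
example_detach(struct example_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	/* Returns with sc_lock held; it may be dropped while waiting. */
	(void)callout_halt(&sc->sc_tick, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);
	callout_destroy(&sc->sc_tick);
}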

/*
 * callout_bind:
 *
 *	Bind a callout so that it will only execute on one CPU.
 *	The callout must be stopped, and must be MPSAFE.
 *
 *	XXX Disabled for now until it is decided how to handle
 *	offlined CPUs.  We may want weak+strong binding.
 */
void
callout_bind(callout_t *cs, struct cpu_info *ci)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;

	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_active != c);
	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT((c->c_flags & CALLOUT_MPSAFE) != 0);

	lock = callout_lock(c);
	cc = ci->ci_data.cpu_callout;
	c->c_flags |= CALLOUT_BOUND;
	if (c->c_cpu != cc) {
		/*
		 * Assigning c_cpu effectively unlocks the callout
		 * structure, as we don't hold the new CPU's lock.
		 * Issue memory barrier to prevent accesses being
		 * reordered.
		 */
		membar_exit();
		c->c_cpu = cc;
	}
	mutex_spin_exit(lock);
}
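
/*
 * Illustrative sketch, not part of the original source: the contract
 * above requires the callout to be MP-safe and not yet scheduled when
 * it is bound.  Since the interface is noted as disabled, this only
 * shows the intended shape of a caller; the example_* names are
 * hypothetical and example_tick is sketched further below.
 */
static void	example_tick(void *);

static void
example_bind_tick(struct example_softc *sc, struct cpu_info *ci)
{

	callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_tick, example_tick, sc);
	callout_bind(&sc->sc_tick, ci);		/* before the first schedule */
	sc->sc_ticking = true;
	callout_schedule(&sc->sc_tick, hz);
}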

/*
 * callout_stop:
 *
 *	Try to cancel a pending callout.  It may be too late: the callout
 *	could be running on another CPU.  If called from interrupt context,
 *	the callout could already be in progress at a lower priority.
 */
bool
callout_stop(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	kmutex_t *lock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);

	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	cc = c->c_cpu;
	if (cc->cc_active == c) {
		/*
		 * This is for non-MPSAFE callouts only.  To synchronize
		 * effectively we must be called with kernel_lock held.
		 * It's also taken in callout_softclock.
		 */
		cc->cc_cancel = c;
	}

	mutex_spin_exit(lock);

	return expired;
}
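
/*
 * Illustrative sketch, not part of the original source: callout_stop()
 * only removes a pending expiry; a handler that has already been
 * dispatched may still run to completion.  A caller that goes on to
 * free resources must use callout_halt() instead; plain callout_stop()
 * is only appropriate when one more invocation of the handler is
 * harmless.  The example_* names are hypothetical.
 */
static void
example_pause_tick(struct example_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_ticking = false;		/* handler checks this before re-arming */
	(void)callout_stop(&sc->sc_tick);
	mutex_exit(&sc->sc_lock);
}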

/*
 * callout_schedule:
 *
 *	Schedule a callout to run.  The function and argument must
 *	already be set in the callout structure.
 */
void
callout_schedule(callout_t *cs, int to_ticks)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	callout_schedule_locked(c, lock, to_ticks);
}

/*
 * callout_ack:
 *
 *	Acknowledge a callout: clear the INVOKING state so that
 *	callout_invoking() no longer reports it as being serviced.
 */
void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(lock);
}

/*
 * callout_setfunc:
 *
 *	Set the function and argument for a callout without
 *	scheduling it.
 */
void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(lock);
}
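
/*
 * Illustrative sketch, not part of the original source: a periodic
 * callout typically has its function and argument installed once with
 * callout_setfunc() and is then (re)armed with the cheaper
 * callout_schedule(), including from the handler itself.  The
 * example_* names are hypothetical.
 */
static void
example_tick(void *arg)
{
	struct example_softc *sc = arg;

	mutex_enter(&sc->sc_lock);
	/* ... periodic work ... */
	if (sc->sc_ticking)
		callout_schedule(&sc->sc_tick, hz);	/* re-arm in one second */
	mutex_exit(&sc->sc_lock);
}

static void
example_start_tick(struct example_softc *sc)
{

	callout_setfunc(&sc->sc_tick, example_tick, sc);
	mutex_enter(&sc->sc_lock);
	sc->sc_ticking = true;
	callout_schedule(&sc->sc_tick, hz);
	mutex_exit(&sc->sc_lock);
}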

/*
 * callout_reset:
 *
 *	Reset a callout structure with a new function and argument, and
 *	schedule it to run.
 */
void
callout_reset(callout_t *cs, int to_ticks, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	callout_schedule_locked(c, lock, to_ticks);
}
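
/*
 * Illustrative sketch, not part of the original source: callout_reset()
 * combines callout_setfunc() and callout_schedule() in one call, which
 * suits one-shot timeouts where the function or argument can change
 * each time the callout is armed.  The example_* names and the five
 * second period are hypothetical.
 */
static void
example_arm_timeout(struct example_softc *sc, void (*expire)(void *))
{

	/* (Re)arm a one-shot timeout; any pending expiry is rescheduled. */
	callout_reset(&sc->sc_tick, 5 * hz, expire, sc);
}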

/*
 * callout_invoking:
 *
 *	Return true if the INVOKING flag is set on the callout, i.e.
 *	it has not yet been acknowledged with callout_ack().
 */
bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(lock);

	return rv;
}

/*
 * callout_active:
 *
 *	Return true if the callout is pending or has fired (and has
 *	not been stopped since).
 */
bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(lock);

	return rv;
}
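
/*
 * Illustrative sketch, not part of the original source: the INVOKING
 * handshake.  callout_stop() can lose the race with an expiry that has
 * already been dispatched, so a caller can wait for the handler to
 * reach callout_ack() before relying on the cancellation.  The
 * example_* names and the use of kpause() are hypothetical; in most
 * code callout_halt() is the simpler choice.
 */
static void
example_acking_tick(void *arg)
{
	struct example_softc *sc = arg;

	mutex_enter(&sc->sc_lock);
	callout_ack(&sc->sc_tick);	/* clear CALLOUT_INVOKING */
	/* ... periodic work ... */
	mutex_exit(&sc->sc_lock);
}

static void
example_wait_acked(struct example_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	(void)callout_stop(&sc->sc_tick);
	mutex_exit(&sc->sc_lock);

	/* Wait until the handler, if dispatched, has acknowledged. */
	while (callout_invoking(&sc->sc_tick))
		(void)kpause("coack", false, 1, NULL);
}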

Example #10
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = BSD_TAILQ_NEXT(c, c_links.tqe);
		}
		BSD_TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}

	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}
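
/*
 * Illustrative sketch, not part of the original source: typical use of
 * the interface described in the comment block above.  The ex_* names,
 * the one second period, and the use of the plain callout_init() and
 * callout_reset() wrappers are assumptions about the surrounding port.
 */
struct ex_softc {
	struct callout	ex_callout;
	int		ex_running;
};

static void
example_timer(void *arg)
{
	struct ex_softc *sc = arg;

	if (!sc->ex_running)
		return;
	/* ... periodic work ... */
	callout_reset(&sc->ex_callout, hz, example_timer, sc);
}

static void
example_start(struct ex_softc *sc)
{

	callout_init(&sc->ex_callout, 1);	/* 1: handler runs MP-safe */
	sc->ex_running = 1;
	callout_reset(&sc->ex_callout, hz, example_timer, sc);
}

static void
example_stop(struct ex_softc *sc)
{

	sc->ex_running = 0;
	callout_stop(&sc->ex_callout);	/* best effort; handler may be running */
}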