Example #1
/*
 * Start or restart a timeout.  Installs the callout structure on the
 * callwheel.  Callers may legally pass any value, even if 0 or negative,
 * but since the sc->curticks index may have already been processed a
 * minimum timeout of 1 tick will be enforced.
 *
 * This function will block if the callout is currently queued to a different
 * cpu or the callback is currently running in another thread.
 */
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
{
	softclock_pcpu_t sc;
	globaldata_t gd;

#ifdef INVARIANTS
	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	gd = mycpu;
	sc = &softclock_pcpu_ary[gd->gd_cpuid];
	crit_enter_gd(gd);

	/*
	 * Our cpu must gain ownership of the callout and cancel anything
	 * still running, which is complex.  The easiest way to do it is to
	 * issue a callout_stop_sync().
	 *
	 * Clearing bits on flags (vs nflags) is a way to guarantee they are
	 * not set, as the cmpset atomic op will fail otherwise.  PENDING and
	 * ARMED must not be set; if we find them set we loop up and call
	 * callout_stop_sync() again.
	 */
	for (;;) {
		int flags;
		int nflags;

		callout_stop_sync(c);
		flags = c->c_flags & ~(CALLOUT_PENDING | CALLOUT_ARMED);
		nflags = (flags & ~(CALLOUT_CPU_MASK |
				    CALLOUT_EXECUTED)) |
			 CALLOUT_CPU_TO_FLAGS(gd->gd_cpuid) |
			 CALLOUT_ARMED |
			 CALLOUT_PENDING |
			 CALLOUT_ACTIVE;
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
	}

	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_func = ftn;
	c->c_time = sc->curticks + to_ticks;

	TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & cwheelmask],
			  c, c_links.tqe);
	crit_exit_gd(gd);
}
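
A minimal usage sketch for the API above, assuming a hypothetical driver: the mydev_softc structure, the one-second interval and the mydev_wdog()/mydev_attach() names are invented for illustration; only callout_init(), callout_reset() and the global hz come from the kernel.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

struct mydev_softc {
	struct callout	wdog;		/* hypothetical per-device timer */
	int		ticks_seen;
};

static void
mydev_wdog(void *arg)
{
	struct mydev_softc *sc = arg;

	++sc->ticks_seen;
	/* re-arm; a to_ticks of 0 or less would still be clamped to 1 */
	callout_reset(&sc->wdog, hz, mydev_wdog, sc);
}

static void
mydev_attach(struct mydev_softc *sc)
{
	callout_init(&sc->wdog);	/* sets CALLOUT_DID_INIT */
	callout_reset(&sc->wdog, hz, mydev_wdog, sc);
}
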
Example #2
/*
 * Start or restart a timeout.  Install the callout structure in the 
 * callwheel.  Callers may legally pass any value, even if 0 or negative,
 * but since the sc->curticks index may have already been processed a
 * minimum timeout of 1 tick will be enforced.
 *
 * The callout is installed on and will be processed on the current cpu's
 * callout wheel.
 *
 * WARNING! This function may be called from any cpu but the caller must
 * serialize callout_stop() and callout_reset() calls on the passed
 * structure regardless of cpu.
 */
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), 
		void *arg)
{
	softclock_pcpu_t sc;
	globaldata_t gd;

#ifdef INVARIANTS
	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	gd = mycpu;
	sc = &softclock_pcpu_ary[gd->gd_cpuid];
	crit_enter_gd(gd);

	if (c->c_flags & CALLOUT_ACTIVE)
		callout_stop(c);

	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = sc->curticks + to_ticks;
#ifdef SMP
	c->c_gd = gd;
#endif

	TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & callwheelmask], 
			  c, c_links.tqe);
	crit_exit_gd(gd);
}
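
This older variant has the same caller-visible contract, so the sketch below only shows the tick arithmetic a caller typically does before calling it; the ms_to_ticks() helper and foo_schedule() are invented, while hz and callout_reset() are the kernel's own.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/callout.h>

/* illustrative helper: millisecond delay -> ticks for callout_reset() */
static __inline int
ms_to_ticks(int ms)
{
	/* a result of 0 is fine, callout_reset() clamps to 1 tick */
	return (ms * hz / 1000);
}

/* arm `c` to call fn(arg) roughly 250 milliseconds from now */
static void
foo_schedule(struct callout *c, void (*fn)(void *), void *arg)
{
	callout_reset(c, ms_to_ticks(250), fn, arg);
}
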
Example #3
/*
 * Stop a running timer.  WARNING!  If called on a cpu other than the one
 * the callout was started on this function will liveloop on its IPI to
 * the target cpu to process the request.  It is possible for the callout
 * to execute in that case.
 *
 * WARNING! This function may be called from any cpu but the caller must
 * serialize callout_stop() and callout_reset() calls on the passed
 * structure regardless of cpu.
 *
 * WARNING! This routine may be called from an IPI.
 *
 * WARNING! This function can return while its c_func is still running
 *	    in the callout thread, a secondary check may be needed.
 *	    Use callout_stop_sync() to wait for any callout function to
 *	    complete before returning, being sure that no deadlock is
 *	    possible if you do.
 */
int
callout_stop(struct callout *c)
{
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	softclock_pcpu_t sc;

#ifdef INVARIANTS
	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_stop(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	crit_enter_gd(gd);

	/*
	 * Don't attempt to delete a callout that's not on the queue.  The
	 * callout may not have a cpu assigned to it.  Callers do not have
	 * to be on the issuing cpu but must still serialize access to the
	 * callout structure.
	 *
	 * We are not cpu-localized here and cannot safely modify the
	 * flags field in the callout structure.  Note that most of the
	 * time CALLOUT_ACTIVE will be 0 if CALLOUT_PENDING is also 0.
	 *
	 * If we race another cpu's dispatch of this callout it is possible
	 * for CALLOUT_ACTIVE to be set with CALLOUT_PENDING unset.  This
	 * will cause us to fall through and synchronize with the other
	 * cpu.
	 */
	if ((c->c_flags & CALLOUT_PENDING) == 0) {
		if ((c->c_flags & CALLOUT_ACTIVE) == 0) {
			crit_exit_gd(gd);
			return (0);
		}
		if (c->c_gd == NULL || c->c_gd == gd) {
			c->c_flags &= ~CALLOUT_ACTIVE;
			crit_exit_gd(gd);
			return (0);
		}
	}
	if ((tgd = c->c_gd) != gd) {
		/*
		 * If the callout is owned by a different CPU we have to
		 * execute the function synchronously on the target cpu.
		 */
		int seq;

		cpu_ccfence();	/* don't let tgd alias c_gd */
		seq = lwkt_send_ipiq(tgd, (void *)callout_stop, c);
		lwkt_wait_ipiq(tgd, seq);
	} else {
		/*
		 * If the callout is owned by the same CPU we can
		 * process it directly, but if we are racing our helper
		 * thread (sc->next), we have to adjust sc->next.  The
		 * race is interlocked by a critical section.
		 */
		sc = &softclock_pcpu_ary[gd->gd_cpuid];

		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
		if (sc->next == c)
			sc->next = TAILQ_NEXT(c, c_links.tqe);

		TAILQ_REMOVE(&sc->callwheel[c->c_time & callwheelmask], 
				c, c_links.tqe);
		c->c_func = NULL;
	}
	crit_exit_gd(gd);
	return (1);
}
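
A teardown sketch following the warnings above, reusing the hypothetical mydev_softc from the earlier sketch: callout_stop() only dequeues the callout, so a detach path that is about to free the structure still needs callout_stop_sync() (or an equivalent interlock) before the memory goes away.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>

struct mydev_softc {			/* same illustrative softc as above */
	struct callout	wdog;
};

static void
mydev_detach(struct mydev_softc *sc)
{
	/*
	 * Dequeue the callout.  A zero return means it was not pending,
	 * i.e. the handler may already be running on the softclock thread.
	 */
	(void)callout_stop(&sc->wdog);

	/*
	 * Secondary interlock per the WARNING above: wait for any
	 * in-flight handler before the softc can be freed.
	 */
	callout_stop_sync(&sc->wdog);
}
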
Example #4
/*
 * Stop a running timer and ensure that any running callout completes before
 * returning.  If the timer is running on another cpu this function may block
 * to interlock against the callout.  If the callout is currently executing
 * or blocked in another thread this function may also block to interlock
 * against the callout.
 *
 * The caller must be careful to avoid deadlocks, either by using
 * callout_init_lk() (which uses the lockmgr lock cancelation feature),
 * by using tokens and dealing with breaks in the serialization, or by using
 * the lockmgr lock cancelation feature yourself in the callout callback
 * function.
 *
 * callout_stop() returns non-zero if the callout was pending.
 */
static int
_callout_stop(struct callout *c, int issync)
{
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	softclock_pcpu_t sc;
	int flags;
	int nflags;
	int rc;
	int cpuid;

#ifdef INVARIANTS
	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_stop(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	crit_enter_gd(gd);

	/*
	 * Fast path operations:
	 *
	 * If ARMED and owned by our cpu, or not ARMED, and other simple
	 * conditions are met, we can just clear ACTIVE and EXECUTED
	 * and we are done.
	 */
	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();

		cpuid = CALLOUT_FLAGS_TO_CPU(flags);

		/*
		 * Can't handle an armed callout in the fast path if it is
		 * not on the current cpu.  We must atomically increment the
		 * IPI count for the IPI we intend to send and break out of
		 * the fast path to enter the slow path.
		 */
		if (flags & CALLOUT_ARMED) {
			if (gd->gd_cpuid != cpuid) {
				nflags = flags + 1;
				if (atomic_cmpset_int(&c->c_flags,
						      flags, nflags)) {
					/* break to slow path */
					break;
				}
				continue;	/* retry */
			}
		} else {
			cpuid = gd->gd_cpuid;
			KKASSERT((flags & CALLOUT_IPI_MASK) == 0);
			KKASSERT((flags & CALLOUT_PENDING) == 0);
		}

		/*
		 * Process pending IPIs and retry (only if not called from
		 * an IPI).
		 */
		if (flags & CALLOUT_IPI_MASK) {
			lwkt_process_ipiq();
			continue;	/* retry */
		}

		/*
		 * Transition to the stopped state, recover the EXECUTED
		 * status.  If pending we cannot clear ARMED until after
		 * we have removed (c) from the callwheel.
		 *
		 * NOTE: The callout might already not be armed but in this
		 *	 case it should also not be pending.
		 */
		nflags = flags & ~(CALLOUT_ACTIVE |
				   CALLOUT_EXECUTED |
				   CALLOUT_WAITING |
				   CALLOUT_PENDING);

		/* NOTE: IPI_MASK already tested */
		if ((flags & CALLOUT_PENDING) == 0)
			nflags &= ~CALLOUT_ARMED;
		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			/*
			 * Can only remove from callwheel if currently
			 * pending.
			 */
			if (flags & CALLOUT_PENDING) {
				sc = &softclock_pcpu_ary[gd->gd_cpuid];
				if (sc->next == c)
					sc->next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(
					&sc->callwheel[c->c_time & cwheelmask],
					c,
					c_links.tqe);
				c->c_func = NULL;

				/*
				 * NOTE: Can't clear ARMED until we have
				 *	 physically removed (c) from the
				 *	 callwheel.
				 *
				 * NOTE: WAITING bit race exists when doing
				 *	 unconditional bit clears.
				 */
				callout_maybe_clear_armed(c);
				if (c->c_flags & CALLOUT_WAITING)
					flags |= CALLOUT_WAITING;
			}

			/*
			 * ARMED has been cleared at this point and (c)
			 * might now be stale.  Only good for wakeup()s.
			 */
			if (flags & CALLOUT_WAITING)
				wakeup(c);

			goto skip_slow;
		}
		/* retry */
	}

	/*
	 * Slow path (and not called via an IPI).
	 *
	 * When ARMED to a different cpu the stop must be processed on that
	 * cpu.  Issue the IPI and wait for completion.  We have already
	 * incremented the IPI count.
	 */
	tgd = globaldata_find(cpuid);
	lwkt_send_ipiq3(tgd, callout_stop_ipi, c, issync);

	for (;;) {
		int flags;
		int nflags;

		flags = c->c_flags;
		cpu_ccfence();
		if ((flags & CALLOUT_IPI_MASK) == 0)	/* fast path */
			break;
		nflags = flags | CALLOUT_WAITING;
		tsleep_interlock(c, 0);
		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			tsleep(c, PINTERLOCKED, "cstp1", 0);
		}
	}

skip_slow:

	/*
	 * If (issync) we must also wait for any in-progress callbacks to
	 * complete, unless the stop is being executed from the callback
	 * itself.  The EXECUTED flag is set prior to the callback
	 * being made so our existing flags status already has it.
	 *
	 * If auto-lock mode is being used, this is where we cancel any
	 * blocked lock that is potentially preventing the target cpu
	 * from completing the callback.
	 */
	while (issync) {
		intptr_t *runp;
		intptr_t runco;

		sc = &softclock_pcpu_ary[cpuid];
		if (gd->gd_curthread == &sc->thread)	/* stop from cb */
			break;
		runp = &sc->running;
		runco = *runp;
		cpu_ccfence();
		if ((runco & ~(intptr_t)1) != (intptr_t)c)
			break;
		if (c->c_flags & CALLOUT_AUTOLOCK)
			lockmgr(c->c_lk, LK_CANCEL_BEG);
		tsleep_interlock(c, 0);
		if (atomic_cmpset_long(runp, runco, runco | 1))
			tsleep(c, PINTERLOCKED, "cstp3", 0);
		if (c->c_flags & CALLOUT_AUTOLOCK)
			lockmgr(c->c_lk, LK_CANCEL_END);
	}

	crit_exit_gd(gd);
	rc = (flags & CALLOUT_EXECUTED) != 0;

	return rc;
}
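
The auto-lock path referenced in the header comment and in the CALLOUT_AUTOLOCK/LK_CANCEL_BEG code above can be sketched as follows. It assumes DragonFly's callout_init_lk() initializer; the mydev_* names are invented. The callout subsystem then acquires sc->lk around the handler, and a synchronous stop cancels a handler blocked on that lock instead of deadlocking.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/callout.h>

struct mydev_softc {
	struct lock	lk;		/* lockmgr lock handed to the callout */
	struct callout	timer;
};

static void
mydev_timer(void *arg)
{
	struct mydev_softc *sc = arg;

	/* entered with sc->lk held by the callout subsystem */
	(void)sc;
}

static void
mydev_start(struct mydev_softc *sc)
{
	lockinit(&sc->lk, "mydev", 0, 0);
	callout_init_lk(&sc->timer, &sc->lk);
	callout_reset(&sc->timer, hz, mydev_timer, sc);
}

static void
mydev_stop(struct mydev_softc *sc)
{
	/* safe even if mydev_timer is currently blocked on sc->lk */
	callout_stop_sync(&sc->timer);
}
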
Example #5
/*
 * Setup a callout to run on the specified cpu.  Should generally be used
 * to run a callout on a specific cpu which does not nominally change.
 */
void
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
		    void *arg, int cpuid)
{
	globaldata_t gd;
	globaldata_t tgd;

#ifdef INVARIANTS
	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	gd = mycpu;
	crit_enter_gd(gd);

	tgd = globaldata_find(cpuid);

	/*
	 * Our cpu must temporarily gain ownership of the callout and cancel
	 * anything still running, which is complex.  The easiest way to do
	 * it is to issue a callout_stop().
	 *
	 * Clearing bits on flags (vs nflags) is a way to guarantee they were
	 * not previously set, by forcing the atomic op to fail.  The callout
	 * must not be pending or armed after the stop_sync, if it is we have
	 * to loop up and stop_sync() again.
	 */
	for (;;) {
		int flags;
		int nflags;

		callout_stop_sync(c);
		flags = c->c_flags & ~(CALLOUT_PENDING | CALLOUT_ARMED);
		nflags = (flags & ~(CALLOUT_CPU_MASK |
				    CALLOUT_EXECUTED)) |
			 CALLOUT_CPU_TO_FLAGS(tgd->gd_cpuid) |
			 CALLOUT_ARMED |
			 CALLOUT_ACTIVE;
		nflags = nflags + 1;		/* bump IPI count */
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
		cpu_pause();
	}

	/*
	 * Even though we are not the cpu that now owns the callout, our
	 * bumping of the IPI count (and in a situation where the callout is
	 * not queued to the callwheel) will prevent anyone else from
	 * depending on or acting on the contents of the callout structure.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_func = ftn;
	c->c_load = to_ticks;	/* IPI will add curticks */

	lwkt_send_ipiq(tgd, callout_reset_ipi, c);
	crit_exit_gd(gd);
}
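
To illustrate the intended use, here is a sketch of a periodic per-cpu callout that stays pinned to one cpu by re-arming itself through callout_reset_bycpu(); the pcpu_stats structure and handler names are invented, the callout_*() calls and hz are not.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

struct pcpu_stats {
	struct callout	co;
	int		cpuid;		/* cpu this callout is pinned to */
	long		samples;
};

static void
pcpu_stats_tick(void *arg)
{
	struct pcpu_stats *st = arg;

	++st->samples;
	/* re-arm on the same cpu's callwheel */
	callout_reset_bycpu(&st->co, hz, pcpu_stats_tick, st, st->cpuid);
}

static void
pcpu_stats_start(struct pcpu_stats *st, int cpuid)
{
	st->cpuid = cpuid;
	callout_init(&st->co);
	callout_reset_bycpu(&st->co, hz, pcpu_stats_tick, st, cpuid);
}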