Example no. 1
0
/*
 * Arm (or re-arm) a callout so it fires on a specific target cpu.  When
 * the target is the current cpu, or the kernel is built without SMP,
 * this degenerates to a plain callout_reset().  Otherwise the request
 * is handed to the target cpu via a synchronous IPI.
 */
void
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpuid)
{
	KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d", cpuid));

#ifdef SMP
	if (cpuid != mycpuid) {
		struct globaldata *gd;
		struct callout_remote_arg rmt;
		int seq;

		/*
		 * rmt lives on our stack; the synchronous wait below
		 * ensures the target cpu has consumed it before we
		 * return and the frame goes away.
		 */
		rmt.c = c;
		rmt.ftn = ftn;
		rmt.arg = arg;
		rmt.to_ticks = to_ticks;

		gd = globaldata_find(cpuid);
		seq = lwkt_send_ipiq(gd, callout_reset_ipi, &rmt);
		lwkt_wait_ipiq(gd, seq);
		return;
	}
#endif
	callout_reset(c, to_ticks, ftn, arg);
}
Example no. 2
0
/*
 * Bring the system back to normal scheduling after a kernel dump:
 * set dump_stop_usertds, request a user reschedule locally and, on
 * SMP, synchronously poke every cpu with an IPI before restarting
 * the cpus that were stopped for the dump.
 */
void
dump_reactivate_cpus(void)
{
	dump_stop_usertds = 1;

	need_user_resched();

#ifdef SMP
	{
		globaldata_t gd;
		int i;
		int seq;

		/* Notify each cpu in turn, waiting for every IPI. */
		for (i = 0; i < ncpus; ++i) {
			gd = globaldata_find(i);
			seq = lwkt_send_ipiq(gd, need_user_resched_remote,
			    NULL);
			lwkt_wait_ipiq(gd, seq);
		}

		restart_cpus(stopped_cpus);
	}
#endif
}
Example no. 3
0
/*
 * Stop a running timer.  WARNING!  If called on a cpu other than the one
 * the callout was started on this function will liveloop on its IPI to
 * the target cpu to process the request.  It is possible for the callout
 * to execute in that case.
 *
 * WARNING! This function may be called from any cpu but the caller must
 * serialize callout_stop() and callout_reset() calls on the passed
 * structure regardless of cpu.
 *
 * WARNING! This routine may be called from an IPI
 *
 * WARNING! This function can return while its c_func is still running
 *	    in the callout thread, a secondary check may be needed.
 *	    Use callout_stop_sync() to wait for any callout function to
 *	    complete before returning, being sure that no deadlock is
 *	    possible if you do.
 *
 * Returns 0 if the callout was neither pending nor active, 1 if it was
 * dequeued here (or the dequeue was executed on the owning cpu via IPI).
 */
int
callout_stop(struct callout *c)
{
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	softclock_pcpu_t sc;

#ifdef INVARIANTS
	/*
	 * Debug aid: catch callers that never ran callout_init().
	 * Initialize on the fly and log the offender.  The [-1] stack
	 * index presumably recovers the caller's return address — this
	 * is arch/ABI dependent; confirm before relying on the value.
	 */
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_stop(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	/* All flag/queue manipulation below is interlocked by a crit section */
	crit_enter_gd(gd);

	/*
	 * Don't attempt to delete a callout that's not on the queue.  The
	 * callout may not have a cpu assigned to it.  Callers do not have
	 * to be on the issuing cpu but must still serialize access to the
	 * callout structure.
	 *
	 * We are not cpu-localized here and cannot safely modify the
	 * flags field in the callout structure.  Note that most of the
	 * time CALLOUT_ACTIVE will be 0 if CALLOUT_PENDING is also 0.
	 *
	 * If we race another cpu's dispatch of this callout it is possible
	 * for CALLOUT_ACTIVE to be set with CALLOUT_PENDING unset.  This
	 * will cause us to fall through and synchronize with the other
	 * cpu.
	 */
	if ((c->c_flags & CALLOUT_PENDING) == 0) {
		if ((c->c_flags & CALLOUT_ACTIVE) == 0) {
			/* Neither queued nor running: nothing to stop */
			crit_exit_gd(gd);
			return (0);
		}
		if (c->c_gd == NULL || c->c_gd == gd) {
			/* Active but local (or unowned): safe to clear here */
			c->c_flags &= ~CALLOUT_ACTIVE;
			crit_exit_gd(gd);
			return (0);
		}
	}
	if ((tgd = c->c_gd) != gd) {
		/*
		 * If the callout is owned by a different CPU we have to
		 * execute the function synchronously on the target cpu.
		 */
		int seq;

		cpu_ccfence();	/* don't let tgd alias c_gd */
		seq = lwkt_send_ipiq(tgd, (void *)callout_stop, c);
		lwkt_wait_ipiq(tgd, seq);
	} else {
		/*
		 * If the callout is owned by the same CPU we can
		 * process it directly, but if we are racing our helper
		 * thread (sc->next), we have to adjust sc->next.  The
		 * race is interlocked by a critical section.
		 */
		sc = &softclock_pcpu_ary[gd->gd_cpuid];

		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
		if (sc->next == c)
			sc->next = TAILQ_NEXT(c, c_links.tqe);

		/* Unlink from the callwheel bucket hashed by c_time */
		TAILQ_REMOVE(&sc->callwheel[c->c_time & callwheelmask], 
				c, c_links.tqe);
		c->c_func = NULL;
	}
	crit_exit_gd(gd);
	return (1);
}