Code example #1
/*
 * lwkt_thread_replyport() - Backend to lwkt_replymsg()
 *
 * Called with the reply port as an argument but in the context of the
 * original target port.  Completion must occur on the target port's
 * cpu.
 *
 * The critical section protects us from IPIs on this cpu.
 */
static
void
lwkt_thread_replyport(lwkt_port_t port, lwkt_msg_t msg)
{
    int flags;

    KKASSERT((msg->ms_flags & (MSGF_DONE|MSGF_QUEUED|MSGF_INTRANSIT)) == 0);

    if (msg->ms_flags & MSGF_SYNC) {
        /*
         * If a synchronous completion has been requested, just wakeup
         * the message without bothering to queue it to the target port.
         *
         * Assume the target thread is non-preemptive, so no critical
         * section is required.
         */
        if (port->mpu_td->td_gd == mycpu) {
            crit_enter();
            flags = msg->ms_flags;
            cpu_sfence();
            msg->ms_flags |= MSGF_DONE | MSGF_REPLY;
            if (port->mp_flags & MSGPORTF_WAITING)
                _lwkt_schedule_msg(port->mpu_td, flags);
            crit_exit();
        } else {
#ifdef INVARIANTS
            atomic_set_int(&msg->ms_flags, MSGF_INTRANSIT);
#endif
            atomic_set_int(&msg->ms_flags, MSGF_REPLY);
            lwkt_send_ipiq(port->mpu_td->td_gd,
                           (ipifunc1_t)lwkt_thread_replyport_remote, msg);
        }
    } else {
        /*
         * If an asynchronous completion has been requested the message
         * must be queued to the reply port.
         *
         * A critical section is required to interlock the port queue.
         */
        if (port->mpu_td->td_gd == mycpu) {
            crit_enter();
            _lwkt_enqueue_reply(port, msg);
            if (port->mp_flags & MSGPORTF_WAITING)
                _lwkt_schedule_msg(port->mpu_td, msg->ms_flags);
            crit_exit();
        } else {
#ifdef INVARIANTS
            atomic_set_int(&msg->ms_flags, MSGF_INTRANSIT);
#endif
            atomic_set_int(&msg->ms_flags, MSGF_REPLY);
            lwkt_send_ipiq(port->mpu_td->td_gd,
                           (ipifunc1_t)lwkt_thread_replyport_remote, msg);
        }
    }
}
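
This function is the template for most of the examples below: when the owning thread lives on the current cpu the operation completes directly under a critical section, otherwise the request is forwarded to the owning cpu with lwkt_send_ipiq() and the same logic re-runs there, chasing any thread migration that happens in flight. A minimal sketch of the idiom, assuming a DragonFly kernel context (struct my_obj, my_op() and my_op_remote() are hypothetical names, not part of the sources above):

static void my_op_remote(void *arg);

struct my_obj {
    thread_t    td;             /* owning thread; td->td_gd is its cpu */
    /* ... payload ... */
};

static void
my_op(struct my_obj *obj)
{
    if (obj->td->td_gd == mycpu) {
        crit_enter();           /* interlock against IPIs on this cpu */
        /* ... operate on obj directly ... */
        crit_exit();
    } else {
        /* forward to the owning cpu; my_op_remote() runs there */
        lwkt_send_ipiq(obj->td->td_gd, my_op_remote, obj);
    }
}

static void
my_op_remote(void *arg)
{
    struct my_obj *obj = arg;

    /* chase any thread migration that occurred while in transit */
    if (obj->td->td_gd != mycpu) {
        lwkt_send_ipiq(obj->td->td_gd, my_op_remote, obj);
        return;
    }
    /* ... now cpu-local; complete the operation on obj ... */
}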
Code example #2
File: kern_intr.c  Project: madhavsuresh/DragonFlyBSD
static void
sched_ithd_intern(struct intr_info *info)
{
    ++info->i_count;
    if (info->i_state != ISTATE_NOTHREAD) {
	if (info->i_reclist == NULL) {
	    report_stray_interrupt(info, "sched_ithd");
	} else {
#ifdef SMP
	    if (info->i_thread.td_gd == mycpu) {
		if (info->i_running == 0) {
		    info->i_running = 1;
		    if (info->i_state != ISTATE_LIVELOCKED)
			lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
		}
	    } else {
		lwkt_send_ipiq(info->i_thread.td_gd, sched_ithd_remote, info);
	    }
#else
	    if (info->i_running == 0) {
		info->i_running = 1;
		if (info->i_state != ISTATE_LIVELOCKED)
		    lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
	    }
#endif
	}
    } else {
	report_stray_interrupt(info, "sched_ithd");
    }
}
Code example #3
static
int
lwkt_thread_putport(lwkt_port_t port, lwkt_msg_t msg)
{
    KKASSERT((msg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0);

    msg->ms_target_port = port;
    if (port->mpu_td->td_gd == mycpu) {
        crit_enter();
        _lwkt_pushmsg(port, msg);
        if (port->mp_flags & MSGPORTF_WAITING)
            _lwkt_schedule_msg(port->mpu_td, msg->ms_flags);
        crit_exit();
    } else {
#ifdef INVARIANTS
        /*
         * Flag the message as in-transit for the assertion on the
         * remote side.  An atomic op is needed on ms_flags vs the
         * originator.  Also note that the originator might be using
         * a different type of msgport.
         */
        atomic_set_int(&msg->ms_flags, MSGF_INTRANSIT);
#endif
        lwkt_send_ipiq(port->mpu_td->td_gd,
                       (ipifunc1_t)lwkt_thread_putport_remote, msg);
    }
    return (EASYNC);
}
Code example #4
/*
 * lwkt_thread_putport_remote() - IPI-remote half of lwkt_thread_putport()
 *
 * Runs in the context of the target port's cpu after
 * lwkt_thread_putport() forwarded the message here with
 * lwkt_send_ipiq(), and completes the put on that cpu.
 *
 * The message must already have cleared MSGF_DONE and MSGF_REPLY.
 */
static
void
lwkt_thread_putport_remote(lwkt_msg_t msg)
{
    lwkt_port_t port = msg->ms_target_port;

    /*
     * Chase any thread migration that occurs
     */
    if (port->mpu_td->td_gd != mycpu) {
        lwkt_send_ipiq(port->mpu_td->td_gd,
                       (ipifunc1_t)lwkt_thread_putport_remote, msg);
        return;
    }

    /*
     * An atomic op is needed on ms_flags vs originator.  Also
     * note that the originator might be using a different type
     * of msgport.
     */
#ifdef INVARIANTS
    KKASSERT(msg->ms_flags & MSGF_INTRANSIT);
    atomic_clear_int(&msg->ms_flags, MSGF_INTRANSIT);
#endif
    _lwkt_pushmsg(port, msg);
    if (port->mp_flags & MSGPORTF_WAITING)
        _lwkt_schedule_msg(port->mpu_td, msg->ms_flags);
}
Code example #5
/*
 * This function completes reply processing for the default case in the
 * context of the originating cpu.
 */
static
void
lwkt_thread_replyport_remote(lwkt_msg_t msg)
{
    lwkt_port_t port = msg->ms_reply_port;
    int flags;

    /*
     * Chase any thread migration that occurs
     */
    if (port->mpu_td->td_gd != mycpu) {
        lwkt_send_ipiq(port->mpu_td->td_gd,
                       (ipifunc1_t)lwkt_thread_replyport_remote, msg);
        return;
    }

    /*
     * Cleanup (in critical section, IPI on same cpu, atomic op not needed)
     */
#ifdef INVARIANTS
    KKASSERT(msg->ms_flags & MSGF_INTRANSIT);
    msg->ms_flags &= ~MSGF_INTRANSIT;
#endif
    flags = msg->ms_flags;
    if (msg->ms_flags & MSGF_SYNC) {
        cpu_sfence();
        msg->ms_flags |= MSGF_REPLY | MSGF_DONE;
    } else {
        _lwkt_enqueue_reply(port, msg);
    }
    if (port->mp_flags & MSGPORTF_WAITING)
        _lwkt_schedule_msg(port->mpu_td, flags);
}
Code example #6
File: kern_timeout.c  Project: varialus/DragonFlyX
void
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpuid)
{
	KASSERT(cpuid >= 0 && cpuid < ncpus, ("invalid cpuid %d", cpuid));

#ifndef SMP
	callout_reset(c, to_ticks, ftn, arg);
#else
	if (cpuid == mycpuid) {
		callout_reset(c, to_ticks, ftn, arg);
	} else {
		struct globaldata *target_gd;
		struct callout_remote_arg rmt;
		int seq;

		rmt.c = c;
		rmt.ftn = ftn;
		rmt.arg = arg;
		rmt.to_ticks = to_ticks;

		target_gd = globaldata_find(cpuid);

		seq = lwkt_send_ipiq(target_gd, callout_reset_ipi, &rmt);
		lwkt_wait_ipiq(target_gd, seq);
	}
#endif
}
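
Note the synchronous idiom: lwkt_send_ipiq() returns a sequence number and lwkt_wait_ipiq() spins until the target cpu has processed that IPI, which is what makes it safe to pass a pointer to the on-stack rmt structure. A minimal sketch of the same pattern, assuming a DragonFly kernel context (struct my_arg, do_remote_work() and run_on_cpu_sync() are hypothetical):

struct my_arg {
    int value;
};

static void
do_remote_work(void *arg)
{
    struct my_arg *ap = arg;    /* executes on the target cpu */

    /* ... consume ap->value ... */
}

static void
run_on_cpu_sync(int cpuid, struct my_arg *ap)
{
    globaldata_t tgd = globaldata_find(cpuid);
    int seq;

    seq = lwkt_send_ipiq(tgd, do_remote_work, ap);
    lwkt_wait_ipiq(tgd, seq);   /* ap may be stack memory: block until
                                   the target cpu is done with it */
}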
Code example #7
File: uthread.c  Project: AhmadTux/DragonFlyBSD
void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
		struct globaldata *gd)
{
    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags |= flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
    lwkt_initport(&td->td_msgport, td);
    cpu_init_thread(td);
    if (td == &gd->gd_idlethread) {
	TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
	/* idle thread is not counted in gd_num_threads */
    } else if (gd == mycpu) {
	crit_enter();
	TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        ++gd->gd_num_threads;
	if (td->td_flags & TDF_SYSTHREAD)
	    ++gd->gd_sys_threads;
	crit_exit();
    } else {
	lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
}
Code example #8
File: if_lgue.c  Project: AhmadTux/DragonFlyBSD
/*
 * Schedule start call
 */
static void
lgue_start_schedule(struct ifnet *ifp)
{
#ifdef SMP
	int cpu;

	cpu = ifp->if_start_cpuid(ifp);
	if (cpu != mycpuid)
		lwkt_send_ipiq(globaldata_find(cpu), lgue_start_ipifunc, ifp);
	else
#endif
		lgue_start_ipifunc(ifp);
}
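
Here the target is identified by a cpuid rather than a thread, and the function is simply called directly when we are already on the right cpu. The generic form, as a sketch (run_on_cpu() is a hypothetical helper, assuming an SMP DragonFly kernel):

static void
run_on_cpu(int cpu, void (*func)(void *), void *arg)
{
    if (cpu != mycpuid)
        lwkt_send_ipiq(globaldata_find(cpu), func, arg);
    else
        func(arg);              /* already on the target cpu */
}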
Code example #9
File: kern_systimer.c  Project: AhmadTux/DragonFlyBSD
/*
 * MPSAFE
 */
void
systimer_add(systimer_t info)
{
    struct globaldata *gd = mycpu;

    KKASSERT((info->flags & SYSTF_ONQUEUE) == 0);
    crit_enter();
    if (info->gd == gd) {
	systimer_t scan1;
	systimer_t scan2;
	scan1 = TAILQ_FIRST(&gd->gd_systimerq);
	if (scan1 == NULL || (int)(scan1->time - info->time) > 0) {
	    cputimer_intr_reload(info->time - sys_cputimer->count());
	    TAILQ_INSERT_HEAD(&gd->gd_systimerq, info, node);
	} else {
	    scan2 = TAILQ_LAST(&gd->gd_systimerq, systimerq);
	    for (;;) {
		if (scan1 == NULL) {
		    TAILQ_INSERT_TAIL(&gd->gd_systimerq, info, node);
		    break;
		}
		if ((int)(scan1->time - info->time) > 0) {
		    TAILQ_INSERT_BEFORE(scan1, info, node);
		    break;
		}
		if ((int)(scan2->time - info->time) <= 0) {
		    TAILQ_INSERT_AFTER(&gd->gd_systimerq, scan2, info, node);
		    break;
		}
		scan1 = TAILQ_NEXT(scan1, node);
		scan2 = TAILQ_PREV(scan2, systimerq, node);
	    }
	}
	info->flags = (info->flags | SYSTF_ONQUEUE) & ~SYSTF_IPIRUNNING;
	info->queue = &gd->gd_systimerq;
    } else {
#ifdef SMP
	KKASSERT((info->flags & SYSTF_IPIRUNNING) == 0);
	info->flags |= SYSTF_IPIRUNNING;
	lwkt_send_ipiq(info->gd, (ipifunc1_t)systimer_add, info);
#else
	panic("systimer_add: bad gd in info %p", info);
#endif
    }
    crit_exit();
}
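
The queue insertion above compares times with a signed difference, (int)(scan1->time - info->time) > 0, rather than a direct comparison, so the ordering stays correct when the 32-bit systimer clock wraps. A standalone demonstration of the idiom (plain userland C, hypothetical values):

#include <stdio.h>

int
main(void)
{
    unsigned int later   = 0x00000010;  /* timestamp taken after a wrap */
    unsigned int earlier = 0xfffffff0;  /* timestamp taken before the wrap */

    /* A naive unsigned comparison orders the two incorrectly. */
    printf("naive:     later > earlier            -> %d\n",
           later > earlier);                            /* prints 0 */

    /* The signed-difference idiom is correct as long as the two
     * times are less than 2^31 ticks apart. */
    printf("wrap-safe: (int)(later - earlier) > 0 -> %d\n",
           (int)(later - earlier) > 0);                 /* prints 1 */
    return 0;
}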
Code example #10
void
dump_reactivate_cpus(void)
{
#ifdef SMP
	globaldata_t gd;
	int cpu, seq;
#endif

	dump_stop_usertds = 1;

	need_user_resched();

#ifdef SMP
	for (cpu = 0; cpu < ncpus; cpu++) {
		gd = globaldata_find(cpu);
		seq = lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
		lwkt_wait_ipiq(gd, seq);
	}

	restart_cpus(stopped_cpus);
#endif
}
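
This is the broadcast form of the synchronous IPI: send to each cpu in turn and wait on each sequence number before moving on. Distilled into a sketch (ipi_broadcast_sync() is a hypothetical helper, assuming a DragonFly kernel context):

static void
ipi_broadcast_sync(void (*func)(void *), void *arg)
{
    globaldata_t gd;
    int cpu, seq;

    for (cpu = 0; cpu < ncpus; cpu++) {
        gd = globaldata_find(cpu);
        seq = lwkt_send_ipiq(gd, func, arg);
        lwkt_wait_ipiq(gd, seq);        /* one cpu at a time */
    }
}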
Code example #11
/*
 * Stop a running timer.  WARNING!  If called on a cpu other than the one
 * the callout was started on, this function will liveloop on its IPI to
 * the target cpu to process the request.  It is possible for the callout
 * to execute in that case.
 *
 * WARNING! This function may be called from any cpu but the caller must
 * serialize callout_stop() and callout_reset() calls on the passed
 * structure regardless of cpu.
 *
 * WARNING! This routine may be called from an IPI
 *
 * WARNING! This function can return while its c_func is still running
 *	    in the callout thread, a secondary check may be needed.
 *	    Use callout_stop_sync() to wait for any callout function to
 *	    complete before returning, being sure that no deadlock is
 *	    possible if you do.
 */
int
callout_stop(struct callout *c)
{
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	softclock_pcpu_t sc;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_stop(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	crit_enter_gd(gd);

	/*
	 * Don't attempt to delete a callout that's not on the queue.  The
	 * callout may not have a cpu assigned to it.  Callers do not have
	 * to be on the issuing cpu but must still serialize access to the
	 * callout structure.
	 *
	 * We are not cpu-localized here and cannot safely modify the
	 * flags field in the callout structure.  Note that most of the
	 * time CALLOUT_ACTIVE will be 0 if CALLOUT_PENDING is also 0.
	 *
	 * If we race another cpu's dispatch of this callout it is possible
	 * for CALLOUT_ACTIVE to be set with CALLOUT_PENDING unset.  This
	 * will cause us to fall through and synchronize with the other
	 * cpu.
	 */
	if ((c->c_flags & CALLOUT_PENDING) == 0) {
		if ((c->c_flags & CALLOUT_ACTIVE) == 0) {
			crit_exit_gd(gd);
			return (0);
		}
		if (c->c_gd == NULL || c->c_gd == gd) {
			c->c_flags &= ~CALLOUT_ACTIVE;
			crit_exit_gd(gd);
			return (0);
		}
	}
	if ((tgd = c->c_gd) != gd) {
		/*
		 * If the callout is owned by a different CPU we have to
		 * execute the function synchronously on the target cpu.
		 */
		int seq;

		cpu_ccfence();	/* don't let tgd alias c_gd */
		seq = lwkt_send_ipiq(tgd, (void *)callout_stop, c);
		lwkt_wait_ipiq(tgd, seq);
	} else {
		/*
		 * If the callout is owned by the same CPU we can
		 * process it directly, but if we are racing our helper
		 * thread (sc->next), we have to adjust sc->next.  The
		 * race is interlocked by a critical section.
		 */
		sc = &softclock_pcpu_ary[gd->gd_cpuid];

		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
		if (sc->next == c)
			sc->next = TAILQ_NEXT(c, c_links.tqe);

		TAILQ_REMOVE(&sc->callwheel[c->c_time & callwheelmask], 
				c, c_links.tqe);
		c->c_func = NULL;
	}
	crit_exit_gd(gd);
	return (1);
}
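
For orientation, a caller-side sketch of how this function is typically used (my_timer, my_timeout(), my_start() and my_stop() are hypothetical; kernel context and the serialization requirement from the warnings above are assumed):

static struct callout my_timer;

static void
my_timeout(void *arg)
{
    /* runs from the per-cpu softclock thread */
}

static void
my_start(void *arg)
{
    callout_init(&my_timer);            /* one-time initialization */
    callout_reset(&my_timer, hz / 10, my_timeout, arg);
}

static void
my_stop(void)
{
    if (callout_stop(&my_timer) == 0) {
        /* The callout already ran or is still running; per the
         * WARNING above, a secondary check or callout_stop_sync()
         * may be needed before tearing down my_timeout()'s data. */
    }
}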
Code example #12
File: kern_timeout.c  Project: wan721/DragonFlyBSD
/*
 * Remote IPI for callout_reset_bycpu().  The operation is performed only
 * on the 1->0 transition of the counter, otherwise there are callout_stop()s
 * pending after us.
 *
 * The IPI counter and PENDING flags must be set atomically with the
 * 1->0 transition.  The ACTIVE flag was set prior to the ipi being
 * sent and we do not want to race a caller on the original cpu trying
 * to deactivate() the flag concurrent with our installation of the
 * callout.
 */
static void
callout_reset_ipi(void *arg)
{
	struct callout *c = arg;
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	int flags;
	int nflags;

	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();
		KKASSERT((flags & CALLOUT_IPI_MASK) > 0);

		/*
		 * We should already be armed for our cpu, if armed to another
		 * cpu, chain the IPI.  If for some reason we are not armed,
		 * we can arm ourselves.
		 */
		if (flags & CALLOUT_ARMED) {
			if (CALLOUT_FLAGS_TO_CPU(flags) != gd->gd_cpuid) {
				tgd = globaldata_find(
						CALLOUT_FLAGS_TO_CPU(flags));
				lwkt_send_ipiq(tgd, callout_reset_ipi, c);
				return;
			}
			nflags = (flags & ~CALLOUT_EXECUTED);
		} else {
			nflags = (flags & ~(CALLOUT_CPU_MASK |
					    CALLOUT_EXECUTED)) |
				 CALLOUT_ARMED |
				 CALLOUT_CPU_TO_FLAGS(gd->gd_cpuid);
		}

		/*
		 * Decrement the IPI count, retain and clear the WAITING
		 * status, clear EXECUTED.
		 *
		 * NOTE: It is possible for the callout to already have been
		 *	 marked pending due to SMP races.
		 */
		nflags = nflags - 1;
		if ((flags & CALLOUT_IPI_MASK) == 1) {
			nflags &= ~(CALLOUT_WAITING | CALLOUT_EXECUTED);
			nflags |= CALLOUT_PENDING;
		}

		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			/*
			 * Only install the callout on the 1->0 transition
			 * of the IPI count, and only if PENDING was not
			 * already set.  The latter situation should never
			 * occur but we check anyway.
			 */
			if ((flags & (CALLOUT_PENDING|CALLOUT_IPI_MASK)) == 1) {
				softclock_pcpu_t sc;

				sc = &softclock_pcpu_ary[gd->gd_cpuid];
				c->c_time = sc->curticks + c->c_load;
				TAILQ_INSERT_TAIL(
					&sc->callwheel[c->c_time & cwheelmask],
					c, c_links.tqe);
			}
			break;
		}
		/* retry */
		cpu_pause();
	}

	/*
	 * Issue wakeup if requested.
	 */
	if (flags & CALLOUT_WAITING)
		wakeup(c);
}
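
The flags manipulation above is a lock-free read-modify-write loop: snapshot c_flags, compute the successor value, publish it with atomic_cmpset_int(), and retry if another cpu changed the flags in between. The skeleton of that loop, distilled (set_and_clear_flags() is a hypothetical helper, kernel context assumed):

static void
set_and_clear_flags(volatile u_int *fp, u_int set_bits, u_int clear_bits)
{
    u_int flags;
    u_int nflags;

    for (;;) {
        flags = *fp;
        cpu_ccfence();          /* force exactly one read of *fp */
        nflags = (flags & ~clear_bits) | set_bits;
        if (atomic_cmpset_int(fp, flags, nflags))
            break;              /* no race; new value is published */
        cpu_pause();            /* lost a race; recompute and retry */
    }
}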
Code example #13
File: kern_timeout.c  Project: wan721/DragonFlyBSD
/*
 * Setup a callout to run on the specified cpu.  Should generally be used
 * to run a callout on a specific cpu which does not nominally change.
 */
void
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
		    void *arg, int cpuid)
{
	globaldata_t gd;
	globaldata_t tgd;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	gd = mycpu;
	crit_enter_gd(gd);

	tgd = globaldata_find(cpuid);

	/*
	 * Our cpu must temporarily gain ownership of the callout and cancel
	 * anything still running, which is complex.  The easiest way to do
	 * it is to issue a callout_stop().
	 *
	 * Clearing bits on flags (vs nflags) is a way to guarantee they were
	 * not previously set, by forcing the atomic op to fail.  The callout
	 * must not be pending or armed after the stop_sync, if it is we have
	 * to loop up and stop_sync() again.
	 */
	for (;;) {
		int flags;
		int nflags;

		callout_stop_sync(c);
		flags = c->c_flags & ~(CALLOUT_PENDING | CALLOUT_ARMED);
		nflags = (flags & ~(CALLOUT_CPU_MASK |
				    CALLOUT_EXECUTED)) |
			 CALLOUT_CPU_TO_FLAGS(tgd->gd_cpuid) |
			 CALLOUT_ARMED |
			 CALLOUT_ACTIVE;
		nflags = nflags + 1;		/* bump IPI count */
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
		cpu_pause();
	}

	/*
	 * Even though we are not the cpu that now owns the callout, our
	 * bumping of the IPI count (and in a situation where the callout is
	 * not queued to the callwheel) will prevent anyone else from
	 * depending on or acting on the contents of the callout structure.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_func = ftn;
	c->c_load = to_ticks;	/* IPI will add curticks */

	lwkt_send_ipiq(tgd, callout_reset_ipi, c);
	crit_exit_gd(gd);
}
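
A usage sketch for this function (my_stat_callout and my_stat_tick() are hypothetical; kernel context assumed): pinning a periodic callout to cpu 0 so that its handler always executes on that cpu's softclock thread.

static struct callout my_stat_callout;

static void
my_stat_tick(void *arg)
{
    /* always executes on cpu 0; rearm for the next tick interval */
    callout_reset_bycpu(&my_stat_callout, hz, my_stat_tick, arg, 0);
}

static void
my_stat_start(void *arg)
{
    callout_init(&my_stat_callout);
    callout_reset_bycpu(&my_stat_callout, hz, my_stat_tick, arg, 0);
}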