Example #1
/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(netmsg_t msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i;

	KKASSERT(&td->td_msgport == netisr_portfn(0));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp, st_ctx->pollhz);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}
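
Example #1 above and Examples #2 and #4 below all follow the same shape: enter a critical section on curthread, reply the message right away, bail out early if no handlers are registered, then walk the handler array, taking each record's serializer with a try-lock and simply skipping devices that are busy. The fragment that follows is a minimal userland sketch of that skip-if-busy loop, using pthread_mutex_trylock() in place of lwkt_serialize_try(); the names poll_rec, poll_all and dummy_poll are hypothetical and are not part of the DragonFly API.

/*
 * Userland sketch of the skip-if-busy polling loop (hypothetical names,
 * pthread_mutex_trylock() standing in for lwkt_serialize_try()).
 */
#include <pthread.h>
#include <stdio.h>

struct poll_rec {
	pthread_mutex_t	 lock;			/* stands in for rec->serializer */
	void		(*poll_func)(int);	/* stands in for rec->status_func */
	int		 id;
};

static void
poll_all(struct poll_rec *recs, int nrecs)
{
	int i;

	for (i = 0; i < nrecs; ++i) {
		struct poll_rec *rec = &recs[i];

		/* Skip any record whose serializer is currently held. */
		if (pthread_mutex_trylock(&rec->lock) != 0)
			continue;
		rec->poll_func(rec->id);
		pthread_mutex_unlock(&rec->lock);
	}
}

static void
dummy_poll(int id)
{
	printf("polled %d\n", id);
}

int
main(void)
{
	struct poll_rec rec;

	pthread_mutex_init(&rec.lock, NULL);
	rec.poll_func = dummy_poll;
	rec.id = 0;
	poll_all(&rec, 1);
	return (0);
}
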
Example #2
/*
 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
 * appropriate, typically once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
rxpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
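
The burst accounting at the top of rxpoll_handler caps the work done per invocation: residual_burst is refilled from poll_burst on the first run of each tick, and every run consumes at most poll_each_burst of it, with rxpollmore_handler (Example #10) rescheduling the handler while residual_burst is still positive. Below is a standalone sketch of just that arithmetic; the plain variables and the example values (a 150-unit budget, 50-unit chunks) are hypothetical stand-ins for the io_ctx fields.

/*
 * Standalone sketch of the residual_burst/poll_each_burst arithmetic
 * (hypothetical variables mirroring the io_ctx fields).
 */
#include <stdio.h>

static int poll_burst = 150;		/* per-tick budget */
static int poll_each_burst = 50;	/* chunk handed out per handler run */
static int residual_burst = 0;		/* budget left in the current tick */

static int
next_chunk(void)
{
	int cycles;

	if (residual_burst == 0)	/* first call in this tick */
		residual_burst = poll_burst;
	cycles = (residual_burst < poll_each_burst) ?
		 residual_burst : poll_each_burst;
	residual_burst -= cycles;
	return (cycles);
}

int
main(void)
{
	int i;

	/* Three chunks of 50 exhaust the 150-unit per-tick budget. */
	for (i = 0; i < 3; i++)
		printf("chunk %d: %d cycles\n", i, next_chunk());
	return (0);
}
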
Example #3
/*
 * lwkt_thread_waitmsg()
 *
 *	Wait for a particular message to be replied.  We must be the only
 *	thread waiting on the message.  The port must be owned by the
 *	caller.
 */
static
int
lwkt_thread_waitmsg(lwkt_msg_t msg, int flags)
{
    thread_t td = curthread;

    KASSERT((msg->ms_flags & MSGF_DROPABLE) == 0,
            ("can't wait dropable message"));

    if ((msg->ms_flags & MSGF_DONE) == 0) {
        /*
         * If the done bit was not set we have to block until it is.
         */
        lwkt_port_t port = msg->ms_reply_port;
        int sentabort;

        KKASSERT(port->mpu_td == td);
        crit_enter_quick(td);
        sentabort = 0;

        while ((msg->ms_flags & MSGF_DONE) == 0) {
            port->mp_flags |= MSGPORTF_WAITING;	/* same cpu */
            if (sentabort == 0) {
                if ((sentabort = lwkt_sleep("waitmsg", flags)) != 0) {
                    lwkt_abortmsg(msg);
                }
            } else {
                lwkt_sleep("waitabt", 0);
            }
            port->mp_flags &= ~MSGPORTF_WAITING;
        }
        if (msg->ms_flags & MSGF_QUEUED)
            _lwkt_pullmsg(port, msg);
        crit_exit_quick(td);
    } else {
        /*
         * If the done bit was set we only have to mess around with the
         * message if it is queued on the reply port.
         */
        crit_enter_quick(td);
        if (msg->ms_flags & MSGF_QUEUED) {
            lwkt_port_t port = msg->ms_reply_port;
            thread_t td __debugvar = curthread;

            KKASSERT(port->mpu_td == td);
            _lwkt_pullmsg(port, msg);
        }
        crit_exit_quick(td);
    }
    return(msg->ms_error);
}
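
lwkt_thread_waitmsg() blocks until the replying side sets MSGF_DONE, raising MSGPORTF_WAITING around each lwkt_sleep() so the replier knows a wakeup is needed, and re-checking the flag after every wakeup. The snippet below is a rough userland analogy of that wait loop built on a pthread condition variable; msg_done stands in for MSGF_DONE and none of the names belong to the LWKT API.

/*
 * Userland analogy of the wait-until-done loop (hypothetical names,
 * pthread_cond_wait() standing in for lwkt_sleep()/MSGPORTF_WAITING).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t	port_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	port_cv = PTHREAD_COND_INITIALIZER;
static bool		msg_done;

/* Caller side: block until the "reply" has been posted. */
static void
wait_for_done(void)
{
	pthread_mutex_lock(&port_lock);
	while (!msg_done)		/* re-check the flag after every wakeup */
		pthread_cond_wait(&port_cv, &port_lock);
	pthread_mutex_unlock(&port_lock);
}

/* Replier side: mark the message done and wake the waiter. */
static void *
post_done(void *arg)
{
	(void)arg;
	sleep(1);			/* pretend the reply takes a while */
	pthread_mutex_lock(&port_lock);
	msg_done = true;
	pthread_cond_signal(&port_cv);
	pthread_mutex_unlock(&port_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t replier;

	pthread_create(&replier, NULL, post_done, NULL);
	wait_for_done();
	printf("message done\n");
	pthread_join(replier, NULL);
	return (0);
}
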
Example #4
static void
txpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, -1);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
Example #5
static void
txpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again.
		 */
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}
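
pending_polls counts the polling ticks that are still outstanding: the polling clock callback, which is not shown here, presumably increments it once per tick, and each rxpollmore/txpollmore handler consumes one. If the counter is still non-zero after the decrement, the last cycle overran one or more hardclock ticks and the handler restarts processing right away. The fragment below simulates that bookkeeping with hypothetical standalone counters.

/*
 * Simulation of the pending_polls bookkeeping (hypothetical functions,
 * not part of if_poll.c).
 */
#include <stdio.h>

static unsigned int pending_polls;	/* outstanding polling ticks */
static unsigned int lost_polls;		/* ticks that arrived while still busy */

/* Clock side: called once per polling tick. */
static void
tick(void)
{
	if (pending_polls++ > 0)
		lost_polls++;		/* previous cycle had not finished yet */
}

/* Handler side: called at the end of each polling cycle. */
static int
pollmore(void)
{
	pending_polls--;
	return (pending_polls != 0);	/* non-zero: restart processing */
}

int
main(void)
{
	tick();					/* normal tick */
	printf("restart: %d\n", pollmore());	/* 0: we are done */

	tick();
	tick();					/* long cycle: a second tick arrives */
	printf("restart: %d\n", pollmore());	/* 1: missed a tick, restart */
	printf("restart: %d\n", pollmore());	/* 0: caught up */
	printf("lost: %u\n", lost_polls);	/* 1 */
	return (0);
}
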
Example #6
/*
 * lwkt_thread_getport()
 *
 *	Retrieve the next message from the port or NULL if no messages
 *	are ready.
 */
static
void *
lwkt_thread_getport(lwkt_port_t port)
{
    lwkt_msg_t msg;

    KKASSERT(port->mpu_td == curthread);

    crit_enter_quick(port->mpu_td);
    if ((msg = _lwkt_pollmsg(port)) != NULL)
        _lwkt_pullmsg(port, msg);
    crit_exit_quick(port->mpu_td);
    return(msg);
}
Example #7
/*
 * lwkt_thread_dropmsg() - Backend to lwkt_dropmsg()
 *
 * This function may _only_ be used when the caller is running in the same
 * thread as the message's target port owner thread.
 */
static int
lwkt_thread_dropmsg(lwkt_port_t port, lwkt_msg_t msg)
{
    int error;

    KASSERT(port->mpu_td == curthread,
            ("message could only be dropped in the same thread "
             "as the message target port thread"));
    crit_enter_quick(port->mpu_td);
    if ((msg->ms_flags & (MSGF_REPLY|MSGF_QUEUED)) == MSGF_QUEUED) {
        _lwkt_pullmsg(port, msg);
        atomic_set_int(&msg->ms_flags, MSGF_DONE);
        error = 0;
    } else {
        error = ENOENT;
    }
    crit_exit_quick(port->mpu_td);

    return (error);
}
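
The check (ms_flags & (MSGF_REPLY | MSGF_QUEUED)) == MSGF_QUEUED tests two bits in a single comparison: MSGF_QUEUED must be set and MSGF_REPLY must be clear, i.e. the message is still queued on the target port and has not been replied. The polling handlers above use the same idiom with IFF_RUNNING | IFF_NPOLLING, except there the masked value is compared against both bits, so both must be set. A tiny standalone illustration with made-up flag values:

/* Illustration of the mask-and-compare flag test (made-up flag values). */
#include <assert.h>

#define F_REPLY		0x0001
#define F_QUEUED	0x0002

int
main(void)
{
	unsigned int flags;

	flags = F_QUEUED;		/* queued, not replied: droppable */
	assert((flags & (F_REPLY | F_QUEUED)) == F_QUEUED);

	flags = F_QUEUED | F_REPLY;	/* queued on the reply port: not droppable */
	assert((flags & (F_REPLY | F_QUEUED)) != F_QUEUED);

	flags = 0;			/* neither bit set: not droppable either */
	assert((flags & (F_REPLY | F_QUEUED)) != F_QUEUED);
	return (0);
}
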
Example #8
/*
 * Install the TLS.
 *
 * It shouldn't be possible for a preemptive thread switch to do anything
 * more than set gd_user_fs and wrmsr for us.  Even though there is a window
 * where gd_user_fs/gd_user_gs do not match the MSRs, no preemptive thread
 * switch should ever switch to any heavyweight thread other than our own.
 *
 * Still, use a critical section to be safe.
 *
 * MPSAFE
 */
void
set_user_TLS(void)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;

	crit_enter_quick(td);
	td->td_pcb->pcb_fsbase = (register_t)td->td_tls.info[0].base;
	td->td_pcb->pcb_gsbase = (register_t)td->td_tls.info[1].base;
	if (gd->gd_user_fs != td->td_pcb->pcb_fsbase) {
		gd->gd_user_fs = td->td_pcb->pcb_fsbase;
		wrmsr(MSR_FSBASE, gd->gd_user_fs);
	}
	if (gd->gd_user_gs != td->td_pcb->pcb_gsbase) {
		gd->gd_user_gs = td->td_pcb->pcb_gsbase;
		wrmsr(MSR_KGSBASE, gd->gd_user_gs);
	}
	clear_quickret();
	crit_exit_quick(td);
}
Example #9
/*
 * lwkt_thread_waitport()
 *
 *	Wait for a new message to be available on the port.  We must be
 *	the only thread waiting on the port.  The port must be owned by the caller.
 */
static
void *
lwkt_thread_waitport(lwkt_port_t port, int flags)
{
    thread_t td = curthread;
    lwkt_msg_t msg;
    int error;

    KKASSERT(port->mpu_td == td);
    crit_enter_quick(td);
    while ((msg = _lwkt_pollmsg(port)) == NULL) {
        port->mp_flags |= MSGPORTF_WAITING;
        error = lwkt_sleep("waitport", flags);
        port->mp_flags &= ~MSGPORTF_WAITING;
        if (error)
            goto done;
    }
    _lwkt_pullmsg(port, msg);
done:
    crit_exit_quick(td);
    return(msg);
}
Example #10
/*
 * rxpollmore_handler and txpollmore_handler are called after other netisr's,
 * possibly scheduling another rxpoll_handler or txpoll_handler call, or
 * adapting the burst size for the next cycle.
 *
 * Fetching a large burst of packets from a single card at once is bad,
 * because the burst can take a long time to be processed completely,
 * leading to unfairness.  To reduce the problem, and to better account for
 * the time spent in network-related processing, we split the burst into
 * smaller chunks of fixed size, giving control to the other netisr's
 * between chunks.  This improves fairness, reduces livelock and accounts
 * for the work performed in low-level handling.
 */
static void
rxpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union ifpoll_time t;
	int kern_load;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Account for the time spent in the iopoll handlers during this tick */
	ifpoll_time_get(&t);
	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
	kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */
	io_ctx->kern_frac = kern_load;

	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Kernel load is too high; try to decrease the burst */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
	 * reduce the burst size to prevent this from happening again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}
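
The kern_load computation turns the time spent in the iopoll handlers during one tick into a 0..100 percentage. Assuming ifpoll_time_diff() returns microseconds (an assumption here; the unit is defined elsewhere in if_poll.c), one tick lasts 1,000,000 / pollhz microseconds, so multiplying the elapsed time by pollhz and dividing by 10,000 maps a full tick to 100. A minimal sketch of that arithmetic:

/*
 * Sketch of the kern_load percentage calculation, assuming the elapsed
 * time is in microseconds (hypothetical helper, not part of if_poll.c).
 */
#include <stdio.h>

static int
kern_load_pct(int elapsed_us, int pollhz)
{
	return ((elapsed_us * pollhz) / 10000);	/* 0..100 */
}

int
main(void)
{
	/* At pollhz = 1000 one tick lasts 1000us. */
	printf("%d\n", kern_load_pct(1000, 1000));	/* full tick -> 100 */
	printf("%d\n", kern_load_pct(500, 1000));	/* half a tick -> 50 */
	return (0);
}
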