/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 *
 * Walks every registered status-polling record and invokes the driver's
 * status_func while holding that record's serializer.  The message is
 * replied immediately so the next tick can schedule a new message while
 * this one is still being processed.
 */
static void
stpoll_handler(netmsg_t msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i;

	/*
	 * Status polling always runs on cpu0's netisr.
	 * (netisr_cpuport() is used for consistency with the other
	 * polling handlers in this file.)
	 */
	KKASSERT(&td->td_msgport == netisr_cpuport(0));

	crit_enter_quick(td);

	/* Reply ASAP, so a new message can be scheduled for the next tick */
	lwkt_replymsg(&msg->lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		/* Skip, rather than block, if the driver is busy */
		if (!lwkt_serialize_try(rec->serializer))
			continue;

		/* Only poll interfaces that are up and in polling mode */
		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp, st_ctx->pollhz);
		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}
/*
 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
 * appropriate, typically once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
rxpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->lmsg.u.ms_resultp;
	/* Must run on the netisr of the cpu this context is bound to */
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	/*
	 * Each pass consumes at most poll_each_burst cycles out of the
	 * remaining budget for this tick.
	 */
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		/* Skip, rather than block, if the driver is busy */
		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);
		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
/*
 * TX counterpart of rxpoll_handler: drain transmit work for every
 * registered interface.  The message is replied right away so a new
 * ISR may be scheduled while this handler runs.
 */
static void
txpoll_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx = msg->lmsg.u.ms_resultp;
	const int poll_flags = IFF_RUNNING | IFF_NPOLLING;
	int idx;

	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	for (idx = 0; idx < io_ctx->poll_handlers; ++idx) {
		const struct iopoll_rec *rec = &io_ctx->pr[idx];
		struct ifnet *ifp = rec->ifp;

		/* Never block on a busy driver; just try again next tick */
		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & poll_flags) == poll_flags)
			rec->poll_func(ifp, rec->arg, -1);
		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Briefly leave and re-enter the critical section so any
	 * pending higher-priority interrupt sources get a chance
	 * to run.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}