static int acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *sc) { struct netmsg_acpi_cst msg; netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, acpi_cst_cx_reprobe_cst_handler); msg.sc = sc; return lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0); }
static void acpi_cst_c3_bm_rld(struct acpi_cst_softc *sc) { struct netmsg_acpi_cst msg; netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, acpi_cst_c3_bm_rld_handler); msg.sc = sc; lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0); }
int so_pru_accept(struct socket *so, struct sockaddr **nam) { struct netmsg_pru_accept msg; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_accept); msg.nm_nam = nam; return lwkt_domsg(so->so_port, &msg.base.lmsg, 0); }
/*
 * Prepare to accept connections.
 *
 * Runs as a netmsg handler in the protocol thread.  Binds an ephemeral
 * port if needed, moves the tcpcb to LISTEN, and (SMP) installs the
 * inpcb into the wildcard hash on the other cpu before doing so locally.
 */
static void
tcp_usr_listen(netmsg_t msg)
{
	struct socket *so = msg->listen.base.nm_so;
	struct thread *td = msg->listen.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef SMP
	struct netmsg_inswildcard nm;
#endif

	COMMON_START(so, inp, 0);

	/* Nothing to do if we are already listening. */
	if (tp->t_flags & TF_LISTEN)
		goto out;

	/* Bind a local port if the caller has not done so. */
	if (inp->inp_lport == 0) {
		error = in_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	tp->t_state = TCPS_LISTEN;
	tp->t_flags |= TF_LISTEN;
	tp->tt_msg = NULL; /* Catch any invalid timer usage */

#ifdef SMP
	if (ncpus > 1) {
		/*
		 * We have to set the flag because we can't have other cpus
		 * messing with our inp's flags.
		 */
		KASSERT(!(inp->inp_flags & INP_CONNECTED),
		    ("already on connhash\n"));
		KASSERT(!(inp->inp_flags & INP_WILDCARD),
		    ("already on wildcardhash\n"));
		KASSERT(!(inp->inp_flags & INP_WILDCARD_MP),
		    ("already on MP wildcardhash\n"));
		inp->inp_flags |= INP_WILDCARD_MP;

		/* This must be executed on cpu0's port with cpu0's pcbinfo. */
		KKASSERT(so->so_port == cpu_portfn(0));
		KKASSERT(&curthread->td_msgport == cpu_portfn(0));
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);

		/*
		 * Synchronously install this inpcb into the wildcard hash
		 * on the other cpu(s) via cpu1's port before inserting it
		 * locally below.
		 */
		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, in_pcbinswildcardhash_handler);
		nm.nm_inp = inp;
		lwkt_domsg(cpu_portfn(1), &nm.base.lmsg, 0);
	}
#endif
	in_pcbinswildcardhash(inp);
	COMMON_END(PRU_LISTEN);
}
int so_pru_shutdown(struct socket *so) { struct netmsg_pru_shutdown msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_shutdown); error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pru_listen(struct socket *so, struct thread *td) { struct netmsg_pru_listen msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_listen); msg.nm_td = td; /* used only for prison_ip() XXX JH */ error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pru_sense(struct socket *so, struct stat *sb) { struct netmsg_pru_sense msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_sense); msg.nm_stat = sb; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pru_sockaddr(struct socket *so, struct sockaddr **nam) { struct netmsg_pru_sockaddr msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_sockaddr); msg.nm_nam = nam; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return, so it may be referenced safely
 * from our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu, which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
	struct netmsg_so_notify_abort amsg;

	/* Already replied or completed: nothing to abort. */
	if (lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY))
		return;

	netmsg_init(&amsg.base, NULL, &curthread->td_msgport, 0,
	    netmsg_so_notify_abort);
	amsg.nm_notifymsg = (void *)lmsg;
	lwkt_domsg(lmsg->ms_target_port, &amsg.base.lmsg, 0);
}
int so_pr_ctloutput(struct socket *so, struct sockopt *sopt) { struct netmsg_pr_ctloutput msg; int error; KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_ctloutput); msg.nm_sopt = sopt; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
/* * NOTE: If the target port changes the bind operation will deal with it. */ int so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { struct netmsg_pru_bind msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_bind); msg.nm_nam = nam; msg.nm_td = td; /* used only for prison_ip() */ error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai) { struct netmsg_pru_attach msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_attach); msg.nm_proto = proto; msg.nm_ai = ai; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags) { struct netmsg_pru_rcvoob msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_rcvoob); msg.nm_m = m; msg.nm_flags = flags; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pru_connect2(struct socket *so1, struct socket *so2) { struct netmsg_pru_connect2 msg; int error; netmsg_init(&msg.base, so1, &curthread->td_msgport, 0, so1->so_proto->pr_usrreqs->pru_connect2); msg.nm_so1 = so1; msg.nm_so2 = so2; error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0); return (error); }
/*
 * Enable npoll-style polling on an interface.
 *
 * Interlocks with IFF_NPOLLING under the ifnet serializer, asks the
 * driver to fill in the ifpoll_info, then synchronously dispatches the
 * registration to netisr0.  On dispatch failure the registration is
 * rolled back via ifpoll_deregister().
 *
 * Returns 0 on success, EOPNOTSUPP if the driver has no if_npoll
 * method, EBUSY if polling is already enabled, or the dispatch error.
 */
int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info *info;
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */
	ifnet_serialize_all(ifp);
	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		kfree(info, M_TEMP);
		return EBUSY;
	}
	info->ifpi_ifp = ifp;
	ifp->if_flags |= IFF_NPOLLING;
	/* Driver fills in the polling handlers/cpu binding. */
	ifp->if_npoll(ifp, info);
	KASSERT(ifp->if_npoll_cpuid >= 0, ("invalid npoll cpuid"));
	ifnet_deserialize_all(ifp);

	/* Hand the collected info to netisr0 synchronously. */
	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
	    ifpoll_register_handler);
	nmsg.lmsg.u.ms_resultp = info;
	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (error) {
		/* Roll back the IFF_NPOLLING registration. */
		if (!ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
			    "ifpoll_deregister failed!\n");
		}
	}

	/* info was only needed for the handshake; always free it. */
	kfree(info, M_TEMP);
	return error;
}
int so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { struct netmsg_pru_connect msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_connect); msg.nm_nam = nam; msg.nm_td = td; msg.nm_m = NULL; msg.nm_flags = 0; msg.nm_reconnect = 0; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
/* * NOTE: If the target port changes the implied connect will deal with it. */ int so_pru_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *td) { struct netmsg_pru_send msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_send); msg.nm_flags = flags; msg.nm_m = m; msg.nm_addr = addr; msg.nm_control = control; msg.nm_td = td; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
/* * Protocol control input, typically via icmp. * * If the protocol pr_ctlport is not NULL we call it to figure out the * protocol port. If NULL is returned we can just return, otherwise * we issue a netmsg to call pr_ctlinput in the proper thread. * * This must be done synchronously as arg and/or extra may point to * temporary data. */ void so_pr_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra) { struct netmsg_pr_ctlinput msg; lwkt_port_t port; int cpuid; port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid); if (port == NULL) return; netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, pr->pr_ctlinput); msg.nm_cmd = cmd; msg.nm_direct = 0; msg.nm_arg = arg; msg.nm_extra = extra; lwkt_domsg(port, &msg.base.lmsg, 0); }
/*
 * Prepare an IPv6 TCP socket to accept connections.
 *
 * Runs as a netmsg handler in netisr0.  Binds an ephemeral port if
 * needed, moves the tcpcb to LISTEN, then installs the inpcb into the
 * wildcard hash on the other cpu(s) before doing so locally.
 */
static void
tcp6_usr_listen(netmsg_t msg)
{
	struct socket *so = msg->listen.base.nm_so;
	struct thread *td = msg->listen.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct netmsg_inswildcard nm;

	COMMON_START(so, inp, 0);

	/* Nothing to do if we are already listening. */
	if (tp->t_flags & TF_LISTEN)
		goto out;

	/* Bind a local port if the caller has not done so. */
	if (inp->inp_lport == 0) {
		error = in6_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	tp->t_state = TCPS_LISTEN;
	tp->t_flags |= TF_LISTEN;
	tp->tt_msg = NULL; /* Catch any invalid timer usage */

	if (ncpus2 > 1) {
		/*
		 * Put this inpcb into wildcard hash on other cpus.
		 */
		KKASSERT(so->so_port == netisr_cpuport(0));
		ASSERT_IN_NETISR(0);
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);
		ASSERT_INP_NOTINHASH(inp);

		/* Synchronous dispatch via cpu1's netisr port. */
		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, in_pcbinswildcardhash_handler);
		nm.nm_inp = inp;
		lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);
	}
	in_pcbinswildcardhash(inp);
	COMMON_END(PRU_LISTEN);
}
static int sysctl_eachburst(SYSCTL_HANDLER_ARGS) { struct iopoll_ctx *io_ctx = arg1; struct iopoll_sysctl_netmsg msg; uint32_t each_burst; int error; each_burst = io_ctx->poll_each_burst; error = sysctl_handle_int(oidp, &each_burst, 0, req); if (error || req->newptr == NULL) return error; netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, sysctl_eachburst_handler); msg.base.lmsg.u.ms_result = each_burst; msg.ctx = io_ctx; return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0); }
int ip_fw_sockopt(struct sockopt *sopt) { struct netmsg_base smsg; /* * Disallow modifications in really-really secure mode, but still allow * the logging counters to be reset. */ if (sopt->sopt_name == IP_FW_ADD || (sopt->sopt_dir == SOPT_SET && sopt->sopt_name != IP_FW_RESETLOG)) { if (securelevel >= 3) return EPERM; } netmsg_init(&smsg, NULL, &curthread->td_msgport, 0, ip_fw_sockopt_dispatch); smsg.lmsg.u.ms_resultp = sopt; return lwkt_domsg(IPFW_CFGPORT, &smsg.lmsg, 0); }
/* * Master enable. */ static int sysctl_polling(SYSCTL_HANDLER_ARGS) { struct pollctx *pctx = arg1; struct netmsg_base msg; lwkt_port_t port; int error, enabled; enabled = pctx->polling_enabled; error = sysctl_handle_int(oidp, &enabled, 0, req); if (error || req->newptr == NULL) return error; netmsg_init(&msg, NULL, &curthread->td_msgport, 0, poll_sysctl_polling); msg.lmsg.u.ms_result = enabled; port = cpu_portfn(pctx->poll_cpuid); lwkt_domsg(port, &msg.lmsg, 0); return 0; }
static int sysctl_regfrac(SYSCTL_HANDLER_ARGS) { struct pollctx *pctx = arg1; struct netmsg_base msg; lwkt_port_t port; uint32_t reg_frac; int error; reg_frac = pctx->reg_frac; error = sysctl_handle_int(oidp, ®_frac, 0, req); if (error || req->newptr == NULL) return error; netmsg_init(&msg, NULL, &curthread->td_msgport, 0, poll_sysctl_regfrac); msg.lmsg.u.ms_result = reg_frac; port = netisr_portfn(pctx->poll_cpuid); lwkt_domsg(port, &msg.lmsg, 0); return 0; }
static int sysctl_eachburst(SYSCTL_HANDLER_ARGS) { struct pollctx *pctx = arg1; struct netmsg_base msg; lwkt_port_t port; uint32_t each_burst; int error; each_burst = pctx->poll_each_burst; error = sysctl_handle_int(oidp, &each_burst, 0, req); if (error || req->newptr == NULL) return error; netmsg_init(&msg, NULL, &curthread->td_msgport, 0, poll_sysctl_eachburst); msg.lmsg.u.ms_result = each_burst; port = cpu_portfn(pctx->poll_cpuid); lwkt_domsg(port, &msg.lmsg, 0); return 0; }
static int sysctl_burstmax(SYSCTL_HANDLER_ARGS) { struct iopoll_ctx *io_ctx = arg1; struct iopoll_sysctl_netmsg msg; uint32_t burst_max; int error; burst_max = io_ctx->poll_burst_max; error = sysctl_handle_int(oidp, &burst_max, 0, req); if (error || req->newptr == NULL) return error; if (burst_max < MIN_IOPOLL_BURST_MAX) burst_max = MIN_IOPOLL_BURST_MAX; else if (burst_max > MAX_IOPOLL_BURST_MAX) burst_max = MAX_IOPOLL_BURST_MAX; netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, sysctl_burstmax_handler); msg.base.lmsg.u.ms_result = burst_max; msg.ctx = io_ctx; return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0); }
/*
 * Prepare to accept connections.
 *
 * Runs as a netmsg handler.  All listen sockets must live in netisr0:
 * if this handler runs on another cpu the inpcb is unlinked from the
 * local tcbinfo and the message is forwarded to netisr0 with
 * PRUL_RELINK set, where it is relinked and processed.
 */
static void
tcp_usr_listen(netmsg_t msg)
{
	struct socket *so = msg->listen.base.nm_so;
	struct thread *td = msg->listen.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct netmsg_inswildcard nm;
	lwkt_port_t port0 = netisr_cpuport(0);

	COMMON_START(so, inp, 0);

	/* Not on netisr0: unlink locally and forward the message there. */
	if (&curthread->td_msgport != port0) {
		lwkt_msg_t lmsg = &msg->listen.base.lmsg;

		KASSERT((msg->listen.nm_flags & PRUL_RELINK) == 0,
		    ("already asked to relink"));

		in_pcbunlink(so->so_pcb, &tcbinfo[mycpuid]);
		msg->listen.nm_flags |= PRUL_RELINK;

		/* See the related comment in tcp_connect() */
		lwkt_setmsg_receipt(lmsg, tcp_sosetport);
		lwkt_forwardmsg(port0, lmsg);
		/* msg invalid now */
		return;
	}
	KASSERT(so->so_port == port0, ("so_port is not netisr0"));

	/* Complete the relink started by the forwarding cpu above. */
	if (msg->listen.nm_flags & PRUL_RELINK) {
		msg->listen.nm_flags &= ~PRUL_RELINK;
		in_pcblink(so->so_pcb, &tcbinfo[mycpuid]);
	}
	KASSERT(inp->inp_pcbinfo == &tcbinfo[0], ("pcbinfo is not tcbinfo0"));

	/* Nothing to do if we are already listening. */
	if (tp->t_flags & TF_LISTEN)
		goto out;

	/* Bind a local port if the caller has not done so. */
	if (inp->inp_lport == 0) {
		error = in_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	tp->t_state = TCPS_LISTEN;
	tp->t_flags |= TF_LISTEN;
	tp->tt_msg = NULL; /* Catch any invalid timer usage */

	if (ncpus2 > 1) {
		/*
		 * Put this inpcb into wildcard hash on other cpus.
		 */
		ASSERT_INP_NOTINHASH(inp);

		/* Synchronous dispatch via cpu1's netisr port. */
		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, in_pcbinswildcardhash_handler);
		nm.nm_inp = inp;
		lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);
	}
	in_pcbinswildcardhash(inp);
	COMMON_END(PRU_LISTEN);
}