/*
 * Protocol control output (setsockopt/getsockopt) dispatch.
 *
 * For SOPT_SET requests on protocols that provide pr_ctloutmsg, try the
 * fast path: pr_ctloutmsg() builds a standalone message which is sent
 * asynchronously and auto-freed on reply (netisr_afree_rport), so the
 * caller does not block.  Otherwise fall back to a synchronous
 * lwkt_domsg() using an on-stack message.
 */
int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct netmsg_pr_ctloutput msg;
	int error;

	/* sopt_val, if set, must be a kernel virtual address */
	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));

	if (sopt->sopt_dir == SOPT_SET && so->so_proto->pr_ctloutmsg != NULL) {
		struct netmsg_pr_ctloutput *amsg;

		/* Fast path: asynchronous pr_ctloutput */
		amsg = so->so_proto->pr_ctloutmsg(sopt);
		if (amsg != NULL) {
			netmsg_init(&amsg->base, so, &netisr_afree_rport, 0,
			    so->so_proto->pr_ctloutput);
			/* nm_flags and nm_sopt are setup by pr_ctloutmsg */
			lwkt_sendmsg(so->so_port, &amsg->base.lmsg);
			return 0;
		}
		/* FALLTHROUGH */
	}

	/* Slow path: synchronous dispatch with an on-stack message */
	netmsg_init(&msg.base, so, &curthread->td_msgport, 0,
	    so->so_proto->pr_ctloutput);
	msg.nm_flags = 0;
	msg.nm_sopt = sopt;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
/*
 * Detach an interface from polling mode.
 *
 * IFF_NPOLLING is cleared under full interface serialization, then the
 * deregistration proper is performed synchronously in netisr0.  On
 * success the driver's if_npoll() is invoked with a NULL info pointer
 * to tell it polling has stopped.
 */
int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	int error;

	/* Device does not support polling at all */
	if (ifp->if_npoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		/* Not currently in polling mode */
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
	    ifpoll_deregister_handler);
	nmsg.lmsg.u.ms_resultp = ifp;

	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (!error) {
		/* Notify the driver that polling has been disabled */
		ifnet_serialize_all(ifp);
		ifp->if_npoll(ifp, NULL);
		KASSERT(ifp->if_npoll_cpuid < 0, ("invalid npoll cpuid"));
		ifnet_deserialize_all(ifp);
	}
	return error;
}
static int sysctl_burstmax(SYSCTL_HANDLER_ARGS) { struct iopoll_ctx *io_ctx = arg1; struct iopoll_sysctl_netmsg msg; uint32_t burst_max; int error; burst_max = io_ctx->poll_burst_max; error = sysctl_handle_int(oidp, &burst_max, 0, req); if (error || req->newptr == NULL) return error; if (burst_max < MIN_IOPOLL_BURST_MAX) burst_max = MIN_IOPOLL_BURST_MAX; else if (burst_max > MAX_IOPOLL_BURST_MAX) burst_max = MAX_IOPOLL_BURST_MAX; netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, sysctl_burstmax_handler); msg.base.lmsg.u.ms_result = burst_max; msg.ctx = io_ctx; return lwkt_domsg(netisr_cpuport(io_ctx->poll_cpuid), &msg.base.lmsg, 0); }
static int sysctl_burstmax(SYSCTL_HANDLER_ARGS) { struct pollctx *pctx = arg1; struct netmsg_base msg; lwkt_port_t port; uint32_t burst_max; int error; burst_max = pctx->poll_burst_max; error = sysctl_handle_int(oidp, &burst_max, 0, req); if (error || req->newptr == NULL) return error; if (burst_max < MIN_POLL_BURST_MAX) burst_max = MIN_POLL_BURST_MAX; else if (burst_max > MAX_POLL_BURST_MAX) burst_max = MAX_POLL_BURST_MAX; netmsg_init(&msg, NULL, &curthread->td_msgport, 0, poll_sysctl_burstmax); msg.lmsg.u.ms_result = burst_max; port = cpu_portfn(pctx->poll_cpuid); lwkt_domsg(port, &msg.lmsg, 0); return 0; }
int so_pru_attach_fast(struct socket *so, int proto, struct pru_attach_info *ai) { struct netmsg_pru_attach *msg; int error; error = so->so_proto->pr_usrreqs->pru_preattach(so, proto, ai); if (error) return error; msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_NULLOK); if (msg == NULL) { /* * Fail to allocate message; fallback to * synchronized pru_attach. */ return so_pru_attach(so, proto, NULL /* postattach */); } netmsg_init(&msg->base, so, &netisr_afree_rport, 0, so->so_proto->pr_usrreqs->pru_attach); msg->nm_proto = proto; msg->nm_ai = NULL; /* postattach */ lwkt_sendmsg(so->so_port, &msg->base.lmsg); return 0; }
/*
 * Asynchronous pru_send: queue the send to the socket's protocol
 * thread without waiting for a reply (PRUS_NOREPLY).
 *
 * The netmsg is embedded in the mbuf header itself (mh_sndmsg) so no
 * separate allocation is needed.  The destination address, if any, is
 * duplicated because the caller's copy may be transient; PRUS_FREEADDR
 * tells the handler to free the duplicate.
 */
void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	/* Protocol must explicitly advertise async send support */
	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	flags |= PRUS_NOREPLY;

	if (addr0 != NULL) {
		/* Duplicate the address; mark it for the handler to free */
		addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK);
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}

	/* Reuse the netmsg embedded in the mbuf header */
	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport, 0,
	    so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
/* * Set the polling frequency */ static int sysctl_pollhz(SYSCTL_HANDLER_ARGS) { struct pollctx *pctx = arg1; struct netmsg_base msg; lwkt_port_t port; int error, phz; phz = pctx->pollhz; error = sysctl_handle_int(oidp, &phz, 0, req); if (error || req->newptr == NULL) return error; if (phz <= 0) return EINVAL; else if (phz > DEVICE_POLLING_FREQ_MAX) phz = DEVICE_POLLING_FREQ_MAX; netmsg_init(&msg, NULL, &curthread->td_msgport, 0, poll_sysctl_pollhz); msg.lmsg.u.ms_result = phz; port = cpu_portfn(pctx->poll_cpuid); lwkt_domsg(port, &msg.lmsg, 0); return 0; }
/*
 * Directly invoke a protocol's pr_ctlinput on the current cpu,
 * bypassing message queueing.  The event is only dispatched here if it
 * targets either all cpus (cpuid == ncpus) or this cpu; otherwise it
 * is dropped silently.
 *
 * The on-stack message has REPLY/DONE cleared and MSGF_SYNC set so it
 * looks like a pending synchronous request; the handler's reply path
 * then completes it in place (netisr_adone_rport absorbs the reply).
 */
void
so_pr_ctlinput_direct(struct protosw *pr, int cmd, struct sockaddr *arg,
    void *extra)
{
	struct netmsg_pr_ctlinput msg;
	netisr_fn_t func;
	lwkt_port_t port;
	int cpuid;

	/* Resolve the target port/cpu for this control event */
	port = so_pr_ctlport(pr, cmd, arg, extra, &cpuid);
	if (port == NULL)
		return;
	/* Only dispatch if the event is for all cpus or for this cpu */
	if (cpuid != ncpus && cpuid != mycpuid)
		return;

	func = pr->pr_ctlinput;
	netmsg_init(&msg.base, NULL, &netisr_adone_rport, 0, func);
	/* Present the message as a pending synchronous request */
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_direct = 1;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	func((netmsg_t)&msg);
	/* Handler must have completed the message synchronously */
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}
/*
 * tcp6_usr_listen: move a TCP/IPv6 socket into the LISTEN state.
 *
 * Runs as a netmsg handler in the protocol thread.  COMMON_START /
 * COMMON_END are the shared prologue/epilogue of the tcp usrreq
 * handlers (they establish inp/tp and reply to the message).
 *
 * If the socket is not yet bound, it is bound to an ephemeral port
 * first.  On SMP the inpcb is inserted into the other cpus' wildcard
 * hashes synchronously (message sent to cpu 1; presumably the handler
 * covers the remaining cpus -- see in_pcbinswildcardhash_handler)
 * before the local cpu-0 insertion.
 */
static void
tcp6_usr_listen(netmsg_t msg)
{
	struct socket *so = msg->listen.base.nm_so;
	struct thread *td = msg->listen.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef SMP
	struct netmsg_inswildcard nm;
#endif

	COMMON_START(so, inp, 0);

	/* Already listening: nothing to do */
	if (tp->t_flags & TF_LISTEN)
		goto out;

	if (inp->inp_lport == 0) {
		/*
		 * Not bound yet; set the address-family visibility flags
		 * and bind to an ephemeral port.
		 */
		if (!(inp->inp_flags & IN6P_IPV6_V6ONLY))
			inp->inp_vflag |= INP_IPV4;
		else
			inp->inp_vflag &= ~INP_IPV4;
		error = in6_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	tp->t_state = TCPS_LISTEN;
	tp->t_flags |= TF_LISTEN;
	tp->tt_msg = NULL; /* Catch any invalid timer usage */

#ifdef SMP
	if (ncpus > 1) {
		/*
		 * We have to set the flag because we can't have other cpus
		 * messing with our inp's flags.
		 */
		KASSERT(!(inp->inp_flags & INP_CONNECTED),
		    ("already on connhash\n"));
		KASSERT(!(inp->inp_flags & INP_WILDCARD),
		    ("already on wildcardhash\n"));
		KASSERT(!(inp->inp_flags & INP_WILDCARD_MP),
		    ("already on MP wildcardhash\n"));
		inp->inp_flags |= INP_WILDCARD_MP;

		/* Listen sockets are owned by cpu 0's protocol thread */
		KKASSERT(so->so_port == cpu_portfn(0));
		KKASSERT(&curthread->td_msgport == cpu_portfn(0));
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);

		/*
		 * Synchronously propagate the inp onto the remote
		 * wildcard hashes, starting at cpu 1.
		 */
		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, in_pcbinswildcardhash_handler);
		nm.nm_inp = inp;
		lwkt_domsg(cpu_portfn(1), &nm.base.lmsg, 0);
	}
#endif
	/* Finally insert into the local (cpu 0) wildcard hash */
	in_pcbinswildcardhash(inp);
	COMMON_END(PRU_LISTEN);
}
void so_pru_sync(struct socket *so) { struct netmsg_base msg; netmsg_init(&msg, so, &curthread->td_msgport, 0, netmsg_sync_handler); lwkt_domsg(so->so_port, &msg.lmsg, 0); }
/* * Abort a socket and free it. Called from soabort() only. soabort() * got a ref on the socket which we must free on reply. */ void so_pru_abort(struct socket *so) { struct netmsg_pru_abort msg; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_abort); (void)lwkt_domsg(so->so_port, &msg.base.lmsg, 0); sofree(msg.base.nm_so); }
/*
 * Abort a socket and free it, asynchronously.  Called from soaborta()
 * only.  The message is heap-allocated; the auto-free reply port
 * (netisr_afree_free_so_rport) releases both the message and the
 * socket reference soaborta() obtained.
 */
void
so_pru_aborta(struct socket *so)
{
	struct netmsg_pru_abort *amsg;

	amsg = kmalloc(sizeof(*amsg), M_LWKTMSG, M_WAITOK | M_ZERO);
	netmsg_init(&amsg->base, so, &netisr_afree_free_so_rport, 0,
	    so->so_proto->pr_usrreqs->pru_abort);
	lwkt_sendmsg(so->so_port, &amsg->base.lmsg);
}
int so_pru_shutdown(struct socket *so) { struct netmsg_pru_shutdown msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_shutdown); error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pru_accept(struct socket *so, struct sockaddr **nam) { struct netmsg_pru_accept msg; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_accept); msg.nm_nam = nam; return lwkt_domsg(so->so_port, &msg.base.lmsg, 0); }
static void acpi_cst_c3_bm_rld(struct acpi_cst_softc *sc) { struct netmsg_acpi_cst msg; netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, acpi_cst_c3_bm_rld_handler); msg.sc = sc; lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0); }
static int acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *sc) { struct netmsg_acpi_cst msg; netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, acpi_cst_cx_reprobe_cst_handler); msg.sc = sc; return lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0); }
int so_pru_listen(struct socket *so, struct thread *td) { struct netmsg_pru_listen msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_listen); msg.nm_td = td; /* used only for prison_ip() XXX JH */ error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
/*
 * Invoke pru_disconnect directly on the current cpu instead of
 * queueing it to the protocol thread.  The on-stack message has
 * REPLY/DONE cleared and MSGF_SYNC set so it looks like a pending
 * synchronous request; the handler's reply completes it in place
 * (netisr_adone_rport absorbs the reply).
 */
void
so_pru_disconnect_direct(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	/* Present the message as a pending synchronous request */
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	/* Handler must have completed the message synchronously */
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}
int so_pru_sockaddr(struct socket *so, struct sockaddr **nam) { struct netmsg_pru_sockaddr msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_sockaddr); msg.nm_nam = nam; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
	struct netmsg_so_notify_abort msg;

	/*
	 * Only issue the abort if the target has not already replied or
	 * completed the message (this test can race; see above).
	 */
	if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0,
		    netmsg_so_notify_abort);
		msg.nm_notifymsg = (void *)lmsg;
		lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
	}
}
int so_pru_sense(struct socket *so, struct stat *sb) { struct netmsg_pru_sense msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_sense); msg.nm_stat = sb; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags) { struct netmsg_pru_rcvoob msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_rcvoob); msg.nm_m = m; msg.nm_flags = flags; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
/*
 * Abort a socket and free it.  Called from soabort_oncpu() only.
 * Caller must make sure that the current CPU is inpcb's owner CPU.
 *
 * pru_abort is invoked directly on this cpu: the on-stack message has
 * REPLY/DONE cleared and MSGF_SYNC set so the handler's reply path
 * completes it in place (netisr_adone_rport absorbs the reply).  The
 * socket reference taken by soabort_oncpu() is released afterwards.
 */
void
so_pru_abort_oncpu(struct socket *so)
{
	struct netmsg_pru_abort msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	/* Present the message as a pending synchronous request */
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	/* Handler must have completed the message synchronously */
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	/* Release the reference obtained by the caller */
	sofree(msg.base.nm_so);
}
int so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai) { struct netmsg_pru_attach msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_attach); msg.nm_proto = proto; msg.nm_ai = ai; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pr_ctloutput(struct socket *so, struct sockopt *sopt) { struct netmsg_pr_ctloutput msg; int error; KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_ctloutput); msg.nm_sopt = sopt; error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
/* * NOTE: If the target port changes the bind operation will deal with it. */ int so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { struct netmsg_pru_bind msg; int error; netmsg_init(&msg.base, so, &curthread->td_msgport, 0, so->so_proto->pr_usrreqs->pru_bind); msg.nm_nam = nam; msg.nm_td = td; /* used only for prison_ip() */ error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0); return (error); }
int so_pru_connect2(struct socket *so1, struct socket *so2) { struct netmsg_pru_connect2 msg; int error; netmsg_init(&msg.base, so1, &curthread->td_msgport, 0, so1->so_proto->pr_usrreqs->pru_connect2); msg.nm_so1 = so1; msg.nm_so2 = so2; error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0); return (error); }
/*
 * Enable multicast routing
 *
 * Only a raw ICMPv6 socket may register as the IPv6 multicast router
 * socket, and only one may be registered at a time.  On success the
 * forwarding cache and expiry bookkeeping are reset and the periodic
 * upcall-expiry callout is started.
 */
static int
ip6_mrouter_init(struct socket *so, struct mbuf *m, int cmd)
{
	int *v;

	ASSERT_NETISR0;

#ifdef MRT6DEBUG
	if (mrt6debug)
		log(LOG_DEBUG,
		    "ip6_mrouter_init: so_type = %d, pr_protocol = %d\n",
		    so->so_type, so->so_proto->pr_protocol);
#endif

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_ICMPV6)
		return EOPNOTSUPP;

	/*
	 * NOTE(review): the length check uses sizeof(int *) but the
	 * payload is read as an int below -- looks like a historical BSD
	 * quirk; confirm what callers actually pass before changing it.
	 */
	if (!m || (m->m_len != sizeof(int *)))
		return ENOPROTOOPT;

	v = mtod(m, int *);
	if (*v != 1)
		return ENOPROTOOPT;

	/* Only one multicast router socket at a time */
	if (ip6_mrouter != NULL)
		return EADDRINUSE;

	ip6_mrouter = so;
	ip6_mrouter_ver = cmd;

	/* Reset the forwarding cache and expiry bookkeeping */
	bzero((caddr_t)mf6ctable, sizeof(mf6ctable));
	bzero((caddr_t)n6expire, sizeof(n6expire));

	pim6 = 0;/* used for stubbing out/in pim stuff */

	/* Start the periodic upcall-expiry callout */
	callout_init_mp(&expire_upcalls_ch);
	netmsg_init(&expire_upcalls_nmsg, NULL, &netisr_adone_rport,
	    MSGF_PRIORITY | MSGF_DROPABLE, expire_upcalls_dispatch);
	callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT,
	    expire_upcalls, NULL);

#ifdef MRT6DEBUG
	if (mrt6debug)
		log(LOG_DEBUG, "ip6_mrouter_init\n");
#endif

	return 0;
}
int ifpoll_register(struct ifnet *ifp) { struct ifpoll_info *info; struct netmsg_base nmsg; int error; if (ifp->if_npoll == NULL) { /* Device does not support polling */ return EOPNOTSUPP; } info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO); /* * Attempt to register. Interlock with IFF_NPOLLING. */ ifnet_serialize_all(ifp); if (ifp->if_flags & IFF_NPOLLING) { /* Already polling */ ifnet_deserialize_all(ifp); kfree(info, M_TEMP); return EBUSY; } info->ifpi_ifp = ifp; ifp->if_flags |= IFF_NPOLLING; ifp->if_npoll(ifp, info); KASSERT(ifp->if_npoll_cpuid >= 0, ("invalid npoll cpuid")); ifnet_deserialize_all(ifp); netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, ifpoll_register_handler); nmsg.lmsg.u.ms_resultp = info; error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0); if (error) { if (!ifpoll_deregister(ifp)) { if_printf(ifp, "ifpoll_register: " "ifpoll_deregister failed!\n"); } } kfree(info, M_TEMP); return error; }
/*
 * Handle ICMP control input for UDP (netmsg handler).
 *
 * udp_get_inpnotify() maps the icmp command to a notify function and
 * may update ip and cpuid.  Three delivery modes:
 *  1. IP header present: exact inpcb lookup on this cpu only, notify
 *     the single matching pcb.
 *  2. Direct dispatch (nm_direct): notify all matching pcbs in this
 *     cpu's table only.
 *  3. Otherwise (must be in netisr0): hand off to cpu 0 via an
 *     auto-freed message for fan-out (udp_notifyall_oncpu).
 * The incoming message is always replied to before returning.
 */
void
udp_ctlinput(netmsg_t msg)
{
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	int cmd = msg->ctlinput.nm_cmd, cpuid;
	inp_notify_t notify;
	struct in_addr faddr;

	notify = udp_get_inpnotify(cmd, sa, &ip, &cpuid);
	if (notify == NULL)
		goto done;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (ip) {
		const struct udphdr *uh;
		struct inpcb *inp;

		/* Exact lookup: only the owning cpu acts on this event */
		if (cpuid != mycpuid)
			goto done;

		/* UDP header follows the IP header (ip_hl in 32-bit words) */
		uh = (const struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		inp = in_pcblookup_hash(&udbinfo[mycpuid], faddr, uh->uh_dport,
		    ip->ip_src, uh->uh_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL)
			notify(inp, inetctlerrmap[cmd]);
	} else if (msg->ctlinput.nm_direct) {
		/* Direct dispatch: only handle our own cpu's pcb table */
		if (cpuid != ncpus && cpuid != mycpuid)
			goto done;
		if (mycpuid >= ncpus2)
			goto done;

		in_pcbnotifyall(&udbinfo[mycpuid], faddr,
		    inetctlerrmap[cmd], notify);
	} else {
		struct netmsg_udp_notify *nm;

		/* Queued fan-out must originate in netisr0 */
		ASSERT_IN_NETISR(0);
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport, 0,
		    udp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = inetctlerrmap[cmd];
		nm->nm_notify = notify;
		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}