/*
 * Netmsg handler: apply the requested lowest C-state on the target cpu
 * and reply with the result.
 */
static void
acpi_cst_set_lowest_handler(netmsg_t msg)
{
	struct netmsg_acpi_cst *cmsg = (struct netmsg_acpi_cst *)msg;

	lwkt_replymsg(&cmsg->base.lmsg,
	    acpi_cst_set_lowest_oncpu(cmsg->sc, cmsg->val));
}
/*
 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
 * appropriate, typically once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
rxpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->lmsg.u.ms_resultp;
	/* Must run on the netisr cpu that owns this polling context. */
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP so a new poll message can be scheduled. */
	lwkt_replymsg(&msg->lmsg, 0);

	/* Nothing registered; bail out. */
	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick: reset the burst budget. */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	/*
	 * Spend at most poll_each_burst cycles of the remaining budget
	 * on this pass.
	 */
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
	    io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		/* Skip, rather than block on, a busy interface. */
		if (!lwkt_serialize_try(rec->serializer))
			continue;

		/* Only poll interfaces that are up and in polling mode. */
		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	/* Let iopollmore decide whether another pass is needed. */
	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
/*
 * Detach a netgraph data socket and reply to the message.
 */
static void
ngd_detach(netmsg_t msg)
{
	struct ngpcb *const pcbp = sotongpcb(msg->detach.base.nm_so);

	KASSERT(pcbp != NULL, ("ngd_detach: pcbp == NULL"));
	ng_detach_common(pcbp, NG_DATA);
	lwkt_replymsg(&msg->detach.base.lmsg, 0);
}
/*
 * lwkt_forwardmsg()
 *
 * Forward a message received on one port to another port.  If the
 * target port executes the message synchronously the response is
 * queued on the caller's behalf.
 */
int
lwkt_forwardmsg(lwkt_port_t port, lwkt_msg_t msg)
{
	int error;

	/* The message must be in-flight: not queued, done, or replied. */
	KKASSERT((msg->ms_flags & (MSGF_QUEUED|MSGF_DONE|MSGF_REPLY)) == 0);

	error = port->mp_putport(port, msg);
	if (error != EASYNC)
		lwkt_replymsg(msg, error);
	return (error);
}
/*
 * PRU send handler for UDP6 sockets: validate the destination address
 * (if any) and hand the mbuf chain to udp6_output(), replying with the
 * result.
 */
static void
udp6_send(netmsg_t msg)
{
	struct socket *so = msg->send.base.nm_so;
	struct mbuf *m = msg->send.nm_m;
	struct sockaddr *addr = msg->send.nm_addr;
	struct mbuf *control = msg->send.nm_control;
	struct thread *td = msg->send.nm_td;
	struct inpcb *inp;
	int error = 0;

	inp = so->so_pcb;
	if (inp == NULL) {
		error = EINVAL;
		goto bad;
	}

	if (addr) {
		struct sockaddr_in6 *sin6;

		/* Reject malformed, non-INET6, or v4-mapped destinations. */
		if (addr->sa_len != sizeof(struct sockaddr_in6)) {
			error = EINVAL;
			goto bad;
		}
		if (addr->sa_family != AF_INET6) {
			error = EAFNOSUPPORT;
			goto bad;
		}
		sin6 = (struct sockaddr_in6 *)addr;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			error = EADDRNOTAVAIL;
			goto bad;
		}
	}

	/*
	 * NOTE(review): udp6_output() is assumed to consume both m and
	 * control on all of its paths — confirm against its definition.
	 */
	error = udp6_output(inp, m, addr, control, td);
	lwkt_replymsg(&msg->send.base.lmsg, error);
	return;
bad:
	/*
	 * Bug fix: the control mbuf was previously leaked on the early
	 * error paths; free it along with the data mbuf (m_freem() is a
	 * no-op on NULL).
	 */
	m_freem(control);
	m_freem(m);
	lwkt_replymsg(&msg->send.base.lmsg, error);
}
/*
 * Connect on a netgraph control socket is always refused.
 */
static void
ngc_connect(netmsg_t msg)
{
	/*
	 * At this time refuse to do this.. it used to do something but
	 * it was undocumented and not used.
	 */
	kprintf("program tried to connect control socket to remote node\n");
	lwkt_replymsg(&msg->connect.base.lmsg, EINVAL);
}
/*
 * Reply to the socket's queued async-receive message.  The reply is
 * issued under so_rcvd_spin to serialize against the sender side.
 */
void
so_async_rcvd_reply(struct socket *so)
{
	/*
	 * Spinlock safe, reply runs to degenerate lwkt_null_replyport()
	 */
	spin_lock(&so->so_rcvd_spin);
	lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
	spin_unlock(&so->so_rcvd_spin);
}
/*
 * Second stage of lwkt_sendmsg(): begin message execution on the
 * target port and, if the target ran it synchronously, queue the
 * response ourselves.
 */
void
lwkt_sendmsg_stage2(lwkt_port_t port, lwkt_msg_t msg)
{
	int error = lwkt_beginmsg(port, msg);

	if (error != EASYNC)
		lwkt_replymsg(msg, error);
}
/*
 * Attach a netgraph data socket: fail with EISCONN if a pcb already
 * exists, otherwise create one via ng_attach_data(), then reply.
 */
static void
ngd_attach(netmsg_t msg)
{
	struct socket *so = msg->attach.base.nm_so;
	struct ngpcb *const pcbp = sotongpcb(so);
	int error;

	if (pcbp != NULL)
		error = EISCONN;
	else
		error = ng_attach_data(so);
	/*
	 * Bug fix: reply through the attach member of the netmsg union
	 * (the message we were actually handed), not the connect member,
	 * consistent with the other handlers in this file.
	 */
	lwkt_replymsg(&msg->attach.base.lmsg, error);
}
/*
 * Register an interface for polling on every polling cpu.  The message
 * is forwarded from cpu to cpu; each cpu registers its own RX/TX
 * contexts, and the last cpu replies.  On any failure the chain stops
 * and the error is replied immediately.
 */
static void
ifpoll_register_handler(netmsg_t nmsg)
{
	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));

	/* Status polling is registered on cpu0 only. */
	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
	    &info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
	    &info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	/* Adjust polling frequency, after all registration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->lmsg, error);
}
/*
 * Connect a netgraph data socket to the node named by nm_nam.
 */
static void
ngd_connect(netmsg_t msg)
{
	struct ngpcb *const pcbp = sotongpcb(msg->connect.base.nm_so);
	int error = EINVAL;

	if (pcbp != NULL)
		error = ng_connect_data(msg->connect.nm_nam, pcbp);
	lwkt_replymsg(&msg->connect.base.lmsg, error);
}
/*
 * Per-cpu leg of a chained UDP notify-all: run the notification on
 * this cpu's pcbinfo, then forward the message to the next netisr
 * cpu, replying once the last cpu has been visited.
 */
static void
udp_notifyall_oncpu(netmsg_t msg)
{
	struct netmsg_udp_notify *nm = (struct netmsg_udp_notify *)msg;
	int cpu = mycpuid;
	int target = cpu + 1;

	in_pcbnotifyall(&udbinfo[cpu], nm->nm_faddr, nm->nm_arg,
	    nm->nm_notify);

	if (target < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(target), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}
/*
 * Per-cpu leg of a chained wildcard-hash insertion: insert the inpcb
 * into this cpu's tcbinfo wildcard hash, then forward to the next cpu,
 * replying at the end of the chain.
 */
static void
in_pcbinswildcardhash_handler(netmsg_t msg)
{
	struct netmsg_inswildcard *nm = (struct netmsg_inswildcard *)msg;
	int cpu = mycpuid;
	int target = cpu + 1;

	in_pcbinswildcardhash_oncpu(nm->nm_inp, &tcbinfo[cpu]);

	if (target < ncpus2)
		lwkt_forwardmsg(cpu_portfn(target), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}
/*
 * Dispatch an ipfw sockopt request to the firewall control handler.
 * Must run on cpu0; replies ENOPROTOOPT if ipfw is not loaded.
 */
static void
ip_fw_sockopt_dispatch(netmsg_t msg)
{
	struct sockopt *sopt = msg->lmsg.u.ms_resultp;
	int error = ENOPROTOOPT;

	KKASSERT(mycpuid == 0);

	if (IPFW_LOADED)
		error = ip_fw_ctl_ptr(sopt);
	lwkt_replymsg(&msg->lmsg, error);
}
/*
 * Handle a UDP control input (ICMP-derived) message.  Depending on what
 * udp_get_inpnotify() reports, either notify a single pcb looked up from
 * the embedded IP/UDP header, notify all pcbs on this cpu directly, or
 * fan out via a chained message started on netisr0.  Always replies 0.
 */
void
udp_ctlinput(netmsg_t msg)
{
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	int cmd = msg->ctlinput.nm_cmd, cpuid;
	inp_notify_t notify;
	struct in_addr faddr;

	/* NULL notify means this command is not of interest. */
	notify = udp_get_inpnotify(cmd, sa, &ip, &cpuid);
	if (notify == NULL)
		goto done;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (ip) {
		const struct udphdr *uh;
		struct inpcb *inp;

		/* Only the cpu owning the connection may notify it. */
		if (cpuid != mycpuid)
			goto done;

		uh = (const struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		inp = in_pcblookup_hash(&udbinfo[mycpuid], faddr, uh->uh_dport,
		    ip->ip_src, uh->uh_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL)
			notify(inp, inetctlerrmap[cmd]);
	} else if (msg->ctlinput.nm_direct) {
		/* Direct mode: notify this cpu's pcbinfo only. */
		if (cpuid != ncpus && cpuid != mycpuid)
			goto done;
		if (mycpuid >= ncpus2)
			goto done;
		in_pcbnotifyall(&udbinfo[mycpuid], faddr, inetctlerrmap[cmd],
		    notify);
	} else {
		struct netmsg_udp_notify *nm;

		/*
		 * Fan out via udp_notifyall_oncpu(); the afree reply port
		 * frees the kmalloc'd message when the chain completes.
		 */
		ASSERT_IN_NETISR(0);
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport,
		    0, udp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = inetctlerrmap[cmd];
		nm->nm_notify = notify;
		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}
/*
 * rfcomm_ctloutput(request, socket, level, optname, opt)
 *
 * Handle [gs]etsockopt on an RFCOMM socket and reply with the result.
 */
void
rfcomm_ctloutput(netmsg_t msg)
{
	struct socket *so = msg->ctloutput.base.nm_so;
	struct sockopt *sopt = msg->ctloutput.nm_sopt;
	struct rfcomm_dlc *pcb = (struct rfcomm_dlc *) so->so_pcb;
	struct mbuf *m;
	int error = 0;

#ifdef notyet			/* XXX */
	DPRINTFN(2, "%s\n", prcorequests[sopt->sopt_dir]);
#endif

	if (pcb == NULL) {
		error = EINVAL;
		goto out;
	}

	if (sopt->sopt_level != BTPROTO_RFCOMM) {
		error = ENOPROTOOPT;
		goto out;
	}

	switch(sopt->sopt_dir) {
	case PRCO_GETOPT:
		m = m_get(M_WAITOK, MT_DATA);
		crit_enter();
		m->m_len = rfcomm_getopt(pcb, sopt->sopt_name,
		    mtod(m, void *));
		crit_exit();
		if (m->m_len == 0) {
			m_freem(m);
			m = NULL;
			error = ENOPROTOOPT;
		} else {
			/*
			 * Bug fix: soopt_from_kbuf() was previously called
			 * unconditionally, dereferencing m after it had
			 * been freed and NULLed in the branch above.
			 *
			 * NOTE(review): m does not appear to be freed after
			 * the copy-out here — verify soopt_from_kbuf()'s
			 * ownership semantics; this may leak the mbuf.
			 */
			soopt_from_kbuf(sopt, mtod(m, void *), m->m_len);
		}
		break;

	case PRCO_SETOPT:
		error = rfcomm_setopt2(pcb, sopt->sopt_name, so, sopt);
		break;

	default:
		error = ENOPROTOOPT;
		break;
	}
out:
	lwkt_replymsg(&msg->ctloutput.base.lmsg, error);
}
/*
 * The ACPI helper thread processes OSD execution callback messages.
 */
static void
acpi_task_thread(void *arg)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpi_task *at;

	get_mplock();
	for (;;) {
		/* Block until the next task message arrives on our port. */
		at = (void *)lwkt_waitport(&curthread->td_msgport, 0);
		func = at->at_function;
		func(at->at_context);
		/* Completion is signalled by replying to the message. */
		lwkt_replymsg(&at->at_msg, 0);
	}
	/* NOTE(review): unreachable — the loop above never exits. */
	rel_mplock();
}
/*
 * PRU connect handler for UDP6 sockets: validate the destination,
 * leave the wildcard hash, and bind the pcb to the remote address via
 * in6_pcbconnect().  Replies with the result.
 */
static void
udp6_connect(netmsg_t msg)
{
	struct socket *so = msg->connect.base.nm_so;
	struct sockaddr *nam = msg->connect.nm_nam;
	struct thread *td = msg->connect.nm_td;
	struct sockaddr_in6 *sin6_p;
	struct inpcb *inp;
	int error;

	inp = so->so_pcb;
	if (inp == NULL) {
		error = EINVAL;
		goto out;
	}

	/* v4-mapped destinations are not handled by this path. */
	sin6_p = (struct sockaddr_in6 *)nam;
	if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) {
		error = EADDRNOTAVAIL;
		goto out;
	}

	/* Already connected? */
	if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
		error = EISCONN;
		goto out;
	}

	/* A connected pcb no longer belongs in the wildcard hash. */
	if (inp->inp_flags & INP_WILDCARD)
		in_pcbremwildcardhash(inp);

	if (!prison_remote_ip(td, nam)) {
		error = EAFNOSUPPORT; /* IPv4 only jail */
		goto out;
	}

	error = in6_pcbconnect(inp, nam, td);
	if (error == 0) {
		soisconnected(so);
	} else if (error == EAFNOSUPPORT) {	/* connection dissolved */
		/*
		 * Follow traditional BSD behavior and retain
		 * the local port binding.  But, fix the old misbehavior
		 * of overwriting any previously bound local address.
		 */
		if (!(inp->inp_flags & INP_WASBOUND_NOTANY))
			inp->in6p_laddr = kin6addr_any;
		in_pcbinswildcardhash(inp);
	}
out:
	lwkt_replymsg(&msg->connect.base.lmsg, error);
}
/*
 * TX counterpart of rxpoll_handler: scheduled by sched_iopoll, polls
 * every registered interface once per invocation with an unlimited
 * (-1) cycle count.  The message is replied immediately so a new poll
 * can be scheduled while we work.
 */
static void
txpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i;

	io_ctx = msg->lmsg.u.ms_resultp;
	/* Must run on the netisr cpu that owns this polling context. */
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		/* Skip, rather than block on, a busy interface. */
		if (!lwkt_serialize_try(rec->serializer))
			continue;

		/* Only poll interfaces that are up and in polling mode. */
		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, -1);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
/*
 * PRU detach handler for UDP6 sockets: tear down the pcb if present.
 */
static void
udp6_detach(netmsg_t msg)
{
	struct inpcb *inp = msg->detach.base.nm_so->so_pcb;
	int error = EINVAL;

	if (inp != NULL) {
		in6_pcbdetach(inp);
		error = 0;
	}
	lwkt_replymsg(&msg->detach.base.lmsg, error);
}
/*
 * lwkt_sendmsg()
 *
 * Request asynchronous completion and call lwkt_beginmsg().  The
 * target port can opt to execute the message synchronously or
 * asynchronously and this function will automatically queue the
 * response if the target executes the message synchronously.
 *
 * NOTE: The message is in an indeterminant state until this call
 * returns.  The caller should not mess with it (e.g. try to abort it)
 * until then.
 *
 * NOTE: Do not use this function to forward a message as we might
 * clobber ms_flags in a SMP race.
 */
void
lwkt_sendmsg(lwkt_port_t port, lwkt_msg_t msg)
{
	int error;

	/* The message must be done, un-queued, and have a reply port. */
	KKASSERT(msg->ms_reply_port != NULL &&
	    (msg->ms_flags & (MSGF_DONE|MSGF_QUEUED)) == MSGF_DONE);
	/* Mark in-flight and asynchronous before handing it off. */
	msg->ms_flags &= ~(MSGF_REPLY | MSGF_SYNC | MSGF_DONE);
	if ((error = lwkt_beginmsg(port, msg)) != EASYNC) {
		/*
		 * Target port opted to execute the message synchronously so
		 * queue the response.
		 */
		lwkt_replymsg(msg, error);
	}
}
/*
 * Attach a netgraph control socket.  Control sockets are a privileged
 * facility: the caller must hold PRIV_ROOT.
 */
static void
ngc_attach(netmsg_t msg)
{
	struct socket *so = msg->attach.base.nm_so;
	struct pru_attach_info *ai = msg->attach.nm_ai;
	int error;

	if (priv_check_cred(ai->p_ucred, PRIV_ROOT, NULL_CRED_OKAY) != 0) {
		error = EPERM;
	} else if (sotongpcb(so) != NULL) {
		error = EISCONN;
	} else {
		error = ng_attach_cntl(so);
	}
	lwkt_replymsg(&msg->attach.base.lmsg, error);
}
/*
 * Used for both data and control sockets: build a sockaddr_ng carrying
 * the node's name (if it has one) and return it via *nm_nam.  The
 * caller takes ownership of the kmalloc'd sockaddr.
 */
static void
ng_getsockaddr(netmsg_t msg)
{
	struct socket *so = msg->sockaddr.base.nm_so;
	struct sockaddr **addr = msg->sockaddr.nm_nam;
	struct ngpcb *pcbp;
	struct sockaddr_ng *sg;
	int sg_len;
	int error = 0;

	/* Why isn't sg_data a `char[1]' ? :-( */
	sg_len = sizeof(struct sockaddr_ng) - sizeof(sg->sg_data) + 1;

	pcbp = sotongpcb(so);
	if ((pcbp == NULL) || (pcbp->sockdata == NULL)) {
		/* XXXGL: can this still happen? */
		error = EINVAL;
		goto replymsg;
	}

	/* Hold the node mutex across the name read and copy. */
	mtx_lock(&pcbp->sockdata->mtx);
	if (pcbp->sockdata->node != NULL) {
		node_p node = pcbp->sockdata->node;
		int namelen = 0; /* silence compiler! */

		if (NG_NODE_HAS_NAME(node))
			sg_len += namelen = strlen(NG_NODE_NAME(node));

		/*
		 * NOTE(review): M_WAITOK allocation while holding the node
		 * mutex — presumably the mtx may be held across blocking
		 * allocations here; confirm against mtx(9) usage rules.
		 */
		sg = kmalloc(sg_len, M_SONAME, M_WAITOK | M_ZERO);

		if (NG_NODE_HAS_NAME(node))
			bcopy(NG_NODE_NAME(node), sg->sg_data, namelen);

		sg->sg_len = sg_len;
		sg->sg_family = AF_NETGRAPH;
		*addr = (struct sockaddr *)sg;
		mtx_unlock(&pcbp->sockdata->mtx);
	} else {
		mtx_unlock(&pcbp->sockdata->mtx);
		error = EINVAL;
	}
replymsg:
	lwkt_replymsg(&msg->sockaddr.base.lmsg, error);
}
/*
 * PRU connect handler for TCP over IPv6.  Validates the destination
 * (no multicast, no v4-mapped, jail check) and hands off to
 * tcp6_connect(), which takes ownership of the message.  On early
 * failure, frees any implied-send mbuf and replies with the error.
 */
static void
tcp6_usr_connect(netmsg_t msg)
{
	struct socket *so = msg->connect.base.nm_so;
	struct sockaddr *nam = msg->connect.nm_nam;
	struct thread *td = msg->connect.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct sockaddr_in6 *sin6p;

	/*
	 * NOTE(review): COMMON_START is defined elsewhere; it appears to
	 * set up inp/tp and may jump to `out' on error — confirm against
	 * its definition.
	 */
	COMMON_START(so, inp, 0);

	/*
	 * Must disallow TCP ``connections'' to multicast addresses.
	 */
	sin6p = (struct sockaddr_in6 *)nam;
	if (sin6p->sin6_family == AF_INET6
	    && IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
		error = EAFNOSUPPORT;
		goto out;
	}

	if (!prison_remote_ip(td, nam)) {
		error = EAFNOSUPPORT; /* IPv4 only jail */
		goto out;
	}

	/* Reject v4-mapped address */
	if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
		error = EADDRNOTAVAIL;
		goto out;
	}

	inp->inp_inc.inc_isipv6 = 1;

	tcp6_connect(msg);
	/* msg is invalid now */
	return;
out:
	/* Free any mbuf attached for an implied send before replying. */
	if (msg->connect.nm_m) {
		m_freem(msg->connect.nm_m);
		msg->connect.nm_m = NULL;
	}
	lwkt_replymsg(&msg->lmsg, error);
}
/*
 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
 * will sofree() it when we return.
 */
static void
udp6_abort(netmsg_t msg)
{
	struct socket *so = msg->abort.base.nm_so;
	struct inpcb *inp = so->so_pcb;
	int error = EINVAL;

	if (inp != NULL) {
		soisdisconnected(so);
		in6_pcbdetach(inp);
		error = 0;
	}
	lwkt_replymsg(&msg->abort.base.lmsg, error);
}
/*
 * Initiate connection to peer.
 * Create a template for use in transmissions on this connection.
 * Enter SYN_SENT state, and mark socket as connecting.
 * Start keep-alive timer, and seed output sequence space.
 * Send initial segment on connection.
 */
static void
tcp_usr_connect(netmsg_t msg)
{
	struct socket *so = msg->connect.base.nm_so;
	struct sockaddr *nam = msg->connect.nm_nam;
	struct thread *td = msg->connect.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct sockaddr_in *sinp;

	/*
	 * NOTE(review): COMMON_START is defined elsewhere; it appears to
	 * set up inp/tp and may jump to `out' on error — confirm against
	 * its definition.
	 */
	COMMON_START(so, inp, 0);

	/*
	 * Must disallow TCP ``connections'' to multicast addresses.
	 */
	sinp = (struct sockaddr_in *)nam;
	if (sinp->sin_family == AF_INET &&
	    IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
		error = EAFNOSUPPORT;
		goto out;
	}

	if (!prison_remote_ip(td, (struct sockaddr*)sinp)) {
		error = EAFNOSUPPORT; /* IPv6 only jail */
		goto out;
	}

	tcp_connect(msg);
	/* msg is invalid now */
	return;
out:
	/* Free any mbuf attached for an implied send. */
	if (msg->connect.nm_m) {
		m_freem(msg->connect.nm_m);
		msg->connect.nm_m = NULL;
	}
	/* Drop the thread reference taken for async connects. */
	if (msg->connect.nm_flags & PRUC_HELDTD)
		lwkt_rele(td);
	/* Async callers see the failure through the socket state. */
	if (error && (msg->connect.nm_flags & PRUC_ASYNC)) {
		so->so_error = error;
		soisdisconnected(so);
	}
	lwkt_replymsg(&msg->lmsg, error);
}
/*
 * Netmsg handler for the each_burst sysctl: install the new value,
 * clamped to the range [1, poll_burst_max].
 */
static void
sysctl_eachburst_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *smsg =
	    (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *ctx = smsg->ctx;
	uint32_t val = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_portfn(ctx->poll_cpuid));

	if (val > ctx->poll_burst_max)
		val = ctx->poll_burst_max;
	else if (val < 1)
		val = 1;
	ctx->poll_each_burst = val;

	lwkt_replymsg(&nmsg->lmsg, 0);
}
/*
 * Netmsg handler for the burst_max sysctl: install the new cap, then
 * pull all dependent knobs down under it.
 */
static void
sysctl_burstmax_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *smsg =
	    (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *ctx = smsg->ctx;

	KKASSERT(&curthread->td_msgport == netisr_portfn(ctx->poll_cpuid));

	ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
	if (ctx->poll_each_burst > ctx->poll_burst_max)
		ctx->poll_each_burst = ctx->poll_burst_max;
	if (ctx->poll_burst > ctx->poll_burst_max)
		ctx->poll_burst = ctx->poll_burst_max;
	if (ctx->residual_burst > ctx->poll_burst_max)
		ctx->residual_burst = ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->lmsg, 0);
}
/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
	struct lwkt_token *tok;
	struct signalsockbuf *ssb;

	/* Receive events watch so_rcv; everything else watches so_snd. */
	ssb = (msg->notify.nm_etype & NM_REVENT) ?
			&msg->base.nm_so->so_rcv :
			&msg->base.nm_so->so_snd;

	/*
	 * Reply immediately if the event has occured, otherwise queue the
	 * request.
	 *
	 * NOTE: Socket can change if this is an accept predicate so cache
	 *	 the token.
	 */
	tok = lwkt_token_pool_lookup(msg->base.nm_so);
	lwkt_gettoken(tok);
	atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
	if (msg->notify.nm_predicate(&msg->notify)) {
		/* No queued waiters left: drop the MEVENT hint. */
		if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_reltoken(tok);
		lwkt_replymsg(&msg->base.lmsg,
			      msg->base.lmsg.ms_error);
	} else {
		TAILQ_INSERT_TAIL(&ssb->ssb_kq.ki_mlist, &msg->notify, nm_list);
		/*
		 * NOTE:
		 * If predict ever blocks, 'tok' will be released, so
		 * SSB_MEVENT set beforehand could have been cleared
		 * when we reach here.  In case that happens, we set
		 * SSB_MEVENT again, after the notify has been queued.
		 */
		atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_reltoken(tok);
	}
}
/*
 * PRU attach handler for UDP6 sockets: reserve socket buffer space and
 * allocate an inpcb, initializing IPv6 hop limit, checksum, and the
 * IPv4 TTL (needed for v4-mapped traffic).  Replies with the result.
 */
static void
udp6_attach(netmsg_t msg)
{
	struct socket *so = msg->attach.base.nm_so;
	struct pru_attach_info *ai = msg->attach.nm_ai;
	struct inpcb *inp;
	int error;

	inp = so->so_pcb;
	if (inp != NULL) {
		error = EINVAL;
		goto out;
	}

	/* Reserve buffer space only if not already reserved. */
	if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
		error = soreserve(so, udp_sendspace, udp_recvspace,
		    ai->sb_rlimit);
		if (error)
			goto out;
	}
	error = in_pcballoc(so, &udbinfo[0]);
	if (error)
		goto out;
	inp = (struct inpcb *)so->so_pcb;
	inp->in6p_hops = -1;	/* use kernel default */
	inp->in6p_cksum = -1;	/* just to be sure */
	/*
	 * XXX: ugly!!
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	error = 0;
out:
	lwkt_replymsg(&msg->attach.base.lmsg, error);
}