/*
 * tcp6_usr_listen: put an IPv6 TCP socket into the LISTEN state
 * (PRU_LISTEN usrreq handler, invoked as a netmsg).
 *
 * Binds an ephemeral port first if the inpcb is unbound, then marks the
 * tcpcb as listening and inserts the inpcb into the wildcard hash on
 * every protocol cpu.  Errors are reported through COMMON_END's reply.
 *
 * NOTE(review): COMMON_START/COMMON_END are macros defined elsewhere in
 * this file; they appear to extract inp/tp from the socket and reply to
 * the message with 'error' — confirm against their definitions.
 */
static void
tcp6_usr_listen(netmsg_t msg)
{
	struct socket *so = msg->listen.base.nm_so;
	struct thread *td = msg->listen.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef SMP
	struct netmsg_inswildcard nm;
#endif

	COMMON_START(so, inp, 0);

	/* Already listening: nothing to do. */
	if (tp->t_flags & TF_LISTEN)
		goto out;

	if (inp->inp_lport == 0) {
		/*
		 * Not yet bound.  Unless the socket is v6-only, also accept
		 * IPv4 traffic (mapped addresses) by setting INP_IPV4 before
		 * binding to an ephemeral port.
		 */
		if (!(inp->inp_flags & IN6P_IPV6_V6ONLY))
			inp->inp_vflag |= INP_IPV4;
		else
			inp->inp_vflag &= ~INP_IPV4;
		error = in6_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	tp->t_state = TCPS_LISTEN;
	tp->t_flags |= TF_LISTEN;
	tp->tt_msg = NULL; /* Catch any invalid timer usage */

#ifdef SMP
	if (ncpus > 1) {
		/*
		 * We have to set the flag because we can't have other cpus
		 * messing with our inp's flags.
		 */
		KASSERT(!(inp->inp_flags & INP_CONNECTED),
			("already on connhash\n"));
		KASSERT(!(inp->inp_flags & INP_WILDCARD),
			("already on wildcardhash\n"));
		KASSERT(!(inp->inp_flags & INP_WILDCARD_MP),
			("already on MP wildcardhash\n"));
		inp->inp_flags |= INP_WILDCARD_MP;

		/* Listen setup must run on cpu0's protocol thread. */
		KKASSERT(so->so_port == cpu_portfn(0));
		KKASSERT(&curthread->td_msgport == cpu_portfn(0));
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);

		/*
		 * Chain a synchronous message through the remaining protocol
		 * cpus (starting at cpu 1); the handler inserts the inpcb
		 * into each cpu's wildcard hash and forwards to the next,
		 * replying when done.  lwkt_domsg() blocks until the reply.
		 */
		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
			    MSGF_PRIORITY, in_pcbinswildcardhash_handler);
		nm.nm_inp = inp;
		lwkt_domsg(cpu_portfn(1), &nm.base.lmsg, 0);
	}
#endif
	/* Finally insert into cpu0's wildcard hash (the local cpu). */
	in_pcbinswildcardhash(inp);
	COMMON_END(PRU_LISTEN);
}
static int sysctl_burstmax(SYSCTL_HANDLER_ARGS) { struct pollctx *pctx = arg1; struct netmsg_base msg; lwkt_port_t port; uint32_t burst_max; int error; burst_max = pctx->poll_burst_max; error = sysctl_handle_int(oidp, &burst_max, 0, req); if (error || req->newptr == NULL) return error; if (burst_max < MIN_POLL_BURST_MAX) burst_max = MIN_POLL_BURST_MAX; else if (burst_max > MAX_POLL_BURST_MAX) burst_max = MAX_POLL_BURST_MAX; netmsg_init(&msg, NULL, &curthread->td_msgport, 0, poll_sysctl_burstmax); msg.lmsg.u.ms_result = burst_max; port = cpu_portfn(pctx->poll_cpuid); lwkt_domsg(port, &msg.lmsg, 0); return 0; }
/* * Set the polling frequency */ static int sysctl_pollhz(SYSCTL_HANDLER_ARGS) { struct pollctx *pctx = arg1; struct netmsg_base msg; lwkt_port_t port; int error, phz; phz = pctx->pollhz; error = sysctl_handle_int(oidp, &phz, 0, req); if (error || req->newptr == NULL) return error; if (phz <= 0) return EINVAL; else if (phz > DEVICE_POLLING_FREQ_MAX) phz = DEVICE_POLLING_FREQ_MAX; netmsg_init(&msg, NULL, &curthread->td_msgport, 0, poll_sysctl_pollhz); msg.lmsg.u.ms_result = phz; port = cpu_portfn(pctx->poll_cpuid); lwkt_domsg(port, &msg.lmsg, 0); return 0; }
/*
 * Per-cpu handler for the wildcard-hash insertion chain.  Inserts the
 * inpcb into the current cpu's tcbinfo wildcard hash, then forwards the
 * message to the next cpu in [0, ncpus2) or replies when the chain is
 * exhausted.
 */
static void
in_pcbinswildcardhash_handler(netmsg_t msg)
{
	struct netmsg_inswildcard *nm = (struct netmsg_inswildcard *)msg;
	int cpu = mycpuid;
	int next = cpu + 1;

	in_pcbinswildcardhash_oncpu(nm->nm_inp, &tcbinfo[cpu]);

	if (next >= ncpus2)
		lwkt_replymsg(&nm->base.lmsg, 0);	/* chain complete */
	else
		lwkt_forwardmsg(cpu_portfn(next), &nm->base.lmsg);
}
/* * Master enable. */ static int sysctl_polling(SYSCTL_HANDLER_ARGS) { struct pollctx *pctx = arg1; struct netmsg_base msg; lwkt_port_t port; int error, enabled; enabled = pctx->polling_enabled; error = sysctl_handle_int(oidp, &enabled, 0, req); if (error || req->newptr == NULL) return error; netmsg_init(&msg, NULL, &curthread->td_msgport, 0, poll_sysctl_polling); msg.lmsg.u.ms_result = enabled; port = cpu_portfn(pctx->poll_cpuid); lwkt_domsg(port, &msg.lmsg, 0); return 0; }
static int sysctl_eachburst(SYSCTL_HANDLER_ARGS) { struct pollctx *pctx = arg1; struct netmsg_base msg; lwkt_port_t port; uint32_t each_burst; int error; each_burst = pctx->poll_each_burst; error = sysctl_handle_int(oidp, &each_burst, 0, req); if (error || req->newptr == NULL) return error; netmsg_init(&msg, NULL, &curthread->td_msgport, 0, poll_sysctl_eachburst); msg.lmsg.u.ms_result = each_burst; port = cpu_portfn(pctx->poll_cpuid); lwkt_domsg(port, &msg.lmsg, 0); return 0; }
/*
 * (Re)send the poll scheduling message to the current cpu's port, but
 * only if it is not already in flight (MSGF_DONE set means the previous
 * send has been replied to, so the message structure is free to reuse).
 */
static void
schedpoll_oncpu(netmsg_t msg)
{
	if ((msg->lmsg.ms_flags & MSGF_DONE) == 0)
		return;		/* still pending; do not double-send */
	lwkt_sendmsg(cpu_portfn(mycpuid), &msg->lmsg);
}
/*
 * socreate: allocate and attach a new socket for the given domain,
 * type and protocol.
 *
 * On success, *aso receives a referenced socket (the reference from
 * soalloc()) and 0 is returned.  On failure an errno value is returned
 * and no socket is left allocated.  Callers are expected to wire the
 * socket to a descriptor and soclose() it if they cannot.
 */
int
socreate(int dom, struct socket **aso, int type,
	 int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	/* Look up the protocol switch entry by (domain, proto, type). */
	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	/*
	 * Jailed processes may be restricted to unix/inet/inet6/route
	 * sockets only (controlled by jail_socket_unixiproute_only).
	 */
	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0.
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else
		so->so_port = cpu_portfn(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);	/* socket holds a cred ref */
	so->so_proto = prp;
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	error = so_pru_attach(so, proto, &ai);
	if (error) {
		/* Undo soclrstate() above and drop the soalloc() ref. */
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}