/*
 * Complete an IPv4 connect() on the inpcb's owner CPU.
 *
 * Commits the local/foreign 4-tuple, inserts the inpcb into the
 * per-CPU connection hash, (re)allocates the cached route if needed,
 * arms the keepalive timer, and transmits the initial SYN via
 * tcp_output().
 *
 * Parameters:
 *	tp	- the tcpcb being connected; tp->t_inpcb supplies the pcb.
 *	flags	- PRUS_* flags from the originating send (PRUS_OOB,
 *		  PRUS_EOF are honored here).
 *	m	- optional data mbuf chain to queue with the SYN; this
 *		  function owns it and either enqueues or frees it.
 *	sin	- destination address/port.
 *	if_sin	- interface address to use when the socket is not yet
 *		  bound to a local address.
 *
 * Returns 0 or an errno (EADDRINUSE on a duplicate 4-tuple, or
 * whatever tcp_output() returns).
 */
static int
tcp_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf *m,
		  struct sockaddr_in *sin, struct sockaddr_in *if_sin)
{
	struct inpcb *inp = tp->t_inpcb, *oinp;
	struct socket *so = inp->inp_socket;
	struct route *ro = &inp->inp_route;

	/*
	 * Reject the connect if the intended 4-tuple already exists in
	 * this CPU's pcb hash (e.g. an earlier incarnation of the same
	 * connection).  The lookup key uses the interface address when
	 * the socket has not been bound locally yet.
	 */
	oinp = in_pcblookup_hash(&tcbinfo[mycpu->gd_cpuid],
				 sin->sin_addr, sin->sin_port,
				 (inp->inp_laddr.s_addr != INADDR_ANY ?
				  inp->inp_laddr : if_sin->sin_addr),
				 inp->inp_lport, 0, NULL);
	if (oinp != NULL) {
		/* We own the mbuf chain; free it on failure. */
		m_freem(m);
		return (EADDRINUSE);
	}

	/* Commit the 4-tuple and enter the connection hash. */
	if (inp->inp_laddr.s_addr == INADDR_ANY)
		inp->inp_laddr = if_sin->sin_addr;
	inp->inp_faddr = sin->sin_addr;
	inp->inp_fport = sin->sin_port;
	inp->inp_cpcbinfo = &tcbinfo[mycpu->gd_cpuid];
	in_pcbinsconnhash(inp);

	/*
	 * We are now on the inpcb's owner CPU, if the cached route was
	 * freed because the rtentry's owner CPU is not the current CPU
	 * (e.g. in tcp_connect()), then we try to reallocate it here with
	 * the hope that a rtentry may be cloned from a RTF_PRCLONING
	 * rtentry.
	 */
	if (!(inp->inp_socket->so_options & SO_DONTROUTE) && /*XXX*/
	    ro->ro_rt == NULL) {
		bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
		ro->ro_dst.sa_family = AF_INET;
		ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
		((struct sockaddr_in *)&ro->ro_dst)->sin_addr =
			sin->sin_addr;
		rtalloc(ro);
	}

	/*
	 * Now that no more errors can occur, change the protocol processing
	 * port to the current thread (which is the correct thread).
	 *
	 * Create TCP timer message now; we are on the tcpcb's owner
	 * CPU/thread.
	 */
	tcp_create_timermsg(tp, &curthread->td_msgport);

	/*
	 * Compute window scaling to request.  Use a larger scaling then
	 * needed for the initial receive buffer in case the receive buffer
	 * gets expanded.
	 */
	if (tp->request_r_scale < TCP_MIN_WINSHIFT)
		tp->request_r_scale = TCP_MIN_WINSHIFT;
	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
	       (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat) {
		tp->request_r_scale++;
	}

	/* Enter SYN_SENT: arm the connection-establishment (keep) timer. */
	soisconnecting(so);
	tcpstat.tcps_connattempt++;
	tp->t_state = TCPS_SYN_SENT;
	tcp_callout_reset(tp, tp->tt_keep, tcp_keepinit, tcp_timer_keep);
	tp->iss = tcp_new_isn(tp);
	tcp_sendseqinit(tp);

	/*
	 * Queue any data handed in with the connect.  Ownership of the
	 * mbuf chain transfers to the send buffer here.
	 */
	if (m) {
		ssb_appendstream(&so->so_snd, m);
		m = NULL;
		if (flags & PRUS_OOB)
			tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
	}

	/*
	 * Close the send side of the connection after
	 * the data is sent if flagged.
	 */
	if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
		socantsendmore(so);
		tp = tcp_usrclosed(tp);
	}
	return (tcp_output(tp));
}
/*
 * Complete an IPv6 connect() on the inpcb's owner CPU — the IPv6
 * counterpart of tcp_connect_oncpu().
 *
 * Commits the local/foreign address pair, inserts the inpcb into the
 * connection hash, arms the keepalive timer, and transmits the initial
 * SYN via tcp_output().
 *
 * Parameters:
 *	tp	- the tcpcb being connected.
 *	flags	- PRUS_* flags (PRUS_OOB, PRUS_EOF honored here).
 *	mp	- in/out pointer to an optional data mbuf chain; set to
 *		  NULL only once the chain has been handed to the send
 *		  buffer.  NOTE(review): on the EADDRINUSE path below the
 *		  chain is NOT freed and *mp is left intact — presumably
 *		  the caller frees it on error; verify against the caller.
 *	sin6	- destination address/port/flowinfo.
 *	addr6	- local address to use when in6p_laddr is unspecified.
 *
 * Returns 0 or an errno (EADDRINUSE, or tcp_output()'s result).
 */
static int
tcp6_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf **mp,
		   struct sockaddr_in6 *sin6, struct in6_addr *addr6)
{
	struct mbuf *m = *mp;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct inpcb *oinp;

	/*
	 * Cannot simply call in_pcbconnect, because there might be an
	 * earlier incarnation of this same connection still in
	 * TIME_WAIT state, creating an ADDRINUSE error.
	 */
	oinp = in6_pcblookup_hash(inp->inp_cpcbinfo,
				  &sin6->sin6_addr, sin6->sin6_port,
				  (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ?
				   addr6 : &inp->in6p_laddr),
				  inp->inp_lport, 0, NULL);
	if (oinp)
		return (EADDRINUSE);

	/* Commit the address pair and enter the connection hash. */
	if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
		inp->in6p_laddr = *addr6;
	inp->in6p_faddr = sin6->sin6_addr;
	inp->inp_fport = sin6->sin6_port;
	/* Only adopt the caller's flow label if one was actually given. */
	if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0)
		inp->in6p_flowinfo = sin6->sin6_flowinfo;
	in_pcbinsconnhash(inp);

	/*
	 * Now that no more errors can occur, change the protocol processing
	 * port to the current thread (which is the correct thread).
	 *
	 * Create TCP timer message now; we are on the tcpcb's owner
	 * CPU/thread.
	 */
	tcp_create_timermsg(tp, &curthread->td_msgport);

	/* Compute window scaling to request. */
	if (tp->request_r_scale < TCP_MIN_WINSHIFT)
		tp->request_r_scale = TCP_MIN_WINSHIFT;
	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
	       (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat) {
		tp->request_r_scale++;
	}

	/* Enter SYN_SENT: arm the connection-establishment (keep) timer. */
	soisconnecting(so);
	tcpstat.tcps_connattempt++;
	tp->t_state = TCPS_SYN_SENT;
	tcp_callout_reset(tp, tp->tt_keep, tcp_keepinit, tcp_timer_keep);
	tp->iss = tcp_new_isn(tp);
	tcp_sendseqinit(tp);

	/*
	 * Queue any data handed in with the connect; clearing *mp tells
	 * the caller ownership has transferred to the send buffer.
	 */
	if (m) {
		ssb_appendstream(&so->so_snd, m);
		*mp = NULL;
		if (flags & PRUS_OOB)
			tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
	}

	/*
	 * Close the send side of the connection after
	 * the data is sent if flagged.
	 */
	if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
		socantsendmore(so);
		tp = tcp_usrclosed(tp);
	}
	return (tcp_output(tp));
}
/*
 * Do a send by putting data in output queue and updating urgent
 * marker if URG set.  Possibly send more data.  Unlike the other
 * pru_*() routines, the mbuf chains are our responsibility.  We
 * must either enqueue them or free them.  The other pru_* routines
 * generally are caller-frees.
 *
 * Handles control-mbuf rejection (TCP carries no control data),
 * OOB backpressure, and implied connect (sendto() on an unconnected
 * socket) by re-dispatching the message through tcp_connect()/
 * tcp6_connect(); otherwise queues the data and drives tcp_output().
 */
static void
tcp_usr_send(netmsg_t msg)
{
	struct socket *so = msg->send.base.nm_so;
	int flags = msg->send.nm_flags;
	struct mbuf *m = msg->send.nm_m;
	struct sockaddr *nam = msg->send.nm_addr;
	struct mbuf *control = msg->send.nm_control;
	struct thread *td = msg->send.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6;
#endif
	TCPDEBUG0;

	inp = so->so_pcb;
	if (inp == NULL) {
		/*
		 * OOPS! we lost a race, the TCP session got reset after
		 * we checked SS_CANTSENDMORE, eg: while doing uiomove or a
		 * network interrupt in the non-critical section of sosend().
		 */
		m_freem(m);
		if (control)
			m_freem(control);
		error = ECONNRESET;	/* XXX EPIPE? */
		tp = NULL;
		TCPDEBUG1();
		goto out;
	}
#ifdef INET6
	isipv6 = nam && nam->sa_family == AF_INET6;
#endif /* INET6 */
	tp = intotcpcb(inp);
	TCPDEBUG1();

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			/* Non-empty control: reject, freeing both chains. */
			m_freem(control);
			m_freem(m);
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
	}

	/*
	 * Don't let too much OOB data build up
	 */
	if (flags & PRUS_OOB) {
		if (ssb_space(&so->so_snd) < -512) {
			m_freem(m);
			error = ENOBUFS;
			goto out;
		}
	}

	/*
	 * Do implied connect if not yet connected.  Any data sent
	 * with the connect is handled by tcp_connect() and friends.
	 *
	 * NOTE! PROTOCOL THREAD MAY BE CHANGED BY THE CONNECT!
	 */
	if (nam && tp->t_state < TCPS_SYN_SENT) {
		/* NOTE(review): looks like leftover debug output — confirm. */
		kprintf("implied fallback\n");
		/* Repackage this send message as a connect message. */
		msg->connect.nm_nam = nam;
		msg->connect.nm_td = td;
		msg->connect.nm_m = m;
		msg->connect.nm_flags = flags;
		msg->connect.nm_reconnect = NMSG_RECONNECT_FALLBACK;
#ifdef INET6
		if (isipv6)
			tcp6_connect(msg);
		else
#endif /* INET6 */
		tcp_connect(msg);
		/* msg invalid now */
		return;
	}

	/*
	 * Pump the data into the socket.
	 */
	if (m)
		ssb_appendstream(&so->so_snd, m);
	if (flags & PRUS_OOB) {
		/*
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section.
		 * Otherwise, snd_up should be one lower.
		 */
		tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
		/* TF_FORCE makes tcp_output() send even a zero window probe. */
		tp->t_flags |= TF_FORCE;
		error = tcp_output(tp);
		tp->t_flags &= ~TF_FORCE;
	} else {
		if (flags & PRUS_EOF) {
			/*
			 * Close the send side of the connection after
			 * the data is sent.
			 */
			socantsendmore(so);
			tp = tcp_usrclosed(tp);
		}
		/* tcp_usrclosed() may have dropped the connection (tp NULL). */
		if (tp != NULL) {
			if (flags & PRUS_MORETOCOME)
				tp->t_flags |= TF_MORETOCOME;
			error = tcp_output(tp);
			if (flags & PRUS_MORETOCOME)
				tp->t_flags &= ~TF_MORETOCOME;
		}
	}
	COMMON_END((flags & PRUS_OOB) ? PRU_SENDOOB :
		   ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
}
/*
 * Do a send by putting data in output queue and updating urgent
 * marker if URG set.  Possibly send more data.  Unlike the other
 * pru_*() routines, the mbuf chains are our responsibility.  We
 * must either enqueue them or free them.  The other pru_* routines
 * generally are caller-frees.
 *
 * NOTE(review): this is a second definition of tcp_usr_send() in the
 * same chunk — presumably the asynchronized-send revision of the
 * routine above (no implied connect, no control mbufs, uses
 * tcp_output_fair() and an optional no-reply completion); confirm
 * which one the build actually compiles.
 */
static void
tcp_usr_send(netmsg_t msg)
{
	struct socket *so = msg->send.base.nm_so;
	int flags = msg->send.nm_flags;
	struct mbuf *m = msg->send.nm_m;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
	TCPDEBUG0;

	/*
	 * This path no longer accepts control mbufs, an address, or
	 * FREEADDR semantics; callers must have stripped them already.
	 */
	KKASSERT(msg->send.nm_control == NULL);
	KKASSERT(msg->send.nm_addr == NULL);
	KKASSERT((flags & PRUS_FREEADDR) == 0);

	inp = so->so_pcb;
	if (inp == NULL) {
		/*
		 * OOPS! we lost a race, the TCP session got reset after
		 * we checked SS_CANTSENDMORE, eg: while doing uiomove or a
		 * network interrupt in the non-critical section of sosend().
		 */
		m_freem(m);
		error = ECONNRESET;	/* XXX EPIPE? */
		tp = NULL;
		TCPDEBUG1();
		goto out;
	}
	tp = intotcpcb(inp);
	TCPDEBUG1();

#ifdef foo
	/*
	 * This is no longer necessary, since:
	 * - sosendtcp() has already checked it for us
	 * - It does not work with asynchronized send
	 */

	/*
	 * Don't let too much OOB data build up
	 */
	if (flags & PRUS_OOB) {
		if (ssb_space(&so->so_snd) < -512) {
			m_freem(m);
			error = ENOBUFS;
			goto out;
		}
	}
#endif

	/*
	 * Pump the data into the socket.
	 */
	if (m) {
		ssb_appendstream(&so->so_snd, m);
		/* Wake writers waiting on send-buffer space. */
		sowwakeup(so);
	}
	if (flags & PRUS_OOB) {
		/*
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section.
		 * Otherwise, snd_up should be one lower.
		 */
		tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
		/* TF_FORCE makes tcp_output() send even a zero window probe. */
		tp->t_flags |= TF_FORCE;
		error = tcp_output(tp);
		tp->t_flags &= ~TF_FORCE;
	} else {
		if (flags & PRUS_EOF) {
			/*
			 * Close the send side of the connection after
			 * the data is sent.
			 */
			socantsendmore(so);
			tp = tcp_usrclosed(tp);
		}
		/*
		 * tcp_usrclosed() may have dropped the connection (tp
		 * NULL); skip output entirely if one is already pending.
		 */
		if (tp != NULL && !tcp_output_pending(tp)) {
			if (flags & PRUS_MORETOCOME)
				tp->t_flags |= TF_MORETOCOME;
			error = tcp_output_fair(tp);
			if (flags & PRUS_MORETOCOME)
				tp->t_flags &= ~TF_MORETOCOME;
		}
	}
	COMMON_END1((flags & PRUS_OOB) ? PRU_SENDOOB :
		    ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND),
		    (flags & PRUS_NOREPLY));
}