/* * User issued close, and wish to trail through shutdown states: * if never received SYN, just forget it. If got a SYN from peer, * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN. * If already got a FIN from peer, then almost done; go to LAST_ACK * state. In all other cases, have already sent FIN to peer (e.g. * after PRU_SHUTDOWN), and just have to play tedious game waiting * for peer to send FIN or not respond to keep-alives, etc. * We can let the user exit from the close as soon as the FIN is acked. */ static struct tcpcb * tcp_usrclosed(struct tcpcb *tp) { switch (tp->t_state) { case TCPS_CLOSED: case TCPS_LISTEN: tp->t_state = TCPS_CLOSED; tp = tcp_close(tp); break; case TCPS_SYN_SENT: case TCPS_SYN_RECEIVED: tp->t_flags |= TF_NEEDFIN; break; case TCPS_ESTABLISHED: tp->t_state = TCPS_FIN_WAIT_1; break; case TCPS_CLOSE_WAIT: tp->t_state = TCPS_LAST_ACK; break; } if (tp && tp->t_state >= TCPS_FIN_WAIT_2) { soisdisconnected(tp->t_inpcb->inp_socket); /* To prevent the connection hanging in FIN_WAIT_2 forever. */ if (tp->t_state == TCPS_FIN_WAIT_2) { tcp_callout_reset(tp, tp->tt_2msl, tcp_maxidle, tcp_timer_2msl); } } return (tp); }
/* Caller should be in critical section */ static struct tcpcb * tcp_timer_2msl_handler(struct tcpcb *tp) { #ifdef TCPDEBUG int ostate; #endif #ifdef TCPDEBUG ostate = tp->t_state; #endif /* * 2 MSL timeout in shutdown went off. If we're closed but * still waiting for peer to close and connection has been idle * too long, or if 2MSL time is up from TIME_WAIT, delete connection * control block. Otherwise, check again in a bit. */ if (tp->t_state != TCPS_TIME_WAIT && (ticks - tp->t_rcvtime) <= tp->t_maxidle) { tcp_callout_reset(tp, tp->tt_2msl, tp->t_keepintvl, tcp_timer_2msl); } else { tp = tcp_close(tp); } #ifdef TCPDEBUG if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO); #endif return tp; }
/*
 * Complete an IPv4 connect() while running on the inpcb's owner CPU:
 * resolve address-in-use conflicts, commit the local/foreign address
 * pair, hash the inpcb, arm the connection timers, and transmit the
 * initial SYN via tcp_output().
 *
 * 'm' optionally carries initial data; it is consumed on success and
 * freed here on the EADDRINUSE failure path.  Returns 0 or an errno.
 */
static int
tcp_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf *m,
		  struct sockaddr_in *sin, struct sockaddr_in *if_sin)
{
	struct inpcb *inp = tp->t_inpcb, *oinp;
	struct socket *so = inp->inp_socket;
	struct route *ro = &inp->inp_route;

	/*
	 * Reject the connect if the 4-tuple is already in use on this
	 * CPU's pcb hash, e.g. by an earlier incarnation of this same
	 * connection still in TIME_WAIT.
	 */
	oinp = in_pcblookup_hash(&tcbinfo[mycpu->gd_cpuid],
				 sin->sin_addr, sin->sin_port,
				 (inp->inp_laddr.s_addr != INADDR_ANY ?
				  inp->inp_laddr : if_sin->sin_addr),
				 inp->inp_lport, 0, NULL);
	if (oinp != NULL) {
		m_freem(m);
		return (EADDRINUSE);
	}

	/*
	 * Commit the address pair (defaulting the local address to the
	 * interface address) and make the inpcb findable by incoming
	 * segments via the connection hash.
	 */
	if (inp->inp_laddr.s_addr == INADDR_ANY)
		inp->inp_laddr = if_sin->sin_addr;
	inp->inp_faddr = sin->sin_addr;
	inp->inp_fport = sin->sin_port;
	inp->inp_cpcbinfo = &tcbinfo[mycpu->gd_cpuid];
	in_pcbinsconnhash(inp);

	/*
	 * We are now on the inpcb's owner CPU, if the cached route was
	 * freed because the rtentry's owner CPU is not the current CPU
	 * (e.g. in tcp_connect()), then we try to reallocate it here with
	 * the hope that a rtentry may be cloned from a RTF_PRCLONING
	 * rtentry.
	 */
	if (!(inp->inp_socket->so_options & SO_DONTROUTE) && /*XXX*/
	    ro->ro_rt == NULL) {
		bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
		ro->ro_dst.sa_family = AF_INET;
		ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
		((struct sockaddr_in *)&ro->ro_dst)->sin_addr =
		    sin->sin_addr;
		rtalloc(ro);
	}

	/*
	 * Now that no more errors can occur, change the protocol processing
	 * port to the current thread (which is the correct thread).
	 *
	 * Create TCP timer message now; we are on the tcpcb's owner
	 * CPU/thread.
	 */
	tcp_create_timermsg(tp, &curthread->td_msgport);

	/*
	 * Compute window scaling to request.  Use a larger scaling than
	 * needed for the initial receive buffer in case the receive buffer
	 * gets expanded.
	 */
	if (tp->request_r_scale < TCP_MIN_WINSHIFT)
		tp->request_r_scale = TCP_MIN_WINSHIFT;
	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
	       (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat) {
		tp->request_r_scale++;
	}

	/* Enter SYN_SENT and arm the connection-establishment timer. */
	soisconnecting(so);
	tcpstat.tcps_connattempt++;
	tp->t_state = TCPS_SYN_SENT;
	tcp_callout_reset(tp, tp->tt_keep, tcp_keepinit, tcp_timer_keep);
	tp->iss = tcp_new_isn(tp);
	tcp_sendseqinit(tp);

	/* Queue any initial data; PRUS_OOB marks it as urgent data. */
	if (m) {
		ssb_appendstream(&so->so_snd, m);
		m = NULL;
		if (flags & PRUS_OOB)
			tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
	}

	/*
	 * Close the send side of the connection after
	 * the data is sent if flagged.
	 */
	if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
		socantsendmore(so);
		tp = tcp_usrclosed(tp);
	}
	return (tcp_output(tp));
}
/*
 * IPv6 counterpart of tcp_connect_oncpu(): complete the connect() on
 * the inpcb's owner CPU and transmit the initial SYN.
 *
 * On success any initial data in *mp is appended to the send buffer
 * and *mp is cleared.  NOTE(review): on the EADDRINUSE path *mp is
 * left untouched — presumably the caller frees it (the IPv4 variant
 * frees the mbuf itself); verify against the caller.
 */
static int
tcp6_connect_oncpu(struct tcpcb *tp, int flags, struct mbuf **mp,
		   struct sockaddr_in6 *sin6, struct in6_addr *addr6)
{
	struct mbuf *m = *mp;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct inpcb *oinp;

	/*
	 * Cannot simply call in_pcbconnect, because there might be an
	 * earlier incarnation of this same connection still in
	 * TIME_WAIT state, creating an ADDRINUSE error.
	 */
	oinp = in6_pcblookup_hash(inp->inp_cpcbinfo,
				  &sin6->sin6_addr, sin6->sin6_port,
				  (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ?
				   addr6 : &inp->in6p_laddr),
				  inp->inp_lport, 0, NULL);
	if (oinp)
		return (EADDRINUSE);

	/*
	 * Commit the address pair (defaulting the local address) and
	 * make the inpcb findable via the connection hash.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
		inp->in6p_laddr = *addr6;
	inp->in6p_faddr = sin6->sin6_addr;
	inp->inp_fport = sin6->sin6_port;
	if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0)
		inp->in6p_flowinfo = sin6->sin6_flowinfo;
	in_pcbinsconnhash(inp);

	/*
	 * Now that no more errors can occur, change the protocol processing
	 * port to the current thread (which is the correct thread).
	 *
	 * Create TCP timer message now; we are on the tcpcb's owner
	 * CPU/thread.
	 */
	tcp_create_timermsg(tp, &curthread->td_msgport);

	/* Compute window scaling to request. */
	if (tp->request_r_scale < TCP_MIN_WINSHIFT)
		tp->request_r_scale = TCP_MIN_WINSHIFT;
	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
	       (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.ssb_hiwat) {
		tp->request_r_scale++;
	}

	/* Enter SYN_SENT and arm the connection-establishment timer. */
	soisconnecting(so);
	tcpstat.tcps_connattempt++;
	tp->t_state = TCPS_SYN_SENT;
	tcp_callout_reset(tp, tp->tt_keep, tcp_keepinit, tcp_timer_keep);
	tp->iss = tcp_new_isn(tp);
	tcp_sendseqinit(tp);

	/* Queue any initial data; PRUS_OOB marks it as urgent data. */
	if (m) {
		ssb_appendstream(&so->so_snd, m);
		*mp = NULL;
		if (flags & PRUS_OOB)
			tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
	}

	/*
	 * Close the send side of the connection after
	 * the data is sent if flagged.
	 */
	if ((flags & (PRUS_OOB|PRUS_EOF)) == PRUS_EOF) {
		socantsendmore(so);
		tp = tcp_usrclosed(tp);
	}
	return (tcp_output(tp));
}
/* Caller should be in critical section */ static struct tcpcb * tcp_timer_keep_handler(struct tcpcb *tp) { struct tcptemp *t_template; #ifdef TCPDEBUG int ostate = tp->t_state; #endif /* * Keep-alive timer went off; send something * or drop connection if idle for too long. */ tcpstat.tcps_keeptimeo++; if (tp->t_state < TCPS_ESTABLISHED) goto dropit; if ((always_keepalive || (tp->t_flags & TF_KEEPALIVE) || (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE)) && tp->t_state <= TCPS_CLOSING) { if ((ticks - tp->t_rcvtime) >= tp->t_keepidle + tp->t_maxidle) goto dropit; /* * Send a packet designed to force a response * if the peer is up and reachable: * either an ACK if the connection is still alive, * or an RST if the peer has closed the connection * due to timeout or reboot. * Using sequence number tp->snd_una-1 * causes the transmitted zero-length segment * to lie outside the receive window; * by the protocol spec, this requires the * correspondent TCP to respond. */ tcpstat.tcps_keepprobe++; t_template = tcp_maketemplate(tp); if (t_template) { tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t, NULL, tp->rcv_nxt, tp->snd_una - 1, 0); tcp_freetemplate(t_template); } tcp_callout_reset(tp, tp->tt_keep, tp->t_keepintvl, tcp_timer_keep); } else { tcp_callout_reset(tp, tp->tt_keep, tp->t_keepidle, tcp_timer_keep); } #ifdef TCPDEBUG if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO); #endif return tp; dropit: tcpstat.tcps_keepdrops++; tp = tcp_drop(tp, ETIMEDOUT); #ifdef TCPDEBUG if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO); #endif return tp; }