void
tcp_timer_rexmt(void *xtp)
{
        struct tcpcb *tp = xtp;
        CURVNET_SET(tp->t_vnet);
        int rexmt;
        struct inpcb *inp;
#ifdef TCPDEBUG
        int ostate;

        ostate = tp->t_state;
#endif
        inp = tp->t_inpcb;
        KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL",
            __func__, tp));
        INP_WLOCK(inp);
        if (callout_pending(&tp->t_timers->tt_rexmt) ||
            !callout_active(&tp->t_timers->tt_rexmt)) {
                INP_WUNLOCK(inp);
                CURVNET_RESTORE();
                return;
        }
        callout_deactivate(&tp->t_timers->tt_rexmt);
        if ((inp->inp_flags & INP_DROPPED) != 0) {
                INP_WUNLOCK(inp);
                CURVNET_RESTORE();
                return;
        }
        KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
            ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
        tcp_free_sackholes(tp);
        if (tp->t_fb->tfb_tcp_rexmit_tmr) {
                /* The stack has a timer action too. */
                (*tp->t_fb->tfb_tcp_rexmit_tmr)(tp);
        }
        /*
         * Retransmission timer went off.  Message has not
         * been acked within retransmit interval.  Back off
         * to a longer retransmit interval and retransmit one segment.
         */
        if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
                tp->t_rxtshift = TCP_MAXRXTSHIFT;
                TCPSTAT_INC(tcps_timeoutdrop);
                if (tcp_inpinfo_lock_add(inp)) {
                        tcp_inpinfo_lock_del(inp, tp);
                        goto out;
                }
                tp = tcp_drop(tp, tp->t_softerror ?
                    tp->t_softerror : ETIMEDOUT);
                tcp_inpinfo_lock_del(inp, tp);
                goto out;
        }
        if (tp->t_state == TCPS_SYN_SENT) {
                /*
                 * If the SYN was retransmitted, indicate CWND to be
                 * limited to 1 segment in cc_conn_init().
                 */
                tp->snd_cwnd = 1;
        } else if (tp->t_rxtshift == 1) {
                /*
                 * First retransmit; record ssthresh and cwnd so they can
                 * be recovered if this turns out to be a "bad" retransmit.
                 * A retransmit is considered "bad" if an ACK for this
                 * segment is received within RTT/2 interval; the assumption
                 * here is that the ACK was already in flight.  See
                 * "On Estimating End-to-End Network Path Properties" by
                 * Allman and Paxson for more details.
                 */
                tp->snd_cwnd_prev = tp->snd_cwnd;
                tp->snd_ssthresh_prev = tp->snd_ssthresh;
                tp->snd_recover_prev = tp->snd_recover;
                if (IN_FASTRECOVERY(tp->t_flags))
                        tp->t_flags |= TF_WASFRECOVERY;
                else
                        tp->t_flags &= ~TF_WASFRECOVERY;
                if (IN_CONGRECOVERY(tp->t_flags))
                        tp->t_flags |= TF_WASCRECOVERY;
                else
                        tp->t_flags &= ~TF_WASCRECOVERY;
                tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
                tp->t_flags |= TF_PREVVALID;
        } else
                tp->t_flags &= ~TF_PREVVALID;
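/*
 * Editor's sketch (not kernel source): the t_badrxtwin arithmetic above,
 * as a self-contained userspace program.  In FreeBSD, t_srtt is kept in
 * fixed point with TCP_RTT_SHIFT fractional bits, so shifting right by
 * TCP_RTT_SHIFT + 1 yields roughly srtt/2 in ticks -- the RTT/2 window
 * from Allman and Paxson within which a returning ACK suggests the
 * retransmit was "bad" (the original ACK was already in flight).  The
 * names and values below are illustrative stand-ins, not the kernel's.
 */
#include <stdio.h>

#define SKETCH_RTT_SHIFT 5              /* assumed fractional bits of t_srtt */

int
main(void)
{
        int ticks = 100000;                     /* stand-in tick counter */
        int t_srtt = 200 << SKETCH_RTT_SHIFT;   /* smoothed RTT: 200 ticks */

        /* Same expression as in tcp_timer_rexmt(): deadline = now + srtt/2. */
        int t_badrxtwin = ticks + (t_srtt >> (SKETCH_RTT_SHIFT + 1));
        printf("bad-retransmit window closes %d ticks from now\n",
            t_badrxtwin - ticks);
        return (0);
}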
void
tcp_timer_2msl(void *xtp)
{
        struct tcpcb *tp = xtp;
        struct inpcb *inp;
        CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
        int ostate;

        ostate = tp->t_state;
#endif
        inp = tp->t_inpcb;
        KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL",
            __func__, tp));
        INP_WLOCK(inp);
        tcp_free_sackholes(tp);
        if (callout_pending(&tp->t_timers->tt_2msl) ||
            !callout_active(&tp->t_timers->tt_2msl)) {
                INP_WUNLOCK(tp->t_inpcb);
                CURVNET_RESTORE();
                return;
        }
        callout_deactivate(&tp->t_timers->tt_2msl);
        if ((inp->inp_flags & INP_DROPPED) != 0) {
                INP_WUNLOCK(inp);
                CURVNET_RESTORE();
                return;
        }
        KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
            ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
        /*
         * 2 MSL timeout in shutdown went off.  If we're closed but
         * still waiting for peer to close and connection has been idle
         * too long, delete the connection control block.  Otherwise,
         * check again in a bit.
         *
         * If in TIME_WAIT state just ignore as this timeout is handled in
         * tcp_tw_2msl_scan().
         *
         * If fastrecycle of FIN_WAIT_2, in FIN_WAIT_2 and receiver has closed,
         * there's no point in hanging onto FIN_WAIT_2 socket.  Just close it.
         * Ignore the fact that there were recent incoming segments.
         */
        if ((inp->inp_flags & INP_TIMEWAIT) != 0) {
                INP_WUNLOCK(inp);
                CURVNET_RESTORE();
                return;
        }
        if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
            tp->t_inpcb && tp->t_inpcb->inp_socket &&
            (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
                TCPSTAT_INC(tcps_finwait2_drops);
                if (tcp_inpinfo_lock_add(inp)) {
                        tcp_inpinfo_lock_del(inp, tp);
                        goto out;
                }
                tp = tcp_close(tp);
                tcp_inpinfo_lock_del(inp, tp);
                goto out;
        } else {
                if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) {
                        callout_reset(&tp->t_timers->tt_2msl,
                            TP_KEEPINTVL(tp), tcp_timer_2msl, tp);
                } else {
                        if (tcp_inpinfo_lock_add(inp)) {
                                tcp_inpinfo_lock_del(inp, tp);
                                goto out;
                        }
                        tp = tcp_close(tp);
                        tcp_inpinfo_lock_del(inp, tp);
                        goto out;
                }
        }

#ifdef TCPDEBUG
        if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
                tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
                    PRU_SLOWTIMO);
#endif
        TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);

        if (tp != NULL)
                INP_WUNLOCK(inp);
out:
        CURVNET_RESTORE();
}
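/*
 * Editor's sketch (not kernel source): the 2MSL decision above, reduced
 * to a pure function.  In FreeBSD, TP_MAXIDLE() derives from the
 * keepalive settings; here it is just a parameter.  A FIN_WAIT_2 socket
 * whose receive side is already shut down is recycled immediately when
 * the tcp_fast_finwait2_recycle sysctl is on; otherwise the connection
 * is closed only after idling past maxidle, and the timer is rearmed in
 * the meantime.  All names below are invented for the sketch.
 */
#include <stdbool.h>
#include <stdio.h>

enum msl_action { MSL_CLOSE_NOW, MSL_REARM_TIMER };

static enum msl_action
sketch_2msl_decision(bool fast_recycle, bool in_fin_wait_2,
    bool peer_cant_rcv_more, int ticks_idle, int maxidle)
{
        /* Fast-recycle: receiver closed, no point keeping FIN_WAIT_2. */
        if (fast_recycle && in_fin_wait_2 && peer_cant_rcv_more)
                return (MSL_CLOSE_NOW);
        /* Otherwise close only once the connection has idled too long. */
        return (ticks_idle <= maxidle ? MSL_REARM_TIMER : MSL_CLOSE_NOW);
}

int
main(void)
{
        /* Recycled at once despite zero idle time. */
        printf("%d\n", sketch_2msl_decision(true, true, true, 0, 600));
        /* No fast recycle: closed only because idle exceeded maxidle. */
        printf("%d\n", sketch_2msl_decision(false, true, true, 700, 600));
        return (0);
}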
void
tcp_timer_2msl(void *arg)
{
        struct tcpcb *tp = arg;
#ifdef TCP_DEBUG
        struct socket *so = NULL;
        short ostate;
#endif

        mutex_enter(softnet_lock);
        if ((tp->t_flags & TF_DEAD) != 0) {
                mutex_exit(softnet_lock);
                return;
        }
        if (!callout_expired(&tp->t_timer[TCPT_2MSL])) {
                mutex_exit(softnet_lock);
                return;
        }

        /*
         * 2 MSL timeout went off, clear the SACK scoreboard, reset
         * the FACK estimate.
         */
        KERNEL_LOCK(1, NULL);
        tcp_free_sackholes(tp);
        tp->snd_fack = tp->snd_una;

#ifdef TCP_DEBUG
#ifdef INET
        if (tp->t_inpcb)
                so = tp->t_inpcb->inp_socket;
#endif
#ifdef INET6
        if (tp->t_in6pcb)
                so = tp->t_in6pcb->in6p_socket;
#endif
        ostate = tp->t_state;
#endif /* TCP_DEBUG */

        /*
         * 2 MSL timeout in shutdown went off.  If we're closed but
         * still waiting for peer to close and connection has been idle
         * too long, or if 2MSL time is up from TIME_WAIT, delete connection
         * control block.  Otherwise, check again in a bit.
         */
        if (tp->t_state != TCPS_TIME_WAIT &&
            ((tp->t_maxidle == 0) ||
             ((tcp_now - tp->t_rcvtime) <= tp->t_maxidle)))
                TCP_TIMER_ARM(tp, TCPT_2MSL, tp->t_keepintvl);
        else
                tp = tcp_close(tp);

#ifdef TCP_DEBUG
        if (tp && so->so_options & SO_DEBUG)
                tcp_trace(TA_USER, ostate, tp, NULL,
                    PRU_SLOWTIMO | (TCPT_2MSL << 8));
#endif

        mutex_exit(softnet_lock);
        KERNEL_UNLOCK_ONE(NULL);
}
void
tcp_timer_rexmt(void *arg)
{
        struct tcpcb *tp = arg;
        uint32_t rto;
#ifdef TCP_DEBUG
        struct socket *so = NULL;
        short ostate;
#endif

        mutex_enter(softnet_lock);
        if ((tp->t_flags & TF_DEAD) != 0) {
                mutex_exit(softnet_lock);
                return;
        }
        if (!callout_expired(&tp->t_timer[TCPT_REXMT])) {
                mutex_exit(softnet_lock);
                return;
        }

        KERNEL_LOCK(1, NULL);
        if ((tp->t_flags & TF_PMTUD_PEND) && tp->t_inpcb &&
            SEQ_GEQ(tp->t_pmtud_th_seq, tp->snd_una) &&
            SEQ_LT(tp->t_pmtud_th_seq, (int)(tp->snd_una + tp->t_ourmss))) {
                extern struct sockaddr_in icmpsrc;
                struct icmp icmp;

                tp->t_flags &= ~TF_PMTUD_PEND;

                /* XXX create fake icmp message with relevant entries */
                icmp.icmp_nextmtu = tp->t_pmtud_nextmtu;
                icmp.icmp_ip.ip_len = tp->t_pmtud_ip_len;
                icmp.icmp_ip.ip_hl = tp->t_pmtud_ip_hl;
                icmpsrc.sin_addr = tp->t_inpcb->inp_faddr;
                icmp_mtudisc(&icmp, icmpsrc.sin_addr);

                /*
                 * Notify all connections to the same peer about
                 * new mss and trigger retransmit.
                 */
                in_pcbnotifyall(&tcbtable, icmpsrc.sin_addr, EMSGSIZE,
                    tcp_mtudisc);
                KERNEL_UNLOCK_ONE(NULL);
                mutex_exit(softnet_lock);
                return;
        }
#ifdef TCP_DEBUG
#ifdef INET
        if (tp->t_inpcb)
                so = tp->t_inpcb->inp_socket;
#endif
#ifdef INET6
        if (tp->t_in6pcb)
                so = tp->t_in6pcb->in6p_socket;
#endif
        ostate = tp->t_state;
#endif /* TCP_DEBUG */

        /*
         * Clear the SACK scoreboard, reset FACK estimate.
         */
        tcp_free_sackholes(tp);
        tp->snd_fack = tp->snd_una;

        /*
         * Retransmission timer went off.  Message has not
         * been acked within retransmit interval.  Back off
         * to a longer retransmit interval and retransmit one segment.
         */
        if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
                tp->t_rxtshift = TCP_MAXRXTSHIFT;
                TCP_STATINC(TCP_STAT_TIMEOUTDROP);
                tp = tcp_drop(tp, tp->t_softerror ?
                    tp->t_softerror : ETIMEDOUT);
                goto out;
        }

        TCP_STATINC(TCP_STAT_REXMTTIMEO);
        rto = TCP_REXMTVAL(tp);
        if (rto < tp->t_rttmin)
                rto = tp->t_rttmin;
        TCPT_RANGESET(tp->t_rxtcur, rto * tcp_backoff[tp->t_rxtshift],
            tp->t_rttmin, TCPTV_REXMTMAX);
        TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur);

        /*
         * If we are losing and we are trying path MTU discovery,
         * try turning it off.  This will avoid black holes in
         * the network which suppress or fail to send "packet
         * too big" ICMP messages.  We should ideally do
         * lots more sophisticated searching to find the right
         * value here...
         */
        if (tp->t_mtudisc && tp->t_rxtshift > TCP_MAXRXTSHIFT / 6) {
                TCP_STATINC(TCP_STAT_PMTUBLACKHOLE);

#ifdef INET
                /* try turning PMTUD off */
                if (tp->t_inpcb)
                        tp->t_mtudisc = 0;
#endif
#ifdef INET6
                /* try using IPv6 minimum MTU */
                if (tp->t_in6pcb)
                        tp->t_mtudisc = 0;
#endif

                /* XXX: more sophisticated Black hole recovery code? */
        }

        /*
         * If losing, let the lower level know and try for
         * a better route.  Also, if we backed off this far,
         * our srtt estimate is probably bogus.  Clobber it
         * so we'll take the next rtt measurement as our srtt;
         * move the current srtt into rttvar to keep the current
         * retransmit times until then.
         */
        if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#ifdef INET
                if (tp->t_inpcb)
                        in_losing(tp->t_inpcb);
#endif
#ifdef INET6
                if (tp->t_in6pcb)
                        in6_losing(tp->t_in6pcb);
#endif
                /*
                 * This operation is not described in RFC 2988.  The
                 * point is to keep srtt+4*rttvar constant, so we
                 * should shift right 2 bits to divide by 4, and then
                 * shift right one bit because the storage
                 * representation of rttvar is 1/16s vs 1/32s for
                 * srtt.
                 */
                tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
                tp->t_srtt = 0;
        }
        tp->snd_nxt = tp->snd_una;
        tp->snd_high = tp->snd_max;
        /*
         * If timing a segment in this window, stop the timer.
         */
        tp->t_rtttime = 0;

        /*
         * Remember if we are retransmitting a SYN, because if
         * we do, the initial congestion window must be set
         * to 1 segment.
         */
        if (tp->t_state == TCPS_SYN_SENT)
                tp->t_flags |= TF_SYN_REXMT;

        /*
         * Adjust congestion control parameters.
         */
        tp->t_congctl->slow_retransmit(tp);
        (void)tcp_output(tp);

out:
#ifdef TCP_DEBUG
        if (tp && so->so_options & SO_DEBUG)
                tcp_trace(TA_USER, ostate, tp, NULL,
                    PRU_SLOWTIMO | (TCPT_REXMT << 8));
#endif
        KERNEL_UNLOCK_ONE(NULL);
        mutex_exit(softnet_lock);
}
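/*
 * Editor's sketch (not the kernel's macros): the exponential backoff
 * applied above, as a self-contained userspace program.  tcp_backoff[]
 * doubles the multiplier on each consecutive timeout up to a cap, and
 * TCPT_RANGESET() clamps the product into [t_rttmin, TCPTV_REXMTMAX].
 * The table and bounds below are illustrative stand-ins, not the exact
 * kernel values.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAXRXTSHIFT 12           /* mirrors TCP_MAXRXTSHIFT */

static const int sketch_backoff[SKETCH_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static uint32_t
sketch_clamp(uint32_t v, uint32_t lo, uint32_t hi)
{
        /* Same effect as the kernel's TCPT_RANGESET() macro. */
        if (v < lo)
                return (lo);
        if (v > hi)
                return (hi);
        return (v);
}

int
main(void)
{
        uint32_t rto = 3;               /* base RTO from srtt/rttvar, in ticks */
        uint32_t rttmin = 2, rexmtmax = 128;

        /* Each consecutive timeout doubles the interval until the cap. */
        for (int shift = 1; shift <= SKETCH_MAXRXTSHIFT; shift++)
                printf("rxtshift %2d -> rxtcur %3u\n", shift,
                    sketch_clamp(rto * sketch_backoff[shift], rttmin,
                    rexmtmax));
        return (0);
}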
void
tcp_timer_rexmt(void *xtp)
{
        struct tcpcb *tp = xtp;
        CURVNET_SET(tp->t_vnet);
        int rexmt;
        int headlocked;
        struct inpcb *inp;
#ifdef TCPDEBUG
        int ostate;

        ostate = tp->t_state;
#endif
        INP_INFO_RLOCK(&V_tcbinfo);
        inp = tp->t_inpcb;
        /*
         * XXXRW: While this assert is in fact correct, bugs in the tcpcb
         * tear-down mean we need it as a work-around for races between
         * timers and tcp_discardcb().
         *
         * KASSERT(inp != NULL, ("tcp_timer_rexmt: inp == NULL"));
         */
        if (inp == NULL) {
                tcp_timer_race++;
                INP_INFO_RUNLOCK(&V_tcbinfo);
                CURVNET_RESTORE();
                return;
        }
        INP_WLOCK(inp);
        if ((inp->inp_flags & INP_DROPPED) ||
            callout_pending(&tp->t_timers->tt_rexmt) ||
            !callout_active(&tp->t_timers->tt_rexmt)) {
                INP_WUNLOCK(inp);
                INP_INFO_RUNLOCK(&V_tcbinfo);
                CURVNET_RESTORE();
                return;
        }
        callout_deactivate(&tp->t_timers->tt_rexmt);
        tcp_free_sackholes(tp);
        /*
         * Retransmission timer went off.  Message has not
         * been acked within retransmit interval.  Back off
         * to a longer retransmit interval and retransmit one segment.
         */
        if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
                tp->t_rxtshift = TCP_MAXRXTSHIFT;
                TCPSTAT_INC(tcps_timeoutdrop);
                in_pcbref(inp);
                INP_INFO_RUNLOCK(&V_tcbinfo);
                INP_WUNLOCK(inp);
                INP_INFO_WLOCK(&V_tcbinfo);
                INP_WLOCK(inp);
                if (in_pcbrele_wlocked(inp)) {
                        INP_INFO_WUNLOCK(&V_tcbinfo);
                        CURVNET_RESTORE();
                        return;
                }
                if (inp->inp_flags & INP_DROPPED) {
                        INP_WUNLOCK(inp);
                        INP_INFO_WUNLOCK(&V_tcbinfo);
                        CURVNET_RESTORE();
                        return;
                }
                tp = tcp_drop(tp, tp->t_softerror ?
                    tp->t_softerror : ETIMEDOUT);
                headlocked = 1;
                goto out;
        }
        INP_INFO_RUNLOCK(&V_tcbinfo);
        headlocked = 0;
        if (tp->t_rxtshift == 1) {
                /*
                 * First retransmit; record ssthresh and cwnd so they can
                 * be recovered if this turns out to be a "bad" retransmit.
                 * A retransmit is considered "bad" if an ACK for this
                 * segment is received within RTT/2 interval; the assumption
                 * here is that the ACK was already in flight.  See
                 * "On Estimating End-to-End Network Path Properties" by
                 * Allman and Paxson for more details.
                 */
                tp->snd_cwnd_prev = tp->snd_cwnd;
                tp->snd_ssthresh_prev = tp->snd_ssthresh;
                tp->snd_recover_prev = tp->snd_recover;
                if (IN_FASTRECOVERY(tp->t_flags))
                        tp->t_flags |= TF_WASFRECOVERY;
                else
                        tp->t_flags &= ~TF_WASFRECOVERY;
                if (IN_CONGRECOVERY(tp->t_flags))
                        tp->t_flags |= TF_WASCRECOVERY;
                else
                        tp->t_flags &= ~TF_WASCRECOVERY;
                tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
                tp->t_flags |= TF_PREVVALID;
        } else
                tp->t_flags &= ~TF_PREVVALID;
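/*
 * Editor's sketch (not kernel source): the reference-count/lock-upgrade
 * dance in the timeout-drop path above, transplanted onto userspace
 * pthreads.  The global pcbinfo lock orders before the per-connection
 * lock, so code holding only the connection lock cannot take the global
 * write lock directly.  Instead it pins the object with a reference,
 * drops its lock, reacquires both in the legal order, and re-validates.
 * All names here are invented for the sketch; the kernel analogues are
 * in_pcbref(), in_pcbrele_wlocked() and the INP_DROPPED flag, and the
 * real code's handling of a freed inpcb is simplified away.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t sketch_info_lock = PTHREAD_MUTEX_INITIALIZER;

struct sketch_conn {
        pthread_mutex_t lock;           /* per-connection, like INP_WLOCK */
        int             refcount;
        bool            dropped;        /* like INP_DROPPED */
};

/*
 * Called with c->lock held; returns with sketch_info_lock and c->lock
 * held on success.  Returns false, with all locks dropped, if the
 * connection died while the locks were briefly released.
 */
static bool
sketch_upgrade(struct sketch_conn *c)
{
        c->refcount++;                          /* like in_pcbref() */
        pthread_mutex_unlock(&c->lock);

        /* Reacquire in the legal global-before-connection order. */
        pthread_mutex_lock(&sketch_info_lock);
        pthread_mutex_lock(&c->lock);

        if (--c->refcount == 0 || c->dropped) {
                /* Torn down while unlocked: back out completely. */
                pthread_mutex_unlock(&c->lock);
                pthread_mutex_unlock(&sketch_info_lock);
                return (false);
        }
        return (true);
}

static struct sketch_conn c = {
        .lock = PTHREAD_MUTEX_INITIALIZER, .refcount = 1, .dropped = false,
};

int
main(void)
{
        pthread_mutex_lock(&c.lock);
        if (sketch_upgrade(&c)) {
                /* ... would drop/close the connection here ... */
                pthread_mutex_unlock(&c.lock);
                pthread_mutex_unlock(&sketch_info_lock);
        }
        return (0);
}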
void
tcp_timer_2msl(void *xtp)
{
        struct tcpcb *tp = xtp;
        struct inpcb *inp;
        CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
        int ostate;

        ostate = tp->t_state;
#endif
        /*
         * XXXRW: Does this actually happen?
         */
        INP_INFO_WLOCK(&V_tcbinfo);
        inp = tp->t_inpcb;
        /*
         * XXXRW: While this assert is in fact correct, bugs in the tcpcb
         * tear-down mean we need it as a work-around for races between
         * timers and tcp_discardcb().
         *
         * KASSERT(inp != NULL, ("tcp_timer_2msl: inp == NULL"));
         */
        if (inp == NULL) {
                tcp_timer_race++;
                INP_INFO_WUNLOCK(&V_tcbinfo);
                CURVNET_RESTORE();
                return;
        }
        INP_WLOCK(inp);
        tcp_free_sackholes(tp);
        if ((inp->inp_flags & INP_DROPPED) ||
            callout_pending(&tp->t_timers->tt_2msl) ||
            !callout_active(&tp->t_timers->tt_2msl)) {
                INP_WUNLOCK(tp->t_inpcb);
                INP_INFO_WUNLOCK(&V_tcbinfo);
                CURVNET_RESTORE();
                return;
        }
        callout_deactivate(&tp->t_timers->tt_2msl);
        /*
         * 2 MSL timeout in shutdown went off.  If we're closed but
         * still waiting for peer to close and connection has been idle
         * too long, or if 2MSL time is up from TIME_WAIT, delete connection
         * control block.  Otherwise, check again in a bit.
         *
         * If fastrecycle of FIN_WAIT_2, in FIN_WAIT_2 and receiver has closed,
         * there's no point in hanging onto FIN_WAIT_2 socket.  Just close it.
         * Ignore the fact that there were recent incoming segments.
         */
        if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
            tp->t_inpcb && tp->t_inpcb->inp_socket &&
            (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
                TCPSTAT_INC(tcps_finwait2_drops);
                tp = tcp_close(tp);
        } else {
                if (tp->t_state != TCPS_TIME_WAIT &&
                    ticks - tp->t_rcvtime <= TP_MAXIDLE(tp))
                        callout_reset_on(&tp->t_timers->tt_2msl,
                            TP_KEEPINTVL(tp), tcp_timer_2msl, tp,
                            INP_CPU(inp));
                else
                        tp = tcp_close(tp);
        }

#ifdef TCPDEBUG
        if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
                tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
                    PRU_SLOWTIMO);
#endif
        if (tp != NULL)
                INP_WUNLOCK(inp);
        INP_INFO_WUNLOCK(&V_tcbinfo);
        CURVNET_RESTORE();
}