/* * Tcp output routine: figure out what should be sent and send it. */ int tcp_output(struct tcpcb *tp) { struct inpcb * const inp = tp->t_inpcb; struct socket *so = inp->inp_socket; long len, recvwin, sendwin; int nsacked = 0; int off, flags, error; #ifdef TCP_SIGNATURE int sigoff = 0; #endif struct mbuf *m; struct ip *ip = NULL; struct ipovly *ipov = NULL; struct tcphdr *th; u_char opt[TCP_MAXOLEN]; unsigned int ipoptlen, optlen, hdrlen; int idle; boolean_t sendalot; struct ip6_hdr *ip6 = NULL; #ifdef INET6 const boolean_t isipv6 = (inp->inp_vflag & INP_IPV6) != 0; #else const boolean_t isipv6 = FALSE; #endif KKASSERT(so->so_port == &curthread->td_msgport); /* * Determine length of data that should be transmitted, * and flags that will be used. * If there is some data or critical controls (SYN, RST) * to send, then transmit; otherwise, investigate further. */ /* * If we have been idle for a while, the send congestion window * could be no longer representative of the current state of the link. * So unless we are expecting more acks to come in, slow-start from * scratch to re-determine the send congestion window. */ if (tp->snd_max == tp->snd_una && (ticks - tp->t_rcvtime) >= tp->t_rxtcur) { if (tcp_do_rfc3390) { int initial_cwnd = min(4 * tp->t_maxseg, max(2 * tp->t_maxseg, 4380)); tp->snd_cwnd = min(tp->snd_cwnd, initial_cwnd); } else { tp->snd_cwnd = tp->t_maxseg; } tp->snd_wacked = 0; } /* * Calculate whether the transmit stream was previously idle * and adjust TF_LASTIDLE for the next time. */ idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); if (idle && (tp->t_flags & TF_MORETOCOME)) tp->t_flags |= TF_LASTIDLE; else tp->t_flags &= ~TF_LASTIDLE; if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max && !IN_FASTRECOVERY(tp)) nsacked = tcp_sack_bytes_below(&tp->scb, tp->snd_nxt); again: /* Make use of SACK information when slow-starting after a RTO. */ if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max && !IN_FASTRECOVERY(tp)) { tcp_seq old_snd_nxt = tp->snd_nxt; tcp_sack_skip_sacked(&tp->scb, &tp->snd_nxt); nsacked += tp->snd_nxt - old_snd_nxt; } sendalot = FALSE; off = tp->snd_nxt - tp->snd_una; sendwin = min(tp->snd_wnd, tp->snd_cwnd + nsacked); sendwin = min(sendwin, tp->snd_bwnd); flags = tcp_outflags[tp->t_state]; /* * Get standard flags, and add SYN or FIN if requested by 'hidden' * state flags. */ if (tp->t_flags & TF_NEEDFIN) flags |= TH_FIN; if (tp->t_flags & TF_NEEDSYN) flags |= TH_SYN; /* * If in persist timeout with window of 0, send 1 byte. * Otherwise, if window is small but nonzero * and timer expired, we will send what we can * and go to transmit state. */ if (tp->t_flags & TF_FORCE) { if (sendwin == 0) { /* * If we still have some data to send, then * clear the FIN bit. Usually this would * happen below when it realizes that we * aren't sending all the data. However, * if we have exactly 1 byte of unsent data, * then it won't clear the FIN bit below, * and if we are in persist state, we wind * up sending the packet without recording * that we sent the FIN bit. * * We can't just blindly clear the FIN bit, * because if we don't have any more data * to send then the probe will be the FIN * itself. */ if (off < so->so_snd.ssb_cc) flags &= ~TH_FIN; sendwin = 1; } else { tcp_callout_stop(tp, tp->tt_persist); tp->t_rxtshift = 0; } } /* * If snd_nxt == snd_max and we have transmitted a FIN, the * offset will be > 0 even if so_snd.ssb_cc is 0, resulting in * a negative length. 
This can also occur when TCP opens up * its congestion window while receiving additional duplicate * acks after fast-retransmit because TCP will reset snd_nxt * to snd_max after the fast-retransmit. * * In the normal retransmit-FIN-only case, however, snd_nxt will * be set to snd_una, the offset will be 0, and the length may * wind up 0. */ len = (long)ulmin(so->so_snd.ssb_cc, sendwin) - off; /* * Lop off SYN bit if it has already been sent. However, if this * is SYN-SENT state and if segment contains data, suppress sending * segment (sending the segment would be an option if we still * did TAO and the remote host supported it). */ if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) { flags &= ~TH_SYN; off--, len++; if (len > 0 && tp->t_state == TCPS_SYN_SENT) return 0; } /* * Be careful not to send data and/or FIN on SYN segments. * This measure is needed to prevent interoperability problems * with not fully conformant TCP implementations. */ if (flags & TH_SYN) { len = 0; flags &= ~TH_FIN; } if (len < 0) { /* * If FIN has been sent but not acked, * but we haven't been called to retransmit, * len will be < 0. Otherwise, window shrank * after we sent into it. If window shrank to 0, * cancel pending retransmit, pull snd_nxt back * to (closed) window, and set the persist timer * if it isn't already going. If the window didn't * close completely, just wait for an ACK. */ len = 0; if (sendwin == 0) { tcp_callout_stop(tp, tp->tt_rexmt); tp->t_rxtshift = 0; tp->snd_nxt = tp->snd_una; if (!tcp_callout_active(tp, tp->tt_persist)) tcp_setpersist(tp); } } KASSERT(len >= 0, ("%s: len < 0", __func__)); /* * Automatic sizing of send socket buffer. Often the send buffer * size is not optimally adjusted to the actual network conditions * at hand (delay bandwidth product). Setting the buffer size too * small limits throughput on links with high bandwidth and high * delay (eg. trans-continental/oceanic links). Setting the * buffer size too big consumes too much real kernel memory, * especially with many connections on busy servers. * * The criteria to step up the send buffer one notch are: * 1. receive window of remote host is larger than send buffer * (with a fudge factor of 5/4th); * 2. send buffer is filled to 7/8th with data (so we actually * have data to make use of it); * 3. send buffer fill has not hit maximal automatic size; * 4. our send window (slow start and cogestion controlled) is * larger than sent but unacknowledged data in send buffer. * * The remote host receive window scaling factor may limit the * growing of the send buffer before it reaches its allowed * maximum. * * It scales directly with slow start or congestion window * and does at most one step per received ACK. This fast * scaling has the drawback of growing the send buffer beyond * what is strictly necessary to make full use of a given * delay*bandwith product. However testing has shown this not * to be much of an problem. At worst we are trading wasting * of available bandwith (the non-use of it) for wasting some * socket buffer memory. * * TODO: Shrink send buffer during idle periods together * with congestion window. Requires another timer. Has to * wait for upcoming tcp timer rewrite. 
*/ if (tcp_do_autosndbuf && so->so_snd.ssb_flags & SSB_AUTOSIZE) { if ((tp->snd_wnd / 4 * 5) >= so->so_snd.ssb_hiwat && so->so_snd.ssb_cc >= (so->so_snd.ssb_hiwat / 8 * 7) && so->so_snd.ssb_cc < tcp_autosndbuf_max && sendwin >= (so->so_snd.ssb_cc - (tp->snd_nxt - tp->snd_una))) { u_long newsize; newsize = ulmin(so->so_snd.ssb_hiwat + tcp_autosndbuf_inc, tcp_autosndbuf_max); if (!ssb_reserve(&so->so_snd, newsize, so, NULL)) atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE); if (newsize >= (TCP_MAXWIN << tp->snd_scale)) atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE); } } /* * Truncate to the maximum segment length and ensure that FIN is * removed if the length no longer contains the last data byte. */ if (len > tp->t_maxseg) { len = tp->t_maxseg; sendalot = TRUE; } if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.ssb_cc)) flags &= ~TH_FIN; recvwin = ssb_space(&so->so_rcv); /* * Sender silly window avoidance. We transmit under the following * conditions when len is non-zero: * * - We have a full segment * - This is the last buffer in a write()/send() and we are * either idle or running NODELAY * - we've timed out (e.g. persist timer) * - we have more then 1/2 the maximum send window's worth of * data (receiver may be limiting the window size) * - we need to retransmit */ if (len) { if (len == tp->t_maxseg) goto send; /* * NOTE! on localhost connections an 'ack' from the remote * end may occur synchronously with the output and cause * us to flush a buffer queued with moretocome. XXX * * note: the len + off check is almost certainly unnecessary. */ if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ (idle || (tp->t_flags & TF_NODELAY)) && len + off >= so->so_snd.ssb_cc && !(tp->t_flags & TF_NOPUSH)) { goto send; } if (tp->t_flags & TF_FORCE) /* typ. timeout case */ goto send; if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) goto send; if (SEQ_LT(tp->snd_nxt, tp->snd_max)) /* retransmit case */ goto send; } /* * Compare available window to amount of window * known to peer (as advertised window less * next expected input). If the difference is at least two * max size segments, or at least 50% of the maximum possible * window, then want to send a window update to peer. */ if (recvwin > 0) { /* * "adv" is the amount we can increase the window, * taking into account that we are limited by * TCP_MAXWIN << tp->rcv_scale. */ long adv = min(recvwin, (long)TCP_MAXWIN << tp->rcv_scale) - (tp->rcv_adv - tp->rcv_nxt); long hiwat; /* * This ack case typically occurs when the user has drained * the TCP socket buffer sufficiently to warrent an ack * containing a 'pure window update'... that is, an ack that * ONLY updates the tcp window. * * It is unclear why we would need to do a pure window update * past 2 segments if we are going to do one at 1/2 the high * water mark anyway, especially since under normal conditions * the user program will drain the socket buffer quickly. * The 2-segment pure window update will often add a large * number of extra, unnecessary acks to the stream. * * avoid_pure_win_update now defaults to 1. */ if (avoid_pure_win_update == 0 || (tp->t_flags & TF_RXRESIZED)) { if (adv >= (long) (2 * tp->t_maxseg)) { goto send; } } hiwat = (long)(TCP_MAXWIN << tp->rcv_scale); if (hiwat > (long)so->so_rcv.ssb_hiwat) hiwat = (long)so->so_rcv.ssb_hiwat; if (adv >= hiwat / 2) goto send; } /* * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW * is also a catch-all for the retransmit timer timeout case. 
*/ if (tp->t_flags & TF_ACKNOW) goto send; if ((flags & TH_RST) || ((flags & TH_SYN) && !(tp->t_flags & TF_NEEDSYN))) goto send; if (SEQ_GT(tp->snd_up, tp->snd_una)) goto send; /* * If our state indicates that FIN should be sent * and we have not yet done so, then we need to send. */ if (flags & TH_FIN && (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una)) goto send; /* * TCP window updates are not reliable, rather a polling protocol * using ``persist'' packets is used to insure receipt of window * updates. The three ``states'' for the output side are: * idle not doing retransmits or persists * persisting to move a small or zero window * (re)transmitting and thereby not persisting * * tcp_callout_active(tp, tp->tt_persist) * is true when we are in persist state. * The TF_FORCE flag in tp->t_flags * is set when we are called to send a persist packet. * tcp_callout_active(tp, tp->tt_rexmt) * is set when we are retransmitting * The output side is idle when both timers are zero. * * If send window is too small, there is data to transmit, and no * retransmit or persist is pending, then go to persist state. * If nothing happens soon, send when timer expires: * if window is nonzero, transmit what we can, * otherwise force out a byte. */ if (so->so_snd.ssb_cc > 0 && !tcp_callout_active(tp, tp->tt_rexmt) && !tcp_callout_active(tp, tp->tt_persist)) { tp->t_rxtshift = 0; tcp_setpersist(tp); } /* * No reason to send a segment, just return. */ return (0); send: /* * Before ESTABLISHED, force sending of initial options * unless TCP set not to do any options. * NOTE: we assume that the IP/TCP header plus TCP options * always fit in a single mbuf, leaving room for a maximum * link header, i.e. * max_linkhdr + sizeof(struct tcpiphdr) + optlen <= MCLBYTES */ optlen = 0; if (isipv6) hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); else hdrlen = sizeof(struct tcpiphdr); if (flags & TH_SYN) { tp->snd_nxt = tp->iss; if (!(tp->t_flags & TF_NOOPT)) { u_short mss; opt[0] = TCPOPT_MAXSEG; opt[1] = TCPOLEN_MAXSEG; mss = htons((u_short) tcp_mssopt(tp)); memcpy(opt + 2, &mss, sizeof mss); optlen = TCPOLEN_MAXSEG; if ((tp->t_flags & TF_REQ_SCALE) && (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_SCALE))) { *((u_int32_t *)(opt + optlen)) = htonl( TCPOPT_NOP << 24 | TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 | tp->request_r_scale); optlen += 4; } if ((tcp_do_sack && !(flags & TH_ACK)) || tp->t_flags & TF_SACK_PERMITTED) { uint32_t *lp = (uint32_t *)(opt + optlen); *lp = htonl(TCPOPT_SACK_PERMITTED_ALIGNED); optlen += TCPOLEN_SACK_PERMITTED_ALIGNED; } } } /* * Send a timestamp and echo-reply if this is a SYN and our side * wants to use timestamps (TF_REQ_TSTMP is set) or both our side * and our peer have sent timestamps in our SYN's. */ if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && !(flags & TH_RST) && (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_TSTMP))) { u_int32_t *lp = (u_int32_t *)(opt + optlen); /* Form timestamp option as shown in appendix A of RFC 1323. */ *lp++ = htonl(TCPOPT_TSTAMP_HDR); *lp++ = htonl(ticks); *lp = htonl(tp->ts_recent); optlen += TCPOLEN_TSTAMP_APPA; } /* Set receive buffer autosizing timestamp. */ if (tp->rfbuf_ts == 0 && (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) tp->rfbuf_ts = ticks; /* * If this is a SACK connection and we have a block to report, * fill in the SACK blocks in the TCP options. 
 */
	if ((tp->t_flags & (TF_SACK_PERMITTED | TF_NOOPT)) ==
	    TF_SACK_PERMITTED &&
	    (!LIST_EMPTY(&tp->t_segq) ||
	     tp->reportblk.rblk_start != tp->reportblk.rblk_end))
		tcp_sack_fill_report(tp, opt, &optlen);

#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE) {
		int i;
		u_char *bp;

		/*
		 * Initialize TCP-MD5 option (RFC2385)
		 */
		bp = (u_char *)opt + optlen;
		*bp++ = TCPOPT_SIGNATURE;
		*bp++ = TCPOLEN_SIGNATURE;
		sigoff = optlen + 2;
		for (i = 0; i < TCP_SIGLEN; i++)
			*bp++ = 0;
		optlen += TCPOLEN_SIGNATURE;

		/*
		 * Terminate options list and maintain 32-bit alignment.
		 */
		*bp++ = TCPOPT_NOP;
		*bp++ = TCPOPT_EOL;
		optlen += 2;
	}
#endif /* TCP_SIGNATURE */

	KASSERT(optlen <= TCP_MAXOLEN, ("too many TCP options"));
	hdrlen += optlen;

	if (isipv6) {
		ipoptlen = ip6_optlen(inp);
	} else {
		if (inp->inp_options) {
			ipoptlen = inp->inp_options->m_len -
			    offsetof(struct ipoption, ipopt_list);
		} else {
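
/*
 * [Illustrative sketch, not part of the routine above.]  The idle-restart
 * clamp near the top of tcp_output() above reduces snd_cwnd after the
 * connection has been idle for at least one retransmit timeout: with RFC
 * 3390 enabled it clamps to min(4*MSS, max(2*MSS, 4380)), otherwise it
 * slow-starts again from a single segment.  The helper below restates that
 * computation in isolation; the name restart_cwnd and its parameters are
 * local to this sketch.
 */
static unsigned long
restart_cwnd(unsigned long snd_cwnd, unsigned long maxseg, int do_rfc3390)
{
	unsigned long iw;

	if (!do_rfc3390)
		return (maxseg);		/* slow-start from scratch */

	iw = 2 * maxseg;			/* max(2*MSS, 4380) ... */
	if (iw < 4380)
		iw = 4380;
	if (iw > 4 * maxseg)			/* ... capped at 4*MSS */
		iw = 4 * maxseg;

	/* Never raise the congestion window as a result of the restart. */
	return (snd_cwnd < iw ? snd_cwnd : iw);
}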
/* * Tcp output routine: figure out what should be sent and send it. */ int tcp_output(struct tcpcb *tp) { struct inpcb * const inp = tp->t_inpcb; struct socket *so = inp->inp_socket; long len, recvwin, sendwin; int nsacked = 0; int off, flags, error = 0; #ifdef TCP_SIGNATURE int sigoff = 0; #endif struct mbuf *m; struct ip *ip; struct tcphdr *th; u_char opt[TCP_MAXOLEN]; unsigned int ipoptlen, optlen, hdrlen; int idle; boolean_t sendalot; struct ip6_hdr *ip6; #ifdef INET6 const boolean_t isipv6 = INP_ISIPV6(inp); #else const boolean_t isipv6 = FALSE; #endif boolean_t can_tso = FALSE, use_tso; boolean_t report_sack, idle_cwv = FALSE; u_int segsz, tso_hlen, tso_lenmax = 0; int segcnt = 0; boolean_t need_sched = FALSE; KKASSERT(so->so_port == &curthread->td_msgport); /* * Determine length of data that should be transmitted, * and flags that will be used. * If there is some data or critical controls (SYN, RST) * to send, then transmit; otherwise, investigate further. */ /* * If we have been idle for a while, the send congestion window * could be no longer representative of the current state of the * link; need to validate congestion window. However, we should * not perform congestion window validation here, since we could * be asked to send pure ACK. */ if (tp->snd_max == tp->snd_una && (ticks - tp->snd_last) >= tp->t_rxtcur && tcp_idle_restart) idle_cwv = TRUE; /* * Calculate whether the transmit stream was previously idle * and adjust TF_LASTIDLE for the next time. */ idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); if (idle && (tp->t_flags & TF_MORETOCOME)) tp->t_flags |= TF_LASTIDLE; else tp->t_flags &= ~TF_LASTIDLE; if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max && !IN_FASTRECOVERY(tp)) nsacked = tcp_sack_bytes_below(&tp->scb, tp->snd_nxt); /* * Find out whether TSO could be used or not * * For TSO capable devices, the following assumptions apply to * the processing of TCP flags: * - If FIN is set on the large TCP segment, the device must set * FIN on the last segment that it creates from the large TCP * segment. * - If PUSH is set on the large TCP segment, the device must set * PUSH on the last segment that it creates from the large TCP * segment. */ #if !defined(IPSEC) && !defined(FAST_IPSEC) if (tcp_do_tso #ifdef TCP_SIGNATURE && (tp->t_flags & TF_SIGNATURE) == 0 #endif ) { if (!isipv6) { struct rtentry *rt = inp->inp_route.ro_rt; if (rt != NULL && (rt->rt_flags & RTF_UP) && (rt->rt_ifp->if_hwassist & CSUM_TSO)) { can_tso = TRUE; tso_lenmax = rt->rt_ifp->if_tsolen; } } } #endif /* !IPSEC && !FAST_IPSEC */ again: m = NULL; ip = NULL; th = NULL; ip6 = NULL; if ((tp->t_flags & (TF_SACK_PERMITTED | TF_NOOPT)) == TF_SACK_PERMITTED && (!TAILQ_EMPTY(&tp->t_segq) || tp->reportblk.rblk_start != tp->reportblk.rblk_end)) report_sack = TRUE; else report_sack = FALSE; /* Make use of SACK information when slow-starting after a RTO. */ if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max && !IN_FASTRECOVERY(tp)) { tcp_seq old_snd_nxt = tp->snd_nxt; tcp_sack_skip_sacked(&tp->scb, &tp->snd_nxt); nsacked += tp->snd_nxt - old_snd_nxt; } sendalot = FALSE; off = tp->snd_nxt - tp->snd_una; sendwin = min(tp->snd_wnd, tp->snd_cwnd + nsacked); sendwin = min(sendwin, tp->snd_bwnd); flags = tcp_outflags[tp->t_state]; /* * Get standard flags, and add SYN or FIN if requested by 'hidden' * state flags. */ if (tp->t_flags & TF_NEEDFIN) flags |= TH_FIN; if (tp->t_flags & TF_NEEDSYN) flags |= TH_SYN; /* * If in persist timeout with window of 0, send 1 byte. 
* Otherwise, if window is small but nonzero * and timer expired, we will send what we can * and go to transmit state. */ if (tp->t_flags & TF_FORCE) { if (sendwin == 0) { /* * If we still have some data to send, then * clear the FIN bit. Usually this would * happen below when it realizes that we * aren't sending all the data. However, * if we have exactly 1 byte of unsent data, * then it won't clear the FIN bit below, * and if we are in persist state, we wind * up sending the packet without recording * that we sent the FIN bit. * * We can't just blindly clear the FIN bit, * because if we don't have any more data * to send then the probe will be the FIN * itself. */ if (off < so->so_snd.ssb_cc) flags &= ~TH_FIN; sendwin = 1; } else { tcp_callout_stop(tp, tp->tt_persist); tp->t_rxtshift = 0; } } /* * If snd_nxt == snd_max and we have transmitted a FIN, the * offset will be > 0 even if so_snd.ssb_cc is 0, resulting in * a negative length. This can also occur when TCP opens up * its congestion window while receiving additional duplicate * acks after fast-retransmit because TCP will reset snd_nxt * to snd_max after the fast-retransmit. * * A negative length can also occur when we are in the * TCPS_SYN_RECEIVED state due to a simultanious connect where * our SYN has not been acked yet. * * In the normal retransmit-FIN-only case, however, snd_nxt will * be set to snd_una, the offset will be 0, and the length may * wind up 0. */ len = (long)ulmin(so->so_snd.ssb_cc, sendwin) - off; /* * Lop off SYN bit if it has already been sent. However, if this * is SYN-SENT state and if segment contains data, suppress sending * segment (sending the segment would be an option if we still * did TAO and the remote host supported it). */ if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) { flags &= ~TH_SYN; off--, len++; if (len > 0 && tp->t_state == TCPS_SYN_SENT) { tp->t_flags &= ~(TF_ACKNOW | TF_XMITNOW); return 0; } } /* * Be careful not to send data and/or FIN on SYN segments. * This measure is needed to prevent interoperability problems * with not fully conformant TCP implementations. */ if (flags & TH_SYN) { len = 0; flags &= ~TH_FIN; } if (len < 0) { /* * A negative len can occur if our FIN has been sent but not * acked, or if we are in a simultanious connect in the * TCPS_SYN_RECEIVED state with our SYN sent but not yet * acked. * * If our window has contracted to 0 in the FIN case * (which can only occur if we have NOT been called to * retransmit as per code a few paragraphs up) then we * want to shift the retransmit timer over to the * persist timer. * * However, if we are in the TCPS_SYN_RECEIVED state * (the SYN case) we will be in a simultanious connect and * the window may be zero degeneratively. In this case we * do not want to shift to the persist timer after the SYN * or the SYN+ACK transmission. */ len = 0; if (sendwin == 0 && tp->t_state != TCPS_SYN_RECEIVED) { tcp_callout_stop(tp, tp->tt_rexmt); tp->t_rxtshift = 0; tp->snd_nxt = tp->snd_una; if (!tcp_callout_active(tp, tp->tt_persist)) tcp_setpersist(tp); } } KASSERT(len >= 0, ("%s: len < 0", __func__)); /* * Automatic sizing of send socket buffer. Often the send buffer * size is not optimally adjusted to the actual network conditions * at hand (delay bandwidth product). Setting the buffer size too * small limits throughput on links with high bandwidth and high * delay (eg. trans-continental/oceanic links). Setting the * buffer size too big consumes too much real kernel memory, * especially with many connections on busy servers. 
* * The criteria to step up the send buffer one notch are: * 1. receive window of remote host is larger than send buffer * (with a fudge factor of 5/4th); * 2. hiwat has not significantly exceeded bwnd (inflight) * (bwnd is a maximal value if inflight is disabled). * 3. send buffer is filled to 7/8th with data (so we actually * have data to make use of it); * 4. hiwat has not hit maximal automatic size; * 5. our send window (slow start and cogestion controlled) is * larger than sent but unacknowledged data in send buffer. * * The remote host receive window scaling factor may limit the * growing of the send buffer before it reaches its allowed * maximum. * * It scales directly with slow start or congestion window * and does at most one step per received ACK. This fast * scaling has the drawback of growing the send buffer beyond * what is strictly necessary to make full use of a given * delay*bandwith product. However testing has shown this not * to be much of an problem. At worst we are trading wasting * of available bandwith (the non-use of it) for wasting some * socket buffer memory. * * The criteria for shrinking the buffer is based solely on * the inflight code (snd_bwnd). If inflight is disabled, * the buffer will not be shrinked. Note that snd_bwnd already * has a fudge factor. Our test adds a little hysteresis. */ if (tcp_do_autosndbuf && (so->so_snd.ssb_flags & SSB_AUTOSIZE)) { const int asbinc = tcp_autosndbuf_inc; const int hiwat = so->so_snd.ssb_hiwat; const int lowat = so->so_snd.ssb_lowat; u_long newsize; if ((tp->snd_wnd / 4 * 5) >= hiwat && so->so_snd.ssb_cc >= (hiwat / 8 * 7) && hiwat < tp->snd_bwnd + hiwat / 10 && hiwat + asbinc < tcp_autosndbuf_max && hiwat < (TCP_MAXWIN << tp->snd_scale) && sendwin >= (so->so_snd.ssb_cc - (tp->snd_nxt - tp->snd_una))) { newsize = ulmin(hiwat + asbinc, tcp_autosndbuf_max); if (!ssb_reserve(&so->so_snd, newsize, so, NULL)) atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE); #if 0 if (newsize >= (TCP_MAXWIN << tp->snd_scale)) atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE); #endif } else if ((long)tp->snd_bwnd < (long)(hiwat * 3 / 4 - lowat - asbinc) && hiwat > tp->t_maxseg * 2 + asbinc && hiwat + asbinc >= tcp_autosndbuf_min && tcp_do_autosndbuf == 1) { newsize = ulmax(hiwat - asbinc, tp->t_maxseg * 2); ssb_reserve(&so->so_snd, newsize, so, NULL); } } /* * Don't use TSO, if: * - Congestion window needs validation * - There are SACK blocks to report * - RST or SYN flags is set * - URG will be set * * XXX * Checking for SYN|RST looks overkill, just to be safe than sorry */ use_tso = can_tso; if (report_sack || idle_cwv || (flags & (TH_RST | TH_SYN))) use_tso = FALSE; if (use_tso) { tcp_seq ugr_nxt = tp->snd_nxt; if ((flags & TH_FIN) && (tp->t_flags & TF_SENTFIN) && tp->snd_nxt == tp->snd_max) --ugr_nxt; if (SEQ_GT(tp->snd_up, ugr_nxt)) use_tso = FALSE; } if (use_tso) { /* * Find out segment size and header length for TSO */ error = tcp_tso_getsize(tp, &segsz, &tso_hlen); if (error) use_tso = FALSE; } if (!use_tso) { segsz = tp->t_maxseg; tso_hlen = 0; /* not used */ } /* * Truncate to the maximum segment length if not TSO, and ensure that * FIN is removed if the length no longer contains the last data byte. */ if (len > segsz) { if (!use_tso) { len = segsz; ++segcnt; } else { int nsegs; if (__predict_false(tso_lenmax < segsz)) tso_lenmax = segsz << 1; /* * Truncate TSO transfers to (IP_MAXPACKET - iphlen - * thoff), and make sure that we send equal size * transfers down the stack (rather than big-small- * big-small-...). 
*/ len = min(len, tso_lenmax); nsegs = min(len, (IP_MAXPACKET - tso_hlen)) / segsz; KKASSERT(nsegs > 0); len = nsegs * segsz; if (len <= segsz) { use_tso = FALSE; ++segcnt; } else { segcnt += nsegs; } } sendalot = TRUE; } else { use_tso = FALSE; if (len > 0) ++segcnt; } if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.ssb_cc)) flags &= ~TH_FIN; recvwin = ssb_space(&so->so_rcv); /* * Sender silly window avoidance. We transmit under the following * conditions when len is non-zero: * * - We have a full segment * - This is the last buffer in a write()/send() and we are * either idle or running NODELAY * - we've timed out (e.g. persist timer) * - we have more then 1/2 the maximum send window's worth of * data (receiver may be limiting the window size) * - we need to retransmit */ if (len) { if (len >= segsz) goto send; /* * NOTE! on localhost connections an 'ack' from the remote * end may occur synchronously with the output and cause * us to flush a buffer queued with moretocome. XXX * * note: the len + off check is almost certainly unnecessary. */ if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ (idle || (tp->t_flags & TF_NODELAY)) && len + off >= so->so_snd.ssb_cc && !(tp->t_flags & TF_NOPUSH)) { goto send; } if (tp->t_flags & TF_FORCE) /* typ. timeout case */ goto send; if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) goto send; if (SEQ_LT(tp->snd_nxt, tp->snd_max)) /* retransmit case */ goto send; if (tp->t_flags & TF_XMITNOW) goto send; } /* * Compare available window to amount of window * known to peer (as advertised window less * next expected input). If the difference is at least two * max size segments, or at least 50% of the maximum possible * window, then want to send a window update to peer. */ if (recvwin > 0) { /* * "adv" is the amount we can increase the window, * taking into account that we are limited by * TCP_MAXWIN << tp->rcv_scale. */ long adv = min(recvwin, (long)TCP_MAXWIN << tp->rcv_scale) - (tp->rcv_adv - tp->rcv_nxt); long hiwat; /* * This ack case typically occurs when the user has drained * the TCP socket buffer sufficiently to warrent an ack * containing a 'pure window update'... that is, an ack that * ONLY updates the tcp window. * * It is unclear why we would need to do a pure window update * past 2 segments if we are going to do one at 1/2 the high * water mark anyway, especially since under normal conditions * the user program will drain the socket buffer quickly. * The 2-segment pure window update will often add a large * number of extra, unnecessary acks to the stream. * * avoid_pure_win_update now defaults to 1. */ if (avoid_pure_win_update == 0 || (tp->t_flags & TF_RXRESIZED)) { if (adv >= (long) (2 * segsz)) { goto send; } } hiwat = (long)(TCP_MAXWIN << tp->rcv_scale); if (hiwat > (long)so->so_rcv.ssb_hiwat) hiwat = (long)so->so_rcv.ssb_hiwat; if (adv >= hiwat / 2) goto send; } /* * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW * is also a catch-all for the retransmit timer timeout case. */ if (tp->t_flags & TF_ACKNOW) goto send; if ((flags & TH_RST) || ((flags & TH_SYN) && !(tp->t_flags & TF_NEEDSYN))) goto send; if (SEQ_GT(tp->snd_up, tp->snd_una)) goto send; /* * If our state indicates that FIN should be sent * and we have not yet done so, then we need to send. */ if ((flags & TH_FIN) && (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una)) goto send; /* * TCP window updates are not reliable, rather a polling protocol * using ``persist'' packets is used to insure receipt of window * updates. 
The three ``states'' for the output side are: * idle not doing retransmits or persists * persisting to move a small or zero window * (re)transmitting and thereby not persisting * * tcp_callout_active(tp, tp->tt_persist) * is true when we are in persist state. * The TF_FORCE flag in tp->t_flags * is set when we are called to send a persist packet. * tcp_callout_active(tp, tp->tt_rexmt) * is set when we are retransmitting * The output side is idle when both timers are zero. * * If send window is too small, there is data to transmit, and no * retransmit or persist is pending, then go to persist state. * * If nothing happens soon, send when timer expires: * if window is nonzero, transmit what we can, otherwise force out * a byte. * * Don't try to set the persist state if we are in TCPS_SYN_RECEIVED * with data pending. This situation can occur during a * simultanious connect. */ if (so->so_snd.ssb_cc > 0 && tp->t_state != TCPS_SYN_RECEIVED && !tcp_callout_active(tp, tp->tt_rexmt) && !tcp_callout_active(tp, tp->tt_persist)) { tp->t_rxtshift = 0; tcp_setpersist(tp); } /* * No reason to send a segment, just return. */ tp->t_flags &= ~TF_XMITNOW; return (0); send: if (need_sched && len > 0) { tcp_output_sched(tp); return 0; } /* * Before ESTABLISHED, force sending of initial options * unless TCP set not to do any options. * NOTE: we assume that the IP/TCP header plus TCP options * always fit in a single mbuf, leaving room for a maximum * link header, i.e. * max_linkhdr + sizeof(struct tcpiphdr) + optlen <= MCLBYTES */ optlen = 0; if (isipv6) hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); else hdrlen = sizeof(struct tcpiphdr); if (flags & TH_SYN) { tp->snd_nxt = tp->iss; if (!(tp->t_flags & TF_NOOPT)) { u_short mss; opt[0] = TCPOPT_MAXSEG; opt[1] = TCPOLEN_MAXSEG; mss = htons((u_short) tcp_mssopt(tp)); memcpy(opt + 2, &mss, sizeof mss); optlen = TCPOLEN_MAXSEG; if ((tp->t_flags & TF_REQ_SCALE) && (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_SCALE))) { *((u_int32_t *)(opt + optlen)) = htonl( TCPOPT_NOP << 24 | TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 | tp->request_r_scale); optlen += 4; } if ((tcp_do_sack && !(flags & TH_ACK)) || tp->t_flags & TF_SACK_PERMITTED) { uint32_t *lp = (uint32_t *)(opt + optlen); *lp = htonl(TCPOPT_SACK_PERMITTED_ALIGNED); optlen += TCPOLEN_SACK_PERMITTED_ALIGNED; } } } /* * Send a timestamp and echo-reply if this is a SYN and our side * wants to use timestamps (TF_REQ_TSTMP is set) or both our side * and our peer have sent timestamps in our SYN's. */ if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && !(flags & TH_RST) && (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_TSTMP))) { u_int32_t *lp = (u_int32_t *)(opt + optlen); /* Form timestamp option as shown in appendix A of RFC 1323. */ *lp++ = htonl(TCPOPT_TSTAMP_HDR); *lp++ = htonl(ticks); *lp = htonl(tp->ts_recent); optlen += TCPOLEN_TSTAMP_APPA; } /* Set receive buffer autosizing timestamp. */ if (tp->rfbuf_ts == 0 && (so->so_rcv.ssb_flags & SSB_AUTOSIZE)) tp->rfbuf_ts = ticks; /* * If this is a SACK connection and we have a block to report, * fill in the SACK blocks in the TCP options. */ if (report_sack) tcp_sack_fill_report(tp, opt, &optlen); #ifdef TCP_SIGNATURE if (tp->t_flags & TF_SIGNATURE) { int i; u_char *bp; /* * Initialize TCP-MD5 option (RFC2385) */ bp = (u_char *)opt + optlen; *bp++ = TCPOPT_SIGNATURE; *bp++ = TCPOLEN_SIGNATURE; sigoff = optlen + 2; for (i = 0; i < TCP_SIGLEN; i++) *bp++ = 0; optlen += TCPOLEN_SIGNATURE; /* * Terminate options list and maintain 32-bit alignment. 
		 */
		*bp++ = TCPOPT_NOP;
		*bp++ = TCPOPT_EOL;
		optlen += 2;
	}
#endif /* TCP_SIGNATURE */

	KASSERT(optlen <= TCP_MAXOLEN, ("too many TCP options"));
	hdrlen += optlen;

	if (isipv6) {
		ipoptlen = ip6_optlen(inp);
	} else {
		if (inp->inp_options) {
			ipoptlen = inp->inp_options->m_len -
			    offsetof(struct ipoption, ipopt_list);
		} else {
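
/*
 * [Illustrative sketch, not part of the routine above.]  When TSO is used,
 * tcp_output() above limits each burst to the device's tso_lenmax and to
 * what fits in one IP datagram after the TSO header, then rounds the
 * result down to a whole number of segsz-sized segments so equal-sized
 * transfers go down the stack.  The helper below restates that clamping;
 * IP_MAXPACKET_SKETCH stands in for the kernel's IP_MAXPACKET (65535) and
 * all names are local to this sketch.
 */
#define IP_MAXPACKET_SKETCH	65535L

static long
tso_clamp_len(long len, long segsz, long tso_hlen, long tso_lenmax,
    long *nsegsp)
{
	long limit, nsegs;

	if (len > tso_lenmax)
		len = tso_lenmax;

	limit = IP_MAXPACKET_SKETCH - tso_hlen;
	nsegs = (len < limit ? len : limit) / segsz;

	*nsegsp = nsegs;
	return (nsegs * segsz);		/* whole segments only */
}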
/* * Do a send by putting data in output queue and updating urgent * marker if URG set. Possibly send more data. Unlike the other * pru_*() routines, the mbuf chains are our responsibility. We * must either enqueue them or free them. The other pru_* routines * generally are caller-frees. */ static void tcp_usr_send(netmsg_t msg) { struct socket *so = msg->send.base.nm_so; int flags = msg->send.nm_flags; struct mbuf *m = msg->send.nm_m; struct sockaddr *nam = msg->send.nm_addr; struct mbuf *control = msg->send.nm_control; struct thread *td = msg->send.nm_td; int error = 0; struct inpcb *inp; struct tcpcb *tp; #ifdef INET6 int isipv6; #endif TCPDEBUG0; inp = so->so_pcb; if (inp == NULL) { /* * OOPS! we lost a race, the TCP session got reset after * we checked SS_CANTSENDMORE, eg: while doing uiomove or a * network interrupt in the non-critical section of sosend(). */ m_freem(m); if (control) m_freem(control); error = ECONNRESET; /* XXX EPIPE? */ tp = NULL; TCPDEBUG1(); goto out; } #ifdef INET6 isipv6 = nam && nam->sa_family == AF_INET6; #endif /* INET6 */ tp = intotcpcb(inp); TCPDEBUG1(); if (control) { /* TCP doesn't do control messages (rights, creds, etc) */ if (control->m_len) { m_freem(control); m_freem(m); error = EINVAL; goto out; } m_freem(control); /* empty control, just free it */ } /* * Don't let too much OOB data build up */ if (flags & PRUS_OOB) { if (ssb_space(&so->so_snd) < -512) { m_freem(m); error = ENOBUFS; goto out; } } /* * Do implied connect if not yet connected. Any data sent * with the connect is handled by tcp_connect() and friends. * * NOTE! PROTOCOL THREAD MAY BE CHANGED BY THE CONNECT! */ if (nam && tp->t_state < TCPS_SYN_SENT) { kprintf("implied fallback\n"); msg->connect.nm_nam = nam; msg->connect.nm_td = td; msg->connect.nm_m = m; msg->connect.nm_flags = flags; msg->connect.nm_reconnect = NMSG_RECONNECT_FALLBACK; #ifdef INET6 if (isipv6) tcp6_connect(msg); else #endif /* INET6 */ tcp_connect(msg); /* msg invalid now */ return; } /* * Pump the data into the socket. */ if (m) ssb_appendstream(&so->so_snd, m); if (flags & PRUS_OOB) { /* * According to RFC961 (Assigned Protocols), * the urgent pointer points to the last octet * of urgent data. We continue, however, * to consider it to indicate the first octet * of data past the urgent section. * Otherwise, snd_up should be one lower. */ tp->snd_up = tp->snd_una + so->so_snd.ssb_cc; tp->t_flags |= TF_FORCE; error = tcp_output(tp); tp->t_flags &= ~TF_FORCE; } else { if (flags & PRUS_EOF) { /* * Close the send side of the connection after * the data is sent. */ socantsendmore(so); tp = tcp_usrclosed(tp); } if (tp != NULL) { if (flags & PRUS_MORETOCOME) tp->t_flags |= TF_MORETOCOME; error = tcp_output(tp); if (flags & PRUS_MORETOCOME) tp->t_flags &= ~TF_MORETOCOME; } } COMMON_END((flags & PRUS_OOB) ? PRU_SENDOOB : ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND)); }
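
/*
 * [Illustrative sketch, not part of the routine above.]  For PRUS_OOB the
 * code above marks everything currently buffered as urgent: snd_up is set
 * to snd_una plus the bytes now sitting in the send buffer, i.e. to the
 * first octet past the urgent data (the BSD interpretation), rather than
 * to the last urgent octet as RFC 961 describes.  The type and names
 * below are local to this sketch.
 */
typedef unsigned int sketch_tcp_seq;

static sketch_tcp_seq
sketch_oob_snd_up(sketch_tcp_seq snd_una, unsigned int sndbuf_cc)
{
	/* sndbuf_cc is so_snd.ssb_cc measured after the OOB data is queued */
	return (snd_una + sndbuf_cc);
}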
/*
 * Do a send by putting data in output queue and updating urgent
 * marker if URG set.  Possibly send more data.  Unlike the other
 * pru_*() routines, the mbuf chains are our responsibility.  We
 * must either enqueue them or free them.  The other pru_* routines
 * generally are caller-frees.
 */
static void
tcp_usr_send(netmsg_t msg)
{
	struct socket *so = msg->send.base.nm_so;
	int flags = msg->send.nm_flags;
	struct mbuf *m = msg->send.nm_m;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
	TCPDEBUG0;

	KKASSERT(msg->send.nm_control == NULL);
	KKASSERT(msg->send.nm_addr == NULL);
	KKASSERT((flags & PRUS_FREEADDR) == 0);

	inp = so->so_pcb;
	if (inp == NULL) {
		/*
		 * OOPS! we lost a race, the TCP session got reset after
		 * we checked SS_CANTSENDMORE, eg: while doing uiomove or a
		 * network interrupt in the non-critical section of sosend().
		 */
		m_freem(m);
		error = ECONNRESET;	/* XXX EPIPE? */
		tp = NULL;
		TCPDEBUG1();
		goto out;
	}
	tp = intotcpcb(inp);
	TCPDEBUG1();

#ifdef foo
	/*
	 * This is no longer necessary, since:
	 * - sosendtcp() has already checked it for us
	 * - It does not work with asynchronous sending
	 */

	/*
	 * Don't let too much OOB data build up
	 */
	if (flags & PRUS_OOB) {
		if (ssb_space(&so->so_snd) < -512) {
			m_freem(m);
			error = ENOBUFS;
			goto out;
		}
	}
#endif

	/*
	 * Pump the data into the socket.
	 */
	if (m) {
		ssb_appendstream(&so->so_snd, m);
		sowwakeup(so);
	}

	if (flags & PRUS_OOB) {
		/*
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section.
		 * Otherwise, snd_up should be one lower.
		 */
		tp->snd_up = tp->snd_una + so->so_snd.ssb_cc;
		tp->t_flags |= TF_FORCE;
		error = tcp_output(tp);
		tp->t_flags &= ~TF_FORCE;
	} else {
		if (flags & PRUS_EOF) {
			/*
			 * Close the send side of the connection after
			 * the data is sent.
			 */
			socantsendmore(so);
			tp = tcp_usrclosed(tp);
		}
		if (tp != NULL && !tcp_output_pending(tp)) {
			if (flags & PRUS_MORETOCOME)
				tp->t_flags |= TF_MORETOCOME;
			error = tcp_output_fair(tp);
			if (flags & PRUS_MORETOCOME)
				tp->t_flags &= ~TF_MORETOCOME;
		}
	}
	COMMON_END1((flags & PRUS_OOB) ? PRU_SENDOOB :
		    ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND),
		    (flags & PRUS_NOREPLY));
}
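
/*
 * [Illustrative sketch, not part of the routine above.]  Both versions of
 * tcp_usr_send() bracket the output call with TF_MORETOCOME when the
 * caller passes PRUS_MORETOCOME, so that tcp_output() can hold back a
 * short segment while more data from the same write is about to be
 * queued.  The structure, flag value and stub output function below are
 * stand-ins local to this sketch, not the kernel definitions.
 */
#define SKETCH_TF_MORETOCOME	0x00010000

struct sketch_tcpcb {
	unsigned int t_flags;
};

static int
sketch_tcp_output(struct sketch_tcpcb *tp)
{
	(void)tp;
	return (0);		/* stub: the real work happens in tcp_output() */
}

static int
sketch_send_bracketed(struct sketch_tcpcb *tp, int more_to_come)
{
	int error;

	if (more_to_come)
		tp->t_flags |= SKETCH_TF_MORETOCOME;
	error = sketch_tcp_output(tp);
	if (more_to_come)
		tp->t_flags &= ~SKETCH_TF_MORETOCOME;
	return (error);
}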
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	boolean_t dontroute;		/* temporary SO_DONTROUTE setting */
	size_t resid;
	int error;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		top = m_uiomove(uio);
		if (top == NULL)
			goto release;
	}

	dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
	if (dontroute)
		so->so_options |= SO_DONTROUTE;

	error = so_pru_send(so, 0, top, addr, NULL, td);
	top = NULL;		/* sent or freed in lower layer */

	if (dontroute)
		so->so_options &= ~SO_DONTROUTE;

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
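
/*
 * [Illustrative sketch, not part of the routine above.]  sosendudp() never
 * sends a datagram partially: either the whole datagram fits in the send
 * buffer now, or the sender blocks in ssb_wait() and retries, or it fails
 * immediately when non-blocking.  The helper below restates that decision;
 * the enum and parameter names are local to this sketch.
 */
#include <errno.h>
#include <stddef.h>

enum udp_send_decision { UDP_SEND_NOW, UDP_SEND_WAIT, UDP_SEND_FAIL };

static enum udp_send_decision
udp_space_check(long space, size_t resid, size_t hiwat, int nonblocking,
    int *errp)
{
	*errp = 0;
	if (resid > hiwat) {			/* can never fit */
		*errp = EMSGSIZE;
		return (UDP_SEND_FAIL);
	}
	if (space >= 0 && (size_t)space >= resid)
		return (UDP_SEND_NOW);		/* fits right now */
	if (nonblocking) {
		*errp = EWOULDBLOCK;
		return (UDP_SEND_FAIL);
	}
	return (UDP_SEND_WAIT);			/* ssb_wait() and retry */
}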
/* * Send on a socket. * If send must go all at once and message is larger than * send buffering, then hard error. * Lock against other senders. * If must go all at once and not enough room now, then * inform user that this would block and do nothing. * Otherwise, if nonblocking, send as much as possible. * The data to be sent is described by "uio" if nonzero, * otherwise by the mbuf chain "top" (which must be null * if uio is not). Data provided in mbuf chain must be small * enough to send all at once. * * Returns nonzero on error, timeout or signal; callers * must check for short counts if EINTR/ERESTART are returned. * Data and control buffers are freed on return. */ int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { struct mbuf **mp; struct mbuf *m; size_t resid; int space, len; int clen = 0, error, dontroute, mlen; int atomic = sosendallatonce(so) || top; int pru_flags; if (uio) { resid = uio->uio_resid; } else { resid = (size_t)top->m_pkthdr.len; #ifdef INVARIANTS len = 0; for (m = top; m; m = m->m_next) len += m->m_len; KKASSERT(top->m_pkthdr.len == len); #endif } /* * WARNING! resid is unsigned, space and len are signed. space * can wind up negative if the sockbuf is overcommitted. * * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM * type sockets since that's an error. */ if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { error = EINVAL; goto out; } dontroute = (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && (so->so_proto->pr_flags & PR_ATOMIC); if (td->td_lwp != NULL) td->td_lwp->lwp_ru.ru_msgsnd++; if (control) clen = control->m_len; #define gotoerr(errcode) { error = errcode; goto release; } restart: error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); if (error) goto out; do { if (so->so_state & SS_CANTSENDMORE) gotoerr(EPIPE); if (so->so_error) { error = so->so_error; so->so_error = 0; goto release; } if ((so->so_state & SS_ISCONNECTED) == 0) { /* * `sendto' and `sendmsg' is allowed on a connection- * based socket if it supports implied connect. * Return ENOTCONN if not connected and no address is * supplied. */ if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { if ((so->so_state & SS_ISCONFIRMING) == 0 && !(resid == 0 && clen != 0)) gotoerr(ENOTCONN); } else if (addr == 0) gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ? ENOTCONN : EDESTADDRREQ); } if ((atomic && resid > so->so_snd.ssb_hiwat) || clen > so->so_snd.ssb_hiwat) { gotoerr(EMSGSIZE); } space = ssb_space(&so->so_snd); if (flags & MSG_OOB) space += 1024; if ((space < 0 || (size_t)space < resid + clen) && uio && (atomic || space < so->so_snd.ssb_lowat || space < clen)) { if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) gotoerr(EWOULDBLOCK); ssb_unlock(&so->so_snd); error = ssb_wait(&so->so_snd); if (error) goto out; goto restart; } mp = ⊤ space -= clen; do { if (uio == NULL) { /* * Data is prepackaged in "top". */ resid = 0; if (flags & MSG_EOR) top->m_flags |= M_EOR; } else do { if (resid > INT_MAX) resid = INT_MAX; m = m_getl((int)resid, MB_WAIT, MT_DATA, top == NULL ? M_PKTHDR : 0, &mlen); if (top == NULL) { m->m_pkthdr.len = 0; m->m_pkthdr.rcvif = NULL; } len = imin((int)szmin(mlen, resid), space); if (resid < MINCLSIZE) { /* * For datagram protocols, leave room * for protocol headers in first mbuf. 
*/ if (atomic && top == 0 && len < mlen) MH_ALIGN(m, len); } space -= len; error = uiomove(mtod(m, caddr_t), (size_t)len, uio); resid = uio->uio_resid; m->m_len = len; *mp = m; top->m_pkthdr.len += len; if (error) goto release; mp = &m->m_next; if (resid == 0) { if (flags & MSG_EOR) top->m_flags |= M_EOR; break; } } while (space > 0 && atomic); if (dontroute) so->so_options |= SO_DONTROUTE; if (flags & MSG_OOB) { pru_flags = PRUS_OOB; } else if ((flags & MSG_EOF) && (so->so_proto->pr_flags & PR_IMPLOPCL) && (resid == 0)) { /* * If the user set MSG_EOF, the protocol * understands this flag and nothing left to * send then use PRU_SEND_EOF instead of PRU_SEND. */ pru_flags = PRUS_EOF; } else if (resid > 0 && space > 0) { /* If there is more to send, set PRUS_MORETOCOME */ pru_flags = PRUS_MORETOCOME; } else { pru_flags = 0; } /* * XXX all the SS_CANTSENDMORE checks previously * done could be out of date. We could have recieved * a reset packet in an interrupt or maybe we slept * while doing page faults in uiomove() etc. We could * probably recheck again inside the splnet() protection * here, but there are probably other places that this * also happens. We must rethink this. */ error = so_pru_send(so, pru_flags, top, addr, control, td); if (dontroute) so->so_options &= ~SO_DONTROUTE; clen = 0; control = 0; top = NULL; mp = ⊤ if (error) goto release; } while (resid && space > 0); } while (resid); release: ssb_unlock(&so->so_snd); out: if (top) m_freem(top); if (control) m_freem(control); return (error); }
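
/*
 * [Illustrative sketch, not part of the routine above.]  For each chunk it
 * hands to the protocol, sosend() above derives one pru flag: MSG_OOB wins
 * outright, MSG_EOF with nothing left to send (on an implied-open/close
 * protocol) becomes the EOF request, and an intermediate chunk that will
 * be followed by more data is marked MORETOCOME.  The flag values below
 * are stand-ins local to this sketch, not the kernel's PRUS_* constants.
 */
#include <stddef.h>

#define SK_MSG_OOB		0x01
#define SK_MSG_EOF		0x02

#define SK_PRUS_OOB		0x01
#define SK_PRUS_EOF		0x02
#define SK_PRUS_MORETOCOME	0x04

static int
sketch_pick_pru_flags(int msg_flags, int proto_implopcl, size_t resid,
    long space)
{
	if (msg_flags & SK_MSG_OOB)
		return (SK_PRUS_OOB);
	if ((msg_flags & SK_MSG_EOF) && proto_implopcl && resid == 0)
		return (SK_PRUS_EOF);	/* close send side after this send */
	if (resid > 0 && space > 0)
		return (SK_PRUS_MORETOCOME);
	return (0);
}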