Example No. 1
static void
tcp_cubic_post_fr(struct tcpcb *tp, struct tcphdr *th)
{
	uint32_t flight_size = 0;

	if (SEQ_LEQ(th->th_ack, tp->snd_max))
		flight_size = tp->snd_max - th->th_ack;
	/*
	 * Complete ack. The current window was inflated for fast recovery.
	 * It has to be deflated post recovery.
	 *
	 * Window inflation should have left us with approx snd_ssthresh
	 * outstanding data. If the flight size is zero or one segment,
	 * make the congestion window at least as big as 2 segments to
	 * avoid delayed acknowledgements. This is according to RFC 6582.
	 */
	if (flight_size < tp->snd_ssthresh)
		tp->snd_cwnd = max(flight_size, tp->t_maxseg) 
				+ tp->t_maxseg;
	else
		tp->snd_cwnd = tp->snd_ssthresh;
	tp->t_ccstate->cub_tcp_win = 0;
	tp->t_ccstate->cub_target_win = 0;
	tp->t_ccstate->cub_tcp_bytes_acked = 0;
}
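
The deflation rule above is easier to follow in isolation. Below is a minimal, hypothetical standalone sketch (the helper name and plain integer types are assumptions, not part of the XNU sources) of the same RFC 6582 logic: after fast recovery, cwnd returns to ssthresh unless less than ssthresh bytes are in flight, in which case it is set to roughly the flight size plus one segment, and never below two segments, so a delayed-ACK receiver still has something to acknowledge.

/*
 * Minimal, self-contained sketch of the post-fast-recovery cwnd
 * deflation above.  Names and types are simplified assumptions,
 * not the kernel's.
 */
#include <stdint.h>

static uint32_t
post_fr_cwnd(uint32_t flight_size, uint32_t ssthresh, uint32_t maxseg)
{
	if (flight_size < ssthresh) {
		/* At least 2 segments, so a delayed-ACK receiver still acks. */
		uint32_t base = (flight_size > maxseg) ? flight_size : maxseg;
		return (base + maxseg);
	}
	return (ssthresh);
}
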
Example No. 2
/**
 *  \brief Update stream with SACK records from a TCP packet.
 *
 *  \param stream The stream to update.
 *  \param p packet to get the SACK records from
 *
 *  \retval -1 error
 *  \retval 0 ok
 */
int StreamTcpSackUpdatePacket(TcpStream *stream, Packet *p) {
    int records = TCP_GET_SACK_CNT(p);
    int record = 0;

    TCPOptSackRecord *sack_rec = (TCPOptSackRecord *)(TCP_GET_SACK_PTR(p));

    for (record = 0; record < records; record++) {
        SCLogDebug("%p last_ack %u, left edge %u, right edge %u", sack_rec,
            stream->last_ack, ntohl(sack_rec->le), ntohl(sack_rec->re));

        if (SEQ_LEQ(ntohl(sack_rec->re), stream->last_ack)) {
            SCLogDebug("record before last_ack");
            goto next;
        }

        /** \todo need a metric to check for a right edge limit */
/*
        if (SEQ_GT(ntohl(sack_rec->re), stream->next_seq)) {
            SCLogDebug("record beyond next_seq %u", stream->next_seq);
            goto next;
        }
*/
        if (SEQ_GEQ(ntohl(sack_rec->le), ntohl(sack_rec->re))) {
            SCLogDebug("invalid record: le >= re");
            goto next;
        }

        if (StreamTcpSackInsertRange(stream, ntohl(sack_rec->le),
                    ntohl(sack_rec->re)) == -1)
        {
            SCReturnInt(-1);
        }

    next:
        sack_rec++;
    }
#ifdef DEBUG
    StreamTcpSackPrintList(stream);
#endif
    SCReturnInt(0);
}
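
The validity checks above (SEQ_LEQ, SEQ_GEQ) depend on wrap-safe 32-bit sequence-number comparison. A sketch of the classic BSD-style definitions is shown below; the exact macros used by Suricata may differ in name or detail.

/*
 * Sketch of the wrap-safe sequence comparisons the checks above rely
 * on, in the classic BSD tcp_seq.h style.
 */
#include <stdint.h>

#define SEQ_LT(a, b)   ((int32_t)((a) - (b)) <  0)
#define SEQ_LEQ(a, b)  ((int32_t)((a) - (b)) <= 0)
#define SEQ_GT(a, b)   ((int32_t)((a) - (b)) >  0)
#define SEQ_GEQ(a, b)  ((int32_t)((a) - (b)) >= 0)

/* e.g. SEQ_GT(0x00000010, 0xfffffff0) is true: 0x10 is "after" the wrap. */
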
Example No. 3
/*
 * Ertt_packet_measurements uses a small amount of state kept on each packet
 * sent to match incoming acknowledgements. This enables more accurate and
 * secure round trip time measurements. The resulting measurement is used for
 * congestion control algorithms which require a more accurate time.
 * Ertt_packet_measurements is called via the helper hook in tcp_input.c
 */
static int
ertt_packet_measurement_hook(int hhook_type, int hhook_id, void *udata,
    void *ctx_data, void *hdata, struct osd *hosd)
{
	struct ertt *e_t;
	struct tcpcb *tp;
	struct tcphdr *th;
	struct tcpopt *to;
	struct tcp_hhook_data *thdp;
	struct txseginfo *txsi;
	int acked, measurenext_len, multiack, new_sacked_bytes, rtt_bytes_adjust;
	uint32_t measurenext, rts;
	tcp_seq ack;

	KASSERT(ctx_data != NULL, ("%s: ctx_data is NULL!", __func__));
	KASSERT(hdata != NULL, ("%s: hdata is NULL!", __func__));

	e_t = (struct ertt *)hdata;
	thdp = ctx_data;
	tp = thdp->tp;
	th = thdp->th;
	to = thdp->to;
	new_sacked_bytes = (tp->sackhint.last_sack_ack != 0);
	measurenext = measurenext_len = multiack = rts = rtt_bytes_adjust = 0;
	acked = th->th_ack - tp->snd_una;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* Packet has provided new acknowledgements. */
	if (acked > 0 || new_sacked_bytes) {
		if (acked == 0 && new_sacked_bytes) {
			/* Use last sacked data. */
			ack = tp->sackhint.last_sack_ack;
		} else
			ack = th->th_ack;

		txsi = TAILQ_FIRST(&e_t->txsegi_q);
		while (txsi != NULL) {
			rts = 0;

			/* Acknowledgement is acking more than this txsi. */
			if (SEQ_GT(ack, txsi->seq + txsi->len)) {
				if (txsi->flags & TXSI_RTT_MEASURE_START ||
				    measurenext) {
					marked_packet_rtt(txsi, e_t, tp,
					    &measurenext, &measurenext_len,
					    &rtt_bytes_adjust, MULTI_ACK);
				}
				TAILQ_REMOVE(&e_t->txsegi_q, txsi, txsegi_lnk);
				uma_zfree(txseginfo_zone, txsi);
				txsi = TAILQ_FIRST(&e_t->txsegi_q);
				continue;
			}

			/*
			 * Guess if delayed acks are being used by the receiver.
			 *
			 * XXXDH: A simple heuristic that could be improved
			 */
			if (!new_sacked_bytes) {
				if (acked > tp->t_maxseg) {
					e_t->dlyack_rx +=
					    (e_t->dlyack_rx < DLYACK_SMOOTH) ?
					    1 : 0;
					multiack = 1;
				} else if (acked > txsi->len) {
					multiack = 1;
					e_t->dlyack_rx +=
					    (e_t->dlyack_rx < DLYACK_SMOOTH) ?
					    1 : 0;
				} else if (acked == tp->t_maxseg ||
					   acked == txsi->len) {
					e_t->dlyack_rx -=
					    (e_t->dlyack_rx > 0) ? 1 : 0;
				}
				/* Otherwise leave dlyack_rx the way it was. */
			}

			/*
			 * Time stamps are only to help match the txsi with the
			 * received acknowledgements.
			 */
			if (e_t->timestamp_errors < MAX_TS_ERR &&
			    (to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
				/*
				 * Note: All packets sent with the offload will
				 * have the same time stamp. If we are sending
				 * on a fast interface and the t_maxseg is much
				 * smaller than one tick, this will be fine. The
				 * time stamp would be the same whether we were
				 * using tso or not. However, if the interface
				 * is slow, this will cause problems with the
				 * calculations. If the interface is slow, there
				 * is no reason to be using tso, and it should
				 * be turned off.
				 */
				/*
				 * If there are too many time stamp errors, time
				 * stamps won't be trusted
				 */
				rts = to->to_tsecr;
				/* Before this packet. */
				if (!e_t->dlyack_rx && TSTMP_LT(rts, txsi->tx_ts))
					/* When delayed acking is used, the
					 * reflected time stamp is of the first
					 * packet and thus may be before
					 * txsi->tx_ts.
					 */
					break;
				if (TSTMP_GT(rts, txsi->tx_ts)) {
					/*
					 * If reflected time stamp is later than
					 * tx_tsi, then this txsi is old.
					 */
					if (txsi->flags & TXSI_RTT_MEASURE_START
					    || measurenext) {
						marked_packet_rtt(txsi, e_t, tp,
						    &measurenext, &measurenext_len,
						    &rtt_bytes_adjust, OLD_TXSI);
					}
					TAILQ_REMOVE(&e_t->txsegi_q, txsi,
					    txsegi_lnk);
					uma_zfree(txseginfo_zone, txsi);
					txsi = TAILQ_FIRST(&e_t->txsegi_q);
					continue;
				}
				if (rts == txsi->tx_ts &&
				    TSTMP_LT(to->to_tsval, txsi->rx_ts)) {
					/*
					 * Segment received before sent!
					 * Something is wrong with the received
					 * timestamps so increment errors. If
					 * this keeps up we will ignore
					 * timestamps.
					 */
					e_t->timestamp_errors++;
				}
			}
			/*
			 * Acknowledging a sequence number before this txsi.
			 * If it is an old txsi that may have had the same seq
			 * numbers, it should have been removed if time stamps
			 * are being used.
			 */
			if (SEQ_LEQ(ack, txsi->seq))
				break; /* Before first packet in txsi. */

			/*
			 * Only ack > txsi->seq and ack <= txsi->seq+txsi->len
			 * past this point.
			 *
			 * If delayed acks are being used, an acknowledgement
			 * for a single segment will have been delayed by the
			 * receiver and will yield an inaccurate measurement. In
			 * this case, we only make the measurement if more than
			 * one segment is being acknowledged or sack is
			 * currently being used.
			 */
			if (!e_t->dlyack_rx || multiack || new_sacked_bytes) {
				/* Make an accurate new measurement. */
				e_t->rtt = tcp_ts_getticks() - txsi->tx_ts + 1;

				if (e_t->rtt < e_t->minrtt || e_t->minrtt == 0)
					e_t->minrtt = e_t->rtt;

				if (e_t->rtt > e_t->maxrtt || e_t->maxrtt == 0)
					e_t->maxrtt = e_t->rtt;
			}

			if (txsi->flags & TXSI_RTT_MEASURE_START || measurenext)
				marked_packet_rtt(txsi, e_t, tp,
				    &measurenext, &measurenext_len,
				    &rtt_bytes_adjust, CORRECT_ACK);

			if (txsi->flags & TXSI_TSO) {
				if (txsi->len > acked) {
					txsi->len -= acked;
					/*
					 * This presumes an ack for the first
					 * bytes in txsi; this may not be true,
					 * but it shouldn't cause problems for
					 * the timing.
					 *
					 * We remeasure RTT even though we only
					 * have a single txsi. The rationale
					 * behind this is that it is better to
					 * have a slightly inaccurate
					 * measurement than no additional
					 * measurement for the rest of the bulk
					 * transfer. Since TSO is only used on
					 * high speed interface cards, the
					 * packets should be transmitted at line
					 * rate back to back with little
					 * difference in transmission times (in
					 * ticks).
					 */
					txsi->seq += acked;
					/*
					 * Reset txsi measure flag so we don't
					 * use it for another RTT measurement.
					 */
					txsi->flags &= ~TXSI_RTT_MEASURE_START;
					/*
					 * There is still more data to be acked
					 * from tso bulk transmission, so we
					 * won't remove it from the TAILQ yet.
					 */
					break;
				}
				txsi->len = 0;
			}

			TAILQ_REMOVE(&e_t->txsegi_q, txsi, txsegi_lnk);
			uma_zfree(txseginfo_zone, txsi);
			break;
		}

		if (measurenext) {
			/*
			 * We need to do an RTT measurement. It won't be the best
			 * if we do it here.
			 */
			marked_packet_rtt(txsi, e_t, tp,
			    &measurenext, &measurenext_len,
			    &rtt_bytes_adjust, FORCED_MEASUREMENT);
		}
	}

	return (0);
}
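
The delayed-ACK guess in the loop above boils down to a saturating counter: it drifts up whenever an ACK covers more than one segment and down when an ACK covers exactly one. The sketch below restates just that part in isolation; DLYACK_SMOOTH_SKETCH and the function name are illustrative assumptions, not the module's own identifiers.

/*
 * Simplified sketch of the delayed-ACK heuristic above.
 */
#define DLYACK_SMOOTH_SKETCH 5	/* assumed smoothing ceiling */

static void
update_dlyack_guess(int *dlyack_rx, int acked, int seg_len, int maxseg)
{
	if (acked > maxseg || acked > seg_len) {
		if (*dlyack_rx < DLYACK_SMOOTH_SKETCH)
			(*dlyack_rx)++;	/* Looks like delayed ACKs. */
	} else if (acked == maxseg || acked == seg_len) {
		if (*dlyack_rx > 0)
			(*dlyack_rx)--;	/* Per-segment ACKs observed. */
	}
	/* Otherwise leave the counter as it is. */
}
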
Example No. 4
/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
void
tcp_input(struct mbuf *m, int iphlen, struct socket *inso)
{
  	struct ip save_ip, *ip;
	register struct tcpiphdr *ti;
	caddr_t optp = NULL;
	int optlen = 0;
	int len, tlen, off;
        register struct tcpcb *tp = NULL;
	register int tiflags;
        struct socket *so = NULL;
	int todrop, acked, ourfinisacked, needoutput = 0;
	int iss = 0;
	u_long tiwin;
	int ret;
    struct ex_list *ex_ptr;
    Slirp *slirp;

	DEBUG_CALL("tcp_input");
	DEBUG_ARGS((dfd, " m = %8lx  iphlen = %2d  inso = %lx\n",
		    (long )m, iphlen, (long )inso ));

	/*
	 * If called with m == 0, then we're continuing the connect
	 */
	if (m == NULL) {
		so = inso;
		slirp = so->slirp;

		/* Re-set a few variables */
		tp = sototcpcb(so);
		m = so->so_m;
                so->so_m = NULL;
		ti = so->so_ti;
		tiwin = ti->ti_win;
		tiflags = ti->ti_flags;

		goto cont_conn;
	}
	slirp = m->slirp;

	/*
	 * Get IP and TCP header together in first mbuf.
	 * Note: IP leaves IP header in first mbuf.
	 */
	ti = mtod(m, struct tcpiphdr *);
	if (iphlen > sizeof(struct ip )) {
	  ip_stripoptions(m, (struct mbuf *)0);
	  iphlen=sizeof(struct ip );
	}
	/* XXX Check if too short */


	/*
	 * Save a copy of the IP header in case we want to restore it
	 * for sending an ICMP error message in response.
	 */
	ip=mtod(m, struct ip *);
	save_ip = *ip;
	save_ip.ip_len+= iphlen;

	/*
	 * Checksum extended TCP header and data.
	 */
	tlen = ((struct ip *)ti)->ip_len;
        tcpiphdr2qlink(ti)->next = tcpiphdr2qlink(ti)->prev = NULL;
        memset(&ti->ti_i.ih_mbuf, 0 , sizeof(struct mbuf_ptr));
	ti->ti_x1 = 0;
	ti->ti_len = htons((uint16_t)tlen);
	len = sizeof(struct ip ) + tlen;
	if(cksum(m, len)) {
	  goto drop;
	}

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = ti->ti_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
	  goto drop;
	}
	tlen -= off;
	ti->ti_len = tlen;
	if (off > sizeof (struct tcphdr)) {
	  optlen = off - sizeof (struct tcphdr);
	  optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
	}
	tiflags = ti->ti_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	NTOHL(ti->ti_seq);
	NTOHL(ti->ti_ack);
	NTOHS(ti->ti_win);
	NTOHS(ti->ti_urp);

	/*
	 * Drop TCP, IP headers and TCP options.
	 */
	m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
	m->m_len  -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);

	/*
	 * Locate pcb for segment.
	 */
findso:
	so = slirp->tcp_last_so;
	if (so->so_fport != ti->ti_dport ||
	    so->so_lport != ti->ti_sport ||
	    so->so_laddr.s_addr != ti->ti_src.s_addr ||
	    so->so_faddr.s_addr != ti->ti_dst.s_addr) {
		so = solookup(&slirp->tcb, ti->ti_src, ti->ti_sport,
			       ti->ti_dst, ti->ti_dport);
		if (so)
			slirp->tcp_last_so = so;
	}

	/*
	 * If the state is CLOSED (i.e., TCB does not exist) then
	 * all data in the incoming segment is discarded.
	 * If the TCB exists but is in CLOSED state, it is embryonic,
	 * but should either do a listen or a connect soon.
	 *
	 * state == CLOSED means we've done socreate() but haven't
	 * attached it to a protocol yet...
	 *
	 * XXX If a TCB does not exist, and the TH_SYN flag is
	 * the only flag set, then create a session, mark it
	 * as if it was LISTENING, and continue...
	 */
        if (so == NULL) {
          if (slirp->restricted) {
            /* Any hostfwds will have an existing socket, so we only get here
             * for non-hostfwd connections. These should be dropped, unless it
             * happens to be a guestfwd.
             */
            for (ex_ptr = slirp->exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) {
                if (ex_ptr->ex_fport == ti->ti_dport &&
                    ti->ti_dst.s_addr == ex_ptr->ex_addr.s_addr) {
                    break;
                }
            }
            if (!ex_ptr) {
                goto dropwithreset;
            }
          }

	  if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
	    goto dropwithreset;

	  if ((so = socreate(slirp)) == NULL)
	    goto dropwithreset;
	  if (tcp_attach(so) < 0) {
	    free(so); /* Not sofree (if it failed, it's not insqued) */
	    goto dropwithreset;
	  }

	  sbreserve(&so->so_snd, TCP_SNDSPACE);
	  sbreserve(&so->so_rcv, TCP_RCVSPACE);

	  so->so_laddr = ti->ti_src;
	  so->so_lport = ti->ti_sport;
	  so->so_faddr = ti->ti_dst;
	  so->so_fport = ti->ti_dport;

	  if ((so->so_iptos = tcp_tos(so)) == 0)
	    so->so_iptos = ((struct ip *)ti)->ip_tos;

	  tp = sototcpcb(so);
	  tp->t_state = TCPS_LISTEN;
	}

        /*
         * If this is a still-connecting socket, this is probably
         * a retransmit of the SYN.  Whether it's a retransmit SYN
         * or something else, we nuke it.
         */
        if (so->so_state & SS_ISFCONNECTING)
                goto drop;

	tp = sototcpcb(so);

	/* XXX Should never fail */
        if (tp == NULL)
		goto dropwithreset;
	if (tp->t_state == TCPS_CLOSED)
		goto drop;

	tiwin = ti->ti_win;

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 */
	tp->t_idle = 0;
	if (SO_OPTIONS)
	   tp->t_timer[TCPT_KEEP] = TCPTV_KEEPINTVL;
	else
	   tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_IDLE;

	/*
	 * Process options if not in LISTEN state,
	 * else do it below (after getting remote address).
	 */
	if (optp && tp->t_state != TCPS_LISTEN)
		tcp_dooptions(tp, (u_char *)optp, optlen, ti);

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 *
	 * XXX Some of these tests are not needed
	 * eg: the tiwin == tp->snd_wnd prevents many more
	 * predictions.. with no *real* advantage..
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    ti->ti_seq == tp->rcv_nxt &&
	    tiwin && tiwin == tp->snd_wnd &&
	    tp->snd_nxt == tp->snd_max) {
		if (ti->ti_len == 0) {
			if (SEQ_GT(ti->ti_ack, tp->snd_una) &&
			    SEQ_LEQ(ti->ti_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd) {
				/*
				 * this is a pure ack for outstanding data.
				 */
				if (tp->t_rtt &&
				    SEQ_GT(ti->ti_ack, tp->t_rtseq))
					tcp_xmit_timer(tp, tp->t_rtt);
				acked = ti->ti_ack - tp->snd_una;
				sbdrop(&so->so_snd, acked);
				tp->snd_una = ti->ti_ack;
				m_free(m);

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
				if (tp->snd_una == tp->snd_max)
					tp->t_timer[TCPT_REXMT] = 0;
				else if (tp->t_timer[TCPT_PERSIST] == 0)
					tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;

				/*
				 * This is called because sowwakeup might have
				 * put data into so_snd.  Since we don't do sowwakeup,
				 * we don't need this.. XXX???
				 */
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);

				return;
			}
		} else if (ti->ti_ack == tp->snd_una &&
		    tcpfrag_list_empty(tp) &&
		    ti->ti_len <= sbspace(&so->so_rcv)) {
			/*
			 * this is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			tp->rcv_nxt += ti->ti_len;
			/*
			 * Add data to socket buffer.
			 */
			if (so->so_emu) {
				if (tcp_emu(so,m)) sbappend(so, m);
			} else
				sbappend(so, m);

			/*
			 * If this is a short packet, then ACK now - with Nagle
			 *	congestion avoidance sender won't send more until
			 *	he gets an ACK.
			 *
			 * It is better to not delay acks at all to maximize
			 * TCP throughput.  See RFC 2581.
			 */
			tp->t_flags |= TF_ACKNOW;
			tcp_output(tp);
			return;
		}
	} /* header prediction */
	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	{ int win;
          win = sbspace(&so->so_rcv);
	  if (win < 0)
	    win = 0;
	  tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
	}

	switch (tp->t_state) {

	/*
	 * If the state is LISTEN then ignore segment if it contains an RST.
	 * If the segment contains an ACK then it is bad and send a RST.
	 * If it does not contain a SYN then it is not interesting; drop it.
	 * Don't bother responding if the destination was a broadcast.
	 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
	 * tp->iss, and send a segment:
	 *     <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
	 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
	 * Fill in remote peer address fields if not previously specified.
	 * Enter SYN_RECEIVED state, and process any other fields of this
	 * segment in this state.
	 */
	case TCPS_LISTEN: {

	  if (tiflags & TH_RST)
	    goto drop;
	  if (tiflags & TH_ACK)
	    goto dropwithreset;
	  if ((tiflags & TH_SYN) == 0)
	    goto drop;

	  /*
	   * This has way too many gotos...
	   * But a bit of spaghetti code never hurt anybody :)
	   */

	  /*
	   * If this is destined for the control address, then flag to
	   * tcp_ctl once connected, otherwise connect
	   */
	  if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) ==
	      slirp->vnetwork_addr.s_addr) {
	    if (so->so_faddr.s_addr != slirp->vhost_addr.s_addr &&
		so->so_faddr.s_addr != slirp->vnameserver_addr.s_addr) {
		/* May be an exec added to the exec_list */
		for (ex_ptr = slirp->exec_list; ex_ptr;
		     ex_ptr = ex_ptr->ex_next) {
		  if(ex_ptr->ex_fport == so->so_fport &&
		     so->so_faddr.s_addr == ex_ptr->ex_addr.s_addr) {
		    so->so_state |= SS_CTL;
		    break;
		  }
		}
		if (so->so_state & SS_CTL) {
		    goto cont_input;
		}
	    }
	    /* CTL_ALIAS: Do nothing, tcp_fconnect will be called on it */
	  }

	  if (so->so_emu & EMU_NOCONNECT) {
	    so->so_emu &= ~EMU_NOCONNECT;
	    goto cont_input;
	  }

          if ((tcp_fconnect(so) == -1) &&
#if defined(_WIN32)
              socket_error() != WSAEWOULDBLOCK
#else
              (errno != EINPROGRESS) && (errno != EWOULDBLOCK)
#endif
          ) {
	    u_char code=ICMP_UNREACH_NET;
	    DEBUG_MISC((dfd, " tcp fconnect errno = %d-%s\n",
			errno,strerror(errno)));
	    if(errno == ECONNREFUSED) {
	      /* ACK the SYN, send RST to refuse the connection */
	      tcp_respond(tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
			  TH_RST|TH_ACK);
	    } else {
	      if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST;
	      HTONL(ti->ti_seq);             /* restore tcp header */
	      HTONL(ti->ti_ack);
	      HTONS(ti->ti_win);
	      HTONS(ti->ti_urp);
	      m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
	      m->m_len  += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
	      *ip=save_ip;
	      icmp_error(m, ICMP_UNREACH,code, 0,strerror(errno));
	    }
            tcp_close(tp);
	    m_free(m);
	  } else {
	    /*
	     * Haven't connected yet, save the current mbuf
	     * and ti, and return
	     * XXX Some OS's don't tell us whether the connect()
	     * succeeded or not.  So we must time it out.
	     */
	    so->so_m = m;
	    so->so_ti = ti;
	    tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
	    tp->t_state = TCPS_SYN_RECEIVED;
	    tcp_template(tp);
	  }
	  return;

	cont_conn:
	  /* m==NULL
	   * Check if the connect succeeded
	   */
	  if (so->so_state & SS_NOFDREF) {
	    tp = tcp_close(tp);
	    goto dropwithreset;
	  }
	cont_input:
	  tcp_template(tp);

	  if (optp)
	    tcp_dooptions(tp, (u_char *)optp, optlen, ti);

	  if (iss)
	    tp->iss = iss;
	  else
	    tp->iss = slirp->tcp_iss;
	  slirp->tcp_iss += TCP_ISSINCR/2;
	  tp->irs = ti->ti_seq;
	  tcp_sendseqinit(tp);
	  tcp_rcvseqinit(tp);
	  tp->t_flags |= TF_ACKNOW;
	  tp->t_state = TCPS_SYN_RECEIVED;
	  tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
	  goto trimthenstep6;
	} /* case TCPS_LISTEN */

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((tiflags & TH_ACK) &&
		    (SEQ_LEQ(ti->ti_ack, tp->iss) ||
		     SEQ_GT(ti->ti_ack, tp->snd_max)))
			goto dropwithreset;

		if (tiflags & TH_RST) {
                        if (tiflags & TH_ACK) {
                                tcp_drop(tp, 0); /* XXX Check t_softerror! */
                        }
			goto drop;
		}

		if ((tiflags & TH_SYN) == 0)
			goto drop;
		if (tiflags & TH_ACK) {
			tp->snd_una = ti->ti_ack;
			if (SEQ_LT(tp->snd_nxt, tp->snd_una))
				tp->snd_nxt = tp->snd_una;
		}

		tp->t_timer[TCPT_REXMT] = 0;
		tp->irs = ti->ti_seq;
		tcp_rcvseqinit(tp);
		tp->t_flags |= TF_ACKNOW;
		if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) {
			soisfconnected(so);
			tp->t_state = TCPS_ESTABLISHED;

			(void) tcp_reass(tp, (struct tcpiphdr *)0,
				(struct mbuf *)0);
			/*
			 * if we didn't have to retransmit the SYN,
			 * use its rtt as our initial srtt & rtt var.
			 */
			if (tp->t_rtt)
				tcp_xmit_timer(tp, tp->t_rtt);
		} else
			tp->t_state = TCPS_SYN_RECEIVED;

trimthenstep6:
		/*
		 * Advance ti->ti_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		ti->ti_seq++;
		if (ti->ti_len > tp->rcv_wnd) {
			todrop = ti->ti_len - tp->rcv_wnd;
			m_adj(m, -todrop);
			ti->ti_len = tp->rcv_wnd;
			tiflags &= ~TH_FIN;
		}
		tp->snd_wl1 = ti->ti_seq - 1;
		tp->rcv_up = ti->ti_seq;
		goto step6;
	} /* switch tp->t_state */
	/*
	 * States other than LISTEN or SYN_SENT.
	 * Check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 */
	todrop = tp->rcv_nxt - ti->ti_seq;
	if (todrop > 0) {
		if (tiflags & TH_SYN) {
			tiflags &= ~TH_SYN;
			ti->ti_seq++;
			if (ti->ti_urp > 1)
				ti->ti_urp--;
			else
				tiflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > ti->ti_len
		    || (todrop == ti->ti_len && (tiflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			tiflags &= ~TH_FIN;

			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = ti->ti_len;
		}
		m_adj(m, todrop);
		ti->ti_seq += todrop;
		ti->ti_len -= todrop;
		if (ti->ti_urp > todrop)
			ti->ti_urp -= todrop;
		else {
			tiflags &= ~TH_URG;
			ti->ti_urp = 0;
		}
	}
	/*
	 * If new data are received on a connection after the
	 * user processes are gone, then RST the other end.
	 */
	if ((so->so_state & SS_NOFDREF) &&
	    tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) {
		tp = tcp_close(tp);
		goto dropwithreset;
	}

	/*
	 * If segment ends after window, drop trailing data
	 * (and PUSH and FIN); if nothing left, just ACK.
	 */
	todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
	if (todrop > 0) {
		if (todrop >= ti->ti_len) {
			/*
			 * If a new connection request is received
			 * while in TIME_WAIT, drop the old connection
			 * and start over if the sequence numbers
			 * are above the previous ones.
			 */
			if (tiflags & TH_SYN &&
			    tp->t_state == TCPS_TIME_WAIT &&
			    SEQ_GT(ti->ti_seq, tp->rcv_nxt)) {
				iss = tp->rcv_nxt + TCP_ISSINCR;
				tp = tcp_close(tp);
				goto findso;
			}
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
			} else {
				goto dropafterack;
			}
		}
		m_adj(m, -todrop);
		ti->ti_len -= todrop;
		tiflags &= ~(TH_PUSH|TH_FIN);
	}

	/*
	 * If the RST bit is set examine the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK, TIME_WAIT STATES
	 *	Close the tcb.
	 */
	if (tiflags&TH_RST) switch (tp->t_state) {

	case TCPS_SYN_RECEIVED:
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
		tp->t_state = TCPS_CLOSED;
                tcp_close(tp);
		goto drop;

	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
	case TCPS_TIME_WAIT:
                tcp_close(tp);
		goto drop;
	}

	/*
	 * If a SYN is in the window, then this is an
	 * error and we send an RST and drop the connection.
	 */
	if (tiflags & TH_SYN) {
		tp = tcp_drop(tp,0);
		goto dropwithreset;
	}

	/*
	 * If the ACK bit is off we drop the segment and return.
	 */
	if ((tiflags & TH_ACK) == 0) goto drop;

	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {
	/*
	 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
	 * ESTABLISHED state and continue processing, otherwise
	 * send an RST.  una<=ack<=max
	 */
	case TCPS_SYN_RECEIVED:

		if (SEQ_GT(tp->snd_una, ti->ti_ack) ||
		    SEQ_GT(ti->ti_ack, tp->snd_max))
			goto dropwithreset;
		tp->t_state = TCPS_ESTABLISHED;
		/*
		 * The sent SYN is ack'ed with our sequence number +1
		 * The first data byte already in the buffer will get
		 * lost if no correction is made.  This is only needed for
		 * SS_CTL since the buffer is empty otherwise.
		 * tp->snd_una++; or:
		 */
		tp->snd_una=ti->ti_ack;
		if (so->so_state & SS_CTL) {
		  /* So tcp_ctl reports the right state */
		  ret = tcp_ctl(so);
		  if (ret == 1) {
		    soisfconnected(so);
		    so->so_state &= ~SS_CTL;   /* success XXX */
		  } else if (ret == 2) {
		    so->so_state &= SS_PERSISTENT_MASK;
		    so->so_state |= SS_NOFDREF; /* CTL_CMD */
		  } else {
		    needoutput = 1;
		    tp->t_state = TCPS_FIN_WAIT_1;
		  }
		} else {
		  soisfconnected(so);
		}

		(void) tcp_reass(tp, (struct tcpiphdr *)0, (struct mbuf *)0);
		tp->snd_wl1 = ti->ti_seq - 1;
		/* Avoid ack processing; snd_una==ti_ack  =>  dup ack */
		goto synrx_to_est;
		/* fall into ... */

	/*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < ti->ti_ack <= tp->snd_max
	 * then advance tp->snd_una to ti->ti_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up to date window information we update our window information.
	 */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
	case TCPS_TIME_WAIT:

		if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) {
			if (ti->ti_len == 0 && tiwin == tp->snd_wnd) {
			  DEBUG_MISC((dfd, " dup ack  m = %lx  so = %lx\n",
				      (long )m, (long )so));
				/*
				 * If we have outstanding data (other than
				 * a window probe), this is a completely
				 * duplicate ack (ie, window info didn't
				 * change), the ack is the biggest we've
				 * seen and we've seen exactly our rexmt
				 * threshold of them, assume a packet
				 * has been dropped and retransmit it.
				 * Kludge snd_nxt & the congestion
				 * window so we send only this one
				 * packet.
				 *
				 * We know we're losing at the current
				 * window size so do congestion avoidance
				 * (set ssthresh to half the current window
				 * and pull our congestion window back to
				 * the new ssthresh).
				 *
				 * Dup acks mean that packets have left the
				 * network (they're now cached at the receiver)
				 * so bump cwnd by the amount in the receiver
				 * to keep a constant cwnd packets in the
				 * network.
				 */
				if (tp->t_timer[TCPT_REXMT] == 0 ||
				    ti->ti_ack != tp->snd_una)
					tp->t_dupacks = 0;
				else if (++tp->t_dupacks == TCPREXMTTHRESH) {
					tcp_seq onxt = tp->snd_nxt;
					u_int win =
					    min(tp->snd_wnd, tp->snd_cwnd) / 2 /
						tp->t_maxseg;

					if (win < 2)
						win = 2;
					tp->snd_ssthresh = win * tp->t_maxseg;
					tp->t_timer[TCPT_REXMT] = 0;
					tp->t_rtt = 0;
					tp->snd_nxt = ti->ti_ack;
					tp->snd_cwnd = tp->t_maxseg;
					(void) tcp_output(tp);
					tp->snd_cwnd = tp->snd_ssthresh +
					       tp->t_maxseg * tp->t_dupacks;
					if (SEQ_GT(onxt, tp->snd_nxt))
						tp->snd_nxt = onxt;
					goto drop;
				} else if (tp->t_dupacks > TCPREXMTTHRESH) {
					tp->snd_cwnd += tp->t_maxseg;
					(void) tcp_output(tp);
					goto drop;
				}
			} else
				tp->t_dupacks = 0;
			break;
		}
	synrx_to_est:
		/*
		 * If the congestion window was inflated to account
		 * for the other side's cached packets, retract it.
		 */
		if (tp->t_dupacks > TCPREXMTTHRESH &&
		    tp->snd_cwnd > tp->snd_ssthresh)
			tp->snd_cwnd = tp->snd_ssthresh;
		tp->t_dupacks = 0;
		if (SEQ_GT(ti->ti_ack, tp->snd_max)) {
			goto dropafterack;
		}
		acked = ti->ti_ack - tp->snd_una;

		/*
		 * If transmit timer is running and timed sequence
		 * number was acked, update smoothed round trip time.
		 * Since we now have an rtt measurement, cancel the
		 * timer backoff (cf., Phil Karn's retransmit alg.).
		 * Recompute the initial retransmit timer.
		 */
		if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
			tcp_xmit_timer(tp,tp->t_rtt);

		/*
		 * If all outstanding data is acked, stop retransmit
		 * timer and remember to restart (more output or persist).
		 * If there is more data to be acked, restart retransmit
		 * timer, using current (possibly backed-off) value.
		 */
		if (ti->ti_ack == tp->snd_max) {
			tp->t_timer[TCPT_REXMT] = 0;
			needoutput = 1;
		} else if (tp->t_timer[TCPT_PERSIST] == 0)
			tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
		/*
		 * When new data is acked, open the congestion window.
		 * If the window gives us less than ssthresh packets
		 * in flight, open exponentially (maxseg per packet).
		 * Otherwise open linearly: maxseg per window
		 * (maxseg^2 / cwnd per packet).
		 */
		{
		  register u_int cw = tp->snd_cwnd;
		  register u_int incr = tp->t_maxseg;

		  if (cw > tp->snd_ssthresh)
		    incr = incr * incr / cw;
		  tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
		}
		if (acked > so->so_snd.sb_cc) {
			tp->snd_wnd -= so->so_snd.sb_cc;
			sbdrop(&so->so_snd, (int )so->so_snd.sb_cc);
			ourfinisacked = 1;
		} else {
			sbdrop(&so->so_snd, acked);
			tp->snd_wnd -= acked;
			ourfinisacked = 0;
		}
		tp->snd_una = ti->ti_ack;
		if (SEQ_LT(tp->snd_nxt, tp->snd_una))
			tp->snd_nxt = tp->snd_una;

		switch (tp->t_state) {

		/*
		 * In FIN_WAIT_1 STATE in addition to the processing
		 * for the ESTABLISHED state if our FIN is now acknowledged
		 * then enter FIN_WAIT_2.
		 */
		case TCPS_FIN_WAIT_1:
			if (ourfinisacked) {
				/*
				 * If we can't receive any more
				 * data, then closing user can proceed.
				 * Starting the timer is contrary to the
				 * specification, but if we don't get a FIN
				 * we'll hang forever.
				 */
				if (so->so_state & SS_FCANTRCVMORE) {
					tp->t_timer[TCPT_2MSL] = TCP_MAXIDLE;
				}
				tp->t_state = TCPS_FIN_WAIT_2;
			}
			break;

	 	/*
		 * In CLOSING STATE in addition to the processing for
		 * the ESTABLISHED state if the ACK acknowledges our FIN
		 * then enter the TIME-WAIT state, otherwise ignore
		 * the segment.
		 */
		case TCPS_CLOSING:
			if (ourfinisacked) {
				tp->t_state = TCPS_TIME_WAIT;
				tcp_canceltimers(tp);
				tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
			}
			break;

		/*
		 * In LAST_ACK, we may still be waiting for data to drain
		 * and/or to be acked, as well as for the ack of our FIN.
		 * If our FIN is now acknowledged, delete the TCB,
		 * enter the closed state and return.
		 */
		case TCPS_LAST_ACK:
			if (ourfinisacked) {
                                tcp_close(tp);
				goto drop;
			}
			break;

		/*
		 * In TIME_WAIT state the only thing that should arrive
		 * is a retransmission of the remote FIN.  Acknowledge
		 * it and restart the finack timer.
		 */
		case TCPS_TIME_WAIT:
			tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
			goto dropafterack;
		}
	} /* switch(tp->t_state) */

step6:
	/*
	 * Update window information.
	 * Don't look at window if no ACK: TAC's send garbage on first SYN.
	 */
	if ((tiflags & TH_ACK) &&
	    (SEQ_LT(tp->snd_wl1, ti->ti_seq) ||
	    (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) ||
	    (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd))))) {
		tp->snd_wnd = tiwin;
		tp->snd_wl1 = ti->ti_seq;
		tp->snd_wl2 = ti->ti_ack;
		if (tp->snd_wnd > tp->max_sndwnd)
			tp->max_sndwnd = tp->snd_wnd;
		needoutput = 1;
	}

	/*
	 * Process segments with URG.
	 */
	if ((tiflags & TH_URG) && ti->ti_urp &&
	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
		/*
		 * This is a kludge, but if we receive and accept
		 * random urgent pointers, we'll crash in
		 * soreceive.  It's hard to imagine someone
		 * actually wanting to send this much urgent data.
		 */
		if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen) {
			ti->ti_urp = 0;
			tiflags &= ~TH_URG;
			goto dodata;
		}
		/*
		 * If this segment advances the known urgent pointer,
		 * then mark the data stream.  This should not happen
		 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
		 * a FIN has been received from the remote side.
		 * In these states we ignore the URG.
		 *
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section as the original
		 * spec states (in one of two places).
		 */
		if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) {
			tp->rcv_up = ti->ti_seq + ti->ti_urp;
			so->so_urgc =  so->so_rcv.sb_cc +
				(tp->rcv_up - tp->rcv_nxt); /* -1; */
			tp->rcv_up = ti->ti_seq + ti->ti_urp;

		}
	} else
		/*
		 * If no out of band data is expected,
		 * pull receive urgent pointer along
		 * with the receive window.
		 */
		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
			tp->rcv_up = tp->rcv_nxt;
dodata:

	/*
	 * If this is a small packet, then ACK now - with Nagle
	 *      congestion avoidance sender won't send more until
	 *      he gets an ACK.
	 */
	if (ti->ti_len && (unsigned)ti->ti_len <= 5 &&
	    ((struct tcpiphdr_2 *)ti)->first_char == (char)27) {
		tp->t_flags |= TF_ACKNOW;
	}

	/*
	 * Process the segment text, merging it into the TCP sequencing queue,
	 * and arranging for acknowledgment of receipt if necessary.
	 * This process logically involves adjusting tp->rcv_wnd as data
	 * is presented to the user (this happens in tcp_usrreq.c,
	 * case PRU_RCVD).  If a FIN has already been received on this
	 * connection then we just ignore the text.
	 */
	if ((ti->ti_len || (tiflags&TH_FIN)) &&
	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
		TCP_REASS(tp, ti, m, so, tiflags);
	} else {
		m_free(m);
		tiflags &= ~TH_FIN;
	}

	/*
	 * If FIN is received ACK the FIN and let the user know
	 * that the connection is closing.
	 */
	if (tiflags & TH_FIN) {
		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
			/*
			 * If we receive a FIN we can't send more data,
			 * so set SS_FDRAIN.
			 * Shutdown the socket if there is no rx data in the
			 * buffer.
			 * soread() is called on completion of shutdown() and
			 * will go to TCPS_LAST_ACK, and use tcp_output()
			 * to send the FIN.
			 */
			sofwdrain(so);

			tp->t_flags |= TF_ACKNOW;
			tp->rcv_nxt++;
		}
		switch (tp->t_state) {

	 	/*
		 * In SYN_RECEIVED and ESTABLISHED STATES
		 * enter the CLOSE_WAIT state.
		 */
		case TCPS_SYN_RECEIVED:
		case TCPS_ESTABLISHED:
		  if(so->so_emu == EMU_CTL)        /* no shutdown on socket */
		    tp->t_state = TCPS_LAST_ACK;
		  else
		    tp->t_state = TCPS_CLOSE_WAIT;
		  break;

	 	/*
		 * If still in FIN_WAIT_1 STATE FIN has not been acked so
		 * enter the CLOSING state.
		 */
		case TCPS_FIN_WAIT_1:
			tp->t_state = TCPS_CLOSING;
			break;

	 	/*
		 * In FIN_WAIT_2 state enter the TIME_WAIT state,
		 * starting the time-wait timer, turning off the other
		 * standard timers.
		 */
		case TCPS_FIN_WAIT_2:
			tp->t_state = TCPS_TIME_WAIT;
			tcp_canceltimers(tp);
			tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
			break;

		/*
		 * In TIME_WAIT state restart the 2 MSL time_wait timer.
		 */
		case TCPS_TIME_WAIT:
			tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
			break;
		}
	}

	/*
	 * Return any desired output.
	 */
	if (needoutput || (tp->t_flags & TF_ACKNOW)) {
		(void) tcp_output(tp);
	}
	return;

dropafterack:
	/*
	 * Generate an ACK dropping incoming segment if it occupies
	 * sequence space, where the ACK reflects our state.
	 */
	if (tiflags & TH_RST)
		goto drop;
	m_free(m);
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	return;

dropwithreset:
	/* reuses m if m!=NULL, m_free() unnecessary */
	if (tiflags & TH_ACK)
		tcp_respond(tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
	else {
		if (tiflags & TH_SYN) ti->ti_len++;
		tcp_respond(tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
		    TH_RST|TH_ACK);
	}

	return;

drop:
	/*
	 * Drop space held by incoming segment and return.
	 */
	m_free(m);
}
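
The header-prediction test near the top of tcp_input() packs several conditions into one if statement. The sketch below pulls the same test out into a standalone predicate for readability; the cut-down struct and the constant definitions are assumptions that mirror the usual BSD headers, not slirp's actual types.

/*
 * Standalone sketch of the header-prediction candidacy test above.
 * Flag and state values follow the conventional netinet/tcp.h and
 * tcp_fsm.h definitions.
 */
#include <stdint.h>

#define TH_FIN  0x01
#define TH_SYN  0x02
#define TH_RST  0x04
#define TH_PUSH 0x08
#define TH_ACK  0x10
#define TH_URG  0x20
#define TCPS_ESTABLISHED 4

struct hp_tcpcb {		/* stand-in with only the fields the test needs */
	int      t_state;
	uint32_t rcv_nxt, snd_wnd, snd_nxt, snd_max;
};

static int
header_prediction_candidate(const struct hp_tcpcb *tp, int tiflags,
    uint32_t ti_seq, uint32_t tiwin)
{
	return (tp->t_state == TCPS_ESTABLISHED &&
	    (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    ti_seq == tp->rcv_nxt &&			/* in sequence */
	    tiwin != 0 && tiwin == tp->snd_wnd &&	/* window unchanged */
	    tp->snd_nxt == tp->snd_max);		/* not retransmitting */
}
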
Example No. 5
/*
 * This function is called upon receipt of new valid data (while not in header
 * prediction mode), and it updates the ordered list of sacks.
 */
void
tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
{
	/*
	 * First reported block MUST be the most recent one.  Subsequent
	 * blocks SHOULD be in the order in which they arrived at the
	 * receiver.  These two conditions make the implementation fully
	 * compliant with RFC 2018.
	 */
	struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
	int num_head, num_saved, i;

	/* SACK block for the received segment. */
	head_blk.start = rcv_start;
	head_blk.end = rcv_end;

	/*
	 * Merge updated SACK blocks into head_blk, and
	 * save unchanged SACK blocks into saved_blks[].
	 * num_saved will have the number of the saved SACK blocks.
	 */
	num_saved = 0;
	for (i = 0; i < tp->rcv_numsacks; i++) {
		tcp_seq start = tp->sackblks[i].start;
		tcp_seq end = tp->sackblks[i].end;
		if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
			/*
			 * Discard this SACK block.
			 */
		} else if (SEQ_LEQ(head_blk.start, end) &&
			   SEQ_GEQ(head_blk.end, start)) {
			/*
			 * Merge this SACK block into head_blk.
			 * This SACK block itself will be discarded.
			 */
			if (SEQ_GT(head_blk.start, start))
				head_blk.start = start;
			if (SEQ_LT(head_blk.end, end))
				head_blk.end = end;
		} else {
			/*
			 * Save this SACK block.
			 */
			saved_blks[num_saved].start = start;
			saved_blks[num_saved].end = end;
			num_saved++;
		}
	}

	/*
	 * Update SACK list in tp->sackblks[].
	 */
	num_head = 0;
	if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
		/*
		 * The received data segment is an out-of-order segment.
		 * Put head_blk at the top of SACK list.
		 */
		tp->sackblks[0] = head_blk;
		num_head = 1;
		/*
		 * If the number of saved SACK blocks exceeds its limit,
		 * discard the last SACK block.
		 */
		if (num_saved >= MAX_SACK_BLKS)
			num_saved--;
	}
	if (num_saved > 0) {
		/*
		 * Copy the saved SACK blocks back.
		 */
		bcopy(saved_blks, &tp->sackblks[num_head],
		      sizeof(struct sackblk) * num_saved);
	}

	/* Save the number of SACK blocks. */
	tp->rcv_numsacks = num_head + num_saved;

	/* If we are requesting SACK recovery, reset the stretch-ack state
	 * so that the connection will generate more acks after recovery and
	 * the sender's cwnd will open.
	 */
	if ((tp->t_flags & TF_STRETCHACK) != 0 && tp->rcv_numsacks > 0)
		tcp_reset_stretch_ack(tp);

#if TRAFFIC_MGT
	if (tp->acc_iaj > 0 && tp->rcv_numsacks > 0) 
		reset_acc_iaj(tp);
#endif /* TRAFFIC_MGT */
}
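
The per-block decision inside the merge loop above (discard, merge into head_blk, or save unchanged) can be restated on plain 32-bit sequence numbers. The sketch below is illustrative only; the enum, macro prefixes, and function name are assumptions, not part of this source.

/*
 * Sketch of the per-block decision in the loop above, on plain
 * uint32_t sequence numbers with wrap-safe comparisons.
 */
#include <stdint.h>

#define SKETCH_SEQ_LT(a, b)   ((int32_t)((a) - (b)) <  0)
#define SKETCH_SEQ_LEQ(a, b)  ((int32_t)((a) - (b)) <= 0)
#define SKETCH_SEQ_GT(a, b)   ((int32_t)((a) - (b)) >  0)
#define SKETCH_SEQ_GEQ(a, b)  ((int32_t)((a) - (b)) >= 0)

enum sack_blk_fate { BLK_DISCARD, BLK_MERGED, BLK_SAVED };

static enum sack_blk_fate
classify_old_block(uint32_t *head_start, uint32_t *head_end,
    uint32_t start, uint32_t end, uint32_t rcv_nxt)
{
	if (SKETCH_SEQ_GEQ(start, end) || SKETCH_SEQ_LEQ(start, rcv_nxt))
		return (BLK_DISCARD);		/* stale or malformed */
	if (SKETCH_SEQ_LEQ(*head_start, end) &&
	    SKETCH_SEQ_GEQ(*head_end, start)) {
		/* Overlaps the new block: grow head_blk to cover both. */
		if (SKETCH_SEQ_GT(*head_start, start))
			*head_start = start;
		if (SKETCH_SEQ_LT(*head_end, end))
			*head_end = end;
		return (BLK_MERGED);
	}
	return (BLK_SAVED);			/* keep unchanged, in order */
}
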
Example No. 6
/*
 * Process cumulative ACK and the TCP SACK option to update the scoreboard.
 * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of
 * the sequence space).
 */
void
tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, tcp_seq th_ack)
{
    struct sackhole *cur, *temp;
    struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp;
    int i, j, num_sack_blks;

//ScenSim-Port//    INP_WLOCK_ASSERT(tp->t_inpcb);

    num_sack_blks = 0;
    /*
     * If SND.UNA will be advanced by SEG.ACK, and if SACK holes exist,
     * treat [SND.UNA, SEG.ACK) as if it is a SACK block.
     */
    if (SEQ_LT(tp->snd_una, th_ack) && !TAILQ_EMPTY(&tp->snd_holes)) {
        sack_blocks[num_sack_blks].start = tp->snd_una;
        sack_blocks[num_sack_blks++].end = th_ack;
    }
    /*
     * Append received valid SACK blocks to sack_blocks[], but only if we
     * received new blocks from the other side.
     */
    if (to->to_flags & TOF_SACK) {
        for (i = 0; i < to->to_nsacks; i++) {
            bcopy((to->to_sacks + i * TCPOLEN_SACK),
                &sack, sizeof(sack));
            sack.start = ntohl(sack.start);
            sack.end = ntohl(sack.end);
            if (SEQ_GT(sack.end, sack.start) &&
                SEQ_GT(sack.start, tp->snd_una) &&
                SEQ_GT(sack.start, th_ack) &&
                SEQ_LT(sack.start, tp->snd_max) &&
                SEQ_GT(sack.end, tp->snd_una) &&
                SEQ_LEQ(sack.end, tp->snd_max))
                sack_blocks[num_sack_blks++] = sack;
        }
    }
    /*
     * Return if SND.UNA is not advanced and no valid SACK block is
     * received.
     */
    if (num_sack_blks == 0)
        return;

    /*
     * Sort the SACK blocks so we can update the scoreboard with just one
     * pass. The overhead of sorting up to 4+1 elements is less than
     * making up to 4+1 passes over the scoreboard.
     */
    for (i = 0; i < num_sack_blks; i++) {
        for (j = i + 1; j < num_sack_blks; j++) {
            if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
                sack = sack_blocks[i];
                sack_blocks[i] = sack_blocks[j];
                sack_blocks[j] = sack;
            }
        }
    }
    if (TAILQ_EMPTY(&tp->snd_holes))
        /*
         * Empty scoreboard. Need to initialize snd_fack (it may be
         * uninitialized or have a bogus value). Scoreboard holes
         * (from the sack blocks received) are created later below
         * (in the logic that adds holes to the tail of the
         * scoreboard).
         */
        tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack);
    /*
     * In the while-loop below, incoming SACK blocks (sack_blocks[]) and
     * SACK holes (snd_holes) are traversed from their tails with just
     * one pass in order to reduce the number of compares especially when
     * the bandwidth-delay product is large.
     *
     * Note: Typically, in the first RTT of SACK recovery, the highest
     * three or four SACK blocks with the same ack number are received.
     * In the second RTT, if retransmitted data segments are not lost,
     * the highest three or four SACK blocks with ack number advancing
     * are received.
     */
    sblkp = &sack_blocks[num_sack_blks - 1];    /* Last SACK block */
    tp->sackhint.last_sack_ack = sblkp->end;
    if (SEQ_LT(tp->snd_fack, sblkp->start)) {
        /*
         * The highest SACK block is beyond fack.  Append new SACK
         * hole at the tail.  If the second or later highest SACK
         * blocks are also beyond the current fack, they will be
         * inserted by way of hole splitting in the while-loop below.
         */
        temp = tcp_sackhole_insert(tp, tp->snd_fack,sblkp->start,NULL);
        if (temp != NULL) {
            tp->snd_fack = sblkp->end;
            /* Go to the previous sack block. */
            sblkp--;
        } else {
            /*
             * We failed to add a new hole based on the current
             * sack block.  Skip over all the sack blocks that
             * fall completely to the right of snd_fack and
             * proceed to trim the scoreboard based on the
             * remaining sack blocks.  This also trims the
             * scoreboard for th_ack (which is sack_blocks[0]).
             */
            while (sblkp >= sack_blocks &&
                   SEQ_LT(tp->snd_fack, sblkp->start))
                sblkp--;
            if (sblkp >= sack_blocks &&
                SEQ_LT(tp->snd_fack, sblkp->end))
                tp->snd_fack = sblkp->end;
        }
    } else if (SEQ_LT(tp->snd_fack, sblkp->end))
        /* fack is advanced. */
        tp->snd_fack = sblkp->end;
    /* We must have at least one SACK hole in scoreboard. */
//ScenSim-Port//    KASSERT(!TAILQ_EMPTY(&tp->snd_holes),
//ScenSim-Port//        ("SACK scoreboard must not be empty"));
    cur = TAILQ_LAST(&tp->snd_holes, sackhole_head); /* Last SACK hole. */
    /*
     * Since the incoming sack blocks are sorted, we can process them
     * making one sweep of the scoreboard.
     */
    while (sblkp >= sack_blocks  && cur != NULL) {
        if (SEQ_GEQ(sblkp->start, cur->end)) {
            /*
             * SACKs data beyond the current hole.  Go to the
             * previous sack block.
             */
            sblkp--;
            continue;
        }
        if (SEQ_LEQ(sblkp->end, cur->start)) {
            /*
             * SACKs data before the current hole.  Go to the
             * previous hole.
             */
            cur = TAILQ_PREV(cur, sackhole_head, scblink);
            continue;
        }
        tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start);
//ScenSim-Port//        KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
//ScenSim-Port//            ("sackhint bytes rtx >= 0"));
        if (SEQ_LEQ(sblkp->start, cur->start)) {
            /* Data acks at least the beginning of hole. */
            if (SEQ_GEQ(sblkp->end, cur->end)) {
                /* Acks entire hole, so delete hole. */
                temp = cur;
                cur = TAILQ_PREV(cur, sackhole_head, scblink);
                tcp_sackhole_remove(tp, temp);
                /*
                 * The sack block may ack all or part of the
                 * next hole too, so continue onto the next
                 * hole.
                 */
                continue;
            } else {
                /* Move start of hole forward. */
                cur->start = sblkp->end;
                cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
            }
        } else {
            /* Data acks at least the end of hole. */
            if (SEQ_GEQ(sblkp->end, cur->end)) {
                /* Move end of hole backward. */
                cur->end = sblkp->start;
                cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
            } else {
                /*
                 * ACKs some data in middle of a hole; need
                 * to split current hole
                 */
                temp = tcp_sackhole_insert(tp, sblkp->end,
                    cur->end, cur);
                if (temp != NULL) {
                    if (SEQ_GT(cur->rxmit, temp->rxmit)) {
                        temp->rxmit = cur->rxmit;
                        tp->sackhint.sack_bytes_rexmit
                            += (temp->rxmit
                            - temp->start);
                    }
                    cur->end = sblkp->start;
                    cur->rxmit = SEQ_MIN(cur->rxmit,
                        cur->end);
                }
            }
        }
        tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start);
        /*
         * Testing sblkp->start against cur->start tells us whether
         * we're done with the sack block or the sack hole.
         * Accordingly, we advance one or the other.
         */
        if (SEQ_LEQ(sblkp->start, cur->start))
            cur = TAILQ_PREV(cur, sackhole_head, scblink);
        else
            sblkp--;
    }
}
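
When the sweep above matches a SACK block against a scoreboard hole, there are three outcomes: the block covers the whole hole (remove it), trims its front or back, or lands in the middle (split via tcp_sackhole_insert()). The sketch below shows the trimming cases on a cut-down hole structure; the names are illustrative assumptions and the split case is only indicated in a comment.

/*
 * Sketch of how one SACK block adjusts one scoreboard hole.
 */
#include <stdint.h>

#define HSK_SEQ_LEQ(a, b)  ((int32_t)((a) - (b)) <= 0)
#define HSK_SEQ_GEQ(a, b)  ((int32_t)((a) - (b)) >= 0)
#define HSK_SEQ_MAX(a, b)  (HSK_SEQ_GEQ(a, b) ? (a) : (b))
#define HSK_SEQ_MIN(a, b)  (HSK_SEQ_LEQ(a, b) ? (a) : (b))

struct hole_sketch { uint32_t start, end, rxmit; };	/* cut-down sackhole */

/* Returns 1 if the hole is fully covered and should be removed. */
static int
apply_sack_to_hole(struct hole_sketch *h, uint32_t sblk_start,
    uint32_t sblk_end)
{
	if (HSK_SEQ_LEQ(sblk_start, h->start)) {
		if (HSK_SEQ_GEQ(sblk_end, h->end))
			return (1);		/* whole hole is SACKed */
		h->start = sblk_end;		/* trim the front */
		h->rxmit = HSK_SEQ_MAX(h->rxmit, h->start);
	} else if (HSK_SEQ_GEQ(sblk_end, h->end)) {
		h->end = sblk_start;		/* trim the back */
		h->rxmit = HSK_SEQ_MIN(h->rxmit, h->end);
	} else {
		/* Block lands in the middle: the real code splits the
		 * hole with tcp_sackhole_insert(); omitted here. */
	}
	return (0);
}
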
Example No. 7
/*
 * This function is called upon receipt of new valid data (while not in
 * header prediction mode), and it updates the ordered list of sacks.
 */
void
tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
{
    /*
     * First reported block MUST be the most recent one.  Subsequent
     * blocks SHOULD be in the order in which they arrived at the
     * receiver.  These two conditions make the implementation fully
     * compliant with RFC 2018.
     */
    struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
    int num_head, num_saved, i;

//ScenSim-Port//    INP_WLOCK_ASSERT(tp->t_inpcb);

    /* Check arguments. */
//ScenSim-Port//    KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end"));

    /* SACK block for the received segment. */
    head_blk.start = rcv_start;
    head_blk.end = rcv_end;

    /*
     * Merge updated SACK blocks into head_blk, and save unchanged SACK
     * blocks into saved_blks[].  num_saved will have the number of the
     * saved SACK blocks.
     */
    num_saved = 0;
    for (i = 0; i < tp->rcv_numsacks; i++) {
        tcp_seq start = tp->sackblks[i].start;
        tcp_seq end = tp->sackblks[i].end;
        if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
            /*
             * Discard this SACK block.
             */
        } else if (SEQ_LEQ(head_blk.start, end) &&
               SEQ_GEQ(head_blk.end, start)) {
            /*
             * Merge this SACK block into head_blk.  This SACK
             * block itself will be discarded.
             */
            if (SEQ_GT(head_blk.start, start))
                head_blk.start = start;
            if (SEQ_LT(head_blk.end, end))
                head_blk.end = end;
        } else {
            /*
             * Save this SACK block.
             */
            saved_blks[num_saved].start = start;
            saved_blks[num_saved].end = end;
            num_saved++;
        }
    }

    /*
     * Update SACK list in tp->sackblks[].
     */
    num_head = 0;
    if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
        /*
         * The received data segment is an out-of-order segment.  Put
         * head_blk at the top of SACK list.
         */
        tp->sackblks[0] = head_blk;
        num_head = 1;
        /*
         * If the number of saved SACK blocks exceeds its limit,
         * discard the last SACK block.
         */
        if (num_saved >= MAX_SACK_BLKS)
            num_saved--;
    }
    if (num_saved > 0) {
        /*
         * Copy the saved SACK blocks back.
         */
        bcopy(saved_blks, &tp->sackblks[num_head],
              sizeof(struct sackblk) * num_saved);
    }

    /* Save the number of SACK blocks. */
    tp->rcv_numsacks = num_head + num_saved;
}
Example No. 8
/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
void
tcp_input(usn_mbuf_t *m, int iphlen)
{
	struct tcpiphdr *ti;
	struct inpcb *inp;
	u_char *optp = NULL;
	int optlen;
	int len, tlen, off;
	struct tcpcb *tp = 0;
	int tiflags;
	struct usn_socket *so = 0;
	int todrop, acked, ourfinisacked;
   int needoutput = 0;
	short ostate;
	struct usn_in_addr laddr;
	int dropsocket = 0;
	int iss = 0;
	u_long tiwin, ts_val, ts_ecr;
	int ts_present = 0;

   (void)needoutput;
	g_tcpstat.tcps_rcvtotal++;
 
	// Get IP and TCP header together in first mbuf.
	// Note: IP leaves IP header in first mbuf.
	ti = mtod(m, struct tcpiphdr *);
	if (iphlen > sizeof (usn_ip_t))
		ip_stripoptions(m, (usn_mbuf_t *)0);
	if (m->mlen < sizeof (struct tcpiphdr)) {
		if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) {
			g_tcpstat.tcps_rcvshort++;
			return;
		}
		ti = mtod(m, struct tcpiphdr *);
	}

#ifdef DUMP_PAYLOAD
   dump_chain(m,"tcp");
#endif

   /*
	 * Checksum extended TCP header and data.
    */
	tlen = ntohs(((usn_ip_t *)ti)->ip_len);
	len = sizeof (usn_ip_t) + tlen;
	ti->ti_next = ti->ti_prev = 0;
	ti->ti_x1 = 0;
	ti->ti_len = (u_short)tlen;
	HTONS(ti->ti_len);
   ti->ti_sum = in_cksum(m, len);
	if (ti->ti_sum) {
		g_tcpstat.tcps_rcvbadsum++;
		goto drop;
	}
   /*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length. XXX
    */
	off = ti->ti_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		g_tcpstat.tcps_rcvbadoff++;
		goto drop;
	}
	tlen -= off;
	ti->ti_len = tlen;
	if (off > sizeof (struct tcphdr)) {
		if (m->mlen < sizeof(usn_ip_t) + off) {
			if ((m = m_pullup(m, sizeof (usn_ip_t) + off)) == 0) {
				g_tcpstat.tcps_rcvshort++;
				return;
			}
			ti = mtod(m, struct tcpiphdr *);
		}
		optlen = off - sizeof (struct tcphdr);
		optp = mtod(m, u_char *) + sizeof (struct tcpiphdr);

      //	Do quick retrieval of timestamp options ("options
      // prediction?"). If timestamp is the only option and it's
      // formatted as recommended in RFC 1323 appendix A, we
      // quickly get the values now and not bother calling
      // tcp_dooptions(), etc.
		if ((optlen == TCPOLEN_TSTAMP_APPA ||
		     (optlen > TCPOLEN_TSTAMP_APPA &&
			optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
		     *(u_int *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
		     (ti->ti_flags & TH_SYN) == 0) {
			ts_present = 1;
			ts_val = ntohl(*(u_long *)(optp + 4));
			ts_ecr = ntohl(*(u_long *)(optp + 8));
			optp = NULL;	// we've parsed the options
		}
	}
	tiflags = ti->ti_flags;

	// Convert TCP protocol specific fields to host format.
	NTOHL(ti->ti_seq);
	NTOHL(ti->ti_ack);
	NTOHS(ti->ti_win);
	NTOHS(ti->ti_urp);

	// Locate pcb for segment.
findpcb:
	inp = g_tcp_last_inpcb;
	if (inp->inp_lport != ti->ti_dport ||
	    inp->inp_fport != ti->ti_sport ||
	    inp->inp_faddr.s_addr != ti->ti_src.s_addr ||
	    inp->inp_laddr.s_addr != ti->ti_dst.s_addr) {
		inp = in_pcblookup(&g_tcb, ti->ti_src, ti->ti_sport,
		    ti->ti_dst, ti->ti_dport, INPLOOKUP_WILDCARD);
		if (inp)
			g_tcp_last_inpcb = inp;
		++g_tcpstat.tcps_pcbcachemiss;
	}

	// If the state is CLOSED (i.e., TCB does not exist) then
	// all data in the incoming segment is discarded.
	// If the TCB exists but is in CLOSED state, it is embryonic,
	// but should either do a listen or a connect soon.
	if (inp == 0)
		goto dropwithreset;

	tp = intotcpcb(inp);

   DEBUG("found inp cb, laddr=%x, lport=%d, faddr=%x,"
         " fport=%d, tp_state=%d, tp_flags=%d",
         inp->inp_laddr.s_addr,
         inp->inp_lport,
         inp->inp_faddr.s_addr,
         inp->inp_fport, tp->t_state, tp->t_flags);

	if (tp == 0)
		goto dropwithreset;
	if (tp->t_state == TCPS_CLOSED)
		goto drop;
	
	// Unscale the window into a 32-bit value. 
	if ((tiflags & TH_SYN) == 0)
		tiwin = ti->ti_win << tp->snd_scale;
	else
		tiwin = ti->ti_win;

	so = inp->inp_socket;
   DEBUG("socket info, options=%x", so->so_options);

	if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) {
		if (so->so_options & SO_DEBUG) {
			ostate = tp->t_state;
			g_tcp_saveti = *ti;
		}
		if (so->so_options & SO_ACCEPTCONN) {
			if ((tiflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
				// Note: dropwithreset makes sure we don't
				// send a reset in response to a RST.
				if (tiflags & TH_ACK) {
					g_tcpstat.tcps_badsyn++;
					goto dropwithreset;
				}
            DEBUG("SYN is expected, tiflags=%d", tiflags);
				goto drop;
			}
			so = sonewconn(so, 0);
			if (so == 0) {
            DEBUG("failed to create new connection, tiflags=%d", tiflags);
				goto drop;
         }

			// Mark socket as temporary until we're
			// committed to keeping it.  The code at
			// ``drop'' and ``dropwithreset'' check the
			// flag dropsocket to see if the temporary
			// socket created here should be discarded.
			// We mark the socket as discardable until
			// we're committed to it below in TCPS_LISTEN.
			dropsocket++;
			inp = (struct inpcb *)so->so_pcb;
			inp->inp_laddr = ti->ti_dst;
			inp->inp_lport = ti->ti_dport;

         // BSD >= 4.3
			inp->inp_options = ip_srcroute();

			tp = intotcpcb(inp);
			tp->t_state = TCPS_LISTEN;

			// Compute proper scaling value from buffer space
			while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
			   TCP_MAXWIN << tp->request_r_scale < so->so_rcv->sb_hiwat)
				tp->request_r_scale++;
		}
	}

	// Segment received on connection.
	// Reset idle time and keep-alive timer.
	tp->t_idle = 0;
	tp->t_timer[TCPT_KEEP] = g_tcp_keepidle;

	// Process options if not in LISTEN state,
	// else do it below (after getting remote address).
	if (optp && tp->t_state != TCPS_LISTEN)
		tcp_dooptions(tp, optp, optlen, ti,
			&ts_present, &ts_val, &ts_ecr);

	// Header prediction: check for the two common cases
	// of a uni-directional data xfer.  If the packet has
	// no control flags, is in-sequence, the window didn't
	// change and we're not retransmitting, it's a
	// candidate.  If the length is zero and the ack moved
	// forward, we're the sender side of the xfer.  Just
	// free the data acked & wake any higher level process
	// that was blocked waiting for space.  If the length
	// is non-zero and the ack didn't move, we're the
	// receiver side.  If we're getting packets in-order
	// (the reassembly queue is empty), add the data to
	// the socket buffer and note that we need a delayed ack.
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) &&
	    ti->ti_seq == tp->rcv_nxt &&
	    tiwin && tiwin == tp->snd_wnd &&
	    tp->snd_nxt == tp->snd_max) {
		// If last ACK falls within this segment's sequence numbers,
		// record the timestamp.
      if ( ts_present && TSTMP_GEQ(ts_val, tp->ts_recent) &&
            SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) ){
			tp->ts_recent_age = g_tcp_now;
			tp->ts_recent = ts_val;
		}

		if (ti->ti_len == 0) {
			if (SEQ_GT(ti->ti_ack, tp->snd_una) &&
			    SEQ_LEQ(ti->ti_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd) {
				// this is a pure ack for outstanding data.
				++g_tcpstat.tcps_predack;
				if (ts_present)
					tcp_xmit_timer(tp, g_tcp_now-ts_ecr+1);
				else if (tp->t_rtt &&
					    SEQ_GT(ti->ti_ack, tp->t_rtseq))
					tcp_xmit_timer(tp, tp->t_rtt);

				acked = ti->ti_ack - tp->snd_una;
				g_tcpstat.tcps_rcvackpack++;
				g_tcpstat.tcps_rcvackbyte += acked;
            TRACE("drop so_snd buffer, drop_bytes=%d, len=%d", 
                  acked, so->so_snd->sb_cc);

				sbdrop(so->so_snd, acked);
				tp->snd_una = ti->ti_ack;
				usn_free_cmbuf(m);

				// If all outstanding data are acked, stop
				// retransmit timer, otherwise restart timer
				// using current (possibly backed-off) value.
				// If process is waiting for space,
				// wakeup/selwakeup/signal.  If data
				// are ready to send, let tcp_output
				// decide between more output or persist.
				if (tp->snd_una == tp->snd_max)
					tp->t_timer[TCPT_REXMT] = 0;
				else if (tp->t_timer[TCPT_PERSIST] == 0)
					tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;

	         if (so->so_options & SO_DEBUG)
             	tcp_trace(TA_INPUT, ostate, tp, &g_tcp_saveti, 0);

				//if (so->so_snd->sb_flags & SB_NOTIFY) {
            //   usnet_tcpin_wwakeup(so, USN_TCP_IN, usn_tcpev_sbnotify, 0);
				//	sowwakeup(so);
            //}

            // send buffer is available for app thread. 
            usnet_tcpin_wwakeup(so, USN_TCP_IN, USN_TCPEV_WRITE, 0);

				if (so->so_snd->sb_cc)
					tcp_output(tp);
				return;
			}
		} else if (ti->ti_ack == tp->snd_una &&
		    tp->seg_next == (struct tcpiphdr *)tp &&
		    ti->ti_len <= sbspace(so->so_rcv)) {

			// this is a pure, in-sequence data packet
			// with nothing on the reassembly queue and
			// we have enough buffer space to take it.
			++g_tcpstat.tcps_preddat;
			tp->rcv_nxt += ti->ti_len;
			g_tcpstat.tcps_rcvpack++;
			g_tcpstat.tcps_rcvbyte += ti->ti_len;

			// Drop TCP, IP headers and TCP options then add data
			// to socket buffer.
			m->head += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
			m->mlen -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);

         TRACE("add data to rcv buf");
			sbappend(so->so_rcv, m);
			sorwakeup(so);

         // new data is available for app threads.
         usnet_tcpin_rwakeup(so, USN_TCP_IN, USN_TCPEV_READ, m);

	      if (so->so_options & SO_DEBUG) {
            TRACE("tcp trace, so_options=%d", so->so_options);
          	tcp_trace(TA_INPUT, ostate, tp, &g_tcp_saveti, 0);
         }

			tp->t_flags |= TF_DELACK;
			return;
		}
	}

	// Drop TCP, IP headers and TCP options.
	m->head += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
	m->mlen -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);

	// Calculate amount of space in receive window,
	// and then do TCP input processing.
	// Receive window is amount of space in rcv queue,
	// but not less than advertised window.
   {
	   int win;
	   win = sbspace(so->so_rcv);
	   if (win < 0)
	      win = 0;
  	   tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
	}
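	// Worked example (illustrative, not from the original source): if
	// sbspace() now reports 4096 bytes free but we previously advertised
	// up to rcv_adv = rcv_nxt + 8192, the max() above keeps rcv_wnd at
	// 8192, so the window offered to the peer never appears to shrink.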

	switch (tp->t_state) {
	// If the state is LISTEN then ignore segment if it contains an RST.
	// If the segment contains an ACK then it is bad and send a RST.
	// If it does not contain a SYN then it is not interesting; drop it.
	// Don't bother responding if the destination was a broadcast.
	// Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
	// tp->iss, and send a segment:
	//     <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
	// Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
	// Fill in remote peer address fields if not previously specified.
	// Enter SYN_RECEIVED state, and process any other fields of this
	// segment in this state.
	case TCPS_LISTEN: {
		usn_mbuf_t *am;
		struct usn_sockaddr_in *sin;

		if (tiflags & TH_RST)
			goto drop;
		if (tiflags & TH_ACK)
			goto dropwithreset;
		if ((tiflags & TH_SYN) == 0)
			goto drop;

		// RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
		// in_broadcast() should never return true on a received
		// packet with M_BCAST not set.

		//if (m->m_flags & (M_BCAST|M_MCAST) ||
		//    IN_MULTICAST(ntohl(ti->ti_dst.s_addr)))
		//	goto drop;

		am = usn_get_mbuf(0, BUF_MSIZE, 0);	// XXX: the size!
		if (am == NULL)
			goto drop;
		am->mlen = sizeof (struct usn_sockaddr_in);
		sin = mtod(am, struct usn_sockaddr_in *);
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ti->ti_src;
		sin->sin_port = ti->ti_sport;
		bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero));

		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == USN_INADDR_ANY)
			inp->inp_laddr = ti->ti_dst;

		if (in_pcbconnect(inp, am)) {
			inp->inp_laddr = laddr;
			usn_free_mbuf(am);
			goto drop;
		}
		usn_free_mbuf(am);
		tp->t_template = tcp_template(tp);
		if (tp->t_template == 0) {
			tp = tcp_drop(tp, ENOBUFS);
			dropsocket = 0;		// socket is already gone
			goto drop;
		}
		if (optp)
			tcp_dooptions(tp, optp, optlen, ti,
				&ts_present, &ts_val, &ts_ecr);
		if (iss)
			tp->iss = iss;
		else
			tp->iss = g_tcp_iss;
		g_tcp_iss += TCP_ISSINCR/4;
		tp->irs = ti->ti_seq;
		tcp_sendseqinit(tp);
		tcp_rcvseqinit(tp);
		tp->t_flags |= TF_ACKNOW;
      TRACE("change tcp state to TCPS_SYN_RECEIVED, state=%d, tp_flags=%d",
            tp->t_state, tp->t_flags);
		tp->t_state = TCPS_SYN_RECEIVED;

      // tcp event
      usnet_tcpin_ewakeup(so, USN_TCP_IN, USN_TCPST_SYN_RECEIVED, 0);

		tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
		dropsocket = 0;		// committed to socket
		g_tcpstat.tcps_accepts++;
		goto trimthenstep6;
	}


	// If the state is SYN_SENT:
	//	if seg contains an ACK, but not for our SYN, drop the input.
	//	if seg contains a RST, then drop the connection.
	//	if seg does not contain SYN, then drop it.
	// Otherwise this is an acceptable SYN segment
	//	initialize tp->rcv_nxt and tp->irs
	//	if seg contains ack then advance tp->snd_una
	//	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	//	arrange for segment to be acked (eventually)
	//	continue processing rest of data/controls, beginning with URG
	case TCPS_SYN_SENT:
		if ((tiflags & TH_ACK) &&
		    (SEQ_LEQ(ti->ti_ack, tp->iss) ||
		     SEQ_GT(ti->ti_ack, tp->snd_max)))
			goto dropwithreset;
		if (tiflags & TH_RST) {
			if (tiflags & TH_ACK)
				tp = tcp_drop(tp, ECONNREFUSED);
			goto drop;
		}
		if ((tiflags & TH_SYN) == 0)
			goto drop;
		if (tiflags & TH_ACK) {
			tp->snd_una = ti->ti_ack;
			if (SEQ_LT(tp->snd_nxt, tp->snd_una))
				tp->snd_nxt = tp->snd_una;
		   tp->t_timer[TCPT_REXMT] = 0; 
		}
		
		tp->irs = ti->ti_seq;
		tcp_rcvseqinit(tp);
		tp->t_flags |= TF_ACKNOW;
      TRACE("ack now, tp flags=%d", tp->t_flags);

      // XXX: remove second test.
		if (tiflags & TH_ACK /*&& SEQ_GT(tp->snd_una, tp->iss)*/) {
			g_tcpstat.tcps_connects++;
			soisconnected(so);
         TRACE("change tcp state to TCPS_ESTABLISHED,"
               " state=%d, tp_flags=%d", tp->t_state, tp->t_flags);
			tp->t_state = TCPS_ESTABLISHED;

			// Do window scaling on this connection?
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
				(TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->snd_scale = tp->requested_s_scale;
				tp->rcv_scale = tp->request_r_scale;
			}
			tcp_reass(tp, (struct tcpiphdr *)0, (usn_mbuf_t *)0);

			// if we didn't have to retransmit the SYN,
			// use its rtt as our initial srtt & rtt var.
			if (tp->t_rtt)
				tcp_xmit_timer(tp, tp->t_rtt);
		} else {
         TRACE("change tcp state to TCPS_SYN_RECEIVED, state=%d, tp_flags=%d", 
               tp->t_state, tp->t_flags);
			tp->t_state = TCPS_SYN_RECEIVED;
         // tcp event
         usnet_tcpin_ewakeup(so, USN_TCP_IN, USN_TCPST_SYN_RECEIVED, 0);
      }

trimthenstep6:

		// Advance ti->ti_seq to correspond to first data byte.
		// If data, trim to stay within window,
		// dropping FIN if necessary.
		ti->ti_seq++;
		if (ti->ti_len > tp->rcv_wnd) {
			todrop = ti->ti_len - tp->rcv_wnd;
			m_adj(m, -todrop);
			ti->ti_len = tp->rcv_wnd;
			tiflags &= ~TH_FIN;
			g_tcpstat.tcps_rcvpackafterwin++;
			g_tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = ti->ti_seq - 1;
		tp->rcv_up = ti->ti_seq;
		goto step6;
	}

	// States other than LISTEN or SYN_SENT.
	// First check timestamp, if present.
	// Then check that at least some bytes of segment are within 
	// receive window.  If segment begins before rcv_nxt,
	// drop leading data (and SYN); if nothing left, just ack.
	// 
	// RFC 1323 PAWS: If we have a timestamp reply on this segment
	// and it's less than ts_recent, drop it.
	if (ts_present && (tiflags & TH_RST) == 0 && tp->ts_recent &&
	    TSTMP_LT(ts_val, tp->ts_recent)) {
		// Check to see if ts_recent is over 24 days old.
		if ((int)(g_tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			// Invalidate ts_recent.  If this segment updates
			// ts_recent, the age will be reset later and ts_recent
			// will get a valid value.  If it does not, setting
			// ts_recent to zero will at least satisfy the
			// requirement that zero be placed in the timestamp
			// echo reply when ts_recent isn't valid.  The
			// age isn't reset until we get a valid ts_recent
			// because we don't want out-of-order segments to be
			// dropped when ts_recent is old.
			tp->ts_recent = 0;
		} else {
			g_tcpstat.tcps_rcvduppack++;
			g_tcpstat.tcps_rcvdupbyte += ti->ti_len;
			g_tcpstat.tcps_pawsdrop++;
			goto dropafterack;
		}
	}

	todrop = tp->rcv_nxt - ti->ti_seq;
	if (todrop > 0) {
		if (tiflags & TH_SYN) {
			tiflags &= ~TH_SYN;
			ti->ti_seq++;
			if (ti->ti_urp > 1) 
				ti->ti_urp--;
			else
				tiflags &= ~TH_URG;
			todrop--;
		}
      if ( todrop >= ti->ti_len || 
           ( todrop == ti->ti_len && (tiflags & TH_FIN ) == 0 ) ) {
         // Any valid FIN must be to the left of the window.
         // At this point the FIN must be a duplicate or
         // out of sequence; drop it.
         tiflags &= ~TH_FIN;
         // Send an ACK to resynchronize and drop any data.
         // But keep on processing for RST or ACK.
         tp->t_flags |= TF_ACKNOW;
         TRACE("send ack now to resync, tp_flags=%d", tp->t_flags);
         todrop = ti->ti_len;
         g_tcpstat.tcps_rcvdupbyte += ti->ti_len;
         g_tcpstat.tcps_rcvduppack++;
      } else {
         g_tcpstat.tcps_rcvpartduppack++;
         g_tcpstat.tcps_rcvpartdupbyte += ti->ti_len;
      }

		m_adj(m, todrop);
		ti->ti_seq += todrop;
		ti->ti_len -= todrop;
		if (ti->ti_urp > todrop)
			ti->ti_urp -= todrop;
		else {
			tiflags &= ~TH_URG;
			ti->ti_urp = 0;
		}
	}

	// If new data are received on a connection after the
	// user processes are gone, then RST the other end.
	if ((so->so_state & USN_NOFDREF) && 
	    tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) {
		tp = tcp_close(tp);
		g_tcpstat.tcps_rcvafterclose++;
		goto dropwithreset;
	}


	// If segment ends after window, drop trailing data
	// (and PUSH and FIN); if nothing left, just ACK.
	todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
	if (todrop > 0) {
		g_tcpstat.tcps_rcvpackafterwin++;
		if (todrop >= ti->ti_len) {
			g_tcpstat.tcps_rcvbyteafterwin += ti->ti_len;

			// If a new connection request is received
			// while in TIME_WAIT, drop the old connection
			// and start over if the sequence numbers
			// are above the previous ones.
			if (tiflags & TH_SYN &&
			    tp->t_state == TCPS_TIME_WAIT &&
			    SEQ_GT(ti->ti_seq, tp->rcv_nxt)) {
				iss = tp->snd_nxt + TCP_ISSINCR;
				tp = tcp_close(tp);
				goto findpcb;
			}

			// If window is closed can only take segments at
			// window edge, and have to drop data and PUSH from
			// incoming segments.  Continue processing, but
			// remember to ack.  Otherwise, drop segment
			// and ack.
			if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				g_tcpstat.tcps_rcvwinprobe++;
			} else
				goto dropafterack;
		} else
			g_tcpstat.tcps_rcvbyteafterwin += todrop;
		m_adj(m, -todrop);
		ti->ti_len -= todrop;
		tiflags &= ~(TH_PUSH|TH_FIN);
	}

   // Check for a valid timestamp and record it (replaces the earlier update above).
   if (ts_present && TSTMP_GEQ(ts_val, tp->ts_recent) &&
         SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) ) {
		tp->ts_recent_age = g_tcp_now;
		tp->ts_recent = ts_val;
   }

	// If the RST bit is set examine the state:
	//    SYN_RECEIVED STATE:
	//	If passive open, return to LISTEN state.
	//	If active open, inform user that connection was refused.
	//    ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
	//	Inform user that connection was reset, and close tcb.
	//    CLOSING, LAST_ACK, TIME_WAIT STATES
	//	Close the tcb.
	if (tiflags&TH_RST) switch (tp->t_state) {

	case TCPS_SYN_RECEIVED:
		so->so_error = ECONNREFUSED;
		goto close;

	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
		so->so_error = ECONNRESET;
close:
      DEBUG("change tcp state to TCPS_CLOSED, state=%d", tp->t_state);
		tp->t_state = TCPS_CLOSED;
      // tcp event
      usnet_tcpin_ewakeup(so, USN_TCP_IN, USN_TCPST_CLOSED, 0);
		g_tcpstat.tcps_drops++;
		tp = tcp_close(tp);
		goto drop;

	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
	case TCPS_TIME_WAIT:
		tp = tcp_close(tp);
		goto drop;
	}

	// If a SYN is in the window, then this is an
	// error and we send an RST and drop the connection.
	if (tiflags & TH_SYN) {
		tp = tcp_drop(tp, ECONNRESET);
		goto dropwithreset;
	}

	// If the ACK bit is off we drop the segment and return.
	if ((tiflags & TH_ACK) == 0)
		goto drop;

	// Ack processing.
	switch (tp->t_state) {

	// In SYN_RECEIVED state if the ack ACKs our SYN then enter
	// ESTABLISHED state and continue processing, otherwise
	// send an RST.
	case TCPS_SYN_RECEIVED:
		if (SEQ_GT(tp->snd_una, ti->ti_ack) ||
		    SEQ_GT(ti->ti_ack, tp->snd_max))
			goto dropwithreset;
		g_tcpstat.tcps_connects++;

      DEBUG("change tcp state to TCPS_ESTABLISHED, state=%d", tp->t_state);
		tp->t_state = TCPS_ESTABLISHED;
		soisconnected(so);

		// Do window scaling?
		if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			(TF_RCVD_SCALE|TF_REQ_SCALE)) {
			tp->snd_scale = tp->requested_s_scale;
			tp->rcv_scale = tp->request_r_scale;
		}
		tcp_reass(tp, (struct tcpiphdr *)0, (usn_mbuf_t *)0);
		tp->snd_wl1 = ti->ti_seq - 1;
		// fall into ...

	// In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	// ACKs.  If the ack is in the range
	//	tp->snd_una < ti->ti_ack <= tp->snd_max
	// then advance tp->snd_una to ti->ti_ack and drop
	// data from the retransmission queue.  If this ACK reflects
	// more up to date window information we update our window information.
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
	case TCPS_TIME_WAIT:

		if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) {
			if (ti->ti_len == 0 && tiwin == tp->snd_wnd) {
				g_tcpstat.tcps_rcvdupack++;
				// If we have outstanding data (other than
				// a window probe), this is a completely
				// duplicate ack (ie, window info didn't
				// change), the ack is the biggest we've
				// seen and we've seen exactly our rexmt
				// threshold of them, assume a packet
				// has been dropped and retransmit it.
				// Kludge snd_nxt & the congestion
				// window so we send only this one
				// packet.
				//
				// We know we're losing at the current
				// window size so do congestion avoidance
				// (set ssthresh to half the current window
				// and pull our congestion window back to
				// the new ssthresh).
				//
				// Dup acks mean that packets have left the
				// network (they're now cached at the receiver) 
				// so bump cwnd by the amount in the receiver
				// to keep a constant cwnd packets in the
				// network.
				if (tp->t_timer[TCPT_REXMT] == 0 ||
				    ti->ti_ack != tp->snd_una)
					tp->t_dupacks = 0;
				else if (++tp->t_dupacks == g_tcprexmtthresh) {
               // congestion avoidance
					tcp_seq onxt = tp->snd_nxt;
					u_int win =
					    min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;

					if (win < 2)
						win = 2;
					tp->snd_ssthresh = win * tp->t_maxseg;
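					// Worked example (illustrative): with snd_wnd=32768,
					// snd_cwnd=16384 and t_maxseg=1460, win becomes
					// min(32768,16384)/2/1460 = 5 segments, so
					// snd_ssthresh is set to 5*1460 = 7300 bytes.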
					tp->t_timer[TCPT_REXMT] = 0;
					tp->t_rtt = 0;
					tp->snd_nxt = ti->ti_ack;
					tp->snd_cwnd = tp->t_maxseg;
					tcp_output(tp);
					tp->snd_cwnd = tp->snd_ssthresh +
					       tp->t_maxseg * tp->t_dupacks;
					if (SEQ_GT(onxt, tp->snd_nxt))
						tp->snd_nxt = onxt;
					goto drop;
				} else if (tp->t_dupacks > g_tcprexmtthresh) {
					tp->snd_cwnd += tp->t_maxseg;
					tcp_output(tp);
					goto drop;
				}
			} else
				tp->t_dupacks = 0;
			break;
		}

		// If the congestion window was inflated to account
		// for the other side's cached packets, retract it.
		if (tp->t_dupacks > g_tcprexmtthresh &&
		    tp->snd_cwnd > tp->snd_ssthresh)
			tp->snd_cwnd = tp->snd_ssthresh;
		tp->t_dupacks = 0;
		if (SEQ_GT(ti->ti_ack, tp->snd_max)) {
			g_tcpstat.tcps_rcvacktoomuch++;
			goto dropafterack;
		}
		acked = ti->ti_ack - tp->snd_una;
		g_tcpstat.tcps_rcvackpack++;
		g_tcpstat.tcps_rcvackbyte += acked;

		// If we have a timestamp reply, update smoothed
		// round trip time.  If no timestamp is present but
		// transmit timer is running and timed sequence
		// number was acked, update smoothed round trip time.
		// Since we now have an rtt measurement, cancel the
		// timer backoff (cf., Phil Karn's retransmit alg.).
		// Recompute the initial retransmit timer.
		if (ts_present)
			tcp_xmit_timer(tp, g_tcp_now-ts_ecr+1);
		else if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
			tcp_xmit_timer(tp,tp->t_rtt);

		// If all outstanding data is acked, stop retransmit
		// timer and remember to restart (more output or persist).
		// If there is more data to be acked, restart retransmit
		// timer, using current (possibly backed-off) value.
		if (ti->ti_ack == tp->snd_max) {
			tp->t_timer[TCPT_REXMT] = 0;
         DEBUG("change needoutput to 1");
			needoutput = 1;
         tp->t_flags |= TF_NEEDOUTPUT;
		} else if (tp->t_timer[TCPT_PERSIST] == 0)
			tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;

		// When new data is acked, open the congestion window.
		// If the window gives us less than ssthresh packets
		// in flight, open exponentially (maxseg per packet).
		// Otherwise open linearly: maxseg per window
		// (maxseg * (maxseg / cwnd) per packet).
		{
		   u_int cw = tp->snd_cwnd;
	   	u_int incr = tp->t_maxseg;

	   	if (cw > tp->snd_ssthresh)
	   		incr = incr * incr / cw;
   		tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
		}
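		// Worked example (illustrative): with t_maxseg=1460 and
		// snd_cwnd=14600 (above snd_ssthresh), incr becomes
		// 1460*1460/14600 = 146 bytes per ACK; over the ~10 ACKs of one
		// window that adds roughly one maxseg per RTT (linear growth).
		// Below ssthresh the full 1460 is added per ACK (exponential).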

		if (acked > so->so_snd->sb_cc) {
			tp->snd_wnd -= so->so_snd->sb_cc;
         DEBUG("drop all so_snd buffer, drop_bytes=%d, acked=%d", 
               so->so_snd->sb_cc, acked);
			sbdrop(so->so_snd, (int)so->so_snd->sb_cc);
			ourfinisacked = 1;
		} else {
         DEBUG("drop so_snd buffer, drop_bytes=%d, len=%d", acked, so->so_snd->sb_cc);
			sbdrop(so->so_snd, acked);
			tp->snd_wnd -= acked;
			ourfinisacked = 0;
		}
		//if (so->so_snd->sb_flags & SB_NOTIFY) {
			sowwakeup(so);
         usnet_tcpin_wwakeup(so, USN_TCP_IN, USN_TCPEV_WRITE, 0);
      //}

		tp->snd_una = ti->ti_ack;
		if (SEQ_LT(tp->snd_nxt, tp->snd_una))
			tp->snd_nxt = tp->snd_una;

		switch (tp->t_state) {

		// In FIN_WAIT_1 STATE in addition to the processing
		// for the ESTABLISHED state if our FIN is now acknowledged
		// then enter FIN_WAIT_2.
		case TCPS_FIN_WAIT_1:
			if (ourfinisacked) {
				// If we can't receive any more
				// data, then closing user can proceed.
				// Starting the timer is contrary to the
				// specification, but if we don't get a FIN
				// we'll hang forever.
				if (so->so_state & USN_CANTRCVMORE) {
					soisdisconnected(so);
					tp->t_timer[TCPT_2MSL] = g_tcp_maxidle;
				}
            DEBUG("change tcp state to TCPS_FIN_WAIT_2, state=%d", tp->t_state);
				tp->t_state = TCPS_FIN_WAIT_2;
            usnet_tcpin_ewakeup(so, USN_TCP_IN, USN_TCPST_FIN_WAIT2, 0);
			}
			break;

		// In CLOSING STATE in addition to the processing for
		// the ESTABLISHED state if the ACK acknowledges our FIN
		// then enter the TIME-WAIT state, otherwise ignore
		// the segment.
		case TCPS_CLOSING:
			if (ourfinisacked) {
            DEBUG("change tcp state to TCPS_TIME_WAIT, state=%d", tp->t_state);
				tp->t_state = TCPS_TIME_WAIT;
            usnet_tcpin_ewakeup(so, USN_TCP_IN, USN_TCPST_TIME_WAIT, 0);
				tcp_canceltimers(tp);
				tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
				soisdisconnected(so);
			}
			break;
		
		// In LAST_ACK, we may still be waiting for data to drain
		// and/or to be acked, as well as for the ack of our FIN.
		// If our FIN is now acknowledged, delete the TCB,
		// enter the closed state and return.
		case TCPS_LAST_ACK:
			if (ourfinisacked) {
				tp = tcp_close(tp);
				goto drop;
			}
			break;


		// In TIME_WAIT state the only thing that should arrive
		// is a retransmission of the remote FIN.  Acknowledge
		// it and restart the finack timer.
		case TCPS_TIME_WAIT:
			tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
			goto dropafterack;
		}
	}

step6:

	// Update window information.
	// Don't look at window if no ACK: TAC's send garbage on first SYN.
	if ((tiflags & TH_ACK) &&
	    (SEQ_LT(tp->snd_wl1, ti->ti_seq) || 
        (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) ||
	     (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd) ))  )) {
		// keep track of pure window updates
		if (ti->ti_len == 0 &&
		    tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd)
			g_tcpstat.tcps_rcvwinupd++;
		tp->snd_wnd = tiwin;
		tp->snd_wl1 = ti->ti_seq;
		tp->snd_wl2 = ti->ti_ack;
		if (tp->snd_wnd > tp->max_sndwnd)
			tp->max_sndwnd = tp->snd_wnd;
      DEBUG("change needoutput to 1");
      tp->t_flags |= TF_NEEDOUTPUT;
		needoutput = 1;
	}

	
	// Process segments with URG.
	if ((tiflags & TH_URG) && ti->ti_urp &&
	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {

		// This is a kludge, but if we receive and accept
		// random urgent pointers, we'll crash in
		// soreceive.  It's hard to imagine someone
		// actually wanting to send this much urgent data.
		if (ti->ti_urp + so->so_rcv->sb_cc > g_sb_max) {
			ti->ti_urp = 0;			// XXX
			tiflags &= ~TH_URG;		// XXX
			goto dodata;			// XXX
		}

		// If this segment advances the known urgent pointer,
		// then mark the data stream.  This should not happen
		// in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
		// a FIN has been received from the remote side. 
		// In these states we ignore the URG.
		//
		// According to RFC961 (Assigned Protocols),
		// the urgent pointer points to the last octet
		// of urgent data.  We continue, however,
		// to consider it to indicate the first octet
		// of data past the urgent section as the original 
		// spec states (in one of two places).
		if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) {
			tp->rcv_up = ti->ti_seq + ti->ti_urp;
			so->so_oobmark = so->so_rcv->sb_cc +
			    (tp->rcv_up - tp->rcv_nxt) - 1;
			if (so->so_oobmark == 0)
				so->so_state |= USN_RCVATMARK;
			sohasoutofband(so);
         // send async event to app threads.
         usnet_tcpin_ewakeup(so, USN_TCP_IN, USN_TCPEV_OUTOFBOUND, 0);
			tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
		}

		// Remove out of band data so doesn't get presented to user.
		// This can happen independent of advancing the URG pointer,
		// but if two URG's are pending at once, some out-of-band
		// data may creep in... ick.
		if (ti->ti_urp <= ti->ti_len
#ifdef SO_OOBINLINE
		     && (so->so_options & SO_OOBINLINE) == 0
#endif
		     )
			tcp_pulloutofband(so, ti, m);
	} else
		// If no out of band data is expected,
		// pull receive urgent pointer along
		// with the receive window.
		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
			tp->rcv_up = tp->rcv_nxt;
dodata:							// XXX
#ifdef DUMP_PAYLOAD
   DEBUG("Handle data");
   dump_chain(m,"tcp");
#endif

	// Process the segment text, merging it into the TCP sequencing queue,
	// and arranging for acknowledgment of receipt if necessary.
	// This process logically involves adjusting tp->rcv_wnd as data
	// is presented to the user (this happens in tcp_usrreq.c,
	// case PRU_RCVD).  If a FIN has already been received on this
	// connection then we just ignore the text.
	if ((ti->ti_len || (tiflags&TH_FIN)) &&
	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
		TCP_REASS(tp, ti, m, so, tiflags);
		// Note the amount of data that peer has sent into
		// our window, in order to estimate the sender's
		// buffer size.
		len = so->so_rcv->sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
	} else {
		usn_free_cmbuf(m);
		tiflags &= ~TH_FIN;
	}

	// If FIN is received ACK the FIN and let the user know
	// that the connection is closing.
	if (tiflags & TH_FIN) {
		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
			socantrcvmore(so);
			tp->t_flags |= TF_ACKNOW;
         TRACE("ack FIN now, tp flags=%d", tp->t_flags);
			tp->rcv_nxt++;
		}
		switch (tp->t_state) {

		// In SYN_RECEIVED and ESTABLISHED STATES
		// enter the CLOSE_WAIT state.
		case TCPS_SYN_RECEIVED:
		case TCPS_ESTABLISHED:
         TRACE("change tcp state to TCPS_CLOSE_WAIT, state=%d", tp->t_state);
			tp->t_state = TCPS_CLOSE_WAIT;
         soewakeup(so, 0);
         usnet_tcpin_ewakeup(so, USN_TCP_IN, USN_TCPST_CLOSE_WAIT, 0);
			break;

		// If still in FIN_WAIT_1 STATE FIN has not been acked so
		// enter the CLOSING state.
		case TCPS_FIN_WAIT_1:
         TRACE("change tcp state to TCPS_CLOSING, state=%d", tp->t_state);
			tp->t_state = TCPS_CLOSING;
         usnet_tcpin_ewakeup(so, USN_TCP_IN, USN_TCPST_CLOSING, 0);
			break;

		// In FIN_WAIT_2 state enter the TIME_WAIT state,
		// starting the time-wait timer, turning off the other 
		// standard timers.
		case TCPS_FIN_WAIT_2:
         TRACE("change tcp state to TCPS_TIME_WAIT, state=%d", tp->t_state);
			tp->t_state = TCPS_TIME_WAIT;
			tcp_canceltimers(tp);
			tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
			soisdisconnected(so);
         usnet_tcpin_ewakeup(so, USN_TCP_IN, USN_TCPST_TIME_WAIT, 0);
			break;

		// In TIME_WAIT state restart the 2 MSL time_wait timer.
		case TCPS_TIME_WAIT:
			tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
			break;
		}
	}
	if (so->so_options & SO_DEBUG) {
      TRACE("tcp trace, so_options=%d", so->so_options);
		tcp_trace(TA_INPUT, ostate, tp, &g_tcp_saveti, 0);
   }

	// Return any desired output.
	//if (needoutput || (tp->t_flags & TF_ACKNOW)){
	if (tp->t_flags & TF_NEEDOUTPUT || (tp->t_flags & TF_ACKNOW)){
      TRACE("ack now or need to ouput, tp->t_flags=%d", tp->t_flags);
		tcp_output(tp);
   }
	return;

dropafterack:
   TRACE("dropafterack");
	// Generate an ACK dropping incoming segment if it occupies
	// sequence space, where the ACK reflects our state.
	if (tiflags & TH_RST)
		goto drop;
	usn_free_cmbuf(m);
	tp->t_flags |= TF_ACKNOW;
   TRACE("ack now, tp flags=%d", tp->t_flags);
	tcp_output(tp);
	return;

dropwithreset:
   TRACE("dropwithreset");
	// Generate a RST, dropping incoming segment.
	// Make ACK acceptable to originator of segment.
	// Don't bother to respond if destination was broadcast/multicast.
#define USN_MULTICAST(i) (((u_int)(i) & 0xf0000000) == 0xe0000000)
	if ((tiflags & TH_RST) || m->flags & (BUF_BCAST|BUF_MCAST) ||
	    USN_MULTICAST(ntohl(ti->ti_dst.s_addr)))
		goto drop;
   
	if (tiflags & TH_ACK)
		tcp_respond(tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
	else {
		if (tiflags & TH_SYN)
			ti->ti_len++;
		tcp_respond(tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
		    TH_RST|TH_ACK);
	}
	// destroy temporarily created socket
	if (dropsocket)
		soabort(so);
	return;

drop:
   TRACE("drop");
	// Drop space held by incoming segment and return.
	if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) {
      TRACE("tcp trace: drop a socket");
		tcp_trace(TA_DROP, ostate, tp, &g_tcp_saveti, 0);
   }
	usn_free_cmbuf(m);
	// destroy temporarily created socket
	if (dropsocket)
		soabort(so);
	return;
}
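// A minimal standalone sketch (added for illustration, not part of the stack
// above) of the RFC 1323 PAWS test done on the slow path after the
// "States other than LISTEN or SYN_SENT" comment: a non-RST segment whose
// echoed timestamp is older than ts_recent is dropped (after ACKing), unless
// ts_recent itself is more than TCP_PAWS_IDLE (~24 days) old and therefore no
// longer trustworthy.  TSTMP_LT/TSTMP_GEQ and TCP_PAWS_IDLE are assumed to be
// the same definitions used by the code above.
static int
tcp_paws_reject(u_long ts_val, u_long ts_recent, u_long ts_recent_age,
	u_long now)
{
	if (ts_recent == 0)
		return 0;	// no timestamp recorded yet
	if (TSTMP_GEQ(ts_val, ts_recent))
		return 0;	// the timestamp did not move backwards
	if ((int)(now - ts_recent_age) > TCP_PAWS_IDLE)
		return 0;	// ts_recent is stale; do not trust it
	return 1;		// PAWS failure: drop the segment after ACKing
}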
Exemplo n.º 9
0
/*
 * Insert a new blk into the array of SACK blks at the receiver.
 *
 * Parameters:
 *	sack_blk_t *head: pointer to the array of SACK blks.
 *	tcp_seq begin: starting seq num of the new blk.
 *	tcp_seq end: ending seq num of the new blk.
 *	int32_t *num: (referenced) total num of SACK blks on the list.
 */
void
tcp_sack_insert(sack_blk_t *head, tcp_seq begin, tcp_seq end, int32_t *num)
{
	int32_t	i, j, old_num, new_num;
	sack_blk_t tmp[MAX_SACK_BLK - 1];

	/* The array is empty, just add the new one. */
	if (*num == 0) {
		head[0].begin = begin;
		head[0].end = end;
		*num = 1;
		return;
	}

	/*
	 * Check for overlap.  There are five cases.
	 *
	 * 1. there is no overlap with any other SACK blks.
	 * 2. new SACK blk is completely contained in another blk.
	 * 3. tail part of new SACK blk overlaps with another blk.
	 * 4. head part of new SACK blk overlaps with another blk.
	 * 5. new SACK blk completely contains another blk.
	 *
	 * Use tmp to hold old SACK blks.  After the loop, copy them back
	 * to head.
	 */
	old_num = *num;
	if (old_num > MAX_SACK_BLK - 1) {
		old_num = MAX_SACK_BLK - 1;
	}
	new_num = old_num;
	j = 0;
	for (i = 0; i < old_num; i++) {
		if (SEQ_LT(end, head[i].begin) || SEQ_GT(begin, head[i].end)) {
			/* Case 1: continue to check. */
			tmp[j].begin = head[i].begin;
			tmp[j].end = head[i].end;
			j++;
			continue;
		} else if (SEQ_GEQ(begin, head[i].begin) &&
		    SEQ_LEQ(end, head[i].end)) {
			/* Case 2: re-insert the old blk to the head. */
			begin = head[i].begin;
			end = head[i].end;
		} else if (SEQ_LEQ(end, head[i].end) &&
		    SEQ_GEQ(end, head[i].begin)) {
			/*
			 * Case 3: Extend the new blk, remove the old one
			 * and continue to check.
			 */
			end = head[i].end;
		} else if (SEQ_GEQ(begin, head[i].begin) &&
		    SEQ_LEQ(begin, head[i].end)) {
			/* Case 4 */
			begin = head[i].begin;
		}
		/*
		 * Common code for all cases except the first one, which
		 * copies the original SACK blk into the tmp storage.  Other
		 * cases remove the original SACK blk by not copying into
		 * tmp storage.
		 */
		new_num--;
	}

	head[0].begin = begin;
	head[0].end = end;
	for (i = 0; i < new_num; i++) {
		head[i+1].begin = tmp[i].begin;
		head[i+1].end = tmp[i].end;
	}
	*num = new_num + 1;
}
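/*
 * Illustrative usage sketch (added here, not part of the original source):
 * exercises the overlap handling above.  Assumes the sack_blk_t, tcp_seq and
 * MAX_SACK_BLK definitions from the surrounding headers.
 */
static void
tcp_sack_insert_demo(void)
{
	sack_blk_t head[MAX_SACK_BLK];
	int32_t num = 0;

	tcp_sack_insert(head, 100, 200, &num);	/* num == 1: {100,200} */
	tcp_sack_insert(head, 300, 400, &num);	/* num == 2: newest blk at head[0] */
	tcp_sack_insert(head, 150, 350, &num);	/* bridges both blks: merged into
						 * head[0] = {100,400}, num == 1 */
}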
Exemplo n.º 10
0
/**
 *  \brief insert a SACK range
 *
 *  \param le left edge in host order
 *  \param re right edge in host order
 *
 *  \retval 0 all is good
 *  \retval -1 error
 */
static int StreamTcpSackInsertRange(TcpStream *stream, uint32_t le, uint32_t re) {
    SCLogDebug("le %u, re %u", le, re);
#ifdef DEBUG
    StreamTcpSackPrintList(stream);
#endif

    /* if to the left of last_ack then ignore */
    if (SEQ_LT(re, stream->last_ack)) {
        SCLogDebug("too far left. discarding");
        goto end;
    }
    /* if to the right of the tcp window then ignore */
    if (SEQ_GT(le, (stream->last_ack + stream->window))) {
        SCLogDebug("too far right. discarding");
        goto end;
    }
    if (stream->sack_head != NULL) {
        StreamTcpSackRecord *rec;

        for (rec = stream->sack_head; rec != NULL; rec = rec->next) {
            SCLogDebug("rec %p, le %u, re %u", rec, rec->le, rec->re);

            if (SEQ_LT(le, rec->le)) {
                SCLogDebug("SEQ_LT(le, rec->le)");
                if (SEQ_LT(re, rec->le)) {
                    SCLogDebug("SEQ_LT(re, rec->le)");
                    // entirely before, prepend
                    StreamTcpSackRecord *stsr = StreamTcpSackRecordAlloc();
                    if (unlikely(stsr == NULL)) {
                        SCReturnInt(-1);
                    }
                    stsr->le = le;
                    stsr->re = re;

                    stsr->next = stream->sack_head;
                    stream->sack_head = stsr;
                    goto end;
                } else if (SEQ_EQ(re, rec->le)) {
                    SCLogDebug("SEQ_EQ(re, rec->le)");
                    // starts before, ends on rec->le, expand
                    rec->le = le;
                } else if (SEQ_GT(re, rec->le)) {
                    SCLogDebug("SEQ_GT(re, rec->le)");
                    // starts before, ends beyond rec->le
                    if (SEQ_LEQ(re, rec->re)) {
                        SCLogDebug("SEQ_LEQ(re, rec->re)");
                        // ends before rec->re, expand
                        rec->le = le;
                    } else { // implied if (re > rec->re)
                        SCLogDebug("implied if (re > rec->re), le set to %u", rec->re);
                        le = rec->re;
                        continue;
                    }
                }
            } else if (SEQ_EQ(le, rec->le)) {
                SCLogDebug("SEQ_EQ(le, rec->le)");
                if (SEQ_LEQ(re, rec->re)) {
                    SCLogDebug("SEQ_LEQ(re, rec->re)");
                    // new record fully overlapped
                    SCReturnInt(0);
                } else { // implied re > rec->re
                    SCLogDebug("implied re > rec->re");
                    if (rec->next != NULL) {
                        if (SEQ_LEQ(re, rec->next->le)) {
                            rec->re = re;
                            goto end;
                        } else {
                            rec->re = rec->next->le;
                            le = rec->next->le;
                            SCLogDebug("le is now %u", le);
                            continue;
                        }
                    } else {
                        rec->re = re;
                        goto end;
                    }
                }
            } else { // implied (le > rec->le)
                SCLogDebug("implied (le > rec->le)");
                if (SEQ_LT(le, rec->re)) {
                    SCLogDebug("SEQ_LT(le, rec->re))");
                    // new record fully overlapped
                    if (SEQ_GT(re, rec->re)) {
                        SCLogDebug("SEQ_GT(re, rec->re)");

                        if (rec->next != NULL) {
                            if (SEQ_LEQ(re, rec->next->le)) {
                                rec->re = re;
                                goto end;
                            } else {
                                rec->re = rec->next->le;
                                le = rec->next->le;
                                continue;
                            }
                        } else {
                            rec->re = re;
                            goto end;
                        }
                    }

                    SCLogDebug("new range fully overlapped");
                    SCReturnInt(0);
                } else if (SEQ_EQ(le, rec->re)) {
                    SCLogDebug("here");
                    // new record fully overlapped
                    //int r = StreamTcpSackInsertRange(stream, rec->re+1, re);
                    //SCReturnInt(r);
                    le = rec->re;
                    continue;
                } else { /* implied le > rec->re */
                    SCLogDebug("implied le > rec->re");
                    if (rec->next == NULL) {
                        SCLogDebug("rec->next == NULL");
                        StreamTcpSackRecord *stsr = StreamTcpSackRecordAlloc();
                        if (unlikely(stsr == NULL)) {
                            SCReturnInt(-1);
                        }
                        stsr->le = le;
                        stsr->re = re;
                        stsr->next = NULL;

                        stream->sack_tail->next = stsr;
                        stream->sack_tail = stsr;
                        goto end;
                    } else {
                        SCLogDebug("implied rec->next != NULL");
                        if (SEQ_LT(le, rec->next->le) && SEQ_LT(re, rec->next->le)) {
                            SCLogDebug("SEQ_LT(le, rec->next->le) && SEQ_LT(re, rec->next->le)");
                            StreamTcpSackRecord *stsr = StreamTcpSackRecordAlloc();
                            if (unlikely(stsr == NULL)) {
                                SCReturnInt(-1);
                            }
                            stsr->le = le;
                            stsr->re = re;
                            stsr->next = rec->next;
                            rec->next = stsr;

                        } else if (SEQ_LT(le, rec->next->le) && SEQ_GEQ(re, rec->next->le)) {
                            SCLogDebug("SEQ_LT(le, rec->next->le) && SEQ_GEQ(re, rec->next->le)");
                            StreamTcpSackRecord *stsr = StreamTcpSackRecordAlloc();
                            if (unlikely(stsr == NULL)) {
                                SCReturnInt(-1);
                            }
                            stsr->le = le;
                            stsr->re = rec->next->le;
                            stsr->next = rec->next;
                            rec->next = stsr;

                            le = rec->next->le;
                        }
                    }
                }
            }
        }
    } else {
        SCLogDebug("implied empty list");
        StreamTcpSackRecord *stsr = StreamTcpSackRecordAlloc();
        if (unlikely(stsr == NULL)) {
            SCReturnInt(-1);
        }
        stsr->le = le;
        stsr->re = re;
        stsr->next = NULL;

        stream->sack_head = stsr;
        stream->sack_tail = stsr;
    }

    StreamTcpSackPruneList(stream);
end:
    SCReturnInt(0);
}
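/**
 *  \brief sanity-check sketch (added for illustration, not part of Suricata):
 *         after StreamTcpSackInsertRange() the list is expected to stay
 *         sorted by left edge, with non-empty, non-overlapping records
 *         (touching records, re == next->le, are allowed).
 *
 *  \retval 1 list looks consistent
 *  \retval 0 invariant violated
 */
static int StreamTcpSackListIsSane(const TcpStream *stream) {
    const StreamTcpSackRecord *rec;

    for (rec = stream->sack_head; rec != NULL; rec = rec->next) {
        if (!SEQ_LT(rec->le, rec->re))
            return 0;                           /* empty or inverted range */
        if (rec->next != NULL && SEQ_GT(rec->re, rec->next->le))
            return 0;                           /* overlaps the next record */
    }
    return 1;
}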
Exemplo n.º 11
0
int tcp_input(struct ifnet * __if, struct iphdr * iph, 
			   struct tcphdr * th, int len)
{
	struct tcp_listen_pcb * mux;
	struct tcp_pcb * tp;
#if (ENABLE_NET_TCP_CHECKSUM)
	unsigned int sum;
#endif
	int ti_len;
	int acked = 0;
	int ourfinisacked = 0;
	int needoutput = 0;
	unsigned int optlen;
	int tiflags;
	int todrop;
	uint32_t snd_una;
	uint32_t snd_nxt;
	uint32_t snd_max;
	uint32_t ti_seq;
	uint32_t ti_ack;
	int rcv_wnd;
	int tiwin;
	int hdrlen;
	uint8_t * data;
	int ret;

#if (ENABLE_TCPDUMP)
	tcp_dump(iph, th, TCPDUMP_RX);
#endif

	/* get TCP options, if any */
	optlen = ((th->th_off << 2) - sizeof(struct tcphdr));
	hdrlen = sizeof(struct tcphdr) + optlen;

	data = (uint8_t *)&th->th_opt[optlen];
	ti_len = len - hdrlen;
	
#if (ENABLE_NET_TCP_CHECKSUM)
	/* initialize checksum */
	sum = htons(len) + (IPPROTO_TCP << 8);
	sum = in_chksum(sum, &iph->saddr,  8);
	sum = in_chksum(sum, th,  hdrlen);

	if (ti_len) {
		sum = in_chksum(sum, data, ti_len);
	}

	if (sum != 0x0000ffff) {
		DCC_LOG3(LOG_WARNING, "checksum error: 0x%04x hdrlen=%d, len=%d", 
				 sum, hdrlen, len);
		TCP_PROTO_STAT_ADD(rx_err, 1);
		goto drop;
	}
#endif

	tiflags = th->th_flags;
	/* convert TCP protocol specific fields to host format */
	tiwin = ntohs(th->th_win);
	ti_seq = ntohl(th->th_seq);
	ti_ack = ntohl(th->th_ack);

	TCP_PROTO_STAT_ADD(rx_ok, 1);

	/* Search the active list first */
	if ((tp = tcp_active_lookup(iph->saddr, th->th_sport, 
								iph->daddr, th->th_dport)) == NULL) {
		/* lookup into listening pcb list */
		if ((mux = tcp_listen_lookup(iph->saddr, th->th_sport, 
									 iph->daddr, th->th_dport)) == NULL) {
			DCC_LOG(LOG_WARNING, "invalid peer ???");
			goto dropwithreset;
		}

		if ((tiflags & TH_ACK)) {
			DCC_LOG(LOG_WARNING, "listen ACK ?");
			goto dropwithreset;
		}

		if (ti_len != 0) {
			DCC_LOG(LOG_WARNING, "ti_len != 0");
			goto dropwithreset;
		}

		/* Completion of Passive Open
		   Ref.: TCP/IP Illustrated Volume 2, pg. 942 */
		if (!(tiflags & TH_SYN)) {
			DCC_LOG(LOG_WARNING, "listen !SYN ?");
			goto drop;
		}
	
		/* In the LISTEN state, we check for incoming SYN segments,
		   create a new PCB, and respond with a SYN|ACK. */
		if ((tiflags & TH_RST)) {
			DCC_LOG(LOG_WARNING, "listen RST?");
			goto drop;
		}

		if ((tp = tcp_passive_open(mux, iph, th, optlen)) == NULL) {
			DCC_LOG(LOG_WARNING, "tcp_passive_open()");
			goto dropwithreset;
		}

		/* schedule output */
		tcp_output_sched(tp);

		/* packet handled */
		return 0;
	}

	DCC_LOG1(LOG_MSG, "<%05x> active", (int)tp);

	snd_una = tp->snd_seq;
	snd_nxt = tp->snd_seq + tp->snd_off;
	snd_max = tp->snd_seq + tp->snd_max;

 	/* Remove acknowledged bytes from the send buffer */
	/* Wakeup processes waiting on send buffer */

	/* Segment received on a connection.
	   Reset the idle detection timer 
	   Ref.: TCP/IP Illustrated Volume 2, pg. 932  */
	tp->t_conn_tmr = tcp_idle_det_tmo;
	if (tp->t_flags & TF_IDLE) {
		/* exits from the idle state */
		tp->t_flags &= ~TF_IDLE;
		DCC_LOG1(LOG_INFO, "<%05x> IDLE exit", (int)tp);		
	}

#if 0
	/* Process options; we don't need to check whether the socket is
	   in the LISTEN state, because only active (non-LISTENING) sockets
	   will actually fall into this code.
	   XXX: options after the connection is established ???
	 */
	if (optlen)
		tcp_parse_options(tp, th, th->th_opt, optlen);
#endif

	/* Ref.: TCP/IP Illustrated Volume 2, pg. 934  */
#if (TCP_ENABLE_HEADER_PREDICTION)
	if ((tp->t_state == TCPS_ESTABLISHED) &&
		(tiflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK &&
		(ti_seq == tp->rcv_nxt) && 
		(tiwin) && 
		(tiwin == tp->snd_wnd) && 
		(snd_nxt == snd_max)) {

		if (ti_len == 0) {

			if (SEQ_GT(ti_ack, snd_una) &&
				SEQ_LEQ(ti_ack, snd_max)) {
				acked = ti_ack - snd_una;
			
				DCC_LOG(LOG_INFO, "header prediction, ACK ...");

				mbuf_queue_trim(&tp->snd_q, acked);
				snd_una = ti_ack;

				tp->snd_seq = snd_una;
				tp->snd_off = snd_nxt - tp->snd_seq;
				tp->snd_max = snd_max - tp->snd_seq;

				if (snd_una == snd_max) {
					tp->t_rxmt_tmr = 0;
					tp->t_rxmt_cnt = 0;
					DCC_LOG(LOG_INFO, "acked all data, rxmt tmr stopped");
				} else {
					if (tp->t_rxmt_tmr == 0) {
						DCC_LOG(LOG_INFO, 
								"not all data acked restart rxmt tmr");
						tp->t_rxmt_tmr = tcp_rxmtintvl[tp->t_rxmt_cnt / 2];
					}
				}

				thinkos_cond_broadcast(tp->t_cond);

				if (tp->snd_q.len) {
					/* schedule output */
					tcp_output_sched(tp);
				}

				return 0;
			}
		} else {
			if ((ti_ack == snd_una) && 
				ti_len <= (tcp_maxrcv - tp->rcv_q.len)) {
				int len;

				DCC_LOG1(LOG_INFO, "header prediction, data (%d)", ti_len);

				/* append data */
				len = mbuf_queue_add(&tp->rcv_q, data, ti_len);
				tp->rcv_nxt += len;
				thinkos_cond_broadcast(tp->t_cond);

				if (len != ti_len) {
					DCC_LOG1(LOG_WARNING, "<%05x> no more mbufs", (int)tp);
					tp->t_flags |= TF_ACKNOW;
					/* schedule output */
					tcp_output_sched(tp);
				} else {
					tp->t_flags |= TF_DELACK;
				}

				return 0;
			 }
		}
	}

#endif /* TCP_ENABLE_HEADER_PREDICTION */

	/* Slow path input processing
	   Ref.: TCP/IP Illustrated Volume 2, pg. 941  */

	/* TODO: Drop TCP, IP headers and TCP options. 
		Well, only if these structures were dynamic allocated... */
	
	if (ti_len == 0) {
		DCC_LOG(LOG_INFO, "slow path ACK");
	} else {
		DCC_LOG1(LOG_INFO, "slow path (%d)", ti_len);
	}

	/* Calculate the amount of space in receive window,
	   and then do TCP input processing.
	   Receive window is amount of space in rcv queue,
	   but not less than the advertised window.
	   Ref.: TCP/IP Illustrated Volume 2, pg. 941  */
	{
		int win;
		
		/* space left in the input queue */
		win = tcp_maxrcv - tp->rcv_q.len;
		
		if (win <= 0) {
			win = 0;
			DCC_LOG(LOG_INFO, "receive buffer full!");
		}


//		rcv_wnd = MAX(win, tp->rcv_adv_wnd);
		rcv_wnd = win;

		DCC_LOG3(LOG_INFO, "adv_wnd=%d rcv_wnd=%d win=%d", 
				tp->rcv_adv_wnd, rcv_wnd, win);
	} 

	if (tp->t_state == TCPS_SYN_SENT) {
		/* response to an active open. 
		   Ref.: TCP/IP Illustrated Volume 2, pg. 947  */

		/* Common processing for receipt of SYN. 
		   Ref.: TCP/IP Illustrated Volume 2, pg. 950 */
		if ((tiflags & TH_RST)) {
			goto close;
		}

		if (!(tiflags & TH_SYN)) {
			DCC_LOG(LOG_WARNING, "SYN_SENT SYN ?");
			/* TODO: reset */
			goto close_and_reset;
		}

		if (!(tiflags & TH_ACK)) {
			DCC_LOG(LOG_WARNING, "SYN_SENT ACK ?");
			/* TODO: reset */
			goto close_and_reset;
		}

		if (ti_len != 0) {
			DCC_LOG(LOG_WARNING, "ti_len != 0");
			/* TODO: reset */
			goto close_and_reset;
		}

		/* update the send sequence */
		tp->snd_seq++;
		if (tp->snd_seq != ti_ack) {
			DCC_LOG3(LOG_WARNING, "<%05x> tp->snd_seq(%d) != ti_ack(%d)",
					 (int)tp, tp->snd_seq, ti_ack);
			/* TODO: reset */
			goto close_and_reset;
		}
		tp->snd_off--;
		tp->snd_max--;
//		tp->snd_off = 0;
//		tp->snd_max = 0;

		if (optlen)
			tcp_parse_options(tp, th, th->th_opt, optlen);

		/* Advance ti_seq to correspond to the first data byte. */
		ti_seq++;
		if (ti_len > rcv_wnd) {
			DCC_LOG3(LOG_WARNING, "<%05x> ti_len(%d) > rcv_wnd(%d)", 
				(int)tp, ti_len, rcv_wnd);
		/* TODO: if data, trim to stay within window. */
			ti_len = rcv_wnd;
		}

		/* update the sequence number */
		tp->rcv_nxt = ti_seq;

		/* update the window size */
		tp->snd_wnd = ntohs(th->th_win);

		tp->t_state = TCPS_ESTABLISHED;
		DCC_LOG1(LOG_INFO, "<%05x> [ESTABLISHED]", (int)tp);
		/* TODO: initialization of receive urgent pointer
		tcp->rcv_up = ti_seq; */
		/* XXX: */ 
		tp->t_flags |= TF_ACKNOW;
		thinkos_cond_broadcast(tp->t_cond);

		goto step6;

close_and_reset:
		tp->t_state = TCPS_CLOSED;
		pcb_move((struct pcb *)tp, &__tcp__.active, &__tcp__.closed);
		DCC_LOG1(LOG_INFO, "<%05x> [CLOSED]", (int)tp);

		/* XXX: discard the data */
		mbuf_queue_free(&tp->snd_q);
		mbuf_queue_free(&tp->rcv_q);

		/* notify the upper layer */
		thinkos_cond_broadcast(tp->t_cond);

		goto dropwithreset;	
	}

/* States other than LISTEN or SYN_SENT 
   First check timestamp, if present.
   Then check that at least some bytes of segment are within
   receive window.  If segment begins before rcv_nxt,
   drop leading data (and SYN); if nothing is left, just ACK. */

	/* Trim Segment so Data is Within Window
	   Ref.: TCP/IP Illustrated Volume 2, pg. 954 */
	todrop = tp->rcv_nxt - ti_seq;
	if (todrop > 0) {
		if (tiflags & TH_SYN) {
			DCC_LOG(LOG_INFO, "SYN");
			tiflags &= ~TH_SYN;
			ti_seq++;
			todrop--;
		}
		if ((todrop > ti_len) || 
		   ((todrop == ti_len) && ((tiflags & TH_FIN) == 0))) {
			tiflags &= ~TH_FIN;
			tp->t_flags |= TF_ACKNOW;
			todrop = ti_len;		
		}

		DCC_LOG4(LOG_WARNING, "<%05x> drop: len=%d drop=%d rem=%d!", 
			(int)tp, ti_len, todrop, ti_len - todrop);

		/* adjust the data pointer */
		data += todrop;

		ti_seq += todrop;
		ti_len -= todrop;

		/* TODO: adjust the urgent pointer */
	} 

	/* FIXME: only reset the connection if there is no longer an
	   application to handle the incoming data (half-close) */
	if ((tp->t_state > TCPS_FIN_WAIT_1) && (ti_len)) { 
		DCC_LOG1(LOG_INFO, "<%05x> segment received after FIN", (int)tp);
		/* TODO: stat */
		goto dropwithreset;	
	}

	/* If segment ends after window, drop trailing data
	   and (PUSH and FIN); if nothing left, just ACK.
	   Ref.: TCP/IP Illustrated Volume 2, pg. 958 */
	todrop = (ti_seq + ti_len) - (tp->rcv_nxt + rcv_wnd);

	DCC_LOG4(LOG_INFO, "ti_seq=%u ti_len=%d rcv_nxt=%u rcv_wnd=%d", 
			ti_seq,  ti_len, tp->rcv_nxt, rcv_wnd);
	/* */

	if (todrop > 0) {
//		TCP_LOG(tp, "tcp_input: trailing data drop");
		if (todrop >= ti_len) {

	   		/* 
			 * If a new connection request is received 
			 * while in TIME_WAIT, drop the old connection ...
			 * Ref.: TCP/IP Illustrated Volume 2, pg. 958 
			if ((tiflags & TH_SYN) && (tp->t_state == TCPS_TIMEWAIT) &&
			   (SEQ_GT(ti_seq, tp->rcv_nxt))) {
				__tcp__.iss += tcp_issincr;
				tcp_rst(tp);
				goto findpcb;
			} */

			if ((rcv_wnd == 0) && (ti_seq == tp->rcv_nxt)) {
				tp->t_flags |= TF_ACKNOW;
			} else
				goto dropafterack;
		}

		DCC_LOG2(LOG_WARNING, "<%05x> data drop: %d!", (int)tp, todrop);
		ti_len -= todrop;
		tiflags &= ~(TH_PSH | TH_FIN);
	}

	/* If the RST bit is set examine the state: ...
	   Ref.: TCP/IP Illustrated Volume 2, pg. 964 */
	if ((tiflags & TH_RST)) {
		DCC_LOG1(LOG_WARNING, "<%05x> RST received", (int)tp);
		switch(tp->t_state) {
		case TCPS_SYN_RCVD:
//			tp->errno = ECONNREFUSED;
			goto close;
		case TCPS_ESTABLISHED:
		case TCPS_CLOSE_WAIT:
//			tp->errno = ECONNRESET;
close:
			/* discard the data */
			mbuf_queue_free(&tp->snd_q);
			mbuf_queue_free(&tp->rcv_q);

			tp->t_state = TCPS_CLOSED;
			pcb_move((struct pcb *)tp, &__tcp__.active, &__tcp__.closed);
			DCC_LOG1(LOG_INFO, "<%05x> [CLOSED]", (int)tp);

			/* notify the upper layer */
			thinkos_cond_broadcast(tp->t_cond);
			/* PCBs in the close state should be cleared by the application */
			goto drop;

		case TCPS_FIN_WAIT_1:
		case TCPS_FIN_WAIT_2:
		case TCPS_CLOSING:
		case TCPS_LAST_ACK:
		case TCPS_TIME_WAIT:
			/* Our side was already closed */
			tcp_pcb_free(tp);
			goto drop;
		}
	}

	/* If a SYN is in the window, then this is an 
	   error and we send an RST and drop the connection.
	   Ref.: TCP/IP Illustrated Volume 2, pg. 965 */
	if ((tiflags & TH_SYN)) {
		DCC_LOG1(LOG_WARNING, "<%05x> the SYN bit is set inside the window", 
			(int)tp);
		goto dropwithreset;
	}

	/* If the ACK bit is off we drop the segment and return. */
	if ((!(tiflags & TH_ACK))) {
		DCC_LOG1(LOG_WARNING, "<%05x> the ACK bit is off", (int)tp);
		goto drop;
	}
	
/*
 * ACK processing.
 * Ref.: TCP/IP Illustrated Volume 2, pg. 969 
 *
 */

	DCC_LOG4(LOG_INFO, "ack=%u una=%u nxt=%u max=%u", 
			 ti_ack, snd_una, snd_nxt, snd_max);

	switch(tp->t_state) {
	case TCPS_SYN_RCVD:
		if (SEQ_GT(snd_una, ti_ack) || 
			SEQ_GT(ti_ack, snd_max)) {
			DCC_LOG1(LOG_WARNING, 
					 "<%05x> ti_ack < snd_una || snd_max < ti_ack", 
					 (int)tp);
			goto dropwithreset;
		}
		tp->t_state = TCPS_ESTABLISHED;
		tp->snd_off--;
		tp->snd_max--;
		DCC_LOG1(LOG_INFO, "<%05x> SYN ackd [ESTABLISHED]", (int)tp);
		/* notify the upper layer*/
//		thinkos_cond_signal(tp->t_cond);

		/* TODO: tcp reassembly
		tcp_reass(tp); */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
	case TCPS_TIME_WAIT:
		/* TODO: tcp reassembly
		   tcp_reass(tp); */
		if (SEQ_LEQ(ti_ack, snd_una)) {
			/* TODO: check for completely duplicated ACKs.
			   Ref.: TCP/IP Illustrated Volume 2, pg. 971 */
			if ((ti_len == 0) && (tiwin == tp->snd_wnd)) {
				if ((tp->t_rxmt_tmr == 0) || ti_ack != snd_una) {
//					dupacks = 0;
				} else {
					DCC_LOG2(LOG_INFO, "duplicated ACK. ti_ack=%u snd_una=%u", 
							 ti_ack, snd_una);
				}
			} else {
//				dupacks = 0;
			}
			break;
		}

		/* Check out of range ACK */
		/*  Ref.: TCP/IP Illustrated Volume 2, pg. 974 */
		if (SEQ_GT(ti_ack, snd_max)) {
			/* TODO:
			   tcpstat.tcps_rcvacktoomuch++;
			 */
			DCC_LOG3(LOG_WARNING, "(%04x) out of range ACK. "
				"th_ack=%u > snd_max=%u !", 
				(int)tp, ti_ack, snd_max);
			goto dropafterack;	
		}

		acked = ti_ack - snd_una;

		/* TODO:
		   tcpstat.tcps_rcvackpack++;
		   tcpstat.tcps_rcvackbyte += acked;		
		 */

		DCC_LOG1(LOG_INFO, "acked=%d", acked);

		/* If all outstanding data is acked, stop retransmit timer else
		   restarts it ....
		   Ref.: TCP/IP Illustrated Volume 2, pg. 976 */
		if (ti_ack == snd_max) {
			tp->t_rxmt_tmr = 0;
			tp->t_rxmt_cnt = 0;
			needoutput = 1;
			DCC_LOG(LOG_INFO, "acked all data, rxmt tmr stopped");
		} else {
			/* TODO: persistent timer */
//			if (tp->t_persist_tmr == 0) {
				DCC_LOG(LOG_INFO, "not all data acked restart rxmt tmr");
				tp->t_rxmt_tmr = tcp_rxmtintvl[tp->t_rxmt_cnt / 2];
//			}
		}

		/* TODO:
		   tcpstat.tcps_rcvackpack++;
		   tcpstat.tcps_rcvackbyte += acked;		
		 */

		/* TODO: remove acknowledged data from send buffer 
		   Ref.: TCP/IP Illustrated Volume 2, pg. 978 */
		/* FIXME: send buffer bytes count */
		if (acked > tp->snd_q.len) {
			mbuf_queue_trim(&tp->snd_q, tp->snd_q.len);
			ourfinisacked = 1;
		} else {
			/* TODO: estimate the send window */
			mbuf_queue_trim(&tp->snd_q, acked);
			ourfinisacked = 0;
		}

		/* awaken a thread waiting on the send buffer ... */
		thinkos_cond_broadcast(tp->t_cond);

		snd_una = ti_ack;

		if (SEQ_LT(snd_nxt, snd_una)) {
			snd_nxt = snd_una;
		}

		tp->snd_seq = snd_una;
		tp->snd_off = snd_nxt - tp->snd_seq;
		tp->snd_max = snd_max - tp->snd_seq;

		DCC_LOG4(LOG_INFO, "<%05x> snd_seq=%u snd_max=%u snd_q.len=%d", 
			(int)tp, tp->snd_seq, snd_max, tp->snd_q.len); 

		switch(tp->t_state) {
		case TCPS_FIN_WAIT_1:
			if (ourfinisacked) {
				/* FIXME: If we can't receive any more data..
				   Ref.: TCP/IP Illustrated Volume 2, pg. 979 */
				tp->t_conn_tmr = 4 * tcp_msl;
				tp->t_state = TCPS_FIN_WAIT_2;
				DCC_LOG1(LOG_INFO, "<%05x> [FIN_WAIT_2]", (int)tp);
			}
			break;
		case TCPS_CLOSING:
			if (ourfinisacked) {
				mbuf_queue_free(&tp->snd_q);
				mbuf_queue_free(&tp->rcv_q);
				tp->t_state = TCPS_TIME_WAIT;
				DCC_LOG1(LOG_INFO, "<%05x> [TIME_WAIT]", (int)tp);
				tp->t_rxmt_tmr = 0;
				tp->t_conn_tmr = 2 * tcp_msl;
				DCC_LOG1(LOG_INFO, "stop rxmt tmr, start 2MSL tmr: %d",
						 tp->t_conn_tmr);
			}
			break;
		case TCPS_LAST_ACK:
			if (ourfinisacked) {
				tcp_pcb_free(tp);
				goto drop;
			}
			break;

		case TCPS_TIME_WAIT:
			/* restart the finack timer 
			   Ref.: TCP/IP Illustrated Volume 2, pg. 981 */
			tp->t_conn_tmr = 2 * tcp_msl;
			goto dropafterack;
		}
		break;
	}

	DCC_LOG4(LOG_INFO, "<%05x> recvd=%d acked=%d rcv_q.len=%d", (int)tp, 
		ti_len, acked, tp->rcv_q.len);
step6:
	/* Update window information 
	   Ref.: TCP/IP Illustrated Volume 2, pg. 982 */
	DCC_LOG(LOG_MSG, "setp6");
	
//	if ((tiflags & TH_ACK) && (tiwin > tp->snd_wnd)) {
	if ((tiflags & TH_ACK) && (tiwin != tp->snd_wnd)) {
		/* Keep track of pure window updates */
		/* TODO: TCP Statistics */
		/* TODO: Update window information */
		DCC_LOG1(LOG_INFO, "window update, win=%d", tiwin);
		tp->snd_wnd = tiwin;
		needoutput = 1;
	}

	/* TODO: Urgent mode processing */
	/* dodata: process the segment text,
	   merging it into the TCP sequencing queue, ...
	   Ref.: TCP/IP Illustrated Volume 2, pg. 988 */
	if ((ti_len || (tiflags & TH_FIN)) && 
		TCPS_HAVERCVDFIN(tp->t_state) == 0) {

		if ((ti_seq == tp->rcv_nxt) && (tp->t_state == TCPS_ESTABLISHED)) {

			/* append data */
			int n;

			tp->t_flags |= TF_DELACK;

			n = mbuf_queue_add(&tp->rcv_q, data, ti_len);
			if (n != ti_len) {
				DCC_LOG2(LOG_WARNING, "no more mbufs, %d != %d", n, ti_len);
			}
			ti_len = n;

			tp->rcv_nxt += ti_len;
			/* TODO: statistics */

			tiflags &= TH_FIN;

//			if (tp->rcv_q.len == ti_len) {
//				DCC_LOG3(LOG_INFO, "<%05x> rcvd %d, signaling %d ...", 
//					(int)tp, ti_len, tp->t_cond);
			/* 
			 * notify the upper layer of the data arrival...
			 */
			thinkos_cond_signal(tp->t_cond);
//			} else {
//				DCC_LOG2(LOG_INFO, "<%05x> rcvd %d", (int)tp, ti_len);
//			}

		} else {
			/* TODO: half-close */
			/* TODO: reassembly */
//			m = mlink_free(m);
			if (tp->t_state == TCPS_ESTABLISHED) {
//				DCC_LOG(LOG_WARNING, "out of order, drop!");
				DCC_LOG(LOG_WARNING, "out of order, drop");
				TCP_PROTO_STAT_ADD(rx_drop, 1);
			}
			tp->t_flags |= TF_ACKNOW;
		}
	} else {
		DCC_LOG(LOG_INFO, "!!!!!!!!!");
		tiflags &= ~TH_FIN;
	}

	/* FIN Processing */
	if (tiflags & TH_FIN) {
		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
			tp->t_flags |= TF_ACKNOW;
			tp->rcv_nxt++;
		}
		switch(tp->t_state) {
		case TCPS_SYN_RCVD:
		case TCPS_ESTABLISHED:
			tp->t_state = TCPS_CLOSE_WAIT;
			DCC_LOG1(LOG_INFO, "<%05x> [CLOSE_WAIT]", (int)tp);
			/* notify the application that our peer 
			   has closed its side. Sockets: marks 
			   the socket as write-only */
			if (tp->rcv_q.len == 0) {
				thinkos_cond_broadcast(tp->t_cond);
			}
			break;
		case TCPS_FIN_WAIT_1:
			tp->t_state = TCPS_CLOSING;
			DCC_LOG1(LOG_INFO, "<%05x> [CLOSING]", (int)tp);
			break;
		case TCPS_FIN_WAIT_2:
			mbuf_queue_free(&tp->rcv_q);
			mbuf_queue_free(&tp->snd_q);
			tp->t_state = TCPS_TIME_WAIT;
			DCC_LOG1(LOG_INFO, "<%05x> [TIME_WAIT]", (int)tp);
			tp->t_rxmt_tmr = 0;
			tp->t_conn_tmr = 2 * tcp_msl;
			DCC_LOG1(LOG_INFO, "stop rxmt tmr, start 2MSL tmr: %d",
					 tp->t_conn_tmr);
			break;
		case TCPS_TIME_WAIT:
			/* restart the counter */
			tp->t_conn_tmr = 2 * tcp_msl;
			break;
		}
	}

	/* Final Processing */
	if (needoutput || (tp->t_flags & TF_ACKNOW)) {
		if (needoutput) {
			DCC_LOG(LOG_INFO, "needoutput, call tcp_out.");
		}
		if (tp->t_flags & TF_ACKNOW) {
			DCC_LOG(LOG_INFO, "ACKNOW set, call tcp_out.");
		}
		/* schedule output */
		tcp_output_sched(tp);
	}
	return 0;

dropafterack:
	DCC_LOG1(LOG_INFO, "<%05x> drop and ACK", (int)tp);

	if (tiflags & TH_RST)
		goto drop;

	tp->t_flags |= TF_ACKNOW;
	/* schedule output */
	tcp_output_sched(tp);
	return 0;

dropwithreset:
	DCC_LOG1(LOG_TRACE, "<%05x> drop and RST", (int)tp);

	ret = 0;
	/* TODO: check for a broadcast/multicast */
	if (!(tiflags & TH_RST)) {
		if (tiflags & TH_ACK) {
			ret = tcp_respond(iph, th, 0, ti_ack, TH_RST);
		} else if (tiflags & TH_SYN) {
			ti_len++;
			ret = tcp_respond(iph, th, ti_seq + ti_len, 0, TH_ACK | TH_RST);
		}
	}
	TCP_PROTO_STAT_ADD(rx_drop, 1);
	return ret;

drop:
	DCC_LOG(LOG_TRACE, "drop");
	TCP_PROTO_STAT_ADD(rx_drop, 1);

	return 0;
}
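/*
 * For reference (a sketch, not copied from any of the stacks above): the
 * SEQ_LT/SEQ_LEQ/SEQ_GT/SEQ_GEQ/SEQ_EQ comparisons used throughout these
 * examples are conventionally defined on 32-bit sequence numbers as signed
 * differences, so that they remain correct across sequence-number wrap.
 */
#include <stdint.h>

#define SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)
#define SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)
#define SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)
#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)
#define SEQ_EQ(a, b)	((a) == (b))

/* Example: with a = 10 and b = 0xfffffff0 (sent just before the sequence
 * space wrapped), (int32_t)(a - b) == 26 > 0, so SEQ_GT(a, b) holds even
 * though a < b as plain unsigned integers. */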