Example #1
static struct tcp_sock *tcp_listen_child_sock(struct tcp_sock *tsk,
						struct tcp_segment *seg)
{
	struct sock *newsk = tcp_alloc_sock(tsk->sk.protocol);
	struct tcp_sock *newtsk = tcpsk(newsk);
	tcp_set_state(newtsk, TCP_SYN_RECV);
	newsk->sk_saddr = seg->iphdr->ip_dst;
	newsk->sk_daddr = seg->iphdr->ip_src;
	newsk->sk_sport = seg->tcphdr->dst;
	newsk->sk_dport = seg->tcphdr->src;
	/* add to the established hash table so the third ACK can find the child */
	if (tcp_hash(&newtsk->sk) < 0) {
		free(newsk);
		return NULL;
	}
	/*
	 * Why take a reference on the parent?
	 * To keep the parent from being released unexpectedly,
	 * e.g. when the parent is interrupted by the user
	 *      while the child is still pending in the three-way handshake.
	 */
	newtsk->parent = get_tcp_sock(tsk);
	/* FIXME: add limit to listen queue */
	list_add(&newtsk->list, &tsk->listen_queue);
	/* reference for being listed into parent queue */
	return get_tcp_sock(newtsk);
}
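
The two get_tcp_sock() calls above follow a common pattern: one reference pins the parent listener for the duration of the handshake, the other accounts for the child sitting on the parent's listen queue. Below is a minimal standalone C sketch of that pattern, using hypothetical names rather than tapip's actual API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted socket; a real stack would use atomic counters. */
struct ref_sock {
	int refcnt;
	struct ref_sock *parent;
};

struct ref_sock *sock_get(struct ref_sock *s)
{
	s->refcnt++;
	return s;
}

void sock_put(struct ref_sock *s)
{
	if (--s->refcnt == 0) {
		if (s->parent)
			sock_put(s->parent); /* last child ref releases its hold on the parent */
		free(s);
	}
}

/* On SYN: pin the parent for the handshake, take an extra ref for the queue. */
struct ref_sock *child_create(struct ref_sock *parent)
{
	struct ref_sock *child = calloc(1, sizeof(*child));
	if (!child)
		return NULL;
	child->refcnt = 1;                 /* creator's reference */
	child->parent = sock_get(parent);  /* keep parent alive during the 3WHS */
	return sock_get(child);            /* reference for sitting on the listen queue */
}

int main(void)
{
	struct ref_sock *parent = calloc(1, sizeof(*parent));
	struct ref_sock *child;

	if (!parent)
		return 1;
	parent->refcnt = 1;                /* the listener itself */
	child = child_create(parent);
	if (!child) {
		sock_put(parent);
		return 1;
	}
	printf("parent refcnt=%d, child refcnt=%d\n", parent->refcnt, child->refcnt);
	sock_put(child);                   /* child leaves the listen queue */
	sock_put(child);                   /* owner drops the child; parent ref released too */
	sock_put(parent);                  /* listener closes */
	return 0;
}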
Example #2
/* Now we will only be called whenever we need to do
 * something, but we must be sure to process all of the
 * sockets that need it.
 */
void net_timer (unsigned long data)
{
	struct sock *sk = (struct sock*)data;
	int why = sk->timeout;

	/* Only process if socket is not in use. */
	if (atomic_read(&sk->sock_readers)) {
		/* Try again later. */ 
		mod_timer(&sk->timer, jiffies+HZ/20);
		return;
	}

	/* Always see if we need to send an ack. */
	if (sk->tp_pinfo.af_tcp.delayed_acks && !sk->zapped) {
		sk->prot->read_wakeup (sk);
		if (!sk->dead)
			sk->data_ready(sk,0);
	}

	/* Now we need to figure out why the socket was on the timer. */
	switch (why) {
		case TIME_DONE:
			/* If the socket hasn't been closed off, re-try a bit later. */
			if (!sk->dead) {
				net_reset_timer(sk, TIME_DONE, TCP_DONE_TIME);
				break;
			}

			if (sk->state != TCP_CLOSE) {
				printk (KERN_DEBUG "non CLOSE socket in time_done\n");
				break;
			}
			destroy_sock (sk);
			break;

		case TIME_DESTROY:
			/* We've waited for a while for all the memory associated with
			 * the socket to be freed.
			 */
			destroy_sock(sk);
			break;

		case TIME_CLOSE:
			/* We've waited long enough, close the socket. */
			tcp_set_state(sk, TCP_CLOSE);
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead)
				sk->state_change(sk);
			net_reset_timer (sk, TIME_DONE, TCP_DONE_TIME);
			break;

		default:
			/* I want to see these... */
			printk ("net_timer: timer expired - reason %d is unknown\n", why);
			break;
	}
}
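
net_reset_timer() above re-arms the per-socket timer with a reason code that the next net_timer() run reads back from sk->timeout. A rough sketch of such a helper, using only identifiers that appear in the code above (an illustrative reconstruction, not the verbatim kernel source):

/* Sketch only: re-arm the socket timer with a new reason and deadline. */
void net_reset_timer(struct sock *sk, int why, unsigned long when)
{
	sk->timeout = why;                      /* read back as 'why' in net_timer() */
	mod_timer(&sk->timer, jiffies + when);  /* expires 'when' jiffies from now */
}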
Example #3
END_TEST

/** Create an ESTABLISHED pcb and check if receive callback is called if a segment
 * overlapping rcv_nxt is received */
START_TEST(test_tcp_recv_inseq_trim)
{
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf* p;
  char data[PBUF_POOL_BUFSIZE*2];
  u16_t data_len;
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  const u32_t new_data_len = 40;
  LWIP_UNUSED_ARG(_i);

  /* initialize local vars */
  test_tcp_init_netif(&netif, &txcounters, &test_local_ip, &test_netmask);
  data_len = sizeof(data);
  memset(data, 0, sizeof(data));
  /* initialize counter struct */
  memset(&counters, 0, sizeof(counters));
  counters.expected_data_len = data_len;
  counters.expected_data = data;

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);

  /* create a segment (with an overlapping/old seqno so that the new data begins in the 2nd pbuf) */
  p = tcp_create_rx_segment(pcb, counters.expected_data, data_len, (u32_t)(0-(data_len-new_data_len)), 0, 0);
  EXPECT(p != NULL);
  if (p != NULL) {
    EXPECT(p->next != NULL);
    if (p->next != NULL) {
      EXPECT(p->next->next != NULL);
    }
  }
  if ((p != NULL) && (p->next != NULL) && (p->next->next != NULL)) {
    /* pass the segment to tcp_input */
    test_tcp_input(p, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 1);
    EXPECT(counters.recved_bytes == new_data_len);
    EXPECT(counters.err_calls == 0);
  }

  /* make sure the pcb is freed */
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 1);
  tcp_abort(pcb);
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
}
Example #4
END_TEST

/** Create an ESTABLISHED pcb and check if receive callback is called */
START_TEST(test_tcp_recv_inseq)
{
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf* p;
  char data[] = {1, 2, 3, 4};
  ip_addr_t remote_ip, local_ip, netmask;
  u16_t data_len;
  u16_t remote_port = 0x100, local_port = 0x101;
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  LWIP_UNUSED_ARG(_i);

  /* initialize local vars */
  memset(&netif, 0, sizeof(netif));
  IP_ADDR4(&local_ip, 192, 168, 1, 1);
  IP_ADDR4(&remote_ip, 192, 168, 1, 2);
  IP_ADDR4(&netmask,   255, 255, 255, 0);
  test_tcp_init_netif(&netif, &txcounters, &local_ip, &netmask);
  data_len = sizeof(data);
  /* initialize counter struct */
  memset(&counters, 0, sizeof(counters));
  counters.expected_data_len = data_len;
  counters.expected_data = data;

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &local_ip, &remote_ip, local_port, remote_port);

  /* create a segment */
  p = tcp_create_rx_segment(pcb, counters.expected_data, data_len, 0, 0, 0);
  EXPECT(p != NULL);
  if (p != NULL) {
    /* pass the segment to tcp_input */
    test_tcp_input(p, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 1);
    EXPECT(counters.recved_bytes == data_len);
    EXPECT(counters.err_calls == 0);
  }

  /* make sure the pcb is freed */
  EXPECT(lwip_stats.memp[MEMP_TCP_PCB].used == 1);
  tcp_abort(pcb);
  EXPECT(lwip_stats.memp[MEMP_TCP_PCB].used == 0);
}
Example #5
END_TEST

/** Create an ESTABLISHED pcb and check if receive callback is called */
START_TEST(test_tcp_recv_inseq)
{
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf* p;
  char data[] = {1, 2, 3, 4};
  u16_t data_len;
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  LWIP_UNUSED_ARG(_i);

  /* initialize local vars */
  test_tcp_init_netif(&netif, &txcounters, &test_local_ip, &test_netmask);
  data_len = sizeof(data);
  /* initialize counter struct */
  memset(&counters, 0, sizeof(counters));
  counters.expected_data_len = data_len;
  counters.expected_data = data;

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);

  /* create a segment */
  p = tcp_create_rx_segment(pcb, counters.expected_data, data_len, 0, 0, 0);
  EXPECT(p != NULL);
  if (p != NULL) {
    /* pass the segment to tcp_input */
    test_tcp_input(p, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 1);
    EXPECT(counters.recved_bytes == data_len);
    EXPECT(counters.err_calls == 0);
  }

  /* make sure the pcb is freed */
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 1);
  tcp_abort(pcb);
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
}
Example #6
END_TEST

/** Fill the TX window, then queue one byte beyond it.
 * If 'zero_window_probe_from_unsent' is set, the peer ACKs all data with a zero window
 * and the test checks that the zero-window probe carries the expected byte;
 * otherwise the test checks that no persist timer is started. */
static void test_tcp_tx_full_window_lost(u8_t zero_window_probe_from_unsent)
{
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf *p;
  ip_addr_t remote_ip, local_ip, netmask;
  u16_t remote_port = 0x100, local_port = 0x101;
  err_t err;
  u16_t sent_total, i;
  u8_t expected = 0xFE;

  for (i = 0; i < sizeof(tx_data); i++) {
    u8_t d = (u8_t)i;
    if (d == 0xFE) {
      d = 0xF0;
    }
    tx_data[i] = d;
  }
  if (zero_window_probe_from_unsent) {
    tx_data[TCP_WND] = expected;
  } else {
    tx_data[0] = expected;
  }

  /* initialize local vars */
  IP_ADDR4(&local_ip,  192, 168,   1, 1);
  IP_ADDR4(&remote_ip, 192, 168,   1, 2);
  IP_ADDR4(&netmask,   255, 255, 255, 0);
  test_tcp_init_netif(&netif, &txcounters, &local_ip, &netmask);
  memset(&counters, 0, sizeof(counters));
  memset(&txcounters, 0, sizeof(txcounters));

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &local_ip, &remote_ip, local_port, remote_port);
  pcb->mss = TCP_MSS;
  /* disable initial congestion window (we don't send a SYN here...) */
  pcb->cwnd = pcb->snd_wnd;

  /* send a full window (minus one packet) of TCP data in MSS-sized chunks */
  sent_total = 0;
  if ((TCP_WND - TCP_MSS) % TCP_MSS != 0) {
    u16_t initial_data_len = (TCP_WND - TCP_MSS) % TCP_MSS;
    err = tcp_write(pcb, &tx_data[sent_total], initial_data_len, TCP_WRITE_FLAG_COPY);
    EXPECT_RET(err == ERR_OK);
    err = tcp_output(pcb);
    EXPECT_RET(err == ERR_OK);
    EXPECT(txcounters.num_tx_calls == 1);
    EXPECT(txcounters.num_tx_bytes == initial_data_len + 40U);
    memset(&txcounters, 0, sizeof(txcounters));
    sent_total += initial_data_len;
  }
  for (; sent_total < (TCP_WND - TCP_MSS); sent_total += TCP_MSS) {
    err = tcp_write(pcb, &tx_data[sent_total], TCP_MSS, TCP_WRITE_FLAG_COPY);
    EXPECT_RET(err == ERR_OK);
    err = tcp_output(pcb);
    EXPECT_RET(err == ERR_OK);
    EXPECT(txcounters.num_tx_calls == 1);
    EXPECT(txcounters.num_tx_bytes == TCP_MSS + 40U);
    memset(&txcounters, 0, sizeof(txcounters));
  }
  EXPECT(sent_total == (TCP_WND - TCP_MSS));

  /* now ACK the packet before the first */
  p = tcp_create_rx_segment(pcb, NULL, 0, 0, 0, TCP_ACK);
  test_tcp_input(p, &netif);
  /* ensure this didn't trigger a retransmission */
  EXPECT(txcounters.num_tx_calls == 0);
  EXPECT(txcounters.num_tx_bytes == 0);

  EXPECT(pcb->persist_backoff == 0);
  /* send the last packet, now a complete window has been sent */
  err = tcp_write(pcb, &tx_data[sent_total], TCP_MSS, TCP_WRITE_FLAG_COPY);
  sent_total += TCP_MSS;
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  EXPECT(txcounters.num_tx_calls == 1);
  EXPECT(txcounters.num_tx_bytes == TCP_MSS + 40U);
  memset(&txcounters, 0, sizeof(txcounters));
  EXPECT(pcb->persist_backoff == 0);

  if (zero_window_probe_from_unsent) {
    /* ACK all data but close the TX window */
    p = tcp_create_rx_segment_wnd(pcb, NULL, 0, 0, TCP_WND, TCP_ACK, 0);
    test_tcp_input(p, &netif);
    /* ensure this didn't trigger any transmission */
    EXPECT(txcounters.num_tx_calls == 0);
    EXPECT(txcounters.num_tx_bytes == 0);
    EXPECT(pcb->persist_backoff == 1);
  }

  /* send one byte more (out of window) -> persist timer starts */
  err = tcp_write(pcb, &tx_data[sent_total], 1, TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  EXPECT(txcounters.num_tx_calls == 0);
  EXPECT(txcounters.num_tx_bytes == 0);
  memset(&txcounters, 0, sizeof(txcounters));
  if (!zero_window_probe_from_unsent) {
    /* no persist timer unless a zero window announcement has been received */
    EXPECT(pcb->persist_backoff == 0);
  } else {
    EXPECT(pcb->persist_backoff == 1);

    /* call tcp_timer some more times to let persist timer count up */
    for (i = 0; i < 4; i++) {
      test_tcp_tmr();
      EXPECT(txcounters.num_tx_calls == 0);
      EXPECT(txcounters.num_tx_bytes == 0);
    }

    /* this should trigger the zero-window-probe */
    txcounters.copy_tx_packets = 1;
    test_tcp_tmr();
    txcounters.copy_tx_packets = 0;
    EXPECT(txcounters.num_tx_calls == 1);
    EXPECT(txcounters.num_tx_bytes == 1 + 40U);
    EXPECT(txcounters.tx_packets != NULL);
    if (txcounters.tx_packets != NULL) {
      u8_t sent;
      u16_t ret;
      ret = pbuf_copy_partial(txcounters.tx_packets, &sent, 1, 40U);
      EXPECT(ret == 1);
      EXPECT(sent == expected);
    }
    if (txcounters.tx_packets != NULL) {
      pbuf_free(txcounters.tx_packets);
      txcounters.tx_packets = NULL;
    }
  }

  /* make sure the pcb is freed */
  EXPECT_RET(lwip_stats.memp[MEMP_TCP_PCB].used == 1);
  tcp_abort(pcb);
  EXPECT_RET(lwip_stats.memp[MEMP_TCP_PCB].used == 0);
}
Example #7
static int tcp_write_timeout(struct sock *sk)
{
	/*
	 *	Look for a 'soft' timeout.
	 */
	if ((sk->state == TCP_ESTABLISHED && sk->retransmits && !(sk->retransmits & 7))
		|| (sk->state != TCP_ESTABLISHED && sk->retransmits > TCP_RETR1)) 
	{
		/*
		 *	Attempt to recover if arp has changed (unlikely!) or
		 *	a route has shifted (not supported prior to 1.3).
		 */
		ip_rt_advice(&sk->ip_route_cache, 0);
	}
	
	/*
	 *	Have we tried to SYN too many times (repent repent 8))
	 *	NOTE: we must be careful to do this test for both
	 *	the SYN_SENT and SYN_RECV states, otherwise we take
	 *	23 minutes to timeout on the SYN_RECV state, which
	 *	leaves us (more) open to denial of service attacks
	 *	than we would like.
	 */
	 
	if (sk->retransmits > TCP_SYN_RETRIES
	&& (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV))
	{
		if(sk->err_soft)
			sk->err=sk->err_soft;
		else
			sk->err=ETIMEDOUT;
		sk->error_report(sk);
		del_timer(&sk->retransmit_timer);
		tcp_statistics.TcpAttemptFails++;	/* Is this right ??? - FIXME - */
		tcp_set_state(sk,TCP_CLOSE);
		/* Don't FIN, we got nothing back */
		return 0;
	}
	/*
	 *	Has it gone just too far ?
	 */
	if (sk->retransmits > TCP_RETR2) 
	{
		if(sk->err_soft)
			sk->err = sk->err_soft;
		else
			sk->err = ETIMEDOUT;
		sk->error_report(sk);
		del_timer(&sk->retransmit_timer);
		/*
		 *	Time wait the socket 
		 */
		if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2 || sk->state == TCP_CLOSING ) 
		{
			tcp_set_state(sk,TCP_TIME_WAIT);
			tcp_reset_msl_timer (sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		}
		else
		{
			/*
			 *	Clean up time.
			 */
			tcp_set_state(sk, TCP_CLOSE);
			return 0;
		}
	}
	return 1;
}
Example #8
/**
 * This is main body of the socket close function in Sync Sockets.
 *
 * inet_release() can sleep (as well as tcp_close()), so we make our own
 * non-sleepable socket closing.
 *
 * This function must be used only for data sockets.
 * Use standard sock_release() for listening sockets.
 *
 * In most cases it is called in softirq context and from ksoftirqd which
 * processes data from the socket (RSS and RPS distribute packets that way).
 *
 * Note: it used to be called in process context as well, at the time when
 * Tempesta starts or stops. That's not the case right now, but it may change.
 *
 * TODO In some cases we need to close socket aggressively w/o FIN_WAIT_2 state,
 * e.g. by sending RST. So we need to add second parameter to the function
 * which says how to close the socket.
 * One of the examples is rcl_req_limit() (it should reset connections).
 * See tcp_sk(sk)->linger2 processing in standard tcp_close().
 *
 * Called with locked socket.
 */
static void
ss_do_close(struct sock *sk)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	if (unlikely(!sk))
		return;
	SS_DBG("Close socket %p (%s): cpu=%d account=%d refcnt=%d\n",
	       sk, ss_statename[sk->sk_state], smp_processor_id(),
	       sk_has_account(sk), atomic_read(&sk->sk_refcnt));
	assert_spin_locked(&sk->sk_lock.slock);
	ss_sock_cpu_check(sk);
	BUG_ON(sk->sk_state == TCP_LISTEN);
	/* We must return immediately, so LINGER option is meaningless. */
	WARN_ON(sock_flag(sk, SOCK_LINGER));
	/* We don't support virtual containers, so TCP_REPAIR is prohibited. */
	WARN_ON(tcp_sk(sk)->repair);
	/* The socket must have atomic allocation mask. */
	WARN_ON(!(sk->sk_allocation & GFP_ATOMIC));

	/* The below is mostly copy-paste from tcp_close(). */
	sk->sk_shutdown = SHUTDOWN_MASK;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  tcp_hdr(skb)->fin;
		data_was_unread += len;
		SS_DBG("free rcv skb %p\n", skb);
		__kfree_skb(skb);
	}

	sk_mem_reclaim(sk);

	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	if (data_was_unread) {
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	}
	else if (tcp_close_state(sk)) {
		/* The code below is taken from tcp_send_fin(). */
		struct tcp_sock *tp = tcp_sk(sk);
		int mss_now = tcp_current_mss(sk);

		skb = tcp_write_queue_tail(sk);

		if (tcp_send_head(sk) != NULL) {
			/* Send FIN with data if we have any. */
			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
			TCP_SKB_CB(skb)->end_seq++;
			tp->write_seq++;
		}
		else {
			/* No data to send in the socket, allocate new skb. */
			skb = alloc_skb_fclone(MAX_TCP_HEADER,
					       sk->sk_allocation);
			if (!skb) {
				SS_WARN("can't send FIN due to bad alloc");
			} else {
				skb_reserve(skb, MAX_TCP_HEADER);
				tcp_init_nondata_skb(skb, tp->write_seq,
						     TCPHDR_ACK | TCPHDR_FIN);
				tcp_queue_skb(sk, skb);
			}
		}
		__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
	}

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * SS sockets are processed in softirq only,
	 * so backlog queue should be empty.
	 */
	WARN_ON(sk->sk_backlog.tail);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		return;

	if (sk->sk_state == TCP_FIN_WAIT2) {
		const int tmo = tcp_fin_time(sk);
		if (tmo > TCP_TIMEWAIT_LEN) {
			inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
		} else {
			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
			return;
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPABORTONMEMORY);
		}
	}
	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
		if (req != NULL)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
}
Example #9
/* this test uses 4 packets:
 * - data (len=TCP_MSS)
 * - FIN
 * - data after FIN (len=1) (invalid)
 * - 2nd FIN (invalid)
 *
 * the parameter 'delay_packet' is a bitmask that chooses which of these packets arrive ooseq
 */
static void test_tcp_recv_ooseq_double_FINs(int delay_packet)
{
  int i, k;
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf *p_normal_fin, *p_data_after_fin, *p, *p_2nd_fin_ooseq;
  struct netif netif;
  u32_t exp_rx_calls = 0, exp_rx_bytes = 0, exp_close_calls = 0, exp_oos_pbufs = 0, exp_oos_tcplen = 0;
  int first_dropped = 0xff;

  for(i = 0; i < (int)sizeof(data_full_wnd); i++) {
    data_full_wnd[i] = (char)i;
  }

  /* initialize local vars */
  test_tcp_init_netif(&netif, NULL, &test_local_ip, &test_netmask);
  /* initialize counter struct */
  memset(&counters, 0, sizeof(counters));
  counters.expected_data_len = TCP_WND;
  counters.expected_data = data_full_wnd;

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);
  pcb->rcv_nxt = 0x8000;

  /* create segments */
  p = tcp_create_rx_segment(pcb, &data_full_wnd[0], TCP_MSS, 0, 0, TCP_ACK);
  p_normal_fin = tcp_create_rx_segment(pcb, NULL, 0, TCP_MSS, 0, TCP_ACK|TCP_FIN);
  k = 1;
  p_data_after_fin = tcp_create_rx_segment(pcb, &data_full_wnd[TCP_MSS+1], k, TCP_MSS+1, 0, TCP_ACK);
  p_2nd_fin_ooseq = tcp_create_rx_segment(pcb, NULL, 0, TCP_MSS+1+k, 0, TCP_ACK|TCP_FIN);

  if(delay_packet & 1) {
    /* drop normal data */
    first_dropped = 1;
  } else {
    /* send normal data */
    test_tcp_input(p, &netif);
    exp_rx_calls++;
    exp_rx_bytes += TCP_MSS;
  }
  /* check if counters are as expected */
  check_rx_counters(pcb, &counters, exp_close_calls, exp_rx_calls, exp_rx_bytes, 0, exp_oos_pbufs, exp_oos_tcplen);

  if(delay_packet & 2) {
    /* drop FIN */
    if(first_dropped > 2) {
      first_dropped = 2;
    }
  } else {
    /* send FIN */
    test_tcp_input(p_normal_fin, &netif);
    if (first_dropped < 2) {
      /* already dropped packets, this one is ooseq */
      exp_oos_pbufs++;
      exp_oos_tcplen++;
    } else {
      /* inseq */
      exp_close_calls++;
    }
  }
  /* check if counters are as expected */
  check_rx_counters(pcb, &counters, exp_close_calls, exp_rx_calls, exp_rx_bytes, 0, exp_oos_pbufs, exp_oos_tcplen);

  if(delay_packet & 4) {
    /* drop data-after-FIN */
    if(first_dropped > 3) {
      first_dropped = 3;
    }
  } else {
    /* send data-after-FIN */
    test_tcp_input(p_data_after_fin, &netif);
    if (first_dropped < 3) {
      /* already dropped packets, this one is ooseq */
      if (delay_packet & 2) {
        /* correct FIN was ooseq */
        exp_oos_pbufs++;
        exp_oos_tcplen += k;
      }
    } else {
      /* inseq: no change */
    }
  }
  /* check if counters are as expected */
  check_rx_counters(pcb, &counters, exp_close_calls, exp_rx_calls, exp_rx_bytes, 0, exp_oos_pbufs, exp_oos_tcplen);

  if(delay_packet & 8) {
    /* drop 2nd-FIN */
    if(first_dropped > 4) {
      first_dropped = 4;
    }
  } else {
    /* send 2nd-FIN */
    test_tcp_input(p_2nd_fin_ooseq, &netif);
    if (first_dropped < 3) {
      /* already dropped packets, this one is ooseq */
      if (delay_packet & 2) {
        /* correct FIN was ooseq */
        exp_oos_pbufs++;
        exp_oos_tcplen++;
      }
    } else {
      /* inseq: no change */
    }
  }
  /* check if counters are as expected */
  check_rx_counters(pcb, &counters, exp_close_calls, exp_rx_calls, exp_rx_bytes, 0, exp_oos_pbufs, exp_oos_tcplen);

  if(delay_packet & 1) {
    /* dropped normal data before */
    test_tcp_input(p, &netif);
    exp_rx_calls++;
    exp_rx_bytes += TCP_MSS;
    if((delay_packet & 2) == 0) {
      /* normal FIN was NOT delayed */
      exp_close_calls++;
      exp_oos_pbufs = exp_oos_tcplen = 0;
    }
  }
  /* check if counters are as expected */
  check_rx_counters(pcb, &counters, exp_close_calls, exp_rx_calls, exp_rx_bytes, 0, exp_oos_pbufs, exp_oos_tcplen);

  if(delay_packet & 2) {
    /* dropped normal FIN before */
    test_tcp_input(p_normal_fin, &netif);
    exp_close_calls++;
    exp_oos_pbufs = exp_oos_tcplen = 0;
  }
  /* check if counters are as expected */
  check_rx_counters(pcb, &counters, exp_close_calls, exp_rx_calls, exp_rx_bytes, 0, exp_oos_pbufs, exp_oos_tcplen);

  if(delay_packet & 4) {
    /* dropped data-after-FIN before */
    test_tcp_input(p_data_after_fin, &netif);
  }
  /* check if counters are as expected */
  check_rx_counters(pcb, &counters, exp_close_calls, exp_rx_calls, exp_rx_bytes, 0, exp_oos_pbufs, exp_oos_tcplen);

  if(delay_packet & 8) {
    /* dropped 2nd-FIN before */
    test_tcp_input(p_2nd_fin_ooseq, &netif);
  }
  /* check if counters are as expected */
  check_rx_counters(pcb, &counters, exp_close_calls, exp_rx_calls, exp_rx_bytes, 0, exp_oos_pbufs, exp_oos_tcplen);

  /* check that ooseq data has been dumped */
  EXPECT(pcb->ooseq == NULL);

  /* make sure the pcb is freed */
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 1);
  tcp_abort(pcb);
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
}
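
The 'delay_packet' bitmask selects which of the four packets arrive out of sequence, so the helper above is meant to be driven over all 16 permutations. A hypothetical driver (the test name and registration are assumptions, not lwIP's actual test list) could look like this:

/* Hypothetical wrapper: exercise every ooseq permutation of the four packets
 * by iterating the 4-bit 'delay_packet' mask (0x0..0xF). */
START_TEST(test_tcp_recv_ooseq_double_FINs_all)
{
  int delay_packet;
  LWIP_UNUSED_ARG(_i);
  for (delay_packet = 0; delay_packet <= 0x0F; delay_packet++) {
    test_tcp_recv_ooseq_double_FINs(delay_packet);
  }
}
END_TEST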
Example #10
END_TEST

/** similar to above test, except seqno starts near the max rxwin */
START_TEST(test_tcp_recv_ooseq_overrun_rxwin_edge)
{
#if !TCP_OOSEQ_MAX_BYTES && !TCP_OOSEQ_MAX_PBUFS
  int i, k;
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf *pinseq, *p_ovr;
  struct netif netif;
  int datalen = 0;
  int datalen2;

  for(i = 0; i < (int)sizeof(data_full_wnd); i++) {
    data_full_wnd[i] = (char)i;
  }

  /* initialize local vars */
  test_tcp_init_netif(&netif, NULL, &test_local_ip, &test_netmask);
  /* initialize counter struct */
  memset(&counters, 0, sizeof(counters));
  counters.expected_data_len = TCP_WND;
  counters.expected_data = data_full_wnd;

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);
  pcb->rcv_nxt = 0xffffffff - (TCP_WND / 2);

  /* create segments */
  /* pinseq is sent as last segment! */
  pinseq = tcp_create_rx_segment(pcb, &data_full_wnd[0],  TCP_MSS, 0, 0, TCP_ACK);

  for(i = TCP_MSS, k = 0; i < TCP_WND; i += TCP_MSS, k++) {
    int count, expected_datalen;
    struct pbuf *p = tcp_create_rx_segment(pcb, &data_full_wnd[TCP_MSS*(k+1)],
                                           TCP_MSS, TCP_MSS*(k+1), 0, TCP_ACK);
    EXPECT_RET(p != NULL);
    /* pass the segment to tcp_input */
    test_tcp_input(p, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 0);
    EXPECT(counters.recved_bytes == 0);
    EXPECT(counters.err_calls == 0);
    /* check ooseq queue */
    count = tcp_oos_count(pcb);
    EXPECT_OOSEQ(count == k+1);
    datalen = tcp_oos_tcplen(pcb);
    if (i + TCP_MSS < TCP_WND) {
      expected_datalen = (k+1)*TCP_MSS;
    } else {
      expected_datalen = TCP_WND - TCP_MSS;
    }
    if (datalen != expected_datalen) {
      EXPECT_OOSEQ(datalen == expected_datalen);
    }
  }

  /* pass in one more segment, clearly overrunning the rxwin */
  p_ovr = tcp_create_rx_segment(pcb, &data_full_wnd[TCP_MSS*(k+1)], TCP_MSS, TCP_MSS*(k+1), 0, TCP_ACK);
  EXPECT_RET(p_ovr != NULL);
  /* pass the segment to tcp_input */
  test_tcp_input(p_ovr, &netif);
  /* check if counters are as expected */
  EXPECT(counters.close_calls == 0);
  EXPECT(counters.recv_calls == 0);
  EXPECT(counters.recved_bytes == 0);
  EXPECT(counters.err_calls == 0);
  /* check ooseq queue */
  EXPECT_OOSEQ(tcp_oos_count(pcb) == k);
  datalen2 = tcp_oos_tcplen(pcb);
  EXPECT_OOSEQ(datalen == datalen2);

  /* now pass inseq */
  test_tcp_input(pinseq, &netif);
  EXPECT(pcb->ooseq == NULL);

  /* make sure the pcb is freed */
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 1);
  tcp_abort(pcb);
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
#endif /* !TCP_OOSEQ_MAX_BYTES && !TCP_OOSEQ_MAX_PBUFS */
  LWIP_UNUSED_ARG(_i);
}
Example #11
END_TEST

START_TEST(test_tcp_rto_tracking)
{
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf* p;
  err_t err;
  size_t i;
  u16_t sent_total = 0;
  LWIP_UNUSED_ARG(_i);

  for (i = 0; i < sizeof(tx_data); i++) {
    tx_data[i] = (u8_t)i;
  }

  /* initialize local vars */
  test_tcp_init_netif(&netif, &txcounters, &test_local_ip, &test_netmask);
  memset(&counters, 0, sizeof(counters));

  /* create and initialize the pcb */
  tcp_ticks = SEQNO1 - ISS;
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);
  pcb->mss = TCP_MSS;
  /* Set congestion window large enough to send all our segments */
  pcb->cwnd = 5*TCP_MSS;

  /* send 5 mss-sized segments */
  for (i = 0; i < 5; i++) {
    err = tcp_write(pcb, &tx_data[sent_total], TCP_MSS, TCP_WRITE_FLAG_COPY);
    EXPECT_RET(err == ERR_OK);
    sent_total += TCP_MSS;
  }
  check_seqnos(pcb->unsent, 5, seqnos);
  EXPECT(pcb->unacked == NULL);
  err = tcp_output(pcb);
  EXPECT(txcounters.num_tx_calls == 5);
  EXPECT(txcounters.num_tx_bytes == 5 * (TCP_MSS + 40U));
  memset(&txcounters, 0, sizeof(txcounters));
  /* Check all 5 are in-flight */
  EXPECT(pcb->unsent == NULL);
  check_seqnos(pcb->unacked, 5, seqnos);

  /* Force us into retransmission timeout */
  while (!(pcb->flags & TF_RTO)) {
    test_tcp_tmr();
  }
  /* Ensure 4 remaining segments are back on unsent, ready for retransmission */
  check_seqnos(pcb->unsent, 4, &seqnos[1]);
  /* Ensure 1st segment is on unacked (already retransmitted) */
  check_seqnos(pcb->unacked, 1, seqnos);
  EXPECT(txcounters.num_tx_calls == 1);
  EXPECT(txcounters.num_tx_bytes == TCP_MSS + 40U);
  memset(&txcounters, 0, sizeof(txcounters));
  /* Ensure rto_end points to next byte */
  EXPECT(pcb->rto_end == seqnos[5]);
  EXPECT(pcb->rto_end == pcb->snd_nxt);
  /* Check cwnd was reset */
  EXPECT(pcb->cwnd == pcb->mss);

  /* Add another segment to send buffer which is outside of RTO */
  err = tcp_write(pcb, &tx_data[sent_total], TCP_MSS, TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  sent_total += TCP_MSS;
  check_seqnos(pcb->unsent, 5, &seqnos[1]);
  /* Ensure no new data was sent */
  EXPECT(txcounters.num_tx_calls == 0);
  EXPECT(txcounters.num_tx_bytes == 0);
  EXPECT(pcb->rto_end == pcb->snd_nxt);

  /* ACK first segment */
  p = tcp_create_rx_segment(pcb, NULL, 0, 0, TCP_MSS, TCP_ACK);
  test_tcp_input(p, &netif);
  /* Next two retransmissions should go out, due to cwnd in slow start */
  EXPECT(txcounters.num_tx_calls == 2);
  EXPECT(txcounters.num_tx_bytes == 2 * (TCP_MSS + 40U));
  memset(&txcounters, 0, sizeof(txcounters));
  check_seqnos(pcb->unacked, 2, &seqnos[1]);
  check_seqnos(pcb->unsent, 3, &seqnos[3]);
  /* RTO should still be marked */
  EXPECT(pcb->flags & TF_RTO);
  /* cwnd should have only grown by 1 MSS */
  EXPECT(pcb->cwnd == (tcpwnd_size_t)(2 * pcb->mss));
  /* Ensure no new data was sent */
  EXPECT(pcb->rto_end == pcb->snd_nxt);

  /* ACK the next two segments */
  p = tcp_create_rx_segment(pcb, NULL, 0, 0, 2*TCP_MSS, TCP_ACK);
  test_tcp_input(p, &netif);
  /* Final 2 retransmissions and 1 new data should go out */
  EXPECT(txcounters.num_tx_calls == 3);
  EXPECT(txcounters.num_tx_bytes == 3 * (TCP_MSS + 40U));
  memset(&txcounters, 0, sizeof(txcounters));
  check_seqnos(pcb->unacked, 3, &seqnos[3]);
  EXPECT(pcb->unsent == NULL);
  /* RTO should still be marked */
  EXPECT(pcb->flags & TF_RTO);
  /* cwnd should have only grown by 1 MSS */
  EXPECT(pcb->cwnd == (tcpwnd_size_t)(3 * pcb->mss));
  /* snd_nxt should have been advanced past rto_end */
  EXPECT(TCP_SEQ_GT(pcb->snd_nxt, pcb->rto_end));

  /* ACK the next two segments, finishing our RTO, leaving new segment unacked */
  p = tcp_create_rx_segment(pcb, NULL, 0, 0, 2*TCP_MSS, TCP_ACK);
  test_tcp_input(p, &netif);
  EXPECT(!(pcb->flags & TF_RTO));
  check_seqnos(pcb->unacked, 1, &seqnos[5]);
  /* We should be in ABC congestion avoidance, so no change in cwnd */
  EXPECT(pcb->cwnd == (tcpwnd_size_t)(3 * pcb->mss));
  EXPECT(pcb->cwnd >= pcb->ssthresh);
  /* Ensure ABC congestion avoidance is tracking bytes acked */
  EXPECT(pcb->bytes_acked == (tcpwnd_size_t)(2 * pcb->mss));

  /* make sure the pcb is freed */
  EXPECT_RET(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 1);
  tcp_abort(pcb);
  EXPECT_RET(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
}
Example #12
END_TEST

/** Send data with sequence numbers that wrap around the u32_t range.
 * Then, provoke RTO retransmission and check that all
 * segment lists are still properly sorted. */
START_TEST(test_tcp_rto_rexmit_wraparound)
{
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  err_t err;
  size_t i;
  u16_t sent_total = 0;
  LWIP_UNUSED_ARG(_i);

  for (i = 0; i < sizeof(tx_data); i++) {
    tx_data[i] = (u8_t)i;
  }

  /* initialize local vars */
  test_tcp_init_netif(&netif, &txcounters, &test_local_ip, &test_netmask);
  memset(&counters, 0, sizeof(counters));

  /* create and initialize the pcb */
  tcp_ticks = 0;
  tcp_ticks = 0 - tcp_next_iss(NULL);
  tcp_ticks = SEQNO1 - tcp_next_iss(NULL);
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);
  pcb->mss = TCP_MSS;
  /* disable initial congestion window (we don't send a SYN here...) */
  pcb->cwnd = 2*TCP_MSS;

  /* send 6 mss-sized segments */
  for (i = 0; i < 6; i++) {
    err = tcp_write(pcb, &tx_data[sent_total], TCP_MSS, TCP_WRITE_FLAG_COPY);
    EXPECT_RET(err == ERR_OK);
    sent_total += TCP_MSS;
  }
  check_seqnos(pcb->unsent, 6, seqnos);
  EXPECT(pcb->unacked == NULL);
  err = tcp_output(pcb);
  EXPECT(txcounters.num_tx_calls == 2);
  EXPECT(txcounters.num_tx_bytes == 2 * (TCP_MSS + 40U));
  memset(&txcounters, 0, sizeof(txcounters));

  check_seqnos(pcb->unacked, 2, seqnos);
  check_seqnos(pcb->unsent, 4, &seqnos[2]);

  /* call the tcp timer some times */
  for (i = 0; i < 10; i++) {
    test_tcp_tmr();
    EXPECT(txcounters.num_tx_calls == 0);
  }
  /* 11th call to tcp_tmr: RTO rexmit fires */
  test_tcp_tmr();
  EXPECT(txcounters.num_tx_calls == 1);
  check_seqnos(pcb->unacked, 1, seqnos);
  check_seqnos(pcb->unsent, 5, &seqnos[1]);

  /* fake greater cwnd */
  pcb->cwnd = pcb->snd_wnd;
  /* send more data */
  err = tcp_output(pcb);
  EXPECT(err == ERR_OK);
  /* check queues are sorted */
  EXPECT(pcb->unsent == NULL);
  check_seqnos(pcb->unacked, 6, seqnos);

  /* make sure the pcb is freed */
  EXPECT_RET(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 1);
  tcp_abort(pcb);
  EXPECT_RET(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
}
Example #13
END_TEST

START_TEST(test_tcp_persist_split)
{
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  struct test_tcp_counters counters;
  struct tcp_pcb *pcb;
  struct pbuf* p;
  err_t err;
  size_t i;
  LWIP_UNUSED_ARG(_i);

  /* Setup data for four segments */
  for (i = 0; i < 4 * TCP_MSS; i++) {
    tx_data[i] = (u8_t)i;
  }

  /* initialize local vars */
  test_tcp_init_netif(&netif, &txcounters, &test_local_ip, &test_netmask);
  memset(&counters, 0, sizeof(counters));

  /* create and initialize the pcb */
  tcp_ticks = SEQNO1 - ISS;
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);
  pcb->mss = TCP_MSS;
  /* set window to three segments */
  pcb->cwnd = 3 * TCP_MSS;
  pcb->snd_wnd = 3 * TCP_MSS;
  pcb->snd_wnd_max = 3 * TCP_MSS;

  /* send three segments */
  err = tcp_write(pcb, &tx_data[0], 3 * TCP_MSS, TCP_WRITE_FLAG_COPY);
  EXPECT(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT(err == ERR_OK);

  /* verify segments are in-flight */
  EXPECT(pcb->unsent == NULL);
  EXPECT(pcb->unacked != NULL);
  check_seqnos(pcb->unacked, 3, seqnos);
  EXPECT(txcounters.num_tx_calls == 3);
  EXPECT(txcounters.num_tx_bytes == 3 * (TCP_MSS + 40U));
  memset(&txcounters, 0, sizeof(txcounters));

  /* ACK the segments and update the window to only 1/2 TCP_MSS */
  p = tcp_create_rx_segment_wnd(pcb, NULL, 0, 0, 3 * TCP_MSS, TCP_ACK, TCP_MSS / 2);
  test_tcp_input(p, &netif);
  EXPECT(pcb->unacked == NULL);
  EXPECT(pcb->unsent == NULL);
  EXPECT(pcb->persist_backoff == 0);
  EXPECT(pcb->snd_wnd == TCP_MSS / 2);

  /* send fourth segment, which is larger than snd_wnd */
  err = tcp_write(pcb, &tx_data[3 * TCP_MSS], TCP_MSS, TCP_WRITE_FLAG_COPY);
  EXPECT(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT(err == ERR_OK);

  /* ensure it is buffered and persist timer started */
  EXPECT(pcb->unacked == NULL);
  EXPECT(pcb->unsent != NULL);
  check_seqnos(pcb->unsent, 1, &seqnos[3]);
  EXPECT(txcounters.num_tx_calls == 0);
  EXPECT(txcounters.num_tx_bytes == 0);
  EXPECT(pcb->persist_backoff == 1);

  /* ensure no errors have been recorded */
  EXPECT(counters.err_calls == 0);
  EXPECT(counters.last_err == ERR_OK);

  /* call tcp_timer some more times to let persist timer count up */
  for (i = 0; i < 4; i++) {
    test_tcp_tmr();
    EXPECT(txcounters.num_tx_calls == 0);
    EXPECT(txcounters.num_tx_bytes == 0);
  }

  /* this should be the first timer shot, which should split the
   * segment and send a runt (of the remaining window size) */
  txcounters.copy_tx_packets = 1;
  test_tcp_tmr();
  txcounters.copy_tx_packets = 0;
  /* persist will be disabled as RTO timer takes over */
  EXPECT(pcb->persist_backoff == 0);
  EXPECT(txcounters.num_tx_calls == 1);
  EXPECT(txcounters.num_tx_bytes == ((TCP_MSS / 2) + 40U));
  /* verify half segment sent, half still buffered */
  EXPECT(pcb->unsent != NULL);
  EXPECT(pcb->unsent->len == TCP_MSS / 2);
  EXPECT(pcb->unacked != NULL);
  EXPECT(pcb->unacked->len == TCP_MSS / 2);

  /* verify first half segment */
  EXPECT(txcounters.tx_packets != NULL);
  if (txcounters.tx_packets != NULL) {
    u8_t sent[TCP_MSS / 2];
    u16_t ret;
    ret = pbuf_copy_partial(txcounters.tx_packets, &sent, TCP_MSS / 2, 40U);
    EXPECT(ret == TCP_MSS / 2);
    EXPECT(memcmp(sent, &tx_data[3 * TCP_MSS], TCP_MSS / 2) == 0);
  }
  if (txcounters.tx_packets != NULL) {
    pbuf_free(txcounters.tx_packets);
    txcounters.tx_packets = NULL;
  }
  memset(&txcounters, 0, sizeof(txcounters));

  /* ACK the half segment, leave window at half segment */
  p = tcp_create_rx_segment_wnd(pcb, NULL, 0, 0, TCP_MSS / 2, TCP_ACK, TCP_MSS / 2);
  txcounters.copy_tx_packets = 1;
  test_tcp_input(p, &netif);
  txcounters.copy_tx_packets = 0;
  /* ensure remaining half segment was sent */
  EXPECT(txcounters.num_tx_calls == 1);
  EXPECT(txcounters.num_tx_bytes == ((TCP_MSS / 2) + 40U));
  EXPECT(pcb->unsent == NULL);
  EXPECT(pcb->unacked != NULL);
  EXPECT(pcb->unacked->len == TCP_MSS / 2);
  EXPECT(pcb->snd_wnd == TCP_MSS / 2);

  /* verify second half segment */
  EXPECT(txcounters.tx_packets != NULL);
  if (txcounters.tx_packets != NULL) {
    u8_t sent[TCP_MSS / 2];
    u16_t ret;
    ret = pbuf_copy_partial(txcounters.tx_packets, &sent, TCP_MSS / 2, 40U);
    EXPECT(ret == TCP_MSS / 2);
    EXPECT(memcmp(sent, &tx_data[(3 * TCP_MSS) + TCP_MSS / 2], TCP_MSS / 2) == 0);
  }
  if (txcounters.tx_packets != NULL) {
    pbuf_free(txcounters.tx_packets);
    txcounters.tx_packets = NULL;
  }

  /* ensure no errors have been recorded */
  EXPECT(counters.err_calls == 0);
  EXPECT(counters.last_err == ERR_OK);

  /* make sure the pcb is freed */
  EXPECT_RET(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 1);
  tcp_abort(pcb);
  EXPECT_RET(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
}
Example #14
END_TEST

START_TEST(test_tcp_zwp_timeout)
{
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  struct test_tcp_counters counters;
  struct tcp_pcb *pcb, *cur;
  struct pbuf* p;
  err_t err;
  size_t i;
  LWIP_UNUSED_ARG(_i);

  /* Setup data for two segments */
  for (i = 0; i < 2*TCP_MSS; i++) {
    tx_data[i] = (u8_t)i;
  }

  /* initialize local vars */
  test_tcp_init_netif(&netif, &txcounters, &test_local_ip, &test_netmask);
  memset(&counters, 0, sizeof(counters));

  /* create and initialize the pcb */
  tcp_ticks = SEQNO1 - ISS;
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);
  pcb->mss = TCP_MSS;
  pcb->cwnd = TCP_MSS;

  /* send first segment */
  err = tcp_write(pcb, &tx_data[0], TCP_MSS, TCP_WRITE_FLAG_COPY);
  EXPECT(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT(err == ERR_OK);
  
  /* verify segment is in-flight */
  EXPECT(pcb->unsent == NULL);
  check_seqnos(pcb->unacked, 1, seqnos);
  EXPECT(txcounters.num_tx_calls == 1);
  EXPECT(txcounters.num_tx_bytes == 1 * (TCP_MSS + 40U));
  memset(&txcounters, 0, sizeof(txcounters));

  /* ACK the segment and close the TX window */
  p = tcp_create_rx_segment_wnd(pcb, NULL, 0, 0, TCP_MSS, TCP_ACK, 0);
  test_tcp_input(p, &netif);
  EXPECT(pcb->unacked == NULL);
  EXPECT(pcb->unsent == NULL);
  /* send buffer empty, persist should be off */
  EXPECT(pcb->persist_backoff == 0);
  EXPECT(pcb->snd_wnd == 0);

  /* send second segment, should be buffered */
  err = tcp_write(pcb, &tx_data[TCP_MSS], TCP_MSS, TCP_WRITE_FLAG_COPY);
  EXPECT(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT(err == ERR_OK);

  /* ensure it is buffered and persist timer started */
  EXPECT(pcb->unacked == NULL);
  check_seqnos(pcb->unsent, 1, &seqnos[1]);
  EXPECT(txcounters.num_tx_calls == 0);
  EXPECT(txcounters.num_tx_bytes == 0);
  EXPECT(pcb->persist_backoff == 1);

  /* ensure no errors have been recorded */
  EXPECT(counters.err_calls == 0);
  EXPECT(counters.last_err == ERR_OK);

  /* run timer till first probe */
  EXPECT(pcb->persist_probe == 0);
  while (pcb->persist_probe == 0) {
    test_tcp_tmr();
  }
  EXPECT(txcounters.num_tx_calls == 1);
  EXPECT(txcounters.num_tx_bytes == (1 + 40U));
  memset(&txcounters, 0, sizeof(txcounters));

  /* respond to probe with remote's current SEQ, ACK, and zero-window */
  p = tcp_create_rx_segment_wnd(pcb, NULL, 0, 0, 0, TCP_ACK, 0);
  test_tcp_input(p, &netif);
  /* ensure zero-window is still active, but probe count reset */
  EXPECT(pcb->persist_backoff > 1);
  EXPECT(pcb->persist_probe == 0);
  EXPECT(pcb->snd_wnd == 0);

  /* ensure no errors have been recorded */
  EXPECT(counters.err_calls == 0);
  EXPECT(counters.last_err == ERR_OK);

  /* now run the timer till we hit our maximum probe count */
  while (counters.last_err == ERR_OK) {
    test_tcp_tmr();
  }

  /* check maximum number of 1 byte probes were sent */
  EXPECT(txcounters.num_tx_calls == TCP_MAXRTX);
  EXPECT(txcounters.num_tx_bytes == TCP_MAXRTX * (1 + 40U));

  /* check the connection (pcb) has been aborted */
  EXPECT(counters.err_calls == 1);
  EXPECT(counters.last_err == ERR_ABRT);
  /* check our pcb is no longer active */
  for (cur = tcp_active_pcbs; cur != NULL; cur = cur->next) {
    EXPECT(cur != pcb);
  }  
  EXPECT_RET(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
}
Example #15
END_TEST

START_TEST(test_tcp_rto_timeout)
{
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  struct test_tcp_counters counters;
  struct tcp_pcb *pcb, *cur;
  err_t err;
  size_t i;
  LWIP_UNUSED_ARG(_i);

  /* Setup data for a single segment */
  for (i = 0; i < TCP_MSS; i++) {
    tx_data[i] = (u8_t)i;
  }

  /* initialize local vars */
  test_tcp_init_netif(&netif, &txcounters, &test_local_ip, &test_netmask);
  memset(&counters, 0, sizeof(counters));

  /* create and initialize the pcb */
  tcp_ticks = SEQNO1 - ISS;
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);
  pcb->mss = TCP_MSS;
  pcb->cwnd = TCP_MSS;

  /* send our segment */
  err = tcp_write(pcb, &tx_data[0], TCP_MSS, TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT(txcounters.num_tx_calls == 1);
  EXPECT(txcounters.num_tx_bytes == 1 * (TCP_MSS + 40U));
  memset(&txcounters, 0, sizeof(txcounters));

  /* ensure no errors have been recorded */
  EXPECT(counters.err_calls == 0);
  EXPECT(counters.last_err == ERR_OK);

  /* Force us into retransmission timeout */
  while (!(pcb->flags & TF_RTO)) {
    test_tcp_tmr();
  }

  /* check first rexmit */
  EXPECT(pcb->nrtx == 1);
  EXPECT(txcounters.num_tx_calls == 1);
  EXPECT(txcounters.num_tx_bytes == 1 * (TCP_MSS + 40U));

  /* still no error expected */
  EXPECT(counters.err_calls == 0);
  EXPECT(counters.last_err == ERR_OK);

  /* keep running the timer till we hit our maximum RTO */
  while (counters.last_err == ERR_OK) {
    test_tcp_tmr();
  }

  /* check number of retransmissions */
  EXPECT(txcounters.num_tx_calls == TCP_MAXRTX);
  EXPECT(txcounters.num_tx_bytes == TCP_MAXRTX * (TCP_MSS + 40U));

  /* check the connection (pcb) has been aborted */
  EXPECT(counters.err_calls == 1);
  EXPECT(counters.last_err == ERR_ABRT);
  /* check our pcb is no longer active */
  for (cur = tcp_active_pcbs; cur != NULL; cur = cur->next) {
    EXPECT(cur != pcb);
  }  
  EXPECT_RET(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
}
Example #16
/*
 * OPEN CALL:
 * sent <SEQ=ISS><CTL=SYN>
 * SND.UNA = ISS, SND.NXT = ISS+1
 */
static void tcp_synsent(struct pkbuf *pkb, struct tcp_segment *seg,
			struct tcp_sock *tsk)
{
	struct tcp *tcphdr = seg->tcphdr;
	tcpsdbg("SYN-SENT");
	/* first check the ACK bit */
	tcpsdbg("1. check ack");
	if (tcphdr->ack) {
		/*
		 * Maybe we can reduce to `seg->ack != tsk->snd_nxt`
		 * Because we should not send data with the first SYN.
		 * (Just assert tsk->iss + 1 == tsk->snd_nxt)
		 */
		if (seg->ack <= tsk->iss || seg->ack > tsk->snd_nxt) {
			tcp_send_reset(tsk, seg);
			goto discarded;
		}
		/*
		 * RFC 793:
		 *   If SND.UNA =< SEG.ACK =< SND.NXT, the ACK is acceptable.
		 *   (Assert SND.UNA == 0)
		 */
	}
	/* second check the RST bit */
	tcpsdbg("2. check rst");
	if (tcphdr->rst) {
		if (tcphdr->ack) {
			/* connect closed port */
			tcpsdbg("Error:connection reset");
			tcp_set_state(tsk, TCP_CLOSED);
			if (tsk->wait_connect)
				wake_up(tsk->wait_connect);
			else
				tcpsdbg("No thread waiting for connection");
		}
		goto discarded;
	}
	/* third check the security and precedence (ignored) */
	tcpsdbg("3. No check the security and precedence");
	/* fourth check the SYN bit */
	tcpsdbg("4. check syn");
	if (tcphdr->syn) {
		tsk->irs = seg->seq;
		tsk->rcv_nxt = seg->seq + 1;
		if (tcphdr->ack)		/* No ack for simultaneous open */
			tsk->snd_una = seg->ack;	/* snd_una: iss -> iss+1 */
		/* delete retransmission queue which waits to be acknowledged */
		if (tsk->snd_una > tsk->iss) {	/* rcv.ack = snd.syn.seq+1 */
			tcp_set_state(tsk, TCP_ESTABLISHED);
			/* RFC 1122: error corrections of RFC 793 */
			tsk->snd_wnd = seg->wnd;
			tsk->snd_wl1 = seg->seq;
			tsk->snd_wl2 = seg->ack;
			/* reply ACK seq=snd.nxt, ack=rcv.nxt at right */
			tcp_send_ack(tsk, seg);
			tcpsdbg("Active three-way handshake successes!(SND.WIN:%d)", tsk->snd_wnd);
			wake_up(tsk->wait_connect);
			/*
			 * Data or controls which were queued for transmission
			 * may be included.  If there are other controls or text
			 * in the segment then continue processing at the sixth
			 * step below where the URG bit is checked, otherwise
			 * return.
			 */
		} else {		/* simultaneous open */
			/* XXX: test */
			tcp_set_state(tsk, TCP_SYN_RECV);
			/* reply SYN+ACK seq=iss,ack=rcv.nxt */
			tcp_send_synack(tsk, seg);
			tcpsdbg("Simultaneous open(SYN-SENT => SYN-RECV)");
			/*
			 * queue text or other controls after established state
			 * has been reached
			 */
			return;
		}
	}
	/* fifth drop the segment and return */
	tcpsdbg("5. drop the segment");
discarded:
	free_pkb(pkb);
}
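
The ACK check above (seg->ack <= tsk->iss || seg->ack > tsk->snd_nxt) uses plain comparisons; RFC 793's SND.UNA =< SEG.ACK =< SND.NXT test must also survive 32-bit sequence-number wraparound. A small standalone sketch of the wraparound-safe comparison, with hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

/* true if a <= b in 32-bit sequence space (wraparound-safe) */
int seq_leq(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

/* RFC 793: the ACK is acceptable if SND.UNA =< SEG.ACK =< SND.NXT */
int ack_acceptable(uint32_t snd_una, uint32_t snd_nxt, uint32_t seg_ack)
{
	return seq_leq(snd_una, seg_ack) && seq_leq(seg_ack, snd_nxt);
}

int main(void)
{
	uint32_t iss = 0xffffffffu;                /* ISS right at the wrap point */
	uint32_t snd_una = iss, snd_nxt = iss + 1; /* state after sending the SYN */

	printf("%d\n", ack_acceptable(snd_una, snd_nxt, iss + 1)); /* 1: ACK of our SYN */
	printf("%d\n", ack_acceptable(snd_una, snd_nxt, iss - 5)); /* 0: stale ACK */
	return 0;
}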
Example #17
/* TCP state processing follows RFC 793, "SEGMENT ARRIVES" */
void tcp_process(struct pkbuf *pkb, struct tcp_segment *seg, struct sock *sk)
{
	struct tcp_sock *tsk = tcpsk(sk);
	struct tcp *tcphdr = seg->tcphdr;
	tcp_dbg_state(tsk);
	if (!tsk || tsk->state == TCP_CLOSED)
		return tcp_closed(tsk, pkb, seg);
	if (tsk->state == TCP_LISTEN)
		return tcp_listen(pkb, seg, tsk);
	if (tsk->state == TCP_SYN_SENT)
		return tcp_synsent(pkb, seg, tsk);
	if (tsk->state >= TCP_MAX_STATE)
		goto drop;
	/* first check sequence number */
	tcpsdbg("1. check seq");
	if (seq_check(seg, tsk) < 0) {
		/* incoming segment is not acceptable */
		if (!tcphdr->rst)
			tsk->flags |= TCP_F_ACKNOW; /*reply ACK seq=snd.nxt, ack=rcv.nxt*/
		goto drop;
	}
	/* second check the RST bit */
	tcpsdbg("2. check rst");
	if (tcphdr->rst) {
		/* abort a connection */
		switch (tsk->state) {
		case TCP_SYN_RECV:
			if (tsk->parent) {	/* passive open */
				tcp_unhash(&tsk->sk);
			} else {
				/*
				 * signal user "connection refused"
				 * when both users open simultaneously.
				 * XXX: test
				 */
				if (tsk->wait_connect)
					wake_up(tsk->wait_connect);
			}
			break;
		case TCP_ESTABLISHED:
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_CLOSE_WAIT:
			/* RECEIVE and SEND receive reset response */
			/* flush all segments queue */
			/* signal user "connection reset" */
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
		case TCP_TIME_WAIT:
			break;
		}
		tcp_set_state(tsk, TCP_CLOSED);
		tcp_unhash(&tsk->sk);
		tcp_unbhash(tsk);
		goto drop;
	}
	/* third check security and precedence (ignored) */
	tcpsdbg("3. NO check security and precedence");
	/* fourth check the SYN bit */
	tcpsdbg("4. check syn");
	if (tcphdr->syn) {
		/* only LISTEN and SYN-SENT can receive SYN */
		tcp_send_reset(tsk, seg);
		/* RECEIVE and SEND receive reset response */
		/* flush all segments queue */
		/* signal user "connection reset" */
		/*
		 * RFC 1122: error corrections of RFC 793:
		 * In SYN-RECEIVED state and if the connection was initiated
		 * with a passive OPEN, then return this connection to the
		 * LISTEN state and return.
		 * - We delete child tsk directly,
		 *   and its parent has been in LISTEN state.
		 */
		if (tsk->state == TCP_SYN_RECV && tsk->parent)
			tcp_unhash(&tsk->sk);
		tcp_set_state(tsk, TCP_CLOSED);
		free_sock(&tsk->sk);
	}
	/* fifth check the ACK field */
	tcpsdbg("5. check ack");
	/*
	 * RFC 793 says:
	 * 1. we should drop the segment and return
	 *    if the ACK bit is off.
	 * 2. Once in the ESTABLISHED state all segments must
	 *    carry current acknowledgment information.
	 * Should we do it ?
	 * -No for xinu
	 * -No for linux
	 */
	if (!tcphdr->ack)
		goto drop;
	switch (tsk->state) {
	case TCP_SYN_RECV:
		/*
		 * previous state LISTEN :
		 *  snd_nxt = iss + 1
		 *  snd_una = iss
		 * previous state SYN-SENT:
		 *  snd_nxt = iss+1
		 *  snd_una = iss
		 * Should we update snd_una to seg->ack here?
		 *  -Unknown for RFC 793
		 *  -Yes for xinu
		 *  -Yes for Linux
		 *  +Yes for tapip
		 * Is 'snd.una == seg.ack' right?
		 *  -Yes for RFC 793
		 *  -Yes for 4.4BSD-Lite
		 *  -Yes for xinu, although duplicate ACK
		 *  -Yes for Linux,
		 *  +Yes for tapip
		 */
		if (tsk->snd_una <= seg->ack && seg->ack <= tsk->snd_nxt) {
			if (tcp_synrecv_ack(tsk) < 0) {
				tcpsdbg("drop");
				goto drop;		/* Should we drop it? */
			}
			tsk->snd_una = seg->ack;
			/* RFC 1122: error corrections of RFC 793(SND.W**) */
			__tcp_update_window(tsk, seg);
			tcp_set_state(tsk, TCP_ESTABLISHED);
		} else {
			tcp_send_reset(tsk, seg);
			goto drop;
		}
		break;
	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_CLOSING:
		tcpsdbg("SND.UNA %u < SEG.ACK %u <= SND.NXT %u",
				tsk->snd_una, seg->ack, tsk->snd_nxt);
		if (tsk->snd_una < seg->ack && seg->ack <= tsk->snd_nxt) {
			tsk->snd_una = seg->ack;
			/*
			 * remove any segments on the retransmission
			 * queue which are thereby entirely acknowledged
			 */
			if (tsk->state == TCP_FIN_WAIT1) {
				tcp_set_state(tsk, TCP_FIN_WAIT2);
			} else if (tsk->state == TCP_CLOSING) {
				tcp_set_timewait_timer(tsk);
				goto drop;
			} else if (tsk->state == TCP_LAST_ACK) {
				tcp_set_state(tsk, TCP_CLOSED);
				tcp_unhash(&tsk->sk);
				/* for tcp active open */
				tcp_unbhash(tsk);
				goto drop;
			}
		} else if (seg->ack > tsk->snd_nxt) {	/* something not yet sent */
			/* reply ACK ack = ? */
			goto drop;
		} else if (seg->ack <= tsk->snd_una) {	/* duplicate ACK */
			/*
			 * RFC 793 say we can ignore duplicate ACK.
			 * What does `ignore` mean?
			 * Should we continue and not drop the segment?
			 * -Yes for xinu
			 * -Yes for linux
			 * -Yes for 4.4BSD-Lite
			 * +Yes for tapip
			 *
			 * After three-way handshake connection is established,
			 * then SND.UNA == SND.NXT, which means next remote
			 * packet ACK is always duplicate. Although this
			 * happens frequently, we should not view it as an
			 * error.
			 *
			 * Close simultaneously in FIN_WAIT1 also causes this.
			 *
			 * Also window update packet will cause this situation.
			 */
		}
		tcp_update_window(tsk, seg);
		break;
	case TCP_FIN_WAIT2:
	/*
          In addition to the processing for the ESTABLISHED state, if
          the retransmission queue is empty, the user's CLOSE can be
          acknowledged ("ok") but do not delete the TCB. (wait FIN)
	 */
		break;
	case TCP_TIME_WAIT:
	/*
          The only thing that can arrive in this state is a
          retransmission of the remote FIN.  Acknowledge it, and restart
          the 2 MSL timeout.
	 */
		break;
	}

	/* sixth check the URG bit */
	tcpsdbg("6. check urg");
	if (tcphdr->urg) {
		switch (tsk->state) {
		case TCP_ESTABLISHED:
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
	/*
        If the URG bit is set, RCV.UP <- max(RCV.UP,SEG.UP), and signal
        the user that the remote side has urgent data if the urgent
        pointer (RCV.UP) is in advance of the data consumed.  If the
        user has already been signaled (or is still in the "urgent
        mode") for this continuous sequence of urgent data, do not
        signal the user again.
	 */
			break;
		case TCP_CLOSE_WAIT:
		case TCP_CLOSING:
		case TCP_LAST_ACK:
		case TCP_TIME_WAIT:
			/* ignore */
			/* Should we continue or drop? */
			break;
		case TCP_SYN_RECV:
			/* ?? */
			break;
		}
	}
	/* seventh process the segment text */
	tcpsdbg("7. segment text");
	switch (tsk->state) {
	case TCP_ESTABLISHED:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		if (tcphdr->psh || seg->dlen > 0)
			tcp_recv_text(tsk, seg, pkb);
		break;
	/*
	 * CLOSE-WAIT|CLOSING|LAST-ACK|TIME-WAIT:
	 *  FIN has been received, so we ignore the segment text.
	 *
	 * OTHER STATES: segment is ignored!
	 */
	}
	/* eighth check the FIN bit */
	tcpsdbg("8. check fin");
	if (tcphdr->fin) {
		switch (tsk->state) {
		case TCP_SYN_RECV:
			/*
			 * SYN-RECV means remote->local connection is established
			 * see TCP/IP Illustrated Vol.2, tcp_input() L1127-1134
			 */
		case TCP_ESTABLISHED:
			/* waiting user to close */
			tcp_set_state(tsk, TCP_CLOSE_WAIT);
			tsk->flags |= TCP_F_PUSH;
			tsk->sk.ops->recv_notify(&tsk->sk);
			break;
		case TCP_FIN_WAIT1:
			/* both users close simultaneously */
			tcp_set_state(tsk, TCP_CLOSING);
			break;
		case TCP_CLOSE_WAIT:	/* Remain in the CLOSE-WAIT state */
		case TCP_CLOSING:	/* Remain in the CLOSING state */
		case TCP_LAST_ACK:	/* Remain in the LAST-ACK state */
			/* don't handle it, must be a duplicate FIN */
			break;
		case TCP_TIME_WAIT:	/* Remain in the TIME-WAIT state */
			/* restart the 2 MSL time-wait timeout */
			tsk->timewait.timeout = TCP_TIMEWAIT_TIMEOUT;
			break;
		case TCP_FIN_WAIT2:
			/* FIXME: turn off the other timers. */
			tcp_set_timewait_timer(tsk);
			break;
		}
		/* signal the user "connection closing" */
		/* return any pending RECEIVEs with same message */
		/* advance rcv.nxt over fin */
		tsk->rcv_nxt = seg->seq + 1;
		/* send ACK for FIN */
		tsk->flags |= TCP_F_ACKNOW;
		/*
		 * FIN implies PUSH for any segment text not yet delivered
		 * to the user.
		 */
	}
drop:
	/* TODO: use ack delay timer instead of sending ack now */
	if (tsk->flags & (TCP_F_ACKNOW|TCP_F_ACKDELAY))
		tcp_send_ack(tsk, seg);
	free_pkb(pkb);
}
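
The __tcp_update_window()/tcp_update_window() calls above apply the RFC 793/1122 send-window update rule: SND.WND is refreshed only by segments that are not older (by SND.WL1/SND.WL2) than the one that last updated it. A standalone sketch of that rule with hypothetical struct and field names:

#include <stdint.h>

/* Hypothetical TCB slice holding only the send-window bookkeeping. */
struct tcb_wnd {
	uint32_t snd_wnd;  /* SND.WND */
	uint32_t snd_wl1;  /* SEG.SEQ of the segment used for the last update */
	uint32_t snd_wl2;  /* SEG.ACK of the segment used for the last update */
};

int seq_lt(uint32_t a, uint32_t b)  { return (int32_t)(a - b) < 0; }
int seq_le(uint32_t a, uint32_t b)  { return (int32_t)(a - b) <= 0; }

/* RFC 793: update SND.WND if SND.WL1 < SEG.SEQ, or
 * SND.WL1 == SEG.SEQ and SND.WL2 =< SEG.ACK, i.e. the segment is not older
 * than the one that last updated the window. */
void update_send_window(struct tcb_wnd *tp, uint32_t seg_seq,
			uint32_t seg_ack, uint32_t seg_wnd)
{
	if (seq_lt(tp->snd_wl1, seg_seq) ||
	    (tp->snd_wl1 == seg_seq && seq_le(tp->snd_wl2, seg_ack))) {
		tp->snd_wnd = seg_wnd;
		tp->snd_wl1 = seg_seq;
		tp->snd_wl2 = seg_ack;
	}
}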
Example #18
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk);
	if (tmp < 0)
		return tmp;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);

		/* VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state TIME-WAIT
		 * and initialize rx_opt.ts_recent from it, when trying new connection.
		 */

		if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/* This unhashes the socket and releases the local port, if necessary. */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}
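
For orientation, a hedged sketch of the userspace side this path services: a blocking connect(2) on an AF_INET stream socket reaches tcp_v4_connect() through the protocol's connect hook. The destination address and port below are arbitrary examples.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);                        /* ends up in inet->dport */
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);  /* ends up in inet->daddr */

	/* route lookup, SYN-SENT, hash insertion and the SYN transmit happen in the kernel here */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	close(fd);
	return 0;
}
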
Beispiel #19
0
END_TEST


/** create multiple segments and pass them to tcp_input in a wrong
 * order to see if ooseq-caching works correctly
 * FIN is received IN-SEQUENCE at the end */
START_TEST(test_tcp_recv_ooseq_FIN_INSEQ)
{
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf *p_1_2, *p_4_8, *p_3_11, *p_2_12, *p_15_1, *p_15_1a, *pinseq, *pinseqFIN;
  char data[] = {
     1,  2,  3,  4,
     5,  6,  7,  8,
     9, 10, 11, 12,
    13, 14, 15, 16};
  u16_t data_len;
  struct netif netif;
  LWIP_UNUSED_ARG(_i);

  /* initialize local vars */
  test_tcp_init_netif(&netif, NULL, &test_local_ip, &test_netmask);
  data_len = sizeof(data);
  /* initialize counter struct */
  memset(&counters, 0, sizeof(counters));
  counters.expected_data_len = data_len;
  counters.expected_data = data;

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);

  /* create segments */
  /* p1: 2 bytes (out of sequence: byte 0 is still missing) */
  /*     seqno: 1..2 */
  p_1_2  = tcp_create_rx_segment(pcb, &data[1],  2, 1, 0, TCP_ACK);
  /* p2: 8 bytes, leaving a one-byte gap (seqno 3) behind p1 */
  /*     seqno: 4..11 */
  p_4_8  = tcp_create_rx_segment(pcb, &data[4],  8, 4, 0, TCP_ACK);
  /* p3: like p2, but one byte longer at the front and two bytes longer at the end */
  /*     seqno: 3..13 */
  p_3_11 = tcp_create_rx_segment(pcb, &data[3], 11, 3, 0, TCP_ACK);
  /* p4: 12 bytes, overlapping p1 and p3 */
  /*     seqno: 2..13 */
  p_2_12 = tcp_create_rx_segment(pcb, &data[2], 12, 2, 0, TCP_ACK);
  /* pinseq is the first segment that is held back to create ooseq! */
  /*     seqno: 0..3 */
  pinseq = tcp_create_rx_segment(pcb, &data[0],  4, 0, 0, TCP_ACK);
  /* p5: last byte before FIN */
  /*     seqno: 15 */
  p_15_1 = tcp_create_rx_segment(pcb, &data[15], 1, 15, 0, TCP_ACK);
  /* p6: same as p5, should be ignored */
  p_15_1a= tcp_create_rx_segment(pcb, &data[15], 1, 15, 0, TCP_ACK);
  /* pinseqFIN: last 2 bytes plus FIN */
  /*     only segment containing seqno 14 and FIN */
  pinseqFIN = tcp_create_rx_segment(pcb,  &data[14], 2, 14, 0, TCP_ACK|TCP_FIN);
  EXPECT(pinseq != NULL);
  EXPECT(p_1_2 != NULL);
  EXPECT(p_4_8 != NULL);
  EXPECT(p_3_11 != NULL);
  EXPECT(p_2_12 != NULL);
  EXPECT(p_15_1 != NULL);
  EXPECT(p_15_1a != NULL);
  EXPECT(pinseqFIN != NULL);
  if ((pinseq != NULL) && (p_1_2 != NULL) && (p_4_8 != NULL) && (p_3_11 != NULL) && (p_2_12 != NULL)
    && (p_15_1 != NULL) && (p_15_1a != NULL) && (pinseqFIN != NULL)) {
    /* pass the segment to tcp_input */
    test_tcp_input(p_1_2, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 0);
    EXPECT(counters.recved_bytes == 0);
    EXPECT(counters.err_calls == 0);
    /* check ooseq queue */
    EXPECT_OOSEQ(tcp_oos_count(pcb) == 1);
    EXPECT_OOSEQ(tcp_oos_seg_seqno(pcb, 0) == 1);
    EXPECT_OOSEQ(tcp_oos_seg_tcplen(pcb, 0) == 2);

    /* pass the segment to tcp_input */
    test_tcp_input(p_4_8, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 0);
    EXPECT(counters.recved_bytes == 0);
    EXPECT(counters.err_calls == 0);
    /* check ooseq queue */
    EXPECT_OOSEQ(tcp_oos_count(pcb) == 2);
    EXPECT_OOSEQ(tcp_oos_seg_seqno(pcb, 0) == 1);
    EXPECT_OOSEQ(tcp_oos_seg_tcplen(pcb, 0) == 2);
    EXPECT_OOSEQ(tcp_oos_seg_seqno(pcb, 1) == 4);
    EXPECT_OOSEQ(tcp_oos_seg_tcplen(pcb, 1) == 8);

    /* pass the segment to tcp_input */
    test_tcp_input(p_3_11, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 0);
    EXPECT(counters.recved_bytes == 0);
    EXPECT(counters.err_calls == 0);
    /* check ooseq queue */
    EXPECT_OOSEQ(tcp_oos_count(pcb) == 2);
    EXPECT_OOSEQ(tcp_oos_seg_seqno(pcb, 0) == 1);
    EXPECT_OOSEQ(tcp_oos_seg_tcplen(pcb, 0) == 2);
    /* p_3_11 has removed p_4_8 from ooseq */
    EXPECT_OOSEQ(tcp_oos_seg_seqno(pcb, 1) == 3);
    EXPECT_OOSEQ(tcp_oos_seg_tcplen(pcb, 1) == 11);

    /* pass the segment to tcp_input */
    test_tcp_input(p_2_12, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 0);
    EXPECT(counters.recved_bytes == 0);
    EXPECT(counters.err_calls == 0);
    /* check ooseq queue */
    EXPECT_OOSEQ(tcp_oos_count(pcb) == 2);
    EXPECT_OOSEQ(tcp_oos_seg_seqno(pcb, 0) == 1);
    EXPECT_OOSEQ(tcp_oos_seg_tcplen(pcb, 0) == 1);
    EXPECT_OOSEQ(tcp_oos_seg_seqno(pcb, 1) == 2);
    EXPECT_OOSEQ(tcp_oos_seg_tcplen(pcb, 1) == 12);

    /* pass the segment to tcp_input */
    test_tcp_input(pinseq, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 1);
    EXPECT(counters.recved_bytes == 14);
    EXPECT(counters.err_calls == 0);
    EXPECT(pcb->ooseq == NULL);

    /* pass the segment to tcp_input */
    test_tcp_input(p_15_1, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 1);
    EXPECT(counters.recved_bytes == 14);
    EXPECT(counters.err_calls == 0);
    /* check ooseq queue */
    EXPECT_OOSEQ(tcp_oos_count(pcb) == 1);
    EXPECT_OOSEQ(tcp_oos_seg_seqno(pcb, 0) == 15);
    EXPECT_OOSEQ(tcp_oos_seg_tcplen(pcb, 0) == 1);

    /* pass the segment to tcp_input */
    test_tcp_input(p_15_1a, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 1);
    EXPECT(counters.recved_bytes == 14);
    EXPECT(counters.err_calls == 0);
    /* check ooseq queue: unchanged */
    EXPECT_OOSEQ(tcp_oos_count(pcb) == 1);
    EXPECT_OOSEQ(tcp_oos_seg_seqno(pcb, 0) == 15);
    EXPECT_OOSEQ(tcp_oos_seg_tcplen(pcb, 0) == 1);

    /* pass the segment to tcp_input */
    test_tcp_input(pinseqFIN, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 1);
    EXPECT(counters.recv_calls == 2);
    EXPECT(counters.recved_bytes == data_len);
    EXPECT(counters.err_calls == 0);
    EXPECT(pcb->ooseq == NULL);
  }

  /* make sure the pcb is freed */
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 1);
  tcp_abort(pcb);
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
}
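
The trimming these ooseq tests exercise (e.g. p_1_2 shrinking to one byte once p_2_12 is queued) comes down to comparing an incoming segment against rcv_nxt. Below is a generic sketch of that arithmetic, not lwIP's implementation; sequence-number wraparound is ignored for brevity.

#include <stdint.h>
#include <stdio.h>

/* Given rcv_nxt and an incoming segment [seq, seq+len), return how many leading
 * bytes are already received and must be trimmed before delivery or queueing. */
static uint32_t duplicate_prefix(uint32_t rcv_nxt, uint32_t seq, uint32_t len)
{
	if (seq >= rcv_nxt)
		return 0;              /* in sequence or out of sequence: nothing stale */
	if (seq + len > rcv_nxt)
		return rcv_nxt - seq;  /* partial overlap: trim the stale prefix */
	return len;                    /* entirely old: the whole segment is a duplicate */
}

int main(void)
{
	/* using the test's relative seqnos: once 14 bytes are delivered,
	 * a segment covering seqno 2..13 (12 bytes) is entirely stale */
	printf("%u\n", duplicate_prefix(14, 2, 12));   /* prints 12 */
	return 0;
}
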
Beispiel #20
0
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133) 
		return -EINVAL;

	if (usin->sin6_family != AF_INET6) 
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface. */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p) {
		ipv6_addr_copy(&fl.fl6_dst, final_p);
		fl.flags |= FLOWI_FLAG_NOTROUTE;
	}

	if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
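
A hedged userspace counterpart to the IPV6_ADDR_MAPPED branch above: connecting an AF_INET6 stream socket to a ::ffff:a.b.c.d destination is what makes the kernel fall back to tcp_v4_connect(). The address below is from the documentation range and purely illustrative.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 dst;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_port = htons(80);
	/* a v4-mapped destination takes the IPV6_ADDR_MAPPED branch in the kernel */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &dst.sin6_addr);

	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	close(fd);
	return 0;
}
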
Beispiel #21
0
END_TEST

START_TEST(test_tcp_recv_ooseq_max_pbufs)
{
#if TCP_OOSEQ_MAX_PBUFS && (TCP_OOSEQ_MAX_PBUFS < ((TCP_WND / TCP_MSS) + 1)) && (PBUF_POOL_BUFSIZE >= (TCP_MSS + PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN))
  int i;
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf *p_ovr;
  struct netif netif;
  int datalen = 0;
  int datalen2;

  for(i = 0; i < sizeof(data_full_wnd); i++) {
    data_full_wnd[i] = (char)i;
  }

  /* initialize local vars */
  test_tcp_init_netif(&netif, NULL, &test_local_ip, &test_netmask);
  /* initialize counter struct */
  memset(&counters, 0, sizeof(counters));
  counters.expected_data_len = TCP_WND;
  counters.expected_data = data_full_wnd;

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &test_local_ip, &test_remote_ip, TEST_LOCAL_PORT, TEST_REMOTE_PORT);
  pcb->rcv_nxt = 0x8000;

  /* don't 'recv' the first segment (1 byte) so that all other segments will be ooseq */

  /* create segments and 'recv' them */
  for(i = 1; i <= TCP_OOSEQ_MAX_PBUFS; i++) {
    int count;
    struct pbuf *p = tcp_create_rx_segment(pcb, &data_full_wnd[i],
                                           1, i, 0, TCP_ACK);
    EXPECT_RET(p != NULL);
    EXPECT_RET(p->next == NULL);
    /* pass the segment to tcp_input */
    test_tcp_input(p, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 0);
    EXPECT(counters.recved_bytes == 0);
    EXPECT(counters.err_calls == 0);
    /* check ooseq queue */
    count = tcp_oos_pbuf_count(pcb);
    EXPECT_OOSEQ(count == i);
    datalen = tcp_oos_tcplen(pcb);
    EXPECT_OOSEQ(datalen == i);
  }

  /* pass in one more segment, overrunning the limit */
  p_ovr = tcp_create_rx_segment(pcb, &data_full_wnd[i+1], 1, i+1, 0, TCP_ACK);
  EXPECT_RET(p_ovr != NULL);
  /* pass the segment to tcp_input */
  test_tcp_input(p_ovr, &netif);
  /* check if counters are as expected */
  EXPECT(counters.close_calls == 0);
  EXPECT(counters.recv_calls == 0);
  EXPECT(counters.recved_bytes == 0);
  EXPECT(counters.err_calls == 0);
  /* check ooseq queue (ensure the new segment was not accepted) */
  EXPECT_OOSEQ(tcp_oos_count(pcb) == (i-1));
  datalen2 = tcp_oos_tcplen(pcb);
  EXPECT_OOSEQ(datalen2 == (i-1));

  /* make sure the pcb is freed */
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 1);
  tcp_abort(pcb);
  EXPECT(MEMP_STATS_GET(used, MEMP_TCP_PCB) == 0);
#endif /* TCP_OOSEQ_MAX_PBUFS && (TCP_OOSEQ_MAX_PBUFS < ((TCP_WND / TCP_MSS) + 1)) && (PBUF_POOL_BUFSIZE >= (TCP_MSS + PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)) */
  LWIP_UNUSED_ARG(_i);
}
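
The policy this test checks is a hard cap on buffers queued out of order: once the cap is hit, further ooseq segments are dropped and left for the peer to retransmit. A generic sketch of such a bounded enqueue (the names are illustrative, not lwIP's):

#include <stdio.h>

struct ooseq_budget {
	int pbufs;    /* buffers currently queued out of order */
	int limit;    /* the role TCP_OOSEQ_MAX_PBUFS plays above */
};

/* Returns 1 if the segment fits within the budget and is queued, 0 if dropped. */
static int ooseq_try_enqueue(struct ooseq_budget *q, int seg_pbufs)
{
	if (q->pbufs + seg_pbufs > q->limit)
		return 0;
	q->pbufs += seg_pbufs;
	return 1;
}

int main(void)
{
	struct ooseq_budget q = { 0, 4 };   /* pretend the limit is 4 buffers */
	int i;
	for (i = 1; i <= 5; i++)
		printf("ooseq segment %d: %s\n", i,
		       ooseq_try_enqueue(&q, 1) ? "queued" : "dropped");
	return 0;
}
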
Beispiel #22
0
END_TEST

/** Check that we handle malformed tcp headers, and discard the pbuf(s) */
START_TEST(test_tcp_malformed_header)
{
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf* p;
  char data[] = {1, 2, 3, 4};
  ip_addr_t remote_ip, local_ip, netmask;
  u16_t data_len, chksum;
  u16_t remote_port = 0x100, local_port = 0x101;
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  struct tcp_hdr *hdr;
  LWIP_UNUSED_ARG(_i);

  /* initialize local vars */
  memset(&netif, 0, sizeof(netif));
  IP_ADDR4(&local_ip, 192, 168, 1, 1);
  IP_ADDR4(&remote_ip, 192, 168, 1, 2);
  IP_ADDR4(&netmask,   255, 255, 255, 0);
  test_tcp_init_netif(&netif, &txcounters, &local_ip, &netmask);
  data_len = sizeof(data);
  /* initialize counter struct */
  memset(&counters, 0, sizeof(counters));
  counters.expected_data_len = data_len;
  counters.expected_data = data;

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &local_ip, &remote_ip, local_port, remote_port);

  /* create a segment */
  p = tcp_create_rx_segment(pcb, counters.expected_data, data_len, 0, 0, 0);

  pbuf_header(p, -(s16_t)sizeof(struct ip_hdr));

  hdr = (struct tcp_hdr *)p->payload;
  TCPH_HDRLEN_FLAGS_SET(hdr, 15, 0x3d1);

  hdr->chksum = 0;

  chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len,
                             &remote_ip, &local_ip);

  hdr->chksum = chksum;

  pbuf_header(p, sizeof(struct ip_hdr));

  EXPECT(p != NULL);
  EXPECT(p->next == NULL);
  if (p != NULL) {
    /* pass the segment to tcp_input */
    test_tcp_input(p, &netif);
    /* check if counters are as expected */
    EXPECT(counters.close_calls == 0);
    EXPECT(counters.recv_calls == 0);
    EXPECT(counters.recved_bytes == 0);
    EXPECT(counters.err_calls == 0);
  }

  /* make sure the pcb is freed */
  EXPECT(lwip_stats.memp[MEMP_TCP_PCB].used == 1);
  tcp_abort(pcb);
  EXPECT(lwip_stats.memp[MEMP_TCP_PCB].used == 0);
}
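
The malformation injected above is a data offset of 15, i.e. a claimed 60-byte TCP header inside a much shorter segment. A generic sketch of the sanity check a receiver applies before trusting that field (not lwIP's tcp_input()):

#include <stdint.h>
#include <stdio.h>

/* tcp points at the TCP header; seg_len is the TCP portion of the packet in bytes. */
static int tcp_header_ok(const uint8_t *tcp, uint32_t seg_len)
{
	uint32_t hdr_len;

	if (seg_len < 20)                         /* shorter than the fixed header */
		return 0;
	hdr_len = (uint32_t)(tcp[12] >> 4) * 4;   /* data offset is in 32-bit words */
	if (hdr_len < 20 || hdr_len > seg_len)    /* e.g. offset 15 -> 60 bytes claimed */
		return 0;
	return 1;
}

int main(void)
{
	uint8_t hdr[24] = { 0 };
	hdr[12] = 15 << 4;   /* claim a 60-byte header inside a 24-byte segment */
	printf("%s\n", tcp_header_ok(hdr, sizeof(hdr)) ? "ok" : "malformed");
	return 0;
}
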
Beispiel #23
0
END_TEST


/** Provoke fast retransmission by duplicate ACKs and then recover by ACKing all sent data.
 * At the end, send more data. */
START_TEST(test_tcp_fast_retx_recover)
{
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  struct pbuf* p;
  char data1[] = { 1,  2,  3,  4};
  char data2[] = { 5,  6,  7,  8};
  char data3[] = { 9, 10, 11, 12};
  char data4[] = {13, 14, 15, 16};
  char data5[] = {17, 18, 19, 20};
  char data6[] = {21, 22, 23, 24};
  ip_addr_t remote_ip, local_ip, netmask;
  u16_t remote_port = 0x100, local_port = 0x101;
  err_t err;
  LWIP_UNUSED_ARG(_i);

  /* initialize local vars */
  IP_ADDR4(&local_ip,  192, 168,   1, 1);
  IP_ADDR4(&remote_ip, 192, 168,   1, 2);
  IP_ADDR4(&netmask,   255, 255, 255, 0);
  test_tcp_init_netif(&netif, &txcounters, &local_ip, &netmask);
  memset(&counters, 0, sizeof(counters));

  /* create and initialize the pcb */
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  tcp_set_state(pcb, ESTABLISHED, &local_ip, &remote_ip, local_port, remote_port);
  pcb->mss = TCP_MSS;
  /* disable initial congestion window (we don't send a SYN here...) */
  pcb->cwnd = pcb->snd_wnd;

  /* send data1 */
  err = tcp_write(pcb, data1, sizeof(data1), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  EXPECT_RET(txcounters.num_tx_calls == 1);
  EXPECT_RET(txcounters.num_tx_bytes == sizeof(data1) + sizeof(struct tcp_hdr) + sizeof(struct ip_hdr));
  memset(&txcounters, 0, sizeof(txcounters));
 /* "recv" ACK for data1 */
  p = tcp_create_rx_segment(pcb, NULL, 0, 0, 4, TCP_ACK);
  EXPECT_RET(p != NULL);
  test_tcp_input(p, &netif);
  EXPECT_RET(txcounters.num_tx_calls == 0);
  EXPECT_RET(pcb->unacked == NULL);
  /* send data2 */
  err = tcp_write(pcb, data2, sizeof(data2), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  EXPECT_RET(txcounters.num_tx_calls == 1);
  EXPECT_RET(txcounters.num_tx_bytes == sizeof(data2) + sizeof(struct tcp_hdr) + sizeof(struct ip_hdr));
  memset(&txcounters, 0, sizeof(txcounters));
  /* duplicate ACK for data1 (data2 is lost) */
  p = tcp_create_rx_segment(pcb, NULL, 0, 0, 0, TCP_ACK);
  EXPECT_RET(p != NULL);
  test_tcp_input(p, &netif);
  EXPECT_RET(txcounters.num_tx_calls == 0);
  EXPECT_RET(pcb->dupacks == 1);
  /* send data3 */
  err = tcp_write(pcb, data3, sizeof(data3), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  /* nagle enabled, no tx calls */
  EXPECT_RET(txcounters.num_tx_calls == 0);
  EXPECT_RET(txcounters.num_tx_bytes == 0);
  memset(&txcounters, 0, sizeof(txcounters));
  /* 2nd duplicate ACK for data1 (data2 and data3 are lost) */
  p = tcp_create_rx_segment(pcb, NULL, 0, 0, 0, TCP_ACK);
  EXPECT_RET(p != NULL);
  test_tcp_input(p, &netif);
  EXPECT_RET(txcounters.num_tx_calls == 0);
  EXPECT_RET(pcb->dupacks == 2);
  /* queue data4, don't send it (unsent-oversize is != 0) */
  err = tcp_write(pcb, data4, sizeof(data4), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  /* 3rd duplicate ACK for data1 (data2 and data3 are lost) -> fast retransmission */
  p = tcp_create_rx_segment(pcb, NULL, 0, 0, 0, TCP_ACK);
  EXPECT_RET(p != NULL);
  test_tcp_input(p, &netif);
  /*EXPECT_RET(txcounters.num_tx_calls == 1);*/
  EXPECT_RET(pcb->dupacks == 3);
  memset(&txcounters, 0, sizeof(txcounters));
  /* @todo: check expected data? */
  
  /* send data5, not output yet */
  err = tcp_write(pcb, data5, sizeof(data5), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  /*err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);*/
  EXPECT_RET(txcounters.num_tx_calls == 0);
  EXPECT_RET(txcounters.num_tx_bytes == 0);
  memset(&txcounters, 0, sizeof(txcounters));
  {
    int i = 0;
    do
    {
      err = tcp_write(pcb, data6, TCP_MSS, TCP_WRITE_FLAG_COPY);
      i++;
    }while(err == ERR_OK);
    EXPECT_RET(err != ERR_OK);
  }
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  /*EXPECT_RET(txcounters.num_tx_calls == 0);
  EXPECT_RET(txcounters.num_tx_bytes == 0);*/
  memset(&txcounters, 0, sizeof(txcounters));

  /* send even more data */
  err = tcp_write(pcb, data5, sizeof(data5), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  /* ...and even more data */
  err = tcp_write(pcb, data5, sizeof(data5), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  /* ...and even more data */
  err = tcp_write(pcb, data5, sizeof(data5), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  /* ...and even more data */
  err = tcp_write(pcb, data5, sizeof(data5), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);

  /* send ACKs for data2 and data3 */
  p = tcp_create_rx_segment(pcb, NULL, 0, 0, 12, TCP_ACK);
  EXPECT_RET(p != NULL);
  test_tcp_input(p, &netif);
  /*EXPECT_RET(txcounters.num_tx_calls == 0);*/

  /* ...and even more data */
  err = tcp_write(pcb, data5, sizeof(data5), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);
  /* ...and even more data */
  err = tcp_write(pcb, data5, sizeof(data5), TCP_WRITE_FLAG_COPY);
  EXPECT_RET(err == ERR_OK);
  err = tcp_output(pcb);
  EXPECT_RET(err == ERR_OK);

#if 0
  /* create expected segment */
  p = tcp_create_rx_segment(pcb, counters.expected_data, data_len, 0, 0, 0);
  EXPECT_RET(p != NULL);
  if (p != NULL) {
    /* pass the segment to tcp_input */
    test_tcp_input(p, &netif);
    /* check if counters are as expected */
    EXPECT_RET(counters.close_calls == 0);
    EXPECT_RET(counters.recv_calls == 1);
    EXPECT_RET(counters.recved_bytes == data_len);
    EXPECT_RET(counters.err_calls == 0);
  }
#endif
  /* make sure the pcb is freed */
  EXPECT_RET(lwip_stats.memp[MEMP_TCP_PCB].used == 1);
  tcp_abort(pcb);
  EXPECT_RET(lwip_stats.memp[MEMP_TCP_PCB].used == 0);
}
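
The duplicate-ACK sequence above drives classic RFC 5681 fast retransmit: three ACKs for the same ackno with no new data acknowledged trigger an immediate retransmission of the first unacknowledged segment. A stripped-down sketch of that bookkeeping (not lwIP's code; a full implementation also checks the window and payload length):

#include <stdint.h>
#include <stdio.h>

struct fr_state {
	uint32_t lastack;    /* highest cumulative ACK seen            */
	int      dupacks;    /* consecutive duplicate ACKs for lastack */
};

/* Returns 1 when the third duplicate ACK arrives, i.e. when the first
 * unacknowledged segment should be retransmitted immediately. */
static int on_ack(struct fr_state *s, uint32_t ackno)
{
	if (ackno == s->lastack) {
		if (++s->dupacks == 3)
			return 1;               /* enter fast retransmit */
	} else {
		s->lastack = ackno;             /* new data acked */
		s->dupacks = 0;
	}
	return 0;
}

int main(void)
{
	struct fr_state s = { 4, 0 };   /* as if data1 (4 bytes) were already acked */
	int i;
	for (i = 1; i <= 3; i++)
		printf("dup ack %d -> fast retransmit: %d\n", i, on_ack(&s, 4));
	return 0;
}
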
Beispiel #24
0
END_TEST

/** Send data with sequence numbers that wrap around the u32_t range.
 * Then, provoke RTO retransmission and check that all
 * segment lists are still properly sorted. */
START_TEST(test_tcp_rto_rexmit_wraparound)
{
  struct netif netif;
  struct test_tcp_txcounters txcounters;
  struct test_tcp_counters counters;
  struct tcp_pcb* pcb;
  ip_addr_t remote_ip, local_ip, netmask;
  u16_t remote_port = 0x100, local_port = 0x101;
  err_t err;
#define SEQNO1 (0xFFFFFF00 - TCP_MSS)
#define ISS    6510
  u16_t i, sent_total = 0;
  u32_t seqnos[] = {
    SEQNO1,
    SEQNO1 + (1 * TCP_MSS),
    SEQNO1 + (2 * TCP_MSS),
    SEQNO1 + (3 * TCP_MSS),
    SEQNO1 + (4 * TCP_MSS),
    SEQNO1 + (5 * TCP_MSS)};
  LWIP_UNUSED_ARG(_i);

  for (i = 0; i < sizeof(tx_data); i++) {
    tx_data[i] = (u8_t)i;
  }

  /* initialize local vars */
  IP_ADDR4(&local_ip,  192, 168,   1, 1);
  IP_ADDR4(&remote_ip, 192, 168,   1, 2);
  IP_ADDR4(&netmask,   255, 255, 255, 0);
  test_tcp_init_netif(&netif, &txcounters, &local_ip, &netmask);
  memset(&counters, 0, sizeof(counters));

  /* create and initialize the pcb */
  tcp_ticks = 0;
  tcp_ticks = 0 - tcp_next_iss();
  tcp_ticks = SEQNO1 - tcp_next_iss();
  pcb = test_tcp_new_counters_pcb(&counters);
  EXPECT_RET(pcb != NULL);
  EXPECT(pcb->lastack == SEQNO1);
  tcp_set_state(pcb, ESTABLISHED, &local_ip, &remote_ip, local_port, remote_port);
  pcb->mss = TCP_MSS;
  /* disable initial congestion window (we don't send a SYN here...) */
  pcb->cwnd = 2*TCP_MSS;

  /* send 6 mss-sized segments */
  for (i = 0; i < 6; i++) {
    err = tcp_write(pcb, &tx_data[sent_total], TCP_MSS, TCP_WRITE_FLAG_COPY);
    EXPECT_RET(err == ERR_OK);
    sent_total += TCP_MSS;
  }
  check_seqnos(pcb->unsent, 6, seqnos);
  EXPECT(pcb->unacked == NULL);
  err = tcp_output(pcb);
  EXPECT(txcounters.num_tx_calls == 2);
  EXPECT(txcounters.num_tx_bytes == 2 * (TCP_MSS + 40U));
  memset(&txcounters, 0, sizeof(txcounters));

  check_seqnos(pcb->unacked, 2, seqnos);
  check_seqnos(pcb->unsent, 4, &seqnos[2]);

  /* call the tcp timer some times */
  for (i = 0; i < 10; i++) {
    test_tcp_tmr();
    EXPECT(txcounters.num_tx_calls == 0);
  }
  /* 11th call to tcp_tmr: RTO rexmit fires */
  test_tcp_tmr();
  EXPECT(txcounters.num_tx_calls == 1);
  check_seqnos(pcb->unacked, 1, seqnos);
  check_seqnos(pcb->unsent, 5, &seqnos[1]);

  /* fake greater cwnd */
  pcb->cwnd = pcb->snd_wnd;
  /* send more data */
  err = tcp_output(pcb);
  EXPECT(err == ERR_OK);
  /* check queues are sorted */
  EXPECT(pcb->unsent == NULL);
  check_seqnos(pcb->unacked, 6, seqnos);

  /* make sure the pcb is freed */
  EXPECT_RET(lwip_stats.memp[MEMP_TCP_PCB].used == 1);
  tcp_abort(pcb);
  EXPECT_RET(lwip_stats.memp[MEMP_TCP_PCB].used == 0);
}
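
The point of this test is that segment ordering must survive the sequence space wrapping past 2^32. A small sketch of why modular (signed-difference) comparison keeps the queues sorted where a naive `<` would not; 536 stands in for TCP_MSS here:

#include <stdint.h>
#include <stdio.h>

/* a is "before" b in sequence space if the signed 32-bit difference is negative */
#define TCP_SEQ_BEFORE(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

int main(void)
{
	uint32_t mss  = 536;                 /* stand-in for TCP_MSS */
	uint32_t seq1 = 0xFFFFFF00u - mss;   /* SEQNO1 from the test above */
	uint32_t seq6 = seq1 + 5 * mss;      /* wraps past 0xFFFFFFFF */

	printf("seq1=0x%08x seq6=0x%08x\n", (unsigned)seq1, (unsigned)seq6);
	printf("naive   seq1 < seq6: %d\n", seq1 < seq6);                /* 0: wrong after wrap */
	printf("modular before:      %d\n", TCP_SEQ_BEFORE(seq1, seq6)); /* 1: correct */
	return 0;
}
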
Beispiel #25
0
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}


	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;


	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sock_i_uid(sk);

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
Beispiel #26
0
void net_timer (unsigned long data)
{
	struct sock *sk = (struct sock*)data;
	int why = sk->timeout;

	/* 
	 * only process if socket is not in use
	 */

	if (sk->users)
	{
		sk->timer.expires = jiffies+HZ;
		add_timer(&sk->timer);
		sti();
		return;
	}

	/* Always see if we need to send an ack. */

	if (sk->ack_backlog && !sk->zapped) 
	{
		sk->prot->read_wakeup (sk);
		if (!sk->dead)
			sk->data_ready(sk,0);
	}

	/* Now we need to figure out why the socket was on the timer. */

	switch (why) 
	{
		case TIME_DONE:
			/* If the socket hasn't been closed off, re-try a bit later */
			if (!sk->dead) {
				reset_timer(sk, TIME_DONE, TCP_DONE_TIME);
				break;
			}

			if (sk->state != TCP_CLOSE) 
			{
				printk ("non CLOSE socket in time_done\n");
				break;
			}
			destroy_sock (sk);
			break;

		case TIME_DESTROY:
		/*
		 *	We've waited for a while for all the memory associated with
		 *	the socket to be freed.
		 */

			destroy_sock(sk);
			break;

		case TIME_CLOSE:
			/* We've waited long enough, close the socket. */
			tcp_set_state(sk, TCP_CLOSE);
			if (!sk->dead)
				sk->state_change(sk);
			sk->shutdown = SHUTDOWN_MASK;
			break;

		default:
			printk ("net_timer: timer expired - reason %d is unknown\n", why);
			break;
	}
}