Example #1
0
/*
 * Hook inserted to be called before each receive packet.
 * Note: arguments must match tcp_rcv_established()!
 *
 * Snapshots the connection's transmit state (cwnd, ssthresh, srtt,
 * windows, addresses/ports) into the tcp_probe ring buffer whenever the
 * port filter matches, then wakes any reader sleeping on tcp_probe.wait.
 *
 * NOTE(review): this excerpt is truncated — the closing jprobe_return()
 * and function brace are not visible in this fragment.
 */
static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_sock *inet = inet_sk(sk);

	/* Only update if port matches (port == 0 matches everything).
	 * Unless 'full' logging is requested, skip samples whose cwnd is
	 * unchanged since the previously logged entry. */
	if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port)
	    && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {

		spin_lock(&tcp_probe.lock);
		/* If log fills, just silently drop; avail > 1 keeps at
		 * least one slot free in the ring. */
		if (tcp_probe_avail() > 1) {
			struct tcp_log *p = tcp_probe.log + tcp_probe.head;

			p->tstamp = ktime_get();
			p->saddr = inet->saddr;	/* addresses/ports kept in network byte order */
			p->sport = inet->sport;
			p->daddr = inet->daddr;
			p->dport = inet->dport;
			p->length = skb->len;
			p->snd_nxt = tp->snd_nxt;
			p->snd_una = tp->snd_una;
			p->snd_cwnd = tp->snd_cwnd;
			p->snd_wnd = tp->snd_wnd;
			p->ssthresh = tcp_current_ssthresh(sk);
			p->srtt = tp->srtt >> 3;	/* undo the stack's <<3 srtt scaling */

			tcp_probe.head = (tcp_probe.head + 1) % bufsize;
		}
		/* Remember last cwnd even when the entry was dropped, so the
		 * "!full" filter above stays in sync. */
		tcp_probe.lastcwnd = tp->snd_cwnd;
		spin_unlock(&tcp_probe.lock);

		wake_up(&tcp_probe.wait);
	}
Example #2
0
/*
 * Hook inserted to be called before each receive packet.
 * Note: arguments must match tcp_rcv_established()!
 *
 * Records a snapshot of the connection state (addresses, cwnd, ssthresh,
 * srtt, windows) into the tcp_probe ring buffer when the port or fwmark
 * filter matches, then wakes any reader sleeping on tcp_probe.wait.
 * Handles both IPv4 and IPv6 sockets.
 *
 * NOTE(review): this excerpt is truncated — the function's closing brace
 * is not visible in this fragment.
 */
static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
				 const struct tcphdr *th, unsigned int len)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_sock *inet = inet_sk(sk);

	/* Only update if port or skb mark matches: with no filter set
	 * (port == 0 && fwmark == 0) everything matches; otherwise either
	 * endpoint port or a non-zero fwmark must match.  Unless 'full'
	 * logging is requested, also skip samples with an unchanged cwnd. */
	if (((port == 0 && fwmark == 0) ||
	     ntohs(inet->inet_dport) == port ||
	     ntohs(inet->inet_sport) == port ||
	     (fwmark > 0 && skb->mark == fwmark)) &&
	    (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {

		spin_lock(&tcp_probe.lock);
		/* If log fills, just silently drop; avail > 1 keeps at
		 * least one slot free in the ring. */
		if (tcp_probe_avail() > 1) {
			struct tcp_log *p = tcp_probe.log + tcp_probe.head;

			p->tstamp = ktime_get();
			/* Capture src/dst endpoints per address family. */
			switch (sk->sk_family) {
			case AF_INET:
				tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
				tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
				break;
			case AF_INET6:
				/* Zero first so the entry is well-defined even
				 * when IPv6 support is compiled out. */
				memset(&p->src.v6, 0, sizeof(p->src.v6));
				memset(&p->dst.v6, 0, sizeof(p->dst.v6));
#if IS_ENABLED(CONFIG_IPV6)
				p->src.v6.sin6_family = AF_INET6;
				p->src.v6.sin6_port = inet->inet_sport;
				p->src.v6.sin6_addr = inet6_sk(sk)->saddr;

				p->dst.v6.sin6_family = AF_INET6;
				p->dst.v6.sin6_port = inet->inet_dport;
				p->dst.v6.sin6_addr = sk->sk_v6_daddr;
#endif
				break;
			default:
				BUG();
			}

			p->length = skb->len;
			p->snd_nxt = tp->snd_nxt;
			p->snd_una = tp->snd_una;
			p->snd_cwnd = tp->snd_cwnd;
			p->snd_wnd = tp->snd_wnd;
			p->rcv_wnd = tp->rcv_wnd;
			p->ssthresh = tcp_current_ssthresh(sk);
			p->srtt = tp->srtt_us >> 3;	/* undo the stack's <<3 srtt scaling */

			/* Mask instead of modulo: assumes bufsize is a
			 * power of two. */
			tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
		}
		/* Remember last cwnd even when the entry was dropped, so the
		 * "!full" filter above stays in sync. */
		tcp_probe.lastcwnd = tp->snd_cwnd;
		spin_unlock(&tcp_probe.lock);

		wake_up(&tcp_probe.wait);
	}
Example #3
0
/*
 * Hook inserted to be called before each receive packet.
 * Note: arguments must match tcp_rcv_established()!
 *
 * Older variant: instead of a ring buffer it formats one log line per
 * matching packet via printl() (src:port dst:port len snd_nxt snd_una
 * cwnd ssthresh snd_wnd srtt).
 *
 * NOTE(review): this excerpt is truncated — the closing jprobe_return()
 * and function brace are not visible in this fragment.
 */
static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_sock *inet = inet_sk(sk);

	/* Only update if port matches (port == 0 matches everything).
	 * Unless 'full' logging is requested, skip packets whose cwnd is
	 * unchanged since the last logged line. */
	if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port)
	    && (full || tp->snd_cwnd != tcpw.lastcwnd)) {
		printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %#x %#x %u %u %u %u\n",
		       NIPQUAD(inet->saddr), ntohs(inet->sport),
		       NIPQUAD(inet->daddr), ntohs(inet->dport),
		       skb->len, tp->snd_nxt, tp->snd_una,
		       tp->snd_cwnd, tcp_current_ssthresh(sk),
		       tp->snd_wnd, tp->srtt >> 3);
		tcpw.lastcwnd = tp->snd_cwnd;
	}
Example #4
0
/*
 * Hook inserted to be called before each tcp_sendmsg().
 * Note: arguments must match tcp_sendmsg()!
 *
 * For connections matching the port filter (port == 0 matches all),
 * prints one log line via printl(): src:port dst:port size snd_nxt
 * snd_una cwnd ssthresh snd_wnd.
 *
 * As a jprobe handler it must end with jprobe_return(); the return
 * value is ignored.
 */
static int jtcp_sendmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t size)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_sock *inet = inet_sk(sk);

	if (port == 0 || ntohs(inet->dport) == port ||
	    ntohs(inet->sport) == port) {
		/* %zu: 'size' is a size_t; %d would be a format/argument
		 * type mismatch. */
		printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %zu %#x %#x %u %u %u\n",
		       NIPQUAD(inet->saddr), ntohs(inet->sport),
		       NIPQUAD(inet->daddr), ntohs(inet->dport),
		       size, tp->snd_nxt, tp->snd_una,
		       tp->snd_cwnd, tcp_current_ssthresh(sk),
		       tp->snd_wnd);
	}

	jprobe_return();
	return 0;
}
Example #5
0
/*
 * TCP Vegas congestion-avoidance step, driven once per incoming ACK.
 *
 * While Vegas is inactive we defer entirely to Reno.  Otherwise the
 * window is only re-evaluated once per RTT: when the ACK passes
 * beg_snd_nxt we compare the window we have against the window the
 * measured rate would justify, and nudge snd_cwnd toward keeping
 * between alpha and beta extra segments queued in the network.
 */
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct vegas *ca = inet_csk_ca(sk);

	if (!ca->doing_vegas_now) {
		tcp_reno_cong_avoid(sk, ack, acked);
		return;
	}

	/* Not yet a full RTT since the last adjustment: plain slow start
	 * if applicable, nothing else to do. */
	if (!after(ack, ca->beg_snd_nxt)) {
		if (tcp_in_slow_start(tp))
			tcp_slow_start(tp, acked);
		return;
	}

	/* One RTT has elapsed: do the once-per-RTT Vegas adjustment. */

	/* Mark the edge of the current window; the next adjustment
	 * happens when an ACK moves past it. */
	ca->beg_snd_nxt = tp->snd_nxt;

	/* The Vegas math needs enough RTT samples to be confident at
	 * least one was not inflated by a delayed ACK.  With only 2
	 * samples we are likely seeing 1 ACK per RTT (i.e. delayed
	 * ACKs); 3 or more is considered safe. */
	if (ca->cntRTT <= 2) {
		/* Too few samples -- fall back to Reno behaviour. */
		tcp_reno_cong_avoid(sk, ack, acked);
	} else {
		u32 vrtt, queued;
		u64 target;

		/* Use the minimum RTT observed during the last RTT:
		 * the min filters out delayed-ACK noise, at the price
		 * of detecting congestion slightly later. */
		vrtt = ca->minRTT;

		/* Window we ought to have if we were not overdriving
		 * the path: (actual rate in segments) * baseRTT. */
		target = (u64)tp->snd_cwnd * ca->baseRTT;
		do_div(target, vrtt);

		/* "Diff" from the Arizona Vegas papers: how many extra
		 * segments the current window keeps queued in the net. */
		queued = tp->snd_cwnd * (vrtt - ca->baseRTT) / ca->baseRTT;

		if (queued > gamma && tcp_in_slow_start(tp)) {
			/* Overshooting during slow start: pin cwnd to the
			 * measured rate (+1 to recover what integer
			 * truncation loses) and drop into congestion
			 * avoidance. */
			tp->snd_cwnd = min(tp->snd_cwnd, (u32)target + 1);
			tp->snd_ssthresh = tcp_vegas_ssthresh(tp);

		} else if (tcp_in_slow_start(tp)) {
			tcp_slow_start(tp, acked);
		} else {
			/* Congestion avoidance proper. */
			if (queued > beta) {
				/* Too much queued -- back off by one. */
				tp->snd_cwnd--;
				tp->snd_ssthresh
					= tcp_vegas_ssthresh(tp);
			} else if (queued < alpha) {
				/* Too little queued -- speed up by one. */
				tp->snd_cwnd++;
			}
			/* Otherwise we are pacing just right; leave
			 * cwnd alone. */
		}

		/* Keep cwnd within [2, snd_cwnd_clamp]. */
		if (tp->snd_cwnd < 2)
			tp->snd_cwnd = 2;
		else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
			tp->snd_cwnd = tp->snd_cwnd_clamp;

		tp->snd_ssthresh = tcp_current_ssthresh(sk);
	}

	/* Wipe the slate clean for the next RTT. */
	ca->cntRTT = 0;
	ca->minRTT = 0x7fffffff;
}