Example 1
/*
 * Increase window in response to successful acknowledgment.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
				    u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (after(ack, ca->end_seq))
		update_params(sk);

	/* RFC 2861: only increase cwnd if fully utilized */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In slow start */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	else {
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		ca->acked = 1;

		/* This is a close approximation of:
		 *   tp->snd_cwnd += alpha / tp->snd_cwnd
		 */
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tp->snd_cwnd) {
			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
					   (u32) tp->snd_cwnd_clamp);
			tp->snd_cwnd_cnt = 0;
		}
	}
}
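A note on the fixed-point arithmetic above: ca->alpha is stored scaled by 2^ALPHA_SHIFT, so delta counts whole segments of accumulated credit, and the net effect is growth of roughly alpha segments per RTT (alpha/cwnd per ACK). A minimal standalone sketch of the same accounting; the ALPHA_SHIFT and alpha values here are illustrative assumptions, not the kernel's tuned parameters:

#include <stdio.h>
#include <stdint.h>

#define ALPHA_SHIFT 7	/* assumed fixed-point scale for alpha */

int main(void)
{
	uint32_t alpha = 3 << ALPHA_SHIFT;	/* alpha = 3.0, illustrative */
	uint32_t cwnd = 100, cwnd_cnt = 0;
	uint32_t acks = cwnd;			/* one RTT delivers ~cwnd ACKs */

	for (uint32_t i = 0; i < acks; i++) {
		cwnd_cnt += 1;			/* ca->acked, one per ACK here */
		uint32_t delta = (cwnd_cnt * alpha) >> ALPHA_SHIFT;
		if (delta >= cwnd) {
			cwnd += delta / cwnd;	/* ~alpha/cwnd per ACK on average */
			cwnd_cnt = 0;
		}
	}
	printf("cwnd after one RTT: %u\n", cwnd);	/* prints 102 */
	return 0;
}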
Example 2
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else {
		bictcp_update(ca, tp->snd_cwnd);

		/* In dangerous area, increase slowly.
		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
		 */
		if (tp->snd_cwnd_cnt >= ca->cnt) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		} else
			tp->snd_cwnd_cnt++;
	}
}
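The comment's "in theory this is tp->snd_cwnd += 1 / tp->snd_cwnd" holds because one increment per ca->cnt ACKs is a per-ACK growth of 1/cnt segment; when bictcp_update() sets cnt equal to cwnd, this degenerates to Reno's additive increase. A compilable sketch of the counter pattern; the helper name is mine, not the kernel's:

#include <stdint.h>

/* One segment of growth per `cnt` ACKs -- the open-coded counter
 * above, lifted into a helper purely for illustration. */
static void grow_per_cnt_acks(uint32_t *cwnd, uint32_t *cwnd_cnt,
			      uint32_t cnt, uint32_t clamp)
{
	if (*cwnd_cnt >= cnt) {
		if (*cwnd < clamp)
			(*cwnd)++;
		*cwnd_cnt = 0;
	} else {
		(*cwnd_cnt)++;
	}
}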
Example 3
static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
			     u32 in_flight, int data_acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct hstcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else {
		/* Update AIMD parameters.
		 *
		 * We want to guarantee that:
		 *     hstcp_aimd_vals[ca->ai-1].cwnd <
		 *     snd_cwnd <=
		 *     hstcp_aimd_vals[ca->ai].cwnd
		 */
		if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
			       ca->ai < HSTCP_AIMD_MAX - 1)
				ca->ai++;
		} else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
			while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
				ca->ai--;
		}

		/* Do additive increase */
		if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
			/* cwnd = cwnd + a(w) / cwnd */
			tp->snd_cwnd_cnt += ca->ai + 1;
			if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
				tp->snd_cwnd_cnt -= tp->snd_cwnd;
				tp->snd_cwnd++;
			}
		}
	}
}
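How the index walk behaves may be easier to see against a toy table. The thresholds below are made up for illustration, not the RFC 3649 values the kernel tabulates:

#include <stdio.h>
#include <stdint.h>

struct aimd_row { uint32_t cwnd; uint32_t ai; };

/* Toy thresholds, purely illustrative. */
static const struct aimd_row vals[] = { { 38, 0 }, { 118, 1 }, { 221, 2 } };
#define NROWS (sizeof(vals) / sizeof(vals[0]))

/* Keep ai at the row where vals[ai-1].cwnd < snd_cwnd <= vals[ai].cwnd. */
static unsigned int update_ai(unsigned int ai, uint32_t snd_cwnd)
{
	if (snd_cwnd > vals[ai].cwnd) {
		while (snd_cwnd > vals[ai].cwnd && ai < NROWS - 1)
			ai++;
	} else if (ai && snd_cwnd <= vals[ai - 1].cwnd) {
		while (ai && snd_cwnd <= vals[ai - 1].cwnd)
			ai--;
	}
	return ai;
}

int main(void)
{
	printf("%u\n", update_ai(0, 150));	/* climbs to row 2 */
	printf("%u\n", update_ai(2, 50));	/* falls back to row 1 */
	return 0;
}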
Example 4
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);
	else
		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
}
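Capping the increase counter at TCP_SCALABLE_AI_CNT (50 in the kernel source I'm reading; treat that as an assumption) is what makes the increase multiplicative: past cwnd = 50 the window gains cwnd/50 segments per RTT, a fixed ~2% compounding, instead of Reno's one segment per RTT. A quick comparison:

#include <stdio.h>
#include <stdint.h>

#define AI_CNT 50U	/* assumed value of TCP_SCALABLE_AI_CNT */

int main(void)
{
	uint32_t reno = 1000, scalable = 1000;

	for (int rtt = 1; rtt <= 10; rtt++) {
		reno += 1;			/* Reno: +1 segment per RTT */
		scalable += scalable / AI_CNT;	/* Scalable: ~+2% per RTT */
		printf("rtt %2d: reno=%u scalable=%u\n", rtt, reno, scalable);
	}
	return 0;
}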
Example 5
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);
	else {
		bictcp_update(ca, tp->snd_cwnd);
		tcp_cong_avoid_ai(tp, ca->cnt, 1);
	}
}
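Relative to the open-coded counter in Example 2, the third argument to tcp_cong_avoid_ai() here lets a single stretched ACK credit several segments at once. A simplified sketch of that batched accounting; the real kernel helper differs in detail:

#include <stdint.h>

/* Grow cwnd by one segment per `w` ACKs, crediting `acked` segments
 * at once. Simplified for illustration. */
static void cong_avoid_ai_sketch(uint32_t *cwnd, uint32_t *cwnd_cnt,
				 uint32_t w, uint32_t acked, uint32_t clamp)
{
	*cwnd_cnt += acked;
	if (*cwnd_cnt >= w) {
		uint32_t delta = *cwnd_cnt / w;	/* whole increments earned */

		*cwnd_cnt -= delta * w;		/* keep the remainder */
		*cwnd += delta;
	}
	if (*cwnd > clamp)
		*cwnd = clamp;
}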
Example 6
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else {
		bictcp_update(ca, tp->snd_cwnd);
		tcp_cong_avoid_ai(tp, ca->cnt);
	}
}
Example 7
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
				    u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else {
		tp->snd_cwnd_cnt++;
		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}
	}
}
Example 8
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct vegas *vegas = inet_csk_ca(sk);

    if (!vegas->doing_vegas_now) {
        tcp_reno_cong_avoid(sk, ack, acked);
        return;
    }

    if (after(ack, vegas->beg_snd_nxt)) {
        /* Do the Vegas once-per-RTT cwnd adjustment. */

        /* Save the extent of the current window so we can use this
         * at the end of the next RTT.
         */
        vegas->beg_snd_nxt  = tp->snd_nxt;

        /* We do the Vegas calculations only if we got enough RTT
         * samples that we can be reasonably sure that we got
         * at least one RTT sample that wasn't from a delayed ACK.
         * If we only had 2 samples total,
         * then that means we're getting only 1 ACK per RTT, which
         * means they're almost certainly delayed ACKs.
         * If we have 3 samples, we should be OK.
         */

        if (vegas->cntRTT <= 2) {
            /* We don't have enough RTT samples to do the Vegas
             * calculation, so we'll behave like Reno.
             */
            tcp_reno_cong_avoid(sk, ack, acked);
        } else {
            u32 rtt, diff;
            u64 target_cwnd;

            /* We have enough RTT samples, so, using the Vegas
             * algorithm, we determine if we should increase or
             * decrease cwnd, and by how much.
             */

            /* Pluck out the RTT we are using for the Vegas
             * calculations. This is the min RTT seen during the
             * last RTT. Taking the min filters out the effects
             * of delayed ACKs, at the cost of noticing congestion
             * a bit later.
             */
            rtt = vegas->minRTT;

            /* Calculate the cwnd we should have, if we weren't
             * going too fast.
             *
             * This is:
             *     (actual rate in segments) * baseRTT
             */
            target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
            do_div(target_cwnd, rtt);

            /* Calculate the difference between the window we had,
             * and the window we would like to have. This quantity
             * is the "Diff" from the Arizona Vegas papers.
             */
            diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;

            if (diff > gamma && tcp_in_slow_start(tp)) {
                /* Going too fast. Time to slow down
                 * and switch to congestion avoidance.
                 */

                /* Set cwnd to match the actual rate
                 * exactly:
                 *   cwnd = (actual rate) * baseRTT
                 * Then we add 1 because the integer
                 * truncation robs us of full link
                 * utilization.
                 */
                tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
                tp->snd_ssthresh = tcp_vegas_ssthresh(tp);

            } else if (tcp_in_slow_start(tp)) {
                /* Slow start.  */
                tcp_slow_start(tp, acked);
            } else {
                /* Congestion avoidance. */

                /* Figure out where we would like cwnd
                 * to be.
                 */
                if (diff > beta) {
                    /* The old window was too fast, so
                     * we slow down.
                     */
                    tp->snd_cwnd--;
                    tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
                } else if (diff < alpha) {
                    /* We don't have enough extra packets
                     * in the network, so speed up.
                     */
                    tp->snd_cwnd++;
                } else {
                    /* Sending just as fast as we
                     * should be.
                     */
                }
            }

            if (tp->snd_cwnd < 2)
                tp->snd_cwnd = 2;
            else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
                tp->snd_cwnd = tp->snd_cwnd_clamp;

            tp->snd_ssthresh = tcp_current_ssthresh(sk);
        }

        /* Wipe the slate clean for the next RTT. */
        vegas->cntRTT = 0;
        vegas->minRTT = 0x7fffffff;
    }
    /* Use normal slow start */
    else if (tcp_in_slow_start(tp))
        tcp_slow_start(tp, acked);
}
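A worked instance of the computation above: with baseRTT = 100 ms, a measured minRTT of 125 ms over the last RTT, and cwnd = 40, target_cwnd = 40 * 100 / 125 = 32 and diff = 40 * 25 / 100 = 10 segments sitting in queues; against the module defaults (alpha = 2, beta = 4, to my reading of the module parameters) that is well above beta, so the window is cut back. As a checkable sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t base_rtt = 100, rtt = 125;	/* ms, illustrative */
	uint32_t cwnd = 40;
	uint32_t alpha = 2, beta = 4;		/* assumed module defaults */

	/* cwnd we'd need to send at the measured rate with no queueing */
	uint64_t target_cwnd = (uint64_t)cwnd * base_rtt / rtt;
	/* segments we are keeping queued in the network ("Diff") */
	uint32_t diff = cwnd * (rtt - base_rtt) / base_rtt;

	printf("target_cwnd=%llu diff=%u\n",
	       (unsigned long long)target_cwnd, diff);
	if (diff > beta)
		puts("too many queued -> snd_cwnd--");
	else if (diff < alpha)
		puts("too few queued -> snd_cwnd++");
	else
		puts("in [alpha, beta] -> hold");
	return 0;
}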
Example 9
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
				u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	else if (!yeah->doing_reno_now) {
		/* Scalable */

		tp->snd_cwnd_cnt += yeah->pkts_acked;
		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}

		yeah->pkts_acked = 1;

	} else {
		/* Reno */

		if (tp->snd_cwnd_cnt < tp->snd_cwnd)
			tp->snd_cwnd_cnt++;

		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
			tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}
	}

	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una) outstanding
	 * bytes of data have been ACKed during the course of the RTT, giving
	 * an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up nicely with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, yeah->vegas.beg_snd_nxt)) {

		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */

		if (yeah->vegas.cntRTT > 2) {
			u32 rtt, queue;
			u64 bw;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = yeah->vegas.minRTT;

			/* Compute excess number of packets above bandwidth
			 * Avoid doing full 64 bit divide.
			 */
			bw = tp->snd_cwnd;
			bw *= rtt - yeah->vegas.baseRTT;
			do_div(bw, rtt);
			queue = bw;

			if (queue > TCP_YEAH_ALPHA ||
			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
				if (queue > TCP_YEAH_ALPHA &&
				    tp->snd_cwnd > yeah->reno_count) {
					u32 reduction = min(queue / TCP_YEAH_GAMMA,
							    tp->snd_cwnd >> TCP_YEAH_EPSILON);

					tp->snd_cwnd -= reduction;

					tp->snd_cwnd = max(tp->snd_cwnd,
							   yeah->reno_count);

					tp->snd_ssthresh = tp->snd_cwnd;
				}

				if (yeah->reno_count <= 2)
					yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
				else
					yeah->reno_count++;

				yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
							   0xffffffU);
			} else {
				yeah->fast_count++;

				if (yeah->fast_count > TCP_YEAH_ZETA) {
					yeah->reno_count = 2;
					yeah->fast_count = 0;
				}

				yeah->doing_reno_now = 0;
			}

			yeah->lastQ = queue;
		}

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
		yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;

		/* Wipe the slate clean for the next RTT. */
		yeah->vegas.cntRTT = 0;
		yeah->vegas.minRTT = 0x7fffffff;
	}
}
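The decongestion trigger above is driven by the backlog estimate queue = cwnd * (rtt - baseRTT) / rtt, the number of packets the flow keeps parked at the bottleneck. A worked sketch; TCP_YEAH_ALPHA is 80 in the kernel source I'm working from, and all the numbers here are assumptions:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t alpha = 80;			/* assumed TCP_YEAH_ALPHA */
	uint32_t cwnd = 2000;
	uint32_t base_rtt = 50, rtt = 53;	/* ms, illustrative */

	/* Estimated packets queued at the bottleneck, as computed above:
	 * bw = cwnd * (rtt - baseRTT) / rtt */
	uint64_t queue = (uint64_t)cwnd * (rtt - base_rtt) / rtt;

	printf("queue=%llu -> %s\n", (unsigned long long)queue,
	       queue > alpha ? "decongest (precautionary reduction)"
			     : "stay in fast mode");
	return 0;
}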
Example 10
static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
                                u32 seq_rtt, u32 in_flight, int flag)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct veno *veno = inet_csk_ca(sk);

    if (!veno->doing_veno_now)
        return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);

    /* limited by applications */
    if (!tcp_is_cwnd_limited(sk, in_flight))
        return;

    /* We do the Veno calculations only if we got enough rtt samples */
    if (veno->cntrtt <= 2) {
        /* We don't have enough rtt samples to do the Veno
         * calculation, so we'll behave like Reno.
         */
        tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
    } else {
        u32 rtt, target_cwnd;

        /* We have enough rtt samples, so, using the Veno
         * algorithm, we determine the state of the network.
         */

        rtt = veno->minrtt;

        target_cwnd = ((tp->snd_cwnd * veno->basertt)
                       << V_PARAM_SHIFT) / rtt;

        veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;

        if (tp->snd_cwnd <= tp->snd_ssthresh) {
            /* Slow start.  */
            tcp_slow_start(tp);
        } else {
            /* Congestion avoidance. */
            if (veno->diff < beta) {
                /* In the "non-congestive state", increase cwnd
                 * every rtt.
                 */
                if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
                    if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                        tp->snd_cwnd++;
                    tp->snd_cwnd_cnt = 0;
                } else
                    tp->snd_cwnd_cnt++;
            } else {
                /* In the "congestive state", increase cwnd
                 * every other rtt.
                 */
                if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
                    if (veno->inc &&
                        tp->snd_cwnd < tp->snd_cwnd_clamp) {
                        tp->snd_cwnd++;
                        veno->inc = 0;
                    } else
                        veno->inc = 1;
                    tp->snd_cwnd_cnt = 0;
                } else
                    tp->snd_cwnd_cnt++;
            }

        }
        if (tp->snd_cwnd < 2)
            tp->snd_cwnd = 2;
        else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
            tp->snd_cwnd = tp->snd_cwnd_clamp;
    }
    /* Wipe the slate clean for the next rtt. */
    /* veno->cntrtt = 0; */
    veno->minrtt = 0x7fffffff;
}
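Veno keeps diff in fixed point with V_PARAM_SHIFT fractional bits, so the comparison against beta happens in fractional-segment units. A sketch using V_PARAM_SHIFT = 1 and beta = 3 << V_PARAM_SHIFT, which is my reading of the module defaults; the traffic numbers are illustrative:

#include <stdio.h>
#include <stdint.h>

#define V_PARAM_SHIFT 1	/* assumed; 1 fractional bit */

int main(void)
{
	uint32_t cwnd = 20, base_rtt = 100, rtt = 120;	/* ms, illustrative */
	uint32_t beta = 3 << V_PARAM_SHIFT;		/* assumed default */

	/* Same fixed-point arithmetic as the function above. */
	uint32_t target = ((cwnd * base_rtt) << V_PARAM_SHIFT) / rtt;
	uint32_t diff = (cwnd << V_PARAM_SHIFT) - target;

	/* diff is in half-segment units: 7 here, i.e. ~3.5 segments queued */
	printf("diff=%u (%u.%u segments)\n", diff,
	       diff >> V_PARAM_SHIFT, (diff & 1) * 5);
	puts(diff < beta ? "non-congestive: grow every RTT"
			 : "congestive: grow every other RTT");
	return 0;
}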
Example 11
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
				 u32 seq_rtt, u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct vegas *vegas = inet_csk_ca(sk);

	if (!vegas->doing_vegas_now)
		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);

	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
	 * bytes of data have been ACKed during the course of the RTT, giving
	 * an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up nicely with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, vegas->beg_snd_nxt)) {
		/* Do the Vegas once-per-RTT cwnd adjustment. */
		u32 old_wnd, old_snd_cwnd;

		/* Here old_wnd is essentially the window of data that was
		 * sent during the previous RTT, and has all
		 * been acknowledged in the course of the RTT that ended
		 * with the ACK we just received. Likewise, old_snd_cwnd
		 * is the cwnd during the previous RTT.
		 */
		old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
			tp->mss_cache;
		old_snd_cwnd = vegas->beg_snd_cwnd;

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		vegas->beg_snd_una  = vegas->beg_snd_nxt;
		vegas->beg_snd_nxt  = tp->snd_nxt;
		vegas->beg_snd_cwnd = tp->snd_cwnd;

		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */

		if (vegas->cntRTT <= 2) {
			/* We don't have enough RTT samples to do the Vegas
			 * calculation, so we'll behave like Reno.
			 */
			tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
		} else {
			u32 rtt, target_cwnd, diff;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = vegas->minRTT;

			/* Calculate the cwnd we should have, if we weren't
			 * going too fast.
			 *
			 * This is:
			 *     (actual rate in segments) * baseRTT
			 * We keep it as a fixed point number with
			 * V_PARAM_SHIFT bits to the right of the binary point.
			 */
			target_cwnd = ((old_wnd * vegas->baseRTT)
				       << V_PARAM_SHIFT) / rtt;

			/* Calculate the difference between the window we had,
			 * and the window we would like to have. This quantity
			 * is the "Diff" from the Arizona Vegas papers.
			 *
			 * Again, this is a fixed point number with
			 * V_PARAM_SHIFT bits to the right of the binary
			 * point.
			 */
			diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;

			if (tp->snd_cwnd <= tp->snd_ssthresh) {
				/* Slow start.  */
				if (diff > gamma) {
					/* Going too fast. Time to slow down
					 * and switch to congestion avoidance.
					 */
					tp->snd_ssthresh = 2;

					/* Set cwnd to match the actual rate
					 * exactly:
					 *   cwnd = (actual rate) * baseRTT
					 * Then we add 1 because the integer
					 * truncation robs us of full link
					 * utilization.
					 */
					tp->snd_cwnd = min(tp->snd_cwnd,
							   (target_cwnd >>
							    V_PARAM_SHIFT)+1);

				}
				tcp_slow_start(tp);
			} else {