Example #1
/* Prepare to enter fast recovery: set the slow-start threshold to half
 * of the smaller of the send window and the congestion window, but
 * never below 2 MSS, then resize the send socket buffer.
 */
void
tcp_newreno_pre_fr(struct tcpcb *tp) {
	uint32_t win;

	win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
	if (win < 2)
		win = 2;
	tp->snd_ssthresh = win * tp->t_maxseg;
	tcp_cc_resize_sndbuf(tp);
}
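To make the arithmetic concrete, here is a small stand-alone sketch of the same halving rule, with hypothetical plain-integer arguments in place of the tcpcb fields. The integer division first truncates the halved window to a whole number of segments, and the result is clamped to at least 2 MSS:

#include <stdio.h>
#include <stdint.h>

/* Stand-alone sketch of the pre-fast-recovery rule above.
 * Hypothetical: plain uint32_t arguments instead of a struct tcpcb. */
static uint32_t
ssthresh_after_loss(uint32_t snd_wnd, uint32_t snd_cwnd, uint32_t maxseg)
{
	uint32_t flight = (snd_wnd < snd_cwnd) ? snd_wnd : snd_cwnd;
	uint32_t win = flight / 2 / maxseg;	/* halve, truncate to whole segments */

	if (win < 2)
		win = 2;			/* never go below 2 MSS */
	return win * maxseg;
}

int
main(void)
{
	/* cwnd = 10 * 1448 = 14480, snd_wnd = 65535: half is 7240 bytes,
	 * which truncates to 5 segments, so ssthresh becomes 7240. */
	printf("%u\n", (unsigned)ssthresh_after_loss(65535, 10 * 1448, 1448));
	return 0;
}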
Example #2
/* Prepare to enter fast recovery: halve the slow-start threshold as
 * above, and also make sure the background slow-start threshold does
 * not exceed it.
 */
void
tcp_ledbat_pre_fr(struct tcpcb *tp) {
	uint32_t win;

	win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
	if (win < 2)
		win = 2;
	tp->snd_ssthresh = win * tp->t_maxseg;
	if (tp->bg_ssthresh > tp->snd_ssthresh)
		tp->bg_ssthresh = tp->snd_ssthresh;

	tcp_cc_resize_sndbuf(tp);
}
Example #3
/* Function to change the congestion window when the retransmit
 * timer fires. The behavior is the same as for best-effort TCP:
 * reduce the congestion window to one segment and start probing
 * the link using "slow start". The slow-start threshold is set to
 * half of the current window, and the background slow-start
 * threshold is lowered along with it.
 */
void
tcp_ledbat_after_timeout(struct tcpcb *tp) {
	if (tp->t_state >= TCPS_ESTABLISHED) {
		u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_ssthresh = win * tp->t_maxseg;

		if (tp->bg_ssthresh > tp->snd_ssthresh)
			tp->bg_ssthresh = tp->snd_ssthresh;

		tp->snd_cwnd = tp->t_maxseg;
		tcp_cc_resize_sndbuf(tp);
	}
}
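Both LEDBAT hooks above (Examples #2 and #3) add one step that the NewReno versions do not have: the background slow-start threshold is pulled down so it never exceeds the regular one. A minimal sketch of just that invariant, with hypothetical plain-integer arguments:

#include <stdint.h>

/* Hypothetical helper: after snd_ssthresh has been lowered, clamp the
 * background slow-start threshold so bg_ssthresh <= snd_ssthresh holds. */
static inline uint32_t
clamp_bg_ssthresh(uint32_t bg_ssthresh, uint32_t snd_ssthresh)
{
	return (bg_ssthresh > snd_ssthresh) ? snd_ssthresh : bg_ssthresh;
}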
Example #4
/* Function to change the congestion window when the retransmit 
 * timer fires.
 */
void
tcp_newreno_after_timeout(struct tcpcb *tp) {
	/*
	 * Close the congestion window down to one segment
	 * (we'll open it by one segment for each ack we get).
	 * Since we probably have a window's worth of unacked
	 * data accumulated, this "slow start" keeps us from
	 * dumping all that data as back-to-back packets (which
	 * might overwhelm an intermediate gateway).
	 *
	 * There are two phases to the opening: Initially we
	 * open by one mss on each ack.  This makes the window
	 * size increase exponentially with time.  If the
	 * window is larger than the path can handle, this
	 * exponential growth results in dropped packet(s)
	 * almost immediately.  To get more time between
	 * drops but still "push" the network to take advantage
	 * of improving conditions, we switch from exponential
	 * to linear window opening at some threshold size.
	 * For a threshold, we use half the current window
	 * size, truncated to a multiple of the mss.
	 *
	 * (the minimum cwnd that will give us exponential
	 * growth is 2 mss.  We don't allow the threshold
	 * to go below this.)
	 */
	if (tp->t_state >= TCPS_ESTABLISHED) {
		u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_ssthresh = win * tp->t_maxseg;

		tp->snd_cwnd = tp->t_maxseg;
		tcp_cc_resize_sndbuf(tp);
	}
}
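The two opening phases described in the comment can be modeled as a per-ACK window update: below ssthresh the window grows by one MSS per ACK (roughly doubling every round trip), and at or above ssthresh it grows by about MSS squared over cwnd per ACK (roughly one MSS per round trip). A simplified sketch of that model, not the actual XNU ACK-processing code:

#include <stdint.h>

/* Simplified per-ACK congestion-window growth, matching the comment
 * above: exponential "slow start" below ssthresh, linear "congestion
 * avoidance" at or above it. A model only, not the XNU implementation. */
static uint32_t
cwnd_after_ack(uint32_t cwnd, uint32_t ssthresh, uint32_t maxseg)
{
	if (cwnd < ssthresh) {
		cwnd += maxseg;			/* one MSS per ACK */
	} else {
		uint32_t incr = (maxseg * maxseg) / cwnd;
		cwnd += (incr > 0) ? incr : 1;	/* about one MSS per RTT */
	}
	return cwnd;
}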
Example #5
static void
tcp_cubic_pre_fr(struct tcpcb *tp)
{
	uint32_t win, avg;
	int32_t dev;
	tp->t_ccstate->cub_epoch_start = 0;
	tp->t_ccstate->cub_tcp_win = 0;
	tp->t_ccstate->cub_target_win = 0;
	tp->t_ccstate->cub_tcp_bytes_acked = 0;

	win = min(tp->snd_cwnd, tp->snd_wnd);
	/*
	 * Note the congestion window at which packet loss occurred as
	 * cub_last_max.
	 *
	 * If the congestion window is less than the last max window when
	 * loss occurred, it indicates that capacity available in the 
	 * network has gone down. This can happen if a new flow has started
	 * and it is capturing some of the bandwidth. To reach convergence
	 * quickly, back off a little more. Disabling fast convergence turns
	 * this behavior off.
	 */
	if (win < tp->t_ccstate->cub_last_max &&
		tcp_cubic_fast_convergence == 1)
		tp->t_ccstate->cub_last_max = win * 
			tcp_cubic_fast_convergence_factor;
	else
		tp->t_ccstate->cub_last_max = win;

	if (tp->t_ccstate->cub_last_max == 0) {
		/*
		 * If last_max is zero because snd_wnd is zero or for
		 * any other reason, initialize it to the amount of data
		 * in flight
		 */
		tp->t_ccstate->cub_last_max = tp->snd_max - tp->snd_una;
	}

	/*
	 * Compute average and mean absolute deviation of the
	 * window at which packet loss occurred.
	 */
	if (tp->t_ccstate->cub_avg_lastmax == 0) {
		tp->t_ccstate->cub_avg_lastmax = tp->t_ccstate->cub_last_max;
	} else {
		/*
		 * Average is computed by taking 63 parts of
		 * history and one part of the most recent value
		 */
		avg = tp->t_ccstate->cub_avg_lastmax;
		avg = (avg << 6) - avg;
		tp->t_ccstate->cub_avg_lastmax =
		    (avg + tp->t_ccstate->cub_last_max) >> 6; 
	}

	/* Calculate the deviation from the average */
	dev = tp->t_ccstate->cub_avg_lastmax - tp->t_ccstate->cub_last_max;

	/* Take the absolute value */
	if (dev < 0)
		dev = -dev;

	if (tp->t_ccstate->cub_mean_dev == 0) {
		tp->t_ccstate->cub_mean_dev = dev;
	} else {
		dev = dev + ((tp->t_ccstate->cub_mean_dev << 4)
		    - tp->t_ccstate->cub_mean_dev);
		tp->t_ccstate->cub_mean_dev = dev >> 4;
	}

	/* Back off the congestion window by the tcp_cubic_backoff factor */
	win = win - (win * tcp_cubic_backoff);
	win = (win / tp->t_maxseg);
	if (win < 2)
		win = 2;
	tp->snd_ssthresh = win * tp->t_maxseg;
	tcp_cc_resize_sndbuf(tp);
}
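The shift arithmetic above amounts to two exponentially weighted moving averages: the average last-max window keeps 63 parts of history to 1 part of the new sample, and the mean deviation keeps 15 parts to 1. A stand-alone sketch of just those two updates, with hypothetical plain-integer arguments:

#include <stdint.h>

/* EWMA of the window at which loss occurred: 63/64 history, 1/64 new
 * sample (the same math as the << 6 / >> 6 code above). */
static uint32_t
update_avg_lastmax(uint32_t avg_lastmax, uint32_t last_max)
{
	uint32_t avg;

	if (avg_lastmax == 0)
		return last_max;
	avg = (avg_lastmax << 6) - avg_lastmax;		/* 63 * avg */
	return (avg + last_max) >> 6;			/* divide by 64 */
}

/* Mean absolute deviation with a 1/16 gain (the << 4 / >> 4 code). */
static uint32_t
update_mean_dev(uint32_t mean_dev, int32_t dev)
{
	if (dev < 0)
		dev = -dev;				/* absolute value */
	if (mean_dev == 0)
		return (uint32_t)dev;
	return ((uint32_t)dev + ((mean_dev << 4) - mean_dev)) >> 4;
}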