Code example #1
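Every example on this page is a call site of red_end_of_idle_period() (declared in include/net/red.h), which clears a RED estimator's idle marker; queues call it as soon as they stop being idle, so that idle time is folded into the average queue size exactly once. The recurring pattern, as a minimal sketch (q and backlog stand in for the caller's state; this is not a complete function):

	/* Fold any pending idle time into qavg, then clear the idle marker
	 * so the estimator resumes normal (non-idle) averaging.
	 */
	q->vars.qavg = red_calc_qavg(&q->parms, &q->vars, backlog);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

This first example is gred_change_vq() from sch_gred.c, in kernels where the RED bookkeeping is split into struct red_parms (configuration) and struct red_vars (runtime state). Reconfiguring a virtual queue whose backlog is empty ends any pending idle period before red_set_parms() and red_set_vars() reinitialize the estimator; the VQ structure comes from a caller-supplied preallocation, so this function itself performs no allocation.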
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
	}

	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}
Code example #2
File: sch_red.c Project: gnensis/linux-2.6.15
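red_change() from the plain RED qdisc in a 2.6.15 tree: the new parameters are applied under sch_tree_lock(), and if the queue is empty the idle period is ended. In this older API the idle state still lives inside struct red_parms, hence red_end_of_idle_period(&q->parms).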
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_RED_MAX];
	struct tc_red_qopt *ctl;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_RED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
				 ctl->Plog, ctl->Scell_log,
				 RTA_DATA(tb[TCA_RED_STAB-1]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}
Code example #3
File: sch_gred.c Project: 3sOx/asuswrt-merlin
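An earlier gred_change_vq() (before the red_vars split and the max_P parameter): the virtual queue is allocated on demand with kzalloc(), and red_end_of_idle_period(&q->parms) runs when the VQ's backlog is zero. Later trees preallocate the VQ outside this function instead; compare examples #1 and #7.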
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}
Code example #4
File: sch_red.c Project: gnensis/linux-2.6.15
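red_requeue() from the 2.6.15 RED qdisc: putting a packet back on the queue means the queue is no longer idle, so any idle period is ended before handing off to qdisc_requeue().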
static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	return qdisc_requeue(skb, sch);
}
Code example #5
File: sch_red.c Project: gnensis/linux-2.6.15
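red_enqueue() from the same tree: red_calc_qavg() recomputes the average queue size (folding in idle time if the queue was idling), the idle period is ended, and red_action() then decides between admitting the packet, ECN-marking it, or dropping it.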
static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
				q->stats.prob_drop++;
				goto congestion_drop;
			}

			q->stats.prob_mark++;
			break;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (red_use_harddrop(q) || !red_use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
			break;
	}

	if (sch->qstats.backlog + skb->len <= q->limit)
		return qdisc_enqueue_tail(skb, sch);

	q->stats.pdrop++;
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
Code example #6
File: sch_gred.c Project: 3sOx/asuswrt-merlin
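gred_requeue() from an older tree that still implements the ->requeue() operation: the virtual queue is looked up via the skb's tc_index; if found, its idle period is ended and its per-VQ backlog counter is restored before the generic requeue. The rate-limited warning covers packets whose DP no longer maps to a configured VQ.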
static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct gred_sched *t = qdisc_priv(sch);
	struct gred_sched_data *q;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		if (net_ratelimit())
			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
			       "for requeue, screwing up backlog.\n",
			       tc_index_to_dp(skb));
	} else {
		if (red_is_idling(&q->parms))
			red_end_of_idle_period(&q->parms);
		q->backlog += skb->len;
	}

	return qdisc_requeue(skb, sch);
}
Code example #7
File: sch_gred.c Project: avagin/linux
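A current gred_change_vq(), extending example #1 with netlink extack error reporting: red_check_params() rejects inconsistent qth_min/qth_max/Wlog up front, a freshly installed VQ inherits the table-wide RED flags, and the per-VQ limit is clamped to the qdisc-wide sch->limit.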
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}
Code example #8
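choke_change() from sch_choke.c: when the configured limit changes, the ring buffer is reallocated to the next power of two and surviving packets are migrated under sch_tree_lock(); red_set_parms()/red_set_vars() then reinitialize the estimator, and an empty ring (head == tail) ends the idle period.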
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}
Code example #9
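An older choke_enqueue(), also from sch_choke.c: besides the usual RED steps (recompute qavg, end the idle period if idling), CHOKe draws a random queued packet and drops both it and the arrival when they belong to the same flow. Drop statistics are maintained by hand with sch->qstats.drops++ and kfree_skb().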
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			sch->qstats.overlimits++;
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				sch->qstats.overlimits++;
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		sch->qstats.backlog += qdisc_pkt_len(skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	sch->qstats.drops++;
	kfree_skb(skb);
	return NET_XMIT_DROP;

 congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

 other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}
Code example #10
File: sch_choke.c Project: AlexShiLucky/linux
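The same choke_enqueue() in a newer tree: classification has moved out of the enqueue path, drops go through the to_free list for batched freeing, and the open-coded counters are replaced by the qdisc_qstats_overlimit() and qdisc_qstats_backlog_inc() helpers. The idle-period handling is unchanged.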
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
Code example #11
File: sch_gred.c Project: avagin/linux
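gred_enqueue() from the same modern tree: after resolving the virtual queue (falling back to the default DP, or passing the packet straight through when no default is configured), RIO mode sums the qavg of VQs with lower prio values, WRED mode loads and stores the shared estimator state around the qavg update, and red_end_of_idle_period() again runs once the queue is known to be active.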
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}

	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}