Example #1
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				pr_warning("GRED: Unable to relocate VQ 0x%x "
					   "while dropping, screwing up "
					   "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->vars);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}
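
For context, the VQ lookup above keys off the skb's tc_index field. A minimal sketch of the helper and mask these examples rely on, assuming the usual in-tree definitions (MAX_DPs is the table size, a power of two):

#define GRED_VQ_MASK	(MAX_DPs - 1)

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	/* The low bits of tc_index select the virtual queue (DP). */
	return skb->tc_index & GRED_VQ_MASK;
}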
Example #2
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				pr_warning("GRED: Unable to relocate VQ 0x%x "
					   "after dequeue, screwing up "
					   "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->vars);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
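
The idle-period bookkeeping above comes from the shared RED library in include/net/red.h. Roughly, and hedging on the timestamp type, which has changed across kernel versions, the helpers look like:

static inline int red_is_idling(const struct red_vars *v)
{
	/* A non-zero timestamp means an idle period is in progress. */
	return v->qidlestart != 0;
}

static inline void red_start_of_idle_period(struct red_vars *v)
{
	/* Record when the queue went empty so qavg can decay later. */
	v->qidlestart = ktime_get();
}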
Example #3
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}
Example #4
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}
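
gred_wred_mode() and gred_rio_mode() are plain flag tests on the scheduler table; a sketch of how these examples assume they are defined, mirroring the in-tree flag bits:

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}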
Example #5
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload opt = {
		.command	= command,
		.handle		= sch->handle,
		.parent		= sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt.set.grio_on = gred_rio_mode(table);
		opt.set.wred_on = gred_wred_mode(table);
		opt.set.dp_cnt = table->DPs;
		opt.set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt.set.tab[i].present = true;
			opt.set.tab[i].limit = q->limit;
			opt.set.tab[i].prio = q->prio;
			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt.set.tab[i].is_ecn = gred_use_ecn(q);
			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt.set.tab[i].probability = q->parms.max_P;
			opt.set.tab[i].backlog = &q->backlog;
		}
		opt.set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}
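
On the driver side, the request built above arrives through the generic ndo_setup_tc hook. A hypothetical handler (the example_* names are illustrative, not from any in-tree driver) would dispatch it roughly like this:

static int example_setup_tc(struct net_device *dev, enum tc_setup_type type,
			    void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_GRED:
		/* Program, update or tear down GRED state in hardware. */
		return example_gred_config(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}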
Example #6
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
	gred_offload(sch, TC_GRED_DESTROY);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
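
For reference, a kernel with this qdisc is normally driven from userspace via iproute2. An illustrative session (device name and parameters are placeholders, following the documented tc-gred syntax):

# Create the GRED table: 4 virtual queues, DP 1 as default, grio enabled.
tc qdisc add dev eth0 root gred setup DPs 4 default 1 grio

# Configure one virtual queue (DP 1) with RED-style thresholds.
tc qdisc change dev eth0 root gred limit 60KB min 15KB max 45KB \
	burst 20 avpkt 1000 bandwidth 10Mbit DP 1 probability 0.02 prio 2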
Example #7
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}

	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
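
The three-way switch above consumes the verdicts of red_action() from include/net/red.h; as a sketch, the enum behind them is:

enum {
	RED_DONT_MARK,	/* qavg below the minimum threshold */
	RED_PROB_MARK,	/* probabilistic mark/drop zone */
	RED_HARD_MARK,	/* qavg above the maximum threshold */
};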
Example #8
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
Example #9
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
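
A note on this oldest variant: the RTA_* macros used in gred_dump() hide a goto to the rtattr_failure label when the skb runs out of tailroom, which is why the label exists without any explicit jump in the function body. Roughly, and hedging on the exact historical definition in include/linux/rtnetlink.h:

#define RTA_PUT(skb, attrtype, attrlen, data) \
({	if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \
		goto rtattr_failure; \
	__rta_fill(skb, attrtype, attrlen, data); })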