Example #1
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
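			/* The child is going away with everything still queued
			 * in it, so pull its qlen out of the ancestors' counters
			 * before destroying it.
			 */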
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_decrease_qlen(old,
								 old->q.qlen);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
Example #2
static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt *qopt = RTA_DATA(opt);
	int i;

	if (opt->rta_len < RTA_LENGTH(sizeof(*qopt)))
		return -EINVAL;
	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	for (i=0; i<=TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
		if (child != &noop_qdisc) {
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	for (i=0; i<q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child;
			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
				sch_tree_lock(sch);
				child = xchg(&q->queues[i], child);

				if (child != &noop_qdisc) {
					qdisc_tree_decrease_qlen(child,
								 child->q.qlen);
					qdisc_destroy(child);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
Example #3
static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = skb->len;

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
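		/* toks/ptoks go negative when the packet needs more credit than
		 * the bucket holds; since a negative long sets the sign bit,
		 * (toks | ptoks) >= 0 below is true only if both stayed >= 0.
		 */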
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		if ((toks|ptoks) >= 0) {
			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog,
					now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but, however, this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			/* When requeue fails skb is dropped */
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
		}

		sch->qstats.overlimits++;
	}
	return NULL;
}
Example #4
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now = psched_get_time();

		/* if more time remaining? */
		if (cb->time_to_send <= now) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}
Example #5
File: sch_tbf.c Project: BozkurTR/kernel
/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
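	/* The ancestors will only count this enqueue as a single packet, but
	 * nb segments were actually queued, so grow the tree qlen by nb - 1
	 * (a "decrease" by the negative amount 1 - nb).
	 */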
	if (nb > 1)
		qdisc_tree_decrease_qlen(sch, 1 - nb);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
Example #6
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}
Example #7
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = qdisc_peek_head(sch);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);

		/* if more time remaining? */
		if (cb->time_to_send <= psched_get_time()) {
			__skb_unlink(skb, &sch->q);
			sch->qstats.backlog -= qdisc_pkt_len(skb);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
deliver:
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
Example #8
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
	q->cstats.drop_count = 0;

	sch_tree_unlock(sch);
	return 0;
}
Example #9
File: sch_sfq.c Project: Mr-Aloof/wl500g
static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sfq_sched_data tmp;
	struct sk_buff *skb;
	unsigned int qlen;
	int err;

	/* set up tmp queue */
	memset(&tmp, 0, sizeof(struct sfq_sched_data));
	sfq_copy_parameters(&tmp, q);
	if ((err = sfq_q_init(&tmp, opt)))
		return err;

	/* handle perturbation */
	/* This code avoids resetting the perturb_timer unless perturb_period
	 * is changed. Note that the rest of this function leaves
	 * q->perturb_timer alone, whereas all other members of q get
	 * overwritten from tmp. */
	if (!tmp.perturb_period) {
		tmp.perturbation = 0;
		del_timer(&q->perturb_timer);
	} else if (tmp.perturb_period != q->perturb_period) {
		mod_timer(&q->perturb_timer, SFQ_PERTURB(tmp.perturb_period));
	}

	/* move packets from the old queue to the tmp queue */
	sch_tree_lock(sch);
	qlen = sch->q.qlen;
	while (sch->q.qlen >= tmp.limit - 1)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
	while ((skb = sfq_q_dequeue(q)) != NULL)
		sfq_q_enqueue(skb, &tmp, SFQ_TAIL);

	/* clean up the old queue */
	sfq_q_destroy(q);

	/* copy elements of the tmp queue into the old queue */
	sfq_copy_parameters(q, &tmp);
	q->tail      = tmp.tail;
	q->max_depth = tmp.max_depth;
	q->ht        = tmp.ht;
	q->dep       = tmp.dep;
	q->next      = tmp.next;
	q->allot     = tmp.allot;
	q->hash      = tmp.hash;
	q->qs        = tmp.qs;

	/* finish up */
	sch_tree_unlock(sch);
	return 0;
}
Example #10
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
			    dequeue);
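	/* "dequeue" above is sch_fq_codel.c's own callback: the CoDel core
	 * uses it to pull packets from this flow and may drop some of them,
	 * which shows up as a change in cstats.drop_count.
	 */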

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
		q->cstats.drop_count = 0;
	}
	return skb;
}
Example #11
/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_drop(skb, sch);
	qdisc_tree_decrease_qlen(sch, 1);
	--sch->q.qlen;
}
Example #12
static int codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);

	if (tb[TCA_CODEL_ECN])
		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);

	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch);
	}
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	sch_tree_unlock(sch);
	return 0;
}
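
Examples #6, #12, #20 and #30 all share the same trim-then-notify pattern after a limit change: snapshot the queue length, drop packets until the queue fits the new limit, then report the difference upward with qdisc_tree_decrease_qlen(). A minimal sketch of that shared pattern follows; the helper name and the drop_one callback are illustrative rather than taken from any of the files above, and the caller is assumed to hold sch_tree_lock() as the _change() handlers do.

/* Illustrative helper, not from the sources above: trim a qdisc to a new
 * limit and report the removed packets to every ancestor qdisc.
 */
static void trim_to_limit(struct Qdisc *sch, unsigned int limit,
			  void (*drop_one)(struct Qdisc *sch))
{
	unsigned int qlen = sch->q.qlen;

	while (sch->q.qlen > limit)
		drop_one(sch);	/* e.g. sfq_drop(), or __skb_dequeue() + qdisc_drop() */

	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
}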
Example #13
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);

	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
		q->stats.drop_count = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}
Example #14
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	idx--;
	if (sch->q.qlen > 128)
		skb = skb_reduce_truesize(skb);

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (fq_codel_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}
Example #15
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* if more time remaining? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		} else {
			psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);

			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
				qdisc_tree_decrease_qlen(q->qdisc, 1);
				sch->qstats.drops++;
				printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
				       q->qdisc->ops->id);
			}

			mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
			sch->flags |= TCQ_F_THROTTLED;
		}
	}

	return NULL;
}
Example #16
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt *qopt;
	int i;
	int flow_change = 0;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	if (q->enable_flow != qopt->enable_flow) {
		q->enable_flow = qopt->enable_flow;
		flow_change = 1;
	}
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = q->queues[i];
		q->queues[i] = &noop_qdisc;
		if (child != &noop_qdisc) {
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;

			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_decrease_qlen(old,
								 old->q.qlen);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}

	/* Schedule qdisc when flow re-enabled */
	if (flow_change && q->enable_flow) {
		if (!test_bit(__QDISC_STATE_DEACTIVATED,
			      &sch->state))
			__netif_schedule(qdisc_root(sch));
	}
	return 0;
}
Example #17
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
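	/* The ring is sized to the next power of two above the limit so that
	 * head/tail indices can wrap with a cheap "& q->tab_mask".
	 */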
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}
Example #18
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt *qopt;
	struct nlattr *tb[TCA_PRIO_MAX + 1] = {0};
	int err;
	int i;

	qopt = nla_data(opt);
	if (nla_len(opt) < sizeof(*qopt))
		return -1;

	if (nla_len(opt) >= sizeof(*qopt) + sizeof(struct nlattr)) {
		err = nla_parse_nested(tb, TCA_PRIO_MAX,
				       (struct nlattr *) (qopt + 1), NULL);
		if (err < 0)
			return err;
	}

	q->bands = qopt->bands;
	/* If we're multiqueue, make sure the number of incoming bands
	 * matches the number of queues on the device we're associating with.
	 * If the number of bands requested is zero, then set q->bands to
	 * dev->egress_subqueue_count.  Also, the root qdisc must be the
	 * only one that is enabled for multiqueue, since it's the only one
	 * that interacts with the underlying device.
	 */
	q->mq = nla_get_flag(tb[TCA_PRIO_MQ]);
	if (q->mq) {
		if (sch->parent != TC_H_ROOT)
			return -EINVAL;
		if (netif_is_multiqueue(sch->dev)) {
			if (q->bands == 0)
				q->bands = sch->dev->egress_subqueue_count;
			else if (q->bands != sch->dev->egress_subqueue_count)
				return -EINVAL;
		} else
			return -EOPNOTSUPP;
	}

	if (q->bands > TCQ_PRIO_BANDS || q->bands < 2)
		return -EINVAL;

	for (i=0; i<=TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= q->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
		if (child != &noop_qdisc) {
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	for (i=0; i<q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child;
			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
				sch_tree_lock(sch);
				child = xchg(&q->queues[i], child);

				if (child != &noop_qdisc) {
					qdisc_tree_decrease_qlen(child,
								 child->q.qlen);
					qdisc_destroy(child);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
Example #19
File: sch_tbf.c Project: BozkurTR/kernel
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
			qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
						      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	if (qopt->peakrate.rate) {
		memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
		q->peak_present = true;
	} else {
		q->peak_present = false;
	}

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}
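
Example #19 above (and the other TBF variants in #22, #28 and #29) also shows the child-replacement idiom: before the old inner qdisc is destroyed, its whole queue length is reported as removed so the ancestors' counters stay consistent. Below is a minimal sketch of that idiom with a hypothetical helper name; it mirrors the older kernel API (qdisc_destroy(), qdisc_tree_decrease_qlen()) used throughout these examples and is not code from any of them.

/* Illustrative helper, not from the sources above: swap in a replacement
 * child qdisc while keeping the ancestors' qlen counters consistent.
 */
static void replace_child(struct Qdisc *sch, struct Qdisc **slot,
			  struct Qdisc *child)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *slot;
	*slot = child;
	/* Everything still queued in the old child vanishes from the tree. */
	qdisc_tree_decrease_qlen(old, old->q.qlen);
	qdisc_destroy(old);
	sch_tree_unlock(sch);
}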
Example #20
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen;
	struct red_parms *p = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = prandom_u32();
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}
Example #21
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch);

		slot_queue_add(slot, skb);
		return NET_XMIT_CN;
	}

enqueue:
	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could endup servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}
Example #22
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
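	/* rtab->data[] maps packet size (in 2^cell_log byte units) to the time
	 * needed to send it, so max_size is now the largest packet whose cost
	 * still fits inside the configured burst buffer.
	 */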
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	swap(q->R_tab, rtab);
	swap(q->P_tab, ptab);

	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}
Example #23
File: sch_fq.c Project: 19Dan01/linux
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
			q->quantum = quantum;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ;

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (!err) {
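		/* fq_resize() allocates a new hash table and may sleep, so the
		 * qdisc tree lock is dropped around the call.
		 */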
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		kfree_skb(skb);
		drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, drop_count);

	sch_tree_unlock(sch);
	return err;
}
Example #24
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}
Example #25
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch);

		slot_queue_add(slot, skb);
		return NET_XMIT_CN;
	}

enqueue:
	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could endup servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}
Example #26
File: sch_fq.c Project: OliverGesch/linux
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM])
		q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (!err)
		err = fq_resize(q, fq_log);

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		kfree_skb(skb);
		drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, drop_count);

	sch_tree_unlock(sch);
	return err;
}
Example #27
/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We dont use sfq_dequeue()/sfq_enqueue() because we dont want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:				sch->qstats.backlog -= qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
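	/* Packets that could not be re-slotted were freed above; fix up this
	 * qdisc's own counter and tell the ancestors about the loss.
	 */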
	sch->q.qlen -= dropped;
	qdisc_tree_decrease_qlen(sch, dropped);
}
Example #28
static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
	int err = -EINVAL;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_TBF_PTAB];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size,n;

	if (rtattr_parse_nested(tb, TCA_TBF_PTAB, opt) ||
	    tb[TCA_TBF_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
		goto done;

	qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer) break;
	max_size = (n << qopt->rate.cell_log)-1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu) break;
		size = (n << qopt->peakrate.cell_log)-1;
		if (size < max_size) max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (qopt->limit > 0) {
		if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL)
			goto done;
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}
Example #29
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	psched_ratecfg_precompute(&q->rate, &rtab->rate);
	if (ptab) {
		psched_ratecfg_precompute(&q->peak, &ptab->rate);
		q->peak_present = true;
	} else {
		q->peak_present = false;
	}

	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}
Example #30
File: sch_pie.c Project: giwa/fq-pie
static int pie_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch);
	}
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	sch_tree_unlock(sch);
	return 0;
}