Example #1
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

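	/* rtattr-era attribute tables are zero-based, hence the "-1"
	 * indices.  With neither per-VQ parameters nor a stab table given,
	 * this is a table-wide reconfiguration.  Note the whole nested
	 * option block is handed down, although gred_change_table_def()
	 * expects the TCA_GRED_DPS attribute; Example #3 passes it
	 * correctly. */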
	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);
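	/* The qdisc tree lock keeps the update atomic with respect to the
	 * enqueue/dequeue fast path. */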

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

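	/* WRED mode applies when at least two virtual queues share the same
	 * priority, so it must be re-evaluated after a prio change. */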
	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
Example #2
static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

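	/* Bounds-check the table definition: 1..MAX_DPs virtual queues and
	 * a default DP that falls inside that range. */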
	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

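	/* sopt->grio selects RIO mode (per-VQ drop priorities); WRED mode
	 * is only meaningful on top of RIO mode, so it is recomputed or
	 * cleared accordingly. */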
	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

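	/* Tear down any VQs that lie beyond the new table size; per the
	 * comment above, they are unreachable by now. */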
	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warning("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				   i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
Example #3
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

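	/* TCA_GRED_MAX_P is optional; 0 means "not configured". */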
	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
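	/* Allocated before taking the lock because GFP_KERNEL may sleep;
	 * gred_change_vq() consumes prealloc only if the target VQ does
	 * not exist yet. */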
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

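	/* Mirror the accepted configuration to offloading hardware, where
	 * supported. */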
	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}
Example #4
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

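	/* The per-Qdisc RED flags changed: propagate the VQ-relevant subset
	 * to every existing virtual queue. */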
	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}
Example #5
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

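	/* As in Example #1, the whole option block is passed down here;
	 * Example #3 shows the later fix passing tb[TCA_GRED_DPS]. */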
	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
	kfree(prealloc);
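	/* gred_change_vq() clears *prealloc when it takes ownership, so
	 * this only frees an unused preallocation (kfree(NULL) is a
	 * no-op). */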
errout:
	return err;
}