static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		    struct tc_red_qopt_offload *opt)
{
	bool existing;
	int i, err;

	i = nfp_abm_red_find(alink, opt);
	existing = i >= 0;

	/* Offload supports only a single marking threshold (min == max)
	 * with ECN marking enabled.
	 */
	if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
		nfp_warn(alink->abm->app->cpp,
			 "RED offload failed - unsupported parameters\n");
		err = -EINVAL;
		goto err_destroy;
	}

	if (existing) {
		if (alink->parent == TC_H_ROOT)
			err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
		else
			err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
		if (err)
			goto err_destroy;
		return 0;
	}

	if (opt->parent == TC_H_ROOT) {
		i = 0;
		err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
					   opt->set.min);
	} else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
		i = TC_H_MIN(opt->parent) - 1;
		err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
	} else {
		return -EINVAL;
	}
	/* Set the handle to try full clean up, in case IO failed */
	alink->qdiscs[i].handle = opt->handle;
	if (err)
		goto err_destroy;

	if (opt->parent == TC_H_ROOT)
		err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
	else
		err = nfp_abm_ctrl_read_q_stats(alink, i,
						&alink->qdiscs[i].stats);
	if (err)
		goto err_destroy;

	if (opt->parent == TC_H_ROOT)
		err = nfp_abm_ctrl_read_xstats(alink,
					       &alink->qdiscs[i].xstats);
	else
		err = nfp_abm_ctrl_read_q_xstats(alink, i,
						 &alink->qdiscs[i].xstats);
	if (err)
		goto err_destroy;

	alink->qdiscs[i].stats.backlog_pkts = 0;
	alink->qdiscs[i].stats.backlog_bytes = 0;

	return 0;
err_destroy:
	/* If the qdisc keeps on living but we can't offload, undo the
	 * queue-stats adjustments before tearing the offload down.
	 */
	if (existing) {
		opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
		opt->set.qstats->backlog -=
			alink->qdiscs[i].stats.backlog_bytes;
	}
	nfp_abm_red_destroy(netdev, alink, opt->handle);

	return err;
}
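/*
 * A minimal sketch (not quoted from the driver) of what nfp_abm_red_find()
 * must do, inferred from how the caller above uses it: map opt->parent to a
 * queue index the same way the replace path does, and return that index only
 * when the slot already holds this qdisc's handle. The alink->num_qdiscs
 * bound is an assumption about the link structure.
 */
static int
nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
	unsigned int i;

	if (opt->parent == TC_H_ROOT)
		i = 0;					/* root: single slot */
	else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
		i = TC_H_MIN(opt->parent) - 1;		/* per-queue slot */
	else
		return -EOPNOTSUPP;

	if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
		return -EOPNOTSUPP;

	return i;
}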
static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}
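/*
 * A self-contained userspace sketch of the handle arithmetic used throughout
 * this code (macro values match include/uapi/linux/pkt_sched.h): a 32-bit tc
 * handle packs "major:minor" as two 16-bit halves, and ingress_get() above
 * turns class 1:2 into the non-zero internal id 3 (minor + 1).
 */
#include <stdio.h>
#include <stdint.h>

#define TC_H_MAJ_MASK	0xFFFF0000U
#define TC_H_MIN_MASK	0x0000FFFFU
#define TC_H_MAJ(h)	((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)	((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) \
	(((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

int main(void)
{
	uint32_t classid = TC_H_MAKE(1U << 16, 2U);	/* "1:2" in tc syntax */

	printf("major %u, minor %u, ingress id %u\n",
	       (unsigned int)(TC_H_MAJ(classid) >> 16),
	       (unsigned int)TC_H_MIN(classid),
	       (unsigned int)(TC_H_MIN(classid) + 1));
	return 0;
}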
static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	spinlock_t *root_lock;
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 nprio;
	u32 parent;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto **back, **chain;
	struct tcf_proto *tp;
	struct tcf_proto_ops *tp_ops;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	unsigned long fh;
	int err;
	int tp_created = 0;

	if (net != &init_net)
		return -EINVAL;

replay:
	t = NLMSG_DATA(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	nprio = prio;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		/* If no priority is given, the user wants us to allocate it. */
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			return -ENOENT;
		prio = TC_H_MAKE(0x80000000U, 0U);
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(&init_net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	if ((cops = q->ops->cl_ops) == NULL)
		return -EINVAL;

	if (cops->tcf_chain == NULL)
		return -EOPNOTSUPP;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		cl = cops->get(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	chain = cops->tcf_chain(q, cl);
	err = -EINVAL;
	if (chain == NULL)
		goto errout;

	/* Check the chain for existence of proto-tcf with this priority */
	for (back = chain; (tp = *back) != NULL; back = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (!nprio ||
				    (tp->protocol != protocol && protocol))
					goto errout;
			} else
				tp = NULL;
			break;
		}
	}

	root_lock = qdisc_root_sleeping_lock(q);

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol)
			goto errout;

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;

		/* Create new proto tcf */

		err = -ENOBUFS;
		tp = kzalloc(sizeof(*tp), GFP_KERNEL);
		if (tp == NULL)
			goto errout;
		err = -ENOENT;
		tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]);
		if (tp_ops == NULL) {
#ifdef CONFIG_MODULES
			struct nlattr *kind = tca[TCA_KIND];
			char name[IFNAMSIZ];

			if (kind != NULL &&
			    nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
				rtnl_unlock();
				request_module("cls_%s", name);
				rtnl_lock();
				tp_ops = tcf_proto_lookup_ops(kind);
				/* We dropped the RTNL semaphore in order to
				 * perform the module load. So, even if we
				 * succeeded in loading the module we have to
				 * replay the request. We indicate this using
				 * -EAGAIN.
				 */
				if (tp_ops != NULL) {
					module_put(tp_ops->owner);
					err = -EAGAIN;
				}
			}
#endif
			kfree(tp);
			goto errout;
		}
		tp->ops = tp_ops;
		tp->protocol = protocol;
		tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back));
		tp->q = q;
		tp->classify = tp_ops->classify;
		tp->classid = parent;

		err = tp_ops->init(tp);
		if (err != 0) {
			module_put(tp_ops->owner);
			kfree(tp);
			goto errout;
		}

		tp_created = 1;

	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind))
		goto errout;
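/*
 * For reference, a sketch of the priority auto-allocation helper used in the
 * tp->prio assignment above. This mirrors the classic cls_api.c helper and
 * is included here as an assumption, not quoted source: a filter created
 * without an explicit priority goes just in front of the lowest-priority
 * existing filter, or at 0xC0000000 when the chain is empty.
 */
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return first;
}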
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	 = q->stats.prob_drop + q->stats.forced_drop,
		.marked	 = q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	 = q->stats.pdrop,
		.other	 = q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}

static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long choke_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static void choke_put(struct Qdisc *q, unsigned long cl)
{
}

static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
				u32 classid)
{
	return 0;
}

static struct tcf_proto __rcu **choke_find_tcf(struct Qdisc *sch,
					       unsigned long cl)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	if (!arg->stop) {
		if (arg->fn(sch, 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops choke_class_ops = {
	.leaf		=	choke_leaf,
	.get		=	choke_get,
	.put		=	choke_put,
	.tcf_chain	=	choke_find_tcf,
	.bind_tcf	=	choke_bind,
	.unbind_tcf	=	choke_put,
	.dump		=	choke_dump_class,
	.walk		=	choke_walk,
};

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),
	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.drop		=	choke_drop,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");
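/*
 * Hedged usage sketch (device name, handle, and parameter values are
 * illustrative, from the tc-choke(8) grammar rather than this source):
 * choke takes RED-style parameters, and the single pseudo-class exposed by
 * choke_class_ops above exists so classifiers can be attached; filters added
 * to the qdisc end up on the q->filter_list that choke_find_tcf() returns
 * for cl == 0, e.g.:
 *
 *	tc qdisc add dev eth0 root handle 1: choke limit 1000 \
 *		bandwidth 10mbit avpkt 1000
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		flow hash keys src,dst divisor 1024
 */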
static int flow_parse_opt(struct filter_util *fu, char *handle,
			  int argc, char **argv, struct nlmsghdr *n)
{
	struct tc_police tp;
	struct tcmsg *t = NLMSG_DATA(n);
	struct rtattr *tail;
	__u32 mask = ~0U, xor = 0;
	__u32 keys = 0, nkeys = 0;
	__u32 mode = FLOW_MODE_MAP;
	__u32 tmp;

	memset(&tp, 0, sizeof(tp));

	if (handle) {
		if (get_u32(&t->tcm_handle, handle, 0)) {
			fprintf(stderr, "Illegal \"handle\"\n");
			return -1;
		}
	}

	tail = NLMSG_TAIL(n);
	addattr_l(n, 4096, TCA_OPTIONS, NULL, 0);

	while (argc > 0) {
		if (matches(*argv, "map") == 0) {
			mode = FLOW_MODE_MAP;
		} else if (matches(*argv, "hash") == 0) {
			mode = FLOW_MODE_HASH;
		} else if (matches(*argv, "keys") == 0) {
			NEXT_ARG();
			if (flow_parse_keys(&keys, &nkeys, *argv))
				return -1;
			addattr32(n, 4096, TCA_FLOW_KEYS, keys);
		} else if (matches(*argv, "and") == 0) {
			NEXT_ARG();
			if (get_u32(&tmp, *argv, 0)) {
				fprintf(stderr, "Illegal \"mask\"\n");
				return -1;
			}
			transfer_bitop(&mask, &xor, tmp, 0);
		} else if (matches(*argv, "or") == 0) {
			NEXT_ARG();
			if (get_u32(&tmp, *argv, 0)) {
				fprintf(stderr, "Illegal \"or\"\n");
				return -1;
			}
			transfer_bitop(&mask, &xor, ~tmp, tmp);
		} else if (matches(*argv, "xor") == 0) {
			NEXT_ARG();
			if (get_u32(&tmp, *argv, 0)) {
				fprintf(stderr, "Illegal \"xor\"\n");
				return -1;
			}
			transfer_bitop(&mask, &xor, ~0, tmp);
		} else if (matches(*argv, "rshift") == 0) {
			NEXT_ARG();
			if (get_u32(&tmp, *argv, 0)) {
				fprintf(stderr, "Illegal \"rshift\"\n");
				return -1;
			}
			addattr32(n, 4096, TCA_FLOW_RSHIFT, tmp);
		} else if (matches(*argv, "addend") == 0) {
			NEXT_ARG();
			if (get_addend(&tmp, *argv, keys)) {
				fprintf(stderr, "Illegal \"addend\"\n");
				return -1;
			}
			addattr32(n, 4096, TCA_FLOW_ADDEND, tmp);
		} else if (matches(*argv, "divisor") == 0) {
			NEXT_ARG();
			if (get_u32(&tmp, *argv, 0)) {
				fprintf(stderr, "Illegal \"divisor\"\n");
				return -1;
			}
			addattr32(n, 4096, TCA_FLOW_DIVISOR, tmp);
		} else if (matches(*argv, "baseclass") == 0) {
			NEXT_ARG();
			if (get_tc_classid(&tmp, *argv) || TC_H_MIN(tmp) == 0) {
				fprintf(stderr, "Illegal \"baseclass\"\n");
				return -1;
			}
			addattr32(n, 4096, TCA_FLOW_BASECLASS, tmp);
		} else if (matches(*argv, "perturb") == 0) {
			NEXT_ARG();
			if (get_u32(&tmp, *argv, 0)) {
				fprintf(stderr, "Illegal \"perturb\"\n");
				return -1;
			}
			addattr32(n, 4096, TCA_FLOW_PERTURB, tmp);
		} else if (matches(*argv, "police") == 0) {
			NEXT_ARG();
			if (parse_police(&argc, &argv, TCA_FLOW_POLICE, n)) {
				fprintf(stderr, "Illegal \"police\"\n");
				return -1;
			}
			continue;
		} else if (matches(*argv, "action") == 0) {
			NEXT_ARG();
			if (parse_action(&argc, &argv, TCA_FLOW_ACT, n)) {
				fprintf(stderr, "Illegal \"action\"\n");
				return -1;
			}
			continue;
		} else if (matches(*argv, "match") == 0) {
			NEXT_ARG();
			if (parse_ematch(&argc, &argv, TCA_FLOW_EMATCHES, n)) {
				fprintf(stderr, "Illegal \"ematch\"\n");
				return -1;
			}
			continue;
		} else if (matches(*argv, "help") == 0) {
			explain();
			return -1;
		} else {
			fprintf(stderr, "What is \"%s\"?\n", *argv);
			explain();
			return -1;
		}
		argv++, argc--;
	}

	if (nkeys > 1 && mode != FLOW_MODE_HASH) {
		fprintf(stderr, "Invalid mode \"map\" for multiple keys\n");
		return -1;
	}

	addattr32(n, 4096, TCA_FLOW_MODE, mode);

	if (mask != ~0 || xor != 0) {
		addattr32(n, 4096, TCA_FLOW_MASK, mask);
		addattr32(n, 4096, TCA_FLOW_XOR, xor);
	}

	tail->rta_len = (void *)NLMSG_TAIL(n) - (void *)tail;
	return 0;
}
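/*
 * A minimal, self-contained sketch of the attribute-nesting idiom used
 * above. MSG_TAIL(), add_attr(), and build_nested_options() are assumed
 * helpers that mirror iproute2's NLMSG_TAIL()/addattr_l() pattern rather
 * than quote it: remember the message tail, emit an empty TCA_OPTIONS
 * attribute to open the nest, append nested attributes, then patch the
 * remembered header's rta_len so it covers everything appended since.
 */
#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>

#define MSG_TAIL(n) \
	((struct rtattr *)(((char *)(n)) + NLMSG_ALIGN((n)->nlmsg_len)))

static int add_attr(struct nlmsghdr *n, int maxlen, int type,
		    const void *data, int alen)
{
	int len = RTA_LENGTH(alen);
	struct rtattr *rta;

	if (NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len) > (unsigned int)maxlen)
		return -1;		/* message buffer would overflow */
	rta = MSG_TAIL(n);
	rta->rta_type = type;
	rta->rta_len = len;
	if (alen)
		memcpy(RTA_DATA(rta), data, alen);
	n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len);
	return 0;
}

static void build_nested_options(struct nlmsghdr *n, int maxlen, __u32 mode)
{
	struct rtattr *tail = MSG_TAIL(n);	/* remember the nest header */

	add_attr(n, maxlen, TCA_OPTIONS, NULL, 0);	/* open: empty attr */
	add_attr(n, maxlen, TCA_FLOW_MODE, &mode, sizeof(mode));
	/* close: make TCA_OPTIONS span everything appended after it */
	tail->rta_len = (char *)MSG_TAIL(n) - (char *)tail;
}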
static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (qdisc == NULL) {
			err = -ENOMEM;
			goto err;
		}
		priv->qdiscs[i] = qdisc;
	}

	/* If the mqprio options indicate that hardware should own
	 * the queue mapping then run ndo_setup_tc otherwise use the
	 * supplied and verified mapping
	 */
	if (qopt->hw) {
		priv->hw_owned = 1;
		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
		if (err)
			goto err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	mqprio_destroy(sch);
	return err;
}
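/*
 * An illustrative tc_mqprio_qopt (the values are made up for the example;
 * the field layout follows include/uapi/linux/pkt_sched.h): two traffic
 * classes over four TX queues, TC0 owning queues 0-1 and TC1 owning queues
 * 2-3. mqprio_init() above consumes exactly this layout: hw == 0 takes the
 * netdev_set_num_tc()/netdev_set_tc_queue() branch, and prio_tc_map is
 * applied unconditionally. The roughly equivalent (assumed) tc command:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 queues 2@0 2@2 hw 0
 */
#include <linux/pkt_sched.h>

static const struct tc_mqprio_qopt example_qopt = {
	.num_tc		= 2,
	/* skb priority -> traffic class; priorities 0-3 map to TC0 */
	.prio_tc_map	= { 0, 0, 0, 0, 1, 1, 1, 1,
			    1, 1, 1, 1, 1, 1, 1, 1 },
	.hw		= 0,		/* software-owned queue mapping */
	.count		= { 2, 2 },	/* queues per traffic class */
	.offset		= { 0, 2 },	/* first queue of each class */
};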