static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct nfp_net *nn = cb_priv;
	struct bpf_prog *oldprog;
	struct nfp_bpf_vnic *bv;
	int err;

	if (type != TC_SETUP_CLSBPF) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}
	if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
		return -EOPNOTSUPP;
	if (!nfp_net_ebpf_capable(nn)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "NFP firmware does not support eBPF offload");
		return -EOPNOTSUPP;
	}
	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	/* Only support TC direct action */
	if (!cls_bpf->exts_integrated ||
	    tcf_exts_has_actions(cls_bpf->exts)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only direct action with no legacy actions supported");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	bv = nn->app_priv;
	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (bv->tc_prog != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
	}

	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
				  cls_bpf->common.extack);
	if (err)
		return err;

	bv->tc_prog = cls_bpf->prog;
	nn->port->tc_offload_cnt = !!bv->tc_prog;
	return 0;
}
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);

	/* ECN is the low two bits of the ToS byte */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);

	/* DSCP is the upper six bits of the ToS byte, so shift right by
	 * two (not six, which would keep only the top two bits).
	 */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}
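/*
 * Illustrative sketch, not part of the driver: the ToS byte packs the
 * 2-bit ECN field into its low bits and the 6-bit DSCP field into its
 * high bits, which is why the parser above masks with 0x3 and shifts
 * right by 2. A minimal standalone C demonstration of the same
 * extraction:
 */
#include <stdio.h>

int main(void)
{
	unsigned char tos = 0xb8;		/* DSCP 46 (EF), ECN 0 */
	unsigned char ecn = tos & 0x3;		/* low two bits  */
	unsigned char dscp = tos >> 2;		/* high six bits */

	printf("tos=0x%02x dscp=%u ecn=%u\n", tos, dscp, ecn);
	return 0;
}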
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len *
			     psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
				 struct tc_cls_matchall_offload *flow)
{
	struct netlink_ext_ack *extack = flow->common.extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
		return -EOPNOTSUPP;
	}

	switch (flow->command) {
	case TC_CLSMATCHALL_REPLACE:
		return nfp_flower_install_rate_limiter(app, netdev, flow,
						       extack);
	case TC_CLSMATCHALL_DESTROY:
		return nfp_flower_remove_rate_limiter(app, netdev, flow,
						      extack);
	case TC_CLSMATCHALL_STATS:
		return nfp_flower_stats_rate_limiter(app, netdev, flow,
						     extack);
	default:
		return -EOPNOTSUPP;
	}
}
static int nfp_flower_remove_rate_limiter(struct nfp_app *app,
					  struct net_device *netdev,
					  struct tc_cls_matchall_offload *flow,
					  struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	u32 netdev_port_id;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	netdev_port_id = nfp_repr_get_port_id(netdev);
	repr_priv = repr->app_priv;

	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
		return -EOPNOTSUPP;
	}

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Clear all qos-associated data for this interface */
	memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
	fl_priv->qos_rate_limiters--;
	if (!fl_priv->qos_rate_limiters)
		cancel_delayed_work_sync(&fl_priv->qos_stats_work);

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.port = cpu_to_be32(netdev_port_id);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}
static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}
static int nfp_flower_stats_rate_limiter(struct nfp_app *app,
					 struct net_device *netdev,
					 struct tc_cls_matchall_offload *flow,
					 struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	u64 diff_bytes, diff_pkts;
	struct nfp_repr *repr;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	repr_priv = repr->app_priv;
	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
		return -EOPNOTSUPP;
	}

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;
	diff_pkts = curr_stats->pkts - prev_stats->pkts;
	diff_bytes = curr_stats->bytes - prev_stats->bytes;
	prev_stats->pkts = curr_stats->pkts;
	prev_stats->bytes = curr_stats->bytes;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

	flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
			  repr_priv->qos_table.last_update);

	return 0;
}
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else {
			prio = ctl->prio;
		}
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts,
					 struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	int err, i;

	if (!tcf_exts_has_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_ok(a)) {
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
		} else if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = tcf_mirred_dev(a);
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_mirror(a)) {
			struct net_device *out_dev = tcf_mirred_dev(a);

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   action, vid,
							   proto, prio, extack);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}
static int nfp_flower_install_rate_limiter(struct nfp_app *app,
					   struct net_device *netdev,
					   struct tc_cls_matchall_offload *flow,
					   struct netlink_ext_ack *extack)
{
	struct flow_action_entry *action = &flow->rule->action.entries[0];
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	u32 netdev_port_id;
	u64 burst, rate;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;

	if (repr_priv->block_shared) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
		return -EOPNOTSUPP;
	}

	if (repr->port->type != NFP_PORT_VF_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
		return -EOPNOTSUPP;
	}

	if (!flow_offload_has_one_action(&flow->rule->action)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (flow->common.prio != (1 << 16)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
		return -EOPNOTSUPP;
	}

	if (action->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	rate = action->police.rate_bytes_ps;
	burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst),
			PSCHED_TICKS_PER_SEC);
	netdev_port_id = nfp_repr_get_port_id(netdev);

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.port = cpu_to_be32(netdev_port_id);
	config->bkt_tkn_p = cpu_to_be32(burst);
	config->bkt_tkn_c = cpu_to_be32(burst);
	config->pbs = cpu_to_be32(burst);
	config->cbs = cpu_to_be32(burst);
	config->pir = cpu_to_be32(rate);
	config->cir = cpu_to_be32(rate);
	nfp_ctrl_tx(repr->app->ctrl, skb);

	repr_priv->qos_table.netdev_port_id = netdev_port_id;
	fl_priv->qos_rate_limiters++;
	if (fl_priv->qos_rate_limiters == 1)
		schedule_delayed_work(&fl_priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return 0;
}
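/*
 * Illustrative sketch, not part of the driver: since
 * PSCHED_NS2TICKS(ns) / PSCHED_TICKS_PER_SEC equals ns / NSEC_PER_SEC
 * (up to rounding), the burst computation above reduces to
 * rate_bytes_ps * burst_time_in_seconds, i.e. the token-bucket depth in
 * bytes. Assuming the police action's burst is a duration in
 * nanoseconds, a plain C equivalent would be:
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Token-bucket depth in bytes for a byte rate and a burst duration. */
static uint64_t bucket_bytes(uint64_t rate_bytes_ps, uint64_t burst_ns)
{
	return rate_bytes_ps * burst_ns / NSEC_PER_SEC;
}

int main(void)
{
	/* 1 Gbit/s (125 MB/s) with a 1 ms burst window -> 125000 bytes */
	printf("%llu\n",
	       (unsigned long long)bucket_bytes(125000000ULL, 1000000ULL));
	return 0;
}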
static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG_MOD(extack,
			   "Please use: echo \"[ID] [PORT_COUNT]\" > /sys/bus/netdevsim/new_device");
	return -EOPNOTSUPP;
}
static int tcf_pedit_init(struct net *net, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  int ovr, int bind, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, pedit_net_id);
	struct nlattr *tb[TCA_PEDIT_MAX + 1];
	struct tc_pedit_key *keys = NULL;
	struct tcf_pedit_key_ex *keys_ex;
	struct tc_pedit *parm;
	struct nlattr *pattr;
	struct tcf_pedit *p;
	int ret = 0, err;
	int ksize;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy, NULL);
	if (err < 0)
		return err;

	pattr = tb[TCA_PEDIT_PARMS];
	if (!pattr)
		pattr = tb[TCA_PEDIT_PARMS_EX];
	if (!pattr) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute");
		return -EINVAL;
	}

	parm = nla_data(pattr);
	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
	if (nla_len(pattr) < sizeof(*parm) + ksize) {
		NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
		return -EINVAL;
	}

	keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
	if (IS_ERR(keys_ex))
		return PTR_ERR(keys_ex);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (!err) {
		if (!parm->nkeys) {
			tcf_idr_cleanup(tn, parm->index);
			NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
			ret = -EINVAL;
			goto out_free;
		}
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_pedit_ops, bind, false);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			goto out_free;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind)
			goto out_free;
		if (!ovr) {
			ret = -EEXIST;
			goto out_release;
		}
	} else {
		return err;
	}

	p = to_pedit(*a);
	spin_lock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED ||
	    (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys)) {
		keys = kmalloc(ksize, GFP_ATOMIC);
		if (!keys) {
			spin_unlock_bh(&p->tcf_lock);
			ret = -ENOMEM;
			goto out_release;
		}
		kfree(p->tcfp_keys);
		p->tcfp_keys = keys;
		p->tcfp_nkeys = parm->nkeys;
	}
	memcpy(p->tcfp_keys, parm->keys, ksize);

	p->tcfp_flags = parm->flags;
	p->tcf_action = parm->action;

	kfree(p->tcfp_keys_ex);
	p->tcfp_keys_ex = keys_ex;

	spin_unlock_bh(&p->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

out_release:
	tcf_idr_release(*a, bind);
out_free:
	kfree(keys_ex);
	return ret;
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       key->dst, mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       key->dst + 2, mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       key->src, mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       key->src + 2, mask->src + 2, 4);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);
	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
					     f->common.extack);
}
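/*
 * Illustrative sketch, not part of the driver: the Ethernet-address
 * keymasks above split each 6-byte MAC into a 16-bit high half (bits
 * 47..32) and a 32-bit low half (bits 31..0), matching the _32_47 and
 * _0_31 key elements. A standalone demonstration of the same split:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char hi[2], lo[4];

	memcpy(hi, mac, 2);	/* bytes 0-1: bits 47..32 */
	memcpy(lo, mac + 2, 4);	/* bytes 2-5: bits 31..0  */

	printf("hi=%02x%02x lo=%02x%02x%02x%02x\n",
	       hi[0], hi[1], lo[0], lo[1], lo[2], lo[3]);
	return 0;
}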