static int nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev, struct tc_cls_matchall_offload *flow, struct netlink_ext_ack *extack) { struct nfp_flower_priv *fl_priv = app->priv; struct nfp_flower_repr_priv *repr_priv; struct nfp_police_config *config; struct nfp_repr *repr; struct sk_buff *skb; u32 netdev_port_id; if (!nfp_netdev_is_nfp_repr(netdev)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port"); return -EOPNOTSUPP; } repr = netdev_priv(netdev); netdev_port_id = nfp_repr_get_port_id(netdev); repr_priv = repr->app_priv; if (!repr_priv->qos_table.netdev_port_id) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist"); return -EOPNOTSUPP; } skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config), NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL); if (!skb) return -ENOMEM; /* Clear all qos associate data for this interface */ memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos)); fl_priv->qos_rate_limiters--; if (!fl_priv->qos_rate_limiters) cancel_delayed_work_sync(&fl_priv->qos_stats_work); config = nfp_flower_cmsg_get_data(skb); memset(config, 0, sizeof(struct nfp_police_config)); config->head.port = cpu_to_be32(netdev_port_id); nfp_ctrl_tx(repr->app->ctrl, skb); return 0; }
/* Ask the firmware for updated policer statistics for one port.
 *
 * Fire-and-forget: the reply arrives asynchronously as a QOS_STATS
 * control message.  An allocation failure is tolerated silently as the
 * caller will simply request stats again on the next pass.
 */
static void nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
					  u32 netdev_port_id)
{
	struct nfp_police_cfg_head *hdr;
	struct sk_buff *msg;

	msg = nfp_flower_cmsg_alloc(fl_priv->app,
				    sizeof(struct nfp_police_cfg_head),
				    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
				    GFP_ATOMIC);
	if (!msg)
		return;

	hdr = nfp_flower_cmsg_get_data(msg);
	memset(hdr, 0, sizeof(struct nfp_police_cfg_head));
	hdr->port = cpu_to_be32(netdev_port_id);

	nfp_ctrl_tx(fl_priv->app->ctrl, msg);
}
/* Notify the firmware that a representor has come into existence
 * (@exists true) or is going away (@exists false).
 *
 * Return: 0 on success or -ENOMEM if the control message could not be
 * allocated.
 */
int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
{
	struct nfp_flower_cmsg_portreify *reify;
	struct sk_buff *msg;

	msg = nfp_flower_cmsg_alloc(repr->app, sizeof(*reify),
				    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
				    GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reify = nfp_flower_cmsg_get_data(msg);
	reify->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	reify->reserved = 0;
	reify->info = cpu_to_be16(exists);

	nfp_ctrl_tx(repr->app->ctrl, msg);

	return 0;
}
/* Report a representor's carrier state and current MTU to the
 * firmware via a PORT_MOD control message.
 *
 * Return: 0 on success or -ENOMEM if the control message could not be
 * allocated.
 */
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
{
	struct nfp_flower_cmsg_portmod *pmsg;
	struct sk_buff *msg;

	msg = nfp_flower_cmsg_alloc(repr->app, sizeof(*pmsg),
				    NFP_FLOWER_CMSG_TYPE_PORT_MOD,
				    GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	pmsg = nfp_flower_cmsg_get_data(msg);
	pmsg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	pmsg->reserved = 0;
	pmsg->info = carrier_ok;
	pmsg->mtu = cpu_to_be16(repr->netdev->mtu);

	nfp_ctrl_tx(repr->app->ctrl, msg);

	return 0;
}
/* Offload a TC matchall police action as a firmware rate limiter.
 *
 * Only a single police action on a non-shared block, bound to a VF
 * representor, is supported; every other configuration is rejected
 * with -EOPNOTSUPP and an extack message.  On success the limiter is
 * programmed via a QOS_MOD control message and the periodic stats
 * worker is started when the first limiter is installed.
 *
 * Return: 0 on success or a negative errno.
 */
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
				struct tc_cls_matchall_offload *flow,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *action = &flow->rule->action.entries[0];
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	u32 netdev_port_id;
	u64 burst, rate;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;

	if (repr_priv->block_shared) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
		return -EOPNOTSUPP;
	}

	if (repr->port->type != NFP_PORT_VF_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
		return -EOPNOTSUPP;
	}

	if (!flow_offload_has_one_action(&flow->rule->action)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
		return -EOPNOTSUPP;
	}

	/* (1 << 16) presumably corresponds to tc priority 1 (prio carried in
	 * the upper 16 bits) — TODO confirm against tc core encoding.
	 */
	if (flow->common.prio != (1 << 16)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
		return -EOPNOTSUPP;
	}

	if (action->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	/* Convert the burst from a time quantity into bytes at the policed
	 * rate: rate (bytes/s) * burst (psched ticks) / ticks-per-second.
	 */
	rate = action->police.rate_bytes_ps;
	burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst),
			PSCHED_TICKS_PER_SEC);
	netdev_port_id = nfp_repr_get_port_id(netdev);

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Peak and committed parameters are programmed identically, so the
	 * policer acts as a single-rate limiter.
	 */
	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.port = cpu_to_be32(netdev_port_id);
	config->bkt_tkn_p = cpu_to_be32(burst);
	config->bkt_tkn_c = cpu_to_be32(burst);
	config->pbs = cpu_to_be32(burst);
	config->cbs = cpu_to_be32(burst);
	config->pir = cpu_to_be32(rate);
	config->cir = cpu_to_be32(rate);
	nfp_ctrl_tx(repr->app->ctrl, skb);

	/* Record the offload and kick off periodic stats polling when the
	 * first limiter appears.
	 */
	repr_priv->qos_table.netdev_port_id = netdev_port_id;
	fl_priv->qos_rate_limiters++;
	if (fl_priv->qos_rate_limiters == 1)
		schedule_delayed_work(&fl_priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return 0;
}
static int nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; int err, reify_cnt; unsigned int i; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) return -ENOMEM; reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); if (!reprs) { err = -ENOMEM; goto err_free_ctrl_skb; } for (i = 0; i < eth_tbl->count; i++) { unsigned int phys_port = eth_tbl->ports[i].index; struct net_device *repr; struct nfp_port *port; u32 cmsg_port_id; repr = nfp_repr_alloc(app); if (!repr) { err = -ENOMEM; goto err_reprs_clean; } repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; goto err_reprs_clean; } nfp_repr = netdev_priv(repr); nfp_repr->app_priv = repr_priv; port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); nfp_repr_free(repr); goto err_reprs_clean; } err = nfp_port_init_phy_port(app->pf, app, port, i); if (err) { nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); nfp_net_get_mac_addr(app->pf, repr, port); cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); err = nfp_repr_init(app, repr, cmsg_port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } nfp_flower_cmsg_mac_repr_add(ctrl_skb, i, eth_tbl->ports[i].nbi, eth_tbl->ports[i].base, phys_port); RCU_INIT_POINTER(reprs->reprs[phys_port], repr); nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n", phys_port, repr->name); } nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); /* The REIFY/MAC_REPR control messages should be sent after the MAC * representors are registered using nfp_app_reprs_set(). 
This is * because the firmware may respond with control messages for the * MAC representors, f.e. to provide the driver with information * about their state, and without registration the driver will drop * any such messages. */ atomic_set(replies, 0); reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true); if (reify_cnt < 0) { err = reify_cnt; nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); goto err_reprs_remove; } err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); if (err) goto err_reprs_remove; nfp_ctrl_tx(app->ctrl, ctrl_skb); return 0; err_reprs_remove: reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL); err_reprs_clean: nfp_reprs_clean_and_free(app, reprs); err_free_ctrl_skb: kfree_skb(ctrl_skb); return err; }