/**
 * nfp_flower_cmsg_portmod_rx() - Handle a PORT_MOD message from firmware.
 * @app:	NFP app handle
 * @skb:	control message buffer
 *
 * Looks up the representor for the port named in the message and mirrors
 * the reported link state (and, when valid, MTU) onto it.
 */
static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_portmod *msg;
	struct net_device *netdev;
	bool link;

	msg = nfp_flower_cmsg_get_data(skb);
	link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;

	rtnl_lock();
	rcu_read_lock();
	netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
	rcu_read_unlock();
	if (!netdev) {
		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
				     be32_to_cpu(msg->portnum));
		rtnl_unlock();
		return;
	}

	if (link) {
		u16 mtu = be16_to_cpu(msg->mtu);

		netif_carrier_on(netdev);

		/* An MTU of 0 from the firmware should be ignored */
		if (mtu)
			dev_set_mtu(netdev, mtu);
	} else {
		netif_carrier_off(netdev);
	}
	rtnl_unlock();
}
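/**
 * nfp_flower_cmsg_mac_repr_add() - Fill one port entry of a MAC_REPR message.
 * @skb:	message buffer from nfp_flower_cmsg_mac_repr_start()
 * @idx:	index of the port entry to populate
 * @nbi:	NBI number, masked into the entry's info field
 * @nbi_port:	port number within that NBI
 * @phys_port:	physical port number of the representor
 */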
void
nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
			     unsigned int nbi, unsigned int nbi_port,
			     unsigned int phys_port)
{
	struct nfp_flower_cmsg_mac_repr *msg;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->ports[idx].idx = idx;
	msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
	msg->ports[idx].nbi_port = nbi_port;
	msg->ports[idx].phys_port = phys_port;
}
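/**
 * nfp_flower_xmit_flow() - Send a flow rule to firmware over the control channel.
 * @netdev:	representor netdev the flow was offloaded on
 * @nfp_flow:	flow payload holding metadata, match key, mask and actions
 * @mtype:	control message type to emit (flow add or delete)
 *
 * The metadata lengths are temporarily converted to the firmware's units
 * (long words of NFP_FL_LW_SIZ) while the message is serialised, then
 * restored to bytes for the rest of the driver.
 *
 * Return: 0 on success, or -ENOMEM if the message could not be allocated.
 */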
static int
nfp_flower_xmit_flow(struct net_device *netdev, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}
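/**
 * nfp_flower_remove_rate_limiter() - Remove a qos rate limiter from a port.
 * @app:	NFP app handle
 * @netdev:	netdev the rate limiter was installed on
 * @flow:	tc matchall offload request
 * @extack:	netlink extended ack for error reporting
 *
 * Clears the representor's qos table entry, stops the stats worker when the
 * last limiter goes away, and tells firmware to delete the policer.
 *
 * Return: 0 on success, or a negative errno on failure.
 */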
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			       struct tc_cls_matchall_offload *flow,
			       struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	u32 netdev_port_id;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	netdev_port_id = nfp_repr_get_port_id(netdev);
	repr_priv = repr->app_priv;

	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
		return -EOPNOTSUPP;
	}

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Clear all qos associated data for this interface */
	memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
	fl_priv->qos_rate_limiters--;
	if (!fl_priv->qos_rate_limiters)
		cancel_delayed_work_sync(&fl_priv->qos_stats_work);

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.port = cpu_to_be32(netdev_port_id);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}
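/**
 * nfp_flower_cmsg_mac_repr_start() - Allocate a MAC_REPR control message.
 * @app:	NFP app handle
 * @num_ports:	number of port entries the message must hold
 *
 * Return: message buffer ready for nfp_flower_cmsg_mac_repr_add(), or
 *	   NULL on allocation failure.
 */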
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
{
	struct nfp_flower_cmsg_mac_repr *msg;
	struct sk_buff *skb;
	unsigned int size;

	size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
	skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
				    GFP_KERNEL);
	if (!skb)
		return NULL;

	msg = nfp_flower_cmsg_get_data(skb);
	memset(msg->reserved, 0, sizeof(msg->reserved));
	msg->num_ports = num_ports;

	return skb;
}
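/**
 * nfp_flower_stats_rlim_request() - Ask firmware for policer stats on a port.
 * @fl_priv:	driver private flower data
 * @netdev_port_id: firmware port id to query
 *
 * Fire-and-forget request; the reply is handled asynchronously by
 * nfp_flower_stats_rlim_reply(). An allocation failure is silently
 * ignored, as the query is simply reissued on the next stats interval.
 */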
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
			      u32 netdev_port_id)
{
	struct nfp_police_cfg_head *head;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(fl_priv->app,
				    sizeof(struct nfp_police_cfg_head),
				    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
				    GFP_ATOMIC);
	if (!skb)
		return;

	head = nfp_flower_cmsg_get_data(skb);
	memset(head, 0, sizeof(struct nfp_police_cfg_head));
	head->port = cpu_to_be32(netdev_port_id);

	nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}
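/**
 * nfp_flower_cmsg_portreify() - Tell firmware whether a representor exists.
 * @repr:	representor being created or torn down
 * @exists:	true when the representor is created, false on teardown
 *
 * Return: 0 on success, or -ENOMEM if the message could not be allocated.
 */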
int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
{
	struct nfp_flower_cmsg_portreify *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
				    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	msg->reserved = 0;
	msg->info = cpu_to_be16(exists);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}
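/**
 * nfp_flower_cmsg_portmod() - Report a representor's carrier state and MTU.
 * @repr:	representor whose state changed
 * @carrier_ok:	current carrier (link) state to report to firmware
 *
 * Return: 0 on success, or -ENOMEM if the message could not be allocated.
 */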
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
{
	struct nfp_flower_cmsg_portmod *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
				    NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	msg->reserved = 0;
	msg->info = carrier_ok;
	msg->mtu = cpu_to_be16(repr->netdev->mtu);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}
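/**
 * nfp_flower_rx_flow_stats() - Update flow stats from a firmware message.
 * @app:	NFP app handle
 * @skb:	control message carrying an array of struct nfp_fl_stats_frame
 *
 * Each frame carries a host context id that indexes the driver's stats
 * table; packet and byte counts are accumulated under the stats lock.
 */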
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}
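/**
 * nfp_flower_cmsg_portreify_rx() - Handle a PORT_REIFY reply from firmware.
 * @app:	NFP app handle
 * @skb:	control message buffer
 *
 * Counts the acknowledgement and wakes any thread waiting for all
 * representors to be reified.
 */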
static void
nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_cmsg_portreify *msg;
	bool exists;

	msg = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
	rcu_read_unlock();
	if (!exists) {
		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
				     be32_to_cpu(msg->portnum));
		return;
	}

	atomic_inc(&priv->reify_replies);
	wake_up_interruptible(&priv->reify_wait_queue);
}
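/**
 * nfp_flower_stats_rlim_reply() - Handle a policer stats reply from firmware.
 * @app:	NFP app handle
 * @skb:	control message with a struct nfp_police_stats_reply payload
 *
 * Folds passed and dropped counters into the representor's current stats.
 * On the first reply the previous stats are seeded with the same values,
 * so later delta calculations start from this baseline.
 */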
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_stats_reply *msg;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	struct net_device *netdev;
	struct nfp_repr *repr;
	u32 netdev_port_id;

	msg = nfp_flower_cmsg_get_data(skb);
	netdev_port_id = be32_to_cpu(msg->head.port);
	rcu_read_lock();
	netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
	if (!netdev)
		goto exit_unlock_rcu;

	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
			   be64_to_cpu(msg->drop_pkts);
	curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
			    be64_to_cpu(msg->drop_bytes);

	if (!repr_priv->qos_table.last_update) {
		prev_stats->pkts = curr_stats->pkts;
		prev_stats->bytes = curr_stats->bytes;
	}

	repr_priv->qos_table.last_update = jiffies;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
	rcu_read_unlock();
}
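/**
 * nfp_flower_install_rate_limiter() - Offload a tc matchall police action.
 * @app:	NFP app handle
 * @netdev:	netdev the rate limiter is to be installed on
 * @flow:	tc matchall offload request carrying the police action
 * @extack:	netlink extended ack for error reporting
 *
 * Only a single, highest-priority police action on a VF representor is
 * supported. Committed and peak rate/burst are programmed to the same
 * values, and the periodic stats worker is started with the first limiter.
 *
 * Return: 0 on success, or a negative errno on failure.
 */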
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
				struct tc_cls_matchall_offload *flow,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *action = &flow->rule->action.entries[0];
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	u32 netdev_port_id;
	u64 burst, rate;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;

	if (repr_priv->block_shared) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
		return -EOPNOTSUPP;
	}

	if (repr->port->type != NFP_PORT_VF_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
		return -EOPNOTSUPP;
	}

	if (!flow_offload_has_one_action(&flow->rule->action)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (flow->common.prio != (1 << 16)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
		return -EOPNOTSUPP;
	}

	if (action->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	rate = action->police.rate_bytes_ps;
	burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst),
			PSCHED_TICKS_PER_SEC);
	netdev_port_id = nfp_repr_get_port_id(netdev);

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.port = cpu_to_be32(netdev_port_id);
	config->bkt_tkn_p = cpu_to_be32(burst);
	config->bkt_tkn_c = cpu_to_be32(burst);
	config->pbs = cpu_to_be32(burst);
	config->cbs = cpu_to_be32(burst);
	config->pir = cpu_to_be32(rate);
	config->cir = cpu_to_be32(rate);
	nfp_ctrl_tx(repr->app->ctrl, skb);

	repr_priv->qos_table.netdev_port_id = netdev_port_id;
	fl_priv->qos_rate_limiters++;
	if (fl_priv->qos_rate_limiters == 1)
		schedule_delayed_work(&fl_priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return 0;
}
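/* Sketch (assumption, not part of this excerpt): the install/remove handlers
 * above are expected to be dispatched from the driver's tc matchall setup
 * callback, roughly:
 *
 *	switch (flow->command) {
 *	case TC_CLSMATCHALL_REPLACE:
 *		return nfp_flower_install_rate_limiter(app, netdev, flow,
 *						       extack);
 *	case TC_CLSMATCHALL_DESTROY:
 *		return nfp_flower_remove_rate_limiter(app, netdev, flow,
 *						      extack);
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */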