static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
{
	u32 new_rss_cfg = nn->rss_cfg;
	u32 nfp_rss_flag;
	int err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return -EOPNOTSUPP;

	/* RSS only supports IP SA/DA and L4 src/dst ports */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SA/DA fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		new_rss_cfg &= ~nfp_rss_flag;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		new_rss_cfg |= nfp_rss_flag;
		break;
	default:
		return -EINVAL;
	}

	new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ;
	new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

	if (new_rss_cfg == nn->rss_cfg)
		return 0;

	writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
	if (err)
		return err;

	nn->rss_cfg = new_rss_cfg;

	nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
	return 0;
}
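/* Usage sketch (hypothetical, for illustration only): this is roughly the
 * request that `ethtool -N ethX rx-flow-hash tcp4 sdfn` delivers via the
 * ethtool core.  ETHTOOL_SRXFH, TCP_V4_FLOW and the RXH_* bits come from
 * <linux/ethtool.h>; "sd" alone (IP SA/DA only) would instead clear both L4
 * bits and take the "case 0" branch above.
 */
#if 0	/* illustration only, not built */
	struct ethtool_rxnfc nfc = {
		.cmd		= ETHTOOL_SRXFH,
		.flow_type	= TCP_V4_FLOW,
		.data		= RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};

	err = nfp_net_set_rss_hash_opt(nn, &nfc);
#endif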
/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring: RX ring to receive from
 * @budget:  NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * There are differences between the NFP-3200 firmware and the
 * NFP-6000 firmware.  The NFP-3200 firmware uses a dedicated RX queue
 * to indicate that new packets have arrived.  The NFP-6000 does not
 * have this queue and uses the DD bit in the RX descriptor.  This
 * method cannot be used on the NFP-3200 as it causes a race
 * condition: the RX ring write pointer on the NFP-3200 is updated
 * after packets (and descriptors) have been DMAed.  If the DD bit is
 * used and subsequently the read pointer is updated, this may cause
 * the RX queue to underflow (if the firmware has not yet updated the
 * write pointer).  Therefore we use slightly ugly conditional code
 * below to handle the differences.  We may, in the future, update the
 * NFP-3200 firmware to behave the same as the firmware on the
 * NFP-6000.
 *
 * Return: Number of packets received.
 */
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	unsigned int data_len, meta_len;
	int avail = 0, pkts_polled = 0;
	struct sk_buff *skb, *new_skb;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	u32 qcp_wr_p;
	int idx;

	if (nn->is_nfp3200) {
		/* Work out how many packets arrived */
		qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
		idx = rx_ring->rd_p % rx_ring->cnt;

		if (qcp_wr_p == idx)
			/* No new packets */
			return 0;

		if (qcp_wr_p > idx)
			avail = qcp_wr_p - idx;
		else
			avail = qcp_wr_p + rx_ring->cnt - idx;
	} else {
		avail = budget + 1;
	}

	while (avail > 0 && pkts_polled < budget) {
		idx = rx_ring->rd_p % rx_ring->cnt;

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
			if (nn->is_nfp3200)
				nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
				       rx_ring->idx, idx,
				       rxd->vals[0], rxd->vals[1]);
			break;
		}
		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		rx_ring->rd_p++;
		pkts_polled++;
		avail--;

		skb = rx_ring->rxbufs[idx].skb;

		new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
					       nn->fl_bufsz);
		if (!new_skb) {
			nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
					    rx_ring->rxbufs[idx].dma_addr);
			u64_stats_update_begin(&r_vec->rx_sync);
			r_vec->rx_drops++;
			u64_stats_update_end(&r_vec->rx_sync);
			continue;
		}

		dma_unmap_single(&nn->pdev->dev,
				 rx_ring->rxbufs[idx].dma_addr,
				 nn->fl_bufsz, DMA_FROM_DEVICE);

		nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can
		 * vary on a packet by packet basis.  If rx_offset is set to
		 * zero (_RX_OFFSET_DYNAMIC) metadata starts at the beginning
		 * of the buffer and is immediately followed by the packet
		 * (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);

		if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			skb_reserve(skb, meta_len);
		else
			skb_reserve(skb, nn->rx_offset);
		skb_put(skb, data_len - meta_len);

		nfp_net_set_hash(nn->netdev, skb, rxd);

		/* Pad small frames to minimum */
		if (skb_put_padto(skb, 60))
			break;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += skb->len;
		u64_stats_update_end(&r_vec->rx_sync);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, nn->netdev);

		nfp_net_rx_csum(nn, r_vec, rxd, skb);

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));

		napi_gro_receive(&rx_ring->r_vec->napi, skb);
	}

	if (nn->is_nfp3200)
		nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled);

	return pkts_polled;
}
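/* Stand-alone sketch (hypothetical helper, not part of the driver) of the
 * NFP-3200 "packets available" arithmetic used at the top of nfp_net_rx().
 * The software read pointer is free-running and is reduced modulo the ring
 * size before being compared with the hardware write pointer; when the write
 * pointer has wrapped past the end of the ring, the ring size is added back.
 */
#if 0	/* illustration only, not built */
static int nfp3200_rx_avail(u32 qcp_wr_p, u32 rd_p, u32 cnt)
{
	u32 idx = rd_p % cnt;		/* read pointer, modulo ring size */

	if (qcp_wr_p == idx)
		return 0;		/* ring empty, no new packets */
	if (qcp_wr_p > idx)
		return qcp_wr_p - idx;	/* no wrap between the pointers */
	return qcp_wr_p + cnt - idx;	/* write pointer wrapped */
}
#endif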
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
	struct nfp_net_ring_set rx = {
		.n_rings = nn->num_rx_rings,
		.mtu = nn->netdev->mtu,
		.dcnt = rxd_cnt,
	};
	struct nfp_net_ring_set tx = {
		.n_rings = nn->num_tx_rings,
		.dcnt = txd_cnt,
	};

	if (nn->rxd_cnt != rxd_cnt)
		reconfig_rx = &rx;
	if (nn->txd_cnt != txd_cnt)
		reconfig_tx = &tx;

	return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
				     reconfig_rx, reconfig_tx);
}

static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 rxd_cnt, txd_cnt;

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
		return -EINVAL;

	if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}

static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
			memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < nn->num_r_vecs; i++) {
			sprintf(p, "rvec_%u_rx_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rvec_%u_tx_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rvec_%u_tx_busy", i);
			p += ETH_GSTRING_LEN;
		}
		strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "tx_gather", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "tx_lso", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		for (i = 0; i < nn->num_tx_rings; i++) {
			sprintf(p, "txq_%u_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "txq_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < nn->num_rx_rings; i++) {
			sprintf(p, "rxq_%u_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rxq_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
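/* Layout sketch: the string table built above and the value array filled in
 * by nfp_net_get_stats() below must stay in the same order.  Assuming the
 * NN_ET_* helper macros defined elsewhere in this file, the count reported
 * for ETH_SS_STATS works out as:
 *
 *   NN_ET_STATS_LEN = NN_ET_GLOBAL_STATS_LEN
 *                   + 3 * nn->num_r_vecs       (rx_pkts, tx_pkts, tx_busy)
 *                   + NN_ET_RVEC_GATHER_STATS  (hw_rx_csum_ok ... tx_lso)
 *                   + 2 * nn->num_tx_rings     (pkts + bytes per TX queue)
 *                   + 2 * nn->num_rx_rings     (pkts + bytes per RX queue)
 */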
static void nfp_net_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	struct rtnl_link_stats64 *netdev_stats;
	struct rtnl_link_stats64 temp = {};
	u64 tmp[NN_ET_RVEC_GATHER_STATS];
	u8 __iomem *io_p;
	int i, j, k;
	u8 *p;

	netdev_stats = dev_get_stats(netdev, &temp);

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
		switch (nfp_net_et_stats[i].type) {
		case NETDEV_ET_STATS:
			p = (char *)netdev_stats + nfp_net_et_stats[i].off;
			data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ?
				*(u64 *)p : *(u32 *)p;
			break;

		case NFP_NET_DEV_ET_STATS:
			io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
			data[i] = readq(io_p);
			break;
		}
	}
	for (j = 0; j < nn->num_r_vecs; j++) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync);
			data[i++] = nn->r_vecs[j].rx_pkts;
			tmp[0] = nn->r_vecs[j].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[j].hw_csum_rx_error;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync);
			data[i++] = nn->r_vecs[j].tx_pkts;
			data[i++] = nn->r_vecs[j].tx_busy;
			tmp[3] = nn->r_vecs[j].hw_csum_tx;
			tmp[4] = nn->r_vecs[j].hw_csum_tx_inner;
			tmp[5] = nn->r_vecs[j].tx_gather;
			tmp[6] = nn->r_vecs[j].tx_lso;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start));

		for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++)
			gathered_stats[k] += tmp[k];
	}
	for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
		data[i++] = gathered_stats[j];
	for (j = 0; j < nn->num_tx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
	for (j = 0; j < nn->num_rx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
}

static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return NN_ET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* RX network flow classification (RSS, filters, etc) */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}
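/* Behaviour sketch (illustration only): mapped flow types translate to their
 * NFP_NET_CFG_RSS_* flag; flow types the firmware cannot hash on either hit
 * a zero table entry (e.g. SCTP_V4_FLOW) or fall outside the table entirely
 * (e.g. ETHER_FLOW, which is numerically above IPV6_FLOW).  Both translate
 * to 0, which nfp_net_set_rss_hash_opt() turns into -EINVAL.
 */
#if 0	/* illustration only, not built; hypothetical self-checks */
	WARN_ON(ethtool_flow_to_nfp_flag(TCP_V4_FLOW) !=
		NFP_NET_CFG_RSS_IPV4_TCP);
	WARN_ON(ethtool_flow_to_nfp_flag(SCTP_V4_FLOW) != 0);	/* zero entry */
	WARN_ON(ethtool_flow_to_nfp_flag(ETHER_FLOW) != 0);	/* off-table */
#endif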