int netdev_get_stats(const struct vport *vport, struct odp_vport_stats *stats) { const struct netdev_vport *netdev_vport = netdev_vport_priv(vport); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) struct rtnl_link_stats64 *netdev_stats, storage; netdev_stats = dev_get_stats(netdev_vport->dev, &storage); #else const struct net_device_stats *netdev_stats; netdev_stats = dev_get_stats(netdev_vport->dev); #endif stats->rx_bytes = netdev_stats->rx_bytes; stats->rx_packets = netdev_stats->rx_packets; stats->tx_bytes = netdev_stats->tx_bytes; stats->tx_packets = netdev_stats->tx_packets; stats->rx_dropped = netdev_stats->rx_dropped; stats->rx_errors = netdev_stats->rx_errors; stats->rx_frame_err = netdev_stats->rx_frame_errors; stats->rx_over_err = netdev_stats->rx_over_errors; stats->rx_crc_err = netdev_stats->rx_crc_errors; stats->tx_dropped = netdev_stats->tx_dropped; stats->tx_errors = netdev_stats->tx_errors; stats->collisions = netdev_stats->collisions; return 0; }
/*
 * NFExt_GetBrBytesCnt - return the bridge device's total byte count
 * (tx + rx) plus the locally maintained bridge TX byte counter.
 *
 * Returns 0 when the feature is compiled out, the bridge device cannot
 * be resolved, or no stats are available.
 */
VOS_UINT32 NFExt_GetBrBytesCnt(VOS_VOID)
{
#if (FEATURE_ON == FEATURE_SKB_EXP)
	struct net_device_stats *dev_stats = NULL;

	/* The bridge device is resolved lazily on the first call of this API,
	 * because the protocol stack comes up before the bridge device does. */
	if (NULL == g_stExFlowCtrlEntity.pstBrDev)
	{
		if (VOS_ERR == NFExt_SaveBrDev())
		{
			return 0;
		}
	}

	dev_stats = dev_get_stats(g_stExFlowCtrlEntity.pstBrDev);
	if (NULL == dev_stats)
	{
		return 0;
	}

	/* NOTE(review): the sum is returned as VOS_UINT32; on counters wider
	 * than 32 bits this wraps — presumably acceptable to callers, confirm. */
	return dev_stats->tx_bytes + dev_stats->rx_bytes + g_stExFlowCtrlEntity.aulTxBytesCnt[NF_EXT_TX_BYTES_CNT_BR];
#else
	return 0;
#endif
}
/*
 * vlandev_seq_show - /proc seq_file show handler for a single VLAN device.
 *
 * Prints the VLAN id, flags, 64-bit device statistics, encapsulation
 * counters, the underlying real device, and the ingress/egress priority
 * mappings.  Returns 0 (also when @seq->private is not a VLAN device, in
 * which case nothing is printed).
 */
static int vlandev_seq_show(struct seq_file *seq, void *offset)
{
	struct net_device *vlandev = (struct net_device *) seq->private;
	const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats;
	/* fmt for native unsigned long counters, fmt64 for u64 counters */
	static const char fmt[] = "%30s %12lu\n";
	static const char fmt64[] = "%30s %12llu\n";
	int i;

	if (!is_vlan_dev(vlandev))
		return 0;

	stats = dev_get_stats(vlandev, &temp);
	seq_printf(seq, "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n", vlandev->name, dev_info->vlan_id, (int)(dev_info->flags & 1), vlandev->priv_flags);

	seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
	seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
	seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast);
	seq_puts(seq, "\n");
	seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
	seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
	seq_printf(seq, fmt, "total headroom inc", dev_info->cnt_inc_headroom_on_tx);
	seq_printf(seq, fmt, "total encap on xmit", dev_info->cnt_encap_on_xmit);
	seq_printf(seq, "Device: %s", dev_info->real_dev->name);
	/* now show all PRIORITY mappings relating to this VLAN */
	seq_printf(seq, "\nINGRESS priority mappings: " "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n", dev_info->ingress_priority_map[0], dev_info->ingress_priority_map[1], dev_info->ingress_priority_map[2], dev_info->ingress_priority_map[3], dev_info->ingress_priority_map[4], dev_info->ingress_priority_map[5], dev_info->ingress_priority_map[6], dev_info->ingress_priority_map[7]);

	seq_printf(seq, " EGRESS priority mappings: ");
	/* 16 hash buckets of skb-priority -> TCI mappings, each a linked list */
	for (i = 0; i < 16; i++) {
		const struct vlan_priority_tci_mapping *mp = dev_info->egress_priority_map[i];
		while (mp) {
			/* vlan_qos stores the priority in TCI bits 15..13 */
			seq_printf(seq, "%u:%hu ", mp->priority, ((mp->vlan_qos >> 13) & 0x7));
			mp = mp->next;
		}
	}
	seq_puts(seq, "\n");

	return 0;
}
/**
 * hns_get_ethtool_stats - get detail statistics.
 * @netdev: net device
 * @stats: statistics info.
 * @data: statistics data.
 *
 * Fills @data with the 23 generic rtnl_link_stats64 counters, the two
 * netdev atomic drop counters, the driver tx_timeout count, and then the
 * HNAE hardware statistics starting at index 26.
 */
void hns_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data)
{
	u64 *p = data;
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;
	const struct rtnl_link_stats64 *net_stats;
	struct rtnl_link_stats64 temp;

	if (!h->dev->ops->get_stats || !h->dev->ops->update_stats) {
		netdev_err(netdev, "get_stats or update_stats is null!\n");
		return;
	}

	/* refresh hardware counters into netdev->stats before reading them */
	h->dev->ops->update_stats(h, &netdev->stats);

	net_stats = dev_get_stats(netdev, &temp);

	/* get netdev statistics */
	p[0] = net_stats->rx_packets;
	p[1] = net_stats->tx_packets;
	p[2] = net_stats->rx_bytes;
	p[3] = net_stats->tx_bytes;
	p[4] = net_stats->rx_errors;
	p[5] = net_stats->tx_errors;
	p[6] = net_stats->rx_dropped;
	p[7] = net_stats->tx_dropped;
	p[8] = net_stats->multicast;
	p[9] = net_stats->collisions;
	p[10] = net_stats->rx_over_errors;
	p[11] = net_stats->rx_crc_errors;
	p[12] = net_stats->rx_frame_errors;
	p[13] = net_stats->rx_fifo_errors;
	p[14] = net_stats->rx_missed_errors;
	p[15] = net_stats->tx_aborted_errors;
	p[16] = net_stats->tx_carrier_errors;
	p[17] = net_stats->tx_fifo_errors;
	p[18] = net_stats->tx_heartbeat_errors;
	p[19] = net_stats->rx_length_errors;
	p[20] = net_stats->tx_window_errors;
	p[21] = net_stats->rx_compressed;
	p[22] = net_stats->tx_compressed;
	/* raw reads of the netdev atomic drop counters */
	p[23] = netdev->rx_dropped.counter;
	p[24] = netdev->tx_dropped.counter;
	p[25] = priv->tx_timeout_count;

	/* get driver statistics */
	h->dev->ops->get_stats(h, &p[26]);
}
/* here's the real work! */ static void netdev_trig_timer(unsigned long arg) { struct led_netdev_data *trigger_data = (struct led_netdev_data *)arg; const struct net_device_stats *dev_stats; unsigned new_activity; struct rtnl_link_stats64 temp; write_lock(&trigger_data->lock); if (!trigger_data->link_up || !trigger_data->net_dev || (trigger_data->mode & (MODE_TX | MODE_RX)) == 0) { /* we don't need to do timer work, just reflect link state. */ led_set_brightness(trigger_data->led_cdev, ((trigger_data->mode & MODE_LINK) != 0 && trigger_data->link_up) ? LED_FULL : LED_OFF); goto no_restart; } dev_stats = dev_get_stats(trigger_data->net_dev,&temp); new_activity = ((trigger_data->mode & MODE_TX) ? dev_stats->tx_packets : 0) + ((trigger_data->mode & MODE_RX) ? dev_stats->rx_packets : 0); if (trigger_data->mode & MODE_LINK) { /* base state is ON (link present) */ /* if there's no link, we don't get this far and the LED is off */ /* OFF -> ON always */ /* ON -> OFF on activity */ if (trigger_data->led_cdev->brightness == LED_OFF) { led_set_brightness(trigger_data->led_cdev, LED_FULL); } else if (trigger_data->last_activity != new_activity) { led_set_brightness(trigger_data->led_cdev, LED_OFF); } } else { /* base state is OFF */ /* ON -> OFF always */ /* OFF -> ON on activity */ if (trigger_data->led_cdev->brightness == LED_FULL) { led_set_brightness(trigger_data->led_cdev, LED_OFF); } else if (trigger_data->last_activity != new_activity) { led_set_brightness(trigger_data->led_cdev, LED_FULL); } } trigger_data->last_activity = new_activity; mod_timer(&trigger_data->timer, jiffies + trigger_data->interval); no_restart: write_unlock(&trigger_data->lock); }
/** * ovs_vport_get_stats - retrieve device stats * * @vport: vport from which to retrieve the stats * @stats: location to store stats * * Retrieves transmit, receive, and error stats for the given device. * * Must be called with ovs_mutex or rcu_read_lock. */ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) { const struct rtnl_link_stats64 *dev_stats; struct rtnl_link_stats64 temp; dev_stats = dev_get_stats(vport->dev, &temp); stats->rx_errors = dev_stats->rx_errors; stats->tx_errors = dev_stats->tx_errors; stats->tx_dropped = dev_stats->tx_dropped; stats->rx_dropped = dev_stats->rx_dropped; stats->rx_bytes = dev_stats->rx_bytes; stats->rx_packets = dev_stats->rx_packets; stats->tx_bytes = dev_stats->tx_bytes; stats->tx_packets = dev_stats->tx_packets; }
/*
 * xgene_get_ethtool_stats - ethtool get_ethtool_stats handler.
 *
 * Copies the generic rtnl_link_stats64 fields selected by
 * gstrings_stats[].offset into @data, then appends the refreshed
 * extended hardware statistics.
 */
static void xgene_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *dummy, u64 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 link_stats;
	const char *base;
	int idx;

	dev_get_stats(ndev, &link_stats);

	/* Generic netdev counters, located by byte offset into the struct. */
	base = (const char *)&link_stats;
	for (idx = 0; idx < XGENE_STATS_LEN; idx++)
		data[idx] = *(u64 *)(base + gstrings_stats[idx].offset);

	/* Extended hardware counters, refreshed then copied verbatim. */
	xgene_get_extd_stats(pdata);
	for (idx = 0; idx < XGENE_EXTD_STATS_LEN; idx++)
		data[XGENE_STATS_LEN + idx] = pdata->extd_stats[idx];
}
/* here's the real work! */ static void netdev_trig_work(struct work_struct *work) { struct led_netdev_data *trigger_data = container_of(work, struct led_netdev_data, work.work); struct rtnl_link_stats64 *dev_stats; unsigned new_activity; struct rtnl_link_stats64 temp; if (!trigger_data->link_up || !trigger_data->net_dev || (trigger_data->mode & (MODE_TX | MODE_RX)) == 0) { /* we don't need to do timer work, just reflect link state. */ led_set_brightness(trigger_data->led_cdev, ((trigger_data->mode & MODE_LINK) != 0 && trigger_data->link_up) ? LED_FULL : LED_OFF); return; } dev_stats = dev_get_stats(trigger_data->net_dev, &temp); new_activity = ((trigger_data->mode & MODE_TX) ? dev_stats->tx_packets : 0) + ((trigger_data->mode & MODE_RX) ? dev_stats->rx_packets : 0); if (trigger_data->mode & MODE_LINK) { /* base state is ON (link present) */ /* if there's no link, we don't get this far and the LED is off */ /* OFF -> ON always */ /* ON -> OFF on activity */ if (trigger_data->led_cdev->brightness == LED_OFF) { led_set_brightness(trigger_data->led_cdev, LED_FULL); } else if (trigger_data->last_activity != new_activity) { led_set_brightness(trigger_data->led_cdev, LED_OFF); } } else { /* base state is OFF */ /* ON -> OFF always */ /* OFF -> ON on activity */ if (trigger_data->led_cdev->brightness == LED_FULL) { led_set_brightness(trigger_data->led_cdev, LED_OFF); } else if (trigger_data->last_activity != new_activity) { led_set_brightness(trigger_data->led_cdev, LED_FULL); } } trigger_data->last_activity = new_activity; schedule_delayed_work(&trigger_data->work, trigger_data->interval); }
/*
 * get_tx_bytes - sum the transmitted byte counters of all wireless-ish
 * interfaces ("wlan*", "ap*", "p2p*") across every network namespace.
 *
 * NOTE(review): the accumulator and return type are unsigned long, while
 * rtnl_link_stats64 counters are u64 — on 32-bit kernels this wraps at
 * 4 GiB; confirm callers only need a wrapping activity indicator.
 * NOTE(review): dev_base_lock guards the per-net device list; whether it
 * is sufficient for the for_each_net() namespace walk here should be
 * verified against the kernel version in use.
 */
static unsigned long get_tx_bytes(void)
{
	struct net_device *dev;
	struct net *net;
	unsigned long tx_bytes = 0;

	read_lock(&dev_base_lock);
	for_each_net(net) {
		for_each_netdev(net, dev) {
			/* prefix match: wlan*, ap*, p2p* */
			if(!strncmp(dev->name, "wlan", 4) || !strncmp(dev->name, "ap", 2) || !strncmp(dev->name, "p2p", 3)) {
				struct rtnl_link_stats64 temp;
				const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
				tx_bytes = tx_bytes + stats->tx_bytes;
			}
		}
	}
	read_unlock(&dev_base_lock);
	return tx_bytes;
}
/* Show a given an attribute in the statistics group */ static ssize_t netstat_show(const struct device *d, struct device_attribute *attr, char *buf, unsigned long offset) { struct net_device *dev = to_net_dev(d); ssize_t ret = -EINVAL; WARN_ON(offset > sizeof(struct rtnl_link_stats64) || offset % sizeof(u64) != 0); read_lock(&dev_base_lock); if (dev_isalive(dev)) { struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset)); } read_unlock(&dev_base_lock); return ret; }
struct rtnl_link_stats64 *rpl_dev_get_stats(struct net_device *dev, struct rtnl_link_stats64 *storage) { const struct net_device_stats *stats = dev_get_stats(dev); #define copy(s) storage->s = stats->s copy(rx_packets); copy(tx_packets); copy(rx_bytes); copy(tx_bytes); copy(rx_errors); copy(tx_errors); copy(rx_dropped); copy(tx_dropped); copy(multicast); copy(collisions); copy(rx_length_errors); copy(rx_over_errors); copy(rx_crc_errors); copy(rx_frame_errors); copy(rx_fifo_errors); copy(rx_missed_errors); copy(tx_aborted_errors); copy(tx_carrier_errors); copy(tx_fifo_errors); copy(tx_heartbeat_errors); copy(tx_window_errors); copy(rx_compressed); copy(tx_compressed); #undef copy return storage; }
/* NDIS Functions */
/*
 * gen_ndis_query_resp - build the response to an RNDIS query OID.
 *
 * Writes the answer for @OID into the response buffer of @r and returns
 * 0 on success, -ENOTSUPP for unhandled OIDs, or -ENOMEM when @r or its
 * buffer is missing.
 *
 * NOTE(review): this definition continues beyond the visible portion of
 * the file; the remainder of the switch is not shown here.
 */
static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, unsigned buf_len, rndis_resp_t *r)
{
	int retval = -ENOTSUPP;
	u32 length = 4; /* usually */
	__le32 *outbuf;
	int i, count;
	rndis_query_cmplt_type *resp;
	struct net_device *net;
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats;

	if (!r)
		return -ENOMEM;
	resp = (rndis_query_cmplt_type *)r->buf;
	if (!resp)
		return -ENOMEM;

	/* hex-dump the incoming query payload when debugging is verbose */
	if (buf_len && rndis_debug > 1) {
		pr_debug("query OID %08x value, len %d:\n", OID, buf_len);
		for (i = 0; i < buf_len; i += 16) {
			pr_debug("%03d: %08x %08x %08x %08x\n", i, get_unaligned_le32(&buf[i]), get_unaligned_le32(&buf[i + 4]), get_unaligned_le32(&buf[i + 8]), get_unaligned_le32(&buf[i + 12]));
		}
	}

	/* response goes here, right after the header */
	outbuf = (__le32 *)&resp[1];
	resp->InformationBufferOffset = cpu_to_le32(16);

	net = rndis_per_dev_params[configNr].dev;
	stats = dev_get_stats(net, &temp);

	switch (OID) {

	/* general oids (table 4-1) */

	/* mandatory */
	case RNDIS_OID_GEN_SUPPORTED_LIST:
		pr_debug("%s: RNDIS_OID_GEN_SUPPORTED_LIST\n", __func__);
		length = sizeof(oid_supported_list);
		count = length / sizeof(u32);
		for (i = 0; i < count; i++)
			outbuf[i] = cpu_to_le32(oid_supported_list[i]);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_HARDWARE_STATUS:
		pr_debug("%s: RNDIS_OID_GEN_HARDWARE_STATUS\n", __func__);
		/* Bogus question!
		 * Hardware must be ready to receive high level protocols.
		 * BTW:
		 * reddite ergo quae sunt Caesaris Caesari
		 * et quae sunt Dei Deo!
		 */
		*outbuf = cpu_to_le32(0);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_MEDIA_SUPPORTED:
		pr_debug("%s: RNDIS_OID_GEN_MEDIA_SUPPORTED\n", __func__);
		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_MEDIA_IN_USE:
		pr_debug("%s: RNDIS_OID_GEN_MEDIA_IN_USE\n", __func__);
		/* one medium, one transport... (maybe you do it better) */
		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE:
		pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
		if (rndis_per_dev_params[configNr].dev) {
			*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].dev->mtu);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_LINK_SPEED:
		if (rndis_debug > 1)
			pr_debug("%s: RNDIS_OID_GEN_LINK_SPEED\n", __func__);
		if (rndis_per_dev_params[configNr].media_state == RNDIS_MEDIA_STATE_DISCONNECTED)
			*outbuf = cpu_to_le32(0);
		else
			*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].speed);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE:
		pr_debug("%s: RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
		if (rndis_per_dev_params[configNr].dev) {
			*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].dev->mtu);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE:
		pr_debug("%s: RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
		if (rndis_per_dev_params[configNr].dev) {
			*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].dev->mtu);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_VENDOR_ID:
		pr_debug("%s: RNDIS_OID_GEN_VENDOR_ID\n", __func__);
		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].vendorID);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_VENDOR_DESCRIPTION:
		pr_debug("%s: RNDIS_OID_GEN_VENDOR_DESCRIPTION\n", __func__);
		if (rndis_per_dev_params[configNr].vendorDescr) {
			length = strlen(rndis_per_dev_params[configNr].vendorDescr);
/*
 * ixgbevf_get_ethtool_stats - ethtool get_ethtool_stats handler.
 *
 * Fills @data with the global statistics table (sourced either from the
 * generic rtnl_link_stats64 or from the adapter struct, by byte offset),
 * followed by per-TX-queue and per-RX-queue packet/byte pairs read under
 * u64_stats sequence protection.
 */
static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbevf_ring *ring;
	int i, j;
	char *p;

	/* refresh adapter counters before sampling */
	ixgbevf_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		/* pick the base object the stat offset applies to */
		switch (ixgbevf_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *)net_stats + ixgbevf_gstrings_stats[i].stat_offset;
			break;
		case IXGBEVF_STATS:
			p = (char *)adapter + ixgbevf_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		/* read as u64 or u32 depending on the declared stat size */
		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate Tx queue data */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		/* retry until a consistent packets/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate Rx queue data */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
}
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt) { struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL; struct nfp_net_ring_set rx = { .n_rings = nn->num_rx_rings, .mtu = nn->netdev->mtu, .dcnt = rxd_cnt, }; struct nfp_net_ring_set tx = { .n_rings = nn->num_tx_rings, .dcnt = txd_cnt, }; if (nn->rxd_cnt != rxd_cnt) reconfig_rx = ℞ if (nn->txd_cnt != txd_cnt) reconfig_tx = &tx; return nfp_net_ring_reconfig(nn, &nn->xdp_prog, reconfig_rx, reconfig_tx); } static int nfp_net_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct nfp_net *nn = netdev_priv(netdev); u32 rxd_cnt, txd_cnt; /* We don't have separate queues/rings for small/large frames. */ if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; /* Round up to supported values */ rxd_cnt = roundup_pow_of_two(ring->rx_pending); txd_cnt = roundup_pow_of_two(ring->tx_pending); if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS || txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS) return -EINVAL; if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt) return 0; nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } static void nfp_net_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct nfp_net *nn = netdev_priv(netdev); u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < nn->num_r_vecs; i++) { sprintf(p, "rvec_%u_rx_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rvec_%u_tx_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rvec_%u_tx_busy", i); p += ETH_GSTRING_LEN; } strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "hw_rx_csum_err", 
ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "tx_gather", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "tx_lso", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; for (i = 0; i < nn->num_tx_rings; i++) { sprintf(p, "txq_%u_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "txq_%u_bytes", i); p += ETH_GSTRING_LEN; } for (i = 0; i < nn->num_rx_rings; i++) { sprintf(p, "rxq_%u_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rxq_%u_bytes", i); p += ETH_GSTRING_LEN; } break; } } static void nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {}; struct nfp_net *nn = netdev_priv(netdev); struct rtnl_link_stats64 *netdev_stats; struct rtnl_link_stats64 temp = {}; u64 tmp[NN_ET_RVEC_GATHER_STATS]; u8 __iomem *io_p; int i, j, k; u8 *p; netdev_stats = dev_get_stats(netdev, &temp); for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { switch (nfp_net_et_stats[i].type) { case NETDEV_ET_STATS: p = (char *)netdev_stats + nfp_net_et_stats[i].off; data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ? 
*(u64 *)p : *(u32 *)p; break; case NFP_NET_DEV_ET_STATS: io_p = nn->ctrl_bar + nfp_net_et_stats[i].off; data[i] = readq(io_p); break; } } for (j = 0; j < nn->num_r_vecs; j++) { unsigned int start; do { start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync); data[i++] = nn->r_vecs[j].rx_pkts; tmp[0] = nn->r_vecs[j].hw_csum_rx_ok; tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok; tmp[2] = nn->r_vecs[j].hw_csum_rx_error; } while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start)); do { start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync); data[i++] = nn->r_vecs[j].tx_pkts; data[i++] = nn->r_vecs[j].tx_busy; tmp[3] = nn->r_vecs[j].hw_csum_tx; tmp[4] = nn->r_vecs[j].hw_csum_tx_inner; tmp[5] = nn->r_vecs[j].tx_gather; tmp[6] = nn->r_vecs[j].tx_lso; } while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start)); for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++) gathered_stats[k] += tmp[k]; } for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) data[i++] = gathered_stats[j]; for (j = 0; j < nn->num_tx_rings; j++) { io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j); data[i++] = readq(io_p); io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; data[i++] = readq(io_p); } for (j = 0; j < nn->num_rx_rings; j++) { io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j); data[i++] = readq(io_p); io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; data[i++] = readq(io_p); } } static int nfp_net_get_sset_count(struct net_device *netdev, int sset) { struct nfp_net *nn = netdev_priv(netdev); switch (sset) { case ETH_SS_STATS: return NN_ET_STATS_LEN; default: return -EOPNOTSUPP; } } /* RX network flow classification (RSS, filters, etc) */ static u32 ethtool_flow_to_nfp_flag(u32 flow_type) { static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = { [TCP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_TCP, [TCP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_TCP, [UDP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_UDP, [UDP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_UDP, [IPV4_FLOW] = NFP_NET_CFG_RSS_IPV4, [IPV6_FLOW] = NFP_NET_CFG_RSS_IPV6, }; if (flow_type >= 
ARRAY_SIZE(xlate_ethtool_to_nfp)) return 0; return xlate_ethtool_to_nfp[flow_type]; }