static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct ifb_private *dp = netdev_priv(dev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&dp->rsync);
		stats->rx_packets = dp->rx_packets;
		stats->rx_bytes = dp->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&dp->rsync, start));

	do {
		start = u64_stats_fetch_begin_irq(&dp->tsync);
		stats->tx_packets = dp->tx_packets;
		stats->tx_bytes = dp->tx_bytes;
	} while (u64_stats_fetch_retry_irq(&dp->tsync, start));

	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}
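/*
 * For context: a minimal sketch of the writer side that pairs with the
 * fetch_begin/fetch_retry reader loops used throughout these snippets.
 * The helper name ifb_count_rx and its call site are assumptions; only
 * the u64_stats_update_begin()/u64_stats_update_end() API and the ifb
 * field names above are taken as given.
 */
static void ifb_count_rx(struct ifb_private *dp, unsigned int len)
{
	u64_stats_update_begin(&dp->rsync);	/* open write section */
	dp->rx_packets++;
	dp->rx_bytes += len;
	u64_stats_update_end(&dp->rsync);	/* racing readers will retry */
}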
static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = ndc->nvdev;
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	unsigned int start;
	u64 packets, bytes;
	int i, j;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}
}
static void ifb_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	unsigned int start;
	u64 packets, bytes;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		do {
			start = u64_stats_fetch_begin_irq(&txp->rsync);
			packets = txp->rx_packets;
			bytes = txp->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		do {
			start = u64_stats_fetch_begin_irq(&txp->tsync);
			packets = txp->tx_packets;
			bytes = txp->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
/**
 * fm10k_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: storage space for 64bit statistics
 *
 * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
 * function replaces fm10k_get_stats for kernels which support it.
 */
static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *ring;
	unsigned int start, i;
	u64 bytes, packets;

	rcu_read_lock();

	for (i = 0; i < interface->num_rx_queues; i++) {
		ring = ACCESS_ONCE(interface->rx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	for (i = 0; i < interface->num_tx_queues; i++) {
		ring = ACCESS_ONCE(interface->tx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}

	rcu_read_unlock();

	/* following stats updated by fm10k_service_task() */
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	return stats;
}
static void vlan_dev_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
	struct vlan_pcpu_stats *p;
	u32 rx_errors = 0, tx_dropped = 0;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rxpackets = p->rx_packets;
			rxbytes = p->rx_bytes;
			rxmulticast = p->rx_multicast;
			txpackets = p->tx_packets;
			txbytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->multicast += rxmulticast;
		stats->tx_packets += txpackets;
		stats->tx_bytes += txbytes;
		/* rx_errors & tx_dropped are u32 */
		rx_errors += p->rx_errors;
		tx_dropped += p->tx_dropped;
	}
	stats->rx_errors = rx_errors;
	stats->tx_dropped = tx_dropped;
}
/* Often modified stats are per cpu, other are shared (netdev->stats) */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot)
{
	int i;

	netdev_stats_to_stats64(tot, &dev->stats);

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *tstats =
						per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	return tot;
}
static struct rtnl_link_stats64 *
internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	stats->rx_errors = dev->stats.rx_errors;
	stats->tx_errors = dev->stats.tx_errors;
	stats->tx_dropped = dev->stats.tx_dropped;
	stats->rx_dropped = dev->stats.rx_dropped;

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *percpu_stats;
		struct pcpu_sw_netstats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->rx_bytes += local_stats.rx_bytes;
		stats->rx_packets += local_stats.rx_packets;
		stats->tx_bytes += local_stats.tx_bytes;
		stats->tx_packets += local_stats.tx_packets;
	}

	return stats;
}
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}
static void fe_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *stats, u64 *data)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_hw_stats *hwstats = priv->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			fe_stats_update(priv);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		data_src = &hwstats->tx_bytes;
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(fe_gdma_str); i++)
			*data_dst++ = *data_src++;
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
static int nfp_repr_get_host_stats64(const struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct nfp_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}

	return 0;
}
#if !defined(HAVE_VOID_NDO_GET_STATS64) && !defined(HAVE_RHEL7_MAX_MTU)
struct rtnl_link_stats64 *rpl_ip_tunnel_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
#else
void rpl_ip_tunnel_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *tot)
#endif
{
	int i;

	netdev_stats_to_stats64(tot, &dev->stats);

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *tstats =
			per_cpu_ptr((struct pcpu_sw_netstats __percpu *)dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}
#if !defined(HAVE_VOID_NDO_GET_STATS64) && !defined(HAVE_RHEL7_MAX_MTU)
	return tot;
#endif
}
static struct rtnl_link_stats64 *
nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	int i;
	u64 bytes = 0, packets = 0;

	for_each_possible_cpu(i) {
		const struct pcpu_lstats *nl_stats;
		u64 tbytes, tpackets;
		unsigned int start;

		nl_stats = per_cpu_ptr(dev->lstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
			tbytes = nl_stats->bytes;
			tpackets = nl_stats->packets;
		} while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));

		packets += tpackets;
		bytes += tbytes;
	}

	stats->rx_packets = packets;
	stats->tx_packets = 0;
	stats->rx_bytes = bytes;
	stats->tx_bytes = 0;

	return stats;
}
static void loopback_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
	u64 bytes = 0;
	u64 packets = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_lstats *lb_stats;
		u64 tbytes, tpackets;
		unsigned int start;

		lb_stats = per_cpu_ptr(dev->lstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
			tbytes = lb_stats->bytes;
			tpackets = lb_stats->packets;
		} while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
		bytes += tbytes;
		packets += tpackets;
	}
	stats->rx_packets = packets;
	stats->tx_packets = packets;
	stats->rx_bytes = bytes;
	stats->tx_bytes = bytes;
}
static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	if (!nvdev)
		return;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes += bytes;
		t->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes += bytes;
		t->rx_packets += packets;
		t->multicast += multicast;
	}

	t->tx_dropped = net->stats.tx_dropped;
	t->tx_errors = net->stats.tx_errors;
	t->rx_dropped = net->stats.rx_dropped;
	t->rx_errors = net->stats.rx_errors;
}
static void ena_safe_update_stat(u64 *src, u64 *dst,
				 struct u64_stats_sync *syncp)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		*(dst) = *src;
	} while (u64_stats_fetch_retry_irq(syncp, start));
}
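/*
 * A hypothetical call site for ena_safe_update_stat(), for illustration
 * only: copying a single ring counter out under that ring's syncp. The
 * field names (tx_stats.cnt and syncp on struct ena_ring) are assumptions
 * about the surrounding driver, not quoted from it.
 */
static void ena_read_tx_cnt(struct ena_ring *tx_ring, u64 *dst)
{
	ena_safe_update_stat(&tx_ring->tx_stats.cnt, dst, &tx_ring->syncp);
}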
static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
			rx_multicast = rx_stats->multicast + rx_stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes += tx_bytes;
		t->tx_packets += tx_packets;
		t->rx_bytes += rx_bytes;
		t->rx_packets += rx_packets;
		t->multicast += rx_multicast;
	}

	t->tx_dropped = net->stats.tx_dropped;
	t->tx_errors = net->stats.tx_errors;
	t->rx_dropped = net->stats.rx_dropped;
	t->rx_errors = net->stats.rx_errors;

	return t;
}
/* Often modified stats are per cpu, other are shared (netdev->stats) */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *tstats =
						per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->multicast = dev->stats.multicast;

	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
	tot->rx_errors = dev->stats.rx_errors;

	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	tot->collisions = dev->stats.collisions;

	return tot;
}
/**
 * ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	int i;

	/* We potentially have two sources of stats that need to be
	 * combined: those we have collected (split into err_stats and
	 * percpu_stats), and device error stats from netdev->get_stats()
	 * (for errors that happen downstream and therefore aren't
	 * reported through our vport_record_error() function).
	 * Stats from first source are reported by ovs over
	 * OVS_VPORT_ATTR_STATS.
	 * netdev-stats can be directly read over netlink-ioctl.
	 */

	stats->rx_errors = atomic_long_read(&vport->err_stats.rx_errors);
	stats->tx_errors = atomic_long_read(&vport->err_stats.tx_errors);
	stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
	stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);

	stats->rx_bytes = 0;
	stats->rx_packets = 0;
	stats->tx_bytes = 0;
	stats->tx_packets = 0;

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *percpu_stats;
		struct pcpu_sw_netstats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->rx_bytes += local_stats.rx_bytes;
		stats->rx_packets += local_stats.rx_packets;
		stats->tx_bytes += local_stats.tx_bytes;
		stats->tx_packets += local_stats.tx_packets;
	}
}
/**
 * ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));

	/* We potentially have 2 sources of stats that need to be combined:
	 * those we have collected (split into err_stats and percpu_stats) from
	 * set_stats() and device error stats from netdev->get_stats() (for
	 * errors that happen downstream and therefore aren't reported through
	 * our vport_record_error() function).
	 * Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS).
	 * netdev-stats can be directly read over netlink-ioctl.
	 */

	spin_lock_bh(&vport->stats_lock);

	stats->rx_errors = vport->err_stats.rx_errors;
	stats->tx_errors = vport->err_stats.tx_errors;
	stats->tx_dropped = vport->err_stats.tx_dropped;
	stats->rx_dropped = vport->err_stats.rx_dropped;

	spin_unlock_bh(&vport->stats_lock);

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *percpu_stats;
		struct pcpu_sw_netstats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->rx_bytes += local_stats.rx_bytes;
		stats->rx_packets += local_stats.rx_packets;
		stats->tx_bytes += local_stats.tx_bytes;
		stats->tx_packets += local_stats.tx_packets;
	}
}
static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	result->packets = 0;
	result->bytes = 0;
	for_each_possible_cpu(cpu) {
		struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->packets += packets;
		result->bytes += bytes;
	}
	return atomic64_read(&priv->dropped);
}
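/*
 * For context: a plausible ndo_get_stats64 caller for veth_stats_one(),
 * modeled on the veth driver. The local device's counters become the TX
 * totals and the RCU-protected peer's counters the RX totals; a sketch
 * assuming the veth_priv->peer layout, not a verbatim quote.
 */
static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct pcpu_vstats one;

	tot->tx_dropped = veth_stats_one(&one, dev);
	tot->tx_bytes = one.bytes;
	tot->tx_packets = one.packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		tot->rx_dropped = veth_stats_one(&one, peer);
		tot->rx_bytes = one.bytes;
		tot->rx_packets = one.packets;
	}
	rcu_read_unlock();
}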
static void __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
					struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}
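/*
 * The matching writer for the per-cpu basic stats above would look roughly
 * like this sketch, loosely modeled on the kernel's bstats_cpu_update()
 * (which takes an skb rather than a raw length). bytes is u64 and packets
 * u32, mirroring the reader above.
 */
static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bcpu,
				     unsigned int pkt_len)
{
	u64_stats_update_begin(&bcpu->syncp);
	bcpu->bstats.bytes += pkt_len;
	bcpu->bstats.packets++;
	u64_stats_update_end(&bcpu->syncp);
}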
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbevf_ring *ring;
	int i, j;
	char *p;

	ixgbevf_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		switch (ixgbevf_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *)net_stats +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		case IXGBEVF_STATS:
			p = (char *)adapter +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate Tx queue data */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate Rx queue data */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
}
static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}

static struct rtnl_link_stats64 *
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open = mlxsw_sx_port_open,
	.ndo_stop = mlxsw_sx_port_stop,
	.ndo_start_xmit = mlxsw_sx_port_xmit,
	.ndo_change_mtu = mlxsw_sx_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
};

static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",