Example #1
static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct ifb_private *dp = netdev_priv(dev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&dp->rsync);
		stats->rx_packets = dp->rx_packets;
		stats->rx_bytes = dp->rx_bytes;
	} while (u64_stats_fetch_retry_bh(&dp->rsync, start));

	do {
		start = u64_stats_fetch_begin_bh(&dp->tsync);

		stats->tx_packets = dp->tx_packets;
		stats->tx_bytes = dp->tx_bytes;

	} while (u64_stats_fetch_retry_bh(&dp->tsync, start));

	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}
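The fetch/retry loops above pair with u64_stats_update_begin()/u64_stats_update_end() on the writer side. A minimal sketch of the matching hot-path update in ifb, assuming the same dp->rsync field as in the example (the surrounding xmit/tasklet context is elided):

	/* Writer side: runs with BH disabled in the packet path, which is
	 * why the readers above use the _bh fetch variants. */
	u64_stats_update_begin(&dp->rsync);
	dp->rx_packets++;
	dp->rx_bytes += skb->len;
	u64_stats_update_end(&dp->rsync);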
Example #2
static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
						      struct rtnl_link_stats64 *stats)
{
	u64 bytes = 0;
	u64 packets = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_lstats *lb_stats;
		u64 tbytes, tpackets;
		unsigned int start;

		lb_stats = per_cpu_ptr(dev->lstats, i);
		do {
			start = u64_stats_fetch_begin_bh(&lb_stats->syncp);
			tbytes = lb_stats->bytes;
			tpackets = lb_stats->packets;
		} while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start));
		bytes   += tbytes;
		packets += tpackets;
	}
	stats->rx_packets = packets;
	stats->tx_packets = packets;
	stats->rx_bytes   = bytes;
	stats->tx_bytes   = bytes;
	return stats;
}
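The per-CPU counters summed here have to be allocated when the device is set up. A sketch of that setup along the lines of loopback's ndo_init (assuming struct pcpu_lstats carries the packets, bytes and syncp fields read above):

	static int loopback_dev_init(struct net_device *dev)
	{
		/* One stats block per possible CPU; writers never contend,
		 * readers walk all CPUs as in the example above. */
		dev->lstats = alloc_percpu(struct pcpu_lstats);
		if (!dev->lstats)
			return -ENOMEM;
		return 0;
	}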
Example #3
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct vlan_pcpu_stats *p;
	u32 rx_errors = 0, tx_dropped = 0;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			rxmulticast	= p->rx_multicast;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->multicast	+= rxmulticast;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;
		/* rx_errors & tx_dropped are u32 */
		rx_errors	+= p->rx_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_errors  = rx_errors;
	stats->tx_dropped = tx_dropped;

	return stats;
}
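Note the split in this example: the u64 counters are read under the syncp retry loop, while rx_errors and tx_dropped are plain u32s that any architecture can load atomically, so they are accumulated without the loop. The seqcount behind u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() only exists to keep the two halves of a u64 consistent on 32-bit SMP kernels; on 64-bit builds these helpers compile down to almost nothing.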
Example #4
static void fe_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct fe_priv *priv = netdev_priv(dev);
	struct fe_hw_stats *hwstats = priv->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			fe_stats_update(priv);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		data_src = &hwstats->tx_bytes;
		data_dst = data;
		start = u64_stats_fetch_begin_bh(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(fe_gdma_str); i++)
			*data_dst++ = *data_src++;

	} while (u64_stats_fetch_retry_bh(&hwstats->syncp, start));
}
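This variant copies a whole block of u64 counters inside one retry loop: data_src starts at &hwstats->tx_bytes, and the copy assumes the next ARRAY_SIZE(fe_gdma_str) u64 fields of struct fe_hw_stats are contiguous and ordered exactly like the fe_gdma_str string table. Note also that data_src and data_dst are reset at the top of the do-loop, so a retry restarts the copy cleanly instead of running past the destination buffer.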
Example #5
static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
						  struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu);
		u64 rx_packets, rx_bytes, rx_dropped;
		u64 tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
			rx_dropped = stats->rx_dropped;
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
		tot->rx_dropped += rx_dropped;
	}

	return tot;
}
Example #6
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	struct rtnl_link_stats64 hw_nat_stats;
	memset(&hw_nat_stats, 0, sizeof(hw_nat_stats));
	if (ra_sw_nat_hook_get_stats)
		if (!ra_sw_nat_hook_get_stats(dev->name, &hw_nat_stats)) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
			txq->tx_bytes += hw_nat_stats.tx_bytes;
			txq->tx_packets += hw_nat_stats.tx_packets;
		}
#endif

	dev_txq_stats_fold(dev, stats);

	if (vlan_dev_info(dev)->vlan_rx_stats) {
		struct vlan_rx_stats *p, accum = {0};
		int i;

#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
		p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, 0);
		p->rx_packets += hw_nat_stats.rx_packets;
		p->rx_bytes += hw_nat_stats.rx_bytes;
#endif
		for_each_possible_cpu(i) {
			u64 rxpackets, rxbytes, rxmulticast;
			unsigned int start;

			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
			do {
				start = u64_stats_fetch_begin_bh(&p->syncp);
				rxpackets	= p->rx_packets;
				rxbytes		= p->rx_bytes;
				rxmulticast	= p->rx_multicast;
			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
			accum.rx_packets += rxpackets;
			accum.rx_bytes   += rxbytes;
			accum.rx_multicast += rxmulticast;
			/* rx_errors is a ulong, not protected by syncp */
			accum.rx_errors  += p->rx_errors;
		}
		stats->rx_packets = accum.rx_packets;
		stats->rx_bytes   = accum.rx_bytes;
		stats->rx_errors  = accum.rx_errors;
		stats->multicast  = accum.rx_multicast;
	}
	return stats;
}
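This is an older, vendor-patched (Ralink HW NAT) variant of Example #3: tx stats still come from dev_txq_stats_fold(), and the hardware NAT counters are injected into CPU 0's vlan_rx_stats. The injection writes p->rx_packets and p->rx_bytes without u64_stats_update_begin()/u64_stats_update_end(), so on a 32-bit SMP kernel a concurrent reader can see a torn u64 there; the mainline code in Example #3 avoids this.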
Example #7
/**
 *	ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with RTNL lock or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	int i;

	/* We potentially have 3 sources of stats that need to be
	 * combined: those we have collected (split into err_stats and
	 * percpu_stats), offset_stats from set_stats(), and device
	 * error stats from netdev->get_stats() (for errors that happen
	 * downstream and therefore aren't reported through our
	 * vport_record_error() function).
	 * Stats from the first two sources are merged and reported by OVS
	 * over OVS_VPORT_ATTR_STATS.
	 * netdev stats can be read directly over netlink/ioctl.
	 */

	spin_lock_bh(&vport->stats_lock);

	*stats = vport->offset_stats;

	stats->rx_errors	+= vport->err_stats.rx_errors;
	stats->tx_errors	+= vport->err_stats.tx_errors;
	stats->tx_dropped	+= vport->err_stats.tx_dropped;
	stats->rx_dropped	+= vport->err_stats.rx_dropped;

	spin_unlock_bh(&vport->stats_lock);

	for_each_possible_cpu(i) {
		const struct vport_percpu_stats *percpu_stats;
		struct vport_percpu_stats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);

		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->rx_bytes		+= local_stats.rx_bytes;
		stats->rx_packets	+= local_stats.rx_packets;
		stats->tx_bytes		+= local_stats.tx_bytes;
		stats->tx_packets	+= local_stats.tx_packets;
	}
}
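Unlike the field-by-field copies in the other examples, this reader snapshots the entire per-CPU struct (local_stats = *percpu_stats) inside the retry loop. That keeps the protected section to a single struct copy and guarantees a retry re-reads every field together, at the cost of also copying fields the caller may not need.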
Example #8
/* Often-modified stats are per-cpu, others are shared (netdev->stats) */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->multicast = dev->stats.multicast;

	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
	tot->rx_errors = dev->stats.rx_errors;

	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	tot->collisions  = dev->stats.collisions;

	return tot;
}
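struct pcpu_sw_netstats is the generic per-CPU software stats type that tunnels hang off dev->tstats. A sketch of the matching setup in a tunnel's init path, including the u64_stats_init() calls that lockdep-enabled 32-bit kernels require (the function name tnl_dev_init is illustrative):

	static int tnl_dev_init(struct net_device *dev)
	{
		int i;

		dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
		if (!dev->tstats)
			return -ENOMEM;

		for_each_possible_cpu(i) {
			struct pcpu_sw_netstats *stats;

			/* Initialize the seqcount used by the fetch/retry
			 * loop in ip_tunnel_get_stats64() above. */
			stats = per_cpu_ptr(dev->tstats, i);
			u64_stats_init(&stats->syncp);
		}
		return 0;
	}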
Example #9
static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	result->packets = 0;
	result->bytes = 0;
	for_each_possible_cpu(cpu) {
		struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
		result->packets += packets;
		result->bytes += bytes;
	}
	return atomic64_read(&priv->dropped);
}
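veth splits its counters by update frequency here: packets and bytes are hot-path counters kept in per-CPU pcpu_vstats under syncp, while the rarely-written drop counter is a single shared atomic64_t (priv->dropped) read with atomic64_read() and handed back as the return value for the caller to fold in.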
Example #10
static void i40e_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i = 0;
	char *p;
	int j;
	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
	unsigned int start;

	i40e_update_stats(vsi);

	for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
		p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	rcu_read_lock();
	for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
		struct i40e_ring *rx_ring;

		if (!tx_ring)
			continue;

		/* process Tx ring statistics */
		do {
			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
			data[i] = tx_ring->stats.packets;
			data[i + 1] = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

		/* Rx ring is the 2nd half of the queue pair */
		rx_ring = &tx_ring[1];
		do {
			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
			data[i + 2] = rx_ring->stats.packets;
			data[i + 3] = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
	}
	rcu_read_unlock();
	if (vsi == pf->vsi[pf->lan_vsi]) {
		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
			p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
			data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
				   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
			data[i++] = pf->stats.priority_xon_tx[j];
			data[i++] = pf->stats.priority_xoff_tx[j];
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
			data[i++] = pf->stats.priority_xon_rx[j];
			data[i++] = pf->stats.priority_xoff_rx[j];
		}
		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
			data[i++] = pf->stats.priority_xon_2_xoff[j];
	}
}
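The rcu_read_lock() section protects the vsi->tx_rings[] pointers, which can be replaced during queue reconfiguration; ACCESS_ONCE() forces a single load of each pointer. Every queue pair contributes four u64s (Tx packets/bytes, then Rx packets/bytes), which is what the i += 4 in the loop header accounts for, and the Rx ring sits directly after its Tx ring in memory, hence &tx_ring[1].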