Example no. 1
static int fb_counter_proc_show(struct seq_file *m, void *v)
{
	u64 pkts_sum = 0, bytes_sum = 0;
	unsigned int cpu;
	char sline[256];
	struct fblock *fb = (struct fblock *) m->private;
	struct fb_counter_priv __percpu *fb_priv;

	rcu_read_lock();
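	/* Fetch the RCU-published pointer to the per-CPU stats area. */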
	fb_priv = (struct fb_counter_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

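	/* Hold the hotplug lock so the online-CPU mask stays stable
	 * while the per-CPU counters are summed.
	 */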
	get_online_cpus();
	for_each_online_cpu(cpu) {
		unsigned int start;
		struct fb_counter_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
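		/* Re-read if the writer bumped the seqcount mid-snapshot;
		 * on 32-bit, a u64 counter cannot be loaded atomically.
		 */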
		do {
			start = u64_stats_fetch_begin(&fb_priv_cpu->syncp);
			pkts_sum += fb_priv_cpu->packets;
			bytes_sum += fb_priv_cpu->bytes;
		} while (u64_stats_fetch_retry(&fb_priv_cpu->syncp, start));
	}
	put_online_cpus();

	memset(sline, 0, sizeof(sline));
	snprintf(sline, sizeof(sline), "%llu %llu\n", pkts_sum, bytes_sum);
	seq_puts(m, sline);

	return 0;
}
Example no. 2
static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
						      struct rtnl_link_stats64 *stats)
{
	u64 bytes = 0;
	u64 packets = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_lstats *lb_stats;
		u64 tbytes, tpackets;
		unsigned int start;

		lb_stats = per_cpu_ptr(dev->lstats, i);
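		/* Snapshot this CPU's loopback counters; retry until the
		 * read is consistent with concurrent updates.
		 */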
		do {
			start = u64_stats_fetch_begin(&lb_stats->syncp);
			tbytes = lb_stats->bytes;
			tpackets = lb_stats->packets;
		} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
		bytes   += tbytes;
		packets += tpackets;
	}
	stats->rx_packets = packets;
	stats->tx_packets = packets;
	stats->rx_bytes   = bytes;
	stats->tx_bytes   = bytes;
	return stats;
}
Example no. 3
static void
nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct netdevsim *ns = netdev_priv(dev);
	unsigned int start;

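	/* netdevsim keeps one shared stats block; the retry loop still
	 * guards against torn 64-bit reads on 32-bit hosts.
	 */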
	do {
		start = u64_stats_fetch_begin(&ns->syncp);
		stats->tx_bytes = ns->tx_bytes;
		stats->tx_packets = ns->tx_packets;
	} while (u64_stats_fetch_retry(&ns->syncp, start));
}
Example no. 4
static int fb_counter_proc_show(struct seq_file *m, void *v)
{
	char sline[256];
	unsigned int start;
	struct fblock *fb = (struct fblock *) m->private;
	struct fb_counter_priv *fb_priv;

	rcu_read_lock();
	fb_priv = rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();
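	/* Single shared counter block: loop until begin/retry see an
	 * unchanged seqcount, then emit the stable snapshot.
	 */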
	do {
		start = u64_stats_fetch_begin(&fb_priv->syncp);
		memset(sline, 0, sizeof(sline));
		snprintf(sline, sizeof(sline), "%llu %llu\n",
			 fb_priv->packets, fb_priv->bytes);
	} while (u64_stats_fetch_retry(&fb_priv->syncp, start));
	seq_puts(m, sline);

	return 0;
}
Example no. 5
static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
				struct l2tp_session *session)
{
	void *hdr;
	struct nlattr *nest;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = NULL;
	struct l2tp_stats stats;
	unsigned int start;

	sk = tunnel->sock;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
			session->peer_session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
	    nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
	    nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
	    (session->mru &&
	     nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
		goto nla_put_failure;

	if ((session->ifname && session->ifname[0] &&
	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
	    (session->cookie_len &&
	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
		     &session->cookie[0])) ||
	    (session->peer_cookie_len &&
	     nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
		     &session->peer_cookie[0])) ||
	    nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
#ifdef CONFIG_XFRM
	    (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
	     nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
#endif
	    (session->reorder_timeout &&
	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
		goto nla_put_failure;

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

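	/* Copy the counters into a local struct under the seqcount so
	 * the netlink attributes below see one coherent snapshot.
	 */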
	do {
		start = u64_stats_fetch_begin(&session->stats.syncp);
		stats.tx_packets = session->stats.tx_packets;
		stats.tx_bytes = session->stats.tx_bytes;
		stats.tx_errors = session->stats.tx_errors;
		stats.rx_packets = session->stats.rx_packets;
		stats.rx_bytes = session->stats.rx_bytes;
		stats.rx_errors = session->stats.rx_errors;
		stats.rx_seq_discards = session->stats.rx_seq_discards;
		stats.rx_oos_packets = session->stats.rx_oos_packets;
	} while (u64_stats_fetch_retry(&session->stats.syncp, start));

	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
			stats.rx_seq_discards) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
			stats.rx_oos_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
Example no. 6
static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
			       struct l2tp_tunnel *tunnel)
{
	void *hdr;
	struct nlattr *nest;
	struct sock *sk = NULL;
	struct inet_sock *inet;
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6_pinfo *np = NULL;
#endif
	struct l2tp_stats stats;
	unsigned int start;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
			  L2TP_CMD_TUNNEL_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
	    nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
	    nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
		goto nla_put_failure;

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

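	/* Same snapshot pattern as the per-session dump, applied to
	 * the tunnel-wide counters.
	 */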
	do {
		start = u64_stats_fetch_begin(&tunnel->stats.syncp);
		stats.tx_packets = tunnel->stats.tx_packets;
		stats.tx_bytes = tunnel->stats.tx_bytes;
		stats.tx_errors = tunnel->stats.tx_errors;
		stats.rx_packets = tunnel->stats.rx_packets;
		stats.rx_bytes = tunnel->stats.rx_bytes;
		stats.rx_errors = tunnel->stats.rx_errors;
		stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
		stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
	} while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));

	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
			stats.rx_seq_discards) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
			stats.rx_oos_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	sk = tunnel->sock;
	if (!sk)
		goto out;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		np = inet6_sk(sk);
#endif

	inet = inet_sk(sk);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
		    nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
		    nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
			       (sk->sk_no_check != UDP_CSUM_NOXMIT)))
			goto nla_put_failure;
		/* NOBREAK */
	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (np) {
			if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
				    &np->saddr) ||
			    nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
				    &np->daddr))
				goto nla_put_failure;
		} else
#endif
		if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
		    nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
			goto nla_put_failure;
		break;
	}

out:
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
Example no. 7
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
	struct nfp_net_ring_set rx = {
		.n_rings = nn->num_rx_rings,
		.mtu = nn->netdev->mtu,
		.dcnt = rxd_cnt,
	};
	struct nfp_net_ring_set tx = {
		.n_rings = nn->num_tx_rings,
		.dcnt = txd_cnt,
	};

	if (nn->rxd_cnt != rxd_cnt)
		reconfig_rx = &rx;
	if (nn->txd_cnt != txd_cnt)
		reconfig_tx = &tx;

	return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
				     reconfig_rx, reconfig_tx);
}

static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 rxd_cnt, txd_cnt;

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
		return -EINVAL;

	if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}

static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
			memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < nn->num_r_vecs; i++) {
			sprintf(p, "rvec_%u_rx_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rvec_%u_tx_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rvec_%u_tx_busy", i);
			p += ETH_GSTRING_LEN;
		}
		strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "tx_gather", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "tx_lso", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		for (i = 0; i < nn->num_tx_rings; i++) {
			sprintf(p, "txq_%u_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "txq_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < nn->num_rx_rings; i++) {
			sprintf(p, "rxq_%u_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rxq_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static void nfp_net_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	struct rtnl_link_stats64 *netdev_stats;
	struct rtnl_link_stats64 temp = {};
	u64 tmp[NN_ET_RVEC_GATHER_STATS];
	u8 __iomem *io_p;
	int i, j, k;
	u8 *p;

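	/* Base netdev counters come from the core; device stats are
	 * read from the control BAR further down.
	 */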
	netdev_stats = dev_get_stats(netdev, &temp);

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
		switch (nfp_net_et_stats[i].type) {
		case NETDEV_ET_STATS:
			p = (char *)netdev_stats + nfp_net_et_stats[i].off;
			data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ?
				*(u64 *)p : *(u32 *)p;
			break;

		case NFP_NET_DEV_ET_STATS:
			io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
			data[i] = readq(io_p);
			break;
		}
	}
	for (j = 0; j < nn->num_r_vecs; j++) {
		unsigned int start;

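		/* RX and TX counters sit under separate syncps, so each
		 * side of the ring vector is snapshotted independently.
		 */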
		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync);
			data[i++] = nn->r_vecs[j].rx_pkts;
			tmp[0] = nn->r_vecs[j].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[j].hw_csum_rx_error;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync);
			data[i++] = nn->r_vecs[j].tx_pkts;
			data[i++] = nn->r_vecs[j].tx_busy;
			tmp[3] = nn->r_vecs[j].hw_csum_tx;
			tmp[4] = nn->r_vecs[j].hw_csum_tx_inner;
			tmp[5] = nn->r_vecs[j].tx_gather;
			tmp[6] = nn->r_vecs[j].tx_lso;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start));

		for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++)
			gathered_stats[k] += tmp[k];
	}
	for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
		data[i++] = gathered_stats[j];
	for (j = 0; j < nn->num_tx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
	for (j = 0; j < nn->num_rx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
}

static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return NN_ET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* RX network flow classification (RSS, filters, etc) */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}