Example no. 1
0
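/*
 * veth transmit handler: the peer device is looked up under RCU, the skb is
 * handed to it with dev_forward_skb(), and bytes/packets are accounted in the
 * per-cpu vstats; a missing peer or a failed forward is counted in
 * priv->dropped.
 */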
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *rcv;
	int length = skb->len;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

		u64_stats_update_begin(&stats->syncp);
		stats->bytes += length;
		stats->packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
Example no. 2
0
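/*
 * veth transmit handler as in the previous example, with one addition: frames
 * carrying CHECKSUM_NONE are marked CHECKSUM_UNNECESSARY when the peer
 * advertises NETIF_F_RXCSUM, while CHECKSUM_PARTIAL is left untouched to
 * avoid corrupting the checksum of forwarded packets.
 */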
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *rcv;
	int length = skb->len;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}
	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
	 * will cause bad checksum on forwarded packets
	 */
	if (skb->ip_summed == CHECKSUM_NONE &&
	    rcv->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

		u64_stats_update_begin(&stats->syncp);
		stats->bytes += length;
		stats->packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
Example no. 3
0
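/*
 * l2tp_eth receive handler: when data debugging is enabled the first bytes of
 * the frame are printed, runt frames (shorter than ETH_HLEN) are dropped, the
 * skb is scrubbed (secpath, dst, netfilter state), and the frame is injected
 * into the session's virtual device with dev_forward_skb(), updating
 * dev->stats accordingly.
 */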
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;

	if (session->debug & L2TP_MSG_DATA) {
		unsigned int length;
		int offset;
		u8 *ptr = skb->data;

		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		printk(KERN_DEBUG "%s: eth recv: ", session->name);

		offset = 0;
		do {
			printk(" %02X", ptr[offset]);
		} while (++offset < length);

		printk("\n");
	}

	if (data_len < ETH_HLEN)
		goto error;

	secpath_reset(skb);

	/* checksums verified by L2TP */
	skb->ip_summed = CHECKSUM_NONE;

	skb_dst_drop(skb);
	nf_reset(skb);

	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += data_len;
	} else
		dev->stats.rx_errors++;

	return;

error:
	dev->stats.rx_errors++;
	kfree_skb(skb);
}
Example no. 4
0
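/*
 * veth transmit handler variant without RCU protection of the peer pointer:
 * the frame is dropped if the peer is not IFF_UP, and after a successful
 * dev_forward_skb() the per-cpu tx counters of the sender and the rx counters
 * of the peer are both updated.
 */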
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats, *rcv_stats;
	int length;

	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);

	stats = this_cpu_ptr(priv->stats);
	rcv_stats = this_cpu_ptr(rcv_priv->stats);

	if (!(rcv->flags & IFF_UP))
		goto tx_drop;

	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
	   will cause bad checksum on forwarded packets */
	if (skb->ip_summed == CHECKSUM_NONE &&
	    rcv->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	length = skb->len;
	if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
		goto rx_drop;

	stats->tx_bytes += length;
	stats->tx_packets++;

	rcv_stats->rx_bytes += length;
	rcv_stats->rx_packets++;

	return NETDEV_TX_OK;

tx_drop:
	kfree_skb(skb);
	stats->tx_dropped++;
	return NETDEV_TX_OK;

rx_drop:
	rcv_stats->rx_dropped++;
	return NETDEV_TX_OK;
}
Example no. 5
0
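/*
 * veth transmit handler variant that copies the peer's configured ip_summed
 * when the device advertises NETIF_F_NO_CSUM and accounts the Ethernet header
 * in the byte counters (skb->len + ETH_HLEN); drop handling matches the
 * previous example.
 */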
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats, *rcv_stats;
	int length;

	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);

	stats = this_cpu_ptr(priv->stats);
	rcv_stats = this_cpu_ptr(rcv_priv->stats);

	if (!(rcv->flags & IFF_UP))
		goto tx_drop;

	if (dev->features & NETIF_F_NO_CSUM)
		skb->ip_summed = rcv_priv->ip_summed;

	length = skb->len + ETH_HLEN;
	if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
		goto rx_drop;

	stats->tx_bytes += length;
	stats->tx_packets++;

	rcv_stats->rx_bytes += length;
	rcv_stats->rx_packets++;

	return NETDEV_TX_OK;

tx_drop:
	kfree_skb(skb);
	stats->tx_dropped++;
	return NETDEV_TX_OK;

rx_drop:
	rcv_stats->rx_dropped++;
	return NETDEV_TX_OK;
}
Example no. 6
0
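/*
 * l2tp_eth receive handler variant: the debug dump uses
 * print_hex_dump_bytes(), the Ethernet header is validated with
 * pskb_may_pull(), and packet/byte/error counters are kept in atomic_long_t
 * fields of the device's private data.
 */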
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;
	struct l2tp_eth *priv = netdev_priv(dev);

	if (session->debug & L2TP_MSG_DATA) {
		unsigned int length;

		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: eth recv\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	if (!pskb_may_pull(skb, ETH_HLEN))
		goto error;

	secpath_reset(skb);

	/* checksums verified by L2TP */
	skb->ip_summed = CHECKSUM_NONE;

	skb_dst_drop(skb);
	nf_reset(skb);

	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
		atomic_long_inc(&priv->rx_packets);
		atomic_long_add(data_len, &priv->rx_bytes);
	} else {
		atomic_long_inc(&priv->rx_errors);
	}
	return;

error:
	atomic_long_inc(&priv->rx_errors);
	kfree_skb(skb);
}
Example no. 7
0
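/*
 * Netlink handler of the "deth" example driver: the payload starts with the
 * target interface index; PACKET messages carry a frame that is copied into a
 * freshly allocated skb and injected into the device with dev_forward_skb(),
 * LINK_CHANGE toggles the carrier state, and LINK_STATS overwrites the
 * per-cpu counters with values supplied by userspace.
 */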
static void deth_nl_recv_msg(struct sk_buff* skb)
{
    struct nlmsghdr* nlh = (struct nlmsghdr*) skb->data;
    
    int pktsize = 0;

    const char* ptr = nlmsg_data(nlh);
    u32 index = *(u32*)ptr;

    struct deth_priv* priv = NULL;
    struct deth_net_stats* stats = NULL;
    struct sk_buff* out_skb = NULL;
    struct net* net = sock_net(skb->sk);
    struct net_device* dev = dev_get_by_index(net, index);

    ptr += sizeof(u32);

    if (dev == NULL)
    {
        printk(KERN_ERR "Error retrieving the interface with index %d.\n", index);

        return;
    }

    switch (nlh->nlmsg_type)
    {
    case PACKET:
        break;
    case LINK_CHANGE:
        {
            if (*(u32*)(ptr))
            {
                netif_carrier_on(dev);
            }
            else
            {
                netif_carrier_off(dev);
            }
    
            goto cleanup;
        }
    case LINK_STATS:
        {
            priv = netdev_priv(dev);
            stats = this_cpu_ptr(priv->stats);
            u64_stats_update_begin(&stats->syncp);

            stats->rx_packets = *(u64*)(ptr);
            ptr += sizeof(u64);
            stats->rx_bytes = *(u64*)(ptr);
            ptr += sizeof(u64);
            stats->tx_packets = *(u64*)(ptr);
            ptr += sizeof(u64);
            stats->tx_bytes = *(u64*)(ptr);

            u64_stats_update_end(&stats->syncp);

            goto cleanup;
        }
    default:
        goto cleanup;
    }

    /* nlmsg_len() excludes the netlink header; the payload is the u32
     * interface index followed by the raw frame. */
    pktsize = nlmsg_len(nlh) - sizeof(u32);
    out_skb = netdev_alloc_skb(dev, pktsize + 2);

    if (out_skb == NULL)
    {
        printk(KERN_ERR "Error allocating a sk_buff (%d bytes).\n", pktsize);

        goto rx_drop;
    }

    memcpy(skb_put(out_skb, pktsize), ptr, pktsize);

    out_skb->ip_summed = CHECKSUM_UNNECESSARY;

    if (dev_forward_skb(dev, out_skb) != NET_RX_SUCCESS)
    {
        printk(KERN_ERR "dev_forward_skb failed to forward a packet of %d bytes.\n", pktsize);

        goto rx_drop;
    }

    priv = netdev_priv(dev);
    stats = this_cpu_ptr(priv->stats);
    u64_stats_update_begin(&stats->syncp);
    stats->rx_bytes += pktsize;
    stats->rx_packets++;
    u64_stats_update_end(&stats->syncp);

    goto cleanup;

rx_drop:
    /* priv and stats are still NULL when we get here from a failed
     * allocation or a failed dev_forward_skb(), so look them up first. */
    priv = netdev_priv(dev);
    stats = this_cpu_ptr(priv->stats);
    u64_stats_update_begin(&stats->syncp);
    stats->tx_dropped++;
    u64_stats_update_end(&stats->syncp);

cleanup:
    dev_put(dev);
}