/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return;
	}

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.)
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	skb_push(skb, ETH_HLEN);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}
	vlan_copy_skb_tci(skb);

	ovs_vport_receive(vport, skb);
}
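/* A minimal sketch (not part of this excerpt) of how netdev_port_receive()
 * is typically entered on kernels that provide rx_handlers.  The hook name
 * and the ovs_netdev_get_vport() helper are assumptions here; older kernels
 * would instead reach this function through the bridge hook mentioned in the
 * comment above (handle_bridge()). */
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct vport *vport;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	vport = ovs_netdev_get_vport(skb->dev);	/* assumed helper */

	netdev_port_receive(vport, skb);

	/* Tell the stack we consumed the packet, matching the comment in
	 * netdev_port_receive() that no one comes after us. */
	return RX_HANDLER_CONSUMED;
}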
static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
							 struct rtnl_link_stats64 *stats)
{
#else
static struct net_device_stats *internal_dev_sys_stats(struct net_device *netdev)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
	struct net_device_stats *stats = &internal_dev_priv(netdev)->stats;
#else
	struct net_device_stats *stats = &netdev->stats;
#endif
#endif
	struct vport *vport = ovs_internal_dev_get_vport(netdev);
	struct ovs_vport_stats vport_stats;

	ovs_vport_get_stats(vport, &vport_stats);

	/* The tx and rx stats need to be swapped because the
	 * switch and host OS have opposite perspectives. */
	stats->rx_packets	= vport_stats.tx_packets;
	stats->tx_packets	= vport_stats.rx_packets;
	stats->rx_bytes		= vport_stats.tx_bytes;
	stats->tx_bytes		= vport_stats.rx_bytes;
	stats->rx_errors	= vport_stats.tx_errors;
	stats->tx_errors	= vport_stats.rx_errors;
	stats->rx_dropped	= vport_stats.tx_dropped;
	stats->tx_dropped	= vport_stats.rx_dropped;

	return stats;
}

/* Called with rcu_read_lock_bh. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	if (unlikely(compute_ip_summed(skb, true))) {
		kfree_skb(skb);
		return 0;
	}

	vlan_copy_skb_tci(skb);

	rcu_read_lock();
	ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();

	return 0;
}

static int internal_dev_open(struct net_device *netdev)
{
	netif_start_queue(netdev);
	return 0;
}
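/* A minimal sketch (assumed, not shown in this excerpt) of how the callbacks
 * above would typically be wired into net_device_ops on kernels new enough to
 * provide ndo_get_stats64; older kernels would hook internal_dev_sys_stats
 * through dev->get_stats instead.  The ops structure name is an assumption. */
static const struct net_device_ops internal_dev_netdev_ops = {
	.ndo_open		= internal_dev_open,
	.ndo_start_xmit		= internal_dev_xmit,
	.ndo_get_stats64	= internal_dev_get_stats,
};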