Example #1
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return;
	}

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.) */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	skb_push(skb, ETH_HLEN);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}
	vlan_copy_skb_tci(skb);

	ovs_vport_receive(vport, skb);
}

static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	rcu_read_lock();
	ovs_vport_receive(internal_dev_priv(netdev)->vport, skb);
	rcu_read_unlock();
	return 0;
}
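
netdev_port_receive() above depends on skb_share_check() to obtain a private copy before the datapath mangles the packet. A minimal sketch of that helper's semantics, close to (but not verbatim) the kernel's inline implementation, which varies slightly across versions:

/* Sketch of skb_share_check(): clone only when the buffer is shared. */
static inline struct sk_buff *share_check_sketch(struct sk_buff *skb, gfp_t pri)
{
	if (skb_shared(skb)) {			/* another user holds a reference */
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (nskb)
			consume_skb(skb);	/* release our ref to the shared skb */
		else
			kfree_skb(skb);
		skb = nskb;			/* NULL if the clone failed */
	}
	return skb;
}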
Example #3
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
							struct rtnl_link_stats64 *stats)
{
#else
static struct net_device_stats *internal_dev_sys_stats(struct net_device *netdev)
{
	struct net_device_stats *stats = &netdev->stats;
#endif
	struct vport *vport = ovs_internal_dev_get_vport(netdev);
	struct ovs_vport_stats vport_stats;

	ovs_vport_get_stats(vport, &vport_stats);

	/* The tx and rx stats need to be swapped because the
	 * switch and host OS have opposite perspectives. */
	stats->rx_packets	= vport_stats.tx_packets;
	stats->tx_packets	= vport_stats.rx_packets;
	stats->rx_bytes		= vport_stats.tx_bytes;
	stats->tx_bytes		= vport_stats.rx_bytes;
	stats->rx_errors	= vport_stats.tx_errors;
	stats->tx_errors	= vport_stats.rx_errors;
	stats->rx_dropped	= vport_stats.tx_dropped;
	stats->tx_dropped	= vport_stats.rx_dropped;

	return stats;
}

/* Called with rcu_read_lock_bh. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	rcu_read_lock();
	ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();
	return 0;
}
Example #4
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct sk_buff *skb)
{
	struct vport *vport;

	vport = ovs_netdev_get_vport(skb->dev);
	if (unlikely(!vport))
		goto error;

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	skb_push(skb, ETH_HLEN);
	skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
	return;
error:
	kfree_skb(skb);
}
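
After skb_push() re-exposes the Ethernet header, an skb received with CHECKSUM_COMPLETE would carry a checksum that no longer covers the full linear data; skb_postpush_rcsum() folds the pushed bytes back in. A sketch of the effect, not the verbatim helper:

/* Sketch: add the re-exposed header bytes back into skb->csum. */
static inline void postpush_rcsum_sketch(struct sk_buff *skb,
					 const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), 0);
}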
Example #5
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	if (unlikely(!vport))
		goto error;

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.)
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	skb_push(skb, ETH_HLEN);
	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);

	ovs_vport_receive(vport, skb, NULL);
	return;

error:
	kfree_skb(skb);
}
Example #6
/**
 *	ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tun_key: tunnel metadata extracted from the encapsulating packet
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb,
		 struct ovs_key_ipv4_tunnel *tun_key)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

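	/* h_proto values >= ETH_P_802_3_MIN are EtherTypes; smaller values
	 * are 802.3 length fields, so fall back to the 802.2 catch-all. */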
	if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

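	/* Scrub state that belonged to the outer, encapsulating packet. */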
	skb_dst_drop(skb);
	nf_reset(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	ovs_vport_receive(vport, skb, tun_key);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
							struct rtnl_link_stats64 *stats)
{
#else
static struct net_device_stats *internal_dev_sys_stats(struct net_device *netdev)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
	struct net_device_stats *stats = &internal_dev_priv(netdev)->stats;
#else
	struct net_device_stats *stats = &netdev->stats;
#endif
#endif
	struct vport *vport = ovs_internal_dev_get_vport(netdev);
	struct ovs_vport_stats vport_stats;

	ovs_vport_get_stats(vport, &vport_stats);

	/* The tx and rx stats need to be swapped because the
	 * switch and host OS have opposite perspectives. */
	stats->rx_packets	= vport_stats.tx_packets;
	stats->tx_packets	= vport_stats.rx_packets;
	stats->rx_bytes		= vport_stats.tx_bytes;
	stats->tx_bytes		= vport_stats.rx_bytes;
	stats->rx_errors	= vport_stats.tx_errors;
	stats->tx_errors	= vport_stats.rx_errors;
	stats->rx_dropped	= vport_stats.tx_dropped;
	stats->tx_dropped	= vport_stats.rx_dropped;

	return stats;
}

/* Called with rcu_read_lock_bh. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	if (unlikely(compute_ip_summed(skb, true))) {
		kfree_skb(skb);
		return 0;
	}

	vlan_copy_skb_tci(skb);

	rcu_read_lock();
	ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();
	return 0;
}

static int internal_dev_open(struct net_device *netdev)
{
	netif_start_queue(netdev);
	return 0;
}
Example #8
/* Called with rcu_read_lock and BH disabled. */
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
{
	struct ovs_key_ipv4_tunnel tun_key;
	struct vport *vport = vs->data;
	struct iphdr *iph;
	__be64 key;

	/* Save outer tunnel values */
	iph = ip_hdr(skb);
	key = cpu_to_be64(ntohl(vx_vni) >> 8);
	ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);

	ovs_vport_receive(vport, skb, &tun_key);
}
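
The VXLAN VNI travels in the top 24 bits of the 32-bit on-the-wire field, with the low byte reserved, so ntohl(vx_vni) >> 8 recovers the identifier and cpu_to_be64() widens it into the flow key's big-endian 64-bit tunnel ID. A hypothetical standalone user-space check of the same bit manipulation:

/* Hypothetical demo: round-trip a VNI of 0x123456 through the same math. */
#include <arpa/inet.h>
#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t vx_vni = htonl(0x123456 << 8);		/* VNI in top 24 bits */
	uint64_t tun_id = htobe64(ntohl(vx_vni) >> 8);	/* as in vxlan_rcv() */

	printf("vni = 0x%" PRIx64 "\n", be64toh(tun_id));	/* prints 0x123456 */
	return 0;
}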
Example #9
static int patch_send(struct vport *vport, struct sk_buff *skb)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);
	struct vport *peer = rcu_dereference(patch_vport->peer);
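	/* Record the length up front: ovs_vport_receive() consumes the skb. */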
	int skb_len = skb->len;

	if (!peer) {
		kfree_skb(skb);
		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);

		return 0;
	}

	ovs_vport_receive(peer, skb);
	return skb_len;
}
Example #10
/* Called with rcu_read_lock and BH disabled. */
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
{
	struct ovs_tunnel_info tun_info;
	struct vport *vport = vs->data;
	struct iphdr *iph;
	__be64 key;

	/* Save outer tunnel values */
	iph = ip_hdr(skb);
	key = cpu_to_be64(ntohl(vx_vni) >> 8);
	ovs_flow_tun_info_init(&tun_info, iph,
			       udp_hdr(skb)->source, udp_hdr(skb)->dest,
			       key, TUNNEL_KEY, NULL, 0);

	ovs_vport_receive(vport, skb, &tun_info);
}
Example #11
static void stt_rcv(struct stt_sock *stt_sock, struct sk_buff *skb)
{
	struct vport *vport = stt_sock->rcv_data;
	struct stthdr *stth = stt_hdr(skb);
	struct ovs_tunnel_info tun_info;
	struct sk_buff *next;

	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb),
			       tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			       get_unaligned(&stth->key),
			       TUNNEL_KEY | TUNNEL_CSUM,
			       NULL, 0);
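	/* The skb may head a list of segments chained via skb->next; unlink
	 * each one so ovs_vport_receive() gets a standalone packet. */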
	do {
		next = skb->next;
		skb->next = NULL;
		ovs_vport_receive(vport, skb, &tun_info);
	} while ((skb = next));
}
Example #12
/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb,
		   const struct tnl_ptk_info *tpi)
{
	struct ovs_key_ipv4_tunnel tun_key;
	struct ovs_net *ovs_net;
	struct vport *vport;
	__be64 key;

	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
	vport = rcu_dereference(ovs_net->vport_net.gre_vport);
	if (unlikely(!vport))
		return PACKET_REJECT;

	key = key_to_tunnel_id(tpi->key, tpi->seq);
	ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key,
			      filter_tnl_flags(tpi->flags));

	ovs_vport_receive(vport, skb, &tun_key);
	return PACKET_RCVD;
}
Example #13
/* Called with rcu_read_lock_bh. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int len, err;

	len = skb->len;
	rcu_read_lock();
	err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();

	if (likely(!err)) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		netdev->stats.tx_errors++;
	}
	return 0;
}
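
internal_dev_xmit() only updates the writer side of the per-CPU counters; a stats reader has to sum every CPU's copy inside a u64_stats retry loop. A sketch of that reader, assuming the classic pcpu_sw_netstats layout with plain u64 fields (helper name invented; some kernel versions spell the fetch helpers with _irq or _bh suffixes):

/* Sketch: fold per-CPU tx counters into 64-bit totals. */
static void sum_tx_stats_sketch(struct net_device *netdev,
				u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *tstats =
			per_cpu_ptr(netdev->tstats, cpu);
		unsigned int start;
		u64 p, b;

		do {			/* retry if a writer raced with us */
			start = u64_stats_fetch_begin(&tstats->syncp);
			p = tstats->tx_packets;
			b = tstats->tx_bytes;
		} while (u64_stats_fetch_retry(&tstats->syncp, start));

		*packets += p;
		*bytes += b;
	}
}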
Example #14
/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb,
		   const struct tnl_ptk_info *tpi)
{
	struct ovs_tunnel_info tun_info;
	struct ovs_net *ovs_net;
	struct vport *vport;
	__be64 key;

	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
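	/* When both a key and a sequence number are present, OVS treats them
	 * as one 64-bit key and delivers to the separate gre64 vport. */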
	if ((tpi->flags & TUNNEL_KEY) && (tpi->flags & TUNNEL_SEQ))
		vport = rcu_dereference(ovs_net->vport_net.gre64_vport);
	else
		vport = rcu_dereference(ovs_net->vport_net.gre_vport);
	if (unlikely(!vport))
		return PACKET_REJECT;

	key = key_to_tunnel_id(tpi->key, tpi->seq);
	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key,
			       filter_tnl_flags(tpi->flags), NULL, 0);

	ovs_vport_receive(vport, skb, &tun_info);
	return PACKET_RCVD;
}
Example #15
/* Called with rcu_read_lock and BH disabled. */
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
		      struct vxlan_metadata *md)
{
	struct ovs_tunnel_info tun_info;
	struct vxlan_port *vxlan_port;
	struct vport *vport = vs->data;
	struct iphdr *iph;
	struct ovs_vxlan_opts opts = {
		.gbp = md->gbp,
	};
	__be64 key;
	__be16 flags;

	flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
	vxlan_port = vxlan_vport(vport);
	if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
		flags |= TUNNEL_VXLAN_OPT;

	/* Save outer tunnel values */
	iph = ip_hdr(skb);
	key = cpu_to_be64(ntohl(md->vni) >> 8);
	ovs_flow_tun_info_init(&tun_info, iph,
			       udp_hdr(skb)->source, udp_hdr(skb)->dest,
			       key, flags, &opts, sizeof(opts));

	ovs_vport_receive(vport, skb, &tun_info);
}

static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;

	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
		return -EMSGSIZE;

	if (vxlan_port->exts) {
		struct nlattr *exts;

		exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
		if (!exts)
			return -EMSGSIZE;

		if (vxlan_port->exts & VXLAN_F_GBP &&
		    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
			return -EMSGSIZE;

		nla_nest_end(skb, exts);
	}

	return 0;
}

static void vxlan_tnl_destroy(struct vport *vport)
{
	struct vxlan_port *vxlan_port = vxlan_vport(vport);

	vxlan_sock_release(vxlan_port->vs);

	ovs_vport_deferred_free(vport);
}

static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
	[OVS_VXLAN_EXT_GBP]	= { .type = NLA_FLAG, },
};
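
exts_policy is the validation side; the attribute nest it validates would be parsed back into VXLAN_F_* flags when the port is configured. A hypothetical sketch of that consumer (function name invented; uses the older four-argument form of nla_parse_nested()):

/* Hypothetical: map the OVS_TUNNEL_ATTR_EXTENSION nest to VXLAN flags. */
static int vxlan_parse_exts_sketch(struct nlattr *attr, u32 *flags)
{
	struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
	int err;

	err = nla_parse_nested(exts, OVS_VXLAN_EXT_MAX, attr, exts_policy);
	if (err < 0)
		return err;

	if (exts[OVS_VXLAN_EXT_GBP])
		*flags |= VXLAN_F_GBP;

	return 0;
}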