Example 1
/* Called with rcu_read_lock and BH disabled. */
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
{
	struct ovs_tunnel_info tun_info;
	struct vport *vport = vs->data;
	struct iphdr *iph;
	__be64 key;

	/* Save outer tunnel values */
	iph = ip_hdr(skb);
	/* The VNI occupies the upper 24 bits of the field; shift out
	 * the reserved low byte before widening to 64 bits.
	 */
	key = cpu_to_be64(ntohl(vx_vni) >> 8);
	ovs_flow_tun_info_init(&tun_info, iph,
			       udp_hdr(skb)->source, udp_hdr(skb)->dest,
			       key, TUNNEL_KEY, NULL, 0);

	ovs_vport_receive(vport, skb, &tun_info);
}
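
Each of these receive handlers funnels the outer IP header, the
transport ports, the tunnel ID and the flags through
ovs_flow_tun_info_init(). A minimal sketch of that helper, assuming
struct ovs_tunnel_info wraps an IPv4 tunnel key plus an options blob
(field names here are illustrative, not taken from this file):

static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
					  const struct iphdr *iph,
					  __be16 tp_src, __be16 tp_dst,
					  __be64 tun_id, __be16 tun_flags,
					  const void *opts, u8 opts_len)
{
	/* Copy the outer-header fields the datapath can match on. */
	tun_info->tunnel.tun_id = tun_id;
	tun_info->tunnel.ipv4_src = iph->saddr;
	tun_info->tunnel.ipv4_dst = iph->daddr;
	tun_info->tunnel.ipv4_tos = iph->tos;
	tun_info->tunnel.ipv4_ttl = iph->ttl;
	tun_info->tunnel.tun_flags = tun_flags;
	tun_info->tunnel.tp_src = tp_src;
	tun_info->tunnel.tp_dst = tp_dst;
	/* Protocol-specific metadata (e.g. VXLAN extensions). */
	tun_info->options = opts;
	tun_info->options_len = opts_len;
}
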
Example 2
static void stt_rcv(struct stt_sock *stt_sock, struct sk_buff *skb)
{
	struct vport *vport = stt_sock->rcv_data;
	struct stthdr *stth = stt_hdr(skb);
	struct ovs_tunnel_info tun_info;
	struct sk_buff *next;

	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb),
			       tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			       get_unaligned(&stth->key),
			       TUNNEL_KEY | TUNNEL_CSUM,
			       NULL, 0);
	/* skb may head a list of segments chained on skb->next; unlink
	 * each one so the datapath receives single packets.
	 */
	do {
		next = skb->next;
		skb->next = NULL;
		ovs_vport_receive(vport, skb, &tun_info);
	} while ((skb = next));
}
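
The 64-bit key is read with get_unaligned() because it sits at offset 8
of the STT header, which is not guaranteed to be naturally aligned
within the packet. A sketch of that header, with the layout assumed
from the STT draft rather than taken from this file:

struct stthdr {
	__u8	version;
	__u8	flags;
	__u8	l4_offset;	/* Offset of the inner L4 header. */
	__u8	reserved;
	__be16	mss;		/* MSS hint used for segmentation. */
	__be16	vlan_tci;
	__be64	key;		/* 64-bit context ID / tunnel key. */
};
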
Example 3
/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb,
		   const struct tnl_ptk_info *tpi)
{
	struct ovs_tunnel_info tun_info;
	struct ovs_net *ovs_net;
	struct vport *vport;
	__be64 key;

	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
	/* A 64-bit tunnel ID is carried split across the GRE key and
	 * sequence number fields, so keyed packets that also have a
	 * sequence number belong to the gre64 vport.
	 */
	if ((tpi->flags & TUNNEL_KEY) && (tpi->flags & TUNNEL_SEQ))
		vport = rcu_dereference(ovs_net->vport_net.gre64_vport);
	else
		vport = rcu_dereference(ovs_net->vport_net.gre_vport);
	if (unlikely(!vport))
		return PACKET_REJECT;

	key = key_to_tunnel_id(tpi->key, tpi->seq);
	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key,
			       filter_tnl_flags(tpi->flags), NULL, 0);

	ovs_vport_receive(vport, skb, &tun_info);
	return PACKET_RCVD;
}
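
gre_rcv() folds the 32-bit GRE key and sequence number into a single
64-bit tunnel ID, which is why keyed packets that also carry TUNNEL_SEQ
are steered to the gre64 vport. A sketch of key_to_tunnel_id()
consistent with that usage (one plausible packing; treat the details as
an assumption):

static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
{
	/* For plain GRE, seq is zero and the low 32 bits of the ID
	 * (in network byte order) are simply the key.
	 */
#ifdef __BIG_ENDIAN
	return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
#else
	return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
#endif
}
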
Example 4
/* Called with rcu_read_lock and BH disabled. */
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
		      struct vxlan_metadata *md)
{
	struct ovs_tunnel_info tun_info;
	struct vxlan_port *vxlan_port;
	struct vport *vport = vs->data;
	struct iphdr *iph;
	struct ovs_vxlan_opts opts = {
		.gbp = md->gbp,
	};
	__be64 key;
	__be16 flags;

	/* A non-zero outer UDP checksum means the sender checksummed
	 * the encapsulated packet.
	 */
	flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
	vxlan_port = vxlan_vport(vport);
	if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
		flags |= TUNNEL_VXLAN_OPT;

	/* Save outer tunnel values */
	iph = ip_hdr(skb);
	key = cpu_to_be64(ntohl(md->vni) >> 8);
	ovs_flow_tun_info_init(&tun_info, iph,
			       udp_hdr(skb)->source, udp_hdr(skb)->dest,
			       key, flags, &opts, sizeof(opts));

	ovs_vport_receive(vport, skb, &tun_info);
}
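
The opts blob attached to the tunnel info carries the VXLAN Group Based
Policy (GBP) metadata copied from md->gbp. A sketch of the structure,
assuming it holds just that one field:

struct ovs_vxlan_opts {
	__u32 gbp;	/* Group policy data from the VXLAN-GBP header. */
};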

static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;

	/* The port the socket is bound to locally is the port remote
	 * ends send to, i.e. the tunnel's destination port.
	 */
	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
		return -EMSGSIZE;

	if (vxlan_port->exts) {
		struct nlattr *exts;

		exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
		if (!exts)
			return -EMSGSIZE;

		if (vxlan_port->exts & VXLAN_F_GBP &&
		    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
			return -EMSGSIZE;

		nla_nest_end(skb, exts);
	}

	return 0;
}

static void vxlan_tnl_destroy(struct vport *vport)
{
	struct vxlan_port *vxlan_port = vxlan_vport(vport);

	vxlan_sock_release(vxlan_port->vs);

	ovs_vport_deferred_free(vport);
}

static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
	[OVS_VXLAN_EXT_GBP]	= { .type = NLA_FLAG, },
};
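
This policy is presumably consumed when the vport is configured, by
parsing the OVS_TUNNEL_ATTR_EXTENSION nest that vxlan_get_options()
emits. A sketch of that path, modeled on the upstream
vxlan_configure_exts(); treat the names and error handling here as
assumptions:

static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
{
	struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	int err;

	if (nla_len(attr) < sizeof(struct nlattr))
		return -EINVAL;

	/* Validate the nested attributes against exts_policy. */
	err = nla_parse_nested(exts, OVS_VXLAN_EXT_MAX, attr, exts_policy);
	if (err < 0)
		return err;

	/* Record which extensions are enabled; vxlan_rcv() checks this
	 * before advertising TUNNEL_VXLAN_OPT.
	 */
	if (exts[OVS_VXLAN_EXT_GBP])
		vxlan_port->exts |= VXLAN_F_GBP;

	return 0;
}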