/* Release the shared STT socket, then free the vport after an RCU
 * grace period (ovs_vport_deferred_free() defers the free via call_rcu()).
 */
static void stt_tnl_destroy(struct vport *vport)
{
	struct stt_port *stt_port = stt_vport(vport);

	stt_sock_release(stt_port->stt_sock);
	ovs_vport_deferred_free(vport);
}
/* Called with rcu_read_lock and BH disabled. */
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
		      struct vxlan_metadata *md)
{
	struct ovs_tunnel_info tun_info;
	struct vxlan_port *vxlan_port;
	struct vport *vport = vs->data;
	struct iphdr *iph;
	struct ovs_vxlan_opts opts = {
		.gbp = md->gbp,
	};
	__be64 key;
	__be16 flags;

	flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
	vxlan_port = vxlan_vport(vport);
	if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
		flags |= TUNNEL_VXLAN_OPT;

	/* Save outer tunnel values. The 24-bit VNI occupies the upper
	 * bits of md->vni; shift it down to use as the tunnel key.
	 */
	iph = ip_hdr(skb);
	key = cpu_to_be64(ntohl(md->vni) >> 8);
	ovs_flow_tun_info_init(&tun_info, iph,
			       udp_hdr(skb)->source, udp_hdr(skb)->dest,
			       key, flags, &opts, sizeof(opts));

	ovs_vport_receive(vport, skb, &tun_info);
}

static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	/* The socket's local (source) port is the UDP port this vport
	 * listens on, i.e. the tunnel destination port.
	 */
	__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;

	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
		return -EMSGSIZE;

	if (vxlan_port->exts) {
		struct nlattr *exts;

		exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
		if (!exts)
			return -EMSGSIZE;

		if (vxlan_port->exts & VXLAN_F_GBP &&
		    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
			return -EMSGSIZE;

		nla_nest_end(skb, exts);
	}

	return 0;
}

static void vxlan_tnl_destroy(struct vport *vport)
{
	struct vxlan_port *vxlan_port = vxlan_vport(vport);

	vxlan_sock_release(vxlan_port->vs);
	ovs_vport_deferred_free(vport);
}

static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
	[OVS_VXLAN_EXT_GBP]	= { .type = NLA_FLAG, },
};
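/* For context: exts_policy is declared above but its consumer is not shown.
 * Below is a minimal sketch of how the policy might be applied when the
 * vport is configured: nla_parse_nested() validates the nested
 * OVS_TUNNEL_ATTR_EXTENSION attributes against exts_policy, and the GBP
 * flag, if present, is recorded in vxlan_port->exts (later checked in
 * vxlan_rcv() and vxlan_get_options()). The function name
 * vxlan_configure_exts and its call site are assumptions, not part of
 * the snippet above.
 */
static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
{
	struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	int err;

	if (nla_len(attr) < sizeof(struct nlattr))
		return -EINVAL;

	/* Validate the nested attributes against exts_policy. */
	err = nla_parse_nested(exts, OVS_VXLAN_EXT_MAX, attr, exts_policy);
	if (err < 0)
		return err;

	if (exts[OVS_VXLAN_EXT_GBP])
		vxlan_port->exts |= VXLAN_F_GBP;

	return 0;
}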