Example #1
0
static int patch_set_options(struct vport *vport, struct nlattr *options)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);
	struct patch_config *patchconf;
	int err;

	patchconf = kmemdup(rtnl_dereference(patch_vport->patchconf),
			  sizeof(struct patch_config), GFP_KERNEL);
	if (!patchconf) {
		err = -ENOMEM;
		goto error;
	}

	err = patch_set_config(vport, options, patchconf);
	if (err)
		goto error_free;

	assign_config_rcu(vport, patchconf);

	hlist_del(&patch_vport->hash_node);

	rcu_assign_pointer(patch_vport->peer,
		ovs_vport_locate(ovs_dp_get_net(vport->dp), patchconf->peer_name));

	hlist_add_head(&patch_vport->hash_node,
		       hash_bucket(ovs_dp_get_net(vport->dp), patchconf->peer_name));

	return 0;
error_free:
	kfree(patchconf);
error:
	return err;
}
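
patch_set_options() publishes the new configuration through assign_config_rcu(), whose body is not shown in this example. Below is a minimal sketch of the usual RCU publish-then-free pattern such a helper follows; it assumes struct patch_config embeds a struct rcu_head named rcu, which is an assumption rather than something taken from the example.

/* Hedged sketch of the RCU pointer swap assign_config_rcu() is assumed to
 * perform: publish the new config, then free the old one only after a
 * grace period.  The 'rcu' member of struct patch_config is an assumption.
 */
static void example_assign_config_rcu(struct vport *vport,
				      struct patch_config *new_config)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);
	struct patch_config *old_config;

	old_config = rtnl_dereference(patch_vport->patchconf);
	rcu_assign_pointer(patch_vport->patchconf, new_config);
	kfree_rcu(old_config, rcu);
}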
Example #2
0
static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct ovs_key_ipv4_tunnel *tun_key;
	struct net *net = ovs_dp_get_net(vport->dp);
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	__be16 dst_port = inet_sport(vxlan_port->vs->sock->sk);
	struct vxlan_metadata md = {0};
	struct rtable *rt;
	__be16 src_port;
	__be32 saddr;
	__be16 df;
	int err;
	u32 vxflags;

	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;

	/* Route lookup */
	saddr = tun_key->ipv4_src;
	rt = find_route(ovs_dp_get_net(vport->dp),
			&saddr, tun_key->ipv4_dst,
			IPPROTO_UDP, tun_key->ipv4_tos,
			skb->mark);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	skb->ignore_df = 1;

	src_port = udp_flow_src_port(net, skb, 0, 0, true);
	md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
	md.gbp = vxlan_ext_gbp(skb);
	vxflags = vxlan_port->exts |
		      (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);

	err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
			     saddr, tun_key->ipv4_dst,
			     tun_key->ipv4_tos,
			     tun_key->ipv4_ttl, df,
			     src_port, dst_port,
			     &md, false, vxflags);
	if (err < 0)
		ip_rt_put(rt);
	return err;
error:
	kfree_skb(skb);
	return err;
}
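
find_route() above is a small routing-lookup wrapper; Example #21 further below performs the equivalent lookup inline with a struct flowi4 and ip_route_output_key(). Below is a hedged sketch of such a wrapper, whose exact signature and saddr write-back are assumptions for illustration.

/* Hypothetical route-lookup helper mirroring the inline lookup in
 * Example #21: fill a flowi4 and ask the IPv4 routing table for an
 * output route.  The signature and the saddr write-back are assumptions.
 */
static struct rtable *example_find_route(struct net *net, __be32 *saddr,
					 __be32 daddr, u8 protocol,
					 u8 tos, u32 mark)
{
	struct rtable *rt;
	struct flowi4 fl;

	memset(&fl, 0, sizeof(fl));
	fl.daddr = daddr;
	fl.saddr = *saddr;
	fl.flowi4_tos = RT_TOS(tos);
	fl.flowi4_mark = mark;
	fl.flowi4_proto = protocol;

	rt = ip_route_output_key(net, &fl);
	if (!IS_ERR(rt))
		*saddr = fl.saddr;	/* the lookup may choose the source address */
	return rt;
}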
Example #3
0
static int lisp_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
				      struct dp_upcall_info *upcall)
{
	struct lisp_port *lisp_port = lisp_vport(vport);
	struct net *net = ovs_dp_get_net(vport->dp);
	__be16 dport = htons(lisp_port->port_no);
	__be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);

	return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
					  skb, IPPROTO_UDP, sport, dport);
}
Example #4
0
static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct ovs_key_ipv4_tunnel *tun_key;
	struct net *net = ovs_dp_get_net(vport->dp);
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	__be16 dst_port = inet_sport(vxlan_port->vs->sock->sk);
	struct rtable *rt;
	__be16 src_port;
	__be32 saddr;
	__be16 df;
	int port_min;
	int port_max;
	int err;

	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;

	/* Route lookup */
	saddr = tun_key->ipv4_src;
	rt = find_route(ovs_dp_get_net(vport->dp),
			&saddr, tun_key->ipv4_dst,
			IPPROTO_UDP, tun_key->ipv4_tos,
			skb->mark);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	skb->local_df = 1;

	inet_get_local_port_range(net, &port_min, &port_max);
	src_port = vxlan_src_port(port_min, port_max, skb);

	err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
			     saddr, tun_key->ipv4_dst,
			     tun_key->ipv4_tos,
			     tun_key->ipv4_ttl, df,
			     src_port, dst_port,
			     htonl(be64_to_cpu(tun_key->tun_id) << 8));
	if (err < 0)
		ip_rt_put(rt);
error:
	return err;
}
Example #5
0
static int stt_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
				   struct ovs_tunnel_info *egress_tun_info)
{
	struct stt_port *stt_port = stt_vport(vport);
	struct net *net = ovs_dp_get_net(vport->dp);
	__be16 dport = inet_sk(stt_port->stt_sock->sock->sk)->inet_sport;
	__be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);

	/* Get tp_src and tp_dst; refer to stt_build_header(). */
	return ovs_tunnel_get_egress_info(egress_tun_info,
					  ovs_dp_get_net(vport->dp),
					  OVS_CB(skb)->egress_tun_info,
					  IPPROTO_TCP, skb->mark, sport, dport);
}
Example #6
0
/**
 *	ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel info (if any) that carried the packet
 *
 * Must be called with rcu_read_lock.  The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		      const struct ip_tunnel_info *tun_info)
{
	struct sw_flow_key key;
	int error;

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->mru = 0;
	OVS_CB(skb)->cutlen = 0;
	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
		u32 mark;

		mark = skb->mark;
		skb_scrub_packet(skb, true);
		skb->mark = mark;
		tun_info = NULL;
	}

	ovs_skb_init_inner_protocol(skb);
	skb_clear_ovs_gso_cb(skb);
	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return error;
	}
	ovs_dp_process_packet(skb, &key);
	return 0;
}
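
As the comment above states, callers must hold rcu_read_lock() and pass an unshared skb with skb->data at the Ethernet header. A minimal hedged usage sketch follows; the hook function itself is hypothetical.

/* Hypothetical receive hook: hand an unshared skb with no tunnel
 * metadata to the datapath.  Assumes 'vport' was resolved elsewhere and
 * that taking rcu_read_lock() here is appropriate for the caller.
 */
static void example_port_receive(struct vport *vport, struct sk_buff *skb)
{
	rcu_read_lock();
	ovs_vport_receive(vport, skb, NULL);
	rcu_read_unlock();
}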
Example #7
0
static struct vport *gre_tnl_create(const struct vport_parms *parms)
{
	struct net *net = ovs_dp_get_net(parms->dp);
	struct net_device *dev;
	struct vport *vport;
	int err;

	vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	rtnl_lock();
	dev = gretap_fb_dev_create(net, parms->name, NET_NAME_USER);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		ovs_vport_free(vport);
		return ERR_CAST(dev);
	}

	err = dev_change_flags(dev, dev->flags | IFF_UP);
	if (err < 0) {
		rtnl_delete_link(dev);
		rtnl_unlock();
		ovs_vport_free(vport);
		return ERR_PTR(err);
	}

	rtnl_unlock();
	return vport;
}
Example #8
0
static int __send(struct vport *vport, struct sk_buff *skb,
		  int tunnel_hlen,
		  __be32 seq, __be16 gre64_flag)
{
	struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
	struct rtable *rt;
	int min_headroom;
	__be16 df;
	__be32 saddr;
	int err;

	/* Route lookup */
	saddr = tun_key->ipv4_src;
	rt = find_route(ovs_dp_get_net(vport->dp),
			&saddr, tun_key->ipv4_dst,
			IPPROTO_GRE, tun_key->ipv4_tos,
			skb->mark);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ tunnel_hlen + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
					0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
Example #9
0
static struct vport *netdev_create(const struct vport_parms *parms)
{
    struct vport *vport;
    struct netdev_vport *netdev_vport;
    int err;

    vport = ovs_vport_alloc(sizeof(struct netdev_vport),
                            &ovs_netdev_vport_ops, parms);
    if (IS_ERR(vport)) {
        err = PTR_ERR(vport);
        goto error;
    }

    netdev_vport = netdev_vport_priv(vport);

    netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
    if (!netdev_vport->dev) {
        err = -ENODEV;
        goto error_free_vport;
    }

    if (netdev_vport->dev->flags & IFF_LOOPBACK ||
            netdev_vport->dev->type != ARPHRD_ETHER ||
            ovs_is_internal_dev(netdev_vport->dev)) {
        err = -EINVAL;
        goto error_put;
    }

    rtnl_lock();
    err = netdev_master_upper_dev_link(netdev_vport->dev,
                                       get_dpdev(vport->dp));
    if (err)
        goto error_unlock;

    err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
                                     vport);
    if (err)
        goto error_master_upper_dev_unlink;

    dev_set_promiscuity(netdev_vport->dev, 1);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
    dev_disable_lro(netdev_vport->dev);
#endif
    netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
    rtnl_unlock();

    netdev_init();
    return vport;

error_master_upper_dev_unlink:
    netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
error_unlock:
    rtnl_unlock();
error_put:
    dev_put(netdev_vport->dev);
error_free_vport:
    ovs_vport_free(vport);
error:
    return ERR_PTR(err);
}
Example #10
0
static struct vport *netdev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct netdev_vport *netdev_vport;
	int err;

	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
				&ovs_netdev_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	netdev_vport = netdev_vport_priv(vport);

	netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
	if (!netdev_vport->dev) {
		err = -ENODEV;
		goto error_free_vport;
	}

	if (netdev_vport->dev->flags & IFF_LOOPBACK ||
	    netdev_vport->dev->type != ARPHRD_ETHER ||
	    ovs_is_internal_dev(netdev_vport->dev)) {
		err = -EINVAL;
		goto error_put;
	}

	rtnl_lock();
#ifdef HAVE_RHEL_OVS_HOOK
	rcu_assign_pointer(netdev_vport->dev->ax25_ptr, vport);
	atomic_inc(&nr_bridges);
	rcu_assign_pointer(openvswitch_handle_frame_hook, netdev_frame_hook);
#else
	err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
					 vport);
	if (err)
		goto error_unlock;
#endif

	dev_set_promiscuity(netdev_vport->dev, 1);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	dev_disable_lro(netdev_vport->dev);
#endif
	netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
	rtnl_unlock();

	return vport;

#ifndef HAVE_RHEL_OVS_HOOK
error_unlock:
#endif
	rtnl_unlock();
error_put:
	dev_put(netdev_vport->dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
Example #11
0
/**
 *	ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type).  ovs_mutex must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport *vport;
	int err = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
		if (vport_ops_list[i]->type == parms->type) {
			struct hlist_head *bucket;

			vport = vport_ops_list[i]->create(parms);
			if (IS_ERR(vport)) {
				err = PTR_ERR(vport);
				goto out;
			}

			bucket = hash_bucket(ovs_dp_get_net(vport->dp),
					     vport->ops->get_name(vport));
			hlist_add_head_rcu(&vport->hash_node, bucket);
			return vport;
		}
	}

	err = -EAFNOSUPPORT;

out:
	return ERR_PTR(err);
}
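
A hedged sketch of how a caller that already holds ovs_mutex might create a port through this entry point and propagate ERR_PTR-style failures; the wrapper itself is hypothetical.

/* Hypothetical caller: 'parms' is assumed to be filled in elsewhere and
 * ovs_mutex is assumed to be held, as ovs_vport_add() requires.
 */
static int example_new_port(const struct vport_parms *parms)
{
	struct vport *vport = ovs_vport_add(parms);

	if (IS_ERR(vport))
		return PTR_ERR(vport);	/* e.g. -EAFNOSUPPORT for an unknown type */

	return 0;
}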
Example #12
0
static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	__be16 dst_port = inet_sport(vxlan_port->vs->sock->sk);
	struct rtable *rt;
	__be16 src_port;
	__be32 saddr;
	__be16 df;
	int err;

	if (unlikely(!OVS_CB(skb)->tun_key)) {
		err = -EINVAL;
		goto error;
	}

	/* Route lookup */
	saddr = OVS_CB(skb)->tun_key->ipv4_src;
	rt = find_route(ovs_dp_get_net(vport->dp),
			&saddr,
			OVS_CB(skb)->tun_key->ipv4_dst,
			IPPROTO_UDP,
			OVS_CB(skb)->tun_key->ipv4_tos,
			skb->mark);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
		htons(IP_DF) : 0;

	skb->local_df = 1;

	src_port = udp_flow_src_port(net, skb, 0, 0, true);

	err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
			     saddr, OVS_CB(skb)->tun_key->ipv4_dst,
			     OVS_CB(skb)->tun_key->ipv4_tos,
			     OVS_CB(skb)->tun_key->ipv4_ttl, df,
			     src_port, dst_port,
			     htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
	if (err < 0)
		ip_rt_put(rt);
error:
	return err;
}
Example #13
0
static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
				   struct ip_tunnel_info *egress_tun_info)
{
	return ovs_tunnel_get_egress_info(egress_tun_info,
					  ovs_dp_get_net(vport->dp),
					  OVS_CB(skb)->egress_tun_info,
					  IPPROTO_GRE, skb->mark, 0, 0);
}
Example #14
0
static void patch_destroy(struct vport *vport)
{
	struct patch_vport *patch_vport = patch_vport_priv(vport);

	update_peers(ovs_dp_get_net(vport->dp), patch_vport->name, NULL);
	hlist_del(&patch_vport->hash_node);
	call_rcu(&patch_vport->rcu, free_port_rcu);
}
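
patch_destroy() defers the actual free to free_port_rcu(), which is not shown here. Below is a hedged sketch of what such an RCU callback typically does, using only helpers that appear elsewhere in these examples; the exact layout of struct patch_vport is an assumption.

/* Hedged sketch of an RCU callback like free_port_rcu(): recover the
 * patch_vport from its rcu_head, drop the config, and free the vport.
 * The 'rcu' and 'patchconf' member layout is assumed for illustration.
 */
static void example_free_port_rcu(struct rcu_head *rcu)
{
	struct patch_vport *patch_vport =
		container_of(rcu, struct patch_vport, rcu);

	kfree(rcu_dereference_raw(patch_vport->patchconf));
	ovs_vport_free(vport_from_priv(patch_vport));
}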
Example #15
0
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct internal_dev *internal_dev;
	int err;

	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	vport->dev = alloc_netdev(sizeof(struct internal_dev),
				  parms->name, NET_NAME_USER, do_setup);
	if (!vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}
	vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vport->dev->tstats) {
		err = -ENOMEM;
		goto error_free_netdev;
	}

#ifdef HAVE_IFF_PHONY_HEADROOM
	vport->dev->needed_headroom = vport->dp->max_headroom;
#endif
	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
	internal_dev = internal_dev_priv(vport->dev);
	internal_dev->vport = vport;

	/* Restrict bridge port to current netns. */
	if (vport->port_no == OVSP_LOCAL)
		vport->dev->features |= NETIF_F_NETNS_LOCAL;

	rtnl_lock();
	err = register_netdevice(vport->dev);
	if (err)
		goto error_unlock;

	dev_set_promiscuity(vport->dev, 1);
	rtnl_unlock();
	netif_start_queue(vport->dev);

	return vport;

error_unlock:
	rtnl_unlock();
	free_percpu(vport->dev->tstats);
error_free_netdev:
	free_netdev(vport->dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
Example #16
0
static void gre64_tnl_destroy(struct vport *vport)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct ovs_net *ovs_net;

	ovs_net = net_generic(net, ovs_net_id);

	rcu_assign_pointer(ovs_net->vport_net.gre64_vport, NULL);
	ovs_tnl_destroy(vport);
}
Example #17
0
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}
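
Since this callback takes a struct rcu_head, the teardown path is expected to schedule it with call_rcu() so the datapath is only freed after a grace period. A minimal hedged sketch; the surrounding teardown function is hypothetical.

/* Hypothetical teardown step: defer the final free of 'dp' until after
 * an RCU grace period by handing destroy_dp_rcu() to call_rcu().
 */
static void example_schedule_dp_destroy(struct datapath *dp)
{
	call_rcu(&dp->rcu, destroy_dp_rcu);
}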
Example #18
0
static int stt_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct stt_port *stt_port = stt_vport(vport);
	__be16 dport = inet_sk(stt_port->stt_sock->sock->sk)->inet_sport;
	const struct ovs_key_ipv4_tunnel *tun_key;
	const struct ovs_tunnel_info *tun_info;
	struct rtable *rt;
	__be16 sport;
	__be32 saddr;
	__be16 df;
	int err;

	tun_info = OVS_CB(skb)->egress_tun_info;
	if (unlikely(!tun_info)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = &tun_info->tunnel;
	/* Route lookup */
	saddr = tun_key->ipv4_src;
	rt = find_route(ovs_dp_get_net(vport->dp),
			&saddr, tun_key->ipv4_dst,
			IPPROTO_TCP, tun_key->ipv4_tos,
			skb->mark);

	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
	skb->ignore_df = 1;

	return stt_xmit_skb(skb, rt, saddr, tun_key->ipv4_dst,
			    tun_key->ipv4_tos, tun_key->ipv4_ttl,
			    df, sport, dport, tun_key->tun_id);
error:
	kfree_skb(skb);
	return err;
}
Example #19
0
static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
{
	struct net *net = ovs_dp_get_net(parms->dp);
	struct nlattr *options = parms->options;
	struct vxlan_port *vxlan_port;
	struct vxlan_sock *vs;
	struct vport *vport;
	struct nlattr *a;
	u16 dst_port;
	int err;

	if (!options) {
		err = -EINVAL;
		goto error;
	}
	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
	if (a && nla_len(a) == sizeof(u16)) {
		dst_port = nla_get_u16(a);
	} else {
		/* Require destination port from userspace. */
		err = -EINVAL;
		goto error;
	}

	vport = ovs_vport_alloc(sizeof(struct vxlan_port),
				&ovs_vxlan_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	vxlan_port = vxlan_vport(vport);
	strncpy(vxlan_port->name, parms->name, IFNAMSIZ);

	a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
	if (a) {
		err = vxlan_configure_exts(vport, a);
		if (err) {
			ovs_vport_free(vport);
			goto error;
		}
	}

	vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true,
			    vxlan_port->exts);
	if (IS_ERR(vs)) {
		ovs_vport_free(vport);
		return (void *)vs;
	}
	vxlan_port->vs = vs;

	return vport;

error:
	return ERR_PTR(err);
}
Example #20
0
/**
 *	ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace in which to search
 * @name: name of port to find
 *
 * Must be called with ovs_mutex or RCU read lock.
 */
struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct vport *vport;

	hlist_for_each_entry_rcu(vport, bucket, hash_node)
		if (!strcmp(name, ovs_vport_name(vport)) &&
		    net_eq(ovs_dp_get_net(vport->dp), net))
			return vport;

	return NULL;
}
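
Both ovs_vport_locate() and ovs_vport_add() pick a bucket via hash_bucket(net, name), which is not shown in these examples. Below is a hedged sketch of what such a helper typically looks like; the table, bucket count, and hash function are assumptions, not the source implementation.

#define EXAMPLE_VPORT_HASH_BUCKETS 1024

/* Hypothetical bucket table keyed by (netns, name); for illustration only. */
static struct hlist_head example_dev_table[EXAMPLE_VPORT_HASH_BUCKETS];

/* Hedged sketch of a hash_bucket()-style helper: mix the port name with
 * the netns pointer so the same name in different namespaces still maps
 * to a stable bucket.
 */
static struct hlist_head *example_hash_bucket(const struct net *net,
					      const char *name)
{
	unsigned int hash = jhash(name, strlen(name), (unsigned long)net);

	return &example_dev_table[hash & (EXAMPLE_VPORT_HASH_BUCKETS - 1)];
}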
Example #21
0
static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
	struct ovs_key_ipv4_tunnel *tun_key;
	struct rtable *rt;
	struct flowi4 fl;
	__be16 src_port;
	__be16 df;
	int err;

	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
	/* Route lookup */
	memset(&fl, 0, sizeof(fl));
	fl.daddr = tun_key->ipv4_dst;
	fl.saddr = tun_key->ipv4_src;
	fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
		htons(IP_DF) : 0;

	skb->ignore_df = 1;

	src_port = udp_flow_src_port(net, skb, 0, 0, true);

	err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
			     fl.saddr, tun_key->ipv4_dst,
			     tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
			     src_port, dst_port,
			     htonl(be64_to_cpu(tun_key->tun_id) << 8),
			     false);
	if (err < 0)
		ip_rt_put(rt);
	return err;
error:
	kfree_skb(skb);
	return err;
}
Example #22
0
static int dp_device_event(struct notifier_block *unused, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct vport *vport;

	if (ovs_is_internal_dev(dev))
		vport = ovs_internal_dev_get_vport(dev);
	else
		vport = ovs_netdev_get_vport(dev);

	if (!vport)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		if (!ovs_is_internal_dev(dev)) {
			struct sk_buff *notify;
			struct datapath *dp = vport->dp;

			notify = ovs_vport_cmd_build_info(vport, 0, 0,
							  OVS_VPORT_CMD_DEL);
			ovs_dp_detach_port(vport);
			if (IS_ERR(notify)) {
				netlink_set_err(ovs_dp_get_net(dp)->genl_sock, 0,
						ovs_dp_vport_multicast_group.id,
						PTR_ERR(notify));
				break;
			}

			genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0,
						ovs_dp_vport_multicast_group.id,
						GFP_KERNEL);
		}
		break;
	}

	return NOTIFY_DONE;
}
Example #23
0
/*
 * Important! Registers the network device with the given parameters.
 * When is this called?
 */
static struct vport *netdev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct netdev_vport *netdev_vport;  // see the definition in vport_netdev.h
	int err;
	// Build a vport from the specific ovs_netdev_vport_ops and the given parameters.
	// The first argument is the size of the private data area, which here holds struct netdev_vport.
	vport = ovs_vport_alloc(sizeof(struct netdev_vport), &ovs_netdev_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	netdev_vport = netdev_vport_priv(vport);

	netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
	if (!netdev_vport->dev) {
		err = -ENODEV;
		goto error_free_vport;
	}

	if (netdev_vport->dev->flags & IFF_LOOPBACK ||
	    netdev_vport->dev->type != ARPHRD_ETHER ||
	    ovs_is_internal_dev(netdev_vport->dev)) {
		err = -EINVAL;
		goto error_put;
	}
	/* Important! This sets two fields of net_device.
	 * netdev_rx_handler_register - register receive handler
	 *      @dev: device to register a handler for
	 *      @rx_handler: receive handler to register
	 *      @rx_handler_data: data pointer that is used by rx handler
	 */
	err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook, vport);
	if (err)
		goto error_put;

	dev_set_promiscuity(netdev_vport->dev, 1);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	dev_disable_lro(netdev_vport->dev);
#endif
	netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;

	return vport;

error_put:
	dev_put(netdev_vport->dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
Example #24
0
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct netdev_vport *netdev_vport;
	struct internal_dev *internal_dev;
	int err;

	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
				&ovs_internal_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	netdev_vport = netdev_vport_priv(vport);

	netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
					 parms->name, NET_NAME_UNKNOWN,
					 do_setup);
	if (!netdev_vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
	internal_dev = internal_dev_priv(netdev_vport->dev);
	internal_dev->vport = vport;

	/* Restrict bridge port to current netns. */
	if (vport->port_no == OVSP_LOCAL)
		netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;

	rtnl_lock();
	err = register_netdevice(netdev_vport->dev);
	if (err)
		goto error_free_netdev;

	dev_set_promiscuity(netdev_vport->dev, 1);
	rtnl_unlock();
	netif_start_queue(netdev_vport->dev);

	return vport;

error_free_netdev:
	rtnl_unlock();
	free_netdev(netdev_vport->dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
Example #25
0
static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->sport;
	struct rtable *rt;
	struct flowi fl;
	__be16 src_port;
	int port_min;
	int port_max;
	__be16 df;
	int err;

	if (unlikely(!OVS_CB(skb)->tun_key)) {
		err = -EINVAL;
		goto error;
	}

	/* Route lookup */
	memset(&fl, 0, sizeof(fl));
	fl.fl4_dst = OVS_CB(skb)->tun_key->ipv4_dst;
	fl.fl4_src = OVS_CB(skb)->tun_key->ipv4_src;
	fl.fl4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
	fl.mark = skb->mark;
	fl.proto = IPPROTO_UDP;

	err = ip_route_output_key(net, &rt, &fl);
	if (err)
		goto error;

	df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
		htons(IP_DF) : 0;

	skb->local_df = 1;

	inet_get_local_port_range(&port_min, &port_max);
	src_port = vxlan_src_port(port_min, port_max, skb);

	err = vxlan_xmit_skb(net, vxlan_port->vs, rt, skb,
			     fl.fl4_src, OVS_CB(skb)->tun_key->ipv4_dst,
			     OVS_CB(skb)->tun_key->ipv4_tos,
			     OVS_CB(skb)->tun_key->ipv4_ttl, df,
			     src_port, dst_port,
			     htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
	if (err < 0)
		ip_rt_put(rt);
error:
	return err;
}
Example #26
0
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}
Example #27
0
static struct vport *gre64_create(const struct vport_parms *parms)
{
	struct net *net = ovs_dp_get_net(parms->dp);
	struct ovs_net *ovs_net;
	struct vport *vport;

	ovs_net = net_generic(net, ovs_net_id);
	if (rtnl_dereference(ovs_net->vport_net.gre64_vport))
		return ERR_PTR(-EEXIST);

	vport = ovs_tnl_create(parms, &ovs_gre64_vport_ops, &gre64_tnl_ops);

	rcu_assign_pointer(ovs_net->vport_net.gre64_vport, vport);
	return vport;
}
Example #28
0
static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
				     struct ovs_tunnel_info *egress_tun_info)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct vxlan_port *vxlan_port = vxlan_vport(vport);
	__be16 dst_port = inet_sport(vxlan_port->vs->sock->sk);
	__be16 src_port;

	src_port = udp_flow_src_port(net, skb, 0, 0, true);

	return ovs_tunnel_get_egress_info(egress_tun_info, net,
					  OVS_CB(skb)->egress_tun_info,
					  IPPROTO_UDP, skb->mark,
					  src_port, dst_port);
}
Example #29
0
static void update_peers(struct net *net, const char *name, struct vport *vport)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct patch_vport *peer_vport;
	struct hlist_node *node;

	hlist_for_each_entry(peer_vport, node, bucket, hash_node) {
		struct vport *curr_vport = vport_from_priv(peer_vport);
		const char *peer_name;

		peer_name = rtnl_dereference(peer_vport->patchconf)->peer_name;
		if (!strcmp(peer_name, name) && net_eq(ovs_dp_get_net(curr_vport->dp), net))
			rcu_assign_pointer(peer_vport->peer, vport);
	}
}
Example #30
0
static struct vport *patch_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct patch_vport *patch_vport;
	const char *peer_name;
	struct patch_config *patchconf;
	struct net *net = ovs_dp_get_net(parms->dp);
	int err;

	vport = ovs_vport_alloc(sizeof(struct patch_vport),
				&ovs_patch_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	patch_vport = patch_vport_priv(vport);

	strcpy(patch_vport->name, parms->name);

	patchconf = kmalloc(sizeof(struct patch_config), GFP_KERNEL);
	if (!patchconf) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	err = patch_set_config(vport, parms->options, patchconf);
	if (err)
		goto error_free_patchconf;

	random_ether_addr(patchconf->eth_addr);

	rcu_assign_pointer(patch_vport->patchconf, patchconf);

	peer_name = patchconf->peer_name;
	hlist_add_head(&patch_vport->hash_node, hash_bucket(net, peer_name));
	rcu_assign_pointer(patch_vport->peer, ovs_vport_locate(net, peer_name));
	update_peers(net, patch_vport->name, vport);

	return vport;

error_free_patchconf:
	kfree(patchconf);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}