Example #1
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = priv->peer;

	unregister_netdevice_queue(dev, head);
	unregister_netdevice_queue(peer, head);
}
Example #2
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
{
	int h;

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			unregister_netdevice_queue(t->dev, head);
	}
	if (itn->fb_tunnel_dev)
		unregister_netdevice_queue(itn->fb_tunnel_dev, head);
}
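The head list that ip_tunnel_destroy() fills is flushed by its caller in a single batch. A minimal sketch of such a caller, loosely modeled on the kernel's pernet exit path (the ipip_net_id and ipip_exit_net names are illustrative here, not taken from the examples above):

static void __net_exit ipip_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
	LIST_HEAD(list);	/* ip_tunnel_destroy() queues devices here */

	rtnl_lock();
	ip_tunnel_destroy(itn, &list);
	/* one batched unregister: a single RCU grace period for all devices */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

Queueing onto a caller-supplied list and unregistering once with unregister_netdevice_many() is what makes tearing down a whole namespace of tunnels cheap.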
Example #3
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
static void
vhost_dellink(struct net_device *dev, struct list_head *head)
#else
static void
vhost_dellink(struct net_device *dev)
#endif
{
    struct vhost_priv *vp;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
    unregister_netdevice_queue(dev, head);
#else
    unregister_netdevice(dev);
#endif

    vp = netdev_priv(dev);
    if (vp) {
        if (vp->vp_db_index >= 0)
            vhost_priv_db[vp->vp_db_index] = NULL;

        vp->vp_db_index = -1;

        if (vp->vp_phys_dev) {
            vhost_del_tap_phys(vp->vp_phys_dev);
            vp->vp_phys_dev = NULL;
        }

        vp->vp_phys_name[0] = '\0';
    }

    if (!vhost_num_interfaces)
        BUG();
    vhost_num_interfaces--;

    return;
}
Example #4
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	if (vlan_id)
		vlan_vid_del(real_dev, vlan_id);

	grp->nr_vlan_devs--;

	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan_id, NULL);
	unregister_netdevice_queue(dev, head);

	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}
Example #5
static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
{
	int h;
	struct ip6_tnl *t;
	LIST_HEAD(list);

	for (h = 0; h < HASH_SIZE; h++) {
		t = ip6n->tnls_r_l[h];
		while (t != NULL) {
			unregister_netdevice_queue(t->dev, &list);
			t = t->next;
		}
	}

	t = ip6n->tnls_wc[0];
	unregister_netdevice_queue(t->dev, &list);
	unregister_netdevice_many(&list);
}
Example #6
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	unregister_netdevice_queue(dev, head);
}
Example #7
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct ip_tunnel *t;

	t = rtnl_dereference(itn->collect_md_tun);
	if (!t)
		return;
	unregister_netdevice_queue(t->dev, head);
}
Example #8
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}
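Example 8 clears priv->peer with RCU_INIT_POINTER() before queueing both devices, so an RCU reader observes NULL rather than a device that is about to go away. A hypothetical reader-side sketch (veth_peer_dev is an assumed helper name, not from the examples above):

static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* caller must hold rcu_read_lock(); NULL while dellink is running */
	return rcu_dereference(priv->peer);
}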
Example #9
static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
{
	struct ipoib_dev_priv *priv, *ppriv;

	priv = netdev_priv(dev);
	ppriv = netdev_priv(priv->parent);

	down_write(&ppriv->vlan_rwsem);
	unregister_netdevice_queue(dev, head);
	list_del(&priv->list);
	up_write(&ppriv->vlan_rwsem);
}
Example #10
static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
{
	struct ipoib_dev_priv *priv, *ppriv;

	priv = netdev_priv(dev);
	ppriv = netdev_priv(priv->parent);

	mutex_lock(&ppriv->vlan_mutex);
	unregister_netdevice_queue(dev, head);
	list_del(&priv->list);
	mutex_unlock(&ppriv->vlan_mutex);
}
Example #11
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(itn, netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
Example #12
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
void rpl_ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
#else
void rpl_ip_tunnel_dellink(struct net_device *dev)
#endif
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	ip_tunnel_del(itn, netdev_priv(dev));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
	unregister_netdevice_queue(dev, head);
#endif
}
Example #13
static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
{
	struct rmnet_walk_data *d = data;
	u8 mux_id;

	mux_id = rmnet_vnd_get_mux(rmnet_dev);

	rmnet_vnd_dellink(mux_id, d->port);
	netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
	unregister_netdevice_queue(rmnet_dev, d->head);

	return 0;
}
Example #14
static int vif_delete(struct net *net, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= net->ipv4.maxvif)
		return -EADDRNOTAVAIL;

	v = &net->ipv4.vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == net->ipv4.mroute_reg_vif_num)
		net->ipv4.mroute_reg_vif_num = -1;
#endif

	if (vifi+1 == net->ipv4.maxvif) {
		int tmp;
		for (tmp=vifi-1; tmp>=0; tmp--) {
			if (VIF_EXISTS(net, tmp))
				break;
		}
		net->ipv4.maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
Example #15
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net *net = dev_net(itn->fb_tunnel_dev);
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}
Example #16
static int mif6_delete(struct net *net, int vifi, struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	if (vifi < 0 || vifi >= net->ipv6.maxvif)
		return -EADDRNOTAVAIL;

	v = &net->ipv6.vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == net->ipv6.mroute_reg_vif_num)
		net->ipv6.mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == net->ipv6.maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(net, tmp))
				break;
		}
		net->ipv6.maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding--;

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
Example #17
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_info *vlan = vlan_dev_info(dev);
	struct net_device *real_dev = vlan->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	grp = rtnl_dereference(real_dev->vlgrp);
	BUG_ON(!grp);

	/* Take it out of our own structures, but be sure to interlock with
	 * HW accelerating devices or SW vlan input packet processing if
	 * VLAN is not 0 (leave it there for 802.1p).
	 */
	if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);

	grp->nr_vlans--;

	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan_id, NULL);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we don't need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	/* If the group is now empty, kill off the group. */
	if (grp->nr_vlans == 0) {
		vlan_gvrp_uninit_applicant(real_dev);

		rcu_assign_pointer(real_dev->vlgrp, NULL);
		if (ops->ndo_vlan_rx_register)
			ops->ndo_vlan_rx_register(real_dev, NULL);

		/* Free the group, after all cpu's are done. */
		call_rcu(&grp->rcu, vlan_rcu_free);
	}

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}
Example #18
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)
static void ipoib_unregister_child_dev(struct net_device *dev,
				       struct list_head *head)
#else
static void ipoib_unregister_child_dev(struct net_device *dev)
#endif
{
	struct ipoib_dev_priv *priv, *ppriv;

	priv = netdev_priv(dev);
	ppriv = netdev_priv(priv->parent);

	mutex_lock(&ppriv->vlan_mutex);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)
	unregister_netdevice_queue(dev, head);
#else
	unregister_netdevice(dev);
#endif
	list_del(&priv->list);
	mutex_unlock(&ppriv->vlan_mutex);
}
Example #19
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we don't need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	/* Take it out of our own structures, but be sure to interlock with
	 * HW accelerating devices or SW vlan input packet processing if
	 * VLAN is not 0 (leave it there for 802.1p).
	 */
	if (vlan_id)
		vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}
Example #20
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *real_dev;
	struct rmnet_port *port;
	u8 mux_id;

	rcu_read_lock();
	real_dev = netdev_master_upper_dev_get_rcu(dev);
	rcu_read_unlock();

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return;

	port = rmnet_get_port_rtnl(real_dev);

	mux_id = rmnet_vnd_get_mux(dev);
	rmnet_vnd_dellink(mux_id, port);
	netdev_upper_dev_unlink(dev, real_dev);
	rmnet_unregister_real_device(real_dev, port);

	unregister_netdevice_queue(dev, head);
}
Example #21
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}
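All of these dellink helpers are typically installed as the dellink hook of a struct rtnl_link_ops; the rtnetlink core supplies the head list and flushes it with unregister_netdevice_many() after the last hook has run. An abridged sketch of that wiring for Example 21 (field values are illustrative; a real driver also sets .setup, .newlink, and so on):

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= "vrf",
	.priv_size	= sizeof(struct net_vrf),	/* assumed priv struct */
	.dellink	= vrf_dellink,	/* queues dev; the core batches the unregister */
};

static int __init vrf_init_module(void)
{
	/* once registered, "ip link del" reaches vrf_dellink() */
	return rtnl_link_register(&vrf_link_ops);
}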