/*********************************************************** * mv_eth_stop -- * * stop interface with linux core. stop port activity. * * free skb's from rings. * ***********************************************************/ int mv_eth_stop( struct net_device *dev ) { unsigned long flags; mv_eth_priv *priv = MV_ETH_PRIV(dev); /* first make sure that the port finished its Rx polling - see tg3 */ /* otherwise it may cause issue in SMP, one CPU is here and the other is doing the polling and both of it are messing with the descriptors rings!! */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) netif_poll_disable(dev); #else napi_disable(&priv->napi); #endif spin_lock_irqsave(priv->lock, flags); /* stop upper layer */ netif_carrier_off(dev); netif_stop_queue(dev); /* stop tx/rx activity, mask all interrupts, relese skb in rings,*/ mv_eth_stop_internals(priv); spin_unlock_irqrestore(priv->lock, flags); if( dev->irq != 0 ) { free_irq(dev->irq, priv); } printk(KERN_NOTICE "%s: stopped\n", dev->name); return 0; }
/*
 * vnic_stop_fastpath - quiesce the accelerated datapath for @vnic.
 *
 * Teardown order: stop interrupt delivery, disable tx (freeing any skb
 * the vnic is still holding), then disable polling.  Tx state is
 * modified with both locks held, vnic->tx_lock taken before np->tx_lock.
 */
void vnic_stop_fastpath(netfront_accel_vnic *vnic)
{
	struct net_device *net_dev = vnic->net_dev;
	struct netfront_info *np =
		(struct netfront_info *)netdev_priv(net_dev);
	unsigned long flags1, flags2;

	DPRINTK("%s\n", __FUNCTION__);

	/* No new work may arrive while we tear the fastpath down. */
	vnic_stop_interrupts(vnic);

	/* Lock order: vnic->tx_lock (outer) before np->tx_lock (inner). */
	spin_lock_irqsave(&vnic->tx_lock, flags1);
	vnic->tx_enabled = 0;
	spin_lock_irqsave(&np->tx_lock, flags2);
	if (vnic->tx_skb != NULL) {
		/* Drop the skb parked on the fastpath and, if the stack's
		 * queue has room again, wake it up. */
		dev_kfree_skb_any(vnic->tx_skb);
		vnic->tx_skb = NULL;
		if (netfront_check_queue_ready(net_dev)) {
			netif_wake_queue(net_dev);
			NETFRONT_ACCEL_STATS_OP
				(vnic->stats.queue_wakes++);
		}
	}
	spin_unlock_irqrestore(&np->tx_lock, flags2);
	spin_unlock_irqrestore(&vnic->tx_lock, flags1);

	/* Must prevent polls and hold lock to modify poll_enabled */
	netif_poll_disable(net_dev);
	spin_lock_irqsave(&vnic->irq_enabled_lock, flags1);
	vnic->poll_enabled = 0;
	spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1);
	netif_poll_enable(net_dev);
}
static int vb_net_down(struct net_device *netdev) { struct voicebus *vb = voicebus_from_netdev(netdev); dev_dbg(&vb->pdev->dev, "%s\n", __func__); #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) netif_poll_disable(netdev); #else napi_disable(&vb->napi); #endif return 0; }
/*
 * vnic_start_fastpath - bring the accelerated datapath back up.
 *
 * Re-enables tx under the tx lock, flips poll_enabled on while polling
 * is parked, then re-arms interrupts.
 *
 * NOTE(review): unlike vnic_stop_fastpath(), poll_enabled is written
 * here without taking vnic->irq_enabled_lock — confirm no reader can
 * race this before vnic_start_interrupts() runs.
 */
static void vnic_start_fastpath(netfront_accel_vnic *vnic)
{
	struct net_device *net_dev = vnic->net_dev;
	unsigned long tx_flags;

	DPRINTK("%s\n", __FUNCTION__);

	/* Allow transmits again. */
	spin_lock_irqsave(&vnic->tx_lock, tx_flags);
	vnic->tx_enabled = 1;
	spin_unlock_irqrestore(&vnic->tx_lock, tx_flags);

	/* Park polling while the poll_enabled flag is flipped. */
	netif_poll_disable(net_dev);
	vnic->poll_enabled = 1;
	netif_poll_enable(net_dev);

	vnic_start_interrupts(vnic);
}
/*
 * hxge_down - take the hxge adapter offline.
 *
 * Clears the UP state bit, stops link monitoring, detaches the netdev
 * from the stack, masks interrupts and finally resets the hardware.
 */
static void hxge_down(struct hxge_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	clear_bit(HXGE_DEVICE_UP, &adapter->state);

	hxge_link_monitor(adapter, LINK_MONITOR_STOP);

	netif_carrier_off(dev);	/* avoids Tx timeouts */
	netif_stop_queue(dev);
#ifdef CONFIG_HXGE_NAPI
	netif_poll_disable(dev);
#endif

	hxge_irq_disable(adapter);

	/* Reset the adapter */
	hxge_disable_adapter(adapter);
}
int mv_gtw_stop( struct net_device *dev ) { mv_eth_priv *priv = MV_ETH_PRIV(dev); struct mv_vlan_cfg *vlan_cfg = MV_NETDEV_VLAN(dev); printk("mv_gateway: stopping %s\n",dev->name); /* stop upper layer */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) netif_poll_disable(dev); #endif netif_carrier_off(dev); netif_stop_queue(dev); /* stop switch from forwarding packets from this VLAN toward CPU port */ if( gfdbFlushInDB(qd_dev, GT_FLUSH_ALL, MV_GTW_VLAN_TO_GROUP(vlan_cfg->vlan_grp_id)) != GT_OK) { printk("gfdbFlushInDB failed\n"); } if(priv->net_dev == dev) { struct net_device *main_dev = mv_gtw_main_net_dev_get(); if(main_dev == NULL) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) napi_disable(&priv->napi); #endif mv_eth_mask_interrupts(priv); priv->flags &= ~MV_ETH_F_TIMER; del_timer(&priv->timer); free_irq( dev->irq, priv ); } else { priv->net_dev = main_dev; } } return 0; }