/*
 * Bring the hxge logical interface up: start link monitoring, enable the
 * adapter hardware and its interrupts, then open the device to the Linux
 * network stack.
 *
 * Returns 0 on success, -1 if the adapter could not be enabled.
 */
static int hxge_lif_up(struct hxge_adapter *hxgep)
{
	struct net_device *netdev = hxgep->netdev;

	/* Start link monitoring; -1 presumably marks the link state as
	 * unknown so the first poll reports a transition — TODO confirm */
	hxgep->prev_link_status = -1;
	hxge_link_monitor(hxgep, LINK_MONITOR_START);

#ifdef CONFIG_HXGE_NAPI
	netif_poll_enable(netdev);
#endif

	/* NOTE(review): on failure the link monitor (and NAPI polling, if
	 * configured) is left running with no rollback here — verify the
	 * caller tears these down on error. */
	if (hxge_enable_adapter(hxgep))
		return -1;

	hxge_irq_enable(hxgep);

	/* Enable Linux network stack */
	netif_carrier_on(netdev);
	netif_start_queue(netdev);
	set_bit(HXGE_DEVICE_UP, &hxgep->state);
	return 0;
}
/*
 * Stop the accelerated fast path for this vnic: quiesce its interrupts,
 * disable accelerated transmit (dropping any in-flight tx skb), and clear
 * poll_enabled so the poll handler stands down.
 *
 * Lock order here is vnic->tx_lock, then the netfront np->tx_lock nested
 * inside it; poll_enabled is modified only with polling disabled and
 * irq_enabled_lock held.
 */
void vnic_stop_fastpath(netfront_accel_vnic *vnic)
{
	struct net_device *net_dev = vnic->net_dev;
	struct netfront_info *np = (struct netfront_info *)netdev_priv(net_dev);
	unsigned long flags1, flags2;

	DPRINTK("%s\n", __FUNCTION__);

	vnic_stop_interrupts(vnic);

	/* Disable accelerated tx; take both our lock and netfront's tx lock
	 * so neither path can be mid-transmit while we drop the skb. */
	spin_lock_irqsave(&vnic->tx_lock, flags1);
	vnic->tx_enabled = 0;
	spin_lock_irqsave(&np->tx_lock, flags2);
	if (vnic->tx_skb != NULL) {
		/* Drop the pending skb and wake the queue if netfront can
		 * accept more packets now. */
		dev_kfree_skb_any(vnic->tx_skb);
		vnic->tx_skb = NULL;
		if (netfront_check_queue_ready(net_dev)) {
			netif_wake_queue(net_dev);
			NETFRONT_ACCEL_STATS_OP
				(vnic->stats.queue_wakes++);
		}
	}
	spin_unlock_irqrestore(&np->tx_lock, flags2);
	spin_unlock_irqrestore(&vnic->tx_lock, flags1);

	/* Must prevent polls and hold lock to modify poll_enabled */
	netif_poll_disable(net_dev);
	spin_lock_irqsave(&vnic->irq_enabled_lock, flags1);
	vnic->poll_enabled = 0;
	spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1);
	netif_poll_enable(net_dev);
}
/*
 * Network-device open handler for the voicebus debug netdev: enable
 * polling (legacy netif_poll_enable before 2.6.24, NAPI afterwards).
 *
 * Always returns 0.
 */
static int vb_net_up(struct net_device *netdev)
{
	struct voicebus *vb = voicebus_from_netdev(netdev);
	dev_dbg(&vb->pdev->dev, "%s\n", __func__);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
	netif_poll_enable(netdev);
#else
	napi_enable(&vb->napi);
#endif
	return 0;
}
/*
 * Start a Marvell gateway VLAN interface: open the Linux queue, enable
 * polling, program the switch MAC database so unicast (our MAC) and L2
 * broadcast frames reach the CPU port, arm the periodic driver timer and,
 * for the first interface up, hook the shared MAC port interrupt.
 *
 * Always returns 0; a failed request_irq is logged but not propagated.
 */
int mv_gtw_start( struct net_device *dev )
{
	mv_eth_priv *priv = MV_ETH_PRIV(dev);
	struct mv_vlan_cfg *vlan_cfg = MV_NETDEV_VLAN(dev);
	unsigned char broadcast[6] = {0xff,0xff,0xff,0xff,0xff,0xff};

	printk("mv_gateway: starting %s\n",dev->name);

	/* start upper layer */
	netif_carrier_on(dev);
	netif_wake_queue(dev);

	/* Enable polling: legacy API before 2.6.24, otherwise NAPI.  NAPI is
	 * shared across gateway interfaces, so only enable it from the
	 * owning device (or when no other interface is running). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	netif_poll_enable(dev);
#else
	if ( (priv->net_dev == dev) || !netif_running(priv->net_dev) ) {
		napi_enable(&priv->napi);
	}
#endif

	/* Add our MAC addr to the VLAN DB at switch level to forward packets with this DA */
	/* to CPU port by using the tunneling feature. The device is always in promisc mode. */
	mv_gtw_set_mac_addr_to_switch(dev->dev_addr,
				      MV_GTW_VLAN_TO_GROUP(vlan_cfg->vlan_grp_id),
				      (1<<SWITCH_PORT_CPU), 1);

	/* We also need to allow L2 broadcasts coming up for this interface */
	mv_gtw_set_mac_addr_to_switch(broadcast,
				      MV_GTW_VLAN_TO_GROUP(vlan_cfg->vlan_grp_id),
				      vlan_cfg->ports_mask|(1<<SWITCH_PORT_CPU), 1);

	/* Arm the periodic driver timer once; MV_ETH_F_TIMER guards against
	 * double add_timer when several interfaces start. */
	if (!(priv->flags & MV_ETH_F_TIMER)) {
		priv->timer.expires = jiffies + ((HZ*CONFIG_MV_ETH_TIMER_PERIOD)/1000); /*ms*/
		add_timer( &(priv->timer) );
		priv->flags |= MV_ETH_F_TIMER;
	}

	/* First interface up owns the shared port IRQ. */
	if ( (priv->net_dev == dev) || !netif_running(priv->net_dev) ) {
		priv->net_dev = dev;

		/* connect to MAC port interrupt line */
		if ( request_irq( ETH_PORT_IRQ_NUM(priv->port), mv_eth_interrupt_handler,
				  (IRQF_DISABLED | IRQF_SAMPLE_RANDOM), "mv_gateway", priv) ) {
			printk(KERN_ERR "failed to assign irq%d\n", ETH_PORT_IRQ_NUM(priv->port));
		}

		/* unmask interrupts */
		mv_eth_unmask_interrupts(priv);
	}
	return 0;
}
/*
 * Start the accelerated fast path for this vnic: enable accelerated
 * transmit, mark the poll handler enabled, then start interrupts.
 *
 * Fix: poll_enabled was previously written without irq_enabled_lock,
 * although the paired vnic_stop_fastpath() documents that the field
 * "must" be modified with polls prevented AND the lock held, and takes
 * irq_enabled_lock for its own write.  Take the same lock here so both
 * writers of poll_enabled follow one discipline.
 */
static void vnic_start_fastpath(netfront_accel_vnic *vnic)
{
	struct net_device *net_dev = vnic->net_dev;
	unsigned long flags;

	DPRINTK("%s\n", __FUNCTION__);

	/* Enable accelerated transmit under the tx lock. */
	spin_lock_irqsave(&vnic->tx_lock, flags);
	vnic->tx_enabled = 1;
	spin_unlock_irqrestore(&vnic->tx_lock, flags);

	/* Must prevent polls and hold lock to modify poll_enabled
	 * (mirrors vnic_stop_fastpath). */
	netif_poll_disable(net_dev);
	spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
	vnic->poll_enabled = 1;
	spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
	netif_poll_enable(net_dev);

	vnic_start_interrupts(vnic);
}
/***********************************************************
 * mv_eth_start --                                         *
 *   start a network device. connect and enable interrupts *
 *   set hw defaults. fill rx buffers. restart phy link    *
 *   auto neg. set device link flags. report status.       *
 ***********************************************************/
int mv_eth_start(struct net_device *dev)
{
	mv_eth_priv *priv = MV_ETH_PRIV(dev);
	int err;

	ETH_DBG( ETH_DBG_LOAD, ("%s: starting... ", dev->name ) );

	/* in default link is down */
	netif_carrier_off(dev);

	/* Stop the TX queue - it will be enabled upon PHY status change after link-up interrupt/timer */
	netif_stop_queue(dev);

	/* enable polling on the port, must be used after netif_poll_disable */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	netif_poll_enable(dev);
#else
	napi_enable(&priv->napi);
#endif

	/* fill rx buffers, start rx/tx activity, set coalescing */
	/* NOTE(review): from here on the error path does not disable NAPI,
	 * stop the timer, or undo started internals — verify the close/stop
	 * path cleans these up after a failed start. */
	if (mv_eth_start_internals( priv, dev->mtu) != 0) {
		printk(KERN_ERR "%s: start internals failed\n", dev->name);
		goto error;
	}

	if (priv->flags & MV_ETH_F_FORCED_LINK) {
		/* Forced link: no PHY event will arrive, open the queue now. */
		netif_carrier_on(dev);
		netif_wake_queue(dev);
	}
	else {
#ifdef CONFIG_MV_ETH_TOOL
		if ((err = mv_eth_tool_restore_settings(dev)) != 0) {
			printk(KERN_ERR "%s: mv_eth_tool_restore_settings failed %d\n",
			       dev->name, err);
			goto error;
		}
		if (priv->autoneg_cfg == AUTONEG_DISABLE) {
			/* With autoneg off the link state is already known;
			 * open the queue immediately if the link is up. */
			if (priv->flags & MV_ETH_F_LINK_UP) {
				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		}
#else
		mv_eth_restart_autoneg(priv->port);
#endif /* CONFIG_MV_ETH_TOOL */
	}

	/* Arm the periodic driver timer once (guarded by MV_ETH_F_TIMER). */
	if (!(priv->flags & MV_ETH_F_TIMER)) {
		priv->timer.expires = jiffies + ((HZ*CONFIG_MV_ETH_TIMER_PERIOD) / 1000); /* ms */
		add_timer(&priv->timer);
		priv->flags |= MV_ETH_F_TIMER;
	}

	/* connect to port interrupt line */
	if (request_irq(dev->irq, mv_eth_interrupt_handler,
			(IRQF_DISABLED | IRQF_SAMPLE_RANDOM), "mv_ethernet", priv)) {
		printk( KERN_ERR "cannot assign irq%d to %s port%d\n",
			dev->irq, dev->name, priv->port );
		dev->irq = 0;
		goto error;
	}

	mv_eth_unmask_interrupts(priv);

	ETH_DBG( ETH_DBG_LOAD, ("%s: start ok\n", dev->name) );

	printk(KERN_NOTICE "%s: started\n", dev->name);

	return 0;

error:
	printk( KERN_ERR "%s: start failed\n", dev->name );
	return -1;
}