static void usbnet_terminate_urbs(struct usbnet *dev) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); DECLARE_WAITQUEUE(wait, current); int temp; /* ensure there are no more active urbs */ add_wait_queue(&unlink_wakeup, &wait); set_current_state(TASK_UNINTERRUPTIBLE); dev->wait = &unlink_wakeup; temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); /* maybe wait for deletions to finish. */ while (!skb_queue_empty(&dev->rxq) && !skb_queue_empty(&dev->txq) && !skb_queue_empty(&dev->done)) { schedule_timeout(UNLINK_TIMEOUT_MS); set_current_state(TASK_UNINTERRUPTIBLE); if (netif_msg_ifdown(dev)) devdbg(dev, "waited for %d urb completions", temp); } set_current_state(TASK_RUNNING); dev->wait = NULL; remove_wait_queue(&unlink_wakeup, &wait); }
/* Completion handler for the interrupt/status endpoint.
 * On success the message is handed to the minidriver's ->status() hook;
 * the URB is then resubmitted while the interface is still up.
 * Runs in interrupt context (hence GFP_ATOMIC).
 */
static void intr_complete(struct urb *urb)
#endif  /* closes a kernel-version #if around the callback signature (opening not visible in this chunk) */
{
    struct usbnet *dev = urb->context;
    int status = urb->status;

    switch (status) {
    /* success */
    case 0:
        dev->driver_info->status(dev, urb);
        break;

    /* software-driven interface shutdown */
    case -ENOENT:       /* urb killed */
    case -ESHUTDOWN:    /* hardware gone */
        if (netif_msg_ifdown(dev))
            devdbg(dev, "intr shutdown, code %d", status);
        return;         /* do not resubmit */

    /* NOTE: not throttling like RX/TX, since this endpoint
     * already polls infrequently */
    default:
        devdbg(dev, "intr status %d", status);
        break;
    }

    /* interface went down between submit and completion: stop polling */
    if (!netif_running(dev->net))
        return;

    /* clear the buffer so stale status bytes are never re-processed */
    memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
    status = usb_submit_urb(urb, GFP_ATOMIC);
    if (status != 0 && netif_msg_timer(dev))
        deverr(dev, "intr resubmit --> %d", status);
}
static int lpc_eth_close(struct net_device *ndev) { unsigned long flags; struct netdata_local *pldat = netdev_priv(ndev); if (netif_msg_ifdown(pldat)) dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name); napi_disable(&pldat->napi); netif_stop_queue(ndev); if (pldat->phy_dev) phy_stop(pldat->phy_dev); spin_lock_irqsave(&pldat->lock, flags); __lpc_eth_reset(pldat); netif_carrier_off(ndev); writel(0, LPC_ENET_MAC1(pldat->net_base)); writel(0, LPC_ENET_MAC2(pldat->net_base)); spin_unlock_irqrestore(&pldat->lock, flags); __lpc_eth_clock_enable(pldat, false); return 0; }
/* usbnet_stop - .ndo_stop for usbnet-based devices
 * @net: the network interface being brought down
 *
 * Stops the tx queue, lets the minidriver shut down (e.g. turn a radio
 * off), unlinks/kills all pending URBs, neutralizes deferred work, and
 * releases the runtime-PM reference unless the minidriver manages power
 * itself.  Always returns 0.
 */
int usbnet_stop (struct net_device *net)
{
    struct usbnet *dev = netdev_priv(net);
    struct driver_info *info = dev->driver_info;
    int retval;

    netif_stop_queue (net);

    if (netif_msg_ifdown (dev))
        devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
            net->stats.rx_packets, net->stats.tx_packets,
            net->stats.rx_errors, net->stats.tx_errors
            );

    /* allow minidriver to stop correctly (wireless devices to turn off
     * radio etc) */
    if (info->stop) {
        retval = info->stop(dev);
        if (retval < 0 && netif_msg_ifdown(dev))
            devinfo(dev,
                "stop fail (%d) usbnet usb-%s-%s, %s",
                retval,
                dev->udev->bus->bus_name, dev->udev->devpath,
                info->description);
    }

    /* some minidrivers unlink their own urbs (FLAG_AVOID_UNLINK_URBS) */
    if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
        usbnet_terminate_urbs(dev);

    usb_kill_urb(dev->interrupt);

    usbnet_purge_paused_rxq(dev);

    /* deferred work (task, timer, softirq) must also stop.
     * can't flush_scheduled_work() until we drop rtnl (later),
     * else workers could deadlock; so make workers a NOP. */
    dev->flags = 0;
    del_timer_sync (&dev->delay);
    tasklet_kill (&dev->bh);

    /* a minidriver with ->manage_power owns runtime PM; otherwise drop
     * the autopm reference taken at open */
    if (info->manage_power)
        info->manage_power(dev, 0);
    else
        usb_autopm_put_interface(dev->intf);

    return 0;
}
/* .ndo_stop (older usbnet variant): stop tx, unlink all rx/tx URBs and
 * wait for the completion handlers to drain the queues, kill the
 * interrupt URB, then neutralize deferred work.  Always returns 0.
 * (The wait-queue handshake here was later factored out into
 * usbnet_terminate_urbs in newer trees.)
 */
static int usbnet_stop (struct net_device *net)
{
    struct usbnet *dev = netdev_priv(net);
    int temp;
    DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
    DECLARE_WAITQUEUE (wait, current);

    netif_stop_queue (net);

    if (netif_msg_ifdown (dev))
        devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
            dev->stats.rx_packets, dev->stats.tx_packets,
            dev->stats.rx_errors, dev->stats.tx_errors
            );

    // ensure there are no more active urbs
    add_wait_queue (&unlink_wakeup, &wait);
    dev->wait = &unlink_wakeup;  /* completion path wakes this head */
    temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);

    // maybe wait for deletions to finish.
    // NOTE(review): exits as soon as ANY queue empties (&&, not ||) —
    // matches the other usbnet_stop variants in this file.
    while (!skb_queue_empty(&dev->rxq) && !skb_queue_empty(&dev->txq)
            && !skb_queue_empty(&dev->done)) {
        msleep(UNLINK_TIMEOUT_MS);
        if (netif_msg_ifdown (dev))
            devdbg (dev, "waited for %d urb completions", temp);
    }
    dev->wait = NULL;
    remove_wait_queue (&unlink_wakeup, &wait);

    usb_kill_urb(dev->interrupt);

    /* deferred work (task, timer, softirq) must also stop.
     * can't flush_scheduled_work() until we drop rtnl (later),
     * else workers could deadlock; so make workers a NOP.
     */
    dev->flags = 0;
    del_timer_sync (&dev->delay);
    tasklet_kill (&dev->bh);

    return 0;
}
/* .ndo_stop: drop the carrier and stop the tx queue; no hardware
 * teardown happens here.  Always returns 0. */
static INT bcm_close(struct net_device *dev)
{
    struct bcm_mini_adapter *adapter = GET_BCM_ADAPTER(dev);

    if (netif_msg_ifdown(adapter))
        pr_info(PFX "%s: disabling interface\n", dev->name);

    netif_carrier_off(dev);
    netif_stop_queue(dev);

    return 0;
}
/* .ndo_stop: stop tx, cancel the poll timer and any scheduled work, and
 * release a pending tx skb — all under the driver mutex. */
static int cyrf6936_net_stop(struct net_device *dev)
{
    struct cyrf6936_net *priv = netdev_priv(dev);

    if (netif_msg_ifdown(priv))
        dev_dbg(&priv->netdev->dev, "closing network device\n");

    mutex_lock(&priv->lock);
    netif_stop_queue(dev);
    /* in polled mode the timer must not fire after close */
    if (priv->pollmode)
        del_timer_sync(&priv->poll_timer);
    flush_scheduled_work();
    /* free a tx packet the hardware never consumed */
    if (priv->tx_skb)
        dev_kfree_skb(priv->tx_skb);
    mutex_unlock(&priv->lock);

    return 0;
}
/* Stop the interface.
 * Called when the interface is brought down: stop tx, drop the carrier,
 * halt the PHY/MDIO, shut the MAC down and release the IRQ.
 */
static int emac_stop(struct net_device *ndev)
{
    struct emac_board_info *priv = netdev_priv(ndev);

    if (netif_msg_ifdown(priv))
        dev_dbg(priv->dev, "shutting down %s\n", ndev->name);

    netif_stop_queue(ndev);
    netif_carrier_off(ndev);

    phy_stop(ndev->phydev);
    emac_mdio_remove(ndev);

    emac_shutdown(ndev);

    free_irq(ndev->irq, ndev);

    return 0;
}
static int c2_down(struct net_device *netdev) { struct c2_port *c2_port = netdev_priv(netdev); struct c2_dev *c2dev = c2_port->c2dev; if (netif_msg_ifdown(c2_port)) pr_debug("%s: disabling interface\n", netdev->name); c2_tx_interrupt(netdev); netif_stop_queue(netdev); writel(1, c2dev->regs + C2_IDIS); writel(0, c2dev->regs + C2_NIMR0); c2_reset(c2_port); c2_tx_clean(c2_port); c2_rx_clean(c2_port); kfree(c2_port->rx_ring.start); kfree(c2_port->tx_ring.start); pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem, c2_port->dma); return 0; }
/* Bring the c2 interface down: drain tx, mask interrupts, reset the
 * adapter and free the host descriptor rings.  Always returns 0. */
static int c2_down(struct net_device *netdev)
{
    struct c2_port *c2_port = netdev_priv(netdev);
    struct c2_dev *c2dev = c2_port->c2dev;

    if (netif_msg_ifdown(c2_port))
        pr_debug("%s: disabling interface\n", netdev->name);

    /* Wait for all the queued packets to get sent */
    c2_tx_interrupt(netdev);

    /* Disable network packets */
    netif_stop_queue(netdev);

    /* Disable IRQs by clearing the interrupt mask */
    writel(1, c2dev->regs + C2_IDIS);
    writel(0, c2dev->regs + C2_NIMR0);

    /* missing: Stop transmitter */
    /* missing: Stop receiver */

    /* Reset the adapter, ensures the driver is in sync with the RXP */
    c2_reset(c2_port);

    /* missing: Turn off LEDs here */

    /* Free all buffers in the host descriptor rings */
    c2_tx_clean(c2_port);
    c2_rx_clean(c2_port);

    /* Free the host descriptor rings */
    kfree(c2_port->rx_ring.start);
    kfree(c2_port->tx_ring.start);
    pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
                        c2_port->dma);

    return 0;
}
static int cp_close (struct net_device *dev) { struct cp_private *cp = netdev_priv(dev); unsigned long flags; if (netif_msg_ifdown(cp)) printk(KERN_DEBUG "%s: disabling interface\n", dev->name); spin_lock_irqsave(&cp->lock, flags); netif_stop_queue(dev); netif_carrier_off(dev); cp_stop_hw(cp); spin_unlock_irqrestore(&cp->lock, flags); synchronize_irq(dev->irq); free_irq(dev->irq, dev); cp_free_rings(cp); return 0; }
/*------------------------------------------------------------
 * Prototype:    void intr_completion(struct urb *purb)
 * Description:  completion routine of read_interrupt_ndis_msg; decodes
 *               CDC notifications arriving on the interrupt endpoint and
 *               schedules work on RESPONSE_AVAILABLE.
 * Input:        the submitted urb pointer
 * Output:       valid command notifications
 * Return:       none
 *------------------------------------------------------------*/
static void intr_completion(struct urb * purb)
{
    int status = 0;
    /* <DTS2013011408161 move urb request to ndistty z00185914 20130125 BEGIN */
    int urbstatus = purb->status;
    struct usb_ctrlrequest * ctlreq;
    /* NOTE(review): function-static counter rate-limits the zero-length
     * log below to 10 prints for the module's lifetime; it is never
     * reset between connections — confirm intended. */
    static int i = 0;

    /* urb killed or hardware gone: do not resubmit */
    if (-ENOENT == urbstatus || -ESHUTDOWN == urbstatus)
    {
        if (netif_msg_ifdown (hwcdc_net)){
            printk("[%s][%d] intr shutdown, code %d\r\n",__FILE__,__LINE__,urbstatus);
        }
        return;
    }
    /* DTS2013011408161 move urb request to ndistty z00185914 20130125 END> */

    if (purb->actual_length == 0)
    {
        if (i++ < 10)
        {
            printk("%s -- i = %d\n", __FUNCTION__, i);
        }
        /* NOTE(review): jumping to readtimer skips the resubmit block
         * below, so a zero-length interrupt stops further polling —
         * verify this is intentional. */
        goto readtimer;/* lint !e801 */
    }
    else
    {
        ctlreq = (struct usb_ctrlrequest *)purb->transfer_buffer;
        //printk("Request = 0x%02x, Request type = 0x%02x\n", ctlreq->bRequest, ctlreq->bRequestType);

        /* 0xa1 = class-specific, interface, device-to-host notification */
        if (0xa1 == ctlreq->bRequestType)
        {
            switch (ctlreq->bRequest)
            {
                case CDC_NOTIFICATION_SERIAL_STATE:
                {
                    VIEW_TIMER("CDC_NOTIFICATION_SERIAL_STATE");
                    break;
                }
                case CDC_NOTIFICATION_NETWORK_CONNECTION:
                {
                    VIEW_TIMER("CDC_NOTIFICATION_NETWORK_CONNECTION");
                    break;
                }
                case CDC_NOTIFICATION_CONNECTION_SPD_CHG:
                {
                    VIEW_TIMER("CDC_NOTIFICATION_CONNECTION_SPD_CHG");
                    break;
                }
                case CDC_NOTIFICATION_RESPONSE_AVAILABLE:
                {
                    VIEW_TIMER("CDC available message");
                    /* only act while the connection is up; the work item
                     * was passed in as the urb context */
                    if(connect_stat)
                    {
                        VIEW_TIMER("CDC_NOTIFICATION_RESPONSE_AVAILABLE: handle_response_available\n");
                        // handle_response_available();/* DTS2010112205139.NDISTTY: mayang 2010-11-20 Mod */
                        schedule_work((struct work_struct*)purb->context);
                    }
                    break;
                }
                default:
                {
                    VIEW_TIMER("CDC, default case.");
                    break;
                }
            }
        }
    }

    /* re-submit intr urb */
    if(connect_stat)
    {
        memset(purb->transfer_buffer, 0, purb->transfer_buffer_length);
        status = usb_submit_urb (purb, GFP_ATOMIC);
        /* <DTS2013021703454 optimize NDIS dial-up code z00185914 20130217 DELETE 1 ROW */
        if (status != 0)
        {
            printk("%s - resubmit intr urb failed, status=%d\n", __FUNCTION__, status);
        }
    }

readtimer:
    VIEW_TIMER("CDC message end");
}
/* .ndo_stop for the ASIX usbnet fork: stop tx, let the minidriver shut
 * down, unlink all rx/tx URBs and wait for the queues to drain, kill the
 * interrupt URB, then neutralize deferred work.  Always returns 0. */
static int axusbnet_stop(struct net_device *net)
{
    struct usbnet *dev = netdev_priv(net);
    struct driver_info *info = dev->driver_info;
    int temp;
    int retval;
    /* on-stack wait-queue initializer only exists on newer kernels */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
    DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
#else
    DECLARE_WAIT_QUEUE_HEAD(unlink_wakeup);
#endif
    DECLARE_WAITQUEUE(wait, current);

    netif_stop_queue(net);

    if (netif_msg_ifdown(dev))
        devinfo(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
            dev->stats.rx_packets, dev->stats.tx_packets,
            dev->stats.rx_errors, dev->stats.tx_errors);

    /* allow minidriver to stop correctly (wireless devices to turn off
     * radio etc) */
    if (info->stop) {
        retval = info->stop(dev);
        if (retval < 0 && netif_msg_ifdown(dev))
            devinfo(dev,
                "stop fail (%d) usbnet usb-%s-%s, %s",
                retval,
                dev->udev->bus->bus_name, dev->udev->devpath,
                info->description);
    }

    if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) {
        /* ensure there are no more active urbs */
        add_wait_queue(&unlink_wakeup, &wait);
        dev->wait = &unlink_wakeup;  /* completion path wakes this head */
        temp = unlink_urbs(dev, &dev->txq) +
            unlink_urbs(dev, &dev->rxq);

        /* maybe wait for deletions to finish.
         * NOTE(review): exits when ANY queue is empty (&&, not ||) —
         * matches the other usbnet_stop variants in this file. */
        while (!skb_queue_empty(&dev->rxq) &&
               !skb_queue_empty(&dev->txq) &&
               !skb_queue_empty(&dev->done)) {
            msleep(UNLINK_TIMEOUT_MS);
            if (netif_msg_ifdown(dev))
                devdbg(dev, "waited for %d urb completions",
                       temp);
        }
        dev->wait = NULL;
        remove_wait_queue(&unlink_wakeup, &wait);
    }

    usb_kill_urb(dev->interrupt);

    /* deferred work (task, timer, softirq) must also stop.
     * can't flush_scheduled_work() until we drop rtnl (later),
     * else workers could deadlock; so make workers a NOP. */
    dev->flags = 0;
    del_timer_sync(&dev->delay);
    tasklet_kill(&dev->bh);

    return 0;
}
/* Completion handler for bulk-in URBs (ASIX fork).  Classifies the URB
 * status, queues the skb for the bottom half via defer_bh(), and either
 * resubmits the URB for the next receive or frees it.  Interrupt context.
 */
static void rx_complete(struct urb *urb)
#endif  /* closes a kernel-version #if around the callback signature (opening not visible in this chunk) */
{
    struct sk_buff *skb = (struct sk_buff *) urb->context;
    struct skb_data *entry = (struct skb_data *) skb->cb;
    struct usbnet *dev = entry->dev;
    int urb_status = urb->status;

    skb_put(skb, urb->actual_length);
    entry->state = rx_done;
    entry->urb = NULL;

    switch (urb_status) {
    /* success */
    case 0:
        if (skb->len < dev->net->hard_header_len) {
            /* runt: shorter than a link-layer header */
            entry->state = rx_cleanup;
            dev->stats.rx_errors++;
            dev->stats.rx_length_errors++;
            if (netif_msg_rx_err(dev))
                devdbg(dev, "rx length %d", skb->len);
        }
        break;

    /* stalls need manual reset. this is rare ... except that
     * when going through USB 2.0 TTs, unplug appears this way.
     * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
     * storm, recovering as needed. */
    case -EPIPE:
        dev->stats.rx_errors++;
        axusbnet_defer_kevent(dev, EVENT_RX_HALT);
        /* FALLTHROUGH */

    /* software-driven interface shutdown */
    case -ECONNRESET:    /* async unlink */
    case -ESHUTDOWN:     /* hardware gone */
        if (netif_msg_ifdown(dev))
            devdbg(dev, "rx shutdown, code %d", urb_status);
        goto block;

    /* we get controller i/o faults during khubd disconnect() delays.
     * throttle down resubmits, to avoid log floods; just temporarily,
     * so we still recover when the fault isn't a khubd delay. */
    case -EPROTO:
    case -ETIME:
    case -EILSEQ:
        dev->stats.rx_errors++;
        if (!timer_pending(&dev->delay)) {
            mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES);
            if (netif_msg_link(dev))
                devdbg(dev, "rx throttle %d", urb_status);
        }
block:
        /* keep the urb attached so the bottom half can clean it up */
        entry->state = rx_cleanup;
        entry->urb = urb;
        urb = NULL;
        break;

    /* data overrun ... flush fifo? */
    case -EOVERFLOW:
        dev->stats.rx_over_errors++;
        /* FALLTHROUGH */

    default:
        entry->state = rx_cleanup;
        dev->stats.rx_errors++;
        if (netif_msg_rx_err(dev))
            devdbg(dev, "rx status %d", urb_status);
        break;
    }

    defer_bh(dev, skb, &dev->rxq);

    if (urb) {
        /* success path kept the urb: reuse it immediately unless the
         * interface stopped or rx is halted */
        if (netif_running(dev->net) &&
            !test_bit(EVENT_RX_HALT, &dev->flags)) {
            rx_submit(dev, urb, GFP_ATOMIC);
            return;
        }
        usb_free_urb(urb);
    }
    if (netif_msg_rx_err(dev))
        devdbg(dev, "no read resubmitted");
}
/* Allocate an rx skb and submit @urb for a bulk-in transfer (ASIX fork).
 * On allocation failure the bulk-in size is stepped down (AX88772B) and a
 * deferred EVENT_RX_MEMORY retry is scheduled.  On submit failure the skb
 * and urb are released.  May run in atomic context (uses GFP_ATOMIC).
 */
static void rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
{
    struct sk_buff *skb;
    struct skb_data *entry;
    int retval = 0;
    unsigned long lockflags;
    size_t size = dev->rx_urb_size;
    struct driver_info *info = dev->driver_info;
    u8 align;

#if (AX_FORCE_BUFF_ALIGN)
    align = 0;
#else
    /* only pad for IP alignment when the hardware doesn't align for us */
    if (!(info->flags & FLAG_HW_IP_ALIGNMENT))
        align = NET_IP_ALIGN;
    else
        align = 0;
#endif

    skb = alloc_skb(size + align, flags);
    if (skb == NULL) {
        if (netif_msg_rx_err(dev))
            devdbg(dev, "no rx skb");
        /* shrink the bulk-in buffer so smaller allocations can
         * succeed under memory pressure; 0x2A presumably programs
         * the AX88772B bulk-in size — TODO confirm against datasheet */
        if ((dev->rx_urb_size > 2048) && dev->rx_size) {
            dev->rx_size--;
            dev->rx_urb_size = AX88772B_BULKIN_SIZE[dev->rx_size].size;
            ax8817x_write_cmd_async(dev, 0x2A,
                AX88772B_BULKIN_SIZE[dev->rx_size].byte_cnt,
                AX88772B_BULKIN_SIZE[dev->rx_size].threshold,
                0, NULL);
        }
        /* NOTE(review): elsewhere dev->flags bits are tested with
         * test_bit(EVENT_*, ...), but here EVENT_RX_MEMORY is used as a
         * direct mask — verify this checks the intended bit. */
        if (!(dev->flags & EVENT_RX_MEMORY))
            axusbnet_defer_kevent(dev, EVENT_RX_MEMORY);
        usb_free_urb(urb);
        return;
    }

    if (align)
        skb_reserve(skb, NET_IP_ALIGN);

    entry = (struct skb_data *) skb->cb;
    entry->urb = urb;
    entry->dev = dev;
    entry->state = rx_start;
    entry->length = 0;

    usb_fill_bulk_urb(urb, dev->udev, dev->in, skb->data, size,
                      rx_complete, skb);

    spin_lock_irqsave(&dev->rxq.lock, lockflags);

    if (netif_running(dev->net) && netif_device_present(dev->net) &&
        !test_bit(EVENT_RX_HALT, &dev->flags)) {
        switch (retval = usb_submit_urb(urb, GFP_ATOMIC)) {
        case -EPIPE:
            axusbnet_defer_kevent(dev, EVENT_RX_HALT);
            break;
        case -ENOMEM:
            axusbnet_defer_kevent(dev, EVENT_RX_MEMORY);
            break;
        case -ENODEV:
            if (netif_msg_ifdown(dev))
                devdbg(dev, "device gone");
            netif_device_detach(dev->net);
            break;
        default:
            if (netif_msg_rx_err(dev))
                devdbg(dev, "rx submit, %d", retval);
            tasklet_schedule(&dev->bh);
            break;
        case 0:
            /* submitted: track the skb on the rx queue */
            __skb_queue_tail(&dev->rxq, skb);
        }
    } else {
        if (netif_msg_ifdown(dev))
            devdbg(dev, "rx: stopped");
        retval = -ENOLINK;
    }
    spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

    /* any failure above: the skb was never queued, release both */
    if (retval) {
        dev_kfree_skb_any(skb);
        usb_free_urb(urb);
    }
}
/* Allocate an IP-aligned rx skb and submit @urb for a bulk-in transfer.
 * On failure the skb/urb are released and, for recoverable errors, a
 * deferred kevent retries later.  May run in atomic context. */
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
    struct sk_buff *skb;
    struct skb_data *entry;
    int retval = 0;
    unsigned long lockflags;
    size_t size = dev->rx_urb_size;

    if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
        if (netif_msg_rx_err (dev))
            devdbg (dev, "no rx skb");
        /* retry from the workqueue when memory frees up */
        usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
        usb_free_urb (urb);
        return;
    }
    /* keep the IP header 4-byte aligned after the 14-byte eth header */
    skb_reserve (skb, NET_IP_ALIGN);

    entry = (struct skb_data *) skb->cb;
    entry->urb = urb;
    entry->dev = dev;
    entry->state = rx_start;
    entry->length = 0;

    usb_fill_bulk_urb (urb, dev->udev, dev->in,
        skb->data, size, rx_complete, skb);

    spin_lock_irqsave (&dev->rxq.lock, lockflags);

    if (netif_running (dev->net)
            && netif_device_present (dev->net)
            && !test_bit (EVENT_RX_HALT, &dev->flags)) {
        switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
        case -EPIPE:
            usbnet_defer_kevent (dev, EVENT_RX_HALT);
            break;
        case -ENOMEM:
            usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
            break;
        case -ENODEV:
            if (netif_msg_ifdown (dev))
                devdbg (dev, "device gone");
            netif_device_detach (dev->net);
            break;
        default:
            if (netif_msg_rx_err (dev))
                devdbg (dev, "rx submit, %d", retval);
            tasklet_schedule (&dev->bh);
            break;
        case 0:
            /* submitted: track the skb on the rx queue */
            __skb_queue_tail (&dev->rxq, skb);
        }
    } else {
        if (netif_msg_ifdown (dev))
            devdbg (dev, "rx: stopped");
        retval = -ENOLINK;
    }
    spin_unlock_irqrestore (&dev->rxq.lock, lockflags);

    /* any failure above: the skb was never queued, release both */
    if (retval) {
        dev_kfree_skb_any (skb);
        usb_free_urb (urb);
    }
}
/* Allocate an rx skb (with extra headroom for the Ralink HW-NAT FoE tag
 * when configured) and submit @urb for a bulk-in transfer.  On submit
 * success the skb is queued via __usbnet_queue_skb(), which also sets
 * its state to rx_start. */
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
    struct sk_buff *skb;
    struct skb_data *entry;
    int retval = 0;
    unsigned long lockflags;
    size_t size = dev->rx_urb_size;

#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
    if ((skb = alloc_skb (size + NET_IP_ALIGN + FOE_INFO_LEN, flags)) == NULL) {
#else
    if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
#endif
        if (netif_msg_rx_err (dev))
            devdbg (dev, "no rx skb");
        /* retry from the workqueue when memory frees up */
        usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
        usb_free_urb (urb);
        return;
    }
#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
    skb_reserve (skb, NET_IP_ALIGN + FOE_INFO_LEN);
#else
    skb_reserve (skb, NET_IP_ALIGN);
#endif

    entry = (struct skb_data *) skb->cb;
    entry->urb = urb;
    entry->dev = dev;
    entry->length = 0;

    usb_fill_bulk_urb (urb, dev->udev, dev->in,
        skb->data, size, rx_complete, skb);

    spin_lock_irqsave (&dev->rxq.lock, lockflags);

    if (netif_running (dev->net)
            && netif_device_present (dev->net)
            && !test_bit (EVENT_RX_HALT, &dev->flags)) {
        switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
        case -EPIPE:
            usbnet_defer_kevent (dev, EVENT_RX_HALT);
            break;
        case -ENOMEM:
            usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
            break;
        case -ENODEV:
            if (netif_msg_ifdown (dev))
                devdbg (dev, "device gone");
            netif_device_detach (dev->net);
            break;
        default:
            if (netif_msg_rx_err (dev))
                devdbg (dev, "rx submit, %d", retval);
            tasklet_schedule (&dev->bh);
            break;
        case 0:
            /* queue the skb and mark it rx_start atomically */
            __usbnet_queue_skb(&dev->rxq, skb, rx_start);
        }
    } else {
        if (netif_msg_ifdown (dev))
            devdbg (dev, "rx: stopped");
        retval = -ENOLINK;
    }
    spin_unlock_irqrestore (&dev->rxq.lock, lockflags);

    /* any failure above: the skb was never queued, release both */
    if (retval) {
        dev_kfree_skb_any (skb);
        usb_free_urb (urb);
    }
}

/*-------------------------------------------------------------------------*/

/* Run the minidriver's rx_fixup on a completed receive, then hand the
 * skb to the network stack or count it as an rx error. */
static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
    if (dev->driver_info->rx_fixup
            && !dev->driver_info->rx_fixup (dev, skb))
        goto error;
    // else network stack removes extra byte if we forced a short packet

    if (skb->len)
        usbnet_skb_return (dev, skb);
    else {
        if (netif_msg_rx_err (dev))
            devdbg (dev, "drop");
error:
        dev->stats.rx_errors++;
        skb_queue_tail (&dev->done, skb);
    }
}

/*-------------------------------------------------------------------------*/

/* Completion handler for bulk-in URBs.  Unlike the older variants above,
 * the skb state is carried through defer_bh(), whose return value lets
 * us detect a concurrent unlink (unlink_start) and skip resubmitting. */
static void rx_complete (struct urb *urb)
{
    struct sk_buff *skb = (struct sk_buff *) urb->context;
    struct skb_data *entry = (struct skb_data *) skb->cb;
    struct usbnet *dev = entry->dev;
    int urb_status = urb->status;
    enum skb_state state;

    skb_put (skb, urb->actual_length);
    state = rx_done;
    entry->urb = NULL;

    switch (urb_status) {
    // success
    case 0:
        if (skb->len < dev->net->hard_header_len) {
            /* runt: shorter than a link-layer header */
            state = rx_cleanup;
            dev->stats.rx_errors++;
            dev->stats.rx_length_errors++;
            if (netif_msg_rx_err (dev))
                devdbg (dev, "rx length %d", skb->len);
        }
        break;

    // stalls need manual reset. this is rare ... except that
    // when going through USB 2.0 TTs, unplug appears this way.
    // we avoid the highspeed version of the ETIMEOUT/EILSEQ
    // storm, recovering as needed.
    case -EPIPE:
        dev->stats.rx_errors++;
        usbnet_defer_kevent (dev, EVENT_RX_HALT);
        // FALLTHROUGH

    // software-driven interface shutdown
    case -ECONNRESET:        // async unlink
    case -ESHUTDOWN:         // hardware gone
        if (netif_msg_ifdown (dev))
            devdbg (dev, "rx shutdown, code %d", urb_status);
        goto block;

    // we get controller i/o faults during khubd disconnect() delays.
    // throttle down resubmits, to avoid log floods; just temporarily,
    // so we still recover when the fault isn't a khubd delay.
    case -EPROTO:
    case -ETIME:
    case -EILSEQ:
        dev->stats.rx_errors++;
        if (!timer_pending (&dev->delay)) {
            mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
            if (netif_msg_link (dev))
                devdbg (dev, "rx throttle %d", urb_status);
        }
block:
        /* keep the urb attached so the bottom half can clean it up */
        state = rx_cleanup;
        entry->urb = urb;
        urb = NULL;
        break;

    // data overrun ... flush fifo?
    case -EOVERFLOW:
        dev->stats.rx_over_errors++;
        // FALLTHROUGH

    default:
        state = rx_cleanup;
        dev->stats.rx_errors++;
        if (netif_msg_rx_err (dev))
            devdbg (dev, "rx status %d", urb_status);
        break;
    }

    state = defer_bh(dev, skb, &dev->rxq, state);

    if (urb) {
        /* resubmit unless the interface stopped, rx halted, or an
         * unlink raced with this completion */
        if (netif_running (dev->net)
                && !test_bit (EVENT_RX_HALT, &dev->flags)
                && state != unlink_start) {
            rx_submit (dev, urb, GFP_ATOMIC);
            return;
        }
        usb_free_urb (urb);
    }
    if (netif_msg_rx_err (dev))
        devdbg (dev, "no read resubmitted");
}

/* Completion handler for the interrupt/status endpoint: pass the message
 * to the minidriver's ->status() hook and resubmit.
 * NOTE(review): this variant has no netif_running() guard before the
 * resubmit, unlike the version earlier in this file — confirm which
 * behavior this tree wants. */
static void intr_complete (struct urb *urb)
{
    struct usbnet *dev = urb->context;
    int status = urb->status;

    switch (status) {
    /* success */
    case 0:
        dev->driver_info->status(dev, urb);
        break;

    /* software-driven interface shutdown */
    case -ENOENT:        // urb killed
    case -ESHUTDOWN:     // hardware gone
        if (netif_msg_ifdown (dev))
            devdbg (dev, "intr shutdown, code %d", status);
        return;

    /* NOTE: not throttling like RX/TX, since this endpoint
     * already polls infrequently */
    default:
        devdbg (dev, "intr status %d", status);
        break;
    }

    /* clear the buffer so stale status bytes are never re-processed */
    memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
    status = usb_submit_urb (urb, GFP_ATOMIC);
    if (status != 0 && netif_msg_timer (dev))
        deverr(dev, "intr resubmit --> %d", status);
}

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup
static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
    unsigned long flags;
    struct sk_buff *skb;
    int count = 0;

    spin_lock_irqsave (&q->lock, flags);
    while (!skb_queue_empty(q)) {
        struct skb_data *entry;
        struct urb *urb;
        int retval;

        /* find the next entry not already being unlinked */
        skb_queue_walk(q, skb) {
            entry = (struct skb_data *) skb->cb;
            if (entry->state != unlink_start)
                goto found;
        }
        break;
found:
        entry->state = unlink_start;
        urb = entry->urb;

        /*
         * Get reference count of the URB to avoid it to be
         * freed during usb_unlink_urb, which may trigger
         * use-after-free problem inside usb_unlink_urb since
         * usb_unlink_urb is always racing with .complete
         * handler(include defer_bh).
         */
        usb_get_urb(urb);
        spin_unlock_irqrestore(&q->lock, flags);
        // during some PM-driven resume scenarios,
        // these (async) unlinks complete immediately
        retval = usb_unlink_urb (urb);
        if (retval != -EINPROGRESS && retval != 0)
            devdbg (dev, "unlink urb err, %d", retval);
        else
            count++;
        usb_put_urb(urb);
        spin_lock_irqsave(&q->lock, flags);
    }
    /* NOTE: unlink_urbs continues beyond this chunk of the file
     * (final unlock and `return count` are not visible here). */