static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request *req;
	unsigned long flags;
	int req_cnt = 0;

	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		if (++req_cnt > qlen(dev->gadget))
			break;

		req = container_of(dev->rx_reqs.next, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			spin_lock_irqsave(&dev->req_lock, flags);
			list_add(&req->list, &dev->rx_reqs);
			spin_unlock_irqrestore(&dev->req_lock, flags);
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
static int usbpn_open(struct net_device *dev)
{
	struct usbpn_dev *pnd = netdev_priv(dev);
	int err;
	unsigned i;
	unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber;

	err = usb_set_interface(pnd->usb, num, pnd->active_setting);
	if (err)
		return err;

	for (i = 0; i < rxq_size; i++) {
		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);

		if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
			usb_free_urb(req);
			usbpn_close(dev);
			return -ENOMEM;
		}
		pnd->urbs[i] = req;
	}

	netif_wake_queue(dev);
	return 0;
}
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request *req;
	unsigned long flags;
	int req_cnt = 0;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		/* break the nexus of continuous completion and re-submission */
		if (++req_cnt > qlen(dev->gadget))
			break;

		req = container_of(dev->rx_reqs.next, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			spin_lock_irqsave(&dev->req_lock, flags);
			list_add(&req->list, &dev->rx_reqs);
			spin_unlock_irqrestore(&dev->req_lock, flags);
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
static void usbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   !timer_pending (&dev->delay) &&
		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL)
					rx_submit (dev, urb, GFP_ATOMIC);
			}
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n", temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request *req;
	unsigned long flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct pci_request *req;
	unsigned long flags;

	// printk("%s:%d\n", __func__, __LINE__);

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next, struct pci_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
static int usbsvn_start(struct usbsvn *svn)
{
	struct net_device *dev = svn->netdev;
	int dev_id;
	int i;

	for (dev_id = 0; dev_id < svn->dev_count; dev_id++) {
		for (i = 0; i < rxq_size; i++) {
			int index = dev_id * rxq_size + i;
			struct urb *req = usb_alloc_urb(0, GFP_KERNEL);

			if (!req || rx_submit(svn, dev_id, req, GFP_KERNEL)) {
				usbsvn_stop(svn);
				return -ENOMEM;
			}
			svn->urbs[index] = req;
		}
	}

	netif_wake_queue(dev);
	return 0;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context, *skb2; struct eth_dev *dev = ep->driver_data; int status = req->status; switch (status) { /* normal completion */ case 0: skb_put(skb, req->actual); if (dev->unwrap) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) { status = dev->unwrap(dev->port_usb, skb, &dev->rx_frames); } else { dev_kfree_skb_any(skb); status = -ENOTCONN; } spin_unlock_irqrestore(&dev->lock, flags); } else { skb_queue_tail(&dev->rx_frames, skb); } skb = NULL; skb2 = skb_dequeue(&dev->rx_frames); while (skb2) { if (status < 0 || ETH_HLEN > skb2->len || skb2->len > ETH_FRAME_LEN) { dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; DBG(dev, "rx length %d\n", skb2->len); dev_kfree_skb_any(skb2); goto next_frame; } skb2->protocol = eth_type_trans(skb2, dev->net); dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb2->len; /* no buffer copies needed, unless hardware can't * use skb buffers. */ status = netif_rx(skb2); next_frame: skb2 = skb_dequeue(&dev->rx_frames); } break; /* software-driven interface shutdown */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ VDBG(dev, "rx shutdown, code %d\n", status); goto quiesce; /* for hardware automagic (such as pxa) */ case -ECONNABORTED: /* endpoint reset */ DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: dev_kfree_skb_any(skb); goto clean; /* data overrun */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; /* FALLTHROUGH */ default: dev->net->stats.rx_errors++; DBG(dev, "rx status %d\n", status); break; } if (skb) dev_kfree_skb_any(skb); if (!netif_running(dev->net)) { clean: spin_lock(&dev->req_lock); list_add(&req->list, &dev->rx_reqs); spin_unlock(&dev->req_lock); req = NULL; } if (req) rx_submit(dev, req, GFP_ATOMIC); }
static void rx_complete (struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; int urb_status = urb->status; enum skb_state state; skb_put (skb, urb->actual_length); state = rx_done; entry->urb = NULL; switch (urb_status) { /* success */ case 0: if (skb->len < dev->net->hard_header_len) { state = rx_cleanup; dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); } break; /* stalls need manual reset. this is rare ... except that * when going through USB 2.0 TTs, unplug appears this way. * we avoid the highspeed version of the ETIMEDOUT/EILSEQ * storm, recovering as needed. */ case -EPIPE: dev->net->stats.rx_errors++; usbnet_defer_kevent (dev, EVENT_RX_HALT); // FALLTHROUGH /* software-driven interface shutdown */ case -ECONNRESET: /* async unlink */ case -ESHUTDOWN: /* hardware gone */ netif_dbg(dev, ifdown, dev->net, "rx shutdown, code %d\n", urb_status); goto block; /* we get controller i/o faults during khubd disconnect() delays. * throttle down resubmits, to avoid log floods; just temporarily, * so we still recover when the fault isn't a khubd delay. */ case -EPROTO: case -ETIME: case -EILSEQ: dev->net->stats.rx_errors++; if (!timer_pending (&dev->delay)) { mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); netif_dbg(dev, link, dev->net, "rx throttle %d\n", urb_status); } block: state = rx_cleanup; entry->urb = urb; urb = NULL; break; /* data overrun ... flush fifo? */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; // FALLTHROUGH default: state = rx_cleanup; dev->net->stats.rx_errors++; netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); break; } state = defer_bh(dev, skb, &dev->rxq, state); if (urb) { if (netif_running (dev->net) && !test_bit (EVENT_RX_HALT, &dev->flags) && state != unlink_start) { rx_submit (dev, urb, GFP_ATOMIC); return; } usb_free_urb (urb); } netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); }
/* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, * especially now that control transfers can be queued. */ static void kevent (struct work_struct *work) { struct usbnet *dev = container_of(work, struct usbnet, kevent); int status; /* usb_clear_halt() needs a thread context */ if (test_bit (EVENT_TX_HALT, &dev->flags)) { unlink_urbs (dev, &dev->txq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_pipe; status = usb_clear_halt (dev->udev, dev->out); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_tx_err (dev)) fail_pipe: netdev_err(dev->net, "can't clear tx halt, status %d\n", status); } else { clear_bit (EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN) netif_wake_queue (dev->net); } } if (test_bit (EVENT_RX_HALT, &dev->flags)) { unlink_urbs (dev, &dev->rxq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_halt; status = usb_clear_halt (dev->udev, dev->in); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_rx_err (dev)) fail_halt: netdev_err(dev->net, "can't clear rx halt, status %d\n", status); } else { clear_bit (EVENT_RX_HALT, &dev->flags); tasklet_schedule (&dev->bh); } } /* tasklet could resubmit itself forever if memory is tight */ if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; if (netif_running (dev->net)) urb = usb_alloc_urb (0, GFP_KERNEL); else clear_bit (EVENT_RX_MEMORY, &dev->flags); if (urb != NULL) { clear_bit (EVENT_RX_MEMORY, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_lowmem; rx_submit (dev, urb, GFP_KERNEL); usb_autopm_put_interface(dev->intf); fail_lowmem: tasklet_schedule (&dev->bh); } } if (test_bit (EVENT_LINK_RESET, &dev->flags)) { struct driver_info *info = dev->driver_info; int retval = 0; clear_bit (EVENT_LINK_RESET, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto skip_reset; if(info->link_reset && (retval = info->link_reset(dev)) < 0) { usb_autopm_put_interface(dev->intf); skip_reset: netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n", retval, dev->udev->bus->bus_name, dev->udev->devpath, info->description); } else { usb_autopm_put_interface(dev->intf); } } if (dev->flags) netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); }
static void rx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context, *skb2; struct eth_dev *dev = ep->driver_data; int status = req->status; switch (status) { case 0: skb_put(skb, req->actual); if (dev->unwrap) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) { status = dev->unwrap(dev->port_usb, skb, &dev->rx_frames); } else { dev_kfree_skb_any(skb); status = -ENOTCONN; } spin_unlock_irqrestore(&dev->lock, flags); } else { skb_queue_tail(&dev->rx_frames, skb); } skb = NULL; skb2 = skb_dequeue(&dev->rx_frames); while (skb2) { if (status < 0 || ETH_HLEN > skb2->len || skb2->len > ETH_FRAME_LEN) { dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; DBG(dev, "rx length %d\n", skb2->len); dev_kfree_skb_any(skb2); goto next_frame; } skb2->protocol = eth_type_trans(skb2, dev->net); dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb2->len; status = netif_rx(skb2); next_frame: skb2 = skb_dequeue(&dev->rx_frames); } break; case -ECONNRESET: case -ESHUTDOWN: VDBG(dev, "rx shutdown, code %d\n", status); goto quiesce; case -ECONNABORTED: DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: dev_kfree_skb_any(skb); goto clean; case -EOVERFLOW: dev->net->stats.rx_over_errors++; default: dev->net->stats.rx_errors++; DBG(dev, "rx status %d\n", status); break; } if (skb) dev_kfree_skb_any(skb); if (!netif_running(dev->net)) { clean: spin_lock(&dev->req_lock); list_add(&req->list, &dev->rx_reqs); spin_unlock(&dev->req_lock); req = NULL; } if (req) rx_submit(dev, req, GFP_ATOMIC); }
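#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) /* assumed: restored opening guard implied by the #else/#endif and the INIT_WORK(..., kevent, dev) call below */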
static void kevent(void *data) { struct usbnet *dev = (struct usbnet *)data; #else static void kevent(struct work_struct *work) { struct usbnet *dev = container_of(work, struct usbnet, kevent); #endif int status; /* usb_clear_halt() needs a thread context */ if (test_bit(EVENT_TX_HALT, &dev->flags)) { unlink_urbs(dev, &dev->txq); status = usb_clear_halt(dev->udev, dev->out); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_tx_err(dev)) deverr(dev, "can't clear tx halt, status %d", status); } else { clear_bit(EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN) netif_wake_queue(dev->net); } } if (test_bit(EVENT_RX_HALT, &dev->flags)) { unlink_urbs(dev, &dev->rxq); status = usb_clear_halt(dev->udev, dev->in); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_rx_err(dev)) deverr(dev, "can't clear rx halt, status %d", status); } else { clear_bit(EVENT_RX_HALT, &dev->flags); tasklet_schedule(&dev->bh); } } /* tasklet could resubmit itself forever if memory is tight */ if (test_bit(EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; if (netif_running(dev->net)) urb = usb_alloc_urb(0, GFP_KERNEL); else clear_bit(EVENT_RX_MEMORY, &dev->flags); if (urb != NULL) { clear_bit(EVENT_RX_MEMORY, &dev->flags); #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) urb->transfer_flags |= URB_ASYNC_UNLINK; #endif rx_submit(dev, urb, GFP_KERNEL); tasklet_schedule(&dev->bh); } } if (test_bit(EVENT_LINK_RESET, &dev->flags)) { struct driver_info *info = dev->driver_info; int retval = 0; clear_bit(EVENT_LINK_RESET, &dev->flags); if (info->link_reset) { retval = info->link_reset(dev); if (retval < 0) { devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s", retval, dev->udev->bus->bus_name, dev->udev->devpath, info->description); } } } if (dev->flags) devdbg(dev, "kevent done, flags = 0x%lx", dev->flags); } /*-------------------------------------------------------------------------*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) static void tx_complete(struct urb *urb, struct pt_regs *regs) #else static void tx_complete(struct urb *urb) #endif { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; if (urb->status == 0) { dev->stats.tx_packets++; dev->stats.tx_bytes += entry->length; } else { dev->stats.tx_errors++; switch (urb->status) { case -EPIPE: axusbnet_defer_kevent(dev, EVENT_TX_HALT); break; /* software-driven interface shutdown */ case -ECONNRESET: /* async unlink */ case -ESHUTDOWN: /* hardware gone */ break; /* like rx, tx gets controller i/o faults during khubd delays */ /* and so it uses the same throttling mechanism. */ case -EPROTO: case -ETIME: case -EILSEQ: if (!timer_pending(&dev->delay)) { mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES); if (netif_msg_link(dev)) devdbg(dev, "tx throttle %d", urb->status); } netif_stop_queue(dev->net); break; default: if (netif_msg_tx_err(dev)) devdbg(dev, "tx err %d", entry->urb->status); break; } } urb->dev = NULL; entry->state = tx_done; defer_bh(dev, skb, &dev->txq); } /*-------------------------------------------------------------------------*/ static void axusbnet_tx_timeout(struct net_device *net) { struct usbnet *dev = netdev_priv(net); unlink_urbs(dev, &dev->txq); tasklet_schedule(&dev->bh); /* FIXME: device recovery -- reset? 
*/ } /*-------------------------------------------------------------------------*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) static int #else static netdev_tx_t #endif axusbnet_start_xmit(struct sk_buff *skb, struct net_device *net) { struct usbnet *dev = netdev_priv(net); int length; struct urb *urb = NULL; struct skb_data *entry; struct driver_info *info = dev->driver_info; unsigned long flags; int retval; /* some devices want funky USB-level framing, for */ /* win32 driver (usually) and/or hardware quirks */ if (info->tx_fixup) { skb = info->tx_fixup(dev, skb, GFP_ATOMIC); if (!skb) { if (netif_msg_tx_err(dev)) devdbg(dev, "can't tx_fixup skb"); goto drop; } } length = skb->len; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { if (netif_msg_tx_err(dev)) devdbg(dev, "no urb"); goto drop; } entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; entry->state = tx_start; entry->length = length; usb_fill_bulk_urb(urb, dev->udev, dev->out, skb->data, skb->len, tx_complete, skb); /* don't assume the hardware handles USB_ZERO_PACKET * NOTE: strictly conforming cdc-ether devices should expect * the ZLP here, but ignore the one-byte packet. */ if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) { urb->transfer_buffer_length++; if (skb_tailroom(skb)) { skb->data[skb->len] = 0; __skb_put(skb, 1); } } spin_lock_irqsave(&dev->txq.lock, flags); switch ((retval = usb_submit_urb(urb, GFP_ATOMIC))) { case -EPIPE: netif_stop_queue(net); axusbnet_defer_kevent(dev, EVENT_TX_HALT); break; default: if (netif_msg_tx_err(dev)) devdbg(dev, "tx: submit urb err %d", retval); break; case 0: net->trans_start = jiffies; __skb_queue_tail(&dev->txq, skb); if (dev->txq.qlen >= TX_QLEN(dev)) netif_stop_queue(net); } spin_unlock_irqrestore(&dev->txq.lock, flags); if (retval) { if (netif_msg_tx_err(dev)) devdbg(dev, "drop, code %d", retval); drop: dev->stats.tx_dropped++; if (skb) dev_kfree_skb_any(skb); usb_free_urb(urb); } else if (netif_msg_tx_queued(dev)) { devdbg(dev, "> tx, len %d, type 0x%x", length, skb->protocol); } return NETDEV_TX_OK; } /*-------------------------------------------------------------------------*/ /* tasklet (work deferred from completions, in_irq) or timer */ static void axusbnet_bh(unsigned long param) { struct usbnet *dev = (struct usbnet *) param; struct sk_buff *skb; struct skb_data *entry; while ((skb = skb_dequeue(&dev->done))) { entry = (struct skb_data *) skb->cb; switch (entry->state) { case rx_done: entry->state = rx_cleanup; rx_process(dev, skb); continue; case tx_done: case rx_cleanup: usb_free_urb(entry->urb); dev_kfree_skb(skb); continue; default: devdbg(dev, "bogus skb state %d", entry->state); } } /* waiting for all pending urbs to complete? */ if (dev->wait) { if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) wake_up(dev->wait); /* or are we maybe short a few urbs? 
*/ } else if (netif_running(dev->net) && netif_device_present(dev->net) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_HALT, &dev->flags)) { int temp = dev->rxq.qlen; int qlen = RX_QLEN(dev); if (temp < qlen) { struct urb *urb; int i; /* don't refill the queue all at once */ for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) { urb = usb_alloc_urb(0, GFP_ATOMIC); if (urb != NULL) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) urb->transfer_flags |= URB_ASYNC_UNLINK; #endif rx_submit(dev, urb, GFP_ATOMIC); } } if (temp != dev->rxq.qlen && netif_msg_link(dev)) devdbg(dev, "rxqlen %d --> %d", temp, dev->rxq.qlen); if (dev->rxq.qlen < qlen) tasklet_schedule(&dev->bh); } if (dev->txq.qlen < TX_QLEN(dev)) netif_wake_queue(dev->net); } } /*------------------------------------------------------------------------- * * USB Device Driver support * *-------------------------------------------------------------------------*/ /* precondition: never called in_interrupt */ static void axusbnet_disconnect(struct usb_interface *intf) { struct usbnet *dev; struct usb_device *xdev; struct net_device *net; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) return; xdev = interface_to_usbdev(intf); if (netif_msg_probe(dev)) devinfo(dev, "unregister '%s' usb-%s-%s, %s", intf->dev.driver->name, xdev->bus->bus_name, xdev->devpath, dev->driver_info->description); net = dev->net; unregister_netdev(net); /* we don't hold rtnl here ... */ flush_scheduled_work(); if (dev->driver_info->unbind) dev->driver_info->unbind(dev, intf); free_netdev(net); usb_put_dev(xdev); } /*-------------------------------------------------------------------------*/ /* precondition: never called in_interrupt */ static int axusbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod) { struct usbnet *dev; struct net_device *net; struct usb_host_interface *interface; struct driver_info *info; struct usb_device *xdev; int status; const char *name; name = udev->dev.driver->name; info = (struct driver_info *) prod->driver_info; if (!info) { printk(KERN_ERR "blacklisted by %s\n", name); return -ENODEV; } xdev = interface_to_usbdev(udev); interface = udev->cur_altsetting; usb_get_dev(xdev); status = -ENOMEM; /* set up our own records */ net = alloc_etherdev(sizeof(*dev)); if (!net) { printk(KERN_ERR "can't kmalloc dev"); goto out; } dev = netdev_priv(net); dev->udev = xdev; dev->intf = udev; dev->driver_info = info; dev->driver_name = name; dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); skb_queue_head_init(&dev->rxq); skb_queue_head_init(&dev->txq); skb_queue_head_init(&dev->done); dev->bh.func = axusbnet_bh; dev->bh.data = (unsigned long) dev; #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) INIT_WORK(&dev->kevent, kevent, dev); #else INIT_WORK(&dev->kevent, kevent); #endif dev->delay.function = axusbnet_bh; dev->delay.data = (unsigned long) dev; init_timer(&dev->delay); /* mutex_init(&dev->phy_mutex); */ dev->net = net; /* rx and tx sides can use different message sizes; * bind() should set rx_urb_size in that case. 
*/ dev->hard_mtu = net->mtu + net->hard_header_len; #if 0 /* dma_supported() is deeply broken on almost all architectures */ /* possible with some EHCI controllers */ if (dma_supported(&udev->dev, DMA_BIT_MASK(64))) net->features |= NETIF_F_HIGHDMA; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) net->open = axusbnet_open, net->stop = axusbnet_stop, net->hard_start_xmit = axusbnet_start_xmit, net->tx_timeout = axusbnet_tx_timeout, net->get_stats = axusbnet_get_stats; #endif net->watchdog_timeo = TX_TIMEOUT_JIFFIES; net->ethtool_ops = &axusbnet_ethtool_ops; /* allow device-specific bind/init procedures */ /* NOTE net->name still not usable ... */ status = info->bind(dev, udev); if (status < 0) { deverr(dev, "Binding device failed: %d", status); goto out1; } /* maybe the remote can't receive an Ethernet MTU */ if (net->mtu > (dev->hard_mtu - net->hard_header_len)) net->mtu = dev->hard_mtu - net->hard_header_len; status = init_status(dev, udev); if (status < 0) goto out3; if (!dev->rx_urb_size) dev->rx_urb_size = dev->hard_mtu; dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1); SET_NETDEV_DEV(net, &udev->dev); status = register_netdev(net); if (status) { deverr(dev, "net device registration failed: %d", status); goto out3; } if (netif_msg_probe(dev)) devinfo(dev, "register '%s' at usb-%s-%s, %s, %pM", udev->dev.driver->name, xdev->bus->bus_name, xdev->devpath, dev->driver_info->description, net->dev_addr); /* ok, it's ready to go. */ usb_set_intfdata(udev, dev); /* start as if the link is up */ netif_device_attach(net); return 0; out3: if (info->unbind) info->unbind(dev, udev); out1: free_netdev(net); out: usb_put_dev(xdev); return status; } /*-------------------------------------------------------------------------*/ /* * suspend the whole driver as soon as the first interface is suspended * resume only when the last interface is resumed */ static int axusbnet_suspend(struct usb_interface *intf, #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 10) pm_message_t message) #else u32 message) #endif { struct usbnet *dev = usb_get_intfdata(intf); if (!dev->suspend_count++) { /* * accelerate emptying of the rx and queues, to avoid * having everything error out. */ netif_device_detach(dev->net); (void) unlink_urbs(dev, &dev->rxq); (void) unlink_urbs(dev, &dev->txq); usb_kill_urb(dev->interrupt); /* * reattach so runtime management can use and * wake the device */ netif_device_attach(dev->net); } return 0; } static int axusbnet_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); int retval = 0; if (!--dev->suspend_count) tasklet_schedule(&dev->bh); retval = init_status(dev, intf); if (retval < 0) return retval; if (dev->interrupt) { retval = usb_submit_urb(dev->interrupt, GFP_KERNEL); if (retval < 0 && netif_msg_ifup(dev)) deverr(dev, "intr submit %d", retval); } return retval; }
void rx_complete (struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; int urb_status = urb->status; enum skb_state state; skb_put (skb, urb->actual_length); state = rx_done; entry->urb = NULL; if (enable_tx_rx_debug && (urb_status != -ECONNRESET)) netdev_info(dev->net, "[RMNET_D]rx_c, status: %d\n", urb_status); switch (urb_status) { /* success */ case 0: break; case -EPIPE: dev->net->stats.rx_errors++; usbnet_defer_kevent (dev, EVENT_RX_HALT); case -ECONNRESET: case -ESHUTDOWN: netif_dbg(dev, ifdown, dev->net, "rx shutdown, code %d\n", urb_status); goto block; case -EPROTO: case -ETIME: case -EILSEQ: dev->net->stats.rx_errors++; if (!timer_pending (&dev->delay)) { mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); netif_dbg(dev, link, dev->net, "rx throttle %d\n", urb_status); } block: state = rx_cleanup; entry->urb = urb; urb = NULL; break; case -EOVERFLOW: dev->net->stats.rx_over_errors++; default: state = rx_cleanup; dev->net->stats.rx_errors++; netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); break; } state = defer_bh(dev, skb, &dev->rxq, state); if (urb) { if (netif_running (dev->net) && !test_bit (EVENT_RX_HALT, &dev->flags) && state != unlink_start) { rx_submit (dev, urb, GFP_ATOMIC); #ifdef HTC_PM_DBG if (usb_pm_debug_enabled) usb_mark_intf_last_busy(dev->intf, true); #endif usb_mark_last_busy(dev->udev); return; } usb_free_urb (urb); } netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); }
static void rx_complete(struct urb *req)
{
	struct net_device *dev = req->context;
	struct usbpn_dev *pnd = netdev_priv(dev);
	struct page *page = virt_to_page(req->transfer_buffer);
	struct sk_buff *skb;
	unsigned long flags;

	switch (req->status) {
	case 0:
		spin_lock_irqsave(&pnd->rx_lock, flags);
		skb = pnd->rx_skb;
		if (!skb) {
			skb = pnd->rx_skb = netdev_alloc_skb(dev, 12);
			if (likely(skb)) {
				/* Can't use pskb_pull() on page in IRQ */
				memcpy(skb_put(skb, 1), page_address(page), 1);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						page, 1, req->actual_length);
				page = NULL;
			}
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, 0, req->actual_length);
			page = NULL;
		}
		if (req->actual_length < PAGE_SIZE)
			pnd->rx_skb = NULL; /* Last fragment */
		else
			skb = NULL;
		spin_unlock_irqrestore(&pnd->rx_lock, flags);

		if (skb) {
			skb->protocol = htons(ETH_P_PHONET);
			skb_reset_mac_header(skb);
			__skb_pull(skb, 1);
			skb->dev = dev;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += skb->len;
			netif_rx(skb);
		}
		goto resubmit;

	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		req = NULL;
		break;

	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		dev_dbg(&dev->dev, "RX overflow\n");
		break;

	case -EILSEQ:
		dev->stats.rx_crc_errors++;
		break;
	}
	dev->stats.rx_errors++;

resubmit:
	if (page)
		netdev_free_page(dev, page);
	if (req)
		rx_submit(pnd, req, GFP_ATOMIC);
}
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) { struct sk_buff *skb; struct skb_data *entry; int retval = 0; unsigned long lockflags; size_t size = dev->rx_urb_size; #if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)) if ((skb = alloc_skb (size + NET_IP_ALIGN + FOE_INFO_LEN, flags)) == NULL) { #else if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) { #endif if (netif_msg_rx_err (dev)) devdbg (dev, "no rx skb"); usbnet_defer_kevent (dev, EVENT_RX_MEMORY); usb_free_urb (urb); return; } #if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)) skb_reserve (skb, NET_IP_ALIGN + FOE_INFO_LEN); #else skb_reserve (skb, NET_IP_ALIGN); #endif entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; entry->length = 0; usb_fill_bulk_urb (urb, dev->udev, dev->in, skb->data, size, rx_complete, skb); spin_lock_irqsave (&dev->rxq.lock, lockflags); if (netif_running (dev->net) && netif_device_present (dev->net) && !test_bit (EVENT_RX_HALT, &dev->flags)) { switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){ case -EPIPE: usbnet_defer_kevent (dev, EVENT_RX_HALT); break; case -ENOMEM: usbnet_defer_kevent (dev, EVENT_RX_MEMORY); break; case -ENODEV: if (netif_msg_ifdown (dev)) devdbg (dev, "device gone"); netif_device_detach (dev->net); break; default: if (netif_msg_rx_err (dev)) devdbg (dev, "rx submit, %d", retval); tasklet_schedule (&dev->bh); break; case 0: __usbnet_queue_skb(&dev->rxq, skb, rx_start); } } else { if (netif_msg_ifdown (dev)) devdbg (dev, "rx: stopped"); retval = -ENOLINK; } spin_unlock_irqrestore (&dev->rxq.lock, lockflags); if (retval) { dev_kfree_skb_any (skb); usb_free_urb (urb); } } /*-------------------------------------------------------------------------*/ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) { if (dev->driver_info->rx_fixup && !dev->driver_info->rx_fixup (dev, skb)) goto error; // else network stack removes extra byte if we forced a short packet if (skb->len) usbnet_skb_return (dev, skb); else { if (netif_msg_rx_err (dev)) devdbg (dev, "drop"); error: dev->stats.rx_errors++; skb_queue_tail (&dev->done, skb); } } /*-------------------------------------------------------------------------*/ static void rx_complete (struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; int urb_status = urb->status; enum skb_state state; skb_put (skb, urb->actual_length); state = rx_done; entry->urb = NULL; switch (urb_status) { // success case 0: if (skb->len < dev->net->hard_header_len) { state = rx_cleanup; dev->stats.rx_errors++; dev->stats.rx_length_errors++; if (netif_msg_rx_err (dev)) devdbg (dev, "rx length %d", skb->len); } break; // stalls need manual reset. this is rare ... except that // when going through USB 2.0 TTs, unplug appears this way. // we avoid the highspeed version of the ETIMEOUT/EILSEQ // storm, recovering as needed. case -EPIPE: dev->stats.rx_errors++; usbnet_defer_kevent (dev, EVENT_RX_HALT); // FALLTHROUGH // software-driven interface shutdown case -ECONNRESET: // async unlink case -ESHUTDOWN: // hardware gone if (netif_msg_ifdown (dev)) devdbg (dev, "rx shutdown, code %d", urb_status); goto block; // we get controller i/o faults during khubd disconnect() delays. // throttle down resubmits, to avoid log floods; just temporarily, // so we still recover when the fault isn't a khubd delay. 
case -EPROTO: case -ETIME: case -EILSEQ: dev->stats.rx_errors++; if (!timer_pending (&dev->delay)) { mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); if (netif_msg_link (dev)) devdbg (dev, "rx throttle %d", urb_status); } block: state = rx_cleanup; entry->urb = urb; urb = NULL; break; // data overrun ... flush fifo? case -EOVERFLOW: dev->stats.rx_over_errors++; // FALLTHROUGH default: state = rx_cleanup; dev->stats.rx_errors++; if (netif_msg_rx_err (dev)) devdbg (dev, "rx status %d", urb_status); break; } state = defer_bh(dev, skb, &dev->rxq, state); if (urb) { if (netif_running (dev->net) && !test_bit (EVENT_RX_HALT, &dev->flags) && state != unlink_start) { rx_submit (dev, urb, GFP_ATOMIC); return; } usb_free_urb (urb); } if (netif_msg_rx_err (dev)) devdbg (dev, "no read resubmitted"); } static void intr_complete (struct urb *urb) { struct usbnet *dev = urb->context; int status = urb->status; switch (status) { /* success */ case 0: dev->driver_info->status(dev, urb); break; /* software-driven interface shutdown */ case -ENOENT: // urb killed case -ESHUTDOWN: // hardware gone if (netif_msg_ifdown (dev)) devdbg (dev, "intr shutdown, code %d", status); return; /* NOTE: not throttling like RX/TX, since this endpoint * already polls infrequently */ default: devdbg (dev, "intr status %d", status); break; } memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); status = usb_submit_urb (urb, GFP_ATOMIC); if (status != 0 && netif_msg_timer (dev)) deverr(dev, "intr resubmit --> %d", status); } /*-------------------------------------------------------------------------*/ // unlink pending rx/tx; completion handlers do all other cleanup static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) { unsigned long flags; struct sk_buff *skb; int count = 0; spin_lock_irqsave (&q->lock, flags); while (!skb_queue_empty(q)) { struct skb_data *entry; struct urb *urb; int retval; skb_queue_walk(q, skb) { entry = (struct skb_data *) skb->cb; if (entry->state != unlink_start) goto found; } break; found: entry->state = unlink_start; urb = entry->urb; /* * Get reference count of the URB to avoid it to be * freed during usb_unlink_urb, which may trigger * use-after-free problem inside usb_unlink_urb since * usb_unlink_urb is always racing with .complete * handler(include defer_bh). */ usb_get_urb(urb); spin_unlock_irqrestore(&q->lock, flags); // during some PM-driven resume scenarios, // these (async) unlinks complete immediately retval = usb_unlink_urb (urb); if (retval != -EINPROGRESS && retval != 0) devdbg (dev, "unlink urb err, %d", retval); else count++; usb_put_urb(urb); spin_lock_irqsave(&q->lock, flags); }
/* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, * especially now that control transfers can be queued. */ static void kevent (struct work_struct *work) { struct usbnet *dev = container_of(work, struct usbnet, kevent); int status; /* usb_clear_halt() needs a thread context */ if (test_bit (EVENT_TX_HALT, &dev->flags)) { unlink_urbs (dev, &dev->txq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_pipe; status = usb_clear_halt (dev->udev, dev->out); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_tx_err (dev)) fail_pipe: netdev_err(dev->net, "can't clear tx halt, status %d\n", status); } else { clear_bit (EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN) netif_wake_queue (dev->net); } } if (test_bit (EVENT_RX_HALT, &dev->flags)) { //HTC+++ //lock cpu perf usbnet_lock_perf(); //queue usbnet_unlock_perf_delayed_work usbnet_rx_len = 0; schedule_delayed_work(&usbnet_unlock_perf_delayed_work, msecs_to_jiffies(PM_QOS_USBNET_PERF_UNLOCK_TIMER)); pr_info("%s(%d) [USBNET] EVENT_RX_HALT unlink_urbs !!!\n", __func__, __LINE__); pr_info("%s(%d) [USBNET] dev->rxq.qlen:%d\n", __func__, __LINE__, dev->rxq.qlen); //HTC--- unlink_urbs (dev, &dev->rxq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_halt; status = usb_clear_halt (dev->udev, dev->in); //HTC+++ pr_info("%s(%d) [USBNET] EVENT_RX_HALT usb_clear_halt:%d !!!\n", __func__, __LINE__, status); //HTC--- usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_rx_err (dev)) fail_halt: netdev_err(dev->net, "can't clear rx halt, status %d\n", status); } else { //HTC+++ pr_info("%s(%d) [USBNET] clear_bit EVENT_RX_HALT !!!\n", __func__, __LINE__); //HTC--- clear_bit (EVENT_RX_HALT, &dev->flags); tasklet_schedule (&dev->bh); } } /* tasklet could resubmit itself forever if memory is tight */ if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; int resched = 1; if (netif_running (dev->net)) urb = usb_alloc_urb (0, GFP_KERNEL); else clear_bit (EVENT_RX_MEMORY, &dev->flags); if (urb != NULL) { clear_bit (EVENT_RX_MEMORY, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) { usb_free_urb(urb); goto fail_lowmem; } if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) resched = 0; usb_autopm_put_interface(dev->intf); fail_lowmem: if (resched) tasklet_schedule (&dev->bh); } } if (test_bit (EVENT_LINK_RESET, &dev->flags)) { struct driver_info *info = dev->driver_info; int retval = 0; clear_bit (EVENT_LINK_RESET, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto skip_reset; if(info->link_reset && (retval = info->link_reset(dev)) < 0) { usb_autopm_put_interface(dev->intf); skip_reset: netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n", retval, dev->udev->bus->bus_name, dev->udev->devpath, info->description); } else { usb_autopm_put_interface(dev->intf); } } if (dev->flags) netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); }
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) { struct sk_buff *skb; struct skb_data *entry; int retval = 0; unsigned long lockflags; size_t size = dev->rx_urb_size; #if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE) if ((skb = alloc_skb (size + NET_IP_ALIGN + FOE_INFO_LEN, flags)) == NULL) { #else if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) { #endif netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); usbnet_defer_kevent (dev, EVENT_RX_MEMORY); usb_free_urb (urb); return -ENOMEM; } #if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE) skb_reserve (skb, NET_IP_ALIGN + FOE_INFO_LEN); #else skb_reserve (skb, NET_IP_ALIGN); #endif entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; entry->state = rx_start; entry->length = 0; usb_fill_bulk_urb (urb, dev->udev, dev->in, skb->data, size, rx_complete, skb); spin_lock_irqsave (&dev->rxq.lock, lockflags); if (netif_running (dev->net) && netif_device_present (dev->net) && !test_bit (EVENT_RX_HALT, &dev->flags) && !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { case -EPIPE: usbnet_defer_kevent (dev, EVENT_RX_HALT); break; case -ENOMEM: usbnet_defer_kevent (dev, EVENT_RX_MEMORY); break; case -ENODEV: netif_dbg(dev, ifdown, dev->net, "device gone\n"); netif_device_detach (dev->net); break; case -EHOSTUNREACH: retval = -ENOLINK; break; default: netif_dbg(dev, rx_err, dev->net, "rx submit, %d\n", retval); tasklet_schedule (&dev->bh); break; case 0: __skb_queue_tail (&dev->rxq, skb); } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); retval = -ENOLINK; } spin_unlock_irqrestore (&dev->rxq.lock, lockflags); if (retval) { dev_kfree_skb_any (skb); usb_free_urb (urb); } return retval; } /*-------------------------------------------------------------------------*/ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) { if (dev->driver_info->rx_fixup && !dev->driver_info->rx_fixup (dev, skb)) goto error; // else network stack removes extra byte if we forced a short packet if (skb->len) usbnet_skb_return (dev, skb); else { netif_dbg(dev, rx_err, dev->net, "drop\n"); error: dev->net->stats.rx_errors++; skb_queue_tail (&dev->done, skb); } } /*-------------------------------------------------------------------------*/ static void rx_complete (struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; int urb_status = urb->status; skb_put (skb, urb->actual_length); entry->state = rx_done; entry->urb = NULL; switch (urb_status) { /* success */ case 0: if (skb->len < dev->net->hard_header_len) { entry->state = rx_cleanup; dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); } break; /* stalls need manual reset. this is rare ... except that * when going through USB 2.0 TTs, unplug appears this way. * we avoid the highspeed version of the ETIMEDOUT/EILSEQ * storm, recovering as needed. */ case -EPIPE: dev->net->stats.rx_errors++; usbnet_defer_kevent (dev, EVENT_RX_HALT); // FALLTHROUGH /* software-driven interface shutdown */ case -ECONNRESET: /* async unlink */ case -ESHUTDOWN: /* hardware gone */ netif_dbg(dev, ifdown, dev->net, "rx shutdown, code %d\n", urb_status); goto block; /* we get controller i/o faults during khubd disconnect() delays. 
* throttle down resubmits, to avoid log floods; just temporarily, * so we still recover when the fault isn't a khubd delay. */ case -EPROTO: case -ETIME: case -EILSEQ: dev->net->stats.rx_errors++; if (!timer_pending (&dev->delay)) { mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); netif_dbg(dev, link, dev->net, "rx throttle %d\n", urb_status); } block: entry->state = rx_cleanup; entry->urb = urb; urb = NULL; break; /* data overrun ... flush fifo? */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; // FALLTHROUGH default: entry->state = rx_cleanup; dev->net->stats.rx_errors++; netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); break; } defer_bh(dev, skb, &dev->rxq); if (urb) { if (netif_running (dev->net) && !test_bit (EVENT_RX_HALT, &dev->flags)) { rx_submit (dev, urb, GFP_ATOMIC); return; } usb_free_urb (urb); } netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); } static void intr_complete (struct urb *urb) { struct usbnet *dev = urb->context; int status = urb->status; switch (status) { /* success */ case 0: dev->driver_info->status(dev, urb); break; /* software-driven interface shutdown */ case -ENOENT: /* urb killed */ case -ESHUTDOWN: /* hardware gone */ netif_dbg(dev, ifdown, dev->net, "intr shutdown, code %d\n", status); return; /* NOTE: not throttling like RX/TX, since this endpoint * already polls infrequently */ default: netdev_dbg(dev->net, "intr status %d\n", status); break; } if (!netif_running (dev->net)) return; memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); status = usb_submit_urb (urb, GFP_ATOMIC); if (status != 0) netif_err(dev, timer, dev->net, "intr resubmit --> %d\n", status); } /*-------------------------------------------------------------------------*/ void usbnet_pause_rx(struct usbnet *dev) { set_bit(EVENT_RX_PAUSED, &dev->flags); netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n"); }
static void rx_complete(struct pci_ep *ep, struct pci_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	unsigned int	*temp;

	// printk("%s:%d\n", __func__, __LINE__);
	total_rx_complete++;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);
		if (dev->unwrap)
			status = dev->unwrap(skb);
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			break;
		}

		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		temp = (unsigned int *)skb->data;
		// printk("temp = %p, Data [0] = %x, [1] = %x [2] = %x, [3] = %x [4] = %x, [5] = %x [6] = %x, [7] = %x\n", temp,
		//	temp[0], temp[1], temp[2], temp[3], temp[4], temp[5], temp[6], temp[7]);

		/* no buffer copies needed, unless hardware can't
		 * use skb buffers.
		 */
		status = netif_rx(skb);
		skb = NULL;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req) { #ifdef CONFIG_USB_RNDIS_MULTIPACKET struct sk_buff *skb = req->context; bool queue = 0; #else struct sk_buff *skb = req->context, *skb2; #endif struct eth_dev *dev = ep->driver_data; int status = req->status; switch (status) { /* normal completion */ case 0: skb_put(skb, req->actual); if (dev->unwrap) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) { status = dev->unwrap(dev->port_usb, skb, &dev->rx_frames); #ifdef CONFIG_USB_RNDIS_MULTIPACKET if (status == -EINVAL) dev->net->stats.rx_errors++; else if (status == -EOVERFLOW) dev->net->stats.rx_over_errors++; #endif } else { dev_kfree_skb_any(skb); status = -ENOTCONN; } spin_unlock_irqrestore(&dev->lock, flags); } else { skb_queue_tail(&dev->rx_frames, skb); } #ifdef CONFIG_USB_RNDIS_MULTIPACKET if (!status) queue = 1; #else skb = NULL; skb2 = skb_dequeue(&dev->rx_frames); while (skb2) { if (status < 0 || ETH_HLEN > skb2->len || skb2->len > VLAN_ETH_FRAME_LEN) { #ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE /* Need to revisit net->mtu does not include header size incase of changed MTU */ if(!strcmp(dev->port_usb->func.name,"ncm")) { if (status < 0 || ETH_HLEN > skb2->len || skb2->len > (dev->net->mtu + ETH_HLEN)) { printk(KERN_DEBUG "usb: %s drop incase of NCM rx length %d\n",__func__,skb2->len); } else { printk(KERN_DEBUG "usb: %s Dont drop incase of NCM rx length %d\n",__func__,skb2->len); goto process_frame; } } printk(KERN_DEBUG "usb: %s Drop rx length %d\n",__func__,skb2->len); #endif dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; DBG(dev, "rx length %d\n", skb2->len); dev_kfree_skb_any(skb2); goto next_frame; } #ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE process_frame: #endif skb2->protocol = eth_type_trans(skb2, dev->net); dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb2->len; /* no buffer copies needed, unless hardware can't * use skb buffers. */ status = netif_rx(skb2); next_frame: skb2 = skb_dequeue(&dev->rx_frames); } #endif break; /* software-driven interface shutdown */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ VDBG(dev, "rx shutdown, code %d\n", status); goto quiesce; /* for hardware automagic (such as pxa) */ case -ECONNABORTED: /* endpoint reset */ DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: dev_kfree_skb_any(skb); goto clean; /* data overrun */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; /* FALLTHROUGH */ default: #ifdef CONFIG_USB_RNDIS_MULTIPACKET queue = 1; dev_kfree_skb_any(skb); #endif dev->net->stats.rx_errors++; DBG(dev, "rx status %d\n", status); break; } #ifndef CONFIG_USB_RNDIS_MULTIPACKET if (skb) dev_kfree_skb_any(skb); if (!netif_running(dev->net)) { #endif clean: spin_lock(&dev->req_lock); list_add(&req->list, &dev->rx_reqs); spin_unlock(&dev->req_lock); #ifdef CONFIG_USB_RNDIS_MULTIPACKET if (queue) schedule_uether_rx(dev); #else req = NULL; } if (req) rx_submit(dev, req, GFP_ATOMIC); #endif }
static void rx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context; struct eth_dev *dev = ep->driver_data; int status = req->status; switch (status) { /* normal completion */ case 0: #ifdef SKB_MEMCOPY if(req->buf != skb->data) memcpy(skb->data, req->buf, req->actual); #endif skb_put(skb, req->actual); if (dev->unwrap) status = dev->unwrap(skb); if (status < 0 || ETH_HLEN > skb->len || skb->len > ETH_FRAME_LEN) { dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; DBG(dev, "rx length %d\n", skb->len); break; } skb->protocol = eth_type_trans(skb, dev->net); dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb->len; /* no buffer copies needed, unless hardware can't * use skb buffers. */ status = netif_rx(skb); #ifdef SKB_MEMCOPY if(req->buf != skb->data) kfree(req->buf); #endif skb = NULL; break; /* software-driven interface shutdown */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ VDBG(dev, "rx shutdown, code %d\n", status); goto quiesce; /* for hardware automagic (such as pxa) */ case -ECONNABORTED: /* endpoint reset */ DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: dev_kfree_skb_any(skb); #ifdef SKB_MEMCOPY if(req->buf != skb->data) kfree(req->buf); #endif goto clean; /* data overrun */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; /* FALLTHROUGH */ default: dev->net->stats.rx_errors++; DBG(dev, "rx status %d\n", status); break; } if (skb) { dev_kfree_skb_any(skb); #ifdef SKB_MEMCOPY if(req->buf != skb->data) kfree(req->buf); #endif } if (!netif_running(dev->net)) { clean: spin_lock(&dev->req_lock); list_add(&req->list, &dev->rx_reqs); spin_unlock(&dev->req_lock); req = NULL; } if (req) rx_submit(dev, req, GFP_ATOMIC); }
static void rx_complete(struct urb *req) { struct usbsvn_rx *svn_rx = req->context; struct net_device *dev = svn_rx->netdev; struct usbsvn *svn = netdev_priv(dev); struct page *page = virt_to_page(req->transfer_buffer); struct sipc4_rx_data rx_data; int dev_id = svn_rx->dev_id; int flags = 0; int err; usb_mark_last_busy(svn->usbdev); switch (req->status) { case -ENOENT: if (req->actual_length == 0) { req = NULL; break; } printk(KERN_DEBUG "%s: Rx ENOENT", __func__); case 0: if (!svn->driver_info) flags |= SIPC4_RX_HDLC; if (req->actual_length < PAGE_SIZE) flags |= SIPC4_RX_LAST; rx_data.dev = dev; rx_data.skb = svn->devdata[dev_id].rx_skb; rx_data.page = page; rx_data.size = req->actual_length; rx_data.format = dev_id; rx_data.flags = flags; rx_data.rx_hdr = &svn->devdata[dev_id].rx_hdr; page = NULL; if (rx_debug) { char *buf = req->transfer_buffer; int i; printk(KERN_DEBUG "[RX] dev_id: %d, size: %d\n", dev_id, req->actual_length); for (i = 0; i < req->actual_length; i++) printk(KERN_DEBUG "%x ", *(buf + i)); } if (dev_id == SIPC4_CMD) err = usbsvn_cmd_rx(&rx_data, svn); else err = sipc4_rx(&rx_data); if (err < 0) { svn->devdata[dev_id].rx_skb = NULL; break; } svn->devdata[dev_id].rx_skb = rx_data.skb; if (dev_id == SIPC4_RAW) wake_lock_timeout_data(svn); goto resubmit; case -ECONNRESET: case -ESHUTDOWN: if (!svn->suspended) printk(KERN_DEBUG "%s: RX complete Status(%d)\n", __func__, req->status); req = NULL; break; case -EOVERFLOW: dev->stats.rx_over_errors++; dev_err(&dev->dev, "RX overflow\n"); break; case -EILSEQ: dev->stats.rx_crc_errors++; break; } dev->stats.rx_errors++; resubmit: kfree(svn_rx); if (page) netdev_free_page(dev, page); if (req && req->status != -ENOENT) { rx_submit(svn, dev_id, req, GFP_ATOMIC); } }