static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request *req;
	unsigned long flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
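/* rx_fill() gives up and sets WORK_RX_MEMORY via defer_kevent() when a
 * submission fails, so some worker must retry the fill later from process
 * context.  A minimal sketch of that companion worker follows, modeled on
 * the upstream u_ether.c kevent handler; it assumes eth_dev carries a
 * work_struct `work` and a `todo` bitmap, which are not shown in this
 * section.
 */
static void eth_work(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);	/* may sleep here */
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}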
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context;
	struct eth_dev *dev = ep->driver_data;
	int status = req->status;
	bool queue = false;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

		if (!status)
			queue = true;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = true;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}
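/* The variant above only queues received frames on dev->rx_frames and
 * schedules dev->rx_work on uether_wq; the hand-off to the network stack
 * happens in a worker.  A minimal sketch of such a handler follows; the
 * name process_rx_w and the exact checks are illustrative assumptions,
 * not taken from this section.
 */
static void process_rx_w(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff *skb;

	if (!dev->port_usb)
		return;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		if (ETH_HLEN > skb->len || skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			dev_kfree_skb_any(skb);
			continue;
		}
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		/* process-context variant of the netif_rx() call used in
		 * the inline versions of rx_complete()
		 */
		netif_rx_ni(skb);
	}

	/* refill the out endpoint now that requests have been recycled */
	if (netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);
}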
static int rx_submit(struct eth_dev *dev, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct sk_buff *skb;
	int retval = -ENOMEM;
	size_t size = 0;
	struct usb_ep *out;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->ul_max_pkts_per_xfer)
		size *= dev->ul_max_pkts_per_xfer;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	pr_debug("%s: size: %zu", __func__, size);
	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
	}
	return retval;
}
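/* Worked example of the sizing arithmetic above, using assumed values:
 * a 1500-byte MTU, RX_EXTRA = 20 guard bytes, a 44-byte link header
 * (typical of RNDIS), and a 512-byte high-speed bulk maxpacket.
 *
 *	size  = 14 + 1500 + 20		-> 1534   (ethhdr + MTU + slack)
 *	size += 44			-> 1578   (header_len)
 *	size += 512 - 1			-> 2089
 *	size -= size % 512		-> 2048   (exactly 4 * maxpacket)
 *
 * The request length is therefore always a whole number of maxpacket
 * units, so a full-sized frame never ends mid-buffer with an unexpected
 * short packet; with ul_max_pkts_per_xfer = 3 the same request grows to
 * 6144 bytes to allow aggregated transfers.
 */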
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context, *skb2;
	struct eth_dev *dev = ep->driver_data;
	int status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context;
	struct eth_dev *dev = ep->driver_data;
	int status = req->status;
	bool queue = false;

	switch (status) {

	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

		if (!status)
			queue = true;
		break;

	case -ECONNRESET:
	case -ESHUTDOWN:
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	case -ECONNABORTED:
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		if (skb)
			dev_kfree_skb_any(skb);
		goto clean;

	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;

	default:
		queue = true;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}
static void rx_complete(struct pci_ep *ep, struct pci_request *req)
{
	struct sk_buff *skb = req->context;
	struct eth_dev *dev = ep->driver_data;
	int status = req->status;
	unsigned int *temp;

	// printk("%s:%d\n", __func__, __LINE__);
	total_rx_complete++;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);
		if (dev->unwrap)
			status = dev->unwrap(skb);
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			break;
		}

		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		temp = (unsigned int *)skb->data;
		// printk("temp = %p, Data [0] = %x, [1] = %x [2] = %x, [3] = %x [4] = %x, [5] = %x [6] = %x, [7] = %x\n", temp,
		//	temp[0], temp[1], temp[2], temp[3], temp[4], temp[5], temp[6], temp[7]);

		/* no buffer copies needed, unless hardware can't
		 * use skb buffers.
		 */
		status = netif_rx(skb);
		skb = NULL;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
#ifdef CONFIG_USB_RNDIS_MULTIPACKET
	struct sk_buff *skb = req->context;
	bool queue = false;
#else
	struct sk_buff *skb = req->context, *skb2;
#endif
	struct eth_dev *dev = ep->driver_data;
	int status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
#ifdef CONFIG_USB_RNDIS_MULTIPACKET
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
#endif
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

#ifdef CONFIG_USB_RNDIS_MULTIPACKET
		if (!status)
			queue = true;
#else
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > VLAN_ETH_FRAME_LEN) {
#ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
				/* Need to revisit: net->mtu does not include
				 * the header size in case of a changed MTU.
				 */
				if (!strcmp(dev->port_usb->func.name, "ncm")) {
					if (status < 0
							|| ETH_HLEN > skb2->len
							|| skb2->len > (dev->net->mtu + ETH_HLEN)) {
						printk(KERN_DEBUG "usb: %s drop in case of NCM, rx length %d\n",
								__func__, skb2->len);
					} else {
						printk(KERN_DEBUG "usb: %s don't drop in case of NCM, rx length %d\n",
								__func__, skb2->len);
						goto process_frame;
					}
				}
				printk(KERN_DEBUG "usb: %s drop rx length %d\n",
						__func__, skb2->len);
#endif
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
#ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
process_frame:
#endif
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
#endif
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
#ifdef CONFIG_USB_RNDIS_MULTIPACKET
		queue = true;
		dev_kfree_skb_any(skb);
#endif
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

#ifndef CONFIG_USB_RNDIS_MULTIPACKET
	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
#endif
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
#ifdef CONFIG_USB_RNDIS_MULTIPACKET
	if (queue)
		schedule_uether_rx(dev);
#else
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
#endif
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context;
	struct eth_dev *dev = ep->driver_data;
	int status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
#ifdef SKB_MEMCOPY
		if (req->buf != skb->data)
			memcpy(skb->data, req->buf, req->actual);
#endif
		skb_put(skb, req->actual);
		if (dev->unwrap)
			status = dev->unwrap(skb);
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			break;
		}

		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

#ifdef SKB_MEMCOPY
		/* the bounce buffer was already copied into the skb above;
		 * release it before the skb is handed to the stack, since
		 * skb->data must not be touched after netif_rx().
		 */
		if (req->buf != skb->data)
			kfree(req->buf);
#endif
		/* no buffer copies needed, unless hardware can't
		 * use skb buffers.
		 */
		status = netif_rx(skb);
		skb = NULL;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
#ifdef SKB_MEMCOPY
		/* free the bounce buffer before freeing the skb it is
		 * compared against
		 */
		if (req->buf != skb->data)
			kfree(req->buf);
#endif
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb) {
#ifdef SKB_MEMCOPY
		if (req->buf != skb->data)
			kfree(req->buf);
#endif
		dev_kfree_skb_any(skb);
	}
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
static int rx_submit(struct eth_dev *dev, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct sk_buff *skb;
	int retval = -ENOMEM;
	size_t size = 0;
	struct usb_ep *out;
	unsigned long flags;
	unsigned short reserve_headroom;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->ul_max_pkts_per_xfer)
		size *= dev->ul_max_pkts_per_xfer;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	if (dev->rx_needed_headroom)
		reserve_headroom = dev->rx_needed_headroom;
	else
		reserve_headroom = NET_IP_ALIGN;

	pr_debug("%s: size: %zu + %d(hr)", __func__, size, reserve_headroom);

	skb = alloc_skb(size + reserve_headroom, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	skb_reserve(skb, reserve_headroom);

	req->buf = skb->data;
	req->length = size;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
	}
	return retval;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context, *skb2;
	struct eth_dev *dev = ep->driver_data;
	int status = req->status;

	switch (status) {

	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	case -ECONNRESET:
	case -ESHUTDOWN:
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	case -ECONNABORTED:
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}