static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
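
/*
 * For reference, a sketch of the tx_complete() handler that the xmit path
 * above installs via req->complete. It follows the upstream u_ether.c
 * pattern of the same era; treat it as an illustrative reconstruction
 * rather than the exact source. The key pairing with eth_start_xmit: the
 * request is recycled onto dev->tx_reqs, the skb is freed, and the queue
 * that eth_start_xmit stopped on an empty freelist is woken again.
 */
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	/* return the request to the freelist before freeing the skb */
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}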
/* variant without the NCM fixed-size transfer handling: ZLP framing is
 * always requested, with length padding when the hardware dislikes ZLPs
 */
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	/* snapshot the IN endpoint and filter under the lock; the port
	 * can vanish at any time on disconnect
	 */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/* the freelist empties if disconnect() tore down this queue after
	 * the network stack decided to xmit
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* let the link layer wrap the frame (e.g. RNDIS headers) */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* always use zlp framing; pad instead if hardware dislikes zlps */
	req->zero = 1;
	if (!dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
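
/*
 * Both variants above gate multicast and broadcast frames on the CDC
 * filter bitmap. A minimal sketch of the is_promisc() helper they rely
 * on, assuming the usual USB_CDC_PACKET_TYPE_* bit definitions from
 * <linux/usb/cdc.h>: once the host enables promiscuous mode, every frame
 * passes and the per-type checks are skipped entirely.
 */
static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}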
/* vendor-modified variant: packets are accumulated into a shared request
 * buffer at 4-byte alignment instead of pointing req->buf at skb->data
 */
netdev_tx_t eth_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval = 0;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;
	struct ctx		*ctx;

	/* a NULL skb appears to trigger the timeout/flush path; the
	 * timeout_send label lies in the part of the function not shown
	 */
	if (skb == NULL)
		goto timeout_send;

	length = skb->len;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}

	/* copy data to req buffer for 4-byte align */
	/* req->buf = skb->data; */
	spin_lock_irqsave(&dev->req_lock, flags);
	if (dev->req == NULL) {
		dev->req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		dev->req->length = 0;
		list_del(&dev->req->list);

		/* temporarily stop TX queue when the freelist empties;
		 * this check must follow the list_del above to ever
		 * trigger, since nothing was removed before it
		 */
		if (list_empty(&dev->tx_reqs))
			netif_stop_queue(net);

		/* GFP_ATOMIC: a spinlock is held with IRQs disabled, so
		 * a sleeping GFP_KERNEL allocation is not allowed here
		 */
		ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
		if (!ctx) {
			printk(KERN_ERR "[%s]: alloc ctx failed\n", __func__);
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&dev->req_lock, flags);
			goto drop;
		}
		dev->req->context = ctx;
	} else {
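
/*
 * The vendor fragment above is truncated at the else branch, so the copy
 * itself is not shown. A minimal sketch of the technique its comment
 * names ("copy data to req buffer for 4-byte align"), assuming a
 * preallocated DMA-safe bounce buffer: each skb is appended to the
 * request buffer and the consumed length is rounded up to a 4-byte
 * boundary so the next packet starts aligned. The helper name and its
 * exact bookkeeping are illustrative assumptions, not the vendor's code.
 */
/* sketch: append one skb to an aligned bounce buffer, return new offset */
static unsigned copy_skb_aligned(void *buf, unsigned offset,
				 const struct sk_buff *skb)
{
	memcpy(buf + offset, skb->data, skb->len);
	/* pad the consumed length to the next 4-byte boundary */
	return offset + ALIGN(skb->len, 4);
}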