int mlx4_en_free_tx_buf(struct ether *dev, struct mlx4_en_tx_ring *ring)
{
        panic("Disabled");
#if 0 // AKAROS_PORT
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int cnt = 0;

        /* Skip last polled descriptor */
        ring->cons += ring->last_nr_txbb;
        en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
               ring->cons, ring->prod);

        if ((uint32_t) (ring->prod - ring->cons) > ring->size) {
                if (netif_msg_tx_err(priv))
                        en_warn(priv, "Tx consumer passed producer!\n");
                return 0;
        }

        while (ring->cons != ring->prod) {
                ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
                                                ring->cons & ring->size_mask,
                                                !!(ring->cons & ring->size), 0);
                ring->cons += ring->last_nr_txbb;
                cnt++;
        }

        netdev_tx_reset_queue(ring->tx_queue);

        if (cnt)
                en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

        return cnt;
#endif
}
static void cpmac_end_xmit(struct net_device *dev, int queue)
{
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        desc = &priv->desc_ring[queue];
        cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);

        if (likely(desc->skb)) {
                spin_lock(&priv->lock);
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += desc->skb->len;
                spin_unlock(&priv->lock);
                dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
                                 DMA_TO_DEVICE);

                if (unlikely(netif_msg_tx_done(priv)))
                        printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
                               desc->skb, desc->skb->len);

                dev_kfree_skb_irq(desc->skb);
                desc->skb = NULL;
                if (netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        } else {
                if (netif_msg_tx_err(priv) && net_ratelimit())
                        printk(KERN_WARNING
                               "%s: end_xmit: spurious interrupt\n", dev->name);
                if (netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        }
}
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int cnt = 0;

        /* Skip last polled descriptor */
        ring->cons += ring->last_nr_txbb;
        en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
               ring->cons, ring->prod);

        if ((u32) (ring->prod - ring->cons) > ring->size) {
                if (netif_msg_tx_err(priv))
                        en_warn(priv, "Tx consumer passed producer!\n");
                return 0;
        }

        while (ring->cons != ring->prod) {
                ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
                                                ring->cons & ring->size_mask,
                                                !!(ring->cons & ring->size));
                ring->cons += ring->last_nr_txbb;
                cnt++;
        }

        if (cnt)
                en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

        return cnt;
}
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
                         int *lso_header_size)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int real_size;

        if (skb_is_gso(skb)) {
                *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
                real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
                        ALIGN(*lso_header_size + 4, DS_SIZE);
                if (unlikely(*lso_header_size != skb_headlen(skb))) {
                        /* We add a segment for the skb linear buffer only if
                         * it contains data */
                        if (*lso_header_size < skb_headlen(skb))
                                real_size += DS_SIZE;
                        else {
                                if (netif_msg_tx_err(priv))
                                        en_warn(priv, "Non-linear headers\n");
                                return 0;
                        }
                }
        } else {
                *lso_header_size = 0;
                if (!is_inline(skb, NULL))
                        real_size = CTRL_SIZE +
                                (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
                else
                        real_size = inline_size(skb);
        }

        return real_size;
}
void mpodp_tx_update_cache(struct mpodp_if_priv *priv)
{
        int i;

        for (i = 0; i < priv->n_txqs; ++i) {
                struct mpodp_txq *txq = &priv->txqs[i];

                /* check for new descriptors */
                if (atomic_read(&txq->head) != 0 &&
                    txq->cached_head != txq->size) {
                        uint32_t tx_head;
                        struct mpodp_cache_entry *entry;

                        tx_head = readl(txq->head_addr);

                        /* Nothing yet */
                        if (tx_head < 0)
                                continue;

                        if (tx_head >= txq->mppa_size) {
                                if (netif_msg_tx_err(priv))
                                        netdev_err(priv->netdev,
                                                   "Invalid head %d set in Txq[%d]\n",
                                                   tx_head, txq->id);
                                return;
                        }

                        /* In autoloop, we need to cache new elements */
                        while (txq->cached_head < tx_head) {
                                entry = &txq->cache[txq->cached_head];
                                entry->addr =
                                        readq(entry->entry_addr +
                                              offsetof(struct mpodp_h2c_entry,
                                                       pkt_addr));
                                txq->cached_head++;
                        }
                }
        }
}
static void ag71xx_tx_timeout(struct net_device *dev)
{
        struct ag71xx *ag = netdev_priv(dev);

        if (netif_msg_tx_err(ag))
                printk(KERN_DEBUG "%s: tx timeout\n", ag->dev->name);

        schedule_work(&ag->restart_work);
}
static void cp_tx (struct cp_private *cp)
{
        unsigned tx_head = cp->tx_head;
        unsigned tx_tail = cp->tx_tail;

        while (tx_tail != tx_head) {
                struct sk_buff *skb;
                u32 status;

                rmb();
                status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
                if (status & DescOwn)
                        break;

                skb = cp->tx_skb[tx_tail].skb;
                if (!skb)
                        BUG();

                pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
                                 skb->len, PCI_DMA_TODEVICE);

                if (status & LastFrag) {
                        if (status & (TxError | TxFIFOUnder)) {
                                if (netif_msg_tx_err(cp))
                                        printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
                                               cp->dev->name, status);
                                cp->net_stats.tx_errors++;
                                if (status & TxOWC)
                                        cp->net_stats.tx_window_errors++;
                                if (status & TxMaxCol)
                                        cp->net_stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
                                        cp->net_stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
                                        cp->net_stats.tx_fifo_errors++;
                        } else {
                                cp->net_stats.collisions +=
                                        ((status >> TxColCntShift) & TxColCntMask);
                                cp->net_stats.tx_packets++;
                                cp->net_stats.tx_bytes += skb->len;
                                if (netif_msg_tx_done(cp))
                                        printk(KERN_DEBUG "%s: tx done, slot %d\n",
                                               cp->dev->name, tx_tail);
                        }
                        dev_kfree_skb_irq(skb);
                }

                cp->tx_skb[tx_tail].skb = NULL;

                tx_tail = NEXT_TX(tx_tail);
        }

        cp->tx_tail = tx_tail;

        if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
                netif_wake_queue(cp->dev);
}
static void tx_complete (struct urb *urb)
{
        struct sk_buff *skb = (struct sk_buff *) urb->context;
        struct skb_data *entry = (struct skb_data *) skb->cb;
        struct usbnet *dev = entry->dev;

        if (urb->status == 0) {
                dev->net->stats.tx_packets++;
                dev->net->stats.tx_bytes += entry->length;
        } else {
                dev->net->stats.tx_errors++;

                switch (urb->status) {
                case -EPIPE:
                        usbnet_defer_kevent (dev, EVENT_TX_HALT);
                        break;

                /* software-driven interface shutdown */
                case -ECONNRESET:               // async unlink
                case -ESHUTDOWN:                // hardware gone
                        break;

                // like rx, tx gets controller i/o faults during khubd delays
                // and so it uses the same throttling mechanism.
                case -EPROTO:
                case -ETIME:
                case -EILSEQ:
#if defined(CONFIG_ERICSSON_F3307_ENABLE)
                        usb_mark_last_busy(dev->udev);
#endif
                        if (!timer_pending (&dev->delay)) {
                                mod_timer (&dev->delay,
                                           jiffies + THROTTLE_JIFFIES);
                                if (netif_msg_link (dev))
                                        devdbg (dev, "tx throttle %d",
                                                urb->status);
                        }
                        netif_stop_queue (dev->net);
                        break;
                default:
                        if (netif_msg_tx_err (dev))
                                devdbg (dev, "tx err %d", entry->urb->status);
                        break;
                }
        }

#if defined(CONFIG_ERICSSON_F3307_ENABLE)
        usb_autopm_put_interface_async(dev->intf);
#endif
        urb->dev = NULL;
        entry->state = tx_done;
        defer_bh(dev, skb, &dev->txq);
}
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int queue, len;
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        if (unlikely(atomic_read(&priv->reset_pending)))
                return NETDEV_TX_BUSY;

        if (unlikely(skb_padto(skb, ETH_ZLEN)))
                return NETDEV_TX_OK;

        len = max(skb->len, ETH_ZLEN);
        queue = skb_get_queue_mapping(skb);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        netif_stop_subqueue(dev, queue);
#else
        netif_stop_queue(dev);
#endif

        desc = &priv->desc_ring[queue];
        if (unlikely(desc->dataflags & CPMAC_OWN)) {
                if (netif_msg_tx_err(priv) && net_ratelimit())
                        printk(KERN_WARNING "%s: tx dma ring full\n",
                               dev->name);
                return NETDEV_TX_BUSY;
        }

        spin_lock(&priv->lock);
        dev->trans_start = jiffies;
        spin_unlock(&priv->lock);
        desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
        desc->skb = skb;
        desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
                                            DMA_TO_DEVICE);
        desc->hw_data = (u32)desc->data_mapping;
        desc->datalen = len;
        desc->buflen = len;
        if (unlikely(netif_msg_tx_queued(priv)))
                printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
                       skb->len);
        if (unlikely(netif_msg_hw(priv)))
                cpmac_dump_desc(dev, desc);
        if (unlikely(netif_msg_pktdata(priv)))
                cpmac_dump_skb(dev, skb);
        cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

        return NETDEV_TX_OK;
}
static int get_real_size(const struct sk_buff *skb,
                         const struct skb_shared_info *shinfo,
                         struct net_device *dev,
                         int *lso_header_size,
                         bool *inline_ok,
                         void **pfrag)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int real_size;

        if (shinfo->gso_size) {
                *inline_ok = false;
                if (skb->encapsulation)
                        *lso_header_size = (skb_inner_transport_header(skb) -
                                            skb->data) + inner_tcp_hdrlen(skb);
                else
                        *lso_header_size = skb_transport_offset(skb) +
                                           tcp_hdrlen(skb);
                real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
                        ALIGN(*lso_header_size + 4, DS_SIZE);
                if (unlikely(*lso_header_size != skb_headlen(skb))) {
                        /* We add a segment for the skb linear buffer only if
                         * it contains data */
                        if (*lso_header_size < skb_headlen(skb))
                                real_size += DS_SIZE;
                        else {
                                if (netif_msg_tx_err(priv))
                                        en_warn(priv, "Non-linear headers\n");
                                return 0;
                        }
                }
        } else {
                *lso_header_size = 0;
                *inline_ok = is_inline(priv->prof->inline_thold, skb,
                                       shinfo, pfrag);

                if (*inline_ok)
                        real_size = inline_size(skb);
                else
                        real_size = CTRL_SIZE +
                                    (shinfo->nr_frags + 1) * DS_SIZE;
        }

        return real_size;
}
/* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, * especially now that control transfers can be queued. */ static void kevent (struct work_struct *work) { struct usbnet *dev = container_of(work, struct usbnet, kevent); int status; /* usb_clear_halt() needs a thread context */ if (test_bit (EVENT_TX_HALT, &dev->flags)) { unlink_urbs (dev, &dev->txq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_pipe; status = usb_clear_halt (dev->udev, dev->out); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_tx_err (dev)) fail_pipe: netdev_err(dev->net, "can't clear tx halt, status %d\n", status); } else { clear_bit (EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN) netif_wake_queue (dev->net); } } if (test_bit (EVENT_RX_HALT, &dev->flags)) { //HTC+++ //lock cpu perf usbnet_lock_perf(); //queue usbnet_unlock_perf_delayed_work usbnet_rx_len = 0; schedule_delayed_work(&usbnet_unlock_perf_delayed_work, msecs_to_jiffies(PM_QOS_USBNET_PERF_UNLOCK_TIMER)); pr_info("%s(%d) [USBNET] EVENT_RX_HALT unlink_urbs !!!\n", __func__, __LINE__); pr_info("%s(%d) [USBNET] dev->rxq.qlen:%d\n", __func__, __LINE__, dev->rxq.qlen); //HTC--- unlink_urbs (dev, &dev->rxq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_halt; status = usb_clear_halt (dev->udev, dev->in); //HTC+++ pr_info("%s(%d) [USBNET] EVENT_RX_HALT usb_clear_halt:%d !!!\n", __func__, __LINE__, status); //HTC--- usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_rx_err (dev)) fail_halt: netdev_err(dev->net, "can't clear rx halt, status %d\n", status); } else { //HTC+++ pr_info("%s(%d) [USBNET] clear_bit EVENT_RX_HALT !!!\n", __func__, __LINE__); //HTC--- clear_bit (EVENT_RX_HALT, &dev->flags); tasklet_schedule (&dev->bh); } } /* tasklet could resubmit itself forever if memory is tight */ if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; int resched = 1; if (netif_running (dev->net)) urb = usb_alloc_urb (0, GFP_KERNEL); else clear_bit (EVENT_RX_MEMORY, &dev->flags); if (urb != NULL) { clear_bit (EVENT_RX_MEMORY, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) { usb_free_urb(urb); goto fail_lowmem; } if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) resched = 0; usb_autopm_put_interface(dev->intf); fail_lowmem: if (resched) tasklet_schedule (&dev->bh); } } if (test_bit (EVENT_LINK_RESET, &dev->flags)) { struct driver_info *info = dev->driver_info; int retval = 0; clear_bit (EVENT_LINK_RESET, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto skip_reset; if(info->link_reset && (retval = info->link_reset(dev)) < 0) { usb_autopm_put_interface(dev->intf); skip_reset: netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n", retval, dev->udev->bus->bus_name, dev->udev->devpath, info->description); } else { usb_autopm_put_interface(dev->intf); } } if (dev->flags) netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); }
INT SetupNextSend(PMINI_ADAPTER Adapter, struct sk_buff *Packet, USHORT Vcid) { int status=0; BOOLEAN bHeaderSupressionEnabled = FALSE; B_UINT16 uiClassifierRuleID; u16 QueueIndex = skb_get_queue_mapping(Packet); LEADER Leader={0}; if(Packet->len > MAX_DEVICE_DESC_SIZE) { status = STATUS_FAILURE; goto errExit; } uiClassifierRuleID = *((UINT32*) (Packet->cb)+SKB_CB_CLASSIFICATION_OFFSET); bHeaderSupressionEnabled = Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled & Adapter->bPHSEnabled; if(Adapter->device_removed) { status = STATUS_FAILURE; goto errExit; } status = PHSTransmit(Adapter, &Packet, Vcid, uiClassifierRuleID, bHeaderSupressionEnabled, (UINT *)&Packet->len, Adapter->PackInfo[QueueIndex].bEthCSSupport); if(status != STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "PHS Transmit failed..\n"); goto errExit; } Leader.Vcid = Vcid; if(TCP_ACK == *((UINT32*) (Packet->cb) + SKB_CB_TCPACK_OFFSET )) Leader.Status = LEADER_STATUS_TCP_ACK; else Leader.Status = LEADER_STATUS; if(Adapter->PackInfo[QueueIndex].bEthCSSupport) { Leader.PLength = Packet->len; if(skb_headroom(Packet) < LEADER_SIZE) { if((status = skb_cow(Packet,LEADER_SIZE))) { BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"bcm_transmit : Failed To Increase headRoom\n"); goto errExit; } } skb_push(Packet, LEADER_SIZE); memcpy(Packet->data, &Leader, LEADER_SIZE); } else { Leader.PLength = Packet->len - ETH_HLEN; memcpy((LEADER*)skb_pull(Packet, (ETH_HLEN - LEADER_SIZE)), &Leader, LEADER_SIZE); } status = Adapter->interface_transmit(Adapter->pvInterfaceAdapter, Packet->data, (Leader.PLength + LEADER_SIZE)); if(status) { ++Adapter->dev->stats.tx_errors; if (netif_msg_tx_err(Adapter)) pr_info(PFX "%s: transmit error %d\n", Adapter->dev->name, status); } else { struct net_device_stats *netstats = &Adapter->dev->stats; Adapter->PackInfo[QueueIndex].uiTotalTxBytes += Leader.PLength; netstats->tx_bytes += Leader.PLength; ++netstats->tx_packets; Adapter->PackInfo[QueueIndex].uiCurrentTokenCount -= Leader.PLength << 3; Adapter->PackInfo[QueueIndex].uiSentBytes += (Packet->len); Adapter->PackInfo[QueueIndex].uiSentPackets++; Adapter->PackInfo[QueueIndex].NumOfPacketsSent++; atomic_dec(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount); Adapter->PackInfo[QueueIndex].uiThisPeriodSentBytes += Leader.PLength; } atomic_dec(&Adapter->CurrNumFreeTxDesc); errExit: dev_kfree_skb(Packet); return status; }
static void kevent(void *data) { struct usbnet *dev = (struct usbnet *)data; #else static void kevent(struct work_struct *work) { struct usbnet *dev = container_of(work, struct usbnet, kevent); #endif int status; /* usb_clear_halt() needs a thread context */ if (test_bit(EVENT_TX_HALT, &dev->flags)) { unlink_urbs(dev, &dev->txq); status = usb_clear_halt(dev->udev, dev->out); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_tx_err(dev)) deverr(dev, "can't clear tx halt, status %d", status); } else { clear_bit(EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN) netif_wake_queue(dev->net); } } if (test_bit(EVENT_RX_HALT, &dev->flags)) { unlink_urbs(dev, &dev->rxq); status = usb_clear_halt(dev->udev, dev->in); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_rx_err(dev)) deverr(dev, "can't clear rx halt, status %d", status); } else { clear_bit(EVENT_RX_HALT, &dev->flags); tasklet_schedule(&dev->bh); } } /* tasklet could resubmit itself forever if memory is tight */ if (test_bit(EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; if (netif_running(dev->net)) urb = usb_alloc_urb(0, GFP_KERNEL); else clear_bit(EVENT_RX_MEMORY, &dev->flags); if (urb != NULL) { clear_bit(EVENT_RX_MEMORY, &dev->flags); #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) urb->transfer_flags |= URB_ASYNC_UNLINK; #endif rx_submit(dev, urb, GFP_KERNEL); tasklet_schedule(&dev->bh); } } if (test_bit(EVENT_LINK_RESET, &dev->flags)) { struct driver_info *info = dev->driver_info; int retval = 0; clear_bit(EVENT_LINK_RESET, &dev->flags); if (info->link_reset) { retval = info->link_reset(dev); if (retval < 0) { devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s", retval, dev->udev->bus->bus_name, dev->udev->devpath, info->description); } } } if (dev->flags) devdbg(dev, "kevent done, flags = 0x%lx", dev->flags); } /*-------------------------------------------------------------------------*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) static void tx_complete(struct urb *urb, struct pt_regs *regs) #else static void tx_complete(struct urb *urb) #endif { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; if (urb->status == 0) { dev->stats.tx_packets++; dev->stats.tx_bytes += entry->length; } else { dev->stats.tx_errors++; switch (urb->status) { case -EPIPE: axusbnet_defer_kevent(dev, EVENT_TX_HALT); break; /* software-driven interface shutdown */ case -ECONNRESET: /* async unlink */ case -ESHUTDOWN: /* hardware gone */ break; /* like rx, tx gets controller i/o faults during khubd delays */ /* and so it uses the same throttling mechanism. */ case -EPROTO: case -ETIME: case -EILSEQ: if (!timer_pending(&dev->delay)) { mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES); if (netif_msg_link(dev)) devdbg(dev, "tx throttle %d", urb->status); } netif_stop_queue(dev->net); break; default: if (netif_msg_tx_err(dev)) devdbg(dev, "tx err %d", entry->urb->status); break; } } urb->dev = NULL; entry->state = tx_done; defer_bh(dev, skb, &dev->txq); } /*-------------------------------------------------------------------------*/ static void axusbnet_tx_timeout(struct net_device *net) { struct usbnet *dev = netdev_priv(net); unlink_urbs(dev, &dev->txq); tasklet_schedule(&dev->bh); /* FIXME: device recovery -- reset? 
*/ } /*-------------------------------------------------------------------------*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) static int #else static netdev_tx_t #endif axusbnet_start_xmit(struct sk_buff *skb, struct net_device *net) { struct usbnet *dev = netdev_priv(net); int length; struct urb *urb = NULL; struct skb_data *entry; struct driver_info *info = dev->driver_info; unsigned long flags; int retval; /* some devices want funky USB-level framing, for */ /* win32 driver (usually) and/or hardware quirks */ if (info->tx_fixup) { skb = info->tx_fixup(dev, skb, GFP_ATOMIC); if (!skb) { if (netif_msg_tx_err(dev)) devdbg(dev, "can't tx_fixup skb"); goto drop; } } length = skb->len; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { if (netif_msg_tx_err(dev)) devdbg(dev, "no urb"); goto drop; } entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; entry->state = tx_start; entry->length = length; usb_fill_bulk_urb(urb, dev->udev, dev->out, skb->data, skb->len, tx_complete, skb); /* don't assume the hardware handles USB_ZERO_PACKET * NOTE: strictly conforming cdc-ether devices should expect * the ZLP here, but ignore the one-byte packet. */ if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) { urb->transfer_buffer_length++; if (skb_tailroom(skb)) { skb->data[skb->len] = 0; __skb_put(skb, 1); } } spin_lock_irqsave(&dev->txq.lock, flags); switch ((retval = usb_submit_urb(urb, GFP_ATOMIC))) { case -EPIPE: netif_stop_queue(net); axusbnet_defer_kevent(dev, EVENT_TX_HALT); break; default: if (netif_msg_tx_err(dev)) devdbg(dev, "tx: submit urb err %d", retval); break; case 0: net->trans_start = jiffies; __skb_queue_tail(&dev->txq, skb); if (dev->txq.qlen >= TX_QLEN(dev)) netif_stop_queue(net); } spin_unlock_irqrestore(&dev->txq.lock, flags); if (retval) { if (netif_msg_tx_err(dev)) devdbg(dev, "drop, code %d", retval); drop: dev->stats.tx_dropped++; if (skb) dev_kfree_skb_any(skb); usb_free_urb(urb); } else if (netif_msg_tx_queued(dev)) { devdbg(dev, "> tx, len %d, type 0x%x", length, skb->protocol); } return NETDEV_TX_OK; } /*-------------------------------------------------------------------------*/ /* tasklet (work deferred from completions, in_irq) or timer */ static void axusbnet_bh(unsigned long param) { struct usbnet *dev = (struct usbnet *) param; struct sk_buff *skb; struct skb_data *entry; while ((skb = skb_dequeue(&dev->done))) { entry = (struct skb_data *) skb->cb; switch (entry->state) { case rx_done: entry->state = rx_cleanup; rx_process(dev, skb); continue; case tx_done: case rx_cleanup: usb_free_urb(entry->urb); dev_kfree_skb(skb); continue; default: devdbg(dev, "bogus skb state %d", entry->state); } } /* waiting for all pending urbs to complete? */ if (dev->wait) { if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) wake_up(dev->wait); /* or are we maybe short a few urbs? 
*/ } else if (netif_running(dev->net) && netif_device_present(dev->net) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_HALT, &dev->flags)) { int temp = dev->rxq.qlen; int qlen = RX_QLEN(dev); if (temp < qlen) { struct urb *urb; int i; /* don't refill the queue all at once */ for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) { urb = usb_alloc_urb(0, GFP_ATOMIC); if (urb != NULL) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) urb->transfer_flags |= URB_ASYNC_UNLINK; #endif rx_submit(dev, urb, GFP_ATOMIC); } } if (temp != dev->rxq.qlen && netif_msg_link(dev)) devdbg(dev, "rxqlen %d --> %d", temp, dev->rxq.qlen); if (dev->rxq.qlen < qlen) tasklet_schedule(&dev->bh); } if (dev->txq.qlen < TX_QLEN(dev)) netif_wake_queue(dev->net); } } /*------------------------------------------------------------------------- * * USB Device Driver support * *-------------------------------------------------------------------------*/ /* precondition: never called in_interrupt */ static void axusbnet_disconnect(struct usb_interface *intf) { struct usbnet *dev; struct usb_device *xdev; struct net_device *net; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) return; xdev = interface_to_usbdev(intf); if (netif_msg_probe(dev)) devinfo(dev, "unregister '%s' usb-%s-%s, %s", intf->dev.driver->name, xdev->bus->bus_name, xdev->devpath, dev->driver_info->description); net = dev->net; unregister_netdev(net); /* we don't hold rtnl here ... */ flush_scheduled_work(); if (dev->driver_info->unbind) dev->driver_info->unbind(dev, intf); free_netdev(net); usb_put_dev(xdev); } /*-------------------------------------------------------------------------*/ /* precondition: never called in_interrupt */ static int axusbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod) { struct usbnet *dev; struct net_device *net; struct usb_host_interface *interface; struct driver_info *info; struct usb_device *xdev; int status; const char *name; name = udev->dev.driver->name; info = (struct driver_info *) prod->driver_info; if (!info) { printk(KERN_ERR "blacklisted by %s\n", name); return -ENODEV; } xdev = interface_to_usbdev(udev); interface = udev->cur_altsetting; usb_get_dev(xdev); status = -ENOMEM; /* set up our own records */ net = alloc_etherdev(sizeof(*dev)); if (!net) { printk(KERN_ERR "can't kmalloc dev"); goto out; } dev = netdev_priv(net); dev->udev = xdev; dev->intf = udev; dev->driver_info = info; dev->driver_name = name; dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); skb_queue_head_init(&dev->rxq); skb_queue_head_init(&dev->txq); skb_queue_head_init(&dev->done); dev->bh.func = axusbnet_bh; dev->bh.data = (unsigned long) dev; #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) INIT_WORK(&dev->kevent, kevent, dev); #else INIT_WORK(&dev->kevent, kevent); #endif dev->delay.function = axusbnet_bh; dev->delay.data = (unsigned long) dev; init_timer(&dev->delay); /* mutex_init(&dev->phy_mutex); */ dev->net = net; /* rx and tx sides can use different message sizes; * bind() should set rx_urb_size in that case. 
*/ dev->hard_mtu = net->mtu + net->hard_header_len; #if 0 /* dma_supported() is deeply broken on almost all architectures */ /* possible with some EHCI controllers */ if (dma_supported(&udev->dev, DMA_BIT_MASK(64))) net->features |= NETIF_F_HIGHDMA; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30) net->open = axusbnet_open, net->stop = axusbnet_stop, net->hard_start_xmit = axusbnet_start_xmit, net->tx_timeout = axusbnet_tx_timeout, net->get_stats = axusbnet_get_stats; #endif net->watchdog_timeo = TX_TIMEOUT_JIFFIES; net->ethtool_ops = &axusbnet_ethtool_ops; /* allow device-specific bind/init procedures */ /* NOTE net->name still not usable ... */ status = info->bind(dev, udev); if (status < 0) { deverr(dev, "Binding device failed: %d", status); goto out1; } /* maybe the remote can't receive an Ethernet MTU */ if (net->mtu > (dev->hard_mtu - net->hard_header_len)) net->mtu = dev->hard_mtu - net->hard_header_len; status = init_status(dev, udev); if (status < 0) goto out3; if (!dev->rx_urb_size) dev->rx_urb_size = dev->hard_mtu; dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1); SET_NETDEV_DEV(net, &udev->dev); status = register_netdev(net); if (status) { deverr(dev, "net device registration failed: %d", status); goto out3; } if (netif_msg_probe(dev)) devinfo(dev, "register '%s' at usb-%s-%s, %s, %pM", udev->dev.driver->name, xdev->bus->bus_name, xdev->devpath, dev->driver_info->description, net->dev_addr); /* ok, it's ready to go. */ usb_set_intfdata(udev, dev); /* start as if the link is up */ netif_device_attach(net); return 0; out3: if (info->unbind) info->unbind(dev, udev); out1: free_netdev(net); out: usb_put_dev(xdev); return status; } /*-------------------------------------------------------------------------*/ /* * suspend the whole driver as soon as the first interface is suspended * resume only when the last interface is resumed */ static int axusbnet_suspend(struct usb_interface *intf, #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 10) pm_message_t message) #else u32 message) #endif { struct usbnet *dev = usb_get_intfdata(intf); if (!dev->suspend_count++) { /* * accelerate emptying of the rx and queues, to avoid * having everything error out. */ netif_device_detach(dev->net); (void) unlink_urbs(dev, &dev->rxq); (void) unlink_urbs(dev, &dev->txq); usb_kill_urb(dev->interrupt); /* * reattach so runtime management can use and * wake the device */ netif_device_attach(dev->net); } return 0; } static int axusbnet_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); int retval = 0; if (!--dev->suspend_count) tasklet_schedule(&dev->bh); retval = init_status(dev, intf); if (retval < 0) return retval; if (dev->interrupt) { retval = usb_submit_urb(dev->interrupt, GFP_KERNEL); if (retval < 0 && netif_msg_ifup(dev)) deverr(dev, "intr submit %d", retval); } return retval; }
static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net) { struct usbnet *dev = netdev_priv(net); int length; int retval = NET_XMIT_SUCCESS; struct urb *urb = NULL; struct skb_data *entry; struct driver_info *info = dev->driver_info; unsigned long flags; // some devices want funky USB-level framing, for // win32 driver (usually) and/or hardware quirks if (info->tx_fixup) { skb = info->tx_fixup (dev, skb, GFP_ATOMIC); if (!skb) { if (netif_msg_tx_err (dev)) devdbg (dev, "can't tx_fixup skb"); goto drop; } } length = skb->len; if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) { if (netif_msg_tx_err (dev)) devdbg (dev, "no urb"); goto drop; } entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; entry->state = tx_start; entry->length = length; usb_fill_bulk_urb (urb, dev->udev, dev->out, skb->data, skb->len, tx_complete, skb); /* don't assume the hardware handles USB_ZERO_PACKET * NOTE: strictly conforming cdc-ether devices should expect * the ZLP here, but ignore the one-byte packet. */ if ((length % dev->maxpacket) == 0) { urb->transfer_buffer_length++; if (skb_tailroom(skb)) { skb->data[skb->len] = 0; __skb_put(skb, 1); } } spin_lock_irqsave (&dev->txq.lock, flags); switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) { case -EPIPE: netif_stop_queue (net); usbnet_defer_kevent (dev, EVENT_TX_HALT); break; default: if (netif_msg_tx_err (dev)) devdbg (dev, "tx: submit urb err %d", retval); break; case 0: net->trans_start = jiffies; __skb_queue_tail (&dev->txq, skb); if (dev->txq.qlen >= TX_QLEN (dev)) netif_stop_queue (net); } spin_unlock_irqrestore (&dev->txq.lock, flags); if (retval) { if (netif_msg_tx_err (dev)) devdbg (dev, "drop, code %d", retval); drop: retval = NET_XMIT_SUCCESS; dev->stats.tx_dropped++; if (skb) dev_kfree_skb_any (skb); usb_free_urb (urb); } else if (netif_msg_tx_queued (dev)) { devdbg (dev, "> tx, len %d, type 0x%x", length, skb->protocol); } return retval; }
netdev_tx_t mpodp_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct mpodp_if_priv *priv = netdev_priv(netdev); struct mpodp_tx *tx; struct dma_async_tx_descriptor *dma_txd; struct mpodp_cache_entry *entry; int ret; uint8_t fifo_mode; int16_t requested_engine; struct mpodp_pkt_hdr *hdr; uint32_t tx_autoloop_next; uint32_t tx_submitted, tx_next, tx_done; uint32_t tx_mppa_idx; int qidx; unsigned long flags = 0; struct mpodp_txq *txq; /* Fetch HW queue selected by the kernel */ qidx = skb_get_queue_mapping(skb); txq = &priv->txqs[qidx]; if (atomic_read(&priv->reset) == 1) { mpodp_clean_tx_unlocked(priv, txq, -1); goto addr_error; } tx_submitted = atomic_read(&txq->submitted); /* Compute txd id */ tx_next = (tx_submitted + 1); if (tx_next == txq->size) tx_next = 0; /* MPPA H2C Entry to use */ tx_mppa_idx = atomic_read(&txq->autoloop_cur); tx_done = atomic_read(&txq->done); if (tx_done != tx_submitted && ((txq->ring[tx_done].jiffies + msecs_to_jiffies(5) >= jiffies) || (tx_submitted < tx_done && tx_submitted + txq->size - tx_done >= TX_POLL_THRESHOLD) || (tx_submitted >= tx_done && tx_submitted - tx_done >= TX_POLL_THRESHOLD))) { mpodp_clean_tx_unlocked(priv, txq, -1); } /* Check if there are txd available */ if (tx_next == atomic_read(&txq->done)) { /* Ring is full */ if (netif_msg_tx_err(priv)) netdev_err(netdev, "txq[%d]: ring full \n", txq->id); netif_tx_stop_queue(txq->txq); return NETDEV_TX_BUSY; } tx = &(txq->ring[tx_submitted]); entry = &(txq->cache[tx_mppa_idx]); /* take the time */ mppa_pcie_time_get(priv->tx_time, &tx->time); /* configure channel */ tx->dst_addr = entry->addr; /* Check the provided address */ ret = mppa_pcie_dma_check_addr(priv->pdata, tx->dst_addr, &fifo_mode, &requested_engine); if (ret) { if (netif_msg_tx_err(priv)) netdev_err(netdev, "txq[%d] tx[%d]: invalid send address %llx\n", txq->id, tx_submitted, tx->dst_addr); goto addr_error; } if (!fifo_mode) { if (netif_msg_tx_err(priv)) netdev_err(netdev, "txq[%d] tx[%d]: %llx is not a PCI2Noc addres\n", txq->id, tx_submitted, tx->dst_addr); goto addr_error; } if (requested_engine >= MPODP_NOC_CHAN_COUNT) { if (netif_msg_tx_err(priv)) netdev_err(netdev, "txq[%d] tx[%d]: address %llx using NoC engine out of range (%d >= %d)\n", txq->id, tx_submitted, tx->dst_addr, requested_engine, MPODP_NOC_CHAN_COUNT); goto addr_error; } tx->chanidx = requested_engine; /* The packet needs a header to determine size,timestamp, etc. 
* Add it */ if (skb_headroom(skb) < sizeof(struct mpodp_pkt_hdr)) { struct sk_buff *skb_new; skb_new = skb_realloc_headroom(skb, sizeof(struct mpodp_pkt_hdr)); if (!skb_new) { netdev->stats.tx_errors++; kfree_skb(skb); return NETDEV_TX_OK; } kfree_skb(skb); skb = skb_new; } hdr = (struct mpodp_pkt_hdr *) skb_push(skb, sizeof(struct mpodp_pkt_hdr)); hdr->timestamp = priv->packet_id; hdr->info._.pkt_id = priv->packet_id; hdr->info.dword = 0ULL; hdr->info._.pkt_size = skb->len; /* Also count the header size */ hdr->info._.pkt_id = priv->packet_id; priv->packet_id++; /* save skb to free it later */ tx->skb = skb; tx->len = skb->len; /* prepare sg */ if (map_skb(&priv->pdev->dev, skb, tx)){ if (netif_msg_tx_err(priv)) netdev_err(netdev, "tx %d: failed to map skb to dma\n", tx_submitted); goto busy; } if (priv->n_txqs > MPODP_NOC_CHAN_COUNT) spin_lock_irqsave(&priv->tx_lock[requested_engine], flags); /* Prepare slave args */ priv->tx_config[requested_engine].cfg.dst_addr = tx->dst_addr; priv->tx_config[requested_engine].requested_engine = requested_engine; /* FIFO mode, direction, latency were filled at setup */ if (dmaengine_slave_config(priv->tx_chan[requested_engine], &priv->tx_config[requested_engine].cfg)) { /* board has reset, wait for reset of netdev */ netif_tx_stop_queue(txq->txq); netif_carrier_off(netdev); if (netif_msg_tx_err(priv)) netdev_err(netdev, "txq[%d] tx[%d]: cannot configure channel\n", txq->id, tx_submitted); goto busy; } /* get transfer descriptor */ dma_txd = dmaengine_prep_slave_sg(priv->tx_chan[requested_engine], tx->sg, tx->sg_len, DMA_MEM_TO_DEV, 0); if (dma_txd == NULL) { /* dmaengine_prep_slave_sg failed, retry */ if (netif_msg_tx_err(priv)) netdev_err(netdev, "txq[%d] tx[%d]: cannot get dma descriptor\n", txq->id, tx_submitted); goto busy; } if (netif_msg_tx_queued(priv)) netdev_info(netdev, "txq[%d] tx[%d]: transfer start (submitted: %d done: %d) len=%d, sg_len=%d\n", txq->id, tx_submitted, tx_next, atomic_read(&txq->done), tx->len, tx->sg_len); skb_orphan(skb); /* submit and issue descriptor */ tx->jiffies = jiffies; tx->cookie = dmaengine_submit(dma_txd); dma_async_issue_pending(priv->tx_chan[requested_engine]); if (priv->n_txqs > MPODP_NOC_CHAN_COUNT) spin_unlock_irqrestore(&priv->tx_lock[requested_engine], flags); /* Count number of bytes on the fly for DQL */ netdev_tx_sent_queue(txq->txq, skb->len); if (test_bit(__QUEUE_STATE_STACK_XOFF, &txq->txq->state)){ /* We reached over the limit of DQL. Try to clean some * tx so we are rescheduled right now */ mpodp_clean_tx_unlocked(priv, txq, -1); } /* Increment tail pointer locally */ atomic_set(&txq->submitted, tx_next); /* Update H2C entry offset */ tx_autoloop_next = tx_mppa_idx + 1; if (tx_autoloop_next == txq->cached_head) tx_autoloop_next = 0; atomic_set(&txq->autoloop_cur, tx_autoloop_next); skb_tx_timestamp(skb); /* Check if there is room for another txd * or stop the queue if there is not */ tx_next = (tx_next + 1); if (tx_next == txq->size) tx_next = 0; if (tx_next == atomic_read(&txq->done)) { if (netif_msg_tx_queued(priv)) netdev_info(netdev, "txq[%d]: ring full \n", txq->id); netif_tx_stop_queue(txq->txq); } return NETDEV_TX_OK; busy: unmap_skb(&priv->pdev->dev, skb, tx); return NETDEV_TX_BUSY; addr_error: netdev->stats.tx_dropped++; dev_kfree_skb(skb); /* We can't do anything, just stop the queue artificially */ netif_tx_stop_queue(txq->txq); return NETDEV_TX_OK; }
void mpodp_tx_timeout(struct net_device *netdev)
{
        struct mpodp_if_priv *priv = netdev_priv(netdev);

        if (netif_msg_tx_err(priv))
                netdev_err(netdev, "tx timeout\n");
}
static void tx_complete (struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; #ifdef TX_URB_MONITOR unsigned char b_usb_if_num = 0; int iRet = get_usb_interface(urb, &b_usb_if_num); #endif //#ifdef TX_URB_MONITOR if (urb->status == 0) { if (!(dev->driver_info->flags & FLAG_MULTI_PACKET)) dev->net->stats.tx_packets++; dev->net->stats.tx_bytes += entry->length; } else { dev->net->stats.tx_errors++; switch (urb->status) { case -EPIPE: usbnet_defer_kevent (dev, EVENT_TX_HALT); break; /* software-driven interface shutdown */ case -ECONNRESET: // async unlink case -ESHUTDOWN: // hardware gone break; // like rx, tx gets controller i/o faults during khubd delays // and so it uses the same throttling mechanism. case -EPROTO: case -ETIME: case -EILSEQ: if (!timer_pending (&dev->delay)) { mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); if (netif_msg_link (dev)) #if (LINUX_VERSION_CODE != KERNEL_VERSION( 3,0,6 )) devdbg (dev, "tx throttle %d", urb->status); #else netif_dbg(dev, link, dev->net, "tx throttle %d\n", urb->status); #endif } netif_stop_queue (dev->net); break; default: if (netif_msg_tx_err (dev)) #if (LINUX_VERSION_CODE != KERNEL_VERSION( 3,0,6 )) devdbg (dev, "tx err %d", entry->urb->status); #else netif_dbg(dev, tx_err, dev->net, "tx err %d\n", entry->urb->status); #endif break; } } usb_autopm_put_interface_async(dev->intf); urb->dev = NULL; entry->state = tx_done; defer_bh(dev, skb, &dev->txq); #ifdef TX_URB_MONITOR if ((URB_monitor) && (0==iRet)) { URB_monitor(false, b_usb_if_num); } #endif //#ifdef TX_URB_MONITOR }
static void cyrf6936_rx_tx(struct cyrf6936_net *p)
{
        u8 val, *data;
        size_t rx_len;
        struct sk_buff *skb;

        /* update signal level */
        cyrf6936_iw_rssi(p);

        /* rx */
        val = cyrf6936_rreg(p, RX_IRQ_STATUS);
        if (val & RXE_IRQ)
                goto err_rxe;
        if (val & RXC_IRQ) {
                /* debouncing, 2nd read */
                val = cyrf6936_rreg(p, RX_IRQ_STATUS);
                if (val & RXE_IRQ)
                        goto err_rxe;

                /* get data length */
                rx_len = cyrf6936_rreg(p, RX_LENGTH);

                /* allocate buffer */
                skb = dev_alloc_skb(rx_len + NET_IP_ALIGN);
                if (!skb)
                        goto err_oom;
                skb_reserve(skb, NET_IP_ALIGN);
                data = skb_put(skb, rx_len);

                /* read data; the while loop consumes rx_len */
                cyrf6936_wreg(p, RX_IRQ_STATUS, RXOW_IRQ);
                while (rx_len--)
                        *data++ = cyrf6936_rreg(p, RX_BUFFER);

                /* dump received packet data */
                if (netif_msg_pktdata(p))
                        print_hex_dump_bytes("cyrf6936 rx data: ",
                                             DUMP_PREFIX_NONE,
                                             skb->data, skb->len);

                skb->dev = p->netdev;
                skb->protocol = htons(ETH_P_ALL);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                p->stats.rx_packets++;
                /* count the received length; rx_len has wrapped by now */
                p->stats.rx_bytes += skb->len;
                netif_rx_ni(skb);

                cyrf6936_rx_enable(p);
        }

        /* tx */
        val = cyrf6936_rreg(p, TX_IRQ_STATUS);
        if (val & TXE_IRQ)
                goto err_txe;
        if (val & TXC_IRQ) {
                /* debouncing, 2nd read */
                val = cyrf6936_rreg(p, TX_IRQ_STATUS);
                if (val & TXE_IRQ)
                        goto err_txe;

                /* tx ok */
                p->stats.tx_packets++;
                p->stats.tx_bytes += p->tx_skb->len;
                if (netif_msg_tx_done(p))
                        dev_dbg(&p->netdev->dev, "tx done\n");
                dev_kfree_skb(p->tx_skb);
                p->tx_skb = NULL;
                netif_wake_queue(p->netdev);

                cyrf6936_rx_enable(p);
        }

        if (!p->pollmode)
                enable_irq(p->netdev->irq);
        return;

err_rxe:
        if (netif_msg_rx_err(p))
                dev_info(&p->netdev->dev, "rx error\n");
        p->stats.rx_errors++;
        cyrf6936_rx_enable(p);
        return;
err_txe:
        if (netif_msg_tx_err(p))
                dev_info(&p->netdev->dev, "tx error\n");
        p->stats.tx_errors++;
        cyrf6936_rx_enable(p);
        return;
err_oom:
        dev_err(&p->netdev->dev, "out of memory, packet dropped\n");
        p->stats.rx_dropped++;
        cyrf6936_rx_enable(p);
}
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, struct net_device *net) { struct usbnet *dev = netdev_priv(net); int length; struct urb *urb = NULL; struct skb_data *entry; struct driver_info *info = dev->driver_info; unsigned long flags; int retval; // some devices want funky USB-level framing, for // win32 driver (usually) and/or hardware quirks if (info->tx_fixup) { skb = info->tx_fixup (dev, skb, GFP_ATOMIC); if (!skb) { if (netif_msg_tx_err(dev)) { netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); goto drop; } else { /* cdc_ncm collected packet; waits for more */ goto not_drop; } } } length = skb->len; if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) { netif_dbg(dev, tx_err, dev->net, "no urb\n"); goto drop; } entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; entry->state = tx_start; entry->length = length; usb_fill_bulk_urb (urb, dev->udev, dev->out, skb->data, skb->len, tx_complete, skb); /* don't assume the hardware handles USB_ZERO_PACKET * NOTE: strictly conforming cdc-ether devices should expect * the ZLP here, but ignore the one-byte packet. * NOTE2: CDC NCM specification is different from CDC ECM when * handling ZLP/short packets, so cdc_ncm driver will make short * packet itself if needed. */ if (length % dev->maxpacket == 0) { if (!(info->flags & FLAG_SEND_ZLP)) { if (!(info->flags & FLAG_MULTI_PACKET)) { urb->transfer_buffer_length++; if (skb_tailroom(skb)) { skb->data[skb->len] = 0; __skb_put(skb, 1); } } } else urb->transfer_flags |= URB_ZERO_PACKET; } spin_lock_irqsave(&dev->txq.lock, flags); retval = usb_autopm_get_interface_async(dev->intf); if (retval < 0) { spin_unlock_irqrestore(&dev->txq.lock, flags); goto drop; } #ifdef CONFIG_PM /* if this triggers the device is still a sleep */ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { /* transmission will be done in resume */ usb_anchor_urb(urb, &dev->deferred); /* no use to process more packets */ netif_stop_queue(net); spin_unlock_irqrestore(&dev->txq.lock, flags); netdev_dbg(dev->net, "Delaying transmission for resumption\n"); goto deferred; } #endif switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) { case -EPIPE: netif_stop_queue (net); usbnet_defer_kevent (dev, EVENT_TX_HALT); usb_autopm_put_interface_async(dev->intf); break; default: usb_autopm_put_interface_async(dev->intf); netif_dbg(dev, tx_err, dev->net, "tx: submit urb err %d\n", retval); break; case 0: net->trans_start = jiffies; __skb_queue_tail (&dev->txq, skb); if (dev->txq.qlen >= TX_QLEN (dev)) netif_stop_queue (net); } spin_unlock_irqrestore (&dev->txq.lock, flags); if (retval) { netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval); drop: dev->net->stats.tx_dropped++; not_drop: if (skb) dev_kfree_skb_any (skb); usb_free_urb (urb); } else netif_dbg(dev, tx_queued, dev->net, "> tx, len %d, type 0x%x\n", length, skb->protocol); #ifdef CONFIG_PM deferred: #endif return NETDEV_TX_OK; }
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) { struct skb_shared_info *shinfo = skb_shinfo(skb); struct mlx4_en_priv *priv = netdev_priv(dev); union mlx4_wqe_qpn_vlan qpn_vlan = {}; struct mlx4_en_tx_ring *ring; struct mlx4_en_tx_desc *tx_desc; struct mlx4_wqe_data_seg *data; struct mlx4_en_tx_info *tx_info; int tx_ind; int nr_txbb; int desc_size; int real_size; u32 index, bf_index; __be32 op_own; int lso_header_size; void *fragptr = NULL; bool bounce = false; bool send_doorbell; bool stop_queue; bool inline_ok; u8 data_offset; u32 ring_cons; bool bf_ok; tx_ind = skb_get_queue_mapping(skb); ring = priv->tx_ring[TX][tx_ind]; if (unlikely(!priv->port_up)) goto tx_drop; /* fetch ring->cons far ahead before needing it to avoid stall */ ring_cons = READ_ONCE(ring->cons); real_size = get_real_size(skb, shinfo, dev, &lso_header_size, &inline_ok, &fragptr); if (unlikely(!real_size)) goto tx_drop_count; /* Align descriptor to TXBB size */ desc_size = ALIGN(real_size, TXBB_SIZE); nr_txbb = desc_size >> LOG_TXBB_SIZE; if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { if (netif_msg_tx_err(priv)) en_warn(priv, "Oversized header or SG list\n"); goto tx_drop_count; } bf_ok = ring->bf_enabled; if (skb_vlan_tag_present(skb)) { u16 vlan_proto; qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb)); vlan_proto = be16_to_cpu(skb->vlan_proto); if (vlan_proto == ETH_P_8021AD) qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN; else if (vlan_proto == ETH_P_8021Q) qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; else qpn_vlan.ins_vlan = 0; bf_ok = false; } netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); /* Track current inflight packets for performance analysis */ AVG_PERF_COUNTER(priv->pstats.inflight_avg, (u32)(ring->prod - ring_cons - 1)); /* Packet is good - grab an index and transmit it */ index = ring->prod & ring->size_mask; bf_index = ring->prod; /* See if we have enough space for whole descriptor TXBB for setting * SW ownership on next descriptor; if not, use a bounce buffer. */ if (likely(index + nr_txbb <= ring->size)) tx_desc = ring->buf + (index << LOG_TXBB_SIZE); else { tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; bounce = true; bf_ok = false; } /* Save skb in tx_info ring */ tx_info = &ring->tx_info[index]; tx_info->skb = skb; tx_info->nr_txbb = nr_txbb; if (!lso_header_size) { data = &tx_desc->data; data_offset = offsetof(struct mlx4_en_tx_desc, data); } else {
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int index, u64 timestamp, int napi_mode) { struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; struct mlx4_en_rx_alloc frame = { .page = tx_info->page, .dma = tx_info->map0_dma, }; if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { dma_unmap_page(priv->ddev, tx_info->map0_dma, PAGE_SIZE, priv->dma_dir); put_page(tx_info->page); } return tx_info->nr_txbb; } int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) { struct mlx4_en_priv *priv = netdev_priv(dev); int cnt = 0; /* Skip last polled descriptor */ ring->cons += ring->last_nr_txbb; en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", ring->cons, ring->prod); if ((u32) (ring->prod - ring->cons) > ring->size) { if (netif_msg_tx_err(priv)) en_warn(priv, "Tx consumer passed producer!\n"); return 0; } while (ring->cons != ring->prod) { ring->last_nr_txbb = ring->free_tx_desc(priv, ring, ring->cons & ring->size_mask, 0, 0 /* Non-NAPI caller */); ring->cons += ring->last_nr_txbb; cnt++; } if (ring->tx_queue) netdev_tx_reset_queue(ring->tx_queue); if (cnt) en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); return cnt; } bool mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int napi_budget) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring]; struct mlx4_cqe *cqe; u16 index, ring_index, stamp_index; u32 txbbs_skipped = 0; u32 txbbs_stamp = 0; u32 cons_index = mcq->cons_index; int size = cq->size; u32 size_mask = ring->size_mask; struct mlx4_cqe *buf = cq->buf; u32 packets = 0; u32 bytes = 0; int factor = priv->cqe_factor; int done = 0; int budget = priv->tx_work_limit; u32 last_nr_txbb; u32 ring_cons; if (unlikely(!priv->port_up)) return true; netdev_txq_bql_complete_prefetchw(ring->tx_queue); index = cons_index & size_mask; cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; last_nr_txbb = READ_ONCE(ring->last_nr_txbb); ring_cons = READ_ONCE(ring->cons); ring_index = ring_cons & size_mask; stamp_index = ring_index; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, cons_index & size) && (done < budget)) { u16 new_index; /* * make sure we read the CQE after we read the * ownership bit */ dma_rmb(); if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_ERROR)) { struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n", cqe_err->vendor_err_syndrome, cqe_err->syndrome); } /* Skip over last polled CQE */ new_index = be16_to_cpu(cqe->wqe_index) & size_mask; do { u64 timestamp = 0; txbbs_skipped += last_nr_txbb; ring_index = (ring_index + last_nr_txbb) & size_mask; if (unlikely(ring->tx_info[ring_index].ts_requested)) timestamp = mlx4_en_get_cqe_ts(cqe); /* free next descriptor */ last_nr_txbb = ring->free_tx_desc( priv, ring, ring_index, timestamp, napi_budget); mlx4_en_stamp_wqe(priv, ring, stamp_index, !!((ring_cons + txbbs_stamp) & ring->size)); stamp_index = ring_index; txbbs_stamp = txbbs_skipped; packets++; bytes += ring->tx_info[ring_index].nr_bytes; } while ((++done < budget) && (ring_index != new_index)); ++cons_index; index = cons_index & size_mask; cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; } /* * To prevent CQ overflow we first update CQ consumer and only then * the ring consumer. 
*/ mcq->cons_index = cons_index; mlx4_cq_set_ci(mcq); wmb(); /* we want to dirty this cache line once */ WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb); WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped); if (cq->type == TX_XDP) return done < budget; netdev_tx_completed_queue(ring->tx_queue, packets, bytes); /* Wakeup Tx queue if this stopped, and ring is not full. */ if (netif_tx_queue_stopped(ring->tx_queue) && !mlx4_en_is_tx_ring_full(ring)) { netif_tx_wake_queue(ring->tx_queue); ring->wake_queue++; } return done < budget; } void mlx4_en_tx_irq(struct mlx4_cq *mcq) { struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = netdev_priv(cq->dev); if (likely(priv->port_up)) napi_schedule_irqoff(&cq->napi); else mlx4_en_arm_cq(priv, cq); } /* TX CQ polling - called by NAPI */ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget) { struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); struct net_device *dev = cq->dev; struct mlx4_en_priv *priv = netdev_priv(dev); bool clean_complete; clean_complete = mlx4_en_process_tx_cq(dev, cq, budget); if (!clean_complete) return budget; napi_complete(napi); mlx4_en_arm_cq(priv, cq); return 0; } static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, u32 index, unsigned int desc_size) { u32 copy = (ring->size - index) << LOG_TXBB_SIZE; int i; for (i = desc_size - copy - 4; i >= 0; i -= 4) { if ((i & (TXBB_SIZE - 1)) == 0) wmb(); *((u32 *) (ring->buf + i)) = *((u32 *) (ring->bounce_buf + copy + i)); } for (i = copy - 4; i >= 4 ; i -= 4) { if ((i & (TXBB_SIZE - 1)) == 0) wmb(); *((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) = *((u32 *) (ring->bounce_buf + i)); } /* Return real descriptor location */ return ring->buf + (index << LOG_TXBB_SIZE); }
/* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, * especially now that control transfers can be queued. */ static void kevent (struct work_struct *work) { struct usbnet *dev = container_of(work, struct usbnet, kevent); int status; /* usb_clear_halt() needs a thread context */ if (test_bit (EVENT_TX_HALT, &dev->flags)) { unlink_urbs (dev, &dev->txq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_pipe; status = usb_clear_halt (dev->udev, dev->out); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_tx_err (dev)) fail_pipe: netdev_err(dev->net, "can't clear tx halt, status %d\n", status); } else { clear_bit (EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN) netif_wake_queue (dev->net); } } if (test_bit (EVENT_RX_HALT, &dev->flags)) { unlink_urbs (dev, &dev->rxq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_halt; status = usb_clear_halt (dev->udev, dev->in); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_rx_err (dev)) fail_halt: netdev_err(dev->net, "can't clear rx halt, status %d\n", status); } else { clear_bit (EVENT_RX_HALT, &dev->flags); tasklet_schedule (&dev->bh); } } /* tasklet could resubmit itself forever if memory is tight */ if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; if (netif_running (dev->net)) urb = usb_alloc_urb (0, GFP_KERNEL); else clear_bit (EVENT_RX_MEMORY, &dev->flags); if (urb != NULL) { clear_bit (EVENT_RX_MEMORY, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_lowmem; rx_submit (dev, urb, GFP_KERNEL); usb_autopm_put_interface(dev->intf); fail_lowmem: tasklet_schedule (&dev->bh); } } if (test_bit (EVENT_LINK_RESET, &dev->flags)) { struct driver_info *info = dev->driver_info; int retval = 0; clear_bit (EVENT_LINK_RESET, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto skip_reset; if(info->link_reset && (retval = info->link_reset(dev)) < 0) { usb_autopm_put_interface(dev->intf); skip_reset: netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n", retval, dev->udev->bus->bus_name, dev->udev->devpath, info->description); } else { usb_autopm_put_interface(dev->intf); } } if (dev->flags) netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); }
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 felic_stat;
        u32 link_stat;
        u32 mask;

        if (intr_status & EESR_ECI) {
                felic_stat = sh_eth_read(ndev, ECSR);
                sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
                if (felic_stat & ECSR_ICD)
                        mdp->stats.tx_carrier_errors++;
                if (felic_stat & ECSR_LCHNG) {
                        /* Link Changed */
                        if (mdp->cd->no_psr || mdp->no_ether_link) {
                                if (mdp->link == PHY_DOWN)
                                        link_stat = 0;
                                else
                                        link_stat = PHY_ST_LINK;
                        } else {
                                link_stat = (sh_eth_read(ndev, PSR));
                                if (mdp->ether_link_active_low)
                                        link_stat = ~link_stat;
                        }
                        if (!(link_stat & PHY_ST_LINK))
                                sh_eth_rcv_snd_disable(ndev);
                        else {
                                /* Link Up */
                                sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
                                             ~DMAC_M_ECI, EESIPR);
                                /* clear int */
                                sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
                                             ECSR);
                                sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
                                             DMAC_M_ECI, EESIPR);
                                /* enable tx and rx */
                                sh_eth_rcv_snd_enable(ndev);
                        }
                }
        }

        if (intr_status & EESR_TWB) {
                /* Write back end. unused write back interrupt */
                if (intr_status & EESR_TABT)    /* Transmit Abort int */
                        mdp->stats.tx_aborted_errors++;
                if (netif_msg_tx_err(mdp))
                        dev_err(&ndev->dev, "Transmit Abort\n");
        }

        if (intr_status & EESR_RABT) {
                /* Receive Abort int */
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        mdp->stats.rx_frame_errors++;
                        if (netif_msg_rx_err(mdp))
                                dev_err(&ndev->dev, "Receive Abort\n");
                }
        }

        if (intr_status & EESR_TDE) {
                /* Transmit Descriptor Empty int */
                mdp->stats.tx_fifo_errors++;
                if (netif_msg_tx_err(mdp))
                        dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
        }

        if (intr_status & EESR_TFE) {
                /* FIFO under flow */
                mdp->stats.tx_fifo_errors++;
                if (netif_msg_tx_err(mdp))
                        dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
        }

        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                mdp->stats.rx_over_errors++;

                if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
                        sh_eth_write(ndev, EDRRR_R, EDRRR);
                if (netif_msg_rx_err(mdp))
                        dev_err(&ndev->dev, "Receive Descriptor Empty\n");
        }

        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                mdp->stats.rx_fifo_errors++;
                if (netif_msg_rx_err(mdp))
                        dev_err(&ndev->dev, "Receive FIFO Overflow\n");
        }

        if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
                /* Address Error */
                mdp->stats.tx_fifo_errors++;
                if (netif_msg_tx_err(mdp))
                        dev_err(&ndev->dev, "Address Error\n");
        }

        mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
        if (mdp->cd->no_ade)
                mask &= ~EESR_ADE;
        if (intr_status & mask) {
                /* Tx error */
                u32 edtrr = sh_eth_read(ndev, EDTRR);
                /* dmesg */
                dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
                        intr_status, mdp->cur_tx);
                dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
                        mdp->dirty_tx, (u32) ndev->state, edtrr);
                /* dirty buffer free */
                sh_eth_txfree(ndev);

                /* SH7712 BUG */
                if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
                        /* tx dma start */
                        sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
                }
                /* wakeup */
                netif_wake_queue(ndev);
        }
}