/* Reclaim completed Tx descriptors for the tx-only workload.
 *
 * Kicks the kernel to flush pending transmissions, then drains up to
 * BATCH_SIZE entries from the completion queue and adjusts the
 * in-flight and transmitted-packet counters. No-op when nothing is
 * outstanding.
 */
static inline void complete_tx_only(struct xdpsock *xsk)
{
	u64 done[BATCH_SIZE];
	unsigned int n;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	n = umem_complete_from_kernel(&xsk->umem->cq, done, BATCH_SIZE);
	if (n == 0)
		return;

	xsk->outstanding_tx -= n;
	xsk->tx_npkts += n;
}
/* Reclaim completed Tx descriptors for the l2fwd workload.
 *
 * Unlike the tx-only variant, every completed buffer is handed back to
 * the fill queue so it can be reused for Rx. The completion request is
 * clamped to the number of frames actually in flight.
 */
static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u64 done[BATCH_SIZE];
	unsigned int n;
	size_t want;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	/* Never ask for more completions than we have outstanding. */
	want = xsk->outstanding_tx;
	if (want > BATCH_SIZE)
		want = BATCH_SIZE;

	/* Re-add completed Tx buffers to the fill queue. */
	n = umem_complete_from_kernel(&xsk->umem->cq, done, want);
	if (n == 0)
		return;

	umem_fill_to_kernel(&xsk->umem->fq, done, n);
	xsk->outstanding_tx -= n;
	xsk->tx_npkts += n;
}
/* Queue one skb for transmission on the SGI Seeq/HPC3 ethernet.
 *
 * Runs under sp->tx_lock with IRQs disabled for the whole operation.
 * Pads short frames to ETH_ZLEN, fills in the next free Tx descriptor,
 * links it into the hardware descriptor chain, and restarts the HPC DMA
 * engine if it has gone idle. Always returns NETDEV_TX_OK (the skb is
 * either queued or, on padding failure, already freed by skb_padto).
 */
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup: pad runt frames up to the minimum ethernet length.
	 * skb_padto() frees the skb on failure, so we just drop out.
	 */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	/* Make the descriptor visible to the CPU before we write it. */
	dma_sync_desc_cpu(dev, td);

	/* Create entry. There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state. This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	/* NOTE(review): dma_map_single() result is not checked with
	 * dma_mapping_error() — presumably acceptable on this platform,
	 * but worth confirming.
	 */
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data, len, DMA_TO_DEVICE);
	/* New tail: byte count plus interrupt/end-of-chain flags, with
	 * HPCDMA_EOX set so the engine stops here until we extend the chain.
	 */
	td->tdma.cntinfo = (len & HPCDMA_BCNT) | HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		/* Only now that the new tail is consistent do we clear
		 * end-of-chain on the previous descriptor (see race note
		 * above).
		 */
		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new);	/* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	dev->trans_start = jiffies;
	/* Stop the queue while the ring is full; the Tx interrupt path
	 * is expected to wake it once descriptors complete.
	 */
	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}