static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	smp_rmb();
	count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;
	desc->send_addr = cpu_to_be32(phys);
	desc->send_size = cpu_to_be32(skb->len);
	desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = cpu_to_be32(phys);
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	priv->tx_head = TX_NEXT(tx_head);
	count++;
	netdev_sent_queue(ndev, skb->len);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}
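/*
 * A minimal sketch of the ring-index helpers the xmit path above relies on
 * (TX_DESC_NUM, TX_NEXT() and tx_count()). The value and exact definitions
 * here are assumptions, not the hip04 driver's own; they only illustrate the
 * power-of-two ring arithmetic the code implies: indices wrap with a mask,
 * and one slot is always kept free so a count of TX_DESC_NUM - 1 means
 * "ring full".
 */
#define TX_DESC_NUM	256	/* assumed: any power of two works */
#define TX_NEXT(N)	(((N) + 1) & (TX_DESC_NUM - 1))

static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
	/* descriptors currently queued between the consumer and producer */
	return (head - tail) & (TX_DESC_NUM - 1);
}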
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	smp_rmb();
	count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}
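/*
 * The two functions above form a single-producer/single-consumer ring:
 * hip04_mac_start_xmit() fills a slot and then publishes tx_head behind an
 * smp_wmb(), while hip04_tx_reclaim() frees slots and publishes tx_tail the
 * same way; each side reads the other's index with ACCESS_ONCE() after an
 * smp_rmb(). A driver-independent sketch of that barrier pairing, using
 * hypothetical ring_put()/ring_get() helpers and free-running counters
 * (assumes the usual kernel barrier and compiler-access primitives):
 */
#define RING_SIZE	16	/* any power of two */

struct spsc_ring {
	unsigned int head;		/* written only by the producer */
	unsigned int tail;		/* written only by the consumer */
	void *slot[RING_SIZE];
};

/* producer side, analogous to the tx_head handling in the xmit path */
static int ring_put(struct spsc_ring *r, void *item)
{
	unsigned int head = r->head;

	if (head - ACCESS_ONCE(r->tail) == RING_SIZE - 1)
		return -1;			/* keep one slot free: "full" */
	r->slot[head & (RING_SIZE - 1)] = item;
	smp_wmb();				/* publish the slot before the index */
	ACCESS_ONCE(r->head) = head + 1;
	return 0;
}

/* consumer side, analogous to the tx_tail handling in the reclaim path */
static void *ring_get(struct spsc_ring *r)
{
	unsigned int tail = r->tail;
	void *item;

	if (ACCESS_ONCE(r->head) == tail)
		return NULL;			/* empty */
	smp_rmb();				/* read the index before the slot */
	item = r->slot[tail & (RING_SIZE - 1)];
	ACCESS_ONCE(r->tail) = tail + 1;
	return item;
}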
static int
awg_setup_txbuf(struct awg_softc *sc, int index, struct mbuf **mp)
{
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, i, flags;
	u_int csum_flags;
	struct mbuf *m;

	m = *mp;
	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
	    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL)
			return (0);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
		    sc->tx.buf_map[index].map, m, segs, &nsegs,
		    BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return (0);

	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map,
	    BUS_DMASYNC_PREWRITE);

	flags = TX_FIR_DESC;
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	for (cur = index, i = 0; i < nsegs; i++) {
		sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= TX_LAST_DESC;
		awg_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);
		flags &= ~TX_FIR_DESC;
		cur = TX_NEXT(cur);
	}

	return (nsegs);
}
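/*
 * Illustrative stand-in for awg_setup_txdesc(), which the per-segment loop
 * above calls once per DMA segment; the real function lives elsewhere in
 * if_awg.c and differs in detail. The sketch only shows the kind of work
 * that call implies: writing one little-endian descriptor with the segment
 * address, the length combined with the first/last/checksum flags, and an
 * ownership word handed to the hardware last. The emac_desc_sketch layout,
 * the desc_ring field and the TX_DESC_OWN name are assumptions made for
 * this sketch, not the driver's definitions.
 */
struct emac_desc_sketch {
	uint32_t status;	/* ownership / completion status */
	uint32_t size;		/* length plus control flags */
	uint32_t addr;		/* physical buffer address */
	uint32_t next;		/* link to next descriptor (set at ring init) */
};

static void
awg_setup_txdesc_sketch(struct awg_softc *sc, int index, int flags,
    bus_addr_t paddr, bus_size_t len)
{
	struct emac_desc_sketch *desc = &sc->tx.desc_ring[index];

	desc->addr = htole32((uint32_t)paddr);
	desc->size = htole32(flags | (uint32_t)len);
	/* give the descriptor to the DMA engine only after it is filled in */
	desc->status = htole32(TX_DESC_OWN);
}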