Example #1
static void
ramips_eth_tx_housekeeping(unsigned long ptr)
{
	struct net_device *dev = (struct net_device*)ptr;
	struct raeth_priv *re = netdev_priv(dev);
	unsigned int bytes_compl = 0, pkts_compl = 0;

	spin_lock(&re->page_lock);
	while (1) {
		struct raeth_tx_info *txi;
		struct ramips_tx_dma *txd;

		txi = &re->tx_info[re->skb_free_idx];
		txd = txi->tx_desc;

		if (!(txd->txd2 & TX_DMA_DONE) || !(txi->tx_skb))
			break;

		pkts_compl++;
		bytes_compl += txi->tx_skb->len;

		dev_kfree_skb_irq(txi->tx_skb);
		txi->tx_skb = NULL;
		re->skb_free_idx++;
		if (re->skb_free_idx >= NUM_TX_DESC)
			re->skb_free_idx = 0;
	}
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
	spin_unlock(&re->page_lock);

	ramips_fe_int_enable(TX_DLY_INT);
}
Example #2
/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
		freed = true;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}
Example #3
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
    struct hip04_priv *priv = netdev_priv(ndev);
    unsigned tx_tail = priv->tx_tail;
    struct tx_desc *desc;
    unsigned int bytes_compl = 0, pkts_compl = 0;
    unsigned int count;

    smp_rmb();
    count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
    if (count == 0)
        goto out;

    while (count) {
        desc = &priv->tx_desc[tx_tail];
        if (desc->send_addr != 0) {
            if (force)
                desc->send_addr = 0;
            else
                break;
        }

        if (priv->tx_phys[tx_tail]) {
            dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
                             priv->tx_skb[tx_tail]->len,
                             DMA_TO_DEVICE);
            priv->tx_phys[tx_tail] = 0;
        }
        pkts_compl++;
        bytes_compl += priv->tx_skb[tx_tail]->len;
        dev_kfree_skb(priv->tx_skb[tx_tail]);
        priv->tx_skb[tx_tail] = NULL;
        tx_tail = TX_NEXT(tx_tail);
        count--;
    }

    priv->tx_tail = tx_tail;
    smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
    if (pkts_compl || bytes_compl)
        netdev_completed_queue(ndev, pkts_compl, bytes_compl);

    if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
        netif_wake_queue(ndev);

    return count;
}
Example #4
static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	int queue_depth;
	int packet_len;
	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
	int iov_count;

	spin_lock(&qi->tail_lock);
	spin_lock(&qi->head_lock);
	queue_depth = qi->queue_depth;
	spin_unlock(&qi->head_lock);

	if (skb)
		packet_len = skb->len;

	if (queue_depth < qi->max_depth) {

		*(qi->skbuff_vector + qi->tail) = skb;
		mmsg_vector += qi->tail;
		iov_count = prep_msg(
			vp,
			skb,
			mmsg_vector->msg_hdr.msg_iov
		);
		if (iov_count < 1)
			goto drop;
		mmsg_vector->msg_hdr.msg_iovlen = iov_count;
		mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
		mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
		queue_depth = vector_advancetail(qi, 1);
	} else
		goto drop;
	spin_unlock(&qi->tail_lock);
	return queue_depth;
drop:
	qi->dev->stats.tx_dropped++;
	if (skb != NULL) {
		packet_len = skb->len;
		dev_consume_skb_any(skb);
		netdev_completed_queue(qi->dev, 1, packet_len);
	}
	spin_unlock(&qi->tail_lock);
	return queue_depth;
}
Example #5
/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != empty_slot) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		if (slot->skb) {
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr,
					 slot->skb->len, DMA_TO_DEVICE);
			slot->dma_addr = 0;

			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		} else {
			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
				  ring->start, ring->end);
		}

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
		freed = true;
	}

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (freed && netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}
Example #6
static int consume_vector_skbs(struct vector_queue *qi, int count)
{
	struct sk_buff *skb;
	int skb_index;
	int bytes_compl = 0;

	for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
		skb = *(qi->skbuff_vector + skb_index);
		/* mark as empty to ensure correct destruction if
		 * needed
		 */
		bytes_compl += skb->len;
		*(qi->skbuff_vector + skb_index) = NULL;
		dev_consume_skb_any(skb);
	}
	qi->dev->stats.tx_bytes += bytes_compl;
	qi->dev->stats.tx_packets += count;
	netdev_completed_queue(qi->dev, count, bytes_compl);
	return vector_advancehead(qi, count);
}
Example #7
static void hisi_femac_xmit_reclaim(struct net_device *dev)
{
	struct sk_buff *skb;
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *txq = &priv->txq;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	u32 val;

	netif_tx_lock(dev);

	val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
	while (val < priv->tx_fifo_used_cnt) {
		skb = txq->skb[txq->tail];
		if (unlikely(!skb)) {
			netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
				   val, priv->tx_fifo_used_cnt);
			break;
		}
		hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
		pkts_compl++;
		bytes_compl += skb->len;
		dev_kfree_skb_any(skb);

		priv->tx_fifo_used_cnt--;

		val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
		txq->skb[txq->tail] = NULL;
		txq->tail = (txq->tail + 1) % txq->num;
	}

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
		netif_wake_queue(dev);

	netif_tx_unlock(dev);
}
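
All of the examples above sit on the completion side of Byte Queue Limits (BQL): every byte reported with netdev_completed_queue() must previously have been accounted with netdev_sent_queue() in the driver's ndo_start_xmit path, and the counters are cleared with netdev_reset_queue() when the TX ring is torn down or reset. The fragment below is a minimal sketch of that pairing for a hypothetical single-queue driver; the foo_* names, the 64-slot ring, and the locking scheme are invented for illustration and are not taken from any of the drivers quoted above.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define FOO_TX_RING_SLOTS 64

/* Hypothetical driver state; names are illustrative only. */
struct foo_priv {
	struct net_device *ndev;
	spinlock_t lock;
	struct sk_buff *tx_skb[FOO_TX_RING_SLOTS];	/* one slot per TX descriptor */
	unsigned int head, tail;			/* producer / consumer indices */
};

/* xmit path: account the bytes handed to the hardware. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	spin_lock(&priv->lock);
	priv->tx_skb[priv->head % FOO_TX_RING_SLOTS] = skb;
	priv->head++;
	/* ... fill the descriptor and kick the DMA engine here ... */

	/* BQL: every byte queued here must later be reported as completed. */
	netdev_sent_queue(ndev, skb->len);

	if (priv->head - priv->tail == FOO_TX_RING_SLOTS)
		netif_stop_queue(ndev);
	spin_unlock(&priv->lock);

	return NETDEV_TX_OK;
}

/* completion path (IRQ or NAPI poll): the mirror of the examples above. */
static void foo_tx_complete(struct foo_priv *priv, unsigned int done)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	spin_lock(&priv->lock);
	while (done--) {
		struct sk_buff *skb = priv->tx_skb[priv->tail % FOO_TX_RING_SLOTS];

		priv->tx_skb[priv->tail % FOO_TX_RING_SLOTS] = NULL;
		priv->tail++;
		pkts_compl++;
		bytes_compl += skb->len;
		dev_consume_skb_any(skb);
	}

	/* BQL: release the completed bytes so the stack may queue more. */
	netdev_completed_queue(priv->ndev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(priv->ndev) && pkts_compl)
		netif_wake_queue(priv->ndev);
	spin_unlock(&priv->lock);
}

As in Examples #2, #3, #5 and #7, the completion side wakes a stopped queue only after it has actually freed slots, and netdev_completed_queue() is what lets BQL keep the in-flight byte count low enough to limit TX latency.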