Example #1
File: hip04_eth.c  Project: mhei/linux
static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
    struct hip04_priv *priv = netdev_priv(ndev);
    struct net_device_stats *stats = &ndev->stats;
    unsigned int tx_head = priv->tx_head, count;
    struct tx_desc *desc = &priv->tx_desc[tx_head];
    dma_addr_t phys;

    smp_rmb();
    count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
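    /* the ring is treated as full when only one descriptor remains free */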
    if (count == (TX_DESC_NUM - 1)) {
        netif_stop_queue(ndev);
        return NETDEV_TX_BUSY;
    }

    phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
    if (dma_mapping_error(&ndev->dev, phys)) {
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
    }

    priv->tx_skb[tx_head] = skb;
    priv->tx_phys[tx_head] = phys;
    desc->send_addr = cpu_to_be32(phys);
    desc->send_size = cpu_to_be32(skb->len);
    desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
    phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
    desc->wb_addr = cpu_to_be32(phys);
    skb_tx_timestamp(skb);

    hip04_set_xmit_desc(priv, phys);
    priv->tx_head = TX_NEXT(tx_head);
    count++;
    netdev_sent_queue(ndev, skb->len);

    stats->tx_bytes += skb->len;
    stats->tx_packets++;

    /* Ensure tx_head update visible to tx reclaim */
    smp_wmb();

    /* queue is getting full, better start cleaning up now */
    if (count >= priv->tx_coalesce_frames) {
        if (napi_schedule_prep(&priv->napi)) {
            /* disable rx interrupt and timer */
            priv->reg_inten &= ~(RCV_INT);
            writel_relaxed(DEF_INT_MASK & ~RCV_INT,
                           priv->base + PPE_INTEN);
            hrtimer_cancel(&priv->tx_coalesce_timer);
            __napi_schedule(&priv->napi);
        }
    } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
        /* cleanup not pending yet, start a new timer */
        hip04_start_tx_timer(priv);
    }

    return NETDEV_TX_OK;
}
Example #2
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	struct raeth_tx_info *txi, *txi_next;
	struct ramips_tx_dma *txd, *txd_next;
	unsigned long tx;
	unsigned int tx_next;
	dma_addr_t mapped_addr;

	if (re->plat->min_pkt_len) {
		if (skb->len < re->plat->min_pkt_len) {
			if (skb_padto(skb, re->plat->min_pkt_len)) {
				printk(KERN_ERR
				       "ramips_eth: skb_padto failed\n");
				/* skb_padto() already freed the skb on failure */
				return NETDEV_TX_OK;
			}
			skb_put(skb, re->plat->min_pkt_len - skb->len);
		}
	}

	dev->trans_start = jiffies;
	mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
				     DMA_TO_DEVICE);

	spin_lock(&re->page_lock);
	tx = ramips_fe_trr(RAETH_REG_TX_CTX_IDX0);
	tx_next = (tx + 1) % NUM_TX_DESC;

	txi = &re->tx_info[tx];
	txd = txi->tx_desc;
	txi_next = &re->tx_info[tx_next];
	txd_next = txi_next->tx_desc;

	if ((txi->tx_skb) || (txi_next->tx_skb) ||
	    !(txd->txd2 & TX_DMA_DONE) ||
	    !(txd_next->txd2 & TX_DMA_DONE))
		goto out;

	txi->tx_skb = skb;

	txd->txd1 = (unsigned int) mapped_addr;
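	/* write the buffer address before txd2 hands the descriptor to the DMA engine */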
	wmb();
	txd->txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	ramips_fe_twr(tx_next, RAETH_REG_TX_CTX_IDX0);
	netdev_sent_queue(dev, skb->len);
	spin_unlock(&re->page_lock);
	return NETDEV_TX_OK;

 out:
	spin_unlock(&re->page_lock);
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example #3
static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *txq = &priv->txq;
	dma_addr_t addr;
	u32 val;

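	/* check whether the MAC can accept another frame right now */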
	val = readl(priv->port_base + ADDRQ_STAT);
	val &= BIT_TX_READY;
	if (!val) {
		hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
		dev->stats.tx_dropped++;
		dev->stats.tx_fifo_errors++;
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
				 txq->num))) {
		hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
		dev->stats.tx_dropped++;
		dev->stats.tx_fifo_errors++;
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(priv->dev, skb->data,
			      skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, addr))) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	txq->dma_phys[txq->head] = addr;

	txq->skb[txq->head] = skb;
	txq->head = (txq->head + 1) % txq->num;

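	/* hand the frame to hardware: buffer address first, then frame length including FCS */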
	writel(addr, priv->port_base + EQ_ADDR);
	writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);

	priv->tx_fifo_used_cnt++;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	netdev_sent_queue(dev, skb->len);

	return NETDEV_TX_OK;
}
Example #4
File: bgmac.c  Project: 545191228/linux
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point empty slot. We tell hardware the first
	 * slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
		i--;
	}

err_dma_head:
	bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
		  ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example #5
File: bgmac.c  Project: 7799/linux
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

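	/* count free descriptors; one slot is always kept unused (see below) */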
	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point empty slot. We tell hardware the first
	 * slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}