Example #1
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

	entry = bp->tx_prod;
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return 0;
}
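
Example #1 leans on two ring-accounting helpers from the b44 driver, TX_BUFFS_AVAIL() and NEXT_TX(). Their definitions are not shown on this page; the following is a minimal sketch of how such macros are commonly written for a power-of-two producer/consumer descriptor ring (the reserved-slot margin and the macro bodies are assumptions, not copied from b44.h):

/* Sketch only: bp->tx_prod is where the next packet will be queued,
 * bp->tx_cons is the oldest descriptor not yet reclaimed by b44_tx().
 * One slot is kept unused so that prod == cons always means "empty".
 */
#define NEXT_TX(N)	(((N) + 1) & (B44_TX_RING_SIZE - 1))

#define TX_BUFFS_AVAIL(bp)					\
	(((bp)->tx_cons <= (bp)->tx_prod) ?			\
	 (bp)->tx_cons + (B44_TX_RING_SIZE - 1) - (bp)->tx_prod : \
	 (bp)->tx_cons - (bp)->tx_prod - 1)

With accounting like this, the "Tx Ring full when queue awake" branch can only fire if the queue was woken without at least one free descriptor, which is why the driver logs it as a bug rather than treating it as normal flow control.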
Example #2
static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}
Example #3
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
Example #4
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail].skb;
		if (!skb)
			BUG();

		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
					skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}
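
cp_tx() is the completion half of the pattern used by cp_start_xmit() in Example #10, but keyed on tx_head/tx_tail rather than tx_prod/tx_cons. A hedged sketch of the corresponding 8139cp-style macros (names and form assumed, not quoted from the driver):

/* Sketch only: cp->tx_head is the next slot cp_start_xmit() will fill,
 * cp->tx_tail is the next slot cp_tx() will reclaim.  CP_TX_RING_SIZE
 * is assumed to be a power of two.
 */
#define NEXT_TX(N)	(((N) + 1) & (CP_TX_RING_SIZE - 1))

#define TX_BUFFS_AVAIL(cp)					\
	(((cp)->tx_tail <= (cp)->tx_head) ?			\
	 (cp)->tx_tail + (CP_TX_RING_SIZE - 1) - (cp)->tx_head : \
	 (cp)->tx_tail - (cp)->tx_head - 1)

The wake threshold of MAX_SKB_FRAGS + 1 matches the worst case a single scatter-gather skb can consume in cp_start_xmit(): one descriptor for the linear head plus one per page fragment.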
Example #5
/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	dev->trans_start = jiffies;
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
Example #6
File: sunqe.c  Project: Lyude/linux
/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
	next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}
Example #7
File: sunqe.c  Project: Lyude/linux
static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}
Example #8
static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct myri_eth *mp = netdev_priv(dev);
	struct sendq __iomem *sq = mp->sq;
	struct myri_txd __iomem *txd;
	unsigned long flags;
	unsigned int head, tail;
	int len, entry;
	u32 dma_addr;

	DTX(("myri_start_xmit: "));

	myri_tx(mp, dev);

	netif_stop_queue(dev);

	/* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
	head = sbus_readl(&sq->head);
	tail = sbus_readl(&sq->tail);

	if (!TX_BUFFS_AVAIL(head, tail)) {
		DTX(("no buffs available, returning 1\n"));
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->irq_lock, flags);

	DHDR(("xmit[skbdata(%p)]\n", skb->data));
#ifdef DEBUG_HEADER
	dump_ehdr_and_myripad(((unsigned char *) skb->data));
#endif

	/* XXX Maybe this can go as well. */
	len = skb->len;
	if (len & 3) {
		DTX(("len&3 "));
		len = (len + 4) & (~3);
	}

	entry = sbus_readl(&sq->tail);

	txd = &sq->myri_txd[entry];
	mp->tx_skbs[entry] = skb;

	/* Must do this before we sbus map it. */
	if (skb->data[MYRI_PAD_LEN] & 0x1) {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew(0xffff, &txd->addr[1]);
		sbus_writew(0xffff, &txd->addr[2]);
		sbus_writew(0xffff, &txd->addr[3]);
	} else {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
		sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
		sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
	}

	dma_addr = dma_map_single(&mp->myri_op->dev, skb->data,
				  len, DMA_TO_DEVICE);
	sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
	sbus_writel(len, &txd->myri_gathers[0].len);
	sbus_writel(1, &txd->num_sg);
	sbus_writel(KERNEL_CHANNEL, &txd->chan);
	sbus_writel(len, &txd->len);
	sbus_writel((u32)-1, &txd->csum_off);
	sbus_writel(0, &txd->csum_field);

	sbus_writel(NEXT_TX(entry), &sq->tail);
	DTX(("BangTheChip "));
	bang_the_chip(mp);

	DTX(("tbusy=0, returning 0\n"));
	netif_start_queue(dev);
	spin_unlock_irqrestore(&mp->irq_lock, flags);
	return NETDEV_TX_OK;
}
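
Unlike the other drivers on this page, myri_start_xmit() passes the ring indices into the macro instead of a private struct: head and tail are read once from the send queue's PIO registers to avoid repeated SBUS reads. A sketch of what a two-argument TX_BUFFS_AVAIL() could look like (purely illustrative; the real myri_sbus.h definition may differ):

/* Sketch only: head is where the NIC is consuming, tail is where the
 * driver last queued.  TX_RING_SIZE is assumed to be a power of two.
 */
#define TX_BUFFS_AVAIL(head, tail)				\
	(((head) <= (tail)) ?					\
	 (head) + (TX_RING_SIZE - 1) - (tail) :			\
	 (head) - (tail) - 1)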
Example #9
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
	                   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	dev->trans_start = jiffies;

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}
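
sgiseeq_start_xmit() also needs the previous descriptor (PREV_TX) so it can clear HPCDMA_EOX on the old chain tail only after the new descriptor is fully set up, as the race comment above explains. Assuming SEEQ_TX_BUFFERS is a power of two, the wrap helpers can be sketched as:

/* Sketch only: both directions wrap with a mask; TX_BUFFS_AVAIL(sp)
 * follows the same free-slot arithmetic shown after Example #1, but
 * over sp->tx_new (producer) and sp->tx_old (reclaim point).
 */
#define NEXT_TX(i)	(((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_TX(i)	(((i) - 1) & (SEEQ_TX_BUFFERS - 1))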
Example #10
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif

	spin_lock_irq(&cp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&cp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
#endif

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			const struct iphdr *ip = skb->nh.iph;
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(eor | len | DescOwn |
						 FirstFrag | LastFrag);
		wmb();

		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = mapping;
		cp->tx_skb[entry].frag = 0;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = skb->nh.iph;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = first_mapping;
		cp->tx_skb[entry].frag = 1;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			if (skb->ip_summed == CHECKSUM_HW) {
				ctrl = eor | len | DescOwn | IPCS;
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= UDPCS;
				else
					BUG();
			} else
				ctrl = eor | len | DescOwn;

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry].skb = skb;
			cp->tx_skb[entry].mapping = mapping;
			cp->tx_skb[entry].frag = frag + 2;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&cp->lock);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}