Example #1
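myri_init_rings() from the Linux Myricom SBUS ("myri") driver refills the receive ring: it cleans out old state, allocates and DMA-maps one skb per RX descriptor, programs each descriptor's scatter entry through sbus_writel(), and finally resets the queue head and tail. GFP_ATOMIC is selected when the caller may be in interrupt context, where a GFP_KERNEL allocation could sleep.
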
static void myri_init_rings(struct myri_eth *mp, int from_irq)
{
	struct recvq *rq = mp->rq;
	struct myri_rxd *rxd = &rq->myri_rxd[0];
	struct net_device *dev = mp->dev;
	gfp_t gfp_flags = GFP_KERNEL;
	int i;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	myri_clean_rings(mp);
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = myri_alloc_skb(RX_ALLOC_SIZE, gfp_flags);
		u32 dma_addr;

		if (!skb)
			continue;
		mp->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, RX_ALLOC_SIZE);

		dma_addr = sbus_map_single(mp->myri_sdev, skb->data, RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
		sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
		sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
		sbus_writel(i, &rxd[i].ctx);
		sbus_writel(1, &rxd[i].num_sg);
	}
	sbus_writel(0, &rq->head);
	sbus_writel(RX_RING_SIZE, &rq->tail);
}
Example #2
File: sunbmac.c  Project: cilynx/dd-wrt
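bigmac_init_rings() from the Sun BigMAC driver follows the same shape: choose GFP flags by context, clean the rings, refill the RX ring with freshly mapped skbs, and zero the TX descriptors. Note the skb_put(ETH_FRAME_LEN) followed by a 34-byte skb_reserve() before mapping RX_BUF_ALLOC_SIZE - 34 bytes, so the mapped region begins at the reserved offset.
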
static void bigmac_init_rings(struct bigmac *bp, int from_irq)
{
	struct bmac_init_block *bb = bp->bmac_block;
	struct net_device *dev = bp->dev;
	int i;
	gfp_t gfp_flags = GFP_KERNEL;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;

	/* Free any skippy bufs left around in the rings. */
	bigmac_clean_rings(bp);

	/* Now get new skbufs for the receive ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
		if (!skb)
			continue;

		bp->rx_skbs[i] = skb;
		skb->dev = dev;

		/* Because we reserve afterwards. */
		skb_put(skb, ETH_FRAME_LEN);
		skb_reserve(skb, 34);

		bb->be_rxd[i].rx_addr =
			sbus_map_single(bp->bigmac_sdev, skb->data,
					RX_BUF_ALLOC_SIZE - 34,
					SBUS_DMA_FROMDEVICE);
		bb->be_rxd[i].rx_flags =
			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
}
Example #3
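myri_start_xmit() is the driver's hard_start_xmit hook, in the legacy style that returns 1 (rather than NETDEV_TX_BUSY) when no descriptors are free. It rounds the frame length up to a multiple of 4, pre-fills the descriptor's address words (all ones when the destination's multicast bit is set), DMA-maps the payload, fills in the single gather entry, advances the queue tail, and kicks the LANAI with bang_the_chip().
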
static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct myri_eth *mp = (struct myri_eth *) dev->priv;
	struct sendq *sq = mp->sq;
	struct myri_txd *txd;
	unsigned long flags;
	unsigned int head, tail;
	int len, entry;
	u32 dma_addr;

	DTX(("myri_start_xmit: "));

	myri_tx(mp, dev);

	netif_stop_queue(dev);

	/* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
	head = sbus_readl(&sq->head);
	tail = sbus_readl(&sq->tail);

	if (!TX_BUFFS_AVAIL(head, tail)) {
		DTX(("no buffs available, returning 1\n"));
		return 1;
	}

	save_and_cli(flags);

	DHDR(("xmit[skbdata(%p)]\n", skb->data));
#ifdef DEBUG_HEADER
	dump_ehdr_and_myripad(((unsigned char *) skb->data));
#endif

	/* XXX Maybe this can go as well. */
	len = skb->len;
	if (len & 3) {
		DTX(("len&3 "));
		len = (len + 4) & (~3);
	}

	entry = sbus_readl(&sq->tail);

	txd = &sq->myri_txd[entry];
	mp->tx_skbs[entry] = skb;

	/* Must do this before we sbus map it. */
	if (skb->data[MYRI_PAD_LEN] & 0x1) {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew(0xffff, &txd->addr[1]);
		sbus_writew(0xffff, &txd->addr[2]);
		sbus_writew(0xffff, &txd->addr[3]);
	} else {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
		sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
		sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
	}

	dma_addr = sbus_map_single(mp->myri_sdev, skb->data, len, SBUS_DMA_TODEVICE);
	sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
	sbus_writel(len, &txd->myri_gathers[0].len);
	sbus_writel(1, &txd->num_sg);
	sbus_writel(KERNEL_CHANNEL, &txd->chan);
	sbus_writel(len, &txd->len);
	sbus_writel((u32)-1, &txd->csum_off);
	sbus_writel(0, &txd->csum_field);

	sbus_writel(NEXT_TX(entry), &sq->tail);
	DTX(("BangTheChip "));
	bang_the_chip(mp);

	DTX(("tbusy=0, returning 0\n"));
	netif_start_queue(dev);
	restore_flags(flags);
	return 0;
}
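
The TX ring helpers used above (NEXT_TX and TX_BUFFS_AVAIL) live in the driver's private header, which these snippets do not include. Purely as a hypothetical illustration, not the driver's actual definitions, helpers like these are commonly written for a power-of-two ring that keeps one slot empty:

/* Hypothetical sketch only -- the real myri_sbus.h may differ. */
#define TX_RING_SIZE	16			/* assumed power of two */

/* Advance a ring index with wrap-around via masking. */
#define NEXT_TX(idx)	(((idx) + 1) & (TX_RING_SIZE - 1))

/* Free slots between producer (tail) and consumer (head); one slot
 * stays empty so a full ring can be told apart from an empty one. */
#define TX_BUFFS_AVAIL(head, tail) \
	((((head) <= (tail)) ? (head) + TX_RING_SIZE : (head)) - (tail) - 1)
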
Example #4
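myri_rx() walks the receive-ack queue from head to tail. For each completed buffer it syncs the DMA mapping and validates the length and leading pad byte; bad frames take the drop_it path and the slot is handed back to the LANAI. Packets above RX_COPY_THRESHOLD are passed up whole and the ring slot is refilled with a new skb, while smaller ones are copied into a freshly allocated skb so the original ring buffer can be reused. The card's checksum is stored in skb->csum with ip_summed set to CHECKSUM_UNNECESSARY.
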
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
	struct recvq *rq	= mp->rq;
	struct recvq *rqa	= mp->rqack;
	int entry		= sbus_readl(&rqa->head);
	int limit		= sbus_readl(&rqa->tail);
	int drops;

	DRX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	drops = 0;
	DRX(("\n"));
	while (entry != limit) {
		struct myri_rxd *rxdack = &rqa->myri_rxd[entry];
		u32 csum		= sbus_readl(&rxdack->csum);
		int len			= sbus_readl(&rxdack->myri_scatters[0].len);
		int index		= sbus_readl(&rxdack->ctx);
		struct myri_rxd *rxd	= &rq->myri_rxd[rq->tail];
		struct sk_buff *skb	= mp->rx_skbs[index];

		/* Ack it. */
		sbus_writel(NEXT_RX(entry), &rqa->head);

		/* Check for errors. */
		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
		sbus_dma_sync_single(mp->myri_sdev,
				     sbus_readl(&rxd->myri_scatters[0].addr),
				     RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
			DRX(("ERROR["));
			mp->enet_stats.rx_errors++;
			if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
				DRX(("BAD_LENGTH] "));
				mp->enet_stats.rx_length_errors++;
			} else {
				DRX(("NO_PADDING] "));
				mp->enet_stats.rx_frame_errors++;
			}

			/* Return it to the LANAI. */
	drop_it:
			drops++;
			DRX(("DROP "));
			mp->enet_stats.rx_dropped++;
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
			goto next;
		}

		DRX(("len[%d] ", len));
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;
			u32 dma_addr;

			DRX(("BIGBUFF "));
			new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				DRX(("skb_alloc(FAILED) "));
				goto drop_it;
			}
			sbus_unmap_single(mp->myri_sdev,
					  sbus_readl(&rxd->myri_scatters[0].addr),
					  RX_ALLOC_SIZE,
					  SBUS_DMA_FROMDEVICE);
			mp->rx_skbs[index] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, RX_ALLOC_SIZE);
			dma_addr = sbus_map_single(mp->myri_sdev,
						   new_skb->data,
						   RX_ALLOC_SIZE,
						   SBUS_DMA_FROMDEVICE);
			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			/* Trim the original skb for the netif. */
			DRX(("trim(%d) ", len));
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len);

			DRX(("SMALLBUFF "));
			if (copy_skb == NULL) {
				DRX(("dev_alloc_skb(FAILED) "));
				goto drop_it;
			}
			/* DMA sync already done above. */
			copy_skb->dev = dev;
			DRX(("resv_and_put "));
			skb_put(copy_skb, len);
			memcpy(copy_skb->data, skb->data, len);

			/* Reuse original ring buffer. */
			DRX(("reuse "));
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			skb = copy_skb;
		}

		/* Just like the happy meal we get checksums from this card. */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */

		skb->protocol = myri_type_trans(skb, dev);
		DRX(("prot[%04x] netif_rx ", skb->protocol));
		netif_rx(skb);

		mp->enet_stats.rx_packets++;
	next:
		DRX(("NEXT\n"));
		entry = NEXT_RX(entry);
	}
}