Example #1
File: greth.c Project: 513855417/linux
static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);

		if (unlikely(status & GRETH_BD_EN)) {
			break;
		}

		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;

		} else {

			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {

				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;

			} else {
				skb_reserve(skb, NET_IP_ALIGN);

				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += pkt_len;
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
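
Both RX handlers follow the NAPI convention: they take a budget and return the number of descriptors processed. Below is a minimal sketch of the poll callback that would drive them. The field names (gbit_mac, netdev, napi) match the mainline driver, but the body is illustrative rather than the mainline greth_poll, and greth_enable_irqs_sketch() is a hypothetical stand-in for the driver's actual interrupt re-enable register writes.

/* Illustrative NAPI poll sketch: the mainline greth_poll also re-checks
 * the status register and can restart the RX/TX pass; this version only
 * shows how the two RX handlers above are selected and budgeted. */
static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth =
		container_of(napi, struct greth_private, napi);
	int work_done;

	/* Pick the handler matching the MAC variant. */
	if (greth->gbit_mac)
		work_done = greth_rx_gbit(greth->netdev, budget);
	else
		work_done = greth_rx(greth->netdev, budget);

	if (work_done < budget) {
		/* Under budget: leave polling mode and re-arm interrupts.
		 * greth_enable_irqs_sketch() is hypothetical; the real
		 * driver writes the interrupt-enable bits directly. */
		napi_complete_done(napi, work_done);
		greth_enable_irqs_sketch(greth);
	}
	return work_done;
}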
Example #2
File: greth.c Project: 513855417/linux
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate a new skb to replace the current one; not needed
		 * if the current skb can be reused */
		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						      newskb->data,
						      MAX_FRAME_SIZE + NET_IP_ALIGN,
						      DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

				skb_put(skb, pkt_len);

				if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				/* reusing current skb, so it is a drop */
				dev->stats.rx_dropped++;
			}
		} else if (bad) {
			/* Bad frame transfer; the skb is reused */
			dev->stats.rx_dropped++;
		} else {
			/* Failed to allocate a new skb. This is rather
			 * wasteful, but the current "filled" skb is reused, as
			 * if the transfer had failed. One could argue that RX
			 * descriptor table handling should be divided into
			 * cleaning and filling stages, as in the TX part of
			 * the driver.
			 */
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			/* reusing current skb, so it is a drop */
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);
		spin_lock_irqsave(&greth->devlock, flags);
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
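
Both gigabit receive paths decide between CHECKSUM_UNNECESSARY and no checksum via hw_checksummed(status). A sketch of that helper consistent with the mainline driver's greth.h follows; treat the exact GRETH_RXBD_* bit names here as assumptions.

/* Sketch of the descriptor-status checksum test (bit names assumed
 * from the mainline greth.h). A frame counts as hardware-checksummed
 * only if it is not an IP fragment and no layer flagged a checksum
 * error in the RX descriptor status word. */
static inline int hw_checksummed(u32 status)
{
	if (status & GRETH_RXBD_IP_FRAG)
		return 0;
	if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
		return 0;
	if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
		return 0;
	if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
		return 0;
	return 1;
}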
Example #3
File: greth.c
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current */
		newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);

		if (!bad && newskb) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						      newskb->data,
						      MAX_FRAME_SIZE + NET_IP_ALIGN,
						      DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

				skb_put(skb, pkt_len);

				if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				dev->stats.rx_dropped++;
			}
		} else {
			/* Bad frame or failed allocation: the current skb is
			 * reused and the packet is dropped. Free the unused
			 * replacement skb if it was allocated. */
			if (newskb)
				dev_kfree_skb(newskb);
			else if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);
		greth_enable_rx(greth);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
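
In all three examples the loop closes by rewriting the status word with GRETH_BD_EN | GRETH_BD_IE, adding GRETH_BD_WR on the last descriptor so the hardware wraps back to the ring base, and advancing the software index with NEXT_RX. A sketch of the ring helpers this relies on, assuming the power-of-two ring from the mainline greth.h (the constant values are assumptions):

/* Assumed ring constants and wrap helper: with a power-of-two ring
 * size, the next index is a simple mask, and the index equal to
 * GRETH_RXBD_NUM_MASK is the last descriptor, which carries the
 * wrap (GRETH_BD_WR) bit. */
#define GRETH_RXBD_NUM		128
#define GRETH_RXBD_NUM_MASK	(GRETH_RXBD_NUM - 1)
#define NEXT_RX(N)		(((N) + 1) & GRETH_RXBD_NUM_MASK)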