Example #1
/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = netdev_alloc_skb(dev, len + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf,
						 len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
}
Example #2
static void el_receive(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	int pkt_len;
	struct sk_buff *skb;

	pkt_len = inw(RX_LOW);

	if (el_debug > 4)
		pr_debug(" el_receive %d.\n", pkt_len);

	if (pkt_len < 60 || pkt_len > 1536) {
		if (el_debug)
			pr_debug("%s: bogus packet, length=%d\n",
						dev->name, pkt_len);
		dev->stats.rx_over_errors++;
		return;
	}

	/*
	 *	Command mode so we can empty the buffer
	 */

	outb(AX_SYS, AX_CMD);
	skb = netdev_alloc_skb(dev, pkt_len + 2);

	/*
	 *	Start of frame
	 */

	outw(0x00, GP_LOW);
	if (skb == NULL) {
		pr_info("%s: Memory squeeze, dropping packet.\n", dev->name);
		dev->stats.rx_dropped++;
		return;
	} else {
		skb_reserve(skb, 2);	/* Force 16 byte alignment */
		/*
		 *	The read increments through the bytes. The interrupt
		 *	handler will fix the pointer when it returns to
		 *	receive mode.
		 */
		insb(DATAPORT, skb_put(skb, pkt_len), pkt_len);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len;
	}
}
Example #3
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
			       priv->dev->name);
		return NULL;
	}

	skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_reserve(skb, 2);
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		desc->skb->ip_summed = CHECKSUM_NONE;
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			printk(KERN_DEBUG "%s: received packet:\n",
			       priv->dev->name);
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: low on skbs, dropping packet\n",
			       priv->dev->name);
		priv->dev->stats.rx_dropped++;
	}

	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}
Example #4
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
		if (skb == NULL)
			break;

		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fillup remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}
Example #5
File: ks8842.c Project: 7799/linux
static int __ks8842_start_new_rx_dma(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct scatterlist *sg = &ctl->sg;
	int err;

	ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
	if (ctl->skb) {
		sg_init_table(sg, 1);
		sg_dma_address(sg) = dma_map_single(adapter->dev,
			ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
		err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
		if (unlikely(err)) {
			sg_dma_address(sg) = 0;
			goto out;
		}

		sg_dma_len(sg) = DMA_BUFFER_SIZE;

		ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);

		if (!ctl->adesc) {
			err = -ENOMEM;
			goto out;
		}

		ctl->adesc->callback_param = netdev;
		ctl->adesc->callback = ks8842_dma_rx_cb;
		ctl->adesc->tx_submit(ctl->adesc);
	} else {
		err = -ENOMEM;
		sg_dma_address(sg) = 0;
		goto out;
	}

	return err;
out:
	if (sg_dma_address(sg))
		dma_unmap_single(adapter->dev, sg_dma_address(sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(sg) = 0;
	if (ctl->skb)
		dev_kfree_skb(ctl->skb);

	ctl->skb = NULL;

	printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
	return err;
}
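The DMA completion callback ks8842_dma_rx_cb registered above is not shown here. As a hedged sketch (not the driver's actual code), such a callback typically unmaps the buffer, hands the skb to the stack, and re-arms the next transfer; the length handling below is a simplifying placeholder, since the real driver trims the skb to the hardware-reported frame length:

/* Hedged sketch of a completion callback for the setup above; the real
 * ks8842_dma_rx_cb may differ. Field names come from the function shown. */
static void demo_rx_dma_complete(void *data)
{
	struct net_device *netdev = data;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct sk_buff *skb = ctl->skb;

	dma_unmap_single(adapter->dev, sg_dma_address(&ctl->sg),
			 DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(&ctl->sg) = 0;
	ctl->skb = NULL;

	skb_put(skb, DMA_BUFFER_SIZE);	/* real code uses the reported length */
	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);

	__ks8842_start_new_rx_dma(netdev);	/* re-arm the next transfer */
}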
Example #6
/**
 * gelic_net_prepare_rx_descr - reinitializes a rx descriptor
 * @card: card structure
 * @descr: descriptor to re-init
 *
 * return 0 on success, <0 on failure
 *
 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
 * Activate the descriptor state-wise
 */
static int gelic_net_prepare_rx_descr(struct gelic_net_card *card,
				      struct gelic_net_descr *descr)
{
	int offset;
	unsigned int bufsize;

	if (gelic_net_get_descr_status(descr) !=  GELIC_NET_DESCR_NOT_IN_USE) {
		dev_info(ctodev(card), "%s: ERROR status\n", __func__);
	}
	/* we need to round up the buffer size to a multiple of 128 */
	bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);

	/* and we need to have it 128 byte aligned, therefore we allocate a
	 * bit more */
	descr->skb = netdev_alloc_skb(card->netdev,
		bufsize + GELIC_NET_RXBUF_ALIGN - 1);
	if (!descr->skb) {
		descr->buf_addr = 0; /* tell DMAC don't touch memory */
		dev_info(ctodev(card),
			 "%s:allocate skb failed !!\n", __func__);
		return -ENOMEM;
	}
	descr->buf_size = bufsize;
	descr->dmac_cmd_status = 0;
	descr->result_size = 0;
	descr->valid_size = 0;
	descr->data_error = 0;

	offset = ((unsigned long)descr->skb->data) &
		(GELIC_NET_RXBUF_ALIGN - 1);
	if (offset)
		skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
	/* io-mmu-map the skb */
	descr->buf_addr = dma_map_single(ctodev(card), descr->skb->data,
					 GELIC_NET_MAX_MTU,
					 DMA_FROM_DEVICE);
	if (!descr->buf_addr) {
		dev_kfree_skb_any(descr->skb);
		descr->skb = NULL;
		dev_info(ctodev(card),
			 "%s:Could not iommu-map rx buffer\n", __func__);
		gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE);
		return -ENOMEM;
	} else {
		gelic_net_set_descr_status(descr, GELIC_NET_DESCR_CARDOWNED);
		return 0;
	}
}
Example #7
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
	memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}
Example #8
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
	if (unlikely(!skb))
		return NULL;

	skb->protocol = htons(ETH_P_CAN);
	skb->pkt_type = PACKET_BROADCAST;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
	memset(*cf, 0, sizeof(struct can_frame));

	return skb;
}
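A minimal usage sketch, not taken from any driver above: a CAN controller's receive path would pair alloc_can_skb() with netif_rx() roughly as follows, where demo_read_frame() is a hypothetical stand-in for the device-specific register reads.

/* Hedged sketch: demo_read_frame() is hypothetical. */
static void demo_can_rx(struct net_device *dev)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, &cf);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	demo_read_frame(dev, cf);	/* fill cf->can_id, can_dlc, data[] */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;
	netif_rx(skb);
}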
Example #9
static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
				       struct pci_dev *hwdev,
				       dma_addr_t *dma_handle)
{
	struct sk_buff *skb;
	skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
	if (!skb)
		return NULL;
	*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
				     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(hwdev, *dma_handle)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	skb_reserve(skb, 2);	/* make IP header 4byte aligned */
	return skb;
}
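For context, a helper like this is usually called from a ring-fill loop. A hedged sketch under assumed names (rx_skbs[], rx_dma[], and RX_RING_SIZE are illustrative, not from the driver above):

/* Hedged sketch: populate an RX ring with alloc_rxbuf_skb(). */
static int demo_fill_rx_ring(struct net_device *dev, struct pci_dev *pdev,
			     struct sk_buff **rx_skbs, dma_addr_t *rx_dma)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		rx_skbs[i] = alloc_rxbuf_skb(dev, pdev, &rx_dma[i]);
		if (!rx_skbs[i])
			return -ENOMEM;	/* caller unwinds entries 0..i-1 */
	}
	return 0;
}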
Example #10
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *priv = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	while (max_rx) {
		struct sk_buff *rx_skb, *new_skb;

		rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
		if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
			break;
		max_rx--;

		new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + NET_IP_ALIGN);
		/* Reuse the buffer on allocation failures */
		if (new_skb) {
			rx_skb = priv->rx_skb[rx];
			skb_put(rx_skb, RX_DMA_PLEN0(priv->rx[rx].rxd2));
			rx_skb->dev = dev;
			rx_skb->protocol = eth_type_trans(rx_skb, dev);
			rx_skb->ip_summed = CHECKSUM_NONE;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += rx_skb->len;
			netif_rx(rx_skb);

			priv->rx_skb[rx] = new_skb;
			skb_reserve(new_skb, NET_IP_ALIGN);
			priv->rx[rx].rxd1 = dma_map_single(NULL,
							   new_skb->data,
							   MAX_RX_LENGTH,
							   DMA_FROM_DEVICE);
		}

		priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
		wmb();
		ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
	}

	if (max_rx == 0)
		tasklet_schedule(&priv->rx_tasklet);
	else
		ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}
Example #11
static struct sk_buff *
vbb_to_skb(struct net_device *netdev, const void *vbb, const int tx,
	   const u32 des0, const u16 tag)
{
	struct voicebus *vb = voicebus_from_netdev(netdev);
	struct sk_buff *skb;
	struct voicebus_net_hdr *hdr;
	/* 0x88B5 is the local experimental ethertype */
	const u16 VOICEBUS_ETHTYPE = 0x88b5;
	const u8 BOARD_MAC[6] = {0x11, 0x11, 0x11, 0x11, 0x11, 0x11};
	const u8 HOST_MAC[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};

	skb = netdev_alloc_skb(vb->netdev,
		VOICEBUS_SFRAME_SIZE + sizeof(*hdr) + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = netdev;
	hdr = (struct voicebus_net_hdr *)skb_put(skb, VOICEBUS_SFRAME_SIZE +
						 sizeof(*hdr));
	/* Fill in the source and destination mac addresses appropriately
	 * depending on whether this is a packet we are transmitting or a packet
	 * that we have received. */
	if (tx) {
		memcpy(hdr->ethhdr.h_dest, BOARD_MAC, sizeof(BOARD_MAC));
		memcpy(hdr->ethhdr.h_source, HOST_MAC, sizeof(HOST_MAC));
		hdr->seq_num = cpu_to_be16(atomic_inc_return(
			&vb->tx_seqnum));
	} else {
		memcpy(hdr->ethhdr.h_dest, HOST_MAC, sizeof(HOST_MAC));
		memcpy(hdr->ethhdr.h_source, BOARD_MAC, sizeof(BOARD_MAC));
		hdr->seq_num = cpu_to_be16(atomic_inc_return(
			&vb->rx_seqnum));
	}
	memset(hdr->filler, 0, sizeof(hdr->filler));
	hdr->des0 = cpu_to_be32(des0);
	hdr->tag = cpu_to_be16(tag);
	hdr->ethhdr.h_proto = htons(VOICEBUS_ETHTYPE);
	/* copy the rest of the packet. */
	memcpy(skb->data + sizeof(*hdr), vbb, VOICEBUS_SFRAME_SIZE);
	skb->protocol = eth_type_trans(skb, netdev);

	return skb;
}
Example #12
/* Allocate and construct an SKB around a struct page.*/
static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
				     struct efx_nic *efx,
				     int hdr_len)
{
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL)) {
		EFX_ERR_RL(efx, "RX out of memory for skb\n");
		return NULL;
	}

	EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);

	skb->len = rx_buf->len;
	skb->truesize = rx_buf->len + sizeof(struct sk_buff);
	memcpy(skb->data, rx_buf->data, hdr_len);
	skb->tail += hdr_len;

	/* Append the remaining page onto the frag list */
	if (unlikely(rx_buf->len > hdr_len)) {
		struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
		frag->page = rx_buf->page;
		frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
		frag->size = skb->len - hdr_len;
		skb_shinfo(skb)->nr_frags = 1;
		skb->data_len = frag->size;
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		skb->data_len = 0;
	}

	/* Ownership has transferred from the rx_buf to skb */
	rx_buf->page = NULL;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}
Example #13
static void
ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
{
	/* Run around the RX ring, filling in any missing sk_buff's */
	int buff_n;

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (!ksp->rx_buffers[buff_n].skb) {
			struct sk_buff *skb =
				netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
			dma_addr_t mapping;

			ksp->rx_buffers[buff_n].skb = skb;
			if (skb == NULL) {
				/* Failed to allocate one, perhaps
				 * we'll try again later.
				 */
				break;
			}

			mapping = dma_map_single(ksp->dev, skb->data,
						 MAX_RXBUF_SIZE,
						 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
				/* Failed to DMA map this SKB, try later */
				dev_kfree_skb_irq(skb);
				ksp->rx_buffers[buff_n].skb = NULL;
				break;
			}
			ksp->rx_buffers[buff_n].dma_ptr = mapping;
			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;

			/* Record this into the DMA ring */
			ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
			ksp->rx_ring[buff_n].length =
				cpu_to_le32(MAX_RXBUF_SIZE);

			wmb();

			/* And give ownership over to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
		}
	}
}
Example #14
static int el3_rx(struct net_device *dev)
{
    unsigned int ioaddr = dev->base_addr;
    int worklimit = 32;
    short rx_status;

    netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
	       inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
    while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
		    worklimit > 0) {
	worklimit--;
	if (rx_status & 0x4000) { /* Error, update stats. */
	    short error = rx_status & 0x3800;
	    dev->stats.rx_errors++;
	    switch (error) {
	    case 0x0000:	dev->stats.rx_over_errors++; break;
	    case 0x0800:	dev->stats.rx_length_errors++; break;
	    case 0x1000:	dev->stats.rx_frame_errors++; break;
	    case 0x1800:	dev->stats.rx_length_errors++; break;
	    case 0x2000:	dev->stats.rx_frame_errors++; break;
	    case 0x2800:	dev->stats.rx_crc_errors++; break;
	    }
	} else {
	    short pkt_len = rx_status & 0x7ff;
	    struct sk_buff *skb;

	    skb = netdev_alloc_skb(dev, pkt_len + 5);

	    netdev_dbg(dev, "    Receiving packet size %d status %4.4x.\n",
		       pkt_len, rx_status);
	    if (skb != NULL) {
		skb_reserve(skb, 2);
		insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
			(pkt_len+3)>>2);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len;
	    } else {
		netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
			   pkt_len);
		dev->stats.rx_dropped++;
	    }
	}
	/* Pop the top of the Rx FIFO */
	outw(0x4000, ioaddr + EL3_CMD);	/* Rx discard */
    }
    return 0;
}
Example #15
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb = netdev_alloc_skb(dev, size);

			size -= 4; /* strip the CRC */

			if (likely(skb)) {
				skb_reserve(skb, 2); /* align TCP/IP header */
				void *src = phys_to_virt(bd.addr);
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");

				priv->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		priv->cur_rx++;
	}

	return count;
}
Example #16
static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff;
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
Example #17
/**
 *	ks8695_refill_rxbuffers - Re-fill the RX buffer ring
 *	@ksp: The device to refill
 *
 *	Iterates the RX ring of the device looking for empty slots.
 *	For each empty slot, we allocate and map a new SKB and give it
 *	to the hardware.
 *	This can be called from interrupt context safely.
 */
static void
ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
{
	/* Run around the RX ring, filling in any missing sk_buff's */
	int buff_n;

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (!ksp->rx_buffers[buff_n].skb) {
			struct sk_buff *skb =
				netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
			dma_addr_t mapping;

			ksp->rx_buffers[buff_n].skb = skb;
			if (skb == NULL) {
				/* Failed to allocate one, perhaps
				 * we'll try again later.
				 */
				break;
			}

			mapping = dma_map_single(ksp->dev, skb->data,
						 MAX_RXBUF_SIZE,
						 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
				/* Failed to DMA map this SKB, try later */
				dev_kfree_skb_irq(skb);
				ksp->rx_buffers[buff_n].skb = NULL;
				break;
			}
			ksp->rx_buffers[buff_n].dma_ptr = mapping;
			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;

			/* Record this into the DMA ring */
			ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
			ksp->rx_ring[buff_n].length =
				cpu_to_le32(MAX_RXBUF_SIZE);

			wmb();

			/* And give ownership over to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
		}
	}
}
Example #18
static void
rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = netdev_alloc_skb (dev, np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
			}
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64((u64)np->rx_buf_sz << 48);
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
Example #19
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *)ptr;
	struct raeth_priv *priv = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	while (max_rx) {
		struct sk_buff *rx_skb, *new_skb;

		rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
		if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
			break;
		max_rx--;

		rx_skb = priv->rx_skb[rx];
		skb_put(rx_skb, RX_DMA_PLEN0(priv->rx[rx].rxd2));
		rx_skb->dev = dev;
		rx_skb->protocol = eth_type_trans(rx_skb, dev);
		rx_skb->ip_summed = CHECKSUM_NONE;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += rx_skb->len;
		netif_rx(rx_skb);

		new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + 2);
		priv->rx_skb[rx] = new_skb;
		BUG_ON(!new_skb);
		skb_reserve(new_skb, 2);
		priv->rx[rx].rxd1 =
			dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH + 2,
			DMA_FROM_DEVICE);
		priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
		wmb();
		ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
	}
	if (max_rx == 0)
		tasklet_schedule(&priv->rx_tasklet);
	else
		ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_RX_DLY_INT,
			RAMIPS_FE_INT_ENABLE);
}
Example #20
static void elmc_rcv_int(struct net_device *dev)
{
	int status;
	unsigned short totlen;
	struct sk_buff *skb;
	struct rbd_struct *rbd;
	struct priv *p = netdev_priv(dev);

	for (; (status = p->rfd_top->status) & STAT_COMPL;) {
		rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);

		if (status & STAT_OK) {		/* frame received without error? */
			if ((totlen = rbd->status) & RBD_LAST) {	/* the first and the last buffer? */
				totlen &= RBD_MASK;	/* length of this frame */
				rbd->status = 0;
				skb = netdev_alloc_skb(dev, totlen + 2);
				if (skb != NULL) {
					skb_reserve(skb, 2);	/* 16 byte alignment */
					skb_put(skb, totlen);
					skb_copy_to_linear_data(skb,
						(char *) p->base + (unsigned long) rbd->buffer,
						totlen);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += totlen;
				} else {
					dev->stats.rx_dropped++;
				}
			} else {
				pr_warning("%s: received oversized frame.\n", dev->name);
				dev->stats.rx_dropped++;
			}
		} else {	/* frame !(ok), only with 'save-bad-frames' */
			pr_warning("%s: oops! rfd-error-status: %04x\n", dev->name, status);
			dev->stats.rx_errors++;
		}
		p->rfd_top->status = 0;
		p->rfd_top->last = RFD_SUSP;
		p->rfd_last->last = 0;	/* delete RU_SUSP  */
		p->rfd_last = p->rfd_top;
		p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next);	/* step to next RFD */
	}
}
Example #21
static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
	struct sk_buff *skb;
	int i, loop, cnt = 0;

	for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
		skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
		if (!skb)
			break;
		qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
		skb_put(skb, QLCNIC_ILB_PKT_SIZE);

		adapter->diag_cnt = 0;
		qlcnic_xmit_frame(skb, adapter->netdev);

		loop = 0;
		do {
			msleep(1);
			qlcnic_process_rcv_ring_diag(sds_ring);
			if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
				break;
		} while (!adapter->diag_cnt);

		dev_kfree_skb_any(skb);

		if (!adapter->diag_cnt)
			QLCDB(adapter, DRV,
			"LB Test: packet #%d was not received\n", i + 1);
		else
			cnt++;
	}
	if (cnt != i) {
		dev_warn(&adapter->pdev->dev, "LB Test failed\n");
		if (mode != QLCNIC_ILB_MODE) {
			dev_warn(&adapter->pdev->dev,
				"WARNING: Please make sure external"
				"loopback connector is plugged in\n");
		}
		return -1;
	}
	return 0;
}
Example #22
static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
				   struct skb_frag_struct *frags,
				   int len, int true_size,
				   void *mac_hdr,
				   int hlen, __wsum sum,
				   u32 ip_summed)
{
	struct sk_buff *skb;
	struct skb_frag_struct *skb_frags;
	int data_len = len;
	int hdr_len = min(len, hlen);

	skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
	if (!skb)
		return NULL;

	skb_reserve(skb, lro_mgr->frag_align_pad);
	skb->len = len;
	skb->data_len = len - hdr_len;
	skb->truesize += true_size;
	skb->tail += hdr_len;

	memcpy(skb->data, mac_hdr, hdr_len);

	skb_frags = skb_shinfo(skb)->frags;
	while (data_len > 0) {
		*skb_frags = *frags;
		data_len -= frags->size;
		skb_frags++;
		frags++;
		skb_shinfo(skb)->nr_frags++;
	}

	skb_shinfo(skb)->frags[0].page_offset += hdr_len;
	skb_shinfo(skb)->frags[0].size -= hdr_len;

	skb->ip_summed = ip_summed;
	skb->csum = sum;
	skb->protocol = eth_type_trans(skb, lro_mgr->dev);
	return skb;
}
Example #23
int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
	struct sk_buff *skb;
	int i, loop, cnt = 0;

	for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
		skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
		if (!skb)
			break;
		qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
		skb_put(skb, QLCNIC_ILB_PKT_SIZE);
		adapter->ahw->diag_cnt = 0;
		qlcnic_xmit_frame(skb, adapter->netdev);
		loop = 0;

		do {
			msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC);
			qlcnic_process_rcv_ring_diag(sds_ring);
			if (loop++ > QLCNIC_LB_PKT_POLL_COUNT)
				break;
		} while (!adapter->ahw->diag_cnt);

		dev_kfree_skb_any(skb);

		if (!adapter->ahw->diag_cnt)
			dev_warn(&adapter->pdev->dev,
				 "LB Test: packet #%d was not received\n",
				 i + 1);
		else
			cnt++;
	}
	if (cnt != i) {
		dev_err(&adapter->pdev->dev,
			"LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
		if (mode != QLCNIC_ILB_MODE)
			dev_warn(&adapter->pdev->dev,
				 "WARNING: Please check loopback cable\n");
		return -1;
	}
	return 0;
}
Example #24
/*
 * MPC5121 FEC requires 4-byte alignment for TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;

	/* Alloc new skb */
	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb)
		return NULL;

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free an old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
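The caller side of this workaround is not shown above; here is a hedged sketch of how an ndo_start_xmit implementation might use it (note the helper frees the original skb only on success):

/* Hedged sketch: swap in an aligned copy before DMA descriptor setup. */
static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *aligned;

	if ((unsigned long)skb->data & 0x3) {
		aligned = tx_skb_align_workaround(dev, skb);
		if (!aligned) {
			/* the helper did not free the original on failure */
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		skb = aligned;
	}

	/* ...descriptor setup and hardware kick would follow here... */
	return NETDEV_TX_OK;
}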
Example #25
int nm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{    
	struct nm_adapter *adapter = netdev_priv(netdev);
	struct nm_packet *pkt = (struct nm_packet *)(skb->data - sizeof(pkt->port));
	struct sk_buff *new_skb = NULL;

	adapter->stats.tx_packets++;
	adapter->stats.tx_bytes += skb->len;

	new_skb = netdev_alloc_skb(nm_ctl_netdev, NM_PKT_SIZE);
	if (!new_skb) {
		dev_kfree_skb(skb);
		return 0;
	}
	skb_reserve(new_skb, 2 + 16);
	skb_put(new_skb, skb->len);
	memcpy(new_skb->data, skb->data, skb->len);
	skb_set_nm_port(new_skb, adapter->port);
	skb_set_eth(new_skb);
	show_skb(new_skb);
	//printk("%s xmit!send skb protocol:0x%04X,len:%d\n",netdev->name,skb->protocol,skb->len);
	dev_kfree_skb(skb);

	nm_ctl_netdev->netdev_ops->ndo_start_xmit(new_skb,nm_ctl_netdev);
	return 0;
}
Example #26
File: rx.c Project: 71eh/open80211s
/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having only inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding and checksum */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->is_page = false;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  skb->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}
Example #27
struct sk_buff *alloc_canfd_skb(struct net_device *dev,
				struct canfd_frame **cfd)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
			       sizeof(struct canfd_frame));
	if (unlikely(!skb))
		return NULL;

	skb->protocol = htons(ETH_P_CANFD);
	skb->pkt_type = PACKET_BROADCAST;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;

	*cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
	memset(*cfd, 0, sizeof(struct canfd_frame));

	return skb;
}
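As with alloc_can_skb() in Example #8, a hedged usage sketch; the ID, length, and demo_read_payload() below are illustrative assumptions:

/* Hedged sketch: deliver a received CAN FD frame. */
static void demo_canfd_rx(struct net_device *dev)
{
	struct canfd_frame *cfd;
	struct sk_buff *skb;

	skb = alloc_canfd_skb(dev, &cfd);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	cfd->can_id = 0x123;		/* illustrative values */
	cfd->len = 64;
	cfd->flags |= CANFD_BRS;	/* frame used bit-rate switching */
	demo_read_payload(dev, cfd->data, cfd->len);	/* hypothetical I/O */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cfd->len;
	netif_rx(skb);
}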
Example #28
static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].skb) {
			dma_addr_t dma_addr;
			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

			if (skb == NULL)
				return -ENOMEM;
			skb_reserve(skb, 2);
			dma_addr = dma_map_single(dev->dev.parent,
						  skb->data - 2,
						  PKT_BUF_SZ, DMA_FROM_DEVICE);
			sp->rx_desc[i].skb = skb;
			sp->rx_desc[i].rdma.pbuf = dma_addr;
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
	return 0;
}
Example #29
int eth0_pack_rcv(struct sk_buff *skb,struct net_device *dev,struct packet_type *pt, struct net_device *orig_dev)
{
	struct nm_packet *pkt = (struct nm_packet *)skb->data;
	struct net_device *dev_nm = nm_dev[ntohs(pkt->port)];
	struct nm_adapter *adapter = netdev_priv(dev_nm);
	struct sk_buff *new_skb;
	//printk("skb:%p,len:%d,skb->protocol:%X,skb_type:%d,head-data:%d, head:0x%lx, data:0x%lx\n",
				//	skb,skb->len,skb->protocol,skb->pkt_type,skb->head-skb->data, 
				//(unsigned long)skb->head, (unsigned long)skb->data);

	skb_pull(skb, sizeof(pkt->port));

	new_skb = netdev_alloc_skb(dev_nm, NM_PKT_SIZE);
	if (!new_skb) {
		adapter->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}
	skb_reserve(new_skb, 2);
	memcpy(skb_put(new_skb, skb->len), skb->data, skb->len);
	adapter->stats.rx_packets++;
	adapter->stats.rx_bytes += skb->len;
	new_skb->protocol = eth_type_trans(new_skb, dev_nm);
	//printk("%s recv pkt ! skb protocol:0x%04X,len:%d\n",dev_nm->name,new_skb->protocol,skb->len);
	dev_kfree_skb(skb);
	netif_receive_skb(new_skb);
	return 0;
}
Example #30
/**
 * nfp_net_rx_alloc_one() - Allocate and map skb for RX
 * @rx_ring:	RX ring structure of the skb
 * @dma_addr:	Pointer to storage for DMA address (output param)
 * @fl_bufsz:	size of freelist buffers
 *
 * This function will allocate a new skb and map it for DMA.
 *
 * Return: allocated skb or NULL on failure.
 */
static struct sk_buff *
nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
		     unsigned int fl_bufsz)
{
	struct nfp_net *nn = rx_ring->r_vec->nfp_net;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
	if (!skb) {
		nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
		return NULL;
	}

	*dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
				   fl_bufsz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
		dev_kfree_skb_any(skb);
		nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return skb;
}
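A hedged sketch of a ring-fill loop consuming this helper; rx_ring->cnt and the rxbufs[] array are assumed field names, not confirmed by the excerpt above:

/* Hedged sketch: fill a freelist with nfp_net_rx_alloc_one(). */
static int demo_rx_ring_fill(struct nfp_net_rx_ring *rx_ring,
			     unsigned int fl_bufsz)
{
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < rx_ring->cnt; i++) {
		skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr, fl_bufsz);
		if (!skb)
			return -ENOMEM;	/* caller frees buffers 0..i-1 */
		rx_ring->rxbufs[i].skb = skb;
		rx_ring->rxbufs[i].dma_addr = dma_addr;
	}
	return 0;
}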