Example #1
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: Position in iov to start copying from
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *skb;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz);
		if (unlikely(!skb))
			return -ENOMEM;
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
						 dsz))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax);
	if (!skb)
		return -ENOMEM;
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;
		offset += pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}
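
For illustration (not part of the TIPC source), here is a small standalone sketch of the size accounting done by the fragmentation loop above: the first fragment gives up room for both the fragment header and the original message header, while later fragments only carry the fragment header. The 40-byte FRAG_HDR stands in for INT_H_SIZE and the values in main() are arbitrary.

/* standalone illustration, not kernel code */
#include <stdio.h>

#define FRAG_HDR 40	/* stand-in for INT_H_SIZE */

static void count_fragments(int mhsz, int dsz, int pktmax)
{
	int drem = dsz;
	int pktrem = pktmax - FRAG_HDR - mhsz;	/* payload room in fragment 1 */
	int pktno = 1;

	while (drem > 0) {
		int chunk = drem < pktrem ? drem : pktrem;

		printf("fragment %d carries %d payload bytes\n", pktno, chunk);
		drem -= chunk;
		pktno++;
		pktrem = pktmax - FRAG_HDR;	/* later fragments: header only */
	}
}

int main(void)
{
	count_fragments(24, 4000, 1500);
	return 0;
}
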
Example #2
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct sk_buff *new_skb;
	eg_cache_entry *eg;
	struct mpoa_client *mpc;
	__be32 tag;
	char *tmp;

	ddprintk("(%s)\n", dev->name);
	if (skb == NULL) {
		dprintk("(%s) null skb, closing VCC\n", dev->name);
		mpc_vcc_close(vcc, dev);
		return;
	}

	skb->dev = dev;
	if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
		   sizeof(struct llc_snap_hdr)) == 0) {
		struct sock *sk = sk_atm(vcc);

		dprintk("(%s) control packet arrived\n", dev->name);
		/* Pass control packets to daemon */
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return;
	}

	/* data coming over the shortcut */
	atm_return(vcc, skb->truesize);

	mpc = find_mpc_by_lec(dev);
	if (mpc == NULL) {
		pr_info("(%s) unknown MPC\n", dev->name);
		return;
	}

	if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
		   sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
		ddprintk("(%s) tagged data packet arrived\n", dev->name);

	} else if (memcmp(skb->data, &llc_snap_mpoa_data,
			  sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
		pr_info("(%s) Unsupported non-tagged data packet arrived.  Purging\n",
			dev->name);
		dev_kfree_skb_any(skb);
		return;
	} else {
		pr_info("(%s) garbage arrived, purging\n", dev->name);
		dev_kfree_skb_any(skb);
		return;
	}

	tmp = skb->data + sizeof(struct llc_snap_hdr);
	tag = *(__be32 *)tmp;

	eg = mpc->eg_ops->get_by_tag(tag, mpc);
	if (eg == NULL) {
		pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n",
			dev->name, tag);
		purge_egress_shortcut(vcc, NULL);
		dev_kfree_skb_any(skb);
		return;
	}

	/*
	 * See if ingress MPC is using shortcut we opened as a return channel.
	 * This means we have a bi-directional vcc opened by us.
	 */
	if (eg->shortcut == NULL) {
		eg->shortcut = vcc;
		pr_info("(%s) egress SVC in use\n", dev->name);
	}

	skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag));
					/* get rid of LLC/SNAP header */
	new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length);
					/* LLC/SNAP is shorter than MAC header :( */
	dev_kfree_skb_any(skb);
	if (new_skb == NULL) {
		mpc->eg_ops->put(eg);
		return;
	}
	skb_push(new_skb, eg->ctrl_info.DH_length);     /* add MAC header */
	skb_copy_to_linear_data(new_skb, eg->ctrl_info.DLL_header,
				eg->ctrl_info.DH_length);
	new_skb->protocol = eth_type_trans(new_skb, dev);
	skb_reset_network_header(new_skb);

	eg->latest_ip_addr = ip_hdr(new_skb)->saddr;
	eg->packets_rcvd++;
	mpc->eg_ops->put(eg);

	memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
	netif_rx(new_skb);
}
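
The tail of mpc_push() shows a common re-encapsulation pattern: strip the old header, make sure there is enough headroom for the new one, then prepend and fill it. Below is a minimal kernel-style sketch of just that pattern; swap_encap() and its parameters are hypothetical and the snippet is not buildable on its own.

/* kernel-style sketch, not from the MPOA code */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static struct sk_buff *swap_encap(struct sk_buff *skb, unsigned int old_hdr_len,
				  const void *new_hdr, unsigned int new_hdr_len)
{
	struct sk_buff *nskb;

	skb_pull(skb, old_hdr_len);			/* drop the old encapsulation */
	nskb = skb_realloc_headroom(skb, new_hdr_len);	/* copy with enough headroom */
	dev_kfree_skb_any(skb);				/* original no longer needed */
	if (!nskb)
		return NULL;
	skb_push(nskb, new_hdr_len);			/* make room at the front */
	skb_copy_to_linear_data(nskb, new_hdr, new_hdr_len); /* write the new header */
	return nskb;
}
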
Example #3
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

	skb->dst = dst_clone(pkt->dst);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_pull(skb, sizeof(struct ipv6hdr));
	}

	if (mroute6_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
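
The non-PIM branch above layers a report skb: a copy of the received IPv6 header first, then the private message, with the header offsets recorded along the way. A condensed kernel-style sketch of the same idea follows; struct my_msg and build_report() are made up for the example and the offset bookkeeping is written out explicitly.

/* kernel-style sketch with hypothetical names, not the ip6mr code */
#include <linux/skbuff.h>
#include <linux/ipv6.h>

struct my_msg {			/* hypothetical message layout */
	__u8 type;
	__u8 pad[7];
};

static struct sk_buff *build_report(const struct sk_buff *pkt, __u8 type)
{
	struct sk_buff *skb;
	struct my_msg *msg;

	skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* copy of the original IPv6 header goes first */
	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/* our message follows the copied header */
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	msg = (struct my_msg *)skb_put(skb, sizeof(*msg));
	memset(msg, 0, sizeof(*msg));
	msg->type = type;

	return skb;
}
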
Example #4
static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 *	Is our receive engine in DMA mode
	 */

	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */

		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 *	Complete this DMA. Necessary to find the length
		 */

		flags=claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;

		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */

		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we don't re-enable the DMA irq until
			   after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n",
			       c->netdevice->name);

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */

		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n",
			       c->netdevice->name);
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter but
		 *	if you build a system where the sync irq isn't blocked
		 *	by the kernel IRQ disable then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 *
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
			       c->netdevice->name);
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}
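
Most examples in this collection follow the same receive pattern as the DMA branch above: allocate a fresh skb, reserve a couple of bytes so the IP header ends up aligned, copy the frame out of the driver-owned buffer, then hand it to the stack. A generic kernel-style sketch of that pattern (names are illustrative, not a real driver):

/* kernel-style sketch of the common rx copy path, illustrative only */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void deliver_frame(struct net_device *dev, const void *buf, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);		/* keep the IP header aligned */
	skb_put(skb, len);			/* account for the payload */
	skb_copy_to_linear_data(skb, buf, len);	/* copy out of the driver buffer */
	skb->protocol = eth_type_trans(skb, dev); /* sets skb->dev, consumes the MAC header */
	netif_rx(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}
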
Example #5
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;
	int	pkt_received = 0;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, ndev);

			/* Get receive timestamp from the skb */
			if (fep->hwts_rx_en && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps *shhwtstamps =
							    skb_hwtstamps(skb);
				unsigned long flags;
				struct bufdesc_ex *ebdp =
					(struct bufdesc_ex *)bdp;

				memset(shhwtstamps, 0, sizeof(*shhwtstamps));

				spin_lock_irqsave(&fep->tmreg_lock, flags);
				shhwtstamps->hwtstamp = ns_to_ktime(
				    timecounter_cyc2time(&fep->tc, ebdp->ts));
				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			}

			if (!skb_defer_rx_timestamp(skb))
				napi_gro_receive(&fep->napi, skb);
		}

		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	return pkt_received;
}
Example #6
static void sun3_82586_rcv_int(struct net_device *dev)
{
	int status,cnt=0;
	unsigned short totlen;
	struct sk_buff *skb;
	struct rbd_struct *rbd;
	struct priv *p = netdev_priv(dev);

	if(debuglevel > 0)
		printk("R");

	for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
	{
			rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);

			if(status & RFD_OK) /* frame received without error? */
			{
				if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? */
				{
					totlen &= RBD_MASK; /* length of this frame */
					rbd->status = 0;
					skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
					if(skb != NULL)
					{
						skb_reserve(skb,2);
						skb_put(skb,totlen);
						skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
						skb->protocol=eth_type_trans(skb,dev);
						netif_rx(skb);
						p->stats.rx_packets++;
					}
					else
						p->stats.rx_dropped++;
				}
				else
				{
					int rstat;
						 /* free all RBD's until RBD_LAST is set */
					totlen = 0;
					while(!((rstat=swab16(rbd->status)) & RBD_LAST))
					{
						totlen += rstat & RBD_MASK;
						if(!rstat)
						{
							printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
							break;
						}
						rbd->status = 0;
						rbd = (struct rbd_struct *) make32(rbd->next);
					}
					totlen += rstat & RBD_MASK;
					rbd->status = 0;
					printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
					p->stats.rx_dropped++;
			 }
		}
		else /* frame !(ok), only with 'save-bad-frames' */
		{
			printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
			p->stats.rx_errors++;
		}
		p->rfd_top->stat_high = 0;
		p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
		p->rfd_top->rbd_offset = 0xffff;
		p->rfd_last->last = 0;				/* delete RFD_SUSP	*/
		p->rfd_last = p->rfd_top;
		p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
		p->scb->rfa_offset = make16(p->rfd_top);

		if(debuglevel > 0)
			printk("%d",cnt++);
	}

	if(automatic_resume)
	{
		WAIT_4_SCB_CMD();
		p->scb->cmd_ruc = RUC_RESUME;
		sun3_attn586();
		WAIT_4_SCB_CMD_RUC();
	}

#ifdef WAIT_4_BUSY
	{
		int i;
		for(i=0;i<1024;i++)
		{
			if(p->rfd_top->status)
				break;
			DELAY_16();
			if(i == 1023)
				printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
		}
	}
#endif

#if 0
	if(!at_least_one)
	{
		int i;
		volatile struct rfd_struct *rfds=p->rfd_top;
		volatile struct rbd_struct *rbds;
		printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
		for(i=0;i< (p->num_recv_buffs+4);i++)
		{
			rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
			printk("%04x:%04x ",rfds->status,rbds->status);
			rfds = (struct rfd_struct *) make32(rfds->next);
		}
		printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
		printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
	}
	old_at_least = at_least_one;
#endif

	if(debuglevel > 0)
		printk("r");
}
Example #7
static inline void
async_bump(struct net_device *dev,
	   struct net_device_stats *stats,
	   iobuff_t *rx_buff)
{
	struct sk_buff *newskb;
	struct sk_buff *dataskb;
	int		docopy;

	/* Check if we need to copy the data to a new skb or not.
	 * If the driver doesn't use ZeroCopy Rx, we have to do it.
	 * With ZeroCopy Rx, the rx_buff already point to a valid
	 * skb. But, if the frame is small, it is more efficient to
	 * copy it to save memory (copy will be fast anyway - that's
	 * called Rx-copy-break). Jean II */
	docopy = ((rx_buff->skb == NULL) ||
		  (rx_buff->len < IRDA_RX_COPY_THRESHOLD));

	/* Allocate a new skb */
	newskb = dev_alloc_skb(docopy ? rx_buff->len + 1 : rx_buff->truesize);
	if (!newskb)  {
		stats->rx_dropped++;
		/* We could deliver the current skb if doing ZeroCopy Rx,
		 * but this would stall the Rx path. Better drop the
		 * packet... Jean II */
		return;
	}

	/* Align IP header to 20 bytes (i.e. increase skb->data)
	 * Note this is only useful with IrLAN, as PPP has a variable
	 * header size (2 or 1 bytes) - Jean II */
	skb_reserve(newskb, 1);

	if(docopy) {
		/* Copy data without CRC (length already checked) */
		skb_copy_to_linear_data(newskb, rx_buff->data,
					rx_buff->len - 2);
		/* Deliver this skb */
		dataskb = newskb;
	} else {
		/* We are using ZeroCopy. Deliver old skb */
		dataskb = rx_buff->skb;
		/* And hook the new skb to the rx_buff */
		rx_buff->skb = newskb;
		rx_buff->head = newskb->data;	/* NOT newskb->head */
		//printk(KERN_DEBUG "ZeroCopy : len = %d, dataskb = %p, newskb = %p\n", rx_buff->len, dataskb, newskb);
	}

	/* Set proper length on skb (without CRC) */
	skb_put(dataskb, rx_buff->len - 2);

	/* Feed it to IrLAP layer */
	dataskb->dev = dev;
	skb_reset_mac_header(dataskb);
	dataskb->protocol = htons(ETH_P_IRDA);

	netif_rx(dataskb);

	stats->rx_packets++;
	stats->rx_bytes += rx_buff->len;

	/* Clean up rx_buff (redundant with async_unwrap_bof() ???) */
	rx_buff->data = rx_buff->head;
	rx_buff->len = 0;
}
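
The comment at the top of async_bump() describes the rx-copy-break heuristic: copy small frames into a right-sized skb so the large receive buffer can be recycled, but deliver large frames without copying. A minimal kernel-style sketch of just that decision, with an illustrative threshold and a hypothetical helper name:

/* kernel-style sketch of rx-copy-break, illustrative only */
#include <linux/skbuff.h>

#define RX_COPYBREAK 256	/* illustrative threshold */

static struct sk_buff *maybe_copybreak(struct sk_buff *big, unsigned int len)
{
	struct sk_buff *small;

	if (len >= RX_COPYBREAK)
		return big;			/* deliver the original buffer */

	small = dev_alloc_skb(len + NET_IP_ALIGN);
	if (!small)
		return big;			/* fall back to the original */

	skb_reserve(small, NET_IP_ALIGN);
	skb_put(small, len);
	skb_copy_to_linear_data(small, big->data, len);
	/* caller keeps "big" around for the next receive instead of freeing it */
	return small;
}
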
Example #8
/*
 *	This is where all X.25 information frames pass.
 *
 *      Returns the amount of user data bytes sent on success
 *      or a negative error code on failure.
 */
int x25_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char header[X25_EXT_MIN_LEN];
	int err, frontlen, len;
	int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT;
	struct x25_sock *x25 = x25_sk(sk);
	int header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN :
						    X25_STD_MIN_LEN;
	int max_len = x25_pacsize_to_bytes(x25->facilities.pacsize_out);

	if (skb->len - header_len > max_len) {
		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, header_len);
		skb_pull(skb, header_len);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			release_sock(sk);
			skbn = sock_alloc_send_skb(sk, frontlen + max_len,
						   noblock, &err);
			lock_sock(sk);
			if (!skbn) {
				if (err == -EWOULDBLOCK && noblock){
					kfree_skb(skb);
					return sent;
				}
				SOCK_DEBUG(sk, "x25_output: fragment alloc"
					       " failed, err=%d, %d bytes "
					       "sent\n", err, sent);
				return err;
			}

			skb_reserve(skbn, frontlen);

			len = max_len > skb->len ? skb->len : max_len;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);

			/* Duplicate the Header */
			skb_push(skbn, header_len);
			skb_copy_to_linear_data(skbn, header, header_len);

			if (skb->len > 0) {
				if (x25->neighbour->extended)
					skbn->data[3] |= X25_EXT_M_BIT;
				else
					skbn->data[2] |= X25_STD_M_BIT;
			}

			skb_queue_tail(&sk->sk_write_queue, skbn);
			sent += len;
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);
		sent = skb->len - header_len;
	}
	return sent;
}
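
A standalone sketch of the bookkeeping x25_output() performs when a message exceeds the negotiated packet size: the saved header is replayed in front of every fragment, each fragment carries at most max_len data bytes, and every fragment except the last gets the M (more data) bit. The values passed in main() are only examples (3 bytes is the standard modulo-8 header length).

/* standalone illustration, not kernel code */
#include <stdio.h>

static void x25_fragments(int skb_len, int header_len, int max_len)
{
	int remaining = skb_len - header_len;

	while (remaining > 0) {
		int chunk = remaining < max_len ? remaining : max_len;

		remaining -= chunk;
		printf("fragment: %d header + %d data bytes, M bit %s\n",
		       header_len, chunk, remaining ? "set" : "clear");
	}
}

int main(void)
{
	x25_fragments(1000, 3, 128);	/* example: 128-byte packet size */
	return 0;
}
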
Example #9
static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* Process the descriptors that the chip has completed */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Get the frame status and length */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update error statistics and drop the frame on receive errors */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
	 			np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Pass large packets up directly; copy small ones into a fresh skb */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
						  np->rx_skbuff[entry]->data,
						  pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* Hardware checksum verification (disabled) */
			if (np->pdev->pci_rev_id >= 0x0c &&
				!(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Refill the receive ring with fresh buffers */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Slots whose skb was handed up need a new buffer */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}
Example #10
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
scc_enet_rx(struct net_device *dev)
{
    struct	scc_enet_private *cep;
    volatile cbd_t	*bdp;
    struct	sk_buff *skb;
    ushort	pkt_len;

    cep = dev->priv;

    /* First, grab all of the stats for the incoming packet.
     * These get messed up if we get called due to a busy condition.
     */
    bdp = cep->cur_rx;

    for (;;) {
        if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
            break;

#ifndef final_version
        /* Since we have allocated space to hold a complete frame, both
         * the first and last indicators should be set.
         */
        if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
                (BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
            printk("CPM ENET: rcv is not first+last\n");
#endif

        /* Frame too long or too short.
        */
        if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
            cep->stats.rx_length_errors++;
        if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
            cep->stats.rx_frame_errors++;
        if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
            cep->stats.rx_crc_errors++;
        if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
            cep->stats.rx_crc_errors++;

        /* Report late collisions as a frame error.
         * On this error, the BD is closed, but we don't know what we
         * have in the buffer.  So, just drop this frame on the floor.
         */
        if (bdp->cbd_sc & BD_ENET_RX_CL) {
            cep->stats.rx_frame_errors++;
        }
        else {

            /* Process the incoming frame.
            */
            cep->stats.rx_packets++;
            pkt_len = bdp->cbd_datlen;
            cep->stats.rx_bytes += pkt_len;

            /* This does 16 byte alignment, much more than we need.
             * The packet length includes FCS, but we don't want to
             * include that when passing upstream as it messes up
             * bridging applications.
             */
            skb = dev_alloc_skb(pkt_len-4);

            if (skb == NULL) {
                printk("%s: Memory squeeze, dropping packet.\n", dev->name);
                cep->stats.rx_dropped++;
            }
            else {
                skb_put(skb,pkt_len-4);	/* Make room */
                skb_copy_to_linear_data(skb,
                                        (unsigned char *)__va(bdp->cbd_bufaddr),
                                        pkt_len-4);
                skb->protocol=eth_type_trans(skb,dev);
                netif_rx(skb);
            }
        }

        /* Clear the status flags for this buffer.
        */
        bdp->cbd_sc &= ~BD_ENET_RX_STATS;

        /* Mark the buffer empty.
        */
        bdp->cbd_sc |= BD_ENET_RX_EMPTY;

        /* Update BD pointer to next entry.
        */
        if (bdp->cbd_sc & BD_ENET_RX_WRAP)
            bdp = cep->rx_bd_base;
        else
            bdp++;

    }
    cep->cur_rx = (cbd_t *)bdp;

    return 0;
}
Example #11
static int udp_uncompress(struct sk_buff *skb, size_t needed)
{
	u8 tmp = 0, val = 0;
	struct udphdr uh;
	bool fail;
	int err;

	fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));

	pr_debug("UDP header uncompression\n");
	switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
	case LOWPAN_NHC_UDP_CS_P_00:
		fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
		fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
		break;
	case LOWPAN_NHC_UDP_CS_P_01:
		fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
		fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
		uh.dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
		break;
	case LOWPAN_NHC_UDP_CS_P_10:
		fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
		uh.source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
		fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
		break;
	case LOWPAN_NHC_UDP_CS_P_11:
		fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
		uh.source = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val >> 4));
		uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val & 0x0f));
		break;
	default:
		BUG();
	}

	pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
		 ntohs(uh.source), ntohs(uh.dest));

	/* checksum */
	if (tmp & LOWPAN_NHC_UDP_CS_C) {
		pr_debug_ratelimited("checksum elided currently not supported\n");
		fail = true;
	} else {
		fail |= lowpan_fetch_skb(skb, &uh.check, sizeof(uh.check));
	}

	if (fail)
		return -EINVAL;

	/* UDP length needs to be inferred from the lower layers
	 * here, we obtain the hint from the remaining size of the
	 * frame
	 */
	uh.len = htons(skb->len + sizeof(struct udphdr));
	pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len));

	/* replace the compressed UDP head by the uncompressed UDP
	 * header
	 */
	err = skb_cow(skb, needed);
	if (unlikely(err))
		return err;

	skb_push(skb, sizeof(struct udphdr));
	skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));

	return 0;
}
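
A standalone illustration of the port reconstruction in the switch above. The constants follow RFC 6282, and the LOWPAN_NHC_UDP_*_PORT macros used in this file are assumed to have these values: 4-bit-compressed ports live at 0xF0B0 + nibble, 8-bit-compressed ports at 0xF000 + byte.

/* standalone illustration, not kernel code */
#include <stdio.h>
#include <stdint.h>

#define UDP_4BIT_PORT 0xF0B0	/* assumed value of LOWPAN_NHC_UDP_4BIT_PORT */
#define UDP_8BIT_PORT 0xF000	/* assumed value of LOWPAN_NHC_UDP_8BIT_PORT */

int main(void)
{
	uint8_t val = 0x34;	/* example byte from the LOWPAN_NHC_UDP_CS_P_11 case */
	uint16_t source = UDP_4BIT_PORT + (val >> 4);	/* 0xF0B3 */
	uint16_t dest = UDP_4BIT_PORT + (val & 0x0f);	/* 0xF0B4 */

	printf("src=0x%04x dst=0x%04x\n", source, dest);
	return 0;
}
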
Example #12
static void fifo_dma_copy_to_linear_skb(struct ccat_eth_fifo *const fifo,
					struct sk_buff *skb, const size_t len)
{
	skb_copy_to_linear_data(skb, fifo->dma.next->data, len);
}
Example #13
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data. */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else	{
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len+1,GFP_ATOMIC);
		if (!skb)  {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}
Example #14
static int sendup_buffer (struct net_device *dev)
{
	/* on entry, command is in ltdmacbuf, data in ltdmabuf */
	/* called from idle, non-reentrant */

	int dnode, snode, llaptype, len; 
	int sklen;
	struct sk_buff *skb;
	struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;

	if (ltc->command != LT_RCVLAP) {
		printk("unknown command 0x%02x from ltpc card\n",ltc->command);
		return(-1);
	}
	dnode = ltc->dnode;
	snode = ltc->snode;
	llaptype = ltc->laptype;
	len = ltc->length; 

	sklen = len;
	if (llaptype == 1) 
		sklen += 8;  /* correct for short ddp */
	if(sklen > 800) {
		printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
			dev->name,sklen);
		return -1;
	}

	if ( (llaptype==0) || (llaptype>2) ) {
		printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype);
		return -1;
	}


	skb = dev_alloc_skb(3+sklen);
	if (skb == NULL) 
	{
		printk("%s: dropping packet due to memory squeeze.\n",
			dev->name);
		return -1;
	}
	skb->dev = dev;

	if (sklen > len)
		skb_reserve(skb,8);
	skb_put(skb,len+3);
	skb->protocol = htons(ETH_P_LOCALTALK);
	/* add LLAP header */
	skb->data[0] = dnode;
	skb->data[1] = snode;
	skb->data[2] = llaptype;
	skb_reset_mac_header(skb);	/* save pointer to llap header */
	skb_pull(skb,3);

	/* copy ddp(s,e)hdr + contents */
	skb_copy_to_linear_data(skb, ltdmabuf, len);

	skb_reset_transport_header(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* toss it onwards */
	netif_rx(skb);
	return 0;
}
Example #15
/*
 * Au1000 receive routine.
 */
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct sk_buff *skb;
	volatile rx_dma_t *prxd;
	u32 buff_stat, status;
	db_dest_t *pDB;
	u32	frmlen;

	if (au1000_debug > 5)
		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE)  {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		update_rx_stats(dev, status);
		if (!(status & RX_ERROR))  {

			/* good frame */
			frmlen = (status & RX_FRAME_LEN_MASK);
			frmlen -= 4; /* Remove FCS */
			skb = dev_alloc_skb(frmlen + 2);
			if (skb == NULL) {
				printk(KERN_ERR
				       "%s: Memory squeeze, dropping packet.\n",
				       dev->name);
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 2);	/* 16 byte IP header align */
			skb_copy_to_linear_data(skb,
				(unsigned char *)pDB->vaddr, frmlen);
			skb_put(skb, frmlen);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);	/* pass the packet to upper layers */
		}
		else {
			if (au1000_debug > 4) {
				if (status & RX_MISSED_FRAME)
					printk("rx miss\n");
				if (status & RX_WDOG_TIMER)
					printk("rx wdog\n");
				if (status & RX_RUNT)
					printk("rx runt\n");
				if (status & RX_OVERLEN)
					printk("rx overlen\n");
				if (status & RX_COLL)
					printk("rx coll\n");
				if (status & RX_MII_ERROR)
					printk("rx mii error\n");
				if (status & RX_CRC_ERROR)
					printk("rx crc error\n");
				if (status & RX_LEN_ERROR)
					printk("rx len error\n");
				if (status & RX_U_CNTRL_FRAME)
					printk("rx u control frame\n");
			}
		}
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
		au_sync();

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
	}
	return 0;
}
Example #16
int vnic_rx_skb(struct vnic_login *login, struct vnic_rx_ring *ring,
		struct ib_wc *wc, int ip_summed, char *eth_hdr_va)
{
	u64 wr_id = (unsigned int)wc->wr_id;
	struct sk_buff *skb;
	int used_frags;
	char *va = eth_hdr_va;
	int length = wc->byte_len - VNIC_EOIB_HDR_SIZE - VNIC_VLAN_OFFSET(login),
	    linear_length = (length <= SMALL_PACKET_SIZE) ?
	    length : SMALL_PACKET_SIZE, hdr_len = min(length, HEADER_COPY_SIZE),
	    offest = NET_IP_ALIGN + 16;
	struct ib_device *ib_dev = login->port->dev->ca;

	/* alloc a small linear SKB */
	skb = alloc_skb(linear_length + offest, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_record_rx_queue(skb, ring->index);
	skb_reserve(skb, offest);

	if (vnic_linear_small_pkt && length <= SMALL_PACKET_SIZE) {
		u64 dma;

		/* We are copying all relevant data to the skb - temporarily
		 * synch buffers for the copy
		 */
		dma = ring->rx_info[wr_id].dma_addr[0] + VNIC_EOIB_HDR_SIZE +
			VNIC_VLAN_OFFSET(login);
		ib_dma_sync_single_for_cpu(ib_dev, dma, length,
					   DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		ib_dma_sync_single_for_device(ib_dev, dma, length,
					      DMA_FROM_DEVICE);
		skb->tail += length;
	} else {
		/* unmap the needed fragment and reallocate them. Fragments that
		 * were not used will not be reused as is. */
		used_frags = vnic_unmap_and_replace_rx(ring, ib_dev,
						       skb_shinfo(skb)->frags,
						       wr_id, wc->byte_len);
		if (!used_frags)
			goto free_and_repost;

		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, hdr_len);
		skb->tail += hdr_len;
		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset +=
		    (VNIC_EOIB_HDR_SIZE + VNIC_VLAN_OFFSET(login) +
		     hdr_len);

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -=
		    (VNIC_EOIB_HDR_SIZE + VNIC_VLAN_OFFSET(login) +
		     hdr_len);
		skb->data_len = length - hdr_len;
	}

	/* update skb fields */
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);
	skb->ip_summed = ip_summed;
	skb->dev = login->dev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	return vnic_rx(login, skb, wc);

free_and_repost:
	dev_kfree_skb(skb);
	return -ENODEV;

}
Example #17
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		goto out;

	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		err = -EMSGSIZE;
		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			goto out;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		err = -ENOMEM;
		if (!page)
			goto out;

		__skb_frag_set_page(frag, page);

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		frag->page_offset = 0;
		skb_frag_size_set(frag, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	err = 0;

out:
	put_cpu();
	return err;
}
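
A standalone sketch of how ipcomp_decompress() splits the decompressed payload: whatever fits in the skb's tailroom extends the linear area, and anything left over is attached in page-sized fragments. All sizes below are made up.

/* standalone illustration, not kernel code */
#include <stdio.h>

#define PAGE_SZ 4096	/* stand-in for PAGE_SIZE */

int main(void)
{
	int plen = 1200;	/* compressed payload already in the linear area */
	int dlen = 9000;	/* total decompressed length */
	int tailroom = 800;	/* free space after skb->tail (example) */

	int grow = dlen - plen;			/* extra bytes produced */
	int linear = grow < tailroom ? grow : tailroom;
	int remaining = dlen - (plen + linear);	/* spills into page frags */
	int frags = (remaining + PAGE_SZ - 1) / PAGE_SZ;

	printf("linear grows by %d, %d bytes in %d page frag(s)\n",
	       linear, remaining, frags);
	return 0;
}
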
Example #18
/* Transmit routine used by generic_netmap_txsync(). Returns 0 on success
   and -1 on error (which may be packet drops or other errors). */
int
nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
{
	struct mbuf *m = a->m;
	struct ifnet *ifp = a->ifp;
	u_int len = a->len;
	netdev_tx_t ret;

	/* We know that the driver needs to prepend ifp->needed_headroom bytes
	 * to each packet to be transmitted. We then reset the mbuf pointers
	 * to the correct initial state:
	 *    ___________________________________________
	 *    ^           ^                             ^
	 *    |           |                             |
	 *   head        data                          end
	 *               tail
	 *
	 * which correspond to an empty buffer with exactly
	 * ifp->needed_headroom bytes between head and data.
	 */
	m->len = 0;
	m->data = m->head + ifp->needed_headroom;
	skb_reset_tail_pointer(m);
	skb_reset_mac_header(m);
	skb_reset_network_header(m);

	/* Copy a netmap buffer into the mbuf.
	 * TODO Support the slot flags (NS_MOREFRAG, NS_INDIRECT). */
	skb_copy_to_linear_data(m, a->addr, len); // skb_store_bits(m, 0, addr, len);
	skb_put(m, len);

	/* Hold a reference on this, we are going to recycle mbufs as
	 * much as possible. */
	NM_ATOMIC_INC(&m->users);

	/* On linux m->dev is not reliable, since it can be changed by the
	 * ndo_start_xmit() callback. This happens, for instance, with veth
	 * and bridge drivers. For this reason, the nm_os_generic_xmit_frame()
	 * implementation for linux stores a copy of m->dev into the
	 * destructor_arg field. */
	m->dev = ifp;
	skb_shinfo(m)->destructor_arg = m->dev;

	/* Tell generic_ndo_start_xmit() to pass this mbuf to the driver. */
	skb_set_queue_mapping(m, a->ring_nr);
	m->priority = a->qevent ? NM_MAGIC_PRIORITY_TXQE : NM_MAGIC_PRIORITY_TX;

	ret = dev_queue_xmit(m);

	if (unlikely(ret != NET_XMIT_SUCCESS)) {
		/* Reset priority, so that generic_netmap_tx_clean() can
		 * reclaim this mbuf. */
		m->priority = 0;

		/* Qdisc queue is full (this cannot happen with
		 * the netmap-aware qdisc, see explanation in
		 * netmap_generic_txsync), or qdisc is being
		 * deactivated. In the latter case dev_queue_xmit()
		 * does not call the enqueue method and returns
		 * NET_XMIT_DROP.
		 * If there is no carrier, the generic qdisc is
		 * not yet active (is pending in the qdisc_sleeping
		 * field), and so the temporary noop qdisc enqueue
		 * method will drop the packet and return NET_XMIT_CN.
		 */
		RD(3, "Warning: dev_queue_xmit() is dropping [%d]", ret);
		return -1;
	}

	return 0;
}
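
A standalone model of the buffer reset sketched in the comment block above: after the reset the mbuf is empty and exactly ifp->needed_headroom bytes sit between head and data, ready for the driver to prepend its own header. The concrete numbers are arbitrary.

/* standalone illustration, not kernel code */
#include <stdio.h>

int main(void)
{
	unsigned int buf_size = 2048;		/* distance from head to end */
	unsigned int needed_headroom = 16;	/* what the driver will prepend */

	/* offsets relative to head after the reset */
	unsigned int data = needed_headroom;
	unsigned int tail = data;		/* empty buffer: tail == data */
	unsigned int len  = tail - data;	/* 0 */

	printf("headroom=%u len=%u tailroom=%u\n", data, len, buf_size - tail);
	return 0;
}
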
Example #19
/** Routine to push packets arriving on Octeon interface up to the network layer.
 * @param oct_id   - octeon device id.
 * @param skbuff   - skbuff struct to be passed to network layer.
 * @param len      - size of total data received.
 * @param rh       - Control header associated with the packet
 * @param param    - additional control data with the packet
 * @param arg      - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
	    container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
						MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio,
					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
		skb->protocol = eth_type_trans(skb, skb->dev);

		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    rh->r_dh.vlan) {
			u16 priority = rh->r_dh.priority;
			u16 vid = rh->r_dh.vlan;

			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		napi_gro_receive(napi, skb);

		droq->stats.rx_bytes_received += len -
			rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
		droq->stats.rx_pkts_received++;
	} else {
		recv_buffer_free(skb);
	}
}
Example #20
static int ariadne_rx(struct net_device *dev)
{
	struct ariadne_private *priv = netdev_priv(dev);
	int entry = priv->cur_rx % RX_RING_SIZE;
	int i;

	/* If we own the next entry, it's a new packet. Send it up */
	while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
		int status = lowb(priv->rx_ring[entry]->RMD1);

		if (status != (RF_STP | RF_ENP)) {	/* There was an error */
			/* There is a tricky error noted by
			 * John Murphy <*****@*****.**> to Russ Nelson:
			 * Even with full-sized buffers it's possible for a
			 * jabber packet to use two buffers, with only the
			 * last correctly noting the error
			 */
			/* Only count a general error at the end of a packet */
			if (status & RF_ENP)
				dev->stats.rx_errors++;
			if (status & RF_FRAM)
				dev->stats.rx_frame_errors++;
			if (status & RF_OFLO)
				dev->stats.rx_over_errors++;
			if (status & RF_CRC)
				dev->stats.rx_crc_errors++;
			if (status & RF_BUFF)
				dev->stats.rx_fifo_errors++;
			priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP;
		} else {
			/* Malloc up new buffer, compatible with net-3 */
			short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len + 2);
			if (skb == NULL) {
				netdev_warn(dev, "Memory squeeze, deferring packet\n");
				for (i = 0; i < RX_RING_SIZE; i++)
					if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
						break;

				if (i > RX_RING_SIZE - 2) {
					dev->stats.rx_dropped++;
					priv->rx_ring[entry]->RMD1 |= RF_OWN;
					priv->cur_rx++;
				}
				break;
			}


			skb_reserve(skb, 2);	/* 16 byte align */
			skb_put(skb, pkt_len);	/* Make room */
			skb_copy_to_linear_data(skb,
						(const void *)priv->rx_buff[entry],
						pkt_len);
			skb->protocol = eth_type_trans(skb, dev);
			netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n",
				   ((u_short *)skb->data)[6],
				   skb->data + 6, skb->data,
				   (int)skb->data, (int)skb->len);

			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}

		priv->rx_ring[entry]->RMD1 |= RF_OWN;
		entry = (++priv->cur_rx) % RX_RING_SIZE;
	}

	priv->cur_rx = priv->cur_rx % RX_RING_SIZE;

	/* We should check that at least two ring entries are free.
	 * If not, we should free one and mark stats->rx_dropped++
	 */

	return 0;
}
Example #21
/* EIF(Error in FIFO/End in Frame) handler for FIR */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data. 	 */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else	{
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len+1,GFP_ATOMIC);
		if (!skb)  {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes  */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP  */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}
Example #22
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

        if (budget >=RX_RING_SIZE) budget--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);

       do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
               /* Acknowledge current RX interrupt sources. */
               iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


               /* If we own the next entry, it is a new packet. Send it up. */
               while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                       s32 status = le32_to_cpu(tp->rx_ring[entry].status);

                       if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                               break;

                       if (tulip_debug > 5)
                               printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                      dev->name, entry, status);

		       if (++work_done >= budget)
                               goto not_done;

                       if ((status & 0x38008300) != 0x0300) {
                               if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                       if ((status & 0xffff) != 0x7fff) {
                                               if (tulip_debug > 1)
                                                       printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                              "spanned multiple buffers, status %8.8x!\n",
                                                              dev->name, status);
                                               tp->stats.rx_length_errors++;
                                       }
                               } else if (status & RxDescFatalErr) {
                                /* There was a fatal error. */
                                       if (tulip_debug > 2)
                                               printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                      dev->name, status);
                                       tp->stats.rx_errors++; /* end of a packet.*/
                                       if (status & 0x0890) tp->stats.rx_length_errors++;
                                       if (status & 0x0004) tp->stats.rx_frame_errors++;
                                       if (status & 0x0002) tp->stats.rx_crc_errors++;
                                       if (status & 0x0001) tp->stats.rx_fifo_errors++;
                               }
                       } else {
                               /* Omit the four octet CRC from the length. */
                               short pkt_len = ((status >> 16) & 0x7ff) - 4;
                               struct sk_buff *skb;

#ifndef final_version
                               if (pkt_len > 1518) {
                                       printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                              dev->name, pkt_len, pkt_len);
                                       pkt_len = 1518;
                                       tp->stats.rx_length_errors++;
                               }
#endif
                               /* Check if the packet is long enough to accept without copying
                                  to a minimally-sized skbuff. */
                               if (pkt_len < tulip_rx_copybreak
                                   && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                       skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                       pci_dma_sync_single_for_cpu(tp->pdev,
								   tp->rx_buffers[entry].mapping,
								   pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                       skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len);
                                       skb_put(skb, pkt_len);
#else
                                       memcpy(skb_put(skb, pkt_len),
                                              tp->rx_buffers[entry].skb->data,
                                              pkt_len);
#endif
                                       pci_dma_sync_single_for_device(tp->pdev,
								      tp->rx_buffers[entry].mapping,
								      pkt_len, PCI_DMA_FROMDEVICE);
                               } else {        /* Pass up the skb already on the Rx ring. */
                                       char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                            pkt_len);

#ifndef final_version
                                       if (tp->rx_buffers[entry].mapping !=
                                           le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                               printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                      "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                      dev->name,
                                                      le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                      (unsigned long long)tp->rx_buffers[entry].mapping,
                                                      skb->head, temp);
                                       }
#endif

                                       pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                        PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                       tp->rx_buffers[entry].skb = NULL;
                                       tp->rx_buffers[entry].mapping = 0;
                               }
                               skb->protocol = eth_type_trans(skb, dev);

                               netif_receive_skb(skb);

                               dev->last_rx = jiffies;
                               tp->stats.rx_packets++;
                               tp->stats.rx_bytes += pkt_len;
                       }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
		       received++;
#endif

                       entry = (++tp->cur_rx) % RX_RING_SIZE;
                       if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                               tulip_refill_rx(dev);

                }

               /* New ack strategy... irq does not ack Rx any longer
                  hopefully this helps */

               /* Really bad things can happen here... If new packet arrives
                * and an irq arrives (tx or just due to occasionally unset
                * mask), it will be acked by irq handler, but new thread
                * is not scheduled. It is major hole in design.
                * No idea how to fix this if "playing with fire" will fail
                * tomorrow (night 011029). If it will not fail, we won
                * finally: amount of IO did not increase at all. */
       } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

          /* We use this simplistic scheme for IM. It's proven by
             real life installations. We can have IM enabled
            continuously but this would cause unnecessary latency.
            Unfortunately we can't use all the NET_RX_* feedback here.
            This would turn on IM for devices that are not contributing
            to backlog congestion with unnecessary latency.

             We monitor the device RX-ring and have:

             HW Interrupt Mitigation either ON or OFF.

            ON:  More then 1 pkt received (per intr.) OR we are dropping
             OFF: Only 1 pkt received

             Note. We only use min and max (0, 15) settings from mit_table */


          if (tp->flags & HAS_INTR_MITIGATION) {
                 if (received > 1) {
                         if (!tp->mit_on) {
                                 tp->mit_on = 1;
                                 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                         }
                 } else {
                         if (tp->mit_on) {
                                 tp->mit_on = 0;
                                 iowrite32(0, tp->base_addr + CSR11);
                         }
                 }
          }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

         tulip_refill_rx(dev);

         /* If the RX ring could not be fully refilled, we are out of memory. */
         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		 goto oom;

         /* Remove us from polling list and enable RX intr. */

         netif_rx_complete(dev, napi);
         iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

         /* The last op happens after poll completion, which means the following:
          * 1. it can race with disabling irqs in the irq handler
          * 2. it can race with disabling/enabling irqs in other poll threads
          * 3. if an irq is raised after the loop begins, it will be immediately
          *    triggered here.
          *
          * Summarizing: the logic results in some redundant irqs, both
          * due to races in masking and due to too-late acking of already
          * processed irqs. But it must not result in losing events.
          */

         return work_done;

 not_done:
         if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
             tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                 tulip_refill_rx(dev);

         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		 goto oom;

         return work_done;

 oom:    /* Executed with RX ints disabled */

         /* Start timer, stop polling, but do not enable rx interrupts. */
         mod_timer(&tp->oom_timer, jiffies+1);

         /* Think: timer_pending() was an explicit signature of a bug.
          * The timer can be pending now, yet fire and complete
          * before we do netif_rx_complete(). See? We would lose it. */

         /* remove ourselves from the polling list */
         netif_rx_complete(dev, napi);

         return work_done;
}
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
						dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}
Example #24
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else { 	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
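
The decision tulip_rx makes above is the copy-break pattern that most examples in this collection share: frames shorter than a threshold are copied into a freshly allocated skb so the DMA buffer can stay on the ring, while larger frames are handed up directly and the ring slot is refilled later. Below is a minimal sketch of that decision using the generic DMA API; it is illustrative only, not the tulip code, and struct ring_slot, rx_copybreak and the field names are assumptions.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Hypothetical per-slot bookkeeping, not taken from any driver above. */
struct ring_slot {
	struct sk_buff *skb;	/* skb the device DMAs into */
	dma_addr_t mapping;	/* DMA address of skb->data */
	unsigned int buf_len;	/* mapped length, e.g. PKT_BUF_SZ */
};

static unsigned int rx_copybreak = 256;	/* assumed threshold */

static struct sk_buff *rx_copybreak_skb(struct net_device *dev,
					struct ring_slot *slot,
					unsigned int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len < rx_copybreak) {
		/* Small frame: copy it; the DMA buffer stays on the ring. */
		skb = netdev_alloc_skb_ip_align(dev, pkt_len);
		if (!skb)
			return NULL;		/* caller drops the frame */
		dma_sync_single_for_cpu(dev->dev.parent, slot->mapping,
					pkt_len, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, slot->skb->data, pkt_len);
		dma_sync_single_for_device(dev->dev.parent, slot->mapping,
					   pkt_len, DMA_FROM_DEVICE);
	} else {
		/* Large frame: hand the ring skb up, detach it from the
		 * slot and let the refill path map a replacement. */
		dma_unmap_single(dev->dev.parent, slot->mapping,
				 slot->buf_len, DMA_FROM_DEVICE);
		skb = slot->skb;
		slot->skb = NULL;
		slot->mapping = 0;
	}
	skb_put(skb, pkt_len);
	return skb;
}

The sync_for_cpu / copy / sync_for_device bracket corresponds to the pci_dma_sync_single_* calls in tulip_rx; whichever API is used, the direction must match the original mapping.
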
Example #25
static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
{
	in_cache_entry *entry;
	struct iphdr *iph;
	char *buff;
	__be32 ipaddr = 0;

	static struct {
		struct llc_snap_hdr hdr;
		__be32 tag;
	} tagged_llc_snap_hdr = {
		{0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}, {0x88, 0x4c}},
		0
	};

	buff = skb->data + mpc->dev->hard_header_len;
	iph = (struct iphdr *)buff;
	ipaddr = iph->daddr;

	ddprintk("(%s) ipaddr 0x%x\n",
		 mpc->dev->name, ipaddr);

	entry = mpc->in_ops->get(ipaddr, mpc);
	if (entry == NULL) {
		entry = mpc->in_ops->add_entry(ipaddr, mpc);
		if (entry != NULL)
			mpc->in_ops->put(entry);
		return 1;
	}
	/* threshold not exceeded or VCC not ready */
	if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) {
		ddprintk("(%s) cache_hit: returns != OPEN\n",
			 mpc->dev->name);
		mpc->in_ops->put(entry);
		return 1;
	}

	ddprintk("(%s) using shortcut\n",
		 mpc->dev->name);
	/* Per MPOA spec A.1.4, an MPOA client must decrement the IP TTL by at least one */
	if (iph->ttl <= 1) {
		ddprintk("(%s) IP ttl = %u, using LANE\n",
			 mpc->dev->name, iph->ttl);
		mpc->in_ops->put(entry);
		return 1;
	}
	iph->ttl--;
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	if (entry->ctrl_info.tag != 0) {
		ddprintk("(%s) adding tag 0x%x\n",
			 mpc->dev->name, entry->ctrl_info.tag);
		tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
		skb_pull(skb, ETH_HLEN);	/* get rid of Eth header */
		skb_push(skb, sizeof(tagged_llc_snap_hdr));
						/* add LLC/SNAP header + tag */
		skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
					sizeof(tagged_llc_snap_hdr));
	} else {
		skb_pull(skb, ETH_HLEN);	/* get rid of Eth header */
		skb_push(skb, sizeof(struct llc_snap_hdr));
						/* add LLC/SNAP header */
		skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
					sizeof(struct llc_snap_hdr));
	}

	atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
	ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
	entry->shortcut->send(entry->shortcut, skb);
	entry->packets_fwded++;
	mpc->in_ops->put(entry);

	return 0;
}
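
The interesting step in send_via_shortcut is the header rewrite: the Ethernet header is stripped with skb_pull(), room for the encapsulation header is made with skb_push(), and the header is written with skb_copy_to_linear_data(), after the IP TTL has been decremented and the checksum recomputed. A stripped-down sketch of just that step follows; the cache lookup and VCC handling are left out, and hdr/hdr_len are caller-supplied assumptions.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <net/checksum.h>

/* Replace the Ethernet header of an IP frame with an arbitrary
 * encapsulation header and age the IP header.  Assumes the skb is
 * writable and has enough headroom when hdr_len > ETH_HLEN. */
static int reencap_and_age(struct sk_buff *skb, const void *hdr,
			   unsigned int hdr_len)
{
	struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);

	if (iph->ttl <= 1)
		return -1;		/* let the slow path handle it */

	iph->ttl--;
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	skb_pull(skb, ETH_HLEN);	/* drop the Ethernet header     */
	skb_push(skb, hdr_len);		/* make room for the new header */
	skb_copy_to_linear_data(skb, hdr, hdr_len);
	return 0;
}
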
Example #26
static void tms380tr_rcv_status_irq(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned char *ReceiveDataPtr;
	struct sk_buff *skb;
	unsigned int Length, Length2;
	RPL *rpl;
	RPL *SaveHead;
	dma_addr_t dmabuf;

	

	/* Walk the receive parameter list (RPL) ring. */
	for(;;)
	{
		rpl = tp->RplHead;
		if(rpl->Status & RX_VALID)
			break;		/* this RPL is still owned by the adapter */

		/* Advance the list head before processing the RPL. */
		SaveHead = tp->RplHead;
		tp->RplHead = rpl->NextRPLPtr;

		/* Length of the received frame. */
		Length = be16_to_cpu(rpl->FrameSize);

		
		/* Complete frame contained in a single buffer? */
		if((rpl->Status & VALID_SINGLE_BUFFER_FRAME)
			== VALID_SINGLE_BUFFER_FRAME)
		{
			ReceiveDataPtr = rpl->MData;

			/* Re-read the frame size and check that it is stable. */
			Length2 = be16_to_cpu(rpl->FrameSize);

			if(Length == 0 || Length != Length2)
			{
				/* Adapter is still updating this RPL; retry later. */
				tp->RplHead = SaveHead;
				break;
			}
			tms380tr_update_rcv_stats(tp,ReceiveDataPtr,Length);
			  
			if(tms380tr_debug > 3)
				printk(KERN_DEBUG "%s: Packet Length %04X (%d)\n",
					dev->name, Length, Length);
			  
			
			skb = rpl->Skb;
			if(rpl->SkbStat == SKB_UNAVAILABLE)
			{
				/* No skb was attached to this RPL; the frame was
				 * received into the local bounce buffer. Try to
				 * allocate an skb now so we can copy it out. */
				skb = dev_alloc_skb(tp->MaxPacketSize);
				if(skb == NULL)
				{
					/* Still no memory: the frame is simply not delivered. */
				}
				else
				{
					skb_put(skb, tp->MaxPacketSize);
					rpl->SkbStat 	= SKB_DATA_COPY;
					ReceiveDataPtr 	= rpl->MData;
				}
			}

			if(skb && (rpl->SkbStat == SKB_DATA_COPY
				|| rpl->SkbStat == SKB_DMA_DIRECT))
			{
				if(rpl->SkbStat == SKB_DATA_COPY)
					skb_copy_to_linear_data(skb, ReceiveDataPtr,
						       Length);

				
				/* The skb is handed up; detach it from the RPL. */
				rpl->Skb = NULL;
				skb_trim(skb,Length);
				skb->protocol = tr_type_trans(skb,dev);
				netif_rx(skb);
			}
		}
		else	/* multi-buffer or otherwise invalid frame: discard it */
		{
			if(rpl->Skb != NULL)
				dev_kfree_skb_irq(rpl->Skb);

			/* Count the error only once, on the RPL that started the frame. */
			if(rpl->Status & RX_START_FRAME)
				tp->MacStat.rx_errors++;
		}
		if (rpl->DMABuff)
			dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_FROM_DEVICE);
		rpl->DMABuff = 0;

		
		/* Re-arm the RPL with a fresh receive skb. */
		rpl->Skb = dev_alloc_skb(tp->MaxPacketSize);
		/* Fall back to the local bounce buffer if that fails. */
		if(rpl->Skb == NULL)
		{
			rpl->SkbStat = SKB_UNAVAILABLE;
			rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
			rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
		}
		else	
		{
			rpl->Skb->dev = dev;
			skb_put(rpl->Skb, tp->MaxPacketSize);

			
			dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
			if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
			{
				rpl->SkbStat = SKB_DATA_COPY;
				rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
				rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
			}
			else
			{
				
				rpl->SkbStat = SKB_DMA_DIRECT;
				rpl->FragList[0].DataAddr = htonl(dmabuf);
				rpl->MData = rpl->Skb->data;
				rpl->DMABuff = dmabuf;
			}
		}

		rpl->FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);
		rpl->FrameSize = 0;

		
		/* Clear the frame size of the RPL being handed back. */
		tp->RplTail->FrameSize = 0;

		/* Pass ownership back to the adapter and request a frame IRQ. */
		tms380tr_write_rpl_status(tp->RplTail, RX_VALID | RX_FRAME_IRQ);

		/* Advance the tail of the receive list. */
		tp->RplTail = tp->RplTail->NextRPLPtr;

		/* Tell the adapter that a receive buffer is available again. */
		tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
	}

	return;
}
Example #27
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct skb_frag_struct *skb_frags,
				      struct mlx4_en_rx_alloc *page_alloc,
				      unsigned int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb->dev = priv->dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * synch buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
					   DMA_FROM_DEVICE);
		skb->tail += length;
	} else {

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
						      skb_shinfo(skb)->frags,
						      page_alloc, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}
Example #28
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
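
The tail of ibmveth_poll shows the standard way to leave NAPI polling without losing events: complete, re-enable the interrupt, then look at the queue one more time and take the poll back if something slipped in. A minimal sketch of that pattern follows; my_process_rx(), my_rx_pending(), my_irq_enable() and my_irq_disable() are placeholders for the driver-specific pieces, assumed to exist elsewhere.

#include <linux/netdevice.h>

/* Driver-specific helpers, assumed to be defined elsewhere. */
static int my_process_rx(struct napi_struct *napi, int budget);
static bool my_rx_pending(struct napi_struct *napi);
static void my_irq_enable(struct napi_struct *napi);
static void my_irq_disable(struct napi_struct *napi);

static int my_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

restart:
	/* Process up to the remaining budget. */
	done += my_process_rx(napi, budget - done);

	if (done < budget) {
		/* We believe the queue is empty: re-enable the device
		 * interrupt and tell NAPI we are finished... */
		my_irq_enable(napi);
		napi_complete(napi);

		/* ...then check once more.  A frame may have arrived
		 * between the last check and the interrupt being
		 * re-armed; if so, reclaim the poll and keep going. */
		if (my_rx_pending(napi) && napi_reschedule(napi)) {
			my_irq_disable(napi);
			goto restart;
		}
	}
	return done;
}
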
Example #29
static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
	 			np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				skb_copy_to_linear_data (skb,
						  np->rx_skbuff[entry]->data,
						  pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			if (!skb) {
				/* netdev_alloc_skb() failed above: the frame
				 * stays in the ring buffer and is dropped. */
				np->stats.rx_dropped++;
			} else {
				skb->protocol = eth_type_trans (skb, dev);
#if 0
				/* Checksum done by hw, but csum value unavailable. */
				if (np->pdev->pci_rev_id >= 0x0c &&
					!(frame_status & (TCPError | UDPError | IPError))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
#endif
				netif_rx (skb);
			}
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			/* 16 byte align the IP header */
			skb_reserve (skb, 2);
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}
Example #30
netdev_tx_t
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb = priv->control_block;
	u32 index;
	dma_addr_t pci_map_address;
	int frame_size;
	isl38xx_fragment *fragment;
	int offset;
	struct sk_buff *newskb;
	int newskb_offset;
	unsigned long flags;
	unsigned char wds_mac[6];
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif

	/* lock the driver code */
	spin_lock_irqsave(&priv->slock, flags);

	/* check whether the destination queue has enough fragments for the frame */
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
		printk(KERN_ERR "%s: transmit device queue full when awake\n",
		       ndev->name);
		netif_stop_queue(ndev);

		/* trigger the device */
		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
		udelay(ISL38XX_WRITEIO_DELAY);
		goto drop_free;
	}
	/* Check alignment and WDS frame formatting. The start of the packet should
	 * be aligned on a 4-byte boundary. If WDS is enabled add another 6 bytes
	 * and add WDS address information */
	if (likely(((long) skb->data & 0x03) | init_wds)) {
		/* get the number of bytes to add and re-align */
		offset = (4 - (long) skb->data) & 0x03;
		offset += init_wds ? 6 : 0;

		/* check whether the current skb can be used  */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
			      init_wds);
#endif

			/* align the buffer on 4-byte boundary */
			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
			if (init_wds) {
				/* wds requires an additional address field of 6 bytes */
				skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
				memmove(skb->data + 6, src, skb->len);
				skb_copy_to_linear_data(skb, wds_mac, 6);
			} else {
				memmove(skb->data, src, skb->len);
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
			      src, skb->len);
#endif
		} else {
			newskb =
			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
			if (unlikely(newskb == NULL)) {
				printk(KERN_ERR "%s: Cannot allocate skb\n",
				       ndev->name);
				goto drop_free;
			}
			newskb_offset = (4 - (long) newskb->data) & 0x03;

			/* Check if newskb->data is aligned */
			if (newskb_offset)
				skb_reserve(newskb, newskb_offset);

			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
			if (init_wds) {
				skb_copy_from_linear_data(skb,
							  newskb->data + 6,
							  skb->len);
				skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
			} else
				skb_copy_from_linear_data(skb, newskb->data,
							  skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
			      newskb->data, skb->data, skb->len, init_wds);
#endif

			newskb->dev = skb->dev;
			dev_kfree_skb_irq(skb);
			skb = newskb;
		}
	}
	/* display the buffer contents for debugging */
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* map the skb buffer to pci memory for DMA operation */
	pci_map_address = pci_map_single(priv->pdev,
					 (void *) skb->data, skb->len,
					 PCI_DMA_TODEVICE);
	if (unlikely(pci_map_address == 0)) {
		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
		       ndev->name);
		goto drop_free;
	}
	/* Place the fragment in the control block structure. */
	index = curr_frag % ISL38XX_CB_TX_QSIZE;
	fragment = &cb->tx_data_low[index];

	priv->pci_map_tx_address[index] = pci_map_address;
	/* store the skb address for future freeing  */
	priv->data_low_tx[index] = skb;
	/* set the proper fragment start address and size information */
	frame_size = skb->len;
	fragment->size = cpu_to_le16(frame_size);
	fragment->flags = cpu_to_le16(0);	/* set to 1 if more fragments */
	fragment->address = cpu_to_le32(pci_map_address);
	curr_frag++;

	/* The fragment address in the control block must have been
	 * written before announcing the frame buffer to device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
	    > ISL38XX_CB_TX_QSIZE) {
		/* stop sends from upper layers */
		netif_stop_queue(ndev);

		/* set the full flag for the transmission queue */
		priv->data_low_tx_full = 1;
	}

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* trigger the device */
	islpci_trigger(priv);

	/* unlock the driver code */
	spin_unlock_irqrestore(&priv->slock, flags);

	return NETDEV_TX_OK;

      drop_free:
	ndev->stats.tx_dropped++;
	spin_unlock_irqrestore(&priv->slock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
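
Before mapping the frame for DMA, islpci_eth_transmit makes sure skb->data starts on a 4-byte boundary: it shifts the payload in place when the skb is private and has enough tailroom, and copies into a new skb otherwise. A reduced sketch of just the alignment fix-up, without the WDS address insertion, follows; align_skb_4 is an illustrative name, the in-place branch mirrors the shift the driver does above, and the caller is assumed to run outside atomic context.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *align_skb_4(struct sk_buff *skb)
{
	unsigned int offset = (4 - (unsigned long)skb->data) & 0x03;
	struct sk_buff *newskb;

	if (!offset)
		return skb;			/* already aligned */

	if (!skb_cloned(skb) && skb_tailroom(skb) >= offset) {
		unsigned char *src = skb->data;

		/* Advance data and tail by "offset" (tailroom was checked),
		 * then shift the payload up to the aligned position. */
		skb_reserve(skb, offset);
		memmove(skb->data, src, skb->len);
		return skb;
	}

	/* Shared data or no tailroom: copy into an aligned replacement. */
	newskb = dev_alloc_skb(skb->len + 4);
	if (!newskb)
		return NULL;
	skb_reserve(newskb, (4 - (unsigned long)newskb->data) & 0x03);
	skb_put(newskb, skb->len);
	skb_copy_from_linear_data(skb, newskb->data, skb->len);
	newskb->dev = skb->dev;
	dev_kfree_skb(skb);	/* use dev_kfree_skb_irq() in atomic context */
	return newskb;
}
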