Example no. 1
static int mpodp_flush_rx_trans(struct mpodp_if_priv *priv, struct mpodp_rxq *rxq,
				struct mpodp_rx *rx, uint32_t first_slot)
{
	struct net_device *netdev = priv->netdev;
	struct dma_async_tx_descriptor *dma_txd;

	rx->dma_len =
		dma_map_sg(&priv->pdev->dev, rx->sg, rx->sg_len,
			   DMA_FROM_DEVICE);
	if (rx->dma_len == 0)
		return -1;

	/* configure channel */
	priv->rx_config.cfg.src_addr =
		    rxq->mppa_entries[first_slot].pkt_addr;
	if (dmaengine_slave_config(priv->rx_chan, &priv->rx_config.cfg)) {
		/* board has reset, wait for reset of netdev */
		netif_carrier_off(netdev);
		if (netif_msg_rx_err(priv))
			netdev_err(netdev,
				   "rxq[%d] rx[%d]: cannot configure channel\n",
				   rxq->id, first_slot);
		goto dma_failed;
	}

	/* get transfer descriptor */
	dma_txd = dmaengine_prep_slave_sg(priv->rx_chan,
					  rx->sg, rx->dma_len,
					  DMA_DEV_TO_MEM, 0);
	if (dma_txd == NULL) {
		if (netif_msg_rx_err(priv))
			netdev_err(netdev,
				   "rxq[%d] rx[%d]: cannot get dma descriptor",
				   rxq->id, first_slot);
		goto dma_failed;
	}

	if (netif_msg_rx_status(priv))
		netdev_info(netdev, "rxq[%d] rx[%d]: transfer start (%d)\n",
			   rxq->id, rxq->avail, rx->sg_len);

	/* submit and issue descriptor */
	rx->cookie = dmaengine_submit(dma_txd);

	return 0;
 dma_failed:
	dma_unmap_sg(&priv->pdev->dev, rx->sg, rx->sg_len, DMA_FROM_DEVICE);
	return -1;
}
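
Note: despite the "submit and issue descriptor" comment, mpodp_flush_rx_trans() only calls dmaengine_submit(); in the generic dmaengine flow nothing is actually started until dma_async_issue_pending() is called on the channel, so presumably the caller batches that step. A minimal sketch of such a caller (the function name is hypothetical) could look like:

static int mpodp_start_rx_example(struct mpodp_if_priv *priv,
				  struct mpodp_rxq *rxq, struct mpodp_rx *rx,
				  uint32_t first_slot)
{
	/* map, prep and submit the scatter-gather transfer as above */
	if (mpodp_flush_rx_trans(priv, rxq, rx, first_slot))
		return -1;

	/* actually kick the DMA engine for all submitted descriptors */
	dma_async_issue_pending(priv->rx_chan);
	return 0;
}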
Example no. 2
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
			       priv->dev->name);
		return NULL;
	}

	skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_reserve(skb, 2);
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		desc->skb->ip_summed = CHECKSUM_NONE;
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			printk(KERN_DEBUG "%s: received packet:\n",
			       priv->dev->name);
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: low on skbs, dropping packet\n",
			       priv->dev->name);
		priv->dev->stats.rx_dropped++;
	}

	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}
Example no. 3
static int mlx4_en_fill_rx_buf(struct net_device *dev,
			       struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int num = 0;
	int err;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
					      ring->size_mask);
		if (err) {
			if (netif_msg_rx_err(priv))
				mlx4_warn(priv->mdev,
					  "Failed preparing rx descriptor\n");
			priv->port_stats.rx_alloc_failed++;
			break;
		}
		++num;
		++ring->prod;
	}
	if ((u32) (ring->prod - ring->cons) == ring->size)
		ring->full = 1;

	return num;
}
Example no. 4
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					mlx4_err(mdev, "Failed to allocate "
						       "enough rx buffers\n");
					return -ENOMEM;
				} else {
					if (netif_msg_rx_err(priv))
						mlx4_warn(mdev,
							  "Only %d buffers allocated\n",
							  ring->actual_size);
					goto out;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
out:
	return 0;
}
Example no. 5
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
#ifdef AG71XX_NAPI_TX
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
#endif
	struct net_device *dev = ag->dev;
	unsigned long flags;
	u32 status;
	int done;

#ifdef AG71XX_NAPI_TX
	ar71xx_ddr_flush(pdata->flush_reg);
	ag71xx_tx_packets(ag);
#endif

	DBG("%s: processing RX ring\n", dev->name);
	done = ag71xx_rx_packets(ag, limit);

	/* TODO: add OOM handler */

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	status &= AG71XX_INT_POLL;

	if ((done < limit) && (!status)) {
		DBG("%s: disable polling mode, done=%d, status=%x\n",
			dev->name, done, status);

		netif_rx_complete(dev, napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return 0;
	}

	if (status & AG71XX_INT_RX_OF) {
		if (netif_msg_rx_err(ag))
			printk(KERN_ALERT "%s: rx owerflow, restarting dma\n",
				dev->name);

		/* ack interrupt */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	DBG("%s: stay in polling mode, done=%d, status=%x\n",
			dev->name, done, status);
	return 1;
}
Example no. 6
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status (dev))
		devdbg (dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));
#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	 /* ra_sw_nat_hook_rx return 1 --> continue
	  * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
	  */
	FOE_MAGIC_TAG(skb) = FOE_MAGIC_EXTIF;
	FOE_AI_UNHIT(skb);
	if(ra_sw_nat_hook_rx != NULL)
	{
		if(ra_sw_nat_hook_rx(skb)) {
			status = netif_rx (skb);
			if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
				devdbg (dev, "netif_rx status %d", status);
		}
	} else  {
		status = netif_rx (skb);
		if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
			devdbg (dev, "netif_rx status %d", status);
	}

#else
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
		devdbg (dev, "netif_rx status %d", status);
#endif
}
Example no. 7
static void nc_ensure_sync(struct usbnet *dev)
{
	dev->frame_errors++;
	if (dev->frame_errors > 5) {
		struct urb		*urb;
		struct usb_ctrlrequest	*req;
		int			status;

		/* Send a flush */
		urb = usb_alloc_urb(0, SLAB_ATOMIC);
		if (!urb)
			return;

		req = kmalloc(sizeof *req, GFP_ATOMIC);
		if (!req) {
			usb_free_urb(urb);
			return;
		}

		req->bRequestType = USB_DIR_OUT
			| USB_TYPE_VENDOR
			| USB_RECIP_DEVICE;
		req->bRequest = REQUEST_REGISTER;
		req->wValue = cpu_to_le16(USBCTL_FLUSH_THIS
				| USBCTL_FLUSH_OTHER);
		req->wIndex = cpu_to_le16(REG_USBCTL);
		req->wLength = cpu_to_le16(0);

		/* queue an async control request, we don't need
		 * to do anything when it finishes except clean up.
		 */
		usb_fill_control_urb(urb, dev->udev,
			usb_sndctrlpipe(dev->udev, 0),
			(unsigned char *) req,
			NULL, 0,
			nc_flush_complete, req);
		status = usb_submit_urb(urb, GFP_ATOMIC);
		if (status) {
			kfree(req);
			usb_free_urb(urb);
			return;
		}

		if (netif_msg_rx_err(dev))
			devdbg(dev, "flush net1080; too many framing errors");
		dev->frame_errors = 0;
	}
}
Example no. 8
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status (dev))
		devdbg (dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
		devdbg (dev, "netif_rx status %d", status);
}
Example no. 9
static inline void rx_process(struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup
			&& !dev->driver_info->rx_fixup(dev, skb))
		goto error;
	/* else network stack removes extra byte if we forced a short packet */

	if (skb->len)
		axusbnet_skb_return(dev, skb);
	else {
		if (netif_msg_rx_err(dev))
			devdbg(dev, "drop");
error:
		dev->stats.rx_errors++;
		skb_queue_tail(&dev->done, skb);
	}
}
Example no. 10
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (cp))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			cp->dev->name, rx_tail, status, len);
	cp->net_stats.rx_errors++;
	if (status & RxErrFrame)
		cp->net_stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->net_stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->net_stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->net_stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->net_stats.rx_fifo_errors++;
}
Example no. 11
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status (dev))
		devdbg (dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
		devdbg (dev, "netif_rx status %d", status);
}
Example no. 12
static void cpmac_clear_rx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *desc;
	int i;
	if (unlikely(!priv->rx_head))
		return;
	desc = priv->rx_head;
	for (i = 0; i < priv->ring_size; i++) {
		if ((desc->dataflags & CPMAC_OWN) == 0) {
			if (netif_msg_rx_err(priv) && net_ratelimit())
				printk(KERN_WARNING "%s: packet dropped\n",
				       dev->name);
			if (unlikely(netif_msg_hw(priv)))
				cpmac_dump_desc(dev, desc);
			desc->dataflags = CPMAC_OWN;
			dev->stats.rx_dropped++;
		}
		desc->hw_next = desc->next->mapping;
		desc = desc->next;
	}
	priv->rx_head->prev->hw_next = 0;
}
Example no. 13
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void kevent(void *data)
{
	struct usbnet *dev = (struct usbnet *)data;
#else
static void kevent(struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
#endif
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit(EVENT_TX_HALT, &dev->flags)) {

		unlink_urbs(dev, &dev->txq);
		status = usb_clear_halt(dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				deverr(dev, "can't clear tx halt, status %d",
				       status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {

		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				deverr(dev, "can't clear rx halt, status %d",
				       status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit(EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running(dev->net))
			urb = usb_alloc_urb(0, GFP_KERNEL);
		else
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
			urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
			rx_submit(dev, urb, GFP_KERNEL);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);

		if (info->link_reset) {
			retval = info->link_reset(dev);
			if (retval < 0) {
				devinfo(dev,
					"link reset failed (%d) usbnet usb-%s-%s, %s",
					retval,
					dev->udev->bus->bus_name,
					dev->udev->devpath,
					info->description);
			}
		}
	}

	if (dev->flags)
		devdbg(dev, "kevent done, flags = 0x%lx", dev->flags);
}

/*-------------------------------------------------------------------------*/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
static void tx_complete(struct urb *urb, struct pt_regs *regs)
#else
static void tx_complete(struct urb *urb)
#endif
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			axusbnet_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		/* async unlink */
		case -ESHUTDOWN:		/* hardware gone */
			break;

		/* like rx, tx gets controller i/o faults during khubd delays */
		/* and so it uses the same throttling mechanism. */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending(&dev->delay)) {
				mod_timer(&dev->delay,
					  jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link(dev))
					devdbg(dev, "tx throttle %d",
					       urb->status);
			}
			netif_stop_queue(dev->net);
			break;
		default:
			if (netif_msg_tx_err(dev))
				devdbg(dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static
void axusbnet_tx_timeout(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);

	/* FIXME: device recovery -- reset? */
}

/*-------------------------------------------------------------------------*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
static int
#else
static netdev_tx_t
#endif
axusbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	/* some devices want funky USB-level framing, for */
	/* win32 driver (usually) and/or hardware quirks */
	if (info->tx_fixup) {
		skb = info->tx_fixup(dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev))
				devdbg(dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb(urb, dev->udev, dev->out, skb->data,
			  skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave(&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb(urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue(net);
		axusbnet_defer_kevent(dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err(dev))
			devdbg(dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail(&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN(dev))
			netif_stop_queue(net);
	}
	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "drop, code %d", retval);
drop:
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else if (netif_msg_tx_queued(dev)) {
		devdbg(dev, "> tx, len %d, type 0x%x",
		       length, skb->protocol);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

/* tasklet (work deferred from completions, in_irq) or timer */

static void axusbnet_bh(unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			devdbg(dev, "bogus skb state %d", entry->state);
		}
	}

	/* waiting for all pending urbs to complete? */
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0)
			wake_up(dev->wait);

	/* or are we maybe short a few urbs? */
	} else if (netif_running(dev->net)
			&& netif_device_present(dev->net)
			&& !timer_pending(&dev->delay)
			&& !test_bit(EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN(dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			/* don't refill the queue all at once */
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb(0, GFP_ATOMIC);
				if (urb != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
					urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
					rx_submit(dev, urb, GFP_ATOMIC);
				}
			}
			if (temp != dev->rxq.qlen && netif_msg_link(dev))
				devdbg(dev, "rxqlen %d --> %d",
				       temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule(&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN(dev))
			netif_wake_queue(dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */

static
void axusbnet_disconnect(struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev(intf);

	if (netif_msg_probe(dev))
		devinfo(dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev(net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind(dev, intf);

	free_netdev(net);
	usb_put_dev(xdev);
}

/*-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */

static int
axusbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		printk(KERN_ERR "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev(udev);
	interface = udev->cur_altsetting;

	usb_get_dev(xdev);

	status = -ENOMEM;

	/* set up our own records */
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		printk(KERN_ERR "can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV |
					 NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	dev->bh.func = axusbnet_bh;
	dev->bh.data = (unsigned long) dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	INIT_WORK(&dev->kevent, kevent, dev);
#else
	INIT_WORK(&dev->kevent, kevent);
#endif

	dev->delay.function = axusbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer(&dev->delay);
	/* mutex_init(&dev->phy_mutex); */

	dev->net = net;

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if 0
	/* dma_supported() is deeply broken on almost all architectures */
	/* possible with some EHCI controllers */
	if (dma_supported(&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
	net->open		= axusbnet_open,
	net->stop		= axusbnet_stop,
	net->hard_start_xmit	= axusbnet_start_xmit,
	net->tx_timeout	= axusbnet_tx_timeout,
	net->get_stats = axusbnet_get_stats;
#endif

	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &axusbnet_ethtool_ops;

	/* allow device-specific bind/init procedures */
	/* NOTE net->name still not usable ... */
	status = info->bind(dev, udev);
	if (status < 0) {
		deverr(dev, "Binding device failed: %d", status);
		goto out1;
	}

	/* maybe the remote can't receive an Ethernet MTU */
	if (net->mtu > (dev->hard_mtu - net->hard_header_len))
		net->mtu = dev->hard_mtu - net->hard_header_len;

	status = init_status(dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev(net);
	if (status) {
		deverr(dev, "net device registration failed: %d", status);
		goto out3;
	}

	if (netif_msg_probe(dev))
		devinfo(dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	/* ok, it's ready to go. */
	usb_set_intfdata(udev, dev);

	/* start as if the link is up */
	netif_device_attach(net);

	return 0;

out3:
	if (info->unbind)
		info->unbind(dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

static int axusbnet_suspend(struct usb_interface *intf,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 10)
pm_message_t message)
#else
u32 message)
#endif
{
	struct usbnet *dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach(dev->net);
		(void) unlink_urbs(dev, &dev->rxq);
		(void) unlink_urbs(dev, &dev->txq);
		usb_kill_urb(dev->interrupt);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach(dev->net);
	}
	return 0;
}

static int
axusbnet_resume(struct usb_interface *intf)
{
	struct usbnet	*dev = usb_get_intfdata(intf);
	int	retval = 0;

	if (!--dev->suspend_count)
		tasklet_schedule(&dev->bh);

	retval = init_status(dev, intf);
	if (retval < 0)
		return retval;

	if (dev->interrupt) {
		retval = usb_submit_urb(dev->interrupt, GFP_KERNEL);
		if (retval < 0 && netif_msg_ifup(dev))
			deverr(dev, "intr submit %d", retval);
	}

	return retval;
}
Example no. 14
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring;
	unsigned long flags;
	u32 status;
	int done;

	pdata->ddr_flush();
	ag71xx_tx_packets(ag);

	DBG("%s: processing RX ring\n", dev->name);
	done = ag71xx_rx_packets(ag, limit);

	rx_ring = &ag->rx_ring;
	if (rx_ring->buf[rx_ring->dirty % AG71XX_RX_RING_SIZE].skb == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, done=%d, limit=%d\n",
			dev->name, done, limit);

		netif_rx_complete(dev, napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return done;
	}

 more:
	DBG("%s: stay in polling mode, done=%d, limit=%d\n",
			dev->name, done, limit);
	return done;

 oom:
	if (netif_msg_rx_err(ag))
		printk(KERN_DEBUG "%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	netif_rx_complete(dev, napi);
	return 0;
}
Example no. 15
/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {

		//HTC+++
		//lock cpu perf
		usbnet_lock_perf();

		//queue usbnet_unlock_perf_delayed_work
		usbnet_rx_len = 0;
		schedule_delayed_work(&usbnet_unlock_perf_delayed_work, msecs_to_jiffies(PM_QOS_USBNET_PERF_UNLOCK_TIMER));

		pr_info("%s(%d) [USBNET] EVENT_RX_HALT unlink_urbs !!!\n", __func__, __LINE__);
		pr_info("%s(%d) [USBNET] dev->rxq.qlen:%d\n", __func__, __LINE__, dev->rxq.qlen);
		//HTC---
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);

		//HTC+++
		pr_info("%s(%d) [USBNET] EVENT_RX_HALT usb_clear_halt:%d !!!\n", __func__, __LINE__, status);
		//HTC---
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {

			//HTC+++
			pr_info("%s(%d) [USBNET] clear_bit EVENT_RX_HALT !!!\n", __func__, __LINE__);
			//HTC---
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}
Example no. 16
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
static void rx_complete(struct urb *urb, struct pt_regs *regs)
#else
static void rx_complete(struct urb *urb)
#endif
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;

	skb_put(skb, urb->actual_length);
	entry->state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			entry->state = rx_cleanup;
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			if (netif_msg_rx_err(dev))
				devdbg(dev, "rx length %d", skb->len);
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->stats.rx_errors++;
		axusbnet_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		if (netif_msg_ifdown(dev))
			devdbg(dev, "rx shutdown, code %d", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->stats.rx_errors++;
		if (!timer_pending(&dev->delay)) {
			mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES);
			if (netif_msg_link(dev))
				devdbg(dev, "rx throttle %d", urb_status);
		}
block:
		entry->state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		entry->state = rx_cleanup;
		dev->stats.rx_errors++;
		if (netif_msg_rx_err(dev))
			devdbg(dev, "rx status %d", urb_status);
		break;
	}

	defer_bh(dev, skb, &dev->rxq);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags)) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	if (netif_msg_rx_err(dev))
		devdbg(dev, "no read resubmitted");
}
Example no. 17
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
		if (netif_msg_rx_err (dev))
			devdbg (dev, "no rx skb");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return;
	}
	skb_reserve (skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown (dev))
				devdbg (dev, "device gone");
			netif_device_detach (dev->net);
			break;
		default:
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx submit, %d", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}
Example no. 18
static void rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;
	struct driver_info	*info = dev->driver_info;
	u8			align;

#if (AX_FORCE_BUFF_ALIGN)
	align = 0;
#else
	if (!(info->flags & FLAG_HW_IP_ALIGNMENT))
		align = NET_IP_ALIGN;
	else
		align = 0;
#endif
	skb = alloc_skb(size + align, flags);
	if (skb == NULL) {

		if (netif_msg_rx_err(dev))
			devdbg(dev, "no rx skb");

		if ((dev->rx_urb_size > 2048) && dev->rx_size) {
			dev->rx_size--;
			dev->rx_urb_size =
				AX88772B_BULKIN_SIZE[dev->rx_size].size;

			ax8817x_write_cmd_async(dev, 0x2A,
				AX88772B_BULKIN_SIZE[dev->rx_size].byte_cnt,
				AX88772B_BULKIN_SIZE[dev->rx_size].threshold,
				0, NULL);
		}

		if (!(dev->flags & EVENT_RX_MEMORY))
			axusbnet_defer_kevent(dev, EVENT_RX_MEMORY);
		usb_free_urb(urb);
		return;
	}

	if (align)
		skb_reserve(skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->in, skb->data,
			  size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_running(dev->net)
			&& netif_device_present(dev->net)
			&& !test_bit(EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb(urb, GFP_ATOMIC)) {
		case -EPIPE:
			axusbnet_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			axusbnet_defer_kevent(dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown(dev))
				devdbg(dev, "device gone");
			netif_device_detach(dev->net);
			break;
		default:
			if (netif_msg_rx_err(dev))
				devdbg(dev, "rx submit, %d", retval);
			tasklet_schedule(&dev->bh);
			break;
		case 0:
			__skb_queue_tail(&dev->rxq, skb);
		}
	} else {
		if (netif_msg_ifdown(dev))
			devdbg(dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
}
Example no. 19
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					  ~DMAC_M_ECI, EESIPR);
				/*clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					  ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					  DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write back end. unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
			sh_eth_write(ndev, EDRRR_R, EDRRR);
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
Example no. 20
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	if ((skb = alloc_skb (size + NET_IP_ALIGN + FOE_INFO_LEN, flags)) == NULL) {
#else
	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
#endif
		if (netif_msg_rx_err (dev))
			devdbg (dev, "no rx skb");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return;
	}
#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	skb_reserve (skb, NET_IP_ALIGN + FOE_INFO_LEN);
#else
	skb_reserve (skb, NET_IP_ALIGN);
#endif
	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown (dev))
				devdbg (dev, "device gone");
			netif_device_detach (dev->net);
			break;
		default:
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx submit, %d", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}


/*-------------------------------------------------------------------------*/

static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup
			&& !dev->driver_info->rx_fixup (dev, skb))
		goto error;
	// else network stack removes extra byte if we forced a short packet

	if (skb->len)
		usbnet_skb_return (dev, skb);
	else {
		if (netif_msg_rx_err (dev))
			devdbg (dev, "drop");
error:
		dev->stats.rx_errors++;
		skb_queue_tail (&dev->done, skb);
	}
}

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	    // success
	    case 0:
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx length %d", skb->len);
		}
		break;

	    // stalls need manual reset. this is rare ... except that
	    // when going through USB 2.0 TTs, unplug appears this way.
	    // we avoid the highspeed version of the ETIMEOUT/EILSEQ
	    // storm, recovering as needed.
	    case -EPIPE:
		dev->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	    // software-driven interface shutdown
	    case -ECONNRESET:		// async unlink
	    case -ESHUTDOWN:		// hardware gone
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx shutdown, code %d", urb_status);
		goto block;

	    // we get controller i/o faults during khubd disconnect() delays.
	    // throttle down resubmits, to avoid log floods; just temporarily,
	    // so we still recover when the fault isn't a khubd delay.
	    case -EPROTO:
	    case -ETIME:
	    case -EILSEQ:
		dev->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			if (netif_msg_link (dev))
				devdbg (dev, "rx throttle %d", urb_status);
		}
block:
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	    // data overrun ... flush fifo?
	    case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		// FALLTHROUGH

	    default:
		state = rx_cleanup;
		dev->stats.rx_errors++;
		if (netif_msg_rx_err (dev))
			devdbg (dev, "rx status %d", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net)
		    && !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb (urb);
	}
	if (netif_msg_rx_err (dev))
		devdbg (dev, "no read resubmitted");
}

static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	    /* success */
	    case 0:
		dev->driver_info->status(dev, urb);
		break;

	    /* software-driven interface shutdown */
	    case -ENOENT:		// urb killed
	    case -ESHUTDOWN:		// hardware gone
		if (netif_msg_ifdown (dev))
			devdbg (dev, "intr shutdown, code %d", status);
		return;

	    /* NOTE:  not throttling like RX/TX, since this endpoint
	     * already polls infrequently
	     */
	    default:
		devdbg (dev, "intr status %d", status);
		break;
	}

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0 && netif_msg_timer (dev))
		deverr(dev, "intr resubmit --> %d", status);
}

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			devdbg (dev, "unlink urb err, %d", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}
Example no. 21
/* Received a packet and pass to upper layer
 */
static void emac_rx(struct net_device *dev)
{
	struct emac_board_info *db = netdev_priv(dev);
	struct sk_buff *skb;
	u8 *rdptr;
	bool good_packet;
	static int rxlen_last;
	unsigned int reg_val;
	u32 rxhdr, rxstatus, rxcount, rxlen;

	/* Check packet ready or not */
	while (1) {
		/* race warning: the first packet might arrive with
		 * the interrupts disabled, but the second will fix
		 * it
		 */
		rxcount = readl(db->membase + EMAC_RX_FBC_REG);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RXCount: %x\n", rxcount);

		if ((db->skb_last != NULL) && (rxlen_last > 0)) {
			dev->stats.rx_bytes += rxlen_last;

			/* Pass to upper layer */
			db->skb_last->protocol = eth_type_trans(db->skb_last,
								dev);
			netif_rx(db->skb_last);
			dev->stats.rx_packets++;
			db->skb_last = NULL;
			rxlen_last = 0;

			reg_val = readl(db->membase + EMAC_RX_CTL_REG);
			reg_val &= ~EMAC_RX_CTL_DMA_EN;
			writel(reg_val, db->membase + EMAC_RX_CTL_REG);
		}

		if (!rxcount) {
			db->emacrx_completed_flag = 1;
			reg_val = readl(db->membase + EMAC_INT_CTL_REG);
			reg_val |= (0xf << 0) | (0x01 << 8);
			writel(reg_val, db->membase + EMAC_INT_CTL_REG);

			/* had one stuck? */
			rxcount = readl(db->membase + EMAC_RX_FBC_REG);
			if (!rxcount)
				return;
		}

		reg_val = readl(db->membase + EMAC_RX_IO_DATA_REG);
		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "receive header: %x\n", reg_val);
		if (reg_val != EMAC_UNDOCUMENTED_MAGIC) {
			/* disable RX */
			reg_val = readl(db->membase + EMAC_CTL_REG);
			writel(reg_val & ~EMAC_CTL_RX_EN,
			       db->membase + EMAC_CTL_REG);

			/* Flush RX FIFO */
			reg_val = readl(db->membase + EMAC_RX_CTL_REG);
			writel(reg_val | (1 << 3),
			       db->membase + EMAC_RX_CTL_REG);

			do {
				reg_val = readl(db->membase + EMAC_RX_CTL_REG);
			} while (reg_val & (1 << 3));

			/* enable RX */
			reg_val = readl(db->membase + EMAC_CTL_REG);
			writel(reg_val | EMAC_CTL_RX_EN,
			       db->membase + EMAC_CTL_REG);
			reg_val = readl(db->membase + EMAC_INT_CTL_REG);
			reg_val |= (0xf << 0) | (0x01 << 8);
			writel(reg_val, db->membase + EMAC_INT_CTL_REG);

			db->emacrx_completed_flag = 1;

			return;
		}

		/* A packet ready now  & Get status/length */
		good_packet = true;

		rxhdr = readl(db->membase + EMAC_RX_IO_DATA_REG);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "rxhdr: %x\n", *((int *)(&rxhdr)));

		rxlen = EMAC_RX_IO_DATA_LEN(rxhdr);
		rxstatus = EMAC_RX_IO_DATA_STATUS(rxhdr);

		if (netif_msg_rx_status(db))
			dev_dbg(db->dev, "RX: status %02x, length %04x\n",
				rxstatus, rxlen);

		/* Packet Status check */
		if (rxlen < 0x40) {
			good_packet = false;
			if (netif_msg_rx_err(db))
				dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
		}

		if (unlikely(!(rxstatus & EMAC_RX_IO_DATA_STATUS_OK))) {
			good_packet = false;

			if (rxstatus & EMAC_RX_IO_DATA_STATUS_CRC_ERR) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "crc error\n");
				dev->stats.rx_crc_errors++;
			}

			if (rxstatus & EMAC_RX_IO_DATA_STATUS_LEN_ERR) {
				if (netif_msg_rx_err(db))
					dev_dbg(db->dev, "length error\n");
				dev->stats.rx_length_errors++;
			}
		}

		/* Move data from EMAC */
		if (good_packet) {
			skb = netdev_alloc_skb(dev, rxlen + 4);
			if (!skb)
				continue;
			skb_reserve(skb, 2);
			rdptr = skb_put(skb, rxlen - 4);

			/* Read received packet from RX SRAM */
			if (netif_msg_rx_status(db))
				dev_dbg(db->dev, "RxLen %x\n", rxlen);

			emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
					rdptr, rxlen);
			dev->stats.rx_bytes += rxlen;

			/* Pass to upper layer */
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
		}
	}
}
Example no. 22
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		netif_rx_complete(priv->dev, napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
						" duplicate EOQ: %p and %p\n",
						priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size) */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
					"restart rx from a descriptor that's "
					"not free: %p\n",
					priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);
	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		netif_rx_complete(priv->dev, napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */

	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
				"Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			priv->dev->name,
			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	netif_rx_complete(priv->dev, napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;

}
Example no. 23
static void cyrf6936_rx_tx(struct cyrf6936_net *p)
{
	u8 val, *data;
	size_t rx_len;
	struct sk_buff *skb;

	/* update signal level */
	cyrf6936_iw_rssi(p);
	/* rx */
	val = cyrf6936_rreg(p, RX_IRQ_STATUS);
	if (val & RXE_IRQ)
		goto err_rxe;
	if (val & RXC_IRQ) {
		/* debouncing, 2nd read */
		val = cyrf6936_rreg(p, RX_IRQ_STATUS);
		if (val & RXE_IRQ)
			goto err_rxe;
		/* get data length*/
		rx_len = cyrf6936_rreg(p, RX_LENGTH);
		/* allocate buffer */
		skb = dev_alloc_skb(rx_len + NET_IP_ALIGN);
		if (!skb)
			goto err_oom;
		skb_reserve(skb, NET_IP_ALIGN);
		data = skb_put(skb, rx_len);
		/* read data */
		cyrf6936_wreg(p, RX_IRQ_STATUS, RXOW_IRQ);
		while (rx_len--)
			*data++ = cyrf6936_rreg(p, RX_BUFFER);
		/* dump received packet data */
		if (netif_msg_pktdata(p))
			print_hex_dump_bytes("cyrf6936 rx data: ",
					DUMP_PREFIX_NONE, skb->data, skb->len);
		skb->dev = p->netdev;
		skb->protocol = htons(ETH_P_ALL);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		p->stats.rx_packets++;
		p->stats.rx_bytes += rx_len;
		netif_rx_ni(skb);
		cyrf6936_rx_enable(p);
	}
	/* tx */
	val = cyrf6936_rreg(p, TX_IRQ_STATUS);
	if (val & TXE_IRQ)
		goto err_txe;
	if (val & TXC_IRQ) {
		/* debouncing, 2nd read */
		val = cyrf6936_rreg(p, TX_IRQ_STATUS);
		if (val & TXE_IRQ)
			goto err_txe;
		/* tx ok*/
		p->stats.tx_packets++;
		p->stats.tx_bytes += p->tx_skb->len;
		if (netif_msg_tx_done(p))
			dev_dbg(&p->netdev->dev, "tx done\n");
		dev_kfree_skb(p->tx_skb);
		p->tx_skb = NULL;
		netif_wake_queue(p->netdev);
		cyrf6936_rx_enable(p);
	}

	if (!p->pollmode)
		enable_irq(p->netdev->irq);
	return;
err_rxe:
	if (netif_msg_rx_err(p))
		dev_info(&p->netdev->dev, "rx error\n");
	p->stats.rx_errors++;
	cyrf6936_rx_enable(p);
	return;
err_txe:
	if (netif_msg_tx_err(p))
		dev_info(&p->netdev->dev, "tx error\n");
	p->stats.tx_errors++;
	cyrf6936_rx_enable(p);
	return;
err_oom:
	dev_err(&p->netdev->dev, "out of memory, packet dropped\n");
	p->stats.rx_dropped++;
	cyrf6936_rx_enable(p);
}
Example no. 24
/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE:  with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0)
				goto fail_lowmem;
			rx_submit (dev, urb, GFP_KERNEL);
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}
static void read_bulk_callback(struct urb *urb)
{
	struct sk_buff *skb = NULL;
	BOOLEAN bHeaderSupressionEnabled = FALSE;
	int QueueIndex = NO_OF_QUEUES + 1;
	UINT uiIndex=0;
	int process_done = 1;
	
	PUSB_RCB pRcb = (PUSB_RCB)urb->context;
	PS_INTERFACE_ADAPTER psIntfAdapter = pRcb->psIntfAdapter;
	PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter;
	PLEADER pLeader = urb->transfer_buffer;

	if (unlikely(netif_msg_rx_status(Adapter)))
		pr_info(PFX "%s: rx urb status %d length %d\n",
			Adapter->dev->name, urb->status, urb->actual_length);

	if((Adapter->device_removed == TRUE)  ||
		(TRUE == Adapter->bEndPointHalted) ||
		(0 == urb->actual_length)
		)
	{
	 	pRcb->bUsed = FALSE;
 		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		return;
	}

	if(urb->status != STATUS_SUCCESS)
	{
		if(urb->status == -EPIPE)
		{
			Adapter->bEndPointHalted = TRUE ;
			wake_up(&Adapter->tx_packet_wait_queue);
		}
		else
		{
			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"Rx URB has got cancelled. status :%d", urb->status);
		}
		pRcb->bUsed = FALSE;
 		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		urb->status = STATUS_SUCCESS ;
		return ;
	}

	if(Adapter->bDoSuspend && (Adapter->bPreparingForLowPowerMode))
	{
		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"device is going in low power mode while PMU option selected..hence rx packet should not be process");
		return ;
	}

	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Read back done len %d\n", pLeader->PLength);
	if(!pLeader->PLength)
	{
		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Length 0");
		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		return;
	}
	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status,pLeader->PLength,pLeader->Vcid);
	if(MAX_CNTL_PKT_SIZE < pLeader->PLength)
	{
		if (netif_msg_rx_err(Adapter))
			pr_info(PFX "%s: corrupted leader length...%d\n",
				Adapter->dev->name, pLeader->PLength);
		++Adapter->dev->stats.rx_dropped;
		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		return;
	}

	QueueIndex = SearchVcid( Adapter,pLeader->Vcid);
	if(QueueIndex < NO_OF_QUEUES)
	{
		bHeaderSupressionEnabled =
			Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
		bHeaderSupressionEnabled =
			bHeaderSupressionEnabled & Adapter->bPHSEnabled;
	}

	skb = dev_alloc_skb (pLeader->PLength + SKB_RESERVE_PHS_BYTES + SKB_RESERVE_ETHERNET_HEADER);
	if(!skb)
	{
		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "NO SKBUFF!!! Dropping the Packet");
		atomic_dec(&psIntfAdapter->uNumRcbUsed);
		return;
	}
    
	if((ntohs(pLeader->Vcid) == VCID_CONTROL_PACKET) ||
	    (!(pLeader->Status >= 0x20  &&  pLeader->Status <= 0x3F)))
	{
	    BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_CTRL, DBG_LVL_ALL, "Received control pkt...");
		*(PUSHORT)skb->data = pLeader->Status;
       	memcpy(skb->data+sizeof(USHORT), urb->transfer_buffer +
			(sizeof(LEADER)), pLeader->PLength);
		skb->len = pLeader->PLength + sizeof(USHORT);

		spin_lock(&Adapter->control_queue_lock);
		ENQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail,skb);
		spin_unlock(&Adapter->control_queue_lock);

		atomic_inc(&Adapter->cntrlpktCnt);
		wake_up(&Adapter->process_rx_cntrlpkt);
	}
	else
	{
        BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt...");
		skb_reserve(skb, 2 + SKB_RESERVE_PHS_BYTES);
		memcpy(skb->data+ETH_HLEN, (PUCHAR)urb->transfer_buffer + sizeof(LEADER), pLeader->PLength);
		skb->dev = Adapter->dev;

		
		skb_put (skb, pLeader->PLength + ETH_HLEN);
		Adapter->PackInfo[QueueIndex].uiTotalRxBytes+=pLeader->PLength;
		Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes+= pLeader->PLength;
        BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Received Data pkt of len :0x%X", pLeader->PLength);

		if(netif_running(Adapter->dev))
		{
			
			skb_pull(skb, ETH_HLEN);
			PHSReceive(Adapter, pLeader->Vcid, skb, &skb->len,
					NULL,bHeaderSupressionEnabled);

			if(!Adapter->PackInfo[QueueIndex].bEthCSSupport)
			{
				skb_push(skb, ETH_HLEN);

				memcpy(skb->data, skb->dev->dev_addr, 6);
				memcpy(skb->data+6, skb->dev->dev_addr, 6);
				(*(skb->data+11))++;
				*(skb->data+12) = 0x08;
				*(skb->data+13) = 0x00;
				pLeader->PLength+=ETH_HLEN;
			}

			skb->protocol = eth_type_trans(skb, Adapter->dev);
			process_done = netif_rx(skb);
		}
		else
		{
		    BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hance freeing SKB...");
			dev_kfree_skb(skb);
		}

		++Adapter->dev->stats.rx_packets;
		Adapter->dev->stats.rx_bytes += pLeader->PLength;

		for(uiIndex = 0 ; uiIndex < MIBS_MAX_HIST_ENTRIES ; uiIndex++)
		{
			if((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1))
				&& (pLeader->PLength > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
				Adapter->aRxPktSizeHist[uiIndex]++;
		}
	}
 	Adapter->PrevNumRecvDescs++;
	pRcb->bUsed = FALSE;
	atomic_dec(&psIntfAdapter->uNumRcbUsed);
}
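
All of the examples above share the same logging idiom: the driver keeps a msg_enable bitmap (usually filled in with netif_msg_init(), as in example no. 13) and wraps each receive-error message in netif_msg_rx_err() so verbosity can be tuned via a module parameter or ethtool. A minimal, self-contained sketch of that idiom, with a hypothetical driver private struct and function names, could look like:

struct foo_priv {
	struct net_device *netdev;
	u32 msg_enable;			/* read by the netif_msg_*() helpers */
};

static void foo_init_logging(struct foo_priv *priv, int msg_level)
{
	/* msg_level == -1 (unset module parameter) falls back to the defaults */
	priv->msg_enable = netif_msg_init(msg_level,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
}

static void foo_note_rx_error(struct foo_priv *priv, int status)
{
	priv->netdev->stats.rx_errors++;
	if (netif_msg_rx_err(priv))	/* only log when NETIF_MSG_RX_ERR is set */
		netdev_err(priv->netdev, "rx error, status %d\n", status);
}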