static void data_bridge_read_cb(struct urb *urb)
{
	struct bridge		*brdg;
	struct sk_buff		*skb = urb->context;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
	struct data_bridge	*dev = info->dev;
	bool			queue = 0;

	brdg = dev->brdg;
	skb_put(skb, urb->actual_length);

	switch (urb->status) {
	case -ENOENT: /* suspended */
	case 0: /* success */
		queue = 1;
		info->rx_done = get_timestamp();
		spin_lock(&dev->rx_done.lock);
		__skb_queue_tail(&dev->rx_done, skb);
		spin_unlock(&dev->rx_done.lock);
#ifdef CONFIG_MDM_HSIC_PM
		/* wakelock for fast dormancy */
		if (urb->actual_length)
			fast_dormancy_wakelock(rmnet_pm_dev);
#endif
		break;

	/*do not resubmit*/
	case -EPIPE:
		set_bit(RX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ECONNRESET: /* unplug */
	case -EPROTO:
		dev_kfree_skb_any(skb);
		break;

	/*resubmit */
	case -EOVERFLOW: /*babble error*/
	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
		break;
	}

	spin_lock(&dev->rx_done.lock);
	urb->context = NULL;
	list_add_tail(&urb->urb_list, &dev->rx_idle);
	spin_unlock(&dev->rx_done.lock);

	/* during suspend handle rx packet, but do not queue rx work */
	if (urb->status == -ENOENT)
		return;

	if (queue)
		queue_work(dev->wq, &dev->process_rx_w);
}
/* (extraction artifact removed here: stray non-code text "示例#2" / "0") */
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 * If rx is currently paused, the skb is parked on rxq_pause instead.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	/* rx paused: queue the skb for later replay rather than delivering */
	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	/* rx_fixup may already have set the protocol; derive it only once */
	if (!skb->protocol)
		skb->protocol = eth_type_trans(skb, dev->net);

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	/* clear driver-private cb state before handing the skb to the stack */
	memset (skb->cb, 0, sizeof (struct skb_data));

	/* skb taken over by the rx timestamping path; we are done with it */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
#ifdef CONFIG_MDM_HSIC_PM
	/* NOTE(review): 0x9048/0x904C look like specific modem product ids —
	 * confirm against the device id table before relying on this.
	 */
	if (dev->udev->descriptor.idProduct == 0x9048 ||
				dev->udev->descriptor.idProduct == 0x904C) {
		pr_debug("rx fast dormancy wakelock\n");
		fast_dormancy_wakelock(rmnet_pm_dev);
	}
#endif
}
/*
 * data_bridge_process_rx() - workqueue handler that drains dev->rx_done.
 *
 * Hands completed skbs to the bridge client via brdg->ops.send_pkt()
 * (ownership transfers to the client, which frees them), then refills
 * the hardware by resubmitting any URBs parked on dev->rx_idle.
 */
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;
#if !defined(CONFIG_MDM_HSIC_PM)
	/* if the bridge is open or not, resume to consume mdm request
	 * because this link is not dead, it's alive
	 */
	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;
#endif

#ifdef CONFIG_MDM_HSIC_PM
	/* wakelock for fast dormancy */
	fast_dormancy_wakelock(rmnet_pm_dev);
#endif

	/* NOTE(review): with CONFIG_MDM_HSIC_PM enabled the NULL check on
	 * brdg above is compiled out, so rx_throttled() can be called with
	 * a NULL bridge here — verify rx_throttled() tolerates that.
	 */
	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
#ifdef CONFIG_MDM_HSIC_PM
		/* if the bridge is open or not, resume to consume mdm request
		 * because this link is not dead, it's alive
		 */
		if (!brdg) {
			/* no client attached: log the payload and drop it */
			print_hex_dump(KERN_INFO, "dun:", 0, 1, 1, skb->data,
							skb->len, false);
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off sk_buff to client,they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			/* client gone or invalid: bail without refilling URBs */
			return;
		} else if (retval == -EBUSY) {
			/* client throttled: leave remaining skbs on rx_done */
			dev->rx_throttled_cnt++;
			break;
		}
	}

	/* refill: resubmit idle URBs until the done-queue backlog is high */
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		/* drop the lock across submit_rx_urb(): GFP_KERNEL may sleep */
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			/* submission failed: return the URB and stop refilling */
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}