Example No. 1
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;
#if !defined(CONFIG_MDM_HSIC_PM)
	/* whether or not the bridge is open, keep consuming mdm requests:
	 * this link is not dead, it is alive
	 */
	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;
#endif
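	/* drain rx_done: push each completed packet up to the bridge client */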
	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
#ifdef CONFIG_MDM_HSIC_PM
		/* whether or not the bridge is open, keep consuming mdm requests:
		 * this link is not dead, it is alive
		 */
		if (!brdg) {
			print_hex_dump(KERN_INFO, "dun:", 0, 1, 1, skb->data,
							skb->len, false);
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

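	/* refill the RX pipeline: resubmit idle urbs until rx_done backs up past the limit */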
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
Example No. 2
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;

// ASUS_BSP+++ Wenli "tty device for AT command"
#ifndef DISABLE_ASUS_DUN
	if (is_open_asus) {
		data_bridge_process_rx_asus(work);
		return;
	}
#endif
// ASUS_BSP--- Wenli "tty device for AT command"
	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev) || !is_open_usb)
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
Example No. 3
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
Example No. 4
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	if (dev->rx_done.qlen > stop_submit_urb_limit && rx_throttled(brdg)) {
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		return;
	}

	while (!list_empty(&dev->rx_idle)) {
		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			/* requeue the urb so it is not lost if submission fails */
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
Example No. 5
static void data_bridge_process_rx_asus(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	if (rx_halted(dev))
		return;

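	/* no throttling here: drain everything queued for the AT-command (gdun) path */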
	while ((skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* pass the data to the gdun client; the skb is freed here, not by the client */
		retval = gdun_rx_string(skb);
		dev_kfree_skb_any(skb);
		if (retval < 0) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
Example No. 6
static void work_urbs(struct work_struct *work)
{
	struct atusb *atusb =
	    container_of(to_delayed_work(work), struct atusb, work);
	struct usb_device *usb_dev = atusb->usb_dev;
	struct urb *urb;
	int ret;

	if (atusb->shutdown)
		return;

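	/* resubmit idle RX URBs until the anchor is empty or a submission fails */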
	do {
		urb = usb_get_from_anchor(&atusb->idle_urbs);
		if (!urb)
			return;
		ret = submit_rx_urb(atusb, urb);
	} while (!ret);

	usb_anchor_urb(urb, &atusb->idle_urbs);
	dev_warn_ratelimited(&usb_dev->dev,
			     "atusb_in: can't allocate/submit URB (%d)\n", ret);
	schedule_delayed_work(&atusb->work,
			      msecs_to_jiffies(ALLOC_DELAY_MS) + 1);
}