static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);
	struct bridge		*brdg = dev->brdg;

#if !defined(CONFIG_MDM_HSIC_PM)
	/*
	 * Whether or not the bridge is open, keep consuming modem
	 * requests: the link itself is not dead, it is alive.
	 */
	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;
#endif
	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
#ifdef CONFIG_MDM_HSIC_PM
		/*
		 * Whether or not the bridge is open, keep consuming modem
		 * requests: the link itself is not dead, it is alive.
		 */
		if (!brdg) {
			print_hex_dump(KERN_INFO, "dun:", 0, 1, 1,
					skb->data, skb->len, false);
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);

		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);

		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
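When send_pkt() returns -EBUSY, the dequeue loop above bumps rx_throttled_cnt and breaks out, leaving any remaining skbs on rx_done; nothing in this function reschedules itself. Processing resumes only when the client re-queues process_rx_w. A minimal sketch of that companion unthrottle hook follows; the __dev device table, MAX_BRIDGE_DEVICES, dev->wq, and rx_unthrottled_cnt are assumed names that do not appear in this section:

/*
 * Sketch only: __dev, MAX_BRIDGE_DEVICES, wq and rx_unthrottled_cnt
 * are assumed fields/globals, not taken from the code above.
 */
void data_bridge_unthrottle_rx(unsigned int id)
{
	struct data_bridge *dev;

	if (id >= MAX_BRIDGE_DEVICES)
		return;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return;

	dev->rx_unthrottled_cnt++;

	/* re-run data_bridge_process_rx() to drain rx_done and
	 * resubmit idle urbs */
	queue_work(dev->wq, &dev->process_rx_w);
}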
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);
	struct bridge		*brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	if (dev->rx_done.qlen > stop_submit_urb_limit && rx_throttled(brdg)) {
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		return;
	}

	while (!list_empty(&dev->rx_idle)) {
		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);

		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);

		if (retval) {
			/* return the urb to the idle pool so it is not
			 * leaked when resubmission fails */
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);
	struct bridge		*brdg = dev->brdg;

	// ASUS_BSP+++ Wenli "tty device for AT command"
#ifndef DISABLE_ASUS_DUN
	if (is_open_asus) {
		data_bridge_process_rx_asus(work);
		return;
	}
#endif
	// ASUS_BSP--- Wenli "tty device for AT command"

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev) || !is_open_usb)
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);

		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);

		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);
	struct bridge		*brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);

		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);

		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
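Every variant interprets the same three outcomes from brdg->ops.send_pkt(): -ENOTCONN or -EINVAL aborts processing entirely, -EBUSY throttles RX until the client unthrottles, and anything else counts the skb as delivered. Since the caller never frees the skb after the call, ownership must pass to the client on every return path. A minimal client callback honouring that contract might look like the following sketch; struct my_client, its fields, and the exact send_pkt signature are assumptions, as none of them are defined in this section:

#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/atomic.h>

/*
 * Hypothetical client context; none of these names appear in the
 * driver code above.
 */
struct my_client {
	struct sk_buff_head	tx_q;		/* packets pending transmit */
	atomic_t		connected;
	unsigned int		pend_limit;	/* throttle threshold */
};

/*
 * Sketch of a send_pkt callback. The skb is handed off on every call,
 * so the client must queue or free it even on the error paths.
 */
static int my_client_send_pkt(void *ctx, void *data, size_t len)
{
	struct my_client *clnt = ctx;
	struct sk_buff *skb = data;

	if (!clnt || !atomic_read(&clnt->connected)) {
		dev_kfree_skb_any(skb);
		return -ENOTCONN;	/* caller aborts the dequeue loop */
	}

	skb_queue_tail(&clnt->tx_q, skb);	/* client now owns the skb */

	/* Accept the packet but ask the bridge to throttle RX; the
	 * dequeue loop resumes once the client unthrottles. */
	if (skb_queue_len(&clnt->tx_q) > clnt->pend_limit)
		return -EBUSY;

	return 0;
}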