static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
			dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		/* async unlink */
		case -ESHUTDOWN:		/* hardware gone */
			break;

		/* like rx, tx gets controller i/o faults during khubd delays
		 * and so it uses the same throttling mechanism.
		 */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
#ifdef CONFIG_HTC_QCT_9K_MDM_HSIC_PM_DBG
			usb_mark_intf_last_busy(dev->intf, true);
#endif /* CONFIG_HTC_QCT_9K_MDM_HSIC_PM_DBG */
			usb_mark_last_busy(dev->udev);
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					   jiffies + THROTTLE_JIFFIES);
				netif_dbg(dev, link, dev->net,
					  "tx throttle %d\n", urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);
	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}
static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb,
			 gfp_t flags)
{
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	int			retval = -EINVAL;
	unsigned int		created;

	created = get_timestamp();
	skb = alloc_skb(RMNET_RX_BUFSIZE, flags);
	if (!skb)
		return -ENOMEM;

	info = (struct timestamp_info *)skb->cb;
	info->dev = dev;
	info->created = created;

	usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in,
			  skb->data, RMNET_RX_BUFSIZE,
			  data_bridge_read_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags))
		goto suspended;

	usb_anchor_urb(rx_urb, &dev->rx_active);
	info->rx_queued = get_timestamp();
	retval = usb_submit_urb(rx_urb, flags);
	if (retval)
		goto fail;

#ifdef HTC_PM_DBG
	if (usb_pm_debug_enabled)
		usb_mark_intf_last_busy(dev->intf, false);
#endif
	usb_mark_last_busy(dev->udev);
	return 0;

fail:
	usb_unanchor_urb(rx_urb);
suspended:
	dev_kfree_skb_any(skb);

	return retval;
}
void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	if (enable_tx_rx_debug && (urb_status != -ECONNRESET))
		netdev_info(dev->net, "[RMNET_D]rx_c, status: %d\n",
			    urb_status);

	switch (urb_status) {
	/* success */
	case 0:
		break;

	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		/* fall through */
	case -ECONNRESET:
	case -ESHUTDOWN:
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* fall through */
	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
#ifdef HTC_PM_DBG
			if (usb_pm_debug_enabled)
				usb_mark_intf_last_busy(dev->intf, true);
#endif
			usb_mark_last_busy(dev->udev);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	usb_complete_t		complete_fn;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	if (dev->net->type != ARPHRD_RAWIP)
		skb_reserve(skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	if (dev->driver_info->rx_complete)
		complete_fn = dev->driver_info->rx_complete;
	else
		complete_fn = rx_complete;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
			   skb->data, size, complete_fn, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			queue_work(usbnet_wq, &dev->bh_w);
			break;
		case 0:
#ifdef HTC_PM_DBG
			if (usb_pm_debug_enabled)
				usb_mark_intf_last_busy(dev->intf, true);
#endif
			usb_mark_last_busy(dev->udev);
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);

	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;

	skb_put (skb, urb->actual_length);
	entry->state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			entry->state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		/* HTC: accumulate received byte count */
		usbnet_rx_len += skb->len;
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		/* FALLTHROUGH */

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		entry->state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		entry->state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	defer_bh(dev, skb, &dev->rxq);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags)) {
			rx_submit (dev, urb, GFP_ATOMIC);
#ifdef CONFIG_HTC_QCT_9K_MDM_HSIC_PM_DBG
			usb_mark_intf_last_busy(dev->intf, true);
#endif /* CONFIG_HTC_QCT_9K_MDM_HSIC_PM_DBG */
			usb_mark_last_busy(dev->udev);
			return;
		}
		/* HTC: log before the urb is dropped */
		pr_info("%s(%d) [USBNET] usb_free_urb urb:%p !!!\n",
			__func__, __LINE__, urb);
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	skb = alloc_skb (size + NET_IP_ALIGN, flags);
	if (skb == NULL) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	if (dev->net->type != ARPHRD_RAWIP)
		skb_reserve(skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
			   skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
#ifdef CONFIG_HTC_QCT_9K_MDM_HSIC_PM_DBG
			usb_mark_intf_last_busy(dev->intf, true);
#endif /* CONFIG_HTC_QCT_9K_MDM_HSIC_PM_DBG */
			usb_mark_last_busy(dev->udev);
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);

	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
static void resp_avail_cb(struct urb *urb)
{
	struct usb_device		*udev;
	struct ctrl_pkt_list_elem	*list_elem = NULL;
	struct rmnet_ctrl_dev		*dev = urb->context;
	void				*cpkt;
	int				status = 0;
	size_t				cpkt_size = 0;

	udev = interface_to_usbdev(dev->intf);

	usb_autopm_put_interface_async(dev->intf);

#ifdef HTC_DEBUG_QMI_STUCK
	del_timer(&dev->rcv_timer);
	if (expired_rcvurb == urb) {
		expired_rcvurb = NULL;
		dev_err(&(dev->intf->dev),
			"[RMNET] %s(%d) urb->status:%d urb->actual_length:%u !!!\n",
			__func__, __LINE__, urb->status, urb->actual_length);
		if (urb->status != 0) {
			if (urb->actual_length > 0) {
				dev_err(&(dev->intf->dev),
					"[RMNET] %s(%d) set urb->status 0 !!!\n",
					__func__, __LINE__);
				urb->status = 0;
			} else {
				dev_err(&(dev->intf->dev),
					"[RMNET] %s(%d) dev->inturb->anchor(%x) !!!\n",
					__func__, __LINE__,
					(dev->inturb) ?
					(unsigned int)dev->inturb->anchor :
					(unsigned int)(0xffffffff));
				dev_err(&(dev->intf->dev),
					"[RMNET] %s(%d) goto resubmit_int_urb !!!\n",
					__func__, __LINE__);
				goto resubmit_int_urb;
			}
		}
	}
#endif

	switch (urb->status) {
	case 0:
		/* success */
		dev->get_encap_resp_cnt++;
		break;

	/* do not resubmit */
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;

	/* resubmit */
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
		/* fall through */
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
				     __func__, urb->status);
		goto resubmit_int_urb;
	}

	dev_dbg(dev->devicep, "Read %d bytes for %s\n",
		urb->actual_length, dev->name);

#ifdef HTC_LOG_RMNET_USB_CTRL
	log_rmnet_usb_ctrl_event(dev->intf, "Rx cb", urb->actual_length);
#endif

	cpkt = urb->transfer_buffer;
	cpkt_size = urb->actual_length;
	if (!cpkt_size) {
		dev->zlp_cnt++;
		dev_dbg(dev->devicep, "%s: zero length pkt received\n",
			__func__);
		goto resubmit_int_urb;
	}

	list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
	if (!list_elem) {
		dev_err(dev->devicep, "%s: list_elem alloc failed\n",
			__func__);
		return;
	}
	list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
	if (!list_elem->cpkt.data) {
		dev_err(dev->devicep, "%s: list_elem->data alloc failed\n",
			__func__);
		kfree(list_elem);
		return;
	}
	memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
	list_elem->cpkt.data_size = cpkt_size;
	spin_lock(&dev->rx_lock);
	list_add_tail(&list_elem->list, &dev->rx_list);
	spin_unlock(&dev->rx_lock);

	wake_up(&dev->read_wait_queue);

resubmit_int_urb:
#ifdef HTC_PM_DBG
	if (usb_pm_debug_enabled)
		usb_mark_intf_last_busy(dev->intf, false);
#endif
	/* re-submit int urb only if it is not anchored (i.e. not in flight) */
	if (!dev->inturb->anchor) {
		usb_mark_last_busy(udev);
		usb_anchor_urb(dev->inturb, &dev->rx_submitted);
		status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
		if (status) {
			usb_unanchor_urb(dev->inturb);
			dev_err(dev->devicep,
				"%s: Error re-submitting Int URB %d\n",
				__func__, status);
		}
	}
}
static void notification_available_cb(struct urb *urb)
{
	int				status;
	struct usb_cdc_notification	*ctrl;
	struct usb_device		*udev;
	struct rmnet_ctrl_dev		*dev = urb->context;

	udev = interface_to_usbdev(dev->intf);

	switch (urb->status) {
	case 0:
	case -ENOENT:
		/* success */
		break;

	/* do not resubmit */
	case -ESHUTDOWN:
	case -ECONNRESET:
	case -EPROTO:
		return;
	case -EPIPE:
		pr_err_ratelimited("%s: Stall on int endpoint\n", __func__);
		return;

	/* resubmit */
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
		/* fall through */
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
				     __func__, urb->status);
		goto resubmit_int_urb;
	}

	if (!urb->actual_length)
		return;

	ctrl = urb->transfer_buffer;

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail_cnt++;
#ifdef HTC_PM_DBG
		if (usb_pm_debug_enabled)
			usb_mark_intf_last_busy(dev->intf, false);
#endif
		usb_mark_last_busy(udev);
		queue_work(dev->wq, &dev->get_encap_work);

		if (!dev->resp_available) {
			dev->resp_available = true;
			if (dev->intf)
				dev_info(&dev->intf->dev,
					 "%s[%d]:dev->resp_available:%d\n",
					 __func__, __LINE__,
					 dev->resp_available);
			wake_up(&dev->open_wait_queue);
		}

		return;
	default:
		dev_err(dev->devicep, "%s:Command not implemented\n",
			__func__);
	}

resubmit_int_urb:
#ifdef HTC_PM_DBG
	if (usb_pm_debug_enabled)
		usb_mark_intf_last_busy(dev->intf, false);
#endif
	usb_mark_last_busy(udev);
	usb_anchor_urb(urb, &dev->rx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		usb_unanchor_urb(urb);
		dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n",
			__func__, status);
	}

	return;
}
int rmnet_usb_ctrl_probe(struct usb_interface *intf,
			 struct usb_host_endpoint *int_in,
			 struct rmnet_ctrl_dev *dev)
{
	u16				wMaxPacketSize;
	struct usb_endpoint_descriptor	*ep;
	struct usb_device		*udev;
	int				interval;
	int				ret = 0;

	udev = interface_to_usbdev(intf);

	if (!dev) {
		pr_err("%s: Ctrl device not found\n", __func__);
		return -ENODEV;
	}
	dev->int_pipe = usb_rcvintpipe(udev,
		int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	mutex_lock(&dev->dev_lock);
	dev->intf = intf;
	dev->cbits_tolocal = ACM_CTRL_CD;
	dev->cbits_tomdm = ACM_CTRL_DTR;
	mutex_unlock(&dev->dev_lock);

	dev->resp_available = false;
	dev->snd_encap_cmd_cnt = 0;
	dev->get_encap_resp_cnt = 0;
	dev->resp_avail_cnt = 0;
	dev->tx_ctrl_err_cnt = 0;
	dev->set_ctrl_line_state_cnt = 0;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
		USB_CDC_REQ_SET_CONTROL_LINE_STATE,
		(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE),
		dev->cbits_tomdm,
		dev->intf->cur_altsetting->desc.bInterfaceNumber,
		NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (ret < 0)
		return ret;

	dev->set_ctrl_line_state_cnt++;

	dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->inturb) {
		dev_err(dev->devicep, "Error allocating int urb\n");
		return -ENOMEM;
	}

	ep = &dev->intf->cur_altsetting->endpoint[0].desc;
	wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize);

	dev->intbuf = kmalloc(wMaxPacketSize, GFP_KERNEL);
	if (!dev->intbuf) {
		usb_free_urb(dev->inturb);
		dev_err(dev->devicep, "Error allocating int buffer\n");
		return -ENOMEM;
	}

	dev->in_ctlreq->bRequestType =
		(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
	dev->in_ctlreq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
	dev->in_ctlreq->wValue = 0;
	dev->in_ctlreq->wIndex =
		dev->intf->cur_altsetting->desc.bInterfaceNumber;
	dev->in_ctlreq->wLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH);

	interval = max((int)int_in->desc.bInterval,
		       (udev->speed == USB_SPEED_HIGH) ? HS_INTERVAL
						       : FS_LS_INTERVAL);

	usb_fill_int_urb(dev->inturb, udev, dev->int_pipe,
			 dev->intbuf, wMaxPacketSize,
			 notification_available_cb, dev, interval);

#ifdef HTC_PM_DBG
	if (usb_pm_debug_enabled)
		usb_mark_intf_last_busy(dev->intf, false);
#endif
	usb_mark_last_busy(udev);

	dev_info(&intf->dev,
		 "%s[%d]:rmnet_usb_ctrl_start_rx dev->resp_available:%d\n",
		 __func__, __LINE__, dev->resp_available);
	ret = rmnet_usb_ctrl_start_rx(dev);

#ifdef HTC_MDM_RESTART_IF_RMNET_OPEN_TIMEOUT
	if (!ret)
		dev->connected_jiffies = jiffies;
#endif

	if (!ret)
		dev->is_connected = true;

	return ret;
}