/*
 * btusb_intr_complete() - completion handler for the HCI interrupt-IN urb.
 * Feeds received bytes to the HCI event reassembler and resubmits the urb
 * while interrupt RX is still enabled.  Runs in atomic (interrupt) context.
 */
static void btusb_intr_complete(struct urb *urb)
{
	struct hci_dev *hdev = urb->context;
	struct btusb_data *data = hdev->driver_data;
	int err;

	BT_DBG("%s urb %p status %d count %d", hdev->name,
					urb, urb->status, urb->actual_length);

	/* HCI core has been stopped: drop the data, do not resubmit. */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return;

	if (urb->status == 0) {
		hdev->stat.byte_rx += urb->actual_length;

		/* A negative return means the event fragment stream is
		 * corrupted; count it and carry on. */
		if (hci_recv_fragment(hdev, HCI_EVENT_PKT,
						urb->transfer_buffer,
						urb->actual_length) < 0) {
			BT_ERR("%s corrupted event packet", hdev->name);
			hdev->stat.err_rx++;
		}
	}

	/* Interrupt RX torn down while we ran: stop resubmitting. */
	if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
		return;

	usb_mark_last_busy(data->udev);	/* keep the autosuspend timer fresh */
	usb_anchor_urb(urb, &data->intr_anchor);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		/* -EPERM means the urb is being killed; not a real error. */
		if (err != -EPERM)
			BT_ERR("%s urb %p failed to resubmit (%d)",
						hdev->name, urb, -err);
		usb_unanchor_urb(urb);
	}
}
static int start_ipc(struct link_device *ld, struct io_device *iod) { struct sk_buff *skb; char data[1] = {'a'}; int err; struct usb_link_device *usb_ld = to_usb_link_device(ld); struct if_usb_devdata *pipe_data = &usb_ld->devdata[IF_USB_FMT_EP]; if (!usb_ld->if_usb_connected) { mif_err("HSIC not connected, skip start ipc\n"); err = -ENODEV; goto exit; } mif_err("send 'a'\n"); skb = alloc_skb(16, GFP_ATOMIC); if (unlikely(!skb)) return -ENOMEM; memcpy(skb_put(skb, 1), data, 1); skbpriv(skb)->iod = iod; skbpriv(skb)->ld = ld; if (!usb_ld->if_usb_connected || !usb_ld->usbdev) return -ENODEV; usb_mark_last_busy(usb_ld->usbdev); err = usb_tx_urb_with_skb(usb_ld->usbdev, skb, pipe_data); if (err < 0) { mif_err("usb_tx_urb fail\n"); dev_kfree_skb_any(skb); goto exit; } exit: return err; }
/* Even if usb_tx_urb_with_skb is failed, does not release the skb to retry */ static int usb_tx_urb_with_skb(struct usb_device *usbdev, struct sk_buff *skb, struct if_usb_devdata *pipe_data) { int ret; struct urb *urb; if (pipe_data->disconnected) return -ENOENT; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { mif_err("alloc urb error\n"); return -ENOMEM; } #if 0 int i; for (i = 0; i < skb->len; i++) { if (i > 16) break; mif_err("[0x%02x]", *(skb->data + i)); } #endif urb->transfer_flags = URB_ZERO_PACKET; usb_fill_bulk_urb(urb, pipe_data->usbdev, pipe_data->tx_pipe, skb->data, skb->len, usb_tx_complete, (void *)skb); usb_mark_last_busy(usbdev); ret = usb_submit_urb(urb, GFP_KERNEL); if (ret < 0) { mif_err("usb_submit_urb with ret(%d)\n", ret); usb_free_urb(urb); return ret; } return 0; }
/*
 * rx_submit() - allocate a receive context plus one page of buffer and
 * (re)submit @req as a bulk-IN urb on the pipe for @dev_id.
 *
 * Returns 0 on success or a negative errno.  On submit failure the
 * context and page are released here; @req itself stays with the caller.
 */
static int rx_submit(struct usbsvn *svn, int dev_id, struct urb *req,
		gfp_t gfp_flags)
{
	struct net_device *dev = svn->netdev;
	struct usbsvn_devdata *devdata = &svn->devdata[dev_id];
	struct usbsvn_rx *svn_rx;
	struct page *page;
	int err;

	svn_rx = kzalloc(sizeof(struct usbsvn_rx), gfp_flags);
	if (!svn_rx)
		return -ENOMEM;

	page = __netdev_alloc_page(dev, gfp_flags);
	if (!page) {
		kfree(svn_rx);
		return -ENOMEM;
	}

	svn_rx->netdev = dev;
	svn_rx->dev_id = dev_id;
	/* Whole page is the RX buffer; svn_rx is handed to rx_complete(). */
	usb_fill_bulk_urb(req, svn->usbdev, devdata->rx_pipe,
			page_address(page), PAGE_SIZE, rx_complete, svn_rx);
	req->transfer_flags = 0;

	err = usb_submit_urb(req, gfp_flags);
	if (unlikely(err)) {
		dev_err(&dev->dev, "RX submit error (%d)\n", err);
		kfree(svn_rx);
		netdev_free_page(dev, page);
	}
	/* Refresh the autosuspend timestamp even on failure. */
	usb_mark_last_busy(req->dev);
	return err;
}
int usbsvn_request_resume(void) { struct device *dev; int err=0; if (!share_svn->usbdev) return -EFAULT; dev = &share_svn->usbdev->dev; if (share_svn->dpm_suspending) { share_svn->skip_hostwakeup = 1; printk(KERN_DEBUG "%s: suspending skip host wakeup\n", __func__); return 0; } usb_mark_last_busy(share_svn->usbdev); if (share_svn->resume_debug >= 1) { printk(KERN_DEBUG "%s: resumeing, return\n", __func__); return 0; } if (dev->power.status != DPM_OFF) { wake_lock_pm(share_svn); printk(KERN_DEBUG "%s:run time resume\n", __func__); share_svn->resume_debug = 1; err = pm_runtime_resume(dev); if (!err && dev->power.timer_expires == 0 && dev->power.request_pending == false) { printk(KERN_DEBUG "%s:run time idle\n", __func__); pm_runtime_idle(dev); } share_svn->resume_debug = 0; } return 0; }
static void play_deferred(struct btusb_data *data) { struct urb *urb; int err; while ((urb = usb_get_from_anchor(&data->deferred))) { /************************************/ usb_anchor_urb(urb, &data->tx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { BT_ERR("play_deferred urb %p submission failed", urb); kfree(urb->setup_packet); usb_unanchor_urb(urb); } else { usb_mark_last_busy(data->udev); } usb_free_urb(urb); /************************************/ data->tx_in_flight++; } mdelay(URB_CANCELING_DELAY_MS); // Added by Realtek usb_scuttle_anchored_urbs(&data->deferred); }
/*
 * rx_complete() - usbnet bulk-IN completion handler.
 * Classifies the urb status, hands the skb to the bottom half via
 * defer_bh(), and resubmits the urb when the interface is still up.
 * Runs in atomic context.
 */
static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		/* Frames shorter than a link-layer header are garbage. */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		/* Keep the urb paired with the skb so unlink/cleanup can
		 * reuse or release it from the bottom half. */
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* defer_bh() may change the state (e.g. to unlink_start). */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			usb_mark_last_busy(dev->udev);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
/*
 * wdm_read() - blocking/non-blocking read of one buffered WDM response.
 * Readers are serialized by desc->lock; state shared with the urb
 * completion handler is protected by desc->iuspin.
 * Returns the number of bytes copied or a negative errno.
 */
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	int rv, cntr = 0;
	int i = 0;
	struct wdm_device *desc = file->private_data;

	rv = mutex_lock_interruptible(&desc->lock); /* concurrent reads */
	if (rv < 0)
		return -ERESTARTSYS;

	if (desc->length == 0) {
		desc->read = 0;
retry:
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		i++;
		if (file->f_flags & O_NONBLOCK) {
			if (!test_bit(WDM_READ, &desc->flags)) {
				rv = cntr ? cntr : -EAGAIN;
				goto err;
			}
			rv = 0;
		} else {
			/* Block until the completion handler flags data. */
			rv = wait_event_interruptible(desc->wait,
				test_bit(WDM_READ, &desc->flags));
		}

		/* may have happened while we slept */
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		usb_mark_last_busy(interface_to_usbdev(desc->intf));
		if (rv < 0) {
			/* interrupted by a signal */
			rv = -ERESTARTSYS;
			goto err;
		}

		spin_lock_irq(&desc->iuspin);

		if (desc->rerr) { /* read completed, error happened */
			desc->rerr = 0;
			spin_unlock_irq(&desc->iuspin);
			rv = -EIO;
			goto err;
		}
		/*
		 * recheck whether we've lost the race
		 * against the completion handler
		 */
		if (!test_bit(WDM_READ, &desc->flags)) { /* lost race */
			spin_unlock_irq(&desc->iuspin);
			goto retry;
		}
		if (!desc->reslength) { /* zero length read */
			spin_unlock_irq(&desc->iuspin);
			goto retry;
		}
		clear_bit(WDM_READ, &desc->flags);
		spin_unlock_irq(&desc->iuspin);
	}

	/* Copy out at most the buffered length. */
	cntr = count > desc->length ? desc->length : count;
	rv = copy_to_user(buffer, desc->ubuf, cntr);
	if (rv > 0) {
		rv = -EFAULT;
		goto err;
	}

	/* Shift any remaining bytes to the front of the buffer. */
	for (i = 0; i < desc->length - cntr; i++)
		desc->ubuf[i] = desc->ubuf[i + cntr];

	desc->length -= cntr;
	/* in case we had outstanding data */
	if (!desc->length)
		clear_bit(WDM_READ, &desc->flags);
	rv = cntr;

err:
	mutex_unlock(&desc->lock);
	return rv;
}
/* Refresh the autosuspend last-busy timestamp for the HID's USB device. */
static void usbhid_mark_busy(struct usbhid_device *usbhid)
{
	usb_mark_last_busy(interface_to_usbdev(usbhid->intf));
}
/*
 * rx_complete() - usbnet bulk-IN completion handler (HTC rmnet variant
 * with optional TX/RX debug logging).  Classifies the urb status, hands
 * the skb to the bottom half, and resubmits while the interface is up.
 */
void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	if (enable_tx_rx_debug && (urb_status != -ECONNRESET))
		netdev_info(dev->net, "[RMNET_D]rx_c, status: %d\n",
			    urb_status);

	switch (urb_status) {
	/* success */
	case 0:
		break;

	/* stall: schedule halt recovery, then treat as shutdown */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		/* fallthrough */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* transient controller faults: throttle resubmits temporarily */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		/* Keep the urb paired with the skb for later cleanup. */
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* fallthrough */
	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* defer_bh() may change the state (e.g. to unlink_start). */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
#ifdef HTC_PM_DBG
			if (usb_pm_debug_enabled)
				usb_mark_intf_last_busy(dev->intf, true);
#endif
			usb_mark_last_busy(dev->udev);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
/*
 * usb_rx_complete() - bulk-IN completion handler for the modem link.
 * Maps the endpoint to an io-device format, delivers the payload, and
 * resubmits the urb; urbs that are not resubmitted are parked on the
 * pipe's anchor for later reuse.
 */
static void usb_rx_complete(struct urb *urb)
{
	struct if_usb_devdata *pipe_data = urb->context;
	struct usb_link_device *usb_ld = usb_get_intfdata(pipe_data->data_intf);
	struct io_device *iod;
	int iod_format = IPC_FMT;
	int ret;

	usb_mark_last_busy(urb->dev);

	switch (urb->status) {
	case 0:
	/* -ENOENT: urb was unlinked but may still carry data (handled the
	 * same way as a clean completion, except no resubmit below). */
	case -ENOENT:
		if (!urb->actual_length)
			goto re_submit;
		/* call iod recv */
		/* how we can distinguish boot ch with fmt ch ?? */
		switch (pipe_data->format) {
		case IF_USB_FMT_EP:
			iod_format = IPC_FMT;
			pr_buffer("rx", (char *)urb->transfer_buffer,
					(size_t)urb->actual_length, 16);
			break;
		case IF_USB_RAW_EP:
			iod_format = IPC_MULTI_RAW;
			break;
		case IF_USB_RFS_EP:
			iod_format = IPC_RFS;
			break;
		default:
			break;
		}

		/* during boot stage fmt end point shared with boot io device;
		 * when we use fmt device only, at boot and ipc exchange it
		 * can be reduced to 1 device */
		if (iod_format == IPC_FMT &&
			usb_ld->ld.com_state == COM_BOOT)
			iod_format = IPC_BOOT;
		if (iod_format == IPC_FMT &&
			usb_ld->ld.com_state == COM_CRASH)
			iod_format = IPC_RAMDUMP;

		iod = link_get_iod_with_format(&usb_ld->ld, iod_format);
		if (iod) {
			ret = iod->recv(iod, &usb_ld->ld,
					(char *)urb->transfer_buffer,
					urb->actual_length);
			if (ret < 0)
				mif_err("io device recv error :%d\n", ret);
		}
re_submit:
		/* Only resubmit on a clean completion and while not
		 * suspending; otherwise fall through to the anchor. */
		if (urb->status || atomic_read(&usb_ld->suspend_count))
			break;

		usb_mark_last_busy(urb->dev);
		usb_rx_submit(pipe_data, urb, GFP_ATOMIC);
		return;
	case -ESHUTDOWN:
	case -EPROTO:
		break;
	case -EOVERFLOW:
		mif_err("RX overflow\n");
		break;
	default:
		mif_err("RX complete Status (%d)\n", urb->status);
		break;
	}

	/* Not resubmitted: park the urb for later reuse. */
	usb_anchor_urb(urb, &pipe_data->urbs);
}
static int rx_threadfn(void *x_) { struct hsictty_port_private *portdata = x_; long rc = 0; struct sched_param param = {.sched_priority = 50 }; sched_setscheduler(current, SCHED_FIFO, ¶m); while (!kthread_should_stop()) { if (portdata->thread_exit) { msleep(5); continue; } process_rx_data(portdata); rc = wait_for_completion_timeout(&portdata->rx_notifier, 5 * HZ); INIT_COMPLETION(portdata->rx_notifier); } return 0; } #endif static void hsictty_read_callback(struct urb *urb) { int endpoint; struct _HSICTTY_MSG *msg = NULL; int msg_index = -1; struct usb_serial_port *port; int status = urb->status; struct hsictty_port_private *portdata; unsigned long flags; int err = -1; u8 channel = 0; struct hsictty_intf_private *intfdata; static int error_times = 0; int error_times_limits = 50; hsictty_dbg("%s: %p\n", __func__, urb); endpoint = usb_pipeendpoint(urb->pipe); port = urb->context; portdata = usb_get_serial_port_data(port); intfdata = usb_get_serial_data(port->serial); channel = portdata->channel; if (status) { hsictty_dbg ("%s: nonzero status: %d on channel:%d, endpoint %02x.\n", __func__, status, channel, endpoint); if (intfdata->multi_channel_mode) { if (((status == -EPROTO) || (status == -EOVERFLOW)) && error_times++ < error_times_limits) { hsictty_error ("%s: an halted error detected, will try again, status: %d on channel:%d, endpoint %02x.\n", __func__, status, channel, endpoint); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { if (err != -EPERM) { printk(KERN_ERR "%s: resubmit read urb failed in channel:%d.\n" "(%d)", __func__, channel, err); /* busy also in error unless we are killed */ usb_mark_last_busy(port-> serial->dev); } } else { usb_mark_last_busy(port->serial->dev); } } else if (status == -EPROTO) { hsictty_error ("%s: unrecorvery halted error detected, please check the hsic connection\n", __func__); } } } else { error_times = 0; port = urb->context; portdata = usb_get_serial_port_data(port); if ((msg_index = get_read_msg_index(portdata)) < 0) { 
hsictty_error ("%s: get read msg fail in channel:%d, endpoint:%d.\n", __func__, channel, endpoint); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { if (err != -EPERM) { printk(KERN_ERR "%s: resubmit read urb failed in channel:%d.\n" "(%d)", __func__, channel, err); /* busy also in error unless we are killed */ usb_mark_last_busy(port->serial->dev); } } else usb_mark_last_busy(port->serial->dev); return; } msg = &portdata->read_msg[msg_index]; #ifdef BACKUP_DATA_DUMP if (!dumped) backup_log(portdata->channel, 0, urb->transfer_buffer, urb->actual_length); #endif INIT_LIST_HEAD(&msg->link); msg->urb = urb; wakeup_device(port, 0); wake_lock_timeout(&intfdata->rx_wakelock, HZ); spin_lock_irqsave(&portdata->pool_lock, flags); list_add_tail(&msg->link, &portdata->pool); spin_unlock_irqrestore(&portdata->pool_lock, flags); #ifdef USE_READ_WORK queue_work(intfdata->hsictty_read_wq, &(portdata->hsictty_read_work)); #else complete_all(&portdata->rx_notifier); #endif } }
/*
 * rmnet_usb_ctrl_probe() - claim a free ctrl_dev slot for the probed
 * interface, allocate the interrupt urb/buffer, prepare the CDC
 * GET_ENCAPSULATED_RESPONSE request, and start interrupt RX.
 * On success *data is overwritten with the ctrl_dev pointer.
 * Returns 0 on success or a negative errno.
 * NOTE(review): dev->in_ctlreq is written but allocated elsewhere —
 * presumably at ctrl-dev init; confirm before reuse.
 */
int rmnet_usb_ctrl_probe(struct usb_interface *intf,
		struct usb_host_endpoint *int_in, unsigned long rmnet_devnum,
		unsigned long *data)
{
	struct rmnet_ctrl_dev		*dev = NULL;
	u16				wMaxPacketSize;
	struct usb_endpoint_descriptor	*ep;
	struct usb_device		*udev = interface_to_usbdev(intf);
	int				interval;
	int				ret = 0, n;

	/* Find next available ctrl_dev */
	for (n = 0; n < insts_per_dev; n++) {
		dev = &ctrl_devs[rmnet_devnum][n];
		if (!dev->claimed)
			break;
	}

	if (!dev || n == insts_per_dev) {
		pr_err("%s: No available ctrl devices for %lu\n", __func__,
			rmnet_devnum);
		return -ENODEV;
	}

	dev->int_pipe = usb_rcvintpipe(udev,
		int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	dev->intf = intf;

	dev->id = rmnet_devnum;

	/* reset the statistics counters for this session */
	dev->snd_encap_cmd_cnt = 0;
	dev->get_encap_resp_cnt = 0;
	dev->resp_avail_cnt = 0;
	dev->tx_ctrl_err_cnt = 0;
	dev->set_ctrl_line_state_cnt = 0;

	dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->inturb) {
		dev_err(dev->devicep, "Error allocating int urb\n");
		return -ENOMEM;
	}

	/*use max pkt size from ep desc*/
	ep = &dev->intf->cur_altsetting->endpoint[0].desc;
	wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize);

	dev->intbuf = kmalloc(wMaxPacketSize, GFP_KERNEL);
	if (!dev->intbuf) {
		usb_free_urb(dev->inturb);
		dev_err(dev->devicep, "Error allocating int buffer\n");
		return -ENOMEM;
	}

	dev->in_ctlreq->bRequestType =
		(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
	dev->in_ctlreq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
	dev->in_ctlreq->wValue = 0;
	dev->in_ctlreq->wIndex =
		dev->intf->cur_altsetting->desc.bInterfaceNumber;
	dev->in_ctlreq->wLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH);

	/* Poll interval: at least the endpoint's bInterval, bounded below
	 * by the speed-dependent minimum. */
	interval = max((int)int_in->desc.bInterval,
			(udev->speed == USB_SPEED_HIGH) ?
			HS_INTERVAL : FS_LS_INTERVAL);

	usb_fill_int_urb(dev->inturb, udev,
			dev->int_pipe,
			dev->intbuf, wMaxPacketSize,
			notification_available_cb, dev, interval);

	usb_mark_last_busy(udev);
	ret = rmnet_usb_ctrl_start_rx(dev);
	if (ret) {
		usb_free_urb(dev->inturb);
		kfree(dev->intbuf);
		return ret;
	}

	ctl_msg_dbg_mask = MSM_USB_CTL_DUMP_BUFFER;

	dev->claimed = true;

	/*mux info is passed to data parameter*/
	if (*data)
		set_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status);

	*data = (unsigned long)dev;

	/* If MUX is enabled, wakeup the open process here */
	if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status)) {
		set_bit(RMNET_CTRL_DEV_READY, &dev->status);
		wake_up(&dev->open_wait_queue);
	}

	return 0;
}
/*
 * usb_tx_work() - delayed-work handler that drains the FMT and RAW TX
 * queues, one skb from each queue per loop iteration for fair flow
 * control.  Waits for runtime-PM activity first and requeues itself
 * when the link is not yet active.
 */
static void usb_tx_work(struct work_struct *work)
{
	int ret = 0;
	struct link_device *ld =
		container_of(work, struct link_device, tx_delayed_work.work);
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct sk_buff *skb;
	struct link_pm_data *pm_data = usb_ld->link_pm_data;

	if (!usb_ld->usbdev) {
		mif_info("usbdev is invalid\n");
		return;
	}

	while (ld->sk_fmt_tx_q.qlen || ld->sk_raw_tx_q.qlen) {
		/* request and check usb runtime pm first */
		ret = link_pm_runtime_get_active(pm_data);
		if (ret < 0) {
			if (ret == -ENODEV)
				mif_err("link not avail, retry reconnect.\n");
			else
				/* PM not active yet: retry in 20 ms. */
				queue_delayed_work(ld->tx_wq,
						&ld->tx_delayed_work,
						msecs_to_jiffies(20));
			return;
		}

		usb_mark_last_busy(usb_ld->usbdev);
		pm_runtime_get_sync(&usb_ld->usbdev->dev);

		ret = 0;
		/* send skb from fmt_txq and raw_txq,
		 * one by one for fair flow control */
		skb = skb_dequeue(&ld->sk_fmt_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			if (ret != -ENODEV && ret != -ENOENT)
				pm_runtime_put(&usb_ld->usbdev->dev);
			/* Do not call runtime_put if ret is ENODEV. Unless it
			 * will invoke bugs */
			else
				/* NOTE(review): on -ENOENT the skb may already
				 * have been freed inside _usb_tx_work() (bad
				 * format path) — requeueing would then be a
				 * use-after-free; confirm against callee. */
				skb_queue_head(&ld->sk_fmt_tx_q, skb);
			return;
		}

		skb = skb_dequeue(&ld->sk_raw_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			if (ret != -ENODEV && ret != -ENOENT)
				pm_runtime_put(&usb_ld->usbdev->dev);
			else
				skb_queue_head(&ld->sk_raw_tx_q, skb);
			return;
		}

		pm_runtime_put(&usb_ld->usbdev->dev);
		usb_mark_last_busy(usb_ld->usbdev);
	}
	wake_unlock(&pm_data->tx_async_wake);
}
/*
 * _usb_tx_work() - transmit a single skb on the endpoint matching its
 * io-device format.
 * Returns 0 on success.  On failure the skb is either freed here
 * (unknown format, -ENOENT), left untouched for the caller (-ENODEV
 * link loss), or requeued here with a delayed retry (other errors).
 */
static int _usb_tx_work(struct sk_buff *skb)
{
	struct sk_buff_head *txq;
	struct io_device *iod = skbpriv(skb)->iod;
	struct link_device *ld = skbpriv(skb)->ld;
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct if_usb_devdata *pipe_data;
	int ret;

	switch (iod->format) {
	case IPC_BOOT:
	case IPC_FMT:
		/* boot device uses same intf with fmt*/
		pipe_data = &usb_ld->devdata[IF_USB_FMT_EP];
		txq = &ld->sk_fmt_tx_q;
		break;
	case IPC_RAW:
		pipe_data = &usb_ld->devdata[IF_USB_RAW_EP];
		txq = &ld->sk_raw_tx_q;
		break;
	case IPC_RFS:
		pipe_data = &usb_ld->devdata[IF_USB_RFS_EP];
		txq = &ld->sk_fmt_tx_q;
		break;
	default:
		/* wrong packet, drop it */
		pipe_data = NULL;
		break;
	}

	if (!pipe_data) {
		dev_kfree_skb_any(skb);
		return -ENOENT;
	}

	if (iod->format == IPC_FMT && usb_ld->if_usb_is_main)
		pr_skb("IPC-TX", skb);
	if (iod->format == IPC_RAW)
		mif_debug("TX[RAW]\n");
	if (iod->format == IPC_RFS)
		pr_skb("RFS-TX", skb);

	/* NOTE(review): on link loss the skb is NOT freed here; the
	 * caller is expected to requeue or drop it. */
	if (!usb_ld->if_usb_connected || !usb_ld->usbdev)
		return -ENODEV;
	usb_mark_last_busy(usb_ld->usbdev);
	ret = usb_tx_urb_with_skb(usb_ld->usbdev, skb, pipe_data);
	if (ret < 0) {
		if (ret == -ENODEV || ret == -ENOENT) {
			mif_err("link broken while in runtime active ..."
							" purge!\n");
			return ret;
		}
		mif_err("usb_tx_urb_with_skb for iod(%d), ret=%d\n",
				iod->format, ret);
		/* Other errors: requeue and schedule a retry in 20 ms. */
		skb_queue_head(txq, skb);
		queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work,
				msecs_to_jiffies(20));
		return ret;
	}
	return 0;
}
/*
 * rmnet_usb_ctrl_probe() - bind a ctrl device to the probed interface:
 * raise DTR toward the modem, allocate the interrupt urb/buffer, prepare
 * the CDC GET_ENCAPSULATED_RESPONSE request, and start interrupt RX.
 * Returns 0 on success or a negative errno.
 * NOTE(review): dev->in_ctlreq is written but allocated elsewhere —
 * presumably at ctrl-dev init; confirm before reuse.
 */
int rmnet_usb_ctrl_probe(struct usb_interface *intf,
		struct usb_host_endpoint *int_in,
		struct rmnet_ctrl_dev *dev)
{
	u16				wMaxPacketSize;
	struct usb_endpoint_descriptor	*ep;
	struct usb_device		*udev;
	int				interval;
	int				ret = 0;

	udev = interface_to_usbdev(intf);

	if (!dev) {
		pr_err("%s: Ctrl device not found\n", __func__);
		return -ENODEV;
	}
	dev->int_pipe = usb_rcvintpipe(udev,
		int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	mutex_lock(&dev->dev_lock);
	dev->intf = intf;

	/*TBD: for now just update CD status*/
	dev->cbits_tolocal = ACM_CTRL_CD;

	/*send DTR high to modem*/
	dev->cbits_tomdm = ACM_CTRL_DTR;
	mutex_unlock(&dev->dev_lock);

	dev->resp_available = false;
	dev->snd_encap_cmd_cnt = 0;
	dev->get_encap_resp_cnt = 0;
	dev->resp_avail_cnt = 0;
	dev->tx_ctrl_err_cnt = 0;
	dev->set_ctrl_line_state_cnt = 0;

	/* Push the control-line state (DTR) to the modem synchronously. */
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
		USB_CDC_REQ_SET_CONTROL_LINE_STATE,
		(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE),
		dev->cbits_tomdm,
		dev->intf->cur_altsetting->desc.bInterfaceNumber,
		NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (ret < 0)
		return ret;

	dev->set_ctrl_line_state_cnt++;

	dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->inturb) {
		dev_err(dev->devicep, "Error allocating int urb\n");
		return -ENOMEM;
	}

	/*use max pkt size from ep desc*/
	ep = &dev->intf->cur_altsetting->endpoint[0].desc;
	wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize);

	dev->intbuf = kmalloc(wMaxPacketSize, GFP_KERNEL);
	if (!dev->intbuf) {
		usb_free_urb(dev->inturb);
		dev_err(dev->devicep, "Error allocating int buffer\n");
		return -ENOMEM;
	}

	dev->in_ctlreq->bRequestType =
		(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
	dev->in_ctlreq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
	dev->in_ctlreq->wValue = 0;
	dev->in_ctlreq->wIndex =
		dev->intf->cur_altsetting->desc.bInterfaceNumber;
	dev->in_ctlreq->wLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH);

	/* Poll interval: at least the endpoint's bInterval, bounded below
	 * by the speed-dependent minimum. */
	interval = max((int)int_in->desc.bInterval,
			(udev->speed == USB_SPEED_HIGH) ?
			HS_INTERVAL : FS_LS_INTERVAL);

	usb_fill_int_urb(dev->inturb, udev,
			dev->int_pipe,
			dev->intbuf, wMaxPacketSize,
			notification_available_cb, dev, interval);

	usb_mark_last_busy(udev);
	ret = rmnet_usb_ctrl_start_rx(dev);
	if (!ret)
		dev->is_connected = true;

	ctl_msg_dbg_mask = 0;

	return ret;
}
/*
 * process_rx_data() - drain queued RX messages for one tty channel, push
 * the payloads up the line discipline (with throttling when the tty has
 * little room), and resubmit each urb while the port stays open.
 * Runs in the RX kthread / read-work context, never in the completion
 * handler itself.
 */
void process_rx_data(struct hsictty_port_private *portdata)
{
	struct _HSICTTY_MSG *msg = NULL;
	unsigned long flags;

	while (!list_empty(&portdata->pool)) {
		struct tty_struct *tty = NULL;
		struct usb_serial_port *port;
		struct hsictty_intf_private *intfdata;
		int status;
		int err;
		int endpoint;
		struct urb *urb = NULL;
		unsigned char *data;
		u16 channel;

		channel = portdata->channel;

		/* Detach the oldest message under the pool lock. */
		spin_lock_irqsave(&portdata->pool_lock, flags);
		msg = list_first_entry(&portdata->pool, struct _HSICTTY_MSG,
				       link);
		if (NULL == msg) {
			spin_unlock_irqrestore(&portdata->pool_lock, flags);
			hsictty_error("%s: try to push a NULL msg\n",
				      __func__);
			goto out;
		}
		list_del(&msg->link);
		urb = msg->urb;
		spin_unlock_irqrestore(&portdata->pool_lock, flags);

		port = urb->context;
		intfdata = usb_get_serial_data(port->serial);
		status = urb->status;
		data = urb->transfer_buffer;
		endpoint = usb_pipeendpoint(urb->pipe);
		hsictty_dbg("read length: %d in channel:%d, endpoint:%d\n",
			    urb->actual_length, channel, endpoint);
		wake_lock_timeout(&intfdata->rx_wakelock, HZ);
		down(&portdata->ch_sem_r);
		/* Only fetch a tty reference while the port is usable. */
		if (port->serial && !port->serial->disconnected
		    && portdata->opened) {
			tty = tty_port_tty_get(&port->port);
		} else {
			hsictty_info
			    ("%s: tty[%d] already close, no need push\n",
			     __func__, channel);
		}
		if (tty) {
			if (urb->actual_length) {
#ifdef USE_TTY_CORE_BUFFER	/* do not use tty buffer */
				u32 nleft = 0, npushed = 0, once = 2048,
				    throttle_limit = 120;
				unsigned char *ptr = NULL;
				nleft = urb->actual_length;
				ptr = data;
				hsictty_dbg("nleft = %d\n", nleft);
				while (nleft > 0) {
					int receive_room = 0;
					u32 receive_room_limit = IN_BUFLEN;
					/* Plenty of room: push it all. */
					if (tty->receive_room >
					    receive_room_limit) {
						tty->ldisc->ops->receive_buf(tty, ptr, NULL, nleft);
						nleft = 0;
						continue;
					}
wait_rx_allowed:
					/* Note: should not use tty
					 * receive_room, it is not protected
					 * in tty core, will cause data
					 * lost. */
					receive_room = tty->receive_room;
					if (receive_room <= throttle_limit) {
						int rc;
						hsictty_dbg
						    ("ch:%d wait tty room,room left(%d)\n",
						     channel,
						     tty->receive_room);
#ifdef USB_TTY_THROTTLE_CB
						/* Drop the channel lock while
						 * waiting for the unthrottle
						 * completion. */
						up(&portdata->ch_sem_r);
						rc = wait_for_completion_interruptible_timeout(&portdata->rx_push_notifier, 5 * HZ);
						down(&portdata->ch_sem_r);
						//INIT_COMPLETION(portdata->rx_push_notifier);
						if (rc <= 0) {
							hsictty_error
							    ("%s: error wait push in in channel:%d, endpoint:%d, error(%s)\n\n",
							     __func__, channel,
							     endpoint,
							     (rc < 0) ? "-EINT" : "-EBUSY");
							if (rc < 0)
								break;
						}
						if ((port->serial
						     && port->serial->disconnected)
						    || !portdata->opened) {
							hsictty_info
							    ("%s: detect disconnect or close while reading on channel:%d, need exit\n",
							     __func__, channel);
							break;
						} else
							goto wait_rx_allowed;
#else
						/* Busy-poll: nudge readers
						 * and retry shortly. */
						if (waitqueue_active(&tty->read_wait))
							wake_up_interruptible(&tty->read_wait);
						usleep_range(1, 2);
#endif
						goto wait_rx_allowed;
					}
					/* Push only what fits above the
					 * throttle threshold. */
					if (nleft >= receive_room - throttle_limit) {
						once = receive_room - throttle_limit;
						npushed = once;
						nleft -= once;
					} else {
						npushed = nleft;
						nleft = 0;
					}
					hsictty_dbg
					    ("%s:ch:%d,ep:%d npushed:%d,recvroom:%d\n",
					     __func__, channel, endpoint,
					     npushed, tty->receive_room);
					tty->ldisc->ops->receive_buf(tty, ptr, NULL, npushed);
					ptr += npushed;
				}
#else
				/* Flip-buffer path: loop until the whole
				 * payload has been accepted. */
				u32 nleft = 0, npushed = 0, once = 16000;
				unsigned char *ptr = NULL;
				nleft = urb->actual_length;
				ptr = data;
				while (nleft > 0) {
					npushed = tty_insert_flip_string(tty->port, ptr, nleft);
					tty_flip_buffer_push(tty->port);
					nleft -= npushed;
					ptr += npushed;
					//usleep_range(1,2);
					//udelay(5);
				}
#endif
			}
			tty_kref_put(tty);
		}
		up(&portdata->ch_sem_r);

		/* Resubmit urb so we continue receiving */
		spin_lock_irqsave(&portdata->pool_lock, flags);
		msg->urb = NULL;
		spin_unlock_irqrestore(&portdata->pool_lock, flags);
		if (portdata->opened) {
			err = usb_submit_urb(urb, GFP_ATOMIC);
			if (err) {
				if (err != -EPERM) {
					hsictty_error
					    ("%s: resubmit read urb failed in channel:%d.\n"
					     "(%d)", __func__, channel, err);
					/* busy also in error unless we are killed */
					usb_mark_last_busy(port->serial->dev);
				}
			} else
				usb_mark_last_busy(port->serial->dev);
		}
		usb_autopm_put_interface_async(port->serial->interface);
	}
out:
	;
}
/* (translated from Chinese comment: "study this carefully")
 *
 * usb_stor_control_thread() - the usb-storage worker kthread.  Sleeps on
 * cmnd_ready, then executes one queued SCSI command per wakeup: sanity
 * checks, optional faked INQUIRY, protocol handler dispatch, completion,
 * and abort signalling.
 */
static int usb_stor_control_thread(void * __us)
{
	struct us_data *us = (struct us_data *)__us;
	struct Scsi_Host *host = us_to_host(us);

	pr_info("7 usb stor control thread\n");
	for (;;) {
		US_DEBUGP("*** thread sleeping.\n");
		/* Interrupted wait (e.g. on teardown) exits the loop. */
		if (wait_for_completion_interruptible(&us->cmnd_ready))
			break;

		US_DEBUGP("*** thread awakened.\n");

		/* lock the device pointers */
		mutex_lock(&(us->dev_mutex));

		/* lock access to the state */
		scsi_lock(host);

		/* When we are called with no command pending, we're done */
		if (us->srb == NULL) {
			scsi_unlock(host);
			mutex_unlock(&us->dev_mutex);
			US_DEBUGP("-- exiting\n");
			break;
		}

		/* has the command timed out *already* ? */
		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			us->srb->result = DID_ABORT << 16;
			goto SkipForAbort;
		}

		scsi_unlock(host);

		/* reject the command if the direction indicator
		 * is UNKNOWN */
		if (us->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
			US_DEBUGP("UNKNOWN data direction\n");
			us->srb->result = DID_ERROR << 16;
		}

		/* reject if target != 0 or if LUN is higher than
		 * the maximum known LUN */
		else if (us->srb->device->id &&
				!(us->fflags & US_FL_SCM_MULT_TARG)) {
			US_DEBUGP("Bad target number (%d:%d)\n",
				  us->srb->device->id, us->srb->device->lun);
			us->srb->result = DID_BAD_TARGET << 16;
		}

		else if (us->srb->device->lun > us->max_lun) {
			US_DEBUGP("Bad LUN (%d:%d)\n",
				  us->srb->device->id, us->srb->device->lun);
			us->srb->result = DID_BAD_TARGET << 16;
		}

		/* Handle those devices which need us to fake
		 * their inquiry data */
		else if ((us->srb->cmnd[0] == INQUIRY) &&
			    (us->fflags & US_FL_FIX_INQUIRY)) {
			unsigned char data_ptr[36] = {
			    0x00, 0x80, 0x02, 0x02,
			    0x1F, 0x00, 0x00, 0x00};

			US_DEBUGP("Faking INQUIRY command\n");
			fill_inquiry_response(us, data_ptr, 36);
			us->srb->result = SAM_STAT_GOOD;
		}

		/* we've got a command, let's do it! */
		else {
			US_DEBUG(usb_stor_show_command(us->srb));
			us->proto_handler(us->srb, us);
			usb_mark_last_busy(us->pusb_dev);
		}

		/* lock access to the state */
		scsi_lock(host);

		/* indicate that the command is done */
		if (us->srb->result != DID_ABORT << 16) {
			US_DEBUGP("scsi cmd done, result=0x%x\n",
				   us->srb->result);
			us->srb->scsi_done(us->srb);
		} else {
SkipForAbort:
			US_DEBUGP("scsi command aborted\n");
		}

		/* If an abort request was received we need to signal that
		 * the abort has finished.  The proper test for this is
		 * the TIMED_OUT flag, not srb->result == DID_ABORT, because
		 * the timeout might have occurred after the command had
		 * already completed with a different result code. */
		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			complete(&(us->notify));

			/* Allow USB transfers to resume */
			clear_bit(US_FLIDX_ABORTING, &us->dflags);
			clear_bit(US_FLIDX_TIMED_OUT, &us->dflags);
		}

		/* finished working on this command */
		us->srb = NULL;
		scsi_unlock(host);

		/* unlock the device pointers */
		mutex_unlock(&us->dev_mutex);
	} /* for (;;) */

	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
/*
 * wdm_read() - blocking/non-blocking read of one buffered WDM response
 * (variant with a dedicated read lock and error logging).
 * Readers are serialized by desc->rlock; state shared with the urb
 * completion handler is protected by desc->iuspin.
 * Returns the number of bytes copied or a negative errno.
 */
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	int rv, cntr = 0;
	int i = 0;
	struct wdm_device *desc = file->private_data;

	rv = mutex_lock_interruptible(&desc->rlock); /* concurrent reads */
	if (rv < 0)
		return -ERESTARTSYS;

	if (desc->length == 0) {
		desc->read = 0;
retry:
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		i++;
		if (file->f_flags & O_NONBLOCK) {
			if (!test_bit(WDM_READ, &desc->flags)) {
				rv = cntr ? cntr : -EAGAIN;
				goto err;
			}
			rv = 0;
		} else {
			/* Block until the completion handler flags data. */
			rv = wait_event_interruptible(desc->wait,
				test_bit(WDM_READ, &desc->flags));
		}

		/* may have happened while we slept */
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		usb_mark_last_busy(interface_to_usbdev(desc->intf));
		if (rv < 0) {
			/* interrupted by a signal */
			rv = -ERESTARTSYS;
			goto err;
		}

		spin_lock_irq(&desc->iuspin);

		if (desc->rerr) { /* read completed, error happened */
			int t = desc->rerr;
			desc->rerr = 0;
			spin_unlock_irq(&desc->iuspin);
			dev_err(&desc->intf->dev,
				"reading had resulted in %d\n", t);
			rv = -EIO;
			goto err;
		}
		/* recheck whether we've lost the race
		 * against the completion handler */
		if (!test_bit(WDM_READ, &desc->flags)) { /* lost race */
			spin_unlock_irq(&desc->iuspin);
			goto retry;
		}
		if (!desc->reslength) { /* zero length read */
			spin_unlock_irq(&desc->iuspin);
			goto retry;
		}
		clear_bit(WDM_READ, &desc->flags);
		spin_unlock_irq(&desc->iuspin);
	}

	/* Copy out at most the buffered length. */
	cntr = count > desc->length ? desc->length : count;
	rv = copy_to_user(buffer, desc->ubuf, cntr);
	if (rv > 0) {
		rv = -EFAULT;
		goto err;
	}

	/* Shift any remaining bytes to the front of the buffer. */
	for (i = 0; i < desc->length - cntr; i++)
		desc->ubuf[i] = desc->ubuf[i + cntr];

	desc->length -= cntr;
	/* in case we had outstanding data */
	if (!desc->length)
		clear_bit(WDM_READ, &desc->flags);
	rv = cntr;

err:
	mutex_unlock(&desc->rlock);
	if (rv < 0 && rv != -EAGAIN)
		dev_err(&desc->intf->dev, "wdm_read: exit error\n");
	return rv;
}
/*
 * usb_rx_retry_work() - delayed retry of an RX buffer that the io-device
 * rejected with -ENOMEM.  Re-runs recv on the saved retry_urb, retrying
 * up to 10 times, and finally resubmits the RX urb.
 */
static void usb_rx_retry_work(struct work_struct *work)
{
	int ret = 0;
	struct usb_link_device *usb_ld =
		container_of(work, struct usb_link_device, rx_retry_work.work);
	struct urb *urb = usb_ld->retry_urb;
	struct if_usb_devdata *pipe_data = urb->context;
	struct io_device *iod;
	int iod_format;

	if (!usb_ld->if_usb_connected || !usb_ld->usbdev)
		return;

	if (usb_ld->usbdev)
		usb_mark_last_busy(usb_ld->usbdev);
	/* Map the endpoint back to the io-device format it feeds. */
	switch (pipe_data->format) {
	case IF_USB_FMT_EP:
		if (usb_ld->if_usb_is_main) {
			pr_urb("IPC-RX, retry", urb);
			iod_format = IPC_FMT;
		} else {
			iod_format = IPC_BOOT;
		}
		break;
	case IF_USB_RAW_EP:
		iod_format = IPC_MULTI_RAW;
		break;
	case IF_USB_RFS_EP:
		iod_format = IPC_RFS;
		pr_urb("RFS-RX, retry", urb);
		break;
	case IF_USB_CMD_EP:
		iod_format = IPC_CMD;
		break;
	default:
		iod_format = -1;
		break;
	}

	iod = link_get_iod_with_format(&usb_ld->ld, iod_format);
	if (iod) {
		ret = iod->recv(iod, &usb_ld->ld,
				(char *)urb->transfer_buffer,
				urb->actual_length);
		if (ret == -ENOMEM) {
			/* TODO: check the retry count */
			/* retry the delay work after 20ms and resubit*/
			mif_err("ENOMEM, +retry 20ms\n");
			if (usb_ld->usbdev)
				usb_mark_last_busy(usb_ld->usbdev);
			usb_ld->retry_urb = urb;
			/* NOTE(review): after 10 failed retries the urb is
			 * neither retried nor resubmitted — RX on this pipe
			 * appears to stall; confirm this is intended. */
			if (usb_ld->rx_retry_cnt++ < 10)
				queue_delayed_work(usb_ld->ld.tx_wq,
					&usb_ld->rx_retry_work, 10);
			return;
		}
		if (ret < 0)
			mif_err("io device recv error (%d)\n", ret);
		usb_ld->rx_retry_cnt = 0;
	}

	if (usb_ld->usbdev)
		usb_mark_last_busy(usb_ld->usbdev);
	usb_rx_submit(usb_ld, pipe_data, GFP_ATOMIC);
}
/*
 * Start transmission of write buffer wb[wbn], deferring it if the device
 * is runtime-suspended.
 *
 * With CONFIG_PM the URB is parked on acm->deferred and resubmitted here
 * once the device is active again; without CONFIG_PM a single delayed
 * write buffer is kept (acm->delayed_wb) and later writes are coalesced
 * into it when they fit.  Runs under acm->write_lock.
 *
 * Returns 0 on success/deferral, -ENODEV if the device is gone, or the
 * acm_start_wb() result.
 */
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;
#ifdef CONFIG_PM
	struct urb *res;
#endif

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		/* disconnected: release the buffer slot */
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__, acm->susp_count);
	/* async: we may be in atomic context; resume happens later */
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
#ifdef CONFIG_PM
		/* device suspended: fill the URB now and park it for resume */
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#else
		if (!acm->delayed_wb) {
			acm->delayed_wb = wb;
		} else {
			/* coalesce into the already-delayed buffer if it fits;
			 * data that does not fit is silently dropped —
			 * NOTE(review): confirm that is acceptable here */
			if (acm->delayed_wb->len + wb->len <= acm->writesize) {
				memcpy(acm->delayed_wb->buf + acm->delayed_wb->len, wb->buf, wb->len);
				acm->delayed_wb->len += wb->len;
			}
			wb->use = 0;
			/* this buffer will not be submitted: drop the PM ref */
			usb_autopm_put_interface_async(acm->control);
		}
#endif
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);
#ifdef CONFIG_PM
	/* device active: flush any URBs deferred during suspend first */
	while ((res = usb_get_from_anchor(&acm->deferred))) {
		/* decrement ref count*/
		usb_put_urb(res);
		rc = usb_submit_urb(res, GFP_ATOMIC);
		if (rc < 0) {
			dbg("usb_submit_urb(pending request) failed: %d", rc);
			usb_unanchor_urb(res);
			acm_write_done(acm, res->context);
		}
	}
#endif
	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
/*
 * RX URB completion handler for the HSIC modem link.
 *
 * On success the payload is routed, by endpoint format, either to the
 * flow-control command handler (IPC_CMD) or to the matching io-device's
 * recv().  If recv() returns -ENOMEM the URB is handed to the retry
 * delayed work instead of being resubmitted here.  -ENOENT completions
 * (unlinked while suspended, possibly with data) deliberately fall
 * through into the success path but are not resubmitted.
 */
static void usb_rx_complete(struct urb *urb)
{
	struct if_usb_devdata *pipe_data = urb->context;
	struct usb_link_device *usb_ld = pipe_data->usb_ld;
	struct io_device *iod;
	int iod_format;
	int ret;

	if (usb_ld->usbdev)
		usb_mark_last_busy(usb_ld->usbdev);

	switch (urb->status) {
	case -ENOENT:
		/* case for 'link pm suspended but rx data had remained' */
		mif_debug("urb->status = -ENOENT\n");
		/* intentional fallthrough: process any remaining payload */
	case 0:
		if (!urb->actual_length) {
			mif_debug("urb has zero length!\n");
			goto rx_submit;
		}

		usb_ld->link_pm_data->rx_cnt++;
		/* call iod recv */
		/* how we can distinguish boot ch with fmt ch ?? */
		switch (pipe_data->format) {
		case IF_USB_FMT_EP:
			if (usb_ld->if_usb_is_main) {
				//pr_urb("IPC-RX", urb);
				iod_format = IPC_FMT;
			} else {
				iod_format = IPC_BOOT;
			}
			break;
		case IF_USB_RAW_EP:
			iod_format = IPC_MULTI_RAW;
			break;
		case IF_USB_RFS_EP:
			iod_format = IPC_RFS;
			break;
		case IF_USB_CMD_EP:
			iod_format = IPC_CMD;
			break;
		default:
			iod_format = -1;
			break;
		}

		/* flow control CMD by CP, not use io device */
		if (unlikely(iod_format == IPC_CMD)) {
			ret = link_rx_flowctl_cmd(&usb_ld->ld, (char *)urb->transfer_buffer, urb->actual_length);
			if (ret < 0)
				mif_err("no multi raw device (%d)\n", ret);
			goto rx_submit;
		}

		iod = link_get_iod_with_format(&usb_ld->ld, iod_format);
		if (iod) {
			ret = iod->recv(iod, &usb_ld->ld, (char *)urb->transfer_buffer, urb->actual_length);
			if (ret == -ENOMEM) {
				/* retry the delay work and resubit*/
				mif_err("ENOMEM, retry\n");
				if (usb_ld->usbdev)
					usb_mark_last_busy(usb_ld->usbdev);
				/* hand the URB to usb_rx_retry_work(); it owns
				 * resubmission from here on */
				usb_ld->retry_urb = urb;
				queue_delayed_work(usb_ld->ld.tx_wq, &usb_ld->rx_retry_work, 0);
				return;
			}
			if (ret < 0)
				mif_err("io device recv error (%d)\n", ret);
		}
rx_submit:
		/* only a genuinely successful URB is rearmed; -ENOENT means
		 * the pipe was unlinked, so do not resubmit */
		if (urb->status == 0) {
			if (usb_ld->usbdev)
				usb_mark_last_busy(usb_ld->usbdev);
			usb_rx_submit(usb_ld, pipe_data, GFP_ATOMIC);
		}
		break;
	default:
		mif_err("urb err status = %d\n", urb->status);
		break;
	}
}
/* control interface reports status changes with "interrupt" transfers */
/*
 * Interrupt-URB completion handler for the CDC-ACM control endpoint.
 *
 * Decodes the CDC notification: NETWORK_CONNECTION is only logged;
 * SERIAL_STATE updates acm->ctrlin and hangs up the tty when DCD drops
 * (unless CLOCAL).  The URB is resubmitted on every path except a
 * terminal status (-ECONNRESET/-ENOENT/-ESHUTDOWN).
 */
static void acm_ctrl_irq(struct urb *urb)
{
	struct acm *acm = urb->context;
	struct usb_cdc_notification *dr = urb->transfer_buffer;
	struct tty_struct *tty;
	unsigned char *data;
	int newctrl;
	int retval;
	int status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&acm->control->dev, "%s - urb shutting down with status: %d\n", __func__, status);
		return;
	default:
		dev_dbg(&acm->control->dev, "%s - nonzero urb status received: %d\n", __func__, status);
		goto exit;	/* resubmit despite the error */
	}

	if (!ACM_READY(acm))
		goto exit;

	/* notification traffic counts as activity for runtime PM */
	usb_mark_last_busy(acm->dev);

	/* notification payload follows the CDC header */
	data = (unsigned char *)(dr + 1);
	switch (dr->bNotificationType) {
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&acm->control->dev, "%s - network connection: %d\n", __func__, dr->wValue);
		break;

	case USB_CDC_NOTIFY_SERIAL_STATE:
		tty = tty_port_tty_get(&acm->port);
		newctrl = get_unaligned_le16(data);

		if (tty) {
			/* falling DCD hangs up the line unless CLOCAL is set */
			if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
				dev_dbg(&acm->control->dev, "%s - calling hangup\n", __func__);
				tty_hangup(tty);
			}
			tty_kref_put(tty);
		}

		acm->ctrlin = newctrl;

		dev_dbg(&acm->control->dev,
			"%s - input control lines: dcd%c dsr%c break%c "
			"ring%c framing%c parity%c overrun%c\n",
			__func__,
			acm->ctrlin & ACM_CTRL_DCD ? '+' : '-',
			acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
			acm->ctrlin & ACM_CTRL_BRK ? '+' : '-',
			acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
			acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-',
			acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
			acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
		break;

	default:
		dev_dbg(&acm->control->dev,
			"%s - unknown notification %d received: index %d "
			"len %d data0 %d data1 %d\n",
			__func__,
			dr->bNotificationType, dr->wIndex,
			dr->wLength, data[0], data[1]);
		break;
	}
exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n", __func__, retval);
}
static int start_ipc(struct link_device *ld, struct io_device *iod) { struct sk_buff *skb; char data[1] = {'a'}; int err; struct usb_link_device *usb_ld = to_usb_link_device(ld); struct link_pm_data *pm_data = usb_ld->link_pm_data; struct device *dev = &usb_ld->usbdev->dev; struct if_usb_devdata *pipe_data = &usb_ld->devdata[IF_USB_FMT_EP]; if (!usb_ld->if_usb_connected) { mif_err("HSIC not connected, skip start ipc\n"); err = -ENODEV; goto exit; } retry: if (ld->mc->phone_state != STATE_ONLINE) { mif_err("MODEM is not online, skip start ipc\n"); err = -ENODEV; goto exit; } /* check usb runtime pm first */ if (dev->power.runtime_status != RPM_ACTIVE) { if (!pm_data->resume_requested) { mif_debug("QW PM\n"); INIT_COMPLETION(pm_data->active_done); queue_delayed_work(pm_data->wq, &pm_data->link_pm_work, 0); } mif_debug("Wait pm\n"); err = wait_for_completion_timeout(&pm_data->active_done, msecs_to_jiffies(500)); /* timeout or -ERESTARTSYS */ if (err <= 0) goto retry; } pm_runtime_get_sync(dev); mif_err("send 'a'\n"); skb = alloc_skb(16, GFP_ATOMIC); if (unlikely(!skb)) { pm_runtime_put(dev); return -ENOMEM; } memcpy(skb_put(skb, 1), data, 1); skbpriv(skb)->iod = iod; skbpriv(skb)->ld = ld; if (!usb_ld->if_usb_connected || !usb_ld->usbdev) return -ENODEV; usb_mark_last_busy(usb_ld->usbdev); err = usb_tx_urb_with_skb(usb_ld->usbdev, skb, pipe_data); if (err < 0) { mif_err("usb_tx_urb fail\n"); dev_kfree_skb_any(skb); } pm_runtime_put(dev); exit: return err; }
/*
 * Allocate an RX skb, attach it to @urb and submit the URB on the
 * device's bulk-in pipe, queueing the skb on dev->rxq on success.
 *
 * On any failure the skb and URB are freed (the URB is also freed when
 * skb allocation fails, after deferring an EVENT_RX_MEMORY kevent).
 * Submission errors map to deferred kevents (-EPIPE → RX_HALT,
 * -ENOMEM → RX_MEMORY), device detach (-ENODEV), or -ENOLINK.
 *
 * Returns 0 on success or a negative errno.
 */
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}
	/* raw-IP links carry no ethernet header, so no alignment reserve */
	if (dev->net->type != ARPHRD_RAWIP)
		skb_reserve(skb, NET_IP_ALIGN);

	/* per-skb bookkeeping lives in skb->cb for the completion handler */
	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in, skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			/* default precedes case 0 so unexpected errors kick
			 * the bottom-half work before the success case */
			netif_dbg(dev, rx_err, dev->net, "rx submit, %d\n", retval);
			queue_work(usbnet_wq, &dev->bh_w);
			break;
		case 0:
			usb_mark_last_busy(dev->udev);
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
/*
 * Synchronously send one TX message to the i2400m device over the
 * bulk-out endpoint (200 ms timeout per attempt), retrying on transient
 * errors and stalls under control of the urb_edc error-density counter.
 *
 * Takes an autopm reference for the duration when do_autopm is set.
 * A short write is reported as -EIO; unrecoverable USB errors collapse
 * to -ESHUTDOWN; repeated errors queue a device reset.
 *
 * Returns 0 on success or a negative errno.
 */
static int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg, size_t tx_msg_size)
{
	int result = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, sent_size, do_autopm;
	struct usb_endpoint_descriptor *epd;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ? usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		/* proceed without PM ref; remember not to drop it on exit */
		dev_err(dev, "TX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
	usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe, tx_msg, tx_msg_size, &sent_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (sent_size != tx_msg_size) {	/* short write? treat as I/O error */
			dev_err(dev, "TX: short write (%d B vs %zu "
				"expected)\n", sent_size, tx_msg_size);
			result = -EIO;
		}
		break;
	case -EPIPE:
		/*
		 * Endpoint stall: clear it and retry; too many stalls in the
		 * time window means something worse, so reset the device.
		 */
		if (edc_inc(&i2400mu->urb_edc, 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			usb_queue_reset_device(i2400mu->usb_iface);
			/* intentional fallthrough to the shutdown cases */
		} else {
			usb_clear_halt(i2400mu->usb_dev, usb_pipe);
			msleep(10);	/* give the device a breather */
			goto retry;
		}
	case -EINVAL:		/* driver going away */
	case -ENODEV:		/* device disconnected */
	case -ENOENT:
	case -ESHUTDOWN:
	case -ECONNRESET:
		/* collapse all terminal conditions into one error code */
		result = -ESHUTDOWN;
		break;
	default:
		/* transient error: retry until the error density trips */
		if (edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: maximum errors in URB "
				"exceeded; resetting device\n");
			usb_queue_reset_device(i2400mu->usb_iface);
		} else {
			dev_err(dev, "TX: cannot send URB; retrying. "
				"tx_msg @%zu %zu B [%d sent]: %d\n",
				(void *) tx_msg - i2400m->tx_buf, tx_msg_size, sent_size, result);
			goto retry;
		}
	}
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = result\n", i2400mu);
	return result;
}
/*
 * hci_dev send_frame hook: build and submit a TX URB for one HCI packet.
 *
 * HCI commands go out as control transfers (the usb_ctrlrequest header
 * is allocated here and freed by the completion handler, or below on
 * submission failure); ACL data as bulk; SCO data as isochronous.  ACL
 * and command URBs are throttled through inc_tx()/the waker work when
 * the device is suspending; SCO bypasses that (skip_waking).
 *
 * The anchor holds its own URB reference, so the local reference is
 * dropped with usb_free_urb() whether or not submission succeeded.
 *
 * Returns 0 on success (including deferral) or a negative errno.
 */
static int btusb_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	struct btusb_data *data = hdev->driver_data;
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	unsigned int pipe;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;

		/* control-transfer setup packet; owned by the URB from here */
		dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
		if (!dr) {
			usb_free_urb(urb);
			return -ENOMEM;
		}

		dr->bRequestType = data->cmdreq_type;
		dr->bRequest     = 0;
		dr->wIndex       = 0;
		dr->wValue       = 0;
		dr->wLength      = __cpu_to_le16(skb->len);

		pipe = usb_sndctrlpipe(data->udev, 0x00);

		usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, skb->data, skb->len, btusb_tx_complete, skb);

		hdev->stat.cmd_tx++;
		break;

	case HCI_ACLDATA_PKT:
		/* need a bulk-out endpoint and at least one ACL/LE link */
		if (!data->bulk_tx_ep || (hdev->conn_hash.acl_num < 1 && hdev->conn_hash.le_num < 1))
			return -ENODEV;

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;

		pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);

		usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_tx_complete, skb);

		hdev->stat.acl_tx++;
		break;

	case HCI_SCODATA_PKT:
		if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
			return -ENODEV;

		urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
		if (!urb)
			return -ENOMEM;

		pipe = usb_sndisocpipe(data->udev, data->isoc_tx_ep->bEndpointAddress);

		usb_fill_int_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_isoc_tx_complete, skb, data->isoc_tx_ep->bInterval);

		urb->transfer_flags  = URB_ISO_ASAP;

		__fill_isoc_descriptor(urb, skb->len, le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));

		hdev->stat.sco_tx++;
		/* SCO is time-critical: never defer it to the waker */
		goto skip_waking;

	default:
		return -EILSEQ;
	}

	/* device suspending: park the URB and let the waker submit it */
	err = inc_tx(data);
	if (err) {
		usb_anchor_urb(urb, &data->deferred);
		schedule_work(&data->waker);
		err = 0;
		goto done;
	}

skip_waking:
	usb_anchor_urb(urb, &data->tx_anchor);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		BT_ERR("%s urb %p submission failed",
		       hdev->name, urb);
		/* for control URBs this frees dr; NULL for bulk/isoc is fine */
		kfree(urb->setup_packet);
		usb_unanchor_urb(urb);
	} else {
		usb_mark_last_busy(data->udev);
	}

	/* drop our local reference; the anchor keeps the URB alive */
	usb_free_urb(urb);

done:
	return err;
}
/*
 * Completion handler for the rmnet control interrupt URB.
 *
 * On a RESPONSE_AVAILABLE CDC notification, queues the get_encap work to
 * fetch the response and wakes any opener waiting for the first
 * response.  -ENOENT is treated like success (payload may remain after
 * an unlink); terminal statuses return without resubmitting; other
 * errors resubmit the interrupt URB.
 */
static void notification_available_cb(struct urb *urb)
{
	int status;
	struct usb_cdc_notification *ctrl;
	struct usb_device *udev;
	struct rmnet_ctrl_dev *dev = urb->context;
	unsigned int iface_num;

	udev = interface_to_usbdev(dev->intf);
	iface_num = dev->intf->cur_altsetting->desc.bInterfaceNumber;

	switch (urb->status) {
	case 0:
	/*if non zero lenght of data received while unlink*/
	case -ENOENT:
		DBG_NOTI("[NACB:%d]<", iface_num);
		/*success*/
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ECONNRESET:
	case -EPROTO:
		return;
	case -EPIPE:
		pr_err_ratelimited("%s: Stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
		/* intentional fallthrough into the generic resubmit path */
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n", __func__, urb->status);
		goto resubmit_int_urb;
	}

	if (!urb->actual_length) {
		/* nothing to decode and, on this path, no resubmission —
		 * NOTE(review): confirm dropping the int URB here is intended */
		pr_err("Received Zero actual length: %d", urb->actual_length);
		return;
	}

	ctrl = urb->transfer_buffer;

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail_cnt++;

		usb_mark_last_busy(udev);

		if (urb->status == -ENOENT)
			pr_info("URB status is ENOENT");

		/* fetch the encapsulated response from process context */
		queue_work(dev->wq, &dev->get_encap_work);

		if (!dev->resp_available) {
			dev->resp_available = true;
			wake_up(&dev->open_wait_queue);
		}

		return;
	default:
		dev_err(dev->devicep, "%s:Command not implemented\n", __func__);
	}

resubmit_int_urb:
	usb_anchor_urb(urb, &dev->rx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		usb_unanchor_urb(urb);
		dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n", __func__, status);
	}

	return;
}
/* control interface reports status changes with "interrupt" transfers */
/*
 * Interrupt-URB completion handler for the CDC-ACM control endpoint
 * (older tty API variant: acm->tty directly, no tty_port refcounting).
 *
 * Decodes the CDC notification and, for SERIAL_STATE, updates
 * acm->ctrlin and hangs up the tty when DCD drops (unless CLOCAL).
 * The URB is resubmitted on every path except a terminal status
 * (-ECONNRESET/-ENOENT/-ESHUTDOWN).
 */
static void acm_ctrl_irq(struct urb *urb)
{
	struct acm *acm = urb->context;
	struct usb_cdc_notification *dr = urb->transfer_buffer;
	unsigned char *data;
	int newctrl;
	int retval;
	int status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d", __FUNCTION__, status);
		return;
	default:
		dbg("%s - nonzero urb status received: %d", __FUNCTION__, status);
		goto exit;	/* resubmit despite the error */
	}

	if (!ACM_READY(acm))
		goto exit;

	/* notification payload follows the CDC header */
	data = (unsigned char *)(dr + 1);
	switch (dr->bNotificationType) {

	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dbg("%s network", dr->wValue ? "connected to" : "disconnected from");
		break;

	case USB_CDC_NOTIFY_SERIAL_STATE:
		newctrl = le16_to_cpu(get_unaligned((__le16 *) data));

		/* falling DCD hangs up the line unless CLOCAL is set */
		if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
			dbg("calling hangup");
			tty_hangup(acm->tty);
		}

		acm->ctrlin = newctrl;

		dbg("input control lines: dcd%c dsr%c break%c ring%c framing%c parity%c overrun%c",
			acm->ctrlin & ACM_CTRL_DCD ? '+' : '-',
			acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
			acm->ctrlin & ACM_CTRL_BRK ? '+' : '-',
			acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
			acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-',
			acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
			acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
		break;

	default:
		dbg("unknown notification %d received: index %d len %d data0 %d data1 %d",
			dr->bNotificationType, dr->wIndex, dr->wLength, data[0], data[1]);
		break;
	}
exit:
	/* every completion counts as activity for runtime PM */
	usb_mark_last_busy(acm->dev);
	retval = usb_submit_urb (urb, GFP_ATOMIC);
	if (retval)
		err ("%s - usb_submit_urb failed with result %d", __FUNCTION__, retval);
}
/*
 * Completion handler for the rmnet control "get encapsulated response"
 * URB.  Copies the received control packet into a freshly allocated
 * list element, appends it to dev->rx_list under rx_lock, and wakes
 * readers.  Drops the autopm reference taken when the read was queued.
 *
 * The interrupt URB is resubmitted afterwards unless resume already did
 * (checked via dev->inturb->anchor).  Terminal URB statuses return
 * without resubmitting; -EOVERFLOW and other errors resubmit directly.
 */
static void resp_avail_cb(struct urb *urb)
{
	struct usb_device *udev;
	struct ctrl_pkt_list_elem *list_elem = NULL;
	struct rmnet_ctrl_dev *dev = urb->context;
	void *cpkt;
	int status = 0;
	size_t cpkt_size = 0;
	unsigned int iface_num;

	udev = interface_to_usbdev(dev->intf);
	iface_num = dev->intf->cur_altsetting->desc.bInterfaceNumber;

	/* balance the async PM get taken when this read was submitted */
	usb_autopm_put_interface_async(dev->intf);

	switch (urb->status) {
	case 0:
		/*success*/
		dev->get_encap_resp_cnt++;
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
		/* intentional fallthrough into the generic resubmit path */
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n", __func__, urb->status);
		goto resubmit_int_urb;
	}

	dev_dbg(dev->devicep, "Read %d bytes for %s\n", urb->actual_length, dev->name);

	cpkt = urb->transfer_buffer;
	cpkt_size = urb->actual_length;
	if (!cpkt_size) {
		dev->zlp_cnt++;
		dev_dbg(dev->devicep, "%s: zero length pkt received\n", __func__);
		goto resubmit_int_urb;
	}

	list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
	if (!list_elem) {
		/* NOTE(review): returning here skips the int-URB resubmit
		 * below — confirm notifications are meant to stop on OOM */
		dev_err(dev->devicep, "%s: list_elem alloc failed\n", __func__);
		return;
	}
	list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
	if (!list_elem->cpkt.data) {
		/* same caveat as above: no resubmit on this failure path */
		dev_err(dev->devicep, "%s: list_elem->data alloc failed\n", __func__);
		kfree(list_elem);
		return;
	}
	memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
	list_elem->cpkt.data_size = cpkt_size;
	spin_lock(&dev->rx_lock);
	list_add_tail(&list_elem->list, &dev->rx_list);
	spin_unlock(&dev->rx_lock);

	rd_cb_time = cpu_clock(smp_processor_id());

	wake_up(&dev->read_wait_queue);

resubmit_int_urb:
	/*check if it is already submitted in resume*/
	if (!dev->inturb->anchor) {
		usb_mark_last_busy(udev);
		usb_anchor_urb(dev->inturb, &dev->rx_submitted);
		status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
		if (status) {
			usb_unanchor_urb(dev->inturb);
			dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n", __func__, status);
		}
		DBG_NOTI("[CHKRA:%d]>", iface_num);
	}
}