static void usb_net_raw_ip_tx_urb_comp(struct urb *urb) { struct baseband_usb *usb; pr_debug("usb_net_raw_ip_tx_urb_comp {\n"); /* check input */ if (!urb) { pr_err("no urb\n"); return; } usb = (struct baseband_usb *)urb->context; switch (urb->status) { case 0: break; case -ENOENT: /* fall through */ case -ESHUTDOWN: /* fall through */ case -EPROTO: pr_info("%s: tx urb %p - link shutdown %d\n", __func__, urb, urb->status); usb_autopm_put_interface_async(usb->usb.interface); goto err_exit; default: pr_info("%s: tx urb %p - status %d\n", __func__, urb, urb->status); break; } if (urb->status) usb->stats.tx_errors++; else { usb->stats.tx_packets++; usb->stats.tx_bytes += urb->transfer_buffer_length; } /* autosuspend after tx completed */ if (!usb->usb.interface) { pr_err("%s: usb interface disconnected" " before tx urb completed!\n", __func__); goto err_exit; } usb_autopm_put_interface_async(usb->usb.interface); err_exit: /* free tx urb transfer buffer */ if (urb->transfer_buffer) { kfree(urb->transfer_buffer); urb->transfer_buffer = (void *) 0; } pr_debug("usb_net_raw_ip_tx_urb_comp }\n"); }
static void acm_port_down(struct acm *acm) { struct urb *urb; struct acm_wb *wb; int i; if (acm->dev) { usb_autopm_get_interface(acm->control); acm_set_control(acm, acm->ctrlout = 0); for (;;) { urb = usb_get_from_anchor(&acm->delayed); if (!urb) break; wb = urb->context; wb->use = 0; usb_autopm_put_interface_async(acm->control); } usb_kill_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_kill_urb(acm->wb[i].urb); for (i = 0; i < acm->rx_buflimit; i++) usb_kill_urb(acm->read_urbs[i]); acm->control->needs_remote_wakeup = 0; usb_autopm_put_interface(acm->control); } }
/*
 * ctrl_bridge_resume() - resume control bridge @id after usb suspend
 *
 * Clears the SUSPENDED flag, re-submits any write urbs parked on
 * tx_deferred while the device was suspended, then restarts reads.
 *
 * Returns 0 when the bridge was not suspended, -EINVAL/-ENODEV for a
 * bad or missing device, or the result of ctrl_bridge_start_read().
 */
int ctrl_bridge_resume(unsigned int id)
{
	struct ctrl_bridge *dev;
	struct urb *urb;

	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev)
		return -ENODEV;

	if (!test_and_clear_bit(SUSPENDED, &dev->flags))
		return 0;

	/* submit pending write requests */
	while ((urb = usb_get_from_anchor(&dev->tx_deferred))) {
		int ret;
		usb_anchor_urb(urb, &dev->tx_submitted);
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		if (ret < 0) {
			/* submission failed: release the urb, its buffers
			 * and the pm reference taken when it was queued */
			usb_unanchor_urb(urb);
			kfree(urb->setup_packet);
			kfree(urb->transfer_buffer);
			usb_free_urb(urb);
			usb_autopm_put_interface_async(dev->intf);
		}
	}

	return ctrl_bridge_start_read(dev);
}
/*
 * diag_bridge_write_cb() - completion handler for a diag bridge write
 *
 * Drops the autopm reference taken at submission, invokes the client's
 * write-complete callback and updates byte / pending-write accounting.
 */
static void diag_bridge_write_cb(struct urb *urb)
{
	struct diag_bridge *dev = urb->context;
	struct diag_bridge_ops *cbs = dev->ops;

	usb_autopm_put_interface_async(dev->ifc);

	if (urb->status == -EPROTO) {
		dev_err(&dev->udev->dev, "%s: proto error\n", __func__);
		/* save error so that subsequent read/write returns ESHUTDOWN */
		dev->err = urb->status;
		/* NOTE(review): this early return skips the client callback
		 * and leaves pending_writes elevated -- confirm intentional */
		usb_free_urb(urb);
		return;
	}

	/* report either the error or the number of bytes written */
	if (cbs && cbs->write_complete_cb)
		cbs->write_complete_cb(cbs->ctxt,
			urb->transfer_buffer,
			urb->transfer_buffer_length,
			urb->status < 0 ? urb->status : urb->actual_length);

	dev->bytes_to_mdm += urb->actual_length;
	dev->pending_writes--;
	usb_free_urb(urb);
}
/*
 * acm_write_start() - submit write buffer @wbn, or defer it while suspended
 *
 * Takes an async pm reference for the write.  While the device is
 * suspended (susp_count non-zero), the urb is fully prepared and parked
 * on acm->deferred (CONFIG_PM) or remembered in acm->delayed_wb, to be
 * submitted at resume time.  Otherwise, urbs deferred during a previous
 * suspend are flushed first and the new buffer is started.
 *
 * Returns 0 (possibly "a white lie" when the write was only deferred),
 * -ENODEV after disconnect, -EAGAIN when the single delayed_wb slot is
 * occupied (non-PM build), or the result of acm_start_wb().
 *
 * Called with the wb slot already allocated; runs under write_lock.
 */
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;
#ifdef CONFIG_PM
	struct urb *res;
#endif

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
							acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
#ifdef CONFIG_PM
		/* prepare the urb now and park it until resume */
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#else
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else {
			/* only one write may be delayed; balance the pm get */
			usb_autopm_put_interface_async(acm->control);
			printk(KERN_INFO "%s: acm->delayed_wb is not NULL, "
				"returning -EAGAIN\n", __func__);
			spin_unlock_irqrestore(&acm->write_lock, flags);
			return -EAGAIN;
		}
#endif
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

#ifdef CONFIG_PM
	/* flush writes that were deferred during a previous suspend */
	while ((res = usb_get_from_anchor(&acm->deferred))) {
		/* decrement ref count*/
		usb_put_urb(res);
		rc = usb_submit_urb(res, GFP_ATOMIC);
		if (rc < 0) {
			dbg("usb_submit_urb(pending request) failed: %d", rc);
			usb_unanchor_urb(res);
			acm_write_done(acm, res->context);
		}
	}
#endif
	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
/*
 * modem_usb_wkup_work() - workqueue handler that wakes the modem interface
 *
 * Runs with the interface device's pm semaphore held to serialize
 * against the driver core (legacy dev.power.status API); bails out if a
 * system sleep transition is in progress.  The wakeup_flag cmpxchg
 * ensures only one wakeup is performed; the async put at the end drops
 * our pm reference again once the interface has been resumed.
 */
static void modem_usb_wkup_work(struct work_struct *work)
{
	struct modem_port *modem_port_ptr =
		container_of(work, struct modem_port, usb_wkup_work);
	struct usb_serial *serial;
	int result;

	serial = modem_port_ptr->serial;

	down(&serial->interface->dev.sem);
	/* do not fight an ongoing system suspend/resume transition */
	if (serial->interface->dev.power.status >= DPM_OFF ||
	    serial->interface->dev.power.status == DPM_RESUMING) {
		up(&serial->interface->dev.sem);
		return;
	}

	/* only the first caller since the last reset performs the wakeup */
	if (!(atomic_cmpxchg(&modem_port_ptr->wakeup_flag, 0, 1))) {
		result = usb_autopm_get_interface(serial->interface);
		if (result < 0) {
			/* wakeup failed: clear the flag so it can be retried */
			atomic_set(&modem_port_ptr->wakeup_flag, 0);
			dev_err(&serial->interface->dev,
				"%s: autopm failed. result = %d \n",
				__func__, result);
			up(&serial->interface->dev.sem);
			return;
		}
		if (cdma_modem_debug)
			dev_info(&serial->interface->dev,
				"%s: woke up interface\n", __func__);
		usb_autopm_put_interface_async(serial->interface);
	}
	up(&serial->interface->dev.sem);
}
static void sierra_outdat_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct sierra_port_private *portdata = usb_get_serial_port_data(port); struct sierra_intf_private *intfdata; int status = urb->status; intfdata = usb_get_serial_data(port->serial); /* free up the transfer buffer, as usb_free_urb() does not do this */ kfree(urb->transfer_buffer); usb_autopm_put_interface_async(port->serial->interface); if (status) dev_dbg(&port->dev, "%s - nonzero write bulk status " "received: %d\n", __func__, status); spin_lock(&portdata->lock); --portdata->outstanding_urbs; spin_unlock(&portdata->lock); spin_lock(&intfdata->susp_lock); --intfdata->in_flight; spin_unlock(&intfdata->susp_lock); usb_serial_port_softint(port); }
/*
 * ksb_start_rx_work() - prime the bulk-in pipe with NO_RX_REQS read urbs
 *
 * Takes a single autopm reference for the whole batch.  -EAGAIN and
 * -EACCES from usb_autopm_get_interface() are tolerated: no pm
 * reference is then held (tracked via @put), but rx is still started
 * so the device can be serviced while mid-resume.  Each urb is
 * anchored on ksb->submitted and counted in rx_pending_cnt; waiters
 * on pending_urb_wait are woken when a submission fails.
 */
static void ksb_start_rx_work(struct work_struct *w)
{
	struct ks_bridge *ksb =
			container_of(w, struct ks_bridge, start_rx_work);
	struct data_pkt	*pkt;
	struct urb *urb;
	int i = 0;
	int ret;
	bool put = true;

	ret = usb_autopm_get_interface(ksb->ifc);
	if (ret < 0) {
		if (ret != -EAGAIN && ret != -EACCES) {
			pr_err_ratelimited("autopm_get failed:%d", ret);
			return;
		}
		/* benign failure: no pm reference was taken, skip the put */
		put = false;
	}
	for (i = 0; i < NO_RX_REQS; i++) {

		if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
			break;

		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
		if (IS_ERR(pkt)) {
			pr_err("unable to allocate data pkt");
			break;
		}

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			pr_err("unable to allocate urb");
			ksb_free_data_pkt(pkt);
			break;
		}

		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
				pkt->buf, pkt->len,
				ksb_rx_cb, pkt);
		usb_anchor_urb(urb, &ksb->submitted);

		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

		atomic_inc(&ksb->rx_pending_cnt);
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			pr_err("in urb submission failed");
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			atomic_dec(&ksb->rx_pending_cnt);
			wake_up(&ksb->pending_urb_wait);
			break;
		}

		/* the anchor holds its own reference; drop ours */
		usb_free_urb(urb);
	}
	if (put)
		usb_autopm_put_interface_async(ksb->ifc);
}
/*
 * ctrl_write_callback() - completion handler for a rmnet control write
 *
 * With HTC_DEBUG_QMI_STUCK, cancels the stuck-urb watchdog timer and
 * reports urbs that took longer than one second to complete.  Error
 * completions are counted, the urb and its buffers are freed, and the
 * autopm reference taken at submission is dropped.
 */
static void ctrl_write_callback(struct urb *urb)
{
#ifdef HTC_DEBUG_QMI_STUCK
	struct ctrl_write_context *context = urb->context;
	struct rmnet_ctrl_dev *dev = context->dev;
#else
	struct rmnet_ctrl_dev *dev = urb->context;
#endif

#ifdef HTC_DEBUG_QMI_STUCK
	del_timer(&context->timer);
	/* flag suspiciously slow completions (> 1 s) */
	if (unlikely(time_is_before_jiffies(context->start_jiffies + HZ)))
		pr_err("[%s] urb %p takes %d msec to complete.\n", __func__,
			urb,
			jiffies_to_msecs(jiffies - context->start_jiffies));
#endif

	if (urb->status) {
		dev->tx_ctrl_err_cnt++;
		pr_debug_ratelimited("Write status/size %d/%d\n",
				urb->status, urb->actual_length);
	}

#ifdef HTC_LOG_RMNET_USB_CTRL
	log_rmnet_usb_ctrl_event(dev->intf, "Tx cb", urb->actual_length);
#endif

	kfree(urb->setup_packet);
	kfree(urb->transfer_buffer);
	usb_free_urb(urb);
	usb_autopm_put_interface_async(dev->intf);
#ifdef HTC_DEBUG_QMI_STUCK
	kfree(context);
#endif
}
/*
 * acm_write_start() - submit write buffer @wbn, or defer it during suspend
 *
 * Takes an async pm reference for the write.  While suspended, the
 * buffer is remembered in acm->delayed_wb to be submitted at resume.
 *
 * Fix: when delayed_wb was already occupied, the new buffer was
 * silently dropped but its wb slot was left marked in use, permanently
 * leaking one of the limited write buffers each time.  The slot is now
 * released before returning the "white lie".
 *
 * Returns 0 on (possibly deferred) success, -ENODEV after disconnect,
 * or the result of acm_start_wb().  Runs under write_lock.
 */
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
							acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else {
			/* deferral slot busy: free the wb so the write
			 * buffer pool is not exhausted and balance the pm
			 * get; the data is dropped (pre-existing design) */
			wb->use = 0;
			usb_autopm_put_interface_async(acm->control);
		}
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
static int hid_submit_out(struct hid_device *hid) { struct hid_report *report; char *raw_report; struct usbhid_device *usbhid = hid->driver_data; int r; report = usbhid->out[usbhid->outtail].report; raw_report = usbhid->out[usbhid->outtail].raw_report; r = usb_autopm_get_interface_async(usbhid->intf); if (r < 0) return -1; /* * if the device hasn't been woken, we leave the output * to resume() */ if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) { usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0); usbhid->urbout->dev = hid_to_usb_dev(hid); memcpy(usbhid->outbuf, raw_report, usbhid->urbout->transfer_buffer_length); kfree(raw_report); dbg_hid("submitting out urb\n"); if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) { hid_err(hid, "usb_submit_urb(out) failed\n"); usb_autopm_put_interface_async(usbhid->intf); return -1; } usbhid->last_out = jiffies; }
static void ksb_tx_cb(struct urb *urb) { struct data_pkt *pkt = urb->context; struct ks_bridge *ksb = pkt->ctxt; dbg_log_event(ksb, "C TX_URB", urb->status, 0); #if 0 dev_dbg(&ksb->udev->dev, "status:%d", urb->status); #endif if (test_bit(USB_DEV_CONNECTED, &ksb->flags)) usb_autopm_put_interface_async(ksb->ifc); if (urb->status < 0) pr_err_ratelimited("%s: urb failed with err:%d", ksb->fs_dev.name, urb->status); if ((ksb->ifc->cur_altsetting->desc.bInterfaceNumber == 2)) dev_info(ksb->fs_dev.this_device, "write: %d bytes", urb->actual_length); ksb_free_data_pkt(pkt); atomic_dec(&ksb->tx_pending_cnt); wake_up(&ksb->pending_urb_wait); }
static void acm_port_shutdown(struct tty_port *port) { struct acm *acm = container_of(port, struct acm, port); struct urb *urb; struct acm_wb *wb; int i; int pm_err; dev_dbg(&acm->control->dev, "%s\n", __func__); mutex_lock(&acm->mutex); if (!acm->disconnected) { pm_err = usb_autopm_get_interface(acm->control); acm_set_control(acm, acm->ctrlout = 0); for (;;) { urb = usb_get_from_anchor(&acm->delayed); if (!urb) break; wb = urb->context; wb->use = 0; usb_autopm_put_interface_async(acm->control); } usb_kill_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_kill_urb(acm->wb[i].urb); for (i = 0; i < acm->rx_buflimit; i++) usb_kill_urb(acm->read_urbs[i]); acm->control->needs_remote_wakeup = 0; if (!pm_err) usb_autopm_put_interface(acm->control); } mutex_unlock(&acm->mutex); }
int usbnet_resume (struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); struct sk_buff *skb; struct urb *res; int retval; if (!--dev->suspend_count) { spin_lock_irq(&dev->txq.lock); while ((res = usb_get_from_anchor(&dev->deferred))) { printk(KERN_INFO"%s has delayed data\n", __func__); skb = (struct sk_buff *)res->context; retval = usb_submit_urb(res, GFP_ATOMIC); if (retval < 0) { dev_kfree_skb_any(skb); usb_free_urb(res); usb_autopm_put_interface_async(dev->intf); } else { dev->net->trans_start = jiffies; __skb_queue_tail(&dev->txq, skb); } } smp_mb(); clear_bit(EVENT_DEV_ASLEEP, &dev->flags); spin_unlock_irq(&dev->txq.lock); if (!(dev->txq.qlen >= TX_QLEN(dev))) netif_start_queue(dev->net); tasklet_schedule (&dev->bh); } return 0; }
/* Drop the async autopm reference on the USB interface so the core may
 * autosuspend it again.  Always returns 0; logs an error if the global
 * usb_sc interface is not available.  @ol_sc is unused. */
int hif_pm_runtime_allow_suspend(void *ol_sc)
{
	if (!usb_sc || !usb_sc->interface) {
		pr_err("%s: USB interface isn't ready for autopm\n", __func__);
		return 0;
	}

	usb_autopm_put_interface_async(usb_sc->interface);
	return 0;
}
/*
 * data_bridge_write_cb() - tx completion for the data bridge
 *
 * Updates tx statistics, frees the urb and skb, re-enables the client's
 * flow control once the pending-urb count falls below the threshold,
 * and drops the autopm reference taken at submission (unless the
 * device is already gone -- see the comment at the bottom).
 */
static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff		*skb = urb->context;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
	struct data_bridge	*dev = info->dev;
	struct bridge		*brdg = dev->brdg;
	int			pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /*success*/
		dev->to_modem++;
		dev->tx_num_of_bytes += skb->len;
		dbg_timestamp("UL", skb);
		break;
	case -EPROTO:
		dev->err = -EPROTO;
		break;
	case -EPIPE:
		/* stalled endpoint: clear the halt from process context */
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /*babble error*/
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
					__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/*flow ctrl*/
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	/* if we are here after device disconnect
	 * usb_unbind_interface() takes care of
	 * residual pm_autopm_get_interface_* calls
	 */
	if (urb->dev->state != USB_STATE_NOTATTACHED)
		usb_autopm_put_interface_async(dev->intf);
}
static void ksb_start_rx_work(struct work_struct *w) { struct ks_bridge *ksb = container_of(w, struct ks_bridge, start_rx_work); struct data_pkt *pkt; struct urb *urb; int i = 0; int ret; for (i = 0; i < NO_RX_REQS; i++) { pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb); if (IS_ERR(pkt)) { pr_err("unable to allocate data pkt"); return; } urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { pr_err("unable to allocate urb"); ksb_free_data_pkt(pkt); return; } ret = usb_autopm_get_interface(ksb->ifc); if (ret < 0 && ret != -EAGAIN && ret != -EACCES) { pr_err_ratelimited("autopm_get failed:%d", ret); usb_free_urb(urb); ksb_free_data_pkt(pkt); return; } ksb->alloced_read_pkts++; usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe, pkt->buf, pkt->len, ksb_rx_cb, pkt); usb_anchor_urb(urb, &ksb->submitted); dbg_log_event(ksb, "S RX_URB", pkt->len, 0); ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { pr_err("in urb submission failed"); usb_unanchor_urb(urb); usb_free_urb(urb); ksb_free_data_pkt(pkt); ksb->alloced_read_pkts--; usb_autopm_put_interface(ksb->ifc); return; } usb_autopm_put_interface_async(ksb->ifc); usb_free_urb(urb); } }
/*
 * tx_complete() - usbnet tx urb completion (HTC HSIC variant)
 *
 * Updates tx statistics, schedules halt-recovery work for stalled
 * endpoints, throttles the queue on controller i/o faults, drops the
 * autopm reference taken at submission and defers the rest of the
 * teardown to the tx bottom half.
 */
static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		/* with FLAG_MULTI_PACKET the minidriver counts packets */
		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
			dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
//--------------------------------------------------------
#ifdef CONFIG_HTC_QCT_9K_MDM_HSIC_PM_DBG
			usb_mark_intf_last_busy(dev->intf, true);
#endif //CONFIG_HTC_QCT_9K_MDM_HSIC_PM_DBG
//--------------------------------------------------------
			usb_mark_last_busy(dev->udev);
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				netif_dbg(dev, link, dev->net,
					  "tx throttle %d\n", urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* balance the autopm get taken when the urb was submitted */
	usb_autopm_put_interface_async(dev->intf);
	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}
/*
 * tx_complete() - usbnet tx urb completion (Ericsson F3307 variant)
 *
 * Updates tx statistics, defers halt recovery, throttles the queue on
 * controller i/o faults and hands the skb to the tx bottom half.
 *
 * NOTE(review): usb_autopm_put_interface_async() is compiled in only
 * under CONFIG_ERICSSON_F3307_ENABLE -- confirm the matching autopm
 * get on the submit path is guarded by the same config, otherwise the
 * pm usage count is unbalanced on other builds.
 */
static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
#if defined(CONFIG_ERICSSON_F3307_ENABLE)
			usb_mark_last_busy(dev->udev);
#endif
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link (dev))
					devdbg (dev, "tx throttle %d",
							urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			if (netif_msg_tx_err (dev))
				devdbg (dev, "tx err %d", entry->urb->status);
			break;
		}
	}

#if defined(CONFIG_ERICSSON_F3307_ENABLE)
	usb_autopm_put_interface_async(dev->intf);
#endif
	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}
static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct acm *acm = tty->driver_data; int stat; unsigned long flags; int wbn; struct acm_wb *wb; if (!count) return 0; dev_vdbg(&acm->data->dev, "%s - count %d\n", __func__, count); spin_lock_irqsave(&acm->write_lock, flags); wbn = acm_wb_alloc(acm); if (wbn < 0) { spin_unlock_irqrestore(&acm->write_lock, flags); return 0; } wb = &acm->wb[wbn]; if (!acm->dev) { wb->use = 0; spin_unlock_irqrestore(&acm->write_lock, flags); return -ENODEV; } count = (count > acm->writesize) ? acm->writesize : count; dev_vdbg(&acm->data->dev, "%s - write %d\n", __func__, count); memcpy(wb->buf, buf, count); wb->len = count; usb_autopm_get_interface_async(acm->control); if (acm->susp_count) { if (!acm->delayed_wb) acm->delayed_wb = wb; else usb_autopm_put_interface_async(acm->control); spin_unlock_irqrestore(&acm->write_lock, flags); return count; /* A white lie */ } usb_mark_last_busy(acm->dev); stat = acm_start_wb(acm, wb); spin_unlock_irqrestore(&acm->write_lock, flags); if (stat < 0) return stat; return count; }
static void data_bridge_write_cb(struct urb *urb) { struct sk_buff *skb = urb->context; struct timestamp_info *info = (struct timestamp_info *)skb->cb; struct data_bridge *dev = info->dev; struct bridge *brdg = dev->brdg; int pending; pr_debug("%s: dev:%p\n", __func__, dev); switch (urb->status) { case 0: dbg_timestamp("UL", skb); break; case -EPROTO: dev->err = -EPROTO; break; case -EPIPE: set_bit(TX_HALT, &dev->flags); dev_err(&dev->intf->dev, "%s: epout halted\n", __func__); schedule_work(&dev->kevent); case -ESHUTDOWN: case -ENOENT: case -ECONNRESET: case -EOVERFLOW: default: pr_debug_ratelimited("%s: non zero urb status = %d\n", __func__, urb->status); } usb_free_urb(urb); dev_kfree_skb_any(skb); pending = atomic_dec_return(&dev->pending_txurbs); if (brdg && fctrl_support && pending <= fctrl_dis_thld && test_and_clear_bit(TX_THROTTLED, &brdg->flags)) { pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n", __func__, pending); dev->tx_unthrottled_cnt++; if (brdg->ops.unthrottle_tx) brdg->ops.unthrottle_tx(brdg->ctx); } if (urb->dev->state != USB_STATE_NOTATTACHED) usb_autopm_put_interface_async(dev->intf); }
/* Control-write completion: log a failed status, release the urb and
 * both of its buffers, and drop the autopm reference taken when the
 * request was submitted. */
static void ctrl_write_callback(struct urb *urb)
{
	struct ctrl_bridge *dev = urb->context;
	int status = urb->status;

	if (status)
		pr_debug("Write status/size %d/%d\n",
				status, urb->actual_length);

	kfree(urb->transfer_buffer);
	kfree(urb->setup_packet);
	usb_free_urb(urb);

	usb_autopm_put_interface_async(dev->intf);
}
static void hsictty_write_callback(struct urb *urb) { struct usb_serial_port *port; struct hsictty_port_private *portdata; struct hsictty_intf_private *intfdata; int i; port = urb->context; intfdata = usb_get_serial_data(port->serial); portdata = usb_get_serial_port_data(port); if (urb->actual_length <= 0) { hsictty_error ("%s: write failed, write length: %d in channel:%d, endpoint:%d\n", __func__, urb->actual_length, portdata->channel, usb_pipeendpoint(urb->pipe)); } else { hsictty_dbg("%s: write length: %d in channel:%d, endpoint:%d\n", __func__, urb->actual_length, portdata->channel, usb_pipeendpoint(urb->pipe)); } #ifdef BACKUP_DATA_DUMP if (!dumped) backup_log(portdata->channel, 1, urb->transfer_buffer, urb->transfer_buffer_length); #endif usb_serial_port_softint(port); usb_autopm_put_interface_async(port->serial->interface); portdata = usb_get_serial_port_data(port); spin_lock(&intfdata->susp_lock); intfdata->in_flight--; spin_unlock(&intfdata->susp_lock); for (i = 0; i < N_OUT_URB; ++i) { if (portdata->out_urbs[i] == urb) { smp_mb__before_clear_bit(); hsictty_dbg ("%s: urb(%d) freed on channel:%d, endpoint:%d, in_flight:%d, pm use cnt:%d\n", __func__, i, portdata->channel, usb_pipeendpoint(urb->pipe), intfdata->in_flight, atomic_read(&port->serial->interface->dev.power. usage_count)); clear_bit(i, &portdata->out_busy); complete_all(&portdata->tx_notifier); break; } } }
/* Bulk-out completion for the ipc bridge: balances the autopm
 * reference, records the outcome of the write (-ENODEV on disconnect,
 * the urb status on error, otherwise the byte count) and signals the
 * waiting submitter. */
static void ipc_bridge_write_cb(struct urb *urb)
{
	struct ipc_bridge *dev = urb->context;
	int result;

	usb_autopm_put_interface_async(dev->intf);

	if (urb->dev->state == USB_STATE_NOTATTACHED)
		result = -ENODEV;
	else if (urb->status < 0)
		result = urb->status;
	else
		result = urb->actual_length;

	dev->write_result = result;
	complete(&dev->write_done);
}
/* Control-write completion for rmnet: count error completions, release
 * the urb and both of its buffers, then drop the autopm reference
 * taken at submit time. */
static void ctrl_write_callback(struct urb *urb)
{
	struct rmnet_ctrl_dev *ctldev = urb->context;
	int status = urb->status;

	if (status) {
		ctldev->tx_ctrl_err_cnt++;
		pr_debug_ratelimited("Write status/size %d/%d\n",
				status, urb->actual_length);
	}

	kfree(urb->setup_packet);
	kfree(urb->transfer_buffer);
	usb_free_urb(urb);

	usb_autopm_put_interface_async(ctldev->intf);
}
/*
 * resp_avail_cb() - completion handler for the encapsulated-response urb
 *
 * Drops the autopm reference taken for the read, forwards a successful
 * response to the bridge client, and -- unless the link is going away --
 * re-submits the interrupt urb to poll for the next response.
 *
 * NOTE(review): -EPROTO clears resubmit_urb and then falls through to
 * the logging cases, and dev->inturb->anchor is inspected directly to
 * avoid double-anchoring -- confirm both are intentional.
 */
static void resp_avail_cb(struct urb *urb)
{
	struct ctrl_bridge	*dev = urb->context;
	int			status = 0;
	int			resubmit_urb = 1;
	struct bridge		*brdg = dev->brdg;

	usb_autopm_put_interface_async(dev->intf);

	switch (urb->status) {
	case 0: /*success*/
		dev->get_encap_res++;
		if (brdg && brdg->ops.send_pkt)
			brdg->ops.send_pkt(brdg->ctx, urb->transfer_buffer,
				urb->actual_length);
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET: /* unplug */
	case -EPROTO: /*babble error*/
		resubmit_urb = 0;
	/*resubmit*/
	case -EOVERFLOW:
	default:
		dev_dbg(&dev->intf->dev, "%s: non zero urb status = %d\n",
			__func__, urb->status);
	}

	if (resubmit_urb) {
		/*re- submit int urb to check response available*/
		if(!dev->inturb->anchor){
			usb_mark_last_busy(dev->udev);
			usb_anchor_urb(dev->inturb, &dev->tx_submitted);
			status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
			if (status) {
				dev_err(&dev->intf->dev,
					"%s: Error re-submitting Int URB %d\n",
					__func__, status);
				usb_unanchor_urb(dev->inturb);
			}
		}
	}
}
/* Tx urb completion for the ks bridge: drop the autopm reference (only
 * while the interface pointer is still valid), report failures and
 * free the data packet. */
static void ksb_tx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;
	int status = urb->status;

	dbg_log_event(ksb, "C TX_URB", status, 0);
	pr_debug("status:%d", status);

	if (ksb->ifc)
		usb_autopm_put_interface_async(ksb->ifc);

	if (status < 0)
		pr_err_ratelimited("urb failed with err:%d", status);

	ksb_free_data_pkt(pkt);
}
/*
 * resp_avail_cb() - encapsulated-response completion (rx_state variant)
 *
 * Forwards a successful response to the bridge client and restarts the
 * read machinery, or parks rx in RX_IDLE on fatal errors.  The autopm
 * reference is dropped at the end; on a NOTATTACHED device the early
 * return skips that put -- NOTE(review): presumably relying on
 * usb_unbind_interface() to reclaim residual pm references at
 * disconnect (as noted elsewhere in this file); confirm.
 */
static void resp_avail_cb(struct urb *urb)
{
	struct ctrl_bridge	*dev = urb->context;
	int			resubmit_urb = 1;
	struct bridge		*brdg = dev->brdg;
	unsigned long		flags;

	/*usb device disconnect*/
	if (urb->dev->state == USB_STATE_NOTATTACHED)
		return;

	switch (urb->status) {
	case 0: /*success*/
		dev->get_encap_res++;
		if (brdg && brdg->ops.send_pkt)
			brdg->ops.send_pkt(brdg->ctx, urb->transfer_buffer,
				urb->actual_length);
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET: /* unplug */
	case -EPROTO: /*babble error*/
		resubmit_urb = 0;
	/*resubmit*/
	case -EOVERFLOW:
	default:
		dev_dbg(&dev->intf->dev, "%s: non zero urb status = %d\n",
			__func__, urb->status);
	}

	if (resubmit_urb) {
		/*re- submit int urb to check response available*/
		ctrl_bridge_start_read(dev, GFP_ATOMIC);
	} else {
		/* fatal status: mark the read side idle */
		spin_lock_irqsave(&dev->lock, flags);
		dev->rx_state = RX_IDLE;
		spin_unlock_irqrestore(&dev->lock, flags);
	}

	usb_autopm_put_interface_async(dev->intf);
}
/*
 * acm_write_start() - submit or defer write buffer @wbn (hybrid variant)
 *
 * Takes an async pm reference; while suspended the write is deferred,
 * otherwise it is started via acm_start_wb().  Runs under write_lock.
 *
 * NOTE(review): with CONFIG_PM the suspend path both anchors wb->urb on
 * acm->deferred AND records the same wb in acm->delayed_wb; if the
 * resume handler processes both, the write would be submitted twice.
 * Confirm against the matching resume code before relying on this.
 */
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dbg("%s susp_count: %d", __func__, acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
#ifdef CONFIG_PM
		/* park the fully prepared urb until resume */
		printk("%s buffer urb\n", __func__);
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#endif
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else {
			/* only one delayed write slot; balance the pm get */
			usb_autopm_put_interface_async(acm->control);
			printk(KERN_INFO "%s: acm->delayed_wb is not NULL, "
				"returning -EAGAIN\n", __func__);
			spin_unlock_irqrestore(&acm->write_lock, flags);
			return -EAGAIN;
		}
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
/*
 * usbnet_resume() - resume handler (HTC-fixed variant)
 *
 * Restarts the interrupt urb for an open device, re-submits tx urbs
 * deferred while asleep -- dropping the reference usb_get_from_anchor()
 * hands back, per the HTC fix below -- then clears EVENT_DEV_ASLEEP and
 * kicks the tx queue and bottom half.  Always returns 0.
 */
int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff          *skb;
	struct urb              *res;
	int                     retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->interrupt, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			//HTC +++
			// refer to acm_write_start() and usb_net_raw_ip_tx_urb_work(), need to
			// decrement urb ref count after usb_get_from_anchor() to prevent memory leak
			usb_put_urb(res);
			//HTC ---

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_start_queue(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}
	return 0;
}