static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
							acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else
			usb_autopm_put_interface_async(acm->control);
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dbg("%s susp_count: %d", __func__, acm->susp_count);
	if (acm->susp_count) {
		acm->delayed_wb = wb;
		schedule_work(&acm->waker);
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;
#ifdef CONFIG_PM
	struct urb *res;
#endif

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
							acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
#ifdef CONFIG_PM
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#else
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else {
			usb_autopm_put_interface_async(acm->control);
			printk(KERN_INFO "%s: acm->delayed_wb is not NULL, "
				"returning -EAGAIN\n", __func__);
			spin_unlock_irqrestore(&acm->write_lock, flags);
			return -EAGAIN;
		}
#endif
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

#ifdef CONFIG_PM
	while ((res = usb_get_from_anchor(&acm->deferred))) {
		/* decrement ref count */
		usb_put_urb(res);
		rc = usb_submit_urb(res, GFP_ATOMIC);
		if (rc < 0) {
			dbg("usb_submit_urb(pending request) failed: %d", rc);
			usb_unanchor_urb(res);
			acm_write_done(acm, res->context);
		}
	}
#endif
	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct acm_wb *wb;
	int rv = 0;
	struct urb *res;
	int cnt;

	spin_lock_irq(&acm->read_lock);
	acm->susp_count -= 1;
	cnt = acm->susp_count;
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;

	mutex_lock(&acm->mutex);

#ifdef CONFIG_PM
	while ((res = usb_get_from_anchor(&acm->deferred))) {
		printk("%s process buffered request\n", __func__);
		rv = usb_submit_urb(res, GFP_ATOMIC);
		if (rv < 0) {
			dbg("usb_submit_urb(pending request) failed: %d", rv);
		}
	}
#endif

	if (acm->port.count) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);

		spin_lock_irq(&acm->write_lock);
		if (acm->delayed_wb) {
			wb = acm->delayed_wb;
			acm->delayed_wb = NULL;
			spin_unlock_irq(&acm->write_lock);
			acm_start_wb(acm, wb);
		} else {
			spin_unlock_irq(&acm->write_lock);
		}

		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto err_out;

		tasklet_schedule(&acm->urb_task);
	}

err_out:
	mutex_unlock(&acm->mutex);
	return rv;
}
static int acm_tty_write(struct tty_struct *tty,
					const unsigned char *buf, int count)
{
	struct acm *acm = tty->driver_data;
	int stat;
	unsigned long flags;
	int wbn;
	struct acm_wb *wb;

	if (!count)
		return 0;

	dev_vdbg(&acm->data->dev, "%s - count %d\n", __func__, count);

	spin_lock_irqsave(&acm->write_lock, flags);
	wbn = acm_wb_alloc(acm);
	if (wbn < 0) {
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;
	}
	wb = &acm->wb[wbn];

	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	count = (count > acm->writesize) ? acm->writesize : count;
	dev_vdbg(&acm->data->dev, "%s - write %d\n", __func__, count);
	memcpy(wb->buf, buf, count);
	wb->len = count;

	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else
			usb_autopm_put_interface_async(acm->control);
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return count;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	stat = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	if (stat < 0)
		return stat;
	return count;
}
static void acm_waker(struct work_struct *waker)
{
	struct acm *acm = container_of(waker, struct acm, waker);
	int rv;

	rv = usb_autopm_get_interface(acm->control);
	if (rv < 0) {
		dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
		return;
	}
	if (acm->delayed_wb) {
		acm_start_wb(acm, acm->delayed_wb);
		acm->delayed_wb = NULL;
	}
	usb_autopm_put_interface(acm->control);
}
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dbg("%s susp_count: %d", __func__, acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
#ifdef CONFIG_PM
		printk("%s buffer urb\n", __func__);
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#endif
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else {
			usb_autopm_put_interface_async(acm->control);
			printk(KERN_INFO "%s: acm->delayed_wb is not NULL, "
				"returning -EAGAIN\n", __func__);
			spin_unlock_irqrestore(&acm->write_lock, flags);
			return -EAGAIN;
		}
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb;
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	if (!acm->write_ready) {
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}

	wb = &acm->wb[wbn];
	if (acm_wb_is_avail(acm) <= 1)
		acm->write_ready = 0;

	dbg("%s susp_count: %d", __FUNCTION__, acm->susp_count);
	if (acm->susp_count) {
		acm->old_ready = acm->write_ready;
		acm->delayed_wb = wb;
		acm->write_ready = 0;
		schedule_work(&acm->waker);
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	if (!acm_wb_is_used(acm, wbn)) {
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;
	}

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct acm_wb *wb;
	int rv = 0;
	int cnt;

	spin_lock_irq(&acm->read_lock);
	acm->susp_count -= 1;
	cnt = acm->susp_count;
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;

	mutex_lock(&acm->mutex);
	if (acm->port.count) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);

		spin_lock_irq(&acm->write_lock);
		if (acm->delayed_wb) {
			wb = acm->delayed_wb;
			acm->delayed_wb = NULL;
			spin_unlock_irq(&acm->write_lock);
			acm_start_wb(acm, wb);
		} else {
			spin_unlock_irq(&acm->write_lock);
		}

		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto err_out;

		tasklet_schedule(&acm->urb_task);
	}

err_out:
	mutex_unlock(&acm->mutex);
	return rv;
}
static void acm_waker(struct work_struct *waker)
{
	struct acm *acm = container_of(waker, struct acm, waker);
	unsigned long flags;
	int rv;

	rv = usb_autopm_get_interface(acm->control);
	if (rv < 0) {
		err("Autopm failure in %s", __FUNCTION__);
		return;
	}
	if (acm->delayed_wb) {
		acm_start_wb(acm, acm->delayed_wb);
		acm->delayed_wb = NULL;
	}

	spin_lock_irqsave(&acm->write_lock, flags);
	acm->write_ready = acm->old_ready;
	spin_unlock_irqrestore(&acm->write_lock, flags);

	usb_autopm_put_interface(acm->control);
}
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct acm_wb *wb;
	int rv = 0;
	int cnt;

	spin_lock_irq(&acm->read_lock);
	acm->susp_count -= 1;
	cnt = acm->susp_count;
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;

	if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);

		spin_lock_irq(&acm->write_lock);
		if (acm->delayed_wb) {
			wb = acm->delayed_wb;
			acm->delayed_wb = NULL;
			spin_unlock_irq(&acm->write_lock);
			acm_start_wb(acm, wb);
		} else {
			spin_unlock_irq(&acm->write_lock);
		}

		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto err_out;

		rv = acm_submit_read_urbs(acm, GFP_NOIO);
	}

err_out:
	return rv;
}
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct urb *urb;
	int rv = 0;

	mutex_lock(&acm->mutex);
	spin_lock_irq(&acm->read_lock);
	spin_lock(&acm->write_lock);

	if (--acm->susp_count)
		goto out;

	if (acm->port.count) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);

		for (;;) {
			urb = usb_get_from_anchor(&acm->delayed);
			if (!urb)
				break;

			acm_start_wb(acm, urb->context);
		}

		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto out;

		rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
	}

out:
	spin_unlock(&acm->write_lock);
	spin_unlock_irq(&acm->read_lock);
	mutex_unlock(&acm->mutex);

	return rv;
}
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dbg("%s susp_count: %d", __func__, acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
		if (!acm->delayed_wb) {
			acm->delayed_wb = wb;
		} else {
			if (acm->delayed_wb->len + wb->len <= acm->writesize) {
				memcpy(acm->delayed_wb->buf + acm->delayed_wb->len,
					wb->buf, wb->len);
				acm->delayed_wb->len += wb->len;
			}
			wb->use = 0;
			usb_autopm_put_interface_async(acm->control);
		}
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	int rv = 0;
	int cnt;
#ifdef CONFIG_PM
	struct urb *res;
#else
	struct acm_wb *wb;
#endif

	if (!acm) {
		pr_err("%s: !acm\n", __func__);
		return -ENODEV;
	}

	spin_lock_irq(&acm->read_lock);
	if (acm->susp_count > 0) {
		acm->susp_count -= 1;
		cnt = acm->susp_count;
	} else {
		spin_unlock_irq(&acm->read_lock);
		return 0;
	}
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;

	mutex_lock(&acm->mutex);
	if (acm->port.count) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);

		spin_lock_irq(&acm->write_lock);
#ifdef CONFIG_PM
		while ((res = usb_get_from_anchor(&acm->deferred))) {
			/* decrement ref count */
			usb_put_urb(res);
			rv = usb_submit_urb(res, GFP_ATOMIC);
			if (rv < 0) {
				dbg("usb_submit_urb(pending request)"
					" failed: %d", rv);
				usb_unanchor_urb(res);
				acm_write_done(acm, res->context);
			}
		}
		spin_unlock_irq(&acm->write_lock);
#else
		if (acm->delayed_wb) {
			wb = acm->delayed_wb;
			acm->delayed_wb = NULL;
			spin_unlock_irq(&acm->write_lock);
			acm_start_wb(acm, wb);
		} else {
			spin_unlock_irq(&acm->write_lock);
		}
#endif

		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto err_out;

		rv = acm_submit_read_urbs(acm, GFP_NOIO);
	}

err_out:
	mutex_unlock(&acm->mutex);
	return rv;
}