/*
 * Kick off transmission of write buffer slot @wbn, or defer it if the
 * device is suspended.
 *
 * Returns 0 on success (including the "deferred while suspended" case,
 * deliberately reported as success), -ENODEV if the device is gone, or
 * -EAGAIN (non-CONFIG_PM build only) if a write is already pending.
 * All state transitions happen under acm->write_lock.
 */
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;
#ifdef CONFIG_PM
	struct urb *res;
#endif

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		/* device disconnected: release the buffer slot and bail */
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
		acm->susp_count);
	/* async PM get: may trigger a resume, which later flushes deferrals */
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
#ifdef CONFIG_PM
		/*
		 * Suspended: fill in the URB now but park it on the
		 * deferred anchor; resume (or the next write while awake)
		 * submits it.
		 */
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#else
		/* without CONFIG_PM only a single delayed write can be held */
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else {
			/* slot already taken: drop the PM reference we got above */
			usb_autopm_put_interface_async(acm->control);
			printk(KERN_INFO "%s: acm->delayed_wb is not NULL, "
				"returning -EAGAIN\n", __func__);
			spin_unlock_irqrestore(&acm->write_lock, flags);
			return -EAGAIN;
		}
#endif
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);
#ifdef CONFIG_PM
	/* awake: flush any URBs that were deferred during suspend first */
	while ((res = usb_get_from_anchor(&acm->deferred))) {
		/* decrement ref count*/
		usb_put_urb(res);
		rc = usb_submit_urb(res, GFP_ATOMIC);
		if (rc < 0) {
			dbg("usb_submit_urb(pending request) failed: %d", rc);
			usb_unanchor_urb(res);
			/* res->context is the acm_wb this urb belongs to */
			acm_write_done(acm, res->context);
		}
	}
#endif
	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
/* data interface wrote those outgoing bytes */
static void acm_write_bulk(struct urb *urb, struct pt_regs *regs)
{
	struct acm *drvdata;

	drvdata = (struct acm *)urb->context;

	dbg("Entering acm_write_bulk with status %d\n", urb->status);

	/* retire the finished buffer, then try to push the next one out */
	acm_write_done(drvdata);
	acm_write_start(drvdata);

	if (!ACM_READY(drvdata))
		return;
	schedule_work(&drvdata->work);
}
/* data interface wrote those outgoing bytes */
static void acm_write_bulk(struct urb *urb)
{
	struct acm_wb *write_buf = urb->context;
	struct acm *dev = write_buf->instance;

	dbg("Entering acm_write_bulk with status %d", urb->status);

	/* hand the buffer back and, if the port is up, poke the tasklet */
	acm_write_done(dev, write_buf);
	if (!ACM_READY(dev))
		return;
	schedule_work(&dev->work);
}
static int acm_start_wb(struct acm *acm, struct acm_wb *wb) { int rc; acm->transmitting++; wb->urb->transfer_buffer = wb->buf; wb->urb->transfer_dma = wb->dmah; wb->urb->transfer_buffer_length = wb->len; wb->urb->dev = acm->dev; if ((rc = usb_submit_urb(wb->urb, GFP_ATOMIC)) < 0) { dbg("usb_submit_urb(write bulk) failed: %d", rc); acm_write_done(acm, wb); } return rc; }
/* data interface wrote those outgoing bytes */
static void acm_write_bulk(struct urb *urb)
{
	struct acm_wb *buf = urb->context;
	struct acm *dev = buf->instance;
	int noteworthy;

	/* log when verbose, or on error/short transfer */
	noteworthy = verbose || urb->status ||
			urb->actual_length != urb->transfer_buffer_length;
	if (noteworthy)
		dev_dbg(&dev->data->dev, "tx %d/%d bytes -- > %d\n",
			urb->actual_length, urb->transfer_buffer_length,
			urb->status);

	acm_write_done(dev, buf);
	if (!ACM_READY(dev)) {
		/* port going down: release anyone waiting in drain */
		wake_up_interruptible(&dev->drain_wait);
		return;
	}
	schedule_work(&dev->work);
}
/* data interface wrote those outgoing bytes */
static void acm_write_bulk(struct urb *urb)
{
	struct acm_wb *write_buf = urb->context;
	struct acm *dev = write_buf->instance;
	unsigned long irq_flags;
	int anomalous;

	/* only chatter about errors or short transfers */
	anomalous = urb->status ||
			urb->actual_length != urb->transfer_buffer_length;
	if (anomalous)
		dev_vdbg(&dev->data->dev, "%s - len %d/%d, status %d\n",
			__func__, urb->actual_length,
			urb->transfer_buffer_length, urb->status);

	/* buffer bookkeeping is protected by the write lock */
	spin_lock_irqsave(&dev->write_lock, irq_flags);
	acm_write_done(dev, write_buf);
	spin_unlock_irqrestore(&dev->write_lock, irq_flags);

	schedule_work(&dev->work);
}
static int acm_start_wb(struct acm *acm, struct acm_wb *wb) { int rc; acm->transmitting++; wb->urb->transfer_buffer = wb->buf; wb->urb->transfer_dma = wb->dmah; wb->urb->transfer_buffer_length = wb->len; wb->urb->dev = acm->dev; rc = usb_submit_urb(wb->urb, GFP_ATOMIC); if (rc < 0) { dev_err(&acm->data->dev, "%s - usb_submit_urb(write bulk) failed: %d\n", __func__, rc); acm_write_done(acm, wb); } return rc; }
/*
 * Poke write.
 */
/*
 * Try to push the current write buffer out on the (single) write URB.
 * Returns 0 if there is nothing to send or the write path is busy/gone
 * suppressed as success, -ENODEV if the device disappeared, or the
 * usb_submit_urb() error code.
 */
static int acm_write_start(struct acm *acm)
{
	unsigned long flags;
	int wbn;
	struct acm_wb *wb;
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		/* disconnected */
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	if (!acm->write_ready) {
		/* a write is already in flight; caller is told "ok" */
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}

	wbn = acm->write_current;
	if (!acm_wb_is_used(acm, wbn)) {
		/* current buffer holds no data — nothing to transmit */
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;
	}
	wb = &acm->wb[wbn];

	/* claim the write path before dropping the lock */
	acm->write_ready = 0;
	spin_unlock_irqrestore(&acm->write_lock, flags);

	acm->writeurb->transfer_buffer = wb->buf;
	acm->writeurb->transfer_dma = wb->dmah;
	acm->writeurb->transfer_buffer_length = wb->len;
	acm->writeurb->dev = acm->dev;

	if ((rc = usb_submit_urb(acm->writeurb, GFP_ATOMIC)) < 0) {
		dbg("usb_submit_urb(write bulk) failed: %d", rc);
		/* submission failed: put the write path back in service */
		acm_write_done(acm);
	}
	return rc;
}
/*
 * Resume handler: drop one suspend reference and, once the count hits
 * zero and the port is open, restart the control URB, flush writes that
 * were deferred during suspend, and resubmit the read URBs.
 * Returns 0 on success or a negative errno from URB submission.
 */
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	int rv = 0;
	int cnt;
#ifdef CONFIG_PM
	struct urb *res;
#else
	struct acm_wb *wb;
#endif

	if (!acm) {
		pr_err("%s: !acm\n", __func__);
		return -ENODEV;
	}

	spin_lock_irq(&acm->read_lock);
	if (acm->susp_count > 0) {
		acm->susp_count -= 1;
		cnt = acm->susp_count;
	} else {
		/* not suspended — nothing to do */
		spin_unlock_irq(&acm->read_lock);
		return 0;
	}
	spin_unlock_irq(&acm->read_lock);

	/* still suspended by other interfaces — wait for the last resume */
	if (cnt)
		return 0;

	mutex_lock(&acm->mutex);
	if (acm->port.count) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);

		spin_lock_irq(&acm->write_lock);
#ifdef CONFIG_PM
		/* submit every write URB that was anchored while asleep */
		while ((res = usb_get_from_anchor(&acm->deferred))) {
			/* decrement ref count*/
			usb_put_urb(res);
			rv = usb_submit_urb(res, GFP_ATOMIC);
			if (rv < 0) {
				dbg("usb_submit_urb(pending request)"
					" failed: %d", rv);
				usb_unanchor_urb(res);
				acm_write_done(acm, res->context);
			}
		}
		spin_unlock_irq(&acm->write_lock);
#else
		/* at most one write could be held back without CONFIG_PM */
		if (acm->delayed_wb) {
			wb = acm->delayed_wb;
			acm->delayed_wb = NULL;
			spin_unlock_irq(&acm->write_lock);
			acm_start_wb(acm, wb);
		} else {
			spin_unlock_irq(&acm->write_lock);
		}
#endif

		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto err_out;

		rv = acm_submit_read_urbs(acm, GFP_NOIO);
	}

err_out:
	mutex_unlock(&acm->mutex);
	return rv;
}