/*
 * Timer callback: asynchronously cancel the URB whose address was stashed
 * in the timer's ->data field (the legacy timer API passes context as an
 * unsigned long).
 */
static void timeout_kill(unsigned long data)
{
	usb_unlink_urb((struct urb *) data);
}
/* This is the common part of the URB message submission code
 *
 * All URBs from the usb-storage driver involved in handling a queued scsi
 * command _must_ pass through this function (or something like it) for the
 * abort mechanisms to work properly.
 *
 * Submits us->current_urb synchronously: blocks until the URB completes,
 * a signal arrives, or @timeout (in jiffies; 0 = wait forever) expires.
 * Returns a negative errno on submission failure/abort, otherwise the
 * final urb->status.
 */
static int usb_stor_msg_common(struct us_data *us, int timeout)
{
	struct completion urb_done;
	long timeleft;
	int status;

	/* don't submit URBs during abort processing */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags))
		return -EIO;

	/* set up data structures for the wakeup system */
	init_completion(&urb_done);

	/* fill the common fields in the URB */
	us->current_urb->context = &urb_done;
	us->current_urb->transfer_flags = 0;

	/* we assume that if transfer_buffer isn't us->iobuf then it
	 * hasn't been mapped for DMA.  Yes, this is clunky, but it's
	 * easier than always having the caller tell us whether the
	 * transfer buffer has already been mapped. */
	if (us->current_urb->transfer_buffer == us->iobuf)
		us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	us->current_urb->transfer_dma = us->iobuf_dma;

	/* submit the URB */
	status = usb_submit_urb(us->current_urb, GFP_NOIO);
	if (status) {
		/* something went wrong */
		return status;
	}

	/* since the URB has been submitted successfully, it's now okay
	 * to cancel it */
	set_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

	/* did an abort occur during the submission?  The flag is re-tested
	 * here to close the race against an abort that ran between the
	 * check at the top and the set_bit above. */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {

		/* cancel the URB, if it hasn't been cancelled already */
		if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
			US_DEBUGP("-- cancelling URB\n");
			usb_unlink_urb(us->current_urb);
		}
	}

	/* wait for the completion of the URB; a zero timeout means wait
	 * indefinitely (GNU ?: elides the middle operand) */
	timeleft = wait_for_completion_interruptible_timeout(
			&urb_done, timeout ? : MAX_SCHEDULE_TIMEOUT);

	clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

	/* timeout (0) or signal (<0): kill the URB synchronously so its
	 * buffer is no longer in use before we return */
	if (timeleft <= 0) {
		US_DEBUGP("%s -- cancelling URB\n",
			timeleft == 0 ? "Timeout" : "Signal");
		usb_kill_urb(us->current_urb);
	}

	/* return the URB status */
	return us->current_urb->status;
}
/* This is the common part of the URB message submission code
 *
 * All URBs from the usb-storage driver involved in handling a queued scsi
 * command _must_ pass through this function (or something like it) for the
 * abort mechanisms to work properly.
 *
 * Older variant: waits uninterruptibly and uses an explicit kernel timer
 * (instead of wait_for_completion_*_timeout) to enforce @timeout jiffies;
 * timeout == 0 means no time limit.
 */
static int usb_stor_msg_common(struct us_data *us, int timeout)
{
	struct completion urb_done;
	struct timer_list to_timer;
	int status;

	/* don't submit URBs during abort/disconnect processing */
	if (us->flags & ABORTING_OR_DISCONNECTING)
		return -EIO;

	/* set up data structures for the wakeup system */
	init_completion(&urb_done);

	/* fill the common fields in the URB */
	us->current_urb->context = &urb_done;
	us->current_urb->actual_length = 0;
	us->current_urb->error_count = 0;
	us->current_urb->status = 0;

	/* we assume that if transfer_buffer isn't us->iobuf then it
	 * hasn't been mapped for DMA.  Yes, this is clunky, but it's
	 * easier than always having the caller tell us whether the
	 * transfer buffer has already been mapped. */
	us->current_urb->transfer_flags =
			URB_ASYNC_UNLINK | URB_NO_SETUP_DMA_MAP;
	if (us->current_urb->transfer_buffer == us->iobuf)
		us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	us->current_urb->transfer_dma = us->iobuf_dma;
	us->current_urb->setup_dma = us->cr_dma;

	/* submit the URB */
	status = usb_submit_urb(us->current_urb, GFP_NOIO);
	if (status) {
		/* something went wrong */
		return status;
	}

	/* since the URB has been submitted successfully, it's now okay
	 * to cancel it */
	set_bit(US_FLIDX_URB_ACTIVE, &us->flags);

	/* did an abort/disconnect occur during the submission?  Re-test to
	 * close the race against the check at the top of the function. */
	if (us->flags & ABORTING_OR_DISCONNECTING) {

		/* cancel the URB, if it hasn't been cancelled already */
		if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->flags)) {
			US_DEBUGP("-- cancelling URB\n");
			usb_unlink_urb(us->current_urb);
		}
	}

	/* submit the timeout timer, if a timeout was requested.
	 * NOTE(review): the callback `timeout_handler` is not visible in
	 * this file chunk and its ->data is `us`, while the nearby
	 * timeout_kill() expects a struct urb * — confirm the two are not
	 * being confused. */
	if (timeout > 0) {
		init_timer(&to_timer);
		to_timer.expires = jiffies + timeout;
		to_timer.function = timeout_handler;
		to_timer.data = (unsigned long) us;
		add_timer(&to_timer);
	}

	/* wait for the completion of the URB (uninterruptible; the timer
	 * above is what bounds the wait) */
	wait_for_completion(&urb_done);
	clear_bit(US_FLIDX_URB_ACTIVE, &us->flags);

	/* clean up the timeout timer; del_timer_sync() guarantees the
	 * handler is not still running on another CPU */
	if (timeout > 0)
		del_timer_sync(&to_timer);

	/* return the URB status */
	return us->current_urb->status;
}
/*
 * Completion handler shared by all URBs of a scatter-gather request.
 * Accumulates transferred bytes into io->bytes, records the first hard
 * fault in io->status, unlinks the not-yet-completed sibling URBs on a
 * fault, and signals usb_sg_wait() when the last URB finishes.
 */
static void sg_complete (struct urb *urb, struct pt_regs *regs)
{
	struct usb_sg_request *io = (struct usb_sg_request *) urb->context;

	spin_lock (&io->lock);

	/* In 2.5 we require hcds' endpoint queues not to progress after fault
	 * reports, until the completion callback (this!) returns.  That lets
	 * device driver code (like this routine) unlink queued urbs first,
	 * if it needs to, since the HC won't work on them at all.  So it's
	 * not possible for page N+1 to overwrite page N, and so on.
	 *
	 * That's only for "hard" faults; "soft" faults (unlinks) sometimes
	 * complete before the HCD can get requests away from hardware,
	 * though never during cleanup after a hard fault.
	 */
	if (io->status
			&& (io->status != -ECONNRESET
				|| urb->status != -ECONNRESET)
			&& urb->actual_length) {
		dev_err (io->dev->bus->controller,
			"dev %s ep%d%s scatterlist error %d/%d\n",
			io->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			urb->status, io->status);
		// BUG ();
	}

	/* first hard fault (unlink errors excluded): remember it and cancel
	 * all URBs queued after this one */
	if (io->status == 0 && urb->status && urb->status != -ECONNRESET) {
		int i, found, status;

		io->status = urb->status;

		/* the previous urbs, and this one, completed already.
		 * unlink pending urbs so they won't rx/tx bad data.
		 * careful: unlink can sometimes be synchronous...
		 */
		spin_unlock (&io->lock);
		for (i = 0, found = 0; i < io->entries; i++) {
			if (!io->urbs [i] || !io->urbs [i]->dev)
				continue;
			if (found) {
				status = usb_unlink_urb (io->urbs [i]);
				if (status != -EINPROGRESS && status != -EBUSY)
					dev_err (&io->dev->dev,
						"%s, unlink --> %d\n",
						__FUNCTION__, status);
			} else if (urb == io->urbs [i])
				found = 1;
		}
		spin_lock (&io->lock);
	}
	/* mark this URB as no longer pending */
	urb->dev = NULL;

	/* on the last completion, signal usb_sg_wait() */
	io->bytes += urb->actual_length;
	io->count--;
	if (!io->count)
		complete (&io->complete);

	spin_unlock (&io->lock);
}
/*
 * Register the DVB subsystem for a tm6000 device: attach the frontend,
 * register the DVB adapter, hook up the tuner (XC2028/XC5000), and
 * initialize the demux and dmxdev layers.  Returns 0 on success or a
 * negative errno, unwinding via the goto chain on failure.
 */
static int register_dvb(struct tm6000_core *dev)
{
	int ret = -1;
	struct tm6000_dvb *dvb = dev->dvb;

	mutex_init(&dvb->mutex);

	dvb->streams = 0;

	/* attach the frontend */
	ret = tm6000_dvb_attach_frontend(dev);
	if (ret < 0) {
		printk(KERN_ERR "tm6000: couldn't attach the frontend!\n");
		goto err;
	}

	/* register the adapter.
	 * NOTE(review): the return value of dvb_register_adapter() is
	 * assigned to ret but never checked — confirm whether a failure
	 * here should bail out before touching dvb->adapter. */
	ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
					THIS_MODULE, &dev->udev->dev, adapter_nr);
	dvb->adapter.priv = dev;

	if (dvb->frontend) {
		switch (dev->tuner_type) {
		case TUNER_XC2028: {
			struct xc2028_config cfg = {
				.i2c_adap = &dev->i2c_adap,
				.i2c_addr = dev->tuner_addr,
			};

			dvb->frontend->callback = tm6000_tuner_callback;
			ret = dvb_register_frontend(&dvb->adapter, dvb->frontend);
			if (ret < 0) {
				printk(KERN_ERR
					"tm6000: couldn't register frontend\n");
				goto adapter_err;
			}

			if (!dvb_attach(xc2028_attach, dvb->frontend, &cfg)) {
				printk(KERN_ERR "tm6000: couldn't register "
						"frontend (xc3028)\n");
				ret = -EINVAL;
				goto frontend_err;
			}
			printk(KERN_INFO "tm6000: XC2028/3028 asked to be "
					"attached to frontend!\n");
			break;
			}
		case TUNER_XC5000: {
			struct xc5000_config cfg = {
				.i2c_address = dev->tuner_addr,
			};

			dvb->frontend->callback = tm6000_xc5000_callback;
			ret = dvb_register_frontend(&dvb->adapter, dvb->frontend);
			if (ret < 0) {
				printk(KERN_ERR
					"tm6000: couldn't register frontend\n");
				goto adapter_err;
			}

			if (!dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap, &cfg)) {
				printk(KERN_ERR "tm6000: couldn't register "
						"frontend (xc5000)\n");
				ret = -EINVAL;
				goto frontend_err;
			}
			printk(KERN_INFO "tm6000: XC5000 asked to be "
					"attached to frontend!\n");
			break;
			}
		}
	} else
		printk(KERN_ERR "tm6000: no frontend found\n");

	/* set up the software demux */
	dvb->demux.dmx.capabilities = DMX_TS_FILTERING |
					DMX_SECTION_FILTERING |
					DMX_MEMORY_BASED_FILTERING;
	dvb->demux.priv = dev;
	dvb->demux.filternum = 8;
	dvb->demux.feednum = 8;
	dvb->demux.start_feed = tm6000_start_feed;
	dvb->demux.stop_feed = tm6000_stop_feed;
	dvb->demux.write_to_decoder = NULL;
	ret = dvb_dmx_init(&dvb->demux);
	if (ret < 0) {
		printk(KERN_ERR "tm6000: dvb_dmx_init failed (errno = %d)\n", ret);
		goto frontend_err;
	}

	dvb->dmxdev.filternum = dev->dvb->demux.filternum;
	dvb->dmxdev.demux = &dev->dvb->demux.dmx;
	dvb->dmxdev.capabilities = 0;

	ret = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
	if (ret < 0) {
		printk(KERN_ERR "tm6000: dvb_dmxdev_init failed (errno = %d)\n", ret);
		goto dvb_dmx_err;
	}

	return 0;

dvb_dmx_err:
	dvb_dmx_release(&dvb->demux);
frontend_err:
	/* NOTE(review): detach before unregister — the usual DVB teardown
	 * order is dvb_unregister_frontend() first, then
	 * dvb_frontend_detach(); confirm against the dvb-core API. */
	if (dvb->frontend) {
		dvb_frontend_detach(dvb->frontend);
		dvb_unregister_frontend(dvb->frontend);
	}
adapter_err:
	dvb_unregister_adapter(&dvb->adapter);
err:
	return ret;
}

/*
 * Tear down everything register_dvb() set up, plus the streaming bulk URB
 * if one was allocated.
 */
static void unregister_dvb(struct tm6000_core *dev)
{
	struct tm6000_dvb *dvb = dev->dvb;

	if (dvb->bulk_urb != NULL) {
		struct urb *bulk_urb = dvb->bulk_urb;

		/* NOTE(review): the transfer buffer is freed before the URB
		 * is (asynchronously) unlinked — if the URB is still in
		 * flight the controller may still be using that buffer;
		 * usb_kill_urb() before kfree() looks safer.  Confirm. */
		kfree(bulk_urb->transfer_buffer);
		bulk_urb->transfer_buffer = NULL;
		usb_unlink_urb(bulk_urb);
		usb_free_urb(bulk_urb);
	}

	/* NOTE(review): same detach/unregister ordering question as the
	 * error path in register_dvb(). */
	if (dvb->frontend) {
		dvb_frontend_detach(dvb->frontend);
		dvb_unregister_frontend(dvb->frontend);
	}
	dvb_dmxdev_release(&dvb->dmxdev);
	dvb_dmx_release(&dvb->demux);
	dvb_unregister_adapter(&dvb->adapter);
	mutex_destroy(&dvb->mutex);
}

/*
 * Extension init hook: allocate the per-device DVB state and register it.
 * Devices without DVB capability (or on a full-speed bus, which cannot
 * carry the unfiltered TS) are skipped with success.
 */
static int dvb_init(struct tm6000_core *dev)
{
	struct tm6000_dvb *dvb;
	int rc;

	if (!dev)
		return 0;

	if (!dev->caps.has_dvb)
		return 0;

	if (dev->udev->speed == USB_SPEED_FULL) {
		printk(KERN_INFO "This USB2.0 device cannot be run on a USB1.1 port. (it lacks a hardware PID filter)\n");
		return 0;
	}

	dvb = kzalloc(sizeof(struct tm6000_dvb), GFP_KERNEL);
	if (!dvb) {
		printk(KERN_INFO "Cannot allocate memory\n");
		return -ENOMEM;
	}

	dev->dvb = dvb;
	rc = register_dvb(dev);
	if (rc < 0) {
		kfree(dvb);
		dev->dvb = NULL;
		/* NOTE(review): registration failure is reported as success
		 * (return 0) — confirm this is intentional best-effort. */
		return 0;
	}

	return 0;
}

/* Extension teardown hook: mirror of dvb_init(). */
static int dvb_fini(struct tm6000_core *dev)
{
	if (!dev)
		return 0;

	if (!dev->caps.has_dvb)
		return 0;

	if (dev->dvb) {
		unregister_dvb(dev);
		kfree(dev->dvb);
		dev->dvb = NULL;
	}

	return 0;
}

/* Registration record handed to the tm6000 extension framework. */
static struct tm6000_ops dvb_ops = {
	.type = TM6000_DVB,
	.name = "TM6000 dvb Extension",
	.init = dvb_init,
	.fini = dvb_fini,
};

static int __init tm6000_dvb_register(void)
{
	return tm6000_register_extension(&dvb_ops);
}

static void __exit tm6000_dvb_unregister(void)
{
	tm6000_unregister_extension(&dvb_ops);
}

module_init(tm6000_dvb_register);
module_exit(tm6000_dvb_unregister);
/*
 * Cancel the printer's in-flight URBs: the write URB always, the read
 * URB only when the interface is bidirectional.
 */
static void usblp_unlink_urbs(struct usblp *usblp)
{
	usb_unlink_urb(&usblp->writeurb);
	if (!usblp->bidir)
		return;
	usb_unlink_urb(&usblp->readurb);
}
/*
 * Timer callback: the timer's ->data field carries the address of the URB
 * to cancel asynchronously.
 */
static void cxacru_timeout_kill(unsigned long data)
{
	struct urb *urb = (struct urb *) data;

	usb_unlink_urb(urb);
}
/*
 * Start or stop the transfer on the B channel.
 *
 * Cancels any in-flight output URBs, reprograms the input side, and then
 * either enables (interrupts, FIFOs, HDLC framing, LEDs) or disables the
 * channel depending on the requested L1 mode.
 */
static void st5481B_mode(struct st5481_bcs *bcs, int mode)
{
	struct st5481_b_out *b_out = &bcs->b_out;
	struct st5481_adapter *adapter = bcs->adapter;

	DBG(4,"B%d,mode=%d", bcs->channel + 1, mode);

	/* nothing to do if the channel is already in this mode */
	if (bcs->mode == mode)
		return;

	bcs->mode = mode;

	// Cancel all USB transfers on this B channel
	usb_unlink_urb(b_out->urb[0]);
	usb_unlink_urb(b_out->urb[1]);
	b_out->busy = 0;

	st5481_in_mode(&bcs->b_in, mode);
	if (bcs->mode != L1_MODE_NULL) {
		// Open the B channel
		if (bcs->mode != L1_MODE_TRANS) {
			/* HDLC framing; 56k mode masks the top bit */
			u32 features = HDLC_BITREVERSE;
			if (bcs->mode == L1_MODE_HDLC_56K)
				features |= HDLC_56KBIT;
			isdnhdlc_out_init(&b_out->hdlc_state, features);
		}
		st5481_usb_pipe_reset(adapter, (bcs->channel+1)*2, NULL, NULL);

		// Enable B channel interrupts
		st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel*2),
			OUT_UP+OUT_DOWN+OUT_UNDERRUN, NULL, NULL);

		// Enable B channel FIFOs (32-byte threshold kicks off the
		// first transfer via st5481B_start_xfer)
		st5481_usb_device_ctrl_msg(adapter,
			OUT_B1_COUNTER+(bcs->channel*2), 32,
			st5481B_start_xfer, bcs);
		if (adapter->number_of_leds == 4) {
			if (bcs->channel == 0) {
				adapter->leds |= B1_LED;
			} else {
				adapter->leds |= B2_LED;
			}
		}
	} else {
		// Disable B channel interrupts
		st5481_usb_device_ctrl_msg(adapter,
			FFMSK_B1+(bcs->channel*2), 0, NULL, NULL);

		// Disable B channel FIFOs
		st5481_usb_device_ctrl_msg(adapter,
			OUT_B1_COUNTER+(bcs->channel*2), 0, NULL, NULL);

		/* NOTE(review): the GPIO_OUT write sits on the else of the
		 * 4-LED check and only in the disable path, so 4-LED
		 * adapters never push the updated leds mask and the enable
		 * path never pushes it at all — looks like a misplaced
		 * brace; confirm against the original driver. */
		if (adapter->number_of_leds == 4) {
			if (bcs->channel == 0) {
				adapter->leds &= ~B1_LED;
			} else {
				adapter->leds &= ~B2_LED;
			}
		} else {
			st5481_usb_device_ctrl_msg(adapter, GPIO_OUT,
				adapter->leds, NULL, NULL);
		}
		/* drop any partially-sent frame */
		if (b_out->tx_skb) {
			dev_kfree_skb_any(b_out->tx_skb);
			b_out->tx_skb = NULL;
		}
	}
}
/*
 * The Device write callback Function
 * If a 8Byte Command is received, it will be send to the camera.
 * After this the driver initiates the request for the answer or
 * just waits until the camera becomes ready.
 *
 * NOTE(review): mdc800->io_lock is held across
 * interruptible_sleep_on_timeout() and mdc800_usb_waitForIRQ(), both of
 * which presumably sleep — sleeping under a spinlock deadlocks/oopses on
 * SMP or with preemption; confirm whether io_lock should be a mutex here.
 */
static ssize_t mdc800_device_write (struct file *file, const char *buf, size_t len, loff_t *pos)
{
	int i=0;

	spin_lock (&mdc800->io_lock);
	/* refuse while a previous command is still in progress */
	if (mdc800->state != READY)
	{
		spin_unlock (&mdc800->io_lock);
		return -EBUSY;
	}
	if (!mdc800->open )
	{
		spin_unlock (&mdc800->io_lock);
		return -EBUSY;
	}

	/* consume the caller's bytes one at a time, assembling 8-byte
	 * command packets */
	while (i<len)
	{
		if (signal_pending (current))
		{
			spin_unlock (&mdc800->io_lock);
			return -EINTR;
		}

		/* check for command start (0x55 resets the packet assembly) */
		if (buf [i] == (char) 0x55)
		{
			mdc800->in_count=0;
			mdc800->out_count=0;
			mdc800->out_ptr=0;
			mdc800->download_left=0;
		}

		/* save command byte */
		if (mdc800->in_count < 8)
		{
			mdc800->in[mdc800->in_count]=buf[i];
			mdc800->in_count++;
		}
		else
		{
			err ("Command is to long !\n");
			spin_unlock (&mdc800->io_lock);
			return -EIO;
		}

		/* Command Buffer full ? -> send it to camera */
		if (mdc800->in_count == 8)
		{
			int answersize;

			if (mdc800_usb_waitForIRQ (0,TO_GET_READY))
			{
				err ("Camera didn't get ready.\n");
				spin_unlock (&mdc800->io_lock);
				return -EIO;
			}

			/* expected answer length depends on the command byte */
			answersize=mdc800_getAnswerSize (mdc800->in[1]);

			mdc800->state=WORKING;
			memcpy (mdc800->write_urb->transfer_buffer, mdc800->in,8);
			mdc800->write_urb->dev = mdc800->dev;
			if (usb_submit_urb (mdc800->write_urb))
			{
				err ("submitting write urb fails (status=%i)", mdc800->write_urb->status);
				spin_unlock (&mdc800->io_lock);
				return -EIO;
			}
			/* wait for the write completion handler to flip state */
			interruptible_sleep_on_timeout (&mdc800->write_wait, TO_WRITE_GET_READY*HZ/1000);
			if (mdc800->state == WORKING)
			{
				/* still WORKING: the write never completed */
				usb_unlink_urb (mdc800->write_urb);
				spin_unlock (&mdc800->io_lock);
				return -EIO;
			}

			switch ((unsigned char) mdc800->in[1])
			{
				case 0x05: /* Download Image */
				case 0x3e: /* Take shot in Fine Mode (WCam Mode) */
					/* image size must have been cached by a
					 * prior 0x07 command */
					if (mdc800->pic_len < 0)
					{
						err ("call 0x07 before 0x05,0x3e");
						mdc800->state=READY;
						spin_unlock (&mdc800->io_lock);
						return -EIO;
					}
					mdc800->pic_len=-1;
					/* fallthrough: download setup shared
					 * with the thumbnail command */

				case 0x09: /* Download Thumbnail */
					mdc800->download_left=answersize+64;
					mdc800->state=DOWNLOAD;
					mdc800_usb_waitForIRQ (0,TO_DOWNLOAD_GET_BUSY);
					break;

				default:
					if (answersize)
					{
						if (mdc800_usb_waitForIRQ (1,TO_READ_FROM_IRQ))
						{
							err ("requesting answer from irq fails");
							spin_unlock (&mdc800->io_lock);
							return -EIO;
						}

						/* Write dummy data, (this is ugly but part of the USB protocol
						 * if you use endpoint 1 as bulk and not as irq) */
						memcpy (mdc800->out, mdc800->camera_response,8);

						/* This is the interpreted answer */
						memcpy (&mdc800->out[8], mdc800->camera_response,8);

						mdc800->out_ptr=0;
						mdc800->out_count=16;

						/* Cache the Imagesize, if command was getImageSize */
						if (mdc800->in [1] == (char) 0x07)
						{
							mdc800->pic_len=(int) 65536*(unsigned char) mdc800->camera_response[0]+256*(unsigned char) mdc800->camera_response[1]+(unsigned char) mdc800->camera_response[2];

							dbg ("cached imagesize = %i",mdc800->pic_len);
						}
					}
					else
					{
						if (mdc800_usb_waitForIRQ (0,TO_DEFAULT_COMMAND))
						{
							err ("Command Timeout.");
							spin_unlock (&mdc800->io_lock);
							return -EIO;
						}
					}
					mdc800->state=READY;
					break;
			}
		}
		i++;
	}
	spin_unlock (&mdc800->io_lock);
	return i;
}
/*
 * stub_recv_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process coming urbs.  Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
						struct usbip_header *pdu)
{
	struct list_head *listhead = &sdev->priv_init;
	struct list_head *ptr;
	unsigned long flags;

	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* search the pending (not yet completed) submissions for the
	 * sequence number the client wants cancelled */
	for (ptr = listhead->next; ptr != listhead; ptr = ptr->next) {
		priv = list_entry(ptr, struct stub_priv, list);
		if (priv->seqnum == pdu->u.cmd_unlink.seqnum) {
			int ret;

			uinfo("unlink urb %p\n", priv->urb);

			/*
			 * This matched urb is not completed yet (i.e., be in
			 * flight in usb hcd hardware/driver).  Now we are
			 * cancelling it.  The unlinking flag means that we are
			 * now not going to return the normal result pdu of a
			 * submission request, but going to return a result pdu
			 * of the unlink request.
			 */
			priv->unlinking = 1;

			/*
			 * In the case that unlinking flag is on, prev->seqnum
			 * is changed from the seqnum of the cancelling urb to
			 * the seqnum of the unlink request.  This will be used
			 * to make the result pdu of the unlink request.
			 */
			priv->seqnum = pdu->base.seqnum;

			spin_unlock_irqrestore(&sdev->priv_lock, flags);

			/*
			 * usb_unlink_urb() is now out of spinlocking to avoid
			 * spinlock recursion since stub_complete() is
			 * sometimes called in this context but not in the
			 * interrupt context.  If stub_complete() is executed
			 * before we call usb_unlink_urb(), usb_unlink_urb()
			 * will return an error value.  In this case, stub_tx
			 * will return the result pdu of this unlink request
			 * though submission is completed and actual unlinking
			 * is not executed.  OK?
			 */
			/* In the above case, urb->status is not -ECONNRESET,
			 * so a driver in a client host will know the failure
			 * of the unlink request ?
			 */
			ret = usb_unlink_urb(priv->urb);
			if (ret != -EINPROGRESS)
				uerr("faild to unlink a urb %p, ret %d\n",
							priv->urb, ret);
			return 0;
		}
	}

	dbg_stub_rx("seqnum %d is not pending\n", pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target is not found in priv_init queue.  It was
	 * already completed and its results is/was going to be sent by a
	 * CMD_RET pdu.  In this case, usb_unlink_urb() is not needed.  We only
	 * return the completeness of this unlink request to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}
/*
 * Portability shim: forward an opaque URB handle to the kernel's
 * usb_unlink_urb().
 */
void SysUsbUnlinkUrb(pUrb urb)
{
	struct urb *u = (struct urb *) urb;

	usb_unlink_urb(u);
}