/* Halt the pseudo-gadget's OUT endpoint, tracing entry and exit. */
void dtf_if_in_set_halt_out(void)
{
	_dbgmsg_in("IN\n");

	_dbgmsg_gadget("usb_ep_set_halt\n");
	usb_ep_set_halt(_dtf_dev->pg.ep_out);

	_dbgmsg_in("OUT\n");
}
static int check_read_data( struct zero_dev *dev, struct usb_ep *ep, struct usb_request *req ) { unsigned i; u8 *buf = req->buf; for (i = 0; i < req->actual; i++, buf++) { switch (pattern) { /* all-zeroes has no synchronization issues */ case 0: if (*buf == 0) continue; break; /* mod63 stays in sync with short-terminated transfers, * or otherwise when host and gadget agree on how large * each usb transfer request should be. resync is done * with set_interface or set_config. */ case 1: if (*buf == (u8)(i % 63)) continue; break; } ERROR(dev, "bad OUT byte, buf[%d] = %d\n", i, *buf); usb_ep_set_halt(ep); return -EINVAL; } return 0; }
/*
 * Receive one logical chunk on the THOR OUT endpoint.
 *
 * The caller pre-loads dev->out_req->length with the total number of
 * bytes expected.  The request is (re)queued until that many bytes have
 * arrived, polling the UDC interrupt handler while waiting for each
 * completion to set dev->rxdata.
 *
 * Returns the originally requested byte count on success, -EAGAIN if
 * queuing the request fails (the endpoint is halted first), or -1 if
 * the user aborts the wait with Ctrl-C.
 */
static int thor_rx_data(void)
{
	struct thor_dev *dev = thor_func->dev;
	int data_to_rx, tmp, status;

	/* Total expected; tmp preserves it for the success return value. */
	data_to_rx = dev->out_req->length;
	tmp = data_to_rx;
	do {
		/* Ask for whatever is still outstanding. */
		dev->out_req->length = data_to_rx;
		debug("dev->out_req->length:%d dev->rxdata:%d\n",
		      dev->out_req->length, dev->rxdata);

		status = usb_ep_queue(dev->out_ep, dev->out_req, 0);
		if (status) {
			error("kill %s: resubmit %d bytes --> %d",
			      dev->out_ep->name, dev->out_req->length, status);
			usb_ep_set_halt(dev->out_ep);
			return -EAGAIN;
		}

		/* Busy-wait for the completion callback to raise rxdata,
		 * servicing controller interrupts by polling. */
		while (!dev->rxdata) {
			usb_gadget_handle_interrupts(0);
			if (ctrlc())
				return -1;
		}
		dev->rxdata = 0;
		data_to_rx -= dev->out_req->actual;
	} while (data_to_rx);

	return tmp;
}
static void thor_tx_data(unsigned char *data, int len) { struct thor_dev *dev = thor_func->dev; unsigned char *ptr = dev->in_req->buf; int status; memset(ptr, 0, len); memcpy(ptr, data, len); dev->in_req->length = len; debug("%s: dev->in_req->length:%d to_cpy:%d\n", __func__, dev->in_req->length, sizeof(data)); status = usb_ep_queue(dev->in_ep, dev->in_req, 0); if (status) { error("kill %s: resubmit %d bytes --> %d", dev->in_ep->name, dev->in_req->length, status); usb_ep_set_halt(dev->in_ep); } /* Wait until tx interrupt received */ while (!dev->txdata) usb_gadget_handle_interrupts(0); dev->txdata = 0; }
/* Halt the pseudo-gadget's bulk IN endpoint, tracing entry and exit. */
void dtf_if_in_set_halt_bulk_in(void)
{
	_dbgmsg_in("IN\n");

	_dbgmsg_gadget("usb_ep_set_halt\n");
	usb_ep_set_halt(_dtf_dev->pg.ep_in);

	_dbgmsg_in("OUT\n");
}
static void acm_complete_set_line_coding(struct usb_ep *ep, struct usb_request *req) { struct f_acm *acm = ep->driver_data; struct usb_composite_dev *cdev = acm->port.func.config->cdev; if (req->status != 0) { DBG(cdev, "acm ttyGS%d completion, err %d\n", acm->port_num, req->status); return; } /* normal completion */ if (req->actual != sizeof(acm->port_line_coding)) { DBG(cdev, "acm ttyGS%d short resp, len %d\n", acm->port_num, req->actual); usb_ep_set_halt(ep); } else { struct usb_cdc_line_coding *value = req->buf; /* REVISIT: we currently just remember this data. * If we change that, (a) validate it first, then * (b) update whatever hardware needs updating, * (c) worry about locking. This is information on * the order of 9600-8-N-1 ... most of which means * nothing unless we control a real RS232 line. */ acm->port_line_coding = *value; } }
/*
 * Completion handler for the jig response IN transfers.
 *
 * On normal completion it accumulates the bytes sent; while fewer than
 * 64 bytes have gone out it sends more of the response via
 * jig_response_send(), otherwise it advances the state machine to
 * DEVICE5_READY and arms the 150ms timer.  Disconnect/reset statuses
 * drop the request; any other error resubmits it, halting the endpoint
 * if the resubmit fails.  dev->lock is held across all state updates.
 */
static void jig_response_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct psfreedom_device *dev = ep->driver_data;
	int status = req->status;
	unsigned long flags;

	spin_lock_irqsave (&dev->lock, flags);
	DBG (dev, "Jig response sent (status %d). Sent data so far : %d + %d\n",
	     status, dev->response_len, req->length);

	switch (status) {
	case 0:				/* normal completion */
		if (ep == dev->in_ep) {
			/* our transmit completed. see if there's more to go.
			 * hub_transmit eats req, don't queue it again. */
			dev->response_len += req->length;
			if (dev->response_len < 64) {
				jig_response_send (dev, req);
			} else {
				/* full 64-byte response delivered */
				dev->status = DEVICE5_READY;
				SET_TIMER (150);
			}
			spin_unlock_irqrestore (&dev->lock, flags);
			return;
		}
		break;

	/* this endpoint is normally active while we're configured */
	case -ECONNABORTED:		/* hardware forced ep reset */
	case -ESHUTDOWN:		/* disconnect from host */
		VDBG(dev, "%s gone (%d), %d/%d\n", ep->name, status,
		     req->actual, req->length);
		/* fallthrough: shares the dequeue cleanup below */
	case -ECONNRESET:		/* request dequeued */
		hub_interrupt_queued = 0;
		spin_unlock_irqrestore (&dev->lock, flags);
		return;

	case -EOVERFLOW:		/* buffer overrun on read means that
					 * we didn't provide a big enough
					 * buffer.
					 */
		/* fallthrough into default logging */
	default:
		DBG(dev, "%s complete --> %d, %d/%d\n", ep->name,
		    status, req->actual, req->length);
		break;
	case -EREMOTEIO:		/* short read */
		break;
	}

	/* resubmit the request for the error cases that fall through */
	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		ERROR(dev, "kill %s: resubmit %d bytes --> %d\n",
		      ep->name, req->length, status);
		usb_ep_set_halt(ep);
		/* FIXME recover later ... somehow */
	}
	spin_unlock_irqrestore (&dev->lock, flags);
}
/* Received challenge data */
/*
 * Completion handler for the jig challenge OUT transfers.
 *
 * On normal completion it accumulates received bytes into
 * dev->challenge_len; once 64 or more bytes have arrived it advances
 * the state machine to DEVICE5_CHALLENGED and arms the 450ms timer.
 * Disconnect/reset statuses drop the request; other errors resubmit
 * it, halting the endpoint if the resubmit fails.  dev->lock is held
 * across all state updates.
 */
static void jig_interrupt_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct psfreedom_device *dev = ep->driver_data;
	int status = req->status;
	unsigned long flags;

	spin_lock_irqsave (&dev->lock, flags);
	DBG (dev, "******Out interrupt complete (status %d) : length %d, actual %d\n",
	     status, req->length, req->actual);

	switch (status) {
	case 0:				/* normal completion */
		if (ep == dev->out_ep) {
			/* our transmit completed */
			/* TODO handle data */
			dev->challenge_len += req->actual;
			DBG (dev, "******Challenge length : %d\n",
			     dev->challenge_len);
			if (dev->challenge_len >= 64) {
				/* full challenge received */
				dev->status = DEVICE5_CHALLENGED;
				SET_TIMER (450);
			}
		}
		break;

	/* this endpoint is normally active while we're configured */
	case -ECONNABORTED:		/* hardware forced ep reset */
	case -ECONNRESET:		/* request dequeued */
	case -ESHUTDOWN:		/* disconnect from host */
		VDBG(dev, "%s gone (%d), %d/%d\n", ep->name, status,
		     req->actual, req->length);
		spin_unlock_irqrestore (&dev->lock, flags);
		return;

	case -EOVERFLOW:		/* buffer overrun on read means that
					 * we didn't provide a big enough
					 * buffer.
					 */
		/* fallthrough into default logging */
	default:
		DBG(dev, "%s complete --> %d, %d/%d\n", ep->name,
		    status, req->actual, req->length);
		break;
	case -EREMOTEIO:		/* short read */
		break;
	}

	/* resubmit — normal completion and benign errors land here */
	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		ERROR(dev, "kill %s: resubmit %d bytes --> %d\n",
		      ep->name, req->length, status);
		usb_ep_set_halt(ep);
		/* FIXME recover later ... somehow */
	}
	spin_unlock_irqrestore (&dev->lock, flags);
}
/*
 * Pump video data into the USB requests: fill the available USB
 * requests (listed in req_free) with video data from the queued
 * buffers until either list runs dry.
 */
static int uvc_video_pump(struct uvc_video *video)
{
	struct usb_request *req;
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret;

	/* NOTE(review): the twin of this function carries a FIXME about a
	 * possible race with the request completion handler — confirm. */
	while (1) {
		/* Retrieve the first available USB request, protected by
		 * the request lock. */
		spin_lock_irqsave(&video->req_lock, flags);
		if (list_empty(&video->req_free)) {
			spin_unlock_irqrestore(&video->req_lock, flags);
			return 0;
		}
		req = list_first_entry(&video->req_free, struct usb_request,
					list);
		list_del(&req->list);
		spin_unlock_irqrestore(&video->req_lock, flags);

		/* Retrieve the first available video buffer and fill the
		 * request, protected by the video queue irqlock. */
		spin_lock_irqsave(&video->queue.irqlock, flags);
		buf = uvc_queue_head(&video->queue);
		if (buf == NULL) {
			spin_unlock_irqrestore(&video->queue.irqlock, flags);
			break;
		}

		video->encode(req, video, buf);

		/* Queue the USB request; halt the endpoint on failure. */
		if ((ret = usb_ep_queue(video->ep, req, GFP_KERNEL)) < 0) {
			printk(KERN_INFO "Failed to queue request (%d)\n",
				ret);
			usb_ep_set_halt(video->ep);
			spin_unlock_irqrestore(&video->queue.irqlock, flags);
			break;
		}
		spin_unlock_irqrestore(&video->queue.irqlock, flags);
	}

	/* The loop broke with a popped-but-unqueued request: give it
	 * back to the free list. */
	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
	spin_unlock_irqrestore(&video->req_lock, flags);
	return 0;
}
/* * uvc_video_pump - Pump video data into the USB requests * * This function fills the available USB requests (listed in req_free) with * video data from the queued buffers. */ static int uvc_video_pump(struct uvc_video *video) { struct usb_request *req; struct uvc_buffer *buf; unsigned long flags; int ret; /* FIXME TODO Race between uvc_video_pump and requests completion * handler ??? */ while (1) { /* Retrieve the first available USB request, protected by the * request lock. */ spin_lock_irqsave(&video->req_lock, flags); if (list_empty(&video->req_free)) { spin_unlock_irqrestore(&video->req_lock, flags); return 0; } req = list_first_entry(&video->req_free, struct usb_request, list); list_del(&req->list); spin_unlock_irqrestore(&video->req_lock, flags); /* Retrieve the first available video buffer and fill the * request, protected by the video queue irqlock. */ spin_lock_irqsave(&video->queue.irqlock, flags); buf = uvc_queue_head(&video->queue); if (buf == NULL) { spin_unlock_irqrestore(&video->queue.irqlock, flags); break; } video->encode(req, video, buf); /* Queue the USB request */ if ((ret = usb_ep_queue(video->ep, req, GFP_KERNEL)) < 0) { printk(KERN_INFO "Failed to queue request (%d)\n", ret); usb_ep_set_halt(video->ep); spin_unlock_irqrestore(&video->queue.irqlock, flags); break; } spin_unlock_irqrestore(&video->queue.irqlock, flags); } spin_lock_irqsave(&video->req_lock, flags); list_add_tail(&req->list, &video->req_free); spin_unlock_irqrestore(&video->req_lock, flags); return 0; }
/* if there is only one request in the queue, there'll always be an
 * irq delay between end of one request and start of the next.
 * that prevents using hardware dma queues.
 */
/*
 * Completion handler for the source/sink test endpoints: validate or
 * regenerate the buffer on normal completion and resubmit; free the
 * request on disconnect/reset.
 */
static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct zero_dev *dev = ep->driver_data;
	int status = req->status;

	switch (status) {
	case 0:				/* normal completion? */
		if (ep == dev->out_ep) {
			/* sink: verify what the host sent, then scrub
			 * the buffer so stale data can't pass a later
			 * check by accident */
			check_read_data(dev, ep, req);
			memset(req->buf, 0x55, req->length);
		} else
			/* source: refill with the next pattern data */
			reinit_write_data(ep, req);
		break;

	/* this endpoint is normally active while we're configured */
	case -ECONNABORTED:		/* hardware forced ep reset */
	case -ECONNRESET:		/* request dequeued */
	case -ESHUTDOWN:		/* disconnect from host */
		VDBG(dev, "%s gone (%d), %d/%d\n", ep->name, status,
		     req->actual, req->length);
		if (ep == dev->out_ep)
			check_read_data(dev, ep, req);
		free_ep_req(ep, req);
		return;

	case -EOVERFLOW:		/* buffer overrun on read means that
					 * we didn't provide a big enough
					 * buffer.
					 */
		/* fallthrough into default logging */
	default:
#if 1
		DBG(dev, "%s complete --> %d, %d/%d\n", ep->name,
		    status, req->actual, req->length);
#endif
		/* fallthrough */
	case -EREMOTEIO:		/* short read */
		break;
	}

	/* resubmit to keep the endpoint streaming */
	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		ERROR(dev, "kill %s: resubmit %d bytes --> %d\n",
		      ep->name, req->length, status);
		usb_ep_set_halt(ep);
		/* FIXME recover later ... somehow */
	}
}
/*
 * Completion handler for video streaming requests: on success, encode
 * the next queued buffer into the request and requeue it immediately;
 * on error or when no buffer is available, park the request back on
 * the req_free list for uvc_video_pump() to pick up later.
 */
static void uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_video *video = req->context;
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret;

	switch (req->status) {
	case 0:
		break;
	case -ESHUTDOWN:
		/* disconnect — keep the request but stop streaming */
		printk(KERN_INFO "VS request cancelled.\n");
		goto requeue;
	default:
		printk(KERN_INFO "VS request completed with status %d.\n",
			req->status);
		goto requeue;
	}

	spin_lock_irqsave(&video->queue.irqlock, flags);
	buf = uvc_queue_head(&video->queue);
	if (buf == NULL) {
		/* nothing to send right now */
		spin_unlock_irqrestore(&video->queue.irqlock, flags);
		goto requeue;
	}

	video->encode(req, video, buf);

	if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) {
		printk(KERN_INFO "Failed to queue request (%d).\n", ret);
		usb_ep_set_halt(ep);
		spin_unlock_irqrestore(&video->queue.irqlock, flags);
		goto requeue;
	}
	spin_unlock_irqrestore(&video->queue.irqlock, flags);

	return;

requeue:
	/* return the request to the free list under the request lock */
	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
	spin_unlock_irqrestore(&video->req_lock, flags);
}
static int uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data) { struct usb_composite_dev *cdev = uvc->func.config->cdev; struct usb_request *req = uvc->control_req; if (data->length < 0) return usb_ep_set_halt(cdev->gadget->ep0); req->length = min_t(unsigned int, uvc->event_length, data->length); req->zero = data->length < uvc->event_length; req->dma = DMA_ADDR_INVALID; memcpy(req->buf, data->data, data->length); return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL); }
/* handle a synchronous IN bulk/intr/iso transfer */
/*
 * Copy len bytes from userspace and submit them as one IN transfer via
 * ep_io().  An I/O call in the "wrong direction" (writing to an OUT
 * endpoint) halts that endpoint and returns -EBADMSG, except for iso
 * endpoints which return -EINVAL.
 *
 * NOTE(review): every exit path drops data->lock, so get_ready_ep()
 * evidently returns with it held — confirm against its definition.
 */
static ssize_t
ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct ep_data *data = fd->private_data;
	void *kbuf;
	ssize_t value;

	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
		return value;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (!usb_endpoint_dir_in(&data->desc)) {
		if (usb_endpoint_xfer_isoc(&data->desc)) {
			mutex_unlock(&data->lock);
			return -EINVAL;
		}
		DBG (data->dev, "%s halt\n", data->name);
		spin_lock_irq (&data->dev->lock);
		/* ep may be cleared by disconnect while we slept */
		if (likely (data->ep != NULL))
			usb_ep_set_halt (data->ep);
		spin_unlock_irq (&data->dev->lock);
		mutex_unlock(&data->lock);
		return -EBADMSG;
	}

	/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */

	value = -ENOMEM;
	kbuf = kmalloc (len, GFP_KERNEL);
	if (!kbuf)
		goto free1;
	if (copy_from_user (kbuf, buf, len)) {
		value = -EFAULT;
		goto free1;
	}

	/* synchronous transfer; returns bytes written or negative errno */
	value = ep_io (data, kbuf, len);
	VDEBUG (data->dev, "%s write %zu IN, status %d\n",
		data->name, len, (int) value);
free1:
	mutex_unlock(&data->lock);
	kfree (kbuf);	/* kfree(NULL) is a no-op on the ENOMEM path */
	return value;
}
/* handle a synchronous OUT bulk/intr/iso transfer */
/*
 * Perform one OUT transfer of up to len bytes via ep_io() and copy the
 * received data back to userspace.  An I/O call in the "wrong
 * direction" (reading from an IN endpoint) halts that endpoint and
 * returns -EBADMSG, except for iso endpoints which return -EINVAL.
 *
 * NOTE(review): every exit path drops data->lock, so get_ready_ep()
 * evidently returns with it held — confirm against its definition.
 */
static ssize_t
ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
	struct ep_data *data = fd->private_data;
	void *kbuf;
	ssize_t value;

	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
		return value;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (usb_endpoint_dir_in(&data->desc)) {
		if (usb_endpoint_xfer_isoc(&data->desc)) {
			mutex_unlock(&data->lock);
			return -EINVAL;
		}
		DBG (data->dev, "%s halt\n", data->name);
		spin_lock_irq (&data->dev->lock);
		/* ep may be cleared by disconnect while we slept */
		if (likely (data->ep != NULL))
			usb_ep_set_halt (data->ep);
		spin_unlock_irq (&data->dev->lock);
		mutex_unlock(&data->lock);
		return -EBADMSG;
	}

	/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */

	value = -ENOMEM;
	kbuf = kmalloc (len, GFP_KERNEL);
	if (unlikely (!kbuf))
		goto free1;

	/* synchronous transfer; returns bytes read or negative errno */
	value = ep_io (data, kbuf, len);
	VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
		data->name, len, (int) value);
	if (value >= 0 && copy_to_user (buf, kbuf, value))
		value = -EFAULT;
free1:
	mutex_unlock(&data->lock);
	kfree (kbuf);	/* kfree(NULL) is a no-op on the ENOMEM path */
	return value;
}
static void gser_complete_set_line_coding(struct usb_ep *ep, struct usb_request *req) { struct f_gser *gser = ep->driver_data; struct usb_composite_dev *cdev = gser->port.func.config->cdev; if (req->status != 0) { DBG(cdev, "gser ttyGS%d completion, err %d\n", gser->port_num, req->status); return; } /* normal completion */ if (req->actual != sizeof(gser->port_line_coding)) { DBG(cdev, "gser ttyGS%d short resp, len %d\n", gser->port_num, req->actual); usb_ep_set_halt(ep); } else { struct usb_cdc_line_coding *value = req->buf; gser->port_line_coding = *value; } }