Example #1
/*
 * Interpret the results of a URB transfer
 *
 * This function prints appropriate debugging messages, clears halts on
 * non-control endpoints, and translates the status to the corresponding
 * USB_STOR_XFER_xxx return code.
 */
static int interpret_urb_result(struct us_data *us, unsigned int pipe,
		unsigned int length, int result, unsigned int partial)
{
	US_DEBUGP("Status code %d; transferred %u/%u\n",
			result, partial, length);
	switch (result) {

	/* no error code; did we send all the data? */
	case 0:
		if (partial != length) {
			US_DEBUGP("-- short transfer\n");
			return USB_STOR_XFER_SHORT;
		}

		US_DEBUGP("-- transfer complete\n");
		return USB_STOR_XFER_GOOD;

	/* stalled */
	case -EPIPE:
		/* for control endpoints (used by CB[I]), a stall indicates
		 * a failed command */
		if (usb_pipecontrol(pipe)) {
			US_DEBUGP("-- stall on control pipe\n");
			return USB_STOR_XFER_STALLED;
		}

		/* for other sorts of endpoint, clear the stall */
		US_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe);
		if (usb_stor_clear_halt(us, pipe) < 0)
			return USB_STOR_XFER_ERROR;
		return USB_STOR_XFER_STALLED;

	/* babble - the device tried to send more than we wanted to read */
	case -EOVERFLOW:
		US_DEBUGP("-- babble\n");
		return USB_STOR_XFER_LONG;

	/* the transfer was cancelled by abort, disconnect, or timeout */
	case -ECONNRESET:
		US_DEBUGP("-- transfer cancelled\n");
		return USB_STOR_XFER_ERROR;

	/* short scatter-gather read transfer */
	case -EREMOTEIO:
		US_DEBUGP("-- short read transfer\n");
		return USB_STOR_XFER_SHORT;

	/* abort or disconnect in progress */
	case -EIO:
		US_DEBUGP("-- abort or disconnect in progress\n");
		return USB_STOR_XFER_ERROR;

	/* the catch-all error case */
	default:
		US_DEBUGP("-- unknown error\n");
		return USB_STOR_XFER_ERROR;
	}
}
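For context, a hedged sketch of how a caller might feed this helper: obtain a status code and a transferred-byte count from a synchronous transfer, then let interpret_urb_result() translate them into a USB_STOR_XFER_xxx code. The helper name, the use of usb_bulk_msg(), and the 5000 ms timeout are illustrative assumptions; the real usb-storage transport drives its own URB instead.

static int do_bulk_and_interpret(struct us_data *us, unsigned int pipe,
		void *buf, unsigned int length)
{
	int actual = 0;
	/* synchronous bulk transfer: returns 0 or a negative URB-style error */
	int result = usb_bulk_msg(us->pusb_dev, pipe, buf, length,
			&actual, 5000);

	/* map (result, actual) onto a USB_STOR_XFER_xxx code */
	return interpret_urb_result(us, pipe, length, result, actual);
}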
Example #2
File: urb.c Project: cilynx/dd-wrt
/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").  
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number.  If the submission is successful, the complete()
 * callback from the URB will be called exactly once, when the USB core and
 * Host Controller Driver (HCD) are finished with the URB.  When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request.  The completion handler may then
 * immediately free or reuse that URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.  Not all isochronous transfer scheduling policies
 * will work, but most host controller drivers should easily handle ISO
 * queues going from now until 10-200 msec into the future.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 * 
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int			pipe, temp, max;
	struct usb_device	*dev;
	int			is_out;

	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	if (!(dev = urb->dev) ||
	    (dev->state < USB_STATE_DEFAULT) ||
	    (!dev->bus) || (dev->devnum <= 0))
		return -ENODEV;
	if (dev->bus->controller->power.power_state.event != PM_EVENT_ON
			|| dev->state == USB_STATE_SUSPENDED)
		return -EHOSTUNREACH;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	pipe = urb->pipe;
	temp = usb_pipetype(pipe);
	is_out = usb_pipeout(pipe);

	if (!usb_pipecontrol(pipe) && dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	/* FIXME there should be a sharable lock protecting us against
	 * config/altsetting changes and disconnects, kicking in here.
	 * (here == before maxpacket, and eventually endpoint type,
	 * checks get made.)
	 */

	max = usb_maxpacket(dev, pipe, is_out);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_pipeendpoint(pipe), is_out ? "out" : "in",
			__FUNCTION__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (temp == PIPE_ISOCHRONOUS) {
		int	n, len;

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int	mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}
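To make the Memory Flags rules above concrete, here is a hedged sketch with hypothetical driver callbacks (not taken from the project listed; it assumes the modern completion signature without the old pt_regs argument, and omits error handling for the resubmission).

static void my_urb_complete(struct urb *urb)
{
	/* completion handlers run in atomic context, so resubmit with GFP_ATOMIC */
	if (urb->status == 0)
		usb_submit_urb(urb, GFP_ATOMIC);
}

static int my_start_io(struct urb *urb)
{
	/* ordinary process context (e.g. probe or open): GFP_KERNEL is allowed */
	return usb_submit_urb(urb, GFP_KERNEL);
}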
Example #3
static int interpret_urb_result(struct us_data *us, unsigned int pipe,
		unsigned int length, int result, unsigned int partial)
{
	US_DEBUGP("Status code %d; transferred %u/%u\n",
			result, partial, length);
	switch (result) {

	
	case 0:
		if (partial != length) {
			US_DEBUGP("-- short transfer\n");
			return USB_STOR_XFER_SHORT;
		}

		US_DEBUGP("-- transfer complete\n");
		return USB_STOR_XFER_GOOD;

	
	case -EPIPE:
		if (usb_pipecontrol(pipe)) {
			US_DEBUGP("-- stall on control pipe\n");
			return USB_STOR_XFER_STALLED;
		}

		
		US_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe);
		if (usb_stor_clear_halt(us, pipe) < 0)
			return USB_STOR_XFER_ERROR;
		return USB_STOR_XFER_STALLED;

	
	case -EOVERFLOW:
		US_DEBUGP("-- babble\n");
		return USB_STOR_XFER_LONG;

	
	case -ECONNRESET:
		US_DEBUGP("-- transfer cancelled\n");
		return USB_STOR_XFER_ERROR;

	
	case -EREMOTEIO:
		US_DEBUGP("-- short read transfer\n");
		return USB_STOR_XFER_SHORT;

	
	case -EIO:
		US_DEBUGP("-- abort or disconnect in progress\n");
		return USB_STOR_XFER_ERROR;

	
	default:
		US_DEBUGP("-- unknown error\n");
		return USB_STOR_XFER_ERROR;
	}
}
Example #4
static void timeout_kill(unsigned long data)
{
	struct urb	*urb = (struct urb *) data;

	dev_warn(&urb->dev->dev, "%s timeout on ep%d%s\n",
		usb_pipecontrol(urb->pipe) ? "control" : "bulk",
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out");
	usb_unlink_urb(urb);
}
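A hedged sketch of how such a watchdog might be armed before submitting the urb, using the old timer API that matches the handler's (unsigned long) argument above; arm_urb_timeout() and the 5 second budget are illustrative only.

static void arm_urb_timeout(struct timer_list *timer, struct urb *urb)
{
	init_timer(timer);
	timer->function = timeout_kill;
	timer->data = (unsigned long) urb;	/* handed back to timeout_kill() */
	mod_timer(timer, jiffies + msecs_to_jiffies(5000));
}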
Example #5
void urb_print (urb_t * urb, char * str, int small)
{
	unsigned int pipe= urb->pipe;
	int i, len;

	if (!urb->dev || !urb->dev->bus) {
		dbg("%s URB: no dev", str);
		return;
	}

	printk("%s URB:[%4x] dev:%2d,ep:%2d-%c,type:%s,flags:%4x,len:%d/%d,stat:%d(%x)\n",
			str,
		 	hci_get_current_frame_number (urb->dev),
		 	usb_pipedevice (pipe),
		 	usb_pipeendpoint (pipe),
		 	usb_pipeout (pipe)? 'O': 'I',
		 	usb_pipetype (pipe) < 2? (usb_pipeint (pipe)? "INTR": "ISOC"):
		 		(usb_pipecontrol (pipe)? "CTRL": "BULK"),
		 	urb->transfer_flags,
		 	urb->actual_length,
		 	urb->transfer_buffer_length,
		 	urb->status, urb->status);
	if (!small) {
		if (usb_pipecontrol (pipe)) {
			printk ( __FILE__ ": cmd(8):");
			for (i = 0; i < 8 ; i++)
				printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
			printk ("\n");
		}
		if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
			printk ( __FILE__ ": data(%d/%d):",
				urb->actual_length,
				urb->transfer_buffer_length);
			len = usb_pipeout (pipe)?
						urb->transfer_buffer_length: urb->actual_length;
			for (i = 0; i < 16 && i < len; i++)
				printk (" %02x", ((__u8 *) urb->transfer_buffer) [i]);
			printk ("%s stat:%d\n", i < len? "...": "", urb->status);
		}
	}
}
Example #6
static inline char mon_text_get_setup(struct mon_event_text *ep,
    struct urb *urb, char ev_type)
{

	if (!usb_pipecontrol(urb->pipe) || ev_type != 'S')
		return '-';

	if (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)
		return mon_dmapeek(ep->setup, urb->setup_dma, SETUP_MAX);
	if (urb->setup_packet == NULL)
		return 'Z';	/* '0' would be not as pretty. */

	memcpy(ep->setup, urb->setup_packet, SETUP_MAX);
	return 0;
}
/**ltl
 * Purpose: DMA-map the data to be transferred to the USB device.
 * Parameters:
 * Return value: number of scatter-gather list entries
 * Note: this interface is used by the usb_storage driver
 */
int usb_buffer_map_sg (struct usb_device *dev, unsigned pipe,
		struct scatterlist *sg, int nents)
{
	struct usb_bus		*bus;
	struct device		*controller;

	if (!dev
			|| usb_pipecontrol (pipe)
			|| !(bus = dev->bus)
			|| !(controller = bus->controller)
			|| !controller->dma_mask)
		return -1;

	// FIXME generic api broken like pci, can't report errors
	return dma_map_sg (controller, sg, nents,
			usb_pipein (pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
    int len, char ev_type)
{
	int pipe = urb->pipe;

	if (len <= 0)
		return 'L';
	if (len >= DATA_MAX)
		len = DATA_MAX;

	/*
	 * Bulk is easy to shortcut reliably. 
	 * XXX Other pipe types need consideration. Currently, we overdo it
	 * and collect garbage for them: better more than less.
	 */
	if (usb_pipebulk(pipe) || usb_pipecontrol(pipe)) {
		if (usb_pipein(pipe)) {
			if (ev_type == 'S')
				return '<';
		} else {
			if (ev_type == 'C')
				return '>';
		}
	}

	/*
	 * The check to see if it's safe to poke at data has an enormous
	 * number of corner cases, but it seems that the following is
	 * more or less safe.
	 *
	 * We do not even try to look at transfer_buffer, because it can
	 * contain non-NULL garbage in case the upper level promised to
	 * set DMA for the HCD.
	 */
	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
		return mon_dmapeek(ep->data, urb->transfer_dma, len);

	if (urb->transfer_buffer == NULL)
		return 'Z';	/* '0' would be not as pretty. */

	memcpy(ep->data, urb->transfer_buffer, len);
	return 0;
}
Example #9
/*
 *		pipe control
 */
static void usbhsh_endpoint_sequence_save(struct usbhsh_hpriv *hpriv,
					  struct urb *urb,
					  struct usbhs_pkt *pkt)
{
	int len = urb->actual_length;
	int maxp = usb_endpoint_maxp(&urb->ep->desc);
	int t = 0;

	/* DCP is out of sequence control */
	if (usb_pipecontrol(urb->pipe))
		return;

	/*
	 * renesas_usbhs pipe has a limitation in a number.
	 * So, driver should re-use the limited pipe for each device/endpoint.
	 * DATA0/1 sequence should be saved for it.
	 * see [image of mod_host]
	 *     [HARDWARE LIMITATION]
	 */

	/*
	 * next sequence depends on actual_length
	 *
	 * ex) actual_length = 1147, maxp = 512
	 * data0 : 512
	 * data1 : 512
	 * data0 : 123
	 * data1 is the next sequence
	 */
	t = len / maxp;
	if (len % maxp)
		t++;
	if (pkt->zero)
		t++;
	t %= 2;

	if (t)
		usb_dotoggle(urb->dev,
			     usb_pipeendpoint(urb->pipe),
			     usb_pipeout(urb->pipe));
}
Example #10
//----- interpret_urb_result() ---------------------
static int interpret_urb_result(struct us_data *us, unsigned int pipe,
		unsigned int length, int result, unsigned int partial)
{
	//printk("transport --- interpret_urb_result\n");
	switch (result) {
	/* no error code; did we send all the data? */
	case 0:
		if (partial != length)
		{
			//printk("-- short transfer\n");
			return USB_STOR_XFER_SHORT;
		}
		//printk("-- transfer complete\n");
		return USB_STOR_XFER_GOOD;
	case -EPIPE:
		if (usb_pipecontrol(pipe))
		{
			//printk("-- stall on control pipe\n");
			return USB_STOR_XFER_STALLED;
		}
		//printk("clearing endpoint halt for pipe 0x%x\n", pipe);
		if (usb_stor_clear_halt(us, pipe) < 0)
			return USB_STOR_XFER_ERROR;
		return USB_STOR_XFER_STALLED;
	case -EOVERFLOW:
		//printk("-- babble\n");
		return USB_STOR_XFER_LONG;
	case -ECONNRESET:
		//printk("-- transfer cancelled\n");
		return USB_STOR_XFER_ERROR;
	case -EREMOTEIO:
		//printk("-- short read transfer\n");
		return USB_STOR_XFER_SHORT;
	case -EIO:
		//printk("-- abort or disconnect in progress\n");
		return USB_STOR_XFER_ERROR;
	default:
		//printk("-- unknown error\n");
		return USB_STOR_XFER_ERROR;
	}
}
Example #11
/* this function must be called with interrupt disabled */
static void pipe_setting(struct r8a66597 *r8a66597, struct r8a66597_td *td)
{
	struct r8a66597_pipe_info *info;
	struct urb *urb = td->urb;

	if (td->pipenum > 0) {
		info = &td->pipe->info;
		cfifo_change(r8a66597, 0);
		pipe_buffer_setting(r8a66597, info);

		if (!usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				   usb_pipeout(urb->pipe)) &&
		    !usb_pipecontrol(urb->pipe)) {
			r8a66597_pipe_toggle(r8a66597, td->pipe, 0);
			pipe_toggle_set(r8a66597, td->pipe, urb, 0);
			clear_all_buffer(r8a66597, td->pipe);
			usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				      usb_pipeout(urb->pipe), 1);
		}
		pipe_toggle_restore(r8a66597, td->pipe, urb);
	}
}
Example #12
/**
 * Initializes a QTD structure.
 *
 * @param[in] qtd The QTD to initialize.
 * @param[in] urb The URB to use for initialization.  */
void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb)
{
	memset (qtd, 0, sizeof (dwc_otg_qtd_t));
	qtd->urb = urb;
	if (usb_pipecontrol(urb->pipe)) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers. This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
		qtd->control_phase = DWC_OTG_CONTROL_SETUP;
	}

	/* start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;

	/* Store the qtd ptr in the urb so the urb can reference this QTD. */
	urb->hcpriv = qtd;
	return;
}
Example #13
/**
 * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
 * @urb: urb whose transfer_buffer/setup_packet will be synchronized
 */
void usb_buffer_dmasync(struct urb *urb)
{
	struct usb_bus		*bus;
	struct device		*controller;

	if (!urb
			|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
			|| !urb->dev
			|| !(bus = urb->dev->bus)
			|| !(controller = bus->controller))
		return;

	if (controller->dma_mask) {
		dma_sync_single(controller,
			urb->transfer_dma, urb->transfer_buffer_length,
			usb_pipein(urb->pipe)
				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (usb_pipecontrol(urb->pipe))
			dma_sync_single(controller,
					urb->setup_dma,
					sizeof(struct usb_ctrlrequest),
					DMA_TO_DEVICE);
	}
}
Example #14
/**
 * usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request
 * @io: request block being initialized.  until usb_sg_wait() returns,
 *	treat this as a pointer to an opaque block of memory,
 * @dev: the usb device that will send or receive the data
 * @pipe: endpoint "pipe" used to transfer the data
 * @period: polling rate for interrupt endpoints, in frames or
 * 	(for high speed endpoints) microframes; ignored for bulk
 * @sg: scatterlist entries
 * @nents: how many entries in the scatterlist
 * @length: how many bytes to send from the scatterlist, or zero to
 * 	send every byte identified in the list.
 * @mem_flags: SLAB_* flags affecting memory allocations in this call
 *
 * Returns zero for success, else a negative errno value.  This initializes a
 * scatter/gather request, allocating resources such as I/O mappings and urb
 * memory (except maybe memory used by USB controller drivers).
 *
 * The request must be issued using usb_sg_wait(), which waits for the I/O to
 * complete (or to be canceled) and then cleans up all resources allocated by
 * usb_sg_init().
 *
 * The request may be canceled with usb_sg_cancel(), either before or after
 * usb_sg_wait() is called.
 */
int usb_sg_init (
	struct usb_sg_request	*io,
	struct usb_device	*dev,
	unsigned		pipe, 
	unsigned		period,
	struct scatterlist	*sg,
	int			nents,
	size_t			length,
	int			mem_flags
)
{
	int			i;
	int			urb_flags;
	int			dma;

	if (!io || !dev || !sg
			|| usb_pipecontrol (pipe)
			|| usb_pipeisoc (pipe)
			|| nents <= 0)
		return -EINVAL;

	spin_lock_init (&io->lock);
	io->dev = dev;
	io->pipe = pipe;
	io->sg = sg;
	io->nents = nents;

	/* not all host controllers use DMA (like the mainstream pci ones);
	 * they can use PIO (sl811) or be software over another transport.
	 */
	dma = (dev->dev.dma_mask != 0);
	if (dma)
		io->entries = usb_buffer_map_sg (dev, pipe, sg, nents);
	else
		io->entries = nents;

	/* initialize all the urbs we'll use */
	if (io->entries <= 0)
		return io->entries;

	io->count = 0;
	io->urbs = kmalloc (io->entries * sizeof *io->urbs, mem_flags);
	if (!io->urbs)
		goto nomem;

	urb_flags = URB_ASYNC_UNLINK | URB_NO_TRANSFER_DMA_MAP
			| URB_NO_INTERRUPT;
	if (usb_pipein (pipe))
		urb_flags |= URB_SHORT_NOT_OK;

	for (i = 0; i < io->entries; i++, io->count = i) {
		unsigned		len;

		io->urbs [i] = usb_alloc_urb (0, mem_flags);
		if (!io->urbs [i]) {
			io->entries = i;
			goto nomem;
		}

		io->urbs [i]->dev = NULL;
		io->urbs [i]->pipe = pipe;
		io->urbs [i]->interval = period;
		io->urbs [i]->transfer_flags = urb_flags;

		io->urbs [i]->complete = sg_complete;
		io->urbs [i]->context = io;
		io->urbs [i]->status = -EINPROGRESS;
		io->urbs [i]->actual_length = 0;

		if (dma) {
			/* hc may use _only_ transfer_dma */
			io->urbs [i]->transfer_dma = sg_dma_address (sg + i);
			len = sg_dma_len (sg + i);
		} else {
			/* hc may use _only_ transfer_buffer */
			io->urbs [i]->transfer_buffer =
				page_address (sg [i].page) + sg [i].offset;
			len = sg [i].length;
		}

		if (length) {
			len = min_t (unsigned, len, length);
			length -= len;
			if (length == 0)
				io->entries = i + 1;
		}
		io->urbs [i]->transfer_buffer_length = len;
	}
	io->urbs [--i]->transfer_flags &= ~URB_NO_INTERRUPT;

	/* transaction state */
	io->status = 0;
	io->bytes = 0;
	init_completion (&io->complete);
	return 0;

nomem:
	sg_clean (io);
	return -ENOMEM;
}
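A hedged usage sketch for the usb_sg_* lifecycle described in the comment above; sg_bulk_example() is an illustrative name and the call assumes a bulk pipe (period 0).

static int sg_bulk_example(struct usb_device *dev, unsigned pipe,
		struct scatterlist *sg, int nents, size_t length)
{
	struct usb_sg_request io;
	int ret;

	ret = usb_sg_init(&io, dev, pipe, 0, sg, nents, length, GFP_KERNEL);
	if (ret)
		return ret;

	usb_sg_wait(&io);	/* blocks until the transfer completes or is canceled */
	return io.status;	/* 0 on success, otherwise a negative errno */
}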
Example #15
/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (also called "request cancellation").
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number.  If the submission is successful, the complete()
 * callback from the urb will be called exactly once, when the USB core and
 * host controller driver are finished with the urb.  When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request.  The completion handler may then
 * immediately free or reuse that URB.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queueing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier requests.
 *
 * Bulk and Isochronous URBs may always be queued.  At this writing, all
 * mainstream host controller drivers support queueing for control and
 * interrupt transfer requests.
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 * 
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, int mem_flags)
{
	int			pipe, temp, max;
	struct usb_device	*dev;
	struct usb_operations	*op;
	int			is_out;
//	printk("sub dev %p bus %p num %i op %p sub %p\n",
//	       urb->dev, urb->dev->bus,urb->dev->devnum,urb->dev->bus->op, urb->dev->bus->op->submit_urb);
	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	if (!(dev = urb->dev) ||
	    (dev->state < USB_STATE_DEFAULT) ||
	    (!dev->bus) || (dev->devnum <= 0))
		return -ENODEV;
	if (!(op = dev->bus->op) || !op->submit_urb)
		return -ENODEV;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->bandwidth = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	pipe = urb->pipe;
	temp = usb_pipetype (pipe);
	is_out = usb_pipeout (pipe);

	if (!usb_pipecontrol (pipe) && dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	/* (actually HCDs may need to duplicate this, endpoint might yet
	 * stall due to queued bulk/intr transactions that complete after
	 * we check)
	 */
	if (usb_endpoint_halted (dev, usb_pipeendpoint (pipe), is_out))
		return -EPIPE;

	/* FIXME there should be a sharable lock protecting us against
	 * config/altsetting changes and disconnects, kicking in here.
	 * (here == before maxpacket, and eventually endpoint type,
	 * checks get made.)
	 */

	max = usb_maxpacket (dev, pipe, is_out);
	if (max <= 0) {
		dbg ("%s: bogus endpoint %d-%s on usb-%s-%s (bad maxpacket %d)",
			__FUNCTION__,
			usb_pipeendpoint (pipe), is_out ? "OUT" : "IN",
			dev->bus->bus_name, dev->devpath,
			max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (temp == PIPE_ISOCHRONOUS) {
		int	n, len;

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int	mult = 1 + ((max >> 11) & 0x03);
			max &= 0x03ff;
			max *= mult;
		}
Example #16
static inline int qu_pipeindex (__u32 pipe) 
{
	return (usb_pipeendpoint (pipe) << 1) | (usb_pipecontrol (pipe) ? 
				0 : usb_pipeout (pipe));
}
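A worked illustration of the index layout (pipes and endpoint numbers assumed for the example): non-control endpoints get separate slots for their IN and OUT halves, while a control endpoint always lands on its even slot.

static void qu_pipeindex_examples(struct usb_device *dev)
{
	int ctrl = qu_pipeindex(usb_sndctrlpipe(dev, 0));	/* (0 << 1) | 0 = 0 */
	int bin  = qu_pipeindex(usb_rcvbulkpipe(dev, 2));	/* (2 << 1) | 0 = 4 */
	int bout = qu_pipeindex(usb_sndbulkpipe(dev, 2));	/* (2 << 1) | 1 = 5 */

	(void)ctrl;
	(void)bin;
	(void)bout;
}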
Example #17
/*
  Set up PTD's.
*/
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
			     urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK   len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
Example #18
/*****************************************************************
 *
 * Function Name: hc_interrupt
 *
 * Interrupt service routine. 
 *
 * 1) determine the causes of interrupt
 * 2) clears all interrupts
 * 3) calls appropriate function to service the interrupt
 *
 * Input:  irq = interrupt line associated with the controller 
 *         hci = data structure for the host controller
 *         r = holds the snapshot of the processor's context before 
 *             the processor entered interrupt code. (not used here) 
 *
 * Return value  : None.
 *                
 *****************************************************************/
static void hc_interrupt (int irq, void *__hci, struct pt_regs *r)
{
	char ii;
	hci_t *hci = __hci;
	int isExcessNak = 0;
	int urb_state = 0;
	char tmpIrq = 0;

	/* Get value from interrupt status register */

	ii = SL811Read (hci, SL11H_INTSTATREG);

	if (ii & SL11H_INTMASK_INSRMV) {
		/* Device insertion or removal detected for the USB port */

		SL811Write (hci, SL11H_INTENBLREG, 0);
		SL811Write (hci, SL11H_CTLREG1, 0);
		mdelay (100);	// wait for device stable 
		handleInsRmvIntr (hci);
		return;
	}

	/* Clear all interrupts */

	SL811Write (hci, SL11H_INTSTATREG, 0xff);

	if (ii & SL11H_INTMASK_XFERDONE) {
		/* USB Done interrupt occurred */

		urb_state = sh_done_list (hci, &isExcessNak);
#ifdef WARNING
		if (hci->td_array->len > 0)
			printk ("WARNING: IRQ, td_array->len = 0x%x, s/b:0\n",
				hci->td_array->len);
#endif
		if (hci->td_array->len == 0 && !isExcessNak
		    && !(ii & SL11H_INTMASK_SOFINTR) && (urb_state == 0)) {
			if (urb_state == 0) {
				/* The urb transaction has not finished yet;
				 * continue with the current urb transaction
				 */

				if (hci->last_packet_nak == 0) {
					if (!usb_pipecontrol
					    (hci->td_array->td[0].urb->pipe))
						sh_add_packet (hci, hci->td_array-> td[0].urb);
				}
			} else {
				/* The last transaction has completed:
				 * schedule the next transaction 
				 */

				sh_schedule_trans (hci, 0);
			}
		}
		SL811Write (hci, SL11H_INTSTATREG, 0xff);
		return;
	}

	if (ii & SL11H_INTMASK_SOFINTR) {
		hci->frame_number = (hci->frame_number + 1) % 2048;
		if (hci->td_array->len == 0)
			sh_schedule_trans (hci, 1);
		else {
			if (sofWaitCnt++ > 100) {
				/* The last transaction has not completed.
				 * Need to retire the current td, and let
				 * it transmit again later on.
				 * (THIS NEEDS MORE WORK; IT SHOULD NEVER
				 *  GET TO THIS POINT)
				 */

				DBGERR ("SOF interrupt: td_array->len = 0x%x, s/b: 0\n",
					hci->td_array->len);
				urb_print (hci->td_array->td[hci->td_array->len - 1].urb,
					   "INTERRUPT", 0);
				sh_done_list (hci, &isExcessNak);
				SL811Write (hci, SL11H_INTSTATREG, 0xff);
				hci->td_array->len = 0;
				sofWaitCnt = 0;
			}
		}
		tmpIrq = SL811Read (hci, SL11H_INTSTATREG) & SL811Read (hci, SL11H_INTENBLREG);
		if (tmpIrq) {
			DBG ("IRQ occurred while service SOF: irq = 0x%x\n",
			     tmpIrq);

			/* If we receive a DONE IRQ after schedule, need to 
			 * handle DONE IRQ again 
			 */

			if (tmpIrq & SL11H_INTMASK_XFERDONE) {
				DBGERR ("IRQ occurred while service SOF: irq = 0x%x\n",
					tmpIrq);
				urb_state = sh_done_list (hci, &isExcessNak);
			}
			SL811Write (hci, SL11H_INTSTATREG, 0xff);
		}
	} else {
		DBG ("SL811 ISR: unknown, int = 0x%x \n", ii);
	}

	SL811Write (hci, SL11H_INTSTATREG, 0xff);
	return;
}
Example #19
static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
			      struct urb *urb,
			      gfp_t mem_flags)
{
	struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
	struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usb_host_endpoint *ep = urb->ep;
	struct usbhsh_device *new_udev = NULL;
	int is_dir_in = usb_pipein(urb->pipe);
	int ret;

	dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");

	if (!usbhsh_is_running(hpriv)) {
		ret = -EIO;
		dev_err(dev, "host is not running\n");
		goto usbhsh_urb_enqueue_error_not_linked;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret) {
		dev_err(dev, "urb link failed\n");
		goto usbhsh_urb_enqueue_error_not_linked;
	}

	/*
	 * attach udev if needed
	 * see [image of mod_host]
	 */
	if (!usbhsh_device_get(hpriv, urb)) {
		new_udev = usbhsh_device_attach(hpriv, urb);
		if (!new_udev) {
			ret = -EIO;
			dev_err(dev, "device attach failed\n");
			goto usbhsh_urb_enqueue_error_not_linked;
		}
	}

	/*
	 * attach endpoint if needed
	 * see [image of mod_host]
	 */
	if (!usbhsh_ep_to_uep(ep)) {
		ret = usbhsh_endpoint_attach(hpriv, urb, mem_flags);
		if (ret < 0) {
			dev_err(dev, "endpoint attach failed\n");
			goto usbhsh_urb_enqueue_error_free_device;
		}
	}

	/*
	 * attach pipe to endpoint
	 * see [image of mod_host]
	 */
	ret = usbhsh_pipe_attach(hpriv, urb);
	if (ret < 0) {
		dev_err(dev, "pipe attach failed\n");
		goto usbhsh_urb_enqueue_error_free_endpoint;
	}

	/*
	 * push packet
	 */
	if (usb_pipecontrol(urb->pipe))
		ret = usbhsh_dcp_queue_push(hcd, urb, mem_flags);
	else
		ret = usbhsh_queue_push(hcd, urb, mem_flags);

	return ret;

usbhsh_urb_enqueue_error_free_endpoint:
	usbhsh_endpoint_detach(hpriv, ep);
usbhsh_urb_enqueue_error_free_device:
	if (new_udev)
		usbhsh_device_detach(hpriv, new_udev);
usbhsh_urb_enqueue_error_not_linked:

	dev_dbg(dev, "%s error\n", __func__);

	return ret;
}