static int hci_submit_urb (struct urb * urb)
{
	hci_t *hci;
	unsigned int pipe = urb->pipe;
	unsigned long flags;
	int ret;

	if (!urb->dev || !urb->dev->bus || urb->hcpriv)
		return -EINVAL;

	if (usb_endpoint_halted (urb->dev,
				 usb_pipeendpoint (pipe), usb_pipeout (pipe)))
		return -EPIPE;

	hci = (hci_t *) urb->dev->bus->hcpriv;

	/* a request to the virtual root hub */
	if (usb_pipedevice (pipe) == hci->rh.devnum) {
		return rh_submit_urb (urb);
	}

	if (urb_debug)
		urb_print (urb, "SUB", usb_pipein (pipe));

	/* queue the URB to its endpoint-queue */
	spin_lock_irqsave (&usb_urb_lock, flags);
	ret = hcs_urb_queue (hci, urb);
	spin_unlock_irqrestore (&usb_urb_lock, flags);

	return ret;
}
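When hci_submit_urb() rejects a request with -EPIPE, the endpoint is halted and the calling driver has to clear the stall before any further transfer can succeed. The sketch below is a hedged illustration of that caller-side recovery, assuming the 2.4-era one-argument usb_submit_urb(); usb_clear_halt() is the real core helper (it issues a synchronous control request, so it must not be called from interrupt context), and the my_* name is a hypothetical placeholder.

/* Illustrative only: clear a stalled endpoint and retry the submission.
 * Must run in process context because usb_clear_halt() blocks.
 */
static int my_submit_with_stall_recovery (struct urb *urb)
{
	int ret = usb_submit_urb (urb);	/* one-arg form, 2.4-era API */

	if (ret == -EPIPE) {
		/* resets the halt feature and the data toggle on the endpoint */
		ret = usb_clear_halt (urb->dev, urb->pipe);
		if (ret == 0)
			ret = usb_submit_urb (urb);
	}
	return ret;
}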
// transfer buffer must be aligned on an 8 byte boundary
static int hci_submit_urb (urb_t * urb)
{
	hci_t *hci;
	unsigned int pipe = urb->pipe;
	unsigned long flags;
	int ret;

	if (!urb->dev || !urb->dev->bus || urb->hcpriv)
		return -EINVAL;

#if BURST_TRANSFER_SIZE >= 8
	/* a buffer that is not 8-byte aligned is only accepted for short
	 * transfers: at most 8 bytes, and at most 1 byte if the buffer is
	 * not even 2-byte aligned */
	if (((int) urb->transfer_buffer) & 7) {
		if ((urb->transfer_buffer_length > 8) ||
		    ((((int) urb->transfer_buffer) & 1) &&
		     (urb->transfer_buffer_length > 1))) {
			printk (KERN_ERR __FILE__
				": improper alignment, size %d bytes\n",
				urb->transfer_buffer_length);
			return -EINVAL;
		}
	}
#endif

	if (usb_endpoint_halted (urb->dev,
				 usb_pipeendpoint (pipe), usb_pipeout (pipe)))
		return -EPIPE;

	hci = (hci_t *) urb->dev->bus->hcpriv;

	/* a request to the virtual root hub */
	if (usb_pipedevice (pipe) == hci->rh.devnum) {
		return rh_submit_urb (urb);
	}

	if (urb_debug)
		urb_print (urb, "SUB", usb_pipein (pipe));

	/* queue the URB to its endpoint-queue */
	spin_lock_irqsave (&hci->urb_list_lock, flags);
	ret = hcs_urb_queue (hci, urb);
	spin_unlock_irqrestore (&hci->urb_list_lock, flags);

	return ret;
}
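The 8-byte alignment requirement falls on the caller that allocates urb->transfer_buffer. The following is a hedged sketch of how a device driver might satisfy it and hand the buffer to the submit path, assuming the 2.4-style API (one-argument usb_submit_urb(), FILL_BULK_URB()) and that kmalloc() returns memory aligned to at least 8 bytes, as it does on common architectures; every name prefixed with my_ is a hypothetical placeholder, not part of the driver above.

/* Illustrative only: allocate a suitably aligned bulk transfer buffer,
 * fill an URB for it and submit.  2.4-era API assumed throughout.
 */
static void my_bulk_complete (struct urb *urb)
{
	/* urb->status and urb->actual_length are valid at this point */
	kfree (urb->transfer_buffer);
	usb_free_urb (urb);
}

static int my_send_bulk (struct usb_device *dev, int ep, int len)
{
	struct urb *urb;
	void *buf;
	int ret;

	buf = kmalloc (len, GFP_KERNEL);	/* naturally >= 8-byte aligned */
	if (!buf)
		return -ENOMEM;

	urb = usb_alloc_urb (0);
	if (!urb) {
		kfree (buf);
		return -ENOMEM;
	}

	FILL_BULK_URB (urb, dev, usb_sndbulkpipe (dev, ep),
		       buf, len, my_bulk_complete, NULL);

	ret = usb_submit_urb (urb);	/* one-arg form, 2.4-era API */
	if (ret) {
		usb_free_urb (urb);
		kfree (buf);
	}
	return ret;
}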
/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (also called "request cancellation").
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number.  If the submission is successful, the complete()
 * callback from the urb will be called exactly once, when the USB core and
 * host controller driver are finished with the urb.  When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request.  The completion handler may then
 * immediately free or reuse that URB.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers.  Such
 * queueing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier requests.
 *
 * Bulk and Isochronous URBs may always be queued.  At this writing, all
 * mainstream host controller drivers support queueing for control and
 * interrupt transfer requests.
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep the
 * bandwidth they need, by reinitializing and resubmitting the
 * just-completed urb until the driver no longer needs that periodic
 * bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or
 *      (c) apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c)
 *      apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply.
 */
int usb_submit_urb(struct urb *urb, int mem_flags)
{
	int			pipe, temp, max;
	struct usb_device	*dev;
	struct usb_operations	*op;
	int			is_out;

	// printk("sub dev %p bus %p num %i op %p sub %p\n",
	//	urb->dev, urb->dev->bus, urb->dev->devnum,
	//	urb->dev->bus->op, urb->dev->bus->op->submit_urb);

	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	if (!(dev = urb->dev) ||
	    (dev->state < USB_STATE_DEFAULT) ||
	    (!dev->bus) || (dev->devnum <= 0))
		return -ENODEV;
	if (!(op = dev->bus->op) || !op->submit_urb)
		return -ENODEV;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->bandwidth = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	pipe = urb->pipe;
	temp = usb_pipetype (pipe);
	is_out = usb_pipeout (pipe);

	if (!usb_pipecontrol (pipe) && dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	/* (actually HCDs may need to duplicate this, endpoint might yet
	 * stall due to queued bulk/intr transactions that complete after
	 * we check)
	 */
	if (usb_endpoint_halted (dev, usb_pipeendpoint (pipe), is_out))
		return -EPIPE;

	/* FIXME there should be a sharable lock protecting us against
	 * config/altsetting changes and disconnects, kicking in here.
	 * (here == before maxpacket, and eventually endpoint type,
	 * checks get made.)
	 */
	max = usb_maxpacket (dev, pipe, is_out);
	if (max <= 0) {
		dbg ("%s: bogus endpoint %d-%s on usb-%s-%s (bad maxpacket %d)",
			__FUNCTION__,
			usb_pipeendpoint (pipe), is_out ? "OUT" : "IN",
			dev->bus->bus_name, dev->devpath,
			max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (temp == PIPE_ISOCHRONOUS) {
		int	n, len;

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int	mult = 1 + ((max >> 11) & 0x03);
			max &= 0x03ff;
			max *= mult;
		}
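		/* Worked example (illustrative, following the USB 2.0
		 * encoding of wMaxPacketSize): max == 0x1400 means bits
		 * 10..0 give a 1024-byte packet and bits 12..11 == 2
		 * request two additional transactions per microframe, so
		 * mult == 3 and the usable per-microframe limit becomes
		 * 3 * 1024 == 3072 bytes.
		 */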