static void
ehci_urb_done (struct ehci_hcd *ehci, dma_addr_t addr, struct urb *urb)
{
	if (urb->transfer_buffer_length)
		pci_unmap_single (ehci->hcd.pdev, addr,
			urb->transfer_buffer_length,
			usb_pipein (urb->pipe)
				? PCI_DMA_FROMDEVICE
				: PCI_DMA_TODEVICE);

	if (likely (urb->hcpriv != 0)) {
		qh_put (ehci, (struct ehci_qh *) urb->hcpriv);
		urb->hcpriv = 0;
	}

	if (likely (urb->status == -EINPROGRESS)) {
		if (urb->actual_length != urb->transfer_buffer_length
				&& (urb->transfer_flags & USB_DISABLE_SPD))
			urb->status = -EREMOTEIO;
		else
			urb->status = 0;
	}

	/* hand off urb ownership */
	usb_hcd_giveback_urb (&ehci->hcd, urb);
}
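/*
 * Hedged side note (not from ehci-hcd): pci_unmap_single()/PCI_DMA_* as used
 * above are the legacy PCI DMA wrappers.  On current kernels the same unmap
 * would roughly be expressed with the generic DMA API; sketch_unmap below is
 * a hypothetical helper, not part of any driver.
 */
#include <linux/dma-mapping.h>

static void sketch_unmap(struct device *dev, dma_addr_t addr, size_t len,
			 bool is_in)
{
	/* direction must match the one used at map time */
	dma_unmap_single(dev, addr, len,
			 is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}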
/*
 *		queue push/pop
 */
static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
{
	struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
	struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
	struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
	struct urb *urb = ureq->urb;
	struct device *dev = usbhs_priv_to_dev(priv);
	int status = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!urb) {
		dev_warn(dev, "pkt doesn't have urb\n");
		return;
	}

	if (!usbhsh_is_running(hpriv))
		status = -ESHUTDOWN;

	urb->actual_length = pkt->actual;

	usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
	usbhsh_ureq_free(hpriv, ureq);

	usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep));

	usb_hcd_unlink_urb_from_ep(hcd, urb);
	usb_hcd_giveback_urb(hcd, urb, status);
}
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	u32 temp;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;

	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		td = (struct xhci_td *) urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
		kfree(td);
		return ret;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = ep->ring;
	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);
	td = (struct xhci_td *) urb->hcpriv;

	ep->cancels_pending++;
	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	if (ep->cancels_pending == 1) {
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/**
 * wusbhc_giveback_urb - return an URB to the USB core
 * @wusbhc: the host controller the URB is from.
 * @urb:    the URB.
 * @status: the URB's status.
 *
 * Return an URB to the USB core, doing some additional WUSB-specific
 * processing.
 *
 *  - After a successful transfer, update the trust timeout timestamp
 *    for the WUSB device.
 *
 *  - [WUSB] sections 4.13 and 7.5.1 specify that the stop retransmission
 *    condition for the WCONNECTACK_IE is that the host has observed
 *    the associated device responding to a control transfer.
 */
void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
{
	struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);

	if (status == 0 && wusb_dev) {
		wusb_dev->entry_ts = jiffies;

		/* wusbhc_devconnect_acked() can't be called from
		   atomic context so defer it to a work queue. */
		if (!list_empty(&wusb_dev->cack_node))
			queue_work(wusbd, &wusb_dev->devconnect_acked_work);
		else
			wusb_dev_put(wusb_dev);
	}

	usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status);
}
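/*
 * Hedged usage sketch (not part of the WUSB driver above): the comment in
 * wusbhc_giveback_urb() defers wusbhc_devconnect_acked() to a work queue
 * because the giveback path may run in atomic context.  The bare deferral
 * pattern looks roughly like this; sketch_dev, sketch_work_fn and
 * sketch_schedule are hypothetical names, and the system workqueue is used
 * instead of the driver's dedicated wusbd workqueue.
 */
#include <linux/workqueue.h>

struct sketch_dev {
	struct work_struct ack_work;	/* queued from atomic context */
};

static void sketch_work_fn(struct work_struct *work)
{
	struct sketch_dev *dev = container_of(work, struct sketch_dev, ack_work);

	/* runs later in process context, so sleeping is allowed here */
	(void)dev;
}

static void sketch_init(struct sketch_dev *dev)
{
	INIT_WORK(&dev->ack_work, sketch_work_fn);
}

static void sketch_schedule(struct sketch_dev *dev)
{
	/* only queues the work; safe to call with spinlocks held or in IRQ context */
	schedule_work(&dev->ack_work);
}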
void dde_linux26_usb_vhcd_urb_complete(void *urb_handle,
                                       dde_kit_size_t data_size,
                                       void *data)
{
	LOG("URB %p completed", urb_handle);

	struct urb *urb = (struct urb *)urb_handle;

	/* FIXME the URB may have been unlinked */
	if (!urb)
		return;

	LOG("urb @ %p, urb->setup_packet @ %p data_size %zd data %p hc_priv %p",
	    urb, urb->setup_packet, data_size, data, urb->hcpriv);

	/* FIXME update URB */
//	urb->status         = irq_d_urb->status;
//	urb->actual_length  = irq_d_urb->actual_length;
//	urb->start_frame    = irq_d_urb->start_frame;
//	urb->interval       = irq_d_urb->interval;
//	urb->error_count    = irq_d_urb->error_count;
//	urb->transfer_flags = irq_d_urb->transfer_flags;

	if (urb->setup_packet)
		memcpy(urb->setup_packet, data, 8);

	/* XXX no ISOC */
//	if (urb->number_of_packets)
//		memcpy(urb->iso_frame_desc, irq_d_urb->iso_desc,
//		       urb->number_of_packets * sizeof(struct usb_iso_packet_descriptor));

	struct usb_hcd *hcd = vhcd_to_hcd(urb->hcpriv);
	urb->hcpriv = NULL;
	LOG("hcd %p", hcd);

	usb_hcd_giveback_urb(hcd, urb);
	usb_put_urb(urb);
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;

	temp = readl(&xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HW died, freeing TD.");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
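/*
 * Hedged usage sketch (not part of xhci-hcd): a class driver never calls
 * xhci_urb_dequeue() directly.  It calls usb_unlink_urb() (asynchronous) or
 * usb_kill_urb() (synchronous), and the USB core routes the request to the
 * HCD's urb_dequeue hook.  sketch_ctx and sketch_cancel are hypothetical.
 */
#include <linux/usb.h>

struct sketch_ctx {
	struct urb *urb;
};

static void sketch_cancel(struct sketch_ctx *ctx, bool can_sleep)
{
	if (can_sleep)
		usb_kill_urb(ctx->urb);		/* blocks until the completion
						 * handler has run */
	else
		usb_unlink_urb(ctx->urb);	/* returns immediately; the URB
						 * typically completes later
						 * with -ECONNRESET */
}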
static int vhcd_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep,
                            struct urb *urb, gfp_t mem_flags)
{
	int ret = 0;
	unsigned int transfer_flags = 0;
	struct usb_device *udev = urb->dev;

	/* FIXME Check for non existent device */

	if (!HC_IS_RUNNING(hcd->state)) {
		LOG("HC is not running\n");
		return -ENODEV;
	}

	/* we have to trap some control messages, i.e. USB_REQ_SET_ADDRESS... */
	/* TODO we don't have to do it here, but in the server */
	if (usb_pipedevice(urb->pipe) == 0) {
		__u8 type = usb_pipetype(urb->pipe);
		struct usb_ctrlrequest *ctrlreq =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (type != PIPE_CONTROL || !ctrlreq) {
			LOG("invalid request to devnum 0\n");
			ret = -EINVAL;
			goto no_need_xmit;
		}

		switch (ctrlreq->bRequest) {
		case USB_REQ_SET_ADDRESS:
			LOG("SetAddress Request (%d) to port %d\n",
			    ctrlreq->wValue, urb->dev->portnum);

			spin_lock(&urb->lock);
			if (urb->status == -EINPROGRESS) {
				/* This request is successfully completed. */
				/* If not -EINPROGRESS, possibly unlinked. */
				urb->status = 0;
			}
			spin_unlock(&urb->lock);

			goto no_need_xmit;

		case USB_REQ_GET_DESCRIPTOR:
			if (ctrlreq->wValue == (USB_DT_DEVICE << 8))
				LOG("Get_Descriptor to device 0 (get max pipe size)\n");
			goto out;

		default:
			/* NOT REACHED */
			LOG("invalid request to devnum 0 bRequest %u, wValue %u\n",
			    ctrlreq->bRequest, ctrlreq->wValue);
			ret = -EINVAL;
			goto no_need_xmit;
		}
	}

out:
	if (urb->status != -EINPROGRESS) {
		LOG("URB already unlinked!, status %d\n", urb->status);
		return urb->status;
	}

	if (usb_pipeisoc(urb->pipe)) {
		LOG("ISO URBs not supported");
		ret = -EINVAL;
		goto no_need_xmit;
	}

	urb->hcpriv = (void *) hcd_to_vhcd(hcd);
	LOG("hcpriv %p", urb->hcpriv);

	transfer_flags = urb->transfer_flags;
	usb_get_urb(urb);

#if 0
	d_urb->type              = usb_pipetype(urb->pipe);
	d_urb->dev_id            = data->gadget[urb->dev->portnum-1].id;
	d_urb->endpoint          = usb_pipeendpoint(urb->pipe);
	d_urb->direction         = 0 || usb_pipein(urb->pipe);
	d_urb->interval          = urb->interval;
	d_urb->transfer_flags    = urb->transfer_flags;
	d_urb->number_of_packets = urb->number_of_packets;
	d_urb->priv              = priv;
	d_urb->size              = urb->transfer_buffer_length;
	d_urb->data              = urb->transfer_buffer;
	d_urb->phys_addr         = d_urb->data ? virt_to_phys(d_urb->data) : 0;

	if (urb->setup_packet) {
		memcpy(d_urb->setup_packet, urb->setup_packet, 8);
	}

	/* XXX ISO ? */
//	if (urb->number_of_packets)
//		memcpy(d_urb->iso_desc, urb->iso_frame_desc,
//		       urb->number_of_packets*sizeof(struct usb_iso_packet_descriptor));

	ret = libddeusb_submit_d_urb(d_urb);
#else
	unsigned port_num = urb->dev->portnum;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		{
			struct usb_ctrlrequest *req =
				(struct usb_ctrlrequest *) urb->setup_packet;

			dde_linux26_usb_vhcd_submit_control_urb_cb(port_num,
			                                           usb_pipeendpoint(urb->pipe),
			                                           usb_pipein(urb->pipe),
			                                           urb, /* handle */
			                                           sizeof(*req), req);
		}
		break;

	case PIPE_INTERRUPT:
		printk(" int\n");
//		dde_linux26_usb_vhcd_submit_urb(urb->transfer_buffer,
//		                                urb->transfer_buffer_length);
		return -EINVAL;
		break;

	/* unsupported transfer types */
	case PIPE_BULK:
		printk(" bulk\n");
		return -EINVAL;
	case PIPE_ISOCHRONOUS:
		printk(" isoc\n");
		return -EINVAL;
	}
#endif

//	if (ret) {
//		LOG("URB SUBMIT FAILED (%d).", ret);
//		/* s.t. went wrong. */
//		spin_lock_irqsave(&data->lock, flags);
//		data->rcv_buf[i] = NULL;
//		spin_unlock_irqrestore(&data->lock, flags);
//		down(&data->rcv_buf_free);
//		kmem_cache_free(priv_cache, urb->hcpriv);
//		usb_put_urb(urb);
//		urb->status = ret;
//		urb->hcpriv = NULL;
//		libddeusb_free_d_urb(d_urb);
//		return ret;
//	}

	LOG("URB %p submitted", urb);

	return 0;

no_need_xmit:
	usb_hcd_giveback_urb(hcd, urb);
	return 0;
}
/**
 * Sets the final status of an URB and returns it to the device driver. Any
 * required cleanup of the URB is performed.
 */
static int _complete(dwc_otg_hcd_t * hcd, void *urb_handle,
		     dwc_otg_hcd_urb_t * dwc_otg_urb, uint32_t status)
{
	struct urb *urb = (struct urb *)urb_handle;

#ifdef DEBUG
	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
		DWC_PRINTF("%s: urb %p, device %d, ep %d %s, status=%d\n",
			   __func__, urb, usb_pipedevice(urb->pipe),
			   usb_pipeendpoint(urb->pipe),
			   usb_pipein(urb->pipe) ? "IN" : "OUT", status);
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			int i;

			for (i = 0; i < urb->number_of_packets; i++) {
				DWC_PRINTF("  ISO Desc %d status: %d\n",
					   i, urb->iso_frame_desc[i].status);
			}
		}
	}
#endif

	urb->actual_length = dwc_otg_hcd_urb_get_actual_length(dwc_otg_urb);

	/* Convert status value. */
	switch (status) {
	case -DWC_E_PROTOCOL:
		status = -EPROTO;
		break;
	case -DWC_E_IN_PROGRESS:
		status = -EINPROGRESS;
		break;
	case -DWC_E_PIPE:
		status = -EPIPE;
		break;
	case -DWC_E_IO:
		status = -EIO;
		break;
	case -DWC_E_TIMEOUT:
		status = -ETIMEDOUT;
		break;
	default:
		if (status) {
			/* alan.K
			 * The DWC_OTG IP doesn't know this status, so
			 * assume DWC_E_PROTOCOL.
			 */
			DWC_WARN("Unknown urb status %d, but assumed to be an EPROTO\n",
				 status);
			status = -EPROTO;
		}
	}

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		int i;

		urb->error_count = dwc_otg_hcd_urb_get_error_count(dwc_otg_urb);
		for (i = 0; i < urb->number_of_packets; ++i) {
			urb->iso_frame_desc[i].actual_length =
			    dwc_otg_hcd_urb_get_iso_desc_actual_length
			    (dwc_otg_urb, i);
			urb->iso_frame_desc[i].status =
			    dwc_otg_hcd_urb_get_iso_desc_status
			    (dwc_otg_urb, i);
		}
	}

	urb->status = status;
	urb->hcpriv = NULL;
	if (!status) {
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
		    (urb->actual_length < urb->transfer_buffer_length)) {
			urb->status = -EREMOTEIO;
		}
	}

	if ((usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) ||
	    (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
		struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);

		if (ep) {
			free_bus_bandwidth(dwc_otg_hcd_to_hcd(hcd),
					   dwc_otg_hcd_get_ep_bandwidth(hcd, ep->hcpriv),
					   urb);
		}
	}

	dwc_free(dwc_otg_urb);
	usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status);

	return 0;
}
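/*
 * Hedged usage sketch (not part of dwc_otg or any HCD above): the status
 * passed to usb_hcd_giveback_urb() is what a class driver sees in its URB
 * completion handler.  sketch_complete and sketch_submit are hypothetical;
 * the bulk endpoint number 1 is an arbitrary example.
 */
#include <linux/usb.h>
#include <linux/slab.h>

static void sketch_complete(struct urb *urb)
{
	switch (urb->status) {
	case 0:
		/* full transfer; urb->actual_length bytes were moved */
		break;
	case -EREMOTEIO:
		/* short transfer with URB_SHORT_NOT_OK set */
		break;
	case -ESHUTDOWN:
	case -ECONNRESET:
	case -ENOENT:
		/* host controller gone or URB unlinked; do not resubmit */
		break;
	default:
		/* protocol/IO error such as -EPROTO, -EPIPE, -EIO */
		break;
	}
}

static int sketch_submit(struct usb_device *udev, void *buf, int len)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	int ret;

	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 1),
			  buf, len, sketch_complete, NULL);
	urb->transfer_flags |= URB_SHORT_NOT_OK;

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_free_urb(urb);	/* submission failed; drop our reference */

	return ret;
}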