/**
 * Schedules an interrupt or isochronous transfer in the periodic schedule.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh QH for the periodic transfer. The QH should already contain the
 * scheduling information.
 *
 * @return 0 if successful, negative error code otherwise.
 */
static int schedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	struct usb_bus *bus = hcd_to_bus(dwc_otg_hcd_to_hcd(hcd));
	int retval;

	/* A free host channel is required before anything else. */
	retval = periodic_channel_available(hcd);
	if (retval) {
		DWC_NOTICE("%s: No host channel available for periodic "
			   "transfer.\n", __func__);
		return retval;
	}

	/* Make sure the claimed usecs per (micro)frame still fit. */
	retval = check_periodic_bandwidth(hcd, qh);
	if (retval) {
		DWC_NOTICE("%s: Insufficient periodic bandwidth for "
			   "periodic transfer.\n", __func__);
		return retval;
	}

	retval = check_max_xfer_size(hcd, qh);
	if (retval) {
		DWC_NOTICE("%s: Channel max transfer size too small "
			   "for periodic transfer.\n", __func__);
		return retval;
	}

	/* Always start in the inactive schedule. */
	list_add_tail(&qh->qh_list_entry, &hcd->periodic_sched_inactive);

	/* Reserve the periodic channel. */
	hcd->periodic_channels++;

	/* Update claimed usecs per (micro)frame. */
	hcd->periodic_usecs += qh->usecs;

	/*
	 * Update average periodic bandwidth claimed and # periodic reqs
	 * for usbfs.
	 */
	bus->bandwidth_allocated += qh->usecs / qh->interval;
	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		bus->bandwidth_int_reqs++;
		DWC_DEBUGPL(DBG_HCD,
			    "Scheduled intr: qh %p, usecs %d, period %d\n",
			    qh, qh->usecs, qh->interval);
	} else {
		bus->bandwidth_isoc_reqs++;
		DWC_DEBUGPL(DBG_HCD,
			    "Scheduled isoc: qh %p, usecs %d, period %d\n",
			    qh, qh->usecs, qh->interval);
	}

	return retval;
}
/** Free each QTD in the QH's QTD-list then free the QH. QH should already be
 * removed from a list. QTD list should already be empty if called from URB
 * Dequeue.
 *
 * @param[in] hcd HCD instance.
 * @param[in] qh The QH to free.
 */
void dwc_otg_hcd_qh_free (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	unsigned long flags;

	/* Drain the QTD list, freeing each entry under the HCD lock. */
	SPIN_LOCK_IRQSAVE(&hcd->lock, flags)
	while (!list_empty(&qh->qtd_list)) {
		struct list_head *entry = qh->qtd_list.next;
		dwc_otg_qtd_t *qtd;

		list_del (entry);
		qtd = dwc_list_to_qtd (entry);
		dwc_otg_hcd_qtd_free (qtd);
	}
	SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags)

	/* Release the DMA-aligned bounce buffer, if one was allocated. */
	if (qh->dw_align_buf) {
		dma_free_coherent((dwc_otg_hcd_to_hcd(hcd))->self.controller,
				  hcd->core_if->core_params->max_transfer_size,
				  qh->dw_align_buf,
				  qh->dw_align_buf_dma);
	}

	kfree (qh);
}
/** Disconnect callback: clears the B-host flag on the root USB bus. */
static int _disconnect(dwc_otg_hcd_t * hcd)
{
	dwc_otg_hcd_to_hcd(hcd)->self.is_b_host = 0;
	return 0;
}
/** * Removes the HCD. * Frees memory and resources associated with the HCD and deregisters the bus. */ void hcd_remove(struct platform_device *_dev) { dwc_otg_device_t *otg_dev = platform_get_otgdata(_dev); dwc_otg_hcd_t *dwc_otg_hcd; struct usb_hcd *hcd; DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n"); if (!otg_dev) { DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); return; } dwc_otg_hcd = otg_dev->hcd; if (!dwc_otg_hcd) { DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); return; } hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd); if (!hcd) { DWC_DEBUGPL(DBG_ANY, "%s: dwc_otg_hcd_to_hcd(dwc_otg_hcd) NULL!\n", __func__); return; } usb_remove_hcd(hcd); dwc_otg_hcd_set_priv_data(dwc_otg_hcd, NULL); dwc_otg_hcd_remove(dwc_otg_hcd); usb_put_hcd(hcd); }
/** Start callback: records B-host status on the bus and starts the HCD. */
static int _start(dwc_otg_hcd_t * hcd)
{
	struct usb_hcd *shell = dwc_otg_hcd_to_hcd(hcd);

	shell->self.is_b_host = dwc_otg_hcd_is_b_host(hcd);
	hcd_start(shell);

	return 0;
}
/**
 * Schedules an interrupt or isochronous transfer in the periodic schedule.
 */
static int schedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	struct usb_bus *bus = hcd_to_bus(dwc_otg_hcd_to_hcd(hcd));
	int ret;
	int uframe = -1;

	/*
	 * find_uframe() returns 0 when slot 7 is chosen, a positive value
	 * (slot index + 1) for slots 0-6, and -1 when no microframe has
	 * enough bandwidth left.
	 */
	ret = find_uframe(hcd, qh);
	if (ret > 0)
		uframe = ret - 1;
	else if (ret == 0)
		uframe = 7;

	/* Stamp the chosen microframe into the low 3 bits of sched_frame. */
	if (uframe > -1) {
		qh->sched_frame &= ~0x7;
		qh->sched_frame |= (uframe & 7);
	}

	if (ret == -1) {
		pr_notice("%s: Insufficient periodic bandwidth for "
			  "periodic transfer.\n", __func__);
		return -1;
	}

	ret = check_max_xfer_size(hcd, qh);
	if (ret) {
		pr_notice("%s: Channel max transfer size too small "
			  "for periodic transfer.\n", __func__);
		return ret;
	}

	/* Always start in the inactive schedule. */
	list_add_tail(&qh->qh_list_entry, &hcd->periodic_sched_inactive);

	/* Update claimed usecs per (micro)frame. */
	hcd->periodic_usecs += qh->usecs;

	/*
	 * Update average periodic bandwidth claimed and # periodic reqs for
	 * usbfs.
	 */
	bus->bandwidth_allocated += qh->usecs / qh->interval;
	if (qh->ep_type == USB_ENDPOINT_XFER_INT)
		bus->bandwidth_int_reqs++;
	else
		bus->bandwidth_isoc_reqs++;

	return 0;
}
/**
 * Removes an interrupt or isochronous transfer from the periodic schedule.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh QH for the periodic transfer.
 */
static void deschedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	struct usb_bus *bus = hcd_to_bus(dwc_otg_hcd_to_hcd(hcd));

	list_del_init(&qh->qh_list_entry);

	/* Release the periodic channel reservation. */
	hcd->periodic_channels--;

	/* Update claimed usecs per (micro)frame. */
	hcd->periodic_usecs -= qh->usecs;

	/*
	 * Update average periodic bandwidth claimed and # periodic reqs
	 * for usbfs.
	 */
	bus->bandwidth_allocated -= qh->usecs / qh->interval;
	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		bus->bandwidth_int_reqs--;
		DWC_DEBUGPL(DBG_HCD,
			    "Descheduled intr: qh %p, usecs %d, period %d\n",
			    qh, qh->usecs, qh->interval);
	} else {
		bus->bandwidth_isoc_reqs--;
		DWC_DEBUGPL(DBG_HCD,
			    "Descheduled isoc: qh %p, usecs %d, period %d\n",
			    qh, qh->usecs, qh->interval);
	}
}
/**
 * Removes an interrupt or isochronous transfer from the periodic schedule.
 */
static void deschedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	struct usb_bus *bus = hcd_to_bus(dwc_otg_hcd_to_hcd(hcd));
	int uf;

	list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame. */
	hcd->periodic_usecs -= qh->usecs;

	/* Give the per-microframe usecs back to the schedule. */
	for (uf = 0; uf < 8; uf++) {
		hcd->frame_usecs[uf] += qh->frame_usecs[uf];
		qh->frame_usecs[uf] = 0;
	}

	/*
	 * Update average periodic bandwidth claimed and # periodic reqs for
	 * usbfs.
	 */
	bus->bandwidth_allocated -= qh->usecs / qh->interval;
	if (qh->ep_type == USB_ENDPOINT_XFER_INT)
		bus->bandwidth_int_reqs--;
	else
		bus->bandwidth_isoc_reqs--;
}
/**
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to deactivate.
 * @param sched_next_periodic_split Non-zero to schedule the next (start or
 *        complete) phase of a periodic split transfer.
 */
void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_next_periodic_split)
{
	unsigned long flags;
	SPIN_LOCK_IRQSAVE(&hcd->lock, flags);

	if (dwc_qh_is_non_per(qh)) {
		/* Non-periodic: remove, then re-queue only if work remains. */
		dwc_otg_hcd_qh_remove(hcd, qh);
		if (!list_empty(&qh->qtd_list)) {
			/* Add back to inactive non-periodic schedule. */
			dwc_otg_hcd_qh_add(hcd, qh);
		}
	} else {
		uint16_t frame_number = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));

		if (qh->do_split) {
			/* Schedule the next continuing periodic split transfer */
			if (sched_next_periodic_split) {

				qh->sched_frame = frame_number;
				if (dwc_frame_num_le(frame_number,
						     dwc_frame_num_inc(qh->start_split_frame, 1))) {
					/*
					 * Allow one frame to elapse after start
					 * split microframe before scheduling
					 * complete split, but DONT if we are
					 * doing the next start split in the
					 * same frame for an ISOC out.
					 */
					if ((qh->ep_type != USB_ENDPOINT_XFER_ISOC) ||
					    (qh->ep_is_in != 0)) {
						qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, 1);
					}
				}
			} else {
				/*
				 * Start the next split one interval after the
				 * previous start split, but never in the past.
				 */
				qh->sched_frame = dwc_frame_num_inc(qh->start_split_frame,
								    qh->interval);
				if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
					qh->sched_frame = frame_number;
				}
				/* Force the last microframe of the frame. */
				qh->sched_frame |= 0x7;
				qh->start_split_frame = qh->sched_frame;
			}
		} else {
			/* Plain periodic: advance by one interval, clamped to now. */
			qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval);
			if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
				qh->sched_frame = frame_number;
			}
		}

		if (list_empty(&qh->qtd_list)) {
			/* No more work: drop out of the periodic schedule entirely. */
			dwc_otg_hcd_qh_remove(hcd, qh);
		} else {
			/*
			 * Remove from periodic_sched_queued and move to
			 * appropriate queue.
			 */
			if (qh->sched_frame == frame_number) {
				list_move(&qh->qh_list_entry,
					  &hcd->periodic_sched_ready);
			} else {
				list_move(&qh->qh_list_entry,
					  &hcd->periodic_sched_inactive);
			}
		}
	}

	SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
}
/**
 * Sets the final status of an URB and returns it to the device driver. Any
 * required cleanup of the URB is performed.
 *
 * @param hcd HCD instance.
 * @param urb_handle Opaque handle holding the struct urb being completed.
 * @param dwc_otg_urb Driver-internal URB holding the transfer results;
 *        freed here.
 * @param status DWC-internal completion code, converted to a Linux errno.
 *
 * @return 0 always.
 */
static int _complete(dwc_otg_hcd_t * hcd, void *urb_handle,
		     dwc_otg_hcd_urb_t * dwc_otg_urb, uint32_t status)
{
	struct urb *urb = (struct urb *)urb_handle;

#ifdef DEBUG
	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
		DWC_PRINTF("%s: urb %p, device %d, ep %d %s, status=%d\n",
			   __func__, urb, usb_pipedevice(urb->pipe),
			   usb_pipeendpoint(urb->pipe),
			   usb_pipein(urb->pipe) ? "IN" : "OUT", status);
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			int i;
			for (i = 0; i < urb->number_of_packets; i++) {
				DWC_PRINTF("  ISO Desc %d status: %d\n",
					   i, urb->iso_frame_desc[i].status);
			}
		}
	}
#endif

	urb->actual_length = dwc_otg_hcd_urb_get_actual_length(dwc_otg_urb);

	/* Convert DWC-internal status value to a Linux errno. */
	switch (status) {
	case -DWC_E_PROTOCOL:
		status = -EPROTO;
		break;
	case -DWC_E_IN_PROGRESS:
		status = -EINPROGRESS;
		break;
	case -DWC_E_PIPE:
		status = -EPIPE;
		break;
	case -DWC_E_IO:
		status = -EIO;
		break;
	case -DWC_E_TIMEOUT:
		status = -ETIMEDOUT;
		break;
	default:
		if (status) {
			/* alan.K
			 * DWC_OTG IP don't know this status, so assumed to be a DWC_E_PROTOCOL.
			 */
			DWC_WARN("Unknown urb status %d, but assumed to be an EPROTO\n",
				 status);
			status = -EPROTO;
		}
	}

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		int i;

		urb->error_count = dwc_otg_hcd_urb_get_error_count(dwc_otg_urb);
		for (i = 0; i < urb->number_of_packets; ++i) {
			urb->iso_frame_desc[i].actual_length =
			    dwc_otg_hcd_urb_get_iso_desc_actual_length
			    (dwc_otg_urb, i);
			/*
			 * BUG FIX: was copy-pasted from the line above and
			 * stored the actual length in the status field; use
			 * the per-descriptor status accessor instead.
			 */
			urb->iso_frame_desc[i].status =
			    dwc_otg_hcd_urb_get_iso_desc_status
			    (dwc_otg_urb, i);
		}
	}

	urb->status = status;
	urb->hcpriv = NULL;
	if (!status) {
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
		    (urb->actual_length < urb->transfer_buffer_length)) {
			urb->status = -EREMOTEIO;
		}
	}

	/* Release the bus bandwidth claimed for periodic transfers. */
	if ((usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) ||
	    (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
		struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);
		if (ep) {
			free_bus_bandwidth(dwc_otg_hcd_to_hcd(hcd),
					   dwc_otg_hcd_get_ep_bandwidth(hcd, ep->hcpriv),
					   urb);
		}
	}

	dwc_free(dwc_otg_urb);
	usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status);
	return 0;
}
/** Returns the b_hnp_enable flag from the root USB bus. */
static int _get_b_hnp_enable(dwc_otg_hcd_t * hcd)
{
	return dwc_otg_hcd_to_hcd(hcd)->self.b_hnp_enable;
}