static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
				u32 status, u32 token, u32 buffer)
{
	td->status = cpu_to_hc32(uhci, status);
	td->token = cpu_to_hc32(uhci, token);
	td->buffer = cpu_to_hc32(uhci, buffer);
}
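/*
 * Illustrative sketch, not taken from the driver above: one plausible way a
 * caller could use uhci_fill_td() to build a SETUP TD. The helper names
 * (uhci_maxerr, uhci_explen, TD_CTRL_ACTIVE, USB_PID_SETUP, PIPE_DEVEP_MASK)
 * are assumed to match the usual uhci-hcd definitions; treat this as a
 * sketch, not a verified call site.
 */
static void example_fill_setup_td(struct uhci_hcd *uhci, struct uhci_td *td,
				  unsigned long pipe, dma_addr_t setup_dma)
{
	u32 status = uhci_maxerr(3) | TD_CTRL_ACTIVE;	/* 3 error retries, active */
	u32 token = (pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP |
			uhci_explen(8);			/* 8-byte SETUP packet */

	/* setup_dma must be a 32-bit DMA address of the SETUP packet */
	uhci_fill_td(uhci, td, status, token, setup_dma);
}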
static int admhc_eds_init(struct admhcd *ahcd)
{
	struct ed *ed;

	ed = ed_create(ahcd, PIPE_INTERRUPT, ED_DUMMY_INFO);
	if (!ed)
		goto err;

	ahcd->ed_tails[PIPE_INTERRUPT] = ed;

	ed = ed_create(ahcd, PIPE_ISOCHRONOUS, ED_DUMMY_INFO);
	if (!ed)
		goto err;

	ahcd->ed_tails[PIPE_ISOCHRONOUS] = ed;
	ed->ed_prev = ahcd->ed_tails[PIPE_INTERRUPT];
	ahcd->ed_tails[PIPE_INTERRUPT]->ed_next = ed;
	ahcd->ed_tails[PIPE_INTERRUPT]->hwNextED = cpu_to_hc32(ahcd, ed->dma);

	ed = ed_create(ahcd, PIPE_CONTROL, ED_DUMMY_INFO);
	if (!ed)
		goto err;

	ahcd->ed_tails[PIPE_CONTROL] = ed;
	ed->ed_prev = ahcd->ed_tails[PIPE_ISOCHRONOUS];
	ahcd->ed_tails[PIPE_ISOCHRONOUS]->ed_next = ed;
	ahcd->ed_tails[PIPE_ISOCHRONOUS]->hwNextED = cpu_to_hc32(ahcd, ed->dma);

	ed = ed_create(ahcd, PIPE_BULK, ED_DUMMY_INFO);
	if (!ed)
		goto err;

	ahcd->ed_tails[PIPE_BULK] = ed;
	ed->ed_prev = ahcd->ed_tails[PIPE_CONTROL];
	ahcd->ed_tails[PIPE_CONTROL]->ed_next = ed;
	ahcd->ed_tails[PIPE_CONTROL]->hwNextED = cpu_to_hc32(ahcd, ed->dma);

	ahcd->ed_head = ahcd->ed_tails[PIPE_INTERRUPT];

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ed intr", ahcd->ed_tails[PIPE_INTERRUPT], 1);
	admhc_dump_ed(ahcd, "ed isoc", ahcd->ed_tails[PIPE_ISOCHRONOUS], 1);
	admhc_dump_ed(ahcd, "ed ctrl", ahcd->ed_tails[PIPE_CONTROL], 1);
	admhc_dump_ed(ahcd, "ed bulk", ahcd->ed_tails[PIPE_BULK], 1);
#endif

	return 0;

err:
	admhc_eds_cleanup(ahcd);
	return -ENOMEM;
}
static int ehci_td_buffer(struct qTD *td, void *buf, size_t sz)
{
	uint32_t delta, next;
	uint32_t addr = (uint32_t)buf;
	size_t rsz = roundup(sz, 32);
	int idx;

	if (sz != rsz)
		debug("EHCI-HCD: Misaligned buffer size (%08zx)\n", sz);

	if (addr & 31)
		debug("EHCI-HCD: Misaligned buffer address (%p)\n", buf);

	idx = 0;
	while (idx < 5) {
		flush_dcache_range(addr, addr + rsz);
		td->qt_buffer[idx] = cpu_to_hc32(addr);
		td->qt_buffer_hi[idx] = 0;
		next = (addr + 4096) & ~4095;
		delta = next - addr;
		if (delta >= sz)
			break;
		sz -= delta;
		addr = next;
		idx++;
	}

	if (idx == 5) {
		debug("out of buffer pointers (%zu bytes left)\n", sz);
		return -1;
	}

	return 0;
}
static int ehci_td_buffer(struct qTD *td, void *buf, size_t sz)
{
	uint32_t addr, delta, next;
	int idx;

	addr = (uint32_t)buf;
	idx = 0;
	while (idx < 5) {
		td->qt_buffer[idx] = cpu_to_hc32(addr);
		td->qt_buffer_hi[idx] = 0;
		next = (addr + 4096) & ~4095;
		delta = next - addr;
		if (delta >= sz)
			break;
		sz -= delta;
		addr = next;
		idx++;
	}

	if (idx == 5) {
		debug("out of buffer pointers (%zu bytes left)\n", sz);
		return -1;
	}

	return 0;
}
static int ehci_td_buffer(struct qTD *td, void *buf, size_t sz)
{
	uint32_t addr, delta, next;
	size_t full_sz = sz;
	int idx;
	void *buf_noncache;

	addr = (uint32_t) buf;
	idx = 0;
	while (idx < 5) {
		td->qt_buffer[idx] = EHCI_virt_to_bus(cpu_to_hc32(addr));
		next = (addr + 4096) & ~4095;
		delta = next - addr;
		if (delta >= sz)
			break;
		sz -= delta;
		addr = next;
		idx++;
	}

	if (idx == 5) {
		debug("out of buffer pointers (%zu bytes left)\n", sz);
		return -1;
	}

	/*
	 * Write the whole buffer out through its uncached (KSEG1) alias so
	 * the controller sees up-to-date data; sz has been consumed by the
	 * page walk above, so use the saved full size here.
	 */
	buf_noncache = (void *)KSEG1ADDR(buf);
	memcpy(buf_noncache, buf, full_sz);

	return 0;
}
static int ehci_td_buffer(struct qTD *td, void *buf, size_t sz)
{
	uint32_t delta, next;
	uint32_t addr = (unsigned long)buf;
	int idx;

	if (addr != ALIGN(addr, ARCH_DMA_MINALIGN))
		debug("EHCI-HCD: Misaligned buffer address (%p)\n", buf);

	flush_dcache_range(addr, ALIGN(addr + sz, ARCH_DMA_MINALIGN));

	idx = 0;
	while (idx < QT_BUFFER_CNT) {
		td->qt_buffer[idx] = cpu_to_hc32(addr);
		td->qt_buffer_hi[idx] = 0;
		next = (addr + EHCI_PAGE_SIZE) & ~(EHCI_PAGE_SIZE - 1);
		delta = next - addr;
		if (delta >= sz)
			break;
		sz -= delta;
		addr = next;
		idx++;
	}

	if (idx == QT_BUFFER_CNT) {
		printf("out of buffer pointers (%zu bytes left)\n", sz);
		return -1;
	}

	return 0;
}
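/*
 * Worked illustration of the page walk above (standalone, not part of any
 * driver here): an arbitrarily aligned buffer is mapped onto up to five 4 KiB
 * page pointers; only the first pointer keeps an offset, the rest are
 * page-aligned. For example, addr = 0x80001234 and sz = 6000 fit in two
 * pointers, 0x80001234 and 0x80002000. The EX_ names are illustrative only.
 */
#define EX_PAGE_SIZE	4096UL
#define EX_BUFFER_CNT	5

static int example_count_page_pointers(unsigned long addr, size_t sz)
{
	int idx;

	for (idx = 0; idx < EX_BUFFER_CNT; idx++) {
		unsigned long next = (addr + EX_PAGE_SIZE) & ~(EX_PAGE_SIZE - 1);
		unsigned long delta = next - addr;

		if (delta >= sz)
			return idx + 1;	/* fits in idx + 1 buffer pointers */
		sz -= delta;
		addr = next;
	}
	return -1;			/* would need more than EX_BUFFER_CNT pointers */
}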
void reinit_ehci_headers(void)
{
	init_qh_and_qtd();
	create_qtd_dummy();

	ehci->async = qh_pointer[0];
	ehci->asyncqh = qh_pointer[1];
	in_qh = qh_pointer[2];
	out_qh = qh_pointer[3];
	dummy_qh = qh_pointer[4];

	ehci_dma_unmap_bidir((dma_addr_t)ehci->async, sizeof(struct ehci_qh));
	ehci->async->ehci = ehci;
	ehci->async->qtd_head = NULL;
	ehci->async->qh_dma = ehci_virt_to_dma(ehci->async);
	ehci->async->hw_next = QH_NEXT(dummy_qh->qh_dma /* ehci->async->qh_dma */);
	ehci->async->hw_info1 = cpu_to_hc32(QH_HEAD);
	ehci->async->hw_info2 = cpu_to_hc32(0);
	ehci->async->hw_token = cpu_to_hc32(QTD_STS_HALT);
	ehci->async->hw_qtd_next = EHCI_LIST_END();
	ehci->async->hw_alt_next = EHCI_LIST_END(); /* QTD_NEXT(get_qtd_dummy()); */
	ehci_dma_map_bidir(ehci->async, sizeof(struct ehci_qh));

	ehci_dma_unmap_bidir((dma_addr_t)ehci->asyncqh, sizeof(struct ehci_qh));
	ehci->asyncqh->ehci = ehci;
	ehci->asyncqh->qtd_head = NULL;
	ehci->asyncqh->qh_dma = ehci_virt_to_dma(ehci->asyncqh);

	ehci_dma_unmap_bidir((dma_addr_t)in_qh, sizeof(struct ehci_qh));
	in_qh->ehci = ehci;
	in_qh->qtd_head = NULL;
	in_qh->qh_dma = ehci_virt_to_dma(in_qh);
	ehci_dma_map_bidir(in_qh, sizeof(struct ehci_qh));

	ehci_dma_unmap_bidir((dma_addr_t)out_qh, sizeof(struct ehci_qh));
	out_qh->ehci = ehci;
	out_qh->qtd_head = NULL;
	out_qh->qh_dma = ehci_virt_to_dma(out_qh);
	ehci_dma_map_bidir(out_qh, sizeof(struct ehci_qh));
}
static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
				 dma_addr_t dma)
{
	memset(qtd, 0, sizeof *qtd);
	qtd->qtd_dma = dma;
	qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END(ehci);
	qtd->hw_alt_next = EHCI_LIST_END(ehci);
	INIT_LIST_HEAD(&qtd->qtd_list);
}
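/*
 * For context, a minimal sketch of how ehci_qtd_init() is typically reached:
 * allocate a qtd from the DMA pool and initialise it halted, with terminated
 * links, so the controller ignores it until it is filled in. This mirrors the
 * usual ehci_qtd_alloc() pattern, but the helper name and pool field used
 * here are assumptions rather than this exact kernel's code.
 */
static struct ehci_qtd *example_qtd_alloc(struct ehci_hcd *ehci, gfp_t flags)
{
	struct ehci_qtd *qtd;
	dma_addr_t dma;

	qtd = dma_pool_alloc(ehci->qtd_pool, flags, &dma);	/* assumed pool */
	if (qtd)
		ehci_qtd_init(ehci, qtd, dma);	/* halted, list links terminated */
	return qtd;
}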
static int ehci_td_buffer(struct qTD *td, void *buf, size_t sz)
{
#ifndef CONFIG_OCTEON
	uint32_t addr, delta, next;
#else
	dma_addr_t addr, delta, next;
#endif
	int idx;

#ifdef CONFIG_OCTEON
	addr = virt_to_phys(buf);
#else
	addr = (uint32_t)buf;
#endif
	idx = 0;
	while (idx < 5) {
#ifdef CONFIG_OCTEON
		td->qt_buffer_hi[idx] = cpu_to_hc32((u32)((addr >> 16) >> 16));
		td->qt_buffer[idx] = cpu_to_hc32((u32)addr);
		next = (addr + 4096) & ~4095ull;
		debug("%s: idx: %d, buf ptr: 0x%llx (%p), next: 0x%llx, size: %zu\n",
		      __func__, idx, (u64)addr, buf, (u64)next, sz);
#else
		td->qt_buffer[idx] = cpu_to_hc32(addr);
		td->qt_buffer_hi[idx] = 0;
		next = (addr + 4096) & ~4095;
#endif
		delta = next - addr;
		if (delta >= sz)
			break;
		sz -= delta;
		addr = next;
		idx++;
	}

	if (idx == 5) {
		debug("out of buffer pointers (%zu bytes left)\n", sz);
		return -1;
	}

	return 0;
}
static void ehci_update_endpt2_dev_n_port(struct usb_device *udev,
					  struct QH *qh)
{
	struct usb_device *ttdev;
	int parent_devnum;

	if (udev->speed != USB_SPEED_LOW && udev->speed != USB_SPEED_FULL)
		return;

	/*
	 * For full / low speed devices we need to get the devnum and portnr
	 * of the tt, i.e. of the first upstream USB-2 hub; there may be
	 * USB-1 hubs in the tree before that one!
	 */
#ifdef CONFIG_DM_USB
	/*
	 * When called from usb-uclass.c: usb_scan_device() udev->dev points
	 * to the parent udevice, not the actual udevice belonging to the
	 * udev as the device is not instantiated yet. So when searching
	 * for the first usb-2 parent start with udev->dev not
	 * udev->dev->parent.
	 */
	struct udevice *parent;
	struct usb_device *uparent;

	ttdev = udev;
	parent = udev->dev;
	uparent = dev_get_parentdata(parent);

	while (uparent->speed != USB_SPEED_HIGH) {
		struct udevice *dev = parent;

		if (device_get_uclass_id(dev->parent) != UCLASS_USB_HUB) {
			printf("ehci: Error cannot find high-speed parent of usb-1 device\n");
			return;
		}

		ttdev = dev_get_parentdata(dev);
		parent = dev->parent;
		uparent = dev_get_parentdata(parent);
	}
	parent_devnum = uparent->devnum;
#else
	ttdev = udev;
	while (ttdev->parent && ttdev->parent->speed != USB_SPEED_HIGH)
		ttdev = ttdev->parent;
	if (!ttdev->parent)
		return;
	parent_devnum = ttdev->parent->devnum;
#endif

	qh->qh_endpt2 |= cpu_to_hc32(QH_ENDPT2_PORTNUM(ttdev->portnr) |
				     QH_ENDPT2_HUBADDR(parent_devnum));
}
static void td_free(struct ohci_hcd *hc, struct td *td)
{
	struct td **prev = &hc->td_hash[TD_HASH_FUNC(td->td_dma)];

	while (*prev && *prev != td)
		prev = &(*prev)->td_hash;
	if (*prev)
		*prev = td->td_hash;
	else if ((td->hwINFO & cpu_to_hc32(hc, TD_DONE)) != 0)
		ohci_dbg(hc, "no hash for td %p\n", td);
	dma_pool_free(hc->td_cache, td, td->td_dma);
}
static void ehci_update_endpt2_dev_n_port(struct usb_device *udev,
					  struct QH *qh)
{
	uint8_t portnr = 0;
	uint8_t hubaddr = 0;

	if (udev->speed != USB_SPEED_LOW && udev->speed != USB_SPEED_FULL)
		return;

	usb_find_usb2_hub_address_port(udev, &hubaddr, &portnr);

	qh->qh_endpt2 |= cpu_to_hc32(QH_ENDPT2_PORTNUM(portnr) |
				     QH_ENDPT2_HUBADDR(hubaddr));
}
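/*
 * For orientation: the two macros used above pack the split-transaction hub
 * address and TT port number into the second QH dword (EHCI spec 3.6,
 * "Endpoint Capabilities"). The shifts below follow that register layout in
 * the usual U-Boot ehci.h style, but are reproduced here purely as an
 * illustrative sketch (the EX_ names are hypothetical).
 */
#define EX_QH_ENDPT2_PORTNUM(x)		(((x) & 0x7f) << 23)	/* TT port number */
#define EX_QH_ENDPT2_HUBADDR(x)		(((x) & 0x7f) << 16)	/* TT hub address */

/*
 * e.g. a full-speed device on port 3 of the high-speed hub at address 2:
 *	qh->qh_endpt2 |= cpu_to_hc32(EX_QH_ENDPT2_PORTNUM(3) |
 *				     EX_QH_ENDPT2_HUBADDR(2));
 */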
/* TDs ... */
static struct td *td_alloc(struct ohci_hcd *hc, gfp_t mem_flags)
{
	dma_addr_t dma;
	struct td *td;

	td = dma_pool_alloc(hc->td_cache, mem_flags, &dma);
	if (td) {
		/* in case hc fetches it, make it look dead */
		memset(td, 0, sizeof *td);
		td->hwNextTD = cpu_to_hc32(hc, dma);
		td->td_dma = dma;
		/* hashed in td_fill */
	}
	return td;
}
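/*
 * Context for td_alloc()/td_free(): the OHCI driver keeps a small hash table
 * keyed by a TD's DMA address so a TD taken off the hardware done list can be
 * mapped back to its struct td. A minimal sketch of such a hash function,
 * modelled on the TD_HASH_FUNC idea; the exact constants are an assumption,
 * not necessarily this driver's values.
 */
#define EX_TD_HASH_SIZE	64	/* power of two */

static inline unsigned int ex_td_hash(dma_addr_t td_dma)
{
	/* fold in higher-order bits so aligned TD addresses do not all collide */
	return (td_dma ^ (td_dma >> 6)) % EX_TD_HASH_SIZE;
}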
/* These routines rely on the bus (pci, platform, etc)
 * to handle powerdown and wakeup, and currently also on
 * transceivers that don't need any software attention to set up
 * the right sort of wakeup.
 *
 * They're also used for turning on/off the port when doing OTG.
 */
static int ehci_fsl_drv_suspend(struct platform_device *pdev,
				pm_message_t message)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	u32 tmp;

	hcd->state = HC_STATE_SUSPENDED;
	pdev->dev.power.power_state = PMSG_SUSPEND;

	if (hcd->driver->suspend)
		return hcd->driver->suspend(hcd, message);

	/* ignore non-host interrupts */
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);

	/* stop the controller */
	tmp = ehci_readl(ehci, &ehci->regs->command);
	tmp &= ~CMD_RUN;
	ehci_writel(ehci, tmp, &ehci->regs->command);

	/* save EHCI registers */
	usb_ehci_regs.command = ehci_readl(ehci, &ehci->regs->command);
	usb_ehci_regs.status = ehci_readl(ehci, &ehci->regs->status);
	usb_ehci_regs.intr_enable = ehci_readl(ehci, &ehci->regs->intr_enable);
	usb_ehci_regs.frame_index = ehci_readl(ehci, &ehci->regs->frame_index);
	usb_ehci_regs.segment = ehci_readl(ehci, &ehci->regs->segment);
	usb_ehci_regs.frame_list = ehci_readl(ehci, &ehci->regs->frame_list);
	usb_ehci_regs.async_next = ehci_readl(ehci, &ehci->regs->async_next);
	usb_ehci_regs.configured_flag =
		ehci_readl(ehci, &ehci->regs->configured_flag);
	usb_ehci_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]);

	/* clear the W1C bits */
	usb_ehci_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS);

	/* clear PP to cut power to the port */
	tmp = ehci_readl(ehci, &ehci->regs->port_status[0]);
	tmp &= ~PORT_POWER;
	ehci_writel(ehci, tmp, &ehci->regs->port_status[0]);

	return 0;
}
static void ehci_update_endpt2_dev_n_port(struct usb_device *dev,
					  struct QH *qh)
{
	struct usb_device *ttdev;

	if (dev->speed != USB_SPEED_LOW && dev->speed != USB_SPEED_FULL)
		return;

	/*
	 * For full / low speed devices we need to get the devnum and portnr
	 * of the tt, i.e. of the first upstream USB-2 hub; there may be
	 * USB-1 hubs in the tree before that one!
	 */
	ttdev = dev;
	while (ttdev->parent && ttdev->parent->speed != USB_SPEED_HIGH)
		ttdev = ttdev->parent;
	if (!ttdev->parent)
		return;

	qh->qh_endpt2 |= cpu_to_hc32(QH_ENDPT2_PORTNUM(ttdev->portnr) |
				     QH_ENDPT2_HUBADDR(ttdev->parent->devnum));
}
/* one-time init, only for memory state */
static int ehci_init(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	u32 temp;
	int retval;
	u32 hcc_params;

	spin_lock_init(&ehci->lock);

	init_timer(&ehci->watchdog);
	ehci->watchdog.function = ehci_watchdog;
	ehci->watchdog.data = (unsigned long) ehci;

	init_timer(&ehci->iaa_watchdog);
	ehci->iaa_watchdog.function = ehci_iaa_watchdog;
	ehci->iaa_watchdog.data = (unsigned long) ehci;

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	ehci->periodic_size = DEFAULT_I_TDPS;
	INIT_LIST_HEAD(&ehci->cached_itd_list);
	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
		ehci->i_thresh = 8;
	else					// N microframes cached
		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	ehci->reclaim = NULL;
	ehci->next_uframe = -1;
	ehci->clock_frame = -1;

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	ehci->async->qh_next.qh = NULL;
	ehci->async->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
	ehci->async->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
	ehci->async->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
	ehci->async->hw_qtd_next = EHCI_LIST_END(ehci);
	ehci->async->qh_state = QH_STATE_LINKED;
	ehci->async->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * make problems: throughput reduction (!), data errors...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		ehci_dbg(ehci, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
		switch (EHCI_TUNE_FLS) {
		case 0:
			ehci->periodic_size = 1024;
			break;
		case 1:
			ehci->periodic_size = 512;
			break;
		case 2:
			ehci->periodic_size = 256;
			break;
		default:
			BUG();
		}
	}
	ehci->command = temp;

	return 0;
}
static int ehci_submit_async(struct usb_device *dev, unsigned long pipe, void *buffer, int length, struct devrequest *req) { ALLOC_ALIGN_BUFFER(struct QH, qh, 1, USB_DMA_MINALIGN); struct qTD *qtd; int qtd_count = 0; int qtd_counter = 0; volatile struct qTD *vtd; unsigned long ts; uint32_t *tdp; uint32_t endpt, maxpacket, token, usbsts; uint32_t c, toggle; uint32_t cmd; int timeout; int ret = 0; struct ehci_ctrl *ctrl = ehci_get_ctrl(dev); debug("dev=%p, pipe=%lx, buffer=%p, length=%d, req=%p\n", dev, pipe, buffer, length, req); if (req != NULL) debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n", req->request, req->request, req->requesttype, req->requesttype, le16_to_cpu(req->value), le16_to_cpu(req->value), le16_to_cpu(req->index)); #define PKT_ALIGN 512 /* * The USB transfer is split into qTD transfers. Eeach qTD transfer is * described by a transfer descriptor (the qTD). The qTDs form a linked * list with a queue head (QH). * * Each qTD transfer starts with a new USB packet, i.e. a packet cannot * have its beginning in a qTD transfer and its end in the following * one, so the qTD transfer lengths have to be chosen accordingly. * * Each qTD transfer uses up to QT_BUFFER_CNT data buffers, mapped to * single pages. The first data buffer can start at any offset within a * page (not considering the cache-line alignment issues), while the * following buffers must be page-aligned. There is no alignment * constraint on the size of a qTD transfer. */ if (req != NULL) /* 1 qTD will be needed for SETUP, and 1 for ACK. */ qtd_count += 1 + 1; if (length > 0 || req == NULL) { /* * Determine the qTD transfer size that will be used for the * data payload (not considering the first qTD transfer, which * may be longer or shorter, and the final one, which may be * shorter). * * In order to keep each packet within a qTD transfer, the qTD * transfer size is aligned to PKT_ALIGN, which is a multiple of * wMaxPacketSize (except in some cases for interrupt transfers, * see comment in submit_int_msg()). * * By default, i.e. if the input buffer is aligned to PKT_ALIGN, * QT_BUFFER_CNT full pages will be used. */ int xfr_sz = QT_BUFFER_CNT; /* * However, if the input buffer is not aligned to PKT_ALIGN, the * qTD transfer size will be one page shorter, and the first qTD * data buffer of each transfer will be page-unaligned. */ if ((unsigned long)buffer & (PKT_ALIGN - 1)) xfr_sz--; /* Convert the qTD transfer size to bytes. */ xfr_sz *= EHCI_PAGE_SIZE; /* * Approximate by excess the number of qTDs that will be * required for the data payload. The exact formula is way more * complicated and saves at most 2 qTDs, i.e. a total of 128 * bytes. */ qtd_count += 2 + length / xfr_sz; } /* * Threshold value based on the worst-case total size of the allocated qTDs for * a mass-storage transfer of 65535 blocks of 512 bytes. */ #if CONFIG_SYS_MALLOC_LEN <= 64 + 128 * 1024 #warning CONFIG_SYS_MALLOC_LEN may be too small for EHCI #endif qtd = memalign(USB_DMA_MINALIGN, qtd_count * sizeof(struct qTD)); if (qtd == NULL) { printf("unable to allocate TDs\n"); return -1; } memset(qh, 0, sizeof(struct QH)); memset(qtd, 0, qtd_count * sizeof(*qtd)); toggle = usb_gettoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe)); /* * Setup QH (3.6 in ehci-r10.pdf) * * qh_link ................. 03-00 H * qh_endpt1 ............... 07-04 H * qh_endpt2 ............... 0B-08 H * - qh_curtd * qh_overlay.qt_next ...... 
13-10 H * - qh_overlay.qt_altnext */ qh->qh_link = cpu_to_hc32((unsigned long)&ctrl->qh_list | QH_LINK_TYPE_QH); c = (dev->speed != USB_SPEED_HIGH) && !usb_pipeendpoint(pipe); maxpacket = usb_maxpacket(dev, pipe); endpt = QH_ENDPT1_RL(8) | QH_ENDPT1_C(c) | QH_ENDPT1_MAXPKTLEN(maxpacket) | QH_ENDPT1_H(0) | QH_ENDPT1_DTC(QH_ENDPT1_DTC_DT_FROM_QTD) | QH_ENDPT1_EPS(ehci_encode_speed(dev->speed)) | QH_ENDPT1_ENDPT(usb_pipeendpoint(pipe)) | QH_ENDPT1_I(0) | QH_ENDPT1_DEVADDR(usb_pipedevice(pipe)); qh->qh_endpt1 = cpu_to_hc32(endpt); endpt = QH_ENDPT2_MULT(1) | QH_ENDPT2_UFCMASK(0) | QH_ENDPT2_UFSMASK(0); qh->qh_endpt2 = cpu_to_hc32(endpt); ehci_update_endpt2_dev_n_port(dev, qh); qh->qh_overlay.qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); qh->qh_overlay.qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); tdp = &qh->qh_overlay.qt_next; if (req != NULL) { /* * Setup request qTD (3.5 in ehci-r10.pdf) * * qt_next ................ 03-00 H * qt_altnext ............. 07-04 H * qt_token ............... 0B-08 H * * [ buffer, buffer_hi ] loaded with "req". */ qtd[qtd_counter].qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); qtd[qtd_counter].qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); token = QT_TOKEN_DT(0) | QT_TOKEN_TOTALBYTES(sizeof(*req)) | QT_TOKEN_IOC(0) | QT_TOKEN_CPAGE(0) | QT_TOKEN_CERR(3) | QT_TOKEN_PID(QT_TOKEN_PID_SETUP) | QT_TOKEN_STATUS(QT_TOKEN_STATUS_ACTIVE); qtd[qtd_counter].qt_token = cpu_to_hc32(token); if (ehci_td_buffer(&qtd[qtd_counter], req, sizeof(*req))) { printf("unable to construct SETUP TD\n"); goto fail; } /* Update previous qTD! */ *tdp = cpu_to_hc32((unsigned long)&qtd[qtd_counter]); tdp = &qtd[qtd_counter++].qt_next; toggle = 1; } if (length > 0 || req == NULL) { uint8_t *buf_ptr = buffer; int left_length = length; do { /* * Determine the size of this qTD transfer. By default, * QT_BUFFER_CNT full pages can be used. */ int xfr_bytes = QT_BUFFER_CNT * EHCI_PAGE_SIZE; /* * However, if the input buffer is not page-aligned, the * portion of the first page before the buffer start * offset within that page is unusable. */ xfr_bytes -= (unsigned long)buf_ptr & (EHCI_PAGE_SIZE - 1); /* * In order to keep each packet within a qTD transfer, * align the qTD transfer size to PKT_ALIGN. */ xfr_bytes &= ~(PKT_ALIGN - 1); /* * This transfer may be shorter than the available qTD * transfer size that has just been computed. */ xfr_bytes = min(xfr_bytes, left_length); /* * Setup request qTD (3.5 in ehci-r10.pdf) * * qt_next ................ 03-00 H * qt_altnext ............. 07-04 H * qt_token ............... 0B-08 H * * [ buffer, buffer_hi ] loaded with "buffer". */ qtd[qtd_counter].qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); qtd[qtd_counter].qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); token = QT_TOKEN_DT(toggle) | QT_TOKEN_TOTALBYTES(xfr_bytes) | QT_TOKEN_IOC(req == NULL) | QT_TOKEN_CPAGE(0) | QT_TOKEN_CERR(3) | QT_TOKEN_PID(usb_pipein(pipe) ? QT_TOKEN_PID_IN : QT_TOKEN_PID_OUT) | QT_TOKEN_STATUS(QT_TOKEN_STATUS_ACTIVE); qtd[qtd_counter].qt_token = cpu_to_hc32(token); if (ehci_td_buffer(&qtd[qtd_counter], buf_ptr, xfr_bytes)) { printf("unable to construct DATA TD\n"); goto fail; } /* Update previous qTD! */ *tdp = cpu_to_hc32((unsigned long)&qtd[qtd_counter]); tdp = &qtd[qtd_counter++].qt_next; /* * Data toggle has to be adjusted since the qTD transfer * size is not always an even multiple of * wMaxPacketSize. 
*/ if ((xfr_bytes / maxpacket) & 1) toggle ^= 1; buf_ptr += xfr_bytes; left_length -= xfr_bytes; } while (left_length > 0); } if (req != NULL) { /* * Setup request qTD (3.5 in ehci-r10.pdf) * * qt_next ................ 03-00 H * qt_altnext ............. 07-04 H * qt_token ............... 0B-08 H */ qtd[qtd_counter].qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); qtd[qtd_counter].qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); token = QT_TOKEN_DT(1) | QT_TOKEN_TOTALBYTES(0) | QT_TOKEN_IOC(1) | QT_TOKEN_CPAGE(0) | QT_TOKEN_CERR(3) | QT_TOKEN_PID(usb_pipein(pipe) ? QT_TOKEN_PID_OUT : QT_TOKEN_PID_IN) | QT_TOKEN_STATUS(QT_TOKEN_STATUS_ACTIVE); qtd[qtd_counter].qt_token = cpu_to_hc32(token); /* Update previous qTD! */ *tdp = cpu_to_hc32((unsigned long)&qtd[qtd_counter]); tdp = &qtd[qtd_counter++].qt_next; } ctrl->qh_list.qh_link = cpu_to_hc32((unsigned long)qh | QH_LINK_TYPE_QH); /* Flush dcache */ flush_dcache_range((unsigned long)&ctrl->qh_list, ALIGN_END_ADDR(struct QH, &ctrl->qh_list, 1)); flush_dcache_range((unsigned long)qh, ALIGN_END_ADDR(struct QH, qh, 1)); flush_dcache_range((unsigned long)qtd, ALIGN_END_ADDR(struct qTD, qtd, qtd_count)); /* Set async. queue head pointer. */ ehci_writel(&ctrl->hcor->or_asynclistaddr, (unsigned long)&ctrl->qh_list); usbsts = ehci_readl(&ctrl->hcor->or_usbsts); ehci_writel(&ctrl->hcor->or_usbsts, (usbsts & 0x3f)); /* Enable async. schedule. */ cmd = ehci_readl(&ctrl->hcor->or_usbcmd); cmd |= CMD_ASE; ehci_writel(&ctrl->hcor->or_usbcmd, cmd); ret = handshake((uint32_t *)&ctrl->hcor->or_usbsts, STS_ASS, STS_ASS, 100 * 1000); if (ret < 0) { printf("EHCI fail timeout STS_ASS set\n"); goto fail; } /* Wait for TDs to be processed. */ ts = get_timer(0); vtd = &qtd[qtd_counter - 1]; timeout = USB_TIMEOUT_MS(pipe); do { /* Invalidate dcache */ invalidate_dcache_range((unsigned long)&ctrl->qh_list, ALIGN_END_ADDR(struct QH, &ctrl->qh_list, 1)); invalidate_dcache_range((unsigned long)qh, ALIGN_END_ADDR(struct QH, qh, 1)); invalidate_dcache_range((unsigned long)qtd, ALIGN_END_ADDR(struct qTD, qtd, qtd_count)); token = hc32_to_cpu(vtd->qt_token); if (!(QT_TOKEN_GET_STATUS(token) & QT_TOKEN_STATUS_ACTIVE)) break; WATCHDOG_RESET(); } while (get_timer(ts) < timeout); /* * Invalidate the memory area occupied by buffer * Don't try to fix the buffer alignment, if it isn't properly * aligned it's upper layer's fault so let invalidate_dcache_range() * vow about it. But we have to fix the length as it's actual * transfer length and can be unaligned. This is potentially * dangerous operation, it's responsibility of the calling * code to make sure enough space is reserved. */ invalidate_dcache_range((unsigned long)buffer, ALIGN((unsigned long)buffer + length, ARCH_DMA_MINALIGN)); /* Check that the TD processing happened */ if (QT_TOKEN_GET_STATUS(token) & QT_TOKEN_STATUS_ACTIVE) printf("EHCI timed out on TD - token=%#x\n", token); /* Disable async schedule. 
*/ cmd = ehci_readl(&ctrl->hcor->or_usbcmd); cmd &= ~CMD_ASE; ehci_writel(&ctrl->hcor->or_usbcmd, cmd); ret = handshake((uint32_t *)&ctrl->hcor->or_usbsts, STS_ASS, 0, 100 * 1000); if (ret < 0) { printf("EHCI fail timeout STS_ASS reset\n"); goto fail; } token = hc32_to_cpu(qh->qh_overlay.qt_token); if (!(QT_TOKEN_GET_STATUS(token) & QT_TOKEN_STATUS_ACTIVE)) { debug("TOKEN=%#x\n", token); switch (QT_TOKEN_GET_STATUS(token) & ~(QT_TOKEN_STATUS_SPLITXSTATE | QT_TOKEN_STATUS_PERR)) { case 0: toggle = QT_TOKEN_GET_DT(token); usb_settoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe), toggle); dev->status = 0; break; case QT_TOKEN_STATUS_HALTED: dev->status = USB_ST_STALLED; break; case QT_TOKEN_STATUS_ACTIVE | QT_TOKEN_STATUS_DATBUFERR: case QT_TOKEN_STATUS_DATBUFERR: dev->status = USB_ST_BUF_ERR; break; case QT_TOKEN_STATUS_HALTED | QT_TOKEN_STATUS_BABBLEDET: case QT_TOKEN_STATUS_BABBLEDET: dev->status = USB_ST_BABBLE_DET; break; default: dev->status = USB_ST_CRC_ERR; if (QT_TOKEN_GET_STATUS(token) & QT_TOKEN_STATUS_HALTED) dev->status |= USB_ST_STALLED; break; } dev->act_len = length - QT_TOKEN_GET_TOTALBYTES(token); } else { dev->act_len = 0; #ifndef CONFIG_USB_EHCI_FARADAY debug("dev=%u, usbsts=%#x, p[1]=%#x, p[2]=%#x\n", dev->devnum, ehci_readl(&ctrl->hcor->or_usbsts), ehci_readl(&ctrl->hcor->or_portsc[0]), ehci_readl(&ctrl->hcor->or_portsc[1])); #endif } free(qtd); return (dev->status != USB_ST_NOT_PROC) ? 0 : -1; fail: free(qtd); return -1; }
void init_qh_and_qtd(void)
{
	int n;
	struct ehci_qtd *qtd;
	struct ehci_qh *qh;

	if (!qh_header) {
		/* u32 mem = (u32) USB_Alloc(4096*3); */
		/* mem = (mem + 4095) & ~4095; */
		qh_header = (struct ehci_qh *) ehci->async;	/* mem */
		qtd_header = (struct ehci_qtd *) ehci->qtds[0];
	}

	qtd = qtd_header;	/* = (struct ehci_qtd *) (((u32)qh_header) + 4096); */

	for (n = 0; n < EHCI_MAX_QTD; n++) {
		ehci->qtds[n] = qtd;
		memset((void *) ehci->qtds[n], 0, sizeof(struct ehci_qtd));
		ehci_dma_map_bidir((void *) ehci->qtds[n], sizeof(struct ehci_qtd));
		qtd = (struct ehci_qtd *) ((((u32) qtd) + sizeof(struct ehci_qtd) + 31) & ~31);
	}

	for (n = 0; n < EHCI_MAX_QTD; n++) {
		memset((void *) qtd, 0, sizeof(struct ehci_qtd));
		ehci_dma_map_bidir((void *) qtd, sizeof(struct ehci_qtd));
		qtd = (struct ehci_qtd *) ((((u32) qtd) + sizeof(struct ehci_qtd) + 31) & ~31);
	}

	qtd_dummy_first = qtd;

	qh = qh_header;
	for (n = 0; n < 6; n++) {
		qh_pointer[n] = qh;
		memset((void *) qh_pointer[n], 0, sizeof(struct ehci_qh));
		qh->qh_dma = ehci_virt_to_dma(qh);
		qh_pointer[n]->hw_info1 = cpu_to_hc32(QH_HEAD * (n != 0));
		qh_pointer[n]->hw_info2 = cpu_to_hc32(0);
		qh_pointer[n]->hw_token = cpu_to_hc32(QTD_STS_HALT);
		qh = (struct ehci_qh *) ((((u32) qh) + sizeof(struct ehci_qh) + 31) & ~31);
		qh_pointer[n]->hw_next = QH_NEXT(ehci_virt_to_dma(qh));
		qh_pointer[n]->hw_qtd_next = EHCI_LIST_END();
		qh_pointer[n]->hw_alt_next = EHCI_LIST_END();
		ehci_dma_map_bidir((void *) qh_pointer[n], sizeof(struct ehci_qh));
	}

	n--;
	qh_pointer[n]->hw_next = QH_NEXT(ehci_virt_to_dma(qh_header));
	ehci_dma_map_bidir((void *) qh_pointer[n], sizeof(struct ehci_qh));
}
static unsigned short periodic_usecs(struct ehci_hcd *ehci, unsigned frame,
				     unsigned uframe)
{
	__hc32 *hw_p = &ehci->periodic[frame];
	union ehci_shadow *q = &ehci->pshadow[frame];
	unsigned usecs = 0;
	struct ehci_qh_hw *hw;

	while (q->ptr) {
		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
		case Q_TYPE_QH:
			hw = q->qh->hw;
			/* is it in the S-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
				usecs += q->qh->usecs;
			/* ... or in the C-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &hw->hw_next;
			q = &q->qh->qh_next;
			break;
		/* case Q_TYPE_FSTN: */
		default:
			if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
				ehci_dbg(ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			if (q->itd->hw_transaction[uframe])
				usecs += q->itd->stream->usecs;
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* start-split in this uframe? */
			if (q->sitd->hw_uframe & cpu_to_hc32(ehci, 1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						cpu_to_hc32(ehci, 1 << 31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO(188);
			}
			/* ... complete-split? */
			if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
					1 << (8 + uframe))) {
				usecs += q->sitd->stream->c_usecs;
			}
			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}
#ifdef DEBUG
	if (usecs > 100)
		ehci_err(ehci, "uframe %d sched overrun: %d usecs\n",
			 frame * 8 + uframe, usecs);
#endif
	return usecs;
}
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}
static int ehci_fsl_drv_suspend(struct platform_device *pdev, pm_message_t message) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); u32 tmp; struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; #ifdef DEBUG u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE); mode &= USBMODE_CM_MASK; tmp = ehci_readl(ehci, hcd->regs + 0x140); /* usbcmd */ printk(KERN_DEBUG "%s('%s'): suspend=%d already_suspended=%d " "mode=%d usbcmd %08x\n", __func__, pdata->name, pdata->suspended, pdata->already_suspended, mode, tmp); #endif /* * If the controller is already suspended, then this must be a * PM suspend. Remember this fact, so that we will leave the * controller suspended at PM resume time. */ if (pdata->suspended) { pr_debug("%s: already suspended, leaving early\n", __func__); pdata->already_suspended = 1; return 0; } pr_debug("%s: suspending...\n", __func__); printk(KERN_INFO "USB Host suspended\n"); hcd->state = HC_STATE_SUSPENDED; pdev->dev.power.power_state = PMSG_SUSPEND; /* ignore non-host interrupts */ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); /* stop the controller */ tmp = ehci_readl(ehci, &ehci->regs->command); tmp &= ~CMD_RUN; ehci_writel(ehci, tmp, &ehci->regs->command); /* save EHCI registers */ pdata->pm_command = ehci_readl(ehci, &ehci->regs->command); pdata->pm_command &= ~CMD_RUN; pdata->pm_status = ehci_readl(ehci, &ehci->regs->status); pdata->pm_intr_enable = ehci_readl(ehci, &ehci->regs->intr_enable); pdata->pm_frame_index = ehci_readl(ehci, &ehci->regs->frame_index); pdata->pm_segment = ehci_readl(ehci, &ehci->regs->segment); pdata->pm_frame_list = ehci_readl(ehci, &ehci->regs->frame_list); pdata->pm_async_next = ehci_readl(ehci, &ehci->regs->async_next); pdata->pm_configured_flag = ehci_readl(ehci, &ehci->regs->configured_flag); pdata->pm_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]); /* clear the W1C bits */ pdata->pm_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS); pdata->suspended = 1; #if defined(CONFIG_USB_EHCI_ARC_H2_WAKE_UP) || \ defined(CONFIG_USB_EHCI_ARC_OTG_WAKE_UP) /* enable remote wake up irq */ usb_wakeup_set(&(pdev->dev), 1); /* We CAN NOT enable wake up by connetion and disconnection * concurrently */ tmp = ehci_readl(ehci, &ehci->regs->port_status[0]); /* if there is no usb device connectted */ if (tmp & PORT_CONNECT) { /* enable wake up by usb device disconnection */ tmp |= PORT_WKDISC_E; tmp &= ~(PORT_WKOC_E | PORT_WKCONN_E); } else { /* enable wake up by usb device insertion */ tmp |= PORT_WKCONN_E; tmp &= ~(PORT_WKOC_E | PORT_WKDISC_E); } ehci_writel(ehci, tmp, &ehci->regs->port_status[0]); /* Set the port into suspend */ tmp = ehci_readl(ehci, &ehci->regs->port_status[0]); tmp |= PORT_SUSPEND; ehci_writel(ehci, tmp, &ehci->regs->port_status[0]); /* Disable PHY clock */ tmp = ehci_readl(ehci, &ehci->regs->port_status[0]); tmp |= PORT_PHCD; ehci_writel(ehci, tmp, &ehci->regs->port_status[0]); #else /* clear PP to cut power to the port */ tmp = ehci_readl(ehci, &ehci->regs->port_status[0]); tmp &= ~PORT_POWER; ehci_writel(ehci, tmp, &ehci->regs->port_status[0]); #endif return 0; }
static int ehci_submit_async(struct usb_device *dev, unsigned long pipe, void *buffer, int length, struct devrequest *req) { struct QH *qh; struct qTD *td; volatile struct qTD *vtd; unsigned long ts; uint32_t *tdp; uint32_t endpt, token, usbsts; uint32_t c, toggle; uint32_t cmd; int timeout; int ret = 0; debug("dev=%p, pipe=%lx, buffer=%p, length=%d, req=%p\n", dev, pipe, buffer, length, req); if (req != NULL) debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n", req->request, req->request, req->requesttype, req->requesttype, le16_to_cpu(req->value), le16_to_cpu(req->value), le16_to_cpu(req->index)); qh = ehci_alloc(sizeof(struct QH), 32); if (qh == NULL) { debug("unable to allocate QH\n"); return -1; } qh->qh_link = cpu_to_hc32((uint32_t)&qh_list | QH_LINK_TYPE_QH); c = (usb_pipespeed(pipe) != USB_SPEED_HIGH && usb_pipeendpoint(pipe) == 0) ? 1 : 0; endpt = (8 << 28) | (c << 27) | (usb_maxpacket(dev, pipe) << 16) | (0 << 15) | (1 << 14) | (usb_pipespeed(pipe) << 12) | (usb_pipeendpoint(pipe) << 8) | (0 << 7) | (usb_pipedevice(pipe) << 0); qh->qh_endpt1 = cpu_to_hc32(endpt); endpt = (1 << 30) | (dev->portnr << 23) | (dev->parent->devnum << 16) | (0 << 8) | (0 << 0); qh->qh_endpt2 = cpu_to_hc32(endpt); qh->qh_overlay.qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); td = NULL; tdp = &qh->qh_overlay.qt_next; toggle = usb_gettoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe)); if (req != NULL) { td = ehci_alloc(sizeof(struct qTD), 32); if (td == NULL) { debug("unable to allocate SETUP td\n"); goto fail; } td->qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); td->qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); token = (0 << 31) | (sizeof(*req) << 16) | (0 << 15) | (0 << 12) | (3 << 10) | (2 << 8) | (0x80 << 0); td->qt_token = cpu_to_hc32(token); if (ehci_td_buffer(td, req, sizeof(*req)) != 0) { debug("unable construct SETUP td\n"); ehci_free(td, sizeof(*td)); goto fail; } *tdp = cpu_to_hc32((uint32_t) td); tdp = &td->qt_next; toggle = 1; } if (length > 0 || req == NULL) { td = ehci_alloc(sizeof(struct qTD), 32); if (td == NULL) { debug("unable to allocate DATA td\n"); goto fail; } td->qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); td->qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); token = (toggle << 31) | (length << 16) | ((req == NULL ? 1 : 0) << 15) | (0 << 12) | (3 << 10) | ((usb_pipein(pipe) ? 1 : 0) << 8) | (0x80 << 0); td->qt_token = cpu_to_hc32(token); if (ehci_td_buffer(td, buffer, length) != 0) { debug("unable construct DATA td\n"); ehci_free(td, sizeof(*td)); goto fail; } *tdp = cpu_to_hc32((uint32_t) td); tdp = &td->qt_next; } if (req != NULL) { td = ehci_alloc(sizeof(struct qTD), 32); if (td == NULL) { debug("unable to allocate ACK td\n"); goto fail; } td->qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); td->qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); token = (toggle << 31) | (0 << 16) | (1 << 15) | (0 << 12) | (3 << 10) | ((usb_pipein(pipe) ? 0 : 1) << 8) | (0x80 << 0); td->qt_token = cpu_to_hc32(token); *tdp = cpu_to_hc32((uint32_t) td); tdp = &td->qt_next; } qh_list.qh_link = cpu_to_hc32((uint32_t) qh | QH_LINK_TYPE_QH); /* Flush dcache */ ehci_flush_dcache(&qh_list); usbsts = ehci_readl(&hcor->or_usbsts); ehci_writel(&hcor->or_usbsts, (usbsts & 0x3f)); /* Enable async. schedule. */ cmd = ehci_readl(&hcor->or_usbcmd); cmd |= CMD_ASE; ehci_writel(&hcor->or_usbcmd, cmd); ret = handshake((uint32_t *)&hcor->or_usbsts, STD_ASS, STD_ASS, 100 * 1000); if (ret < 0) { printf("EHCI fail timeout STD_ASS set\n"); goto fail; } /* Wait for TDs to be processed. 
*/ ts = get_timer(0); vtd = td; timeout = USB_TIMEOUT_MS(pipe); do { /* Invalidate dcache */ ehci_invalidate_dcache(&qh_list); token = hc32_to_cpu(vtd->qt_token); if (!(token & 0x80)) break; WATCHDOG_RESET(); } while (get_timer(ts) < timeout); /* Check that the TD processing happened */ if (token & 0x80) { printf("EHCI timed out on TD - token=%#x\n", token); goto fail; } /* Disable async schedule. */ cmd = ehci_readl(&hcor->or_usbcmd); cmd &= ~CMD_ASE; ehci_writel(&hcor->or_usbcmd, cmd); ret = handshake((uint32_t *)&hcor->or_usbsts, STD_ASS, 0, 100 * 1000); if (ret < 0) { printf("EHCI fail timeout STD_ASS reset\n"); goto fail; } qh_list.qh_link = cpu_to_hc32((uint32_t)&qh_list | QH_LINK_TYPE_QH); token = hc32_to_cpu(qh->qh_overlay.qt_token); if (!(token & 0x80)) { debug("TOKEN=%#x\n", token); switch (token & 0xfc) { case 0: toggle = token >> 31; usb_settoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe), toggle); dev->status = 0; break; case 0x40: dev->status = USB_ST_STALLED; break; case 0xa0: case 0x20: dev->status = USB_ST_BUF_ERR; break; case 0x50: case 0x10: dev->status = USB_ST_BABBLE_DET; break; default: dev->status = USB_ST_CRC_ERR; if ((token & 0x40) == 0x40) dev->status |= USB_ST_STALLED; break; } dev->act_len = length - ((token >> 16) & 0x7fff); } else {
/* Do not free buffers associated with QHs, they're owned by someone else */ int EHCIDestroyIntQueue(struct usb_device *dev,struct int_queue *queue) { struct ehci_ctrl *ctrl = ehci_get_ctrl(dev); int result = -1; unsigned long timeout; struct QH *cur = NULL; DWORD dwFlags; struct int_queue* before = NULL, *current = NULL; //Remove it from the controller's pending list if it is in.It's a bit complicated since //the pending list is a one direction link list,and we also are not sure if the queue is //in list. __ENTER_CRITICAL_SECTION(NULL, dwFlags); if (NULL != ctrl->pIntQueueFirst) { if (queue == ctrl->pIntQueueFirst) { if (queue == ctrl->pIntQueueLast) { ctrl->pIntQueueFirst = NULL; ctrl->pIntQueueLast = NULL; } else { ctrl->pIntQueueFirst = queue->pNext; queue->pNext = NULL; } } else { before = ctrl->pIntQueueFirst; current = before->pNext; while (current && (current != queue)) { before = current; current = current->pNext; } if (queue == current) //Find the queue in list. { before->pNext = current->pNext; queue->pNext = NULL; if (NULL == current->pNext) //Last one. { ctrl->pIntQueueLast = before; } } } } __LEAVE_CRITICAL_SECTION(NULL, dwFlags); if (NULL != queue->pNext) { BUG(); } WaitForThisObject(ctrl->hMutex); if (ehci_disable_periodic(ctrl) < 0) { ReleaseMutex(ctrl->hMutex); debug("FATAL: periodic should never fail, but did"); goto out; } ctrl->periodic_schedules--; cur = &ctrl->periodic_queue; timeout = get_timer(0) + (500 / SYSTEM_TIME_SLICE); /* abort after 500ms */ while (!(cur->qh_link & cpu_to_hc32(QH_LINK_TERMINATE))) { debug("considering %p, with qh_link %x\r\n", cur, cur->qh_link); if (NEXT_QH(cur) == queue->first) { debug("found candidate. removing from chain\r\n"); cur->qh_link = queue->last->qh_link; flush_dcache_range((unsigned long)cur, ALIGN_END_ADDR(struct QH, cur, 1)); result = 0; break; } cur = NEXT_QH(cur); if (get_timer(0) > timeout) { ReleaseMutex(ctrl->hMutex); _hx_printf("Timeout destroying interrupt endpoint queue\r\n"); result = -1; goto out; } }
//Create and return an interrupt queue object. struct int_queue* EHCICreateIntQueue(struct usb_device *dev, unsigned long pipe, int queuesize, int elementsize, void *buffer, int interval) { struct ehci_ctrl *ctrl = ehci_get_ctrl(dev); struct int_queue *result = NULL; uint32_t i, toggle; struct QH *list = NULL; int cmd = 0; DWORD dwFlags; /* * Interrupt transfers requiring several transactions are not supported * because bInterval is ignored. * * Also, ehci_submit_async() relies on wMaxPacketSize being a power of 2 * <= PKT_ALIGN if several qTDs are required, while the USB * specification does not constrain this for interrupt transfers. That * means that ehci_submit_async() would support interrupt transfers * requiring several transactions only as long as the transfer size does * not require more than a single qTD. */ if (elementsize > usb_maxpacket(dev, pipe)) { printf("%s: xfers requiring several transactions are not supported.\r\n", "_ehci_create_int_queue"); return NULL; } if (usb_pipetype(pipe) != PIPE_INTERRUPT) { debug("non-interrupt pipe (type=%lu)", usb_pipetype(pipe)); return NULL; } /* limit to 4 full pages worth of data - * we can safely fit them in a single TD, * no matter the alignment */ if (elementsize >= 16384) { debug("too large elements for interrupt transfers\r\n"); return NULL; } result = malloc(sizeof(*result)); if (!result) { debug("ehci intr queue: out of memory\r\n"); goto fail1; } //Create EVENT object to synchronizing the access. result->hEvent = CreateEvent(FALSE); if (NULL == result->hEvent) { goto fail1; } result->dwTimeOut = 0; result->pNext = NULL; result->pOwnerThread = KernelThreadManager.lpCurrentKernelThread; result->QueueIntHandler = _ehciQueueIntHandler; result->pUsbDev = dev; result->dwStatus = INT_QUEUE_STATUS_INITIALIZED; result->elementsize = elementsize; result->pipe = pipe; result->first = memalign(USB_DMA_MINALIGN, sizeof(struct QH) * queuesize); if (!result->first) { debug("ehci intr queue: out of memory\r\n"); goto fail2; } debug("%s: Allocate %d QH(s) at %X.\r\n", __func__,queuesize,result->first); result->current = result->first; result->last = result->first + queuesize - 1; result->tds = memalign(USB_DMA_MINALIGN, sizeof(struct qTD) * queuesize); if (!result->tds) { debug("ehci intr queue: out of memory\r\n"); goto fail3; } debug("%s: Allocate %d qTD(s) at %X.\r\n", __func__,queuesize, result->tds); memset(result->first, 0, sizeof(struct QH) * queuesize); memset(result->tds, 0, sizeof(struct qTD) * queuesize); toggle = usb_gettoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe)); for (i = 0; i < (uint32_t)queuesize; i++) { struct QH *qh = result->first + i; struct qTD *td = result->tds + i; void **buf = &qh->buffer; qh->qh_link = cpu_to_hc32((unsigned long)(qh + 1) | QH_LINK_TYPE_QH); if (i == queuesize - 1) qh->qh_link = cpu_to_hc32(QH_LINK_TERMINATE); qh->qh_overlay.qt_next = cpu_to_hc32((unsigned long)td); qh->qh_overlay.qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); qh->qh_endpt1 = cpu_to_hc32((0 << 28) | /* No NAK reload (ehci 4.9) */ (usb_maxpacket(dev, pipe) << 16) | /* MPS */ (1 << 14) | QH_ENDPT1_EPS(ehci_encode_speed(dev->speed)) | (usb_pipeendpoint(pipe) << 8) | /* Endpoint Number */ (usb_pipedevice(pipe) << 0)); qh->qh_endpt2 = cpu_to_hc32((1 << 30) | /* 1 Tx per mframe */ (1 << 0)); /* S-mask: microframe 0 */ if (dev->speed == USB_SPEED_LOW || dev->speed == USB_SPEED_FULL) { /* C-mask: microframes 2-4 */ qh->qh_endpt2 |= cpu_to_hc32((0x1c << 8)); } ehci_update_endpt2_dev_n_port(dev, qh); td->qt_next = 
cpu_to_hc32(QT_NEXT_TERMINATE); td->qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); debug("%s: communication direction is '%s'\r\n", __func__, usb_pipein(pipe) ? "in" : "out"); if (i == queuesize - 1) //Last one,set IoC bit. { td->qt_token = cpu_to_hc32( QT_TOKEN_DT(toggle) | (elementsize << 16) | (1 << 15) | //Interrupt On Completion. (3 << 10) | //CERR bits. ((usb_pipein(pipe) ? 1 : 0) << 8) | /* IN/OUT token */ 0x80); /* active */ } else { td->qt_token = cpu_to_hc32( QT_TOKEN_DT(toggle) | (elementsize << 16) | (3 << 10) | //CERR bits. ((usb_pipein(pipe) ? 1 : 0) << 8) | /* IN/OUT token */ 0x80); /* active */ } debug("%s: construct TD token = %X.\r\n", __func__, td->qt_token); td->qt_buffer[0] = cpu_to_hc32((unsigned long)buffer + i * elementsize); td->qt_buffer[1] = cpu_to_hc32((td->qt_buffer[0] + 0x1000) & ~0xfff); td->qt_buffer[2] = cpu_to_hc32((td->qt_buffer[0] + 0x2000) & ~0xfff); td->qt_buffer[3] = cpu_to_hc32((td->qt_buffer[0] + 0x3000) & ~0xfff); td->qt_buffer[4] = cpu_to_hc32((td->qt_buffer[0] + 0x4000) & ~0xfff); #ifdef __MS_VC__ //MS VC can not support sizeof(void) operation,we should //convert the buffer type to char*. *buf = (void*)((char*)buffer + i * elementsize); #else //sizeof(void) is 1 under GCC or other environment,so the //following sentence is same as above one. *buf = buffer + i * elementsize; #endif toggle ^= 1; } flush_dcache_range((unsigned long)buffer, ALIGN_END_ADDR(char, buffer, queuesize * elementsize)); flush_dcache_range((unsigned long)result->first, ALIGN_END_ADDR(struct QH, result->first, queuesize)); flush_dcache_range((unsigned long)result->tds, ALIGN_END_ADDR(struct qTD, result->tds, queuesize)); //Acquire exclusively accessing of the controller. WaitForThisObject(ctrl->hMutex); if (ctrl->periodic_schedules > 0) { if (ehci_disable_periodic(ctrl) < 0) { ReleaseMutex(ctrl->hMutex); _hx_printf("FATAL %s: periodic should never fail, but did.\r\n",__func__); goto fail3; } } __ENTER_CRITICAL_SECTION(NULL, dwFlags); /* hook up to periodic list */ list = &ctrl->periodic_queue; result->last->qh_link = list->qh_link; list->qh_link = cpu_to_hc32((unsigned long)result->first | QH_LINK_TYPE_QH); //Link interrupt queue to Controller's pending queue. if (NULL == ctrl->pIntQueueFirst) { ctrl->pIntQueueFirst = result; ctrl->pIntQueueLast = result; } else { result->pNext = ctrl->pIntQueueFirst; ctrl->pIntQueueFirst = result; } __LEAVE_CRITICAL_SECTION(NULL, dwFlags); flush_dcache_range((unsigned long)result->last, ALIGN_END_ADDR(struct QH, result->last, 1)); flush_dcache_range((unsigned long)list, ALIGN_END_ADDR(struct QH, list, 1)); if (ehci_enable_periodic(ctrl) < 0) { ReleaseMutex(ctrl->hMutex); _hx_printf("FATAL %s: periodic should never fail, but did.\r\n", __func__);; goto fail3; } ctrl->periodic_schedules++; ReleaseMutex(ctrl->hMutex); debug("Exit create_int_queue\r\n"); return result; fail3: if (result->tds) free(result->tds); fail2: if (result->first) free(result->first); //if (result) // free(result); fail1: if (result) { if (NULL != result->hEvent) { DestroyEvent(result->hEvent); } free(result); } return NULL; }
static int ehci_submit_async(struct usb_device *dev, unsigned long pipe, void *buffer, int length, struct devrequest *req) { static struct QH qh __attribute__((aligned(32))); static struct qTD qtd[3] __attribute__((aligned (32))); int qtd_counter = 0; volatile struct qTD *vtd; unsigned long ts; uint32_t *tdp; uint32_t endpt, token, usbsts; uint32_t c, toggle; uint32_t cmd; int timeout; int ret = 0; debug("dev=%p, pipe=%lx, buffer=%p, length=%d, req=%p\n", dev, pipe, buffer, length, req); if (req != NULL) debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n", req->request, req->request, req->requesttype, req->requesttype, le16_to_cpu(req->value), le16_to_cpu(req->value), le16_to_cpu(req->index)); memset(&qh, 0, sizeof(struct QH)); memset(qtd, 0, sizeof(qtd)); toggle = usb_gettoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe)); /* * Setup QH (3.6 in ehci-r10.pdf) * * qh_link ................. 03-00 H * qh_endpt1 ............... 07-04 H * qh_endpt2 ............... 0B-08 H * - qh_curtd * qh_overlay.qt_next ...... 13-10 H * - qh_overlay.qt_altnext */ qh.qh_link = cpu_to_hc32((uint32_t)&qh_list | QH_LINK_TYPE_QH); c = (usb_pipespeed(pipe) != USB_SPEED_HIGH && usb_pipeendpoint(pipe) == 0) ? 1 : 0; endpt = (8 << 28) | (c << 27) | (usb_maxpacket(dev, pipe) << 16) | (0 << 15) | (1 << 14) | (usb_pipespeed(pipe) << 12) | (usb_pipeendpoint(pipe) << 8) | (0 << 7) | (usb_pipedevice(pipe) << 0); qh.qh_endpt1 = cpu_to_hc32(endpt); endpt = (1 << 30) | (dev->portnr << 23) | (dev->parent->devnum << 16) | (0 << 8) | (0 << 0); qh.qh_endpt2 = cpu_to_hc32(endpt); qh.qh_overlay.qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); tdp = &qh.qh_overlay.qt_next; if (req != NULL) { /* * Setup request qTD (3.5 in ehci-r10.pdf) * * qt_next ................ 03-00 H * qt_altnext ............. 07-04 H * qt_token ............... 0B-08 H * * [ buffer, buffer_hi ] loaded with "req". */ qtd[qtd_counter].qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); qtd[qtd_counter].qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); token = (0 << 31) | (sizeof(*req) << 16) | (0 << 15) | (0 << 12) | (3 << 10) | (2 << 8) | (0x80 << 0); qtd[qtd_counter].qt_token = cpu_to_hc32(token); if (ehci_td_buffer(&qtd[qtd_counter], req, sizeof(*req)) != 0) { debug("unable construct SETUP td\n"); goto fail; } /* Update previous qTD! */ *tdp = cpu_to_hc32((uint32_t)&qtd[qtd_counter]); tdp = &qtd[qtd_counter++].qt_next; toggle = 1; } if (length > 0 || req == NULL) { /* * Setup request qTD (3.5 in ehci-r10.pdf) * * qt_next ................ 03-00 H * qt_altnext ............. 07-04 H * qt_token ............... 0B-08 H * * [ buffer, buffer_hi ] loaded with "buffer". */ qtd[qtd_counter].qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); qtd[qtd_counter].qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); token = (toggle << 31) | (length << 16) | ((req == NULL ? 1 : 0) << 15) | (0 << 12) | (3 << 10) | ((usb_pipein(pipe) ? 1 : 0) << 8) | (0x80 << 0); qtd[qtd_counter].qt_token = cpu_to_hc32(token); if (ehci_td_buffer(&qtd[qtd_counter], buffer, length) != 0) { debug("unable construct DATA td\n"); goto fail; } /* Update previous qTD! */ *tdp = cpu_to_hc32((uint32_t)&qtd[qtd_counter]); tdp = &qtd[qtd_counter++].qt_next; } if (req != NULL) { /* * Setup request qTD (3.5 in ehci-r10.pdf) * * qt_next ................ 03-00 H * qt_altnext ............. 07-04 H * qt_token ............... 
0B-08 H */ qtd[qtd_counter].qt_next = cpu_to_hc32(QT_NEXT_TERMINATE); qtd[qtd_counter].qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE); token = (toggle << 31) | (0 << 16) | (1 << 15) | (0 << 12) | (3 << 10) | ((usb_pipein(pipe) ? 0 : 1) << 8) | (0x80 << 0); qtd[qtd_counter].qt_token = cpu_to_hc32(token); /* Update previous qTD! */ *tdp = cpu_to_hc32((uint32_t)&qtd[qtd_counter]); tdp = &qtd[qtd_counter++].qt_next; } qh_list.qh_link = cpu_to_hc32((uint32_t)&qh | QH_LINK_TYPE_QH); /* Flush dcache */ flush_dcache_range((uint32_t)&qh_list, (uint32_t)&qh_list + sizeof(struct QH)); flush_dcache_range((uint32_t)&qh, (uint32_t)&qh + sizeof(struct QH)); flush_dcache_range((uint32_t)qtd, (uint32_t)qtd + sizeof(qtd)); usbsts = ehci_readl(&hcor->or_usbsts); ehci_writel(&hcor->or_usbsts, (usbsts & 0x3f)); /* Enable async. schedule. */ cmd = ehci_readl(&hcor->or_usbcmd); cmd |= CMD_ASE; ehci_writel(&hcor->or_usbcmd, cmd); ret = handshake((uint32_t *)&hcor->or_usbsts, STD_ASS, STD_ASS, 100 * 1000); if (ret < 0) { printf("EHCI fail timeout STD_ASS set\n"); goto fail; } /* Wait for TDs to be processed. */ ts = get_timer(0); vtd = &qtd[qtd_counter - 1]; timeout = USB_TIMEOUT_MS(pipe); do { /* Invalidate dcache */ invalidate_dcache_range((uint32_t)&qh_list, (uint32_t)&qh_list + sizeof(struct QH)); invalidate_dcache_range((uint32_t)&qh, (uint32_t)&qh + sizeof(struct QH)); invalidate_dcache_range((uint32_t)qtd, (uint32_t)qtd + sizeof(qtd)); token = hc32_to_cpu(vtd->qt_token); if (!(token & 0x80)) break; WATCHDOG_RESET(); } while (get_timer(ts) < timeout); /* Invalidate the memory area occupied by buffer */ invalidate_dcache_range(((uint32_t)buffer & ~31), ((uint32_t)buffer & ~31) + roundup(length, 32)); /* Check that the TD processing happened */ if (token & 0x80) { printf("EHCI timed out on TD - token=%#x\n", token); } /* Disable async schedule. */ cmd = ehci_readl(&hcor->or_usbcmd); cmd &= ~CMD_ASE; ehci_writel(&hcor->or_usbcmd, cmd); ret = handshake((uint32_t *)&hcor->or_usbsts, STD_ASS, 0, 100 * 1000); if (ret < 0) { printf("EHCI fail timeout STD_ASS reset\n"); goto fail; } qh_list.qh_link = cpu_to_hc32((uint32_t)&qh_list | QH_LINK_TYPE_QH); token = hc32_to_cpu(qh.qh_overlay.qt_token); if (!(token & 0x80)) { debug("TOKEN=%#x\n", token); switch (token & 0xfc) { case 0: toggle = token >> 31; usb_settoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe), toggle); dev->status = 0; break; case 0x40: dev->status = USB_ST_STALLED; break; case 0xa0: case 0x20: dev->status = USB_ST_BUF_ERR; break; case 0x50: case 0x10: dev->status = USB_ST_BABBLE_DET; break; default: dev->status = USB_ST_CRC_ERR; if ((token & 0x40) == 0x40) dev->status |= USB_ST_STALLED; break; } dev->act_len = length - ((token >> 16) & 0x7fff); } else {
/* These routines rely on the bus (pci, platform, etc) * to handle powerdown and wakeup, and currently also on * transceivers that don't need any software attention to set up * the right sort of wakeup. * * They're also used for turning on/off the port when doing OTG. */ static int ehci_fsl_drv_suspend(struct platform_device *pdev, pm_message_t message) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct usb_device *roothub = hcd->self.root_hub; u32 port_status; struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; printk(KERN_DEBUG "USB Host suspend begins\n"); /* Only handles OTG mode switch event, system suspend event will be done in bus suspend */ if (pdata->pmflags == 0) { printk(KERN_DEBUG "%s, pm event \n", __func__); if (!host_can_wakeup_system(pdev)) { int mask; /* Need open clock for register access */ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) fsl_usb_clk_gate(hcd->self.controller->platform_data, true); mask = ehci_readl(ehci, &ehci->regs->intr_enable); mask &= ~STS_PCD; ehci_writel(ehci, mask, &ehci->regs->intr_enable); usb_host_set_wakeup(hcd->self.controller, false); fsl_usb_clk_gate(hcd->self.controller->platform_data, false); } return 0; } /* only the otg host can go here */ /* wait for all usb device on the hcd dettached */ usb_lock_device(roothub); if (roothub->children[0] != NULL) { int old = hcd->self.is_b_host; printk(KERN_DEBUG "will resume roothub and its children\n"); hcd->self.is_b_host = 0; /* resume the roothub, so that it can test the children is disconnected */ if (roothub->state == USB_STATE_SUSPENDED) usb_resume(&roothub->dev, PMSG_USER_SUSPEND); /* we must do unlock here, the hubd thread will hold the same lock * here release the lock, so that the hubd thread can process the usb * disconnect event and set the children[0] be NULL, or there will be * a deadlock */ usb_unlock_device(roothub); while (roothub->children[0] != NULL) msleep(1); usb_lock_device(roothub); hcd->self.is_b_host = old; } usb_unlock_device(roothub); if (!(hcd->state & HC_STATE_SUSPENDED)) { printk(KERN_DEBUG "will suspend roothub and its children\n"); usb_lock_device(roothub); usb_suspend(&roothub->dev, PMSG_USER_SUSPEND); usb_unlock_device(roothub); } if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) { fsl_usb_clk_gate(hcd->self.controller->platform_data, true); set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); } port_status = ehci_readl(ehci, &ehci->regs->port_status[0]); /* save EHCI registers */ pdata->pm_command = ehci_readl(ehci, &ehci->regs->command); pdata->pm_command &= ~CMD_RUN; pdata->pm_status = ehci_readl(ehci, &ehci->regs->status); pdata->pm_intr_enable = ehci_readl(ehci, &ehci->regs->intr_enable); pdata->pm_frame_index = ehci_readl(ehci, &ehci->regs->frame_index); pdata->pm_segment = ehci_readl(ehci, &ehci->regs->segment); pdata->pm_frame_list = ehci_readl(ehci, &ehci->regs->frame_list); pdata->pm_async_next = ehci_readl(ehci, &ehci->regs->async_next); pdata->pm_configured_flag = ehci_readl(ehci, &ehci->regs->configured_flag); pdata->pm_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]); /* clear the W1C bits */ pdata->pm_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS); /* clear PHCD bit */ pdata->pm_portsc &= ~PORT_PTS_PHCD; usb_host_set_wakeup(hcd->self.controller, true); fsl_usb_lowpower_mode(pdata, true); if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) { clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); fsl_usb_clk_gate(hcd->self.controller->platform_data, false); } pdata->pmflags = 0; printk(KERN_DEBUG 
"host suspend ends\n"); return 0; }
/* remember to add cleanup code (above) if you add anything here */
static int ehci_mem_init(struct ehci_hcd *ehci, gfp_t flags)
{
	int i;

	/* QTDs for control/bulk/intr transfers */
	ehci->qtd_pool = dma_pool_create("ehci_qtd",
			ehci_to_hcd(ehci)->self.sysdev,
			sizeof(struct ehci_qtd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qtd_pool) {
		goto fail;
	}

	/* QHs for control/bulk/intr transfers */
	ehci->qh_pool = dma_pool_create("ehci_qh",
			ehci_to_hcd(ehci)->self.sysdev,
			sizeof(struct ehci_qh_hw),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qh_pool) {
		goto fail;
	}
	ehci->async = ehci_qh_alloc(ehci, flags);
	if (!ehci->async) {
		goto fail;
	}

	/* ITD for high speed ISO transfers */
	ehci->itd_pool = dma_pool_create("ehci_itd",
			ehci_to_hcd(ehci)->self.sysdev,
			sizeof(struct ehci_itd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->itd_pool) {
		goto fail;
	}

	/* SITD for full/low speed split ISO transfers */
	ehci->sitd_pool = dma_pool_create("ehci_sitd",
			ehci_to_hcd(ehci)->self.sysdev,
			sizeof(struct ehci_sitd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->sitd_pool) {
		goto fail;
	}

	/* Hardware periodic table */
	ehci->periodic = (__le32 *)
		dma_alloc_coherent(ehci_to_hcd(ehci)->self.sysdev,
			ehci->periodic_size * sizeof(__le32),
			&ehci->periodic_dma, flags);
	if (ehci->periodic == NULL) {
		goto fail;
	}

	if (ehci->use_dummy_qh) {
		struct ehci_qh_hw *hw;

		ehci->dummy = ehci_qh_alloc(ehci, flags);
		if (!ehci->dummy)
			goto fail;

		hw = ehci->dummy->hw;
		hw->hw_next = EHCI_LIST_END(ehci);
		hw->hw_qtd_next = EHCI_LIST_END(ehci);
		hw->hw_alt_next = EHCI_LIST_END(ehci);
		ehci->dummy->hw = hw;

		for (i = 0; i < ehci->periodic_size; i++)
			ehci->periodic[i] = cpu_to_hc32(ehci,
					ehci->dummy->qh_dma);
	} else {
		for (i = 0; i < ehci->periodic_size; i++)
			ehci->periodic[i] = EHCI_LIST_END(ehci);
	}

	/* software shadow of hardware table */
	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
	if (ehci->pshadow != NULL)
		return 0;

fail:
	ehci_dbg(ehci, "couldn't init memory\n");
	ehci_mem_cleanup(ehci);
	return -ENOMEM;
}
/* These routines rely on the bus (pci, platform, etc) * to handle powerdown and wakeup, and currently also on * transceivers that don't need any software attention to set up * the right sort of wakeup. * * They're also used for turning on/off the port when doing OTG. */ static int ehci_fsl_drv_suspend(struct platform_device *pdev, pm_message_t message) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ehci_hcd *ehci = hcd_to_ehci(hcd); u32 tmp, port_status; struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; if (device_may_wakeup(&(pdev->dev))) { /* Need open clock for register access */ if (pdata->usb_clock_for_pm) pdata->usb_clock_for_pm(true); } #ifdef DEBUG u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE); mode &= USBMODE_CM_MASK; tmp = ehci_readl(ehci, hcd->regs + 0x140); /* usbcmd */ printk(KERN_DEBUG "%s('%s'): suspend=%d already_suspended=%d " "mode=%d usbcmd %08x\n", __func__, pdata->name, pdata->suspended, pdata->already_suspended, mode, tmp); #endif /* * If the controller is already suspended, then this must be a * PM suspend. Remember this fact, so that we will leave the * controller suspended at PM resume time. */ if (pdata->suspended) { pr_debug("%s: already suspended, leaving early\n", __func__); pdata->already_suspended = 1; goto err1; } pr_debug("%s: suspending...\n", __func__); printk(KERN_INFO "USB Host suspended\n"); port_status = ehci_readl(ehci, &ehci->regs->port_status[0]); pdev->dev.power.power_state = PMSG_SUSPEND; /* ignore non-host interrupts */ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); /* save EHCI registers */ pdata->pm_command = ehci_readl(ehci, &ehci->regs->command); pdata->pm_command &= ~CMD_RUN; pdata->pm_status = ehci_readl(ehci, &ehci->regs->status); pdata->pm_intr_enable = ehci_readl(ehci, &ehci->regs->intr_enable); pdata->pm_frame_index = ehci_readl(ehci, &ehci->regs->frame_index); pdata->pm_segment = ehci_readl(ehci, &ehci->regs->segment); pdata->pm_frame_list = ehci_readl(ehci, &ehci->regs->frame_list); pdata->pm_async_next = ehci_readl(ehci, &ehci->regs->async_next); pdata->pm_configured_flag = ehci_readl(ehci, &ehci->regs->configured_flag); pdata->pm_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]); /* clear the W1C bits */ pdata->pm_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS); /* clear PHCD bit */ pdata->pm_portsc &= ~PORT_PHCD; pdata->suspended = 1; if (!device_may_wakeup(&(pdev->dev))) { /* clear PP to cut power to the port */ tmp = ehci_readl(ehci, &ehci->regs->port_status[0]); tmp &= ~PORT_POWER; ehci_writel(ehci, tmp, &ehci->regs->port_status[0]); goto err1; } tmp = ehci_readl(ehci, &ehci->regs->port_status[0]); if (pdata->platform_suspend) pdata->platform_suspend(pdata); err1: if (device_may_wakeup(&(pdev->dev))) { if (pdata->usb_clock_for_pm) pdata->usb_clock_for_pm(false); } return 0; }