void xhci_init_ejxxx(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 reg32 = 0;

        switch (xhci->hcc_params1 & 0xff) {
        case 0x30:
                xhci_init_ej168_v00660(xhci);

                reg32 = xhci_readl(xhci, hcd->regs + 0x40c0);
                reg32 = (reg32 & 0xffff00ff) | 0x0100;
                xhci_writel(xhci, reg32, hcd->regs + 0x40c0);

                reg32 = xhci_readl(xhci, hcd->regs + 0x40d4);
                reg32 = (reg32 & 0xfffffffe) | 0x01;
                xhci_writel(xhci, reg32, hcd->regs + 0x40d4);
                break;
        case 0x40:
                xhci_init_ej188_v00100900(xhci);

                reg32 = xhci_readl(xhci, hcd->regs + 0x4294);
                reg32 = (reg32 & 0xfffffffe) | 0x01;
                xhci_writel(xhci, reg32, hcd->regs + 0x4294);

                reg32 = xhci_readl(xhci, hcd->regs + 0x42d4);
                reg32 = (reg32 & 0xfffffffe) | 0x01;
                xhci_writel(xhci, reg32, hcd->regs + 0x42d4);
                break;
        default:
                break;
        }
}
static void mxhci_hsic_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
        struct xhci_plat_data *pdata = dev->platform_data;
        struct mxhci_hsic_hcd *mxhci = hcd_to_hsic(xhci_to_hcd(xhci));

        /*
         * As of now platform drivers don't provide MSI support, so we ensure
         * here that the generic code does not try to make a pci_dev from our
         * dev struct in order to set up MSI.
         */
        xhci->quirks |= XHCI_PLAT;

        /* Single port controller using out-of-band remote wakeup */
        if (mxhci->wakeup_irq)
                xhci->quirks |= XHCI_NO_SELECTIVE_SUSPEND;

        /*
         * The hardware TR dequeue pointer has been observed getting stuck on
         * a no-op TRB when a transfer is aborted during suspend. Reset the
         * TR dequeue pointer to the start of the first segment of the
         * transfer ring.
         */
        xhci->quirks |= XHCI_TR_DEQ_RESET_QUIRK;

        if (!pdata)
                return;

        if (pdata->vendor == SYNOPSIS_DWC3_VENDOR && pdata->revision < 0x230A)
                xhci->quirks |= XHCI_PORTSC_DELAY;
}
/*
 * Free IRQs
 * free all IRQs that were requested
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int ret;

        /* return if using legacy interrupt */
        if (xhci_to_hcd(xhci)->irq > 0)
                return;

        ret = xhci_free_msi(xhci);
        if (!ret)
                return;
        if (pdev->irq > 0)
                free_irq(pdev->irq, xhci_to_hcd(xhci));
}
static inline void dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
                                         void *cpu_addr, dma_addr_t dma_handle)
{
        if (cpu_addr)
                dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
                                  size, cpu_addr, dma_handle);
}
/* Self-test helper for the TRB-in-TD math used by the event handler. */
int xhci_test_trb_in_td(struct xhci_hcd *xhci,
                struct xhci_segment *input_seg,
                union xhci_trb *start_trb,
                union xhci_trb *end_trb,
                dma_addr_t input_dma,
                struct xhci_segment *result_seg,
                char *test_name, int test_number)
{
        unsigned long long start_dma;
        unsigned long long end_dma;
        struct xhci_segment *seg;

        start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
        end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

        /* trb_in_td() signature per older kernels; newer ones also take xhci
         * and a debug flag.
         */
        seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
        if (seg != result_seg) {
                dev_warn(xhci_to_hcd(xhci)->self.controller,
                         "%s TRB math test %d failed (start DMA 0x%llx, end DMA 0x%llx)\n",
                         test_name, test_number, start_dma, end_dma);
                dev_warn(xhci_to_hcd(xhci)->self.controller,
                         "Expected seg %p, got seg %p\n", result_seg, seg);
                return -1;
        }
        return 0;
}
void xhci_free_4bytes_pool(struct xhci_hcd *xhci)
{
        struct device *dev = xhci_to_hcd(xhci)->self.controller;

        if (xhci->align_pool == NULL)
                return;

        dma_free_coherent(dev, XHCI_4BYTES_POOL_SIZE,
                          xhci->align_pool->buf, xhci->align_pool->dma);
        kfree(xhci->align_pool);
        xhci->align_pool = NULL;
}
static inline void *dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
                                           dma_addr_t *dma_handle, gfp_t flags)
{
        void *vaddr;

        vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
                                   size, dma_handle, flags);
        /* Guard against a failed allocation before zeroing the buffer. */
        if (vaddr)
                memset(vaddr, 0, size);
        return vaddr;
}
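/*
 * Usage sketch only (hypothetical caller, not part of the driver): allocate
 * a zeroed DMA-coherent block with dbc_dma_alloc_coherent() and release it
 * with dbc_dma_free_coherent(); both helpers are defined above.
 */
static int dbc_example_alloc_ctx(struct xhci_hcd *xhci, size_t size)
{
        dma_addr_t dma;
        void *ctx;

        ctx = dbc_dma_alloc_coherent(xhci, size, &dma, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        /* ... use ctx / dma here ... */

        dbc_dma_free_coherent(xhci, size, ctx, dma);
        return 0;
}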
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev *pdev;
        int ret;

        /* The xhci platform device has set up IRQs through usb_add_hcd. */
        if (xhci->quirks & XHCI_PLAT)
                return 0;

        pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        /*
         * Some Fresco Logic host controllers advertise MSI, but fail to
         * generate interrupts.  Don't even try to enable MSI.
         */
        if (xhci->quirks & XHCI_BROKEN_MSI)
                goto legacy_irq;

        /* unregister the legacy interrupt */
        if (hcd->irq)
                free_irq(hcd->irq, hcd);
        hcd->irq = 0;

        ret = xhci_setup_msix(xhci);
        if (ret)
                /* fall back to MSI */
                ret = xhci_setup_msi(xhci);

        if (!ret)
                /* hcd->irq is 0, we have MSI */
                return 0;

        if (!pdev->irq) {
                xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
                return -EINVAL;
        }

 legacy_irq:
        if (!strlen(hcd->irq_descr))
                snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
                         hcd->driver->description, hcd->self.busnum);

        /* fall back to legacy interrupt */
        ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                          hcd->irq_descr, hcd);
        if (ret) {
                xhci_err(xhci, "request interrupt %d failed\n", pdev->irq);
                return ret;
        }
        hcd->irq = pdev->irq;
        return 0;
}
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        if (!xhci->msix_entries)
                return;

        free_irq(xhci->msix_entries[0].vector, xhci);
        pci_disable_msix(pdev);
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
        int ret;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        ret = pci_enable_msi(pdev);
        if (ret) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                               "failed to allocate MSI entry");
                return ret;
        }

        ret = request_irq(pdev->irq, xhci_msi_irq, 0, "xhci_hcd",
                          xhci_to_hcd(xhci));
        if (ret) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                               "disable MSI interrupt");
                pci_disable_msi(pdev);
        }

        return ret;
}
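/*
 * Illustrative only: on kernels that provide pci_alloc_irq_vectors(), the
 * separate MSI-X / MSI / legacy fallback paths above can be collapsed into
 * a single allocation call.  This is a hedged sketch, not the xhci driver's
 * actual code; the helper name xhci_setup_one_irq_vector() is hypothetical.
 */
static int xhci_setup_one_irq_vector(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
        int nvec, ret;

        /* Try MSI-X first, then MSI, then the legacy INTx line. */
        nvec = pci_alloc_irq_vectors(pdev, 1, 1,
                                     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
        if (nvec < 0)
                return nvec;

        /* pci_irq_vector() maps vector 0 to the Linux IRQ number. */
        ret = request_irq(pci_irq_vector(pdev, 0), xhci_msi_irq, 0,
                          "xhci_hcd", hcd);
        if (ret)
                pci_free_irq_vectors(pdev);
        return ret;
}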
static int xhci_free_msi(struct xhci_hcd *xhci)
{
        int i;

        if (!xhci->msix_entries)
                return -EINVAL;

        for (i = 0; i < xhci->msix_count; i++)
                if (xhci->msix_entries[i].vector)
                        free_irq(xhci->msix_entries[i].vector,
                                 xhci_to_hcd(xhci));
        return 0;
}
void xhci_debugfs_init(struct xhci_hcd *xhci)
{
        struct device *dev = xhci_to_hcd(xhci)->self.controller;

        xhci->debugfs_root = debugfs_create_dir(dev_name(dev),
                                                xhci_debugfs_root);

        INIT_LIST_HEAD(&xhci->regset_list);

        xhci_debugfs_regset(xhci, 0,
                            xhci_cap_regs, ARRAY_SIZE(xhci_cap_regs),
                            xhci->debugfs_root, "reg-cap");

        xhci_debugfs_regset(xhci,
                            HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)),
                            xhci_op_regs, ARRAY_SIZE(xhci_op_regs),
                            xhci->debugfs_root, "reg-op");

        xhci_debugfs_regset(xhci,
                            readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK,
                            xhci_runtime_regs, ARRAY_SIZE(xhci_runtime_regs),
                            xhci->debugfs_root, "reg-runtime");

        xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_LEGACY,
                                   xhci_extcap_legsup,
                                   ARRAY_SIZE(xhci_extcap_legsup),
                                   "reg-ext-legsup");

        xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_PROTOCOL,
                                   xhci_extcap_protocol,
                                   ARRAY_SIZE(xhci_extcap_protocol),
                                   "reg-ext-protocol");

        xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_DEBUG,
                                   xhci_extcap_dbc,
                                   ARRAY_SIZE(xhci_extcap_dbc),
                                   "reg-ext-dbc");

        xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring,
                                     "command-ring",
                                     xhci->debugfs_root);

        xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
                                     "event-ring",
                                     xhci->debugfs_root);

        xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root);

        xhci_debugfs_create_ports(xhci, xhci->debugfs_root);
}
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
        unsigned long flags;
        int ret;
        u32 temp;
        struct xhci_hcd *xhci;
        struct xhci_td *td;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;

        xhci = hcd_to_xhci(hcd);
        spin_lock_irqsave(&xhci->lock, flags);
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret || !urb->hcpriv)
                goto done;
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        if (temp == 0xffffffff) {
                xhci_dbg(xhci, "HW died, freeing TD.\n");
                td = (struct xhci_td *) urb->hcpriv;

                usb_hcd_unlink_urb_from_ep(hcd, urb);
                spin_unlock_irqrestore(&xhci->lock, flags);
                usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
                kfree(td);
                return ret;
        }

        xhci_dbg(xhci, "Cancel URB %p\n", urb);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
        ep_ring = ep->ring;
        xhci_dbg(xhci, "Endpoint ring:\n");
        xhci_debug_ring(xhci, ep_ring);
        td = (struct xhci_td *) urb->hcpriv;

        ep->cancels_pending++;
        list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
        if (ep->cancels_pending == 1) {
                xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
                xhci_ring_cmd_db(xhci);
        }
done:
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
        int ret;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        /* Only a single MSI-X vector is used, for the event ring interrupt. */
        xhci->msix_count = 1;
        xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
        if (!xhci->msix_entries) {
                xhci_err(xhci, "Failed to allocate MSI-X entries\n");
                return -ENOMEM;
        }
        xhci->msix_entries[0].entry = 0;

        ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
        if (ret) {
                xhci_err(xhci, "Failed to enable MSI-X\n");
                goto free_entries;
        }

        ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
                          "xHCI", xhci_to_hcd(xhci));
        if (ret) {
                xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
                goto disable_msix;
        }

        xhci_dbg(xhci, "Finished setting up MSI-X\n");
        return 0;

disable_msix:
        pci_disable_msix(pdev);
free_entries:
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        return ret;
}
int mtk_is_hub_active(void)
{
        struct usb_hcd *hcd = xhci_to_hcd(mtk_xhci);
        struct usb_device *rhdev = hcd->self.root_hub;
        struct usb_hub *hub = usb_hub_to_struct_hub(rhdev);
        bool ret = true;

        spin_lock_irq(mtk_hub_event_lock);
        if (mtk_ep_count == 0 && list_empty(&hub->event_list) &&
            atomic_read(&hub->kref.refcount) == 1)
                ret = false;
        spin_unlock_irq(mtk_hub_event_lock);

        return ret;
}
int xhci_init_4bytes_pool(struct xhci_hcd *xhci)
{
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        struct xhci_4bytes_pool *pool;

        pool = kzalloc(sizeof(struct xhci_4bytes_pool), GFP_KERNEL);
        if (!pool)
                return -ENOMEM;

        pool->buf = dma_alloc_coherent(dev, XHCI_4BYTES_POOL_SIZE,
                                       &pool->dma, GFP_KERNEL);
        if (!pool->buf) {
                kfree(pool);
                return -ENOMEM;
        }

        xhci->align_pool = pool;
        return 0;
}
static int xhci_exit_test_mode(struct xhci_hcd *xhci)
{
        int retval;

        if (!xhci->test_mode) {
                xhci_err(xhci, "Not in test mode, do nothing.\n");
                return 0;
        }
        if (xhci->test_mode == TEST_FORCE_EN &&
            !(xhci->xhc_state & XHCI_STATE_HALTED)) {
                retval = xhci_halt(xhci);
                if (retval)
                        return retval;
        }
        pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
        xhci->test_mode = 0;
        return xhci_reset(xhci);
}
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        u32 temp, temp2;
        union xhci_trb *trb;

        spin_lock(&xhci->lock);
        trb = xhci->event_ring->dequeue;
        /* Check if the xHC generated the interrupt, or the irq is shared */
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        if (temp == 0xffffffff && temp2 == 0xffffffff)
                goto hw_died;

        if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
                spin_unlock(&xhci->lock);
                return IRQ_NONE;
        }

        xhci_dbg(xhci, "op reg status = %08x\n", temp);
        xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
        xhci_dbg(xhci, "Event ring dequeue ptr:\n");
        xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
                 (unsigned long long)
                        xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
                 lower_32_bits(trb->link.segment_ptr),
                 upper_32_bits(trb->link.segment_ptr),
                 (unsigned int) trb->link.intr_target,
                 (unsigned int) trb->link.control);

        if (temp & STS_FATAL) {
                xhci_warn(xhci, "WARNING: Host System Error\n");
                xhci_halt(xhci);
hw_died:
                xhci_to_hcd(xhci)->state = HC_STATE_HALT;
                spin_unlock(&xhci->lock);
                /* IRQ handlers must return an irqreturn_t, not an errno */
                return IRQ_HANDLED;
        }

        xhci_work(xhci);
        spin_unlock(&xhci->lock);

        return IRQ_HANDLED;
}
static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);

        /*
         * As of now platform drivers don't provide MSI support so we ensure
         * here that the generic code does not try to make a pci_dev from our
         * dev struct in order to setup MSI
         */
        xhci->quirks |= XHCI_PLAT;
        xhci->quirks |= XHCI_MTK_HOST;
        /*
         * MTK host controller gives a spurious successful event after a
         * short transfer. Ignore it.
         */
        xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
        if (mtk->lpm_support)
                xhci->quirks |= XHCI_LPM_SUPPORT;
}
int xhci_reset(struct xhci_hcd *xhci)
{
        u32 command;
        u32 state;

        state = xhci_readl(xhci, &xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg(xhci, "// Reset the HC\n");
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RESET;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        xhci_to_hcd(xhci)->state = HC_STATE_HALT;

        return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0,
                         250 * 1000);
}
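/*
 * Sketch only: handshake() as called above typically spins on a register
 * until the masked bits reach the expected value or the timeout (in
 * microseconds) expires.  This is an illustrative implementation under those
 * assumptions, not necessarily the driver's exact version.
 */
static int handshake_sketch(struct xhci_hcd *xhci, void __iomem *ptr,
                            u32 mask, u32 done, int usec)
{
        u32 result;

        do {
                result = xhci_readl(xhci, ptr);
                if (result == ~(u32)0)          /* host removed or died */
                        return -ENODEV;
                if ((result & mask) == done)
                        return 0;
                udelay(1);
        } while (--usec > 0);

        return -ETIMEDOUT;
}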
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        if (xhci->quirks & XHCI_PLAT)
                return;

        xhci_free_irq(xhci);

        if (xhci->msix_entries) {
                pci_disable_msix(pdev);
                kfree(xhci->msix_entries);
                xhci->msix_entries = NULL;
        } else {
                pci_disable_msi(pdev);
        }

        hcd->msix_enabled = 0;
}
static int xhci_enter_test_mode(struct xhci_hcd *xhci,
                                u16 test_mode, u16 wIndex, unsigned long *flags)
{
        int i, retval;

        /* Disable all Device Slots */
        xhci_dbg(xhci, "Disable all slots\n");
        spin_unlock_irqrestore(&xhci->lock, *flags);
        for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
                if (!xhci->devs[i])
                        continue;

                retval = xhci_disable_slot(xhci, i);
                if (retval)
                        xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
                                 i, retval);
        }
        spin_lock_irqsave(&xhci->lock, *flags);
        /* Put all ports to the Disable state by clear PP */
        xhci_dbg(xhci, "Disable all port (PP = 0)\n");
        /* Power off USB3 ports */
        for (i = 0; i < xhci->num_usb3_ports; i++)
                xhci_set_port_power(xhci, xhci->shared_hcd, i, false, flags);
        /* Power off USB2 ports */
        for (i = 0; i < xhci->num_usb2_ports; i++)
                xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags);
        /* Stop the controller */
        xhci_dbg(xhci, "Stop controller\n");
        retval = xhci_halt(xhci);
        if (retval)
                return retval;
        /* Disable runtime PM for test mode */
        pm_runtime_forbid(xhci_to_hcd(xhci)->self.controller);
        /* Set PORTPMSC.PTC field to enter selected test mode */
        /* Port is selected by wIndex. port_id = wIndex + 1 */
        xhci_dbg(xhci, "Enter Test Mode: %d, Port_id=%d\n",
                 test_mode, wIndex + 1);
        xhci_port_set_test_mode(xhci, test_mode, wIndex);
        return retval;
}
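/*
 * Sketch only: xhci_port_set_test_mode() as used above typically programs
 * the PTC (Port Test Control) field of the selected USB2 port's PORTPMSC
 * register.  The PTC field occupying bits 31:28 follows the xHCI spec, but
 * the xhci->usb2_ports array and PORTPMSC word offset used here are
 * assumptions for illustration.
 */
static void xhci_port_set_test_mode_sketch(struct xhci_hcd *xhci,
                                           u16 test_mode, u16 wIndex)
{
        u32 temp;

        /* Test mode is only defined for USB2 (high-speed) root-hub ports. */
        temp = readl(xhci->usb2_ports[wIndex] + PORTPMSC);
        temp &= ~(0xf << 28);                   /* clear the PTC field */
        temp |= test_mode << 28;                /* select the test pattern */
        writel(temp, xhci->usb2_ports[wIndex] + PORTPMSC);
        xhci->test_mode = test_mode;
}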
        dev_info = cpu_to_le32((DBC_VENDOR_ID << 16) | DBC_PROTOCOL);
        writel(dev_info, &dbc->regs->devinfo1);

        dev_info = cpu_to_le32((DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID);
        writel(dev_info, &dbc->regs->devinfo2);
}

static void xhci_dbc_giveback(struct dbc_request *req, int status)
        __releases(&dbc->lock)
        __acquires(&dbc->lock)
{
        struct dbc_ep *dep = req->dep;
        struct xhci_dbc *dbc = dep->dbc;
        struct xhci_hcd *xhci = dbc->xhci;
        struct device *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

        list_del_init(&req->list_pending);
        req->trb_dma = 0;
        req->trb = NULL;

        if (req->status == -EINPROGRESS)
                req->status = status;

        trace_xhci_dbc_giveback_request(req);

        dma_unmap_single(dev, req->dma, req->length,
                         dbc_ep_dma_direction(dep));
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
        int rc = 0;
        unsigned int delay = XHCI_MAX_HALT_USEC;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 command;

        if (!hcd->state)
                return 0;

        if (hcd->state != HC_STATE_SUSPENDED ||
            xhci->shared_hcd->state != HC_STATE_SUSPENDED)
                return -EINVAL;

        /* Clear root port wake on bits if wakeup not allowed. */
        if (!do_wakeup)
                xhci_disable_port_wake_on_bits(xhci);

        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);
        clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        del_timer_sync(&xhci->shared_hcd->rh_timer);

        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoint */
        /* skipped assuming that port suspend has done */

        /* step 2: clear Run/Stop bit */
        command = readl(&xhci->op_regs->command);
        command &= ~CMD_RUN;
        writel(command, &xhci->op_regs->command);

        /* Some chips from Fresco Logic need an extraordinary delay */
        delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

        if (xhci_handshake(&xhci->op_regs->status,
                           STS_HALT, STS_HALT, delay)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        xhci_clear_command_ring(xhci);

        /* step 3: save registers */
        xhci_save_registers(xhci);

        /* step 4: set CSS flag */
        command = readl(&xhci->op_regs->command);
        command |= CMD_CSS;
        writel(command, &xhci->op_regs->command);
        if (xhci_handshake(&xhci->op_regs->status,
                           STS_SAVE, 0, 10 * 1000)) {
                xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        spin_unlock_irq(&xhci->lock);

        /*
         * Deleting Compliance Mode Recovery Timer because the xHCI Host
         * is about to be suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
            (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                               "%s: compliance mode recovery timer deleted",
                               __func__);
        }

        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);

        return rc;
}
/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
        u32 command, temp = 0, status;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct usb_hcd *secondary_hcd;
        int retval = 0;
        bool comp_timer_running = false;

        if (!hcd->state)
                return 0;

        /* Wait a bit if either of the roothubs need to settle from the
         * transition into bus suspend.
         */
        if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
            time_before(jiffies, xhci->bus_state[1].next_statechange))
                msleep(100);

        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

        spin_lock_irq(&xhci->lock);
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                hibernated = true;

        if (!hibernated) {
                /* step 1: restore register */
                xhci_restore_registers(xhci);
                /* step 2: initialize command ring buffer */
                xhci_set_cmd_ring_deq(xhci);
                /* step 3: restore state and start state */
                /* step 3: set CRS flag */
                command = readl(&xhci->op_regs->command);
                command |= CMD_CRS;
                writel(command, &xhci->op_regs->command);
                if (xhci_handshake(&xhci->op_regs->status,
                                   STS_RESTORE, 0, 10 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
                temp = readl(&xhci->op_regs->status);
        }

        /* If restore operation fails, re-initialize the HC during resume */
        if ((temp & STS_SRE) || hibernated) {

                if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                    !(xhci_all_ports_seen_u0(xhci))) {
                        del_timer_sync(&xhci->comp_mode_recovery_timer);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                       "Compliance Mode Recovery Timer deleted!");
                }

                /* Let the USB core know _both_ roothubs lost power. */
                usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
                usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

                xhci_dbg(xhci, "Stop HCD\n");
                xhci_halt(xhci);
                xhci_reset(xhci);
                spin_unlock_irq(&xhci->lock);
                xhci_cleanup_msix(xhci);

                xhci_dbg(xhci, "// Disabling event ring interrupts\n");
                temp = readl(&xhci->op_regs->status);
                writel(temp & ~STS_EINT, &xhci->op_regs->status);
                temp = readl(&xhci->ir_set->irq_pending);
                writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
                xhci_print_ir_set(xhci, 0);

                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
                xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                         readl(&xhci->op_regs->status));

                /* USB core calls the PCI reinit and start functions twice:
                 * first with the primary HCD, and then with the secondary HCD.
                 * If we don't do the same, the host will never be started.
                 */
                if (!usb_hcd_is_primary_hcd(hcd))
                        secondary_hcd = hcd;
                else
                        secondary_hcd = xhci->shared_hcd;

                xhci_dbg(xhci, "Initialize the xhci_hcd\n");
                retval = xhci_init(hcd->primary_hcd);
                if (retval)
                        return retval;
                comp_timer_running = true;

                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
                if (!retval) {
                        xhci_dbg(xhci, "Start the secondary HCD\n");
                        retval = xhci_run(secondary_hcd);
                }
                hcd->state = HC_STATE_SUSPENDED;
                xhci->shared_hcd->state = HC_STATE_SUSPENDED;
                goto done;
        }

        /* step 4: set Run/Stop bit */
        command = readl(&xhci->op_regs->command);
        command |= CMD_RUN;
        writel(command, &xhci->op_regs->command);
        xhci_handshake(&xhci->op_regs->status, STS_HALT,
                       0, 250 * 1000);

        /* step 5: walk topology and initialize portsc,
         * portpmsc and portli
         */
        /* this is done in bus_resume */

        /* step 6: restart each of the previously
         * Running endpoints by ringing their doorbells
         */

        spin_unlock_irq(&xhci->lock);

 done:
        if (retval == 0) {
                /* Resume root hubs only when there are pending events. */
                status = readl(&xhci->op_regs->status);
                if (status & STS_EINT) {
                        usb_hcd_resume_root_hub(hcd);
                        usb_hcd_resume_root_hub(xhci->shared_hcd);
                }
        }

        /*
         * If the system is subject to the quirk, the Compliance Mode Timer
         * always needs to be re-initialized after a system resume, because
         * the ports may suffer the Compliance Mode issue again regardless of
         * whether they entered U0 before the system was suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
                compliance_mode_recovery_timer_init(xhci);

        /* Re-enable port polling. */
        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        usb_hcd_poll_rh_status(hcd);
        set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        usb_hcd_poll_rh_status(xhci->shared_hcd);

        return retval;
}
static void xhci_quiesce(struct xhci_hcd *xhci)
{
        BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
        xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
}