/* * Make sure the controller is completely inactive, unable to * generate interrupts or do DMA. */ static void uhci_pci_reset_hc(struct uhci_hcd *uhci) { uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr); }
static inline int is_sata(ide_hwif_t *hwif) { return pdev_is_sata(to_pci_dev(hwif->dev)); }
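/*
 * A minimal guard sketch (not from the driver above): to_pci_dev() is only
 * safe when the struct device really is embedded in a struct pci_dev, so a
 * caller that cannot rely on the bus type should check dev_is_pci() first.
 * The helper name "hwif_dev_is_sata" is a hypothetical illustration.
 */
static inline int hwif_dev_is_sata(struct device *dev)
{
	if (!dev_is_pci(dev))
		return 0;	/* not a PCI device: cannot be a PCI SATA HBA */
	return pdev_is_sata(to_pci_dev(dev));
}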
static int ehci_pci_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); struct pci_dev *p_smbus; u8 rev; u32 temp; int retval; switch (pdev->vendor) { case PCI_VENDOR_ID_TOSHIBA_2: if (pdev->device == 0x01b5) { #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO ehci->big_endian_mmio = 1; #else ehci_warn(ehci, "unsupported big endian Toshiba quirk\n"); #endif } break; } ehci->caps = hcd->regs; ehci->regs = hcd->regs + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); dbg_hcs_params(ehci, "reset"); dbg_hcc_params(ehci, "reset"); switch (pdev->vendor) { case PCI_VENDOR_ID_NVIDIA: switch (pdev->device) { case 0x003c: case 0x005b: case 0x00d8: case 0x00e8: if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(31)) < 0) ehci_warn(ehci, "can't enable NVidia " "workaround for >2GB RAM\n"); break; } break; } ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); retval = ehci_halt(ehci); if (retval) return retval; if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) || (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) { ehci->use_dummy_qh = 1; ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI " "dummy qh workaround\n"); } retval = ehci_init(hcd); if (retval) return retval; switch (pdev->vendor) { case PCI_VENDOR_ID_NEC: ehci->need_io_watchdog = 0; break; case PCI_VENDOR_ID_INTEL: ehci->need_io_watchdog = 0; ehci->fs_i_thresh = 1; if (pdev->device == 0x27cc) { ehci->broken_periodic = 1; ehci_info(ehci, "using broken periodic workaround\n"); } if (pdev->device == 0x0806 || pdev->device == 0x0811 || pdev->device == 0x0829) { ehci_info(ehci, "disable lpm for langwell/penwell\n"); ehci->has_lpm = 0; } if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) { hcd->has_tt = 1; tdi_reset(ehci); } if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) { if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) { ehci_info(ehci, "broken D3 during system sleep on ASUS\n"); hcd->broken_pci_sleep = 1; device_set_wakeup_capable(&pdev->dev, false); } } break; case PCI_VENDOR_ID_TDI: if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { hcd->has_tt = 1; tdi_reset(ehci); } break; case PCI_VENDOR_ID_AMD: if (usb_amd_find_chipset_info()) ehci->amd_pll_fix = 1; if (pdev->device == 0x7463) { ehci_info(ehci, "ignoring AMD8111 (errata)\n"); retval = -EIO; goto done; } break; case PCI_VENDOR_ID_NVIDIA: switch (pdev->device) { case 0x0068: if (pdev->revision < 0xa4) ehci->no_selective_suspend = 1; break; case 0x0d9d: ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89"); ehci->has_lpm = 0; ehci->has_ppcd = 0; ehci->command &= ~CMD_PPCEE; break; } break; case PCI_VENDOR_ID_VIA: if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) { u8 tmp; pci_read_config_byte(pdev, 0x4b, &tmp); if (tmp & 0x20) break; pci_write_config_byte(pdev, 0x4b, tmp | 0x20); } break; case PCI_VENDOR_ID_ATI: if (usb_amd_find_chipset_info()) ehci->amd_pll_fix = 1; if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) { p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); if (!p_smbus) break; rev = p_smbus->revision; if ((pdev->device == 0x4386) || (rev == 0x3a) || (rev == 0x3b)) { u8 tmp; ehci_info(ehci, "applying AMD SB600/SB700 USB " "freeze workaround\n"); pci_read_config_byte(pdev, 0x53, &tmp); pci_write_config_byte(pdev, 0x53, tmp | (1<<3)); } pci_dev_put(p_smbus); } break; case PCI_VENDOR_ID_NETMOS: ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; } temp = 
pci_find_capability(pdev, 0x0a); if (temp) { pci_read_config_dword(pdev, temp, &temp); temp >>= 16; if ((temp & (3 << 13)) == (1 << 13)) { temp &= 0x1fff; ehci->debug = ehci_to_hcd(ehci)->regs + temp; temp = ehci_readl(ehci, &ehci->debug->control); ehci_info(ehci, "debug port %d%s\n", HCS_DEBUG_PORT(ehci->hcs_params), (temp & DBGP_ENABLED) ? " IN USE" : ""); if (!(temp & DBGP_ENABLED)) ehci->debug = NULL; } } ehci_reset(ehci); temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params); temp &= 0x0f; if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) { ehci_dbg(ehci, "bogus port configuration: " "cc=%d x pcc=%d < ports=%d\n", HCS_N_CC(ehci->hcs_params), HCS_N_PCC(ehci->hcs_params), HCS_N_PORTS(ehci->hcs_params)); switch (pdev->vendor) { case 0x17a0: temp |= (ehci->hcs_params & ~0xf); ehci->hcs_params = temp; break; case PCI_VENDOR_ID_NVIDIA: break; } } pci_read_config_byte(pdev, 0x60, &ehci->sbrn); if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST) ehci->sbrn = 0x20; if (!device_can_wakeup(&pdev->dev)) { u16 port_wake; pci_read_config_word(pdev, 0x62, &port_wake); if (port_wake & 0x0001) { dev_warn(&pdev->dev, "Enabling legacy PCI PM\n"); device_set_wakeup_capable(&pdev->dev, 1); } } #ifdef CONFIG_USB_SUSPEND if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) ehci_warn(ehci, "selective suspend/wakeup unavailable\n"); #endif ehci_port_power(ehci, 1); retval = ehci_pci_reinit(ehci, pdev); done: return retval; }
static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct cmd640_reg *timing = ap->private_data; struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct ata_timing t; const unsigned long T = 1000000 / 33; const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 }; u8 reg; int arttim = ARTIM0 + 2 * adev->devno; struct ata_device *pair = ata_dev_pair(adev); if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) { printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); return; } /* The second channel has shared timings; switching the setup timing is messy, so merge it for the worst case */ if (ap->port_no && pair) { struct ata_timing p; ata_timing_compute(pair, pair->pio_mode, &p, T, 1); ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP); } /* Make the timings fit */ if (t.recover > 16) { t.active += t.recover - 16; t.recover = 16; } if (t.active > 16) t.active = 16; /* Now convert the clocks into values we can actually stuff into the chip */ if (t.recover > 1) t.recover--; /* 640B only */ else t.recover = 15; if (t.setup > 4) t.setup = 0xC0; else t.setup = setup_data[t.setup]; if (ap->port_no == 0) { t.active &= 0x0F; /* 0 = 16 */ /* Load setup timing */ pci_read_config_byte(pdev, arttim, &reg); reg &= 0x3F; reg |= t.setup; pci_write_config_byte(pdev, arttim, reg); /* Load active/recovery */ pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover); } else { /* Save the shared timings for the channel; they will be loaded by qc_issue. Reloading the setup time is expensive so we keep a merged one loaded */ pci_read_config_byte(pdev, ARTIM23, &reg); reg &= 0x3F; reg |= t.setup; pci_write_config_byte(pdev, ARTIM23, reg); timing->reg58[adev->devno] = (t.active << 4) | t.recover; } }
static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); unsigned long flags; int rc = 0; int port; if (time_before(jiffies, ehci->next_statechange)) msleep(10); /* s0i3 may power off the SRAM, so back it up first */ if (hcd->has_sram && sram_backup(hcd)) { ehci_warn(ehci, "sram_backup failed\n"); return -EPERM; } /* Root hub was already suspended. Disable irq emission and * mark HW inaccessible. The PM and USB cores make sure that * the root hub is either suspended or stopped. */ ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup); spin_lock_irqsave (&ehci->lock, flags); ehci_writel(ehci, 0, &ehci->regs->intr_enable); (void)ehci_readl(ehci, &ehci->regs->intr_enable); /* Set HOSTPC_PHCD if not set yet to let PHY enter low-power mode */ if (ehci->has_hostpc) { spin_unlock_irqrestore(&ehci->lock, flags); usleep_range(5000, 6000); spin_lock_irqsave(&ehci->lock, flags); port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg; u32 temp; struct pci_dev *pdev = to_pci_dev(hcd->self.controller); if (pdev->device != 0x119D) { hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs + HOSTPC0 + 4 * port); temp = ehci_readl(ehci, hostpc_reg); if (!(temp & HOSTPC_PHCD)) ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg); temp = ehci_readl(ehci, hostpc_reg); ehci_dbg(ehci, "Port %d PHY low-power mode %s\n", port, (temp & HOSTPC_PHCD) ? "succeeded" : "failed"); } } } clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); spin_unlock_irqrestore (&ehci->lock, flags); // could save FLADJ in case of Vaux power loss // ... we'd only use it to handle clock skew return rc; }
/* called during probe() after chip reset completes */ static int xhci_pci_setup(struct usb_hcd *hcd) { struct xhci_hcd *xhci; struct pci_dev *pdev = to_pci_dev(hcd->self.controller); int retval; u32 temp; hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2; if (usb_hcd_is_primary_hcd(hcd)) { xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL); if (!xhci) return -ENOMEM; *((struct xhci_hcd **) hcd->hcd_priv) = xhci; xhci->main_hcd = hcd; /* Mark the first roothub as being USB 2.0. * The xHCI driver will register the USB 3.0 roothub. */ hcd->speed = HCD_USB2; hcd->self.root_hub->speed = USB_SPEED_HIGH; /* * USB 2.0 roothub under xHCI has an integrated TT, * (rate matching hub) as opposed to having an OHCI/UHCI * companion controller. */ hcd->has_tt = 1; } else { /* xHCI private pointer was set in xhci_pci_probe for the second * registered roothub. */ xhci = hcd_to_xhci(hcd); temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); if (HCC_64BIT_ADDR(temp)) { xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); } else { dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); } return 0; } xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); xhci->run_regs = hcd->regs + (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK); /* Cache read-only capability registers */ xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1); xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2); xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3); xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase); xhci->hci_version = HC_VERSION(xhci->hcc_params); xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params); xhci_print_registers(xhci); /* Look for vendor-specific quirks */ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) { if (pdev->revision == 0x0) { xhci->quirks |= XHCI_RESET_EP_QUIRK; xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" " endpoint cmd after reset endpoint\n"); } /* Fresco Logic confirms: all revisions of this chip do not * support MSI, even though some of them claim to in their PCI * capabilities. */ xhci->quirks |= XHCI_BROKEN_MSI; xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u " "has broken MSI implementation\n", pdev->revision); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96) xhci->quirks |= XHCI_AMD_0x96_HOST; /* AMD PLL quirk */ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { xhci->quirks |= XHCI_SPURIOUS_SUCCESS; xhci->quirks |= XHCI_EP_LIMIT_QUIRK; xhci->limit_active_eps = 64; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { xhci->quirks |= XHCI_RESET_ON_RESUME; xhci_dbg(xhci, "QUIRK: Resetting on resume\n"); } if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; /* Make sure the HC is halted. */ retval = xhci_halt(xhci); if (retval) goto error; xhci_dbg(xhci, "Resetting HCD\n"); /* Reset the internal HC memory state and registers. 
*/ retval = xhci_reset(xhci); if (retval) goto error; xhci_dbg(xhci, "Reset complete\n"); temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); if (HCC_64BIT_ADDR(temp)) { xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); } else { dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); } xhci_dbg(xhci, "Calling HCD init\n"); /* Initialize HCD and host controller data structures. */ retval = xhci_init(hcd); if (retval) goto error; xhci_dbg(xhci, "Called HCD init\n"); pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn); xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn); /* Find any debug ports */ retval = xhci_pci_reinit(xhci, pdev); if (!retval) return retval; error: kfree(xhci); return retval; }
static int __init match_pci_dev(struct device *dev, void *data) { unsigned int devfn = *(unsigned int *)data; return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn; }
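/*
 * Hedged usage sketch for the bus_find_device() match callback above: look
 * up a PCI device by devfn on the PCI bus. The wrapper function name is an
 * assumption for illustration only; note that bus_find_device() returns the
 * device with a reference held, which the caller must later drop with
 * pci_dev_put().
 */
static struct pci_dev *find_pci_dev_by_devfn(unsigned int devfn)
{
	struct device *dev;

	dev = bus_find_device(&pci_bus_type, NULL, &devfn, match_pci_dev);
	return dev ? to_pci_dev(dev) : NULL;
}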
static int ast_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *ddev = pci_get_drvdata(pdev); return ast_drm_resume(ddev); }
static int vbox_pm_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *ddev = pci_get_drvdata(pdev); return vbox_drm_thaw(ddev); }
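/*
 * Hedged sketch of how a hibernation callback like vbox_pm_thaw() above is
 * typically wired into a driver's dev_pm_ops; the suspend/resume/freeze
 * counterparts named here are assumptions, not taken from this file.
 */
static const struct dev_pm_ops vbox_pm_ops = {
	.suspend = vbox_pm_suspend,	/* assumed counterpart */
	.resume = vbox_pm_resume,	/* assumed counterpart */
	.freeze = vbox_pm_freeze,	/* assumed counterpart */
	.thaw = vbox_pm_thaw,
};

static struct pci_driver vbox_pci_driver = {
	.name = "vboxvideo",
	.driver.pm = &vbox_pm_ops,
	/* .id_table, .probe, .remove omitted from this sketch */
};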
static int pnv_dma_set_mask(struct device *dev, u64 dma_mask) { if (dev_is_pci(dev)) return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask); return __dma_set_mask(dev, dma_mask); }
static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(dev); /* Look for vendor-specific quirks */ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK || pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) { if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && pdev->revision == 0x0) { xhci->quirks |= XHCI_RESET_EP_QUIRK; xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Fresco Logic xHC needs configure" " endpoint cmd after reset endpoint"); } if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && pdev->revision == 0x4) { xhci->quirks |= XHCI_SLOW_SUSPEND; xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Fresco Logic xHC revision %u " "must be suspended extra slowly", pdev->revision); } if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) xhci->quirks |= XHCI_BROKEN_STREAMS; /* Fresco Logic confirms: all revisions of this chip do not * support MSI, even though some of them claim to in their PCI * capabilities. */ xhci->quirks |= XHCI_BROKEN_MSI; xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Fresco Logic revision %u " "has broken MSI implementation", pdev->revision); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96) xhci->quirks |= XHCI_AMD_0x96_HOST; /* AMD PLL quirk */ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; xhci->quirks |= XHCI_INTEL_HOST; xhci->quirks |= XHCI_AVOID_BEI; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { xhci->quirks |= XHCI_EP_LIMIT_QUIRK; xhci->limit_active_eps = 64; xhci->quirks |= XHCI_SW_BW_CHECKING; /* * PPT desktop boards DH77EB and DH77DF will power back on after * a few seconds of being shutdown. The fix for this is to * switch the ports from xHCI to EHCI on shutdown. We can't use * DMI information to find those particular boards (since each * vendor will change the board name), so we have to key off all * PPT chipsets. */ xhci->quirks |= XHCI_SPURIOUS_REBOOT; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { xhci->quirks |= XHCI_SPURIOUS_REBOOT; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { xhci->quirks |= XHCI_PME_STUCK_QUIRK; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_EJ168) { xhci->quirks |= XHCI_RESET_ON_RESUME; xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_BROKEN_STREAMS; } if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0015) xhci->quirks |= XHCI_RESET_ON_RESUME; if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1042) xhci->quirks |= XHCI_BROKEN_STREAMS; if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Resetting on resume"); }
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct sk_buff *skb; struct skb_frame_desc *skbdesc; unsigned int frame_size; unsigned int head_size = 0; unsigned int tail_size = 0; struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev); struct queue_entry_priv_pci *entry_priv = entry->priv_data; /* * The frame size includes the descriptor size, because the * hardware receives the frame directly into the skbuffer. */ frame_size = entry->queue->data_size + entry->queue->desc_size; /* * The payload should be aligned to a 4-byte boundary, * this means we need at least 3 bytes for moving the frame * into the correct offset. */ head_size = 4; /* * For IV/EIV/ICV assembly we must make sure there are * at least 8 bytes available in the headroom for IV/EIV * and 8 bytes of tailroom for the ICV data. */ if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) { head_size += 8; tail_size += 8; } /* * Allocate skbuffer. */ skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp); if (!skb) return NULL; /* * Reserve the requested headroom and commit the frame size; * the remaining space is left as tailroom. */ skb_reserve(skb, head_size); skb_put(skb, frame_size); /* * Populate skbdesc. */ skbdesc = get_skb_frame_desc(skb); memset(skbdesc, 0, sizeof(*skbdesc)); skbdesc->entry = entry; #if 0 if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) { skbdesc->skb_dma = pci_map_single(pci_dev, skb->data, 3840, PCI_DMA_FROMDEVICE); skbdesc->flags |= SKBDESC_DMA_MAPPED_RX; } #else if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) { skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_FROM_DEVICE); skbdesc->flags |= SKBDESC_DMA_MAPPED_RX; } #endif rt2x00dev->alloc_len = skb->len; return skb; }
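/*
 * Hedged counterpart sketch for the allocation above: before the rxskb is
 * freed, the DMA mapping must be torn down. This mirrors the mapping path
 * but is an illustration, not the driver's actual free routine; the
 * function name is hypothetical.
 */
static void rt2x00queue_unmap_rxskb(struct rt2x00_dev *rt2x00dev,
				    struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
				 skb->len, DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}
}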
static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 mode) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct ata_timing t; const unsigned long T = 1000000 / 33; const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 }; u8 reg; /* Port layout is not logical so use a table */ const u8 arttim_port[2][2] = { { ARTTIM0, ARTTIM1 }, { ARTTIM23, ARTTIM23 } }; const u8 drwtim_port[2][2] = { { DRWTIM0, DRWTIM1 }, { DRWTIM2, DRWTIM3 } }; int arttim = arttim_port[ap->port_no][adev->devno]; int drwtim = drwtim_port[ap->port_no][adev->devno]; /* ata_timing_compute is smart and will produce timings for MWDMA that don't violate the drive's PIO capabilities. */ if (ata_timing_compute(adev, mode, &t, T, 0) < 0) { printk(KERN_ERR DRV_NAME ": mode computation failed.\n"); return; } if (ap->port_no) { /* Slave has shared address setup */ struct ata_device *pair = ata_dev_pair(adev); if (pair) { struct ata_timing tp; ata_timing_compute(pair, pair->pio_mode, &tp, T, 0); ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP); } } printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n", t.active, t.recover, t.setup); if (t.recover > 16) { t.active += t.recover - 16; t.recover = 16; } if (t.active > 16) t.active = 16; /* Now convert the clocks into values we can actually stuff into the chip */ if (t.recover == 16) t.recover = 0; else if (t.recover > 1) t.recover--; else t.recover = 15; if (t.setup > 4) t.setup = 0xC0; else t.setup = setup_data[t.setup]; t.active &= 0x0F; /* 0 = 16 */ /* Load setup timing */ pci_read_config_byte(pdev, arttim, &reg); reg &= 0x3F; reg |= t.setup; pci_write_config_byte(pdev, arttim, reg); /* Load active/recovery */ pci_write_config_byte(pdev, drwtim, (t.active << 4) | t.recover); }
/* * Initialize a controller that was newly discovered or has just been * resumed. In either case we can't be sure of its previous state. * * Returns: 1 if the controller was reset, 0 otherwise. */ static int uhci_pci_check_and_reset_hc(struct uhci_hcd *uhci) { return uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr); }
static int __devinit ohci_pci_start (struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); int ret; /* REVISIT this whole block should move to reset(), which handles * all the other one-time init. */ if (hcd->self.controller) { struct pci_dev *pdev = to_pci_dev(hcd->self.controller); /* AMD 756, for most chips (early revs), corrupts register * values on read ... so enable the vendor workaround. */ if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x740c) { ohci->flags |= OHCI_QUIRK_AMD756; ohci_dbg (ohci, "AMD756 erratum 4 workaround\n"); /* also erratum 10 (suspend/resume issues) */ device_init_wakeup(&hcd->self.root_hub->dev, 0); } /* FIXME for some of the early AMD 760 southbridges, OHCI * won't work at all. blacklist them. */ /* Apple's OHCI driver has a lot of bizarre workarounds * for this chip. Evidently control and bulk lists * can get confused. (B&W G3 models, and ...) */ else if (pdev->vendor == PCI_VENDOR_ID_OPTI && pdev->device == 0xc861) { ohci_dbg (ohci, "WARNING: OPTi workarounds unavailable\n"); } /* Check for NSC87560. We have to look at the bridge (fn1) to * identify the USB (fn2). This quirk might apply to more or * even all NSC stuff. */ else if (pdev->vendor == PCI_VENDOR_ID_NS) { struct pci_dev *b; b = pci_get_slot (pdev->bus, PCI_DEVFN (PCI_SLOT (pdev->devfn), 1)); if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO && b->vendor == PCI_VENDOR_ID_NS) { ohci->flags |= OHCI_QUIRK_SUPERIO; ohci_dbg (ohci, "Using NSC SuperIO setup\n"); } pci_dev_put(b); } /* Check for Compaq's ZFMicro chipset, which needs short * delays before control or bulk queues get re-activated * in finish_unlinks() */ else if (pdev->vendor == PCI_VENDOR_ID_COMPAQ && pdev->device == 0xa0f8) { ohci->flags |= OHCI_QUIRK_ZFMICRO; ohci_dbg (ohci, "enabled Compaq ZFMicro chipset quirk\n"); } /* RWC may not be set for add-in PCI cards, since boot * firmware probably ignored them. This transfers PCI * PM wakeup capabilities (once the PCI layer is fixed). */ if (device_may_wakeup(&pdev->dev)) ohci->hc_control |= OHCI_CTRL_RWC; } /* NOTE: there may have already been a first reset, to * keep bios/smm irqs from making trouble */ if ((ret = ohci_run (ohci)) < 0) { ohci_err (ohci, "can't start\n"); ohci_stop (hcd); return ret; } return 0; }
int ibmasm_init_remote_input_dev(struct service_processor *sp) { struct input_dev *mouse_dev, *keybd_dev; struct pci_dev *pdev = to_pci_dev(sp->dev); int error = -ENOMEM; int i; sp->remote.mouse_dev = mouse_dev = input_allocate_device(); sp->remote.keybd_dev = keybd_dev = input_allocate_device(); if (!mouse_dev || !keybd_dev) goto err_free_devices; mouse_dev->id.bustype = BUS_PCI; mouse_dev->id.vendor = pdev->vendor; mouse_dev->id.product = pdev->device; mouse_dev->id.version = 1; mouse_dev->dev.parent = sp->dev; mouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); mouse_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); set_bit(BTN_TOUCH, mouse_dev->keybit); mouse_dev->name = "ibmasm RSA I remote mouse"; input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0); input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0); keybd_dev->id.bustype = BUS_PCI; keybd_dev->id.vendor = pdev->vendor; keybd_dev->id.product = pdev->device; keybd_dev->id.version = 2; keybd_dev->dev.parent = sp->dev; keybd_dev->evbit[0] = BIT_MASK(EV_KEY); keybd_dev->name = "ibmasm RSA I remote keyboard"; for (i = 0; i < XLATE_SIZE; i++) { if (xlate_high[i]) set_bit(xlate_high[i], keybd_dev->keybit); if (xlate[i]) set_bit(xlate[i], keybd_dev->keybit); } error = input_register_device(mouse_dev); if (error) goto err_free_devices; error = input_register_device(keybd_dev); if (error) goto err_unregister_mouse_dev; enable_mouse_interrupts(sp); printk(KERN_INFO "ibmasm remote responding to events on RSA card %d\n", sp->number); return 0; err_unregister_mouse_dev: input_unregister_device(mouse_dev); mouse_dev = NULL; /* so we don't try to free it again below */ err_free_devices: input_free_device(mouse_dev); input_free_device(keybd_dev); return error; }
static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(dev); /* Look for vendor-specific quirks */ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK || pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) { if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && pdev->revision == 0x0) { xhci->quirks |= XHCI_RESET_EP_QUIRK; xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" " endpoint cmd after reset endpoint\n"); } if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && pdev->revision == 0x4) { xhci->quirks |= XHCI_SLOW_SUSPEND; xhci_dbg(xhci, "QUIRK: Fresco Logic xHC revision %u " "must be suspended extra slowly\n", pdev->revision); } /* Fresco Logic confirms: all revisions of this chip do not * support MSI, even though some of them claim to in their PCI * capabilities. */ xhci->quirks |= XHCI_BROKEN_MSI; xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u " "has broken MSI implementation\n", pdev->revision); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96) xhci->quirks |= XHCI_AMD_0x96_HOST; /* AMD PLL quirk */ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_INTEL) xhci->quirks |= XHCI_AVOID_BEI; if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { xhci->quirks |= XHCI_EP_LIMIT_QUIRK; xhci->limit_active_eps = 64; xhci->quirks |= XHCI_SW_BW_CHECKING; /* * PPT desktop boards DH77EB and DH77DF will power back on after * a few seconds of being shutdown. The fix for this is to * switch the ports from xHCI to EHCI on shutdown. We can't use * DMI information to find those particular boards (since each * vendor will change the board name), so we have to key off all * PPT chipsets. */ xhci->quirks |= XHCI_SPURIOUS_REBOOT; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) { /* Workaround for occasional spurious wakeups from S5 (or * any other sleep) on Haswell machines with LPT and LPT-LP * with the new Intel BIOS */ /* Limit the quirk to only known vendors, as this triggers * yet another BIOS bug on some other machines * https://bugzilla.kernel.org/show_bug.cgi?id=66171 */ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) xhci->quirks |= XHCI_SPURIOUS_WAKEUP; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { xhci->quirks |= XHCI_PME_STUCK_QUIRK; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { xhci->quirks |= XHCI_RESET_ON_RESUME; xhci_dbg(xhci, "QUIRK: Resetting on resume\n"); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0015) xhci->quirks |= XHCI_RESET_ON_RESUME; if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; }
/* * Initialize a controller that was newly discovered or has lost power * or otherwise been reset while it was suspended. In none of these cases * can we be sure of its previous state. */ static void check_and_reset_hc(struct uhci_hcd *uhci) { if (uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr)) finish_reset(uhci); }
static int iTCO_wdt_init(struct platform_device *pdev) { int ret; u32 base_address; unsigned long val32; struct pci_dev *parent; struct resource *irq; if (!pdev->dev.parent || !dev_is_pci(pdev->dev.parent)) { pr_err("Unqualified parent device.\n"); return -EINVAL; } parent = to_pci_dev(pdev->dev.parent); /* * Find the ACPI/PM base I/O address which is the base * for the TCO registers (TCOBASE=ACPIBASE + 0x60) * ACPIBASE is bits [15:7] from 0x40-0x43 */ pci_read_config_dword(parent, 0x40, &base_address); base_address &= 0x0000ff80; if (base_address == 0x00000000) { /* Something's wrong here, ACPIBASE has to be set */ pr_err("failed to get TCOBASE address, device disabled by hardware/BIOS\n"); return -ENODEV; } iTCO_wdt_private.ACPIBASE = base_address; pci_read_config_dword(parent, 0x44, &pmc_base_address); pmc_base_address &= 0xFFFFFE00; /* * Disable watchdog on command-line demand */ if (strstr(saved_command_line, "disable_kernel_watchdog=1")) { pr_warn("disable_kernel_watchdog=1 watchdog will not be started\n"); iTCO_wdt_private.enable = false; /* Set the NO_REBOOT bit to prevent later reboots */ iTCO_wdt_set_NO_REBOOT_bit(); /* Ensure Wdt is well stopped in case started by IAFW */ iTCO_wdt_stop(); } else { iTCO_wdt_private.enable = true; /* Check chipset's NO_REBOOT bit */ if (iTCO_wdt_unset_NO_REBOOT_bit()) { pr_err("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n"); ret = -ENODEV; /* Cannot reset NO_REBOOT bit */ goto out; } } /* The TCO logic uses the TCO_EN bit in the SMI_EN register */ if (!request_region(SMI_EN, 4, "iTCO_wdt")) { pr_err("I/O address 0x%04lx already in use, device disabled\n", SMI_EN); ret = -EIO; goto out; } if (!request_region(SMI_STS, 4, "iTCO_wdt")) { pr_err("I/O address 0x%04lx already in use, device disabled\n", SMI_STS); ret = -EIO; goto unreg_smi_en; } /* The TCO I/O registers reside in a 32-byte range pointed to by the TCOBASE value */ if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) { pr_err("I/O address 0x%04lx already in use, device disabled\n", TCOBASE); ret = -EIO; goto unreg_smi_sts; } pr_info("Found a TCO device (TCOBASE=0x%04lx)\n", TCOBASE); /* Check that the heartbeat value is within its range; if not reset to the default */ if (iTCO_wdt_set_heartbeat(heartbeat)) { iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT); pr_info("timeout value out of range, using %d\n", heartbeat); } ret = misc_register(&iTCO_wdt_miscdev); if (ret != 0) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto unreg_region; } pr_info("initialized. heartbeat=%d sec (nowayout=%d) policy=0x%x\n", heartbeat, nowayout, iTCO_wdt_get_current_ospolicy()); /* Reset OS policy */ iTCO_wdt_set_reset_type(TCO_POLICY_NORM); irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq) { pr_err("No warning interrupt resource found\n"); ret = -ENODEV; goto misc_unreg; } ret = acpi_register_gsi(NULL, irq->start, irq->flags & (IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE) ? ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE, irq->flags & (IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_HIGHLEVEL) ? 
ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW); if (ret < 0) { pr_err("failed to configure TCO warning IRQ %d\n", (int)irq->start); goto misc_unreg; } ret = request_irq(irq->start, tco_irq_handler, 0, "tco_watchdog", NULL); if (ret < 0) { pr_err("failed to request TCO warning IRQ %d\n", (int)irq->start); goto gsi_unreg; } /* Clear old TCO timeout status */ val32 = TCO_TIMEOUT_BIT | SECOND_TO_STS_BIT; outl(val32, TCO1_STS); /* Clear the SMI status */ outl(TCO_STS_BIT, SMI_STS); /* Enable SMI for TCO */ val32 = inl(SMI_EN); val32 |= TCO_EN_BIT; outl(val32, SMI_EN); /* then ensure that PMC is ready to handle next SMI */ val32 |= EOS_BIT; outl(val32, SMI_EN); reboot_notifier.notifier_call = TCO_reboot_notifier; reboot_notifier.priority = 1; ret = register_reboot_notifier(&reboot_notifier); if (ret) /* We continue as reboot notifier is not critical for * watchdog */ pr_err("cannot register reboot notifier %d\n", ret); return 0; gsi_unreg: acpi_unregister_gsi((int)(irq->start)); misc_unreg: misc_deregister(&iTCO_wdt_miscdev); unreg_region: release_region(TCOBASE, 0x20); unreg_smi_sts: release_region(SMI_STS, 4); unreg_smi_en: release_region(SMI_EN, 4); out: iTCO_wdt_private.ACPIBASE = 0; return ret; }
/** * jmicron_pre_reset - check for 40/80 pin * @link: ATA link * @deadline: deadline jiffies for the operation * * Perform the PATA port setup we need. * * On the Jmicron 361/363 there is a single PATA port that can be mapped * either as primary or secondary (or neither). We don't do any policy * and setup here. We assume that has been done by init_one and the * BIOS. */ static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 control; u32 control5; int port_mask = 1<< (4 * ap->port_no); int port = ap->port_no; port_type port_map[2]; /* Check if our port is enabled */ pci_read_config_dword(pdev, 0x40, &control); if ((control & port_mask) == 0) return -ENOENT; /* There are two basic mappings. One has the two SATA ports merged as master/slave and the secondary as PATA, the other has only the SATA port mapped */ if (control & (1 << 23)) { port_map[0] = PORT_SATA; port_map[1] = PORT_PATA0; } else { port_map[0] = PORT_SATA; port_map[1] = PORT_SATA; } /* The 365/366 may have this bit set to map the second PATA port as the internal primary channel */ pci_read_config_dword(pdev, 0x80, &control5); if (control5 & (1<<24)) port_map[0] = PORT_PATA1; /* The two ports may then be logically swapped by the firmware */ if (control & (1 << 22)) port = port ^ 1; /* * Now we know which physical port we are talking about we can * actually do our cable checking etc. Thankfully we don't need * to do the plumbing for other cases. */ switch (port_map[port]) { case PORT_PATA0: if ((control & (1 << 5)) == 0) return -ENOENT; if (control & (1 << 3)) /* 40/80 pin primary */ ap->cbl = ATA_CBL_PATA40; else ap->cbl = ATA_CBL_PATA80; break; case PORT_PATA1: /* Bit 21 is set if the port is enabled */ if ((control5 & (1 << 21)) == 0) return -ENOENT; if (control5 & (1 << 19)) /* 40/80 pin secondary */ ap->cbl = ATA_CBL_PATA40; else ap->cbl = ATA_CBL_PATA80; break; case PORT_SATA: ap->cbl = ATA_CBL_SATA; break; } return ata_sff_prereset(link, deadline); }
static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(dev); /* Look for vendor-specific quirks */ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK || pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) { if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && pdev->revision == 0x0) { xhci->quirks |= XHCI_RESET_EP_QUIRK; xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" " endpoint cmd after reset endpoint\n"); } /* Fresco Logic confirms: all revisions of this chip do not * support MSI, even though some of them claim to in their PCI * capabilities. */ xhci->quirks |= XHCI_BROKEN_MSI; xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u " "has broken MSI implementation\n", pdev->revision); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96) xhci->quirks |= XHCI_AMD_0x96_HOST; /* AMD PLL quirk */ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; xhci->quirks |= XHCI_INTEL_HOST; xhci->quirks |= XHCI_AVOID_BEI; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { xhci->quirks |= XHCI_EP_LIMIT_QUIRK; xhci->limit_active_eps = 64; xhci->quirks |= XHCI_SW_BW_CHECKING; /* * PPT desktop boards DH77EB and DH77DF will power back on after * a few seconds of being shutdown. The fix for this is to * switch the ports from xHCI to EHCI on shutdown. We can't use * DMI information to find those particular boards (since each * vendor will change the board name), so we have to key off all * PPT chipsets. */ xhci->quirks |= XHCI_SPURIOUS_REBOOT; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { xhci->quirks |= XHCI_RESET_ON_RESUME; xhci_dbg(xhci, "QUIRK: Resetting on resume\n"); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; }
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 }; pci_write_config_byte(pdev, PATA_PIO_TIMING, pio_bits[adev->pio_mode - XFER_PIO_0]); }
/* called during probe() after chip reset completes */ static int ehci_pci_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); struct pci_dev *p_smbus; u8 rev; u32 temp; int retval; int force_otg_hc_mode = 0; switch (pdev->vendor) { case PCI_VENDOR_ID_TOSHIBA_2: /* celleb's companion chip */ if (pdev->device == 0x01b5) { #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO ehci->big_endian_mmio = 1; #else ehci_warn(ehci, "unsupported big endian Toshiba quirk\n"); #endif } break; case PCI_VENDOR_ID_INTEL: if (pdev->device == 0x0811 || pdev->device == 0x0829 || pdev->device == 0xE006) { ehci_info(ehci, "Detected Intel MID OTG HC\n"); hcd->has_tt = 1; ehci->has_hostpc = 1; #ifdef CONFIG_USB_OTG ehci->has_otg = 1; #endif force_otg_hc_mode = 1; hcd->has_sram = 1; /* * Disable SRAM for CLVP A0 due to the silicon issue. */ if (pdev->device == 0xE006 && pdev->revision < 0xC) { ehci_info(ehci, "Disable SRAM for CLVP A0\n"); hcd->has_sram = 0; } hcd->sram_no_payload = 1; sram_init(hcd); } else if (pdev->device == 0x0806) { ehci_info(ehci, "Detected Langwell MPH\n"); hcd->has_tt = 1; ehci->has_hostpc = 1; hcd->has_sram = 1; hcd->sram_no_payload = 1; sram_init(hcd); } else if (pdev->device == 0x0829) { ehci_info(ehci, "Detected Penwell OTG HC\n"); hcd->has_tt = 1; ehci->has_hostpc = 1; } else if (pdev->device == 0x08F2) { #ifdef CONFIG_USB_EHCI_HCD_SPH struct ehci_sph_pdata *sph_pdata; sph_pdata = pdev->dev.platform_data; if (!sph_pdata) { ehci_err(ehci, "get SPH platform data failed\n"); retval = -ENODEV; return retval; } /* All need to bypass tll mode */ temp = ehci_readl(ehci, hcd->regs + CLV_SPHCFG); temp &= ~CLV_SPHCFG_ULPI1TYPE; ehci_writel(ehci, temp, hcd->regs + CLV_SPHCFG); sph_pdata->enabled = sph_enabled(); /* Check SPH enabled or not */ if (sph_pdata->enabled == 0) { /* ULPI 1 ref-clock switch off */ temp = ehci_readl(ehci, hcd->regs + CLV_SPHCFG); temp |= CLV_SPHCFG_REFCKDIS; ehci_writel(ehci, temp, hcd->regs + CLV_SPHCFG); /* Set Power state */ retval = pci_set_power_state(pdev, PCI_D1); if (retval < 0) ehci_err(ehci, "Set SPH to D1 failed, retval = %d\n", retval); ehci_info(ehci, "USB SPH is disabled\n"); return -ENODEV; } ehci_info(ehci, "Detected SPH HC\n"); hcd->has_tt = 1; ehci->has_hostpc = 1; temp = ehci_readl(ehci, hcd->regs + CLV_SPH_HOSTPC); temp |= CLV_SPH_HOSTPC_PTS; ehci_writel(ehci, temp, hcd->regs + CLV_SPH_HOSTPC); device_set_wakeup_enable(&pdev->dev, true); /* Set Runtime-PM flags for SPH */ hcd->rpm_control = 1; hcd->rpm_resume = 0; pm_runtime_set_active(&pdev->dev); #endif } else if (pdev->device == 0x119C) { ehci_info(ehci, "Detected Merr USB2 HC\n"); hcd->has_tt = 1; ehci->has_hostpc = 1; } else if (pdev->device == 0x119D) { ehci_info(ehci, "Detected HSIC HC\n"); hcd->has_tt = 1; ehci->has_hostpc = 1; ehci->has_lpm = 0; hcd->has_sram = 1; hcd->sram_no_payload = 1; sram_init(hcd); device_set_wakeup_enable(&pdev->dev, true); /* Set Runtime-PM flags for SPH */ hcd->rpm_control = 1; hcd->rpm_resume = 0; pm_runtime_set_active(&pdev->dev); } } ehci->caps = hcd->regs; ehci->regs = hcd->regs + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); dbg_hcs_params(ehci, "reset"); dbg_hcc_params(ehci, "reset"); /* ehci_init() causes memory for DMA transfers to be * allocated. Thus, any vendor-specific workarounds based on * limiting the type of memory used for DMA transfers must * happen before ehci_init() is called. 
*/ switch (pdev->vendor) { case PCI_VENDOR_ID_NVIDIA: /* NVidia reports that certain chips don't handle * QH, ITD, or SITD addresses above 2GB. (But TD, * data buffer, and periodic schedule are normal.) */ switch (pdev->device) { case 0x003c: /* MCP04 */ case 0x005b: /* CK804 */ case 0x00d8: /* CK8 */ case 0x00e8: /* CK8S */ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(31)) < 0) ehci_warn(ehci, "can't enable NVidia " "workaround for >2GB RAM\n"); break; } break; } /* cache this readonly data; minimize chip reads */ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); if (force_otg_hc_mode) ehci_reset(ehci); retval = ehci_halt(ehci); if (retval) return retval; if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) || (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) { /* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may * read/write memory space which does not belong to it when * there is NULL pointer with T-bit set to 1 in the frame list * table. To avoid the issue, the frame list link pointer * should always contain a valid pointer to a inactive qh. */ ehci->use_dummy_qh = 1; ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI " "dummy qh workaround\n"); } /* data structure init */ retval = ehci_init(hcd); if (retval) return retval; switch (pdev->vendor) { case PCI_VENDOR_ID_NEC: ehci->need_io_watchdog = 0; break; case PCI_VENDOR_ID_INTEL: ehci->need_io_watchdog = 0; ehci->fs_i_thresh = 1; if (pdev->device == 0x27cc) { ehci->broken_periodic = 1; ehci_info(ehci, "using broken periodic workaround\n"); } if (pdev->device == 0x0806 || pdev->device == 0x0811 || pdev->device == 0x0829 || pdev->device == 0xE006) { ehci_info(ehci, "disable lpm for langwell/penwell\n"); ehci->has_lpm = 0; } if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) { hcd->has_tt = 1; tdi_reset(ehci); } break; case PCI_VENDOR_ID_TDI: if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { hcd->has_tt = 1; tdi_reset(ehci); } break; case PCI_VENDOR_ID_AMD: /* AMD PLL quirk */ if (usb_amd_find_chipset_info()) ehci->amd_pll_fix = 1; /* AMD8111 EHCI doesn't work, according to AMD errata */ if (pdev->device == 0x7463) { ehci_info(ehci, "ignoring AMD8111 (errata)\n"); retval = -EIO; goto done; } break; case PCI_VENDOR_ID_NVIDIA: switch (pdev->device) { /* Some NForce2 chips have problems with selective suspend; * fixed in newer silicon. */ case 0x0068: if (pdev->revision < 0xa4) ehci->no_selective_suspend = 1; break; /* MCP89 chips on the MacBookAir3,1 give EPROTO when * fetching device descriptors unless LPM is disabled. * There are also intermittent problems enumerating * devices with PPCD enabled. */ case 0x0d9d: ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89"); ehci->has_lpm = 0; ehci->has_ppcd = 0; ehci->command &= ~CMD_PPCEE; break; } break; case PCI_VENDOR_ID_VIA: if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) { u8 tmp; /* The VT6212 defaults to a 1 usec EHCI sleep time which * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes * that sleep time use the conventional 10 usec. */ pci_read_config_byte(pdev, 0x4b, &tmp); if (tmp & 0x20) break; pci_write_config_byte(pdev, 0x4b, tmp | 0x20); } break; case PCI_VENDOR_ID_ATI: /* AMD PLL quirk */ if (usb_amd_find_chipset_info()) ehci->amd_pll_fix = 1; /* SB600 and old version of SB700 have a bug in EHCI controller, * which causes usb devices lose response in some cases. 
*/ if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) { p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); if (!p_smbus) break; rev = p_smbus->revision; if ((pdev->device == 0x4386) || (rev == 0x3a) || (rev == 0x3b)) { u8 tmp; ehci_info(ehci, "applying AMD SB600/SB700 USB " "freeze workaround\n"); pci_read_config_byte(pdev, 0x53, &tmp); pci_write_config_byte(pdev, 0x53, tmp | (1<<3)); } pci_dev_put(p_smbus); } break; case PCI_VENDOR_ID_NETMOS: /* MosChip frame-index-register bug */ ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; } /* optional debug port, normally in the first BAR */ temp = pci_find_capability(pdev, 0x0a); if (temp) { pci_read_config_dword(pdev, temp, &temp); temp >>= 16; if ((temp & (3 << 13)) == (1 << 13)) { temp &= 0x1fff; ehci->debug = ehci_to_hcd(ehci)->regs + temp; temp = ehci_readl(ehci, &ehci->debug->control); ehci_info(ehci, "debug port %d%s\n", HCS_DEBUG_PORT(ehci->hcs_params), (temp & DBGP_ENABLED) ? " IN USE" : ""); if (!(temp & DBGP_ENABLED)) ehci->debug = NULL; } } ehci_reset(ehci); /* at least the Genesys GL880S needs fixup here */ temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params); temp &= 0x0f; if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) { ehci_dbg(ehci, "bogus port configuration: " "cc=%d x pcc=%d < ports=%d\n", HCS_N_CC(ehci->hcs_params), HCS_N_PCC(ehci->hcs_params), HCS_N_PORTS(ehci->hcs_params)); switch (pdev->vendor) { case 0x17a0: /* GENESYS */ /* GL880S: should be PORTS=2 */ temp |= (ehci->hcs_params & ~0xf); ehci->hcs_params = temp; break; case PCI_VENDOR_ID_NVIDIA: /* NF4: should be PCC=10 */ break; } } /* Serial Bus Release Number is at PCI 0x60 offset */ pci_read_config_byte(pdev, 0x60, &ehci->sbrn); if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST) ehci->sbrn = 0x20; /* ConneXT has no sbrn register */ /* Keep this around for a while just in case some EHCI * implementation uses legacy PCI PM support. This test * can be removed on 17 Dec 2009 if the dev_warn() hasn't * been triggered by then. */ if (!device_can_wakeup(&pdev->dev)) { u16 port_wake; pci_read_config_word(pdev, 0x62, &port_wake); if (port_wake & 0x0001) { dev_warn(&pdev->dev, "Enabling legacy PCI PM\n"); device_set_wakeup_capable(&pdev->dev, 1); } } #ifdef CONFIG_USB_SUSPEND /* REVISIT: the controller works fine for wakeup iff the root hub * itself is "globally" suspended, but usbcore currently doesn't * understand such things. * * System suspend currently expects to be able to suspend the entire * device tree, device-at-a-time. If we failed selective suspend * reports, system suspend would fail; so the root hub code must claim * success. That's lying to usbcore, and it matters for runtime * PM scenarios with selective suspend and remote wakeup... */ if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) ehci_warn(ehci, "selective suspend/wakeup unavailable\n"); #endif ehci_port_power(ehci, 1); retval = ehci_pci_reinit(ehci, pdev); done: return retval; }
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 }; pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->dma_mode - XFER_UDMA_0]); }
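/*
 * Defensive variant (an assumption, not the driver's code): bounds-check the
 * mode index before indexing the timing table above, in case a caller ever
 * passes a mode outside XFER_UDMA_0..XFER_UDMA_7. The "_checked" name is
 * hypothetical.
 */
static void vt6421_set_dma_mode_checked(struct ata_port *ap,
					struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = {
		0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0
	};
	unsigned int i = adev->dma_mode - XFER_UDMA_0;

	if (WARN_ON(i >= ARRAY_SIZE(udma_bits)))
		return;		/* unexpected mode: leave timings untouched */
	pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[i]);
}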
static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); /* s0i3 may have powered off the SRAM; restore it here */ if (hcd->has_sram && sram_restore(hcd)) { ehci_warn(ehci, "sram_restore failed, stop resuming.\n"); return -EPERM; } /* The BIOS on systems with the Intel Panther Point chipset may or may * not support xHCI natively. That means that during system resume, it * may switch the ports back to EHCI so that users can use their * keyboard to select a kernel from GRUB after resume from hibernate. * * The BIOS is supposed to remember whether the OS had xHCI ports * enabled before resume, and switch the ports back to xHCI when the * BIOS/OS semaphore is written, but we all know we can't trust BIOS * writers. * * Unconditionally switch the ports back to xHCI after a system resume. * We can't tell whether the EHCI or xHCI controller will be resumed * first, so we have to do the port switchover in both drivers. Writing * a '1' to the port switchover registers should have no effect if the * port was already switched over. */ if (usb_is_intel_switchable_ehci(pdev)) ehci_enable_xhci_companion(); // maybe restore FLADJ if (time_before(jiffies, ehci->next_statechange)) msleep(100); /* Mark hardware accessible again as we are out of D3 state by now */ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); /* If CF is still set and we aren't resuming from hibernation * then we maintained PCI Vaux power. * Just undo the effect of ehci_pci_suspend(). */ if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF && !hibernated) { int mask = INTR_MASK; ehci_prepare_ports_for_controller_resume(ehci); if (!hcd->self.root_hub->do_remote_wakeup) mask &= ~STS_PCD; ehci_writel(ehci, mask, &ehci->regs->intr_enable); ehci_readl(ehci, &ehci->regs->intr_enable); return 0; } usb_root_hub_lost_power(hcd->self.root_hub); /* Else reset, to cope with power loss or flush-to-storage * style "resume" having let BIOS kick in during reboot. */ (void) ehci_halt(ehci); (void) ehci_reset(ehci); (void) ehci_pci_reinit(ehci, pdev); /* emptying the schedule aborts any urbs */ spin_lock_irq(&ehci->lock); if (ehci->reclaim) end_unlink_async(ehci); ehci_work(ehci); spin_unlock_irq(&ehci->lock); ehci_writel(ehci, ehci->command, &ehci->regs->command); ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ /* here we "know" root ports should always stay powered */ ehci_port_power(ehci, 1); ehci->rh_state = EHCI_RH_SUSPENDED; return 0; }
static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 maslave = hwif->channel ? 0x42 : 0x40; int a_speed = 3 << (drive->dn * 4); int u_flag = 1 << drive->dn; int v_flag = 0x01 << drive->dn; int w_flag = 0x10 << drive->dn; int u_speed = 0; int sitre; u16 reg4042, reg4a; u8 reg48, reg54, reg55; pci_read_config_word(dev, maslave, &reg4042); sitre = (reg4042 & 0x4000) ? 1 : 0; pci_read_config_byte(dev, 0x48, &reg48); pci_read_config_word(dev, 0x4a, &reg4a); pci_read_config_byte(dev, 0x54, &reg54); pci_read_config_byte(dev, 0x55, &reg55); if (speed >= XFER_UDMA_0) { u8 udma = speed - XFER_UDMA_0; u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4); if (!(reg48 & u_flag)) pci_write_config_byte(dev, 0x48, reg48 | u_flag); if (speed == XFER_UDMA_5) { pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); } else { pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); } if ((reg4a & a_speed) != u_speed) pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed); if (speed > XFER_UDMA_2) { if (!(reg54 & v_flag)) pci_write_config_byte(dev, 0x54, reg54 | v_flag); } else pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); } else { const u8 mwdma_to_pio[] = { 0, 3, 4 }; u8 pio; if (reg48 & u_flag) pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); if (reg4a & a_speed) pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); if (reg54 & v_flag) pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); if (reg55 & w_flag) pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); if (speed >= XFER_MW_DMA_0) pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; else pio = 2; /* only SWDMA2 is allowed */ piix_set_pio_mode(drive, pio); } }
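/*
 * Worked example (explanatory only) of the u_speed encoding above:
 * min_t(u8, 2 - (udma & 1), udma) maps UDMA modes 0..5 onto the 2-bit
 * per-drive cycle-time field as 0, 1, 2, 1, 2, 1; modes above UDMA_2
 * additionally set the higher base-clock bit (v_flag) in register 0x54.
 * The helper name below is hypothetical.
 */
static u8 piix_udma_timing_field(u8 udma)
{
	return min_t(u8, 2 - (udma & 1), udma);
}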
static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); /* The BIOS on systems with the Intel Panther Point chipset may or may * not support xHCI natively. That means that during system resume, it * may switch the ports back to EHCI so that users can use their * keyboard to select a kernel from GRUB after resume from hibernate. * * The BIOS is supposed to remember whether the OS had xHCI ports * enabled before resume, and switch the ports back to xHCI when the * BIOS/OS semaphore is written, but we all know we can't trust BIOS * writers. * * Unconditionally switch the ports back to xHCI after a system resume. * We can't tell whether the EHCI or xHCI controller will be resumed * first, so we have to do the port switchover in both drivers. Writing * a '1' to the port switchover registers should have no effect if the * port was already switched over. */ if (usb_is_intel_switchable_ehci(pdev)) ehci_enable_xhci_companion(); if (time_before(jiffies, ehci->next_statechange)) msleep(100); set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF && !hibernated) { int mask = INTR_MASK; ehci_prepare_ports_for_controller_resume(ehci); if (!hcd->self.root_hub->do_remote_wakeup) mask &= ~STS_PCD; ehci_writel(ehci, mask, &ehci->regs->intr_enable); ehci_readl(ehci, &ehci->regs->intr_enable); return 0; } usb_root_hub_lost_power(hcd->self.root_hub); (void) ehci_halt(ehci); (void) ehci_reset(ehci); (void) ehci_pci_reinit(ehci, pdev); spin_lock_irq(&ehci->lock); if (ehci->reclaim) end_unlink_async(ehci); ehci_work(ehci); spin_unlock_irq(&ehci->lock); ehci_writel(ehci, ehci->command, &ehci->regs->command); ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); ehci_readl(ehci, &ehci->regs->command); ehci_port_power(ehci, 1); ehci->rh_state = EHCI_RH_SUSPENDED; return 0; }
static int vmw_pm_resume(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); return vmw_pci_resume(pdev); }
void dna_igb_alloc_tx_buffers(struct igb_ring *tx_ring, struct pfring_hooks *hook) { union e1000_adv_tx_desc *tx_desc, *shadow_tx_desc; struct igb_tx_buffer *bi; u16 i; int num_slots_per_page = tx_ring->dna.tot_packet_memory / tx_ring->dna.packet_slot_len; /* Check if the memory has been already allocated */ if(tx_ring->dna.memory_allocated) return; /* nothing to do or no valid netdev defined */ if (!netdev_ring(tx_ring)) return; /* We suppose that RX and TX are in sync */ if(unlikely(enable_debug)) printk("%s(): tx_ring->dna.rx_tx.tx.tot_packet_memory=%d dna.num_memory_pages=%d\n", __FUNCTION__, tx_ring->dna.tot_packet_memory, tx_ring->dna.num_memory_pages); for(i=0; i<tx_ring->dna.num_memory_pages; i++) { tx_ring->dna.rx_tx.tx.packet_memory[i] = alloc_contiguous_memory(&tx_ring->dna.tot_packet_memory, &tx_ring->dna.mem_order); if (tx_ring->dna.rx_tx.tx.packet_memory[i] == 0) { printk("\n\n%s() ERROR: not enough memory for TX DMA ring!!\n\n\n", __FUNCTION__); return; } if(unlikely(enable_debug)) printk("[DNA] %s(): Successfully allocated TX %u@%u bytes at " "0x%08lx [slot_len=%d]\n",__FUNCTION__, tx_ring->dna.tot_packet_memory, i, tx_ring->dna.rx_tx.tx.packet_memory[i], tx_ring->dna.packet_slot_len); } for(i=0; i < tx_ring->count; i++) { u_int offset, page_index; char *pkt; page_index = i / num_slots_per_page; offset = (i % num_slots_per_page) * tx_ring->dna.packet_slot_len; pkt = (char *)(tx_ring->dna.rx_tx.tx.packet_memory[page_index] + offset); bi = &tx_ring->tx_buffer_info[i]; bi->skb = NULL; tx_desc = IGB_TX_DESC(tx_ring, i); if(unlikely(enable_debug)) printk("%s(): [%s@%d] Mapping TX slot %d of %d [pktaddr=%p][tx_desc=%p][offset=%u]\n", __FUNCTION__, tx_ring->netdev->name, tx_ring->queue_index, i, tx_ring->dna.packet_num_slots, pkt, tx_desc, offset); bi->dma = pci_map_single(to_pci_dev(tx_ring->dev), pkt, tx_ring->dna.packet_slot_len, PCI_DMA_TODEVICE); tx_desc->read.buffer_addr = cpu_to_le64(bi->dma); shadow_tx_desc = IGB_TX_DESC(tx_ring, i + tx_ring->count); memcpy(shadow_tx_desc, tx_desc, sizeof(union e1000_adv_tx_desc)); } /* for */ tx_ring->dna.memory_allocated = 1; }
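/*
 * Hedged sketch: the modern DMA-API equivalent of the pci_map_single() call
 * above, including the mapping-error check that the original omits. The
 * function name is illustrative, not the driver's.
 */
static dma_addr_t dna_map_tx_slot(struct device *dev, void *pkt, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, pkt, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return 0;	/* caller must treat 0 as mapping failure */
	return dma;
}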
/* called during probe() after chip reset completes */ static int ehci_pci_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); struct pci_dev *p_smbus; u8 rev; u32 temp; int retval; switch (pdev->vendor) { case PCI_VENDOR_ID_TOSHIBA_2: /* celleb's companion chip */ if (pdev->device == 0x01b5) { #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO ehci->big_endian_mmio = 1; #else ehci_warn(ehci, "unsupported big endian Toshiba quirk\n"); #endif } break; } ehci->caps = hcd->regs; ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); dbg_hcs_params(ehci, "reset"); dbg_hcc_params(ehci, "reset"); /* ehci_init() causes memory for DMA transfers to be * allocated. Thus, any vendor-specific workarounds based on * limiting the type of memory used for DMA transfers must * happen before ehci_init() is called. */ switch (pdev->vendor) { case PCI_VENDOR_ID_NVIDIA: /* NVidia reports that certain chips don't handle * QH, ITD, or SITD addresses above 2GB. (But TD, * data buffer, and periodic schedule are normal.) */ switch (pdev->device) { case 0x003c: /* MCP04 */ case 0x005b: /* CK804 */ case 0x00d8: /* CK8 */ case 0x00e8: /* CK8S */ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(31)) < 0) ehci_warn(ehci, "can't enable NVidia " "workaround for >2GB RAM\n"); break; } break; } /* cache this readonly data; minimize chip reads */ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); if (ehci_quirk_amd_hudson(ehci)) ehci->amd_l1_fix = 1; retval = ehci_halt(ehci); if (retval) return retval; /* data structure init */ retval = ehci_init(hcd); if (retval) return retval; switch (pdev->vendor) { case PCI_VENDOR_ID_INTEL: ehci->need_io_watchdog = 0; if (pdev->device == 0x27cc) { ehci->broken_periodic = 1; ehci_info(ehci, "using broken periodic workaround\n"); } break; case PCI_VENDOR_ID_TDI: if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { hcd->has_tt = 1; tdi_reset(ehci); } break; case PCI_VENDOR_ID_AMD: /* AMD8111 EHCI doesn't work, according to AMD errata */ if (pdev->device == 0x7463) { ehci_info(ehci, "ignoring AMD8111 (errata)\n"); retval = -EIO; goto done; } break; case PCI_VENDOR_ID_NVIDIA: switch (pdev->device) { /* Some NForce2 chips have problems with selective suspend; * fixed in newer silicon. */ case 0x0068: if (pdev->revision < 0xa4) ehci->no_selective_suspend = 1; break; } break; case PCI_VENDOR_ID_VIA: if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) { u8 tmp; /* The VT6212 defaults to a 1 usec EHCI sleep time which * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes * that sleep time use the conventional 10 usec. */ pci_read_config_byte(pdev, 0x4b, &tmp); if (tmp & 0x20) break; pci_write_config_byte(pdev, 0x4b, tmp | 0x20); } break; case PCI_VENDOR_ID_ATI: /* SB600 and old version of SB700 have a bug in EHCI controller, * which causes usb devices lose response in some cases. 
*/ if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) { p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); if (!p_smbus) break; rev = p_smbus->revision; if ((pdev->device == 0x4386) || (rev == 0x3a) || (rev == 0x3b)) { u8 tmp; ehci_info(ehci, "applying AMD SB600/SB700 USB " "freeze workaround\n"); pci_read_config_byte(pdev, 0x53, &tmp); pci_write_config_byte(pdev, 0x53, tmp | (1<<3)); } pci_dev_put(p_smbus); } break; } /* optional debug port, normally in the first BAR */ temp = pci_find_capability(pdev, 0x0a); if (temp) { pci_read_config_dword(pdev, temp, &temp); temp >>= 16; if ((temp & (3 << 13)) == (1 << 13)) { temp &= 0x1fff; ehci->debug = ehci_to_hcd(ehci)->regs + temp; temp = ehci_readl(ehci, &ehci->debug->control); ehci_info(ehci, "debug port %d%s\n", HCS_DEBUG_PORT(ehci->hcs_params), (temp & DBGP_ENABLED) ? " IN USE" : ""); if (!(temp & DBGP_ENABLED)) ehci->debug = NULL; } } ehci_reset(ehci); /* at least the Genesys GL880S needs fixup here */ temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params); temp &= 0x0f; if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) { ehci_dbg(ehci, "bogus port configuration: " "cc=%d x pcc=%d < ports=%d\n", HCS_N_CC(ehci->hcs_params), HCS_N_PCC(ehci->hcs_params), HCS_N_PORTS(ehci->hcs_params)); switch (pdev->vendor) { case 0x17a0: /* GENESYS */ /* GL880S: should be PORTS=2 */ temp |= (ehci->hcs_params & ~0xf); ehci->hcs_params = temp; break; case PCI_VENDOR_ID_NVIDIA: /* NF4: should be PCC=10 */ break; } } /* Serial Bus Release Number is at PCI 0x60 offset */ pci_read_config_byte(pdev, 0x60, &ehci->sbrn); if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST) ehci->sbrn = 0x20; /* ConneXT has no sbrn register */ /* Keep this around for a while just in case some EHCI * implementation uses legacy PCI PM support. This test * can be removed on 17 Dec 2009 if the dev_warn() hasn't * been triggered by then. */ if (!device_can_wakeup(&pdev->dev)) { u16 port_wake; pci_read_config_word(pdev, 0x62, &port_wake); if (port_wake & 0x0001) { dev_warn(&pdev->dev, "Enabling legacy PCI PM\n"); device_set_wakeup_capable(&pdev->dev, 1); } } #ifdef CONFIG_USB_SUSPEND /* REVISIT: the controller works fine for wakeup iff the root hub * itself is "globally" suspended, but usbcore currently doesn't * understand such things. * * System suspend currently expects to be able to suspend the entire * device tree, device-at-a-time. If we failed selective suspend * reports, system suspend would fail; so the root hub code must claim * success. That's lying to usbcore, and it matters for runtime * PM scenarios with selective suspend and remote wakeup... */ if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) ehci_warn(ehci, "selective suspend/wakeup unavailable\n"); #endif ehci_port_power(ehci, 1); retval = ehci_pci_reinit(ehci, pdev); done: return retval; }