/*
 * ath5k_pci_probe() - PCI probe entry for ath5k devices.
 *
 * NOTE(review): this function is truncated in this chunk — it ends after
 * the cache-line-size fixup without the function's closing brace; the
 * remainder (and labels err/err_dis targeted by the gotos below) lies
 * outside the visible source.
 */
static int __devinit ath5k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mem;
	struct ath5k_softc *sc;
	struct ieee80211_hw *hw;
	int ret;
	u8 csz;	/* PCI cache line size, in 32-bit dword units */

	/*
	 * L0s needs to be disabled on all ath5k cards.
	 *
	 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
	 * by default in the future in 2.6.36) this will also mean both L1 and
	 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
	 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
	 * though but cannot currently undue the effect of a blacklist, for
	 * details you can read pcie_aspm_sanity_check() and see how it adjusts
	 * the device link capability.
	 *
	 * It may be possible in the future to implement some PCI API to allow
	 * drivers to override blacklists for pre 1.1 PCIe but for now it is
	 * best to accept that both L0s and L1 will be disabled completely for
	 * distributions shipping with CONFIG_PCIEASPM rather than having this
	 * issue present. Motivation for adding this new API will be to help
	 * with power consumption for some of these devices.
	 */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "can't enable device\n");
		goto err;
	}

	/* XXX 32-bit addressing only */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "32-bit DMA not available\n");
		goto err_dis;
	}

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * Linux 2.4.18 (at least) writes the cache line size
		 * register as a 16-bit wide register which is wrong.
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = L1_CACHE_BYTES >> 2;
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); struct iwl_bus *bus; struct iwl_pci_bus *pci_bus; u16 pci_cmd; int err; bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL); if (!bus) { dev_printk(KERN_ERR, &pdev->dev, "Couldn't allocate iwl_pci_bus"); err = -ENOMEM; goto out_no_pci; } pci_bus = IWL_BUS_GET_PCI_BUS(bus); pci_bus->pci_dev = pdev; /* W/A - seems to solve weird behavior. We need to remove this if we * don't want to stay in L1 all the time. This wastes a lot of power */ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); if (pci_enable_device(pdev)) { err = -ENODEV; goto out_no_pci; } pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (err) { dev_printk(KERN_ERR, bus->dev, "No suitable DMA available.\n"); goto out_pci_disable_device; } } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed"); goto out_pci_disable_device; } pci_bus->hw_base = pci_iomap(pdev, 0, 0); if (!pci_bus->hw_base) { dev_printk(KERN_ERR, bus->dev, "pci_iomap failed"); err = -ENODEV; goto out_pci_release_regions; } dev_printk(KERN_INFO, &pdev->dev, "pci_resource_len = 0x%08llx\n", (unsigned long long) pci_resource_len(pdev, 0)); dev_printk(KERN_INFO, &pdev->dev, "pci_resource_base = %p\n", pci_bus->hw_base); dev_printk(KERN_INFO, &pdev->dev, "HW Revision ID = 0x%X\n", pdev->revision); /* We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); err = pci_enable_msi(pdev); if (err) { dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi 
failed"); goto out_iounmap; } /* TODO: Move this away, not needed if not MSI */ /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); } bus->dev = &pdev->dev; bus->irq = pdev->irq; bus->ops = &pci_ops; err = iwl_probe(bus, cfg); if (err) goto out_disable_msi; return 0; out_disable_msi: pci_disable_msi(pdev); out_iounmap: pci_iounmap(pdev, pci_bus->hw_base); out_pci_release_regions: pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); out_pci_disable_device: pci_disable_device(pdev); out_no_pci: kfree(bus); return err; }
static int xilly_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct xilly_endpoint *endpoint; int rc = 0; endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw); if (!endpoint) return -ENOMEM; pci_set_drvdata(pdev, endpoint); rc = pcim_enable_device(pdev); if (rc) { dev_err(endpoint->dev, "pcim_enable_device() failed. Aborting.\n"); return rc; } /* L0s has caused packet drops. No power saving, thank you. */ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(endpoint->dev, "Incorrect BAR configuration. Aborting.\n"); return -ENODEV; } rc = pcim_iomap_regions(pdev, 0x01, xillyname); if (rc) { dev_err(endpoint->dev, "pcim_iomap_regions() failed. Aborting.\n"); return rc; } endpoint->registers = pcim_iomap_table(pdev)[0]; pci_set_master(pdev); /* Set up a single MSI interrupt */ if (pci_enable_msi(pdev)) { dev_err(endpoint->dev, "Failed to enable MSI interrupts. Aborting.\n"); return -ENODEV; } rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0, xillyname, endpoint); if (rc) { dev_err(endpoint->dev, "Failed to register MSI handler. Aborting.\n"); return -ENODEV; } /* * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1 * is the right thing. But some unclever PCIe drivers report it's OK * when the hardware drops those 64-bit PCIe packets. So trust * nobody and use 32 bits DMA addressing in any case. */ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) endpoint->dma_using_dac = 0; else { dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n"); return -ENODEV; } rc = xillybus_endpoint_discovery(endpoint); if (!rc) return 0; xillybus_do_cleanup(&endpoint->cleanup, endpoint); return rc; }
static int xilly_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct xilly_endpoint *endpoint; int rc; endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw); if (!endpoint) return -ENOMEM; pci_set_drvdata(pdev, endpoint); rc = pcim_enable_device(pdev); if (rc) { dev_err(endpoint->dev, "pcim_enable_device() failed. Aborting.\n"); return rc; } /* L0s has caused packet drops. No power saving, thank you. */ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(endpoint->dev, "Incorrect BAR configuration. Aborting.\n"); return -ENODEV; } rc = pcim_iomap_regions(pdev, 0x01, xillyname); if (rc) { dev_err(endpoint->dev, "pcim_iomap_regions() failed. Aborting.\n"); return rc; } endpoint->registers = pcim_iomap_table(pdev)[0]; pci_set_master(pdev); /* Set up a single MSI interrupt */ if (pci_enable_msi(pdev)) { dev_err(endpoint->dev, "Failed to enable MSI interrupts. Aborting.\n"); return -ENODEV; } rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0, xillyname, endpoint); if (rc) { dev_err(endpoint->dev, "Failed to register MSI handler. Aborting.\n"); return -ENODEV; } /* * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets, * even when the PCIe driver claims that a 64-bit mask is OK. On the * other hand, on some architectures, 64-bit addressing is mandatory. * So go for the 64-bit mask only when failing is the other option. */ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { endpoint->dma_using_dac = 0; } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { endpoint->dma_using_dac = 1; } else { dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n"); return -ENODEV; } return xillybus_endpoint_discovery(endpoint); }