/* Bus ops */ static int wil_if_pcie_enable(struct wil6210_priv *wil) { struct pci_dev *pdev = wil->pdev; int rc; pci_set_master(pdev); /* * how many MSI interrupts to request? */ switch (use_msi) { case 3: case 1: case 0: break; default: wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi); use_msi = 1; } wil->n_msi = use_msi; if (wil->n_msi) { wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi); rc = pci_enable_msi_block(pdev, wil->n_msi); if (rc && (wil->n_msi == 3)) { wil_err(wil, "3 MSI mode failed, try 1 MSI\n"); wil->n_msi = 1; rc = pci_enable_msi_block(pdev, wil->n_msi); } if (rc) { wil_err(wil, "pci_enable_msi failed, use INTx\n"); wil->n_msi = 0; } } else { wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n"); } rc = wil6210_init_irq(wil, pdev->irq); if (rc) goto stop_master; /* need reset here to obtain MAC */ rc = wil_reset(wil); if (debug_fw) rc = 0; if (rc) goto release_irq; return 0; release_irq: wil6210_fini_irq(wil, pdev->irq); /* safe to call if no MSI */ pci_disable_msi(pdev); stop_master: pci_clear_master(pdev); return rc; }
/*
 * vfio_msi_enable - enable MSI or MSI-X for a vfio-owned PCI device.
 * @vdev: vfio per-device state; must currently have no IRQ mode active
 * @nvec: number of vectors to allocate
 * @msix: true for MSI-X, false for plain MSI
 *
 * Allocates the per-vector context array (and, for MSI-X, the msix_entry
 * table), enables the interrupt mode on the device and records the active
 * mode in @vdev. Returns 0 on success, -EINVAL if an IRQ mode is already
 * active, -ENOMEM on allocation failure, or the pci_enable_* result.
 */
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	/* kcalloc checks nvec * size for overflow, unlike open-coded kzalloc */
	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kcalloc(nvec, sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			ret = -ENOMEM;
			goto out_free_ctx;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		/*
		 * NOTE(review): a positive return means fewer vectors are
		 * available and MSI-X was NOT enabled; it is propagated to
		 * the caller as-is, matching the historical behavior.
		 */
		ret = pci_enable_msix(pdev, vdev->msix, nvec);
		if (ret)
			goto out_free_msix;
	} else {
		/* same positive-return semantics as pci_enable_msix above */
		ret = pci_enable_msi_block(pdev, nvec);
		if (ret)
			goto out_free_ctx;
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;

out_free_msix:
	kfree(vdev->msix);
	vdev->msix = NULL;	/* don't leave a dangling pointer behind */
out_free_ctx:
	kfree(vdev->ctx);
	vdev->ctx = NULL;
	return ret;
}
/*
 * ddb_probe - PCI probe for DDBridge cards.
 *
 * Maps BAR0, masks all interrupt sources, sets up MSI (2 vectors if
 * available, else 1, else legacy shared INTx), requests the IRQ handler(s),
 * initializes DMA bases, I2C, ports and buffers, and attaches the ports.
 *
 * Returns 0 on success, -ENODEV/-ENOMEM for early failures, or -1 as a
 * generic failure for the later goto paths (stat is not set on all of
 * them, so it cannot be returned directly).
 */
static int __devinit ddb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct ddb *dev;
	int stat = 0;
	int irq_flag = IRQF_SHARED;

	if (pci_enable_device(pdev) < 0)
		return -ENODEV;

	dev = vzalloc(sizeof(struct ddb));
	if (dev == NULL)
		return -ENOMEM;
	dev->has_dma = 1;
	dev->pdev = pdev;
	dev->dev = &pdev->dev;
	pci_set_drvdata(pdev, dev);

	dev->ids.vendor = id->vendor;
	dev->ids.device = id->device;
	dev->ids.subvendor = id->subvendor;
	dev->ids.subdevice = id->subdevice;

	dev->info = (struct ddb_info *) id->driver_data;
	pr_info("DDBridge driver detected: %s\n", dev->info->name);

	dev->regs_len = pci_resource_len(dev->pdev, 0);
	dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
			    pci_resource_len(dev->pdev, 0));
	if (!dev->regs) {
		pr_err("DDBridge: not enough memory for register map\n");
		stat = -ENOMEM;
		goto fail;
	}
	/* All-ones usually means the device fell off the bus */
	if (ddbreadl(dev, 0) == 0xffffffff) {
		pr_err("DDBridge: cannot read registers\n");
		stat = -ENODEV;
		goto fail;
	}

	dev->ids.hwid = ddbreadl(dev, 0);
	dev->ids.regmapid = ddbreadl(dev, 4);

	pr_info("HW %08x REGMAP %08x\n",
		dev->ids.hwid, dev->ids.regmapid);

	/* Mask every interrupt source before any IRQ can be delivered */
	ddbwritel(dev, 0x00000000, INTERRUPT_ENABLE);
	ddbwritel(dev, 0x00000000, MSI1_ENABLE);
	ddbwritel(dev, 0x00000000, MSI2_ENABLE);
	ddbwritel(dev, 0x00000000, MSI3_ENABLE);
	ddbwritel(dev, 0x00000000, MSI4_ENABLE);
	ddbwritel(dev, 0x00000000, MSI5_ENABLE);
	ddbwritel(dev, 0x00000000, MSI6_ENABLE);
	ddbwritel(dev, 0x00000000, MSI7_ENABLE);

#ifdef CONFIG_PCI_MSI
	if (msi && pci_msi_enabled()) {
		/* Prefer two MSI vectors; fall back to one, then to INTx */
		stat = pci_enable_msi_block(dev->pdev, 2);
		if (stat == 0) {
			dev->msi = 1;
			pr_info("DDBridge using 2 MSI interrupts\n");
		}
		if (stat == 1)
			stat = pci_enable_msi(dev->pdev);
		if (stat < 0) {
			pr_info(": MSI not available.\n");
		} else {
			irq_flag = 0;
			dev->msi++;	/* dev->msi ends up 2 (dual) or 1 */
		}
	}
	if (dev->msi == 2) {
		stat = request_irq(dev->pdev->irq, irq_handler0,
				   irq_flag, "ddbridge", (void *) dev);
		if (stat < 0)
			goto fail0;
		stat = request_irq(dev->pdev->irq + 1, irq_handler1,
				   irq_flag, "ddbridge", (void *) dev);
		if (stat < 0) {
			free_irq(dev->pdev->irq, dev);
			goto fail0;
		}
	} else
#endif
	{
#ifdef DDB_TEST_THREADED
		stat = request_threaded_irq(dev->pdev->irq, irq_handler,
					    irq_thread, irq_flag,
					    "ddbridge", (void *) dev);
#else
		stat = request_irq(dev->pdev->irq, irq_handler,
				   irq_flag, "ddbridge", (void *) dev);
#endif
		if (stat < 0)
			goto fail0;
	}
	ddbwritel(dev, 0, DMA_BASE_READ);
	if (dev->info->type != DDB_MOD)
		ddbwritel(dev, 0, DMA_BASE_WRITE);

	/*ddbwritel(dev, 0xffffffff, INTERRUPT_ACK);*/
	if (dev->msi == 2) {
		/* dual MSI: vector 0 takes the main sources, vector 1 MSI1 */
		ddbwritel(dev, 0x0fffff00, INTERRUPT_ENABLE);
		ddbwritel(dev, 0x0000000f, MSI1_ENABLE);
	} else {
		ddbwritel(dev, 0x0fffff0f, INTERRUPT_ENABLE);
		ddbwritel(dev, 0x00000000, MSI1_ENABLE);
	}
	if (ddb_i2c_init(dev) < 0)
		goto fail1;
	ddb_ports_init(dev);
	if (ddb_buffers_alloc(dev) < 0) {
		pr_info(": Could not allocate buffer memory\n");
		goto fail2;
	}
	if (ddb_ports_attach(dev) < 0)
		goto fail3;

	/* ignore if this fails */
	ddb_device_create(dev);

	if (dev->info->fan_num)	{
		ddbwritel(dev, 1, GPIO_DIRECTION);
		ddbwritel(dev, 1, GPIO_OUTPUT);
	}
	if (dev->info->type == DDB_MOD)
		ddbridge_mod_init(dev);

	return 0;

fail3:
	ddb_ports_detach(dev);
	pr_err("fail3\n");
	ddb_ports_release(dev);
fail2:
	pr_err("fail2\n");
	ddb_buffers_free(dev);
	ddb_i2c_release(dev);
fail1:
	pr_err("fail1\n");
	/* mask interrupts again before releasing the handlers */
	ddbwritel(dev, 0, INTERRUPT_ENABLE);
	ddbwritel(dev, 0, MSI1_ENABLE);
	free_irq(dev->pdev->irq, dev);
	if (dev->msi == 2)
		free_irq(dev->pdev->irq + 1, dev);
fail0:
	pr_err("fail0\n");
	if (dev->msi)
		pci_disable_msi(dev->pdev);
fail:
	pr_err("fail\n");
	ddb_unmap(dev);
	pci_set_drvdata(pdev, NULL);	/* was integer 0; drvdata is a pointer */
	pci_disable_device(pdev);
	return -1;
}