static int aq_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct aq_nic_s *self; int err; struct net_device *ndev; resource_size_t mmio_pa; u32 bar; u32 numvecs; err = pci_enable_device(pdev); if (err) return err; err = aq_pci_func_init(pdev); if (err) goto err_pci_func; ndev = aq_ndev_alloc(); if (!ndev) { err = -ENOMEM; goto err_ndev; } self = netdev_priv(ndev); self->pdev = pdev; SET_NETDEV_DEV(ndev, &pdev->dev); pci_set_drvdata(pdev, self); err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops, &aq_nic_get_cfg(self)->aq_hw_caps); if (err) goto err_ioremap; self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL); if (!self->aq_hw) { err = -ENOMEM; goto err_ioremap; } self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self); for (bar = 0; bar < 4; ++bar) { if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) { resource_size_t reg_sz; mmio_pa = pci_resource_start(pdev, bar); if (mmio_pa == 0U) { err = -EIO; goto err_free_aq_hw; } reg_sz = pci_resource_len(pdev, bar); if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { err = -EIO; goto err_free_aq_hw; } self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); if (!self->aq_hw->mmio) { err = -EIO; goto err_free_aq_hw; } break; } } if (bar == 4) { err = -EIO; goto err_free_aq_hw; } numvecs = min((u8)AQ_CFG_VECS_DEF, aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs); numvecs = min(numvecs, num_online_cpus()); /*enable interrupts */ #if !AQ_CFG_FORCE_LEGACY_INT err = pci_alloc_irq_vectors(self->pdev, 1, numvecs, PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); if (err < 0) goto err_hwinit; numvecs = err; #endif self->irqvecs = numvecs; /* net device init */ aq_nic_cfg_start(self); aq_nic_ndev_init(self); err = aq_nic_ndev_register(self); if (err < 0) goto err_register; return 0; err_register: aq_nic_free_vectors(self); aq_pci_free_irq_vectors(self); err_hwinit: iounmap(self->aq_hw->mmio); err_free_aq_hw: kfree(self->aq_hw); err_ioremap: free_netdev(ndev); err_ndev: pci_release_regions(pdev); err_pci_func: 
pci_disable_device(pdev); return err; }
static int pci_endpoint_test_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int i; int err; int irq = 0; int id; char name[20]; enum pci_barno bar; void __iomem *base; struct device *dev = &pdev->dev; struct pci_endpoint_test *test; struct pci_endpoint_test_data *data; enum pci_barno test_reg_bar = BAR_0; struct miscdevice *misc_device; if (pci_is_bridge(pdev)) return -ENODEV; test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL); if (!test) return -ENOMEM; test->test_reg_bar = 0; test->alignment = 0; test->pdev = pdev; data = (struct pci_endpoint_test_data *)ent->driver_data; if (data) { test_reg_bar = data->test_reg_bar; test->alignment = data->alignment; no_msi = data->no_msi; } init_completion(&test->irq_raised); mutex_init(&test->mutex); err = pci_enable_device(pdev); if (err) { dev_err(dev, "Cannot enable PCI device\n"); return err; } err = pci_request_regions(pdev, DRV_MODULE_NAME); if (err) { dev_err(dev, "Cannot obtain PCI resources\n"); goto err_disable_pdev; } pci_set_master(pdev); if (!no_msi) { irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI); if (irq < 0) dev_err(dev, "failed to get MSI interrupts\n"); } err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler, IRQF_SHARED, DRV_MODULE_NAME, test); if (err) { dev_err(dev, "failed to request IRQ %d\n", pdev->irq); goto err_disable_msi; } for (i = 1; i < irq; i++) { err = devm_request_irq(dev, pdev->irq + i, pci_endpoint_test_irqhandler, IRQF_SHARED, DRV_MODULE_NAME, test); if (err) dev_err(dev, "failed to request IRQ %d for MSI %d\n", pdev->irq + i, i + 1); } for (bar = BAR_0; bar <= BAR_5; bar++) { base = pci_ioremap_bar(pdev, bar); if (!base) { dev_err(dev, "failed to read BAR%d\n", bar); WARN_ON(bar == test_reg_bar); } test->bar[bar] = base; } test->base = test->bar[test_reg_bar]; if (!test->base) { dev_err(dev, "Cannot perform PCI test without BAR%d\n", test_reg_bar); goto err_iounmap; } pci_set_drvdata(pdev, test); id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, 
GFP_KERNEL); if (id < 0) { dev_err(dev, "unable to get id\n"); goto err_iounmap; } snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id); misc_device = &test->miscdev; misc_device->minor = MISC_DYNAMIC_MINOR; misc_device->name = name; misc_device->fops = &pci_endpoint_test_fops, err = misc_register(misc_device); if (err) { dev_err(dev, "failed to register device\n"); goto err_ida_remove; } return 0; err_ida_remove: ida_simple_remove(&pci_endpoint_test_ida, id); err_iounmap: for (bar = BAR_0; bar <= BAR_5; bar++) { if (test->bar[bar]) pci_iounmap(pdev, test->bar[bar]); } err_disable_msi: pci_disable_msi(pdev); pci_release_regions(pdev); err_disable_pdev: pci_disable_device(pdev); return err; }
/* igbuio_pci_enable_interrupts() - set up the interrupt source for a bound
 * device, honouring the module-level preference in igbuio_intr_mode_preferred.
 *
 * The switch below intentionally FALLS THROUGH on failure of each mode:
 * MSI-X -> MSI -> legacy INTX -> no interrupt.  Each successful branch fills
 * udev->info.irq / irq_flags and udev->mode, then breaks out.
 *
 * The HAVE_ALLOC_IRQ_VECTORS forks select between the legacy
 * pci_enable_msix()/pci_enable_msi() API and the newer
 * pci_alloc_irq_vectors() API, depending on the target kernel.
 *
 * Return: 0 on success (including the deliberate no-IRQ mode), negative
 * errno from request_irq() or -EINVAL for an unknown preference.
 */
static int igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
{
	int err = 0;
#ifndef HAVE_ALLOC_IRQ_VECTORS
	struct msix_entry msix_entry;
#endif

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
#ifndef HAVE_ALLOC_IRQ_VECTORS
		msix_entry.entry = 0;
		if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif

		/* fall back to MSI */
	case RTE_INTR_MODE_MSI:
#ifndef HAVE_ALLOC_IRQ_VECTORS
		if (pci_enable_msi(udev->pdev) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#endif

		/* fall back to INTX */
	case RTE_INTR_MODE_LEGACY:
		/* INTX is only usable when it can be masked; IRQF_SHARED
		 * because legacy lines may be shared with other devices.
		 */
		if (pci_intx_mask_supported(udev->pdev)) {
			dev_dbg(&udev->pdev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");

		/* fall back to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = UIO_IRQ_NONE;
		break;

	default:
		dev_err(&udev->pdev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		udev->info.irq = UIO_IRQ_NONE;
		err = -EINVAL;
	}

	/* Hook up the handler unless we ended in the no-IRQ mode. */
	if (udev->info.irq != UIO_IRQ_NONE)
		err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
				  udev->info.irq_flags,
				  udev->info.name, udev);
	dev_info(&udev->pdev->dev, "uio device registered with irq %lx\n",
		 udev->info.irq);

	return err;
}