/*
 * nitrox_disable_msix() - release every MSI-X resource held by the device.
 *
 * Frees the registered packet-ring IRQs, then the final (non-ring) vector
 * owned by the device itself, releases the entry/name allocations, and
 * finally disables MSI-X on the PCI function.
 */
static void nitrox_disable_msix(struct nitrox_device *ndev)
{
	struct msix_entry *msix_ent = ndev->msix.entries;
	char **names = ndev->msix.names;
	int i = 0, ring, nr_ring_vectors;

	/* The last entry is the device's own vector; the rest serve rings. */
	nr_ring_vectors = ndev->msix.nr_entries - 1;

	/* clear pkt ring irqs */
	while (i < nr_ring_vectors) {
		if (test_and_clear_bit(i, ndev->msix.irqs)) {
			ring = (i / NR_RING_VECTORS);
			irq_set_affinity_hint(msix_ent[i].vector, NULL);
			free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
		}
		/* Only the first vector of each NR_RING_VECTORS group was requested. */
		i += NR_RING_VECTORS;
	}

	/*
	 * NOTE(review): the loop leaves i at the first multiple of
	 * NR_RING_VECTORS >= nr_ring_vectors.  This indexes the device's
	 * own vector only when nr_ring_vectors is itself a multiple of
	 * NR_RING_VECTORS — confirm that invariant holds for all configs.
	 */
	irq_set_affinity_hint(msix_ent[i].vector, NULL);
	free_irq(msix_ent[i].vector, ndev);
	clear_bit(i, ndev->msix.irqs);

	kfree(ndev->msix.entries);
	/* Free each per-vector name string before the name array itself. */
	for (i = 0; i < ndev->msix.nr_entries; i++)
		kfree(*(names + i));
	kfree(names);

	pci_disable_msix(ndev->pdev);
}
/* Ensure a device is "turned off" and ready to be exported.
 * (Also see xen_pcibk_config_reset to ensure virtual configuration space is
 * ready to be re-exported)
 */
void xen_pcibk_reset_device(struct pci_dev *dev)
{
	u16 cmd;

	/* Quiesce the interrupt path before touching the device state. */
	xen_pcibk_control_isr(dev, 1 /* reset device */);

	/* Disable devices (but not bridges) */
	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
#ifdef CONFIG_PCI_MSI
		/* The guest could have been abruptly killed without
		 * disabling MSI/MSI-X interrupts.*/
		if (dev->msix_enabled)
			pci_disable_msix(dev);
		if (dev->msi_enabled)
			pci_disable_msi(dev);
#endif
		if (pci_is_enabled(dev))
			pci_disable_device(dev);

		/* Clear the whole command register: no I/O, memory or
		 * bus-master access until the device is re-exported. */
		pci_write_config_word(dev, PCI_COMMAND, 0);

		dev->is_busmaster = 0;
	} else {
		/* Bridges stay enabled; only strip memory-write-invalidate
		 * (and thereby bus mastering) if it was on. */
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_INVALIDATE)) {
			cmd &= ~(PCI_COMMAND_INVALIDATE);
			pci_write_config_word(dev, PCI_COMMAND, cmd);

			dev->is_busmaster = 0;
		}
	}
}
/* Counterpart of qib_nosmi: turn off MSI-X delivery for this device. */
void qib_nomsix(struct qib_devdata *dd)
{
#ifdef CONFIG_PCI_MSI
	/* Nothing to undo when the kernel was built without PCI MSI. */
	qib_cdbg(INIT, "disable device MSIx interrupts\n");
	pci_disable_msix(dd->pcidev);
#endif
}
/* Tear down MSI-X on the function, then release the vector array. */
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	pci_disable_msix(dev->pdev);
	kfree(dev->priv.eq_table.msix_arr);
}
static int be_register_isr(struct be_adapter *adapter, struct be_net_object *pnob) { struct net_device *netdev = pnob->netdev; int intx = 0, r; netdev->irq = adapter->pdev->irq; r = be_enable_msix(adapter); if (r == 0) { r = request_irq(adapter->msix_entries[0].vector, be_int, IRQF_SHARED, netdev->name, netdev); if (r) { printk(KERN_WARNING "MSIX Request IRQ failed - Errno %d\n", r); intx = 1; pci_disable_msix(adapter->pdev); adapter->msix_enabled = 0; } } else { intx = 1; } if (intx) { r = request_irq(netdev->irq, be_int, IRQF_SHARED, netdev->name, netdev); if (r) { printk(KERN_WARNING "INTx Request IRQ failed - Errno %d\n", r); return -1; } } adapter->isr_registered = 1; return 0; }
static int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, struct pci_dev *dev, struct xen_pci_op *op) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", pci_name(dev)); if (dev->msix_enabled) { struct xen_pcibk_dev_data *dev_data; pci_disable_msix(dev); dev_data = pci_get_drvdata(dev); if (dev_data) dev_data->ack_intr = 1; } /* * SR-IOV devices (which don't have any legacy IRQ) have * an undefined IRQ value of zero. */ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev), op->value); return 0; }
static void igbuio_pci_remove(struct pci_dev *dev) { struct rte_uio_pci_dev *udev = pci_get_drvdata(dev); struct net_device *netdev; struct net_adapter *adapter; /* unregister device from netdev */ netdev = udev->adapter->netdev; adapter = netdev_priv(netdev); if (udev->adapter->netdev_registered) { unregister_netdev(netdev); udev->adapter->netdev_registered = false; } switch (udev->adapter->type) { case IXGBE: iounmap(udev->adapter->hw._ixgbe_hw.hw_addr); break; case IGB: iounmap(udev->adapter->hw._ixgbe_hw.hw_addr); break; } free_netdev(netdev); sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); uio_unregister_device(&udev->info); igbuio_pci_release_iomem(&udev->info); if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(dev); pci_release_regions(dev); pci_disable_device(dev); pci_set_drvdata(dev, NULL); kfree(udev); }
static void ndev_deinit_isr(struct amd_ntb_dev *ndev) { struct pci_dev *pdev; void __iomem *mmio = ndev->self_mmio; int i; pdev = ndev->ntb.pdev; /* Mask all doorbell interrupts */ ndev->db_mask = ndev->db_valid_mask; writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET); if (ndev->msix) { i = ndev->msix_vec_count; while (i--) free_irq(ndev->msix[i].vector, &ndev->vec[i]); pci_disable_msix(pdev); kfree(ndev->msix); kfree(ndev->vec); } else { free_irq(pdev->irq, ndev); if (pci_dev_msi_enabled(pdev)) pci_disable_msi(pdev); else pci_intx(pdev, 0); } }
/*
 * vfio_msi_enable() - allocate per-vector context and enable MSI or
 * MSI-X with exactly @nvec vectors.
 *
 * Returns 0 on success, -EINVAL if an IRQ mode is already active,
 * -ENOMEM on allocation failure, or the (possibly positive, partial)
 * result of pci_enable_msi{x}_range() when fewer than @nvec vectors
 * could be enabled — in which case everything is rolled back.
 */
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	/* Refuse to switch modes while another IRQ mode is configured. */
	if (!is_irq_none(vdev))
		return -EINVAL;

	/* NOTE(review): nvec * sizeof(...) is an unchecked multiplication;
	 * presumably nvec is bounded by the caller — confirm, or use
	 * kcalloc-style overflow-checked allocation. */
	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		/* All-or-nothing: a partial grant (ret in [1, nvec)) is
		 * rolled back with pci_disable_msix(). */
		ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msix(pdev);
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_range(pdev, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msi(pdev);
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
/*
 * pciback_disable_msix() - handle a guest request to disable MSI-X on
 * an exported device and report the legacy IRQ back via op->value.
 * Always returns 0.
 */
int pciback_disable_msix(struct pciback_device *pdev, struct pci_dev *dev,
			 struct xen_pci_op *op)
{
	/*
	 * Only disable when MSI-X is actually enabled: calling
	 * pci_disable_msix() on a device without MSI-X active triggers a
	 * kernel warning, and a buggy or malicious guest can issue this
	 * op at any time.  (Matches the guard in xen_pcibk_disable_msix.)
	 */
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	op->value = dev->irq;
	return 0;
}
/*
 * Set up MSI-X
 *
 * Allocates and enables one MSI-X vector per online CPU plus one
 * (capped at the controller's HCSPARAMS1 limit) and requests an IRQ
 * for each.  Returns 0 on success or a negative errno; on any failure
 * all vectors and the entry array are released again.
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * calculate number of msi-x vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: maximum msi-x vectors per CPUs core.
	 *   Add additional 1 vector to ensure always available interrupt.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
			       HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	/* Vectors are assigned by pci_enable_msix_exact(); clear them. */
	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix_exact(pdev, xhci->msix_entries,
				    xhci->msix_count);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "Failed to enable MSI-X");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				  xhci_msi_irq,
				  0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			/* xhci_free_irq() below releases the ones we got. */
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
/*
 * isci_setup_interrupts() - enable MSI-X (two vectors per controller:
 * even = normal, odd = error) and request an IRQ for each; on any
 * failure, unwind what was requested and fall back to a shared legacy
 * INTx handler per host.
 */
static int isci_setup_interrupts(struct pci_dev *pdev)
{
	int err, i, num_msix;
	struct isci_host *ihost;
	struct isci_pci_info *pci_info = to_pci_info(pdev);

	/*
	 * Determine the number of vectors associated with this
	 * PCI function.
	 */
	num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;

	for (i = 0; i < num_msix; i++)
		pci_info->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
	if (err)
		goto intx;

	for (i = 0; i < num_msix; i++) {
		int id = i / SCI_NUM_MSI_X_INT;
		struct msix_entry *msix = &pci_info->msix_entries[i];
		irq_handler_t isr;

		ihost = pci_info->hosts[id];
		/* odd numbered vectors are error interrupts */
		if (i & 1)
			isr = isci_error_isr;
		else
			isr = isci_msix_isr;

		err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
				       DRV_NAME"-msix", ihost);
		if (!err)
			continue;

		/* Unwind every vector requested so far, then fall back. */
		dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
		while (i--) {
			id = i / SCI_NUM_MSI_X_INT;
			ihost = pci_info->hosts[id];
			msix = &pci_info->msix_entries[i];
			devm_free_irq(&pdev->dev, msix->vector, ihost);
		}
		pci_disable_msix(pdev);
		goto intx;
	}
	return 0;

intx:
	for_each_isci_host(i, ihost, pdev) {
		err = devm_request_irq(&pdev->dev, pdev->irq,
				       isci_intx_isr, IRQF_SHARED,
				       DRV_NAME"-intx", ihost);
		if (err)
			break;
	}
	/* NOTE(review): the function body appears truncated here in this
	 * chunk — the trailing `return err;` and closing brace are not
	 * visible; confirm against the full file. */
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) { if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; pci_disable_msi(adapter->pdev); } }
static void xhci_cleanup_msix(struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); if (!xhci->msix_entries) return; free_irq(xhci->msix_entries[0].vector, xhci); pci_disable_msix(pdev); kfree(xhci->msix_entries); xhci->msix_entries = NULL; xhci_dbg(xhci, "Finished cleaning up MSI-X\n"); }
/*
 * mic_remove() - PCI remove callback for a MIC board: detach sysfs,
 * shut the adapter down, wait for its reset, release interrupts and
 * all PCI resources, then free the board bookkeeping.
 */
static void mic_remove(struct pci_dev *pdev)
{
	int32_t brdnum;
	bd_info_t *bd_info;

	/* Nothing registered — nothing to remove. */
	if (mic_data.dd_numdevs - 1 < 0)
		return;
	mic_data.dd_numdevs--;
	brdnum = mic_data.dd_numdevs;

	/* Make sure boards are shutdown and not available. */
	bd_info = mic_data.dd_bi[brdnum];

	spin_lock_bh(&bd_info->bi_ctx.sysfs_lock);
	sysfs_put(bd_info->bi_ctx.sysfs_state);
	bd_info->bi_ctx.sysfs_state = NULL;
	spin_unlock_bh(&bd_info->bi_ctx.sysfs_lock);

	if (bd_info->bi_ctx.bi_psmi.enabled) {
		device_remove_bin_file(bd_info->bi_sysfsdev,
				       &mic_psmi_ptes_attr);
		sysfs_remove_group(&bd_info->bi_sysfsdev->kobj,
				   &psmi_attr_group);
	}
	sysfs_remove_group(&bd_info->bi_sysfsdev->kobj, &bd_attr_group);
	free_sysfs_entries(&bd_info->bi_ctx);
	device_destroy(mic_lindata.dd_class,
		       mic_lindata.dd_dev + 2 + bd_info->bi_ctx.bi_id);

	adapter_stop_device(&bd_info->bi_ctx, 1, 0);
	/*
	 * Need to wait for reset since accessing the card while GDDR training
	 * is ongoing by adapter_remove(..) below for example can be fatal.
	 */
	wait_for_reset(&bd_info->bi_ctx);

	mic_disable_interrupts(&bd_info->bi_ctx);

	/* Free whichever interrupt mode was in use (MSI-X only exists
	 * when the kernel was built with CONFIG_PCI_MSI). */
	if (!bd_info->bi_ctx.msie) {
		free_irq(bd_info->bi_ctx.bi_pdev->irq, &bd_info->bi_ctx);
#ifdef CONFIG_PCI_MSI
	} else {
		free_irq(bd_info->bi_msix_entries[0].vector, &bd_info->bi_ctx);
		pci_disable_msix(bd_info->bi_ctx.bi_pdev);
#endif
	}

	adapter_remove(&bd_info->bi_ctx);
	release_mem_region(bd_info->bi_ctx.aper.pa, bd_info->bi_ctx.aper.len);
	release_mem_region(bd_info->bi_ctx.mmio.pa, bd_info->bi_ctx.mmio.len);
	pci_disable_device(bd_info->bi_ctx.bi_pdev);
	kfree(bd_info);
}
/*
 * Free every MSI-X IRQ that was successfully requested for this host,
 * then disable MSI-X on the PCI function.
 */
static void qla24xx_disable_msix(scsi_qla_host_t *ha)
{
	int i;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		/* imsix_entries maps the slot order to ha->msix_entries
		 * indices (table defined elsewhere in this driver). */
		struct qla_msix_entry *qentry =
			&ha->msix_entries[imsix_entries[i].index];

		if (qentry->have_irq)
			free_irq(qentry->msix_vector, ha);
	}

	pci_disable_msix(ha->pdev);
}
/*
 * Detach the interrupt handler (if any) and turn off whichever of
 * MSI / MSI-X was enabled, clearing the mode flags as we go.
 */
static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
{
	/* Release the handler first so nothing races the disable below. */
	if (adapter->irq) {
		free_irq(adapter->irq, adapter);
		adapter->irq = 0;
	}

	if (adapter->use_msi) {
		pci_disable_msi(adapter->dev);
		adapter->use_msi = 0;
	} else if (adapter->use_msix) {
		pci_disable_msix(adapter->dev);
		adapter->use_msix = 0;
	}
}
/*
 * Undo be_register_isr(): free the MSI-X vector (and disable MSI-X) or
 * the legacy IRQ, and clear the registration flag.  No-op when no
 * handler was registered.
 */
static void be_unregister_isr(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdevp;

	if (!adapter->isr_registered)
		return;

	if (adapter->msix_enabled) {
		free_irq(adapter->msix_entries[0].vector, netdev);
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = 0;
	} else {
		free_irq(netdev->irq, netdev);
	}

	adapter->isr_registered = 0;
}
static void igbuio_pci_remove(struct pci_dev *dev) { struct rte_uio_pci_dev *udev = pci_get_drvdata(dev); sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); uio_unregister_device(&udev->info); igbuio_pci_release_iomem(&udev->info); if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(dev); pci_release_regions(dev); pci_disable_device(dev); pci_set_drvdata(dev, NULL); kfree(udev); }
/*
 * Disable whichever interrupt mode the vnic is using and record the
 * mode as unknown again.
 */
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	default:
		/* INTx or unknown: nothing to disable. */
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
/*
 * Disable whichever interrupt mode the vnic is using and fall back to
 * legacy INTx mode.
 */
void fnic_clear_intr_mode(struct fnic *fnic)
{
	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(fnic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(fnic->pdev);
		break;
	default:
		/* Already INTx (or unknown): nothing to disable. */
		break;
	}

	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
/*
 * Release the driver's interrupts: every MSI-X vector (then disable
 * MSI-X and clear the flag), or the single legacy IRQ otherwise.
 */
void bfad_remove_intr(struct bfad_s *bfad)
{
	int i;

	if (!(bfad->bfad_flags & BFAD_MSIX_ON)) {
		free_irq(bfad->pcidev->irq, bfad);
		return;
	}

	for (i = 0; i < bfad->nvec; i++)
		free_irq(bfad->msix_tab[i].msix.vector,
			 &bfad->msix_tab[i]);

	pci_disable_msix(bfad->pcidev);
	bfad->bfad_flags &= ~BFAD_MSIX_ON;
}
static void igbuio_pci_disable_interrupts(struct rte_uio_pci_dev *udev) { if (udev->info.irq) { free_irq(udev->info.irq, udev); udev->info.irq = 0; } #ifndef HAVE_ALLOC_IRQ_VECTORS if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(udev->pdev); if (udev->mode == RTE_INTR_MODE_MSI) pci_disable_msi(udev->pdev); #else if (udev->mode == RTE_INTR_MODE_MSIX || udev->mode == RTE_INTR_MODE_MSI) pci_free_irq_vectors(udev->pdev); #endif }
static void ivshmem_pci_remove(struct pci_dev *dev) { struct ivshmem_info *ivshmem_info = pci_get_drvdata(dev); struct uio_info *info = ivshmem_info->uio; pci_set_drvdata(dev, NULL); uio_unregister_device(info); if (ivshmem_info->nvectors) { free_msix_vectors(ivshmem_info, ivshmem_info->nvectors); pci_disable_msix(dev); kfree(ivshmem_info->msix_entries); kfree(ivshmem_info->msix_names); } iounmap(info->mem[0].internal_addr); pci_release_regions(dev); pci_disable_device(dev); kfree(info); kfree(ivshmem_info); }
/*
 * mlx4_remove_one() - PCI remove callback; tears the HCA down in the
 * reverse order of __mlx4_init_one(): unregister, per-port cleanup,
 * resource tables, UAR, EQs, HCA close, command interface, MSI-X and
 * finally the PCI function itself.
 */
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		/* Ports are 1-based. */
		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		/* Release resource tables in reverse allocation order;
		 * switch commands to polling before the EQs go away. */
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_pd_table(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		mlx4_free_eq_table(dev);
		mlx4_close_hca(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);

		kfree(priv);
		pci_release_region(pdev, 2);
		pci_release_region(pdev, 0);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/* Free any IRQs and disable MSI-X */ static void xhci_cleanup_msix(struct xhci_hcd *xhci) { struct usb_hcd *hcd = xhci_to_hcd(xhci); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); if (xhci->quirks & XHCI_PLAT) return; xhci_free_irq(xhci); if (xhci->msix_entries) { pci_disable_msix(pdev); kfree(xhci->msix_entries); xhci->msix_entries = NULL; } else { pci_disable_msi(pdev); } hcd->msix_enabled = 0; return; }
/*
 * Undo vfio_msi_enable(): unblock/teardown every vector, disable any
 * per-vector eventfd triggers, turn off MSI or MSI-X, and release the
 * context bookkeeping.
 */
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ctx_idx;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (ctx_idx = 0; ctx_idx < vdev->num_ctx; ctx_idx++) {
		virqfd_disable(vdev, &vdev->ctx[ctx_idx].unmask);
		virqfd_disable(vdev, &vdev->ctx[ctx_idx].mask);
	}

	if (msix) {
		pci_disable_msix(pdev);
		kfree(vdev->msix);
	} else {
		pci_disable_msi(pdev);
	}

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/*
 * xhci_setup_msix() - enable a single MSI-X vector for the controller
 * and register the interrupt handler; on failure everything is rolled
 * back and a negative errno is returned.
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/*
	 * NOTE(review): msix_count is left at 0, yet pci_enable_msix() is
	 * asked for that many vectors while exactly one entry is allocated
	 * and used — confirm whether a count of 1 was intended here.
	 */
	xhci->msix_count = 0;
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			  "xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}

	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
static int avp_pci_release_interrupts(struct pci_dev *dev, struct wrs_avp_pci_dev *avp_pci_dev) { void *registers = avp_pci_dev->addresses[WRS_AVP_PCI_MMIO_BAR]; struct msix_entry *entry; unsigned i; if (registers) { /* disable device interrupts */ iowrite32(WRS_AVP_NO_INTERRUPTS_MASK, registers + WRS_AVP_INTERRUPT_MASK_OFFSET); } if (avp_pci_dev->msix_vectors > 0) { for (i = 0; i < avp_pci_dev->msix_vectors; i++) { /* release all IRQ entries */ entry = &avp_pci_dev->msix_entries[i]; free_irq(entry->vector, avp_pci_dev); AVP_DBG("MSI-X vector %u IRQ %u disabled\n", entry->entry, entry->vector); } /* disable MSI-X processing */ pci_disable_msix(dev); /* free resources */ kfree(avp_pci_dev->msix_entries); kfree(avp_pci_dev->msix_names); AVP_DBG("MSI-X disabled %d vector(s)\n", avp_pci_dev->msix_vectors); } else { /* release IRQ based device interrupt */ free_irq(dev->irq, avp_pci_dev); AVP_DBG("IRQ interrupt %u disabled\n", dev->irq); } return 0; }
/*
 * __mlx4_init_one() - PCI probe worker: enable the function, validate
 * and claim its BARs, set DMA masks, reset and bring up the HCA,
 * enable MSI-X (falling back to INTx on -EBUSY), initialize per-port
 * state and register the device.  Unwinds via the goto chain on error.
 */
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	printk(KERN_INFO PFX "Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/* BAR0 must be the 1MB device control space (DCS). */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	/* BAR2 must be the UAR space. */
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA; 32-bit is an acceptable fallback. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	err = mlx4_reset(dev);
	if (err) {
		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	/*
	 * NOTE(review): err is not updated here — on mlx4_cmd_init()
	 * failure the function falls through the cleanup chain with
	 * err still 0 and the probe reports success.  Confirm and set
	 * a proper negative errno before the goto.
	 */
	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_init_hca(dev);
	if (err)
		goto err_cmd;

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_close;

	mlx4_enable_msi_x(dev);

	/* -EBUSY with MSI-X on: retry the whole setup in INTx mode. */
	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_free_eq;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

err_port:
	/* Unwind all ports, even those not yet initialized (their
	 * cleanup is expected to tolerate that). */
	for (port = 1; port <= dev->caps.num_ports; port++)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}