static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
			   struct msix_entry *msix_entry)
{
	int ret;
	u32 tabsize = 0;
	u16 msix_flags;

	pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
	tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
	if (tabsize > *msixcnt)
		tabsize = *msixcnt;
	ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	if (ret > 0) {
		tabsize = ret;
		ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	}
	if (ret) {
		qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
			    "falling back to INTx\n", tabsize, ret);
		tabsize = 0;
	}
	*msixcnt = tabsize;

	if (ret)
		qib_enable_intx(dd->pcidev);
}
int pciback_enable_msix(struct pciback_device *pdev,
			struct pci_dev *dev, struct xen_pci_op *op)
{
	int i, result;
	struct msix_entry *entries;

	if (op->value > SH_INFO_MAX_VEC)
		return -EINVAL;

	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
	if (entries == NULL)
		return -ENOMEM;

	for (i = 0; i < op->value; i++) {
		entries[i].entry = op->msix_entries[i].entry;
		entries[i].vector = op->msix_entries[i].vector;
	}

	result = pci_enable_msix(dev, entries, op->value);

	for (i = 0; i < op->value; i++) {
		op->msix_entries[i].entry = entries[i].entry;
		op->msix_entries[i].vector = entries[i].vector;
	}

	kfree(entries);

	op->value = result;

	return result;
}
static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry entries[MLX4_NUM_EQ];
	int err;
	int i;

	if (msi_x) {
		for (i = 0; i < MLX4_NUM_EQ; ++i)
			entries[i].entry = i;

		err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
		if (err) {
			if (err > 0)
				mlx4_info(dev, "Only %d MSI-X vectors available, "
					  "not using MSI-X\n", err);
			goto no_msi;
		}

		for (i = 0; i < MLX4_NUM_EQ; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;
		return;
	}

no_msi:
	for (i = 0; i < MLX4_NUM_EQ; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
			   struct qib_msix_entry *qib_msix_entry)
{
	int ret;
	u32 tabsize = 0;
	u16 msix_flags;
	struct msix_entry *msix_entry;
	int i;

	/* We can't pass qib_msix_entry array to qib_msix_setup
	 * so use a dummy msix_entry array and copy the allocated
	 * irq back to the qib_msix_entry array. */
	msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL);
	if (!msix_entry) {
		ret = -ENOMEM;
		goto do_intx;
	}
	for (i = 0; i < *msixcnt; i++)
		msix_entry[i] = qib_msix_entry[i].msix;

	pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
	tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
	if (tabsize > *msixcnt)
		tabsize = *msixcnt;
	ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	if (ret > 0) {
		tabsize = ret;
		ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	}
do_intx:
	if (ret) {
		qib_dev_err(dd,
			"pci_enable_msix %d vectors failed: %d, falling back to INTx\n",
			tabsize, ret);
		tabsize = 0;
	}
	for (i = 0; i < tabsize; i++)
		qib_msix_entry[i].msix = msix_entry[i];
	kfree(msix_entry);
	*msixcnt = tabsize;

	if (ret)
		qib_enable_intx(dd->pcidev);
}
/**
 * Setup MSIX based interrupt.
 */
int bfad_setup_intr(struct bfad_s *bfad)
{
	int error = 0;
	u32 mask = 0, i, num_bit = 0, max_bit = 0;
	struct msix_entry msix_entries[MAX_MSIX_ENTRY];

	/* Call BFA to get the msix map for this PCI function. */
	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

	/* Set up the msix entry table */
	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if (!msix_disable) {
		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
			/*
			 * Only the number of available vectors is
			 * returned. We don't have a mechanism to map
			 * multiple interrupts into one vector, so even
			 * if we could request fewer vectors, we don't
			 * know how to associate interrupt events with
			 * vectors. Linux doesn't duplicate vectors in
			 * the MSIX table for this case.
			 */
			printk(KERN_WARNING "bfad%d: "
			       "pci_enable_msix failed (%d), "
			       "use line based.\n", bfad->inst_no, error);
			goto line_based;
		}

		/* Save the vectors */
		for (i = 0; i < bfad->nvec; i++) {
			bfa_trc(bfad, msix_entries[i].vector);
			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
		}

		bfa_msix_init(&bfad->bfa, bfad->nvec);

		bfad->bfad_flags |= BFAD_MSIX_ON;

		return error;
	}

line_based:
	error = 0;
	if (request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx,
			BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad) != 0) {
		/* Enabling the interrupt handler failed */
		return 1;
	}

	return error;
}
static int isci_setup_interrupts(struct pci_dev *pdev)
{
	int err, i, num_msix;
	struct isci_host *ihost;
	struct isci_pci_info *pci_info = to_pci_info(pdev);

	/*
	 * Determine the number of vectors associated with this
	 * PCI function.
	 */
	num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;

	for (i = 0; i < num_msix; i++)
		pci_info->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
	if (err)
		goto intx;

	for (i = 0; i < num_msix; i++) {
		int id = i / SCI_NUM_MSI_X_INT;
		struct msix_entry *msix = &pci_info->msix_entries[i];
		irq_handler_t isr;

		ihost = pci_info->hosts[id];
		/* odd numbered vectors are error interrupts */
		if (i & 1)
			isr = isci_error_isr;
		else
			isr = isci_msix_isr;

		err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
				       DRV_NAME"-msix", ihost);
		if (!err)
			continue;

		dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
		while (i--) {
			id = i / SCI_NUM_MSI_X_INT;
			ihost = pci_info->hosts[id];
			msix = &pci_info->msix_entries[i];
			devm_free_irq(&pdev->dev, msix->vector, ihost);
		}
		pci_disable_msix(pdev);
		goto intx;
	}
	return 0;

intx:
	for_each_isci_host(i, ihost, pdev) {
		err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
				       IRQF_SHARED, DRV_NAME"-intx", ihost);
		if (err)
			break;
	}
	return err;
}
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix(pdev, vdev->msix, nvec);
		if (ret) {
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_block(pdev, nvec);
		if (ret) {
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
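/*
 * Side note (added for illustration, not part of the vfio snippet above):
 * the msi_qmax expression fls(nvec * 2 - 1) - 1 is a branch-free
 * ceil(log2(nvec)). MSI encodes the supported vector count as a power of
 * two in the Multiple Message Capable field, so only the exponent is
 * stored. A minimal user-space sketch of the same arithmetic; fls_demo()
 * is a hypothetical stand-in for the kernel's fls() helper:
 */
#include <stdio.h>

static int fls_demo(unsigned int x)	/* kernel fls(): index of last set bit, 1-based */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int nvec;

	for (nvec = 1; nvec <= 8; nvec++)
		/* e.g. nvec = 3 -> fls(5) - 1 = 2, i.e. ceil(log2(3)) */
		printf("nvec=%u -> msi_qmax=%d\n", nvec,
		       fls_demo(nvec * 2 - 1) - 1);
	return 0;
}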
static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
{
	struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
	int ret;

	ret = pci_enable_msix(adapter->dev, &entry, 1);
	if (ret)
		return ret;

	*irq = entry.vector;

	return 0;
}
static int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
				 struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int i, result;
	struct msix_entry *entries;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
		       pci_name(dev));
	if (op->value > SH_INFO_MAX_VEC)
		return -EINVAL;

	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
	if (entries == NULL)
		return -ENOMEM;

	for (i = 0; i < op->value; i++) {
		entries[i].entry = op->msix_entries[i].entry;
		entries[i].vector = op->msix_entries[i].vector;
	}

	result = pci_enable_msix(dev, entries, op->value);

	if (result == 0) {
		for (i = 0; i < op->value; i++) {
			op->msix_entries[i].entry = entries[i].entry;
			if (entries[i].vector)
				op->msix_entries[i].vector =
					xen_pirq_from_irq(entries[i].vector);
			if (unlikely(verbose_request))
				printk(KERN_DEBUG DRV_NAME ": %s: MSI-X[%d]: %d\n",
				       pci_name(dev), i,
				       op->msix_entries[i].vector);
		}
	} else {
		printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
		       pci_name(dev), result);
	}

	kfree(entries);

	op->value = result;
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	return result;
}
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq;
	int err;
	int i;

	if (msi_x) {
		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     num_possible_cpus() + 1);
		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		dev->caps.num_comp_vectors = nreq - 1;
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts? Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}
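/*
 * Side note (added for illustration, not part of the snippets above): the
 * downward-negotiation idiom in mlx4_enable_msi_x() and
 * ixgbe_acquire_msix_vectors() -- call pci_enable_msix(), and on a positive
 * return retry with the smaller count the device reported -- is the loop
 * that pci_enable_msix_range() later absorbed into the PCI core. A minimal
 * sketch under that assumption; my_acquire_msix() is a hypothetical name:
 */
static int my_acquire_msix(struct pci_dev *pdev, struct msix_entry *entries,
			   int min_vecs, int max_vecs)
{
	int nvec;

	/*
	 * Returns the number of vectors allocated (somewhere between
	 * min_vecs and max_vecs) or a negative errno; the retry loop
	 * lives inside the PCI core, not in the driver.
	 */
	nvec = pci_enable_msix_range(pdev, entries, min_vecs, max_vecs);
	if (nvec < 0)
		return nvec;	/* caller falls back to MSI or INTx */

	return nvec;		/* may be fewer than max_vecs; size queues to it */
}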
int bfad_setup_intr(struct bfad_s *bfad)
{
	int error = 0;
	u32 mask = 0, i, num_bit = 0, max_bit = 0;
	struct msix_entry msix_entries[MAX_MSIX_ENTRY];

	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if (!msix_disable) {
		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
			printk(KERN_WARNING "bfad%d: "
			       "pci_enable_msix failed (%d), "
			       "use line based.\n", bfad->inst_no, error);
			goto line_based;
		}

		for (i = 0; i < bfad->nvec; i++) {
			bfa_trc(bfad, msix_entries[i].vector);
			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
		}

		bfa_msix_init(&bfad->bfa, bfad->nvec);

		bfad->bfad_flags |= BFAD_MSIX_ON;

		return error;
	}

line_based:
	error = 0;
	if (request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx,
			BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad) != 0)
		return 1;

	return error;
}
static int be_enable_msix(struct be_adapter *adapter)
{
	int i, ret;

	if (!msix)
		return -1;

	for (i = 0; i < BE_MAX_REQ_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	ret = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			      BE_MAX_REQ_MSIX_VECTORS);
	if (ret == 0)
		adapter->msix_enabled = 1;

	return ret;
}
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	uint32_t msix_num_entries = hw_data->num_banks + 1;
	int i;

	for (i = 0; i < msix_num_entries; i++)
		pci_dev_info->msix_entries.entries[i].entry = i;

	if (pci_enable_msix(pci_dev_info->pci_dev,
			    pci_dev_info->msix_entries.entries,
			    msix_num_entries)) {
		pr_err("QAT: Failed to enable MSIX IRQ\n");
		return -EFAULT;
	}
	return 0;
}
static int qla24xx_enable_msix(scsi_qla_host_t *ha)
{
	int i, ret;
	struct msix_entry entries[QLA_MSIX_ENTRIES];
	struct qla_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
		entries[i].entry = imsix_entries[i].entry;

	ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "MSI-X: Failed to enable support -- %d/%d\n",
		    QLA_MSIX_ENTRIES, ret);
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		qentry->msix_vector = entries[i].vector;
		qentry->msix_entry = entries[i].entry;
		qentry->have_irq = 0;
		ret = request_irq(qentry->msix_vector,
		    imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
		if (ret) {
			qla_printk(KERN_WARNING, ha,
			    "MSI-X: Unable to register handler -- %x/%d.\n",
			    imsix_entries[i].index, ret);
			qla24xx_disable_msix(ha);
			goto msix_out;
		}
		qentry->have_irq = 1;
	}

msix_out:
	return ret;
}
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* A single MSI-X entry is allocated and enabled below; the original
	 * snippet passed msix_count == 0 to pci_enable_msix(), which never
	 * initializes the vector. */
	xhci->msix_count = 1;
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			  "xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}

	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
int fnic_set_intr_mode(struct fnic *fnic)
{
	unsigned int n = ARRAY_SIZE(fnic->rq);
	unsigned int m = ARRAY_SIZE(fnic->wq);
	unsigned int o = ARRAY_SIZE(fnic->wq_copy);
	unsigned int i;

	/*
	 * Set interrupt mode (INTx, MSI, MSI-X) depending on
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
	 * (last INTR is used for WQ/RQ errors and notification area)
	 */
	BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
	for (i = 0; i < n + m + o + 1; i++)
		fnic->msix_entry[i].entry = i;

	if (fnic->rq_count >= n &&
	    fnic->raw_wq_count >= m &&
	    fnic->wq_copy_count >= o &&
	    fnic->cq_count >= n + m + o) {
		if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
				     n + m + o + 1)) {
			fnic->rq_count = n;
			fnic->raw_wq_count = m;
			fnic->wq_copy_count = o;
			fnic->wq_count = m + o;
			fnic->cq_count = n + m + o;
			fnic->intr_count = n + m + o + 1;
			fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;

			FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
				     "Using MSI-X Interrupts\n");
			vnic_dev_set_intr_mode(fnic->vdev,
					       VNIC_DEV_INTR_MODE_MSIX);
			return 0;
		}
	}

	/*
	 * Next try MSI
	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
	 */
	if (fnic->rq_count >= 1 &&
	    fnic->raw_wq_count >= 1 &&
	    fnic->wq_copy_count >= 1 &&
	    fnic->cq_count >= 3 &&
	    fnic->intr_count >= 1 &&
	    !pci_enable_msi(fnic->pdev)) {
		fnic->rq_count = 1;
		fnic->raw_wq_count = 1;
		fnic->wq_copy_count = 1;
		fnic->wq_count = 2;
		fnic->cq_count = 3;
		fnic->intr_count = 1;
		fnic->err_intr_offset = 0;

		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
			     "Using MSI Interrupts\n");
		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
		return 0;
	}

	/*
	 * Next try INTx
	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
	 * 1 INTR is used for all 3 queues, 1 INTR for queue errors,
	 * 1 INTR for notification area
	 */
	if (fnic->rq_count >= 1 &&
	    fnic->raw_wq_count >= 1 &&
	    fnic->wq_copy_count >= 1 &&
	    fnic->cq_count >= 3 &&
	    fnic->intr_count >= 3) {
		fnic->rq_count = 1;
		fnic->raw_wq_count = 1;
		fnic->wq_copy_count = 1;
		fnic->cq_count = 3;
		fnic->intr_count = 3;

		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
			     "Using Legacy Interrupts\n");
		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
		return 0;
	}

	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
	return -EINVAL;
}
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = ARRAY_SIZE(enic->rq);
	unsigned int m = ARRAY_SIZE(enic->wq);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending on
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */
	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
		return 0;
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */
	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */
	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
	return -EINVAL;
}
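/*
 * Side note (added for illustration, not part of the fnic/enic snippets
 * above): the MSI-X -> MSI -> INTx ladder that fnic_set_intr_mode() and
 * enic_set_intr_mode() open-code is what pci_alloc_irq_vectors() performs
 * internally when passed all three mode flags. A minimal sketch under that
 * assumption; my_setup_irqs() is a hypothetical name, and the queue-budget
 * bookkeeping the real drivers do is deliberately omitted:
 */
static int my_setup_irqs(struct pci_dev *pdev, unsigned int min_vecs,
			 unsigned int max_vecs)
{
	int nvec, i;

	/* Tries MSI-X first, then MSI, then a single legacy INTx. */
	nvec = pci_alloc_irq_vectors(pdev, min_vecs, max_vecs,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++)
		/* pci_irq_vector() maps a vector index to a Linux IRQ number */
		pr_info("vector %d -> irq %d\n", i, pci_irq_vector(pdev, i));

	return nvec;
}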
/**
 * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
 * @dev: PCI Express port to handle
 * @vectors: Array of interrupt vectors to populate
 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
 *
 * Return value: 0 on success, error code on failure
 */
static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
{
	struct msix_entry *msix_entries;
	int idx[PCIE_PORT_DEVICE_MAXSERVICES];
	int nr_entries, status, pos, i, nvec;
	u16 reg16;
	u32 reg32;

	nr_entries = pci_msix_table_size(dev);
	if (!nr_entries)
		return -EINVAL;
	if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
		nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;

	msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
	if (!msix_entries)
		return -ENOMEM;

	/*
	 * Allocate as many entries as the port wants, so that we can check
	 * which of them will be useful. Moreover, if nr_entries is correctly
	 * equal to the number of entries this port actually uses, we'll happily
	 * go through without any tricks.
	 */
	for (i = 0; i < nr_entries; i++)
		msix_entries[i].entry = i;

	status = pci_enable_msix(dev, msix_entries, nr_entries);
	if (status)
		goto Exit;

	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
		idx[i] = -1;
	status = -EIO;
	nvec = 0;

	if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
		int entry;

		/*
		 * The code below follows the PCI Express Base Specification 2.0
		 * stating in Section 6.1.6 that "PME and Hot-Plug Event
		 * interrupts (when both are implemented) always share the same
		 * MSI or MSI-X vector, as indicated by the Interrupt Message
		 * Number field in the PCI Express Capabilities register", where
		 * according to Section 7.8.2 of the specification "For MSI-X,
		 * the value in this field indicates which MSI-X Table entry is
		 * used to generate the interrupt message."
		 */
		pos = pci_pcie_cap(dev);
		pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
		entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
		if (entry >= nr_entries)
			goto Error;

		i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
		if (i == nvec)
			nvec++;

		idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
		idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
	}
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = 1;
	unsigned int m = 1;
	unsigned int i;

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
		return 0;
	}

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
		return 0;
	}

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
	return -EINVAL;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	struct msix_entry msix_entry;
	int err;

	/* essential vars for configuring the device with net_device */
	struct net_device *netdev;
	struct net_adapter *adapter = NULL;
	struct ixgbe_hw *hw_i = NULL;
	struct e1000_hw *hw_e = NULL;

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/*
	 * reserve device's PCI memory regions for use by this
	 * module
	 */
	err = pci_request_regions(dev, "igb_uio");
	if (err != 0) {
		dev_err(&dev->dev, "Cannot request regions\n");
		goto fail_disable;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.handler = igbuio_pci_irqhandler;
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
#ifdef CONFIG_XEN_DOM0
	/* check if the driver runs on Xen Dom0 */
	if (xen_initial_domain())
		udev->info.mmap = igbuio_dom0_pci_mmap;
#endif
	udev->info.priv = udev;
	udev->pdev = dev;

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
		msix_entry.entry = 0;
		if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
			dev_dbg(&dev->dev, "using MSI-X");
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
		/* fall back to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(dev)) {
			dev_dbg(&dev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED;
			udev->info.irq = dev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&dev->dev, "PCI INTX mask not supported\n");
		/* fall back to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = 0;
		break;
	default:
		dev_err(&dev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		err = -EINVAL;
		goto fail_release_iomem;
	}

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* initialize the corresponding netdev */
	netdev = alloc_etherdev(sizeof(struct net_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto fail_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, pci_dev_to_dev(dev));
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = dev;
	udev->adapter = adapter;
	adapter->type = retrieve_dev_specs(id);

	/* recover device-specific mac address */
	switch (adapter->type) {
	case IXGBE:
		hw_i = &adapter->hw._ixgbe_hw;
		hw_i->back = adapter;
		hw_i->hw_addr = ioremap(pci_resource_start(dev, 0),
					pci_resource_len(dev, 0));
		if (!hw_i->hw_addr) {
			err = -EIO;
			goto fail_ioremap;
		}
		break;
	case IGB:
		hw_e = &adapter->hw._e1000_hw;
		hw_e->back = adapter;
		hw_e->hw_addr = ioremap(pci_resource_start(dev, 0),
					pci_resource_len(dev, 0));
		if (!hw_e->hw_addr) {
			err = -EIO;
			goto fail_ioremap;
		}
		break;
	}

	netdev_assign_netdev_ops(netdev);
	strncpy(netdev->name, pci_name(dev), sizeof(netdev->name) - 1);
	retrieve_dev_addr(netdev, adapter);
	strcpy(netdev->name, "dpdk%d");
	err = register_netdev(netdev);
	if (err)
		goto fail_ioremap;
	adapter->netdev_registered = true;

	if (sscanf(netdev->name, "dpdk%hu", &adapter->bd_number) <= 0)
		goto fail_bdnumber;

	//printk(KERN_DEBUG "ifindex picked: %hu\n", adapter->bd_number);
	dev_info(&dev->dev, "ifindex picked: %hu\n", adapter->bd_number);

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);
	dev_info(&dev->dev, "uio device registered with irq %lx\n",
		 udev->info.irq);

	/* reset nstats */
	memset(&adapter->nstats, 0, sizeof(struct net_device_stats));

	return 0;

fail_bdnumber:
fail_ioremap:
	free_netdev(netdev);
fail_alloc_etherdev:
	pci_release_selected_regions(dev, pci_select_bars(dev, IORESOURCE_MEM));
fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	pci_release_regions(dev);
fail_disable:
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}
static int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
				 struct pci_dev *dev, struct xen_pci_op *op)
{
	struct xen_pcibk_dev_data *dev_data;
	int i, result;
	struct msix_entry *entries;
	u16 cmd;

	if (unlikely(verbose_request))
		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
		       pci_name(dev));

	if (op->value > SH_INFO_MAX_VEC)
		return -EINVAL;

	if (dev->msix_enabled)
		return -EALREADY;

	/*
	 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
	 * to access the BARs where the MSI-X entries reside.
	 */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
		return -ENXIO;

	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
	if (entries == NULL)
		return -ENOMEM;

	for (i = 0; i < op->value; i++) {
		entries[i].entry = op->msix_entries[i].entry;
		entries[i].vector = op->msix_entries[i].vector;
	}

	result = pci_enable_msix(dev, entries, op->value);

	if (result == 0) {
		for (i = 0; i < op->value; i++) {
			op->msix_entries[i].entry = entries[i].entry;
			if (entries[i].vector)
				op->msix_entries[i].vector =
					xen_pirq_from_irq(entries[i].vector);
			if (unlikely(verbose_request))
				printk(KERN_DEBUG DRV_NAME ": %s: MSI-X[%d]: %d\n",
				       pci_name(dev), i,
				       op->msix_entries[i].vector);
		}
	} else
		pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n",
				    pci_name(dev), pdev->xdev->otherend_id,
				    result);

	kfree(entries);

	op->value = result;
	dev_data = pci_get_drvdata(dev);
	if (dev_data)
		dev_data->ack_intr = 0;

	return result > 0 ? 0 : result;
}
static int request_msix_vectors(struct ivshmem_info *ivs_info, int nvectors)
{
	int i, err;
	const char *name = "ivshmem";

	ivs_info->nvectors = nvectors;

	ivs_info->msix_entries = kmalloc(nvectors * sizeof(*ivs_info->msix_entries),
					 GFP_KERNEL);
	if (ivs_info->msix_entries == NULL)
		return -ENOSPC;

	ivs_info->msix_names = kmalloc(nvectors * sizeof(*ivs_info->msix_names),
				       GFP_KERNEL);
	if (ivs_info->msix_names == NULL) {
		kfree(ivs_info->msix_entries);
		return -ENOSPC;
	}

	for (i = 0; i < nvectors; ++i)
		ivs_info->msix_entries[i].entry = i;

#ifdef HAVE_PCI_ENABLE_MSIX
	err = pci_enable_msix(ivs_info->dev, ivs_info->msix_entries,
			      ivs_info->nvectors);
#else
	err = pci_enable_msix_range(ivs_info->dev, ivs_info->msix_entries,
				    ivs_info->nvectors, ivs_info->nvectors);
#endif
	if (err > 0) {
		/* msi-x positive error code returns the number available */
		ivs_info->nvectors = err;
#ifdef HAVE_PCI_ENABLE_MSIX
		err = pci_enable_msix(ivs_info->dev, ivs_info->msix_entries,
				      ivs_info->nvectors);
#else
		err = pci_enable_msix_range(ivs_info->dev,
					    ivs_info->msix_entries,
					    ivs_info->nvectors,
					    ivs_info->nvectors);
#endif
		if (err) {
			dev_info(&ivs_info->dev->dev,
				 "no MSI (%d). Back to INTx.\n", err);
			goto error;
		}
	}

	if (err)
		goto error;

	for (i = 0; i < ivs_info->nvectors; i++) {
		snprintf(ivs_info->msix_names[i], sizeof(*ivs_info->msix_names),
			 "%s-config", name);

		err = request_irq(ivs_info->msix_entries[i].vector,
				  ivshmem_msix_handler, 0,
				  ivs_info->msix_names[i], ivs_info->uio);
		if (err) {
			free_msix_vectors(ivs_info, i - 1);
			goto error;
		}
	}

	return 0;

error:
	kfree(ivs_info->msix_entries);
	kfree(ivs_info->msix_names);
	ivs_info->nvectors = 0;
	return err;
}
static int igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
{
	int err = 0;
#ifndef HAVE_ALLOC_IRQ_VECTORS
	struct msix_entry msix_entry;
#endif

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
#ifndef HAVE_ALLOC_IRQ_VECTORS
		msix_entry.entry = 0;
		if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif
		/* fall back to MSI */
	case RTE_INTR_MODE_MSI:
#ifndef HAVE_ALLOC_IRQ_VECTORS
		if (pci_enable_msi(udev->pdev) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#endif
		/* fall back to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(udev->pdev)) {
			dev_dbg(&udev->pdev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");
		/* fall back to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = UIO_IRQ_NONE;
		break;
	default:
		dev_err(&udev->pdev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		udev->info.irq = UIO_IRQ_NONE;
		err = -EINVAL;
	}

	if (udev->info.irq != UIO_IRQ_NONE)
		err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
				  udev->info.irq_flags, udev->info.name, udev);
	dev_info(&udev->pdev->dev, "uio device registered with irq %lx\n",
		 udev->info.irq);

	return err;
}
static int mic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int brdnum = mic_data.dd_numdevs;
	int err = 0;
	bd_info_t *bd_info;
	mic_ctx_t *mic_ctx;
#ifdef CONFIG_PCI_MSI
	int i = 0;
#endif

	if ((bd_info = (bd_info_t *)kzalloc(sizeof(bd_info_t), GFP_KERNEL)) == NULL) {
		printk("MIC: probe failed allocating memory for bd_info\n");
		return -ENOSPC;
	}

	mic_ctx = &bd_info->bi_ctx;
	mic_ctx->bd_info = bd_info;
	mic_ctx->bi_id = brdnum;
	mic_ctx->bi_pdev = pdev;
	mic_ctx->msie = 0;
	mic_data.dd_bi[brdnum] = bd_info;

	if ((err = pci_enable_device(pdev))) {
		printk("pci_enable failed board #%d\n", brdnum);
		goto probe_freebd;
	}

	pci_set_master(pdev);
	err = pci_reenable_device(pdev);
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		printk("mic %d: ERROR DMA not available\n", brdnum);
		goto probe_freebd;
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		printk("mic %d: ERROR pci_set_consistent_dma_mask(64) %d\n",
		       brdnum, err);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk("mic %d: ERROR pci_set_consistent_dma_mask(32) %d\n",
			       brdnum, err);
			goto probe_freebd;
		}
	}

	// Allocate bar 4 for MMIO and GTT
	bd_info->bi_ctx.mmio.pa = pci_resource_start(pdev, DLDR_MMIO_BAR);
	bd_info->bi_ctx.mmio.len = pci_resource_len(pdev, DLDR_MMIO_BAR);
	if (request_mem_region(bd_info->bi_ctx.mmio.pa,
			       bd_info->bi_ctx.mmio.len, "mic") == NULL) {
		printk("mic %d: failed to reserve mmio space\n", brdnum);
		goto probe_freebd;
	}

	// Allocate bar 0 for access Aperture
	bd_info->bi_ctx.aper.pa = pci_resource_start(pdev, DLDR_APT_BAR);
	bd_info->bi_ctx.aper.len = pci_resource_len(pdev, DLDR_APT_BAR);
	if (request_mem_region(bd_info->bi_ctx.aper.pa,
			       bd_info->bi_ctx.aper.len, "mic") == NULL) {
		printk("mic %d: failed to reserve aperture space\n", brdnum);
		goto probe_relmmio;
	}

#ifdef CONFIG_PCI_MSI
	if (mic_msi_enable) {
		for (i = 0; i < MIC_NUM_MSIX_ENTRIES; i++)
			bd_info->bi_msix_entries[i].entry = i;
		err = pci_enable_msix(mic_ctx->bi_pdev,
				      bd_info->bi_msix_entries,
				      MIC_NUM_MSIX_ENTRIES);
		if (err == 0) {
			// Only support 1 MSIx for now
			err = request_irq(bd_info->bi_msix_entries[0].vector,
					  mic_irq_isr, 0, "mic", mic_ctx);
			if (err != 0) {
				printk("MIC: Error in request_irq %d\n", err);
				goto probe_relaper;
			}
			mic_ctx->msie = 1;
		}
	}
#endif

	// TODO: this needs to be hardened and actually return errors
	if ((err = adapter_init_device(mic_ctx)) != 0) {
		printk("MIC: Adapter init device failed %d\n", err);
		goto probe_relaper;
	}

	// Adding sysfs entries
	set_sysfs_entries(mic_ctx);
	bd_info->bi_sysfsdev = device_create(mic_lindata.dd_class, &pdev->dev,
			mic_lindata.dd_dev + 2 + mic_ctx->bd_info->bi_ctx.bi_id,
			NULL, "mic%d", mic_ctx->bd_info->bi_ctx.bi_id);
	err = sysfs_create_group(&mic_ctx->bd_info->bi_sysfsdev->kobj,
				 &bd_attr_group);
	mic_ctx->sysfs_state = sysfs_get_dirent(mic_ctx->bd_info->bi_sysfsdev->kobj.sd,
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35))
						//NULL,
#endif
						"state");
	dev_set_drvdata(mic_ctx->bd_info->bi_sysfsdev, mic_ctx);

	if (!mic_ctx->msie)
		if ((err = request_irq(mic_ctx->bi_pdev->irq, mic_irq_isr,
				       IRQF_SHARED, "mic", mic_ctx)) != 0) {
			printk("MIC: Error in request_irq %d\n", err);
			goto probe_unmapaper;
		}

	adapter_probe(&bd_info->bi_ctx);

	if (mic_ctx->bi_psmi.enabled) {
		err = sysfs_create_group(&mic_ctx->bd_info->bi_sysfsdev->kobj,
					 &psmi_attr_group);
		err = device_create_bin_file(mic_ctx->bd_info->bi_sysfsdev,
					     &mic_psmi_ptes_attr);
	}

	adapter_wait_reset(mic_ctx);

	// Adding a board instance so increment the total number of MICs in the system.
	list_add_tail(&bd_info->bi_list, &mic_data.dd_bdlist);
	mic_data.dd_numdevs++;
	printk("mic_probe %d:%d:%d as board #%d\n", pdev->bus->number,
	       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), brdnum);

	return 0;

probe_unmapaper:
	wait_event(mic_ctx->ioremapwq,
		   mic_ctx->aper.va || mic_ctx->state == MIC_RESETFAIL);
	if (mic_ctx->aper.va)
		iounmap((void *)bd_info->bi_ctx.aper.va);
	iounmap((void *)bd_info->bi_ctx.mmio.va);

probe_relaper:
	release_mem_region(bd_info->bi_ctx.aper.pa, bd_info->bi_ctx.aper.len);

probe_relmmio:
	release_mem_region(bd_info->bi_ctx.mmio.pa, bd_info->bi_ctx.mmio.len);

probe_freebd:
	kfree(bd_info);

	return err;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	struct msix_entry msix_entry;
	int err;

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/*
	 * reserve device's PCI memory regions for use by this
	 * module
	 */
	err = pci_request_regions(dev, "igb_uio");
	if (err != 0) {
		dev_err(&dev->dev, "Cannot request regions\n");
		goto fail_disable;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.handler = igbuio_pci_irqhandler;
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
#ifdef CONFIG_XEN_DOM0
	/* check if the driver runs on Xen Dom0 */
	if (xen_initial_domain())
		udev->info.mmap = igbuio_dom0_pci_mmap;
#endif
	udev->info.priv = udev;
	udev->pdev = dev;

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
		msix_entry.entry = 0;
		if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
			dev_dbg(&dev->dev, "using MSI-X");
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
		/* fall back to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(dev)) {
			dev_dbg(&dev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED;
			udev->info.irq = dev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&dev->dev, "PCI INTX mask not supported\n");
		/* fall back to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = 0;
		break;
	default:
		dev_err(&dev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		err = -EINVAL;
		goto fail_release_iomem;
	}

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);
	dev_info(&dev->dev, "uio device registered with irq %lx\n",
		 udev->info.irq);

	return 0;

fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	pci_release_regions(dev);
fail_disable:
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}
static int avp_pci_setup_msi_interrupts(struct pci_dev *dev,
					struct wrs_avp_pci_dev *avp_pci_dev)
{
	struct msix_entry *entry;
	size_t size;
	char *name;
	unsigned i;
	int ret;

	/* Use the maximum number of vectors */
	avp_pci_dev->msix_vectors = WRS_AVP_MAX_MSIX_VECTORS;

	/* Allocate MSI-X vector entries */
	size = avp_pci_dev->msix_vectors * sizeof(avp_pci_dev->msix_entries[0]);
	avp_pci_dev->msix_entries = kmalloc(size, GFP_KERNEL);
	if (avp_pci_dev->msix_entries == NULL) {
		AVP_ERR("Failed to allocate memory for %d MSI-X entries\n",
			avp_pci_dev->msix_vectors);
		return -ENOMEM;
	}

	/* Allocate MSI-X vector names */
	size = avp_pci_dev->msix_vectors * WRS_AVP_PCI_MSIX_NAME_LEN;
	avp_pci_dev->msix_names = kmalloc(size, GFP_KERNEL);
	if (avp_pci_dev->msix_names == NULL) {
		AVP_ERR("Failed to allocate memory for %d MSI-X names\n",
			avp_pci_dev->msix_vectors);
		kfree(avp_pci_dev->msix_entries);
		return -ENOMEM;
	}

	/* Setup vector descriptors */
	for (i = 0; i < avp_pci_dev->msix_vectors; i++) {
		entry = &avp_pci_dev->msix_entries[i];
		entry->entry = i;
	}

retry:
	/* Enable interrupt vectors */
	ret = pci_enable_msix(dev, avp_pci_dev->msix_entries,
			      avp_pci_dev->msix_vectors);
	if ((ret < 0) || (ret == avp_pci_dev->msix_vectors)) {
		AVP_ERR("Failed to enable MSI-X interrupts, ret=%d\n", ret);
		goto cleanup;
	} else if (ret > 0) {
		/* The device has a smaller number of vectors available */
		AVP_INFO("Reducing MSI-X vectors to %d to match device limits\n",
			 ret);
		avp_pci_dev->msix_vectors = ret;
		goto retry;
	}

	/* Setup interrupt handlers */
	for (i = 0; i < avp_pci_dev->msix_vectors; i++) {
		entry = &avp_pci_dev->msix_entries[i];
		name = avp_pci_dev->msix_names[i];
		snprintf(name, WRS_AVP_PCI_MSIX_NAME_LEN, "avp-msi%d", i);
		ret = request_irq(entry->vector,
				  avp_pci_interrupt_handler,
				  0, /* flags */
				  name,
				  avp_pci_dev);
		if (ret != 0) {
			AVP_ERR("Failed to allocate IRQ for MSI-X vector %d, ret=%d\n",
				i, ret);
			goto cleanup;
		}
		AVP_DBG("MSI-X vector %u IRQ %u enabled\n",
			entry->entry, entry->vector);
	}

	AVP_DBG("MSI-X enabled with %d vector(s) allocated\n",
		avp_pci_dev->msix_vectors);
	return 0;

cleanup:
	avp_pci_dev->msix_vectors = 0;
	if (avp_pci_dev->msix_names)
		kfree(avp_pci_dev->msix_names);
	if (avp_pci_dev->msix_entries)
		kfree(avp_pci_dev->msix_entries);
	pci_disable_msix(dev);
	return ret;
}