/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg;
	struct msi_desc *desc;
	int i, ret, virq;

	/* Give the domain/arch code a chance to precompute per-device data. */
	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	/* Walk every MSI descriptor attached to @dev and allocate its irqs. */
	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		/*
		 * -1: no fixed virq requested; allocation honours the
		 * device's NUMA node and per-descriptor affinity mask.
		 */
		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			ret = -ENOSPC;
			/* Let the domain refine/override the error code. */
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			/* Notify the domain that allocation is done (failed). */
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		/* Bind each allocated virq back to this descriptor. */
		for (i = 0; i < desc->nvec_used; i++)
			irq_set_msi_desc_off(virq, i, desc);
	}
	/*
	 * NOTE(review): the function body appears truncated in this chunk —
	 * the success path (msi_finish / return 0) and the closing brace are
	 * not visible here; confirm against the full file.
	 */
/*
 * msi_domain_populate_irqs - Populate pre-allocated irqs [@virq, @virq+@nvec)
 * with the matching MSI descriptors of @dev.
 *
 * Each descriptor whose Linux interrupt number falls inside the window is
 * pushed down the domain hierarchy and bound to its irq. Must be called with
 * the domain mutex held. On any failure, every hierarchy allocation made for
 * irqs in the window is torn down again.
 *
 * Returns 0 on success or a negative error code.
 */
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *entry;
	int err = 0;

	for_each_msi_entry(entry, dev) {
		/* Multi-MSI descriptors are explicitly unsupported here. */
		if (WARN_ON(!entry->irq || entry->nvec_used != 1)) {
			err = -EINVAL;
			break;
		}

		/* Skip descriptors outside the requested irq window. */
		if (entry->irq < virq || entry->irq >= virq + nvec)
			continue;

		ops->set_desc(arg, entry);
		/* Caller guarantees the domain mutex is held. */
		err = irq_domain_alloc_irqs_hierarchy(domain, entry->irq, 1,
						      arg);
		if (err)
			break;

		irq_set_msi_desc_off(entry->irq, 0, entry);
	}

	if (err) {
		/* Undo every hierarchy allocation made inside the window. */
		for_each_msi_entry(entry, dev) {
			if (entry->irq < virq || entry->irq >= virq + nvec)
				continue;
			irq_domain_free_irqs_common(domain, entry->irq, 1);
		}
	}

	return err;
}
/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg;
	struct msi_desc *desc;
	int i, ret, virq = -1;

	/* Validate the request, then let the domain precompute @arg. */
	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		/*
		 * With MSI_FLAG_IDENTITY_MAP the virq is forced to equal the
		 * hardware irq number; otherwise any free virq (-1) is used.
		 */
		if (info->flags & MSI_FLAG_IDENTITY_MAP)
			virq = (int)ops->get_hwirq(info, &arg);
		else
			virq = -1;

		virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used,
					       dev_to_node(dev), &arg, false);
		if (virq < 0) {
			ret = -ENOSPC;
			/* Let the domain refine/override the error code. */
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			/* Notify the domain that allocation is done (failed). */
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		/* Bind each allocated virq back to this descriptor. */
		for (i = 0; i < desc->nvec_used; i++)
			irq_set_msi_desc_off(virq, i, desc);
	}
	/*
	 * NOTE(review): the function body appears truncated in this chunk —
	 * the success path (msi_finish / return 0) and the closing brace are
	 * not visible here; confirm against the full file.
	 */
/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq. Convenience wrapper around
 * irq_set_msi_desc_off() for the common single-vector case (offset 0).
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	int err;

	/* Offset 0: @irq is the first (and only) vector of @entry. */
	err = irq_set_msi_desc_off(irq, 0, entry);
	return err;
}