struct irq_domain *hpet_create_irq_domain(int hpet_id) { struct irq_domain *parent; struct irq_alloc_info info; struct msi_domain_info *domain_info; if (x86_vector_domain == NULL) return NULL; domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL); if (!domain_info) return NULL; *domain_info = hpet_msi_domain_info; domain_info->data = (void *)(long)hpet_id; init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_HPET; info.hpet_id = hpet_id; parent = irq_remapping_get_ir_irq_domain(&info); if (parent == NULL) parent = x86_vector_domain; else hpet_msi_controller.name = "IR-HPET-MSI"; return msi_create_irq_domain(NULL, domain_info, parent); }
int hpet_assign_irq(struct irq_domain *domain, struct hpet_dev *dev, int dev_num) { struct irq_alloc_info info; init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_HPET; info.hpet_data = dev; info.hpet_id = hpet_dev_id(domain); info.hpet_index = dev_num; return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info); }
int dmar_alloc_hwirq(int id, int node, void *arg) { struct irq_domain *domain = dmar_get_irq_domain(); struct irq_alloc_info info; if (!domain) return -1; init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_DMAR; info.dmar_id = id; info.dmar_data = arg; return irq_domain_alloc_irqs(domain, 1, node, &info); }
/*
 * pci_msi_prepare - initialize the alloc info for a PCI MSI/MSI-X request
 * @domain:	irq domain the allocation targets (unused here)
 * @dev:	generic device, known to wrap a pci_dev
 * @nvec:	number of vectors requested (unused here)
 * @arg:	alloc info to initialize
 *
 * Looks at the first MSI descriptor to decide between MSI-X and plain
 * MSI; plain MSI additionally demands contiguous vectors.
 *
 * Always returns 0.
 */
static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct msi_desc *first = first_pci_msi_entry(pdev);

	init_irq_alloc_info(arg, NULL);
	arg->msi_dev = pdev;

	if (first->msi_attrib.is_msix) {
		arg->type = X86_IRQ_ALLOC_TYPE_MSIX;
		return 0;
	}

	arg->type = X86_IRQ_ALLOC_TYPE_MSI;
	/* Multi-MSI requires a contiguous vector block */
	arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
	return 0;
}
/*
 * native_setup_msi_irqs - allocate MSI interrupts for a PCI device
 * @dev:	PCI device requesting interrupts
 * @nvec:	number of vectors to allocate
 * @type:	PCI_CAP_ID_MSI or PCI_CAP_ID_MSIX style request
 *
 * Prefers an interrupt-remapping provided MSI domain when one matches
 * the device, falling back to the default MSI domain.
 *
 * Returns the result of pci_msi_domain_alloc_irqs(), or -ENOSYS when
 * no MSI domain is available at all.
 */
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct irq_alloc_info info;
	struct irq_domain *msi_domain;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_MSI;
	info.msi_dev = dev;

	msi_domain = irq_remapping_get_irq_domain(&info);
	if (!msi_domain)
		msi_domain = msi_default_domain;
	if (!msi_domain)
		return -ENOSYS;

	return pci_msi_domain_alloc_irqs(msi_domain, dev, nvec, type);
}
/*
 * arch_setup_ht_irq - allocate an interrupt for a HyperTransport device
 * @idx:	HT irq index
 * @pos:	HT capability position in config space
 * @dev:	PCI device carrying the HT capability
 * @update:	callback used to rewrite the HT irq message
 *
 * Allocates one interrupt from the HT irq domain on the device's NUMA
 * node.  Returns the irq number, a negative error from the allocator,
 * or -ENOSYS when the HT domain was never created.
 */
int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev, ht_irq_update_t *update)
{
	struct irq_alloc_info ht_info;

	if (!htirq_domain)
		return -ENOSYS;

	init_irq_alloc_info(&ht_info, NULL);
	ht_info.ht_dev = dev;
	ht_info.ht_update = update;
	ht_info.ht_pos = pos;
	ht_info.ht_idx = idx;

	return irq_domain_alloc_irqs(htirq_domain, 1, dev_to_node(&dev->dev), &ht_info);
}
/* * Set up a mapping of an available irq and vector, and enable the specified * MMR that defines the MSI that is to be sent to the specified CPU when an * interrupt is raised. */ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, unsigned long mmr_offset, int limit) { struct irq_alloc_info info; struct irq_domain *domain = uv_get_irq_domain(); if (!domain) return -ENOMEM; init_irq_alloc_info(&info, cpumask_of(cpu)); info.type = X86_IRQ_ALLOC_TYPE_UV; info.uv_limit = limit; info.uv_blade = mmr_blade; info.uv_offset = mmr_offset; info.uv_name = irq_name; return irq_domain_alloc_irqs(domain, 1, uv_blade_to_memory_nid(mmr_blade), &info); }