/** * msi_create_irq_domain - Create a MSI interrupt domain * @fwnode: Optional fwnode of the interrupt controller * @info: MSI domain info * @parent: Parent irq domain */ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent) { if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) msi_domain_update_dom_ops(info); if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) msi_domain_update_chip_ops(info); return irq_domain_create_hierarchy(parent, 0, 0, fwnode, &msi_domain_ops, info); }
/** * msi_create_irq_domain - Create a MSI interrupt domain * @fwnode: Optional fwnode of the interrupt controller * @info: MSI domain info * @parent: Parent irq domain */ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent) { struct irq_domain *domain; if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) msi_domain_update_dom_ops(info); if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) msi_domain_update_chip_ops(info); domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0, fwnode, &msi_domain_ops, info); if (domain && !domain->name && info->chip) domain->name = info->chip->name; return domain; }
int its_alloc_vcpu_irqs(struct its_vm *vm) { int vpe_base_irq, i; vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe", task_pid_nr(current)); if (!vm->fwnode) goto err; vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes, vm->fwnode, vpe_domain_ops, vm); if (!vm->domain) goto err; for (i = 0; i < vm->nr_vpes; i++) { vm->vpes[i]->its_vm = vm; vm->vpes[i]->idai = true; } vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes, NUMA_NO_NODE, vm, false, NULL); if (vpe_base_irq <= 0) goto err; for (i = 0; i < vm->nr_vpes; i++) vm->vpes[i]->irq = vpe_base_irq + i; return 0; err: if (vm->domain) irq_domain_remove(vm->domain); if (vm->fwnode) irq_domain_free_fwnode(vm->fwnode); return -ENOMEM; }
/*
 * Set up the Hyper-V IO-APIC interrupt-remapping domain and precompute the
 * maximum cpumask usable for IO-APIC irqs.
 *
 * Returns 0 on success, -ENODEV when not running on Hyper-V with x2apic,
 * -ENOMEM on allocation failure.
 */
static int __init hyperv_prepare_irq_remapping(void)
{
	struct fwnode_handle *fn;
	int i;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV) ||
	    !x2apic_supported())
		return -ENODEV;

	fn = irq_domain_alloc_named_id_fwnode("HYPERV-IR", 0);
	if (!fn)
		return -ENOMEM;

	ioapic_ir_domain =
		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
				0, IOAPIC_REMAPPING_ENTRY, fn,
				&hyperv_ir_domain_ops, NULL);

	/*
	 * The fwnode stays referenced by the irq domain for its whole
	 * lifetime, so it must only be freed when domain creation fails.
	 * The original freed it unconditionally, leaving the live domain
	 * with a dangling fwnode pointer, and never reported the failure.
	 */
	if (!ioapic_ir_domain) {
		irq_domain_free_fwnode(fn);
		return -ENOMEM;
	}

	/*
	 * Hyper-V doesn't provide irq remapping function for
	 * IO-APIC and so IO-APIC only accepts 8-bit APIC ID.
	 * Cpu's APIC ID is read from ACPI MADT table and APIC IDs
	 * in the MADT table on Hyper-v are sorted monotonic increasingly.
	 * APIC ID reflects cpu topology. There maybe some APIC ID
	 * gaps when cpu number in a socket is not power of two. Prepare
	 * max cpu affinity for IOAPIC irqs. Scan cpu 0-255 and set cpu
	 * into ioapic_max_cpumask if its APIC ID is less than 256.
	 */
	for (i = min_t(unsigned int, num_possible_cpus() - 1, 255); i >= 0; i--)
		if (cpu_physical_id(i) < 256)
			cpumask_set_cpu(i, &ioapic_max_cpumask);

	return 0;
}
static int mvebu_gicp_probe(struct platform_device *pdev) { struct mvebu_gicp *gicp; struct irq_domain *inner_domain, *plat_domain, *parent_domain; struct device_node *node = pdev->dev.of_node; struct device_node *irq_parent_dn; int ret, i; gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL); if (!gicp) return -ENOMEM; gicp->dev = &pdev->dev; spin_lock_init(&gicp->spi_lock); gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!gicp->res) return -ENODEV; ret = of_property_count_u32_elems(node, "marvell,spi-ranges"); if (ret < 0) return ret; gicp->spi_ranges_cnt = ret / 2; gicp->spi_ranges = devm_kzalloc(&pdev->dev, gicp->spi_ranges_cnt * sizeof(struct mvebu_gicp_spi_range), GFP_KERNEL); if (!gicp->spi_ranges) return -ENOMEM; for (i = 0; i < gicp->spi_ranges_cnt; i++) { of_property_read_u32_index(node, "marvell,spi-ranges", i * 2, &gicp->spi_ranges[i].start); of_property_read_u32_index(node, "marvell,spi-ranges", i * 2 + 1, &gicp->spi_ranges[i].count); gicp->spi_cnt += gicp->spi_ranges[i].count; } gicp->spi_bitmap = devm_kzalloc(&pdev->dev, BITS_TO_LONGS(gicp->spi_cnt) * sizeof(long), GFP_KERNEL); if (!gicp->spi_bitmap) return -ENOMEM; irq_parent_dn = of_irq_find_parent(node); if (!irq_parent_dn) { dev_err(&pdev->dev, "failed to find parent IRQ node\n"); return -ENODEV; } parent_domain = irq_find_host(irq_parent_dn); if (!parent_domain) { dev_err(&pdev->dev, "failed to find parent IRQ domain\n"); return -ENODEV; } inner_domain = irq_domain_create_hierarchy(parent_domain, 0, gicp->spi_cnt, of_node_to_fwnode(node), &gicp_domain_ops, gicp); if (!inner_domain) return -ENOMEM; plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node), &gicp_msi_domain_info, inner_domain); if (!plat_domain) { irq_domain_remove(inner_domain); return -ENOMEM; } platform_set_drvdata(pdev, gicp); return 0; }
static int intel_setup_irq_remapping(struct intel_iommu *iommu) { struct ir_table *ir_table; struct fwnode_handle *fn; unsigned long *bitmap; struct page *pages; if (iommu->ir_table) return 0; ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL); if (!ir_table) return -ENOMEM; pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); if (!pages) { pr_err("IR%d: failed to allocate pages of order %d\n", iommu->seq_id, INTR_REMAP_PAGE_ORDER); goto out_free_table; } bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), sizeof(long), GFP_ATOMIC); if (bitmap == NULL) { pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); goto out_free_pages; } fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id); if (!fn) goto out_free_bitmap; iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, INTR_REMAP_TABLE_ENTRIES, fn, &intel_ir_domain_ops, iommu); irq_domain_free_fwnode(fn); if (!iommu->ir_domain) { pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); goto out_free_bitmap; } iommu->ir_msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, "INTEL-IR-MSI", iommu->seq_id); ir_table->base = page_address(pages); ir_table->bitmap = bitmap; iommu->ir_table = ir_table; /* * If the queued invalidation is already initialized, * shouldn't disable it. */ if (!iommu->qi) { /* * Clear previous faults. 
*/ dmar_fault(-1, iommu); dmar_disable_qi(iommu); if (dmar_enable_qi(iommu)) { pr_err("Failed to enable queued invalidation\n"); goto out_free_bitmap; } } init_ir_status(iommu); if (ir_pre_enabled(iommu)) { if (!is_kdump_kernel()) { pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n", iommu->name); clear_ir_pre_enabled(iommu); iommu_disable_irq_remapping(iommu); } else if (iommu_load_old_irte(iommu)) pr_err("Failed to copy IR table for %s from previous kernel\n", iommu->name); else pr_info("Copied IR table for %s from previous kernel\n", iommu->name); } iommu_set_irq_remapping(iommu, eim_mode); return 0; out_free_bitmap: kfree(bitmap); out_free_pages: __free_pages(pages, INTR_REMAP_PAGE_ORDER); out_free_table: kfree(ir_table); iommu->ir_table = NULL; return -ENOMEM; }
/*
 * Probe the X-Gene standby GPIO controller: map the registers, locate the
 * parent irq domain via the first platform irq, initialize the generic
 * MMIO gpio chip, read optional irq/gpio-count properties, create the
 * child irq domain and register the gpio chip.
 *
 * Returns 0 on success or a negative errno; resources allocated with
 * devm_* are released automatically on failure.
 */
static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
	struct xgene_gpio_sb *priv;
	int ret;
	struct resource *res;
	void __iomem *regs;
	struct irq_domain *parent_domain = NULL;
	struct fwnode_handle *fwnode;
	u32 val32;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	priv->regs = regs;

	/*
	 * The first platform irq identifies the parent domain; its hwirq is
	 * the base the child hwirqs are translated against.
	 */
	ret = platform_get_irq(pdev, 0);
	if (ret > 0) {
		priv->parent_irq_base = irq_get_irq_data(ret)->hwirq;
		parent_domain = irq_get_irq_data(ret)->domain;
	}
	if (!parent_domain) {
		dev_err(&pdev->dev, "unable to obtain parent domain\n");
		return -ENODEV;
	}

	/* Generic MMIO gpio chip: 4-byte registers, separate in/out/oe. */
	ret = bgpio_init(&priv->gc, &pdev->dev, 4,
			regs + MPA_GPIO_IN_ADDR,
			regs + MPA_GPIO_OUT_ADDR, NULL,
			regs + MPA_GPIO_OE_ADDR, NULL, 0);
	if (ret)
		return ret;

	priv->gc.to_irq = xgene_gpio_sb_to_irq;

	/* Retrieve start irq pin, use default if property not found */
	priv->irq_start = XGENE_DFLT_IRQ_START_PIN;
	if (!device_property_read_u32(&pdev->dev,
					XGENE_IRQ_START_PROPERTY, &val32))
		priv->irq_start = val32;

	/* Retrieve number irqs, use default if property not found */
	priv->nirq = XGENE_DFLT_MAX_NIRQ;
	if (!device_property_read_u32(&pdev->dev, XGENE_NIRQ_PROPERTY, &val32))
		priv->nirq = val32;

	/* Retrieve number gpio, use default if property not found */
	priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO;
	if (!device_property_read_u32(&pdev->dev, XGENE_NGPIO_PROPERTY, &val32))
		priv->gc.ngpio = val32;

	dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n",
			priv->gc.ngpio, priv->nirq, priv->irq_start);

	platform_set_drvdata(pdev, priv);

	/* DT boot has an of_node; ACPI boot supplies a bare fwnode. */
	if (pdev->dev.of_node)
		fwnode = of_node_to_fwnode(pdev->dev.of_node);
	else
		fwnode = pdev->dev.fwnode;

	priv->irq_domain = irq_domain_create_hierarchy(parent_domain,
					0, priv->nirq, fwnode,
					&xgene_gpio_sb_domain_ops, priv);
	if (!priv->irq_domain)
		return -ENODEV;

	priv->gc.irqdomain = priv->irq_domain;

	ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to register X-Gene GPIO Standby driver\n");
		/* The irq domain is not devm-managed; tear it down by hand. */
		irq_domain_remove(priv->irq_domain);
		return ret;
	}
	dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");

	if (priv->nirq > 0) {
		/* Register interrupt handlers for gpio signaled acpi events */
		acpi_gpiochip_request_interrupts(&priv->gc);
	}

	return ret;
}