Example #1
static int __init irqcrossbar_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int err;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	err = crossbar_of_init(node);
	if (err)
		return err;

	domain = irq_domain_add_hierarchy(parent_domain, 0,
					  cb->max_crossbar_sources,
					  node, &crossbar_domain_ops,
					  NULL);
	if (!domain) {
		pr_err("%pOF: failed to allocated domain\n", node);
		return -ENOMEM;
	}

	return 0;
}
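
This example and the ones that follow share the same shape: an of_irq_init()-style callback that looks up the parent domain and stacks a new domain on top of it with irq_domain_add_hierarchy(). A minimal sketch of that shared skeleton is below; the my_* names, NR_MY_IRQS, and the "vendor,my-irqchip" compatible string are hypothetical, and in a real driver such a callback would typically be registered with IRQCHIP_DECLARE():

static const struct irq_domain_ops my_domain_ops;	/* .alloc/.free/.translate, filled in later */

static int __init my_irqchip_init(struct device_node *node,
				  struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;

	/* A hierarchical domain cannot work without a parent. */
	if (!parent)
		return -ENODEV;

	parent_domain = irq_find_host(parent);
	if (!parent_domain)
		return -ENXIO;

	/* Stack NR_MY_IRQS entries on top of the parent domain. */
	domain = irq_domain_add_hierarchy(parent_domain, 0, NR_MY_IRQS,
					  node, &my_domain_ops, NULL);
	return domain ? 0 : -ENOMEM;
}
IRQCHIP_DECLARE(my_irqchip, "vendor,my-irqchip", my_irqchip_init);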
Example #2
static int __init exynos_pmu_irq_init(struct device_node *node,
				      struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}

	pmu_base_addr = of_iomap(node, 0);

	if (!pmu_base_addr) {
		pr_err("%s: failed to find exynos pmu register\n",
		       node->full_name);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, 0,
					  node, &exynos_pmu_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(pmu_base_addr);
		return -ENOMEM;
	}

	return 0;
}
Example #3
static int __init vf610_mscm_ir_of_init(struct device_node *node,
			       struct device_node *parent)
{
	struct irq_domain *domain, *domain_parent;
	struct regmap *mscm_cp_regmap;
	int ret, cpuid;

	domain_parent = irq_find_host(parent);
	if (!domain_parent) {
		pr_err("vf610_mscm_ir: interrupt-parent not found\n");
		return -EINVAL;
	}

	mscm_ir_data = kzalloc(sizeof(*mscm_ir_data), GFP_KERNEL);
	if (!mscm_ir_data)
		return -ENOMEM;

	mscm_ir_data->mscm_ir_base = of_io_request_and_map(node, 0, "mscm-ir");
	if (IS_ERR(mscm_ir_data->mscm_ir_base)) {
		pr_err("vf610_mscm_ir: unable to map mscm register\n");
		ret = PTR_ERR(mscm_ir_data->mscm_ir_base);
		goto out_free;
	}

	mscm_cp_regmap = syscon_regmap_lookup_by_phandle(node, "fsl,cpucfg");
	if (IS_ERR(mscm_cp_regmap)) {
		ret = PTR_ERR(mscm_cp_regmap);
		pr_err("vf610_mscm_ir: regmap lookup for cpucfg failed\n");
		goto out_unmap;
	}

	regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid);
	mscm_ir_data->cpu_mask = 0x1 << cpuid;

	domain = irq_domain_add_hierarchy(domain_parent, 0,
					  MSCM_IRSPRC_NUM, node,
					  &mscm_irq_domain_ops, mscm_ir_data);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	cpu_pm_register_notifier(&mscm_ir_notifier_block);

	return 0;

out_unmap:
	iounmap(mscm_ir_data->mscm_ir_base);
out_free:
	kfree(mscm_ir_data);
	return ret;
}
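
The last argument to irq_domain_add_hierarchy() (mscm_ir_data above, NULL in most of the other examples) is stored as the domain's host_data and handed back to the domain ops. A minimal sketch of such ops, filling in the my_domain_ops left empty in the sketch after Example #1, does its chip-specific setup and then forwards the allocation to the parent domain; the my_* names are hypothetical:

static int my_domain_alloc(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs, void *arg)
{
	/*
	 * Chip-specific state passed as host_data to
	 * irq_domain_add_hierarchy() is reachable via domain->host_data;
	 * install the per-hwirq chip and handler here.
	 */

	/* Then allocate the matching interrupts in the parent domain. */
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
}

static void my_domain_free(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs)
{
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops my_domain_ops = {
	.alloc	= my_domain_alloc,
	.free	= my_domain_free,
};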
Example #4
/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @node:	Optional device-tree node of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 */
struct irq_domain *msi_create_irq_domain(struct device_node *node,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops,
					info);
}
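
A caller of msi_create_irq_domain() fills in a struct msi_domain_info first; with MSI_FLAG_USE_DEF_DOM_OPS and MSI_FLAG_USE_DEF_CHIP_OPS set (the flags checked above), the two update helpers supply default domain and chip ops and the caller only overrides what it needs. A minimal sketch of such a caller, assuming hypothetical my_msi_* names:

static void my_msi_mask(struct irq_data *d);	/* hypothetical callbacks, */
static void my_msi_unmask(struct irq_data *d);	/* defined elsewhere */

static struct irq_chip my_msi_chip = {
	.name		= "my-msi",
	.irq_mask	= my_msi_mask,
	.irq_unmask	= my_msi_unmask,
};

static struct msi_domain_info my_msi_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.chip	= &my_msi_chip,
};

static struct irq_domain *my_msi_domain_init(struct device_node *node,
					     struct irq_domain *parent)
{
	/* Returns NULL on failure, like irq_domain_add_hierarchy(). */
	return msi_create_irq_domain(node, &my_msi_info, parent);
}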
Example #5
static int __init imx_gpc_init(struct device_node *node,
			       struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	gpc_base = of_iomap(node, 0);
	if (WARN_ON(!gpc_base))
		return -ENOMEM;

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
					  node, &imx_gpc_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(gpc_base);
		return -ENOMEM;
	}

	/* Initially mask all interrupts */
	for (i = 0; i < IMR_NUM; i++)
		writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);

	/*
	 * Clear the OF_POPULATED flag set in of_irq_init so that
	 * later the GPC power domain driver will not be skipped.
	 */
	of_node_clear_flag(node, OF_POPULATED);

	return 0;
}
Example #6
static int __init exynos_pmu_irq_init(struct device_node *node,
				      struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}

	pmu_base_addr = of_iomap(node, 0);

	if (!pmu_base_addr) {
		pr_err("%s: failed to find exynos pmu register\n",
		       node->full_name);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, 0,
					  node, &exynos_pmu_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(pmu_base_addr);
		return -ENOMEM;
	}

	/*
	 * Clear the OF_POPULATED flag set in of_irq_init so that
	 * later the Exynos PMU platform device won't be skipped.
	 */
	of_node_clear_flag(node, OF_POPULATED);

	return 0;
}
Example #7
static int __init imx_gpcv2_irqchip_init(struct device_node *node,
			       struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	struct gpcv2_irqchip_data *cd;
	int i;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to get parent domain\n", node->full_name);
		return -ENXIO;
	}

	cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
	if (!cd) {
		pr_err("kzalloc failed!\n");
		return -ENOMEM;
	}

	cd->gpc_base = of_iomap(node, 0);
	if (!cd->gpc_base) {
		pr_err("fsl-gpcv2: unable to map gpc registers\n");
		kfree(cd);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
				node, &gpcv2_irqchip_data_domain_ops, cd);
	if (!domain) {
		iounmap(cd->gpc_base);
		kfree(cd);
		return -ENOMEM;
	}
	irq_set_default_host(domain);

	/* Initially mask all interrupts */
	for (i = 0; i < IMR_NUM; i++) {
		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4);
		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4);
		cd->wakeup_sources[i] = ~0;
	}

	/* Make CORE0 the default CPU that the GPC wakes up */
	cd->cpu2wakeup = GPC_IMR1_CORE0;

	/*
	 * Due to a hardware design flaw, the GPR interrupt (#32) must be
	 * kept unmasked during RUN mode to avoid entering DSM by mistake.
	 */
	writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);

	imx_gpcv2_instance = cd;
	register_syscore_ops(&imx_gpcv2_syscore_ops);

	return 0;
}
Example #8
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(),
						    0, INTR_REMAP_TABLE_ENTRIES,
						    NULL, &intel_ir_domain_ops,
						    iommu);
	if (!iommu->ir_domain) {
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_bitmap;
	}
	iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;

	/*
	 * If queued invalidation is already initialized, don't
	 * disable it.
	 */
	if (!iommu->qi) {
		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);
		dmar_disable_qi(iommu);

		if (dmar_enable_qi(iommu)) {
			pr_err("Failed to enable queued invalidation\n");
			goto out_free_ir_domain;
		}
	}

	init_ir_status(iommu);

	if (ir_pre_enabled(iommu)) {
		if (iommu_load_old_irte(iommu))
			pr_err("Failed to copy IR table for %s from previous kernel\n",
			       iommu->name);
		else
			pr_info("Copied IR table for %s from previous kernel\n",
				iommu->name);
	}

	iommu_set_irq_remapping(iommu, eim_mode);

	return 0;

out_free_ir_domain:
	if (iommu->ir_msi_domain)
		irq_domain_remove(iommu->ir_msi_domain);
	iommu->ir_msi_domain = NULL;
	irq_domain_remove(iommu->ir_domain);
	iommu->ir_domain = NULL;
out_free_bitmap:
	kfree(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);

	iommu->ir_table = NULL;

	return -ENOMEM;
}