Example #1
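/*
 * atmel-aic5 OF translate callback: after the common translation, the
 * hwirq is selected through the Source Select Register and its priority
 * updated in the Source Mode Register, under the lock of the domain's
 * first generic chip (all AIC5 chips share the same registers).
 */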
static int aic5_irq_domain_xlate(struct irq_domain *d,
				 struct device_node *ctrlr,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq,
				 unsigned int *out_type)
{
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
	unsigned long flags;
	unsigned smr;
	int ret;

	if (!bgc)
		return -EINVAL;

	ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
					  out_hwirq, out_type);
	if (ret)
		return ret;

	irq_gc_lock_irqsave(bgc, flags);
	irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
	smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
	ret = aic_common_set_priority(intspec[2], &smr);
	if (!ret)
		irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
	irq_gc_unlock_irqrestore(bgc, flags);

	return ret;
}
Example #2
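/*
 * atmel-aic hardware init: pop the priority stack with eight EOI
 * commands, program the spurious vector, disable protect mode, disable
 * and clear all sources, then point each source vector at its own
 * interrupt number.
 */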
static void __init aic_hw_init(struct irq_domain *domain)
{
	struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
	int i;

	/*
	 * Perform 8 End Of Interrupt commands to make sure the AIC
	 * will not lock out nIRQ
	 */
	for (i = 0; i < 8; i++)
		irq_reg_writel(gc, 0, AT91_AIC_EOICR);

	/*
	 * Set the Spurious Interrupt ID in the Spurious Vector Register:
	 * when there is no current interrupt, the IRQ Vector Register
	 * returns the value stored in AIC_SPU
	 */
	irq_reg_writel(gc, 0xffffffff, AT91_AIC_SPU);

	/* No debugging in AIC: Debug (Protect) Control Register */
	irq_reg_writel(gc, 0, AT91_AIC_DCR);

	/* Disable and clear all interrupts initially */
	irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
	irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);

	for (i = 0; i < 32; i++)
		irq_reg_writel(gc, i, AT91_AIC_SVR(i));
}
Example #3
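/*
 * atmel-aic OF init: the domain and its single generic chip are created
 * by aic_common_of_init(); only the register offsets and irq_chip
 * callbacks remain to be filled in before the handler is installed.
 */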
static int __init aic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct irq_chip_generic *gc;
	struct irq_domain *domain;

	if (aic_domain)
		return -EEXIST;

	domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
				    NR_AIC_IRQS);
	if (IS_ERR(domain))
		return PTR_ERR(domain);

	aic_common_irq_fixup(aic_irq_fixups);

	aic_domain = domain;
	gc = irq_get_domain_generic_chip(domain, 0);

	gc->chip_types[0].regs.eoi = AT91_AIC_EOICR;
	gc->chip_types[0].regs.enable = AT91_AIC_IECR;
	gc->chip_types[0].regs.disable = AT91_AIC_IDCR;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
	gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
	gc->chip_types[0].chip.irq_retrigger = aic_retrigger;
	gc->chip_types[0].chip.irq_set_type = aic_set_type;
	gc->chip_types[0].chip.irq_suspend = aic_suspend;
	gc->chip_types[0].chip.irq_resume = aic_resume;
	gc->chip_types[0].chip.irq_pm_shutdown = aic_pm_shutdown;

	aic_hw_init(domain);
	set_handle_irq(aic_handle);

	return 0;
}
Example #4
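/*
 * nvic OF init: one 32-interrupt generic chip per register bank, with
 * each chip's reg_base offset by one word so the generic enable/disable
 * helpers hit the right ISER/ICER register.
 */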
static int __init nvic_of_init(struct device_node *node,
			       struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	unsigned int irqs, i, numbanks;
	int ret;
	void __iomem *nvic_base;

	numbanks = (readl_relaxed(V7M_SCS_ICTR) &
		    V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;

	nvic_base = of_iomap(node, 0);
	if (!nvic_base) {
		pr_warn("unable to map nvic registers\n");
		return -ENOMEM;
	}

	irqs = numbanks * 32;
	if (irqs > NVIC_MAX_IRQ)
		irqs = NVIC_MAX_IRQ;

	nvic_irq_domain =
		irq_domain_add_linear(node, irqs, &irq_generic_chip_ops, NULL);
	if (!nvic_irq_domain) {
		pr_warn("Failed to allocate irq domain\n");
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1,
					     "nvic_irq", handle_fasteoi_irq,
					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		irq_domain_remove(nvic_irq_domain);
		return ret;
	}

	for (i = 0; i < numbanks; ++i) {
		struct irq_chip_generic *gc;

		gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i);
		gc->reg_base = nvic_base + 4 * i;
		gc->chip_types[0].regs.enable = NVIC_ISER;
		gc->chip_types[0].regs.disable = NVIC_ICER;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
		gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
		gc->chip_types[0].chip.irq_eoi = nvic_eoi;

		/* disable interrupts */
		writel_relaxed(~0, gc->reg_base + NVIC_ICER);
	}

	/* Set priority on all interrupts */
	for (i = 0; i < irqs; i += 4)
		writel_relaxed(0, nvic_base + NVIC_IPR + i);

	return 0;
}
Example #5
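/*
 * moxart OF init: a single 32-interrupt generic chip covers the whole
 * controller; the DT "interrupt-mask" property is programmed into the
 * trigger mode and level registers.
 */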
static int __init moxart_of_intc_init(struct device_node *node,
				      struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;
	struct irq_chip_generic *gc;

	intc.base = of_iomap(node, 0);
	if (!intc.base) {
		pr_err("%s: unable to map IC registers\n",
		       node->full_name);
		return -EINVAL;
	}

	intc.domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
					    intc.base);
	if (!intc.domain) {
		pr_err("%s: unable to create IRQ domain\n", node->full_name);
		return -EINVAL;
	}

	ret = irq_alloc_domain_generic_chips(intc.domain, 32, 1,
					     "MOXARTINTC", handle_edge_irq,
					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: could not allocate generic chip\n",
		       node->full_name);
		irq_domain_remove(intc.domain);
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "interrupt-mask",
				   &intc.interrupt_mask);
	if (ret)
		pr_err("%s: could not read interrupt-mask DT property\n",
		       node->full_name);

	gc = irq_get_domain_generic_chip(intc.domain, 0);

	gc->reg_base = intc.base;
	gc->chip_types[0].regs.mask = IRQ_MASK_REG;
	gc->chip_types[0].regs.ack = IRQ_CLEAR_REG;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	writel(0, intc.base + IRQ_MASK_REG);
	writel(0xffffffff, intc.base + IRQ_CLEAR_REG);

	writel(intc.interrupt_mask, intc.base + IRQ_MODE_REG);
	writel(intc.interrupt_mask, intc.base + IRQ_LEVEL_REG);

	set_handle_irq(handle_irq);

	return 0;
}
Example #6
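/*
 * atmel-aic5 retrigger callback: select the source through SSR, then
 * set its pending bit via the Interrupt Set Command Register.
 */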
static int aic5_retrigger(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);

	/* Retrigger the interrupt on AIC5 */
	irq_gc_lock(bgc);
	irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
	irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
	irq_gc_unlock(bgc);

	return 0;
}
Example #7
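/*
 * digicolor helper: point one generic chip of the domain at its
 * enable/ack register pair and wire up the set/clr-bit callbacks.
 */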
static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
				    unsigned en_reg, unsigned ack_reg)
{
	struct irq_chip_generic *gc;

	gc = irq_get_domain_generic_chip(digicolor_irq_domain, irq_base);
	gc->reg_base = reg_base;
	gc->chip_types[0].regs.ack = ack_reg;
	gc->chip_types[0].regs.mask = en_reg;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
}
Example #8
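/*
 * orion OF init: the number of chips is derived from the number of
 * valid "reg" entries; each chip maps its own register window and
 * starts with all of its interrupts masked.
 */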
static int __init orion_irq_init(struct device_node *np,
				 struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int n, ret, base, num_chips = 0;
	struct resource r;

	/* count number of irq chips by valid reg addresses */
	while (of_address_to_resource(np, num_chips, &r) == 0)
		num_chips++;

	orion_irq_domain = irq_domain_add_linear(np,
				num_chips * ORION_IRQS_PER_CHIP,
				&irq_generic_chip_ops, NULL);
	if (!orion_irq_domain)
		panic("%s: unable to add irq domain\n", np->name);

	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
				ORION_IRQS_PER_CHIP, 1, np->name,
				handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		panic("%s: unable to alloc irq domain gc\n", np->name);

	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);

		of_address_to_resource(np, n, &r);

		if (!request_mem_region(r.start, resource_size(&r), np->name))
			panic("%s: unable to request mem region %d",
			      np->name, n);

		gc->reg_base = ioremap(r.start, resource_size(&r));
		if (!gc->reg_base)
			panic("%s: unable to map resource %d", np->name, n);

		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

		/* mask all interrupts */
		writel(0, gc->reg_base + ORION_IRQ_MASK);
	}

	set_handle_irq(orion_handle_irq);
	return 0;
}
Example #9
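/*
 * orion bridge chained handler: dispatch every cause bit that is not
 * filtered out by the generic chip's mask cache.
 */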
static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *d = irq_get_handler_data(irq);

	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
	u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
		   gc->mask_cache;

	while (stat) {
		u32 hwirq = ffs(stat) - 1;

		generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
		stat &= ~(1 << hwirq);
	}
}
Example #10
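/*
 * atmel-aic5 top-level entry: a zero Interrupt Status Register after
 * reading the vector indicates a spurious interrupt, which must still
 * be terminated with an EOI command.
 */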
static asmlinkage void __exception_irq_entry
aic5_handle(struct pt_regs *regs)
{
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0);
	u32 irqnr;
	u32 irqstat;

	irqnr = irq_reg_readl(bgc, AT91_AIC5_IVR);
	irqstat = irq_reg_readl(bgc, AT91_AIC5_ISR);

	if (!irqstat)
		irq_reg_writel(bgc, 0, AT91_AIC5_EOICR);
	else
		handle_domain_irq(aic5_domain, irqnr, regs);
}
Example #11
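/*
 * atmel-aic5 PM shutdown: disable and clear every source belonging to
 * the chip of this irq_data, through the shared SSR-indexed registers
 * and under the first chip's lock.
 */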
static void aic5_pm_shutdown(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int i;

	irq_gc_lock(bgc);
	for (i = 0; i < dgc->irqs_per_chip; i++) {
		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
		irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
		irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
	}
	irq_gc_unlock(bgc);
}
Example #12
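/*
 * atmel-aic5 mask callback: the hwirq is selected through SSR before
 * the disable command is issued, and the mask cache is updated by hand.
 */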
static void aic5_mask(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);

	/*
	 * Disable interrupt on AIC5. We always take the lock of the
	 * first irq chip as all chips share the same registers.
	 */
	irq_gc_lock(bgc);
	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
	irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
	gc->mask_cache &= ~d->mask;
	irq_gc_unlock(bgc);
}
Example #13
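/*
 * atmel-aic5 set_type callback: the trigger type lives in the Source
 * Mode Register of the source currently selected through SSR.
 */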
static int aic5_set_type(struct irq_data *d, unsigned type)
{
	struct irq_domain *domain = d->domain;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	unsigned int smr;
	int ret;

	irq_gc_lock(bgc);
	irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
	smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
	ret = aic_common_set_type(d, type, &smr);
	if (!ret)
		irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
	irq_gc_unlock(bgc);

	return ret;
}
Example #14
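/*
 * sirfsoc helper: configure the generic chip covering the interrupts
 * that start at irq_start; masking goes through SIRFSOC_INT_RISC_MASK0
 * relative to the given register base.
 */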
static __init void
sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	int ret;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

	ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
		handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);

	gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
	gc->reg_base = base;
	ct = gc->chip_types;
	ct->chip.irq_mask = irq_gc_mask_clr_bit;
	ct->chip.irq_unmask = irq_gc_mask_set_bit;
	ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
}
Example #15
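/*
 * orion top-level entry: scan each chip's cause register, filtered by
 * its mask cache, and dispatch the set bits from highest to lowest.
 */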
static void
__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
{
	struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
	int n, base = 0;

	for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);
		u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
			gc->mask_cache;
		while (stat) {
			u32 hwirq = __fls(stat);
			handle_domain_irq(orion_irq_domain,
					  gc->irq_base + hwirq, regs);
			stat &= ~(1 << hwirq);
		}
	}
}
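Example #16

/*
 * atmel-aic5 OF init: aic_common_of_init() allocates one generic chip
 * per 32 sources, but all chips share the same registers, so every
 * callback funnels through the SSR-indexed access pattern shown above.
 */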
static int __init aic5_of_init(struct device_node *node,
			       struct device_node *parent,
			       int nirqs)
{
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int nchips;
	int i;

	if (nirqs > NR_AIC5_IRQS)
		return -EINVAL;

	if (aic5_domain)
		return -EEXIST;

	domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
				    nirqs);
	if (IS_ERR(domain))
		return PTR_ERR(domain);

	aic_common_irq_fixup(aic5_irq_fixups);

	aic5_domain = domain;
	nchips = aic5_domain->revmap_size / 32;
	for (i = 0; i < nchips; i++) {
		gc = irq_get_domain_generic_chip(domain, i * 32);

		gc->chip_types[0].regs.eoi = AT91_AIC5_EOICR;
		gc->chip_types[0].chip.irq_mask = aic5_mask;
		gc->chip_types[0].chip.irq_unmask = aic5_unmask;
		gc->chip_types[0].chip.irq_retrigger = aic5_retrigger;
		gc->chip_types[0].chip.irq_set_type = aic5_set_type;
		gc->chip_types[0].chip.irq_suspend = aic5_suspend;
		gc->chip_types[0].chip.irq_resume = aic5_resume;
		gc->chip_types[0].chip.irq_pm_shutdown = aic5_pm_shutdown;
	}

	aic5_hw_init(domain);
	set_handle_irq(aic5_handle);

	return 0;
}
Example #17
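/*
 * atmel-aic5 PM resume: restore each source's enable bit from the mask
 * cache, skipping sources that the wakeup configuration already left in
 * the right state.
 */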
static void aic5_resume(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int i;
	u32 mask;

	irq_gc_lock(bgc);
	for (i = 0; i < dgc->irqs_per_chip; i++) {
		mask = 1 << i;
		if ((mask & gc->mask_cache) == (mask & gc->wake_active))
			continue;

		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
		if (mask & gc->mask_cache)
			irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
		else
			irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
	}
	irq_gc_unlock(bgc);
}
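Example #18

/*
 * omap-intc: one generic chip per 32 interrupts, with the per-bank
 * MIR_CLEAR/MIR_SET registers acting as the enable and disable
 * registers of each chip.
 */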
static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
{
	int ret;
	int i;

	ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
			handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
			IRQ_LEVEL, 0);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		return ret;
	}

	for (i = 0; i < omap_nr_pending; i++) {
		struct irq_chip_generic *gc;
		struct irq_chip_type *ct;

		gc = irq_get_domain_generic_chip(d, 32 * i);
		gc->reg_base = base;
		ct = gc->chip_types;

		ct->type = IRQ_TYPE_LEVEL_MASK;
		ct->handler = handle_level_irq;

		ct->chip.irq_ack = omap_mask_ack_irq;
		ct->chip.irq_mask = irq_gc_mask_disable_reg;
		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;

		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
	}

	return 0;
}
Example #19
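/*
 * tz1090-gpio bank probe: each bank gets its own domain and one generic
 * chip with two chip types, so level and edge interrupts can use
 * different flow handlers over the same registers.
 */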
static int tz1090_gpio_bank_probe(struct tz1090_gpio_bank_info *info)
{
	struct device_node *np = info->node;
	struct device *dev = info->priv->dev;
	struct tz1090_gpio_bank *bank;
	struct irq_chip_generic *gc;
	int err;

	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "unable to allocate driver data\n");
		return -ENOMEM;
	}

	/* Offset the main registers to the first register in this bank */
	bank->reg = info->priv->reg + info->index * 4;

	/* Set up GPIO chip */
	snprintf(bank->label, sizeof(bank->label), "tz1090-gpio-%u",
		 info->index);
	bank->chip.label		= bank->label;
	bank->chip.dev			= dev;
	bank->chip.direction_input	= tz1090_gpio_direction_input;
	bank->chip.direction_output	= tz1090_gpio_direction_output;
	bank->chip.get			= tz1090_gpio_get;
	bank->chip.set			= tz1090_gpio_set;
	bank->chip.free			= tz1090_gpio_free;
	bank->chip.request		= tz1090_gpio_request;
	bank->chip.to_irq		= tz1090_gpio_to_irq;
	bank->chip.of_node		= np;

	/* GPIO numbering from 0 */
	bank->chip.base			= info->index * 30;
	bank->chip.ngpio		= 30;

	/* Add the GPIO bank */
	err = gpiochip_add(&bank->chip);
	if (err)
		return err;

	/* Get the GPIO bank IRQ if provided */
	bank->irq = irq_of_parse_and_map(np, 0);

	/* The interrupt is optional (it may be used by another core on chip) */
	if (!bank->irq) {
		dev_info(dev, "IRQ not provided for bank %u, IRQs disabled\n",
			 info->index);
		return 0;
	}

	dev_info(dev, "Setting up IRQs for GPIO bank %u\n",
		 info->index);

	/*
	 * Initialise all interrupts to disabled so we don't get
	 * spurious ones on a dirty boot and hit the BUG_ON in the
	 * handler.
	 */
	tz1090_gpio_write(bank, REG_GPIO_IRQ_EN, 0);

	/* Add a virtual IRQ for each GPIO */
	bank->domain = irq_domain_add_linear(np,
					     bank->chip.ngpio,
					     &irq_generic_chip_ops,
					     bank);
	if (!bank->domain) {
		dev_info(dev,
			 "irq_domain_add_linear failed for bank %u, IRQs disabled\n",
			 info->index);
		return 0;
	}

	/* Set up a generic irq chip with 2 chip types (level and edge) */
	err = irq_alloc_domain_generic_chips(bank->domain, bank->chip.ngpio, 2,
					     bank->label, handle_bad_irq, 0, 0,
					     IRQ_GC_INIT_NESTED_LOCK);
	if (err) {
		dev_info(dev,
			 "irq_alloc_domain_generic_chips failed for bank %u, IRQs disabled\n",
			 info->index);
		irq_domain_remove(bank->domain);
		return 0;
	}

	gc = irq_get_domain_generic_chip(bank->domain, 0);
	gc->reg_base	= bank->reg;

	/* level chip type */
	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].handler		= handle_level_irq;
	gc->chip_types[0].regs.ack		= REG_GPIO_IRQ_STS;
	gc->chip_types[0].regs.mask		= REG_GPIO_IRQ_EN;
	gc->chip_types[0].chip.irq_startup	= gpio_startup_irq;
	gc->chip_types[0].chip.irq_ack		= irq_gc_ack_clr_bit;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_set_type	= gpio_set_irq_type;
	gc->chip_types[0].chip.irq_set_wake	= gpio_set_irq_wake;
	gc->chip_types[0].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;

	/* edge chip type */
	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].handler		= handle_edge_irq;
	gc->chip_types[1].regs.ack		= REG_GPIO_IRQ_STS;
	gc->chip_types[1].regs.mask		= REG_GPIO_IRQ_EN;
	gc->chip_types[1].chip.irq_startup	= gpio_startup_irq;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_clr_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= gpio_set_irq_type;
	gc->chip_types[1].chip.irq_set_wake	= gpio_set_irq_wake;
	gc->chip_types[1].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;

	/* Setup chained handler for this GPIO bank */
	irq_set_handler_data(bank->irq, bank);
	irq_set_chained_handler(bank->irq, tz1090_gpio_irq_handler);

	return 0;
}
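Example #20

/*
 * img-pdc probe: two generic chips share one 16-interrupt domain,
 * hwirqs 0-7 for peripheral wakeups (one chip type used) and hwirqs
 * 8-15 for syswakes (edge and level chip types).
 */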
static int pdc_intc_probe(struct platform_device *pdev)
{
	struct pdc_intc_priv *priv;
	struct device_node *node = pdev->dev.of_node;
	struct resource *res_regs;
	struct irq_chip_generic *gc;
	unsigned int i;
	int irq, ret;
	u32 val;

	if (!node)
		return -ENOENT;

	/* Get registers */
	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_regs == NULL) {
		dev_err(&pdev->dev, "cannot find registers resource\n");
		return -ENOENT;
	}

	/* Allocate driver data */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}
	raw_spin_lock_init(&priv->lock);
	platform_set_drvdata(pdev, priv);

	/* Ioremap the registers */
	priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
				      resource_size(res_regs));
	if (!priv->pdc_base)
		return -EIO;

	/* Get number of peripherals */
	ret = of_property_read_u32(node, "num-perips", &val);
	if (ret) {
		dev_err(&pdev->dev, "No num-perips node property found\n");
		return -EINVAL;
	}
	if (val > SYS0_HWIRQ) {
		dev_err(&pdev->dev, "num-perips (%u) out of range\n", val);
		return -EINVAL;
	}
	priv->nr_perips = val;

	/* Get number of syswakes */
	ret = of_property_read_u32(node, "num-syswakes", &val);
	if (ret) {
		dev_err(&pdev->dev, "No num-syswakes node property found\n");
		return -EINVAL;
	}
	if (val > SYS0_HWIRQ) {
		dev_err(&pdev->dev, "num-syswakes (%u) out of range\n", val);
		return -EINVAL;
	}
	priv->nr_syswakes = val;

	/* Get peripheral IRQ numbers */
	priv->perip_irqs = devm_kzalloc(&pdev->dev,
					sizeof(*priv->perip_irqs) * priv->nr_perips,
					GFP_KERNEL);
	if (!priv->perip_irqs) {
		dev_err(&pdev->dev, "cannot allocate perip IRQ list\n");
		return -ENOMEM;
	}
	for (i = 0; i < priv->nr_perips; ++i) {
		irq = platform_get_irq(pdev, 1 + i);
		if (irq < 0) {
			dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i);
			return irq;
		}
		priv->perip_irqs[i] = irq;
	}
	/* check if too many were provided */
	if (platform_get_irq(pdev, 1 + i) >= 0) {
		dev_err(&pdev->dev, "surplus perip IRQs detected\n");
		return -EINVAL;
	}

	/* Get syswake IRQ number */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "cannot find syswake IRQ\n");
		return irq;
	}
	priv->syswake_irq = irq;

	/* Set up an IRQ domain */
	priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops,
					     priv);
	if (unlikely(!priv->domain)) {
		dev_err(&pdev->dev, "cannot add IRQ domain\n");
		return -ENOMEM;
	}

	/*
	 * Set up 2 generic irq chips with 2 chip types.
	 * The first one for peripheral irqs (only 1 chip type used)
	 * The second one for syswake irqs (edge and level chip types)
	 */
	ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc",
					     handle_level_irq, 0, 0,
					     IRQ_GC_INIT_NESTED_LOCK);
	if (ret)
		goto err_generic;

	/* peripheral interrupt chip */

	gc = irq_get_domain_generic_chip(priv->domain, 0);
	gc->unused	= ~(BIT(priv->nr_perips) - 1);
	gc->reg_base	= priv->pdc_base;
	/*
	 * IRQ_ROUTE contains wake bits, so we can't use the generic versions as
	 * they cache the mask
	 */
	gc->chip_types[0].regs.mask		= PDC_IRQ_ROUTE;
	gc->chip_types[0].chip.irq_mask		= perip_irq_mask;
	gc->chip_types[0].chip.irq_unmask	= perip_irq_unmask;
	gc->chip_types[0].chip.irq_set_wake	= pdc_irq_set_wake;

	/* syswake interrupt chip */

	gc = irq_get_domain_generic_chip(priv->domain, 8);
	gc->unused	= ~(BIT(priv->nr_syswakes) - 1);
	gc->reg_base	= priv->pdc_base;

	/* edge interrupts */
	gc->chip_types[0].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[0].handler		= handle_edge_irq;
	gc->chip_types[0].regs.ack		= PDC_IRQ_CLEAR;
	gc->chip_types[0].regs.mask		= PDC_IRQ_ENABLE;
	gc->chip_types[0].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_set_type	= syswake_irq_set_type;
	gc->chip_types[0].chip.irq_set_wake	= pdc_irq_set_wake;
	/* for standby we pass on to the shared syswake IRQ */
	gc->chip_types[0].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;

	/* level interrupts */
	gc->chip_types[1].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[1].handler		= handle_level_irq;
	gc->chip_types[1].regs.ack		= PDC_IRQ_CLEAR;
	gc->chip_types[1].regs.mask		= PDC_IRQ_ENABLE;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= syswake_irq_set_type;
	gc->chip_types[1].chip.irq_set_wake	= pdc_irq_set_wake;
	/* for standby we pass on to the shared syswake IRQ */
	gc->chip_types[1].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;

	/* Set up the hardware to enable interrupt routing */
	pdc_intc_setup(priv);

	/* Setup chained handlers for the peripheral IRQs */
	for (i = 0; i < priv->nr_perips; ++i) {
		irq = priv->perip_irqs[i];
		irq_set_handler_data(irq, priv);
		irq_set_chained_handler(irq, pdc_intc_perip_isr);
	}

	/* Setup chained handler for the syswake IRQ */
	irq_set_handler_data(priv->syswake_irq, priv);
	irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr);

	dev_info(&pdev->dev,
		 "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
		 priv->nr_perips,
		 priv->nr_syswakes);

	return 0;
err_generic:
	irq_domain_remove(priv->domain);
	return ret;
}
Example #21
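/*
 * orion bridge OF init: a single generic chip behind a chained parent
 * interrupt; cause bits are acknowledged by clearing them, hence the
 * irq_gc_ack_clr_bit callback.
 */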
static int __init orion_bridge_irq_init(struct device_node *np,
					struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	int ret, irq;
	u32 nrirqs = 32;

	/* get optional number of interrupts provided */
	of_property_read_u32(np, "marvell,#interrupts", &nrirqs);

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
			     handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->name);
		return ret;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->name);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->name)) {
		pr_err("%s: unable to request mem region\n", np->name);
		return -ENOMEM;
	}

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->name);
		return -EINVAL;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = ioremap(r.start, resource_size(&r));
	if (!gc->reg_base) {
		pr_err("%s: unable to map resource\n", np->name);
		return -ENOMEM;
	}

	gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
	gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
	gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	/* mask and clear all interrupts */
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);

	irq_set_handler_data(irq, domain);
	irq_set_chained_handler(irq, orion_bridge_irq_handler);

	return 0;
}