/*
 * OF xlate for AIC5: decode the interrupt specifier and program the
 * requested priority into the selected source's SMR.
 *
 * Uses the irqsave lock variant so it is safe even if called with
 * interrupts otherwise enabled.
 */
static int aic5_irq_domain_xlate(struct irq_domain *d,
				 struct device_node *ctrlr,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq,
				 unsigned int *out_type)
{
	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
	unsigned long flags;
	unsigned mode;
	int err;

	if (!gc)
		return -EINVAL;

	err = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
					  out_hwirq, out_type);
	if (err)
		return err;

	irq_gc_lock_irqsave(gc, flags);
	/* Select the source, then read-modify-write its mode register. */
	irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR);
	mode = irq_reg_readl(gc, AT91_AIC5_SMR);
	err = aic_common_set_priority(intspec[2], &mode);
	if (err == 0)
		irq_reg_writel(gc, intspec[2] | mode, AT91_AIC5_SMR);
	irq_gc_unlock_irqrestore(gc, flags);

	return err;
}
/*
 * OF xlate for AIC5: decode the interrupt specifier and program the
 * requested priority into the selected source's SMR.
 *
 * Fix: ->xlate() is not guaranteed to run with interrupts disabled.
 * The previous code took the generic-chip lock with plain irq_gc_lock();
 * an interrupt arriving while the lock is held would re-enter the
 * irqchip (mask/unmask take the same lock) and deadlock.  Use the
 * irqsave variant, as the other xlate implementation in this file does.
 */
static int aic5_irq_domain_xlate(struct irq_domain *d,
				 struct device_node *ctrlr,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq,
				 unsigned int *out_type)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	struct irq_chip_generic *gc;
	unsigned long flags;
	unsigned smr;
	int ret;

	if (!dgc)
		return -EINVAL;

	ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
					  out_hwirq, out_type);
	if (ret)
		return ret;

	gc = dgc->gc[0];

	irq_gc_lock_irqsave(gc, flags);
	/* Select the source, then read-modify-write its mode register. */
	irq_reg_writel(*out_hwirq, gc->reg_base + AT91_AIC5_SSR);
	smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR);
	ret = aic_common_set_priority(intspec[2], &smr);
	if (!ret)
		irq_reg_writel(intspec[2] | smr, gc->reg_base + AT91_AIC5_SMR);
	irq_gc_unlock_irqrestore(gc, flags);

	return ret;
}
/*
 * Resume callback: restore the pre-suspend interrupt enable state.
 * Wake sources (wake_active) are disabled again via IDCR, then the
 * cached mask (mask_cache) is re-enabled via IECR.
 */
static void aic_resume(struct irq_data *d)
{
	struct irq_chip_generic *chip = irq_data_get_irq_chip_data(d);

	irq_gc_lock(chip);
	irq_reg_writel(chip, chip->wake_active, AT91_AIC_IDCR);
	irq_reg_writel(chip, chip->mask_cache, AT91_AIC_IECR);
	irq_gc_unlock(chip);
}
/*
 * PM shutdown callback: quiesce the controller by disabling (IDCR)
 * and clearing (ICCR) every interrupt line.
 */
static void aic_pm_shutdown(struct irq_data *d)
{
	struct irq_chip_generic *chip = irq_data_get_irq_chip_data(d);

	irq_gc_lock(chip);
	irq_reg_writel(chip, 0xffffffff, AT91_AIC_IDCR);
	irq_reg_writel(chip, 0xffffffff, AT91_AIC_ICCR);
	irq_gc_unlock(chip);
}
/*
 * Retrigger callback: software-set the interrupt pending on AIC5.
 * The source is first selected through SSR, then ISCR sets it pending.
 */
static int aic5_retrigger(struct irq_data *d)
{
	struct irq_chip_generic *chip =
		irq_get_domain_generic_chip(d->domain, 0);

	irq_gc_lock(chip);
	irq_reg_writel(chip, d->hwirq, AT91_AIC5_SSR);
	irq_reg_writel(chip, 1, AT91_AIC5_ISCR);
	irq_gc_unlock(chip);

	return 0;
}
/*
 * Mask callback: disable the interrupt on AIC5.
 *
 * Fix: the previous code updated dgc->gc[0]->mask_cache for every IRQ,
 * even those belonging to another generic chip (hwirq >= irqs_per_chip),
 * corrupting the first chip's mask cache.  The mask cache must be the
 * one of the chip that owns this irq_data (see the other aic5_mask
 * variant in this file); the first chip's lock is still used because
 * all AIC5 chips share one register file.
 */
static void aic5_mask(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = dgc->gc[0];
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);

	/* Disable interrupt on AIC5 */
	irq_gc_lock(bgc);
	irq_reg_writel(d->hwirq, bgc->reg_base + AT91_AIC5_SSR);
	irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
	/* Update the owning chip's cache, not unconditionally gc[0]'s. */
	gc->mask_cache &= ~d->mask;
	irq_gc_unlock(bgc);
}
/*
 * Retrigger callback (legacy register API): software-set the interrupt
 * pending on AIC5 by selecting the source (SSR) and writing ISCR.
 */
static int aic5_retrigger(struct irq_data *d)
{
	struct irq_domain_chip_generic *dgc = d->domain->gc;
	struct irq_chip_generic *chip = dgc->gc[0];

	irq_gc_lock(chip);
	irq_reg_writel(d->hwirq, chip->reg_base + AT91_AIC5_SSR);
	irq_reg_writel(1, chip->reg_base + AT91_AIC5_ISCR);
	irq_gc_unlock(chip);

	return 0;
}
/* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */
static void s3c_irq_timer_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	unsigned int offset = d->irq - gc->irq_base;
	/* Timer bits start at position 5 in this register. */
	u32 bit = (1 << 5) << offset;

	irq_reg_writel(bit | gc->mask_cache, gc->reg_base);
}
/*
 * OF xlate for AIC: decode the interrupt specifier and program the
 * requested priority into the source's SMR.  The chip owning the
 * source is looked up from the specifier, with a bounds check.
 *
 * Fix: ->xlate() is not guaranteed to run with interrupts disabled.
 * Taking the generic-chip lock with plain irq_gc_lock() can deadlock
 * if an interrupt handled through the same chip arrives while the
 * lock is held (mask/unmask take the same lock).  Use the irqsave
 * variant, matching the irqsave xlate implementation in this file.
 */
static int aic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	struct irq_chip_generic *gc;
	unsigned long flags;
	unsigned smr;
	int idx;
	int ret;

	if (!dgc)
		return -EINVAL;

	ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
					  out_hwirq, out_type);
	if (ret)
		return ret;

	/* Pick the generic chip that owns this hwirq. */
	idx = intspec[0] / dgc->irqs_per_chip;
	if (idx >= dgc->num_chips)
		return -EINVAL;

	gc = dgc->gc[idx];

	irq_gc_lock_irqsave(gc, flags);
	smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
	ret = aic_common_set_priority(intspec[2], &smr);
	if (!ret)
		irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
	irq_gc_unlock_irqrestore(gc, flags);

	return ret;
}
static void aic5_pm_shutdown(struct irq_data *d) { struct irq_domain *domain = d->domain; struct irq_domain_chip_generic *dgc = domain->gc; struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); int i; irq_gc_lock(bgc); for (i = 0; i < dgc->irqs_per_chip; i++) { irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); irq_reg_writel(bgc, 1, AT91_AIC5_ICCR); } irq_gc_unlock(bgc); }
static void aic5_mask(struct irq_data *d) { struct irq_domain *domain = d->domain; struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); /* * Disable interrupt on AIC5. We always take the lock of the * first irq chip as all chips share the same registers. */ irq_gc_lock(bgc); irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(gc, 1, AT91_AIC5_IDCR); gc->mask_cache &= ~d->mask; irq_gc_unlock(bgc); }
/*
 * Set-type callback: select the source through SSR, then
 * read-modify-write its SMR with the translated trigger mode.
 * Plain irq_gc_lock() is used; set_type runs from irq management
 * paths — NOTE(review): presumed irqs-off context, as the original did.
 */
static int aic5_set_type(struct irq_data *d, unsigned type)
{
	struct irq_chip_generic *chip =
		irq_get_domain_generic_chip(d->domain, 0);
	unsigned int mode;
	int err;

	irq_gc_lock(chip);
	irq_reg_writel(chip, d->hwirq, AT91_AIC5_SSR);
	mode = irq_reg_readl(chip, AT91_AIC5_SMR);
	err = aic_common_set_type(d, type, &mode);
	if (err == 0)
		irq_reg_writel(chip, mode, AT91_AIC5_SMR);
	irq_gc_unlock(chip);

	return err;
}
/* One-time hardware init: bring the AIC5 into a known, fully-disabled state. */
static void __init aic5_hw_init(struct irq_domain *domain)
{
	struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
	int i;

	/*
	 * Perform 8 End Of Interrupt Command to make sure AIC
	 * will not Lock out nIRQ
	 */
	for (i = 0; i < 8; i++)
		irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR);

	/*
	 * Spurious Interrupt ID in Spurious Vector Register.
	 * When there is no current interrupt, the IRQ Vector Register
	 * reads the value stored in AIC_SPU
	 */
	irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC5_SPU);

	/* No debugging in AIC: Debug (Protect) Control Register */
	irq_reg_writel(0, gc->reg_base + AT91_AIC5_DCR);

	/* Disable and clear all interrupts initially */
	for (i = 0; i < domain->revmap_size; i++) {
		/* Each register access targets the source selected via SSR. */
		irq_reg_writel(i, gc->reg_base + AT91_AIC5_SSR);
		/* Vector register preloaded with the source number i. */
		irq_reg_writel(i, gc->reg_base + AT91_AIC5_SVR);
		irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR);
		irq_reg_writel(1, gc->reg_base + AT91_AIC5_ICCR);
	}
}
static int aic5_set_type(struct irq_data *d, unsigned type) { struct irq_domain *domain = d->domain; struct irq_domain_chip_generic *dgc = domain->gc; struct irq_chip_generic *gc = dgc->gc[0]; unsigned int smr; int ret; irq_gc_lock(gc); irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR); ret = aic_common_set_type(d, type, &smr); if (!ret) irq_reg_writel(smr, gc->reg_base + AT91_AIC5_SMR); irq_gc_unlock(gc); return ret; }
/*
 * Retrigger callback: software-set the interrupt pending on the AIC
 * by writing the line's bit to ISCR.
 */
static int aic_retrigger(struct irq_data *d)
{
	struct irq_chip_generic *chip = irq_data_get_irq_chip_data(d);

	irq_gc_lock(chip);
	irq_reg_writel(chip, d->mask, AT91_AIC_ISCR);
	irq_gc_unlock(chip);

	return 0;
}
/*
 * Resume callback for AIC5: re-program the enable state of every source
 * owned by this irq_data's chip.  Sources whose normal mask state equals
 * their wake state were left correct across suspend and are skipped.
 * The first (base) chip's lock is taken since all chips share registers.
 */
static void aic5_resume(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int i;
	u32 mask;

	irq_gc_lock(bgc);
	for (i = 0; i < dgc->irqs_per_chip; i++) {
		mask = 1 << i;
		/* Enable state already matches wake state: nothing to do. */
		if ((mask & gc->mask_cache) == (mask & gc->wake_active))
			continue;

		/* Select the source, then enable or disable it. */
		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
		if (mask & gc->mask_cache)
			irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
		else
			irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
	}
	irq_gc_unlock(bgc);
}
/*
 * Set-type callback: read the source's SMR, translate the requested
 * trigger type into it, and write it back only on success.
 */
static int aic_set_type(struct irq_data *d, unsigned type)
{
	struct irq_chip_generic *chip = irq_data_get_irq_chip_data(d);
	unsigned int mode;
	int err;

	mode = irq_reg_readl(chip, AT91_AIC_SMR(d->hwirq));
	err = aic_common_set_type(d, type, &mode);
	if (err)
		return err;

	irq_reg_writel(chip, mode, AT91_AIC_SMR(d->hwirq));

	return 0;
}
/*
 * Top-level AIC5 interrupt entry: read the vector (IVR) and status
 * (ISR) registers; a zero status means spurious, which is acknowledged
 * with an End-Of-Interrupt command, otherwise the irq is dispatched
 * through the domain.
 */
static asmlinkage void __exception_irq_entry aic5_handle(struct pt_regs *regs)
{
	struct irq_chip_generic *chip =
		irq_get_domain_generic_chip(aic5_domain, 0);
	u32 hwirq, status;

	hwirq = irq_reg_readl(chip, AT91_AIC5_IVR);
	status = irq_reg_readl(chip, AT91_AIC5_ISR);

	if (status)
		handle_domain_irq(aic5_domain, hwirq, regs);
	else
		irq_reg_writel(chip, 0, AT91_AIC5_EOICR);
}
/*
 * Top-level AIC interrupt entry: read the vector (IVR) and status
 * (ISR) registers; a zero status means spurious, which is acknowledged
 * with an End-Of-Interrupt command, otherwise the irq is dispatched
 * through the domain.
 */
static asmlinkage void __exception_irq_entry aic_handle(struct pt_regs *regs)
{
	struct irq_domain_chip_generic *dgc = aic_domain->gc;
	struct irq_chip_generic *chip = dgc->gc[0];
	u32 hwirq, status;

	hwirq = irq_reg_readl(chip, AT91_AIC_IVR);
	status = irq_reg_readl(chip, AT91_AIC_ISR);

	if (status)
		handle_domain_irq(aic_domain, hwirq, regs);
	else
		irq_reg_writel(chip, 0, AT91_AIC_EOICR);
}
/*
 * Top-level AIC5 interrupt entry (legacy register API): read the vector
 * (IVR) and status (ISR) registers, translate the hardware number into
 * a Linux irq, and dispatch it; a zero status means spurious and is
 * acknowledged with an End-Of-Interrupt command.
 */
static asmlinkage void __exception_irq_entry aic5_handle(struct pt_regs *regs)
{
	struct irq_domain_chip_generic *dgc = aic5_domain->gc;
	struct irq_chip_generic *chip = dgc->gc[0];
	u32 irq, status;

	irq = irq_reg_readl(chip->reg_base + AT91_AIC5_IVR);
	status = irq_reg_readl(chip->reg_base + AT91_AIC5_ISR);

	/* hw number -> Linux irq (done before the spurious check, as before) */
	irq = irq_find_mapping(aic5_domain, irq);

	if (status)
		handle_IRQ(irq, regs);
	else
		irq_reg_writel(0, chip->reg_base + AT91_AIC5_EOICR);
}
/* Thin helper: write @val to the register at offset @reg from gc->reg_base. */
static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg,
				      u32 val)
{
	irq_reg_writel(val, gc->reg_base + reg);
}