static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct gpio_chip *gc = &mpc8xxx_gc->gc;
	unsigned int mask;

	/* Only consider events that are both latched and enabled. */
	mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
		& gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);

	/* GPIO 0 corresponds to the register MSB, hence the 32 - ffs(mask). */
	if (mask)
		generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
						     32 - ffs(mask)));
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned long mask;
	int pin;

	chip->irq_mask_ack(data);

	mask = __raw_readw(KEYDETR);

	for_each_set_bit(pin, &mask, NR_BASEBOARD_GPIOS)
		generic_handle_irq(irq_linear_revmap(x3proto_irq_domain, pin));

	chip->irq_unmask(data);
}
unsigned mtvic_get_irq(void)
{
	static unsigned i = 0;
	unsigned *irqs = virq_host->host_data;

	if (!irqs)
		return NO_IRQ;

	for (i = (i + 1) & 31; *irqs; i = (i + 1) & 31) {
		if (*irqs & (1 << i)) {
			/* Clear the pending bit we are about to handle. */
			atomic_sub(1 << i, (atomic_t *)irqs);
			return irq_linear_revmap(virq_host, i);
		}
	}

	return NO_IRQ;
}
static void oxnas_gpio_irq_handler(struct irq_desc *desc)
{
	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
	struct oxnas_gpio_bank *bank = gpiochip_get_data(gc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long stat;
	unsigned int pin;

	chained_irq_enter(chip, desc);

	stat = readl(bank->reg_base + IRQ_PENDING);

	for_each_set_bit(pin, &stat, BITS_PER_LONG)
		generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));

	chained_irq_exit(chip, desc);
}
static void goldfish_pic_cascade(struct irq_desc *desc)
{
	struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
	struct irq_chip *host_chip = irq_desc_get_chip(desc);
	u32 pending, hwirq, virq;

	chained_irq_enter(host_chip, desc);

	pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
	while (pending) {
		hwirq = __fls(pending);
		virq = irq_linear_revmap(gfpic->irq_domain, hwirq);
		generic_handle_irq(virq);
		pending &= ~(1 << hwirq);
	}

	chained_irq_exit(host_chip, desc);
}
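/*
 * The handlers above all follow the same chained-dispatch pattern: read a
 * pending register, translate each set bit to a Linux virq with
 * irq_linear_revmap(), and hand it to generic_handle_irq(). A minimal
 * generic sketch of that pattern follows; "struct demo_intc", its fields
 * and DEMO_PENDING are hypothetical placeholders, not taken from any of
 * the drivers in this collection.
 */
#define DEMO_PENDING	0x10		/* hypothetical pending-status register */

struct demo_intc {			/* hypothetical driver-private state */
	void __iomem *base;
	struct irq_domain *domain;
};

static void demo_intc_cascade(struct irq_desc *desc)
{
	struct demo_intc *priv = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	unsigned int hwirq;

	chained_irq_enter(chip, desc);

	/* One status bit per child interrupt; dispatch every set bit. */
	pending = readl(priv->base + DEMO_PENDING);
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_irq(irq_linear_revmap(priv->domain, hwirq));

	chained_irq_exit(chip, desc);
}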
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 pending;
	int virq;

	pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n",
			offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
}
static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
	u32 offset, reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	ks_pcie = to_keystone_pcie(pp);
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);

	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
}
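/*
 * Note: update_reg_offset_bit_pos() is not part of this collection. Judging
 * from the vector arithmetic in ks_dw_pcie_handle_msi_irq() further below
 * (vector = reg + (bit << 3)), it presumably splits an MSI vector offset into
 * reg_offset = offset % 8 and bit_pos = offset / 8; treat that as an
 * assumption rather than the driver's definition.
 */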
static int cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
			    u8 __iomem *maskp)
{
	int cpld_irq;
	u8 status = in_8(statusp);
	u8 mask = in_8(maskp);

	/* ignore don't cares and masked irqs */
	status |= (ignore | mask);

	if (status == 0xff)
		return NO_IRQ_IGNORE;

	cpld_irq = ffz(status) + offset;

	return irq_linear_revmap(cpld_pic_host, cpld_irq);
}
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;
	int virq;

	pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
}
/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ehv_pic_get_irq(void)
{
	int irq;

	BUG_ON(global_ehv_pic == NULL);

	if (global_ehv_pic->coreint_flag)
		irq = mfspr(SPRN_EPR);	/* if core int mode */
	else
		ev_int_iack(0, &irq);	/* legacy mode */

	if (irq == 0xFFFF)		/* 0xFFFF --> no irq is pending */
		return 0;

	/*
	 * this will also setup revmap[] in the slow path for the first
	 * time, next calls will always use fast path by indexing revmap
	 */
	return irq_linear_revmap(global_ehv_pic->irqhost, irq);
}
static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc)
{
	struct pdc_intc_priv *priv;
	unsigned int i, irq_no;

	priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);

	/* find the peripheral number */
	for (i = 0; i < priv->nr_perips; ++i)
		if (irq == priv->perip_irqs[i])
			goto found;

	/* should never get here */
	return;
found:
	/* pass on the interrupt */
	irq_no = irq_linear_revmap(priv->domain, i);
	generic_handle_irq(irq_no);
}
static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
	struct keystone_pcie *ks_pcie;
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	ks_pcie = to_keystone_pcie(pp);
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);

	/* Unmask the end point if PVM implemented */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			pci_msi_unmask_irq(d);
	}

	ks_dw_pcie_msi_set_irq(pp, offset);
}
static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc)
{
	struct pdc_intc_priv *priv;
	unsigned int syswake, irq_no;
	unsigned int status;

	priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);

	status = pdc_read(priv, PDC_IRQ_STATUS) &
		 pdc_read(priv, PDC_IRQ_ENABLE);
	status &= (1 << priv->nr_syswakes) - 1;

	for (syswake = 0; status; status >>= 1, ++syswake) {
		/* Has this sys_wake triggered? */
		if (!(status & 1))
			continue;

		irq_no = irq_linear_revmap(priv->domain,
					   syswake_to_hwirq(syswake));
		generic_handle_irq(irq_no);
	}
}
static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct megamod_cascade_data *cascade;
	struct megamod_pic *pic;
	u32 events;
	int n, idx;

	cascade = irq_desc_get_handler_data(desc);

	pic = cascade->pic;
	idx = cascade->index;

	while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
		n = __ffs(events);

		irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);

		soc_writel(1 << n, &pic->regs->evtclr[idx]);

		generic_handle_irq(irq);
	}
}
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 pending, vector;
	int src, virq;

	pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));

	/*
	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
	 * shows 1, 9, 17, 25 and so forth
	 */
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}
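/*
 * Worked example of the mapping above (not taken from the driver itself):
 * with offset = 2 (the MSI2 status register) and src = 3, the handled
 * vector is 2 + (3 << 3) = 26, so MSI2 covers vectors 2, 10, 18 and 26.
 */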
static void
__exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	u32 stat;

	stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
	if (stat & BIT(LOCAL_IRQ_MAILBOX0)) {
#ifdef CONFIG_SMP
		void __iomem *mailbox0 = (intc.base +
					  LOCAL_MAILBOX0_CLR0 + 16 * cpu);
		u32 mbox_val = readl(mailbox0);
		u32 ipi = ffs(mbox_val) - 1;

		writel(1 << ipi, mailbox0);
		handle_IPI(ipi, regs);
#endif
	} else if (stat) {
		u32 hwirq = ffs(stat) - 1;

		handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
	}
}
unsigned int gef_pic_get_irq(void)
{
	u32 cause, mask, active;
	unsigned int virq = NO_IRQ;
	int hwirq;

	cause = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_STATUS);

	mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));

	active = cause & mask;

	if (active) {
		for (hwirq = GEF_PIC_NUM_IRQS - 1; hwirq > -1; hwirq--) {
			if (active & (0x1 << hwirq))
				break;
		}

		virq = irq_linear_revmap(gef_pic_irq_host,
					 (irq_hw_number_t)hwirq);
	}

	return virq;
}
static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	struct pq2ads_pci_pic *priv = desc->handler_data;
	u32 stat, mask, pend;
	int bit;

	for (;;) {
		stat = in_be32(&priv->regs->stat);
		mask = in_be32(&priv->regs->mask);

		pend = stat & ~mask;

		if (!pend)
			break;

		for (bit = 0; pend != 0; ++bit, pend <<= 1) {
			if (pend & 0x80000000) {
				int virq = irq_linear_revmap(priv->host, bit);
				generic_handle_irq(virq);
			}
		}
	}
}
static unsigned int pfm_get_irq(void)
{
	int cnt = 1;
	unsigned char scc, esp;

	//printk(KERN_INFO "%s(): enter\n", __func__);
	via1_pen = AMIC_MASK(in_8(pfm_irq.via1_flag));
	out_8(pfm_irq.via1_flag, via1_pen & AMIC_MASK(in_8(pfm_irq.via1_ena)));
	via1_pen &= VIA1_DEV;

	//DO_IRQ(via1_pen, 0x04, 0, 2, 2);
	if (via1_pen) {
		// __do_IRQ(2, regs);
		return irq_linear_revmap(pmac_pfm_host, 2);
	}

more:
	/*
	 * I don't know where to look for the SCC irq, so we ask the device
	 * whether the irq is indeed coming from it.
	 */
	out_8(serial, 3);
	scc = in_8(serial);

	/*
	 * On ESP SCSI we seem to lose an irq and then get no irq at all
	 * any longer, so check it explicitly.
	 */
	esp = in_8(scsi);

	if (scc & 0x3f)
		slt1_pen |= 0x80;
	if (esp & 0x80)
		via2_pen |= 0x08;

#if 0
	if (via1_pen || via2_pen || slt1_pen || slt2_pen || f108_pen)
		printk(KERN_ERR "%02d:via1:%02X,via2:%02X,slt1:%02X,slt2:%02X,"
		       "f108:%02X,pcr:%02X,scc:%02X\n",
		       cnt, via1_pen, via2_pen,
		       0xff & (~in_8(pfm_irq.slt1_flag)),
		       AMIC_MASK(~in_8(pfm_irq.slt2_flag)),
		       in_8(pfm_irq.f108_flag), in_8(pfm_irq.via1_pcr), scc);
#endif

	if (via2_pen) {
		//DO_IRQ(via2_pen, 0x10, 8, 4, 3);
		if (via2_pen & 0x10) {
			// __do_IRQ(12, regs);
			return irq_linear_revmap(pmac_pfm_host, 12);
		}
		if (via2_pen & 0x08) {
			// __do_IRQ(11, regs);
			return irq_linear_revmap(pmac_pfm_host, 11);
		}
		//DO_IRQ(via2_pen, 0x01, 8, 0, 0);
		if (via2_pen & 0x01) {
			// __do_IRQ(8, regs);
			return irq_linear_revmap(pmac_pfm_host, 8);
		}
	}

	//DO_IRQ(f108_pen, 0x20, 32, 5, 2);
	if (f108_pen) {
		if (f108_pen & 0x20) {
			// __do_IRQ(37, regs);
			return irq_linear_revmap(pmac_pfm_host, 37);
		}
		if (f108_pen & 0x10) {
			// __do_IRQ(36, regs);
			return irq_linear_revmap(pmac_pfm_host, 36);
		}
		if (f108_pen & 0x08) {
			// __do_IRQ(35, regs);
			return irq_linear_revmap(pmac_pfm_host, 35);
		}
		if (f108_pen & 0x04) {
			// __do_IRQ(34, regs);
			return irq_linear_revmap(pmac_pfm_host, 34);
		}
	}

	//DO_IRQ(slt1_pen, 0x80, 16, 7, 7);
	if (slt1_pen & 0x80) {
		// __do_IRQ(23, regs);
		return irq_linear_revmap(pmac_pfm_host, 23);
	}

	//DO_IRQ(slt2_pen, 0x20, 24, 5, 0);
	if (slt2_pen) {
		if (slt2_pen & 0x20) {
			// __do_IRQ(29, regs);
			return irq_linear_revmap(pmac_pfm_host, 29);
		}
		if (slt2_pen & 0x10) {
			// __do_IRQ(28, regs);
			return irq_linear_revmap(pmac_pfm_host, 28);
		}
		if (slt2_pen & 0x08) {
			// __do_IRQ(27, regs);
			return irq_linear_revmap(pmac_pfm_host, 27);
		}
		if (slt2_pen & 0x04) {
			// __do_IRQ(26, regs);
			return irq_linear_revmap(pmac_pfm_host, 26);
		}
		if (slt2_pen & 0x02) {
			// __do_IRQ(25, regs);
			return irq_linear_revmap(pmac_pfm_host, 25);
		}
		if (slt2_pen & 0x01) {
			// __do_IRQ(24, regs);
			return irq_linear_revmap(pmac_pfm_host, 24);
		}
	}

	if (cnt++ > 10) {
		//printk(KERN_INFO "%s(): handled more than enough\n", __func__);
		return NO_IRQ;
	}

	pfm_check_irq();
	if (slt1_pen || slt2_pen || via2_pen) {
		//printk(KERN_INFO "%s(): still pending IRQs available\n", __func__);
		goto more;
	}

	//printk(KERN_INFO "%s(): done\n", __func__);
	/* TODO: For now just assume that there has been at least a single IRQ */
	return NO_IRQ;
}
static void armctrl_handle_bank(int bank, struct pt_regs *regs)
{
	u32 stat, irq;

	while ((stat = readl_relaxed(intc.pending[bank]))) {
		irq = MAKE_HWIRQ(bank, ffs(stat) - 1);
		handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
	}
}

static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
				    u32 stat)
{
	u32 irq = MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);

	handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
}

static void __exception_irq_entry bcm2835_handle_irq(
	struct pt_regs *regs)
{
	u32 stat, irq;

	while ((stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK)) {
		if (stat & BANK0_HWIRQ_MASK) {
			irq = MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
			handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
		} else if (stat & SHORTCUT1_MASK) {
			armctrl_handle_shortcut(1, regs, stat & SHORTCUT1_MASK);
		} else if (stat & SHORTCUT2_MASK) {
			armctrl_handle_shortcut(2, regs, stat & SHORTCUT2_MASK);
		} else if (stat & BANK1_HWIRQ) {
			armctrl_handle_bank(1, regs);
		} else if (stat & BANK2_HWIRQ) {
			armctrl_handle_bank(2, regs);
		} else {
			BUG();
		}
	}
}
int xilinx_intc_get_irq(void)
{
	void *regs = master_irqhost->host_data;

	pr_debug("get_irq:\n");

	return irq_linear_revmap(master_irqhost, in_be32(regs + XINTC_IVR));
}