/*
 * Chained handler for the ATH79 MISC interrupt controller: dispatch
 * every source that is both asserted and unmasked to its Linux IRQ.
 */
static void ath79_misc_irq_handler(struct irq_desc *desc)
{
	void __iomem *base = ath79_reset_base;
	u32 status;

	/* Only sources that are pending AND enabled are serviced. */
	status = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
		 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);

	if (status == 0) {
		spurious_interrupt();
		return;
	}

	do {
		int src = __ffs(status);

		generic_handle_irq(ATH79_MISC_IRQ(src));
		status &= ~BIT(src);
	} while (status);
}
/*
 * Chained handler for VIA1: scan the seven device sources (IFR bits
 * 0-6) that are both flagged and enabled, acking each one in IFR
 * before dispatching its Linux IRQ.
 */
void via1_irq(unsigned int irq, struct irq_desc *desc)
{
	int nr;
	unsigned char bit, active;

	active = via1[vIFR] & via1[vIER] & 0x7F;
	if (!active)
		return;

	for (nr = VIA1_SOURCE_BASE, bit = 1; active >= bit; ++nr, bit <<= 1) {
		if (active & bit) {
			/* Writing the bit back to IFR clears it. */
			via1[vIFR] = bit;
			generic_handle_irq(nr);
		}
	}
}
/* Common interrupt demultiplexer used by Asp, Lasi & Wax. */ irqreturn_t gsc_asic_intr(int gsc_asic_irq, void *dev) { unsigned long irr; struct gsc_asic *gsc_asic = dev; irr = gsc_readl(gsc_asic->hpa + OFFSET_IRR); if (irr == 0) return IRQ_NONE; DEBPRINTK("%s intr, mask=0x%x\n", gsc_asic->name, irr); do { int local_irq = __ffs(irr); unsigned int irq = gsc_asic->global_irq[local_irq]; generic_handle_irq(irq); irr &= ~(1 << local_irq); } while (irr); return IRQ_HANDLED; }
/*
 * Chained handler for PDC peripheral interrupts: translate the host
 * IRQ back to its peripheral index and forward to the mapped virq.
 */
static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc)
{
	struct pdc_intc_priv *priv = irq_desc_get_handler_data(desc);
	unsigned int i;

	/* Find which peripheral this host IRQ belongs to. */
	for (i = 0; i < priv->nr_perips; ++i) {
		if (priv->perip_irqs[i] == irq) {
			/* Pass the interrupt on to the virtual IRQ. */
			generic_handle_irq(irq_linear_revmap(priv->domain, i));
			return;
		}
	}
	/* Unknown source: should never get here; silently drop. */
}
/*
 * Chained handler for VIA2: scan the seven device sources (IFR bits
 * 0-6) that are both flagged and enabled, acking each one in IFR
 * before dispatching its Linux IRQ.
 */
static void via2_irq(struct irq_desc *desc)
{
	int nr;
	unsigned char bit, active;

	active = via2[gIFR] & via2[gIER] & 0x7F;
	if (!active)
		return;

	for (nr = VIA2_SOURCE_BASE, bit = 1; active >= bit; ++nr, bit <<= 1) {
		if (active & bit) {
			/* Ack in IFR; rbv_clear is required on RBV hardware. */
			via2[gIFR] = bit | rbv_clear;
			generic_handle_irq(nr);
		}
	}
}
/*
 * Common Nomadik GPIO demux: dispatch every bit set in @status to the
 * linearly-mapped IRQ range starting at the chip's first GPIO IRQ,
 * with proper chained-IRQ bracketing around the dispatch.
 */
static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc, u32 status)
{
	struct nmk_gpio_chip *nmk_chip;
	struct irq_chip *host_chip = irq_get_chip(irq);
	unsigned int first_irq;

	chained_irq_enter(host_chip, desc);

	nmk_chip = irq_get_handler_data(irq);
	first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
	/* status &= status - 1 strips the lowest set bit each pass. */
	for (; status; status &= status - 1)
		generic_handle_irq(first_irq + __ffs(status));

	chained_irq_exit(host_chip, desc);
}
/* exynos_irq_demux_eint
 *
 * This function demuxes the IRQ from EINTs 16 to 31.  It is designed
 * to be inlined into the specific handler s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask register handles eight of them.
 *
 * Returns the number of interrupts dispatched.
 */
static inline u32 exynos_irq_demux_eint(unsigned int start)
{
	u32 pend = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
	u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
	u32 handled = 0;
	unsigned int bit;

	/* Keep only unmasked sources within this 8-bit bank. */
	pend &= ~mask;
	pend &= 0xff;

	while (pend) {
		bit = fls(pend) - 1;	/* highest pending first, as before */
		generic_handle_irq(bit + start);
		pend &= ~(1 << bit);
		++handled;
	}

	return handled;
}
/*
 * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/*
	 * Some hardware raises bogus interrupt numbers; complain
	 * (rate-limited) and ack them rather than crash.
	 */
	if (likely(irq < nr_irqs)) {
		generic_handle_irq(irq);
	} else {
		pr_warn_ratelimited("Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * Common Nomadik GPIO demux (irq_domain variant): dispatch every bit
 * set in @status relative to the chip's legacy-mapped first IRQ, with
 * proper chained-IRQ bracketing around the dispatch.
 */
static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc, u32 status)
{
	struct irq_chip *host_chip = irq_get_chip(irq);
	struct nmk_gpio_chip *nmk_chip;
	unsigned int base_irq;

	chained_irq_enter(host_chip, desc);

	nmk_chip = irq_get_handler_data(irq);
	base_irq = nmk_chip->domain->revmap_data.legacy.first_irq;
	/* status &= status - 1 strips the lowest set bit each pass. */
	for (; status; status &= status - 1)
		generic_handle_irq(base_irq + __ffs(status));

	chained_irq_exit(host_chip, desc);
}
/*
 * Chained demux for the Glamo chip's single upstream IRQ line, written
 * against the old pre-genirq-cleanup desc->status / desc->chip API.
 * It mirrors handle_level_irq's bookkeeping by hand: mark the desc
 * IN PROGRESS, ack the parent, then loop dispatching the nine Glamo
 * sub-sources until no new parent interrupt became pending meanwhile.
 */
static void glamo_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct glamo_core *glamo = get_irq_desc_chip_data(desc);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/* Re-entered while already being handled: record it as pending,
	 * mask + ack the line, and let the first invocation replay it. */
	if (unlikely(desc->status & IRQ_INPROGRESS)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		return;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	desc->chip->ack(irq);
	desc->status |= IRQ_INPROGRESS;

	do {
		uint16_t irqstatus;
		int i;

		if (unlikely((desc->status & (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == (IRQ_PENDING | IRQ_MASKED))) {
			/* dealing with pending IRQ, unmasking */
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}

		desc->status &= ~IRQ_PENDING;

		/* read IRQ status register */
		irqstatus = __reg_read(glamo, GLAMO_REG_IRQ_STATUS);
		for (i = 0; i < 9; ++i) {
			if (irqstatus & BIT(i))
				generic_handle_irq(glamo->irq_base + i);
		}
		/* loop again if the parent re-fired while we were handling */
	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
}
/*
 * Chained handler for IOP13XX MSIs: scan each IMIPR bank, and for
 * every asserted bit write it back (which clears it in hardware) and
 * dispatch the matching MSI IRQ.  Each bank is re-read until drained.
 */
static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc)
{
	int bank, bit;
	unsigned long pending;

	for (bank = 0; bank < ARRAY_SIZE(read_imipr); bank++) {
		pending = (read_imipr[bank])();
		while (pending) {
			bit = find_first_bit(&pending, 32);
			/* write back to clear bit */
			(write_imipr[bank])(1 << bit);
			generic_handle_irq(IRQ_IOP13XX_MSI_0 + bit + (32 * bank));
			pending = (read_imipr[bank])();
		}
	}
}
/*
 * Demux the three sub-interrupts (rx/tx/err) of one S3C2440 UART.
 * The parent IRQ is masked and acked around the dispatch; this UART's
 * sub-source bits start at (irq - IRQ_UART0) * 3 within SUBSRCPND.
 */
static void s3c2440_uart_demux(unsigned int irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	unsigned long pending;
	int base, bit;

	s3c2440_irq_mask(data);
	s3c2440_irq_ack(data);

	pending = ioread32(REG_INT(SUBSRCPND));
	base = (irq - IRQ_UART0) * 3;

	for (bit = 0; bit < 3; bit++) {
		if (pending & (1 << (base + bit)))
			generic_handle_irq(IRQ_RXD0 + base + bit);
	}

	s3c2440_irq_unmask(data);
}
static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc) { u32 pending = ar5312_rst_reg_read(AR5312_ISR) & ar5312_rst_reg_read(AR5312_IMR); unsigned nr, misc_irq = 0; if (pending) { struct irq_domain *domain = irq_get_handler_data(irq); nr = __ffs(pending); misc_irq = irq_find_mapping(domain, nr); } if (misc_irq) { generic_handle_irq(misc_irq); if (nr == AR5312_MISC_IRQ_TIMER) ar5312_rst_reg_read(AR5312_TIMER); } else { spurious_interrupt(); } }
/*
 * Top-level hardware IRQ dispatch: keep draining get_irq() until the
 * controller reports no more pending interrupts (-1U).
 */
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	irq_enter();

	irq = get_irq(regs);
	/* The first fetch must succeed: we only get here on a real IRQ. */
	BUG_ON(irq == -1U);

	do {
		generic_handle_irq(irq);
		irq = get_irq(regs);
		if (irq != -1U) {
			pr_debug("next irq: %d\n", irq);
			++concurrent_irq;
		}
	} while (irq != -1U);

	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * asm_do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 * pr_warn_ratelimited() replaces the deprecated
	 * printk_ratelimit() + printk(KERN_WARNING ...) pattern and
	 * rate-limits per call site instead of globally.
	 */
	if (unlikely(irq >= nr_irqs)) {
		pr_warn_ratelimited("Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * Replay GPIO wakeup interrupts recorded in shared memory while the
 * processor was asleep.  Runs with local IRQs disabled so the replayed
 * handlers execute in a hard-IRQ-like context.
 */
static void msm_gpio_sleep_int(unsigned long arg)
{
	struct tramp_gpio_smem *smem_gpio;
	int bank, n;

	BUILD_BUG_ON(NR_GPIO_IRQS > NUM_GPIO_SMEM_BANKS * 32);

	smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio));
	if (!smem_gpio)
		return;

	local_irq_disable();
	for (bank = 0; bank < GPIO_SMEM_NUM_GROUPS; bank++) {
		int fired = smem_gpio->num_fired[bank];

		for (n = 0; n < fired; n++) {
			/* TODO: Check mask */
			generic_handle_irq(MSM_GPIO_TO_INT(smem_gpio->fired[bank][n]));
		}
	}
	local_irq_enable();
}
/*
 * Chained demux for SA-1100 GPIOs 11..27, which share one parent IRQ.
 * GEDR is re-read after each pass since handling an interrupt may
 * latch new edges; writing the mask back acks the serviced edges.
 */
static void sa1100_high_gpio_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned int pending;

	pending = GEDR & 0xfffff800;
	do {
		unsigned int bits, virq;

		GEDR = pending;		/* ack (write-1-to-clear) */

		virq = IRQ_GPIO11;
		bits = pending >> 11;
		do {
			if (bits & 1)
				generic_handle_irq(virq);
			bits >>= 1;
			virq++;
		} while (bits);

		pending = GEDR & 0xfffff800;
	} while (pending);
}
/* IRQ handler - redirect interrupts to virtual irq chip */ static irqreturn_t sunxi_gpio_irq_handler(int irq, void *devid) { __u32 status = 0; int i = 0; struct sunxi_gpio_chip *sgpio = devid; status = readl(sgpio->gaddr + PIO_INT_STAT_OFFSET); for (i = 0; i < EINT_NUM; i++) { if ((status & (1 << i)) && (gpio_eint_list[i].gpio >= 0)) { status &= ~(1 << i); SUNXI_CLEAR_EINT(sgpio->gaddr, i); generic_handle_irq(sgpio->irq_base + i); } } if (status) return IRQ_NONE; return IRQ_HANDLED; }
/** * amdgpu_irq_dispatch - dispatch irq to IP blocks * * @adev: amdgpu device pointer * @entry: interrupt vector * * Dispatches the irq to the different IP blocks */ void amdgpu_irq_dispatch(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { unsigned client_id = entry->client_id; unsigned src_id = entry->src_id; struct amdgpu_irq_src *src; int r; trace_amdgpu_iv(entry); if (client_id >= AMDGPU_IH_CLIENTID_MAX) { DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); return; } if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); return; } if (adev->irq.virq[src_id]) { generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id)); } else { if (!adev->irq.client[client_id].sources) { DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", client_id, src_id); return; } src = adev->irq.client[client_id].sources[src_id]; if (!src) { DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); return; } r = src->funcs->process(adev, src, entry); if (r) DRM_ERROR("error processing interrupt (%d)\n", r); } }
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * The vector number arrives one's-complemented in orig_rax (the entry
 * stub stores ~vector; the high bit distinguishes it from a syscall
 * number) and is translated to a Linux irq through the per-cpu
 * vector_irq[] table.  The irq_show_regs_callback/trace_special calls
 * and the CONFIG_EVENT_TRACE block are latency-tracing hooks.
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_rax;
	unsigned irq;

	irq_show_regs_callback(smp_processor_id(), regs);

	exit_idle();
	irq_enter();

	irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_EVENT_TRACE
	if (irq == trace_user_trigger_irq)
		user_trace_start();
#endif
	trace_special(regs->rip, irq, 0);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	stack_overflow_check(regs);
#endif

	if (likely(irq < NR_IRQS))
		generic_handle_irq(irq);
	else {
		/* Vector with no mapped irq: ack the APIC and complain. */
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
				__func__, smp_processor_id(), vector);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
/*
 * Demux the single XScale PCI interrupt line into PCI_INTA / PCI_INTB.
 *
 * TODO: Should this just be done at ASM level?
 */
static void pci_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned int child;
	u32 status = *IXP23XX_PCI_XSCALE_INT_STATUS;

	desc->irq_data.chip->irq_ack(&desc->irq_data);

	/* See which of PCI_INTA / PCI_INTB interrupted. */
	if (status & (1 << 26))
		child = IRQ_IXP23XX_INTB;
	else if (status & (1 << 27))
		child = IRQ_IXP23XX_INTA;
	else
		BUG();

	generic_handle_irq(child);

	desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
/*
 * handle_IRQ handles all hardware IRQ's; decoded IRQs should provide
 * their own handler instead of coming through here.  Dispatch is
 * bracketed by perf-monitor accounting and followed by the
 * machine-specific irq_finish() hook.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	perf_mon_interrupt_in();
	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts; warn
	 * (rate-limited) and ack rather than crash.
	 * pr_warn_ratelimited() replaces the deprecated
	 * printk_ratelimit() + printk(KERN_WARNING ...) pattern.
	 */
	if (unlikely(irq >= nr_irqs)) {
		pr_warn_ratelimited("Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	irq_finish(irq);
	irq_exit();
	set_irq_regs(old_regs);
	perf_mon_interrupt_out();
}
static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) { int i, j, mask; unsigned val; for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) { struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i]; val = __raw_readl(msm_chip->regs.int_status); val &= msm_chip->int_enable[0]; while (val) { mask = val & -val; j = fls(mask) - 1; /* printk("%s %08x %08x bit %d gpio %d irq %d\n", __func__, v, m, j, msm_chip->chip.start + j, FIRST_GPIO_IRQ + msm_chip->chip.start + j); */ val &= ~mask; generic_handle_irq(FIRST_GPIO_IRQ + msm_chip->chip.base + j); } } desc->chip->ack(irq); }
/* Although we have two interrupt lines for the timers, we only have one
 * status register which clears all pending timer interrupts on reading.
 * So we have to handle all timer interrupts in one place.
 */
static void h7202_timerx_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
{
	unsigned int pending, timer_irq;

	pending = CPU_REG(TIMER_VIRT, TIMER_TOPSTAT);

	/* Timer 0 drives the system tick and is handled inline. */
	if (pending & TSTAT_T0INT) {
		timer_tick();
		if (pending == TSTAT_T0INT)
			return;
	}

	/* Fan the remaining timer sources out to their own IRQs. */
	for (pending >>= 1, timer_irq = IRQ_TIMER1; pending;
	     pending >>= 1, timer_irq++) {
		if (pending & 1)
			generic_handle_irq(timer_irq);
	}
}
/*
 * Chained dispatch for one megamodule combiner output: drain the
 * event-flag register for this combiner index, clearing each event in
 * hardware before handling it so new edges are not lost.
 */
static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct megamod_cascade_data *cascade = irq_desc_get_handler_data(desc);
	struct megamod_pic *pic = cascade->pic;
	int idx = cascade->index;
	u32 events;

	while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
		int n = __ffs(events);

		/* Ack in hardware first, then dispatch the mapped virq. */
		soc_writel(1 << n, &pic->regs->evtclr[idx]);
		generic_handle_irq(irq_linear_revmap(pic->irqhost,
						     idx * 32 + n));
	}
}
/*
 * Generic sub-source demux: extract the @len SUBSRCPND bits belonging
 * to parent @irq (filtered through INTSUBMSK) and dispatch each set
 * bit to its own virq, numbered consecutively from @irq.
 */
static inline void s3c2443_irq_demux(unsigned int irq, unsigned int len)
{
	unsigned int pending, msk;
	unsigned int last = len + irq;

	pending = __raw_readl(S3C2410_SUBSRCPND);
	msk = __raw_readl(S3C2410_INTSUBMSK);
	pending &= ~msk;

	/* Shift this parent's bits down to bit 0 and clip to @len. */
	pending >>= (irq - S3C2410_IRQSUB(0));
	pending &= (1 << len) - 1;

	for (; irq < last && pending; irq++, pending >>= 1) {
		if (pending & 1)
			generic_handle_irq(irq);
	}
}
/**
 * @brief: system module irq handler
 *
 * @author: caolianming
 * @param [in] irq: irq number
 * @param [in] *desc: irq info description
 *
 * ANDs the 11 sysctrl mask bits with the 11 status bits and dispatches
 * a virq for every source that is both enabled and pending.
 */
static void ak39_sysctrl_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long enabled, status, pending;
	unsigned int bit;

	enabled = __raw_readl(AK_SYSCTRL_INT_MASK);
	status = __raw_readl(AK_SYSCTRL_INT_STATUS);
	pending = (enabled & 0x7FF) & (status & 0x7FF);

	for (bit = 0; pending && bit < 11; bit++) {
		if (!(pending & (1 << bit)))
			continue;
		pending &= ~(1 << bit);
		generic_handle_irq(AK39_SYSCTRL_IRQ(bit));
	}
}
static void gpio_irq_handler(struct irq_desc *desc) { unsigned int irq = irq_desc_get_irq(desc); struct davinci_gpio_regs __iomem *g; u32 mask = 0xffff; struct davinci_gpio_controller *d; d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc); g = (struct davinci_gpio_regs __iomem *)d->regs; /* we only care about one bank */ if (irq & 1) mask <<= 16; /* temporarily mask (level sensitive) parent IRQ */ chained_irq_enter(irq_desc_get_chip(desc), desc); while (1) { u32 status; int bit; /* ack any irqs */ status = readl_relaxed(&g->intstat) & mask; if (!status) break; writel_relaxed(status, &g->intstat); /* now demux them to the right lowlevel handler */ while (status) { bit = __ffs(status); status &= ~BIT(bit); generic_handle_irq( irq_find_mapping(d->irq_domain, d->chip.base + bit)); } } chained_irq_exit(irq_desc_get_chip(desc), desc); /* now it may re-trigger */ }
/*
 * Chained demux for the Balloon3 FPGA interrupt register.  The parent
 * edge is acked up front, every enabled pending bit is dispatched, and
 * the register is re-read in case new bits latched while handling.
 */
static void balloon3_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending;

	pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
		  balloon3_irq_enabled;
	do {
		/* clear useless edge notification */
		if (desc->irq_data.chip->irq_ack) {
			struct irq_data *d = irq_get_irq_data(BALLOON3_AUX_NIRQ);

			desc->irq_data.chip->irq_ack(d);
		}

		/* pending &= pending - 1 strips the lowest set bit. */
		for (; pending; pending &= pending - 1)
			generic_handle_irq(BALLOON3_IRQ(0) + __ffs(pending));

		pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
			  balloon3_irq_enabled;
	} while (pending);
}
/*Sapphire has only one INT Bank.*/ static void sapphire_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) { int j; unsigned v; int int_base = SAPPHIRE_INT_START; v = readb(SAPPHIRE_CPLD_INT_STATUS); /*INT1 status reg, BANK0*/ /*printk(KERN_INFO "sapphire_gpio_irq_handler irq %d, 0x%x, 0x%x\r\n", irq, SAPPHIRE_CPLD_INT_STATUS, v); printk("sapphire_gpio_irq_handler:irq=%d, l=0x%x\r\n", irq, readb(SAPPHIRE_CPLD_INT_LEVEL));*/ for(j = 0; j < 8 ; j ++) /*8 bit per bank*/ { if(v & (1U << j)) /*got the INT Bit*/ { DBG("generic_handle_irq j=0x%x\r\n", j); generic_handle_irq(int_base + j); } } desc->chip->ack(irq); /*clear CPLD INT in SOC side.*/ DBG("irq=%d, l=0x%x\r\n", irq, readb(SAPPHIRE_CPLD_INT_LEVEL)); }