/*
 * Dispatch one hardware interrupt.
 *
 * We ack quickly, we don't want the irq controller thinking we're
 * snobs just because some other CPU has disabled global interrupts
 * (we have already done the INT_ACK cycles, it's too late to try to
 * pretend to the controller that we aren't taking the interrupt).
 *
 * A 0 return value from the handler means that this irq is already
 * being handled by some other CPU (or is disabled).
 */
void handle_irq(int irq)
{
	/* Rate-limit complaints so a screaming device cannot flood the log. */
	static unsigned int illegal_count = 0;

	if ((unsigned) irq > ACTUAL_NR_IRQS) {
		/*
		 * BUGFIX: the original gated the whole range check on
		 * illegal_count < MAX_ILLEGAL_IRQS, so once the limit was
		 * reached, bogus irq numbers fell through to __do_IRQ()
		 * and indexed past the descriptor table.  Invalid irqs are
		 * now always rejected; only the diagnostics are limited.
		 */
		if (illegal_count < MAX_ILLEGAL_IRQS) {
			irq_err_count++;
			illegal_count++;
			printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
			       irq);
		}
		return;
	}

	irq_enter();
	/*
	 * __do_IRQ() must be called with IPL_MAX. Note that we do not
	 * explicitly enable interrupts afterwards - some MILO PALcode
	 * (namely LX164 one) seems to have severe problems with RTI
	 * at IPL 0.
	 */
	local_irq_disable();
	__do_IRQ(irq);
	irq_exit();
}
/*
 * Cascade handler: drain every pending CPM2 interrupt and hand each
 * one to the generic dispatcher.  Always reports the parent handled.
 */
static irqreturn_t cpm2_cascade(int irq, void *dev_id, struct pt_regs *regs)
{
	int cascade_irq;

	for (;;) {
		cascade_irq = cpm2_get_irq(regs);
		if (cascade_irq < 0)
			break;
		__do_IRQ(cascade_irq, regs);
	}

	return IRQ_HANDLED;
}
/*
 * Cascade handler: drain every pending CPM2 interrupt and hand each
 * one to the generic dispatcher.  Always reports the parent handled.
 */
static irqreturn_t cpm2_cascade(int irq, void *dev_id)
{
	int cascade_irq;

	for (;;) {
		cascade_irq = cpm2_get_irq();
		if (cascade_irq < 0)
			break;
		__do_IRQ(cascade_irq);
	}

	return IRQ_HANDLED;
}
/*
 * Top-level interrupt entry: publish the trapped register state via
 * set_irq_regs(), run the generic dispatcher inside an
 * irq_enter()/irq_exit() section, then restore the previous per-CPU
 * pt_regs pointer (handles nested interrupts correctly).
 */
asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
{
	struct pt_regs *prev_regs = set_irq_regs(regs);

	irq_enter();
	__do_IRQ(irq);
	irq_exit();

	set_irq_regs(prev_regs);
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	irq_enter();
	/* MIPS SMT/SMTC bookkeeping hook; runs before generic dispatch. */
	__DO_IRQ_SMTC_HOOK();
	__do_IRQ(irq, regs);
	irq_exit();

	/* Return value is fixed; callers in low-level entry code ignore
	 * the distinction here. */
	return 1;
}
/*
 * Cascade handler: with cpm2_lock held (and local interrupts off),
 * drain every pending CPM2 interrupt and dispatch each one.
 */
static irqreturn_t cpm2_cascade(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;
	int cascade_irq;

	spin_lock_irqsave(&cpm2_lock, flags);
	for (;;) {
		cascade_irq = cpm2_get_irq(regs);
		if (cascade_irq < 0)
			break;
		__do_IRQ(cascade_irq, regs);
	}
	spin_unlock_irqrestore(&cpm2_lock, flags);

	return IRQ_HANDLED;
}
/*
 * Top-level interrupt dispatch with Linux Trace Toolkit (LTT)
 * instrumentation wrapped around the generic handler.
 */
asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	/* Trace irq entry, recording whether kernel mode was interrupted. */
	ltt_ev_irq_entry(irq, !user_mode(regs));
	irq_enter();
	__do_IRQ((irq), (regs));
	/* NOTE(review): exit is traced before irq_exit(), presumably so
	 * softirq work run there is not attributed to the hard irq -
	 * confirm against the LTT documentation. */
	ltt_ev_irq_exit();
	irq_exit();
	return 1;
}
/*
 * Top-level interrupt dispatch with LTT tracing and interrupt-latency
 * measurement hooks wrapped around the generic handler.
 */
asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	/* Trace irq entry, recording whether kernel mode was interrupted. */
	ltt_ev_irq_entry(irq, !user_mode(regs));
	/* Start the interrupt-overhead timestamp before any dispatch work. */
	interrupt_overhead_start();
	irq_enter();
	__do_IRQ(irq, regs);
	/* NOTE(review): exit is traced before irq_exit(), presumably so
	 * softirq work run there is not attributed to the hard irq -
	 * confirm against the LTT documentation. */
	ltt_ev_irq_exit();
	irq_exit();
	/* Latency check runs last, after softirqs have been serviced. */
	latency_check();
	return 1;
}
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	struct pt_regs *prev_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* FIXME M32R */
#endif
	__do_IRQ(irq);
	irq_exit();

	set_irq_regs(prev_regs);
	return 1;
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	/*
	 * Local CPU interrupt is disabled
	 */
	irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/*FIXME: trimedia*/
#endif
	/* Mask this line at the controller while its handler runs so it
	 * cannot re-trigger, then unmask it once dispatch is complete. */
	trimedia_irq_disable(irq);
	__do_IRQ(irq, regs);
	trimedia_irq_enable(irq);
	irq_exit();
	return 1;
}
/*
 * Demultiplex PCI interrupts: re-read status until no unmasked PCI
 * interrupt (bits 28-31 of the status register) remains pending, and
 * dispatch a virtual irq for every pending bit.
 */
static void m82xx_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	for (;;) {
		unsigned long status = *pci_regs.pci_int_stat_reg;
		unsigned long enabled = ~*pci_regs.pci_int_mask_reg;
		unsigned long pending = status & enabled & 0xf0000000;
		int bit = 0;

		if (!pending)
			break;

		/* Scan from the MSB down: bit 31 maps to local line 0. */
		while (pending != 0) {
			if (pending & 0x80000000)
				__do_IRQ(pci_int_base + bit);
			++bit;
			pending <<= 1;
		}
	}
}
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	/* Pending external interrupts (EIRR, control reg 23), filtered by
	 * the global enable mask and this CPU's local ack mask. */
	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	/* If this irq's affinity excludes the current CPU, forward it by
	 * writing the irq number into the target CPU's interrupt register
	 * at its HPA, then bail out without handling it here. */
	cpumask_copy(&dest, irq_desc[irq].affinity);
	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE, per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	__do_IRQ(irq);

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

	/* Nothing to handle (or irq forwarded): re-arm the external
	 * interrupt enable mask before leaving. */
 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
/* Common interrupt demultiplexer used by Asp, Lasi & Wax. */ irqreturn_t gsc_asic_intr(int gsc_asic_irq, void *dev) { unsigned long irr; struct gsc_asic *gsc_asic = dev; irr = gsc_readl(gsc_asic->hpa + OFFSET_IRR); if (irr == 0) return IRQ_NONE; DEBPRINTK("%s intr, mask=0x%x\n", gsc_asic->name, irr); do { int local_irq = __ffs(irr); unsigned int irq = gsc_asic->global_irq[local_irq]; __do_IRQ(irq); irr &= ~(1 << local_irq); } while (irr); return IRQ_HANDLED; }
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	irq_enter();
#ifdef CONFIG_REALTEK_SCHED_LOG
	/* Scheduler-log instrumentation, gated by bit 0 of the runtime
	 * flag, brackets the generic dispatch. */
	if (sched_log_flag & 0x1)
		log_intr_enter(irq);
#endif
	__do_IRQ(irq, regs);
#ifdef CONFIG_REALTEK_SCHED_LOG
	if (sched_log_flag & 0x1)
		log_intr_exit(irq);
#endif
	irq_exit();
	return 1;
}
static irqreturn_t sbc82xx_i8259_demux(int irq, void *dev_id, struct pt_regs *regs) { spin_lock(&sbc82xx_i8259_lock); sbc82xx_i8259_map[0] = 0x0c; /* OCW3: Read IR register on RD# pulse */ irq = sbc82xx_i8259_map[0] & 7; /* Read IRR */ if (irq == 7) { /* Possible spurious interrupt */ int isr; sbc82xx_i8259_map[0] = 0x0b; /* OCW3: Read IS register on RD# pulse */ isr = sbc82xx_i8259_map[0]; /* Read ISR */ if (!(isr & 0x80)) { printk(KERN_INFO "Spurious i8259 interrupt\n"); return IRQ_HANDLED; } } __do_IRQ(NR_SIU_INTS + irq, regs); return IRQ_HANDLED; }
/*
 * Minimal top-level interrupt entry: bracket the generic dispatcher
 * with irq_enter()/irq_exit() so preempt counting and pending-softirq
 * processing happen at the right points.
 */
asmlinkage void do_IRQ(int irq)
{
	irq_enter();
	__do_IRQ(irq);
	irq_exit();
}