/*
 * Level-based IRQ handler.  Nice and simple.
 *
 * Acknowledging a level-triggered source also masks it, so the line
 * stays quiet while the actions run; we unmask on the way out only if
 * the IRQ is still enabled and has not been declared locked up.
 */
void do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();
	struct irqaction *action;

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (unlikely(!desc->enabled))
		return;

	kstat_cpu(cpu).irqs[irq]++;

	action = desc->action;
	if (!action)
		return;		/* no handler: leave the IRQ masked */

	__do_irq(irq, desc->action, regs);

	/*
	 * Re-enable the line unless it was disabled by a handler or
	 * the lockup detector tripped.
	 */
	if (likely(desc->enabled && !check_irq_lock(desc, irq, regs)))
		desc->chip->unmask(irq);
}
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 *
 * Edge interrupts cannot be left masked while being serviced (a masked
 * edge is lost), so re-entry is handled in software: a second edge that
 * arrives while this IRQ is running or disabled is recorded in
 * desc->pending and the line is masked until it can be replayed.
 */
void do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || !desc->enabled))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		/* Return with this interrupt masked if no action. */
		action = desc->action;
		if (!action)
			break;

		/*
		 * A deferred edge was queued (see the "running" path
		 * below, which masked the line): unmask and replay it.
		 */
		if (desc->pending && desc->enabled) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending);	/* loop until no more deferred edges */

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

 running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
/*
 * do_IRQ handles all normal device IRQ's
 *
 * Entry path from the low-level assembly: translate the hardware IRQ
 * number, mask+ack the source under irq_controller_lock, walk the
 * action chain, then conditionally unmask and run softirqs.
 * This variant carries CONFIG_ILATENCY interrupt-latency instrumentation
 * and TRACE_IRQ_* / preempt_lock_* hooks.
 */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqdesc * desc;
	struct irqaction * action;
	int cpu;

#ifdef CONFIG_ILATENCY
	{
		/* latency instrumentation: stamp the start of IRQ overhead */
		extern void interrupt_overhead_start(void);

		interrupt_overhead_start();
	}
#endif /* CONFIG_ILATENCY */

	/* map the raw hardware vector to a logical IRQ number */
	irq = fixup_irq(irq);

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		goto bad_irq;

	/* this is called recursively in some cases, so... */
	if (!in_irq())
		preempt_lock_start(-99);

	desc = irq_desc + irq;

	TRACE_IRQ_ENTRY(irq, !(user_mode(regs)));

	/* mask and acknowledge the source; the controller is shared state */
	spin_lock(&irq_controller_lock);
	desc->mask_ack(irq);
	spin_unlock(&irq_controller_lock);

	cpu = smp_processor_id();
	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;
	desc->triggered = 1;

	/* Return with this interrupt masked if no action */
	action = desc->action;

	if (action) {
		int status = 0;

		/*
		 * Sources flagged "nomask" must not stay masked while
		 * their handlers run; unmask before dispatching.
		 */
		if (desc->nomask) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}

		/* fast handlers (SA_INTERRUPT) run with IRQs disabled */
		if (!(action->flags & SA_INTERRUPT))
			local_irq_enable();

#ifdef CONFIG_ILATENCY
		{
			/* overhead measurement stops once handlers begin */
			extern void interrupt_overhead_stop(void);

			interrupt_overhead_stop();
		}
#endif /* CONFIG_ILATENCY */

		/* walk the chain of shared handlers for this line */
		do {
			status |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);

		/* feed the entropy pool if any handler asked for it */
		if (status & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);

		local_irq_disable();

		/* re-enable the line unless already unmasked or disabled */
		if (!desc->nomask && desc->enabled) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}
	}

	/*
	 * Debug measure - hopefully we can continue if an
	 * IRQ lockup problem occurs...
	 */
	check_irq_lock(desc, irq, regs);

	irq_exit(cpu, irq);

	TRACE_IRQ_EXIT();

	if (!in_irq())
		preempt_lock_stop();

	/* run any softirqs raised by the handlers before returning */
	if (softirq_pending(cpu))
		do_softirq();

#ifdef CONFIG_ILATENCY
	/*
	 * until entry.S gets this call do it here.
	 */
	intr_ret_from_exception();
#endif /* CONFIG_ILATENCY */
	return;

bad_irq:
	/* spurious / out-of-range vector: count it and carry on */
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
	return;
}
/*
 * do_IRQ handles all normal device IRQ's
 *
 * Older variant of the entry path above (NOTE(review): this chunk
 * contains two versions of do_IRQ — presumably from different kernel
 * trees; confirm which one this build actually compiles).  Uses the
 * legacy __sti()/__cli() primitives and the softirq_active/softirq_mask
 * pair instead of local_irq_enable()/softirq_pending().
 */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqdesc * desc;
	struct irqaction * action;
	int cpu;

	/* map the raw hardware vector to a logical IRQ number */
	irq = fixup_irq(irq);

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		goto bad_irq;

	desc = irq_desc + irq;

	/* mask and acknowledge the source; the controller is shared state */
	spin_lock(&irq_controller_lock);
	desc->mask_ack(irq);
	spin_unlock(&irq_controller_lock);

	cpu = smp_processor_id();
	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;
	desc->triggered = 1;

	/* Return with this interrupt masked if no action */
	action = desc->action;

	if (action) {
		int status = 0;

		/*
		 * Sources flagged "nomask" must not stay masked while
		 * their handlers run; unmask before dispatching.
		 */
		if (desc->nomask) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}

		/* fast handlers (SA_INTERRUPT) run with IRQs disabled */
		if (!(action->flags & SA_INTERRUPT))
			__sti();

		/* walk the chain of shared handlers for this line */
		do {
			status |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);

		/* feed the entropy pool if any handler asked for it */
		if (status & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);

		__cli();

		/* re-enable the line unless already unmasked or disabled */
		if (!desc->nomask && desc->enabled) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}
	}

	/*
	 * Debug measure - hopefully we can continue if an
	 * IRQ lockup problem occurs...
	 */
	check_irq_lock(desc, irq, regs);

	irq_exit(cpu, irq);

	/* run any softirqs raised by the handlers before returning */
	if (softirq_active(cpu) & softirq_mask(cpu))
		do_softirq();
	return;

bad_irq:
	/* spurious / out-of-range vector: count it and carry on */
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
	return;
}