/*
 * The guts of the apic timer interrupt
 */
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * Normally we should not be here till LAPIC has been initialized, but
	 * in some cases, like kdump, it's possible that a LAPIC timer
	 * interrupt left pending by the previous kernel's context is
	 * delivered in the new kernel the moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and LAPIC is set up much later, hence
	 * it's possible that when we get here evt->event_handler is NULL.
	 * Check for event_handler being NULL and discard the interrupt as
	 * spurious.
	 */
	if (!evt->event_handler) {
		printk(KERN_WARNING
		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		/* Switch it off */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	evt->event_handler(evt);
}
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();

	evt->event_handler(evt);

	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * Reschedule callback. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_resched_count++;
#else
	add_pda(irq_resched_count, 1);
#endif

	return IRQ_HANDLED;
}
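/*
 * Context for the #ifdef split above, as a hedged sketch rather than the
 * kernel's actual macro: on 32-bit, per-CPU stats sit in irq_stat, while
 * x86-64 of this era kept them in a per-CPU "PDA" reached through %gs, so
 * add_pda() compiled down to a single %gs-relative add that is
 * interrupt-safe without a lock. Everything below (struct pda_sketch,
 * add_pda_sketch) is an illustrative stand-in, not code from this source;
 * usage would look like add_pda_sketch(irq_resched_count, 1).
 */
#include <stddef.h>		/* offsetof */

struct pda_sketch {
	unsigned long apic_timer_irqs;
	unsigned long __nmi_count;
	unsigned long irq_resched_count;
	unsigned long irq_call_count;
	/* ... one slot per counter updated by the handlers here ... */
};

#define add_pda_sketch(field, val)					\
	asm volatile("addq %0, %%gs:%c1"				\
		     : /* no outputs: the PDA is reached only via %gs */ \
		     : "ri" ((unsigned long)(val)),			\
		       "i" (offsetof(struct pda_sketch, field))		\
		     : "memory")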
asmlinkage void do_nmi(struct pt_regs *regs, long error_code)
{
	int cpu = safe_smp_processor_id();

	nmi_enter();
	add_pda(__nmi_count, 1);
	if (!nmi_callback(regs, cpu))
		default_do_nmi(regs);
	nmi_exit();
}
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	return IRQ_HANDLED;
}
asmlinkage void smp_thermal_interrupt(void)
{
	__u64 msr_val;

	ack_APIC_irq();

	exit_idle();
	irq_enter();

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	if (therm_throt_process(msr_val & 1))
		mce_log_therm_throt_event(smp_processor_id(), msr_val);

	add_pda(irq_thermal_count, 1);

	irq_exit();
}
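/*
 * The (msr_val & 1) test above reads bit 0 of IA32_THERM_STATUS, which the
 * Intel SDM defines as the live thermal-throttle status flag (1 while the
 * core is actively being throttled). The macro and helper below are
 * illustrative names, not from this source.
 */
#define THERM_STATUS_THROTTLE_ACTIVE	(1ULL << 0)

static inline int thermal_throttle_active(__u64 msr_val)
{
	return (msr_val & THERM_STATUS_THROTTLE_ACTIVE) != 0;
}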
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

#ifdef CONFIG_X86_32
	{ int cpu; cpu = smp_processor_id(); ++nmi_count(cpu); }
#else
	add_pda(__nmi_count, 1);
#endif

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}
/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;

	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one. Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	add_pda(irq_spurious_count, 1);
	irq_exit();
}
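/*
 * A hedged unpacking of the APIC_ISR arithmetic above (the helper name is
 * hypothetical). The in-service register is a 256-bit bitmap split across
 * eight 32-bit APIC registers spaced 0x10 apart starting at APIC_ISR, each
 * covering 32 vectors; a vector's register offset is therefore
 * (vector / 32) * 0x10, which is exactly ((vector & ~0x1f) >> 1), and its
 * bit position within that register is vector % 32.
 */
static int apic_vector_in_service(unsigned int vector)
{
	/* (vector & ~0x1f) >> 1 == (vector / 32) * 0x10 */
	unsigned int v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));

	return (v >> (vector & 0x1f)) & 1;
}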
void smp_send_timer_broadcast_ipi(void)
{
	int cpu = smp_processor_id();
	cpumask_t mask;

	cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		add_pda(apic_timer_irqs, 1);
		smp_local_timer_interrupt();
	}
	if (!cpus_empty(mask)) {
		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
	}
}
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	irq_enter();
	smp_local_timer_interrupt(regs);
	irq_exit();
}
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}
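/*
 * Sketch of the sending side this decoding pairs with, modeled loosely on
 * this era's flush_tlb_others() (the exact locking and the
 * NUM_INVALIDATE_TLB_VECTORS bookkeeping are simplified assumptions, not
 * quoted source). Each initiating CPU claims one of several
 * INVALIDATE_TLB_VECTOR_START + N vectors and fills the matching per-CPU
 * flush_state slot, so the receiver above can recover the slot index from
 * nothing but the vector number in orig_ax.
 */
static void flush_tlb_others_sketch(cpumask_t cpumask, struct mm_struct *mm,
				    unsigned long va)
{
	int sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	union smp_flush_state *f = &per_cpu(flush_state, sender);

	spin_lock(&f->tlbstate_lock);	/* one request per slot at a time */
	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/* The receiver decodes "sender" back out of the vector number. */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	/* Each target clears itself from the mask once it has flushed. */
	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}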