notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
	unsigned int sum, touched = 0;
	int cpu = smp_processor_id();

	clear_softint(1 << irq);
	pcr_ops->write(PCR_PIC_PRIV);

	local_cpu_data().__nmi_count++;

	if (notify_die(DIE_NMI, "nmi", regs, 0,
		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
		touched = 1;

	sum = kstat_irqs_cpu(0, cpu);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_usable) {
		write_pic(picl_value(nmi_hz));
		pcr_ops->write(pcr_enable);
	}
}
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1); /* Always panic here */
	return 0;
}
void __kprobes nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum;
	int touched = 0;

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				local_set(&__get_cpu_var(alert_counter), 0);
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
		}
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
			/*
			 * For Intel based architectural perfmon
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	}
}
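/*
 * Illustrative sketch only (not part of the original source): why the
 * wrmsrl() above reloads the performance counter with a negative value.
 * With an event that counts roughly once per CPU cycle, preloading the
 * counter with -(cycles_per_second / nmi_hz) makes it overflow, and thus
 * raise the watchdog NMI, about nmi_hz times per second.  The helper name
 * below is hypothetical.
 */
static inline u64 watchdog_perfctr_reload(unsigned int cpu_khz,
					  unsigned int nmi_hz)
{
	/* cpu_khz * 1000 is the CPU clock in cycles per second */
	return 0 - ((u64)cpu_khz * 1000 / nmi_hz);
}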
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(buf, regs);
	}
	return 0;
}
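/*
 * Context sketch (an assumption, not taken from this file): get_nmi_reason()
 * used above conventionally reads the NMI status bits from system control
 * port B (I/O port 0x61), where bit 7 reports a memory parity/SERR error and
 * bit 6 an I/O channel check.  That is why the variant above only reports an
 * "unknown reason" NMI when (reason & 0xc0) is clear.  The suffixed name is
 * hypothetical, to avoid clashing with the real helper.
 */
static inline unsigned char get_nmi_reason_sketch(void)
{
	return inb(0x61);	/* NMI reason bits live in system control port B */
}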
void nmi_watchdog_tick(struct pt_regs *regs)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	int sum, cpu = smp_processor_id();

	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi(regs, "NMI Watchdog detected LOCKUP");
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		} else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
			/*
			 * Only P6-based Pentium M needs to re-unmask
			 * the apic vector, but it doesn't hurt
			 * other P6 variants.
			 */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		write_watchdog_counter(NULL);
	}
}
void notrace nmi_watchdog_tick(struct pt_regs *regs)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int cpu = smp_processor_id();

	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;

	profile_tick(CPU_PROFILING, regs);
	if (nmi_show_regs[cpu]) {
		nmi_show_regs[cpu] = 0;
		spin_lock(&nmi_print_lock);
		printk("NMI show regs on CPU#%d:\n", cpu);
		show_regs(regs);
		spin_unlock(&nmi_print_lock);
	}

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] && !(alert_counter[cpu] % (5*nmi_hz))) {
			int i;

			bust_spinlocks(1);
			spin_lock(&nmi_print_lock);
			printk("NMI watchdog detected lockup on CPU#%d (%d/%d)\n",
				cpu, alert_counter[cpu], 5*nmi_hz);
			show_regs(regs);
			spin_unlock(&nmi_print_lock);

			for_each_online_cpu(i)
				if (i != cpu)
					nmi_show_regs[i] = 1;
			for_each_online_cpu(i)
				while (nmi_show_regs[i] == 1)
					barrier();

			die_nmi(regs, "NMI Watchdog detected LOCKUP");
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
}
int nmi_watchdog_tick(struct pt_regs *regs)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	int sum, cpu = smp_processor_id(), rc = 0;

	sum = irq_stat[cpu].apic_timer_irqs;

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (30 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 30*nmi_hz)
			die_nmi(regs, "NMI Watchdog detected LOCKUP");
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/*
		 * don't know how to accurately check for this.
		 * just assume it was a watchdog timer interrupt
		 * This matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}
void nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum, cpu;

	cpu = safe_smp_processor_id();
	sum = read_pda(apic_timer_irqs);

	if (nmi_show_regs[cpu]) {
		nmi_show_regs[cpu] = 0;
		spin_lock(&nmi_print_lock);
		show_regs(regs);
		spin_unlock(&nmi_print_lock);
	}

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			int i;

			for (i = 0; i < NR_CPUS; i++)
				nmi_show_regs[i] = 1;
		}
		if (alert_counter[cpu] == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				alert_counter[cpu] = 0;
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU%d", regs);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr)
		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
}
notrace __kprobes int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = get_timer_irqs(cpu);

	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	/* We can be called before check_nmi_watchdog, hence NULL check. */
	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static DEFINE_RAW_SPINLOCK(lock);	/* Serialise the printks */

		raw_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		dump_stack();
		raw_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));

		rc = 1;
	}

	/* Could check oops_in_progress here too, but it's safer not to */
	if (mce_in_progress())
		touched = 1;

	/* If none of the timers is firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		__this_cpu_inc(alert_counter);
		if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		__this_cpu_write(alert_counter, 0);
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/*
		 * don't know how to accurately check for this.
		 * just assume it was a watchdog timer interrupt
		 * This matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}
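/*
 * Minimal sketch (assumed, not taken from this file) of the producer side of
 * the per-cpu nmi_touch flag tested in the tick handlers above:
 * touch_nmi_watchdog() marks every CPU as recently "touched" so its next
 * watchdog tick skips the lockup check and resets the alert counter.
 */
void touch_nmi_watchdog(void)
{
	unsigned int cpu;

	/*
	 * Flag each CPU so its next nmi_watchdog_tick() treats it as making
	 * forward progress, then tickle the softlockup detector as well.
	 */
	for_each_present_cpu(cpu) {
		if (per_cpu(nmi_touch, cpu) != 1)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}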