/*
 * proc handler for /proc/sys/kernel/nmi
 *
 * Syncs nmi_watchdog_enabled with the live nmi_active count, lets
 * proc_dointvec() read or update the flag, and — on a write that
 * actually toggles the state — enables/disables the LAPIC or IO-APIC
 * watchdog accordingly.
 *
 * Returns 0 on success, the proc_dointvec() error if the sysctl I/O
 * itself failed, or -EIO when the watchdog is permanently disabled or
 * no supported hardware variant is configured.
 */
int proc_nmi_enabled(struct ctl_table *table, int write,
		     void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;
	int rc;

	/* Refresh the flag from the authoritative counter before the read/write. */
	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;

	/*
	 * Propagate sysctl I/O failures instead of ignoring them; otherwise
	 * a failed write could leave us acting on a stale flag value.
	 */
	rc = proc_dointvec(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	/* Nothing to do if the enabled state did not actually change. */
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	/* nmi_active < 0 marks the watchdog as permanently disabled. */
	if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
		printk(KERN_WARNING
			"NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else if (nmi_watchdog == NMI_IO_APIC) {
		if (nmi_watchdog_enabled)
			enable_ioapic_nmi_watchdog();
		else
			disable_ioapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}
/*
 * Boot-time sanity test of the NMI watchdog: snapshot each CPU's NMI
 * count, keep the CPUs busy for ~20 watchdog ticks, then verify the
 * counts advanced.  CPUs whose count barely moved are reported broken.
 *
 * Returns 0 if the watchdog is inactive or the test passed, -1 on
 * allocation failure or when no CPU's watchdog fired (in which case
 * nmi_active is forced to -1 and the hardware is torn down).
 */
int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu;

	/* Nothing to test if no watchdog variant is active anywhere. */
	if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
	if (!prev_nmi_count)
		goto error;

	printk(KERN_INFO "Testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	/*
	 * Spin the other CPUs in nmi_cpu_busy until endflag is raised
	 * below, so their watchdogs have work to observe during the test.
	 */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
#endif

	/* Snapshot the counters before enabling interrupts and waiting. */
	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);
	local_irq_enable();
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	/* A working watchdog should have ticked well over 5 times by now. */
	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
			report_broken_nmi(cpu, prev_nmi_count);
	}
	/* Release the busy-looping CPUs started above. */
	endflag = 1;
	/*
	 * report_broken_nmi() decrements nmi_active per broken CPU; if it
	 * reached zero, every watchdog failed — mark it permanently
	 * disabled (-1) and tear the hardware down via the error path.
	 */
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		goto error;
	}
	printk("OK.\n");

	/*
	 * now that we know it works we can reduce NMI frequency to
	 * something more reasonable; makes a difference in some configs
	 */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = lapic_adjust_nmi_hz(1);

	kfree(prev_nmi_count);
	return 0;
error:
	/* IO-APIC mode: mask timer IRQ 0 and disable the NMI on every CPU. */
	if (nmi_watchdog == NMI_IO_APIC) {
		if (!timer_through_8259)
			legacy_pic->chip->mask(0);
		on_each_cpu(__acpi_nmi_disable, NULL, 1);
	}

#ifdef CONFIG_X86_32
	timer_ack = 0;
#endif
	return -1;
}
/*
 * Stop the NMI watchdog on the calling CPU.
 *
 * Only the LOCAL APIC and IO-APIC variants are supported; a no-op if
 * no watchdog is active or this CPU's watchdog is already stopped.
 * Clears the per-cpu wd_enabled flag and drops the global active count.
 */
void stop_apic_nmi_watchdog(void *unused)
{
	/* Bail out early when there is nothing running on this CPU. */
	if (!nmi_watchdog_active() || __get_cpu_var(wd_enabled) == 0)
		return;

	if (nmi_watchdog != NMI_LOCAL_APIC)
		__acpi_nmi_disable(NULL);
	else
		lapic_watchdog_stop();

	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}
void stop_apic_nmi_watchdog(void *unused) { struct hrtimer *hrtimer = &__get_cpu_var(nmi_watchdog_hrtimer); /* only support LOCAL and IO APICs for now */ if (!nmi_watchdog_active()) return; if (__get_cpu_var(wd_enabled) == 0) return; /* disable the hrtimer */ hrtimer_cancel(hrtimer); if (nmi_watchdog == NMI_LOCAL_APIC) lapic_watchdog_stop(); else __acpi_nmi_disable(NULL); __get_cpu_var(wd_enabled) = 0; atomic_dec(&nmi_active); }