/*
 * Start periodic polling for non-fatal machine-check events.
 *
 * Returns 0 on success, -ENODEV when MCE is disabled or not supported by
 * the boot CPU, -EINVAL when this CPU's poll bank mask was never set up.
 * AMD CPUs get their own vendor-specific poller; Intel CPUs use the
 * generic mce_timer firing every MCE_PERIOD.
 */
static int __init init_nonfatal_mce_checker(void)
{
    struct cpuinfo_x86 *cpu = &boot_cpu_data;

    /* Bail out early when MCE is disabled or unsupported. */
    if (!opt_mce || !mce_available(cpu))
        return -ENODEV;

    /* The per-CPU poll bank mask must already have been initialised. */
    if (__get_cpu_var(poll_bankmask) == NULL)
        return -EINVAL;

    /*
     * Check for non-fatal errors every MCE_RATE s
     */
    if (cpu->x86_vendor == X86_VENDOR_AMD) {
        /* Assume we are on K8 or newer AMD CPU here */
        amd_nonfatal_mcheck_init(cpu);
    } else if (cpu->x86_vendor == X86_VENDOR_INTEL) {
        init_timer(&mce_timer, mce_work_fn, NULL, 0);
        set_timer(&mce_timer, NOW() + MCE_PERIOD);
    }

    printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n");
    return 0;
}
/* * Just in case we missed an event during initialization check * all the CMCI owned banks. */ void cmci_recheck(void) { unsigned long flags; int banks; if (!mce_available(¤t_cpu_data) || !cmci_supported(&banks)) return; local_irq_save(flags); machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); local_irq_restore(flags); }
/*
 * Start periodic polling for non-fatal machine-check events.
 *
 * Returns 0 on success, -ENODEV when MCE is disabled (mce_disabled) or not
 * supported by the boot CPU, -EINVAL when this CPU's poll bank mask was
 * never set up.  AMD K7 and Intel non-P5 families fall back to the generic
 * mce_timer (only when CONFIG_XEN_TIMER is built in); K8+ AMD CPUs use a
 * vendor-specific poller.
 */
static int __init init_nonfatal_mce_checker(void)
{
    struct cpuinfo_x86 *c = &boot_cpu_data;

    /* Check for MCE support */
    if (mce_disabled || !mce_available(c))
        return -ENODEV;

    /* The per-CPU poll bank mask must already have been initialised. */
    if ( __get_cpu_var(poll_bankmask) == NULL )
        return -EINVAL;

    /*
     * Check for non-fatal errors every MCE_RATE s
     */
    switch (c->x86_vendor) {
    case X86_VENDOR_AMD:
        if (c->x86 == 6) { /* K7 */
            /* K7 has no vendor poller; use the generic timer if available. */
#ifdef CONFIG_XEN_TIMER
            init_timer(&mce_timer, mce_work_fn, NULL, 0);
            set_timer(&mce_timer, NOW() + MCE_PERIOD);
#endif
            break;
        }

        /* Assume we are on K8 or newer AMD CPU here */
        amd_nonfatal_mcheck_init(c);
        break;

    case X86_VENDOR_INTEL:
        /*
         * The P5 family is different. P4/P6 and latest CPUs share the
         * same polling methods.
         */
        if ( c->x86 != 5 )
        {
#ifdef CONFIG_XEN_TIMER
            init_timer(&mce_timer, mce_work_fn, NULL, 0);
            set_timer(&mce_timer, NOW() + MCE_PERIOD);
#endif
        }
        break;
    }

    /*
     * NOTE(review): this message is printed even on vendors/families where
     * no timer was actually started above — confirm that is intentional.
     */
    printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n");
    return 0;
}