static int raise_local(void) { struct mce *m = &__get_cpu_var(injectm); int context = MCJ_CTX(m->inject_flags); int ret = 0; int cpu = m->extcpu; if (m->inject_flags & MCJ_EXCEPTION) { printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu); switch (context) { case MCJ_CTX_IRQ: case MCJ_CTX_PROCESS: raise_exception(m, NULL); break; default: printk(KERN_INFO "Invalid MCE context\n"); ret = -EINVAL; } printk(KERN_INFO "MCE exception done on CPU %d\n", cpu); } else if (m->status) { printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu); raise_poll(m); mce_notify_irq(); printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu); } else m->finished = 0; return ret; }
/* Inject mce on current CPU */ static void raise_mce(unsigned long data) { struct delayed_mce *dm = (struct delayed_mce *)data; struct mce *m = &dm->m; int cpu = m->extcpu; inject_mce(m); if (m->status & MCI_STATUS_UC) { struct pt_regs regs; memset(®s, 0, sizeof(struct pt_regs)); regs.ip = m->ip; regs.cs = m->cs; printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu); do_machine_check(®s, 0); printk(KERN_INFO "MCE exception done on CPU %d\n", cpu); } else { mce_banks_t b; memset(&b, 0xff, sizeof(mce_banks_t)); printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu); machine_check_poll(0, &b); mce_notify_irq(); printk(KERN_INFO "Finished machine check poll on CPU %d\n", cpu); } kfree(dm); }
/* Inject mce on current CPU */
/*
 * Raise the per-cpu injected MCE: either fire a fake exception
 * (MCJ_EXCEPTION) or run the poll path when a status is set.
 * Returns 0 on success, -EINVAL for an unknown injection context.
 */
static int raise_local(void)
{
	struct mce *m = &__get_cpu_var(injectm);
	int context = MCJ_CTX(m->inject_flags);
	int ret = 0;
	int cpu = m->extcpu;

	if (m->inject_flags & MCJ_EXCEPTION) {
		printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu);
		switch (context) {
		case MCJ_CTX_IRQ:
			/*
			 * Could do more to fake interrupts like
			 * calling irq_enter, but the necessary
			 * machinery isn't exported currently.
			 */
			/*FALL THROUGH*/
		case MCJ_CTX_PROCESS:
			raise_exception(m, NULL);
			break;
		default:
			printk(KERN_INFO "Invalid MCE context\n");
			ret = -EINVAL;
		}
		printk(KERN_INFO "MCE exception done on CPU %d\n", cpu);
	} else if (m->status) {
		/* No exception requested: poll and notify instead. */
		printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
		raise_poll(m);
		mce_notify_irq();
		printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu);
	} else
		/* Nothing to do: mark the injection slot unused. */
		m->finished = 0;

	return ret;
}
/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	/* During a CMCI storm the poll timer takes over; skip the poll. */
	if (!cmci_storm_detect()) {
		machine_check_poll(MCP_TIMESTAMP,
				   &__get_cpu_var(mce_banks_owned));
		mce_notify_irq();
	}
}
/*
 * Report a corrected memory error found in a CPER record through the
 * normal MCE logging machinery.
 */
void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
{
	struct mce m;

	/* Only corrected MC is reported */
	if (!corrected)
		return;
	/* Without a valid physical address there is nothing to log. */
	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
		return;

	mce_setup(&m);
	m.bank = 1;
	/* Fake a memory read corrected error with unknown channel */
	m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
	m.addr = mem_err->physical_addr;
	mce_log(&m);
	mce_notify_irq();
}
/*
 * Adjust the MCE poll timer interval based on the CMCI storm state
 * machine. Returns the next poll interval in jiffies.
 */
unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
	/*
	 * While the storm is active and backoff has not expired, keep
	 * polling at the storm rate and keep userspace notified.
	 */
	if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
	    (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
		mce_notify_irq();
		return CMCI_STORM_INTERVAL;
	}

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:
		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the timer
		 * interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		if (!atomic_sub_return(1, &cmci_storm_on_cpus))
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");
		/* FALLTHROUGH */
	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all CPUs to go back to SUBSIDED state. When that
		 * happens we switch back to interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_reenable();
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/* We have shiny weather. Let the poll do whatever it thinks. */
		return interval;
	}
}
/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	/* Log events from the banks this CPU owns, then wake waiters. */
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	mce_notify_irq();
}