/* Inject mce on current CPU */ static void raise_mce(unsigned long data) { struct delayed_mce *dm = (struct delayed_mce *)data; struct mce *m = &dm->m; int cpu = m->extcpu; inject_mce(m); if (m->status & MCI_STATUS_UC) { struct pt_regs regs; memset(®s, 0, sizeof(struct pt_regs)); regs.ip = m->ip; regs.cs = m->cs; printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu); do_machine_check(®s, 0); printk(KERN_INFO "MCE exception done on CPU %d\n", cpu); } else { mce_banks_t b; memset(&b, 0xff, sizeof(mce_banks_t)); printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu); machine_check_poll(0, &b); mce_notify_irq(); printk(KERN_INFO "Finished machine check poll on CPU %d\n", cpu); } kfree(dm); }
/*
 * Inject an MCE described by @m and raise it in the requested context.
 *
 * For MCJ_CTX_RANDOM the record is only stored (picked up later by a
 * randomly-interrupted CPU), so we return after inject_mce().  For the
 * broadcast flags, build a cpumask of every other online CPU that has a
 * finished RANDOM-context record pending, kick them via IRQ or NMI IPI,
 * wait (2*HZ timeout) for each to clear itself from the mask, then raise
 * the event locally.
 *
 * NOTE(review): MCJ_IRQ_BRAODCAST is the flag's real (misspelled) name in
 * the upstream kernel headers — do not "correct" it here.
 *
 * NOTE(review): this definition is truncated in this chunk — it ends at
 * "} else"; the non-broadcast branch and the closing #endif are not
 * visible, so only comments were added.
 */
static void raise_mce(struct mce *m)
{
	int context = MCJ_CTX(m->inject_flags);

	inject_mce(m);

	if (context == MCJ_CTX_RANDOM)
		return;

#ifdef CONFIG_X86_LOCAL_APIC
	if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) {
		unsigned long start;
		int cpu;

		get_online_cpus();
		cpumask_copy(mce_inject_cpumask, cpu_online_mask);
		/* Never target ourselves; we raise locally at the end. */
		cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
		for_each_online_cpu(cpu) {
			struct mce *mcpu = &per_cpu(injectm, cpu);
			/* Only CPUs with a completed RANDOM-context record
			 * participate in the broadcast. */
			if (!mcpu->finished ||
			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
				cpumask_clear_cpu(cpu, mce_inject_cpumask);
		}
		if (!cpumask_empty(mce_inject_cpumask)) {
			if (m->inject_flags & MCJ_IRQ_BRAODCAST) {
				/*
				 * don't wait because mce_irq_ipi is necessary
				 * to be sync with following raise_local
				 */
				preempt_disable();
				smp_call_function_many(mce_inject_cpumask,
					mce_irq_ipi, NULL, 0);
				preempt_enable();
			} else if (m->inject_flags & MCJ_NMI_BROADCAST)
				apic->send_IPI_mask(mce_inject_cpumask,
					NMI_VECTOR);
		}
		/* Each target clears itself from the mask when done;
		 * give the set of them 2*HZ before complaining. */
		start = jiffies;
		while (!cpumask_empty(mce_inject_cpumask)) {
			if (!time_before(jiffies, start + 2*HZ)) {
				printk(KERN_ERR
					"Timeout waiting for mce inject %lx\n",
					*cpumask_bits(mce_inject_cpumask));
				break;
			}
			cpu_relax();
		}
		raise_local();
		put_cpu();
		put_online_cpus();
	} else
	/* NOTE(review): chunk ends here — else-branch body not visible. */
/*
 * Inject an MCE described by @m and raise it in the requested context
 * (older-kernel variant: NMI broadcast only, and the legacy by-value
 * cpumask API — cpu_online_map / cpu_clear / cpus_empty / cpus_addr).
 *
 * MCJ_CTX_RANDOM records are only stored by inject_mce() and consumed
 * later, so we return immediately.  For MCJ_NMI_BROADCAST, collect every
 * other online CPU holding a finished RANDOM-context record, send them an
 * NMI IPI, spin (2*HZ timeout) until each clears itself from
 * mce_inject_cpumask, then raise the event on this CPU.
 *
 * NOTE(review): this definition is truncated in this chunk — it ends at
 * "} else"; the non-broadcast branch and the closing #endif are not
 * visible, so only comments were added.
 */
static void raise_mce(struct mce *m)
{
	int context = MCJ_CTX(m->inject_flags);

	inject_mce(m);

	if (context == MCJ_CTX_RANDOM)
		return;

#ifdef CONFIG_X86_LOCAL_APIC
	if (m->inject_flags & MCJ_NMI_BROADCAST) {
		unsigned long start;
		int cpu;

		get_online_cpus();
		mce_inject_cpumask = cpu_online_map;
		/* Exclude ourselves; we raise locally after the broadcast. */
		cpu_clear(get_cpu(), mce_inject_cpumask);
		for_each_online_cpu(cpu) {
			struct mce *mcpu = &per_cpu(injectm, cpu);
			/* Only CPUs with a completed RANDOM-context record
			 * take part in the NMI broadcast. */
			if (!mcpu->finished ||
			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
				cpu_clear(cpu, mce_inject_cpumask);
		}
		if (!cpus_empty(mce_inject_cpumask))
			apic->send_IPI_mask(&mce_inject_cpumask, NMI_VECTOR);
		/* Targets clear their own bit when handled; allow 2*HZ. */
		start = jiffies;
		while (!cpus_empty(mce_inject_cpumask)) {
			if (!time_before(jiffies, start + 2*HZ)) {
				printk(KERN_ERR
					"Timeout waiting for mce inject NMI %lx\n",
					*cpus_addr(mce_inject_cpumask));
				break;
			}
			cpu_relax();
		}
		raise_local();
		put_cpu();
		put_online_cpus();
	} else
	/* NOTE(review): chunk ends here — else-branch body not visible. */
/*
 * Push the saved vCPU state back into KVM via the KVM_SET_* ioctls on
 * g_vcpu_fd, in dependency order: regs, xsave, xcrs, sregs, MCE state,
 * then MSRs, then debug registers.
 *
 * Returns 0 on success, or the (negative) ioctl return value of the first
 * failing call; perror() reports errno for that call.
 *
 * NOTE(review): the ioctl ORDER appears deliberate — the "must be before
 * kvm_put_msrs" comment ties inject_mce() to KVM_SET_MSRS — so the
 * sequence was left untouched.
 */
static int restore_registers()
{
	int ret;
	/* NOTE(review): looks unused in this function body, but NEXT_FNC()
	 * is a project macro that may expand to code referencing it —
	 * confirm against the macro definition before removing. */
	static int (*next_fnc)() = NULL; /* same type signature as ioctl */

	DPRINTF("Trying to put registers\n");

	/* General-purpose registers. */
	ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_REGS, &g_kvm_regs);
	if (ret < 0) {
		perror("ioctl(KVM_SET_REGS)");
		return ret;
	}

	/* Extended (FPU/SSE/AVX) state. */
	ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_XSAVE, &g_kvm_xsave);
	if (ret < 0) {
		perror("ioctl(KVM_SET_XSAVE)");
		return ret;
	}

	/* Extended control registers (XCR0 etc.). */
	ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_XCRS, &g_kvm_xcrs);
	if (ret < 0) {
		perror("ioctl(KVM_SET_XCRS)");
		return ret;
	}

	/* Special registers: segments, CRs, APIC base. */
	ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_SREGS, &g_kvm_sregs);
	if (ret < 0) {
		perror("ioctl(KVM_SET_SREGS)");
		return ret;
	}

	/* must be before kvm_put_msrs */
	ret = inject_mce();
	if (ret < 0) {
		perror("ioctl(KVM_SET_MCE)");
		return ret;
	}

	ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_MSRS, &g_kvm_msrs);
	if (ret < 0) {
		perror("ioctl(KVM_SET_MSRS)");
		return ret;
	}

	// DPRINTF("Not setting MP State to: %s\n",
	//         get_mp_state_string(g_kvm_mp_state));

	/* NOTE(review): MP state, LAPIC and vCPU events restore is disabled
	 * here — presumably intentionally; verify before enabling. */
#if 0
	ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_MP_STATE, &g_kvm_mp_state);
	if (ret < 0) {
		perror("ioctl(KVM_SET_MP_STATE)");
		return ret;
	}

	ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_LAPIC, &g_kvm_lapic);
	if (ret < 0) {
		perror("ioctl(KVM_SET_LAPIC)");
		return ret;
	}

	ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_VCPU_EVENTS,
			      &g_kvm_vcpu_events);
	if (ret < 0) {
		perror("ioctl(KVM_SET_VCPU_EVENTS)");
		return ret;
	}
#endif /* if 0 */

	/* Hardware debug registers (DR0-DR7). */
	ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_DEBUGREGS, &g_kvm_dbgregs);
	if (ret < 0) {
		perror("ioctl(KVM_SET_DEBUGREGS)");
		return ret;
	}

	/* must be last */
	/*
	ret = kvm_guest_debug_workarounds();
	if (ret < 0) {
		return ret;
	}
	*/

	DPRINTF("Put registers done\n");
	return 0;
}