static int crash_nmi_callback(struct notifier_block *self,
			unsigned long val, void *data)
{
	struct pt_regs *regs;
	struct pt_regs fixed_regs;
	int cpu;

	if (val != DIE_NMI_IPI)
		return NOTIFY_OK;

	regs = ((struct die_args *)data)->regs;
	cpu = raw_smp_processor_id();

	/* Don't do anything if this handler is invoked on crashing cpu.
	 * Otherwise, system will completely hang. Crashing cpu can get
	 * an NMI if system was initially booted with nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return NOTIFY_STOP;
	local_irq_disable();

	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
	crash_save_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);

	/* Assume hlt works */
	halt();
	for (;;)
		cpu_relax();

	return 1;
}
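/*
 * For context, a minimal sketch of how a die-notifier callback like the one
 * above is wired up. The notifier_block name matches crash_nmi_nb, which a
 * later variant in this collection registers; the exact declaration here is
 * an assumption, not verbatim source.
 */
static struct notifier_block crash_nmi_nb = {
	.notifier_call = crash_nmi_callback,
};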
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = read_apic_id();
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
	return 0;
}
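/*
 * For reference, the apic_pm_state bookkeeping shared by the suspend/resume
 * variants in this collection looks roughly like this. The field list is
 * inferred from the reads above; treat it as a sketch, not the exact
 * upstream layout.
 */
static struct {
	int active;
	/* LAPIC register contents saved at suspend, rewritten on resume */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	unsigned int apic_thmr;
} apic_pm_state;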
static void kdump_nmi_callback(int cpu, struct die_args *args)
{
	struct pt_regs *regs;
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;
#endif

	regs = args->regs;

#ifdef CONFIG_X86_32
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	disable_local_APIC();
}
static void kdump_nmi_shootdown_cpus(void)
{
	in_crash_kexec = 1;
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}
static void apic_pm_suspend(void *data)
{
	unsigned int l, h;
	unsigned long flags;

	if (apic_pm_state.perfctr_pmdev)
		pm_send(apic_pm_state.perfctr_pmdev, PM_SUSPEND, data);

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);

	__save_flags(flags);
	__cli();
	disable_local_APIC();

	/* Hard-disable the LAPIC via the APIC base MSR as well */
	rdmsr(MSR_IA32_APICBASE, l, h);
	l &= ~MSR_IA32_APICBASE_ENABLE;
	wrmsr(MSR_IA32_APICBASE, l, h);

	__restore_flags(flags);
}
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}
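/*
 * Sketch of how cpu_crash_vmclear_loaded_vmcss(), called above, is typically
 * implemented: an RCU-protected function pointer that the kvm_intel module
 * installs so the crash path can VMCLEAR any loaded VMCSs. Reconstructed for
 * illustration; the pointer and typedef names are assumptions.
 */
static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}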
static void native_smp_send_stop(void)
{
	unsigned long flags;
	unsigned long wait;

	if (reboot_force)
		return;

	/*
	 * Use our own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 * On most systems we could also use an NMI here,
	 * but there are a few systems around where NMI
	 * is problematic so stay with a non-NMI approach for now
	 * (this implies we cannot stop CPUs spinning with irq off
	 * currently)
	 */
	if (num_online_cpus() > 1) {
		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/* Don't wait longer than a second */
		wait = USEC_PER_SEC;
		while (num_online_cpus() > 1 && wait--)
			udelay(1);
	}

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
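/*
 * The REBOOT_VECTOR IPI sent above lands in a handler along these lines,
 * which acks the interrupt and parks the CPU via stop_this_cpu(). A sketch
 * modeled on the upstream smp_reboot_interrupt(); the exact body varies by
 * kernel version.
 */
asmlinkage void smp_reboot_interrupt(void)
{
	ack_APIC_irq();
	irq_enter();
	stop_this_cpu(NULL);
	irq_exit();
}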
static int crash_nmi_callback(struct pt_regs *regs, int cpu)
{
	struct pt_regs fixed_regs;

	/* Don't do anything if this handler is invoked on crashing cpu.
	 * Otherwise, system will completely hang. Crashing cpu can get
	 * an NMI if system was initially booted with nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return 1;
	local_irq_disable();

	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
	crash_save_this_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);

	/* Assume hlt works */
	halt();
	for (;;)
		cpu_relax();

	return 1;
}
static int lapic_suspend(struct sys_device *dev, u32 state)
{
	unsigned long flags;

	if (!apic_pm_state.active)
		return 0;

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
	apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);

	local_save_flags(flags);
	local_irq_disable();
	disable_local_APIC();
	local_irq_restore(flags);
	return 0;
}
int lapic_suspend(void)
{
	unsigned long flags;
	int maxlvt = get_maxlvt();

	if (!apic_pm_state.active)
		return 0;

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	if (maxlvt >= 6)
		apic_pm_state.apic_lvtcmci = apic_read(APIC_CMCI);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);

	local_irq_save(flags);
	disable_local_APIC();
	iommu_disable_x2apic_IR();
	local_irq_restore(flags);
	return 0;
}
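/*
 * The maxlvt checks above gate reads of LVT entries that older (82489DX)
 * APICs do not implement. A sketch of how the max-LVT count is derived from
 * the version register; the fallback constant is an assumption based on the
 * Linux lapic_get_maxlvt() helper.
 */
static int get_maxlvt(void)
{
	unsigned int v = apic_read(APIC_LVR);

	/* 82489DXs do not report the number of LVT entries */
	return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
}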
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);

	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}
/*
 * If Linux enabled the LAPIC against the BIOS default,
 * disable it again before re-entering the BIOS on shutdown.
 * Otherwise the BIOS may get confused and not power-off.
 */
void lapic_shutdown(void)
{
	if (!cpu_has_apic || !enabled_via_apicbase)
		return;

	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}
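/*
 * For context, a minimal sketch of what disable_local_APIC() itself does:
 * clear the LVT entries, then soft-disable the APIC by clearing the enable
 * bit in the spurious-interrupt vector register. This is an approximation of
 * the historical x86 implementation, not verbatim source; some versions also
 * clear MSR_IA32_APICBASE_ENABLE when the APIC was enabled via the MSR.
 */
void disable_local_APIC(void)
{
	unsigned int value;

	clear_local_APIC();

	/* Soft-disable: clear APIC_SPIV_APIC_ENABLED in SPIV */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);
}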
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
	smp_num_cpus = 1;

	__cli();
	disable_local_APIC();
	__sti();
}
void smp_stop_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}
static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	clear_bit(smp_processor_id(), &cpu_online_map);
	__cli();
	disable_local_APIC();
	for (;;)
		__asm__("hlt");
	/* not reached; trailing spin loop kept from the original */
	for (;;)
		;
}
static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	if (cpu_data[smp_processor_id()].hlt_works_ok)
		for (;;)
			halt();
	for (;;)
		;
}
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;)
		halt();
}
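/*
 * halt(), used by most of the parking loops here, ultimately executes the
 * HLT instruction; a sketch of the native form (newer kernels route this
 * through paravirt ops, so the exact plumbing is an assumption):
 */
static inline void native_halt(void)
{
	asm volatile("hlt" : : : "memory");
}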
static void native_smp_send_stop(void)
{
	/* Don't deadlock on the call lock in panic */
	int nolock = !spin_trylock(&call_lock);
	unsigned long flags;

	local_irq_save(flags);
	__smp_call_function(stop_this_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}
void lapic_shutdown(void)
{
	unsigned long flags;

	if (!cpu_has_apic)
		return;

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
/*
 * If Linux enabled the LAPIC against the BIOS default,
 * disable it again before re-entering the BIOS on shutdown.
 * Otherwise the BIOS may get confused and not power-off.
 * Additionally clear all LVT entries before disable_local_APIC
 * for the case where Linux didn't enable the LAPIC.
 */
void lapic_shutdown(void)
{
	unsigned long flags;

	if (!cpu_has_apic)
		return;

	local_irq_save(flags);
	clear_local_APIC();
	if (enabled_via_apicbase)
		disable_local_APIC();
	local_irq_restore(flags);
}
void __stop_this_cpu(void)
{
	ASSERT(!local_irq_is_enabled());

	disable_local_APIC();

	hvm_cpu_down();

	/*
	 * Clear FPU, zapping any pending exceptions. Needed for warm reset
	 * with some BIOSes.
	 */
	clts();
	asm volatile ( "fninit" );

	cpumask_clear_cpu(smp_processor_id(), &cpu_online_map);
}
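/*
 * A sketch of the kind of wrapper that would invoke __stop_this_cpu() above
 * from IPI/NMI context and then park the CPU; the caller shown here is an
 * assumption about the surrounding Xen code, not verbatim source.
 */
static void stop_this_cpu(void *dummy)
{
	__stop_this_cpu();
	for ( ; ; )
		halt();
}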
void smp_send_stop(void)
{
	int nolock = 0;

	/* Don't deadlock on the call lock in panic */
	if (!spin_trylock(&call_lock)) {
		udelay(100);
		/* ignore locking because we have panicked anyway */
		nolock = 1;
	}
	__smp_call_function(smp_really_stop_cpu, NULL, 1, 0);
	if (!nolock)
		spin_unlock(&call_lock);

	local_irq_disable();
#ifndef CONFIG_XEN
	disable_local_APIC();
#endif
	local_irq_enable();
}
void smp_send_stop(void)
{
	int nolock = 0;

	if (reboot_force)
		return;

	/* Don't deadlock on the call lock in panic */
	if (!spin_trylock(&call_lock)) {
		/* ignore locking because we have panicked anyway */
		nolock = 1;
	}
	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);

	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}
void lapic_shutdown(void)
{
	unsigned long flags;

	if (!cpu_has_apic && !apic_from_smp_config())
		return;

	local_irq_save(flags);

#ifdef CONFIG_X86_32
	if (!enabled_via_apicbase)
		clear_local_APIC();
	else
#endif
		disable_local_APIC();

	local_irq_restore(flags);
}
void machine_restart(char *__unused)
{
	int i;

	printk("machine restart\n");

#ifdef CONFIG_SMP
	smp_halt();
#endif

	if (!reboot_force) {
		local_irq_disable();
#ifndef CONFIG_SMP
		disable_local_APIC();
#endif
		disable_IO_APIC();
		local_irq_enable();
	}

	/* Tell the BIOS if we want cold or warm reboot */
	*((unsigned short *)__va(0x472)) = reboot_mode;

	for (;;) {
		/* Could also try the reset bit in the Hammer NB */
		switch (reboot_type) {
		case BOOT_KBD:
			for (i = 0; i < 100; i++) {
				kb_wait();
				udelay(50);
				outb(0xfe, 0x64); /* pulse reset low */
				udelay(50);
			}
			/* fall through: if the keyboard controller reset
			 * did not take, try a triple fault next */
		case BOOT_TRIPLE:
			__asm__ __volatile__("lidt (%0)" : : "r" (&no_idt));
			__asm__ __volatile__("int3");

			reboot_type = BOOT_KBD;
			break;
		}
	}
}
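/*
 * kb_wait(), used in the BOOT_KBD path above, polls the keyboard controller
 * status port until its input buffer drains so the 0xFE reset pulse is
 * accepted. A sketch of the classic implementation; reconstructed, not
 * copied from the same tree.
 */
static void kb_wait(void)
{
	int i;

	for (i = 0; i < 0x10000; i++)
		if ((inb_p(0x64) & 0x02) == 0)
			break;
}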
static int crash_nmi_callback(struct pt_regs *regs, int cpu)
{
	/*
	 * Don't do anything if this handler is invoked on crashing cpu.
	 * Otherwise, system will completely hang. Crashing cpu can get
	 * an NMI if system was initially booted with nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return 1;
	local_irq_disable();

	crash_save_this_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);

	/* Assume hlt works */
	for (;;)
		asm("hlt");

	return 1;
}
static void native_nmi_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use our own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			return;

		/* sync above data before sending NMI */
		wmb();

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	set_nmi_callback(crash_nmi_callback);

	/*
	 * Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
	disable_local_APIC();
}
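/*
 * smp_send_nmi_allbutself(), used above, is essentially a one-line wrapper
 * around the NMI IPI; a sketch (the IPI API differs across kernel versions,
 * so this particular form is an assumption):
 */
static void smp_send_nmi_allbutself(void)
{
	apic->send_IPI_allbutself(NMI_VECTOR);
}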
static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	if (register_die_notifier(&crash_nmi_nb))
		return;		/* return what? */

	/* Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
	disable_local_APIC();
}
static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use our own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * We start by using the REBOOT_VECTOR irq.
	 * The irq is treated as a sync point to allow critical
	 * regions of code on other cpus to release their spin locks
	 * and re-enable irqs. Jumping straight to an NMI might
	 * accidentally cause deadlocks with further shutdown/panic
	 * code. By syncing, we give the cpus up to one second to
	 * finish their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			/* Hope the REBOOT_IRQ is good enough */
			goto finish;

		/* sync above data before sending NMI */
		wmb();

		pr_emerg("Shutting down cpus with NMI\n");

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than 10 ms if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

finish:
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
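/*
 * A sketch of the NMI handler registered above, modeled on the upstream
 * smp_stop_nmi_callback(); the body is reconstructed rather than quoted.
 * It skips the CPU that initiated the shutdown and parks everyone else.
 */
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* We are registered on the stopping cpu too; avoid a spurious stop */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	stop_this_cpu(NULL);

	return NMI_HANDLED;
}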