/*
 * IPI handler run on each secondary CPU when the system is crashing.
 * Saves this CPU's register state for the dump, marks the CPU offline,
 * then spins until the crashing CPU signals that kexec may proceed.
 *
 * @passed_regs: register snapshot supplied by the IPI sender, or NULL.
 * Never returns: ends in kexec_reboot().
 */
static void crash_shutdown_secondary(void *passed_regs)
{
	struct pt_regs *regs = passed_regs;
	int cpu = smp_processor_id();

	/*
	 * If we are passed registers, use those.  Otherwise get the
	 * regs from the last interrupt, which should be correct, as
	 * we are in an interrupt.  But if the regs are not there,
	 * pull them from the top of the stack.  They are probably
	 * wrong, but we need something to keep from crashing again.
	 */
	if (!regs)
		regs = get_irq_regs();
	if (!regs)
		regs = task_pt_regs(current);

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	/* Save this CPU's state only once, even if we re-enter. */
	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpumask_set_cpu(cpu, &cpus_in_crash);

	/* Park until the crashing CPU gives the go-ahead to reboot. */
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();
	kexec_reboot();

	/* NOTREACHED */
}
/*
 * NMI die-notifier invoked on non-crashing CPUs during an x86 crash
 * shutdown.  Saves the CPU's register state, disables its local APIC,
 * signals the crashing CPU via waiting_for_crash_ipi, and parks the
 * CPU forever so the kdump kernel can take over.
 *
 * Returns NOTIFY_OK for events we ignore, NOTIFY_STOP on the crashing
 * CPU; otherwise never returns.
 */
static int crash_nmi_callback(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct pt_regs *regs;
	struct pt_regs fixed_regs;
	int cpu;

	if (val != DIE_NMI_IPI)
		return NOTIFY_OK;

	regs = ((struct die_args *)data)->regs;
	cpu = raw_smp_processor_id();

	/* Don't do anything if this handler is invoked on crashing cpu.
	 * Otherwise, system will completely hang. Crashing cpu can get
	 * an NMI if system was initially booted with nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return NOTIFY_STOP;
	local_irq_disable();

	/* Kernel-mode regs carry a stale SS/ESP pair; rebuild it for the dump. */
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
	crash_save_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	halt();
	for (;;)
		cpu_relax();

	/* NOTREACHED: the CPU is parked above. */
	return 1;
}
void default_machine_crash_shutdown(struct pt_regs *regs) { unsigned int i; int (*old_handler)(struct pt_regs *regs); /* * This function is only called after the system * has panicked or is otherwise in a critical state. * The minimum amount of code to allow a kexec'd kernel * to run successfully needs to happen here. * * In practice this means stopping other cpus in * an SMP system. * The kernel is broken so disable interrupts. */ hard_irq_disable(); /* * Make a note of crashing cpu. Will be used in machine_kexec * such that another IPI will not be sent. */ crashing_cpu = smp_processor_id(); crash_save_cpu(regs, crashing_cpu); crash_kexec_prepare_cpus(crashing_cpu); cpu_set(crashing_cpu, cpus_in_crash); #if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) crash_kexec_wait_realmode(crashing_cpu); #endif machine_kexec_mask_interrupts(); /* * Call registered shutdown routines savely. Swap out * __debugger_fault_handler, and replace on exit. */ old_handler = __debugger_fault_handler; __debugger_fault_handler = handle_fault; crash_shutdown_cpu = smp_processor_id(); for (i = 0; crash_shutdown_handles[i]; i++) { if (setjmp(crash_shutdown_buf) == 0) { /* * Insert syncs and delay to ensure * instructions in the dangerous region don't * leak away from this protected region. */ asm volatile("sync; isync"); /* dangerous region */ crash_shutdown_handles[i](); asm volatile("sync; isync"); } } crash_shutdown_cpu = -1; __debugger_fault_handler = old_handler; crash_kexec_stop_spus(); if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(1, 0); }
/*
 * x86 crash shutdown: stop the other CPUs via NMI, disable
 * virtualization extensions and the interrupt hardware, then save this
 * CPU's registers for the vmcore.
 */
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/* Quiesce the interrupt hardware last, APIC then IO-APIC/HPET. */
	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
/*
 * Quiesce this CPU for a crash dump: mask interrupts, record the
 * register state for the vmcore, and announce the kdump handover.
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	crash_save_cpu(regs, cpu);

	printk(KERN_INFO "Loading crashdump kernel...\n");
}
/*
 * Per-CPU callback run (via NMI) on each non-crashing CPU during the
 * kdump shootdown: saves this CPU's registers, then tears down
 * virtualization state and the local APIC so the kdump kernel can
 * boot cleanly.
 */
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	/* On 32-bit, kernel-mode regs lack a valid SS/ESP; rebuild them. */
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	disable_local_APIC();
}
/*
 * IPI entry point for secondary CPUs during a ppc crash shutdown.
 * Saves this CPU's state (once), reports in via cpus_in_crash, then
 * waits for the crashing CPU to flag time_to_dump before going down.
 * Never returns.
 */
void crash_ipi_callback(struct pt_regs *regs)
{
	/* Guards against saving a CPU's state twice if we re-enter. */
	static cpumask_t cpus_state_saved = CPU_MASK_NONE;

	int cpu = smp_processor_id();

	hard_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
		crash_save_cpu(regs, cpu);
		cpumask_set_cpu(cpu, &cpus_state_saved);
	}

	/* Publish our arrival before the crashing CPU counts us. */
	atomic_inc(&cpus_in_crash);
	smp_mb__after_atomic();

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 */
	while (!time_to_dump)
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
/*
 * x86 crash shutdown (NMI-shootdown variant): records the crashing
 * CPU, stops the others via NMI, disables the interrupt hardware and
 * saves this CPU's registers for the vmcore.
 */
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of crashing cpu. Will be used in NMI callback.*/
	crashing_cpu = safe_smp_processor_id();
	nmi_shootdown_cpus();
	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
/*
 * Crash shutdown on the panicking CPU: mask interrupts, record which
 * CPU crashed and save its registers, then bring down the secondaries
 * and mark ourselves as in-crash.
 */
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	crashing_cpu = cpu;
	crash_save_cpu(regs, cpu);

	crash_kexec_prepare_cpus();
	cpumask_set_cpu(cpu, &cpus_in_crash);
}
/*
 * x86 crash shutdown variant carrying an nVidia MCP55 workaround:
 * after stopping the other CPUs and the interrupt hardware, rewrite
 * the flagged MCP55 device's config register so legacy interrupts are
 * broadcast to all CPUs (needed when crashing on an AP).
 */
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC(1);
#endif

	if (mcp55_rewrite) {
		u32 cfg;
		printk(KERN_CRIT "REWRITING MCP55 CFG REG\n");
		/*
		 * We have a mcp55 chip on board which has been
		 * flagged as only sending legacy interrupts
		 * to the BSP, and we are crashing on an AP
		 * This is obviously bad, and we need to
		 * fix it up. To do this we write to the
		 * flagged device, to the register at offset 0x74
		 * and we make sure that bit 2 and bit 15 are clear
		 * This forces legacy interrupts to be broadcast
		 * to all cpus
		 */
		pci_read_config_dword(mcp55_rewrite, 0x74, &cfg);
		cfg &= ~((1 << 2) | (1 << 15));
		printk(KERN_CRIT "CFG = %x\n", cfg);
		pci_write_config_dword(mcp55_rewrite, 0x74, cfg);
	}

#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
/*
 * MSM crash shutdown: mask interrupts, save register state, quiesce
 * the L2 cache and notify the SMSM layer that a crash dump is
 * starting.
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	local_irq_disable();

	/* NOTE(review): cpu id is hard-coded to 0 — presumably dumps
	 * are only taken from the boot CPU on this platform; confirm. */
	crash_save_cpu(regs, 0);
#ifdef CONFIG_CACHE_L2X0
	/* Presumably quiesces the PL310 L2 before the dump — TODO confirm. */
	l2x0_suspend();
#endif
	smsm_notify_apps_crashdump();
}
/*
 * MSM crash shutdown: mask interrupts, save register state, quiesce
 * the L2 cache and notify the SMSM layer that a crash dump is
 * starting.
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* NOTE(review): cpu id is hard-coded to 0 — presumably dumps
	 * are only taken from the boot CPU on this platform; confirm. */
	crash_save_cpu(regs, 0);
#ifdef CONFIG_CACHE_L2X0
	/* Presumably quiesces the PL310 L2 before the dump — TODO confirm. */
	l2x0_suspend();
#endif
	smsm_notify_apps_crashdump();
}
void machine_crash_nonpanic_core(void *unused) { struct pt_regs regs; crash_setup_regs(®s, NULL); printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n", smp_processor_id()); crash_save_cpu(®s, smp_processor_id()); flush_cache_all(); atomic_dec(&waiting_for_crash_ipi); while (1) cpu_relax(); }
void machine_crash_nonpanic_core(void *info) { struct pt_regs regs; crash_setup_regs(®s, NULL); printk(KERN_EMERG "CPU %u will stop doing anything useful since another CPU has crashed\n", smp_processor_id()); crash_save_cpu(®s, smp_processor_id()); atomic_notifier_call_chain(&crash_percpu_notifier_list, 0, NULL); flush_cache_all(); atomic_dec(&waiting_for_crash_ipi); while (1) cpu_relax(); }
/*
 * IPI/soft-reset entry point for secondary CPUs during a ppc crash
 * shutdown.  A CPU may arrive here twice (IPI then soft-reset), so the
 * state is saved only once; the CPU then waits for the crashing CPU
 * before going down.  Never returns.
 */
void crash_ipi_callback(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	hard_irq_disable();
	/* Save state only on first entry. */
	if (!cpu_isset(cpu, cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpu_set(cpu, cpus_in_crash);

	/*
	 * Entered via soft-reset - could be the kdump
	 * process is invoked using soft-reset or user activated
	 * it if some CPU did not respond to an IPI.
	 * For soft-reset, the secondary CPU can enter this func
	 * twice. 1 - using IPI, and 2. soft-reset.
	 * Tell the kexec CPU that entered via soft-reset and ready
	 * to go down.
	 */
	if (cpu_isset(cpu, cpus_in_sr)) {
		cpu_clear(cpu, cpus_in_sr);
		atomic_inc(&enter_on_soft_reset);
	}

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 * If not, soft-reset will be invoked to bring other CPUs.
	 */
	while (!cpu_isset(crashing_cpu, cpus_in_crash))
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
void machine_crash_shutdown(struct pt_regs *regs) { unsigned long msecs; atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); local_irq_enable(); smp_call_function(machine_crash_nonpanic_core, NULL, false); msecs = 1000; /* Wait at most a second for the other cpus to stop */ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { mdelay(1); msecs--; } local_irq_disable(); crash_save_cpu(regs, smp_processor_id()); printk(KERN_INFO "Loading crashdump kernel...\n"); }
/*
 * Modern x86 crash shutdown: stop the other CPUs via NMI, tear down
 * virtualization and Intel PT state, then the interrupt hardware, and
 * finally save this CPU's registers for the vmcore.
 */
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
/*
 * ARM crash shutdown: mask interrupts, ask the other CPUs to park
 * themselves via a cross-call and wait (bounded) for them, then save
 * this CPU's registers, mask the interrupt controller and announce
 * the kdump handover.
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned long timeout;

	local_irq_disable();

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	smp_call_function(machine_crash_nonpanic_core, NULL, false);

	/* Give the secondaries up to one second to check in. */
	for (timeout = 1000; timeout && atomic_read(&waiting_for_crash_ipi) > 0; timeout--)
		mdelay(1);
	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("Non-crashing CPUs did not react to IPI\n");

	crash_save_cpu(regs, smp_processor_id());
	machine_kexec_mask_interrupts();

	pr_info("Loading crashdump kernel...\n");
}
/*
 * Early ppc crash shutdown: EOIs any in-flight interrupts and disables
 * the rest by walking the irq_desc table directly, then records the
 * crashing CPU, saves its registers, stops the secondary CPUs and the
 * SPUs, and calls the platform CPU-down hook.
 */
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int irq;

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	for_each_irq(irq) {
		struct irq_desc *desc = irq_desc + irq;

		/* Ack anything mid-service so the controller is not wedged. */
		if (desc->status & IRQ_INPROGRESS)
			desc->chip->eoi(irq);

		if (!(desc->status & IRQ_DISABLED))
			desc->chip->disable(irq);
	}

	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();
	crash_save_cpu(regs, crashing_cpu);
	crash_kexec_prepare_cpus(crashing_cpu);
	cpu_set(crashing_cpu, cpus_in_crash);
	crash_kexec_stop_spus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}
/*
 * ppc crash shutdown (bounded-handler variant): disable interrupts,
 * record the crashing CPU, stop the secondaries, save this CPU's
 * registers, release the CPUs spinning in crash_ipi_callback() via
 * time_to_dump, run registered crash shutdown handlers under a fault
 * trampoline, and call the platform CPU-down hook.
 */
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();

	/*
	 * If we came in via system reset, wait a while for the secondary
	 * CPUs to enter.
	 */
	if (TRAP(regs) == 0x100)
		mdelay(PRIMARY_TIMEOUT);

	crash_kexec_prepare_cpus(crashing_cpu);

	crash_save_cpu(regs, crashing_cpu);

	/* Release the secondaries spinning in crash_ipi_callback(). */
	time_to_dump = 1;

	crash_kexec_wait_realmode(crashing_cpu);

	machine_kexec_mask_interrupts();

	/*
	 * Call registered shutdown routines safely. Swap out
	 * __debugger_fault_handler, and replace on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	crash_shutdown_cpu = smp_processor_id();
	for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
	}
	crash_shutdown_cpu = -1;
	__debugger_fault_handler = old_handler;

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}