Example #1
static void crash_save_self(struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();
	crash_save_this_cpu(regs, cpu);
}
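On most architectures, crash_save_this_cpu() serializes the interrupted register state into a per-cpu ELF note buffer that the kdump kernel later exposes through /proc/vmcore. A minimal sketch of that pattern, modeled on the historical x86 implementation; the append_elf_note()/final_note() helpers and the per-cpu crash_notes buffers are assumptions taken from that era's arch crash.c, not shown on this page:

static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= NR_CPUS))
		return;

	/* Fill an NT_PRSTATUS note with this cpu's registers. */
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);

	/* crash_notes[cpu] is the per-cpu note buffer (assumed). */
	buf = append_elf_note(crash_notes[cpu], "CORE", NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}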
Example #2
static int crash_nmi_callback(struct pt_regs *regs, int cpu)
{
	struct pt_regs fixed_regs;

	/*
	 * Don't do anything if this handler is invoked on the crashing
	 * cpu; otherwise the system will completely hang. The crashing
	 * cpu can get an NMI if the system was originally booted with
	 * the nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return 1;
	local_irq_disable();

	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
	crash_save_this_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	halt();
	for (;;)
		cpu_relax();

	return 1;
}
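The waiting_for_crash_ipi counter decremented above is the handshake with the crashing cpu, which raises an NMI on every other cpu and spins until they all check in or a timeout expires. A sketch of that sender side, modeled on the historical i386 code; smp_send_nmi_allbutself() and the one-second timeout are assumptions from that era:

static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Route NMIs to crash_nmi_callback() before raising any. */
	set_nmi_callback(crash_nmi_callback);
	wmb();

	smp_send_nmi_allbutself();

	/* Wait at most a second for the other cpus to stop. */
	msecs = 1000;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}
	/* If some cpu never answered, carry on with the dump anyway. */
}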
Example #3
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	struct kimage *image = arg;
	relocate_new_kernel_t rnk;
	void *pal_addr = efi_get_pal_addr();
	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
	unsigned long vector;
	int ii;
	u64 fp, gp;
	ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;

	BUG_ON(!image);
	if (image->type == KEXEC_TYPE_CRASH) {
		crash_save_this_cpu();
		current->thread.ksp = (__u64)info->sw - 16;

		/* Register noop init handler */
		fp = ia64_tpa(init_handler->fp);
		gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
	} else {
		/* Unregister init handlers of current kernel */
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
	}

	/* Unregister mca handler - No more recovery on current kernel */
	ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	vector = ia64_get_ivr();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	platform_kernel_launch_event();
	rnk = (relocate_new_kernel_t)&code_addr;
	(*rnk)(image->head, image->start, ia64_boot_param,
		     GRANULEROUNDDOWN((unsigned long) pal_addr));
	BUG();
}
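The cast rnk = (relocate_new_kernel_t)&code_addr looks odd but is deliberate: on ia64 a function pointer refers to a descriptor whose first word is the entry address and whose second is the gp value, so the address of the local variable holding the entry point serves as a descriptor. Whatever lands in the gp slot is harmless because relocate_new_kernel is self-contained assembly that never uses gp. The typedef this relies on is roughly the following sketch (parameter names are illustrative, read off the call site):

typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
				      unsigned long start_address,
				      struct ia64_boot_param *boot_param,
				      unsigned long pal_addr);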
Example #4
void crash_ipi_callback(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	local_irq_disable();
	if (!cpu_isset(cpu, cpus_in_crash))
		crash_save_this_cpu(regs, cpu);
	cpu_set(cpu, cpus_in_crash);

	/*
	 * Entered via soft-reset: either the kdump process itself was
	 * started with a soft-reset, or the user triggered one because
	 * some CPU did not respond to the crash IPI. In the soft-reset
	 * case a secondary CPU can enter this function twice: once via
	 * the IPI and once via the soft-reset. Tell the kexec CPU that
	 * we entered via soft-reset and are ready to go down.
	 */
	if (cpu_isset(cpu, cpus_in_sr)) {
		cpu_clear(cpu, cpus_in_sr);
		atomic_inc(&enter_on_soft_reset);
	}

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped;
	 * if they are not, a soft-reset will be used to bring in the
	 * remaining CPUs.
	 */
	while (!cpu_isset(crashing_cpu, cpus_in_crash))
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
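The busy-wait on crashing_cpu above pairs with crash_kexec_prepare_cpus(), which runs on the panicking cpu, sends the crash IPI, and waits for the secondaries to appear in cpus_in_crash before kdump proceeds. A sketch of that side, modeled on the historical powerpc code; crash_send_ipi() and the ten-second timeout are assumptions from that era:

static void crash_kexec_prepare_cpus(int cpu)
{
	unsigned int msecs;
	unsigned int ncpus = num_online_cpus() - 1; /* excluding the panic cpu */

	crash_send_ipi(crash_ipi_callback);
	smp_wmb();

	/* Wait up to ~10s for the secondaries to mark themselves in
	 * cpus_in_crash; any stragglers are brought in via soft-reset. */
	msecs = 10000;
	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
		cpu_relax();
		mdelay(1);
	}
}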
Example #5
static int crash_nmi_callback(struct pt_regs *regs, int cpu)
{
	/*
	 * Don't do anything if this handler is invoked on the crashing
	 * cpu; otherwise the system will completely hang. The crashing
	 * cpu can get an NMI if the system was originally booted with
	 * the nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return 1;
	local_irq_disable();

	crash_save_this_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	for (;;)
		asm("hlt");

	return 1;
}
Example #6
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	struct kimage *image = arg;
	relocate_new_kernel_t rnk;
	void *pal_addr = efi_get_pal_addr();
	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
	int ii;

	BUG_ON(!image);
	if (image->type == KEXEC_TYPE_CRASH) {
		crash_save_this_cpu();
		current->thread.ksp = (__u64)info->sw - 16;
	}

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();
	platform_kernel_launch_event();
	rnk = (relocate_new_kernel_t)&code_addr;
	(*rnk)(image->head, image->start, ia64_boot_param,
	       GRANULEROUNDDOWN((unsigned long) pal_addr));
	BUG();
}
Example #7
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int irq;

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	local_irq_disable();

	for_each_irq(irq) {
		struct irq_desc *desc = irq_desc + irq;

		if (desc->status & IRQ_INPROGRESS)
			desc->chip->eoi(irq);

		if (!(desc->status & IRQ_DISABLED))
			desc->chip->disable(irq);
	}

	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();
	crash_save_this_cpu(regs, crashing_cpu);
	crash_kexec_prepare_cpus(crashing_cpu);
	cpu_set(crashing_cpu, cpus_in_crash);
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}
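ppc_md.kexec_cpu_down is a per-platform hook: the first argument flags a crash shutdown and the second whether the caller is a secondary cpu, which is why the IPI callback in Example #4 passes (1, 1) while the crashing cpu here passes (1, 0). Its slot in machdep_calls looks roughly like this (a sketch, assuming that era's header layout):

struct machdep_calls {
	/* ... */
	/* Minimal per-cpu teardown before jumping into the kexec kernel. */
	void (*kexec_cpu_down)(int crash_shutdown, int secondary);
	/* ... */
};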
Example #8
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);


	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	local_irq_disable();

	for_each_irq(i) {
		struct irq_desc *desc = irq_desc + i;

		if (desc->status & IRQ_INPROGRESS)
			desc->chip->eoi(i);

		if (!(desc->status & IRQ_DISABLED))
			desc->chip->disable(i);
	}

	/*
	 * Call the registered shutdown routines safely.  Swap out
	 * __debugger_fault_handler and restore it on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	for (i = 0; crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
			/*
			 * Wait a little while to see if we get a
			 * machine check.
			 */
			__delay(200);
		}
	}
	__debugger_fault_handler = old_handler;

	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();
	crash_save_this_cpu(regs, crashing_cpu);
	crash_kexec_prepare_cpus(crashing_cpu);
	cpu_set(crashing_cpu, cpus_in_crash);
	crash_kexec_stop_spus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}
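The setjmp() protection above works because __debugger_fault_handler is temporarily pointed at a handler that longjmp()s back out of a faulting shutdown hook instead of oopsing. A minimal sketch of that pair, assuming the crash_shutdown_buf and handle_fault names referenced above and the kernel's own powerpc setjmp/longjmp (asm/setjmp.h):

static long crash_shutdown_buf[JMP_BUF_LEN];	/* JMP_BUF_LEN: assumed from asm/setjmp.h */

static int handle_fault(struct pt_regs *regs)
{
	/* Jump back to the setjmp() in default_machine_crash_shutdown(),
	 * making it return nonzero and skip the faulting handler. */
	longjmp(crash_shutdown_buf, 1);
	return 0;	/* not reached */
}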