Example #1
/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; interrupts are hard-disabled here before entering the guest
 * (note the WARN_ON(irqs_disabled()) below).
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * interrupts got enabled in between, so we
			 * are back at square 1
			 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
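The smp_mb() above pairs with the request-making side. A minimal sketch of that other side follows; the helper is illustrative and not taken from this file, and it only uses the generic kvm_make_request()/kvm_vcpu_kick() API:

/*
 * Illustrative requester-side sketch: publish the request bit first,
 * then kick.  kvm_vcpu_kick() wakes or IPIs the target vCPU so it
 * re-checks vcpu->requests before (re)entering the guest, which is
 * exactly the window the smp_mb() in kvmppc_prepare_to_enter() closes.
 */
static void ask_vcpu_to_flush(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);
}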
Example #2
static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}
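Only the acquire side is shown above; the release side is implied. A minimal sketch of a matching release, assuming the same __nmi_ipi_lock atomic (the helper name is illustrative):

/*
 * Illustrative release counterpart: order the critical section before
 * the store that frees the flag, then restore the interrupt state that
 * nmi_ipi_lock_start() saved.
 */
static void nmi_ipi_lock_end(unsigned long *flags)
{
	smp_mb();
	atomic_set(&__nmi_ipi_lock, 0);
	raw_local_irq_restore(*flags);
}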
Example #3
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);


	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();
	crash_save_cpu(regs, crashing_cpu);
	crash_kexec_prepare_cpus(crashing_cpu);
	cpu_set(crashing_cpu, cpus_in_crash);
#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
	crash_kexec_wait_realmode(crashing_cpu);
#endif

	machine_kexec_mask_interrupts();

	/*
	 * Call registered shutdown routines safely.  Swap out
	 * __debugger_fault_handler, and replace on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	crash_shutdown_cpu = smp_processor_id();
	for (i = 0; crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
	}
	crash_shutdown_cpu = -1;
	__debugger_fault_handler = old_handler;

	crash_kexec_stop_spus();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}
Example #4
static void kexec_prepare_cpus(void)
{
	wake_offline_cpus();
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/*
	 * Before removing MMU mappings make sure all CPUs have entered real
	 * mode:
	 */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	put_cpu();
}
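kexec_prepare_cpus_wait() itself is not shown above. A rough sketch of such a wait loop follows, with the caveat that how another CPU's paca is reached varies across kernel versions; the paca_ptrs[] accessor below is purely illustrative:

/*
 * Illustrative wait loop: spin until every other online CPU has
 * advanced at least to the requested kexec state.
 */
static void kexec_wait_sketch(int wait_state)
{
	int i;

	for_each_online_cpu(i) {
		if (i == smp_processor_id())
			continue;
		while (paca_ptrs[i]->kexec_state < wait_state)
			cpu_relax();
	}
}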
Example #5
void crash_ipi_callback(struct pt_regs *regs)
{
	static cpumask_t cpus_state_saved = CPU_MASK_NONE;

	int cpu = smp_processor_id();

	hard_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
		crash_save_cpu(regs, cpu);
		cpumask_set_cpu(cpu, &cpus_state_saved);
	}

	atomic_inc(&cpus_in_crash);
	smp_mb__after_atomic();

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 */
	while (!time_to_dump)
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
Example #6
static void wsp_h8_terminal_cmd(const char *cmd, int sz)
{
	hard_irq_disable();
	wsp_h8_puts(cmd, sz);
	/* should never return, but just in case */
	for (;;)
		continue;
}
Example #7
File: h8.c Project: 24hours/linux
static void wsp_h8_terminal_cmd(const char *cmd, int sz)
{
	hard_irq_disable();
	wsp_h8_puts(cmd, sz);
	/* should never return, but just in case */
	for (;;)
		continue;
}
Example #8
void save_processor_state(void)
{
	/*
	 * flush out all the special registers so we don't need
	 * to save them in the snapshot
	 */
	flush_all_to_thread(current);

#ifdef CONFIG_PPC64
	hard_irq_disable();
#endif

}
Example #9
static void check_and_cede_processor(void)
{
	/*
	 * Interrupts are soft-disabled at this point,
	 * but not hard disabled. So an interrupt might have
	 * occurred before entering NAP, and would be potentially
	 * lost (edge events, decrementer events, etc...) unless
	 * we first hard disable then check.
	 */
	hard_irq_disable();
	if (get_paca()->irq_happened == 0)
		cede_processor();
}
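On 64-bit powerpc, hard_irq_disable() also records PACA_IRQ_HARD_DIS in paca->irq_happened, so an event latched while only soft-disabled is not lost: it is replayed when interrupts are re-enabled. A minimal caller sketch built on that assumption (the function name is hypothetical):

/*
 * Hypothetical idle-entry sketch: cede only when nothing was latched,
 * and let the final local_irq_enable() replay whatever was caught
 * while interrupts were hard-disabled.
 */
static void idle_cede_sketch(void)
{
	check_and_cede_processor();
	local_irq_enable();
}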
Example #10
/* Return CPUs to OPAL before starting FW update */
static void flash_return_cpu(void *info)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	/* Disable IRQ */
	hard_irq_disable();

	/* Return the CPU to OPAL */
	opal_return_cpu();
}
Example #11
void rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	arch_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	arch_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}
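The "while (timebase) barrier();" loop spins until the receiving CPU has consumed the published value. A sketch of that receiving side, assuming the same timebase/timebase_lock variables; the function name is illustrative and the body is reconstructed rather than quoted:

/*
 * Illustrative receiver side: wait for the boot CPU to publish the
 * frozen timebase, load it into this CPU's timebase registers, then
 * clear the shared variable so the giver can thaw the timebase.
 */
static void rtas_take_timebase_sketch(void)
{
	while (!timebase)
		barrier();
	arch_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	arch_spin_unlock(&timebase_lock);
}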
Example #12
static void kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 *
	 * We need to release the cpus if we are ever going from an
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
	hard_irq_disable();
}
Example #13
void crash_ipi_callback(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	hard_irq_disable();
	if (!cpu_isset(cpu, cpus_in_crash))
		crash_save_cpu(regs, cpu);
	cpu_set(cpu, cpus_in_crash);

	/*
	 * Entered via soft-reset: either the kdump process was
	 * invoked using soft-reset, or the user activated it because
	 * some CPU did not respond to an IPI.
	 * In the soft-reset case a secondary CPU can enter this
	 * function twice: once via the IPI and once via soft-reset.
	 * Tell the kexec CPU that this CPU entered via soft-reset
	 * and is ready to go down.
	 */
	if (cpu_isset(cpu, cpus_in_sr)) {
		cpu_clear(cpu, cpus_in_sr);
		atomic_inc(&enter_on_soft_reset);
	}

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 * If not, soft-reset will be invoked to bring in the other CPUs.
	 */
	while (!cpu_isset(crashing_cpu, cpus_in_crash))
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
Example #14
static void cbe_power_save(void)
{
	unsigned long ctrl, thread_switch_control;

	/*
	 * We need to hard-disable interrupts; the local_irq_enable() done
	 * by our caller upon return will hard re-enable them.
	 */
	hard_irq_disable();

	ctrl = mfspr(SPRN_CTRLF);

	/* Enable DEC and EE interrupt request */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	switch (ctrl & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
			__func__);
		break;
	}
	mtspr(SPRN_TSC_CELL, thread_switch_control);

	/*
	 * Go into low thread priority; medium priority will be
	 * restored for us after wake-up.
	 */
	HMT_low();

	/*
	 * Atomically disable thread execution and the runlatch.
	 * External and decrementer exceptions are still handled while the
	 * thread is disabled, but they now enter cbe_system_reset_exception().
	 */
	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
	mtspr(SPRN_CTRLT, ctrl);
}
Example #15
/* This is the actual function which stops the CPU. It runs
 * in the context of a dedicated stopmachine workqueue. */
static void stop_cpu(struct work_struct *unused)
{
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	struct stop_machine_data *smdata = &idle;
	int cpu = smp_processor_id();
	int err;

	if (!active_cpus) {
		if (cpu == first_cpu(cpu_online_map))
			smdata = &active;
	} else {
		if (cpu_isset(cpu, *active_cpus))
			smdata = &active;
	}
	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (state != curstate) {
			curstate = state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				/* On multiple CPUs only a single error code
				 * is needed to tell that something failed. */
				err = smdata->fn(smdata->data);
				if (err)
					smdata->fnret = err;
				break;
			default:
				break;
			}
			ack_state();
		}
	} while (curstate != STOPMACHINE_EXIT);

	local_irq_enable();
}
Example #16
static void kexec_smp_down(void *arg)
{
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure our irqs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
	while (kexec_all_irq_disabled == 0)
		cpu_relax();
	mb(); /* make sure all irqs are disabled before this */
	hw_breakpoint_disable();
	/*
	 * Now every CPU has IRQs off, we can clear out any pending
	 * IPIs and be sure that no more will come in after this.
	 */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	kexec_smp_wait();
	/* NOTREACHED */
}
Example #17
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int irq;

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	for_each_irq(irq) {
		struct irq_desc *desc = irq_desc + irq;

		if (desc->status & IRQ_INPROGRESS)
			desc->chip->eoi(irq);

		if (!(desc->status & IRQ_DISABLED))
			desc->chip->disable(irq);
	}

	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();
	crash_save_cpu(regs, crashing_cpu);
	crash_kexec_prepare_cpus(crashing_cpu);
	cpu_set(crashing_cpu, cpus_in_crash);
	crash_kexec_stop_spus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}
Example #18
/* This gets called just before system reboots */
void opal_flash_term_callback(void)
{
	struct cpumask mask;

	if (update_flash_data.status != FLASH_IMG_READY)
		return;

	pr_alert("FLASH: Flashing new firmware\n");
	pr_alert("FLASH: Image is %u bytes\n", image_data.size);
	pr_alert("FLASH: Performing flash and reboot/shutdown\n");
	pr_alert("FLASH: This will take several minutes. Do not power off!\n");

	/* Small delay to help getting the above message out */
	msleep(500);

	/* Return secondary CPUs to firmware */
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_call_function_many(&mask,
				       flash_return_cpu, NULL, false);
	/* Hard disable interrupts */
	hard_irq_disable();
}
Example #19
static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}

			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for(;;);
}
Example #20
File: psr2.c Project: 08opt/linux
static void psr2_spin(void)
{
	hard_irq_disable();
	for (;;)
		continue;
}
Example #21
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();

	/*
	 * If we came in via system reset, wait a while for the secondary
	 * CPUs to enter.
	 */
	if (TRAP(regs) == 0x100)
		mdelay(PRIMARY_TIMEOUT);

	crash_kexec_prepare_cpus(crashing_cpu);

	crash_save_cpu(regs, crashing_cpu);

	time_to_dump = 1;

	crash_kexec_wait_realmode(crashing_cpu);

	machine_kexec_mask_interrupts();

	/*
	 * Call registered shutdown routines safely.  Swap out
	 * __debugger_fault_handler, and replace on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	crash_shutdown_cpu = smp_processor_id();
	for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
	}
	crash_shutdown_cpu = -1;
	__debugger_fault_handler = old_handler;

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}
Example #22
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * interrupts got enabled in between, so we
			 * are back at square 1
			 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}
#endif

		kvm_guest_enter();
		break;
	}

	return r;
}
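The "lazy EE magic" block above exists because, on PPC64, local_irq_disable() only soft-disables: MSR[EE] may still be set, and an interrupt taken in that window is latched in the paca rather than delivered. A condensed sketch of the check this block performs (the helper name is hypothetical):

/*
 * Hypothetical helper condensing the PPC64 check above: really clear
 * MSR[EE], then refuse guest entry if an interrupt was already latched
 * while we were only soft-disabled.
 */
static bool ready_for_guest_entry(void)
{
	hard_irq_disable();
	return !lazy_irq_pending();
}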