Example #1
static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked and
	 * check that no interrupt has occurred while we were
	 * soft-disabled.
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}
Example #2
static void cbe_power_save(void)
{
	unsigned long ctrl, thread_switch_control;

	/* Ensure our interrupt state is properly tracked */
	if (!prep_irq_for_idle())
		return;

	ctrl = mfspr(SPRN_CTRLF);

	/* Enable DEC and EE interrupt request */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	switch (ctrl & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
			__func__);
		break;
	}
	mtspr(SPRN_TSC_CELL, thread_switch_control);

	/*
	 * Go into low thread priority; medium priority will be
	 * restored for us after wake-up.
	 */
	HMT_low();

	/*
	 * Atomically disable thread execution and runlatch.
	 * External and Decrementer exceptions are still handled when
	 * the thread is disabled, but now enter through
	 * cbe_system_reset_exception().
	 */
	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
	mtspr(SPRN_CTRLT, ctrl);

	/* Re-enable interrupts in MSR */
	__hard_irq_enable();
}
Example #3
static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		/*
		 * Latency hint 2: request a deeper cede state while the
		 * CPU is inactive (interpretation of the hint value is
		 * an assumption here).
		 */
		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
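			/*
			 * prep_irq_for_idle() fails when an interrupt
			 * arrived while we were soft-disabled; enable
			 * interrupts briefly so it is replayed, then
			 * disable and try again.
			 */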
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}

			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for(;;);
}
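
Common to all three examples is the contract around prep_irq_for_idle(): it is called with interrupts soft-disabled, returns false if an interrupt is already pending (the caller must let it be replayed and retry), and on success marks interrupts soft-enabled while leaving them hard-disabled, with the low-power entry itself expected to hard-enable EE. Below is a minimal sketch of that shared pattern; enter_low_power() is a hypothetical stand-in for the platform-specific entry (H_CEDE, the CTRLT write, extended cede), not a real kernel function:

static void idle_entry_pattern(void)
{
	/* Caller arrives with interrupts soft-disabled. */
	while (!prep_irq_for_idle()) {
		/*
		 * An interrupt is already pending: enable interrupts
		 * briefly so it is replayed, then disable and retry.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	/*
	 * Interrupts are now marked soft-enabled but still
	 * hard-disabled; the low-power entry (or the caller,
	 * as in cbe_power_save()) is expected to hard-enable EE.
	 */
	enter_low_power();	/* hypothetical platform-specific hook */
}

Example #1 relies on H_CEDE to do the hard-enable and only WARNs (under CONFIG_TRACE_IRQFLAGS) if it did not; Example #2 does it explicitly with __hard_irq_enable(); Example #3 shows the retry loop in full.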