Example #1
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	int cpu = dev->cpu;

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while ((!need_resched()) && cpu_online(cpu)) {
		ppc64_runlatch_off();
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}
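The idle_loop_prolog()/idle_loop_epilog() helpers are not included in this listing. Judging from the hand-rolled PURR accounting in Example #6 below, they plausibly bracket the polling loop as in the following sketch (hypothetical helper bodies modelled on Example #6, not the actual pseries cpuidle code):

/*
 * Hypothetical sketch, modelled on Example #6: snapshot the PURR on
 * entry, mark the thread idle for the hypervisor, and charge the PURR
 * delta to the lppaca wait-state counter on exit.
 */
static void idle_loop_prolog(unsigned long *in_purr)
{
	/* Tell the hypervisor this thread is idle. */
	get_lppaca()->idle = 1;
	*in_purr = mfspr(SPRN_PURR);
}

static void idle_loop_epilog(unsigned long in_purr)
{
	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->idle = 0;
}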
Example #2
static int default_idle(void)
{
	long oldval;
	unsigned int cpu = smp_processor_id();

	while (1) {
		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			while (!need_resched() && !cpu_is_offline(cpu)) {
				barrier();
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		schedule();
		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}
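Both loops above rely on the same contract around TIF_POLLING_NRFLAG: while the flag is set, the idle CPU promises to keep polling need_resched(), so a remote waker only has to set TIF_NEED_RESCHED and may skip the wakeup IPI; the barrier()/smp_mb() between clearing the flag and the final need_resched() check is what keeps that handshake from losing a wakeup. The following standalone userspace analogue sketches the two sides with C11 atomics and POSIX threads (illustration only; the names and the bounded "snooze" budget are made up, and this is not kernel code):

/*
 * Userspace analogue of the TIF_POLLING_NRFLAG handshake.
 * Idle side: advertise "polling", spin on the resched flag for a
 * bounded snooze budget, then clear "polling", fence, and re-check
 * before sleeping. Waker side: set the resched flag, fence, and send
 * the explicit wakeup only if the target is no longer polling.
 * Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool polling;	/* stands in for TIF_POLLING_NRFLAG */
static atomic_bool resched;	/* stands in for TIF_NEED_RESCHED   */
static atomic_bool ipi_sent;	/* stands in for the wakeup IPI     */

/* Relaxed accesses model the kernel's plain flag reads/writes; the
 * explicit fences below model barrier()/smp_mb(). */
#define FLAG_SET(f)	atomic_store_explicit((f), true, memory_order_relaxed)
#define FLAG_CLEAR(f)	atomic_store_explicit((f), false, memory_order_relaxed)
#define FLAG_TEST(f)	atomic_load_explicit((f), memory_order_relaxed)

static void *idle_cpu(void *unused)
{
	(void)unused;
	FLAG_SET(&polling);

	/* Snooze: poll for work for a bounded time (cf. smt_snooze_delay). */
	for (long i = 0; i < 100000; i++) {
		if (FLAG_TEST(&resched))
			goto out;		/* work arrived while polling */
		sched_yield();			/* HMT_low()/HMT_very_low() analogue */
	}

	/* Give up polling: mirrors clear_thread_flag() + smp_mb(). */
	FLAG_CLEAR(&polling);
	atomic_thread_fence(memory_order_seq_cst);

	/*
	 * Re-check after clearing the flag. Without the fences, the waker
	 * could still see polling == true (and skip the IPI) while this
	 * load misses its resched flag -- a lost wakeup.
	 */
	if (!FLAG_TEST(&resched)) {
		/* "cede_processor()": wait for the explicit wakeup. */
		while (!FLAG_TEST(&ipi_sent))
			sched_yield();
	}
out:
	FLAG_CLEAR(&polling);
	return NULL;
}

static void *waker(void *unused)
{
	(void)unused;
	FLAG_SET(&resched);
	atomic_thread_fence(memory_order_seq_cst);

	/* Only a CPU that is no longer polling needs the explicit wakeup. */
	if (!FLAG_TEST(&polling))
		FLAG_SET(&ipi_sent);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, idle_cpu, NULL);
	pthread_create(&b, NULL, waker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("idle CPU woken; wakeup IPI %s\n",
	       atomic_load(&ipi_sent) ? "sent" : "skipped");
	return 0;
}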
Example #3
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* Don't trace irqs off for idle */
				stop_critical_timings();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				start_critical_timings();

				/* Some power_save functions return with
				 * interrupts enabled, some don't.
				 */
				if (irqs_disabled())
					local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		if (cpu_should_die()) {
			sched_preempt_enable_no_resched();
			cpu_die();
		}
		schedule_preempt_disabled();
	}
}
Example #4
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		hrtimer_stop_sched_tick();

		while (!need_resched() && !need_resched_delayed() &&
				!cpu_should_die()) {
			ppc64_runlatch_off();
			/*
			 * We have irqs disabled here, so stop latency tracing
			 * at this point and restart it after we return:
			 */
			stop_critical_timing();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}

			touch_critical_timing();
		}

		HMT_medium();
		ppc64_runlatch_on();
		if (cpu_should_die())
			cpu_die();

		hrtimer_restart_sched_tick();
		__preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #5
static int pseries_dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				ppc64_runlatch_off();

				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay != 0 &&
				    __get_tb() > start_snooze) {
					HMT_medium();
					dedicated_idle_sleep(cpu);
				}

			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}
Example #6
static void pseries_dedicated_idle_sleep(void)
{ 
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned long in_purr, out_purr;

	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
	get_lppaca()->cpuctls_task_attrs = 1;
	in_purr = mfspr(SPRN_PURR);

	/*
	 * We come in with interrupts disabled, and need_resched()
	 * has been checked recently.  If we should poll for a little
	 * while, do so.
	 */
	if (*smt_snooze_delay) {
		start_snooze = get_tb() +
			*smt_snooze_delay * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while (get_tb() < start_snooze) {
			if (need_resched() || cpu_is_offline(cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
		if (need_resched() || cpu_is_offline(cpu))
			goto out;
	}

	cede_processor();

out:
	HMT_medium();
	get_lppaca()->cpuctls_task_attrs = 0;
	out_purr = mfspr(SPRN_PURR);
	get_lppaca()->wait_state_cycles += out_purr - in_purr;
	get_lppaca()->idle = 0;
}
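Examples #5, #6 and #11 all size the polling window the same way: the deadline is the current timebase value plus the configured delay in microseconds times tb_ticks_per_usec, and the loop keeps polling while get_tb() is below it. A minimal standalone sketch of that arithmetic follows (the 512 MHz timebase and 100 us delay are assumptions for illustration; the kernel derives tb_ticks_per_usec from the firmware-reported timebase frequency at boot):

/*
 * Standalone sketch of the snooze-deadline arithmetic used in
 * Examples #5, #6 and #11. Constants are illustrative assumptions.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t tb_freq_hz = 512000000ULL;	/* assumed timebase frequency */
	const uint64_t tb_ticks_per_usec = tb_freq_hz / 1000000;
	const uint64_t smt_snooze_delay_us = 100;	/* assumed snooze delay */

	uint64_t now_tb = 123456789ULL;			/* stand-in for get_tb() */
	uint64_t start_snooze = now_tb + smt_snooze_delay_us * tb_ticks_per_usec;

	/* The loops above keep polling while get_tb() < start_snooze. */
	printf("poll from tb=%" PRIu64 " until tb=%" PRIu64 " (%" PRIu64 " ticks)\n",
	       now_tb, start_snooze, start_snooze - now_tb);
	return 0;
}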
Example #7
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* Don't trace irqs off for idle */
				stop_critical_timings();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				start_critical_timings();

				/* Some power_save functions return with
				 * interrupts enabled, some don't.
				 */
				if (irqs_disabled())
					local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		if (cpu_should_die()) {
			sched_preempt_enable_no_resched();
			cpu_die();
		}
		schedule_preempt_disabled();
	}
}
Example #8
File: idle.c Project: 274914765/C
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
    if (ppc_md.idle_loop)
        ppc_md.idle_loop();    /* doesn't return */

    set_thread_flag(TIF_POLLING_NRFLAG);
    while (1) {
        tick_nohz_stop_sched_tick();
        while (!need_resched() && !cpu_should_die()) {
            ppc64_runlatch_off();

            if (ppc_md.power_save) {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                /*
                 * smp_mb is so clearing of TIF_POLLING_NRFLAG
                 * is ordered w.r.t. need_resched() test.
                 */
                smp_mb();
                local_irq_disable();

                /* check again after disabling irqs */
                if (!need_resched() && !cpu_should_die())
                    ppc_md.power_save();

                local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);

            } else {
                /*
                 * Go into low thread priority and possibly
                 * low power mode.
                 */
                HMT_low();
                HMT_very_low();
            }
        }

        HMT_medium();
        ppc64_runlatch_on();
        tick_nohz_restart_sched_tick();
        if (cpu_should_die())
            cpu_die();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
    }
}
Example #9
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}
Example #10
File: idle.c Project: Endika/linux
/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Called with interrupts hard disabled and no lazy irq pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;
	u32 idle_states = pnv_get_supported_cpuidle_states();

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = power9_idle_stop(psscr);

	} else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
		srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		srr1 = power7_idle_insn(PNV_THREAD_SLEEP);
	} else if (idle_states & OPAL_PM_NAP_ENABLED) {
		srr1 = power7_idle_insn(PNV_THREAD_NAP);
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
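On POWER9 (CPU_FTR_ARCH_300) the deepest state is requested by rewriting only the PSSCR fields the driver owns: the bits covered by pnv_deepest_stop_psscr_mask are cleared from the current register image and pnv_deepest_stop_psscr_val is OR-ed in. A standalone sketch of that read-modify-write follows (the mask and value are made up for illustration; the real ones are filled in at boot from the device tree):

/*
 * Sketch of the PSSCR read-modify-write used for the deepest stop
 * state above. Mask and value here are hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t psscr = 0x00000000003000ffULL;			/* pretend mfspr(SPRN_PSSCR) */
	const uint64_t deepest_stop_mask = 0x00000000000000ffULL;	/* hypothetical mask */
	const uint64_t deepest_stop_val  = 0x0000000000000003ULL;	/* hypothetical value */

	/* Keep every field the driver does not own, request the deepest level. */
	psscr = (psscr & ~deepest_stop_mask) | deepest_stop_val;

	printf("PSSCR to pass to power9_idle_stop(): 0x%016" PRIx64 "\n", psscr);
	return 0;
}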
Example #11
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;
	unsigned long start_snooze;
	long snooze = drv->states[0].target_residency;

	idle_loop_prolog(&in_purr, &kt_before);

	if (snooze) {
		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while ((snooze < 0) || (get_tb() < start_snooze)) {
			if (need_resched() || cpu_is_offline(dev->cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
	}

out:
	HMT_medium();
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
Example #12
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	u64 snooze_exit_time;

	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	snooze_exit_time = get_tb() + snooze_timeout;
	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		if (snooze_timeout_en && get_tb() > snooze_exit_time)
			break;
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}
Example #13
/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Called with interrupts hard disabled and no lazy irq pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;
	u32 idle_states = pnv_get_supported_cpuidle_states();
	u64 lpcr_val;

	/*
	 * We don't want to take decrementer interrupts while we are
	 * offline, so clear LPCR:PECE1. We keep PECE2 (and
	 * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
	 *
	 * If the CPU gets woken up by a special wakeup, ensure that
	 * the SLW engine sets LPCR with decrementer bit cleared, else
	 * the CPU will come back to the kernel due to a spurious
	 * wakeup.
	 */
	lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = power9_offline_stop(psscr);

	} else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
		   (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
		srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		srr1 = power7_idle_insn(PNV_THREAD_SLEEP);
	} else if (idle_states & OPAL_PM_NAP_ENABLED) {
		srr1 = power7_idle_insn(PNV_THREAD_NAP);
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	/*
	 * Re-enable decrementer interrupts in LPCR.
	 *
	 * Further, we want stop states to be woken up by decrementer
	 * for non-hotplug cases. So program the LPCR via stop api as
	 * well.
	 */
	lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

	return srr1;
}
Example #14
int dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca(), *ppaca;
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned int cpu = smp_processor_id();

	ppaca = &paca[cpu ^ 1];

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.xIdle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);
			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;
			while (!need_resched() && !cpu_is_offline(cpu)) {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay == 0 ||
				    __get_tb() < start_snooze)
					continue;

				HMT_medium();

				if (!(ppaca->lppaca.xIdle)) {
					local_irq_disable();

					/*
					 * We are about to sleep the thread
					 * and so won't be polling any
					 * more.
					 */
					clear_thread_flag(TIF_POLLING_NRFLAG);

					/*
					 * SMT dynamic mode. Cede will result
					 * in this thread going dormant, if the
					 * partner thread is still doing work.
					 * Thread wakes up if partner goes idle,
					 * an interrupt is presented, or a prod
					 * occurs.  Returning from the cede
					 * enables external interrupts.
					 */
					if (!need_resched())
						cede_processor();
					else
						local_irq_enable();
				} else {
					/*
					 * Give the HV an opportunity at the
					 * processor, since we are not doing
					 * any work.
					 */
					poll_pending();
				}
			}

			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		HMT_medium();
		lpaca->lppaca.xIdle = 0;
		schedule();
		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
	return 0;
}