Example #1
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	int cpu = dev->cpu;

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while ((!need_resched()) && cpu_online(cpu)) {
		ppc64_runlatch_off();
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}
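For context, an .enter handler like snooze_loop above is exposed through a cpuidle state table. A minimal sketch of that wiring follows; the table name, state names, and latency numbers are illustrative placeholders, not taken from this listing.

/*
 * Hypothetical state table: how a driver might expose snooze_loop as
 * its shallowest cpuidle state. All numbers are placeholders.
 */
static struct cpuidle_state example_idle_states[] = {
	{
		.name			= "snooze",
		.desc			= "snooze",
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.exit_latency		= 0,
		.target_residency	= 0,
		.enter			= snooze_loop,
	},
};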
Example #2
static void iseries_shared_idle(void)
{
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched() && !hvlpevent_is_pending()) {
			local_irq_disable();
			ppc64_runlatch_off();

			/* Recheck with irqs off */
			if (!need_resched() && !hvlpevent_is_pending())
				yield_shared_processor();

			HMT_medium();
			local_irq_enable();
		}

		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();

		if (hvlpevent_is_pending())
			process_iSeries_events();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #3
static void iseries_dedicated_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		tick_nohz_stop_sched_tick(1);
		if (!need_resched()) {
			while (!need_resched()) {
				ppc64_runlatch_off();
				HMT_low();

				if (hvlpevent_is_pending()) {
					HMT_medium();
					ppc64_runlatch_on();
					process_iSeries_events();
				}
			}

			HMT_medium();
		}

		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
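The two iSeries idle loops above are selected once at platform setup, depending on whether the partition runs on shared or dedicated processors. Roughly, the setup code does the following (paraphrased sketch; the printk strings are illustrative):

	/* Sketch: platform setup installs the idle loop via the machdep hook */
	if (get_lppaca()->shared_proc) {
		ppc_md.idle_loop = iseries_shared_idle;
		printk(KERN_DEBUG "Using shared processor idle loop\n");
	} else {
		ppc_md.idle_loop = iseries_dedicated_idle;
		printk(KERN_DEBUG "Using dedicated idle loop\n");
	}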
Example #4
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* Don't trace irqs off for idle */
				stop_critical_timings();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				start_critical_timings();

				/* Some power_save functions return with
				 * interrupts enabled, some don't.
				 */
				if (irqs_disabled())
					local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		if (cpu_should_die()) {
			sched_preempt_enable_no_resched();
			cpu_die();
		}
		schedule_preempt_disabled();
	}
}
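The TIF_POLLING_NRFLAG protocol in cpu_idle() matters because the scheduler's wakeup path skips the reschedule IPI when the target CPU advertises that it is polling need_resched(); the idle side must therefore clear the flag and re-check need_resched() after a full barrier before sleeping. A schematic of the waker side, simplified rather than the exact kernel code:

/*
 * Schematic waker: if the remote idle task is polling, setting
 * TIF_NEED_RESCHED is enough; otherwise an IPI is needed to kick
 * the CPU out of its low-power state.
 */
static void example_resched_idle_cpu(struct task_struct *idle_task, int cpu)
{
	set_tsk_thread_flag(idle_task, TIF_NEED_RESCHED);
	smp_mb();	/* pairs with the smp_mb() in cpu_idle() */
	if (!test_tsk_thread_flag(idle_task, TIF_POLLING_NRFLAG))
		smp_send_reschedule(cpu);
}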
Example #5
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		hrtimer_stop_sched_tick();

		while (!need_resched() && !need_resched_delayed() &&
				!cpu_should_die()) {
			ppc64_runlatch_off();
			/*
			 * We have irqs disabled here, so stop latency tracing
			 * at this point and restart it after we return:
			 */
			stop_critical_timing();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}

			touch_critical_timing();
		}

		HMT_medium();
		ppc64_runlatch_on();
		if (cpu_should_die())
			cpu_die();

		hrtimer_restart_sched_tick();
		__preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #6
static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	ppc64_runlatch_off();
	power7_idle();
	ppc64_runlatch_on();
	return index;
}
Example #7
static inline void idle_loop_prolog(unsigned long *in_purr)
{
	ppc64_runlatch_off();
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}
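The matching epilog does not appear in this listing. From the callers above and the inline accounting in Example #9 below, it plausibly looks like this (a sketch, not the verbatim kernel function):

/* Sketch: account the PURR cycles spent idle, then clear the idle flag */
static inline void idle_loop_epilog(unsigned long in_purr)
{
	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->idle = 0;
}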
Example #8
static int pseries_dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				ppc64_runlatch_off();

				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay != 0 &&
				    __get_tb() > start_snooze) {
					HMT_medium();
					dedicated_idle_sleep(cpu);
				}

			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}
Example #9
static void pseries_dedicated_idle_sleep(void)
{ 
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned long in_purr, out_purr;

	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
	get_lppaca()->cpuctls_task_attrs = 1;
	in_purr = mfspr(SPRN_PURR);

	/*
	 * We come in with interrupts disabled, and need_resched()
	 * has been checked recently.  If we should poll for a little
	 * while, do so.
	 */
	if (*smt_snooze_delay) {
		start_snooze = get_tb() +
			*smt_snooze_delay * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while (get_tb() < start_snooze) {
			if (need_resched() || cpu_is_offline(cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
		if (need_resched() || cpu_is_offline(cpu))
			goto out;
	}

	cede_processor();

out:
	HMT_medium();
	get_lppaca()->cpuctls_task_attrs = 0;
	out_purr = mfspr(SPRN_PURR);
	get_lppaca()->wait_state_cycles += out_purr - in_purr;
	get_lppaca()->idle = 0;
}
Example #10
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* Don't trace irqs off for idle */
				stop_critical_timings();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				start_critical_timings();

				/* Some power_save functions return with
				 * interrupts enabled, some don't.
				 */
				if (irqs_disabled())
					local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		if (cpu_should_die()) {
			sched_preempt_enable_no_resched();
			cpu_die();
		}
		schedule_preempt_disabled();
	}
}
Example #11
File: idle.c Project: 274914765/C
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
    if (ppc_md.idle_loop)
        ppc_md.idle_loop();    /* doesn't return */

    set_thread_flag(TIF_POLLING_NRFLAG);
    while (1) {
        tick_nohz_stop_sched_tick();
        while (!need_resched() && !cpu_should_die()) {
            ppc64_runlatch_off();

            if (ppc_md.power_save) {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                /*
                 * smp_mb is so clearing of TIF_POLLING_NRFLAG
                 * is ordered w.r.t. need_resched() test.
                 */
                smp_mb();
                local_irq_disable();

                /* check again after disabling irqs */
                if (!need_resched() && !cpu_should_die())
                    ppc_md.power_save();

                local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);

            } else {
                /*
                 * Go into low thread priority and possibly
                 * low power mode.
                 */
                HMT_low();
                HMT_very_low();
            }
        }

        HMT_medium();
        ppc64_runlatch_on();
        tick_nohz_restart_sched_tick();
        if (cpu_should_die())
            cpu_die();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
    }
}
Example #12
static int pseries_shared_idle(void)
{
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		while (!need_resched() && !cpu_is_offline(cpu)) {
			local_irq_disable();
			ppc64_runlatch_off();

			/*
			 * Yield the processor to the hypervisor.  We return if
			 * an external interrupt occurs (which are driven prior
			 * to returning here) or if a prod occurs from another
			 * processor. When returning here, external interrupts
			 * are enabled.
			 *
			 * Check need_resched() again with interrupts disabled
			 * to avoid a race.
			 */
			if (!need_resched())
				cede_processor();
			else
				local_irq_enable();

			HMT_medium();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}
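cede_processor() itself is a thin wrapper around the H_CEDE hypervisor call; in kernels of this era it was essentially the following (sketch):

/* Sketch: yield this virtual processor to the hypervisor via H_CEDE */
static inline long cede_processor(void)
{
	return plpar_hcall_norets(H_CEDE);
}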
Example #13
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;
	ktime_t kt_before;

	idle_loop_prolog(&in_purr, &kt_before);
	get_lppaca()->donate_dedicated_cpu = 1;

	ppc64_runlatch_off();
	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
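check_and_cede_processor() is not shown in this listing. Roughly, it verifies that the lazy irq-disable bookkeeping is consistent and that no interrupt arrived while the CPU was soft-disabled, and only then cedes. A hedged sketch, assuming the prep_irq_for_idle() helper of the same era; treat the exact shape as an assumption:

/*
 * Sketch: only cede if it is safe to do so, i.e. no interrupt is
 * already pending behind the soft-disable flag.
 */
static void check_and_cede_processor(void)
{
	if (prep_irq_for_idle())
		cede_processor();
}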
Example #14
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}
Example #15
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	ppc64_runlatch_off();
	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;

	idle_loop_epilog(in_purr);

	return index;
}
Example #16
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	u64 snooze_exit_time;

	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	snooze_exit_time = get_tb() + snooze_timeout;
	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		if (snooze_timeout_en && get_tb() > snooze_exit_time)
			break;
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}
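The snooze_exit_time bound above is typically computed once at driver initialization from the target residency of the next-deeper state, so polling never outlasts the point where a real sleep state would have paid off. An illustrative setup (the init function name is an assumption):

static u64 snooze_timeout;
static bool snooze_timeout_en;

/* Illustrative: cap the polling loop at the next state's target
 * residency, converted from microseconds to timebase ticks. */
static void snooze_timeout_init(struct cpuidle_driver *drv)
{
	if (drv->state_count > 1) {
		snooze_timeout_en = true;
		snooze_timeout = drv->states[1].target_residency *
				 tb_ticks_per_usec;
	}
}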
Example #17
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;
	unsigned long start_snooze;
	long snooze = drv->states[0].target_residency;

	idle_loop_prolog(&in_purr, &kt_before);

	if (snooze) {
		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while ((snooze < 0) || (get_tb() < start_snooze)) {
			if (need_resched() || cpu_is_offline(dev->cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
	}

out:
	HMT_medium();
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
Example #18
static void pnv_smp_cpu_kill_self(void)
{
	unsigned int cpu;

	/* Standard hot unplug procedure */
	local_irq_disable();
	idle_task_exit();
	current->active_mm = NULL; /* for sanity */
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();

	/* We don't want to take decrementer interrupts while we are offline,
	 * so clear LPCR:PECE1. We keep PECE2 enabled.
	 */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
	while (!generic_check_cpu_restart(cpu)) {
		ppc64_runlatch_off();
		power7_nap(1);
		ppc64_runlatch_on();

		/* Reenable IRQs briefly to clear the IPI that woke us */
		local_irq_enable();
		local_irq_disable();
		mb();

		if (cpu_core_split_required())
			continue;

		if (!generic_check_cpu_restart(cpu))
			DBG("CPU%d Unexpected exit while offline !\n", cpu);
	}
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
	DBG("CPU%d coming online...\n", cpu);
}
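The generic_set_cpu_dead()/generic_check_cpu_restart() pair used above parks the dead CPU on a per-cpu state word that the onlining path later flips back; paraphrased sketch of the helpers from arch/powerpc/kernel/smp.c:

/* Sketch: per-cpu hotplug state polled by the offline loop above */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}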