Example #1
static void iseries_dedicated_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		tick_nohz_stop_sched_tick(1);
		if (!need_resched()) {
			while (!need_resched()) {
				ppc64_runlatch_off();
				HMT_low();

				if (hvlpevent_is_pending()) {
					HMT_medium();
					ppc64_runlatch_on();
					process_iSeries_events();
				}
			}

			HMT_medium();
		}

		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
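The HMT_low()/HMT_very_low()/HMT_medium() calls that recur throughout these idle loops are SMT thread-priority hints. As a point of reference, they are normally just special no-op forms of the "or" instruction that the core decodes as priority requests; the sketch below paraphrases the powerpc kernel headers, and the exact definitions vary between kernel versions.

/* Paraphrased sketch of the SMT priority hints (see asm/processor.h in the
 * powerpc tree; wording and the full set of priorities differ by version).
 * Each macro emits a no-op that the hardware reads as a priority change. */
#define HMT_very_low()	asm volatile("or 31,31,31	# very low priority")
#define HMT_low()	asm volatile("or 1,1,1		# low priority")
#define HMT_medium()	asm volatile("or 2,2,2		# medium priority")
#define HMT_high()	asm volatile("or 3,3,3		# high priority")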
Example #2
static int pseries_dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				ppc64_runlatch_off();

				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay != 0 &&
				    __get_tb() > start_snooze) {
					HMT_medium();
					dedicated_idle_sleep(cpu);
				}

			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}
Example #3
static void pseries_dedicated_idle_sleep(void)
{ 
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned long in_purr, out_purr;

	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
	get_lppaca()->cpuctls_task_attrs = 1;
	in_purr = mfspr(SPRN_PURR);

	/*
	 * We come in with interrupts disabled, and need_resched()
	 * has been checked recently.  If we should poll for a little
	 * while, do so.
	 */
	if (*smt_snooze_delay) {
		start_snooze = get_tb() +
			*smt_snooze_delay * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while (get_tb() < start_snooze) {
			if (need_resched() || cpu_is_offline(cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
		if (need_resched() || cpu_is_offline(cpu))
			goto out;
	}

	cede_processor();

out:
	HMT_medium();
	get_lppaca()->cpuctls_task_attrs = 0;
	out_purr = mfspr(SPRN_PURR);
	get_lppaca()->wait_state_cycles += out_purr - in_purr;
	get_lppaca()->idle = 0;
}
Example #4
static int default_idle(void)
{
	long oldval;
	unsigned int cpu = smp_processor_id();

	while (1) {
		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			while (!need_resched() && !cpu_is_offline(cpu)) {
				barrier();
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		schedule();
		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}
Example #5
static void iseries_shared_idle(void)
{
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched() && !hvlpevent_is_pending()) {
			local_irq_disable();
			ppc64_runlatch_off();

			/* Recheck with irqs off */
			if (!need_resched() && !hvlpevent_is_pending())
				yield_shared_processor();

			HMT_medium();
			local_irq_enable();
		}

		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();

		if (hvlpevent_is_pending())
			process_iSeries_events();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #6
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	int cpu = dev->cpu;

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while ((!need_resched()) && cpu_online(cpu)) {
		ppc64_runlatch_off();
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}
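This snooze_loop brackets its polling with idle_loop_prolog()/idle_loop_epilog(). Going by the open-coded PURR accounting in Example #3, those helpers plausibly reduce to the sketch below; the bodies are an assumption modelled on that example, not the verbatim pseries driver.

/* Hypothetical prolog/epilog pair, modelled on the open-coded bookkeeping in
 * Example #3: flag the vcpu as idle for the hypervisor, sample PURR on entry,
 * and charge the elapsed PURR cycles to wait_state_cycles on exit. */
static inline void idle_loop_prolog(unsigned long *in_purr)
{
	get_lppaca()->idle = 1;		/* hint to the HV: nothing to run here */
	*in_purr = mfspr(SPRN_PURR);
}

static inline void idle_loop_epilog(unsigned long in_purr)
{
	unsigned long out_purr = mfspr(SPRN_PURR);

	get_lppaca()->wait_state_cycles += out_purr - in_purr;
	get_lppaca()->idle = 0;
}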
Example #7
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* Don't trace irqs off for idle */
				stop_critical_timings();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				start_critical_timings();

				/* Some power_save functions return with
				 * interrupts enabled, some don't.
				 */
				if (irqs_disabled())
					local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		if (cpu_should_die()) {
			sched_preempt_enable_no_resched();
			cpu_die();
		}
		schedule_preempt_disabled();
	}
}
Example #8
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		hrtimer_stop_sched_tick();

		while (!need_resched() && !need_resched_delayed() &&
				!cpu_should_die()) {
			ppc64_runlatch_off();
			/*
			 * We have irqs disabled here, so stop latency tracing
			 * at this point and restart it after we return:
			 */
			stop_critical_timing();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}

			touch_critical_timing();
		}

		HMT_medium();
		ppc64_runlatch_on();
		if (cpu_should_die())
			cpu_die();

		hrtimer_restart_sched_tick();
		__preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #9
static int iSeries_idle(void)
{
	struct paca_struct *lpaca;
	long oldval;
	unsigned long CTRL;

	/* ensure iSeries run light will be out when idle */
	clear_thread_flag(TIF_RUN_LIGHT);
	CTRL = mfspr(CTRLF);
	CTRL &= ~RUNLATCH;
	mtspr(CTRLT, CTRL);

	lpaca = get_paca();

	while (1) {
		if (lpaca->lppaca.xSharedProc) {
			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
				process_iSeries_events();
			if (!need_resched())
				yield_shared_processor();
		} else {
			oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

			if (!oldval) {
				set_thread_flag(TIF_POLLING_NRFLAG);

				while (!need_resched()) {
					HMT_medium();
					if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
						process_iSeries_events();
					HMT_low();
				}

				HMT_medium();
				clear_thread_flag(TIF_POLLING_NRFLAG);
			} else {
				set_need_resched();
			}
		}

		schedule();
	}

	return 0;
}
Example #10
void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();
}
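On a shared-processor partition the unlock-wait loop above does more than spin at low priority: __spin_yield() asks the hypervisor to run the lock holder's virtual CPU instead. The sketch below paraphrases the usual powerpc lock-yield logic; the lock-word encoding, the lppaca accessor and the H_CONFER arguments are assumptions that vary across kernel versions.

/* Illustrative sketch of yielding to a preempted lock holder (paraphrasing
 * the powerpc lock-yield code; details differ by kernel version). The lock
 * word is assumed to encode the holder's cpu number; if that vcpu is not
 * currently dispatched, confer our timeslice to it with H_CONFER. */
void __spin_yield(raw_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;				/* lock already free */
	holder_cpu = lock_value & 0xffff;	/* assumed holder encoding */
	yield_count = lppaca_of(holder_cpu).yield_count;
	if ((yield_count & 1) == 0)
		return;				/* holder vcpu is running */
	rmb();
	if (lock->slock != lock_value)
		return;				/* lock changed hands meanwhile */
	plpar_hcall_norets(H_CONFER,
			   get_hard_smp_processor_id(holder_cpu), yield_count);
}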
Example #11
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;
	unsigned long start_snooze;
	long snooze = drv->states[0].target_residency;

	idle_loop_prolog(&in_purr, &kt_before);

	if (snooze) {
		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while ((snooze < 0) || (get_tb() < start_snooze)) {
			if (need_resched() || cpu_is_offline(dev->cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
	}

out:
	HMT_medium();
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
Example #12
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				smp_mb();
				local_irq_disable();

				/* Don't trace irqs off for idle */
				stop_critical_timings();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				start_critical_timings();

				if (irqs_disabled())
					local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		if (cpu_should_die()) {
			sched_preempt_enable_no_resched();
			cpu_die();
		}
		schedule_preempt_disabled();
	}
}
Example #13
File: idle.c  Project: 274914765/C
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
    if (ppc_md.idle_loop)
        ppc_md.idle_loop();    /* doesn't return */

    set_thread_flag(TIF_POLLING_NRFLAG);
    while (1) {
        tick_nohz_stop_sched_tick();
        while (!need_resched() && !cpu_should_die()) {
            ppc64_runlatch_off();

            if (ppc_md.power_save) {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                /*
                 * smp_mb is so clearing of TIF_POLLING_NRFLAG
                 * is ordered w.r.t. need_resched() test.
                 */
                smp_mb();
                local_irq_disable();

                /* check again after disabling irqs */
                if (!need_resched() && !cpu_should_die())
                    ppc_md.power_save();

                local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);

            } else {
                /*
                 * Go into low thread priority and possibly
                 * low power mode.
                 */
                HMT_low();
                HMT_very_low();
            }
        }

        HMT_medium();
        ppc64_runlatch_on();
        tick_nohz_restart_sched_tick();
        if (cpu_should_die())
            cpu_die();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
    }
}
Example #14
static void cbe_idle(void)
{
	unsigned long ctrl;

	/* Why do we do that on every idle ? Couldn't that be done once for
	 * all or do we lose the state some way ? Also, the pm_control
	 * register setting, that can't be set once at boot ? We really want
	 * to move that away in order to implement a simple powersave
	 */
	cbe_enable_pause_zero();

	while (1) {
		if (!need_resched()) {
			local_irq_disable();
			while (!need_resched()) {
				/* go into low thread priority */
				HMT_low();

				/*
				 * atomically disable thread execution
				 * and runlatch.
				 * External and Decrementer exceptions
				 * are still handled when the thread
				 * is disabled but now enter in
				 * cbe_system_reset_exception()
				 */
				ctrl = mfspr(SPRN_CTRLF);
				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
				mtspr(SPRN_CTRLT, ctrl);
			}
			/* restore thread prio */
			HMT_medium();
			local_irq_enable();
		}

		/*
		 * turn runlatch on again before scheduling the
		 * process we just woke up
		 */
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #15
static int pseries_shared_idle(void)
{
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		while (!need_resched() && !cpu_is_offline(cpu)) {
			local_irq_disable();
			ppc64_runlatch_off();

			/*
			 * Yield the processor to the hypervisor.  We return if
			 * an external interrupt occurs (which are driven prior
			 * to returning here) or if a prod occurs from another
			 * processor. When returning here, external interrupts
			 * are enabled.
			 *
			 * Check need_resched() again with interrupts disabled
			 * to avoid a race.
			 */
			if (!need_resched())
				cede_processor();
			else
				local_irq_enable();

			HMT_medium();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}
Example #16
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;

	idle_loop_epilog(in_purr);

	return index;
}
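Both cede paths end up handing the virtual processor back to the hypervisor via the H_CEDE hypercall; the vcpu sleeps until an interrupt or a prod from another processor wakes it. A minimal sketch of the underlying wrapper, assuming the usual plpar_wrappers.h style helper:

/* Minimal sketch: ceding the vcpu is an H_CEDE hypercall (as in the
 * plpar_wrappers.h helpers). check_and_cede_processor(), used above,
 * additionally re-checks for pending interrupts before really ceding. */
static inline long cede_processor(void)
{
	return plpar_hcall_norets(H_CEDE);
}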
Example #17
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;
	ktime_t kt_before;

	idle_loop_prolog(&in_purr, &kt_before);
	get_lppaca()->donate_dedicated_cpu = 1;

	ppc64_runlatch_off();
	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
Example #18
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}
Example #19
static void cbe_idle(void)
{
	unsigned long ctrl;

	cbe_enable_pause_zero();

	while (1) {
		if (!need_resched()) {
			local_irq_disable();
			while (!need_resched()) {
				/* go into low thread priority */
				HMT_low();

				/*
				 * atomically disable thread execution
				 * and runlatch.
				 * External and Decrementer exceptions
				 * are still handled when the thread
				 * is disabled but now enter in
				 * cbe_system_reset_exception()
				 */
				ctrl = mfspr(SPRN_CTRLF);
				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
				mtspr(SPRN_CTRLT, ctrl);
			}
			/* restore thread prio */
			HMT_medium();
			local_irq_enable();
		}

		/*
		 * turn runlatch on again before scheduling the
		 * process we just woke up
		 */
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #20
File: idle.c  Project: Endika/linux
/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * interrupts hard disabled and no lazy irq pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;
	u32 idle_states = pnv_get_supported_cpuidle_states();

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = power9_idle_stop(psscr);

	} else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
		srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		srr1 = power7_idle_insn(PNV_THREAD_SLEEP);
	} else if (idle_states & OPAL_PM_NAP_ENABLED) {
		srr1 = power7_idle_insn(PNV_THREAD_NAP);
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
Example #21
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	u64 snooze_exit_time;

	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	snooze_exit_time = get_tb() + snooze_timeout;
	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		if (snooze_timeout_en && get_tb() > snooze_exit_time)
			break;
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}
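Enter routines like these are hooked into a cpuidle driver through its state table; the target_residency recorded there is what Example #11 reads back as its snooze delay. A hypothetical fragment, with purely illustrative latency and residency values:

/* Hypothetical cpuidle state table wiring snooze_loop and dedicated_cede_loop
 * into a driver (field names from <linux/cpuidle.h>; the numeric values are
 * illustrative, not those of any real platform). */
static struct cpuidle_state my_dedicated_states[] = {
	{
		.name = "snooze",
		.desc = "poll at low SMT priority",
		.exit_latency = 0,		/* us */
		.target_residency = 0,		/* us */
		.enter = snooze_loop,
	},
	{
		.name = "CEDE",
		.desc = "cede the vcpu to the hypervisor",
		.exit_latency = 10,		/* illustrative, us */
		.target_residency = 100,	/* illustrative, us */
		.enter = dedicated_cede_loop,
	},
};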
Example #22
/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * interrupts hard disabled and no lazy irq pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;
	u32 idle_states = pnv_get_supported_cpuidle_states();
	u64 lpcr_val;

	/*
	 * We don't want to take decrementer interrupts while we are
	 * offline, so clear LPCR:PECE1. We keep PECE2 (and
	 * LPCR_PECE_HVEE on P9) enabled as to let IPIs in.
	 *
	 * If the CPU gets woken up by a special wakeup, ensure that
	 * the SLW engine sets LPCR with decrementer bit cleared, else
	 * the CPU will come back to the kernel due to a spurious
	 * wakeup.
	 */
	lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = power9_offline_stop(psscr);

	} else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
		   (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
		srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		srr1 = power7_idle_insn(PNV_THREAD_SLEEP);
	} else if (idle_states & OPAL_PM_NAP_ENABLED) {
		srr1 = power7_idle_insn(PNV_THREAD_NAP);
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	/*
	 * Re-enable decrementer interrupts in LPCR.
	 *
	 * Further, we want stop states to be woken up by decrementer
	 * for non-hotplug cases. So program the LPCR via stop api as
	 * well.
	 */
	lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

	return srr1;
}
Example #23
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus = smp_num_cpus-1;
	int timeout;

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION, 0, 0);

	/* Wait for response */
	timeout = 8000000;
	while (atomic_read(&data.started) != cpus) {
		HMT_low();
		if (--timeout == 0) {
			printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
			       smp_processor_id(), atomic_read(&data.started));
#ifdef CONFIG_XMON
			xmon(0);
#endif
#ifdef CONFIG_KDB
			kdb(KDB_REASON_CALL,0, (kdb_eframe_t) 0);
#endif

#ifdef CONFIG_PPC_ISERIES
			HvCall_terminateMachineSrc();
#endif
			goto out;
		}
		barrier();
		udelay(1);
	}

	if (wait) {
		timeout = 1000000;
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (--timeout == 0) {
				printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
				       smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
#ifdef CONFIG_PPC_ISERIES
				HvCall_terminateMachineSrc();
#endif
				goto out;
			}
			barrier();
			udelay(1);
		}
	}
	ret = 0;

 out:
	call_data = NULL;
	HMT_medium();
	spin_unlock_bh(&call_lock);
	return ret;
}
Example #24
int dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca(), *ppaca;
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned int cpu = smp_processor_id();

	ppaca = &paca[cpu ^ 1];

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.xIdle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);
			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;
			while (!need_resched() && !cpu_is_offline(cpu)) {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay == 0 ||
				    __get_tb() < start_snooze)
					continue;

				HMT_medium();

				if (!(ppaca->lppaca.xIdle)) {
					local_irq_disable();

					/*
					 * We are about to sleep the thread
					 * and so won't be polling any
					 * more.
					 */
					clear_thread_flag(TIF_POLLING_NRFLAG);

					/*
					 * SMT dynamic mode. Cede will result
					 * in this thread going dormant, if the
					 * partner thread is still doing work.
					 * Thread wakes up if partner goes idle,
					 * an interrupt is presented, or a prod
					 * occurs.  Returning from the cede
					 * enables external interrupts.
					 */
					if (!need_resched())
						cede_processor();
					else
						local_irq_enable();
				} else {
					/*
					 * Give the HV an opportunity at the
					 * processor, since we are not doing
					 * any work.
					 */
					poll_pending();
				}
			}

			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		HMT_medium();
		lpaca->lppaca.xIdle = 0;
		schedule();
		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
	return 0;
}
Example #25
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
		       int wait)
{ 
	struct call_data_struct data;
	int ret = -1, cpus;
	u64 timeout;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	/* Must grab online cpu count with preempt disabled, otherwise
	 * it can change. */
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		ret = 0;
		goto out;
	}

	call_data = &data;
	smp_wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

	/* Wait for response */
	while (atomic_read(&data.started) != cpus) {
		HMT_low();
		if (get_tb() >= timeout) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			debugger(NULL);
			goto out;
		}
	}

	if (wait) {
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (get_tb() >= timeout) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				debugger(NULL);
				goto out;
			}
		}
	}

	ret = 0;

 out:
	call_data = NULL;
	HMT_medium();
	spin_unlock(&call_lock);
	return ret;
}
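For reference, a caller of this four-argument smp_call_function() looks roughly like the sketch below. bump_remote and remote_hits are hypothetical names; per the comment above, the callback must be fast and non-blocking, and the call must not be made with interrupts disabled.

/* Hypothetical caller of the four-argument smp_call_function() shown above:
 * run a quick callback on every other online CPU and wait for completion. */
static atomic_t remote_hits = ATOMIC_INIT(0);

static void bump_remote(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs in IPI context on each CPU */
}

static void poke_other_cpus(void)
{
	/* nonatomic = 0 (unused), wait = 1: block until every CPU has run it */
	if (smp_call_function(bump_remote, &remote_hits, 0, 1) != 0)
		printk("poke_other_cpus: other cpus did not respond\n");
}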
Example #26
/*
 * These functions send a 'generic call function' IPI to other online
 * CPUS in the system.
 *
 * [SUMMARY] Run a function on other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
 * <map> is a cpu map of the cpus to send IPI to.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int __smp_call_function_map(void (*func) (void *info), void *info,
				   int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int ret = -1, num_cpus;
	int cpu;
	u64 timeout;

	if (unlikely(smp_ops == NULL))
		return ret;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	/* remove 'self' from the map */
	if (cpu_isset(smp_processor_id(), map))
		cpu_clear(smp_processor_id(), map);

	/* sanity check the map, remove any non-online processors. */
	cpus_and(map, map, cpu_online_map);

	num_cpus = cpus_weight(map);
	if (!num_cpus)
		goto done;

	call_data = &data;
	smp_wmb();
	/* Send a message to all CPUs in the map */
	for_each_cpu_mask(cpu, map)
		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);

	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

	/* Wait for indication that they have received the message */
	while (atomic_read(&data.started) != num_cpus) {
		HMT_low();
		if (get_tb() >= timeout) {
			printk("smp_call_function on cpu %d: other cpus not "
				"responding (%d)\n", smp_processor_id(),
				atomic_read(&data.started));
			if (!ipi_fail_ok)
				debugger(NULL);
			goto out;
		}
	}

	/* optionally wait for the CPUs to complete */
	if (wait) {
		while (atomic_read(&data.finished) != num_cpus) {
			HMT_low();
			if (get_tb() >= timeout) {
				printk("smp_call_function on cpu %d: other "
					"cpus not finishing (%d/%d)\n",
					smp_processor_id(),
					atomic_read(&data.finished),
					atomic_read(&data.started));
				debugger(NULL);
				goto out;
			}
		}
	}

 done:
	ret = 0;

 out:
	call_data = NULL;
	HMT_medium();
	return ret;
}