Example #1
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	for (;;)
		if (ppc_md.idle != NULL)
			ppc_md.idle();
		else
			default_idle();
}
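The dispatch in cpu_idle() above is the usual optional-hook pattern: a platform may install its own idle routine through a function pointer, and the generic code falls back to default_idle() when none is registered. A minimal userland sketch of the same pattern (the machdep struct and both idle functions here are illustrative stand-ins, not kernel APIs):

#include <stdio.h>

/* Illustrative stand-ins, not kernel types: a machine-description
 * struct whose idle member may or may not be populated. */
struct machdep {
	void (*idle)(void);
};

static void default_idle(void)
{
	puts("default_idle: generic wait");
}

static void board_idle(void)
{
	puts("board_idle: platform-specific low-power wait");
}

static struct machdep md = { .idle = board_idle };

int main(void)
{
	/* Same dispatch as cpu_idle() above: prefer the platform
	 * hook, fall back to the generic routine when none is
	 * registered. */
	if (md.idle != NULL)
		md.idle();
	else
		default_idle();
	return 0;
}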
Example #2
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	/* check if the device is ready */
	if (!dev || !dev->enabled) {
		if (pm_idle_old)
			pm_idle_old();
		else
#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
			default_idle();
#else
			local_irq_enable();
#endif
		return;
	}

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif
	  
	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_state = target_state;
	
	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);
	
	dev->last_residency = target_state->enter(dev, target_state);
	
	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
	
	if (dev->last_state)
		target_state = dev->last_state;

	target_state->time += (unsigned long long)dev->last_residency;
	target_state->usage++;

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);
}
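cpuidle_idle_call() above is built around a three-step governor contract: select() picks the next C-state, enter() actually idles and reports how long the CPU stayed there, and reflect() lets the governor learn from the outcome. A toy userland model of that bookkeeping, with all names invented for illustration rather than taken from the real cpuidle structures:

#include <stdio.h>

/* Toy model of the governor contract shown above; every name here
 * is illustrative, not a real cpuidle structure or function. */
struct toy_state {
	const char *name;
	unsigned long long time;	/* accumulated residency */
	unsigned long usage;		/* times entered */
};

static struct toy_state states[] = {
	{ "C1", 0, 0 },
	{ "C2", 0, 0 },
};

/* "Governor select": pick the deepest state; a real governor would
 * weigh the predicted idle length against exit latency. */
static int toy_select(void)
{
	return 1;
}

/* "State enter": pretend the CPU slept and report residency in us. */
static int toy_enter(struct toy_state *s)
{
	printf("entering %s\n", s->name);
	return 500;
}

int main(void)
{
	int next = toy_select();
	struct toy_state *target = &states[next];

	/* Same bookkeeping as the kernel loop: enter, then account
	 * residency and usage so select() can learn from outcomes. */
	int residency = toy_enter(target);
	target->time += (unsigned long long)residency;
	target->usage++;

	printf("%s: time=%llu us usage=%lu\n",
	       target->name, target->time, target->usage);
	return 0;
}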
Example #3
/*
 * The body of the idle task.
 */
int cpu_idle(void)
{
	for (;;)
		if (ppc_md.idle != NULL)
			ppc_md.idle();
		else
			default_idle();
	return 0;
}
Example #4
/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/* Force broadcast so ACPI cannot interfere. */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}
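amd_e400_idle() above probes the hardware with rdmsr() only until the erratum is first observed, then latches the result in amd_e400_c1e_detected so every later idle entry can branch without touching the MSR again. A stubbed sketch of that detect-once latch (read_hw_condition() stands in for the MSR bit test and is purely illustrative):

#include <stdbool.h>
#include <stdio.h>

/* read_hw_condition() is a purely illustrative stand-in for the
 * rdmsr() bit test in amd_e400_idle(). */
static bool c1e_detected;

static bool read_hw_condition(void)
{
	return true;	/* pretend the erratum bit is set */
}

static void idle_once(void)
{
	/* Probe only while undetected; once the flag latches, later
	 * calls branch without touching the (expensive) register. */
	if (!c1e_detected && read_hw_condition()) {
		c1e_detected = true;
		puts("condition detected, switching idle strategy");
	}

	if (c1e_detected)
		puts("workaround idle path");
	else
		puts("plain idle path");
}

int main(void)
{
	idle_once();
	idle_once();	/* second call takes the latched path directly */
	return 0;
}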
Example #5
/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}
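The subtle ordering detail in both E400 variants is the exit path: default_idle() returns with interrupts enabled, but tick_broadcast_exit() must run with them disabled, so the code masks interrupts again before leaving broadcast mode. A schematic of that bracket with stub functions standing in for the kernel primitives:

#include <stdio.h>

/* Stubs standing in for the kernel primitives; the point is only
 * the ordering: enter broadcast, idle, then exit with interrupts
 * masked again. */
static void tick_broadcast_enter(void) { puts("enter broadcast"); }
static void tick_broadcast_exit(void)  { puts("exit broadcast"); }
static void local_irq_disable(void)    { puts("irq off"); }
static void local_irq_enable(void)     { puts("irq on"); }
static void default_idle(void)         { puts("idle (returns with irq on)"); }

int main(void)
{
	tick_broadcast_enter();
	default_idle();

	/* default_idle() returns with interrupts enabled, so mask
	 * them before leaving broadcast mode, as the comment above
	 * requires. */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
	return 0;
}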
Example #6
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		rcu_idle_enter();
		while (!need_resched()) {
			/*
			 * rcu_idle_enter() above marks this CPU as
			 * being in an RCU extended quiescent state,
			 * so grace periods are not held up while we
			 * sit here.
			 */
			default_idle();
		}
		rcu_idle_exit();
		schedule_preempt_disabled();
	}
}
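Stripped of the per-arch details, every cpu_idle() variant in this collection shares the same skeleton: an outer forever loop, an inner wait loop gated on need_resched(), and a trip through the scheduler once work arrives. A stubbed, bounded rendering of that skeleton (all functions here are fakes so the program terminates when run):

#include <stdio.h>

/* Fakes standing in for the kernel calls; bounded so the skeleton
 * terminates as a normal program. */
static int ticks;

static int need_resched(void)		{ return ++ticks % 3 == 0; }
static void rcu_idle_enter(void)	{ puts("rcu idle enter"); }
static void rcu_idle_exit(void)		{ puts("rcu idle exit"); }
static void default_idle(void)		{ puts("wait"); }
static void schedule_preempt_disabled(void) { puts("schedule"); }

int main(void)
{
	int rounds;

	/* Same shape as cpu_idle() above: wait until work exists,
	 * then hand the CPU back to the scheduler. */
	for (rounds = 0; rounds < 2; rounds++) {
		rcu_idle_enter();
		while (!need_resched())
			default_idle();
		rcu_idle_exit();
		schedule_preempt_disabled();
	}
	return 0;
}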
Example #7
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	for (;;) {
		while (!need_resched()) {
			if (ppc_md.idle != NULL)
				ppc_md.idle();
			else
				default_idle();
		}

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #8
void init_idle_tt(void)
{
	default_idle();
}
Example #9
static void apm_cpu_idle(void)
{
	static int use_apm_idle; /* = 0 */
	static unsigned int last_jiffies; /* = 0 */
	static unsigned int last_stime; /* = 0 */

	int apm_idle_done = 0;
	unsigned int jiffies_since_last_check = jiffies - last_jiffies;
	unsigned int bucket;

recalc:
	if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
		use_apm_idle = 0;
		last_jiffies = jiffies;
		last_stime = current->stime;
	} else if (jiffies_since_last_check > idle_period) {
		unsigned int idle_percentage;

		idle_percentage = current->stime - last_stime;
		idle_percentage *= 100;
		idle_percentage /= jiffies_since_last_check;
		use_apm_idle = (idle_percentage > idle_threshold);
		if (apm_info.forbid_idle)
			use_apm_idle = 0;
		last_jiffies = jiffies;
		last_stime = current->stime;
	}

	bucket = IDLE_LEAKY_MAX;

	while (!need_resched()) {
		if (use_apm_idle) {
			unsigned int t;

			t = jiffies;
			switch (apm_do_idle()) {
			case 0:
				apm_idle_done = 1;
				if (t != jiffies) {
					if (bucket) {
						bucket = IDLE_LEAKY_MAX;
						continue;
					}
				} else if (bucket) {
					bucket--;
					continue;
				}
				break;
			case 1:
				apm_idle_done = 1;
				break;
			default: /* BIOS refused */
				break;
			}
		}
		if (original_pm_idle)
			original_pm_idle();
		else
			default_idle();
		local_irq_disable();
		jiffies_since_last_check = jiffies - last_jiffies;
		if (jiffies_since_last_check > idle_period)
			goto recalc;
	}

	if (apm_idle_done)
		apm_do_busy();

	local_irq_enable();
}
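The bucket variable in apm_cpu_idle() implements a small leaky-bucket heuristic: every apm_do_idle() call that returns without a jiffy having elapsed (no visible progress) leaks one token, any call during which time did advance refills the bucket to IDLE_LEAKY_MAX, and an empty bucket means the BIOS idle call is not really sleeping, so the loop falls through to the ordinary idle path. A toy model with small illustrative values (LEAKY_MAX and the fake outcome array are not the kernel's):

#include <stdio.h>

/* Toy model of the leaky bucket above; LEAKY_MAX and the fake
 * outcome array are illustrative values, not the kernel's. */
#define LEAKY_MAX 3

int main(void)
{
	int bucket = LEAKY_MAX;
	int progress[] = { 0, 0, 1, 0, 0, 0, 0, 0 };	/* fake outcomes */
	int i;

	for (i = 0; i < 8; i++) {
		if (progress[i]) {
			bucket = LEAKY_MAX;	/* progress: refill */
			printf("iteration %d: refill, bucket=%d\n", i, bucket);
		} else if (bucket > 0) {
			bucket--;		/* no progress: leak a token */
			printf("iteration %d: leak, bucket=%d\n", i, bucket);
		} else {
			puts("bucket empty: stop trusting the cheap path");
			break;
		}
	}
	return 0;
}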
Example #10
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	/* check if the device is ready */
	if (!dev || !dev->enabled) {
		if (pm_idle_old)
			pm_idle_old();
		else
#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
			default_idle();
#else
			local_irq_enable();
#endif
		return;
	}

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/*
	 * Call the device's prepare function before calling the
	 * governor's select function.  ->prepare gives the device's
	 * cpuidle driver a chance to update any dynamic information
	 * of its cpuidle states for the current idle period, e.g.
	 * state availability, latencies, residencies, etc.
	 */
	if (dev->prepare)
		dev->prepare(dev);

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_state = target_state;
	dev->last_residency = target_state->enter(dev, target_state);
	if (dev->last_state)
		target_state = dev->last_state;

	target_state->time += (unsigned long long)dev->last_residency;
	target_state->usage++;

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);
	trace_power_end(0);
}
Example #11
void init_idle_skas(void)
{
	cpu_tasks[current_thread->cpu].pid = os_getpid();
	default_idle();
}
Example #12
void arch_cpu_idle(void)
{
	default_idle();
}