/** * cpuidle_idle_call - the main idle loop * * NOTE: no locks or semaphores should be used here * return non-zero on failure */ int cpuidle_idle_call(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); struct cpuidle_driver *drv = cpuidle_get_driver(); struct cpuidle_state *target_state; int next_state, entered_state; if (off) return -ENODEV; if (!initialized) return -ENODEV; /* check if the device is ready */ if (!dev || !dev->enabled) return -EBUSY; #if 0 /* shows regressions, re-enable for 2.6.29 */ /* * run any timers that can be run now, at this point * before calculating the idle duration etc. */ hrtimer_peek_ahead_timers(); #endif /* ask the governor for the next state */ next_state = cpuidle_curr_governor->select(drv, dev); if (need_resched()) { local_irq_enable(); return 0; } target_state = &drv->states[next_state]; trace_power_start(POWER_CSTATE, next_state, dev->cpu); trace_cpu_idle(next_state, dev->cpu); entered_state = target_state->enter(dev, drv, next_state); trace_power_end(dev->cpu); trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); if (entered_state >= 0) { /* Update cpuidle counters */ /* This can be moved to within driver enter routine * but that results in multiple copies of same code. */ dev->states_usage[entered_state].time += (unsigned long long)dev->last_residency; dev->states_usage[entered_state].usage++; } /* give the governor an opportunity to reflect on the outcome */ if (cpuidle_curr_governor->reflect) cpuidle_curr_governor->reflect(dev, entered_state); return 0; }
/** * cpuidle_idle_call - the main idle loop * * NOTE: no locks or semaphores should be used here */ static void cpuidle_idle_call(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); struct cpuidle_state *target_state; int next_state; /* check if the device is ready */ if (!dev || !dev->enabled) { if (pm_idle_old) pm_idle_old(); else #if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE) default_idle(); #else local_irq_enable(); #endif return; } #if 0 /* shows regressions, re-enable for 2.6.29 */ /* * run any timers that can be run now, at this point * before calculating the idle duration etc. */ hrtimer_peek_ahead_timers(); #endif /* ask the governor for the next state */ next_state = cpuidle_curr_governor->select(dev); if (need_resched()) { local_irq_enable(); return; } target_state = &dev->states[next_state]; /* enter the state and update stats */ dev->last_state = target_state; trace_power_start(POWER_CSTATE, next_state, dev->cpu); trace_cpu_idle(next_state, dev->cpu); dev->last_residency = target_state->enter(dev, target_state); trace_power_end(dev->cpu); trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); if (dev->last_state) target_state = dev->last_state; target_state->time += (unsigned long long)dev->last_residency; target_state->usage++; /* give the governor an opportunity to reflect on the outcome */ if (cpuidle_curr_governor->reflect) cpuidle_curr_governor->reflect(dev); }
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	/* framework disabled entirely */
	if (off)
		return -ENODEV;

	/* framework not yet brought up */
	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched()) {
		/* work arrived: abort idle and re-enable interrupts */
		local_irq_enable();
		return 0;
	}

	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_state = target_state;

	/* tracepoints are wrapped in RCU_NONIDLE here, unlike the other
	 * variants of this function in this file */
	RCU_NONIDLE(
		trace_power_start(POWER_CSTATE, next_state, dev->cpu);
		trace_cpu_idle(next_state, dev->cpu)
	);

	dev->last_residency = target_state->enter(dev, target_state);

	RCU_NONIDLE(
		trace_power_end(dev->cpu);
		trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
	);
	/* NOTE(review): this block appears truncated in this chunk — the
	 * stats accounting, the governor reflect() call, the return and
	 * the closing brace are missing; recover them from the original
	 * before building. */
/** * cpuidle_idle_call - the main idle loop * * NOTE: no locks or semaphores should be used here */ static void cpuidle_idle_call(void) { struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices); struct cpuidle_state *target_state; int next_state; /* check if the device is ready */ if (!dev || !dev->enabled) { if (pm_idle_old) pm_idle_old(); else #if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE) default_idle(); #else local_irq_enable(); #endif return; } #if 0 /* shows regressions, re-enable for 2.6.29 */ /* * run any timers that can be run now, at this point * before calculating the idle duration etc. */ hrtimer_peek_ahead_timers(); #endif /* * Call the device's prepare function before calling the * governor's select function. ->prepare gives the device's * cpuidle driver a chance to update any dynamic information * of its cpuidle states for the current idle period, e.g. * state availability, latencies, residencies, etc. */ if (dev->prepare) dev->prepare(dev); /* ask the governor for the next state */ next_state = cpuidle_curr_governor->select(dev); if (need_resched()) { local_irq_enable(); return; } target_state = &dev->states[next_state]; /* enter the state and update stats */ dev->last_state = target_state; dev->last_residency = target_state->enter(dev, target_state); if (dev->last_state) target_state = dev->last_state; target_state->time += (unsigned long long)dev->last_residency; target_state->usage++; /* give the governor an opportunity to reflect on the outcome */ if (cpuidle_curr_governor->reflect) cpuidle_curr_governor->reflect(dev); trace_power_end(0); }