Example #1
File: menu.c Project: E-LLP/n900
/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
	int i;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		data->last_state_idx = 0;
		return 0;
	}

	/* determine the expected residency time */
	data->expected_us =
		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;

	/* find the deepest idle state that satisfies our constraints */
	for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->target_residency > data->expected_us)
			break;
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req)
			break;
	}

	data->last_state_idx = i - 1;
	return i - 1;
}
Example #2
bool tegra2_lp2_is_allowed(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	s64 request = ktime_to_us(tick_nohz_get_sleep_length());

	if (request < state->target_residency) {
		/* Not enough time left to enter LP2 */
		return false;
	}

	return true;
}
Example #3
void tegra2_idle_lp2(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	s64 request = ktime_to_us(tick_nohz_get_sleep_length());
	bool last_cpu = tegra_set_cpu_in_lp2(dev->cpu);

	cpu_pm_enter();

	if (dev->cpu == 0) {
		if (last_cpu) {
			if (tegra2_idle_lp2_cpu_0(dev, state, request) < 0) {
				int i;
				for_each_online_cpu(i) {
					if (i != dev->cpu)
						tegra2_wake_reset_cpu(i);
				}
			}
		} else {
Example #4
File: menu.c Project: Tigrouzen/k1099
/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int i;

	/* determine the expected residency time */
	data->expected_us =
		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;

	/* find the deepest idle state that satisfies our constraints */
	for (i = 1; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->target_residency > data->expected_us)
			break;
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
			break;
	}

	data->last_state_idx = i - 1;
	return i - 1;
}
Example #5
File: menu.c Project: mazuhowski/Cm
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;


	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = div_round64((uint64_t)data->expected_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	return data->last_state_idx;
}
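
The prediction step in the menu governor examples above scales the timer-based estimate by a per-bucket correction factor and divides with rounding. A standalone sketch of that arithmetic, assuming the RESOLUTION and DECAY values used by menu.c (1024 and 8); my_div_round64() and predict_idle_us() are illustrative helpers, not kernel functions:

#include <linux/math64.h>
#include <linux/types.h>

#define RESOLUTION	1024
#define DECAY		8

/* Rounding 64-by-32 division, mirroring menu.c's div_round64() helper. */
static u64 my_div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}

/* Scale the timer-based estimate by the per-bucket correction factor. */
static u64 predict_idle_us(u64 expected_us, unsigned int correction_factor)
{
	/* A factor of 0 (first use, CPU hotplug) is treated as unity. */
	if (correction_factor == 0)
		correction_factor = RESOLUTION * DECAY;

	return my_div_round64(expected_us * correction_factor,
			      RESOLUTION * DECAY);
}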
Example #6
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;


	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
#ifdef CONFIG_SKIP_IDLE_CORRELATION
	if (dev->skip_idle_correlation)
		data->predicted_us = data->expected_us;
	else
#endif
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	/* This patch is not checked */
#ifndef CONFIG_CPU_THERMAL_IPA
	get_typical_interval(data);
#else
	/*
	 * HACK - Ignore repeating patterns when we're
	 * forecasting a very large idle period.
	 */
	if(data->predicted_us < MAX_INTERESTING)
		get_typical_interval(data);
#endif

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	return data->last_state_idx;
}
Example #7
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int power_usage = INT_MAX;
	int i;
	int multiplier;
	struct timespec t;
	int repeat = 0, low_predicted = 0;
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;


	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	repeat = get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (su->disable)
			continue;
		if (s->target_residency > data->predicted_us) {
			low_predicted = 1;
			continue;
		}
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		if (s->power_usage < power_usage) {
			power_usage = s->power_usage;
			data->last_state_idx = i;
			data->exit_us = s->exit_latency;
		}
	}

	/* not deepest C-state chosen for low predicted residency */
	if (low_predicted) {
		unsigned int timer_us = 0;
		unsigned int perfect_us = 0;

		/*
		 * Set a timer to detect whether this sleep is much
		 * longer than repeat mode predicted.  If the timer
		 * triggers, the code will evaluate whether to put
		 * the CPU into a deeper C-state.
		 * The timer is cancelled on CPU wakeup.
		 */
		timer_us = 2 * (data->predicted_us + MAX_DEVIATION);

		perfect_us = perfect_cstate_ms * 1000;

		if (repeat && (4 * timer_us < data->expected_us)) {
			hrtimer_start(hrtmr, ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED);
			/* In repeat case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
		} else if (perfect_us < data->expected_us) {
			/*
			 * The next timer is long. This could be because
			 * we did not make a useful prediction.
			 * In that case, it makes sense to re-enter
			 * into a deeper C-state after some time.
			 */
			hrtimer_start(hrtmr, ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED);
			/* In general case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
		}

	}

	return data->last_state_idx;
}
Example #8
/*
 * Put CPU in low power mode.
 */
void arch_idle(void)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	uint32_t sleep_limit = SLEEP_LIMIT_NONE;

	int64_t timer_expiration;
	int latency_qos;
	int ret;
	int i;
	unsigned int cpu;
	int64_t t1;
	static DEFINE_PER_CPU(int64_t, t2);
	int exit_stat;

	if (!atomic_read(&msm_pm_init_done))
		return;

	cpu = smp_processor_id();
	latency_qos = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	/* get the next timer expiration */
	timer_expiration = ktime_to_ns(tick_nohz_get_sleep_length());

	t1 = ktime_to_ns(ktime_get());
	msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - __get_cpu_var(t2));
	msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, timer_expiration);
	exit_stat = MSM_PM_STAT_IDLE_SPIN;

	for (i = 0; i < ARRAY_SIZE(allow); i++)
		allow[i] = true;

	if (num_online_cpus() > 1 ||
		(timer_expiration < msm_pm_idle_sleep_min_time) ||
		!msm_pm_irq_extns->idle_sleep_allowed()) {
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = false;
	}

	for (i = 0; i < ARRAY_SIZE(allow); i++) {
		struct msm_pm_platform_data *mode =
					&msm_pm_modes[MSM_PM_MODE(cpu, i)];
		if (!mode->idle_supported || !mode->idle_enabled ||
			mode->latency >= latency_qos ||
			mode->residency * 1000ULL >= timer_expiration)
			allow[i] = false;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
		uint32_t wait_us = CONFIG_MSM_IDLE_WAIT_ON_MODEM;
		while (msm_pm_modem_busy() && wait_us) {
			if (wait_us > 100) {
				udelay(100);
				wait_us -= 100;
			} else {
				udelay(wait_us);
				wait_us = 0;
			}
		}

		if (msm_pm_modem_busy()) {
			allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
			allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]
				= false;
		}
	}

	MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
		"%s(): latency qos %d, next timer %lld, sleep limit %u\n",
		__func__, latency_qos, timer_expiration, sleep_limit);

	for (i = 0; i < ARRAY_SIZE(allow); i++)
		MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
			"%s(): allow %s: %d\n", __func__,
			msm_pm_sleep_mode_labels[i], (int)allow[i]);

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
		/* Sync the timer with SCLK, it is needed only for modem
		 * assisted collapse case.
		 */
		int64_t next_timer_exp = msm_timer_enter_idle();
		uint32_t sleep_delay;
		bool low_power = false;

		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			next_timer_exp, MSM_PM_SLEEP_TICK_LIMIT);

		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;

		if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
			sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;

#if defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_ACTIVE)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT1;
#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_RETENTION)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
#endif

		ret = msm_pm_power_collapse(true, sleep_delay, sleep_limit);
		low_power = (ret != -EBUSY && ret != -ETIMEDOUT);
		msm_timer_exit_idle(low_power);

		if (ret)
			exit_stat = MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
		else {
			exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
			msm_pm_sleep_limit = sleep_limit;
		}
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		ret = msm_pm_power_collapse_standalone(true);
		exit_stat = ret ?
			MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE :
			MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
	} else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
		ret = msm_pm_swfi(true);
		if (ret)
			while (!msm_pm_irq_extns->irq_pending())
				udelay(1);
		exit_stat = ret ? MSM_PM_STAT_IDLE_SPIN : MSM_PM_STAT_IDLE_WFI;
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		msm_pm_swfi(false);
		exit_stat = MSM_PM_STAT_IDLE_WFI;
	} else {
		while (!msm_pm_irq_extns->irq_pending())
			udelay(1);
		exit_stat = MSM_PM_STAT_IDLE_SPIN;
	}

	__get_cpu_var(t2) = ktime_to_ns(ktime_get());
	msm_pm_add_stat(exit_stat, __get_cpu_var(t2) - t1);
}
Example #9
/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int int_vote_req = pm_qos_request(PM_QOS_CPU_INT_LATENCY);
	unsigned int power_usage = -1;
	int i;
	int multiplier;
	struct timespec t;
	unsigned int timer_id = 0;
	unsigned int schedule_time = 0xffffffff;

	if (data->needs_update) {
		menu_update(dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

    if (unlikely(int_vote_req != PM_QOS_CPUIDLE_INT_DEFAULT_VALUE))
    {
        PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"menu_select,int_vote_req=0x%x\n",int_vote_req);
        return 0;
    }

    if(num_online_cpus() > 1)
        return 0;

    pwrctrl_sleep_mgr_get_next_schedule_time(0, &timer_id, &schedule_time);

    if(schedule_time > (0xFFFFFFFF / 1000))
    {
        schedule_time = 0xFFFFFFFF;
    }
    else
    {
        schedule_time *= USEC_PER_MSEC;
    }

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

    PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"menu_select,data->expected_us=%d,schedule_time=%d\n",data->expected_us,schedule_time);

    if(schedule_time < data->expected_us)
    {
        /*PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"menu_select,system time:%d private time:%d\n",data->expected_us, schedule_time);*/
        data->expected_us = schedule_time;
    }

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	detect_repeating_patterns(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */

    PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"menu_select,multiplier=%d, latency_req=%d, predicted_us=%llu\n",
	    multiplier,latency_req, data->predicted_us);
	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->flags & CPUIDLE_FLAG_IGNORE)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		if (s->power_usage < power_usage) {
			power_usage = s->power_usage;
			data->last_state_idx = i;
			data->exit_us = s->exit_latency;
		}
	}

	return data->last_state_idx;
}
Example #10
File: menu.c Project: SelfImp/m9
/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	unsigned int power_usage = -1;
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;


	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	detect_repeating_patterns(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->flags & CPUIDLE_FLAG_IGNORE)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		if (s->power_usage < power_usage) {
			power_usage = s->power_usage;
			data->last_state_idx = i;
			data->exit_us = s->exit_latency;
		}
	}

	return data->last_state_idx;
}
Example #11
int enter_lowpm(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	int icanidle = 0;
	int idle_time;
	int cpu_id = 0;
	int cpu_idle_flag = IDLE_DISABLE;
	struct cpuidle_state *new_state = state;
	struct timeval before, after;
    int enter_state;
    struct timespec t;
	unsigned int timer_id = 0;
	unsigned int schedule_time = 0xffffffff;
    unsigned int expected_us;

    local_irq_disable();
    cpu_id = get_cpu();
    put_cpu();
    if(get_enter_state(state, &enter_state) != 0)
    {
        local_irq_enable();
        return 0;
    }

    /* Used to keep track of the total time in idle */
	do_gettimeofday(&before);

    #if 1
    /* ACPU power-down is not allowed during boot */
    /*if((CPU_IDLE_C1 <= enter_state)&&(CPU_IDLE_C3 >= enter_state)&&(before.tv_sec < IDLE_ACTIVE_DELAY_S))*/
    if((CPU_IDLE_C1 <= enter_state)&&(CPU_IDLE_C3 >= enter_state)&&(0 == g_pwc_init_flag))
    {
        PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"before.tv_sec:0x%x\n",before.tv_sec);

        local_irq_enable();
        return 0;
    }
    #endif

    if((RET_OK == pwrctrl_is_func_on(PWC_SWITCH_CPUIDLE))&&((CPU_IDLE_C0 < enter_state)&&(CPU_IDLE_C4 > enter_state))&&(0 == cpu_id))
    {
        cpu_idle_flag = IDLE_ENABLE;
    }
    else
    {
        cpu_idle_flag = IDLE_DISABLE;
    }


    /* For C3, start a timer to wake the CPU; to be optimized later */
    if((enter_state >= CPU_IDLE_C3)&&(IDLE_ENABLE == cpu_idle_flag))
    {
        pwrctrl_sleep_mgr_get_next_schedule_time(0, &timer_id, &schedule_time);

        if(schedule_time > (0xFFFFFFFF / 1000))
        {
            schedule_time = 0xFFFFFFFF;
        }
        else
        {
            schedule_time *= USEC_PER_MSEC;
        }
        /* C3: stop all TCXO timers; to be optimized */

    	/* determine the expected residency time, round up */
    	t = ktime_to_timespec(tick_nohz_get_sleep_length());
    	expected_us = t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

        if(schedule_time < expected_us)
        {
            PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"enter_lowpm,system time:%d private time:%d\n",expected_us, schedule_time);
            expected_us = schedule_time;
        }

        expected_us -= state->exit_latency;
        DRV_TIMER_STOP(CPU_IDLE_TIMER);
        DRV_TIMER_START((unsigned int)CPU_IDLE_TIMER, cpu_idle_timer_isr, (int)0, (expected_us / MSEC_PER_SEC), TIMER_ONCE_COUNT, TIMER_UNIT_MS);
    }


    if(IDLE_ENABLE == cpu_idle_flag)
    {
        *gp_cpuidle_state = (enter_state << CPUIDLE_STATE_START_BIT) | (CPU_IDLE_STAT_VALID << CPUIDLE_STATE_MAGIC_START_BIT);
        /*pwrctrl_wdt_disable();*/
    }

    if(0 == cpu_id)
    {
        PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"system will enter cpuidle state(%d)\n",enter_state);
    }

    if(CPU_IDLE_C0 == enter_state)
    {
        cpu_do_idle();
    }
    else if(IDLE_ENABLE == cpu_idle_flag)
    {
        pwrctrl_deep_sleep();
    }

    if(enter_state >= SPECIAL_HANDLE_STATE)
    {
        /* Clear the timer interrupt */
        /* On C3 resume, restore the TCXO timers stopped earlier; to be optimized */
    }

    if(IDLE_ENABLE == cpu_idle_flag)
    {
        *gp_cpuidle_state = (CPU_IDLE_C4 << CPUIDLE_STATE_START_BIT) | (CPU_IDLE_STAT_VALID << CPUIDLE_STATE_MAGIC_START_BIT);
        /*pwrctrl_wdt_enable();*/
    }

    do_gettimeofday(&after);

    idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
		    (after.tv_usec - before.tv_usec);
    local_irq_enable();

	return idle_time;
}
Example #12
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	unsigned int interactivity_req;
	int repeat = 0, low_predicted = 0;
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
	unsigned long nr_iowaiters;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());

	nr_iowaiters = nr_iowait_cpu(smp_processor_id());
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
#ifdef CONFIG_SKIP_IDLE_CORRELATION
	if (dev->skip_idle_correlation)
		data->predicted_us = data->next_timer_us;
	else
#endif
	data->predicted_us = div_round64(data->next_timer_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	/* This patch is not checked */
#ifndef CONFIG_CPU_THERMAL_IPA
	repeat = get_typical_interval(data);
#else
	/*
	 * HACK - Ignore repeating patterns when we're
	 * forecasting a very large idle period.
	 */
	if(data->predicted_us < MAX_INTERESTING)
		repeat = get_typical_interval(data);
#endif

	/*
	 * Performance multiplier defines a minimum predicted idle
	 * duration / latency ratio. Adjust the latency limit if
	 * necessary.
	 */
	interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters);
	if (latency_req > interactivity_req)
		latency_req = interactivity_req;

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->next_timer_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us) {
			low_predicted = 1;
			continue;
		}
		if (s->exit_latency > latency_req)
			continue;

		data->last_state_idx = i;
	}

	/* not deepest C-state chosen for low predicted residency */
	if (low_predicted) {
		unsigned int timer_us = 0;
		unsigned int perfect_us = 0;

		/*
		 * Set a timer to detect whether this sleep is much
		 * longer than repeat mode predicted.  If the timer
		 * triggers, the code will evaluate whether to put
		 * the CPU into a deeper C-state.
		 * The timer is cancelled on CPU wakeup.
		 */
		timer_us = 2 * (data->predicted_us + MAX_DEVIATION);

		perfect_us = perfect_cstate_ms * 1000;

		if (repeat && (4 * timer_us < data->next_timer_us)) {
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In repeat case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
		} else if (perfect_us < data->next_timer_us) {
			/*
			 * The next timer is long. This could be because
			 * we did not make a useful prediction.
			 * In that case, it makes sense to re-enter
			 * into a deeper C-state after some time.
			 */
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In general case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
		}

	}

	return data->last_state_idx;
}
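
Every snippet above follows the same gating pattern: read the time until the next expected timer event with tick_nohz_get_sleep_length(), convert it to microseconds, and skip any idle state whose target_residency or exit_latency does not fit inside that budget. A minimal sketch of the pattern, assuming a simplified state table rather than the real struct cpuidle_state (the my_idle_state type and pick_idle_state() are illustrative names, not taken from the projects above):

#include <linux/ktime.h>
#include <linux/tick.h>
#include <linux/types.h>

/* Illustrative, simplified state descriptor; not the kernel's struct cpuidle_state. */
struct my_idle_state {
	unsigned int exit_latency;	/* worst-case wakeup latency, in us */
	unsigned int target_residency;	/* minimum useful sleep length, in us */
};

/*
 * Pick the deepest state whose residency fits the expected sleep length
 * and whose exit latency fits the latency budget. States are assumed to
 * be ordered from shallowest (index 0) to deepest.
 */
static int pick_idle_state(const struct my_idle_state *states, int count,
			   unsigned int latency_req_us)
{
	s64 expected_us = ktime_to_us(tick_nohz_get_sleep_length());
	int chosen = 0;
	int i;

	for (i = 1; i < count; i++) {
		if (states[i].target_residency > expected_us)
			break;
		if (states[i].exit_latency > latency_req_us)
			break;
		chosen = i;
	}

	return chosen;
}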