Example 1
static int menu_select(struct acpi_processor_power *power)
{
    struct menu_device *data = &__get_cpu_var(menu_devices);
    int i;
    s_time_t    io_interval;

    /* TBD: change to 0 if C0 (polling mode) support is added later */
    data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
    data->exit_us = 0;

    /* determine the expected residency time, round up */
    data->expected_us = get_sleep_length_us();

    data->bucket = which_bucket(data->expected_us);

    io_interval = avg_intr_interval_us();

    /* running average of measured_us with exponential decay (DECAY-1)/DECAY */
    data->latency_factor = DIV_ROUND(
            data->latency_factor * (DECAY - 1) + data->measured_us,
            DECAY);

    /*
     * If the correction factor is 0 (e.g. first-time init or CPU
     * hotplug), we actually want to start out with a unity factor.
     */
    if (data->correction_factor[data->bucket] == 0)
        data->correction_factor[data->bucket] = RESOLUTION * DECAY;

    /* Make sure to round up for half microseconds */
    data->predicted_us = DIV_ROUND(
            data->expected_us * data->correction_factor[data->bucket],
            RESOLUTION * DECAY);

    /* find the deepest idle state that satisfies our constraints */
    for ( i = CPUIDLE_DRIVER_STATE_START + 1; i < power->count; i++ )
    {
        struct acpi_processor_cx *s = &power->states[i];

        if (s->target_residency > data->predicted_us)
            break;
        if (s->latency * IO_MULTIPLIER > io_interval)
            break;
        if (s->latency * LATENCY_MULTIPLIER > data->latency_factor)
            break;
        /* TBD: check the QoS requirement in the future */
        data->exit_us = s->latency;
        data->last_state_idx = i;
    }

    return data->last_state_idx;
}
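
Both the decaying latency average and the residency prediction in the Xen variant above depend on a round-to-nearest division helper, DIV_ROUND. A minimal sketch of such a macro, assuming unsigned operands (the exact Xen definition may differ):

/* round to nearest: bias the dividend by half the divisor before truncating */
#define DIV_ROUND(x, y) (((x) + (y) / 2) / (y))

For instance, with DECAY = 8, a latency_factor of 100 and a measured_us of 20 give DIV_ROUND(100 * 7 + 20, 8) = 720 / 8 = 90, so the average drifts toward each new sample by roughly 1/DECAY per observation.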
Example 2
File: menu.c Project: mazuhowski/Cm
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * If the correction factor is 0 (e.g. first-time init or CPU
	 * hotplug), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = div_round64((uint64_t)data->expected_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
	    dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	return data->last_state_idx;
}
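
This and the later Linux-derived examples call div_round64 for the same rounding, but with a 64-bit dividend so that expected_us * correction_factor cannot overflow (the comment above the call spells this out). In mainline kernels of this vintage it is a thin wrapper over div_u64; a sketch under that assumption:

#include <linux/math64.h>

/* 64-bit round-to-nearest division; the divisor stays 32-bit for div_u64 */
static inline s64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}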
Example 3
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * If the correction factor is 0 (e.g. first-time init or CPU
	 * hotplug), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
#ifdef CONFIG_SKIP_IDLE_CORRELATION
	if (dev->skip_idle_correlation)
		data->predicted_us = data->expected_us;
	else
#endif
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	/* NOTE: this hunk has not been verified */
#ifndef CONFIG_CPU_THERMAL_IPA
	get_typical_interval(data);
#else
	/*
	 * HACK - ignore repeating patterns when we're
	 * forecasting a very large idle period.
	 */
	if (data->predicted_us < MAX_INTERESTING)
		get_typical_interval(data);
#endif

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
	    dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	return data->last_state_idx;
}
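
Every variant bins the expected residency with which_bucket so that I/O-heavy periods keep their own correction history (Example 7 passes the waiter count in from the caller instead). A sketch modeled on mainline menu.c of this era; BUCKETS and the duration thresholds are assumptions carried over from mainline and may differ in these forks:

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/* I/O-bound periods get their own buckets: wakeups behave differently */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS / 2;

	/* then bin by order of magnitude of the expected sleep, in us */
	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}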
Example 4
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int power_usage = INT_MAX;
	int i;
	int multiplier;
	struct timespec t;
	int repeat = 0, low_predicted = 0;
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * If the correction factor is 0 (e.g. first-time init or CPU
	 * hotplug), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	repeat = get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (su->disable)
			continue;
		if (s->target_residency > data->predicted_us) {
			low_predicted = 1;
			continue;
		}
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		if (s->power_usage < power_usage) {
			power_usage = s->power_usage;
			data->last_state_idx = i;
			data->exit_us = s->exit_latency;
		}
	}

	/* not deepest C-state chosen for low predicted residency */
	if (low_predicted) {
		unsigned int timer_us = 0;
		unsigned int perfect_us = 0;

		/*
		 * Set a timer to detect whether this sleep is much
		 * longer than repeat mode predicted.  If the timer
		 * triggers, the code will evaluate whether to put
		 * the CPU into a deeper C-state.
		 * The timer is cancelled on CPU wakeup.
		 */
		timer_us = 2 * (data->predicted_us + MAX_DEVIATION);

		perfect_us = perfect_cstate_ms * 1000;

		if (repeat && (4 * timer_us < data->expected_us)) {
			hrtimer_start(hrtmr, ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED);
			/* In repeat case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
		} else if (perfect_us < data->expected_us) {
			/*
			 * The next timer is long. This could be because
			 * we did not make a useful prediction.
			 * In that case, it makes sense to re-enter
			 * into a deeper C-state after some time.
			 */
			hrtimer_start(hrtmr, ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED);
			/* In general case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
		}

	}

	return data->last_state_idx;
}
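
The multiplier fetched from performance_multiplier makes the governor demand a proportionally longer predicted sleep before accepting a high exit latency. A sketch of the heuristic as older mainline kernels implemented it; get_loadavg was a menu.c-local helper there, and both the weights and the helper should be treated as assumptions:

static inline int performance_multiplier(void)
{
	int mult = 1;

	/* under load, exit latency hurts throughput, so weigh it higher */
	mult += 2 * get_loadavg();

	/* per-CPU I/O waiters make wakeup latency even more expensive */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}

With this shape, the s->exit_latency * multiplier > data->predicted_us test above enforces a minimum predicted-sleep to exit-latency ratio that scales with load.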
Example 5
/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int int_vote_req = pm_qos_request(PM_QOS_CPU_INT_LATENCY);
	unsigned int power_usage = -1;
	int i;
	int multiplier;
	struct timespec t;
	unsigned int timer_id = 0;
	unsigned int schedule_time = 0xffffffff;

	if (data->needs_update) {
		menu_update(dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	if (unlikely(int_vote_req != PM_QOS_CPUIDLE_INT_DEFAULT_VALUE)) {
		PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,
			      "menu_select,int_vote_req=0x%x\n", int_vote_req);
		return 0;
	}

	if (num_online_cpus() > 1)
		return 0;

	pwrctrl_sleep_mgr_get_next_schedule_time(0, &timer_id, &schedule_time);

	/* convert ms to us, clamping to avoid 32-bit overflow */
	if (schedule_time > (0xFFFFFFFF / 1000))
		schedule_time = 0xFFFFFFFF;
	else
		schedule_time *= USEC_PER_MSEC;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

    PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"menu_select,data->expected_us=%d,schedule_time=%d\n",data->expected_us,schedule_time);

    if(schedule_time < data->expected_us)
    {
        /*PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"menu_select,system time:%d private time:%d\n",data->expected_us, schedule_time);*/
        data->expected_us = schedule_time;
    }

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * If the correction factor is 0 (e.g. first-time init or CPU
	 * hotplug), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	detect_repeating_patterns(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */

    PRINT_PWC_DBG(PWC_SWITCH_CPUIDLE,"menu_select,multiplier=%d, latency_req=%d, predicted_us=%llu\n",
	    multiplier,latency_req, data->predicted_us);
	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->flags & CPUIDLE_FLAG_IGNORE)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		if (s->power_usage < power_usage) {
			power_usage = s->power_usage;
			data->last_state_idx = i;
			data->exit_us = s->exit_latency;
		}
	}

	return data->last_state_idx;
}
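
The correction factor read in every example is maintained elsewhere, in menu_update, as a decaying average of how the measured idle time compared with the prediction. A sketch of that feedback step, modeled on mainline; the function name update_correction_factor is hypothetical, and the real code runs inline in menu_update:

static void update_correction_factor(struct menu_device *data,
				     unsigned int measured_us)
{
	u64 new_factor;

	/* age the old factor: keep (DECAY - 1) / DECAY of it */
	new_factor = data->correction_factor[data->bucket] * (DECAY - 1) / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		/* fold in the observed measured/expected ratio, scaled by RESOLUTION */
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/* no credible measurement: pull the factor toward unity */
		new_factor += RESOLUTION;

	/* 0 is reserved as the "uninitialized" marker menu_select tests for */
	if (new_factor == 0)
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;
}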
Example 6
File: menu.c Project: SelfImp/m9
/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	unsigned int power_usage = -1;
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * If the correction factor is 0 (e.g. first-time init or CPU
	 * hotplug), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	detect_repeating_patterns(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->flags & CPUIDLE_FLAG_IGNORE)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		if (s->power_usage < power_usage) {
			power_usage = s->power_usage;
			data->last_state_idx = i;
			data->exit_us = s->exit_latency;
		}
	}

	return data->last_state_idx;
}
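
For context, menu_select is not called directly: it is installed as the .select hook of a cpuidle governor and invoked from the idle loop. A sketch of the registration as mainline kernels of this vintage did it; the exact fields (.enable, .rating, and so on) vary by version:

static struct cpuidle_governor menu_governor = {
	.name    = "menu",
	.rating  = 20,		/* preferred over the simpler ladder governor */
	.select  = menu_select,
	.reflect = menu_reflect,
	.owner   = THIS_MODULE,
};

static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}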
Example 7
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	unsigned int interactivity_req;
	int repeat = 0, low_predicted = 0;
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
	unsigned long nr_iowaiters;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());

	nr_iowaiters = nr_iowait_cpu(smp_processor_id());
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * If the correction factor is 0 (e.g. first-time init or CPU
	 * hotplug), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
#ifdef CONFIG_SKIP_IDLE_CORRELATION
	if (dev->skip_idle_correlation)
		data->predicted_us = data->next_timer_us;
	else
#endif
	data->predicted_us = div_round64(data->next_timer_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	/* NOTE: this hunk has not been verified */
#ifndef CONFIG_CPU_THERMAL_IPA
	repeat = get_typical_interval(data);
#else
	/*
	 * HACK - ignore repeating patterns when we're
	 * forecasting a very large idle period.
	 */
	if (data->predicted_us < MAX_INTERESTING)
		repeat = get_typical_interval(data);
#endif

	/*
	 * Performance multiplier defines a minimum predicted idle
	 * duration / latency ratio. Adjust the latency limit if
	 * necessary.
	 */
	interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters);
	if (latency_req > interactivity_req)
		latency_req = interactivity_req;

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->next_timer_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
	    dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us) {
			low_predicted = 1;
			continue;
		}
		if (s->exit_latency > latency_req)
			continue;

		data->last_state_idx = i;
	}

	/* not deepest C-state chosen for low predicted residency */
	if (low_predicted) {
		unsigned int timer_us = 0;
		unsigned int perfect_us = 0;

		/*
		 * Set a timer to detect whether this sleep is much
		 * longer than repeat mode predicted.  If the timer
		 * triggers, the code will evaluate whether to put
		 * the CPU into a deeper C-state.
		 * The timer is cancelled on CPU wakeup.
		 */
		timer_us = 2 * (data->predicted_us + MAX_DEVIATION);

		perfect_us = perfect_cstate_ms * 1000;

		if (repeat && (4 * timer_us < data->next_timer_us)) {
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In repeat case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
		} else if (perfect_us < data->next_timer_us) {
			/*
			 * The next timer is long. This could be because
			 * we did not make a useful prediction.
			 * In that case, it makes sense to re-enter
			 * into a deeper C-state after some time.
			 */
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In general case, menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
		}

	}

	return data->last_state_idx;
}
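
Examples 4 and 7 arm a per-CPU safety hrtimer when a shallow state was chosen on a low residency prediction. A sketch of the companion callback from that patch series, under the assumption that a MENU_HRTIMER_STOP value marks the timer as idle; details differ between the forks:

static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
{
	int cpu = smp_processor_id();
	struct menu_device *data = &per_cpu(menu_devices, cpu);

	/*
	 * If the general-case timer fired, the sleep ran far past the
	 * prediction, so the history for this bucket is suspect: reset
	 * the correction factor to unity and let it re-converge.
	 */
	if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;

	/* the idle loop re-runs menu_select and may now pick a deeper state */
	return HRTIMER_NORESTART;
}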