Example #1
/*
 * Every sampling_rate, we check whether the current idle time is less than
 * 20% (the default); if so, we try to increase the frequency. Every
 * sampling_rate, we also look for the lowest frequency that can sustain the
 * load while keeping idle time over 30%. If such a frequency exists, we try
 * to decrease to it.
 *
 * Any frequency increase takes the CPU to the maximum frequency. Frequency
 * reductions happen in minimum steps of 5% (the default) of the current
 * frequency.
 */
static void od_check_cpu(int cpu, unsigned int load_freq)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load_freq > od_tuners->up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support the
	 * current CPU usage without triggering the up policy. To be safe, we
	 * stay 10 points under the threshold.
	 */
	if (load_freq < od_tuners->adj_up_threshold * policy->cur) {
		unsigned int freq_next;
		freq_next = load_freq / od_tuners->adj_up_threshold;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
	}
}
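To make the arithmetic above concrete (illustrative numbers only, derived from the comment above: a 20%-idle trigger means up_threshold = 80, and the 30%-idle target means adj_up_threshold = 70): a CPU at policy->cur = 1000000 kHz under roughly 50% load reports load_freq ≈ 50 * 1000000 = 50000000, which is below 70 * 1000000, so freq_next = 50000000 / 70 ≈ 714286 kHz; CPUFREQ_RELATION_L then rounds that up to the nearest supported frequency, leaving the projected load just under the 70-point mark.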
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
					unsigned int event)
{
#ifdef CONFIG_ARCH_HI3XXX
	unsigned int utarget = policy->max;
#endif

	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		pr_debug("setting to %u kHz because of event %u\n",
						policy->max, event);

#ifdef CONFIG_ARCH_HI3XXX
		if (policy->cpu == 4) {
			if (big_is_booting) {
#define BIG_BOOTING_PERFORMANCE_OPERATING_POINT 1708800
				utarget = BIG_BOOTING_PERFORMANCE_OPERATING_POINT;
			}
		}

		if ((get_lowbatteryflag() == 1) && (policy->cpu == 4))
			utarget = policy->min;

		pr_info("%s utarget=%d\n", __func__, utarget);

		__cpufreq_driver_target(policy, utarget,
						CPUFREQ_RELATION_H);
#else
		__cpufreq_driver_target(policy, policy->max,
						CPUFREQ_RELATION_H);
#endif
		break;

#ifdef CONFIG_ARCH_HI3XXX
	case CPUFREQ_GOV_POLICY_EXIT:

		set_lowBatteryflag(0);

		if (policy->cpu == 4)
			big_is_booting = 0;

		break;
#endif

	default:
		break;
	}
	return 0;
}
Example #3
/*
 * Every sampling_rate, we check whether the current idle time is less than
 * 37% (the default); if so, we try to increase the frequency. Otherwise, we
 * adjust the frequency in proportion to the load.
 */
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	bool boosted;
	u64 now;

	dbs_info->freq_lo = 0;

	now = ktime_to_us(ktime_get());
	boosted = now < (last_input_time + get_input_boost_duration());

	/* Check for frequency increase */
	if (load > od_tuners->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (boosted && policy->cur < input_boost_freq
		     && freq_next < input_boost_freq)
			freq_next = input_boost_freq;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_C);
			return;
		}

		if (boosted && policy->cur <= input_boost_freq)
			return;

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}
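The boost path above relies on last_input_time, input_boost_freq, and get_input_boost_duration(), which are not shown in this collection. A minimal sketch of the missing piece, assuming an input handler that simply timestamps the last event (the handler name and wiring are hypothetical):

/*
 * Hypothetical sketch: stamp the time of the last input event so that
 * od_check_cpu() above can hold the frequency at input_boost_freq for
 * get_input_boost_duration() microseconds afterwards.
 */
static void dbs_input_event(struct input_handle *handle, unsigned int type,
			    unsigned int code, int value)
{
	last_input_time = ktime_to_us(ktime_get());
}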
static void ondemand_suspend(int suspend)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   smp_processor_id());

	if (dbs_enable == 0)
		return;

	if (!suspend) {
		/* resume at max speed */
		suspended = 0;
		__cpufreq_driver_target(dbs_info->cur_policy,
					dbs_info->cur_policy->max,
					CPUFREQ_RELATION_L);
	} else {
		suspended = 1;
		/* let's give it a little breathing room */
		__cpufreq_driver_target(dbs_info->cur_policy, 368640,
					CPUFREQ_RELATION_L);
	}
}
/*
 * Every sampling_rate, we check whether the current idle time is less than
 * 20% (the default); if so, we try to increase the frequency. Every
 * sampling_rate * sampling_down_factor, we check whether the current idle
 * time is more than 80% (the default); if so, we try to decrease the
 * frequency.
 *
 * Any frequency increase takes the CPU to the maximum frequency. Frequency
 * reductions happen in minimum steps of 5% (the default) of the maximum
 * frequency.
 */
static void cs_check_cpu(int cpu, unsigned int load)
{
	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > cs_tuners->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			return;

		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
		return;
	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
				CPUFREQ_RELATION_L);
		return;
	}
}
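get_freq_target() is not reproduced in this collection; in the mainline conservative governor of this era it computes freq_step percent of the maximum frequency, roughly as follows:

static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
					   struct cpufreq_policy *policy)
{
	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_target == 0))
		freq_target = DEF_FREQUENCY_STEP;

	return freq_target;
}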
Example #6
static int cpufreq_governor_interactive(struct cpufreq_policy *new_policy,
		unsigned int event)
{
	int rc;
	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(new_policy->cpu))
			return -EINVAL;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		pm_idle_old = pm_idle;
		pm_idle = cpufreq_idle;
		policy = new_policy;
		break;

	case CPUFREQ_GOV_STOP:
		if (atomic_dec_return(&active_count) > 1)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		pm_idle = pm_idle_old;
		del_timer(&per_cpu(cpu_timer, new_policy->cpu));
		break;

	case CPUFREQ_GOV_LIMITS:
		if (new_policy->max < new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->max, CPUFREQ_RELATION_H);
		else if (new_policy->min > new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs,
                                 struct dbs_data *dbs_data, bool modify_all)
{
    struct cpufreq_policy *policy = cdbs->shared->policy;
    unsigned int cpu = policy->cpu;
    struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                         cpu);
    struct od_dbs_tuners *od_tuners = dbs_data->tuners;
    int delay = 0, sample_type = dbs_info->sample_type;

    if (!modify_all)
        goto max_delay;

    /* Common NORMAL_SAMPLE setup */
    dbs_info->sample_type = OD_NORMAL_SAMPLE;
    if (sample_type == OD_SUB_SAMPLE) {
        delay = dbs_info->freq_lo_jiffies;
        __cpufreq_driver_target(policy, dbs_info->freq_lo,
                                CPUFREQ_RELATION_H);
    } else {
        dbs_check_cpu(dbs_data, cpu);
        if (dbs_info->freq_lo) {
            /* Setup timer for SUB_SAMPLE */
            dbs_info->sample_type = OD_SUB_SAMPLE;
            delay = dbs_info->freq_hi_jiffies;
        }
    }

max_delay:
    if (!delay)
        delay = delay_for_sampling_rate(od_tuners->sampling_rate
                                        * dbs_info->rate_mult);

    return delay;
}
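delay_for_sampling_rate(), used by od_dbs_timer() above, converts the (possibly rate_mult-scaled) sampling rate to jiffies and aligns all CPUs onto nearly the same jiffy; in mainline kernels of this vintage it is defined roughly as:

static inline int delay_for_sampling_rate(unsigned int sampling_rate)
{
	int delay = usecs_to_jiffies(sampling_rate);

	/* We want all CPUs to do sampling nearly on same jiffy */
	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	return delay;
}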
Example #8
/**
 * cpufreq_set - set the CPU frequency
 * @policy: pointer to policy struct where freq is being set
 * @freq: target frequency in kHz
 *
 * Sets the CPU frequency to freq.
 */
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
	int ret = -EINVAL;

	pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);

	mutex_lock(&userspace_mutex);
	if (!per_cpu(cpu_is_managed, policy->cpu))
		goto err;

	/*
	 * We're safe from concurrent calls to ->target() here
	 * as we hold the userspace_mutex lock. If we were calling
	 * cpufreq_driver_target, a deadlock situation might occur:
	 * A: cpufreq_set (lock userspace_mutex) ->
	 *      cpufreq_driver_target(lock policy->lock)
	 * B: cpufreq_set_policy(lock policy->lock) ->
	 *      __cpufreq_governor ->
	 *         cpufreq_governor_userspace (lock userspace_mutex)
	 */
	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);

 err:
	mutex_unlock(&userspace_mutex);
	return ret;
}
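For context, the value passed to cpufreq_set() comes from the userspace governor's scaling_setspeed sysfs attribute; the mainline store hook of this era is approximately:

static ssize_t store_speed(struct cpufreq_policy *policy, const char *buf,
			   size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	cpufreq_set(policy, freq);

	return count;
}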
/*
 * Every sampling_rate, if the current idle time is less than 30% (default),
 * try to increase the frequency. Every sampling_rate, if the current idle
 * time is more than 70% (default), try to decrease the frequency.
 */
static void sa_check_cpu(int cpu, unsigned int load)
{
	struct sa_cpu_dbs_info_s const *dbs_info = &per_cpu(sa_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *const dbs_data = policy->governor_data;
	const struct sa_dbs_tuners *const sa_tuners = dbs_data->tuners;
	const unsigned int prev_load = dbs_info->cdbs.prev_load;
	const unsigned int freq_cur = policy->cur;
	unsigned int freq_target = 0;
	const bool input_event = input_event_boost(sa_tuners->input_event_duration);

	/* Check for frequency decrease */
	if (load < sa_tuners->down_threshold) {
		const unsigned int freq_min = policy->min;

		/* break out early if the frequency is set to the minimum */
		if (freq_cur == freq_min)
			return;

		if (input_event)
			freq_target = sa_tuners->input_event_min_freq;
		else
			freq_target = (freq_cur + freq_min) >> RESISTANCE_OFFSET;

		__cpufreq_driver_target(policy, freq_target,
					CPUFREQ_RELATION_L);
	}

	/* Check for frequency increase */
	else if (load >= max(sa_tuners->up_threshold, prev_load)) {
		/*
		 * Assumption: the original body is not shown; ramping to the
		 * policy maximum mirrors the up-threshold handling in the
		 * governors above.
		 */
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
	}
}
Example #10
static int
__syf_pwm_cpufreq_governor(struct cpufreq_policy *policy,
                           unsigned int event)
{
    unsigned int cpu = policy->cpu;
    struct syf_info_t *__syf_info = &per_cpu(_syf_info, cpu);

    switch (event) {
    case CPUFREQ_GOV_START:
        if (!cpu_online(cpu))
            return -EINVAL;

        if (__syf_info->enable)
            break;

        /*
         * Picker:
         *	Call your governor here.
         *	Important: you may need to hold a mutex around the call
         *	into your governor.
         */
        break;

    case CPUFREQ_GOV_STOP:
        break;

    /* Change the cpu freq. with either highest freq. or lowest freq. */
    case CPUFREQ_GOV_LIMITS:
        mutex_lock(&_syf_mutex);
        if (policy->max < __syf_info->cur_policy.cur) {
            __cpufreq_driver_target(&__syf_info->cur_policy,
                                    policy->max,
                                    CPUFREQ_RELATION_H);
        } else if (policy->min > __syf_info->cur_policy.cur) {
            __cpufreq_driver_target(&__syf_info->cur_policy,
                                    policy->min,
                                    CPUFREQ_RELATION_L);
        }
        mutex_unlock(&_syf_mutex);
        break;
    }

    return 0;
}
Example #11
static void interactivex_suspend(int suspend)
{
	unsigned int max_speed;

	max_speed = RESUME_SPEED;

	if (!enabled)
		return;

	if (!suspend) {
		/* resume at max speed */
		suspended = 0;
		__cpufreq_driver_target(policy, max_speed, CPUFREQ_RELATION_L);
		pr_info("[imoseyon] interactiveX awake at %u\n", policy->cur);
	} else {
		suspended = 1;
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
		pr_info("[imoseyon] interactiveX suspended at %u\n", policy->cur);
	}
}
Example #12
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > od_tuners->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, load, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next;
		freq_next = load * policy->cpuinfo.max_freq / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (!od_tuners->powersave_bias) {
			trace_cpufreq_interactive_target(policy->cpu, load, freq_next, policy->cur, freq_next);

			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);

			trace_cpufreq_interactive_setspeed(policy->cpu, freq_next, policy->cur);

			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);

		trace_cpufreq_interactive_target(policy->cpu, load, freq_next, policy->cur, freq_next);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);

		trace_cpufreq_interactive_setspeed(policy->cpu, freq_next, policy->cur);
	}
}
Example #13
static void msm_gov_check_limits(struct cpufreq_policy *policy)
{
	struct msm_gov *gov = &per_cpu(msm_gov_info, policy->cpu);

	if (policy->max < gov->cur_freq)
		__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
	else if (policy->min > gov->min_freq)
		__cpufreq_driver_target(policy, policy->min,
				CPUFREQ_RELATION_L);
	else
		__cpufreq_driver_target(policy, gov->cur_freq,
				CPUFREQ_RELATION_L);

	gov->cur_freq = policy->cur;
	gov->min_freq = policy->min;
	gov->max_freq = policy->max;
}
static int greenmax_powersave_bias_setspeed(struct cpufreq_policy *policy,
					    struct cpufreq_policy *altpolicy,
					    int level)
{
	if (level == POWERSAVE_BIAS_MAXLEVEL) {
		/* maximum powersave; set to lowest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->min : policy->min,
			CPUFREQ_RELATION_L);
		return 1;
	} else if (level == POWERSAVE_BIAS_MINLEVEL) {
		/* minimum powersave; set to highest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->max : policy->max,
			CPUFREQ_RELATION_H);
		return 1;
	}
	return 0;
}
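POWERSAVE_BIAS_MAXLEVEL and POWERSAVE_BIAS_MINLEVEL are not defined in this excerpt; in the Android ondemand variants this helper comes from, they are typically the end-stops of the bias range (an assumption, not confirmed by this source):

#define POWERSAVE_BIAS_MAXLEVEL	(1000)	/* assumed: full powersave bias */
#define POWERSAVE_BIAS_MINLEVEL	(-1000)	/* assumed: full performance bias */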
Example #15
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (od_tuners.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	struct dbs_data *dbs_data = p->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(p, freq,
				CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

#ifdef CONFIG_ARCH_HI6XXX
	__cpufreq_driver_target(p, freq, CPUFREQ_RELATION_L);
#else
	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
#endif
}
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	int rc = 0;

	switch (event) {
	case CPUFREQ_GOV_START:
		BUG_ON(!policy->cur);
		pr_debug("started managing cpu %u\n", cpu);

		mutex_lock(&userspace_mutex);
		per_cpu(cpu_is_managed, cpu) = 1;
		mutex_unlock(&userspace_mutex);
		break;
	case CPUFREQ_GOV_STOP:
		pr_debug("managing cpu %u stopped\n", cpu);

		mutex_lock(&userspace_mutex);
		per_cpu(cpu_is_managed, cpu) = 0;
		mutex_unlock(&userspace_mutex);
		break;
	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&userspace_mutex);
		pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
			cpu, policy->min, policy->max,
			policy->cur);

		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy, policy->max,
						CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy, policy->min,
						CPUFREQ_RELATION_C);
		mutex_unlock(&userspace_mutex);
		break;
	}
	return rc;
}
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
					unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Else, we adjust the frequency
 * proportional to load.
 */
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > od_tuners->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next;
		freq_next = load * policy->cpuinfo.max_freq / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
	}
}
Example #20
static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
					unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		dprintk("setting to %u kHz because of event %u\n", policy->max, event);
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
		break;
	default:
		break;
	}
	return 0;
}
/**
 * cpufreq_set - set the CPU frequency
 * @policy: pointer to policy struct where freq is being set
 * @freq: target frequency in kHz
 *
 * Sets the CPU frequency to freq.
 */
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
	int ret = -EINVAL;

	pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);

	mutex_lock(&userspace_mutex);
	if (!per_cpu(cpu_is_managed, policy->cpu))
		goto err;

	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_C);
 err:
	mutex_unlock(&userspace_mutex);
	return ret;
}
/* We use the same work function to scale up and down */
static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask = work_cpumask;

	for_each_cpu(cpu, tmp_mask) {
		if (!suspended && (target_freq >= freq_threshold ||
				   target_freq == policy->max)) {
			/* avoid a quick jump from lowest to highest */
			if (policy->cur < 400000)
				target_freq = resume_speed;

			if (nr_running() == 1) {
				cpumask_clear_cpu(cpu, &work_cpumask);
				return;
			}
			__cpufreq_driver_target(policy, target_freq,
						CPUFREQ_RELATION_H);
		} else {
			if (!suspended) {
				target_freq = cpufreq_interactivex_calc_freq(cpu);
				__cpufreq_driver_target(policy, target_freq,
							CPUFREQ_RELATION_L);
			} else {
				/* special care when suspended */
				if (target_freq > suspendfreq) {
					__cpufreq_driver_target(policy, suspendfreq,
								CPUFREQ_RELATION_H);
				} else {
					target_freq = cpufreq_interactivex_calc_freq(cpu);
					if (target_freq < policy->cur)
						__cpufreq_driver_target(policy,
								target_freq,
								CPUFREQ_RELATION_H);
				}
			}
		}
		freq_change_time_in_idle = get_cpu_idle_time_us(cpu,
							&freq_change_time);
		cpumask_clear_cpu(cpu, &work_cpumask);
	}
}
Example #24
static int
syf_pwm_ioctl(struct inode *inode, struct file *file,
              unsigned int cmd, void *arg)
{
    int i;
    unsigned int freq;
    cpu_ctrl_t *cc				= (cpu_ctrl_t *) arg;
    pmu_results_t *r 			= &(cc->pmu);
    unsigned int amt_cpu 		= cc->amt_cpu;
    unsigned int amt_counter 	= cc->amt_counter;

    for (i = 0; i < amt_cpu; ++i) {
        struct syf_info_t *_sinfo = &per_cpu(_syf_info, i);

        switch (cmd) {
        case SYFPWM_TESTING:
            printk("TESTING.\n");
            break;

        case SYFPWM_PMU_START:
            pmu_start(amt_counter, cc->evt_t);
            break;

        case SYFPWM_PMU_STOP:
            pmu_stop(amt_counter);
            memcpy(r, &pmu, sizeof(pmu_results_t));
            break;

        case SYFPWM_GET_FEQU:
            mutex_lock(&_syf_mutex);
            cpufreq_get_policy(&_sinfo->cur_policy, i);
            freq = cpufreq_get(_sinfo->cur_policy.cpu);
            mutex_unlock(&_syf_mutex);
            break;

        case SYFPWM_SET_FEQU:
            mutex_lock(&_syf_mutex);
            cpufreq_get_policy(&_sinfo->cur_policy, i);
            freq = __cpufreq_driver_target(&_sinfo->cur_policy,
                                           (unsigned int) cc->cpu_freq,
                                           CPUFREQ_RELATION_H);
            mutex_unlock(&_syf_mutex);
            break;
        }
    }

    return 0;
}
Example #25
static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
					unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		pr_debug("setting to %u kHz because of event %u\n",
							policy->min, event);
		__cpufreq_driver_target(policy, policy->min,
						CPUFREQ_RELATION_L);
		break;
	default:
		break;
	}
	return 0;
}
static inline void target_freq(struct cpufreq_policy *policy,
		struct greenmax_info_s *this_greenmax, int new_freq, int old_freq,
		int prefered_relation) {
	int index, target;
	struct cpufreq_frequency_table *table = this_greenmax->freq_table;
	unsigned int cpu = this_greenmax->cpu;

	dprintk(GREENMAX_DEBUG_ALG, "%d: %s\n", old_freq, __func__);

	// apply policy limits - just to be sure
	new_freq = validate_freq(policy, new_freq);

	if (!cpufreq_frequency_table_target(policy, table, new_freq,
					prefered_relation, &index)) {
		target = table[index].frequency;
		if (target == old_freq) {
			// if for example we are ramping up to *at most* current + ramp_up_step
			// but there is no such frequency higher than the current, try also
			// to ramp up to *at least* current + ramp_up_step.
			if (new_freq > old_freq && prefered_relation == CPUFREQ_RELATION_H
					&& !cpufreq_frequency_table_target(policy, table, new_freq,
							CPUFREQ_RELATION_L, &index))
				target = table[index].frequency;
			// similarly for ramping down:
			else if (new_freq < old_freq
					&& prefered_relation == CPUFREQ_RELATION_L
					&& !cpufreq_frequency_table_target(policy, table, new_freq,
							CPUFREQ_RELATION_H, &index))
				target = table[index].frequency;
		}

		// no change
		if (target == old_freq)
			return;
	} else {
		dprintk(GREENMAX_DEBUG_ALG, "frequency change failed\n");
		return;
	}

	dprintk(GREENMAX_DEBUG_JUMPS, "%d: jumping to %d (%d) cpu %d\n", old_freq, new_freq, target, cpu);

	__cpufreq_driver_target(policy, target, prefered_relation);

	// remember last time we changed frequency
	this_greenmax->freq_change_time = ktime_to_us(ktime_get());
}
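validate_freq() is not included in this collection; in smartass-derived governors like greenmax it typically just clamps the requested value to the policy limits. A sketch, assuming that convention:

/* Sketch (assumption): clamp a requested frequency to the policy limits,
 * as smartass-family governors conventionally do before table lookup. */
static inline int validate_freq(struct cpufreq_policy *policy, int freq)
{
	if (freq > (int)policy->max)
		return policy->max;
	if (freq < (int)policy->min)
		return policy->min;
	return freq;
}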
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
	int ret = -EINVAL;

	l_vbfs_cpufreq_set_flag = 1;

#ifdef CONFIG_CPU_FREQ_DBG
	pr_info("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
#else
	pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
#endif

	mutex_lock(&vbfs_mutex);
/*
	if (!per_cpu(cpu_is_managed, policy->cpu))
		goto err;

	per_cpu(cpu_set_freq, policy->cpu) = freq;

	if (freq < per_cpu(cpu_min_freq, policy->cpu))
		freq = per_cpu(cpu_min_freq, policy->cpu);
	if (freq > per_cpu(cpu_max_freq, policy->cpu))
		freq = per_cpu(cpu_max_freq, policy->cpu);
*/
	if (freq < policy->min)
		freq = policy->min;
	if (freq > policy->max)
		freq = policy->max;

	/*
	 * We're safe from concurrent calls to ->target() here
	 * as we hold the vbfs_mutex lock. If we were calling
	 * cpufreq_driver_target, a deadlock situation might occur:
	 * A: cpufreq_set (lock vbfs_mutex) ->
	 *      cpufreq_driver_target(lock policy->lock)
	 * B: cpufreq_set_policy(lock policy->lock) ->
	 *      __cpufreq_governor ->
	 *         cpufreq_governor_userspace (lock vbfs_mutex)
	 */
	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);

 err:
	mutex_unlock(&vbfs_mutex);
	return ret;
}
Example #28
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
					unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		pr_debug("setting to %u kHz because of event %u\n",
						policy->max, event);
		__cpufreq_driver_target(policy, policy->max,
						CPUFREQ_RELATION_H);
		break;
	case CPUFREQ_GOV_STOP:
		schedule_work_on(0, &performance_down_work);
		break;
	default:
		break;
	}
	return 0;
}
Example #29
static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int load, unsigned int freq)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max) {
		trace_cpufreq_interactive_already(policy->cpu, load,
				policy->cur, policy->cur, policy->max);
		return;
	}

	trace_cpufreq_interactive_target(policy->cpu, load, freq,
			policy->cur, freq);

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);

	trace_cpufreq_interactive_setspeed(policy->cpu, freq, policy->cur);
}
Example #30
static void od_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = core_dbs_info->sample_type;
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = core_dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* core_dbs_info->rate_mult);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
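need_load_eval(), which gates the sampling above, rate-limits shared policies so that multiple CPUs do not re-evaluate load within the same half sampling period; in kernels of this vintage it reads roughly as:

bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		    unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we recently have sampled */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}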