static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;
	unsigned long mod_min_sample_time;
	int i, max_load;
	unsigned int max_freq;
	unsigned int boosted_freq;
	struct cpufreq_interactive_cpuinfo *picpu;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	pcpu->last_evaluated_jiffy = get_jiffies_64();
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
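	/*
	 * cputime_speedadj accumulates busy time weighted by the frequency
	 * it ran at; dividing by wall-clock delta_time yields an average
	 * "effective" frequency, and scaling by 100 over target_freq turns
	 * that into a load percentage relative to the current target.
	 */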
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	pcpu->prev_load = cpu_load;
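	/*
	 * A boost is active while the boost sysfs flag is set or a
	 * boostpulse window is still open; the boost floor is hispeed_freq,
	 * clamped to at least the policy minimum.
	 */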
	boosted = boost_val || now < boostpulse_endtime;
	boosted_freq = max(hispeed_freq, pcpu->policy->min);

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < boosted_freq) {
			new_freq = boosted_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

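			/*
			 * Past freq_calc_thresh, fall back to a simple
			 * proportional estimate against the policy maximum.
			 */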
			if (new_freq > freq_calc_thresh)
				new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < boosted_freq)
				new_freq = boosted_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);

		if (new_freq > freq_calc_thresh)
			new_freq = pcpu->policy->max * cpu_load / 100;

		if (sync_freq && new_freq < sync_freq) {
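			/*
			 * Scan the other online CPUs; if one is already
			 * heavily loaded or running above the sync
			 * thresholds, pull this CPU up to sync_freq too.
			 */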

			max_load = 0;
			max_freq = 0;

			for_each_online_cpu(i) {
				picpu = &per_cpu(cpuinfo, i);

				if (i == data || picpu->prev_load <
						up_threshold_any_cpu_load)
					continue;

				max_load = max(max_load, picpu->prev_load);
				max_freq = max(max_freq, picpu->target_freq);
			}

			if (max_freq > up_threshold_any_cpu_freq ||
				max_load >= up_threshold_any_cpu_load)
				new_freq = sync_freq;
		}
	}
Example #2
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;
	unsigned long mod_min_sample_time;
	int i, max_load;
	unsigned int max_freq;
	struct cpufreq_interactive_cpuinfo *picpu;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (!delta_time)
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;

	pcpu->prev_load = cpu_load;
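	/*
	 * This variant derives the boost window directly from the last
	 * input event instead of a precomputed boostpulse_endtime.
	 */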
	boosted = now < (last_input_time + boostpulse_duration_val);

	if (cpu_load >= go_hispeed_load) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);

		if (sync_freq && new_freq < sync_freq) {
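			/*
			 * Note the && below: a remote CPU must be both fast
			 * enough and loaded enough before this CPU syncs.
			 */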

			max_load = 0;
			max_freq = 0;

			for_each_online_cpu(i) {
				picpu = &per_cpu(cpuinfo, i);

				if (i == data || picpu->prev_load <
						up_threshold_any_cpu_load)
					continue;

				max_load = max(max_load, picpu->prev_load);
				max_freq = max(max_freq, picpu->target_freq);
			}

			if (max_freq > up_threshold_any_cpu_freq &&
				max_load >= up_threshold_any_cpu_load)
				new_freq = sync_freq;
		}
	}
Example #3
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;
	unsigned long mod_min_sample_time;
	int i, max_load;
	unsigned int max_freq;
	unsigned int boosted_freq;
	struct cpufreq_interactive_cpuinfo *picpu;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->last_evaluated_jiffy = get_jiffies_64();
	now = update_load(data);
	if (use_sched_hint) {
		/*
		 * Unlock early to avoid deadlock.
		 *
		 * cpufreq_interactive_timer_resched_now() is called
		 * in thread migration notification which already holds
		 * rq lock. Then it locks load_lock to avoid racing with
		 * cpufreq_interactive_timer_resched/start().
		 * sched_get_busy() will also acquire rq lock. Thus we
		 * can't hold load_lock when calling sched_get_busy().
		 *
		 * load_lock used in this function protects time
		 * and load information. These stats are not used when
		 * scheduler hint is available. Thus unlocking load_lock
		 * early is perfectly OK.
		 */
		spin_unlock_irqrestore(&pcpu->load_lock, flags);
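		/*
		 * Scheduler-reported busy time, scaled to the max frequency
		 * and averaged over one timer period.
		 */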
		cputime_speedadj = (u64)sched_get_busy(data) *
				pcpu->policy->cpuinfo.max_freq;
		do_div(cputime_speedadj, timer_rate);
	} else {
		delta_time = (unsigned int)
				(now - pcpu->cputime_speedadj_timestamp);
		cputime_speedadj = pcpu->cputime_speedadj;
		spin_unlock_irqrestore(&pcpu->load_lock, flags);
		if (WARN_ON_ONCE(!delta_time))
			goto rearm;
		do_div(cputime_speedadj, delta_time);
	}

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	pcpu->prev_load = cpu_load;
	boosted = boost_val || now < boostpulse_endtime;
	boosted_freq = max(hispeed_freq, pcpu->policy->min);

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < boosted_freq) {
			new_freq = boosted_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq > freq_calc_thresh)
				new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < boosted_freq)
				new_freq = boosted_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);

		if (new_freq > freq_calc_thresh)
			new_freq = pcpu->policy->max * cpu_load / 100;

		if (sync_freq && new_freq < sync_freq) {

			max_load = 0;
			max_freq = 0;

			for_each_online_cpu(i) {
				picpu = &per_cpu(cpuinfo, i);

				if (i == data || picpu->prev_load <
						up_threshold_any_cpu_load)
					continue;

				max_load = max(max_load, picpu->prev_load);
				max_freq = max(max_freq, picpu->target_freq);
			}

			if (max_freq > up_threshold_any_cpu_freq ||
				max_load >= up_threshold_any_cpu_load)
				new_freq = sync_freq;
		}
	}
Example #4
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;
	unsigned long mod_min_sample_time;
	int i, max_load;
	unsigned int max_freq;
	struct cpufreq_interactive_cpuinfo *picpu;
	static unsigned int phase = 0;
	static unsigned int counter = 0;
	unsigned int nr_cpus;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	if (cpu_is_offline(data))
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	pcpu->prev_load = cpu_load;
	boosted = now < (last_input_time + boostpulse_duration_val);
	/*
	if (counter < 5) {
		counter++;
		if (counter > 2) {
			phase = 1;
		}
	}
	*/

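	/*
	 * The two-phase ramp-up (counter/phase/two_phase_freq) is commented
	 * out in this variant, so the governor jumps straight to
	 * hispeed_freq.
	 */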
	if (cpu_load >= go_hispeed_load) {
		if (pcpu->target_freq < hispeed_freq) {
			//nr_cpus = num_online_cpus();

			//pcpu->two_phase_freq = two_phase_freq_array[nr_cpus-1];
			//if (pcpu->two_phase_freq < pcpu->policy->cur)
			//	phase = 1;
			//if (pcpu->two_phase_freq != 0 && phase == 0) {
			//	new_freq = pcpu->two_phase_freq;
			//} else
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);

		if (sync_freq && new_freq < sync_freq) {
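			/*
			 * Unlike the variants above, this one compares each
			 * remote CPU's current frequency (policy->cur) rather
			 * than its target_freq.
			 */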

			max_load = 0;
			max_freq = 0;

			for_each_online_cpu(i) {
				picpu = &per_cpu(cpuinfo, i);

				if (i == data || picpu->prev_load <
						up_threshold_any_cpu_load)
					continue;

				max_load = max(max_load, picpu->prev_load);
				max_freq = max(max_freq, picpu->policy->cur);
			}

			if (max_freq > up_threshold_any_cpu_freq &&
				max_load >= up_threshold_any_cpu_load)
				new_freq = sync_freq;
		}
	}