/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Every sampling_rate *
 * sampling_down_factor, we check, if current idle time is more than 80%
 * (default), then we try to decrease frequency
 *
 * Any frequency increase takes it to the maximum frequency. Frequency reduction
 * happens at minimum steps of 5% (default) of maximum frequency
 */
/*
 * NOTE(review): this fragment is truncated mid-function in this view —
 * the final "if (load >= cs_tuners->up_threshold)" has no visible body
 * and the function is never closed. Review against the full file.
 */
static void cs_check_cpu(int cpu, unsigned int load)
{
	/* Per-CPU governor state and the tuner knobs hanging off the policy. */
	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	bool boosted;
	u64 now;

	/* Report the measured load to any utilization listeners. */
	cpufreq_notify_utilization(policy, load);

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		return;

	/*
	 * Input boost window: boosted while the current time is still within
	 * input_boost_duration of the last input event. Units presumably us
	 * (ktime_to_us vs get_input_time()) — TODO confirm against the
	 * get_input_time() definition.
	 */
	now = ktime_to_us(ktime_get());
	boosted = now < (get_input_time() + cs_tuners->input_boost_duration);

	/* Check for frequency increase */
	if (load > DEF_FREQUENCY_TWOSTEP_THRESHOLD) {
		/* Load cleared up_threshold: reset the down-sampling skip count. */
		if (load >= cs_tuners->up_threshold)
			dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (policy->cur == policy->max)
			return;

		/*
		 * "Two-step" ramp: for the first two samples where load is
		 * above the twostep threshold but below up_threshold, step
		 * toward half of policy->max instead of jumping straight up.
		 */
		if (load < cs_tuners->up_threshold &&
		    dbs_info->twostep_counter++ < 2) {
			dbs_info->twostep_time = now;
			dbs_info->requested_freq +=
				get_freq_target(cs_tuners, policy->max >> 1);
		} else {
			/* (truncated in this view) */
			if (load >= cs_tuners->up_threshold)
/*
 * Performance governor callback: on governor start or a change of the
 * policy limits, pin the CPU at the policy's maximum frequency and
 * report full utilization. All other governor events are ignored.
 * Always returns 0.
 */
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
					unsigned int event)
{
	if (event == CPUFREQ_GOV_START || event == CPUFREQ_GOV_LIMITS) {
		pr_debug("setting to %u kHz because of event %u\n",
			 policy->max, event);
		/* Round down to the highest frequency at or below max. */
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		cpufreq_notify_utilization(policy, LOAD);
	}

	return 0;
}
/*
 * Interactive governor sampling timer: recompute this CPU's load since the
 * last sample and pick a new target frequency.
 *
 * NOTE(review): this fragment is truncated in this view — the function body
 * is not closed and the "rearm"/"exit" labels targeted by the gotos are not
 * visible. "boosted" also has no visible declaration here; presumably a
 * file-scope variable — verify against the full file.
 */
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	/* 'data' is the CPU number this timer fires for. */
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	unsigned long mod_min_sample_time;
	int i, max_load;
	unsigned int max_freq;
	struct cpufreq_interactive_cpuinfo *picpu;

	/* Bail if the governor is being torn down on this CPU. */
	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	/* Snapshot the speed-adjusted cputime accumulated since last sample. */
	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	/* No elapsed time yet — nothing to compute, just rearm the timer. */
	if (!delta_time)
		goto rearm;

	/* cpu_load = 100 * (speed-adjusted busy time / window) / target_freq */
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	pcpu->prev_load = cpu_load;
	/* Still within the input-boost pulse window? */
	boosted = now < (last_input_time + boostpulse_duration_val);

	cpufreq_notify_utilization(pcpu->policy, cpu_load);

	if (cpu_load >= go_hispeed_load) {
		/* Heavy load: jump at least to hispeed_freq. */
		if (pcpu->target_freq < hispeed_freq)
			new_freq = hispeed_freq;
		else {
			new_freq = choose_freq(pcpu, loadadjfreq);
			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		/*
		 * Cross-CPU sync: if another online CPU is both loaded
		 * (>= up_threshold_any_cpu_load) and fast
		 * (> up_threshold_any_cpu_freq), raise this CPU to at
		 * least sync_freq.
		 */
		if (sync_freq && new_freq < sync_freq) {
			max_load = 0;
			max_freq = 0;
			for_each_online_cpu(i) {
				picpu = &per_cpu(cpuinfo, i);
				if (i == data ||
				    picpu->prev_load < up_threshold_any_cpu_load)
					continue;
				max_load = max(max_load, picpu->prev_load);
				max_freq = max(max_freq, picpu->target_freq);
			}
			if (max_freq > up_threshold_any_cpu_freq &&
			    max_load >= up_threshold_any_cpu_load)
				new_freq = sync_freq;
		}
	}