int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table)
{
	unsigned int next_larger = ~0;
	unsigned int i;
	unsigned int count = 0;

	pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
		 policy->min, policy->max, policy->cpu);

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq >= policy->min) && (freq <= policy->max))
			count++;
		else if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
	}

	if (!count)
		policy->max = next_larger;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
		 policy->min, policy->max, policy->cpu);

	return 0;
}
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table)
{
	unsigned int next_larger = ~0, freq, i = 0;
	bool found = false;

	pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
		 policy->min, policy->max, policy->cpu);

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq >= policy->min) && (freq <= policy->max)) {
			found = true;
			break;
		}
		if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
	}

	if (!found) {
		policy->max = next_larger;
		cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
					     policy->cpuinfo.max_freq);
	}

	pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
		 policy->min, policy->max, policy->cpu);

	return 0;
}
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table)
{
	unsigned int next_larger = ~0;
	unsigned int i;
	unsigned int count = 0;

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if ((freq >= policy->min) && (freq <= policy->max))
			count++;
		else if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
	}

	if (!count)
		policy->max = next_larger;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	return 0;
}
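/*
 * A minimal sketch of how the table-verify helpers above are typically used:
 * a driver hands its frequency table to cpufreq_frequency_table_verify() from
 * its ->verify callback. The table contents and function names here are
 * hypothetical, not taken from any driver in this collection. Note the
 * behavior when the requested range covers no table entry: asking for
 * 700000-900000 kHz against this table leaves count/found at zero, so
 * policy->max is raised to the next larger entry (1200000 kHz) and at least
 * one frequency stays selectable.
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 300000 },	/* kHz */
	{ .frequency = 600000 },
	{ .frequency = 1200000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, example_freq_table);
}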
/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 *
 * The sync kthread needs to run on the CPU in question to avoid deadlocks in
 * the wake up code. Achieve this by binding the thread to the respective
 * CPU. But a CPU going offline unbinds threads from that CPU. So, set it up
 * again each time the CPU comes back up. We can use CPUFREQ_START to figure
 * out when a CPU is coming online instead of registering for hotplug
 * notifiers.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;
	struct cpu_sync *s = &per_cpu(sync_info, cpu);
	unsigned int b_min = s->boost_min;
	unsigned int ib_min = s->input_boost_min;
	unsigned int min;

	switch (val) {
	case CPUFREQ_ADJUST:
		if (!b_min && !ib_min)
			break;

		min = max(b_min, ib_min);

		pr_debug("CPU%u policy min before boost: %u kHz\n",
			 cpu, policy->min);
		pr_debug("CPU%u boost min: %u kHz\n", cpu, min);

		cpufreq_verify_within_limits(policy, min, UINT_MAX);

		pr_debug("CPU%u policy min after boost: %u kHz\n",
			 cpu, policy->min);
		break;

	case CPUFREQ_START:
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
		break;
	}

	return NOTIFY_OK;
}
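/*
 * Sketch (assumed init path, hypothetical names): the callback above only
 * runs if it is registered on the cpufreq policy notifier chain.
 * cpufreq_register_notifier() with CPUFREQ_POLICY_NOTIFIER is the standard
 * way to have CPUFREQ_ADJUST/CPUFREQ_START events delivered to
 * boost_adjust_notify().
 */
static struct notifier_block boost_adjust_nb = {
	.notifier_call = boost_adjust_notify,
};

static int __init cpu_boost_init(void)
{
	return cpufreq_register_notifier(&boost_adjust_nb,
					 CPUFREQ_POLICY_NOTIFIER);
}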
static int bcl_cpufreq_callback(struct notifier_block *nfb,
				unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	uint32_t max_freq = UINT_MAX;

	if (!(bcl_frequency_mask & BIT(policy->cpu)))
		return NOTIFY_OK;

	switch (event) {
	case CPUFREQ_INCOMPATIBLE:
		if (bcl_vph_state == BCL_LOW_THRESHOLD ||
		    bcl_ibat_state == BCL_HIGH_THRESHOLD ||
		    bcl_soc_state == BCL_LOW_THRESHOLD) {
			max_freq = (gbcl->bcl_monitor_type ==
				    BCL_IBAT_MONITOR_TYPE) ?
				   gbcl->btm_freq_max : gbcl->bcl_p_freq_max;
		}
		pr_debug("Requesting Max freq:%u for CPU%d\n",
			 max_freq, policy->cpu);
		cpufreq_verify_within_limits(policy, 0, max_freq);
		break;
	}

	return NOTIFY_OK;
}
static int acpi_processor_ppc_notifier(struct notifier_block *nb,
				       unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct acpi_processor *pr;
	unsigned int ppc = 0;

	mutex_lock(&performance_mutex);

	if (event != CPUFREQ_INCOMPATIBLE)
		goto out;

	pr = processors[policy->cpu];
	if (!pr || !pr->performance)
		goto out;

	ppc = (unsigned int)pr->performance_platform_limit;
	if (ppc >= pr->performance->state_count)
		goto out;

	cpufreq_verify_within_limits(policy, 0,
		pr->performance->states[ppc].core_frequency * 1000);

out:
	mutex_unlock(&performance_mutex);
	return 0;
}
static int pmi_notifier(struct notifier_block *nb,
			unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *cbe_freqs;
	u8 node;

	/* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
	 * and CPUFREQ_NOTIFY policy events?
	 */
	if (event == CPUFREQ_START)
		return 0;

	cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
	node = cbe_cpu_to_node(policy->cpu);

	pr_debug("got notified, event=%lu, node=%u\n", event, node);

	if (pmi_slow_mode_limit[node] != 0) {
		pr_debug("limiting node %d to slow mode %d\n",
			 node, pmi_slow_mode_limit[node]);
		cpufreq_verify_within_limits(policy, 0,
			cbe_freqs[pmi_slow_mode_limit[node]].frequency);
	}

	return 0;
}
/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;
	struct cpu_sync *s = &per_cpu(sync_info, cpu);
	unsigned int b_min = s->boost_min;
	unsigned int ib_min = s->input_boost_min;
	unsigned int min;

	if (val != CPUFREQ_ADJUST)
		return NOTIFY_OK;

	if (!b_min && !ib_min)
		return NOTIFY_OK;

	min = max(b_min, ib_min);

	pr_debug("CPU%u policy min before boost: %u kHz\n", cpu, policy->min);
	pr_debug("CPU%u boost min: %u kHz\n", cpu, min);

	cpufreq_verify_within_limits(policy, min, UINT_MAX);

	pr_debug("CPU%u policy min after boost: %u kHz\n", cpu, policy->min);

	return NOTIFY_OK;
}
/*
 * make sure that only the "userspace" governor is run
 * -- anything else wouldn't make sense on this platform, anyway.
 */
int sa11x0_verify_speed(struct cpufreq_policy *policy)
{
	unsigned int tmp;

	if (policy->cpu)
		return -EINVAL;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	/* make sure that at least one frequency is within the policy */
	tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100;
	if (tmp > policy->max)
		policy->max = tmp;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	return 0;
}
static int tegra_verify_speed(struct cpufreq_policy *policy)
{
#if defined(CONFIG_USE_FAKE_SHMOO)
	return cpufreq_frequency_table_verify(policy, freq_table);
#else
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
#endif
}
static int bcl_cpufreq_callback(struct notifier_block *nfb,
				unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	switch (event) {
	case CPUFREQ_INCOMPATIBLE:
		if (bcl_vph_state == BCL_LOW_THRESHOLD) {
			cpufreq_verify_within_limits(policy, 0,
						     gbcl->btm_freq_max);
		} else if (bcl_vph_state == BCL_HIGH_THRESHOLD) {
			cpufreq_verify_within_limits(policy, 0, UINT_MAX);
		}
		break;
	}

	return NOTIFY_OK;
}
/*
 * Validate the speed policy.
 */
static int integrator_verify_policy(struct cpufreq_policy *policy)
{
	struct icst525_vco vco;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	vco = icst525_khz_to_vco(&cclk_params, policy->max);
	policy->max = icst525_khz(&cclk_params, vco);

	vco = icst525_khz_to_vco(&cclk_params, policy->min);
	policy->min = icst525_khz(&cclk_params, vco);

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	return 0;
}
static int limit_adjust_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_ADJUST)
		return 0;

	/* This is our indicator of GPU activity */
	if (regulator_is_enabled(g3d_pd_regulator))
#ifdef CONFIG_LIVE_OC
		cpufreq_verify_within_limits(policy, get_gpuminfreq(),
					     policy->cpuinfo.max_freq);
#else
		cpufreq_verify_within_limits(policy, MIN_CPU_KHZ_FREQ,
					     policy->cpuinfo.max_freq);
#endif

	return 0;
}
static int bcl_cpufreq_callback(struct notifier_block *nfb,
				unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	uint32_t max_freq = UINT_MAX;

	if (!(bcl_frequency_mask & BIT(policy->cpu)))
		return NOTIFY_OK;

	switch (event) {
	case CPUFREQ_INCOMPATIBLE:
#ifndef CONFIG_LGE_PM
		if (bcl_vph_state == BCL_LOW_THRESHOLD ||
		    bcl_ibat_state == BCL_HIGH_THRESHOLD ||
		    bcl_soc_state == BCL_LOW_THRESHOLD) {
#else
		if (bcl_vph_state == BCL_LOW_THRESHOLD ||
		    bcl_ibat_state == BCL_HIGH_THRESHOLD) {
#endif
			max_freq = (gbcl->bcl_monitor_type ==
				    BCL_IBAT_MONITOR_TYPE) ?
				   gbcl->btm_freq_max : gbcl->bcl_p_freq_max;
		}
		pr_debug("Requesting Max freq:%u for CPU%d\n",
			 max_freq, policy->cpu);
		cpufreq_verify_within_limits(policy, 0, max_freq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block bcl_cpufreq_notifier = {
	.notifier_call = bcl_cpufreq_callback,
};

static void update_cpu_freq(void)
{
	int cpu, ret = 0;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (bcl_frequency_mask & BIT(cpu)) {
			ret = cpufreq_update_policy(cpu);
			if (ret)
				pr_err("Error updating policy for CPU%d. ret:%d\n",
				       cpu, ret);
		}
	}
	put_online_cpus();
}
/**
 * longrun_verify_policy - verifies a new CPUFreq policy
 *
 * Validates a new CPUFreq policy. This function has to be called with
 * cpufreq_driver locked.
 */
static int longrun_verify_policy(struct cpufreq_policy *policy)
{
	if (!policy || !longrun_driver)
		return -EINVAL;

	policy->cpu = 0;
	cpufreq_verify_within_limits(policy,
				     longrun_driver->policy[0].cpuinfo.min_freq,
				     longrun_driver->policy[0].cpuinfo.max_freq);

	return 0;
}
static int thermal_notify(struct notifier_block *block, unsigned long event,
			  void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_ADJUST)
		return 0;

	if (maximum_freq)
		cpufreq_verify_within_limits(policy, 0, maximum_freq);

	return 0;
}
static int msm_thermal_cpufreq_callback(struct notifier_block *nfb,
					unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	switch (event) {
	case CPUFREQ_INCOMPATIBLE:
		cpufreq_verify_within_limits(policy, limited_min_freq,
					     limited_max_freq);
		break;
	}

	return NOTIFY_OK;
}
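/*
 * Sketch (assumed pattern, hypothetical helper name): the clamp above only
 * takes effect when the policy is re-evaluated, so after changing
 * limited_max_freq the driver has to kick cpufreq with
 * cpufreq_update_policy(), which re-runs the notifier chain -- the same
 * pattern as update_cpu_freq() above.
 */
static void msm_thermal_apply_limit(int cpu, uint32_t max_freq)
{
	limited_max_freq = max_freq;
	if (cpufreq_update_policy(cpu))
		pr_err("Unable to update policy for cpu:%d\n", cpu);
}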
static int clamp_notifier_call(struct notifier_block *self,
			       unsigned long event, void *data)
{
	struct cpufreq_policy *p = data;
	unsigned long max_freq;

	if (event != CPUFREQ_ADJUST)
		return 0;

	max_freq = clamped ? (p->cpuinfo.min_freq) : (p->cpuinfo.max_freq);
	cpufreq_verify_within_limits(p, 0, max_freq);

	return 0;
}
static int powernow_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data;
	struct processor_performance *perf;

	if (!policy || !(data = cpufreq_drv_data[policy->cpu]) ||
	    !processor_pminfo[policy->cpu])
		return -EINVAL;

	perf = &processor_pminfo[policy->cpu]->perf;

	cpufreq_verify_within_limits(policy, 0,
		perf->states[perf->platform_limit].core_frequency * 1000);

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}
static int longrun_verify_policy(struct cpufreq_policy *policy)
{
	if (!policy)
		return -EINVAL;

	policy->cpu = 0;
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
		return -EINVAL;

	return 0;
}
/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;
	struct cpu_sync *s = &per_cpu(sync_info, cpu);
	unsigned int min = s->boost_min;

	if (val != CPUFREQ_ADJUST)
		return NOTIFY_OK;

	if (min == 0)
		return NOTIFY_OK;

	cpufreq_verify_within_limits(policy, min, UINT_MAX);

	return NOTIFY_OK;
}
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST)
		goto out;

	max_freq = (policy->cpuinfo.max_freq *
		    (100 - cpufreq_thermal_reduction_pctg[policy->cpu])) / 100;

	cpufreq_verify_within_limits(policy, 0, max_freq);

out:
	return 0;
}
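/*
 * Worked example for the percentage clamp above: with
 * cpuinfo.max_freq = 2000000 kHz and a per-CPU reduction of 20%, the
 * notifier computes (2000000 * (100 - 20)) / 100 = 1600000 kHz and caps
 * policy->max there. A reduction of 0 leaves the full range available.
 */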
static int limit_adjust_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	return 0; /* todo: everything below is currently disabled */

	if (event != CPUFREQ_ADJUST)
		return 0;

#if !defined(CONFIG_MACH_qt210)
	/* This is our indicator of GPU activity */
	if (regulator_is_enabled(g3d_pd_regulator))
#endif
		cpufreq_verify_within_limits(policy, MIN_CPU_KHZ_FREQ,
					     policy->cpuinfo.max_freq);

	return 0;
}
static int update_cpu_max_freq(struct cpufreq_policy *cpu_policy,
			       int cpu, int max_freq)
{
	int ret = 0;

	if (!cpu_policy)
		return -EINVAL;

	cpufreq_verify_within_limits(cpu_policy, cpu_policy->min, max_freq);
	cpu_policy->user_policy.max = max_freq;

	ret = cpufreq_update_policy(cpu);
	if (!ret)
		pr_info("msm_thermal: Limiting core%d max frequency to %d\n",
			cpu, max_freq);

	return ret;
}
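/*
 * Note on the pattern above: in kernels of this snippet's vintage,
 * cpufreq_update_policy() rebuilds the policy limits from user_policy before
 * re-running the notifier chain, so writing user_policy.max first makes the
 * thermal cap persist across later policy re-evaluations instead of being
 * clobbered by the next update.
 */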
static int davinci_verify_speed(struct cpufreq_policy *policy)
{
	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
	struct cpufreq_frequency_table *freq_table = pdata->freq_table;
	struct clk *armclk = cpufreq.armclk;

	if (freq_table)
		return cpufreq_frequency_table_verify(policy, freq_table);

	if (policy->cpu)
		return -EINVAL;

	cpufreq_verify_within_cpu_limits(policy);
	policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
	policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	return 0;
}
/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb: struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function is invoked.
 * @data: callback-specific data
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with thermal constraints.
 *
 * Return: 0 (success)
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
		return 0;

	if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
		max_freq = notify_device->cpufreq_val;

	/* Never exceed user_policy.max */
	if (max_freq > policy->user_policy.max)
		max_freq = policy->user_policy.max;

	if (policy->max != max_freq)
		cpufreq_verify_within_limits(policy, 0, max_freq);

	return 0;
}
/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;

	if (val != CPUFREQ_ADJUST)
		return NOTIFY_OK;

	/* just in case someone underclocks below input_boost_freq */
	if (boost_freq_buf > policy->max)
		boost_freq_buf = policy->max;

	pr_debug("CPU%u policy min before boost: %u kHz\n", cpu, policy->min);
	pr_debug("CPU%u boost min: %u kHz\n", cpu, boost_freq_buf);

	cpufreq_verify_within_limits(policy, boost_freq_buf, UINT_MAX);

	pr_debug("CPU%u policy min after boost: %u kHz\n", cpu, policy->min);

	return NOTIFY_OK;
}
static int pmi_notifier(struct notifier_block *nb,
			unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *cbe_freqs;
	u8 node;

	cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
	node = cbe_cpu_to_node(policy->cpu);

	pr_debug("got notified, event=%lu, node=%u\n", event, node);

	if (pmi_slow_mode_limit[node] != 0) {
		pr_debug("limiting node %d to slow mode %d\n",
			 node, pmi_slow_mode_limit[node]);
		cpufreq_verify_within_limits(policy, 0,
			cbe_freqs[pmi_slow_mode_limit[node]].frequency);
	}

	return 0;
}
static int tegra_verify_speed(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}