/*
 * update_cpu_min_freq_all() - push a new minimum-frequency floor to all CPUs.
 * @min: requested floor in kHz, or MSM_CPUFREQ_NO_LIMIT to leave it open.
 *
 * Lazily fetches the cpufreq table on first use, clamps @min to the highest
 * permitted frequency (unless the caller asked for no limit), installs the
 * new min/max pair for every possible CPU and asks cpufreq to re-evaluate
 * each policy.
 *
 * Return: 0 on success, or the negative errno from the first failing step.
 */
static int update_cpu_min_freq_all(uint32_t min)
{
	int ret = 0;
	int cpu = 0;
	uint32_t max_allowed;

	if (!freq_table_get) {
		ret = check_freq_table();
		if (ret) {
			pr_err("%s:Fail to get freq table\n", __func__);
			return ret;
		}
	}

	/* If min is larger than allowed max */
	max_allowed = table[limit_idx_high].frequency;
	if (min != MSM_CPUFREQ_NO_LIMIT && min > max_allowed)
		min = max_allowed;

	for_each_possible_cpu(cpu) {
		ret = msm_cpufreq_set_freq_limits(cpu, min, limited_max_freq);
		if (ret) {
			pr_err("%s:Fail to set limits for cpu%d\n",
					__func__, cpu);
			return ret;
		}

		/* Policy refresh failure is non-fatal; just note it. */
		if (cpufreq_update_policy(cpu))
			pr_debug("%s: Cannot update policy for cpu%d\n",
					__func__, cpu);
	}

	return ret;
}
/*
 * update_cpu_min_freq_all() - record and broadcast a new minimum frequency.
 * @min: requested floor in kHz.
 *
 * Clamps @min to the highest allowed table frequency, stores it in
 * limited_min_freq (picked up by the cpufreq policy notifier) and then
 * forces every online CPU to re-evaluate its policy under hotplug
 * protection.
 *
 * Return: 0 on success, or the error from fetching the frequency table.
 */
static int update_cpu_min_freq_all(uint32_t min)
{
	int cpu_id = 0;
	int rc = 0;

	if (!freq_table_get) {
		rc = check_freq_table();
		if (rc) {
			pr_err("%s:Fail to get freq table\n", KBUILD_MODNAME);
			return rc;
		}
	}

	/* If min is larger than allowed max */
	if (min > table[limit_idx_high].frequency)
		min = table[limit_idx_high].frequency;
	limited_min_freq = min;

	/* Hold off hotplug while policies are refreshed. */
	get_online_cpus();
	for_each_online_cpu(cpu_id) {
		if (cpufreq_update_policy(cpu_id))
			pr_info("%s: Unable to update policy for cpu:%d\n",
					KBUILD_MODNAME, cpu_id);
	}
	put_online_cpus();

	return rc;
}
static int do_vdd_restriction(void) { struct tsens_device tsens_dev; long temp = 0; int ret = 0; int i = 0; int dis_cnt = 0; if (!vdd_rstr_enabled) return ret; if (usefreq && !freq_table_get) { if (check_freq_table()) return ret; } mutex_lock(&vdd_rstr_mutex); for (i = 0; i < max_tsens_num; i++) { tsens_dev.sensor_num = i; ret = tsens_get_temp(&tsens_dev, &temp); if (ret) { pr_debug("%s: Unable to read TSENS sensor %d\n", __func__, tsens_dev.sensor_num); dis_cnt++; continue; } if (temp <= msm_thermal_info.vdd_rstr_temp_hyst_degC && vdd_rstr_en.enabled == 0) { ret = vdd_restriction_apply_all(1); if (ret) { pr_err( \ "Enable vdd rstr votlage for all failed\n"); goto exit; } vdd_rstr_en.enabled = 1; goto exit; } else if (temp > msm_thermal_info.vdd_rstr_temp_degC && vdd_rstr_en.enabled == 1) dis_cnt++; } if (dis_cnt == max_tsens_num) { ret = vdd_restriction_apply_all(0); if (ret) { pr_err("Disable vdd rstr votlage for all failed\n"); goto exit; } vdd_rstr_en.enabled = 0; } exit: mutex_unlock(&vdd_rstr_mutex); return ret; }
static int vdd_restriction_reg_init(struct platform_device *pdev) { int ret = 0; int i; for (i = 0; i < rails_cnt; i++) { if (rails[i].freq_req == 1) { usefreq |= BIT(i); check_freq_table(); /* * Restrict frequency by default until we have made * our first temp reading */ if (freq_table_get) ret = vdd_restriction_apply_freq(&rails[i], 0); else pr_info("%s:Defer vdd rstr freq init\n", __func__); } else { rails[i].reg = devm_regulator_get(&pdev->dev, rails[i].name); if (IS_ERR_OR_NULL(rails[i].reg)) { ret = PTR_ERR(rails[i].reg); if (ret != -EPROBE_DEFER) { pr_err( \ "%s, could not get regulator: %s\n", rails[i].name, __func__); rails[i].reg = NULL; rails[i].curr_level = -2; return ret; } return ret; } /* * Restrict votlage by default until we have made * our first temp reading */ ret = vdd_restriction_apply_voltage(&rails[i], 0); } } return ret; }
/*
 * update_cpu_min_freq_all() - push a new minimum-frequency floor to all CPUs.
 * @min: requested floor in kHz, or MSM_CPUFREQ_NO_LIMIT to leave it open.
 *
 * NOTE(review): unlike the cpufreq_update_policy() variant, this one
 * installs the limits in the msm cpufreq driver and then re-targets each
 * online CPU at its current frequency with CPUFREQ_RELATION_L, which
 * re-clamps policy->cur against the new floor without a full policy
 * re-evaluation.  Presumably intentional for latency — confirm before
 * changing the ordering.
 *
 * Return: 0 on success, or the negative errno from the first failing step.
 */
static int update_cpu_min_freq_all(uint32_t min)
{
	int cpu = 0;
	int ret = 0;
	struct cpufreq_policy *policy = NULL;

	/* Lazily fetch the cpufreq frequency table on first use. */
	if (!freq_table_get) {
		ret = check_freq_table();
		if (ret) {
			pr_err("%s:Fail to get freq table\n", __func__);
			return ret;
		}
	}

	/* If min is larger than allowed max */
	if (min != MSM_CPUFREQ_NO_LIMIT &&
		min > table[limit_idx_high].frequency)
		min = table[limit_idx_high].frequency;

	for_each_possible_cpu(cpu) {
		ret = msm_cpufreq_set_freq_limits(cpu, min, limited_max_freq);
		if (ret) {
			pr_err("%s:Fail to set limits for cpu%d\n",
				__func__, cpu);
			return ret;
		}
		if (cpu_online(cpu)) {
			/*
			 * cpufreq_cpu_get() takes a reference; it can return
			 * NULL if the policy vanished (e.g. hotplug race), in
			 * which case this CPU is simply skipped.
			 */
			policy = cpufreq_cpu_get(cpu);
			if (!policy)
				continue;
			cpufreq_driver_target(policy, policy->cur,
				CPUFREQ_RELATION_L);
			cpufreq_cpu_put(policy);
		}
	}

	return ret;
}