static void check_temp(struct work_struct *work)
{
	struct cpufreq_policy *cpu_policy = NULL;
	struct tsens_device tsens_dev;
	unsigned long temp = 0;
	unsigned int max_freq = 0;
	int update_policy = 0;
	int cpu = 0;
	int ret = 0;

	tsens_dev.sensor_num = DEF_TEMP_SENSOR;
	ret = tsens_get_temp(&tsens_dev, &temp);
	if (ret) {
		pr_debug("msm_thermal: Unable to read TSENS sensor %d\n",
				tsens_dev.sensor_num);
		goto reschedule;
	} else
		pr_info("msm_thermal: TSENS sensor %d (%ld C)\n",
				tsens_dev.sensor_num, temp);

	for_each_possible_cpu(cpu) {
		update_policy = 0;
		cpu_policy = cpufreq_cpu_get(cpu);
		if (!cpu_policy) {
			pr_debug("msm_thermal: NULL policy on cpu %d\n", cpu);
			continue;
		}

		if (temp >= allowed_max_high) {
			/* too hot: cap this CPU's policy max */
			if (cpu_policy->max > allowed_max_freq) {
				update_policy = 1;
				max_freq = allowed_max_freq;
			} else {
				pr_debug("msm_thermal: policy max for cpu %d "
					 "already < allowed_max_freq\n", cpu);
			}
		} else if (temp < allowed_max_low) {
			/* cooled below the low threshold: restore the hardware max */
			if (cpu_policy->max < cpu_policy->cpuinfo.max_freq) {
				max_freq = cpu_policy->cpuinfo.max_freq;
				update_policy = 1;
			} else {
				pr_debug("msm_thermal: policy max for cpu %d "
					 "already at max allowed\n", cpu);
			}
		}

		if (update_policy)
			update_cpu_max_freq(cpu_policy, cpu, max_freq);

		cpufreq_cpu_put(cpu_policy);
	}

reschedule:
	if (enabled)
		schedule_delayed_work(&check_temp_work,
				msecs_to_jiffies(check_interval_ms));
}
static void check_temp(struct work_struct *work)
{
	struct cpufreq_policy *cpu_policy = NULL;
	struct tsens_device tsens_dev;
	unsigned long temp = 0;
	unsigned int max_freq = 0;
	int update_policy = 0;
	int cpu = 0;
	int ret = 0;

	mutex_lock(&policy_mutex);

	tsens_dev.sensor_num = DEF_TEMP_SENSOR;
	ret = tsens_get_temp(&tsens_dev, &temp);
	if (ret) {
		pr_debug("msm_thermal: Unable to read TSENS sensor %d\n",
				tsens_dev.sensor_num);
		goto reschedule;
	}

	/* lock hotplug when updating CPUfreq policy */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		update_policy = 0;
		cpu_policy = per_cpu(policy, cpu);
		if (!cpu_policy) {
			pr_debug("msm_thermal: No CPUFreq policy found for "
				 "cpu %d\n", cpu);
			continue;
		}

		if (temp >= allowed_max_high) {
			/* too hot: clamp policy max to the thermal limit */
			if (cpu_policy->max > allowed_max_freq) {
				update_policy = 1;
				max_freq = allowed_max_freq;
			}
		} else if (temp < allowed_max_low) {
			/* cooled down: restore the full hardware maximum */
			if (cpu_policy->max < cpu_policy->cpuinfo.max_freq) {
				max_freq = cpu_policy->cpuinfo.max_freq;
				update_policy = 1;
			}
		}

		if (update_policy)
			update_cpu_max_freq(cpu, max_freq);
	}
	put_online_cpus();

reschedule:
	if (enabled)
		schedule_delayed_work(&check_temp_work,
				msecs_to_jiffies(check_interval_ms));
	mutex_unlock(&policy_mutex);
}
static void __cpuinit disable_msm_thermal(void)
{
	int cpu = 0;

	cancel_delayed_work_sync(&check_temp_work);
	flush_scheduled_work();

	for_each_possible_cpu(cpu) {
		update_cpu_max_freq(cpu, MSM_CPUFREQ_NO_LIMIT);
	}
}
/**
 * We will reset the CPU frequency limits here. The core online/offline
 * status will be carried over to the process stopping msm_thermal, as
 * we don't want to online a core and bring in the thermal issues.
 */
static void __cpuinit disable_msm_thermal(void)
{
	int cpu = 0;

	/* make sure check_temp is no longer running */
	cancel_delayed_work_sync(&check_temp_work);

	if (limited_max_freq == MSM_CPUFREQ_NO_LIMIT)
		return;

	for_each_possible_cpu(cpu) {
		update_cpu_max_freq(cpu, MSM_CPUFREQ_NO_LIMIT);
	}
}
static void disable_msm_thermal(void)
{
	int cpu = 0;
	struct cpufreq_policy *cpu_policy = NULL;

	for_each_possible_cpu(cpu) {
		cpu_policy = cpufreq_cpu_get(cpu);
		if (cpu_policy) {
			if (cpu_policy->max < cpu_policy->cpuinfo.max_freq)
				update_cpu_max_freq(cpu_policy, cpu,
						cpu_policy->cpuinfo.max_freq);
			cpufreq_cpu_put(cpu_policy);
		}
	}
}
/**
 * We will reset the CPU frequency limits here. The core online/offline
 * status will be carried over to the process stopping msm_thermal, as
 * we don't want to online a core and bring in the thermal issues.
 */
static void __cpuinit disable_msm_thermal(void)
{
	int cpu = 0;

	/* make sure check_temp is no longer running */
	cancel_delayed_work(&check_temp_work);
	flush_scheduled_work();

#if defined(CONFIG_MACH_APQ8064_GK_KR) || defined(CONFIG_MACH_APQ8064_GKATT)\
	|| defined(CONFIG_MACH_APQ8064_GVDCM) || defined(CONFIG_MACH_APQ8064_GV_KR)\
	|| defined(CONFIG_MACH_APQ8064_GKGLOBAL) || defined(CONFIG_MACH_APQ8064_OMEGAR_KR)\
	|| defined(CONFIG_MACH_APQ8064_OMEGA_KR)
	if (lge_get_factory_boot())
		return;

	if (limited_max_freq == DEF_ALLOWED_MAX_FREQ_1) {
		pr_info("msm_thermal: Continue to limit cpu%d max freq to %d\n",
				cpu, DEF_ALLOWED_MAX_FREQ_1);
		return;
	}

	if (limited_max_freq == DEF_ALLOWED_MAX_FREQ_2) {
		pr_info("msm_thermal: Continue to limit cpu%d cur freq to %d\n",
				cpu, DEF_ALLOWED_MAX_FREQ_2);
		limit_cpufreq = 1;
		/* hold the limit for another 6 seconds before releasing it */
		if (timer_pending(&limit_timer))
			del_timer(&limit_timer);
		init_timer(&limit_timer);
		limit_timer.function = msm_thermal_limit_holding_timer;
		limit_timer.expires = jiffies + msecs_to_jiffies(6000);
		add_timer(&limit_timer);
	}
#endif

	if (limited_max_freq == MSM_CPUFREQ_NO_LIMIT)
		return;

#if defined(CONFIG_MACH_APQ8064_GVAR_CMCC) //
	if (limited_max_freq == DEF_ALLOWED_MAX_FREQ) {
		pr_info("msm_thermal: continue max_freq = %d..\n",
				DEF_ALLOWED_MAX_FREQ);
		return;
	}
#endif

	for_each_possible_cpu(cpu) {
		update_cpu_max_freq(cpu, MSM_CPUFREQ_NO_LIMIT);
	}
}
static void disable_msm_thermal(void)
{
	int cpu = 0;
	struct cpufreq_policy *cpu_policy = NULL;

	cpufreq_unregister_notifier(&msm_thermal_notifier_block,
			CPUFREQ_POLICY_NOTIFIER);
	cancel_delayed_work(&check_temp_work);

	mutex_lock(&policy_mutex);
	for_each_possible_cpu(cpu) {
		cpu_policy = per_cpu(policy, cpu);
		if (cpu_policy &&
				cpu_policy->max < cpu_policy->cpuinfo.max_freq)
			update_cpu_max_freq(cpu, cpu_policy->cpuinfo.max_freq);
	}
	mutex_unlock(&policy_mutex);

	unregister_hotcpu_notifier(&msm_thermal_hotcpu_notify);
}
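The two variants above read per_cpu(policy, cpu) and unregister a CPUFREQ_POLICY_NOTIFIER, but the notifier itself is not reproduced here. The following is only a minimal sketch of one common way such a callback enforces a global cap via cpufreq_verify_within_limits(); the callback name and the limited_max_freq global are assumptions, not taken from these sources.

/* Hypothetical sketch: clamp policy->max to the current thermal cap
 * whenever cpufreq re-evaluates a policy (CPUFREQ_ADJUST). Assumes a
 * global limited_max_freq maintained by update_cpu_max_freq(). */
static int msm_thermal_cpufreq_callback(struct notifier_block *nb,
		unsigned long event, void *data)
{
	struct cpufreq_policy *cpu_policy = data;

	if (event == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(cpu_policy, 0, limited_max_freq);

	return NOTIFY_OK;
}

static struct notifier_block msm_thermal_notifier_block = {
	.notifier_call = msm_thermal_cpufreq_callback,
};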
static void check_temp(struct work_struct *work)
{
	struct tsens_device tsens_dev;
	unsigned long temp = 0;
	uint32_t max_freq = limited_max_freq;
	int cpu = 0;
	int ret = 0;

	tsens_dev.sensor_num = msm_thermal_info.sensor_id;
	ret = tsens_get_temp(&tsens_dev, &temp);
	if (ret) {
		pr_debug("msm_thermal: Unable to read TSENS sensor %d\n",
				tsens_dev.sensor_num);
		goto reschedule;
	} else
		pr_info("msm_thermal: TSENS sensor %d (%ld C)\n",
				tsens_dev.sensor_num, temp);

	if (temp >= msm_thermal_info.limit_temp) {
		max_freq = msm_thermal_info.limit_freq;
#ifdef CONFIG_PERFLOCK_BOOT_LOCK
		release_boot_lock();
#endif
	} else if (temp < msm_thermal_info.limit_temp -
			msm_thermal_info.temp_hysteresis)
		max_freq = MSM_CPUFREQ_NO_LIMIT;

	if (max_freq == limited_max_freq)
		goto reschedule;

	for_each_possible_cpu(cpu) {
		ret = update_cpu_max_freq(cpu, max_freq);
		if (ret)
			pr_debug("Unable to limit cpu%d max freq to %d\n",
					cpu, max_freq);
	}

reschedule:
	if (enabled)
		schedule_delayed_work(&check_temp_work,
				msecs_to_jiffies(msm_thermal_info.poll_ms));
}
static void __ref do_freq_control(long temp)
{
	int ret = 0;
	int cpu = 0;
	uint32_t max_freq = limited_max_freq;

	if (temp >= msm_thermal_info.limit_temp_degC) {
		if (limit_idx == limit_idx_low)
			return;

		limit_idx -= msm_thermal_info.freq_step;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		max_freq = table[limit_idx].frequency;
	} else if (temp < msm_thermal_info.limit_temp_degC -
			msm_thermal_info.temp_hysteresis_degC) {
		if (limit_idx == limit_idx_high)
			return;

		limit_idx += msm_thermal_info.freq_step;
		if (limit_idx >= limit_idx_high) {
			limit_idx = limit_idx_high;
			max_freq = MSM_CPUFREQ_NO_LIMIT;
		} else
			max_freq = table[limit_idx].frequency;
	}

	if (max_freq == limited_max_freq)
		return;

	/* Update new limits */
	for_each_possible_cpu(cpu) {
		if (!(msm_thermal_info.freq_control_mask & BIT(cpu)))
			continue;
		ret = update_cpu_max_freq(cpu, max_freq);
		if (ret)
			pr_debug(
			"%s: Unable to limit cpu%d max freq to %d\n",
			KBUILD_MODNAME, cpu, max_freq);
	}
}
static void update_all_cpus_max_freq_if_changed(int new_throttled_bin,
		unsigned temp)
{
	int cpu;
	int ret;

	if (throttled_bin == new_throttled_bin)
		return;

#ifdef CONFIG_PERFLOCK_BOOT_LOCK
	release_boot_lock();
#endif

	throttled_bin = new_throttled_bin;

	/* Update new limits */
	for_each_possible_cpu(cpu) {
		ret = update_cpu_max_freq(cpu, throttled_bin, temp);
		if (ret)
			pr_warn("Unable to limit cpu%d\n", cpu);
	}
}
static void __ref do_freq_control(long temp)
{
	int cpu = 0;
	uint32_t max_freq = limited_max_freq;

	if (temp >= msm_thermal_info.limit_temp_degC) {
		if (limit_idx == limit_idx_low)
			return;

		limit_idx -= msm_thermal_info.freq_step;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		max_freq = table[limit_idx].frequency;
	} else if (temp < msm_thermal_info.limit_temp_degC -
			msm_thermal_info.temp_hysteresis_degC) {
		if (limit_idx == limit_idx_high)
			return;

		limit_idx += msm_thermal_info.freq_step;
		if (limit_idx >= limit_idx_high) {
			limit_idx = limit_idx_high;
			max_freq = UINT_MAX;
		} else
			max_freq = table[limit_idx].frequency;
	}

	if (max_freq == limited_max_freq)
		return;

	limited_max_freq = max_freq;

	/* Update new limits */
	for_each_possible_cpu(cpu) {
		if (!(msm_thermal_info.freq_control_mask & BIT(cpu)))
			continue;
		update_cpu_max_freq(cpu, max_freq);
	}
}
static void __cpuinit check_temp(struct work_struct *work)
{
	static int limit_init;
	struct tsens_device tsens_dev;
	long temp = 0;
	uint32_t max_freq = limited_max_freq;
	int cpu = 0;
	int ret = 0;

	tsens_dev.sensor_num = msm_thermal_info.sensor_id;
	ret = tsens_get_temp(&tsens_dev, &temp);
	current_temp = temp;
	if (ret) {
		pr_debug("%s: Unable to read TSENS sensor %d\n",
				KBUILD_MODNAME, tsens_dev.sensor_num);
		goto reschedule;
	}

	if (!limit_init) {
		ret = msm_thermal_get_freq_table();
		if (ret)
			goto reschedule;
		else
			limit_init = 1;
	}

	do_core_control(temp);

	if (temp >= msm_thermal_info.limit_temp_degC) {
		if (limit_idx == limit_idx_low)
			goto reschedule;

		limit_idx -= msm_thermal_info.freq_step;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		max_freq = table[limit_idx].frequency;
	} else if (temp < msm_thermal_info.limit_temp_degC -
			msm_thermal_info.temp_hysteresis_degC) {
		if (limit_idx == limit_idx_high)
			goto reschedule;

		limit_idx += msm_thermal_info.freq_step;
		if (limit_idx >= limit_idx_high) {
			limit_idx = limit_idx_high;
			max_freq = MSM_CPUFREQ_NO_LIMIT;
		} else
			max_freq = table[limit_idx].frequency;
	}

	if (max_freq == limited_max_freq)
		goto reschedule;

	/* Update new limits */
	for_each_possible_cpu(cpu) {
		ret = update_cpu_max_freq(cpu, max_freq);
		if (ret)
			pr_debug(
			"%s: Unable to limit cpu%d max freq to %d\n",
			KBUILD_MODNAME, cpu, max_freq);
	}

reschedule:
	if (enabled)
		schedule_delayed_work(&check_temp_work,
				msecs_to_jiffies(msm_thermal_info.poll_ms));
}
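The table/limit_idx stepping used above relies on a one-time msm_thermal_get_freq_table() call that is not reproduced in these snippets. Below is only a minimal sketch of what that setup typically looks like, assuming the cpufreq_frequency_get_table() API and the table/limit_idx_low/limit_idx_high/limit_idx globals referenced by the variants above.

/* Sketch: cache CPU0's frequency table and initialise the index bounds
 * so that limit_idx_high corresponds to the "no limit" step. */
static int msm_thermal_get_freq_table(void)
{
	int ret = 0;
	int i = 0;

	table = cpufreq_frequency_get_table(0);
	if (table == NULL) {
		pr_debug("%s: error reading cpufreq table\n", KBUILD_MODNAME);
		ret = -EINVAL;
		goto fail;
	}

	/* count valid entries up to the table terminator */
	while (table[i].frequency != CPUFREQ_TABLE_END)
		i++;

	limit_idx_low = 0;
	limit_idx_high = limit_idx = i - 1;
fail:
	return ret;
}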
static void check_temp(struct work_struct *work)
{
	static int limit_init;
	struct tsens_device tsens_dev;
	unsigned long temp = 0;
	uint32_t max_freq = limited_max_freq;
	int cpu = 0;
	int ret = 0;

	policy = cpufreq_cpu_get(0);

	tsens_dev.sensor_num = msm_thermal_info.sensor_id;
	ret = tsens_get_temp(&tsens_dev, &temp);
	if (ret) {
		pr_debug("msm_thermal: Unable to read TSENS sensor %d\n",
				tsens_dev.sensor_num);
		goto reschedule;
	}

	if (!limit_init) {
		ret = msm_thermal_get_freq_table();
		if (ret)
			goto reschedule;
		else
			limit_init = 1;
	}

	if (temp >= temp_threshold) {
		if (!throttling) {
			/* remember the pre-throttle max so it can be restored */
			max_frequency = policy->max;
			throttling = true;
		}

		if (limit_idx == limit_idx_low)
			goto reschedule;

		limit_idx = limit_idx_low;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		max_freq = table[limit_idx].frequency;
	} else if (temp < (temp_threshold - 5)) {
		if (limit_idx == limit_idx_high)
			goto reschedule;

		limit_idx = limit_idx_high;
		max_freq = max_frequency;
	}

	if (max_freq == limited_max_freq)
		goto reschedule;

	/* Update new limits */
	for_each_possible_cpu(cpu) {
		ret = update_cpu_max_freq(cpu, max_freq);
		if (ret)
			pr_debug("Unable to limit cpu%d max freq to %d\n",
					cpu, max_freq);
	}

reschedule:
	if (enabled)
		schedule_delayed_work(&check_temp_work,
				msecs_to_jiffies(POLLING_DELAY));
}
static void __cpuinit check_temp(struct work_struct *work)
{
	static int limit_init;
	struct tsens_device tsens_dev;
	long temp = 0;
	uint32_t max_freq = limited_max_freq;
	int cpu = 0;
	int ret = 0;

	tsens_dev.sensor_num = msm_thermal_info.sensor_id;
	ret = tsens_get_temp(&tsens_dev, &temp);
	if (ret) {
		pr_debug("%s: Unable to read TSENS sensor %d\n",
				KBUILD_MODNAME, tsens_dev.sensor_num);
		goto reschedule;
	}

	if (!limit_init) {
		ret = msm_thermal_get_freq_table();
		if (ret)
			goto reschedule;
		else
			limit_init = 1;
	}

#if defined(CONFIG_MACH_APQ8064_GK_KR) || defined(CONFIG_MACH_APQ8064_GKATT)\
	|| defined(CONFIG_MACH_APQ8064_GVDCM) || defined(CONFIG_MACH_APQ8064_GV_KR)\
	|| defined(CONFIG_MACH_APQ8064_GKGLOBAL) || defined(CONFIG_MACH_APQ8064_OMEGAR_KR)\
	|| defined(CONFIG_MACH_APQ8064_OMEGA_KR)
	if (lge_get_factory_boot())
		return;

	do_core_control(temp);

	if (temp >= msm_thermal_info.limit_temp_degC ||
			temp <= msm_thermal_info.limit_temp_degC_low) {
		max_freq = DEF_ALLOWED_MAX_FREQ_1;
		pr_info("msm_thermal: tsens_temp %ld\n", temp);
	} else if ((temp < msm_thermal_info.limit_temp_degC -
			msm_thermal_info.temp_hysteresis_degC) &&
			(temp > msm_thermal_info.limit_temp_degC_low)) {
		max_freq = DEF_ALLOWED_MAX_FREQ_2;
	} else {
		if (limited_max_freq == MSM_CPUFREQ_NO_LIMIT)
			max_freq = DEF_ALLOWED_MAX_FREQ_2;
	}
#else
	do_core_control(temp);

	if (temp >= msm_thermal_info.limit_temp_degC
#if defined(CONFIG_MACH_APQ8064_GVAR_CMCC) //
		|| temp <= msm_thermal_info.limit_temp_degC_low
#endif
		) {
		if (limit_idx == limit_idx_low)
			goto reschedule;

		limit_idx -= msm_thermal_info.freq_step;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		max_freq = table[limit_idx].frequency;
#ifdef CONFIG_LGE_PM
		if (max_freq >= 1026000)
			max_freq = DEF_ALLOWED_MAX_FREQ;
		pr_info("msm_thermal: tsens_temp %ld\n", temp);
#endif
	} else if ((temp < msm_thermal_info.limit_temp_degC -
			msm_thermal_info.temp_hysteresis_degC)
#if defined(CONFIG_MACH_APQ8064_GVAR_CMCC) //
		&& (temp > msm_thermal_info.limit_temp_degC_low)
#endif
		) {
		if (limit_idx == limit_idx_high)
			goto reschedule;

		limit_idx += msm_thermal_info.freq_step;
		if (limit_idx >= limit_idx_high) {
			limit_idx = limit_idx_high;
			max_freq = MSM_CPUFREQ_NO_LIMIT;
		} else
			max_freq = table[limit_idx].frequency;
	}
#endif

	if (max_freq == limited_max_freq)
		goto reschedule;

	/* Update new limits */
	for_each_possible_cpu(cpu) {
		ret = update_cpu_max_freq(cpu, max_freq);
		if (ret)
			pr_debug(
			"%s: Unable to limit cpu%d max freq to %d\n",
			KBUILD_MODNAME, cpu, max_freq);
	}

reschedule:
	if (enabled)
		schedule_delayed_work(&check_temp_work,
				msecs_to_jiffies(msm_thermal_info.poll_ms));
}
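Every variant in this section delegates the actual capping to update_cpu_max_freq(), which is not shown. The following is only a minimal sketch of a two-argument version, assuming a global limited_max_freq that a cpufreq policy notifier (such as the sketch earlier in this section) enforces; trees without such a notifier typically go through a platform helper instead, so treat the body as illustrative rather than the driver's actual implementation.

/* Hypothetical sketch: record the new cap and ask cpufreq to re-evaluate
 * this CPU's policy so the policy notifier can apply the limit. */
static int update_cpu_max_freq(int cpu, uint32_t max_freq)
{
	int ret;

	limited_max_freq = max_freq;

	if (max_freq != MSM_CPUFREQ_NO_LIMIT)
		pr_info("msm_thermal: Limiting cpu%d max frequency to %u\n",
				cpu, max_freq);
	else
		pr_info("msm_thermal: Max frequency reset for cpu%d\n", cpu);

	/* triggers a policy update (and the CPUFREQ_ADJUST notification) */
	ret = cpufreq_update_policy(cpu);

	return ret;
}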