static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int val;
	unsigned int cpufreq_level;
	int lock_ret;
	ssize_t ret = -EINVAL;
	struct cpufreq_policy *policy;

	mutex_lock(&cpufreq_limit_mutex);

	if (sscanf(buf, "%d", &val) != 1) {
		printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	if (val == -1) { /* Unlock request */
		if (cpufreq_max_limit_val != -1) {
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_USER);
			/*
			 * Yank555.lu - unlock now means set the lock to the
			 * scaling max to support powersave mode properly.
			 */
			/* cpufreq_max_limit_val = -1; */
			policy = cpufreq_cpu_get(0);
			if (policy) {
				if (get_cpufreq_level(policy->max,
						&cpufreq_level) == VALID_LEVEL) {
					lock_ret = exynos_cpufreq_upper_limit(
							DVFS_LOCK_ID_USER,
							cpufreq_level);
					cpufreq_max_limit_val = policy->max;
					cpufreq_max_limit_coupled =
							SCALING_MAX_COUPLED;
				}
				/* drop the reference taken by cpufreq_cpu_get() */
				cpufreq_cpu_put(policy);
			}
		} else {
			/* Already unlocked */
			printk(KERN_ERR "%s: Unlock request is ignored\n",
				__func__);
		}
	} else { /* Lock request */
		if (get_cpufreq_level((unsigned int)val,
				&cpufreq_level) == VALID_LEVEL) {
			if (cpufreq_max_limit_val != -1) {
				/* Unlock the previous lock */
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_USER);
				/* a limit existed, so uncouple */
				cpufreq_max_limit_coupled =
						SCALING_MAX_UNCOUPLED;
			} else {
				/* no limit existed, we're booting, so couple */
				cpufreq_max_limit_coupled =
						SCALING_MAX_COUPLED;
			}
			lock_ret = exynos_cpufreq_upper_limit(
					DVFS_LOCK_ID_USER, cpufreq_level);
			/*
			 * ret of exynos_cpufreq_upper_limit is meaningless.
			 * 0 is fail? success?
			 */
			cpufreq_max_limit_val = val;
		} else {
			/* Invalid lock request --> No action */
			printk(KERN_ERR "%s: Lock request is invalid\n",
				__func__);
		}
	}

	ret = n;
out:
	mutex_unlock(&cpufreq_limit_mutex);
	return ret;
}
static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int val;
	unsigned int cpufreq_level;
	int lock_ret;
	ssize_t ret = -EINVAL;

	mutex_lock(&cpufreq_limit_mutex);

	if (sscanf(buf, "%d", &val) != 1) {
		printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	if (val == -1) { /* Unlock request */
		if (cpufreq_max_limit_val != -1) {
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_USER);
			cpufreq_max_limit_val = -1;
		} else {
			/* Already unlocked */
			printk(KERN_ERR "%s: Unlock request is ignored\n",
				__func__);
		}
	} else { /* Lock request */
		if (val < 1400000) {
			val = 1000000;
			if (get_cpufreq_level((unsigned int)val,
					&cpufreq_level) == VALID_LEVEL) {
				if (cpufreq_max_limit_val != -1)
					/* Unlock the previous lock */
					exynos_cpufreq_upper_limit_free(
							DVFS_LOCK_ID_USER);
				lock_ret = exynos_cpufreq_upper_limit(
						DVFS_LOCK_ID_USER,
						cpufreq_level);
				/*
				 * ret of exynos_cpufreq_upper_limit is
				 * meaningless. 0 is fail? success?
				 */
				cpufreq_max_limit_val = val;
			} else {
				/* Invalid lock request --> No action */
				printk(KERN_ERR "%s: Lock request is invalid\n",
					__func__);
			}
		}
	}

	ret = n;
out:
	mutex_unlock(&cpufreq_limit_mutex);
	return ret;
}
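/*
 * Hedged sketch, not from the original source: both store handlers above are
 * written against the kobj_attribute sysfs interface, so they are presumably
 * exposed roughly as below. The attribute name, the 0644 mode, the show
 * handler and the use of power_kobj are assumptions for illustration only.
 */
static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_max_limit_val);
}

static struct kobj_attribute cpufreq_max_limit_attr =
	__ATTR(cpufreq_max_limit, 0644,
	       cpufreq_max_limit_show, cpufreq_max_limit_store);

/* registration, e.g. from an __init function: */
/* err = sysfs_create_file(power_kobj, &cpufreq_max_limit_attr.attr); */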
static void tmu_monitor(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tmu_info *info =
		container_of(delayed_work, struct tmu_info, polling);
	struct tmu_data *data = info->dev->platform_data;
	int cur_temp;

	cur_temp = get_cur_temp(info);
#ifdef CONFIG_TMU_DEBUG
	cancel_delayed_work(&info->monitor);
	pr_info("Current: %dc, FLAG=%d\n", cur_temp, info->tmu_state);
#endif
	mutex_lock(&tmu_lock);

	switch (info->tmu_state) {
#if defined(CONFIG_TC_VOLTAGE)
	case TMU_STATUS_TC:
		if (cur_temp >= data->ts.stop_tc) {
			if (exynos_tc_volt(info, 0) < 0)
				pr_err("%s\n", __func__);
			info->tmu_state = TMU_STATUS_NORMAL;
			already_limit = 0;
			pr_info("TC limit is released!!\n");
		} else if (cur_temp <= data->ts.start_tc && !already_limit) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("%s\n", __func__);
			already_limit = 1;
		}
		break;
#endif
	case TMU_STATUS_NORMAL:
#ifdef CONFIG_TMU_DEBUG
		queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
				info->sampling_rate);
#endif
		__raw_writel((CLEAR_RISE_INT | CLEAR_FALL_INT),
				info->tmu_base + INTCLEAR);
		enable_irq(info->irq);
		mutex_unlock(&tmu_lock);
		return;
	case TMU_STATUS_THROTTLED:
		if (cur_temp >= data->ts.start_warning) {
			info->tmu_state = TMU_STATUS_WARNING;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			already_limit = 0;
		} else if (cur_temp > data->ts.stop_throttle &&
			   cur_temp < data->ts.start_warning &&
			   !already_limit) {
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->throttle_freq);
			already_limit = 1;
		} else if (cur_temp <= data->ts.stop_throttle) {
			info->tmu_state = TMU_STATUS_NORMAL;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			pr_info("Freq limit is released!!\n");
			already_limit = 0;
		}
		break;
	case TMU_STATUS_WARNING:
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			already_limit = 0;
		} else if (cur_temp > data->ts.stop_warning &&
			   cur_temp < data->ts.start_tripping &&
			   !already_limit) {
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->warning_freq);
			already_limit = 1;
		} else if (cur_temp <= data->ts.stop_warning) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			already_limit = 0;
		}
		break;
	case TMU_STATUS_TRIPPED:
		mutex_unlock(&tmu_lock);
		tmu_tripped_cb();
		return;
	default:
		break;
	}

	/* memory throttling */
	if (cur_temp >= data->ts.start_mem_throttle && !auto_refresh_changed) {
		pr_info("set auto_refresh 1.95us\n");
		set_refresh_rate(info->auto_refresh_tq0);
		auto_refresh_changed = 1;
	} else if (cur_temp <= data->ts.stop_mem_throttle &&
		   auto_refresh_changed) {
		pr_info("set auto_refresh 3.9us\n");
		set_refresh_rate(info->auto_refresh_normal);
		auto_refresh_changed = 0;
	}

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);
	mutex_unlock(&tmu_lock);
	return;
}
static void exynos4_handler_tmu_state(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct s5p_tmu_info *info =
		container_of(delayed_work, struct s5p_tmu_info, polling);
	struct s5p_platform_tmu *data = info->dev->platform_data;
	unsigned int cur_temp;
	static int auto_refresh_changed;
	static int check_handle;
	int trend = 0;
	int cpu = 0;

	mutex_lock(&tmu_lock);

	cur_temp = get_curr_temp(info);
	trend = cur_temp - info->last_temperature;
	pr_debug("curr_temp = %u, temp_diff = %d\n", cur_temp, trend);

	switch (info->tmu_state) {
#if defined(CONFIG_TC_VOLTAGE)
	case TMU_STATUS_TC:
		/* lock has priority over unlock */
		if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
		} else if (cur_temp >= data->ts.stop_tc) {
			if (exynos_tc_volt(info, 0) < 0) {
				pr_err("TMU: unlock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_NORMAL;
				pr_info("change state: tc -> normal.\n");
			}
		}
		/* free if an upper limit is locked */
		if (check_handle) {
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			check_handle = 0;
		}
		break;
#endif
	case TMU_STATUS_NORMAL:
		/* 1. change state: 1st-throttling */
		if (cur_temp >= data->ts.start_1st_throttle) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: normal->throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0) {
				pr_err("TMU: lock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_TC;
				pr_info("change state: normal->tc.\n");
			}
#endif
		/* 2. polling end and uevent */
		} else if ((cur_temp <= data->ts.stop_1st_throttle) &&
			   (cur_temp <= data->ts.stop_mem_throttle)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("normal: free cpufreq_limit & interrupt enable.\n");

			for_each_online_cpu(cpu)
				cpufreq_update_policy(cpu);

			/* clear pending bits to avoid a spurious interrupt */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			exynos_interrupt_enable(info, 1);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;
	case TMU_STATUS_THROTTLED:
		/* 1. change state: 2nd-throttling or warning */
		if (cur_temp >= data->ts.start_2nd_throttle) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: 1st throttle->2nd throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_1st_throttle) &&
			   !(check_handle & THROTTLE_FLAG)) {
			if (check_handle & WARNING_FLAG) {
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_TMU);
				check_handle &= ~(WARNING_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_1st_throttle);
			check_handle |= THROTTLE_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("throttling: set cpufreq upper limit.\n");
		/* 3. change state: normal */
		} else if ((cur_temp <= data->ts.stop_1st_throttle) &&
			   (trend < 0)) {
			info->tmu_state = TMU_STATUS_NORMAL;
			pr_info("change state: 1st throttle->normal.\n");
		}
		break;
	case TMU_STATUS_WARNING:
		/* 1. change state: tripping */
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			pr_info("change state: 2nd throttle->trip\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_2nd_throttle) &&
			   !(check_handle & WARNING_FLAG)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_2nd_throttle);
			check_handle |= WARNING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("2nd throttle: cpufreq is limited.\n");
		/* 3. change state: 1st-throttling */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle) &&
			   (trend < 0)) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: 2nd throttle->1st throttle, "
				"and release cpufreq upper limit.\n");
		}
		break;
	case TMU_STATUS_TRIPPED:
		/* 1. call uevent to shut down */
		if ((cur_temp >= data->ts.start_tripping) && (trend > 0) &&
		    !(check_handle & TRIPPING_FLAG)) {
			notify_change_of_tmu_state(info);
			pr_info("tripping: on waiting shutdown.\n");
			check_handle |= TRIPPING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. change state: 2nd-throttling or warning */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle) &&
			   (trend < 0)) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: trip->2nd throttle, "
				"Check! occurred only in test mode.\n");
		}
		/* 3. chip protection: kernel panic as SW workaround */
		if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) {
			panic("Emergency!!!! tripping is not treated!\n");
			/* not reached: panic() does not return */
			/* clear pending bits to avoid a spurious interrupt */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;
	case TMU_STATUS_INIT:
		/* send tmu initial status to platform */
		disable_irq(info->irq);
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else if (cur_temp >= data->ts.start_2nd_throttle)
			info->tmu_state = TMU_STATUS_WARNING;
		else if (cur_temp >= data->ts.start_1st_throttle)
			info->tmu_state = TMU_STATUS_THROTTLED;
		else if (cur_temp <= data->ts.stop_1st_throttle)
			info->tmu_state = TMU_STATUS_NORMAL;

		notify_change_of_tmu_state(info);
		pr_info("%s: inform init state to platform.\n", __func__);
		break;
	default:
		pr_warn("Bug: unhandled tmu_state.\n");
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else
			info->tmu_state = TMU_STATUS_WARNING;
		break;
	} /* end */

	info->last_temperature = cur_temp;

	/* reschedule the next work */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);
	mutex_unlock(&tmu_lock);
	return;
}
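/*
 * Hedged sketch, not from the original source: the handler above re-enables
 * the TMU IRQ only once the state settles back to NORMAL, which implies an
 * interrupt handler that masks itself and defers to this polling work. The
 * function name and the 500 ms kick-off delay below are assumptions.
 */
static irqreturn_t exynos4_tmu_irq_handler(int irq, void *id)
{
	struct s5p_tmu_info *info = id;

	disable_irq_nosync(irq);	/* re-enabled by the state handler */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			usecs_to_jiffies(500 * 1000));
	return IRQ_HANDLED;
}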
static void tmu_monitor(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tmu_info *info =
		container_of(delayed_work, struct tmu_info, polling);
	struct tmu_data *data = info->dev->platform_data;
	unsigned char cur_temp;

#ifdef CONFIG_TMU_DEBUG
	cancel_delayed_work(&info->monitor);
#endif
	cur_temp = get_cur_temp(info);
	pr_info("Current: %dc, FLAG=%d\n", cur_temp, info->tmu_state);

	switch (info->tmu_state) {
	case TMU_STATUS_NORMAL:
#ifdef CONFIG_TMU_DEBUG
		queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
				usecs_to_jiffies(1000 * 1000));
#endif
		cancel_delayed_work(&info->polling);
		enable_irq(info->irq);
		break;
	case TMU_STATUS_THROTTLED:
		if (cur_temp >= data->ts.start_warning)
			info->tmu_state = TMU_STATUS_WARNING;
		else if (cur_temp > data->ts.stop_throttle &&
			 cur_temp < data->ts.start_warning)
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					data->cpulimit.throttle_freq);
		else if (cur_temp <= data->ts.stop_throttle) {
			info->tmu_state = TMU_STATUS_NORMAL;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
		}
		queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
				usecs_to_jiffies(500 * 1000));
		break;
	case TMU_STATUS_WARNING:
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
		else if (cur_temp > data->ts.stop_warning &&
			 cur_temp < data->ts.start_tripping)
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					data->cpulimit.warning_freq);
		else if (cur_temp <= data->ts.stop_warning) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
		}
		queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
				usecs_to_jiffies(500 * 1000));
		break;
	case TMU_STATUS_TRIPPED:
		tmu_tripped_cb();
		queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
				usecs_to_jiffies(5000 * 1000));
		/* fall through */
	default:
		break;
	}

	return;
}
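/*
 * Hedged sketch, not from the original source: the polling work used by
 * tmu_monitor() above is presumably set up in probe along these lines. The
 * helper name, the workqueue name and the freezable choice are assumptions.
 */
static int tmu_init_polling(struct tmu_info *info)
{
	tmu_monitor_wq = create_freezable_workqueue("tmu_monitor_wq");
	if (!tmu_monitor_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&info->polling, tmu_monitor);
	return 0;
}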
static void ir_remocon_send(struct ir_remocon_data *data)
{
	unsigned int period, off_period = 0;
	unsigned int duty;
	unsigned int on, off = 0;
	unsigned int i, j;
	int ret;
	static int cpu_lv = -1;

	if (data->pwr_en == -1) {
		regulator = regulator_get(NULL, "vled_3.3v");
		if (IS_ERR(regulator))
			goto out;
		regulator_enable(regulator);
		regulator_status = 1;
	}

	if (data->pwr_en != -1)
		gpio_direction_output(data->pwr_en, 1);

	__udelay(1000);

	if (cpu_lv == -1) {
		if (data->pwr_en == -1)
			exynos_cpufreq_get_level(500000, &cpu_lv);
		else
			exynos_cpufreq_get_level(800000, &cpu_lv);
	}

	/* pin the CPU frequency so the bit-banged timing stays stable */
	ret = exynos_cpufreq_lock(DVFS_LOCK_ID_IR_LED, cpu_lv);
	if (ret < 0)
		pr_err("%s: fail to lock cpufreq\n", __func__);

	ret = exynos_cpufreq_upper_limit(DVFS_LOCK_ID_IR_LED, cpu_lv);
	if (ret < 0)
		pr_err("%s: fail to lock cpufreq(limit)\n", __func__);

	if (data->pwr_en == -1)
		period = (MICRO_SEC / data->signal[0]) - 2;
	else
		period = (MICRO_SEC / data->signal[0]) - 1;

	duty = period / 4;
	on = duty;
	off = period - duty;

	local_irq_disable();
	for (i = 1; i < MAX_SIZE; i += 2) {
		if (data->signal[i] == 0)
			break;

		/* carrier burst: signal[i] cycles of the modulation period */
		for (j = 0; j < data->signal[i]; j++) {
			gpio_direction_output(data->gpio, 1);
			__udelay(on);
			gpio_direction_output(data->gpio, 0);
			__udelay(off);
		}

		if (data->pwr_en == -1)
			period = (MICRO_SEC / data->signal[0]);
		else
			period = (MICRO_SEC / data->signal[0]) + 1;

		/* gap: signal[i+1] carrier periods with the LED off */
		off_period = data->signal[i + 1] * period;
		if (off_period <= 9999) {
			if (off_period > 1000) {
				__udelay(off_period % 1000);
				mdelay(off_period / 1000);
			} else
				__udelay(off_period);
		} else {
			local_irq_enable();
			__udelay(off_period % 1000);
			mdelay(off_period / 1000);
			local_irq_disable();
		}
	}

	gpio_direction_output(data->gpio, 1);
	__udelay(on);
	gpio_direction_output(data->gpio, 0);
	__udelay(off);
	local_irq_enable();

	pr_info("%s end!\n", __func__);

	exynos_cpufreq_lock_free(DVFS_LOCK_ID_IR_LED);
	exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_IR_LED);

	if (data->pwr_en != -1)
		gpio_direction_output(data->pwr_en, 0);
	if ((data->pwr_en == -1) && (regulator_status == 1)) {
		regulator_force_disable(regulator);
		regulator_put(regulator);
		regulator_status = -1;
	}
out:
	return;
}
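/*
 * Hedged worked example, not from the original driver: signal[0] above is
 * taken to be the carrier frequency in Hz and MICRO_SEC to be 1000000, so for
 * a typical 38 kHz IR carrier the bit-bang loop runs with roughly
 * period = 1000000 / 38000 ~= 26 us, on = period / 4 ~= 6 us, off ~= 20 us
 * per cycle. A self-contained version of that arithmetic:
 */
static void ir_carrier_times(unsigned int carrier_hz,
			     unsigned int *on_us, unsigned int *off_us)
{
	unsigned int period = 1000000 / carrier_hz;	/* carrier period, us */
	unsigned int duty = period / 4;			/* ~25% duty cycle */

	*on_us = duty;			/* LED on time per cycle */
	*off_us = period - duty;	/* LED off time per cycle */
}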
static void exynos4_handler_tmu_state(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct s5p_tmu_info *info =
		container_of(delayed_work, struct s5p_tmu_info, polling);
	struct s5p_platform_tmu *data = info->dev->platform_data;
	unsigned int cur_temp;
	static int auto_refresh_changed;
	static int check_handle;
	int trend = 0;

	mutex_lock(&tmu_lock);

	cur_temp = get_curr_temp(info);
	trend = cur_temp - info->last_temperature;
	pr_debug("curr_temp = %d, temp_diff = %d\n", cur_temp, trend);

	switch (info->tmu_state) {
	case TMU_STATUS_TC:
#if defined(CONFIG_TC_VOLTAGE)
		if (cur_temp >= data->ts.stop_tc) {
			if (check_handle & TC_VOLTAGE_FLAG) {
				exynos_cpufreq_lock_free(DVFS_LOCK_ID_TMU);
#ifdef CONFIG_BUSFREQ_OPP
				if (dev_unlock(info->bus_dev, info->dev))
					pr_err("TMU: dev_unlock error!\n");
#endif
				if (mali_voltage_lock_pop() < 0)
					pr_err("TMU: g3d_pop error\n");
				check_handle &= ~(TC_VOLTAGE_FLAG);
				pr_info("change state: tc -> normal.\n");
			}
			info->tmu_state = TMU_STATUS_NORMAL;
		} else if (cur_temp <= data->ts.start_tc) {
			if (!(check_handle & TC_VOLTAGE_FLAG)) {
				if (exynos_cpufreq_lock(DVFS_LOCK_ID_TMU,
						info->cpulevel_tc) < 0)
					pr_err("TMU: cpu_lock error!\n");
#ifdef CONFIG_BUSFREQ_OPP
				if (dev_lock(info->bus_dev, info->dev,
						info->busfreq_tc) < 0)
					pr_err("TMU: bus_lock error\n");
#endif
				if (mali_voltage_lock_push(
						data->temp_compensate.g3d_volt) < 0)
					pr_err("TMU: g3d_push error [%u] uV\n",
						data->temp_compensate.g3d_volt);
				check_handle |= TC_VOLTAGE_FLAG;
			}
		}
#endif
		break;
	case TMU_STATUS_NORMAL:
		/* 1. change state: 1st-throttling */
		if (cur_temp >= data->ts.start_1st_throttle) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: normal->throttle.\n");
		/* 2. polling end and uevent */
#if defined(CONFIG_TC_VOLTAGE)
		} else if ((cur_temp <= data->ts.stop_1st_throttle) &&
			   (cur_temp >= data->ts.stop_tc) &&
			   (cur_temp <= data->ts.stop_mem_throttle)) {
#else
		} else if ((cur_temp <= data->ts.stop_1st_throttle) &&
			   (cur_temp <= data->ts.stop_mem_throttle)) {
#endif
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("normal: free cpufreq_limit & interrupt enable.\n");

			/* clear pending bits to avoid a spurious interrupt */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			exynos_interrupt_enable(info, 1);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;
	case TMU_STATUS_THROTTLED:
		/* 1. change state: 2nd-throttling or warning */
		if (cur_temp >= data->ts.start_2nd_throttle) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: 1st throttle->2nd throttle.\n");
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_1st_throttle) &&
			   !(check_handle & THROTTLE_FLAG)) {
			if (check_handle & WARNING_FLAG) {
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_TMU);
				check_handle &= ~(WARNING_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_1st_throttle);
			check_handle |= THROTTLE_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("throttling: set cpufreq upper limit.\n");
		/* 3. change state: normal */
		} else if ((cur_temp <= data->ts.stop_1st_throttle) &&
			   (trend < 0)) {
			info->tmu_state = TMU_STATUS_NORMAL;
			pr_info("change state: 1st throttle->normal.\n");
		}
		break;
	case TMU_STATUS_WARNING:
		/* 1. change state: tripping */
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			pr_info("change state: 2nd throttle->trip\n");
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_2nd_throttle) &&
			   !(check_handle & WARNING_FLAG)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_2nd_throttle);
			check_handle |= WARNING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("2nd throttle: cpufreq is limited.\n");
		/* 3. change state: 1st-throttling */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle) &&
			   (trend < 0)) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: 2nd throttle->1st throttle, "
				"and release cpufreq upper limit.\n");
		}
		break;
	case TMU_STATUS_TRIPPED:
		/* 1. call uevent to shut down */
		if ((cur_temp >= data->ts.start_tripping) && (trend > 0) &&
		    !(check_handle & TRIPPING_FLAG)) {
			notify_change_of_tmu_state(info);
			pr_info("tripping: on waiting shutdown.\n");
			check_handle |= TRIPPING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
		/* 2. change state: 2nd-throttling or warning */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle) &&
			   (trend < 0)) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: trip->2nd throttle, "
				"Check! occurred only in test mode.\n");
		}
		/* 3. chip protection: kernel panic as SW workaround */
		if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) {
			panic("Emergency!!!! tripping is not treated!\n");
			/* not reached: panic() does not return */
			/* clear pending bits to avoid a spurious interrupt */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;
	case TMU_STATUS_INIT:
		/* send tmu initial status to platform */
		disable_irq(info->irq);
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		else if (cur_temp >= data->ts.start_tc)
			info->tmu_state = TMU_STATUS_TC;
#endif
		else if (cur_temp >= data->ts.start_2nd_throttle)
			info->tmu_state = TMU_STATUS_WARNING;
		else if (cur_temp >= data->ts.start_1st_throttle)
			info->tmu_state = TMU_STATUS_THROTTLED;
		else if (cur_temp <= data->ts.stop_1st_throttle)
			info->tmu_state = TMU_STATUS_NORMAL;

		notify_change_of_tmu_state(info);
		pr_info("%s: inform init state to platform.\n", __func__);
		break;
	default:
		pr_warn("Bug: unhandled tmu_state.\n");
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
		else
			info->tmu_state = TMU_STATUS_WARNING;
		break;
	} /* end */

	/* memory throttling */
	if (cur_temp >= data->ts.start_mem_throttle) {
		if (!auto_refresh_changed && (trend > 0)) {
			pr_info("set auto_refresh 1.95us\n");
			set_refresh_rate(info->auto_refresh_tq0);
			auto_refresh_changed = 1;
		}
	} else if (cur_temp <= data->ts.stop_mem_throttle) {
		if (auto_refresh_changed && (trend < 0)) {
			pr_info("set auto_refresh 3.9us\n");
			set_refresh_rate(info->auto_refresh_normal);
			auto_refresh_changed = 0;
		}
	}

	info->last_temperature = cur_temp;

	/* reschedule the next work */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);
	mutex_unlock(&tmu_lock);
	return;
}
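/*
 * Hedged sketch, not from the original source: the handlers above track which
 * DVFS upper limit is currently held in the check_handle bit mask and always
 * free the old limit before taking a new one. The flag values below are
 * illustrative only (the real definitions live in the driver headers); the
 * helper simply restates that repeated free-then-lock pattern.
 */
#define THROTTLE_FLAG	(1 << 0)
#define WARNING_FLAG	(1 << 1)
#define TRIPPING_FLAG	(1 << 2)
#define TC_VOLTAGE_FLAG	(1 << 3)

static void tmu_swap_upper_limit(int *check_handle, int old_flag, int new_flag,
				 unsigned int new_level)
{
	/* release the previously held limit, if any */
	if (*check_handle & old_flag) {
		exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
		*check_handle &= ~old_flag;
	}
	/* take the new limit and remember which state owns it */
	exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, new_level);
	*check_handle |= new_flag;
}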