static irqreturn_t tmu_irq(int irq, void *id) { struct tmu_info *info = id; unsigned int status; disable_irq_nosync(irq); status = __raw_readl(info->tmu_base + INTSTAT); if (status & INTSTAT_RISE0) { pr_info("Throttling interrupt occured!!!!\n"); __raw_writel(INTCLEAR_RISE0, info->tmu_base + INTCLEAR); info->tmu_state = TMU_STATUS_THROTTLED; queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, usecs_to_jiffies(500 * 1000)); } else if (status & INTSTAT_RISE1) { pr_info("Warning interrupt occured!!!!\n"); __raw_writel(INTCLEAR_RISE1, info->tmu_base + INTCLEAR); info->tmu_state = TMU_STATUS_WARNING; queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, usecs_to_jiffies(500 * 1000)); } else if (status & INTSTAT_RISE2) { pr_info("Tripping interrupt occured!!!!\n"); info->tmu_state = TMU_STATUS_TRIPPED; __raw_writel(INTCLEAR_RISE2, info->tmu_base + INTCLEAR); tmu_tripped_cb(); } else { pr_err("%s: TMU interrupt error\n", __func__); return -ENODEV; } return IRQ_HANDLED; }
/*
 * tmu_monitor() - delayed-work state machine driving CPU frequency limits,
 * (optionally) ARM voltage compensation, and DRAM auto-refresh throttling
 * from the current TMU temperature.
 *
 * Scheduled by tmu_irq() after a threshold interrupt and re-arms itself
 * every info->sampling_rate jiffies until the state returns to NORMAL, at
 * which point it clears pending interrupts, re-enables the TMU irq and
 * stops polling. All state transitions are serialized by tmu_lock.
 *
 * NOTE(review): 'already_limit' and 'auto_refresh_changed' appear to be
 * file-scope flags guarding against re-applying the same limit each poll —
 * declared outside this view; confirm they are only touched under tmu_lock.
 */
static void tmu_monitor(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tmu_info *info = container_of(delayed_work, struct tmu_info, polling);
	struct tmu_data *data = info->dev->platform_data;
	int cur_temp;

	cur_temp = get_cur_temp(info);
#ifdef CONFIG_TMU_DEBUG
	/* Debug build runs a separate periodic monitor; restart it below. */
	cancel_delayed_work(&info->monitor);
	pr_info("Current: %dc, FLAG=%d\n", cur_temp, info->tmu_state);
#endif
	mutex_lock(&tmu_lock);
	switch (info->tmu_state) {
#if defined(CONFIG_TC_VOLTAGE)
	case TMU_STATUS_TC:
		/* Cold-temperature voltage compensation (TC): raise voltage
		 * below start_tc, release once the chip warms past stop_tc. */
		if (cur_temp >= data->ts.stop_tc) {
			if (exynos_tc_volt(info, 0) < 0)
				pr_err("%s\n", __func__);
			info->tmu_state = TMU_STATUS_NORMAL;
			already_limit = 0;
			pr_info("TC limit is released!!\n");
		} else if (cur_temp <= data->ts.start_tc && !already_limit) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("%s\n", __func__);
			already_limit = 1;
		}
		break;
#endif
	case TMU_STATUS_NORMAL:
		/* Back to normal: stop polling, clear any latched interrupts
		 * and unmask the TMU irq that tmu_irq() disabled. */
#ifdef CONFIG_TMU_DEBUG
		queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
				info->sampling_rate);
#endif
		__raw_writel((CLEAR_RISE_INT|CLEAR_FALL_INT),
				info->tmu_base + INTCLEAR);
		enable_irq(info->irq);
		mutex_unlock(&tmu_lock);
		return;
	case TMU_STATUS_THROTTLED:
		/* Hysteresis band: cap cpufreq at throttle_freq while between
		 * stop_throttle and start_warning; escalate or release at the
		 * band edges. */
		if (cur_temp >= data->ts.start_warning) {
			info->tmu_state = TMU_STATUS_WARNING;
			/* Drop the throttle cap so the WARNING pass below can
			 * install the (stricter) warning_freq cap afresh. */
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			already_limit = 0;
		} else if (cur_temp > data->ts.stop_throttle &&
				cur_temp < data->ts.start_warning && !already_limit) {
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->throttle_freq);
			already_limit = 1;
		} else if (cur_temp <= data->ts.stop_throttle) {
			info->tmu_state = TMU_STATUS_NORMAL;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			pr_info("Freq limit is released!!\n");
			already_limit = 0;
		}
		break;
	case TMU_STATUS_WARNING:
		/* Same pattern one band higher: warning_freq cap between
		 * stop_warning and start_tripping. */
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			already_limit = 0;
		} else if (cur_temp > data->ts.stop_warning &&
				cur_temp < data->ts.start_tripping && !already_limit) {
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->warning_freq);
			already_limit = 1;
		} else if (cur_temp <= data->ts.stop_warning) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			already_limit = 0;
		}
		break;
	case TMU_STATUS_TRIPPED:
		/* Emergency: hand off to the trip callback (expected to power
		 * down); unlock first since we never return to this loop. */
		mutex_unlock(&tmu_lock);
		tmu_tripped_cb();
		return;
	default:
		break;
	}
	/* memory throttling */
	/* Independent of the CPU state machine: shorten the DRAM auto-refresh
	 * period at high temperature, restore it once cool again. */
	if (cur_temp >= data->ts.start_mem_throttle && !(auto_refresh_changed)) {
		pr_info("set auto_refresh 1.95us\n");
		set_refresh_rate(info->auto_refresh_tq0);
		auto_refresh_changed = 1;
	} else if (cur_temp <= (data->ts.stop_mem_throttle) && (auto_refresh_changed)) {
		pr_info("set auto_refresh 3.9us\n");
		set_refresh_rate(info->auto_refresh_normal);
		auto_refresh_changed = 0;
	}
	/* Still out of the NORMAL state: keep polling. */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);
	mutex_unlock(&tmu_lock);
	return;
}
static void tmu_monitor(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct tmu_info *info = container_of(delayed_work, struct tmu_info, polling); struct tmu_data *data = info->dev->platform_data; unsigned char cur_temp; #ifdef CONFIG_TMU_DEBUG cancel_delayed_work(&info->monitor); #endif cur_temp = get_cur_temp(info); pr_info("Current: %dc, FLAG=%d\n", cur_temp, info->tmu_state); switch (info->tmu_state) { case TMU_STATUS_NORMAL: #ifdef CONFIG_TMU_DEBUG queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor, usecs_to_jiffies(1000 * 1000)); #endif cancel_delayed_work(&info->polling); enable_irq(info->irq); break; case TMU_STATUS_THROTTLED: if (cur_temp >= data->ts.start_warning) info->tmu_state = TMU_STATUS_WARNING; else if (cur_temp > data->ts.stop_throttle && cur_temp < data->ts.start_warning) exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, data->cpulimit.throttle_freq); else if (cur_temp <= data->ts.stop_throttle) { info->tmu_state = TMU_STATUS_NORMAL; exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU); } queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, usecs_to_jiffies(500 * 1000)); break; case TMU_STATUS_WARNING: if (cur_temp >= data->ts.start_tripping) info->tmu_state = TMU_STATUS_TRIPPED; else if (cur_temp > data->ts.stop_warning && \ cur_temp < data->ts.start_tripping) exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, data->cpulimit.warning_freq); else if (cur_temp <= data->ts.stop_warning) { info->tmu_state = TMU_STATUS_THROTTLED; exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU); } queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, usecs_to_jiffies(500 * 1000)); break; case TMU_STATUS_TRIPPED: tmu_tripped_cb(); queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, usecs_to_jiffies(5000 * 1000)); default: break; } return; }