/*
 * ir_remocon_send - bit-bang one IR frame on data->gpio.
 *
 * data->signal[0] appears to be the carrier frequency (MICRO_SEC / signal[0]
 * yields the carrier period in us) -- TODO confirm units against the caller.
 * Odd entries signal[1], signal[3], ... are carrier-on pulse counts; the
 * following even entries are gap lengths in carrier periods. A zero entry
 * terminates the frame.
 *
 * The carrier is generated with busy-wait delays under local_irq_disable(),
 * so the CPU frequency is pinned (both a floor and a ceiling) via DVFS locks
 * to keep __udelay() calibration stable. Power comes either from the
 * "vled_3.3v" regulator (boards with pwr_en == -1) or from the pwr_en GPIO.
 * NOTE(review): `regulator` and `regulator_status` are file-scope state, and
 * the regulator_enable() return value is ignored here.
 */
static void ir_remocon_send(struct ir_remocon_data *data)
{
	unsigned int period, off_period = 0;
	unsigned int duty;
	unsigned int on, off = 0;
	unsigned int i, j;
	int ret;
	static int cpu_lv = -1;	/* cached DVFS level; resolved on first send */

	/* Power the IR LED: regulator-based boards have no pwr_en GPIO. */
	if (data->pwr_en == -1) {
		regulator = regulator_get(NULL, "vled_3.3v");
		if (IS_ERR(regulator))
			goto out;
		regulator_enable(regulator);
		regulator_status = 1;
	}
	if (data->pwr_en != -1)
		gpio_direction_output(data->pwr_en, 1);
	/* Let the LED supply settle before transmitting. */
	__udelay(1000);

	/* Pin the CPU clock so busy-wait timing is repeatable. */
	if (cpu_lv == -1) {
		if (data->pwr_en == -1)
			exynos_cpufreq_get_level(500000, &cpu_lv);
		else
			exynos_cpufreq_get_level(800000, &cpu_lv);
	}
	ret = exynos_cpufreq_lock(DVFS_LOCK_ID_IR_LED, cpu_lv);
	if (ret < 0)
		pr_err("%s: fail to lock cpufreq\n", __func__);
	ret = exynos_cpufreq_upper_limit(DVFS_LOCK_ID_IR_LED, cpu_lv);
	if (ret < 0)
		pr_err("%s: fail to lock cpufreq(limit)\n", __func__);

	/*
	 * Carrier period in us, minus a board-specific fudge (-2 / -1)
	 * compensating for GPIO toggle overhead -- presumably tuned
	 * empirically per power source; verify against board bring-up notes.
	 */
	if (data->pwr_en == -1)
		period = (MICRO_SEC/data->signal[0])-2;
	else
		period = (MICRO_SEC/data->signal[0])-1;
	duty = period/4;	/* 25% duty cycle carrier */
	on = duty;
	off = period - duty;

	/* Keep interrupts off while generating the carrier bursts. */
	local_irq_disable();
	for (i = 1; i < MAX_SIZE; i += 2) {
		if (data->signal[i] == 0)
			break;	/* zero-length burst terminates the frame */
		/* Emit signal[i] carrier cycles. */
		for (j = 0; j < data->signal[i]; j++) {
			gpio_direction_output(data->gpio, 1);
			__udelay(on);
			gpio_direction_output(data->gpio, 0);
			__udelay(off);
		}
		/* Gap timing uses a slightly different period fudge (0 / +1). */
		if (data->pwr_en == -1)
			period = (MICRO_SEC/data->signal[0]);
		else
			period = (MICRO_SEC/data->signal[0])+1;
		off_period = data->signal[i+1]*period;
		if (off_period <= 9999) {
			/* Short gap: stay with IRQs off for accuracy. */
			if (off_period > 1000) {
				__udelay(off_period % 1000);
				mdelay(off_period/1000);
			} else
				__udelay(off_period);
		} else {
			/*
			 * Long gap: re-enable IRQs so we don't starve the
			 * system, then disable again before the next burst.
			 */
			local_irq_enable();
			__udelay(off_period % 1000);
			mdelay(off_period/1000);
			local_irq_disable();
		}
	}
	/* Trailing single carrier cycle closes the frame. */
	gpio_direction_output(data->gpio, 1);
	__udelay(on);
	gpio_direction_output(data->gpio, 0);
	__udelay(off);
	local_irq_enable();

	pr_info("%s end!\n", __func__);

	/* Release DVFS pins and cut LED power. */
	exynos_cpufreq_lock_free(DVFS_LOCK_ID_IR_LED);
	exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_IR_LED);
	if (data->pwr_en != -1)
		gpio_direction_output(data->pwr_en, 0);
	if ((data->pwr_en == -1) && (regulator_status == 1)) {
		regulator_force_disable(regulator);
		regulator_put(regulator);
		regulator_status = -1;
	}
out:	;
}
/*
 * rotation_booster_on - temporarily boost system performance for a
 * screen-rotation animation.
 *
 * Pins the CPU to its top DVFS level (L0), the bus to BUS_L0, and raises
 * the GPU clock. Each lock is taken under DVFS_LOCK_ID_ROTATION_BOOSTER
 * and is expected to be dropped by the matching booster-off path.
 */
static inline void rotation_booster_on(void)
{
	/* CPU first, then bus, then GPU -- keep this acquisition order. */
	exynos_cpufreq_lock(DVFS_LOCK_ID_ROTATION_BOOSTER, L0);
	exynos4_busfreq_lock(DVFS_LOCK_ID_ROTATION_BOOSTER, BUS_L0);
	exynos_gpufreq_lock();
}
static void exynos4_handler_tmu_state(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct s5p_tmu_info *info = container_of(delayed_work, struct s5p_tmu_info, polling); struct s5p_platform_tmu *data = info->dev->platform_data; unsigned int cur_temp; static int auto_refresh_changed; static int check_handle; int trend = 0; mutex_lock(&tmu_lock); cur_temp = get_curr_temp(info); trend = cur_temp - info->last_temperature; pr_debug("curr_temp = %d, temp_diff = %d\n", cur_temp, trend); switch (info->tmu_state) { case TMU_STATUS_TC: #if defined(CONFIG_TC_VOLTAGE) if (cur_temp >= data->ts.stop_tc) { if (check_handle & TC_VOLTAGE_FLAG) { exynos_cpufreq_lock_free(DVFS_LOCK_ID_TMU); #ifdef CONFIG_BUSFREQ_OPP if (dev_unlock(info->bus_dev, info->dev)) pr_err("TMU: dev_unlock error!\n"); #endif if (mali_voltage_lock_pop() < 0) pr_err("TMU: g3d_pop error\n"); check_handle &= ~(TC_VOLTAGE_FLAG); pr_info("change state: tc -> normal.\n"); } info->tmu_state = TMU_STATUS_NORMAL; } else if (cur_temp <= data->ts.start_tc) { if (!(check_handle & TC_VOLTAGE_FLAG)) { if (exynos_cpufreq_lock(DVFS_LOCK_ID_TMU, info->cpulevel_tc) < 0) pr_err("TMU: cpu_lock error!\n"); #ifdef CONFIG_BUSFREQ_OPP if (dev_lock(info->bus_dev, info->dev, info->busfreq_tc) < 0) pr_err("TMU: bus_lock error\n"); #endif if (mali_voltage_lock_push(data->temp_compensate.g3d_volt) < 0) pr_err("TMU: g3d_push error [%u] uV\n", data->temp_compensate.g3d_volt); check_handle |= TC_VOLTAGE_FLAG; } } #endif break; case TMU_STATUS_NORMAL: /* 1. change state: 1st-throttling */ if (cur_temp >= data->ts.start_1st_throttle) { info->tmu_state = TMU_STATUS_THROTTLED; pr_info("change state: normal->throttle.\n"); /* 2. 
polling end and uevent */ #if defined(CONFIG_TC_VOLTAGE) } else if ((cur_temp <= data->ts.stop_1st_throttle) && (cur_temp >= data->ts.stop_tc) && (cur_temp <= data->ts.stop_mem_throttle)) { #else } else if ((cur_temp <= data->ts.stop_1st_throttle) && (cur_temp <= data->ts.stop_mem_throttle)) { #endif if (check_handle & THROTTLE_FLAG) { exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU); check_handle &= ~(THROTTLE_FLAG); } pr_debug("check_handle = %d\n", check_handle); notify_change_of_tmu_state(info); pr_info("normal: free cpufreq_limit & interrupt enable.\n"); /* clear to prevent from interfupt by peindig bit */ __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR); exynos_interrupt_enable(info, 1); enable_irq(info->irq); mutex_unlock(&tmu_lock); return; } break; case TMU_STATUS_THROTTLED: /* 1. change state: 2nd-throttling or warning */ if (cur_temp >= data->ts.start_2nd_throttle) { info->tmu_state = TMU_STATUS_WARNING; pr_info("change state: 1st throttle->2nd throttle.\n"); /* 2. cpufreq limitation and uevent */ } else if ((cur_temp >= data->ts.start_1st_throttle) && !(check_handle & THROTTLE_FLAG)) { if (check_handle & WARNING_FLAG) { exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU); check_handle &= ~(WARNING_FLAG); } exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, info->cpufreq_level_1st_throttle); check_handle |= THROTTLE_FLAG; pr_debug("check_handle = %d\n", check_handle); notify_change_of_tmu_state(info); pr_info("throttling: set cpufreq upper limit.\n"); /* 3. change state: normal */ } else if ((cur_temp <= data->ts.stop_1st_throttle) && (trend < 0)) { info->tmu_state = TMU_STATUS_NORMAL; pr_info("change state: 1st throttle->normal.\n"); } break; case TMU_STATUS_WARNING: /* 1. change state: tripping */ if (cur_temp >= data->ts.start_tripping) { info->tmu_state = TMU_STATUS_TRIPPED; pr_info("change state: 2nd throttle->trip\n"); /* 2. 
cpufreq limitation and uevent */ } else if ((cur_temp >= data->ts.start_2nd_throttle) && !(check_handle & WARNING_FLAG)) { if (check_handle & THROTTLE_FLAG) { exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU); check_handle &= ~(THROTTLE_FLAG); } exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, info->cpufreq_level_2nd_throttle); check_handle |= WARNING_FLAG; pr_debug("check_handle = %d\n", check_handle); notify_change_of_tmu_state(info); pr_info("2nd throttle: cpufreq is limited.\n"); /* 3. change state: 1st-throttling */ } else if ((cur_temp <= data->ts.stop_2nd_throttle) && (trend < 0)) { info->tmu_state = TMU_STATUS_THROTTLED; pr_info("change state: 2nd throttle->1st throttle, " "and release cpufreq upper limit.\n"); } break; case TMU_STATUS_TRIPPED: /* 1. call uevent to shut-down */ if ((cur_temp >= data->ts.start_tripping) && (trend > 0) && !(check_handle & TRIPPING_FLAG)) { notify_change_of_tmu_state(info); pr_info("tripping: on waiting shutdown.\n"); check_handle |= TRIPPING_FLAG; pr_debug("check_handle = %d\n", check_handle); /* 2. change state: 2nd-throttling or warning */ } else if ((cur_temp <= data->ts.stop_2nd_throttle) && (trend < 0)) { info->tmu_state = TMU_STATUS_WARNING; pr_info("change state: trip->2nd throttle, " "Check! occured only test mode.\n"); } /* 3. chip protection: kernel panic as SW workaround */ if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) { panic("Emergency!!!! 
tripping is not treated!\n"); /* clear to prevent from interfupt by peindig bit */ __raw_writel(INTCLEARALL, info->tmu_state + EXYNOS4_TMU_INTCLEAR); enable_irq(info->irq); mutex_unlock(&tmu_lock); return; } break; case TMU_STATUS_INIT: /* sned tmu initial status to platform */ disable_irq(info->irq); if (cur_temp >= data->ts.start_tripping) info->tmu_state = TMU_STATUS_TRIPPED; #if defined(CONFIG_TC_VOLTAGE) else if (cur_temp >= data->ts.start_tc) info->tmu_state = TMU_STATUS_TC; #endif else if (cur_temp >= data->ts.start_2nd_throttle) info->tmu_state = TMU_STATUS_WARNING; else if (cur_temp >= data->ts.start_1st_throttle) info->tmu_state = TMU_STATUS_THROTTLED; else if (cur_temp <= data->ts.stop_1st_throttle) info->tmu_state = TMU_STATUS_NORMAL; notify_change_of_tmu_state(info); pr_info("%s: inform to init state to platform.\n", __func__); break; default: pr_warn("Bug: checked tmu_state.\n"); if (cur_temp >= data->ts.start_tripping) info->tmu_state = TMU_STATUS_TRIPPED; else info->tmu_state = TMU_STATUS_WARNING; break; } /* end */ /* memory throttling */ if (cur_temp >= data->ts.start_mem_throttle) { if (!(auto_refresh_changed) && (trend > 0)) { pr_info("set auto_refresh 1.95us\n"); set_refresh_rate(info->auto_refresh_tq0); auto_refresh_changed = 1; } } else if (cur_temp <= (data->ts.stop_mem_throttle)) { if ((auto_refresh_changed) && (trend < 0)) { pr_info("set auto_refresh 3.9us\n"); set_refresh_rate(info->auto_refresh_normal); auto_refresh_changed = 0; } } info->last_temperature = cur_temp; /* reschedule the next work */ queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate); mutex_unlock(&tmu_lock); return; }