Example #1
File: tmu.c  Project: yerlirock/void-kernel
static void exynos4_handler_tmu_state(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct s5p_tmu_info *info =
		container_of(delayed_work, struct s5p_tmu_info, polling);
	struct s5p_platform_tmu *data = info->dev->platform_data;
	unsigned int cur_temp;
	static int auto_refresh_changed;
	static int check_handle;
	int trend = 0;
	int cpu = 0;

	mutex_lock(&tmu_lock);

	cur_temp = get_curr_temp(info);
	trend = cur_temp - info->last_temperature;
	pr_debug("curr_temp = %u, temp_diff = %d\n", cur_temp, trend);

	switch (info->tmu_state) {
#if defined(CONFIG_TC_VOLTAGE)
	case TMU_STATUS_TC:
		/* lock has priority over unlock */
		if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
		} else if (cur_temp >= data->ts.stop_tc) {
			if (exynos_tc_volt(info, 0) < 0) {
				pr_err("TMU: unlock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_NORMAL;
				pr_info("change state: tc -> normal.\n");
			}
		}
		/* free if upper limit is locked */
		if (check_handle) {
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			check_handle = 0;
		}
		break;
#endif
	case TMU_STATUS_NORMAL:
		/* 1. change state: 1st-throttling */
		if (cur_temp >= data->ts.start_1st_throttle) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: normal->throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0) {
				pr_err("TMU: lock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_TC;
				pr_info("change state: normal->tc.\n");
			}
#endif
		/* 2. polling end and uevent */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (cur_temp <= data->ts.stop_mem_throttle)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("normal: free cpufreq_limit & interrupt enable.\n");

			for_each_online_cpu(cpu)
				cpufreq_update_policy(cpu);

			/* clear to prevent interrupt by pending bit */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			exynos_interrupt_enable(info, 1);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_THROTTLED:
		/* 1. change state: 2nd-throttling or warning */
		if (cur_temp >= data->ts.start_2nd_throttle) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: 1st throttle->2nd throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_1st_throttle) &&
			!(check_handle & THROTTLE_FLAG)) {
			if (check_handle & WARNING_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(WARNING_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_1st_throttle);
			check_handle |= THROTTLE_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("throttling: set cpufreq upper limit.\n");
		/* 3. change state: normal */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_NORMAL;
			pr_info("change state: 1st throttle->normal.\n");
		}
		break;

	case TMU_STATUS_WARNING:
		/* 1. change state: tripping */
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			pr_info("change state: 2nd throttle->trip\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_2nd_throttle) &&
			!(check_handle & WARNING_FLAG)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_2nd_throttle);

			check_handle |= WARNING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("2nd throttle: cpufreq is limited.\n");
		/* 3. change state: 1st-throttling */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: 2nd throttle->1st throttle, "
				"and release cpufreq upper limit.\n");
		}
		break;

	case TMU_STATUS_TRIPPED:
		/* 1. call uevent to shut-down */
		if ((cur_temp >= data->ts.start_tripping) &&
			(trend > 0) && !(check_handle & TRIPPING_FLAG)) {
			notify_change_of_tmu_state(info);
			pr_info("tripping: on waiting shutdown.\n");
			check_handle |= TRIPPING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. change state: 2nd-throttling or warning */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
				&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: trip->2nd throttle, "
				"Check! occured only test mode.\n");
		}
		/* 3. chip protection: kernel panic as SW workaround */
		if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) {
			panic("Emergency!!!! tripping is not treated!\n");
			/* clear to prevent interrupt by pending bit */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_INIT:
		/* send tmu initial status to platform */
		disable_irq(info->irq);
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else if (cur_temp >= data->ts.start_2nd_throttle)
			info->tmu_state = TMU_STATUS_WARNING;
		else if (cur_temp >= data->ts.start_1st_throttle)
			info->tmu_state = TMU_STATUS_THROTTLED;
		else if (cur_temp <= data->ts.stop_1st_throttle)
			info->tmu_state = TMU_STATUS_NORMAL;

		notify_change_of_tmu_state(info);
		pr_info("%s: inform to init state to platform.\n", __func__);
		break;

	default:
		pr_warn("Bug: checked tmu_state.\n");
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else
			info->tmu_state = TMU_STATUS_WARNING;
		break;
	} /* end */

	info->last_temperature = cur_temp;

	/* reschedule the next work */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	mutex_unlock(&tmu_lock);

	return;
}
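
The handler above is the work function of the polling delayed work embedded in struct s5p_tmu_info (see the container_of() call at the top); it re-queues itself on CPU 0 every sampling_rate jiffies via tmu_monitor_wq. The probe-time wiring is not part of this snippet; a minimal sketch of how such a handler could be hooked up, with the workqueue name kept from the snippet and the sampling period assumed purely for illustration, might look like this:

/* Sketch only: assumes the driver's probe path creates the workqueue and
 * schedules the first poll; field names follow the snippet above and the
 * sampling period is a hypothetical value.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *tmu_monitor_wq;

static int exynos4_tmu_start_polling(struct s5p_tmu_info *info)
{
	tmu_monitor_wq = create_workqueue("tmu_monitor_wq");
	if (!tmu_monitor_wq)
		return -ENOMEM;

	/* hypothetical 1 s poll interval */
	info->sampling_rate = msecs_to_jiffies(1000);

	/* bind the handler to the delayed work and kick off the first poll */
	INIT_DELAYED_WORK(&info->polling, exynos4_handler_tmu_state);
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);
	return 0;
}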
Example #2
static void exynos4_handler_tmu_state(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct s5p_tmu_info *info =
		container_of(delayed_work, struct s5p_tmu_info, polling);
	struct s5p_platform_tmu *data = info->dev->platform_data;
	unsigned int cur_temp;
	static int auto_refresh_changed;
	static int check_handle;
	int trend = 0;

	mutex_lock(&tmu_lock);

	cur_temp = get_curr_temp(info);
	trend = cur_temp - info->last_temperature;
	pr_debug("curr_temp = %d, temp_diff = %d\n", cur_temp, trend);

	switch (info->tmu_state) {
	case TMU_STATUS_TC:
#if defined(CONFIG_TC_VOLTAGE)
		if (cur_temp >= data->ts.stop_tc) {
			if (check_handle & TC_VOLTAGE_FLAG) {
				exynos_cpufreq_lock_free(DVFS_LOCK_ID_TMU);
#ifdef CONFIG_BUSFREQ_OPP
				if (dev_unlock(info->bus_dev, info->dev))
					pr_err("TMU: dev_unlock error!\n");
#endif
				if (mali_voltage_lock_pop() < 0)
					pr_err("TMU: g3d_pop error\n");

				check_handle &= ~(TC_VOLTAGE_FLAG);
				pr_info("change state: tc -> normal.\n");
			}
			info->tmu_state = TMU_STATUS_NORMAL;
		} else if (cur_temp <= data->ts.start_tc) {
			if (!(check_handle & TC_VOLTAGE_FLAG)) {
				if (exynos_cpufreq_lock(DVFS_LOCK_ID_TMU,
					info->cpulevel_tc) < 0)
					pr_err("TMU: cpu_lock error!\n");
#ifdef CONFIG_BUSFREQ_OPP
				if (dev_lock(info->bus_dev, info->dev,
					info->busfreq_tc) < 0)
					pr_err("TMU: bus_lock error\n");
#endif
				if (mali_voltage_lock_push(data->temp_compensate.g3d_volt) < 0)
					pr_err("TMU: g3d_push error [%u] uV\n",
						data->temp_compensate.g3d_volt);

				check_handle |= TC_VOLTAGE_FLAG;
			}
		}
#endif
		break;

	case TMU_STATUS_NORMAL:
		/* 1. change state: 1st-throttling */
		if (cur_temp >= data->ts.start_1st_throttle) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: normal->throttle.\n");
		/* 2. polling end and uevent */
#if defined(CONFIG_TC_VOLTAGE)
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (cur_temp >= data->ts.stop_tc)
			&& (cur_temp <= data->ts.stop_mem_throttle)) {
#else
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (cur_temp <= data->ts.stop_mem_throttle)) {
#endif
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("normal: free cpufreq_limit & interrupt enable.\n");

			/* clear to prevent interrupt by pending bit */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			exynos_interrupt_enable(info, 1);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_THROTTLED:
		/* 1. change state: 2nd-throttling or warning */
		if (cur_temp >= data->ts.start_2nd_throttle) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: 1st throttle->2nd throttle.\n");
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_1st_throttle) &&
			!(check_handle & THROTTLE_FLAG)) {
			if (check_handle & WARNING_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(WARNING_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_1st_throttle);
			check_handle |= THROTTLE_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("throttling: set cpufreq upper limit.\n");
		/* 3. change state: normal */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_NORMAL;
			pr_info("change state: 1st throttle->normal.\n");
		}
		break;

	case TMU_STATUS_WARNING:
		/* 1. change state: tripping */
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			pr_info("change state: 2nd throttle->trip\n");
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_2nd_throttle) &&
			!(check_handle & WARNING_FLAG)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_2nd_throttle);

			check_handle |= WARNING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("2nd throttle: cpufreq is limited.\n");
		/* 3. change state: 1st-throttling */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: 2nd throttle->1st throttle, "
				"and release cpufreq upper limit.\n");
		}
		break;

	case TMU_STATUS_TRIPPED:
		/* 1. call uevent to shut-down */
		if ((cur_temp >= data->ts.start_tripping) &&
			(trend > 0) && !(check_handle & TRIPPING_FLAG)) {
			notify_change_of_tmu_state(info);
			pr_info("tripping: on waiting shutdown.\n");
			check_handle |= TRIPPING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
		/* 2. change state: 2nd-throttling or warning */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
				&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: trip->2nd throttle, "
				"Check! occured only test mode.\n");
		}
		/* 3. chip protection: kernel panic as SW workaround */
		if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) {
			panic("Emergency!!!! tripping is not treated!\n");
			/* clear to prevent interrupt by pending bit */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_INIT:
		/* send tmu initial status to platform */
		disable_irq(info->irq);
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		else if (cur_temp >= data->ts.start_tc)
			info->tmu_state = TMU_STATUS_TC;
#endif
		else if (cur_temp >= data->ts.start_2nd_throttle)
			info->tmu_state = TMU_STATUS_WARNING;
		else if (cur_temp >= data->ts.start_1st_throttle)
			info->tmu_state = TMU_STATUS_THROTTLED;
		else if (cur_temp <= data->ts.stop_1st_throttle)
			info->tmu_state = TMU_STATUS_NORMAL;

		notify_change_of_tmu_state(info);
		pr_info("%s: inform to init state to platform.\n", __func__);
		break;

	default:
		pr_warn("Bug: checked tmu_state.\n");
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
		else
			info->tmu_state = TMU_STATUS_WARNING;
		break;
	} /* end */

	/* memory throttling */
	if (cur_temp >= data->ts.start_mem_throttle) {
		if (!(auto_refresh_changed) && (trend > 0)) {
			pr_info("set auto_refresh 1.95us\n");
			set_refresh_rate(info->auto_refresh_tq0);
			auto_refresh_changed = 1;
		}
	} else if (cur_temp <= (data->ts.stop_mem_throttle)) {
		if ((auto_refresh_changed) && (trend < 0)) {
			pr_info("set auto_refresh 3.9us\n");
			set_refresh_rate(info->auto_refresh_normal);
			auto_refresh_changed = 0;
		}
	}

	info->last_temperature = cur_temp;

	/* reschedule the next work */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	mutex_unlock(&tmu_lock);

	return;
}
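
Both examples track which DVFS locks are currently held in the static check_handle bitmask (THROTTLE_FLAG, WARNING_FLAG, TRIPPING_FLAG, and additionally TC_VOLTAGE_FLAG in example #2), setting a bit when a lock is taken and clearing it when the lock is released. The flag definitions are not part of either snippet; an illustrative set of bit definitions, assumed here only to make the bit operations above easier to follow, could be:

/* Illustrative only: the real driver header may use different names/values. */
#define THROTTLE_FLAG		(0x1 << 0)	/* 1st-throttle cpufreq cap held */
#define WARNING_FLAG		(0x1 << 1)	/* 2nd-throttle cpufreq cap held */
#define TRIPPING_FLAG		(0x1 << 2)	/* shutdown uevent already sent */
#define TC_VOLTAGE_FLAG		(0x1 << 3)	/* cold-temp voltage locks held */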