Example #1
static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t n)
{
	int val;
	unsigned int cpufreq_level;
	int lock_ret;
	ssize_t ret = -EINVAL;

	mutex_lock(&cpufreq_limit_mutex);

	if (sscanf(buf, "%d", &val) != 1) {
		printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	if (val == -1) { /* Unlock request */
		if (cpufreq_min_limit_val != -1) {
			exynos_cpufreq_lock_free(DVFS_LOCK_ID_USER);
			cpufreq_min_limit_val = -1;
		} else /* Already unlocked */
			printk(KERN_ERR "%s: Unlock request is ignored\n",
				__func__);
	} else { /* Lock request */
		if (get_cpufreq_level((unsigned int)val, &cpufreq_level)
			== VALID_LEVEL) {
			if (cpufreq_min_limit_val != -1)
				/* Unlock the previous lock */
				exynos_cpufreq_lock_free(DVFS_LOCK_ID_USER);
			lock_ret = exynos_cpufreq_lock(
					DVFS_LOCK_ID_USER, cpufreq_level);
			/* ret of exynos_cpufreq_lock is meaningless.
			   0 is fail? success? */
			cpufreq_min_limit_val = val;
			if ((cpufreq_max_limit_val != -1) &&
			    (cpufreq_min_limit_val > cpufreq_max_limit_val))
				printk(KERN_ERR "%s: Min lock may not work well"
					" because of Max lock\n", __func__);
		} else /* Invalid lock request --> No action */
			printk(KERN_ERR "%s: Lock request is invalid\n",
				__func__);
	}

	ret = n;
out:
	mutex_unlock(&cpufreq_limit_mutex);
	return ret;
}
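A store handler like the one above is normally exposed through a kobj_attribute. A minimal sketch of that wiring, assuming a matching show handler exists (the attribute and show-handler names are illustrative, not taken verbatim from the original driver):

/* Hypothetical sysfs wiring for the store handler above; names are illustrative. */
static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_min_limit_val);
}

static struct kobj_attribute cpufreq_min_limit_attr =
	__ATTR(cpufreq_min_limit, 0644,
	       cpufreq_min_limit_show, cpufreq_min_limit_store);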
static int exynos_frequency_unlock(struct device *dev)
{
	int ret = 0;
	struct device *busdev = dev_get("exynos-busfreq");

	if (atomic_read(&umts_link_pm_data.freqlock) == 1) {
		/* cpu frequency unlock */
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_USB_IF);

		/* bus frequency unlock */
		ret = dev_unlock(busdev, dev);
		if (ret < 0) {
			mif_err("ERR: dev_unlock error: %d\n", ret);
			goto exit;
		}

		/* unlock minimum number of cpu cores */
		cpufreq_pegasusq_min_cpu_unlock();

		atomic_set(&umts_link_pm_data.freqlock, 0);
		mif_debug("success\n");
	}
exit:
	return ret;
}
int cpufreq_lock_by_mali(unsigned int freq)
{
#ifdef CONFIG_EXYNOS4_CPUFREQ
/* #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_ARCH_EXYNOS4) */
	unsigned int level;

	if (atomic_read(&mali_cpufreq_lock) == 1)
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_G3D);

	if (exynos_cpufreq_get_level(freq * 1000, &level)) {
		printk(KERN_ERR
			"Mali: failed to get cpufreq level for %dMHz", freq);
		if (atomic_read(&mali_cpufreq_lock) == 1)
			atomic_set(&mali_cpufreq_lock, 0);
		return -EINVAL;
	}

	if (exynos_cpufreq_lock(DVFS_LOCK_ID_G3D, level)) {
		printk(KERN_ERR "Mali: failed to cpufreq lock for L%d", level);
		if (atomic_read(&mali_cpufreq_lock) == 1)
			atomic_set(&mali_cpufreq_lock, 0);
		return -EINVAL;
	}

	printk(KERN_DEBUG "Mali: cpufreq locked on <%d>%dMHz\n", level, freq);

	if (atomic_read(&mali_cpufreq_lock) == 0)
		atomic_set(&mali_cpufreq_lock, 1);
#endif
	return 0;
}
Example #4
/* block wacom coordinate print */
#ifdef CONFIG_SEC_TOUCHSCREEN_DVFS_LOCK
#if defined(CONFIG_MACH_P4NOTE)
void free_dvfs_lock(struct work_struct *work)
{
	struct wacom_i2c *wac_i2c =
	    container_of(work, struct wacom_i2c, dvfs_work.work);

	if (wac_i2c->dvfs_lock_status)
		dev_lock(wac_i2c->bus_dev,
			wac_i2c->dev, SEC_BUS_LOCK_FREQ);
	else {
		dev_unlock(wac_i2c->bus_dev, wac_i2c->dev);
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_PEN);
	}
}

static void set_dvfs_lock(struct wacom_i2c *wac_i2c, bool on)
{
	if (on) {
		if (!wac_i2c->dvfs_lock_status) {
			cancel_delayed_work(&wac_i2c->dvfs_work);
			dev_lock(wac_i2c->bus_dev,
				wac_i2c->dev, SEC_BUS_LOCK_FREQ2);
			exynos_cpufreq_lock(DVFS_LOCK_ID_PEN,
					    wac_i2c->cpufreq_level);
			wac_i2c->dvfs_lock_status = true;
			schedule_delayed_work(&wac_i2c->dvfs_work,
				msecs_to_jiffies(SEC_DVFS_LOCK_TIMEOUT_MS));
		}
	} else {
		if (wac_i2c->dvfs_lock_status) {
			schedule_delayed_work(&wac_i2c->dvfs_work,
				msecs_to_jiffies(SEC_DVFS_LOCK_TIMEOUT_MS));
			wac_i2c->dvfs_lock_status = false;
		}
	}
}
#else	/* CONFIG_MACH_P4NOTE */
void free_dvfs_lock(struct work_struct *work)
{
	struct wacom_i2c *wac_i2c =
	    container_of(work, struct wacom_i2c, dvfs_work.work);

	exynos_cpufreq_lock_free(DVFS_LOCK_ID_PEN);
	wac_i2c->dvfs_lock_status = false;
}
Example #5
File: tmu.c Project: yerlirock/void-kernel
/**
 * exynos_tc_volt - locks or frees vdd_arm, vdd_mif/int and vdd_g3d for
 * temperature compensation.
 *
 * This function limits or frees the voltage of the cpufreq, busfreq, and
 * mali drivers according to the 2nd argument.
 */
static int exynos_tc_volt(struct s5p_tmu_info *info, int enable)
{
	struct s5p_platform_tmu *data;
	static int usage;
	int ret = 0;

	if (!info || !(info->dev))
		return -EPERM;

	data = info->dev->platform_data;

	if (enable == usage) {
		pr_debug("TMU: already is %s.\n",
			enable ? "locked" : "unlocked");
		return 0;
	}

	if (enable) {
		ret = exynos_cpufreq_lock(DVFS_LOCK_ID_TMU, info->cpulevel_tc);
		if (ret)
			goto err_lock;
#ifdef CONFIG_BUSFREQ_OPP
		ret = dev_lock(info->bus_dev, info->dev, info->busfreq_tc);
		if (ret)
			goto err_lock;
#endif
#if defined(CONFIG_VIDEO_MALI400MP)
		ret = mali_voltage_lock_push(data->temp_compensate.g3d_volt);
		if (ret < 0) {
			pr_err("TMU: g3d_push error: %u uV\n",
				data->temp_compensate.g3d_volt);
			goto err_lock;
		}
#endif
	} else {
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_TMU);
#ifdef CONFIG_BUSFREQ_OPP
		ret = dev_unlock(info->bus_dev, info->dev);
		if (ret)
			goto err_unlock;
#endif
#if defined(CONFIG_VIDEO_MALI400MP)
		ret = mali_voltage_lock_pop();
		if (ret < 0) {
			pr_err("TMU: g3d_pop error\n");
			goto err_unlock;
		}
#endif
	}
	usage = enable;
	pr_info("TMU: %s is ok!\n", enable ? "lock" : "unlock");
	return ret;

err_lock:
err_unlock:
	pr_err("TMU: %s is fail.\n", enable ? "lock" : "unlock");
	return ret;
}
void cpufreq_unlock_by_mali(void)
{
#ifdef CONFIG_EXYNOS4_CPUFREQ
/* #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_ARCH_EXYNOS4) */
	if (atomic_read(&mali_cpufreq_lock) == 1) {
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_G3D);
		atomic_set(&mali_cpufreq_lock, 0);
		printk(KERN_DEBUG "Mali: cpufreq locked off\n");
	}
#endif
}
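Together with cpufreq_lock_by_mali() above, these two functions show the basic pattern: translate a target frequency into a DVFS level, take the lock under a driver-specific lock ID, and drop it with exynos_cpufreq_lock_free() once the constraint is no longer needed. A minimal, self-contained sketch of that pattern (the helper name and the 800 MHz floor are illustrative, not from any of the drivers quoted here):

/* Illustrative sketch: hold a CPU frequency floor around a burst of work.
 * Uses only the calls shown in the examples above; error handling is terse. */
static int run_with_cpufreq_floor(void (*work)(void))
{
	unsigned int level;
	int ret;

	/* Map 800 MHz (given in kHz) to a DVFS level index. */
	if (exynos_cpufreq_get_level(800000, &level))
		return -EINVAL;

	ret = exynos_cpufreq_lock(DVFS_LOCK_ID_G3D, level);
	if (ret)
		return ret;

	work();

	/* Release the floor so normal DVFS scaling resumes. */
	exynos_cpufreq_lock_free(DVFS_LOCK_ID_G3D);
	return 0;
}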
Example #7
void free_dvfs_lock(struct work_struct *work)
{
	struct wacom_i2c *wac_i2c =
	    container_of(work, struct wacom_i2c, dvfs_work.work);

	if (wac_i2c->dvfs_lock_status)
		dev_lock(wac_i2c->bus_dev,
			wac_i2c->dev, SEC_BUS_LOCK_FREQ);
	else {
		dev_unlock(wac_i2c->bus_dev, wac_i2c->dev);
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_PEN);
	}
}
Example #8
void free_dvfs_lock(struct work_struct *work)
{
	struct wacom_i2c *wac_i2c =
	    container_of(work, struct wacom_i2c, dvfs_work.work);

#ifdef SEC_BUS_LOCK
#if defined(CONFIG_MACH_P4NOTE)
	dev_unlock(wac_i2c->bus_dev, wac_i2c->dev);
#else
	exynos4_busfreq_lock_free(DVFS_LOCK_ID_PEN);
#endif
#endif	/* SEC_BUS_LOCK */
	exynos_cpufreq_lock_free(DVFS_LOCK_ID_PEN);
	wac_i2c->dvfs_lock_status = false;
}
Example #9
static void set_dvfs_off(struct work_struct *work)
{
	int ret;
	if (dvfs_lock_status && !press_status) {
		ret = dev_unlock(bus_dev, sec_touchscreen);
		if (ret < 0) {
			pr_err("%s: bus unlock failed(%d)\n",
			__func__, __LINE__);
			return;
		}
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_TSP);
		dvfs_lock_status = false;
#if DEBUG_PRINT
		pr_info("[TSP] TSP DVFS mode exit ");
#endif
	}
}
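The matching "on" path is not shown in this example, but from the other snippets here its shape is clear: take the bus lock and the cpufreq lock, then record that the lock is held. A hedged sketch reusing the globals from this example (the function name, bus frequency, and cpufreq level parameters are placeholders, not from the original driver):

/* Hypothetical counterpart to set_dvfs_off(); parameters are placeholders. */
static void set_dvfs_on(unsigned int cpufreq_level, unsigned int bus_freq)
{
	int ret;

	if (!dvfs_lock_status) {
		ret = dev_lock(bus_dev, sec_touchscreen, bus_freq);
		if (ret < 0) {
			pr_err("%s: bus lock failed(%d)\n",
			__func__, __LINE__);
			return;
		}
		if (exynos_cpufreq_lock(DVFS_LOCK_ID_TSP, cpufreq_level) < 0) {
			dev_unlock(bus_dev, sec_touchscreen);
			return;
		}
		dvfs_lock_status = true;
	}
}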
Example #10
File: tmu.c Project: 0x7678/SJKernel-gn2
/**
 * exynos_tc_volt - locks or frees vdd_arm, vdd_mif/int and vdd_g3d for
 * temperature compensation.
 *
 * This function limits or frees the voltage of the cpufreq, busfreq, and
 * mali drivers according to the 2nd argument.
 */
static int exynos_tc_volt(struct s5p_tmu_info *info, int enable)
{
	struct s5p_platform_tmu *data;
	static int usage;
	int ret = 0;

	if (!info || !(info->dev))
		return -EPERM;

	data = info->dev->platform_data;

	if (enable == usage) {
		pr_debug("TMU: already is %s.\n",
			enable ? "locked" : "unlocked");
		return 0;
	}

	if (enable) {
		ret = exynos_cpufreq_lock(DVFS_LOCK_ID_TMU, info->cpulevel_tc);
		if (ret)
			goto err_lock;
#ifdef CONFIG_BUSFREQ_OPP
		ret = dev_lock(info->bus_dev, info->dev, info->busfreq_tc);
		if (ret)
			goto err_lock;
#endif
	} else {
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_TMU);
#ifdef CONFIG_BUSFREQ_OPP
		ret = dev_unlock(info->bus_dev, info->dev);
		if (ret)
			goto err_unlock;
#endif
	}
	usage = enable;
	pr_info("TMU: %s is ok!\n", enable ? "lock" : "unlock");
	return ret;

err_lock:
err_unlock:
	pr_err("TMU: %s is fail.\n", enable ? "lock" : "unlock");
	return ret;
}
static int fimc_is_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
	struct fimc_is_dev *is_dev = to_fimc_is_dev(sd);

	printk(KERN_INFO "FIMC-IS runtime suspend\n");
	if (is_dev->pdata->clk_off) {
		is_dev->pdata->clk_off(pdev);
	} else {
		printk(KERN_ERR "#### failed to Clock OFF ####\n");
		return -EINVAL;
	}
#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
	/* Unlock bus frequency */
	pm_qos_remove_request(&bus_qos_pm_qos_req);
	dev_unlock(is_dev->bus_dev, dev);
#endif
#ifdef CONFIG_EXYNOS4_CPUFREQ
	exynos_cpufreq_lock_free(DVFS_LOCK_ID_CAM);
#endif

#if defined(CONFIG_VIDEOBUF2_ION)
	if (is_dev->alloc_ctx)
		fimc_is_mem_suspend(is_dev->alloc_ctx);
#endif
	mutex_lock(&is_dev->lock);
	clear_bit(IS_PWR_ST_POWERON, &is_dev->power);
	set_bit(IS_PWR_ST_POWEROFF, &is_dev->power);
	mutex_unlock(&is_dev->lock);
#ifdef CONFIG_REGULATOR	
	regulator_disable(is_dev->r_vdd18_cam);
	regulator_disable(is_dev->r_vddio18_cam);
	regulator_disable(is_dev->r_vdd28_af_cam);
	regulator_disable(is_dev->r_vadd28_cam);
#endif

	printk(KERN_INFO "FIMC-IS runtime suspend end\n");
	return 0;
}
static int exynos_frequency_unlock(struct device *dev)
{
	int ret = 0, lock_id;
	atomic_t *freqlock;
	struct device *busdev = dev_get("exynos-busfreq");

	if (!strcmp(dev->bus->name, "usb")) {
		lock_id = DVFS_LOCK_ID_USB_IF;
		freqlock = &umts_link_pm_data.freqlock;
	} else if (!strcmp(dev->bus->name, "platform")) { // for dpram lock
		lock_id = DVFS_LOCK_ID_DPRAM_IF;
		freqlock = &umts_link_pm_data.freq_dpramlock;
	} else {
		mif_err("ERR: Unkown unlock ID (%s)\n", dev->bus->name);
		goto exit;
	}

	if (atomic_read(freqlock) == 1) {
		/* cpu frequency unlock */
		exynos_cpufreq_lock_free(lock_id);

		/* bus frequency unlock */
		ret = dev_unlock(busdev, dev);
		if (ret < 0) {
			mif_err("ERR: dev_unlock error: %d\n", ret);
			goto exit;
		}

		/* unlock minimum number of cpu cores */
		cpufreq_pegasusq_min_cpu_unlock();

		atomic_set(freqlock, 0);
		mif_info("success\n");
	}
exit:
	return ret;
}
Example #13
static void release_all_fingers(struct melfas_ts_data *ts)
{
	int i, ret;

	printk(KERN_DEBUG "[TSP] %s\n", __func__);
	for (i = 0; i < MELFAS_MAX_TOUCH; i++) {
		g_Mtouch_info[i].status = TSP_STATE_INACTIVE;
		g_Mtouch_info[i].strength = 0;
		g_Mtouch_info[i].posX = 0;
		g_Mtouch_info[i].posY = 0;
		g_Mtouch_info[i].angle = 0;
		g_Mtouch_info[i].major = 0;
		g_Mtouch_info[i].minor = 0;
		g_Mtouch_info[i].palm = 0;

		input_mt_slot(ts->input_dev, i);
		input_mt_report_slot_state(ts->input_dev,
			MT_TOOL_FINGER, 0);
	}
	input_sync(ts->input_dev);
#if TOUCH_BOOSTER
	if (dvfs_lock_status) {
		exynos_cpufreq_lock_free(DVFS_LOCK_ID_TSP);
		ret = dev_unlock(bus_dev, sec_touchscreen);
		if (ret < 0) {
			pr_err("%s: bus unlock failed(%d)\n",
			__func__, __LINE__);
			return;
		}
		dvfs_lock_status = false;
		press_status = false;
#if DEBUG_PRINT
		pr_info("[TSP] %s : DVFS mode exit\n", __func__);
#endif
	}
#endif
}
Example #14
static inline void rotation_booster_off(void)
{
	exynos_gpufreq_unlock();
	exynos4_busfreq_lock_free(DVFS_LOCK_ID_ROTATION_BOOSTER);
	exynos_cpufreq_lock_free(DVFS_LOCK_ID_ROTATION_BOOSTER);
}
Example #15
static void ir_remocon_send(struct ir_remocon_data *data)
{
	unsigned int		period, off_period = 0;
	unsigned int		duty;
	unsigned int		on, off = 0;
	unsigned int		i, j;
	int					ret;
	static int		cpu_lv = -1;

	if (data->pwr_en == -1) {
		regulator = regulator_get(NULL, "vled_3.3v");
		if (IS_ERR(regulator))
			goto out;

		regulator_enable(regulator);
		regulator_status = 1;
	}

	if (data->pwr_en != -1)
		gpio_direction_output(data->pwr_en, 1);

	__udelay(1000);

	if (cpu_lv == -1) {
		if (data->pwr_en == -1)
			exynos_cpufreq_get_level(500000, &cpu_lv);
		else
			exynos_cpufreq_get_level(800000, &cpu_lv);
	}

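	/* Locking a DVFS level sets a frequency floor, and the upper limit
	 * below sets a ceiling at the same level, which effectively pins the
	 * CPU frequency while the IR waveform is bit-banged further down. */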
	ret = exynos_cpufreq_lock(DVFS_LOCK_ID_IR_LED, cpu_lv);
	if (ret < 0)
		pr_err("%s: fail to lock cpufreq\n", __func__);

	ret = exynos_cpufreq_upper_limit(DVFS_LOCK_ID_IR_LED, cpu_lv);
	if (ret < 0)
		pr_err("%s: fail to lock cpufreq(limit)\n", __func__);

	if (data->pwr_en == -1)
		period  = (MICRO_SEC/data->signal[0])-2;
	else
		period  = (MICRO_SEC/data->signal[0])-1;

	duty = period/4;
	on = duty;
	off = period - duty;

	local_irq_disable();
	for (i = 1; i < MAX_SIZE; i += 2) {
		if (data->signal[i] == 0)
			break;

		for (j = 0; j < data->signal[i]; j++) {
			gpio_direction_output(data->gpio, 1);
			__udelay(on);
			gpio_direction_output(data->gpio, 0);
			__udelay(off);
		}

		if (data->pwr_en == -1)
			period = (MICRO_SEC/data->signal[0]);
		else
			period = (MICRO_SEC/data->signal[0])+1;

		off_period = data->signal[i+1]*period;

		if (off_period <= 9999) {
			if (off_period > 1000) {
				__udelay(off_period % 1000);
				mdelay(off_period/1000);
			} else
				__udelay(off_period);
		} else {
			local_irq_enable();
			__udelay(off_period % 1000);
			mdelay(off_period/1000);
			local_irq_disable();
		}
	}
	gpio_direction_output(data->gpio, 1);
	__udelay(on);
	gpio_direction_output(data->gpio, 0);
	__udelay(off);

	local_irq_enable();
	pr_info("%s end!\n", __func__);
	exynos_cpufreq_lock_free(DVFS_LOCK_ID_IR_LED);
	exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_IR_LED);

	if (data->pwr_en != -1)
		gpio_direction_output(data->pwr_en, 0);

	if ((data->pwr_en == -1) && (regulator_status == 1)) {
		regulator_force_disable(regulator);
		regulator_put(regulator);

		regulator_status = -1;
	}
out: ;
}
Example #16
static void exynos4_handler_tmu_state(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct s5p_tmu_info *info =
		container_of(delayed_work, struct s5p_tmu_info, polling);
	struct s5p_platform_tmu *data = info->dev->platform_data;
	unsigned int cur_temp;
	static int auto_refresh_changed;
	static int check_handle;
	int trend = 0;

	mutex_lock(&tmu_lock);

	cur_temp = get_curr_temp(info);
	trend = cur_temp - info->last_temperature;
	pr_debug("curr_temp = %d, temp_diff = %d\n", cur_temp, trend);

	switch (info->tmu_state) {
	case TMU_STATUS_TC:
#if defined(CONFIG_TC_VOLTAGE)
		if (cur_temp >= data->ts.stop_tc) {
			if (check_handle & TC_VOLTAGE_FLAG) {
				exynos_cpufreq_lock_free(DVFS_LOCK_ID_TMU);
#ifdef CONFIG_BUSFREQ_OPP
				if (dev_unlock(info->bus_dev, info->dev))
					pr_err("TMU: dev_unlock error!\n");
#endif
				if (mali_voltage_lock_pop() < 0)
					pr_err("TMU: g3d_pop error\n");

				check_handle &= ~(TC_VOLTAGE_FLAG);
				pr_info("change state: tc -> normal.\n");
			}
			info->tmu_state = TMU_STATUS_NORMAL;
		} else if (cur_temp <= data->ts.start_tc) {
			if (!(check_handle & TC_VOLTAGE_FLAG)) {
				if (exynos_cpufreq_lock(DVFS_LOCK_ID_TMU,
					info->cpulevel_tc) < 0)
					pr_err("TMU: cpu_lock error!\n");
#ifdef CONFIG_BUSFREQ_OPP
				if (dev_lock(info->bus_dev, info->dev,
					info->busfreq_tc) < 0)
					pr_err("TMU: bus_lock error\n");
#endif
				if (mali_voltage_lock_push(data->temp_compensate.g3d_volt) < 0)
					pr_err("TMU: g3d_push error [%u] uV\n",
						data->temp_compensate.g3d_volt);

				check_handle |= TC_VOLTAGE_FLAG;
			}
		}
#endif
		break;

	case TMU_STATUS_NORMAL:
		/* 1. change state: 1st-throttling */
		if (cur_temp >= data->ts.start_1st_throttle) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: normal->throttle.\n");
		/* 2. polling end and uevent */
#if defined(CONFIG_TC_VOLTAGE)
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (cur_temp >= data->ts.stop_tc)
			&& (cur_temp <= data->ts.stop_mem_throttle)) {
#else
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (cur_temp <= data->ts.stop_mem_throttle)) {
#endif
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("normal: free cpufreq_limit & interrupt enable.\n");

			/* clear to prevent an interrupt from the pending bit */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			exynos_interrupt_enable(info, 1);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_THROTTLED:
		/* 1. change state: 2nd-throttling or warning */
		if (cur_temp >= data->ts.start_2nd_throttle) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: 1st throttle->2nd throttle.\n");
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_1st_throttle) &&
			!(check_handle & THROTTLE_FLAG)) {
			if (check_handle & WARNING_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(WARNING_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_1st_throttle);
			check_handle |= THROTTLE_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("throttling: set cpufreq upper limit.\n");
		/* 3. change state: normal */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_NORMAL;
			pr_info("change state: 1st throttle->normal.\n");
		}
		break;

	case TMU_STATUS_WARNING:
		/* 1. change state: tripping */
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			pr_info("change state: 2nd throttle->trip\n");
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_2nd_throttle) &&
			!(check_handle & WARNING_FLAG)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_2nd_throttle);

			check_handle |= WARNING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("2nd throttle: cpufreq is limited.\n");
		/* 3. change state: 1st-throttling */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: 2nd throttle->1st throttle, "
				"and release cpufreq upper limit.\n");
		}
		break;

	case TMU_STATUS_TRIPPED:
		/* 1. call uevent to shut-down */
		if ((cur_temp >= data->ts.start_tripping) &&
			(trend > 0) && !(check_handle & TRIPPING_FLAG)) {
			notify_change_of_tmu_state(info);
			pr_info("tripping: on waiting shutdown.\n");
			check_handle |= TRIPPING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
		/* 2. change state: 2nd-throttling or warning */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
				&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: trip->2nd throttle, "
				"Check! occured only test mode.\n");
		}
		/* 3. chip protection: kernel panic as SW workaround */
		if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) {
			panic("Emergency!!!! tripping is not treated!\n");
			/* clear to prevent an interrupt from the pending bit */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_INIT:
		/* send tmu initial status to platform */
		disable_irq(info->irq);
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		else if (cur_temp >= data->ts.start_tc)
			info->tmu_state = TMU_STATUS_TC;
#endif
		else if (cur_temp >= data->ts.start_2nd_throttle)
			info->tmu_state = TMU_STATUS_WARNING;
		else if (cur_temp >= data->ts.start_1st_throttle)
			info->tmu_state = TMU_STATUS_THROTTLED;
		else if (cur_temp <= data->ts.stop_1st_throttle)
			info->tmu_state = TMU_STATUS_NORMAL;

		notify_change_of_tmu_state(info);
		pr_info("%s: inform to init state to platform.\n", __func__);
		break;

	default:
		pr_warn("Bug: checked tmu_state.\n");
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
		else
			info->tmu_state = TMU_STATUS_WARNING;
		break;
	} /* end */

	/* memory throttling */
	if (cur_temp >= data->ts.start_mem_throttle) {
		if (!(auto_refresh_changed) && (trend > 0)) {
			pr_info("set auto_refresh 1.95us\n");
			set_refresh_rate(info->auto_refresh_tq0);
			auto_refresh_changed = 1;
		}
	} else if (cur_temp <= (data->ts.stop_mem_throttle)) {
		if ((auto_refresh_changed) && (trend < 0)) {
			pr_info("set auto_refresh 3.9us\n");
			set_refresh_rate(info->auto_refresh_normal);
			auto_refresh_changed = 0;
		}
	}

	info->last_temperature = cur_temp;

	/* reschedule the next work */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	mutex_unlock(&tmu_lock);

	return;
}