Code example #1
File: gpu_ipa.c  Project: ColinIanKing/m576
static unsigned int gpu_ipa_dvfs_max_allowed_freq(struct kbase_device *kbdev)
{
	gpu_dvfs_info *dvfs_max_info;
	int max_thermal_step = -1;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	int max_step;

	if (!platform) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: platform context (0x%p) is not initialized\n", __func__, platform);
		return 0xffffffff;
	}

	max_step = gpu_dvfs_get_level(platform->gpu_max_clock);

	/* Account for Throttling Lock */
#ifdef CONFIG_EXYNOS_THERMAL
	max_thermal_step = gpu_dvfs_get_level(platform->gpu_max_clock);
#endif /* CONFIG_EXYNOS_THERMAL */
	if (max_thermal_step <= gpu_dvfs_get_level(platform->gpu_min_clock) && max_thermal_step > max_step)
		max_step = max_thermal_step;

	/* NOTE: This is the absolute maximum, not taking into account any tmu
	 * throttling */
	dvfs_max_info = &(platform->table[max_step]);
	return dvfs_max_info->clock;
}
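
Every example on this page indexes platform->table through gpu_dvfs_get_level(): the table runs from the highest clock (the level of gpu_max_clock, a small index) down to the lowest clock (the level of gpu_min_clock, a larger index), and a negative return value means the clock is not in the table. The implementation of gpu_dvfs_get_level() itself is not among these examples; the standalone C sketch below only models that lookup convention, with made-up clock and voltage values and a trimmed-down gpu_dvfs_info.

#include <stdio.h>

/* Hypothetical DVFS table entry; the real gpu_dvfs_info carries more fields
 * (thresholds, down_staycount, mif/int/cpu frequencies, ...). */
struct gpu_dvfs_info {
	int clock;   /* MHz */
	int voltage; /* uV  */
};

/* Example values only; index 0 plays the role of gpu_max_clock and the
 * last index the role of gpu_min_clock. */
static struct gpu_dvfs_info table[] = {
	{ 772, 900000 },
	{ 700, 875000 },
	{ 600, 850000 },
	{ 420, 812500 },
	{ 266, 787500 },
};
static const int table_size = sizeof(table) / sizeof(table[0]);

/* Model of the lookup: clock -> table index, -1 if the clock is unknown. */
static int gpu_dvfs_get_level(int clock)
{
	int i;

	for (i = 0; i < table_size; i++)
		if (table[i].clock == clock)
			return i;
	return -1;
}

int main(void)
{
	int max_level = gpu_dvfs_get_level(772); /* smallest index */
	int min_level = gpu_dvfs_get_level(266); /* largest index  */
	int i;

	/* Iterate from the gpu_max_clock level to the gpu_min_clock level,
	 * as the table-dump handlers in the examples do. */
	for (i = max_level; i <= min_level; i++)
		printf("level %d: %d MHz @ %d uV\n", i, table[i].clock, table[i].voltage);

	printf("unknown clock -> level %d\n", gpu_dvfs_get_level(123));
	return 0;
}
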
Code example #2
static ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	int i;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	gpu_dvfs_update_time_in_state(gpu_control_is_power_on(pkbdev) * platform->cur_clock);

	for (i = gpu_dvfs_get_level(platform->gpu_min_clock); i >= gpu_dvfs_get_level(platform->gpu_max_clock); i--) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d %llu\n",
				platform->table[i].clock,
				platform->table[i].time);
	}

	if (ret >= PAGE_SIZE - 1) {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
Code example #3
static ssize_t set_min_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int ret, clock = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	if (sysfs_streq("0", buf)) {
		gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
	} else {
		ret = kstrtoint(buf, 0, &clock);
		if (ret) {
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
			return -ENOENT;
		}

		ret = gpu_dvfs_get_level(clock);
		if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
			return -ENOENT;
		}

		if (clock > platform->gpu_max_clock_limit)
			clock = platform->gpu_max_clock_limit;

		if (clock == platform->gpu_min_clock)
			gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
		else
			gpu_dvfs_clock_lock(GPU_DVFS_MIN_LOCK, SYSFS_LOCK, clock);
	}

	return count;
}
Code example #4
File: gpu_ipa.c  Project: ColinIanKing/m576
int kbase_platform_dvfs_power_to_freq(int power)
{
#ifdef CONFIG_MALI_DVFS
	int level, freq;
	unsigned int vol;
	u64 _power;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	if (!platform) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: platform context (0x%p) is not initialized\n", __func__, platform);
		return -1;
	}

	for (level = gpu_dvfs_get_level(platform->gpu_min_clock); level >= gpu_dvfs_get_level(platform->gpu_max_clock); level--) {
		vol = platform->table[level].voltage / 10000;
		freq = platform->table[level].clock;
		_power = div_u64((u64)platform->ipa_power_coeff_gpu * freq * vol * vol, 100000);
		if ((int)_power >= power)
			break;
	}

	return platform->table[level].clock;
#else
	return 0;
#endif /* CONFIG_MALI_DVFS */
}
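
Examples #4 and #19 convert between frequency and power with the same model: the table voltage (uV) is scaled down by 10000, and power is ipa_power_coeff_gpu * freq * vol * vol / 100000, computed in 64 bits. The sketch below redoes that arithmetic in a standalone program; the coefficient value and the 772 MHz / 900000 uV operating point are invented, not taken from any real table.

#include <stdio.h>
#include <stdint.h>

/* Assumed coefficient; in the driver this comes from
 * platform->ipa_power_coeff_gpu and its units are chosen by the driver. */
#define IPA_POWER_COEFF_GPU 46

static int freq_to_power(int freq_mhz, int voltage_uv)
{
	/* Same scaling as the examples: voltage / 10000, then
	 * coeff * f * v^2 / 100000, done in 64-bit to avoid overflow. */
	uint64_t vol = (uint64_t)voltage_uv / 10000;
	uint64_t power = (uint64_t)IPA_POWER_COEFF_GPU * freq_mhz * vol * vol / 100000;

	return (int)power;
}

int main(void)
{
	/* e.g. a 772 MHz level at 900000 uV (invented numbers) */
	printf("modelled power @ 772 MHz: %d\n", freq_to_power(772, 900000));

	/* The power_to_freq direction in example #4 walks the table from the
	 * gpu_min_clock level toward the gpu_max_clock level and stops at the
	 * first level whose modelled power reaches the requested budget. */
	return 0;
}
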
Code example #5
static int gpu_check_target_clock(struct exynos_context *platform, int clock)
{
	int target_clock = clock;

	DVFS_ASSERT(platform);

	if (gpu_dvfs_get_level(target_clock) < 0)
		return -1;

#ifdef CONFIG_MALI_DVFS
	if (!platform->dvfs_status)
		return target_clock;

	GPU_LOG(DVFS_DEBUG, DUMMY, 0u, 0u, "clock: %d, min: %d, max: %d\n", clock, platform->min_lock, platform->max_lock);

	if ((platform->min_lock > 0) && (platform->power_status) &&
			((target_clock < platform->min_lock) || (platform->cur_clock < platform->min_lock)))
		target_clock = platform->min_lock;

	if ((platform->max_lock > 0) && (target_clock > platform->max_lock))
		target_clock = platform->max_lock;
#endif /* CONFIG_MALI_DVFS */

	platform->step = gpu_dvfs_get_level(target_clock);

	return target_clock;
}
Code example #6
static ssize_t show_down_staycount(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	int i = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++)
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "Clock %d - %d\n",
			platform->table[i].clock, platform->table[i].down_staycount);
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
Code example #7
static ssize_t set_highspeed_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	ssize_t ret = 0;
	unsigned long flags;
	int highspeed_clock = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &highspeed_clock);
	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
		return -ENOENT;
	}

	ret = gpu_dvfs_get_level(highspeed_clock);
	if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, highspeed_clock);
		return -ENOENT;
	}

	if (highspeed_clock > platform->gpu_max_clock_limit)
		highspeed_clock = platform->gpu_max_clock_limit;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->interactive.highspeed_clock = highspeed_clock;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return count;
}
Code example #8
static int gpu_dvfs_governor_static(struct exynos_context *platform, int utilization)
{
	static bool step_down = true;
	static int count;

	DVFS_ASSERT(platform);

	if (count == G3D_GOVERNOR_STATIC_PERIOD) {
		if (step_down) {
			if (platform->step > gpu_dvfs_get_level(platform->gpu_max_clock))
				platform->step--;
			if (((platform->max_lock > 0) && (platform->table[platform->step].clock == platform->max_lock))
					|| (platform->step == gpu_dvfs_get_level(platform->gpu_max_clock)))
				step_down = false;
		} else {
			if (platform->step < gpu_dvfs_get_level(platform->gpu_min_clock))
				platform->step++;
			if (((platform->min_lock > 0) && (platform->table[platform->step].clock == platform->min_lock))
					|| (platform->step == gpu_dvfs_get_level(platform->gpu_min_clock)))
				step_down = true;
		}

		count = 0;
	} else {
		count++;
	}

	return 0;
}
Code example #9
int gpu_dvfs_init_time_in_state(void)
{
#ifdef CONFIG_MALI_DEBUG_SYS
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	int i;

	DVFS_ASSERT(platform);

	for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++)
		platform->table[i].time = 0;
#endif /* CONFIG_MALI_DEBUG_SYS */

	return 0;
}
Code example #10
static ssize_t set_down_staycount(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	char tmpbuf[32];
	char *sptr, *tok;
	int ret = -1;
	int clock = -1, level = -1, down_staycount = 0;
	unsigned int len = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	len = (unsigned int)min(count, sizeof(tmpbuf) - 1);
	memcpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	sptr = tmpbuf;

	tok = strsep(&sptr, " ,");
	if (tok == NULL) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input\n", __func__);
		return -ENOENT;
	}

	ret = kstrtoint(tok, 0, &clock);
	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input %d\n", __func__, clock);
		return -ENOENT;
	}

	tok = strsep(&sptr, " ,");
	if (tok == NULL) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input\n", __func__);
		return -ENOENT;
	}

	ret = kstrtoint(tok, 0, &down_staycount);
	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input %d\n", __func__, down_staycount);
		return -ENOENT;
	}

	level = gpu_dvfs_get_level(clock);
	if (level < 0) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
		return -ENOENT;
	}

	if ((down_staycount < MIN_DOWN_STAYCOUNT) || (down_staycount > MAX_DOWN_STAYCOUNT)) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: down_staycount is out of range (%d, %d ~ %d)\n",
			__func__, down_staycount, MIN_DOWN_STAYCOUNT, MAX_DOWN_STAYCOUNT);
		return -ENOENT;
	}

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->table[level].down_staycount = down_staycount;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return count;
}
Code example #11
int gpu_enable_dvs(struct exynos_context *platform)
{
#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	int level = 0;
#endif /* CONFIG_EXYNOS_CL_DVFS_G3D */

	if (!platform->dvs_status)
		return 0;

	if (!gpu_is_power_on()) {
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set dvs in the power-off state!\n", __func__);
		return -1;
	}

#if defined(CONFIG_REGULATOR_S2MPS15)
#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	level = gpu_dvfs_get_level(gpu_get_cur_clock(platform));
	exynos7420_cl_dvfs_stop(ID_G3D, level);
#endif /* CONFIG_EXYNOS_CL_DVFS_G3D */

	/* Do not need to enable dvs during suspending */
	if (!pkbdev->pm.suspending) {
		if (s2m_set_dvs_pin(true) != 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to enable dvs\n", __func__);
			return -1;
		}
	}
#endif /* CONFIG_REGULATOR_S2MPS15 */

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "dvs is enabled (vol: %d)\n", gpu_get_cur_voltage(platform));
	return 0;
}
Code example #12
static int gpu_get_dvfs_table(struct exynos_context *platform, char *buf, size_t buf_size)
{
	int i, cnt = 0;

	if (!platform)
		return -ENODEV;

	if (buf == NULL)
		return 0;

	for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++)
		cnt += snprintf(buf+cnt, buf_size-cnt, " %d", platform->table[i].clock);

	cnt += snprintf(buf+cnt, buf_size-cnt, "\n");

	return cnt;
}
Code example #13
int gpu_dvfs_governor_setting(struct exynos_context *platform, int governor_type)
{
#ifdef CONFIG_MALI_DVFS
	int i;
#endif /* CONFIG_MALI_DVFS */
	unsigned long flags;

	DVFS_ASSERT(platform);

	if ((governor_type < 0) || (governor_type >= G3D_MAX_GOVERNOR_NUM)) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid governor type (%d)\n", __func__, governor_type);
		return -1;
	}

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
#ifdef CONFIG_MALI_DVFS
	platform->table = governor_info[governor_type].table;
	platform->table_size = governor_info[governor_type].table_size;
	platform->step = gpu_dvfs_get_level(governor_info[governor_type].start_clk);
	gpu_dvfs_get_next_level = (GET_NEXT_LEVEL)(governor_info[governor_type].governor);

	platform->env_data.utilization = 80;
	platform->max_lock = 0;
	platform->min_lock = 0;

	for (i = 0; i < NUMBER_LOCK; i++) {
		platform->user_max_lock[i] = 0;
		platform->user_min_lock[i] = 0;
	}

	platform->down_requirement = 1;
	platform->governor_type = governor_type;

	gpu_dvfs_init_time_in_state();
#else /* CONFIG_MALI_DVFS */
	platform->table = (gpu_dvfs_info *)gpu_get_attrib_data(platform->attrib, GPU_GOVERNOR_TABLE_DEFAULT);
	platform->table_size = (u32)gpu_get_attrib_data(platform->attrib, GPU_GOVERNOR_TABLE_SIZE_DEFAULT);
	platform->step = gpu_dvfs_get_level(platform->gpu_dvfs_start_clock);
#endif /* CONFIG_MALI_DVFS */
	platform->cur_clock = platform->table[platform->step].clock;

	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return 0;
}
Code example #14
static ssize_t set_volt_table(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
	int max = gpu_dvfs_get_level(platform->gpu_max_clock);
	int min = gpu_dvfs_get_level(platform->gpu_min_clock);
	int i, tokens, rest, target;
	int t[min - max];
	unsigned long flags;

	if ((tokens = read_into((int*)&t, min-max, buf, count)) < 0)
		return -EINVAL;

	target = -1;
	if (tokens == 2) {
		for (i = max; i <= min; i++) {
			if (t[0] == platform->table[i].clock) {
				target = i;
				break;
			}
		}
	}

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

	if (tokens == 2 && target > -1) {
		if ((rest = t[1] % GPU_VOLT_STEP) != 0) 
			t[1] += GPU_VOLT_STEP - rest;
		
		sanitize_min_max(t[1], GPU_MIN_VOLT, GPU_MAX_VOLT);
		platform->table[target].voltage = t[1];
	} else {
		for (i = 0; i < tokens; i++) {
			if ((rest = t[i] % GPU_VOLT_STEP) != 0) 
				t[i] += GPU_VOLT_STEP - rest;
			
			sanitize_min_max(t[i], GPU_MIN_VOLT, GPU_MAX_VOLT);
			platform->table[i + max].voltage = t[i];
		}
	}

	ipa_update();
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return count;
}
Code example #15
static ssize_t show_volt_table(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
	ssize_t count = 0, pr_len;
	int i, max, min;

	if (!platform)
		return -ENODEV;

	max = gpu_dvfs_get_level(platform->gpu_max_clock);
	min = gpu_dvfs_get_level(platform->gpu_min_clock);
	pr_len = (size_t)((PAGE_SIZE - 2) / (min-max));

	for (i = max; i <= min; i++) {
		count += snprintf(&buf[count], pr_len, "%d %d\n", 
				platform->table[i].clock,
				platform->table[i].voltage);
	}

	return count;
}
Code example #16
static int gpu_get_asv_table(struct exynos_context *platform, char *buf, size_t buf_size)
{
	int i, cnt = 0;

	if (!platform)
		return -ENODEV;

	if (buf == NULL)
		return 0;

	cnt += snprintf(buf+cnt, buf_size-cnt, "GPU, vol, min, max, down_stay, mif, int, cpu\n");

	for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++) {
		cnt += snprintf(buf+cnt, buf_size-cnt, "%d, %7d, %2d, %3d, %d, %6d, %6d, %7d\n",
		platform->table[i].clock, platform->table[i].voltage, platform->table[i].min_threshold,
		platform->table[i].max_threshold, platform->table[i].down_staycount, platform->table[i].mem_freq,
		platform->table[i].int_freq, platform->table[i].cpu_freq);
	}

	return cnt;
}
Code example #17
static int gpu_dvfs_governor_default(struct exynos_context *platform, int utilization)
{
	DVFS_ASSERT(platform);

	if ((platform->step > gpu_dvfs_get_level(platform->gpu_max_clock)) &&
			(utilization > platform->table[platform->step].max_threshold)) {
		platform->step--;
#ifdef MALI_SEC_HWCNT
		if ((!platform->hwcnt_bt_clk) && (platform->table[platform->step].clock > platform->gpu_max_clock_limit))
			platform->step = gpu_dvfs_get_level(platform->gpu_max_clock_limit);
#else
		if (platform->table[platform->step].clock > platform->gpu_max_clock_limit)
			platform->step = gpu_dvfs_get_level(platform->gpu_max_clock_limit);
#endif
		platform->down_requirement = platform->table[platform->step].down_staycount;
	} else if ((platform->step < gpu_dvfs_get_level(platform->gpu_min_clock)) && (utilization < platform->table[platform->step].min_threshold)) {
		platform->down_requirement--;
		if (platform->down_requirement == 0) {
			platform->step++;
			platform->down_requirement = platform->table[platform->step].down_staycount;
		}
	} else {
		platform->down_requirement = platform->table[platform->step].down_staycount;
	}
	DVFS_ASSERT((platform->step >= gpu_dvfs_get_level(platform->gpu_max_clock))
					&& (platform->step <= gpu_dvfs_get_level(platform->gpu_min_clock)));

	return 0;
}
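
The default, booster and interactive governors (examples #17, #18 and #22) share one skeleton: step up (decrement the level index, i.e. move toward gpu_max_clock) as soon as utilization exceeds the current level's max_threshold, but step down only after utilization has stayed below min_threshold for down_staycount consecutive ticks. The standalone sketch below shows just that skeleton; the clocks, thresholds and staycounts are invented, and the max-lock, min-lock and highspeed handling of the real governors is left out.

#include <stdio.h>

/* Invented per-level thresholds; level 0 = highest clock. */
struct level {
	int clock;          /* MHz */
	int max_threshold;  /* clock up above this utilization        */
	int min_threshold;  /* candidate to clock down below this     */
	int down_staycount; /* ticks below min_threshold required     */
};

static struct level table[] = {
	{ 772, 100, 80, 1 },
	{ 600,  90, 70, 1 },
	{ 420,  85, 60, 2 },
	{ 266,  80,  0, 3 },
};
#define MAX_LEVEL 0                                             /* highest clock */
#define MIN_LEVEL ((int)(sizeof(table) / sizeof(table[0])) - 1) /* lowest clock  */

static int step = MIN_LEVEL;
static int down_requirement = 1;

/* One governor tick, following the shape of gpu_dvfs_governor_default() above. */
static void governor_tick(int utilization)
{
	if (step > MAX_LEVEL && utilization > table[step].max_threshold) {
		step--;                                  /* clock up immediately       */
		down_requirement = table[step].down_staycount;
	} else if (step < MIN_LEVEL && utilization < table[step].min_threshold) {
		if (--down_requirement == 0) {           /* clock down only after      */
			step++;                          /* down_staycount low ticks   */
			down_requirement = table[step].down_staycount;
		}
	} else {
		down_requirement = table[step].down_staycount;
	}
}

int main(void)
{
	int load[] = { 95, 96, 97, 40, 30, 20, 10, 5 };
	int i;

	for (i = 0; i < (int)(sizeof(load) / sizeof(load[0])); i++) {
		governor_tick(load[i]);
		printf("util %3d -> %d MHz\n", load[i], table[step].clock);
	}
	return 0;
}
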
Code example #18
static int gpu_dvfs_governor_booster(struct exynos_context *platform, int utilization)
{
	static int weight;
	int cur_weight, booster_threshold, dvfs_table_lock;

	DVFS_ASSERT(platform);

	cur_weight = platform->cur_clock*utilization;
	/* booster_threshold = current clock * set the percentage of utilization */
	booster_threshold = platform->cur_clock * 50;

	dvfs_table_lock = gpu_dvfs_get_level(platform->gpu_max_clock);

	if ((platform->step >= dvfs_table_lock+2) &&
			((cur_weight - weight) > booster_threshold)) {
		platform->step -= 2;
		platform->down_requirement = platform->table[platform->step].down_staycount;
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "Booster Governor: G3D level 2 step\n");
	} else if ((platform->step > gpu_dvfs_get_level(platform->gpu_max_clock)) &&
			(utilization > platform->table[platform->step].max_threshold)) {
		platform->step--;
		platform->down_requirement = platform->table[platform->step].down_staycount;
	} else if ((platform->step < gpu_dvfs_get_level(platform->gpu_min_clock)) &&
			(utilization < platform->table[platform->step].min_threshold)) {
		platform->down_requirement--;
		if (platform->down_requirement == 0) {
			platform->step++;
			platform->down_requirement = platform->table[platform->step].down_staycount;
		}
	} else {
		platform->down_requirement = platform->table[platform->step].down_staycount;
	}

	DVFS_ASSERT((platform->step >= gpu_dvfs_get_level(platform->gpu_max_clock))
					&& (platform->step <= gpu_dvfs_get_level(platform->gpu_min_clock)));

	weight = cur_weight;

	return 0;
}
Code example #19
File: gpu_ipa.c  Project: ColinIanKing/m576
int kbase_platform_dvfs_freq_to_power(int freq)
{
#ifdef CONFIG_MALI_DVFS
	int level;
	unsigned int vol;
	unsigned long flags;
	unsigned long long power;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	if (!platform) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: platform context (0x%p) is not initialized\n", __func__, platform);
		return -1;
	}

	if (0 == freq) {
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		power = platform->power;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
	} else {
		for (level = gpu_dvfs_get_level(platform->gpu_max_clock); level <= gpu_dvfs_get_level(platform->gpu_min_clock); level++)
			if (platform->table[level].clock == freq)
				break;

		if (level <= gpu_dvfs_get_level(platform->gpu_min_clock)) {
			vol = platform->table[level].voltage / 10000;
			power = div_u64((u64)platform->ipa_power_coeff_gpu * freq * vol * vol, 100000);
		} else {
			power = 0;
		}
	}

	return (int)power;
#else
	return 0;
#endif /* CONFIG_MALI_DVFS */
}
Code example #20
int gpu_dvfs_update_time_in_state(int clock)
{
#ifdef CONFIG_MALI_DEBUG_SYS
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	u64 current_time;
	static u64 prev_time;
	int level = gpu_dvfs_get_level(clock);

	DVFS_ASSERT(platform);

	if (prev_time == 0)
		prev_time = get_jiffies_64();

	current_time = get_jiffies_64();
	if ((level >= gpu_dvfs_get_level(platform->gpu_max_clock)) && (level <= gpu_dvfs_get_level(platform->gpu_min_clock)))
		platform->table[level].time += current_time-prev_time;

	prev_time = current_time;
#endif /* CONFIG_MALI_DEBUG_SYS */

	return 0;
}
Code example #21
int gpu_dvfs_update_time_in_state(struct exynos_context *platform, int freq)
{
#ifdef CONFIG_MALI_T6XX_DEBUG_SYS
	u64 current_time;
	static u64 prev_time;
	int level = gpu_dvfs_get_level(platform, freq);

	if (prev_time == 0)
		prev_time = get_jiffies_64();

	current_time = get_jiffies_64();
	if ((level >= 0) && (level < platform->table_size))
		platform->table[level].time += current_time-prev_time;

	prev_time = current_time;
#endif /* CONFIG_MALI_T6XX_DEBUG_SYS */

	return 0;
}
Code example #22
static int gpu_dvfs_governor_interactive(struct exynos_context *platform, int utilization)
{
	DVFS_ASSERT(platform);

	if ((platform->step > gpu_dvfs_get_level(platform->gpu_max_clock))
			&& (utilization > platform->table[platform->step].max_threshold)) {
		int highspeed_level = gpu_dvfs_get_level(platform->interactive.highspeed_clock);
		if ((highspeed_level > 0) && (platform->step > highspeed_level)
				&& (utilization > platform->interactive.highspeed_load)) {
			if (platform->interactive.delay_count == platform->interactive.highspeed_delay) {
				platform->step = highspeed_level;
				platform->interactive.delay_count = 0;
			} else {
				platform->interactive.delay_count++;
			}
		} else {
			platform->step--;
			platform->interactive.delay_count = 0;
		}
#ifdef MALI_SEC_HWCNT
		if ((!platform->hwcnt_bt_clk) && (platform->table[platform->step].clock > platform->gpu_max_clock_limit))
			platform->step = gpu_dvfs_get_level(platform->gpu_max_clock_limit);
#else
		if (platform->table[platform->step].clock > platform->gpu_max_clock_limit)
			platform->step = gpu_dvfs_get_level(platform->gpu_max_clock_limit);
#endif
		platform->down_requirement = platform->table[platform->step].down_staycount;
	} else if ((platform->step < gpu_dvfs_get_level(platform->gpu_min_clock))
			&& (utilization < platform->table[platform->step].min_threshold)) {
		platform->interactive.delay_count = 0;
		platform->down_requirement--;
		if (platform->down_requirement == 0) {
			platform->step++;
			platform->down_requirement = platform->table[platform->step].down_staycount;
		}
	} else {
		platform->interactive.delay_count = 0;
		platform->down_requirement = platform->table[platform->step].down_staycount;
	}

	DVFS_ASSERT((platform->step >= gpu_dvfs_get_level(platform->gpu_max_clock))
					&& (platform->step <= gpu_dvfs_get_level(platform->gpu_min_clock)));

	return 0;
}
Code example #23
File: gpu_dvfs_api.c  Project: Hani-K/trelte_5433_MM
static int gpu_update_cur_level(struct exynos_context *platform)
{
	unsigned long flags;
	int level = 0;

	DVFS_ASSERT(platform);

	level = gpu_dvfs_get_level(platform->cur_clock);
	if (level >= 0) {
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (platform->step != level)
			platform->down_requirement = platform->table[level].stay_count;
		platform->step = level;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
	} else {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: invalid dvfs level returned %d\n", __func__, platform->cur_clock);
		return -1;
	}

	return 0;
}
Code example #24
int gpu_set_target_clk_vol_pending(int clk)
{
	int ret = 0, target_clk = 0, target_vol = 0;
	int prev_clk = 0;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	int level = 0;
#endif

	DVFS_ASSERT(platform);

	target_clk = gpu_check_target_clock(platform, clk);
	if (target_clk < 0) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
				"%s: mismatch clock error (source %d, target %d)\n", __func__, clk, target_clk);
		return -1;
	}

	target_vol = MAX(gpu_dvfs_get_voltage(target_clk) + platform->voltage_margin, platform->cold_min_vol);
	target_vol = target_vol < (int) platform->table[0].voltage ? target_vol : (int) platform->table[0].voltage;

	prev_clk = gpu_get_cur_clock(platform);
#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	level = gpu_dvfs_get_level(clk);
	exynos7420_cl_dvfs_stop(ID_G3D, level);
#endif

	GPU_SET_CLK_VOL(kbdev, platform->cur_clock, target_clk, target_vol);
	ret = gpu_update_cur_level(platform);
#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	if (!platform->voltage_margin && platform->cl_dvfs_start_base
			&& platform->cur_clock >= platform->cl_dvfs_start_base)
		exynos7420_cl_dvfs_start(ID_G3D);
#endif
	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "pending clk[%d -> %d], vol[%d (margin : %d)]\n",
		prev_clk, gpu_get_cur_clock(platform), gpu_get_cur_voltage(platform), platform->voltage_margin);

	return ret;
}
Code example #25
static int gpu_set_clock(struct exynos_context *platform, int clk)
{
	long g3d_rate_prev = -1;
	unsigned long g3d_rate = clk * MHZ;
	int ret = 0;
	int level = 0;

	if (aclk_g3d == 0)
		return -1;

#ifdef CONFIG_MALI_RT_PM
	if (platform->exynos_pm_domain)
		mutex_lock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_MALI_RT_PM */

	if (!gpu_is_power_on()) {
		ret = -1;
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock in the power-off state!\n", __func__);
		goto err;
	}

	g3d_rate_prev = clk_get_rate(aclk_g3d);

	/* if changed the VPLL rate, set rate for VPLL and wait for lock time */
	if (g3d_rate != g3d_rate_prev) {

		ret = clk_set_parent(mout_g3d, fin_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [fin_pll]\n", __func__);
			goto err;
		}

		/*change g3d pll*/
		ret = clk_set_rate(fout_g3d_pll, g3d_rate);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_rate [fout_g3d_pll]\n", __func__);
			goto err;
		}

		level = gpu_dvfs_get_level(g3d_rate/MHZ);
		if (level < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to gpu_dvfs_get_level \n", __func__);
			goto err;
		}

		ret = clk_set_rate(sclk_hpm_g3d, (clk_get_rate(aclk_g3d)/hpm_freq_table[level]));
		if (ret < 0)
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_rate [sclk_hpm_g3d]\n", __func__);

		ret = clk_set_parent(mout_g3d, fout_g3d_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [fout_g3d_pll]\n", __func__);
			goto err;
		}

		g3d_rate_prev = g3d_rate;
	}

	platform->cur_clock = gpu_get_cur_clock(platform);

	if (platform->cur_clock != clk_get_rate(fout_g3d_pll)/MHZ)
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "clock value is wrong (aclk_g3d: %d, fout_g3d_pll: %d)\n",
				platform->cur_clock, (int) clk_get_rate(fout_g3d_pll)/MHZ);
	GPU_LOG(DVFS_DEBUG, LSI_CLOCK_VALUE, g3d_rate/MHZ, platform->cur_clock,
		"clock set: %ld, clock get: %d\n", g3d_rate/MHZ, platform->cur_clock);
err:
#ifdef CONFIG_MALI_RT_PM
	if (platform->exynos_pm_domain)
		mutex_unlock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_MALI_RT_PM */
	return ret;
}
Code example #26
int gpu_dvfs_governor_init(struct kbase_device *kbdev, int governor_type)
{
	unsigned long flags;
#ifdef CONFIG_MALI_T6XX_DVFS
	int i, total = 0;
#endif /* CONFIG_MALI_T6XX_DVFS */
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

#ifdef CONFIG_MALI_T6XX_DVFS
	switch (governor_type) {
	case G3D_DVFS_GOVERNOR_DEFAULT:
		gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_default;
		platform->table = gpu_dvfs_infotbl_default;
		platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
		platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
		platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_DEFAULT);
		break;
	case G3D_DVFS_GOVERNOR_STATIC:
		gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_static;
		platform->table = gpu_dvfs_infotbl_default;
		platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
		platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
		platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_STATIC);
		break;
	case G3D_DVFS_GOVERNOR_BOOSTER:
		gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_booster;
		platform->table = gpu_dvfs_infotbl_default;
		platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
		platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
		platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_BOOSTER);
		break;
	default:
		GPU_LOG(DVFS_WARNING, "[gpu_dvfs_governor_init] invalid governor type\n");
		gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_default;
		platform->table = gpu_dvfs_infotbl_default;
		platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
		platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
		platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_DEFAULT);
		break;
	}

	platform->utilization = 100;
	platform->target_lock_type = -1;
	platform->max_lock = 0;
	platform->min_lock = 0;
#ifdef CONFIG_CPU_THERMAL_IPA
	gpu_ipa_dvfs_calc_norm_utilisation(kbdev);
#endif /* CONFIG_CPU_THERMAL_IPA */
	for (i = 0; i < NUMBER_LOCK; i++) {
		platform->user_max_lock[i] = 0;
		platform->user_min_lock[i] = 0;
	}

	platform->down_requirement = 1;
	platform->wakeup_lock = 0;

	platform->governor_type = governor_type;
	platform->governor_num = G3D_MAX_GOVERNOR_NUM;

	for (i = 0; i < G3D_MAX_GOVERNOR_NUM; i++)
		total += snprintf(platform->governor_list+total,
			sizeof(platform->governor_list), "[%d] %s\n", i, governor_list[i]);

	gpu_dvfs_init_time_in_state(platform);
#else
	platform->table = gpu_dvfs_infotbl_default;
	platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
	platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
	platform->step = gpu_dvfs_get_level(platform, MALI_DVFS_START_FREQ);
#endif /* CONFIG_MALI_T6XX_DVFS */

	platform->cur_clock = platform->table[platform->step].clock;

	/* asv info update */
	gpu_dvfs_update_asv_table(platform, governor_type);

	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return 1;
}
Code example #27
int gpu_dvfs_clock_lock(gpu_dvfs_lock_command lock_command, gpu_dvfs_lock_type lock_type, int clock)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	int i;
	bool dirty = false;
	unsigned long flags;

	DVFS_ASSERT(platform);

	if (!platform->dvfs_status)
		return 0;

	if ((lock_type < TMU_LOCK) || (lock_type >= NUMBER_LOCK)) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid lock type is called (%d)\n", __func__, lock_type);
		return -1;
	}

	switch (lock_command) {
	case GPU_DVFS_MAX_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (gpu_dvfs_get_level(clock) < 0) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "max lock error: invalid clock value %d\n", clock);
			return -1;
		}

		platform->user_max_lock[lock_type] = clock;
		platform->max_lock = clock;

		if (platform->max_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_max_lock[i] > 0)
					platform->max_lock = MIN(platform->max_lock, platform->user_max_lock[i]);
			}
		} else {
			platform->max_lock = clock;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->max_lock > 0) && (platform->cur_clock >= platform->max_lock))
			gpu_set_target_clk_vol(platform->max_lock, false);

		GPU_LOG(DVFS_DEBUG, LSI_GPU_MAX_LOCK, lock_type, clock,
			"lock max clk[%d], user lock[%d], current clk[%d]\n",
			platform->max_lock, platform->user_max_lock[lock_type], platform->cur_clock);
		break;
	case GPU_DVFS_MIN_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (gpu_dvfs_get_level(clock) < 0) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "min lock error: invalid clock value %d\n", clock);
			return -1;
		}

		platform->user_min_lock[lock_type] = clock;
		platform->min_lock = clock;

		if (platform->min_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_min_lock[i] > 0)
					platform->min_lock = MAX(platform->min_lock, platform->user_min_lock[i]);
			}
		} else {
			platform->min_lock = clock;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->min_lock > 0)&& (platform->cur_clock < platform->min_lock)
						&& (platform->min_lock <= platform->max_lock))
			gpu_set_target_clk_vol(platform->min_lock, false);

		GPU_LOG(DVFS_DEBUG, LSI_GPU_MIN_LOCK, lock_type, clock,
			"lock min clk[%d], user lock[%d], current clk[%d]\n",
			platform->min_lock, platform->user_min_lock[lock_type], platform->cur_clock);
		break;
	case GPU_DVFS_MAX_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		platform->user_max_lock[lock_type] = 0;
		platform->max_lock = platform->gpu_max_clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_max_lock[i] > 0) {
				dirty = true;
				platform->max_lock = MIN(platform->user_max_lock[i], platform->max_lock);
			}
		}

		if (!dirty)
			platform->max_lock = 0;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, LSI_GPU_MAX_LOCK, lock_type, clock, "unlock max clk\n");
		break;
	case GPU_DVFS_MIN_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		platform->user_min_lock[lock_type] = 0;
		platform->min_lock = platform->gpu_min_clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_min_lock[i] > 0) {
				dirty = true;
				platform->min_lock = MAX(platform->user_min_lock[i], platform->min_lock);
			}
		}

		if (!dirty)
			platform->min_lock = 0;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, LSI_GPU_MIN_LOCK, lock_type, clock, "unlock min clk\n");
		break;
	default:
		break;
	}

	return 0;
}
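
Example #27 (and the older variant in example #29) recomputes the effective locks from the per-source arrays on every change: the effective max lock is the lowest of the non-zero user_max_lock entries, the effective min lock is the highest of the non-zero user_min_lock entries, and 0 means no lock is held. The sketch below shows only that aggregation; the lock source names other than TMU_LOCK and SYSFS_LOCK, and the clock numbers, are invented, and the real driver does this under platform->gpu_dvfs_spinlock.

#include <stdio.h>

/* Hypothetical lock sources; TMU_LOCK and SYSFS_LOCK appear in the examples,
 * BOOST_LOCK is made up for illustration. */
enum { TMU_LOCK, SYSFS_LOCK, BOOST_LOCK, NUMBER_LOCK };

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static int user_max_lock[NUMBER_LOCK]; /* 0 = this source holds no lock */
static int user_min_lock[NUMBER_LOCK];

/* Effective max lock: the most restrictive (lowest) active max lock. */
static int effective_max_lock(void)
{
	int i, lock = 0;

	for (i = 0; i < NUMBER_LOCK; i++)
		if (user_max_lock[i] > 0)
			lock = lock ? MIN(lock, user_max_lock[i]) : user_max_lock[i];
	return lock;
}

/* Effective min lock: the most restrictive (highest) active min lock. */
static int effective_min_lock(void)
{
	int i, lock = 0;

	for (i = 0; i < NUMBER_LOCK; i++)
		if (user_min_lock[i] > 0)
			lock = MAX(lock, user_min_lock[i]);
	return lock;
}

int main(void)
{
	user_max_lock[TMU_LOCK] = 420;   /* thermal throttle */
	user_max_lock[SYSFS_LOCK] = 600; /* user cap         */
	user_min_lock[BOOST_LOCK] = 350; /* touch boost      */

	/* Prints "max lock: 420, min lock: 350" */
	printf("max lock: %d, min lock: %d\n",
	       effective_max_lock(), effective_min_lock());
	return 0;
}
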
Code example #28
int gpu_set_target_clk_vol(int clk, bool pending_is_allowed)
{
	int ret = 0, target_clk = 0, target_vol = 0;
	int prev_clk = 0;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	int level = 0;
#endif

	DVFS_ASSERT(platform);

	if (!gpu_control_is_power_on(pkbdev)) {
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock and voltage in the power-off state!\n", __func__);
		return -1;
	}

	mutex_lock(&platform->gpu_clock_lock);
#ifdef CONFIG_MALI_DVFS
	if (pending_is_allowed && platform->dvs_is_enabled) {
		if (!platform->dvfs_pending && clk < platform->cur_clock) {
			platform->dvfs_pending = clk;
			GPU_LOG(DVFS_DEBUG, DUMMY, 0u, 0u, "pending to change the clock [%d -> %d]\n", platform->cur_clock, platform->dvfs_pending);
		} else if (clk > platform->cur_clock) {
			platform->dvfs_pending = 0;
		}
		mutex_unlock(&platform->gpu_clock_lock);
		return 0;
	} else {
		platform->dvfs_pending = 0;
	}

	if (platform->dvs_is_enabled) {
		mutex_unlock(&platform->gpu_clock_lock);
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't control clock and voltage in dvs mode\n", __func__);
		return 0;
	}

#endif /* CONFIG_MALI_DVFS */

	target_clk = gpu_check_target_clock(platform, clk);
	if (target_clk < 0) {
		mutex_unlock(&platform->gpu_clock_lock);
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
				"%s: mismatch clock error (source %d, target %d)\n", __func__, clk, target_clk);
		return -1;
	}

	target_vol = MAX(gpu_dvfs_get_voltage(target_clk) + platform->voltage_margin, platform->cold_min_vol);
	target_vol = target_vol < (int) platform->table[0].voltage ? target_vol : (int) platform->table[0].voltage;

	prev_clk = gpu_get_cur_clock(platform);

#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	level = gpu_dvfs_get_level(clk);
	exynos7420_cl_dvfs_stop(ID_G3D, level);
#endif

	GPU_SET_CLK_VOL(kbdev, prev_clk, target_clk, target_vol);
	ret = gpu_update_cur_level(platform);

#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	if (!platform->voltage_margin && platform->power_status
		&& platform->cl_dvfs_start_base && platform->cur_clock >= platform->cl_dvfs_start_base)
		exynos7420_cl_dvfs_start(ID_G3D);
#endif
	mutex_unlock(&platform->gpu_clock_lock);

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "clk[%d -> %d], vol[%d (margin : %d)]\n",
		prev_clk, gpu_get_cur_clock(platform), gpu_get_cur_voltage(platform), platform->voltage_margin);

	return ret;
}
Code example #29
File: gpu_dvfs_handler.c  Project: monojo/xu3
int gpu_dvfs_handler_control(struct kbase_device *kbdev, gpu_dvfs_handler_command command, int param)
{
	int ret = 0;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	int i;
	bool dirty = false;
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	switch (command) {
#ifdef CONFIG_MALI_MIDGARD_DVFS
	case GPU_HANDLER_DVFS_ON:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_OFF:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_GOVERNOR_CHANGE:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		gpu_dvfs_governor_init(kbdev, param);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_MAX_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->min_lock >= 0) && (param < platform->min_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "[G3D] max lock Error: lock is smaller than min lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = param;
		platform->max_lock = param;

		if (platform->max_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_max_lock[i] > 0)
					platform->max_lock = MIN(platform->max_lock, platform->user_max_lock[i]);
			}
		} else {
			platform->max_lock = param;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->max_lock > 0) && (platform->cur_clock > platform->max_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->max_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock max clk[%d], user lock[%d], current clk[%d]\n", platform->max_lock,
				platform->user_max_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MIN_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->max_lock > 0) && (param > platform->max_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "min lock Error: the lock is larger than max lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = param;
		platform->min_lock = param;

		if (platform->min_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_min_lock[i] > 0)
					platform->min_lock = MAX(platform->min_lock, platform->user_min_lock[i]);
			}
		} else {
			platform->min_lock = param;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->min_lock > 0) && (platform->cur_clock < platform->min_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->min_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock min clk[%d], user lock[%d], current clk[%d]\n", platform->min_lock,
				platform->user_min_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MAX_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = 0;
		platform->max_lock = platform->table[platform->table_size-1].clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_max_lock[i] > 0) {
				dirty = true;
				platform->max_lock = MIN(platform->user_max_lock[i], platform->max_lock);
			}
		}

		if (!dirty)
			platform->max_lock = 0;

		platform->target_lock_type = -1;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock max clk\n");
		break;
	case GPU_HANDLER_DVFS_MIN_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = 0;
		platform->min_lock = platform->table[0].clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_min_lock[i] > 0) {
				dirty = true;
				platform->min_lock = MAX(platform->user_min_lock[i], platform->min_lock);
			}
		}

		if (!dirty)
			platform->min_lock = 0;

		platform->target_lock_type = -1;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock min clk\n");
		break;
	case GPU_HANDLER_INIT_TIME_IN_STATE:
		gpu_dvfs_init_time_in_state(platform);
		break;
	case GPU_HANDLER_UPDATE_TIME_IN_STATE:
		gpu_dvfs_update_time_in_state(platform, param);
		break;
	case GPU_HANDLER_DVFS_GET_LEVEL:
		ret = gpu_dvfs_get_level(platform, param);
		break;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	case GPU_HANDLER_DVFS_GET_VOLTAGE:
		ret = gpu_dvfs_get_voltage(platform, param);
		break;
	default:
		break;
	}
	return ret;
}