static ssize_t set_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	if (sysfs_streq("0", buf))
		gpu_dvfs_on_off(false);
	else if (sysfs_streq("1", buf))
		gpu_dvfs_on_off(true);

	return count;
}
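/*
 * Usage note (an assumption, not part of the example): if set_dvfs is
 * registered as the store callback of a "dvfs" device attribute, GPU DVFS
 * can be toggled from userspace, e.g.
 *
 *   echo 0 > /sys/devices/.../dvfs   # disable DVFS
 *   echo 1 > /sys/devices/.../dvfs   # enable DVFS
 *
 * Writes other than "0"/"1" are silently ignored but still return count.
 */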
static ssize_t set_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int clk = 0;
	int ret, i, policy_count;
	static bool cur_state;
	const struct kbase_pm_policy *const *policy_list;
	static const struct kbase_pm_policy *prev_policy;
	static bool prev_tmu_status = true;
#ifdef CONFIG_MALI_DVFS
	static bool prev_dvfs_status = true;
#endif /* CONFIG_MALI_DVFS */
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret = kstrtouint(buf, 0, &clk);
	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
		return -EINVAL;
	}
	}

	if (!cur_state) {
		prev_tmu_status = platform->tmu_status;
#ifdef CONFIG_MALI_DVFS
		prev_dvfs_status = platform->dvfs_status;
#endif /* CONFIG_MALI_DVFS */
		prev_policy = kbase_pm_get_policy(pkbdev);
	}

	if (clk == 0) {
		kbase_pm_set_policy(pkbdev, prev_policy);
		platform->tmu_status = prev_tmu_status;
#ifdef CONFIG_MALI_DVFS
		/* restore the DVFS state saved when the clock was first pinned */
		if (prev_dvfs_status && !platform->dvfs_status)
			gpu_dvfs_on_off(true);
#endif /* CONFIG_MALI_DVFS */
		cur_state = false;
	} else {
		policy_count = kbase_pm_list_policies(&policy_list);
		for (i = 0; i < policy_count; i++) {
			if (sysfs_streq(policy_list[i]->name, "always_on")) {
				kbase_pm_set_policy(pkbdev, policy_list[i]);
				break;
			}
		}
		platform->tmu_status = false;
#ifdef CONFIG_MALI_DVFS
		if (platform->dvfs_status)
			gpu_dvfs_on_off(false);
#endif /* CONFIG_MALI_DVFS */
		gpu_set_target_clk_vol(clk, false);
		cur_state = true;
	}

	return count;
}
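Both store callbacks above only take effect once they are exposed through sysfs. Below is a minimal wiring sketch, assuming the attributes are named dvfs and clock and that a helper such as gpu_create_sysfs_files is called from the probe path; the attribute names and the helper are illustrative assumptions, not taken from the example above.

static DEVICE_ATTR(dvfs, S_IWUSR, NULL, set_dvfs);
static DEVICE_ATTR(clock, S_IWUSR, NULL, set_clock);

static int gpu_create_sysfs_files(struct device *dev)
{
	int ret;

	/* expose /sys/.../dvfs and /sys/.../clock */
	ret = device_create_file(dev, &dev_attr_dvfs);
	if (ret)
		return ret;

	ret = device_create_file(dev, &dev_attr_clock);
	if (ret)
		device_remove_file(dev, &dev_attr_dvfs);

	return ret;
}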
Example #3
int gpu_dvfs_handler_control(struct kbase_device *kbdev, gpu_dvfs_handler_command command, int param)
{
	int ret = 0;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	int i;
	bool dirty = false;
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	switch (command) {
#ifdef CONFIG_MALI_MIDGARD_DVFS
	case GPU_HANDLER_DVFS_ON:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_OFF:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_GOVERNOR_CHANGE:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		gpu_dvfs_governor_init(kbdev, param);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_MAX_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->min_lock >= 0) && (param < platform->min_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "[G3D] max lock error: requested lock is lower than the min lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = param;
		platform->max_lock = param;

		if (platform->max_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_max_lock[i] > 0)
					platform->max_lock = MIN(platform->max_lock, platform->user_max_lock[i]);
			}
		} else {
			platform->max_lock = param;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->max_lock > 0) && (platform->cur_clock > platform->max_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->max_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock max clk[%d], user lock[%d], current clk[%d]\n", platform->max_lock,
				platform->user_max_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MIN_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->max_lock > 0) && (param > platform->max_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "[G3D] min lock error: requested lock is higher than the max lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = param;
		platform->min_lock = param;

		if (platform->min_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_min_lock[i] > 0)
					platform->min_lock = MAX(platform->min_lock, platform->user_min_lock[i]);
			}
		} else {
			platform->min_lock = param;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->min_lock > 0) && (platform->cur_clock < platform->min_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->min_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock min clk[%d], user lock[%d], current clk[%d]\n", platform->min_lock,
				platform->user_min_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MAX_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = 0;
		platform->max_lock = platform->table[platform->table_size - 1].clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_max_lock[i] > 0) {
				dirty = true;
				platform->max_lock = MIN(platform->user_max_lock[i], platform->max_lock);
			}
		}

		if (!dirty)
			platform->max_lock = 0;

		platform->target_lock_type = -1;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock max clk\n");
		break;
	case GPU_HANDLER_DVFS_MIN_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = 0;
		platform->min_lock = platform->table[0].clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_min_lock[i] > 0) {
				dirty = true;
				platform->min_lock = MAX(platform->user_min_lock[i], platform->min_lock);
			}
		}

		if (!dirty)
			platform->min_lock = 0;

		platform->target_lock_type = -1;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock min clk\n");
		break;
	case GPU_HANDLER_INIT_TIME_IN_STATE:
		gpu_dvfs_init_time_in_state(platform);
		break;
	case GPU_HANDLER_UPDATE_TIME_IN_STATE:
		gpu_dvfs_update_time_in_state(platform, param);
		break;
	case GPU_HANDLER_DVFS_GET_LEVEL:
		ret = gpu_dvfs_get_level(platform, param);
		break;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	case GPU_HANDLER_DVFS_GET_VOLTAGE:
		ret = gpu_dvfs_get_voltage(platform, param);
		break;
	default:
		break;
	}
	return ret;
}
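A minimal caller sketch for the handler above, following the convention visible in the MAX/MIN_LOCK cases that platform->target_lock_type is set before the call; the wrapper name gpu_tmu_clock_limit is hypothetical and not part of the example.

/* Hypothetical wrapper: clamp the GPU clock in response to a thermal (TMU) event. */
static int gpu_tmu_clock_limit(struct kbase_device *kbdev, int max_clock)
{
	struct exynos_context *platform =
		(struct exynos_context *)kbdev->platform_context;

	if (!platform)
		return -ENODEV;

	/* record which lock slot the handler should update ... */
	platform->target_lock_type = TMU_LOCK;

	/* ... then apply (max_clock > 0) or release (max_clock == 0) the max lock */
	if (max_clock > 0)
		return gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MAX_LOCK, max_clock);

	return gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MAX_UNLOCK, 0);
}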