static int pm_callback_runtime_on(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	GPU_LOG(DVFS_INFO, "g3d turn on\n");
#ifdef CONFIG_MALI_EXYNOS_TRACE
	KBASE_TRACE_ADD_EXYNOS(kbdev, LSI_GPU_ON, NULL, NULL, 0u, 0u);
#endif /* CONFIG_MALI_EXYNOS_TRACE */

#ifdef CONFIG_MALI_MIDGARD_DVFS
	gpu_control_state_set(kbdev, GPU_CONTROL_PREPARE_ON, 0);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	gpu_control_state_set(kbdev, GPU_CONTROL_CLOCK_ON, 0);
#ifdef GPU_EARLY_CLK_GATING
	gpu_control_state_set(kbdev, GPU_CONTROL_CLOCK_ON_POST, 0);
#endif /* GPU_EARLY_CLK_GATING */

#if (GPU_POWERON_SRC_CLK_OSC == 0)
	gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->cur_clock);
#endif

	return 0;
}
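For context: a runtime-PM hook like this is normally handed to the Midgard core through a kbase_pm_callback_conf instance. A minimal sketch, assuming the gpu_power_on/gpu_power_off wrappers shown later in this listing double as the non-runtime callbacks (field names follow struct kbase_pm_callback_conf in mali_kbase_config.h and vary between driver releases):

/* Sketch: wiring the Exynos platform callbacks into the Midgard PM core. */
static struct kbase_pm_callback_conf pm_callbacks = {
	.power_on_callback          = gpu_power_on,
	.power_off_callback         = gpu_power_off,
	.power_runtime_on_callback  = pm_callback_runtime_on,
	.power_runtime_off_callback = pm_callback_runtime_off,
};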
Example #2
static int gpu_dvfs_on_off(struct kbase_device *kbdev, bool enable)
{
	unsigned long flags;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	if (enable && !platform->dvfs_status) {
		platform->dvfs_status = true;
		gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->cur_clock);
		gpu_dvfs_handler_init(kbdev);
		if (!kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = true;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
		}
	} else if (!enable && platform->dvfs_status) {
		platform->dvfs_status = false;
		gpu_dvfs_handler_deinit(kbdev);
		gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, MALI_DVFS_BL_CONFIG_FREQ);
		if (kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = false;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_cancel(&kbdev->pm.metrics.timer);
		}
	} else {
		GPU_LOG(DVFS_WARNING, "impossible state to change dvfs status (current: %d, request: %d)\n",
				platform->dvfs_status, enable);
		return -1;
	}
	return 0;
}
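Note that gpu_dvfs_on_off() is not meant to be called directly; gpu_dvfs_handler_control() (Example #16 below) takes gpu_dvfs_handler_lock before invoking it. A usage sketch of the serialized entry points:

/* Sketch: toggle DVFS through the serialized handler entry points. */
gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_ON, 0);	/* enable */
gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_OFF, 0);	/* disable */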
static void pm_callback_runtime_off(struct kbase_device *kbdev)
{
	GPU_LOG(DVFS_INFO, "g3d turn off\n");
#ifdef CONFIG_MALI_EXYNOS_TRACE
	KBASE_TRACE_ADD_EXYNOS(kbdev, LSI_GPU_OFF, NULL, NULL, 0u, 0u);
#endif /* CONFIG_MALI_EXYNOS_TRACE */
#ifdef GPU_EARLY_CLK_GATING
	gpu_control_state_set(kbdev, GPU_CONTROL_CLOCK_OFF_POST, 0);
#else
	gpu_control_state_set(kbdev, GPU_CONTROL_CLOCK_OFF, 0);
#endif /* GPU_EARLY_CLK_GATING */
}
static int gpu_tmu_notifier(struct notifier_block *notifier,
				unsigned long event, void *v)
{
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
	if (!platform)
		return -ENODEV;

	if (!platform->tmu_status)
		return NOTIFY_OK;

	platform->voltage_margin = 0;

	if (event == GPU_COLD) {
		platform->voltage_margin = VOLTAGE_OFFSET_MARGIN;
	} else if (event == GPU_NORMAL) {
		gpu_tmu_normal_work(pkbdev);
	} else if (event >= GPU_THROTTLING1 && event <= GPU_TRIPPING) {
		if (gpu_tmu_hot_check_and_work(pkbdev, event))
			GPU_LOG(DVFS_ERROR, "failed to open device");
	}
	KBASE_TRACE_ADD_EXYNOS(pkbdev, LSI_TMU_VALUE, NULL, NULL, 0u, event);

	gpu_control_state_set(pkbdev, GPU_CONTROL_SET_MARGIN, 0);

	return NOTIFY_OK;
}
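The notifier has to be registered with the Exynos TMU driver before these events arrive. A sketch; exynos_gpu_add_notifier() is assumed here for illustration, and the actual registration helper differs between kernel trees:

static struct notifier_block gpu_tmu_nb = {
	.notifier_call = gpu_tmu_notifier,
};

/* Sketch: subscribe to TMU throttling events (assumed helper name). */
static int gpu_tmu_notifier_init(void)
{
	return exynos_gpu_add_notifier(&gpu_tmu_nb);
}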
static void pm_callback_runtime_off(struct kbase_device *kbdev)
{
	GPU_LOG(DVFS_INFO, "g3d turn off\n");
	KBASE_TRACE_ADD_EXYNOS(kbdev, LSI_GPU_OFF, NULL, NULL, 0u, 0u);

	gpu_control_state_set(kbdev, GPU_CONTROL_CLOCK_OFF, 0);
}
static ssize_t set_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct kbase_device *kbdev;
	struct exynos_context *platform;
	int freq = 0;
	int ret;

	kbdev = dev_get_drvdata(dev);
	if (!kbdev)
		return -ENODEV;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &freq);
	if (ret) {
		GPU_LOG(DVFS_WARNING, "set_clock: invalid value\n");
		return -EINVAL;
	}

	gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, freq);

	return count;
}
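A store handler like this is exposed through the standard DEVICE_ATTR machinery. A sketch; show_clock is a hypothetical companion read handler that is not part of this listing:

static DEVICE_ATTR(clock, S_IRUGO | S_IWUSR, show_clock, set_clock);

/* Sketch: create /sys/devices/.../clock during platform init. */
static int gpu_create_clock_node(struct kbase_device *kbdev)
{
	return device_create_file(kbdev->dev, &dev_attr_clock);
}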
static int pm_callback_runtime_on(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	GPU_LOG(DVFS_INFO, "g3d turn on\n");
	KBASE_TRACE_ADD_EXYNOS(kbdev, LSI_GPU_ON, NULL, NULL, 0u, 0u);

#ifdef CONFIG_MALI_T6XX_DVFS
	gpu_control_state_set(kbdev, GPU_CONTROL_PREPARE_ON, 0);
#endif /* CONFIG_MALI_T6XX_DVFS */
	gpu_control_state_set(kbdev, GPU_CONTROL_CLOCK_ON, 0);
	gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->cur_clock);

	return 0;
}
static void gpu_power_off(struct kbase_device *kbdev)
{
	GPU_LOG(DVFS_INFO, "gpu_power_off\n");
	pm_schedule_suspend(kbdev->dev, RUNTIME_PM_DELAY_TIME);
#ifdef GPU_EARLY_CLK_GATING
	gpu_control_state_set(kbdev, GPU_CONTROL_CLOCK_OFF, 0);
#endif /* GPU_EARLY_CLK_GATING */
}
static int gpu_power_on_post(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	if (!platform)
		return -ENODEV;

	GPU_LOG(DVFS_DEBUG, "power on post\n");

	gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->cur_clock);

	return 1;
}
Example #10
static int gpu_set_target_freq(int freq)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, freq);

	return 0;
}
static int gpu_power_on(struct kbase_device *kbdev)
{
	if (pm_runtime_resume(kbdev->dev)) {
#ifdef GPU_EARLY_CLK_GATING
		GPU_LOG(DVFS_INFO, "already power on\n");
		gpu_control_state_set(kbdev, GPU_CONTROL_CLOCK_ON, 0);
#endif /* GPU_EARLY_CLK_GATING */
		return 0;
	} else {
		return 1;
	}
}
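For reference: pm_runtime_resume() returns 0 when the device was actually resumed, 1 when it was already active, and a negative errno on failure, so the truthy branch above lumps the error case in with "already power on". The 0/1 return of gpu_power_on() itself follows the kbase power_on_callback convention, where a non-zero return indicates that GPU state may have been lost and must be reinitialized.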
Example #12
static void gpu_dvfs_event_proc(struct work_struct *q)
{
	int freq = 0;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform;

	platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return;

	mutex_lock(&platform->gpu_dvfs_handler_lock);
	if (gpu_control_state_set(kbdev, GPU_CONTROL_IS_POWER_ON, 0)) {
		freq = gpu_get_target_freq();
		gpu_set_target_freq(freq);
	}
	mutex_unlock(&platform->gpu_dvfs_handler_lock);
}
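gpu_dvfs_event_proc() runs in process context; the DVFS timer tick only queues it. A sketch, where gpu_dvfs_wq is a hypothetical dedicated workqueue (DECLARE_WORK and queue_work are the standard kernel APIs):

static struct workqueue_struct *gpu_dvfs_wq;
static DECLARE_WORK(gpu_dvfs_work, gpu_dvfs_event_proc);

/* Sketch: kick a DVFS decision from the metrics timer callback. */
static void gpu_dvfs_kick(void)
{
	queue_work(gpu_dvfs_wq, &gpu_dvfs_work);
}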
Example #13
static ssize_t show_power_state(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	ssize_t ret = 0;

	if (gpu_control_state_set(kbdev, GPU_CONTROL_IS_POWER_ON, 0) > 0)
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "1");
	else
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "0");

	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
Example #14
static ssize_t set_tmu_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	if (sysfs_streq("0", buf)) {
		if (platform->voltage_margin != 0) {
			platform->voltage_margin = 0;
			gpu_control_state_set(kbdev, GPU_CONTROL_SET_MARGIN, 0);
		}
		platform->target_lock_type = TMU_LOCK;
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MAX_UNLOCK, 0);
		platform->tmu_status = false;
	} else if (sysfs_streq("1", buf))
		platform->tmu_status = true;
	else
		GPU_LOG(DVFS_WARNING, "invalid val -only [0 or 1] is accepted\n");
#endif /* CONFIG_MALI_T6XX_DVFS */
	return count;
}
Example #15
static ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
#ifdef CONFIG_MALI_T6XX_DVFS
	struct kbase_device *kbdev;
	struct exynos_context *platform;
	int i;

	kbdev = dev_get_drvdata(dev);
	if (!kbdev)
		return -ENODEV;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	if (gpu_control_state_set(kbdev, GPU_CONTROL_IS_POWER_ON, 0))
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, platform->cur_clock);
	else
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, 0);

	for (i = platform->table_size - 1; i >= 0; i--) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d %llu\n",
				platform->table[i].clock,
				platform->table[i].time);
	}

	if (ret >= PAGE_SIZE - 1) {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}
#else
	GPU_LOG(DVFS_WARNING, "G3D DVFS build config is disabled. You can not see\n");
#endif /* CONFIG_MALI_T6XX_DVFS */
	return ret;
}
Example #16
int gpu_dvfs_handler_control(struct kbase_device *kbdev, gpu_dvfs_handler_command command, int param)
{
	int ret = 0;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	int i;
	bool dirty = false;
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	switch (command) {
#ifdef CONFIG_MALI_MIDGARD_DVFS
	case GPU_HANDLER_DVFS_ON:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_OFF:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_GOVERNOR_CHANGE:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		gpu_dvfs_governor_init(kbdev, param);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_MAX_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->min_lock >= 0) && (param < platform->min_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "[G3D] max lock Error: lock is smaller than min lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = param;
		platform->max_lock = param;

		if (platform->max_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_max_lock[i] > 0)
					platform->max_lock = MIN(platform->max_lock, platform->user_max_lock[i]);
			}
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->max_lock > 0) && (platform->cur_clock > platform->max_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->max_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock max clk[%d], user lock[%d], current clk[%d]\n", platform->max_lock,
				platform->user_min_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MIN_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->max_lock > 0) && (param > platform->max_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "min lock Error: the lock is larger than max lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = param;
		platform->min_lock = param;

		if (platform->min_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_min_lock[i] > 0)
					platform->min_lock = MAX(platform->min_lock, platform->user_min_lock[i]);
			}
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->min_lock > 0) && (platform->cur_clock < platform->min_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->min_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock min clk[%d], user lock[%d], current clk[%d]\n", platform->min_lock,
				platform->user_min_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MAX_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = 0;
		platform->max_lock = platform->table[platform->table_size-1].clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_max_lock[i] > 0) {
				dirty = true;
				platform->max_lock = MIN(platform->user_max_lock[i], platform->max_lock);
			}
		}

		if (!dirty)
			platform->max_lock = 0;

		platform->target_lock_type = -1;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock max clk\n");
		break;
	case GPU_HANDLER_DVFS_MIN_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = 0;
		platform->min_lock = platform->table[0].clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_min_lock[i] > 0) {
				dirty = true;
				platform->min_lock = MAX(platform->user_min_lock[i], platform->min_lock);
			}
		}

		if (!dirty)
			platform->min_lock = 0;

		platform->target_lock_type = -1;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock min clk\n");
		break;
	case GPU_HANDLER_INIT_TIME_IN_STATE:
		gpu_dvfs_init_time_in_state(platform);
		break;
	case GPU_HANDLER_UPDATE_TIME_IN_STATE:
		gpu_dvfs_update_time_in_state(platform, param);
		break;
	case GPU_HANDLER_DVFS_GET_LEVEL:
		ret = gpu_dvfs_get_level(platform, param);
		break;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	case GPU_HANDLER_DVFS_GET_VOLTAGE:
		ret = gpu_dvfs_get_voltage(platform, param);
		break;
	default:
		break;
	}
	return ret;
}
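Note the calling convention for the lock commands: the handler reads platform->target_lock_type to select the per-client lock slot and resets it to -1 before returning, so callers must set it immediately before the call. A sketch of a TMU-initiated max lock; gpu_tmu_set_max_lock is a hypothetical wrapper:

/* Sketch: request a max-frequency lock on behalf of the TMU client.
 * The handler consumes platform->target_lock_type and resets it to -1. */
static int gpu_tmu_set_max_lock(struct kbase_device *kbdev, int freq)
{
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	if (!platform)
		return -ENODEV;

	platform->target_lock_type = TMU_LOCK;
	return gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MAX_LOCK, freq);
}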