static int exynos_gpu_min_qos_handler(struct notifier_block *b, unsigned long val, void *v)
{
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
	pr_debug("%s: lock minimal gpu freq %lu\n", __func__, val);

	platform->target_lock_type = BOOST_LOCK;

	if (val)
		gpu_dvfs_handler_control(pkbdev, GPU_HANDLER_DVFS_MIN_LOCK, val);
	else
		gpu_dvfs_handler_control(pkbdev, GPU_HANDLER_DVFS_MIN_UNLOCK, val);

	return NOTIFY_OK;
}
/* Programs a new G3D clock/voltage pair, ordering the two writes so the
 * rail always satisfies the active clock: raise the voltage before the
 * clock when scaling up, lower it after the clock when scaling down.
 *
 * @kbdev:   kbase device (provides the exynos platform context)
 * @clock:   target clock, in the DVFS table's units
 * @voltage: nominal voltage for @clock; platform->voltage_margin is added
 *
 * Returns 0 on success, -ENODEV without a platform context, or -1 when
 * @clock lies outside the DVFS table range.
 */
static int gpu_set_clk_vol(struct kbase_device *kbdev, int clock, int voltage)
{
	/* Persists across calls so the scaling direction can be detected;
	 * -1 marks "no clock programmed yet". */
	static int prev_clock = -1;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	/* NOTE(review): this bound check assumes table[] is sorted ascending
	 * by clock (table[0] lowest) — confirm against the platform's DVFS
	 * table definition. */
	if ((clock > platform->table[platform->table_size-1].clock) || (clock < platform->table[0].clock)) {
		GPU_LOG(DVFS_ERROR, "Mismatch clock error (%d)\n", clock);
		return -1;
	}

	if (clock > prev_clock) {
		/* Scaling up: voltage first so the faster clock is never starved. */
		gpu_set_voltage(platform, voltage + platform->voltage_margin);
#if SOC_NAME == 5260
		set_match_abb(ID_G3D, platform->devfreq_g3d_asv_abb[platform->step]);
#endif /* SOC_NAME */
		gpu_set_clock(platform, clock);
	} else {
		/* Scaling down (or equal): clock first, then drop the voltage. */
		gpu_set_clock(platform, clock);
#if SOC_NAME == 5260
		set_match_abb(ID_G3D, platform->devfreq_g3d_asv_abb[platform->step]);
#endif /* SOC_NAME */
		gpu_set_voltage(platform, voltage + platform->voltage_margin);
	}
	GPU_LOG(DVFS_INFO, "[G3D] clock changed [%d -> %d]\n", prev_clock, clock);

	/* Account the interval spent at the outgoing clock before switching. */
	gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, prev_clock);

	prev_clock = clock;

	return 0;
}
/* sysfs store: selects the DVFS governor by index.
 * Accepts integers in [0, platform->governor_num).
 * Returns @count on success, -ENODEV without device context, -ENOENT on
 * bad input or governor-change failure.
 */
static ssize_t set_governor(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct kbase_device *kbdev;
	struct exynos_context *platform;
	int ret;
	int next_governor_type;

	kbdev = dev_get_drvdata(dev);

	if (!kbdev)
		return -ENODEV;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &next_governor_type);
	/* Bail out on unparsable input: next_governor_type is uninitialized
	 * when kstrtoint() fails, so it must not be range-checked or used. */
	if (ret) {
		GPU_LOG(DVFS_WARNING, "set_governor: invalid value\n");
		return -ENOENT;
	}

	if ((next_governor_type < 0) || (next_governor_type >= platform->governor_num)) {
		GPU_LOG(DVFS_WARNING, "set_governor: invalid value\n");
		return -ENOENT;
	}

	ret = gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GOVERNOR_CHANGE, next_governor_type);

	if (ret < 0) {
		GPU_LOG(DVFS_WARNING, "set_governor: fail to set the new governor\n");
		return -ENOENT;
	}
#else
	GPU_LOG(DVFS_WARNING, "G3D DVFS build config is disabled. You can not set\n");
#endif /* CONFIG_MALI_T6XX_DVFS */
	return count;
}
/* sysfs store: enables ("1") or disables ("0") GPU DVFS.
 * Any other input is silently ignored; always returns @count. */
static ssize_t set_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct kbase_device *kbdev = dev_get_drvdata(dev);

	if (kbdev == NULL)
		return -ENODEV;

	if (sysfs_streq("1", buf))
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_ON, 0);
	else if (sysfs_streq("0", buf))
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_OFF, 0);
#else
	GPU_LOG(DVFS_WARNING, "G3D DVFS build config is disabled. You can not set\n");
#endif /* CONFIG_MALI_T6XX_DVFS */
	return count;
}
/* sysfs store: applies or releases a minimum-frequency DVFS lock.
 * "0" releases the lock; any other integer is validated against the
 * DVFS table and installed as the new minimum (writing the lowest table
 * clock is treated as an unlock). */
static ssize_t set_min_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct exynos_context *platform;
	int clock = 0;
	int level;

	if (kbdev == NULL)
		return -ENODEV;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (platform == NULL)
		return -ENODEV;

	/* Explicit "0" releases the min lock. */
	if (sysfs_streq("0", buf)) {
		platform->target_lock_type = SYSFS_LOCK;
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MIN_UNLOCK, 0);
		return count;
	}

	if (kstrtoint(buf, 0, &clock)) {
		GPU_LOG(DVFS_WARNING, "set_clock: invalid value\n");
		return -ENOENT;
	}

	/* The clock must map to a valid DVFS level. */
	level = gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_LEVEL, clock);
	if ((level < 0) || (level > platform->table_size - 1)) {
		GPU_LOG(DVFS_WARNING, "set_clock: invalid value\n");
		return -ENOENT;
	}

	platform->target_lock_type = SYSFS_LOCK;
	if (clock == platform->table[0].clock)
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MIN_UNLOCK, 0);
	else
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MIN_LOCK, clock);
#else
	GPU_LOG(DVFS_WARNING, "G3D DVFS build config is disabled. You can not set\n");
#endif /* CONFIG_MALI_T6XX_DVFS */
	return count;
}
/* TMU cool-down handler: drops the thermal maximum-frequency lock once
 * the GPU is back in its normal temperature range. */
static void gpu_tmu_normal_work(struct kbase_device *kbdev)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct exynos_context *ctx = (struct exynos_context *)kbdev->platform_context;

	if (ctx == NULL)
		return;

	ctx->target_lock_type = TMU_LOCK;
	gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MAX_UNLOCK, 0);
#endif /* CONFIG_MALI_T6XX_DVFS */
}
/* "Exemplo n.º 7" / "0": extraction artifact (example separator and vote
 * count from a code-sample aggregator) — commented out so the file
 * remains valid C. */
/* Releases the IPA-imposed maximum-frequency lock on the global kbase
 * device (pkbdev).
 *
 * Returns 0 on success, -ENODEV if the platform context is not
 * initialized yet.
 */
int gpu_ipa_dvfs_max_unlock(void)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	if (!platform) {
		/* Use the C99 standard __func__ rather than the GCC-specific
		 * __FUNCTION__, for consistency with the rest of this file. */
		GPU_LOG(DVFS_ERROR, "platform context (0x%p) is not initialized within %s\n", platform, __func__);
		return -ENODEV;
	}

	platform->target_lock_type = IPA_LOCK;
	gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MAX_UNLOCK, 0);
	return 0;
}
/* sysfs show: prints one "<clock> <time>" line per DVFS level, highest
 * index first, into the PAGE_SIZE sysfs buffer.
 *
 * Returns the number of bytes written (capped at PAGE_SIZE-1), or
 * -ENODEV without device/platform context.
 */
static ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
#ifdef CONFIG_MALI_T6XX_DVFS
	struct kbase_device *kbdev;
	struct exynos_context *platform;
	int i;

	kbdev = dev_get_drvdata(dev);
	if (!kbdev)
		return -ENODEV;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	/* Fold the currently-accumulating interval into the table so the
	 * reported times are up to date. */
	if (gpu_control_state_set(kbdev, GPU_CONTROL_IS_POWER_ON, 0))
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, platform->cur_clock);
	else
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, 0);

	for (i = platform->table_size - 1; i >= 0; i--) {
		/* Stop once the page is full: snprintf() returns the would-be
		 * length, so ret can exceed PAGE_SIZE, and PAGE_SIZE-ret would
		 * then underflow when converted to the size parameter. */
		if (ret >= PAGE_SIZE - 1)
			break;
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d %llu\n",
				platform->table[i].clock,
				platform->table[i].time);
	}

	/* Normalize a truncated page to a terminated, newline-ended buffer. */
	if (ret >= PAGE_SIZE - 1) {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}
#else
	GPU_LOG(DVFS_WARNING, "G3D DVFS build config is disabled. You can not see\n");
#endif /* CONFIG_MALI_T6XX_DVFS */
	return ret;
}
/* sysfs store: any write resets the time-in-state accounting. */
static ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct kbase_device *kbdev = dev_get_drvdata(dev);

	if (kbdev == NULL)
		return -ENODEV;

	gpu_dvfs_handler_control(kbdev, GPU_HANDLER_INIT_TIME_IN_STATE, 0);
#else
	GPU_LOG(DVFS_WARNING, "G3D DVFS build config is disabled. You can not set\n");
#endif /* CONFIG_MALI_T6XX_DVFS */
	return count;
}
/* Programs a new G3D clock/voltage pair (Exynos 5422 variant), keeping
 * the rail valid for the active clock: voltage is raised before the
 * clock on the way up, and dropped after the clock on the way down.
 *
 * @kbdev:   kbase device (provides the exynos platform context)
 * @clock:   target clock, in the DVFS table's units
 * @voltage: nominal voltage for @clock; the thermal margin is folded in
 *           below when one is active
 *
 * Returns 0 on success, -ENODEV without a platform context, or -1 when
 * @clock lies outside the DVFS table range.
 */
static int gpu_set_clk_vol(struct kbase_device *kbdev, int clock, int voltage)
{
	/* Persists across calls so the scaling direction can be detected;
	 * -1 marks "no clock programmed yet". */
	static int prev_clock = -1;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	/* NOTE(review): this bound check assumes table[] is sorted ascending
	 * by clock (table[0] lowest) — confirm against the platform's DVFS
	 * table definition. */
	if ((clock > platform->table[platform->table_size-1].clock) || (clock < platform->table[0].clock)) {
		GPU_LOG(DVFS_ERROR, "Mismatch clock error (%d)\n", clock);
		return -1;
	}

	/* With a margin active, never program below the cold-boot floor. */
	if (platform->voltage_margin)
		voltage = MAX(voltage + platform->voltage_margin, COLD_MINIMUM_VOL);

	if (clock > prev_clock) {
		/* Scaling up: voltage first so the faster clock is never starved. */
		gpu_set_voltage(platform, voltage);
#ifdef CONFIG_DYNIMIC_ABB
        set_match_abb(ID_G3D, platform->devfreq_g3d_asv_abb[platform->step]);
#endif
		gpu_set_clock(platform, clock);
#if defined(CONFIG_EXYNOS5422_BTS)
		bts_scen_update(TYPE_G3D_FREQ, clock);
#endif /* CONFIG_EXYNOS5422_BTS */
	} else {
		/* Scaling down (or equal): clock first, then drop the voltage. */
#if defined(CONFIG_EXYNOS5422_BTS)
		bts_scen_update(TYPE_G3D_FREQ, clock);
#endif /* CONFIG_EXYNOS5422_BTS */
		gpu_set_clock(platform, clock);
#ifdef CONFIG_DYNIMIC_ABB
        set_match_abb(ID_G3D, platform->devfreq_g3d_asv_abb[platform->step]);
#endif
		gpu_set_voltage(platform, voltage);
	}

	/* NOTE(review): when a margin is active, @voltage already includes it
	 * at this point, so the "vol[%d + %d]" log double-counts the margin —
	 * verify whether the message should print the final voltage only. */
	GPU_LOG(DVFS_INFO, "[G3D]clk[%d -> %d], vol[%d + %d]\n", prev_clock, clock, voltage, platform->voltage_margin);

	/* Account the interval spent at the outgoing clock before switching. */
	gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, prev_clock);

	prev_clock = clock;

	return 0;
}
/* TMU hot-trip handler: translates a thermal throttling/tripping event
 * into a maximum GPU clock ceiling (TMU_LOCK).
 *
 * Returns 0 (also for unknown events, which are only logged), or
 * -ENODEV when the platform context is missing. */
static int gpu_tmu_hot_check_and_work(struct kbase_device *kbdev, unsigned long event)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct exynos_context *platform;
	int throttling_clock;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	platform = (struct exynos_context *)kbdev->platform_context;
	if (platform == NULL)
		return -ENODEV;

	/* Map the thermal event onto the clock ceiling for that zone. */
	switch (event) {
	case GPU_THROTTLING1:
		throttling_clock = GPU_THROTTLING_90_95;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_THROTTLING_90_95\n");
		break;
	case GPU_THROTTLING2:
		throttling_clock = GPU_THROTTLING_95_100;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_THROTTLING_95_100\n");
		break;
	case GPU_THROTTLING3:
		throttling_clock = GPU_THROTTLING_100_105;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_THROTTLING_100_105\n");
		break;
	case GPU_THROTTLING4:
		throttling_clock = GPU_THROTTLING_105_110;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_THROTTLING_105_110\n");
		break;
	case GPU_TRIPPING:
		throttling_clock = GPU_TRIPPING_110;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_THROTTLING_110\n");
		break;
	default:
		GPU_LOG(DVFS_ERROR, "[G3D] Wrong event, %lu,  in the kbase_tmu_hot_check_and_work function\n", event);
		return 0;
	}

	platform->target_lock_type = TMU_LOCK;
	gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MAX_LOCK, throttling_clock);
#endif /* CONFIG_MALI_T6XX_DVFS */
	return 0;
}
/* "Exemplo n.º 12" / "0": extraction artifact (example separator and vote
 * count from a code-sample aggregator) — commented out so the file
 * remains valid C. */
/* sysfs store: "0" disables TMU-driven control (clears the thermal
 * voltage margin and the thermal max-frequency lock), "1" re-enables it.
 * Any other input is rejected with a warning; always returns @count
 * unless the device context is missing (-ENODEV). */
static ssize_t set_tmu_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct exynos_context *platform;

	/* dev_get_drvdata() can return NULL; every sibling store handler
	 * checks it before touching platform_context. */
	if (!kbdev)
		return -ENODEV;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	if (sysfs_streq("0", buf)) {
		/* Drop any thermal voltage margin before unlocking. */
		if (platform->voltage_margin != 0) {
			platform->voltage_margin = 0;
			gpu_control_state_set(kbdev, GPU_CONTROL_SET_MARGIN, 0);
		}
		platform->target_lock_type = TMU_LOCK;
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MAX_UNLOCK, 0);
		platform->tmu_status = false;
	} else if (sysfs_streq("1", buf)) {
		platform->tmu_status = true;
	} else {
		GPU_LOG(DVFS_WARNING, "invalid val -only [0 or 1] is accepted\n");
	}
#endif /* CONFIG_MALI_T6XX_DVFS */
	return count;
}
/* "Exemplo n.º 13" / "0": extraction artifact (example separator and vote
 * count from a code-sample aggregator) — commented out so the file
 * remains valid C. */
/* sysfs store: sets the interactive governor's highspeed clock.
 * The value is validated against the DVFS table and clamped to the
 * current maximum lock before being stored under the DVFS spinlock.
 * Returns @count on success, -ENODEV without context, -ENOENT on bad
 * input. */
static ssize_t set_highspeed_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	ssize_t ret = 0;
	unsigned long flags;
	int highspeed_clock = -1;
	struct kbase_device *kbdev;
	struct exynos_context *platform;

	kbdev = dev_get_drvdata(dev);
	/* dev_get_drvdata() can return NULL; check before dereferencing,
	 * as the other store handlers in this file do. */
	if (!kbdev)
		return -ENODEV;

	platform = (struct exynos_context *)kbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &highspeed_clock);
	if (ret) {
		GPU_LOG(DVFS_WARNING, "%s: invalid value\n", __func__);
		return -ENOENT;
	}

	/* The clock must map to a valid DVFS level. */
	ret = gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_LEVEL, highspeed_clock);
	if ((ret < 0) || (ret > platform->table_size - 1)) {
		GPU_LOG(DVFS_WARNING, "%s: invalid clock value (%d)\n", __func__, highspeed_clock);
		return -ENOENT;
	}

	/* NOTE(review): elsewhere in this file max_lock is treated as
	 * "no lock" when it is <= 0; if that holds here, an inactive lock
	 * would clamp highspeed_clock to 0 — confirm whether this should
	 * be guarded with (platform->max_lock > 0). */
	if (highspeed_clock > platform->max_lock)
		highspeed_clock = platform->max_lock;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->interactive.highspeed_clock = highspeed_clock;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return count;
}
/* Central entry point for GPU power/clock state transitions.  Every
 * operation is serialized behind platform->gpu_clock_lock.
 *
 * @kbdev: kbase device
 * @state: requested operation (gpu_control_state)
 * @param: operation argument (e.g. the target clock for
 *         GPU_CONTROL_CHANGE_CLK_VOL)
 *
 * Returns an operation-specific value (0 or positive on success),
 * -ENODEV without a platform context, or -1 for an unknown @state.
 *
 * NOTE(review): the GPU_EARLY_CLK_GATING #ifdefs deliberately splice
 * case labels so that, with early gating disabled, CLOCK_ON falls
 * through into the DVFS-timer start code and CLOCK_OFF runs the timer
 * stop code before gating the clock.  Statement order here is
 * load-bearing; do not reorder.
 */
int gpu_control_state_set(struct kbase_device *kbdev, gpu_control_state state, int param)
{
	int ret = 0, voltage;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	mutex_lock(&platform->gpu_clock_lock);
	switch (state) {
	case GPU_CONTROL_CLOCK_ON:
		ret = gpu_clock_on(platform);
#ifdef GPU_EARLY_CLK_GATING
		break;
	case GPU_CONTROL_CLOCK_ON_POST:
#endif /* GPU_EARLY_CLK_GATING*/
#ifdef CONFIG_MALI_MIDGARD_DVFS
		/* (Re)start the DVFS metrics polling timer if it is idle. */
		if (!kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = true;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
#else
			/* Fallback platform uses a plain timer pinned to CPU0. */
			kbdev->pm.metrics.tlist.expires = jiffies + msecs_to_jiffies(platform->polling_speed);
			add_timer_on(&kbdev->pm.metrics.tlist, 0);
#endif
		}
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, 0);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
#ifdef GPU_EARLY_CLK_GATING
	case GPU_CONTROL_CLOCK_OFF_POST:
#else
	case GPU_CONTROL_CLOCK_OFF:
#endif /* GPU_EARLY_CLK_GATING*/
#ifdef CONFIG_MALI_MIDGARD_DVFS
		/* Stop the DVFS metrics timer before power-down. */
		if (platform->dvfs_status && kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = false;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_cancel(&kbdev->pm.metrics.timer);
#else
			del_timer(&kbdev->pm.metrics.tlist);
#endif
		}
		gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_RESET);
		/* Fold the final interval at the current clock into accounting. */
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, platform->cur_clock);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
#ifdef GPU_EARLY_CLK_GATING
		break;
	case GPU_CONTROL_CLOCK_OFF:
#endif /* GPU_EARLY_CLK_GATING*/
		ret = gpu_clock_off(platform);
		break;
	case GPU_CONTROL_CHANGE_CLK_VOL:
		/* Look up the table voltage for @param and program the pair. */
		ret = gpu_set_clk_vol(kbdev, param, gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_VOLTAGE, param));
#ifdef CONFIG_MALI_MIDGARD_DVFS
		if (ret == 0) {
			/* Cache the DVFS step matching the now-current clock. */
			ret = gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_LEVEL, platform->cur_clock);
			if (ret >= 0) {
				spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
				platform->step = ret;
				spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			} else {
				GPU_LOG(DVFS_ERROR, "Invalid dvfs level returned [%d]\n", GPU_CONTROL_CHANGE_CLK_VOL);
			}
		}
		/* NOTE(review): "< -1" looks suspicious — a failure return of -1
		 * would not be reported; confirm whether "< 0" was intended. */
		if (gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_SET) < -1)
			GPU_LOG(DVFS_ERROR, "failed to set the PM_QOS\n");
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
	case GPU_CONTROL_PREPARE_ON:
#ifdef CONFIG_MALI_MIDGARD_DVFS
		/* Choose the wake-up clock, honoring wakeup lock and min/max
		 * frequency locks (a lock value of 0 means "not active"). */
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (platform->dvfs_status && platform->wakeup_lock)
			platform->cur_clock = MALI_DVFS_START_FREQ;

		if (platform->min_lock > 0)
			platform->cur_clock = MAX(platform->min_lock, platform->cur_clock);
		if (platform->max_lock > 0)
			platform->cur_clock = MIN(platform->max_lock, platform->cur_clock);

		platform->down_requirement = platform->table[platform->step].stay_count;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
	case GPU_CONTROL_IS_POWER_ON:
		ret = gpu_is_power_on();
		break;
	case GPU_CONTROL_SET_MARGIN:
		/* Re-apply the current step's voltage with the thermal margin,
		 * never dropping below the cold-boot floor. */
		voltage = MAX(platform->table[platform->step].voltage + platform->voltage_margin, COLD_MINIMUM_VOL);
		gpu_set_voltage(platform, voltage);
		GPU_LOG(DVFS_DEBUG, "we set the voltage: %d\n", voltage);
		break;
	default:
		mutex_unlock(&platform->gpu_clock_lock);
		return -1;
	}
	mutex_unlock(&platform->gpu_clock_lock);

	return ret;
}