Example #1
int kbase_platform_dvfs_enable(bool enable, int freq)
{
	mali_dvfs_status *dvfs_status;
	struct kbase_device *kbdev;
	unsigned long flags;
	struct rk_context *platform;

	dvfs_status = &mali_dvfs_status_current;
	kbdev = mali_dvfs_status_current.kbdev;

	BUG_ON(kbdev == NULL);
	platform = (struct rk_context *)kbdev->platform_context;

	mutex_lock(&mali_enable_clock_lock);

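	/* Arm or cancel the PM-metrics sampling hrtimer only when the requested state differs from the current one. */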
	if (enable != kbdev->pm.metrics.timer_active) {
		if (enable) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_TRUE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_start(&kbdev->pm.metrics.timer,
					HR_TIMER_DELAY_MSEC(KBASE_PM_DVFS_FREQUENCY),
					HRTIMER_MODE_REL);
		} else {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_FALSE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_cancel(&kbdev->pm.metrics.timer);
		}
	}

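	/* A frequency change resets the utilisation statistics and re-resolves the DVFS step before applying it. */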
	if (freq != MALI_DVFS_CURRENT_FREQ) {
		spin_lock_irqsave(&mali_dvfs_spinlock, flags);
		platform->time_tick = 0;
		platform->time_busy = 0;
		platform->time_idle = 0;
		platform->utilisation = 0;
		dvfs_status->step = kbase_platform_dvfs_get_level(freq);
		spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

		kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
	}

	mutex_unlock(&mali_enable_clock_lock);

	return MALI_TRUE;
}
Example #2
static int gpu_dvfs_on_off(struct kbase_device *kbdev, bool enable)
{
	unsigned long flags;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

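	/* Enable: restore the last clock/voltage pair, re-init the DVFS handler, and start polling. */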
	if (enable && !platform->dvfs_status) {
		platform->dvfs_status = true;
		gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->cur_clock);
		gpu_dvfs_handler_init(kbdev);
		if (!kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = true;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
#else
			kbdev->pm.metrics.tlist.expires = jiffies + msecs_to_jiffies(platform->polling_speed);
			add_timer_on(&kbdev->pm.metrics.tlist, 0);
#endif
		}
	} else if (!enable && platform->dvfs_status) {
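		/* Disable: tear down the handler, revert to MALI_DVFS_BL_CONFIG_FREQ, and stop polling. */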
		platform->dvfs_status = false;
		gpu_dvfs_handler_deinit(kbdev);
		gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, MALI_DVFS_BL_CONFIG_FREQ);
		if (kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = false;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_cancel(&kbdev->pm.metrics.timer);
#else
			del_timer(&kbdev->pm.metrics.tlist);
#endif
		}
	} else {
		GPU_LOG(DVFS_WARNING, "impossible state to change dvfs status (current: %d, request: %d)\n",
				platform->dvfs_status, enable);
		return -1;
	}
	return 0;
}
Example #3
void gpu_dvfs_timer_control(bool enable)
{
	unsigned long flags;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (!platform->dvfs_status) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: DVFS is disabled\n", __func__);
		return;
	}

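	/* In this MALI_SEC_INTEGRATION build the hrtimer paths are compiled out (#if 0) in favour of a delayed workqueue. */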
	if (kbdev->pm.metrics.timer_active && !enable) {
/* MALI_SEC_INTEGRATION */
#if 0
		hrtimer_cancel(&kbdev->pm.metrics.timer);
#else
		cancel_delayed_work(platform->delayed_work);
		flush_workqueue(platform->dvfs_wq);
#endif
	} else if (!kbdev->pm.metrics.timer_active && enable) {
/* MALI_SEC_INTEGRATION */
#if 0
		hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
#else
		queue_delayed_work_on(0, platform->dvfs_wq,
				platform->delayed_work, msecs_to_jiffies(platform->polling_speed));
#endif
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		platform->down_requirement = platform->table[platform->step].down_staycount;
		platform->interactive.delay_count = 0;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
	}

	spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
	kbdev->pm.metrics.timer_active = enable;
	spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
}
Example #4
int kbase_platform_dvfs_enable(struct kbase_device *kbdev, bool enable, int freq)
{
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

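	/* Same timer toggle as Example #1, but with kbdev passed in and the period taken from kbdev->pm.platform_dvfs_frequency. */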
	if (enable != kbdev->pm.metrics.timer_active) {
		if (enable) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_TRUE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_start(&kbdev->pm.metrics.timer,
					HR_TIMER_DELAY_MSEC(kbdev->pm.platform_dvfs_frequency),
					HRTIMER_MODE_REL);
		} else {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_FALSE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_cancel(&kbdev->pm.metrics.timer);
		}
	}

	return 1;
}
Example #5
int gpu_control_state_set(struct kbase_device *kbdev, gpu_control_state state, int param)
{
	int ret = 0, voltage;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	mutex_lock(&platform->gpu_clock_lock);
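	/* All clock, voltage and DVFS state transitions below are serialised by gpu_clock_lock. */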
	switch (state) {
	case GPU_CONTROL_CLOCK_ON:
		ret = gpu_clock_on(platform);
#ifdef GPU_EARLY_CLK_GATING
		break;
	case GPU_CONTROL_CLOCK_ON_POST:
#endif /* GPU_EARLY_CLK_GATING*/
#ifdef CONFIG_MALI_MIDGARD_DVFS
		if (!kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = true;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
#else
			kbdev->pm.metrics.tlist.expires = jiffies + msecs_to_jiffies(platform->polling_speed);
			add_timer_on(&kbdev->pm.metrics.tlist, 0);
#endif
		}
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, 0);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
#ifdef GPU_EARLY_CLK_GATING
	case GPU_CONTROL_CLOCK_OFF_POST:
#else
	case GPU_CONTROL_CLOCK_OFF:
#endif /* GPU_EARLY_CLK_GATING*/
#ifdef CONFIG_MALI_MIDGARD_DVFS
		if (platform->dvfs_status && kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = false;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_cancel(&kbdev->pm.metrics.timer);
#else
			del_timer(&kbdev->pm.metrics.tlist);
#endif
		}
		gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_RESET);
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, platform->cur_clock);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
#ifdef GPU_EARLY_CLK_GATING
		break;
	case GPU_CONTROL_CLOCK_OFF:
#endif /* GPU_EARLY_CLK_GATING*/
		ret = gpu_clock_off(platform);
		break;
	case GPU_CONTROL_CHANGE_CLK_VOL:
		ret = gpu_set_clk_vol(kbdev, param, gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_VOLTAGE, param));
#ifdef CONFIG_MALI_MIDGARD_DVFS
		if (ret == 0) {
			ret = gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_LEVEL, platform->cur_clock);
			if (ret >= 0) {
				spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
				platform->step = ret;
				spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			} else {
				GPU_LOG(DVFS_ERROR, "Invalid dvfs level returned [%d]\n", GPU_CONTROL_CHANGE_CLK_VOL);
			}
		}
		if (gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_SET) < -1)
			GPU_LOG(DVFS_ERROR, "failed to set the PM_QOS\n");
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
	case GPU_CONTROL_PREPARE_ON:
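		/* Before power-on: optionally jump back to MALI_DVFS_START_FREQ, then apply min/max lock clamps. */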
#ifdef CONFIG_MALI_MIDGARD_DVFS
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (platform->dvfs_status && platform->wakeup_lock)
			platform->cur_clock = MALI_DVFS_START_FREQ;

		if (platform->min_lock > 0)
			platform->cur_clock = MAX(platform->min_lock, platform->cur_clock);
		if (platform->max_lock > 0)
			platform->cur_clock = MIN(platform->max_lock, platform->cur_clock);

		platform->down_requirement = platform->table[platform->step].stay_count;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
	case GPU_CONTROL_IS_POWER_ON:
		ret = gpu_is_power_on();
		break;
	case GPU_CONTROL_SET_MARGIN:
		voltage = MAX(platform->table[platform->step].voltage + platform->voltage_margin, COLD_MINIMUM_VOL);
		gpu_set_voltage(platform, voltage);
		GPU_LOG(DVFS_DEBUG, "we set the voltage: %d\n", voltage);
		break;
	default:
		mutex_unlock(&platform->gpu_clock_lock);
		return -1;
	}
	mutex_unlock(&platform->gpu_clock_lock);

	return ret;
}
Example #6
int kbase_platform_dvfs_enable(bool enable, int freq)
{
	mali_dvfs_status *dvfs_status;
	struct kbase_device *kbdev;
	unsigned long flags;
	struct exynos_context *platform;
	int mif_qos, int_qos, cpu_qos;

	dvfs_status = &mali_dvfs_status_current;
	kbdev = mali_dvfs_status_current.kbdev;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	platform = (struct exynos_context *)kbdev->platform_context;

	mutex_lock(&mali_enable_clock_lock);

	if (freq != MALI_DVFS_CURRENT_FREQ) {
		spin_lock_irqsave(&mali_dvfs_spinlock, flags);
		platform->time_tick = 0;
		platform->time_busy = 0;
		platform->time_idle = 0;
		platform->utilisation = 0;
		dvfs_status->step = kbase_platform_dvfs_get_level(freq);
		spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

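		/* When restarting at MALI_DVFS_START_FREQ, honour any user min/max level locks. */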
		if (freq == MALI_DVFS_START_FREQ) {
			if (dvfs_status->min_lock != -1)
				dvfs_status->step = MAX(dvfs_status->min_lock, dvfs_status->step);
			if (dvfs_status->max_lock != -1)
				dvfs_status->step = MIN(dvfs_status->max_lock, dvfs_status->step);
		}

		kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
	}

	if (enable != kbdev->pm.metrics.timer_active) {
		if (enable) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_TRUE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_start(&kbdev->pm.metrics.timer,
					HR_TIMER_DELAY_MSEC(KBASE_PM_DVFS_FREQUENCY),
					HRTIMER_MODE_REL);

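			/* Raise MIF/INT/CPU PM QoS requests to match the current DVFS step; they drop to 0 on disable. */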
			DVFS_ASSERT(dvfs_status->step >= 0);

			mif_qos = mali_dvfs_infotbl[dvfs_status->step].mem_freq;
			int_qos = mali_dvfs_infotbl[dvfs_status->step].int_freq;
			cpu_qos = mali_dvfs_infotbl[dvfs_status->step].cpu_freq;
#if defined(CONFIG_ARM_EXYNOS5420_BUS_DEVFREQ)
			pm_qos_update_request(&exynos5_g3d_mif_qos, mif_qos);
			pm_qos_update_request(&exynos5_g3d_int_qos, int_qos);
			pm_qos_update_request(&exynos5_g3d_cpu_qos, cpu_qos);
#endif
		} else {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_FALSE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_cancel(&kbdev->pm.metrics.timer);
#if defined(CONFIG_ARM_EXYNOS5420_BUS_DEVFREQ)
			pm_qos_update_request(&exynos5_g3d_mif_qos, 0);
			pm_qos_update_request(&exynos5_g3d_int_qos, 0);
			pm_qos_update_request(&exynos5_g3d_cpu_qos, 0);
#endif
		}
	}
	mutex_unlock(&mali_enable_clock_lock);

	return MALI_TRUE;
}
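For context, here is a minimal usage sketch, assuming the bool/int variant of the helper shown in Examples #1 and #6: a platform's power on/off callbacks pausing and resuming DVFS around a GPU power cycle. The callback names are hypothetical, and parking at MALI_DVFS_BL_CONFIG_FREQ is an assumption borrowed from the disable path in Example #2, not something the examples above mandate.

/* Hypothetical callers (names invented for illustration). */
static int pm_callback_power_on_sketch(struct kbase_device *kbdev)
{
	/* Resume utilisation sampling; passing MALI_DVFS_CURRENT_FREQ leaves
	 * the clock untouched, since the helper only acts when freq differs. */
	kbase_platform_dvfs_enable(true, MALI_DVFS_CURRENT_FREQ);
	return 0;
}

static void pm_callback_power_off_sketch(struct kbase_device *kbdev)
{
	/* Stop sampling and park at the bootloader-configured clock
	 * (assumption: mirrors the disable path in Example #2). */
	kbase_platform_dvfs_enable(false, MALI_DVFS_BL_CONFIG_FREQ);
}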