Example #1
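/*
 * Default governor (exynos_context API). The DVFS table is ordered from
 * the highest clock (lowest step index) to the lowest clock, so step--
 * raises the frequency and step++ lowers it; down_requirement counts how
 * many consecutive low-utilization polls must pass before stepping down.
 */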
static int gpu_dvfs_governor_default(struct exynos_context *platform, int utilization)
{
	DVFS_ASSERT(platform);

	if ((platform->step > gpu_dvfs_get_level(platform->gpu_max_clock)) &&
			(utilization > platform->table[platform->step].max_threshold)) {
		platform->step--;
#ifdef MALI_SEC_HWCNT
		if ((!platform->hwcnt_bt_clk) && (platform->table[platform->step].clock > platform->gpu_max_clock_limit))
			platform->step = gpu_dvfs_get_level(platform->gpu_max_clock_limit);
#else
		if (platform->table[platform->step].clock > platform->gpu_max_clock_limit)
			platform->step = gpu_dvfs_get_level(platform->gpu_max_clock_limit);
#endif
		platform->down_requirement = platform->table[platform->step].down_staycount;
	} else if ((platform->step < gpu_dvfs_get_level(platform->gpu_min_clock)) && (utilization < platform->table[platform->step].min_threshold)) {
		platform->down_requirement--;
		if (platform->down_requirement == 0) {
			platform->step++;
			platform->down_requirement = platform->table[platform->step].down_staycount;
		}
	} else {
		platform->down_requirement = platform->table[platform->step].down_staycount;
	}
	DVFS_ASSERT((platform->step >= gpu_dvfs_get_level(platform->gpu_max_clock))
					&& (platform->step <= gpu_dvfs_get_level(platform->gpu_min_clock)));

	return 0;
}
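/*
 * Variant of the default governor built on the kbase_device API. Here the
 * table is ordered the other way around (a higher step index means a
 * higher clock), so the step is clamped by table_size rather than by the
 * gpu_max_clock/gpu_min_clock levels.
 */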
static int gpu_dvfs_governor_default(struct kbase_device *kbdev, int utilization)
{
	struct exynos_context *platform;

	platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	if ((platform->step < platform->table_size-1) &&
			(utilization > platform->table[platform->step].max_threshold)) {
		platform->step++;
		platform->down_requirement = platform->table[platform->step].stay_count;
		DVFS_ASSERT(platform->step < platform->table_size);
	} else if ((platform->step > 0) && (utilization < platform->table[platform->step].min_threshold)) {
		DVFS_ASSERT(platform->step > 0);
		platform->down_requirement--;
		if (platform->down_requirement == 0) {
			platform->step--;
			platform->down_requirement = platform->table[platform->step].stay_count;
		}
	} else {
		platform->down_requirement = platform->table[platform->step].stay_count;
	}

	return 0;
}
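/*
 * Legacy (T6XX-era) next-level decision: refreshes the ASV state if
 * requested, steps up or down based on the thresholds in
 * mali_dvfs_infotbl, and finally clamps the step to the optional min/max
 * frequency locks, all under mali_dvfs_spinlock.
 */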
static void mali_dvfs_decide_next_level(mali_dvfs_status *dvfs_status)
{
	unsigned long flags;
	struct exynos_context *platform;

	platform = (struct exynos_context *)dvfs_status->kbdev->platform_context;
#ifdef MALI_DVFS_ASV_ENABLE
	if (dvfs_status->asv_status == ASV_STATUS_DISABLE_REQ) {
		dvfs_status->asv_status = mali_dvfs_update_asv(ASV_CMD_DISABLE);
	} else if (dvfs_status->asv_status == ASV_STATUS_NOT_INIT) {
		dvfs_status->asv_status = mali_dvfs_update_asv(ASV_CMD_ENABLE);
	}
#endif
	spin_lock_irqsave(&mali_dvfs_spinlock, flags);

#ifdef CONFIG_EXYNOS_THERMAL
	if (dvfs_status->step == kbase_platform_dvfs_get_level(GPU_MAX_CLK)) {
		dvfs_status->step--;
		goto skip;
	}
#endif

	if (dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) {
#ifdef PLATFORM_UTILIZATION
		if (dvfs_status->step == kbase_platform_dvfs_get_level(500)) {
			if (platform->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) {
				dvfs_status->step++;
				DVFS_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
			}
		} else {
#endif
			dvfs_status->step++;
			DVFS_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
#ifdef PLATFORM_UTILIZATION
		}
#endif
	} else if ((dvfs_status->step > 0) &&
			(platform->time_tick == MALI_DVFS_TIME_INTERVAL) &&
			(platform->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) {
		DVFS_ASSERT(dvfs_status->step > 0);
		dvfs_status->step--;
	}
#ifdef CONFIG_EXYNOS_THERMAL
skip:
#endif
#ifdef CONFIG_MALI_T6XX_FREQ_LOCK
	if (dvfs_status->min_lock > 0) {
		if (dvfs_status->step < dvfs_status->min_lock)
			dvfs_status->step = dvfs_status->min_lock;
	}

	if ((dvfs_status->max_lock >= 0) && (dvfs_status->step > dvfs_status->max_lock)) {
		dvfs_status->step = dvfs_status->max_lock;
	}
#endif
	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
}
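/*
 * Refreshes the utilization on every call; the MALI_SEC_HWCNT path
 * additionally samples the hardware counters once per
 * hwcnt_polling_speed window, and is skipped while DVS is enabled or the
 * GPU is powered off.
 */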
int gpu_dvfs_calculate_env_data(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	static int polling_period;

	DVFS_ASSERT(platform);

	gpu_dvfs_update_utilization(kbdev);

	polling_period -= platform->polling_speed;
	if (polling_period > 0)
		return 0;

	if (platform->dvs_is_enabled == true)
		return 0;

#ifdef MALI_SEC_HWCNT
	if (kbdev->hwcnt.is_hwcnt_attach == true && kbdev->hwcnt.is_hwcnt_enable == true
		&& kbdev->hwcnt.is_hwcnt_gpr_enable == false) {
		polling_period = platform->hwcnt_polling_speed;
		if (!gpu_control_is_power_on(kbdev))
			return 0;
		mutex_lock(&kbdev->hwcnt.mlock);
		if (kbdev->vendor_callbacks->hwcnt_update) {
			kbdev->vendor_callbacks->hwcnt_update(kbdev);
			dvfs_hwcnt_get_resource(kbdev);
			dvfs_hwcnt_utilization_equation(kbdev);
		}
		mutex_unlock(&kbdev->hwcnt.mlock);
	}
#endif

	return 0;
}
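/*
 * Manages the proactive PM QoS floor requests (MIF bus throughput, the
 * apollo/atlas CPU clusters and, unless disabled, the INT domain):
 * INIT registers them, DEINIT removes them, and RESET drops the
 * MIF/cluster floors back to zero.
 */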
int proactive_pm_qos_command(struct exynos_context *platform, gpu_pmqos_state state)
{
	DVFS_ASSERT(platform);

	if (!platform->devfreq_status)
		return 0;

	switch (state) {
		case GPU_CONTROL_PM_QOS_INIT:
			pm_qos_add_request(&proactive_mif_min_qos, PM_QOS_BUS_THROUGHPUT, 0);
			pm_qos_add_request(&proactive_apollo_min_qos, PM_QOS_CLUSTER0_FREQ_MIN, 0);
			pm_qos_add_request(&proactive_atlas_min_qos, PM_QOS_CLUSTER1_FREQ_MIN, 0);
			if (!platform->pmqos_int_disable)
				pm_qos_add_request(&proactive_int_min_qos, PM_QOS_DEVICE_THROUGHPUT, 0);
			break;
		case GPU_CONTROL_PM_QOS_DEINIT:
			pm_qos_remove_request(&proactive_mif_min_qos);
			pm_qos_remove_request(&proactive_apollo_min_qos);
			pm_qos_remove_request(&proactive_atlas_min_qos);
			if (!platform->pmqos_int_disable)
				pm_qos_remove_request(&proactive_int_min_qos);
			break;
		case GPU_CONTROL_PM_QOS_RESET:
			pm_qos_update_request(&proactive_mif_min_qos, 0);
			pm_qos_update_request(&proactive_apollo_min_qos, 0);
			pm_qos_update_request(&proactive_atlas_min_qos, 0);
			break;
		default:
			break;
	}

	return 0;
}
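/*
 * Turns DVFS on or off as a whole: on enable it re-applies the current
 * clock, re-initializes the handler and starts the polling timer; on
 * disable it stops the timer, tears the handler down and pins the GPU at
 * the configured clock.
 */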
int gpu_dvfs_on_off(bool enable)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (enable && !platform->dvfs_status) {
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_set_target_clk_vol(platform->cur_clock, false);
		gpu_dvfs_handler_init(kbdev);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);

		gpu_dvfs_timer_control(true);
	} else if (!enable && platform->dvfs_status) {
		gpu_dvfs_timer_control(false);

		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_handler_deinit(kbdev);
		gpu_set_target_clk_vol(platform->gpu_dvfs_config_clock, false);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
	} else {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: impossible state to change dvfs status (current: %d, request: %d)\n",
				__func__, platform->dvfs_status, enable);
		return -1;
	}

	return 0;
}
Example #7
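/*
 * Samples the hardware counters under hwcnt.mlock while the GPU is
 * powered, then clears the cached counter value under the DVFS spinlock.
 */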
static int gpu_dvfs_update_hwc(struct kbase_device *kbdev)
{
	unsigned long flags;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (!platform->hwcnt_gathering_status)
		return 0;

	if (!gpu_control_is_power_on(kbdev))
		return 0;

	mutex_lock(&kbdev->hwcnt.mlock);
	exynos_gpu_hwcnt_update(kbdev);
	mutex_unlock(&kbdev->hwcnt.mlock);

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->env_data.hwcnt = 0;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
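	/* Note: env_data.hwcnt was cleared just above, so this trace always reports 0. */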

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "Current HWC: %d\n", platform->env_data.hwcnt);

	return 0;
}
Example #8
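/*
 * Samples GPU busy/idle time from the power-management metrics and caches
 * the resulting utilization under the DVFS spinlock; with CPU_THERMAL_IPA
 * enabled, busy/idle time is accumulated over gpu_dvfs_time_interval ticks
 * before being normalized.
 */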
static void gpu_dvfs_update_utilization(struct kbase_device *kbdev)
{
	unsigned long flags;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

#if defined(CONFIG_MALI_DVFS) && defined(CONFIG_CPU_THERMAL_IPA)
	if (platform->time_tick < platform->gpu_dvfs_time_interval) {
		platform->time_tick++;
		platform->time_busy += kbdev->pm.metrics.time_busy;
		platform->time_idle += kbdev->pm.metrics.time_idle;
	} else {
		platform->time_busy = kbdev->pm.metrics.time_busy;
		platform->time_idle = kbdev->pm.metrics.time_idle;
		platform->time_tick = 0;
	}
#endif /* CONFIG_MALI_DVFS && CONFIG_CPU_THERMAL_IPA */

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
#if SLSI_INTEGRATION  /* Temporary patch */
	platform->env_data.utilization = kbase_pm_get_dvfs_utilisation(kbdev, 0, 0);
#else
	platform->env_data.utilization = kbase_platform_dvfs_event(kbdev, 0);
#endif
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

#if defined(CONFIG_MALI_DVFS) && defined(CONFIG_CPU_THERMAL_IPA)
	gpu_ipa_dvfs_calc_norm_utilisation(kbdev);
#endif /* CONFIG_MALI_DVFS && CONFIG_CPU_THERMAL_IPA */
}
Example #9
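/*
 * Normalizes the raw performance-monitor count against the current clock
 * (pmcnt * 1000 / cur_clock) and caches it under the DVFS spinlock;
 * skipped unless perf gathering is enabled and the GPU is powered.
 */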
static int gpu_dvfs_update_perf(struct kbase_device *kbdev)
{
	unsigned long flags;
	unsigned int pmcnt;
	u64 perfmon;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (!platform->perf_gathering_status)
		return 0;

	if (!gpu_control_is_power_on(kbdev))
		return 0;

	exynos_gpu_perf_update(&pmcnt);
	exynos_gpu_perf_reset();
	perfmon = div_u64((u64)pmcnt * 1000, platform->cur_clock);

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->env_data.perf = perfmon;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "Current PERF: %d\n", platform->env_data.perf);

	return 0;
}
Example #10
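/*
 * Selects and installs the governor (the platform-configured type when
 * CONFIG_MALI_DVFS is set, the default otherwise), then rebuilds the ASV
 * table and re-evaluates the maximum clock.
 */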
int gpu_dvfs_governor_init(struct kbase_device *kbdev)
{
	int governor_type = G3D_DVFS_GOVERNOR_DEFAULT;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

#ifdef CONFIG_MALI_DVFS
	governor_type = platform->governor_type;
#endif /* CONFIG_MALI_DVFS */
	if (gpu_dvfs_governor_setting(platform, governor_type) < 0) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: fail to initialize governor\n", __func__);
		return -1;
	}

	/* table_size can be shared among governors, as every governor has the same table_size. */
	platform->save_cpu_max_freq = kmalloc(sizeof(int) * platform->table_size, GFP_KERNEL);
	if (!platform->save_cpu_max_freq)
		return -ENOMEM;
	gpu_dvfs_update_asv_table(platform);
	gpu_dvfs_decide_max_clock(platform);
#if defined(CONFIG_MALI_DVFS) && defined(CONFIG_CPU_THERMAL_IPA)
	gpu_ipa_dvfs_calc_norm_utilisation(kbdev);
#endif /* CONFIG_MALI_DVFS && CONFIG_CPU_THERMAL_IPA */

	return 0;
}
Example #11
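/*
 * Static (sweep) governor: every G3D_GOVERNOR_STATIC_PERIOD polls it moves
 * one level toward the max clock, then back toward the min clock,
 * reversing direction at the frequency locks or the table bounds.
 */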
static int gpu_dvfs_governor_static(struct exynos_context *platform, int utilization)
{
	static bool step_down = true;
	static int count;

	DVFS_ASSERT(platform);

	if (count == G3D_GOVERNOR_STATIC_PERIOD) {
		if (step_down) {
			if (platform->step > gpu_dvfs_get_level(platform->gpu_max_clock))
				platform->step--;
			if (((platform->max_lock > 0) && (platform->table[platform->step].clock == platform->max_lock))
					|| (platform->step == gpu_dvfs_get_level(platform->gpu_max_clock)))
				step_down = false;
		} else {
			if (platform->step < gpu_dvfs_get_level(platform->gpu_min_clock))
				platform->step++;
			if (((platform->min_lock > 0) && (platform->table[platform->step].clock == platform->min_lock))
					|| (platform->step == gpu_dvfs_get_level(platform->gpu_min_clock)))
				step_down = true;
		}

		count = 0;
	} else {
		count++;
	}

	return 0;
}
Example #12
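/*
 * While the GPU sits at the sustainable clock, counts consecutive polls at
 * 100% utilization and, past the threshold, requests the low-power
 * cluster1 max-lock; any other clock resets the count, and each
 * non-saturated poll doubles the threshold.
 */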
int gpu_sustainable_pmqos(struct exynos_context *platform, int clock)
{
	static int full_util_count;
	static int threshold_maxlock = 20;

	DVFS_ASSERT(platform);
	platform->sustainable.low_power_cluster1_clock = 0;

	if (!platform->devfreq_status)
		return 0;

	if (clock != platform->sustainable.sustainable_gpu_clock) {
		full_util_count = 0;
		threshold_maxlock = platform->sustainable.threshold;
		return 0;
	}

	if (platform->env_data.utilization == 100) {
		full_util_count++;

		if (full_util_count > threshold_maxlock) {
			platform->sustainable.low_power_cluster1_clock = platform->sustainable.low_power_cluster1_maxlock;
		}
	} else {
		full_util_count = 0;
		threshold_maxlock *= 2;
	}

	return 0;
}
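/*
 * Validates the requested clock and, when DVFS is active, clamps it to the
 * min/max frequency locks before refreshing platform->step for the
 * resulting clock.
 */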
static int gpu_check_target_clock(struct exynos_context *platform, int clock)
{
	int target_clock = clock;

	DVFS_ASSERT(platform);

	if (gpu_dvfs_get_level(target_clock) < 0)
		return -1;

#ifdef CONFIG_MALI_DVFS
	if (!platform->dvfs_status)
		return target_clock;

	GPU_LOG(DVFS_DEBUG, DUMMY, 0u, 0u, "clock: %d, min: %d, max: %d\n", clock, platform->min_lock, platform->max_lock);

	if ((platform->min_lock > 0) && (platform->power_status) &&
			((target_clock < platform->min_lock) || (platform->cur_clock < platform->min_lock)))
		target_clock = platform->min_lock;

	if ((platform->max_lock > 0) && (target_clock > platform->max_lock))
		target_clock = platform->max_lock;
#endif /* CONFIG_MALI_DVFS */

	platform->step = gpu_dvfs_get_level(target_clock);

	return target_clock;
}
Example #14
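/*
 * Starts or cancels the DVFS polling timer (an hrtimer, or a plain kernel
 * timer when SLSI_SUBSTITUTE is defined) and, on start, re-arms
 * down_requirement so the governor does not immediately step down.
 */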
static void gpu_dvfs_timer_control(bool enable)
{
	unsigned long flags;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (!platform->dvfs_status) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: DVFS is disabled\n", __func__);
		return;
	}

	if (kbdev->pm.metrics.timer_active && !enable) {
#if !defined(SLSI_SUBSTITUTE)
		hrtimer_cancel(&kbdev->pm.metrics.timer);
#else
		del_timer(&kbdev->pm.metrics.tlist);
#endif /* SLSI_SUBSTITUTE */
	} else if (!kbdev->pm.metrics.timer_active && enable) {
#if !defined(SLSI_SUBSTITUTE)
		hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
#else
		kbdev->pm.metrics.tlist.expires = jiffies + msecs_to_jiffies(platform->polling_speed);
		add_timer_on(&kbdev->pm.metrics.tlist, 0);
#endif /* SLSI_SUBSTITUTE */
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		platform->down_requirement = platform->table[platform->step].stay_count;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
	}

	spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
	kbdev->pm.metrics.timer_active = enable;
	spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
}
Example #15
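/*
 * Applies a pending clock change: validates the clock against the
 * frequency locks, derives the voltage from the DVFS table plus the
 * voltage margin (bounded below by the cold minimum voltage), and
 * refreshes the cached level.
 */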
int gpu_set_target_clk_vol_pending(int clk)
{
	int ret = 0, target_clk = 0, target_vol = 0;
	int prev_clk = 0;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	target_clk = gpu_check_target_clock(platform, clk);
	if (target_clk < 0) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
				"%s: mismatch clock error (source %d, target %d)\n", __func__, clk, target_clk);
		return -1;
	}

	target_vol = MAX(gpu_dvfs_get_voltage(target_clk) + platform->voltage_margin, platform->cold_min_vol);

	prev_clk = gpu_get_cur_clock(platform);

	GPU_SET_CLK_VOL(kbdev, platform->cur_clock, target_clk, target_vol);

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "pending clk[%d -> %d], vol[%d (margin : %d)]\n",
		prev_clk, gpu_get_cur_clock(platform), gpu_get_cur_voltage(platform), platform->voltage_margin);

	ret = gpu_update_cur_level(platform);

	return ret;
}
Example #16
int gpu_dvfs_stop_env_data_gathering(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	return 0;
}
Example #17
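/*
 * Interactive governor: behaves like the default governor, except that
 * when utilization exceeds highspeed_load it can jump directly to
 * highspeed_clock after highspeed_delay consecutive busy polls instead of
 * stepping up one level at a time.
 */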
static int gpu_dvfs_governor_interactive(struct exynos_context *platform, int utilization)
{
	DVFS_ASSERT(platform);

	if ((platform->step > gpu_dvfs_get_level(platform->gpu_max_clock))
			&& (utilization > platform->table[platform->step].max_threshold)) {
		int highspeed_level = gpu_dvfs_get_level(platform->interactive.highspeed_clock);
		if ((highspeed_level > 0) && (platform->step > highspeed_level)
				&& (utilization > platform->interactive.highspeed_load)) {
			if (platform->interactive.delay_count == platform->interactive.highspeed_delay) {
				platform->step = highspeed_level;
				platform->interactive.delay_count = 0;
			} else {
				platform->interactive.delay_count++;
			}
		} else {
			platform->step--;
			platform->interactive.delay_count = 0;
		}
#ifdef MALI_SEC_HWCNT
		if ((!platform->hwcnt_bt_clk) && (platform->table[platform->step].clock > platform->gpu_max_clock_limit))
			platform->step = gpu_dvfs_get_level(platform->gpu_max_clock_limit);
#else
		if (platform->table[platform->step].clock > platform->gpu_max_clock_limit)
			platform->step = gpu_dvfs_get_level(platform->gpu_max_clock_limit);
#endif
		platform->down_requirement = platform->table[platform->step].down_staycount;
	} else if ((platform->step < gpu_dvfs_get_level(platform->gpu_min_clock))
			&& (utilization < platform->table[platform->step].min_threshold)) {
		platform->interactive.delay_count = 0;
		platform->down_requirement--;
		if (platform->down_requirement == 0) {
			platform->step++;
			platform->down_requirement = platform->table[platform->step].down_staycount;
		}
	} else {
		platform->interactive.delay_count = 0;
		platform->down_requirement = platform->table[platform->step].down_staycount;
	}

	DVFS_ASSERT((platform->step >= gpu_dvfs_get_level(platform->gpu_max_clock))
					&& (platform->step <= gpu_dvfs_get_level(platform->gpu_min_clock)));

	return 0;
}
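/* Despite its name, this returns the number of DVFS steps (the table size), not the current step. */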
int gpu_dvfs_get_step(void)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	return platform->table_size;
}
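/*
 * Older booster variant (kbase_device API, higher step = higher clock):
 * jumps two steps up when the weighted load (clock * utilization) rises by
 * more than 50% of the current clock between polls, bounded by the lowest
 * table entry whose max_threshold is 100.
 */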
static int gpu_dvfs_governor_booster(struct kbase_device *kbdev, int utilization)
{
	struct exynos_context *platform;
	static int weight;
	int cur_weight, booster_threshold, dvfs_table_lock, i;

	platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	cur_weight = platform->cur_clock * utilization;
	/* booster_threshold = current clock * utilization threshold percentage (50%) */
	booster_threshold = platform->cur_clock * 50;

	dvfs_table_lock = platform->table_size-1;
	for (i = platform->table_size-1; i >= 0; i--)
		if (platform->table[i].max_threshold == 100)
			dvfs_table_lock = i;

	if ((platform->step < dvfs_table_lock-2) &&
			((cur_weight - weight) > booster_threshold)) {
		platform->step += 2;
		platform->down_requirement = platform->table[platform->step].stay_count;
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "[G3D_booster] increase G3D level by 2 steps\n");
		DVFS_ASSERT(platform->step < platform->table_size);
	} else if ((platform->step < platform->table_size-1) &&
			(utilization > platform->table[platform->step].max_threshold)) {
		platform->step++;
		platform->down_requirement = platform->table[platform->step].stay_count;
		DVFS_ASSERT(platform->step < platform->table_size);
	} else if ((platform->step > 0) && (utilization < platform->table[platform->step].min_threshold)) {
		DVFS_ASSERT(platform->step > 0);
		platform->down_requirement--;
		if (platform->down_requirement == 0) {
			platform->step--;
			platform->down_requirement = platform->table[platform->step].stay_count;
		}
	} else {
		platform->down_requirement = platform->table[platform->step].stay_count;
	}
	weight = cur_weight;

	return 0;
}
int gpu_dvfs_calculate_env_data_ppmu(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	gpu_dvfs_update_perf(kbdev);

	return 0;
}
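/* Variant that refreshes both the utilization and the hardware-counter data on each poll. */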
int gpu_dvfs_calculate_env_data(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	gpu_dvfs_update_utilization(kbdev);
	gpu_dvfs_update_hwc(kbdev);

	return 0;
}
Example #22
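/* Wrapper that serializes timer control with the DVFS handler mutex. */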
void gpu_dvfs_timer_control_locked(bool enable)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	DVFS_ASSERT(platform);

	mutex_lock(&platform->gpu_dvfs_handler_lock);
	gpu_dvfs_timer_control(enable);
	mutex_unlock(&platform->gpu_dvfs_handler_lock);
}
int gpu_int_pmqos(struct exynos_context *platform, int int_freq)
{
	DVFS_ASSERT(platform);

	if (!platform->devfreq_status)
		return 0;

	pm_qos_update_request(&proactive_int_min_qos, int_freq);

	return 0;
}
Example #24
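/* Resets the performance counters so that gathering starts from a clean window. */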
int gpu_dvfs_start_env_data_gathering(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (platform->perf_gathering_status)
		exynos_gpu_perf_reset();

	return 0;
}
Example #25
int gpu_atlas_min_pmqos(struct exynos_context *platform, int atlas_step)
{
	DVFS_ASSERT(platform);

	if (!platform->devfreq_status)
		return 0;

	pm_qos_update_request_timeout(&proactive_atlas_min_qos, platform->atlas_table[atlas_step], 30000);

	return 0;
}
int gpu_dvfs_get_clock(int level)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if ((level < 0) || (level >= platform->table_size))
		return -1;

	return platform->table[level].clock;
}
int gpu_atlas_pmqos(struct exynos_context *platform, int atlas_freq)
{
	DVFS_ASSERT(platform);

	if (!platform->devfreq_status)
		return 0;

	/* A timed request is used so the boost expires on its own (timeout in usec). */
	pm_qos_update_request_timeout(&proactive_atlas_min_qos, atlas_freq, 30000);

	return 0;
}
int gpu_dvfs_get_cur_asv_abb(void)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if ((platform->step < 0) || (platform->step >= platform->table_size))
		return 0;

	return platform->table[platform->step].asv_abb;
}
Example #29
int gpu_dvfs_utilization_deinit(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	exynos_gpu_perf_deinit();

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "utilization module de-initialized\n");

	return 0;
}
Example #30
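/*
 * Booster governor (exynos_context API, lower step = higher clock): jumps
 * two steps toward the max clock when the weighted load
 * (clock * utilization) rises by more than 50% of the current clock
 * between polls; otherwise it behaves like the default governor.
 */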
static int gpu_dvfs_governor_booster(struct exynos_context *platform, int utilization)
{
	static int weight;
	int cur_weight, booster_threshold, dvfs_table_lock;

	DVFS_ASSERT(platform);

	cur_weight = platform->cur_clock * utilization;
	/* booster_threshold = current clock * utilization threshold percentage (50%) */
	booster_threshold = platform->cur_clock * 50;

	dvfs_table_lock = gpu_dvfs_get_level(platform->gpu_max_clock);

	if ((platform->step >= dvfs_table_lock+2) &&
			((cur_weight - weight) > booster_threshold)) {
		platform->step -= 2;
		platform->down_requirement = platform->table[platform->step].down_staycount;
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "Booster Governor: raised G3D clock by 2 steps\n");
	} else if ((platform->step > gpu_dvfs_get_level(platform->gpu_max_clock)) &&
			(utilization > platform->table[platform->step].max_threshold)) {
		platform->step--;
		platform->down_requirement = platform->table[platform->step].down_staycount;
	} else if ((platform->step < gpu_dvfs_get_level(platform->gpu_min_clock)) &&
			(utilization < platform->table[platform->step].min_threshold)) {
		platform->down_requirement--;
		if (platform->down_requirement == 0) {
			platform->step++;
			platform->down_requirement = platform->table[platform->step].down_staycount;
		}
	} else {
		platform->down_requirement = platform->table[platform->step].down_staycount;
	}

	DVFS_ASSERT((platform->step >= gpu_dvfs_get_level(platform->gpu_max_clock))
					&& (platform->step <= gpu_dvfs_get_level(platform->gpu_min_clock)));

	weight = cur_weight;

	return 0;
}