Code example #1
void gpu_ipa_dvfs_calc_norm_utilisation(struct kbase_device *kbdev)
{
	int cur_freq;
	unsigned int cur_vol;
	int max_freq;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	int cur_utilisation;
	gpu_dvfs_info *dvfs_cur_info;

	if (!platform) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: platform context (0x%p) is not initialized\n", __func__, platform);
		return;
	}

	/* TODO:
	 * - Other callers of kbase_platform_dvfs_set_level()
	 */

	/* Get Current Op point */
	/* This is before mali_dvfs_event_proc queued, so the dvfs 'step' is taken before we change frequency */
	cur_utilisation = platform->env_data.utilization;
	dvfs_cur_info = &(platform->table[platform->step]); /* dvfs_status under spinlock */
	cur_freq = (int)dvfs_cur_info->clock;

	cur_vol = dvfs_cur_info->voltage/10000;
	/* Get Max Op point */
	max_freq = gpu_ipa_dvfs_max_allowed_freq(kbdev);

	/* Calculate */
	platform->norm_utilisation = (cur_utilisation * cur_freq)/max_freq;
	/* Store what frequency was used for normalization */
	platform->freq_for_normalisation = cur_freq;
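	/* Dynamic power estimate of the form coeff * f * V^2; the constant divisors only rescale the units */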
	platform->power = div_u64((u64)platform->ipa_power_coeff_gpu * cur_freq * cur_vol * cur_vol, 100000);

	gpu_ipa_trace_utilisation(kbdev);
}
Code example #2
static ssize_t show_utilization_stats(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
#ifdef CONFIG_EXYNOS_THERMAL
	struct kbase_device *kbdev;
	struct mali_debug_utilisation_stats stats;

	kbdev = dev_get_drvdata(dev);

	if (!kbdev)
		return -ENODEV;

#ifdef CONFIG_MALI_T6XX_DVFS
	gpu_ipa_dvfs_get_utilisation_stats(&stats);

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "util=%d norm_util=%d norm_freq=%d time_busy=%u time_idle=%u time_tick=%d",
					stats.s.utilisation, stats.s.norm_utilisation,
					stats.s.freq_for_norm, stats.time_busy, stats.time_idle,
					stats.time_tick);
#else
	ret += snprintf(buf+ret, PAGE_SIZE-ret, "-1");
#endif

	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}
#else
	GPU_LOG(DVFS_WARNING, "EXYNOS THERMAL build config is disabled, so utilization stats are not available\n");
#endif

	return ret;
}
Code example #3
static int gpu_tmu_hot_check_and_work(struct kbase_device *kbdev, unsigned long event)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct exynos_context *platform;
	int lock_clock;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	switch (event) {
	case GPU_THROTTLING1:
		lock_clock = GPU_THROTTLING_90_95;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_THROTTLING_90_95\n");
		break;
	case GPU_THROTTLING2:
		lock_clock = GPU_THROTTLING_95_100;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_THROTTLING_95_100\n");
		break;
	case GPU_THROTTLING3:
		lock_clock = GPU_THROTTLING_100_105;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_THROTTLING_100_105\n");
		break;
	case GPU_THROTTLING4:
		lock_clock = GPU_THROTTLING_105_110;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_THROTTLING_105_110\n");
		break;
	case GPU_TRIPPING:
		lock_clock = GPU_TRIPPING_110;
		GPU_LOG(DVFS_INFO, "[G3D] GPU_TRIPPING_110\n");
		break;
	default:
		GPU_LOG(DVFS_ERROR, "[G3D] Wrong event, %lu,  in the kbase_tmu_hot_check_and_work function\n", event);
		return 0;
	}

	platform->target_lock_type = TMU_LOCK;
	gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_MAX_LOCK, lock_clock);
#endif /* CONFIG_MALI_T6XX_DVFS */
	return 0;
}
Code example #4
static int gpu_tmu_hot_check_and_work(struct kbase_device *kbdev, unsigned long event)
{
#ifdef CONFIG_MALI_DVFS
	struct exynos_context *platform;
	int lock_clock;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	switch (event) {
	case GPU_THROTTLING1:
		lock_clock = platform->tmu_lock_clk[THROTTLING1];
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "THROTTLING1\n");
		break;
	case GPU_THROTTLING2:
		lock_clock = platform->tmu_lock_clk[THROTTLING2];
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "THROTTLING2\n");
		break;
	case GPU_THROTTLING3:
		lock_clock = platform->tmu_lock_clk[THROTTLING3];
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "THROTTLING3\n");
		break;
	case GPU_THROTTLING4:
		lock_clock = platform->tmu_lock_clk[THROTTLING4];
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "THROTTLING4\n");
		break;
	case GPU_TRIPPING:
		lock_clock = platform->tmu_lock_clk[TRIPPING];
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "TRIPPING\n");
		break;
	default:
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: wrong event, %lu\n", __func__, event);
		return 0;
	}

	gpu_dvfs_clock_lock(GPU_DVFS_MAX_LOCK, TMU_LOCK, lock_clock);
#endif /* CONFIG_MALI_DVFS */
	return 0;
}
Code example #5
static int gpu_set_clock(struct exynos_context *platform, int clk)
{
	long g3d_rate_prev = -1;
	unsigned long g3d_rate = clk * MHZ;
	int ret = 0;

	if (aclk_g3d == 0)
		return -1;

#ifdef CONFIG_MALI_RT_PM
	if (platform->exynos_pm_domain)
		mutex_lock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_MALI_RT_PM */

	if (!gpu_is_power_on()) {
		ret = -1;
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock in the power-off state!\n", __func__);
		goto err;
	}

	if (!gpu_is_clock_on()) {
		ret = -1;
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock in the clock-off state! %d\n", __func__, __raw_readl(EXYNOS5430_ENABLE_ACLK_G3D));
		goto err;
	}

	g3d_rate_prev = clk_get_rate(aclk_g3d);

	/* If the target rate differs, switch the mux to the oscillator, change the PLL rate, then restore the parent (waiting for PLL lock) */
	if (g3d_rate != g3d_rate_prev) {
		/*change here for future stable clock changing*/
		ret = clk_set_parent(mout_g3d_pll, fin_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [mout_g3d_pll]\n", __func__);
			goto err;
		}

		if (g3d_rate_prev != GPU_OSC_CLK)
			sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_BUS_CLOCK_CHANGE,
				"[GPU] %7d <= %7d", g3d_rate / 1000, g3d_rate_prev / 1000);

		/*change g3d pll*/
		ret = clk_set_rate(fout_g3d_pll, g3d_rate);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_rate [fout_g3d_pll]\n", __func__);
			goto err;
		}

		/*restore parent*/
		ret = clk_set_parent(mout_g3d_pll, fout_g3d_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [mout_g3d_pll]\n", __func__);
			goto err;
		}
	}

	platform->cur_clock = gpu_get_cur_clock(platform);

	if (platform->cur_clock != clk_get_rate(fout_g3d_pll)/MHZ)
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "clock value is wrong (aclk_g3d: %d, fout_g3d_pll: %d)\n",
				platform->cur_clock, (int) clk_get_rate(fout_g3d_pll)/MHZ);

	if (g3d_rate != g3d_rate_prev)
		GPU_LOG(DVFS_DEBUG, LSI_CLOCK_VALUE, g3d_rate/MHZ, platform->cur_clock, "clock set: %d, clock get: %d\n", (int) g3d_rate/MHZ, platform->cur_clock);
err:
#ifdef CONFIG_MALI_RT_PM
	if (platform->exynos_pm_domain)
		mutex_unlock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_MALI_RT_PM */
	return ret;
}
Code example #6
int gpu_register_dump(void)
{
	if (gpu_is_power_on()) {
		/* G3D PMU */
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x105C4064, __raw_readl(EXYNOS5430_G3D_STATUS),
							"REG_DUMP: EXYNOS5430_G3D_STATUS %x\n", __raw_readl(EXYNOS5430_G3D_STATUS));
		/* G3D PLL */
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0000, __raw_readl(EXYNOS5430_G3D_PLL_LOCK),
							"REG_DUMP: EXYNOS5430_G3D_PLL_LOCK %x\n", __raw_readl(EXYNOS5430_G3D_PLL_LOCK));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0100, __raw_readl(EXYNOS5430_G3D_PLL_CON0),
							"REG_DUMP: EXYNOS5430_G3D_PLL_CON0 %x\n", __raw_readl(EXYNOS5430_G3D_PLL_CON0));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0104, __raw_readl(EXYNOS5430_G3D_PLL_CON1),
							"REG_DUMP: EXYNOS5430_G3D_PLL_CON1 %x\n", __raw_readl(EXYNOS5430_G3D_PLL_CON1));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA010c, __raw_readl(EXYNOS5430_G3D_PLL_FREQ_DET),
							"REG_DUMP: EXYNOS5430_G3D_PLL_FREQ_DET %x\n", __raw_readl(EXYNOS5430_G3D_PLL_FREQ_DET));

		/* G3D SRC */
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0200, __raw_readl(EXYNOS5430_SRC_SEL_G3D),
							"REG_DUMP: EXYNOS5430_SRC_SEL_G3D %x\n", __raw_readl(EXYNOS5430_SRC_SEL_G3D));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0300, __raw_readl(EXYNOS5430_SRC_ENABLE_G3D),
							"REG_DUMP: EXYNOS5430_SRC_ENABLE_G3D %x\n", __raw_readl(EXYNOS5430_SRC_ENABLE_G3D));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0400, __raw_readl(EXYNOS5430_SRC_STAT_G3D),
							"REG_DUMP: EXYNOS5430_SRC_STAT_G3D %x\n", __raw_readl(EXYNOS5430_SRC_STAT_G3D));

		/* G3D DIV */
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0600, __raw_readl(EXYNOS5430_DIV_G3D),
							"REG_DUMP: EXYNOS5430_DIV_G3D %x\n", __raw_readl(EXYNOS5430_DIV_G3D));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0604, __raw_readl(EXYNOS5430_DIV_G3D_PLL_FREQ_DET),
							"REG_DUMP: EXYNOS5430_DIV_G3D_PLL_FREQ_DET %x\n", __raw_readl(EXYNOS5430_DIV_G3D_PLL_FREQ_DET));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0700, __raw_readl(EXYNOS5430_DIV_STAT_G3D),
							"REG_DUMP: EXYNOS5430_DIV_STAT_G3D %x\n", __raw_readl(EXYNOS5430_DIV_STAT_G3D));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0704, __raw_readl(EXYNOS5430_DIV_STAT_G3D_PLL_FREQ_DET),
							"REG_DUMP: EXYNOS5430_DIV_STAT_G3D_PLL_FREQ_DET %x\n", __raw_readl(EXYNOS5430_DIV_STAT_G3D_PLL_FREQ_DET));

		/* G3D ENABLE */
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0800, __raw_readl(EXYNOS5430_ENABLE_ACLK_G3D),
							"REG_DUMP: EXYNOS5430_ENABLE_ACLK_G3D %x\n", __raw_readl(EXYNOS5430_ENABLE_ACLK_G3D));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0900, __raw_readl(EXYNOS5430_ENABLE_PCLK_G3D),
							"REG_DUMP: EXYNOS5430_ENABLE_PCLK_G3D %x\n", __raw_readl(EXYNOS5430_ENABLE_PCLK_G3D));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0A00, __raw_readl(EXYNOS5430_ENABLE_SCLK_G3D),
							"REG_DUMP: EXYNOS5430_ENABLE_SCLK_G3D %x\n", __raw_readl(EXYNOS5430_ENABLE_SCLK_G3D));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0B00, __raw_readl(EXYNOS5430_ENABLE_IP_G3D0),
							"REG_DUMP: EXYNOS5430_ENABLE_IP_G3D0 %x\n", __raw_readl(EXYNOS5430_ENABLE_IP_G3D0));
		GPU_LOG(DVFS_DEBUG, LSI_REGISTER_DUMP, 0x14AA0B0A, __raw_readl(EXYNOS5430_ENABLE_IP_G3D1),
							"REG_DUMP: EXYNOS5430_ENABLE_IP_G3D1 %x\n", __raw_readl(EXYNOS5430_ENABLE_IP_G3D1));
	}

	return 0;
}
Code example #7
int gpu_control_state_set(struct kbase_device *kbdev, gpu_control_state state, int param)
{
	int ret = 0, voltage;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	mutex_lock(&platform->gpu_clock_lock);
	switch (state) {
	case GPU_CONTROL_CLOCK_ON:
		ret = gpu_clock_on(platform);
#ifdef GPU_EARLY_CLK_GATING
		break;
	case GPU_CONTROL_CLOCK_ON_POST:
#endif /* GPU_EARLY_CLK_GATING*/
#ifdef CONFIG_MALI_MIDGARD_DVFS
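		/* (Re)start the DVFS metrics polling timer now that the clock is on */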
		if (!kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = true;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
#else
			kbdev->pm.metrics.tlist.expires = jiffies + msecs_to_jiffies(platform->polling_speed);
			add_timer_on(&kbdev->pm.metrics.tlist, 0);
#endif
		}
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, 0);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
#ifdef GPU_EARLY_CLK_GATING
	case GPU_CONTROL_CLOCK_OFF_POST:
#else
	case GPU_CONTROL_CLOCK_OFF:
#endif /* GPU_EARLY_CLK_GATING*/
#ifdef CONFIG_MALI_MIDGARD_DVFS
		if (platform->dvfs_status && kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = false;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_cancel(&kbdev->pm.metrics.timer);
#else
			del_timer(&kbdev->pm.metrics.tlist);
#endif
		}
		gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_RESET);
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, platform->cur_clock);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
#ifdef GPU_EARLY_CLK_GATING
		break;
	case GPU_CONTROL_CLOCK_OFF:
#endif /* GPU_EARLY_CLK_GATING*/
		ret = gpu_clock_off(platform);
		break;
	case GPU_CONTROL_CHANGE_CLK_VOL:
		ret = gpu_set_clk_vol(kbdev, param, gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_VOLTAGE, param));
#ifdef CONFIG_MALI_MIDGARD_DVFS
		if (ret == 0) {
			ret = gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_LEVEL, platform->cur_clock);
			if (ret >= 0) {
				spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
				platform->step = ret;
				spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			} else {
				GPU_LOG(DVFS_ERROR, "Invalid dvfs level returned [%d]\n", GPU_CONTROL_CHANGE_CLK_VOL);
			}
		}
		if (gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_SET) < -1)
			GPU_LOG(DVFS_ERROR, "failed to set the PM_QOS\n");
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
	case GPU_CONTROL_PREPARE_ON:
#ifdef CONFIG_MALI_MIDGARD_DVFS
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (platform->dvfs_status && platform->wakeup_lock)
			platform->cur_clock = MALI_DVFS_START_FREQ;

		if (platform->min_lock > 0)
			platform->cur_clock = MAX(platform->min_lock, platform->cur_clock);
		if (platform->max_lock > 0)
			platform->cur_clock = MIN(platform->max_lock, platform->cur_clock);

		platform->down_requirement = platform->table[platform->step].stay_count;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
	case GPU_CONTROL_IS_POWER_ON:
		ret = gpu_is_power_on();
		break;
	case GPU_CONTROL_SET_MARGIN:
		voltage = MAX(platform->table[platform->step].voltage + platform->voltage_margin, COLD_MINIMUM_VOL);
		gpu_set_voltage(platform, voltage);
		GPU_LOG(DVFS_DEBUG, "we set the voltage: %d\n", voltage);
		break;
	default:
		mutex_unlock(&platform->gpu_clock_lock);
		return -1;
	}
	mutex_unlock(&platform->gpu_clock_lock);

	return ret;
}
Code example #8
int gpu_dvfs_governor_init(struct kbase_device *kbdev, int governor_type)
{
	unsigned long flags;
#ifdef CONFIG_MALI_T6XX_DVFS
	int i, total = 0;
#endif /* CONFIG_MALI_T6XX_DVFS */
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

#ifdef CONFIG_MALI_T6XX_DVFS
	switch (governor_type) {
	case G3D_DVFS_GOVERNOR_DEFAULT:
		gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_default;
		platform->table = gpu_dvfs_infotbl_default;
		platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
		platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
		platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_DEFAULT);
		break;
	case G3D_DVFS_GOVERNOR_STATIC:
		gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_static;
		platform->table = gpu_dvfs_infotbl_default;
		platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
		platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
		platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_STATIC);
		break;
	case G3D_DVFS_GOVERNOR_BOOSTER:
		gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_booster;
		platform->table = gpu_dvfs_infotbl_default;
		platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
		platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
		platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_BOOSTER);
		break;
	default:
		GPU_LOG(DVFS_WARNING, "[gpu_dvfs_governor_init] invalid governor type\n");
		gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_default;
		platform->table = gpu_dvfs_infotbl_default;
		platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
		platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
		platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_DEFAULT);
		break;
	}

	platform->utilization = 100;
	platform->target_lock_type = -1;
	platform->max_lock = 0;
	platform->min_lock = 0;
#ifdef CONFIG_CPU_THERMAL_IPA
	gpu_ipa_dvfs_calc_norm_utilisation(kbdev);
#endif /* CONFIG_CPU_THERMAL_IPA */
	for (i = 0; i < NUMBER_LOCK; i++) {
		platform->user_max_lock[i] = 0;
		platform->user_min_lock[i] = 0;
	}

	platform->down_requirement = 1;
	platform->wakeup_lock = 0;

	platform->governor_type = governor_type;
	platform->governor_num = G3D_MAX_GOVERNOR_NUM;

	for (i = 0; i < G3D_MAX_GOVERNOR_NUM; i++)
		total += snprintf(platform->governor_list+total,
			sizeof(platform->governor_list), "[%d] %s\n", i, governor_list[i]);

	gpu_dvfs_init_time_in_state(platform);
#else
	platform->table = gpu_dvfs_infotbl_default;
	platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default);
#if SOC_NAME == 5260
	platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default;
#endif /* SOC_NAME */
	platform->step = gpu_dvfs_get_level(platform, MALI_DVFS_START_FREQ);
#endif /* CONFIG_MALI_T6XX_DVFS */

	platform->cur_clock = platform->table[platform->step].clock;

	/* asv info update */
	gpu_dvfs_update_asv_table(platform, governor_type);

	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return 1;
}
Code example #9
int gpu_create_sysfs_file(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_clock)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [clock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_vol)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [vol]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_power_state)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [power_state]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_asv_table)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [asv_table]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_volt_table)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [volt_table]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_table)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_table]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_time_in_state)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [time_in_state]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_utilization)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [utilization]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_perf)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [perf]\n");
		goto out;
	}
#ifdef CONFIG_MALI_DVFS
	if (device_create_file(dev, &dev_attr_dvfs)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_governor)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_governor]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_max_lock_status)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_max_lock_status]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_min_lock_status)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_min_lock_status]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_max_lock)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_max_lock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_min_lock)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_min_lock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_down_staycount)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [down_staycount]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_highspeed_clock)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [highspeed_clock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_highspeed_load)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [highspeed_load]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_highspeed_delay)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [highspeed_delay]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_wakeup_lock)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [wakeup_lock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_polling_speed)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [polling_speed]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_tmu)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [tmu]\n");
		goto out;
	}
#ifdef CONFIG_CPU_THERMAL_IPA
	if (device_create_file(dev, &dev_attr_norm_utilization)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [norm_utilization]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_utilization_stats)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [utilization_stats]\n");
		goto out;
	}
#endif /* CONFIG_CPU_THERMAL_IPA */
#endif /* CONFIG_MALI_DVFS */
	if (device_create_file(dev, &dev_attr_debug_level)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [debug_level]\n");
		goto out;
	}
#ifdef CONFIG_MALI_EXYNOS_TRACE
	if (device_create_file(dev, &dev_attr_trace_level)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [trace_level]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_trace_dump)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [trace_dump]\n");
		goto out;
	}
#endif /* CONFIG_MALI_EXYNOS_TRACE */
#ifdef DEBUG_FBDEV
	if (device_create_file(dev, &dev_attr_fbdev)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [fbdev]\n");
		goto out;
	}
#endif

#ifdef MALI_SEC_HWCNT
	if (device_create_file(dev, &dev_attr_hwcnt_dvfs)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,"Couldn't create sysfs file [hwcnt_dvfs]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_hwcnt_gpr)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "Couldn't create sysfs file [hwcnt_gpr]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_hwcnt_bt_state)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [hwcnt_bt_state]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_hwcnt_tripipe)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [hwcnt_tripipe]\n");
		goto out;
	}
#endif

	if (device_create_file(dev, &dev_attr_gpu_status)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [gpu_status]\n");
		goto out;
	}

	return 0;
out:
	return -ENOENT;
}
Code example #10
int gpu_create_sysfs_file(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_clock)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [clock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_fbdev)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [fbdev]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_vol)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [vol]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [dvfs]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_max_lock)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [dvfs_max_lock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_min_lock)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [dvfs_min_lock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_time_in_state)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [time_in_state]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_tmu)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [tmu]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_utilization)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [utilization]\n");
		goto out;
	}
#ifdef CONFIG_CPU_THERMAL_IPA
	if (device_create_file(dev, &dev_attr_norm_utilization)) {
		dev_err(dev, "Couldn't create sysfs file [norm_utilization]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_utilization_stats)) {
		dev_err(dev, "Couldn't create sysfs file [utilization_stats]\n");
		goto out;
	}
#endif /* CONFIG_CPU_THERMAL_IPA */
	if (device_create_file(dev, &dev_attr_asv_table)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [asv_table]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_volt_table)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [volt_table]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_table)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [dvfs_table]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_power_state)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [power_state]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_dvfs_governor)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [dvfs_governor]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_highspeed_clock)) {
		GPU_LOG(DVFS_ERROR, "couldn't create sysfs file [highspeed_clock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_highspeed_load)) {
		GPU_LOG(DVFS_ERROR, "couldn't create sysfs file [highspeed_load]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_highspeed_delay)) {
		GPU_LOG(DVFS_ERROR, "couldn't create sysfs file [highspeed_delay]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_wakeup_lock)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [wakeup_lock]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_debug_level)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [debug_level]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_polling_speed)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [polling_speed]\n");
		goto out;
	}

#ifdef CONFIG_MALI_HWCNT_UTIL
	if (device_create_file(dev, &dev_attr_hwcnt_dvfs)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [hwcnt_dvfs]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_hwcnt_gpr)) {
		GPU_LOG(DVFS_ERROR, "Couldn't create sysfs file [hwcnt_gpr]\n");
		goto out;
	}

	if (device_create_file(dev, &dev_attr_hwcnt_bt_state)) {
		GPU_LOG(DVFS_ERROR, "couldn't create sysfs file [hwcnt_bt_state]\n");
		goto out;
	}
#endif

	return 0;
out:
	return -ENOENT;
}
Code example #11
File: gpu_balance.c  Project: ShedrockN4/wiliteneo
bool balance_init(struct kbase_device *kbdev)
{
	mali_bool ret = MALI_TRUE;

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "power initialized\n");

	/* Mali Soft reset */
	MALI_WRITE_REG((GPU_IRQ_CLEAR        ), 0x00030781);
	MALI_WRITE_REG((GPU_IRQ_MASK         ), 0x00000000);
	MALI_WRITE_REG((GPU_COMMAND          ), 0x00000001);

	/* CHECK RESET_COMPLETE @ 0x14AC0020 (GPU_IRQ_RAWSTAT) -> [8]:RESET_COMPLETED  */
	MALI_GPU_CONTROL_WAIT(GPU_IRQ_RAWSTAT, 0x100, 100);

	/* Cache clean & invalidate */
	MALI_WRITE_REG((GPU_COMMAND          ), 0x00000008);

	/* CHECK CLEAN_CACHES_COMPLETED @ 0x14AC0020 (GPU_IRQ_RAWSTAT) -> [17]:CLEAN_CACHES_COMPLETED */
	MALI_GPU_CONTROL_WAIT(GPU_IRQ_RAWSTAT, 0x20000, 100);

	MALI_WRITE_REG((MMU_IRQ_MASK         ), 0x00000000);
	MALI_WRITE_REG((SHADER_PWRON_LO      ), 0xFFFFFFFF);
	MALI_WRITE_REG((SHADER_PWRON_HI      ), 0xFFFFFFFF);
	MALI_WRITE_REG((TILER_PWRON_LO       ), 0xFFFFFFFF);
	MALI_WRITE_REG((TILER_PWRON_HI       ), 0xFFFFFFFF);
	MALI_WRITE_REG((L2_PWRON_LO          ), 0xFFFFFFFF);
	MALI_WRITE_REG((L2_PWRON_HI          ), 0xFFFFFFFF);

	/* power check  */
	MALI_GPU_CONTROL_WAIT(SHADER_READY_LO, 0xff, 50);
	MALI_GPU_CONTROL_WAIT(TILER_READY_LO, 0x1, 50);
	MALI_GPU_CONTROL_WAIT(L2_READY_LO, 0x1, 50);

	MALI_WRITE_REG((AS3_MEMATTR_HI       ), 0x88888888);
	MALI_WRITE_REG((AS3_MEMATTR_LO       ), 0x88888888);
	MALI_WRITE_REG((AS3_TRANSTAB_LO      ), 0xF2A26007);
	MALI_WRITE_REG((AS3_TRANSTAB_HI      ), 0x00000000);
	MALI_WRITE_REG((AS3_COMMAND          ), 0x00000001);

	MALI_WRITE_REG((GPU_IRQ_CLEAR        ), 0x00000081);
	MALI_WRITE_REG((GPU_IRQ_MASK         ), 0x00000000);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0xFFFFFFFF);
	MALI_WRITE_REG((JOB_IRQ_MASK         ), 0x00000000);

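	/* Submit a job chain at a fixed GPU VA to job slot 1, then wait for its completion IRQ below */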
	MALI_WRITE_REG((JS1_HEAD_NEXT_LO     ), 0xE402F740);
	MALI_WRITE_REG((JS1_HEAD_NEXT_HI     ), 0x00000000);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_LO ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_HI ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_CONFIG_NEXT      ), 0x00083703);
	MALI_WRITE_REG((JS1_COMMAND_NEXT     ), 0x00000001);

	MALI_JOB_IRQ_WAIT(0x00000002, 150);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0x00000002);

	/* Mali Soft reset */
	MALI_WRITE_REG((GPU_IRQ_CLEAR        ), 0x00030781);
	MALI_WRITE_REG((GPU_IRQ_MASK         ), 0x00000000);
	MALI_WRITE_REG((GPU_COMMAND          ), 0x00000001);

	/* CHECK RESET_COMPLETE @ 0x14AC0020 (GPU_IRQ_RAWSTAT) -> [8]:RESET_COMPLETED  */
	MALI_GPU_CONTROL_WAIT(GPU_IRQ_RAWSTAT, 0x100, 100);

	/* Cache clean & invalidate */
	MALI_WRITE_REG((GPU_COMMAND          ), 0x00000008);

	/* CHECK CLEAN_CACHES_COMPLETED @ 0x14AC0020 (GPU_IRQ_RAWSTAT) -> [17]:CLEAN_CACHES_COMPLETED */
	MALI_GPU_CONTROL_WAIT(GPU_IRQ_RAWSTAT, 0x20000, 100);

	MALI_WRITE_REG((MMU_IRQ_MASK         ), 0x00000000);
	MALI_WRITE_REG((SHADER_PWRON_LO      ), 0xFFFFFFFF);
	MALI_WRITE_REG((SHADER_PWRON_HI      ), 0xFFFFFFFF);
	MALI_WRITE_REG((TILER_PWRON_LO       ), 0xFFFFFFFF);
	MALI_WRITE_REG((TILER_PWRON_HI       ), 0xFFFFFFFF);
	MALI_WRITE_REG((L2_PWRON_LO          ), 0xFFFFFFFF);
	MALI_WRITE_REG((L2_PWRON_HI          ), 0xFFFFFFFF);

	/* power check  */
	MALI_GPU_CONTROL_WAIT(SHADER_READY_LO, 0xff, 50);
	MALI_GPU_CONTROL_WAIT(TILER_READY_LO, 0x1, 50);
	MALI_GPU_CONTROL_WAIT(L2_READY_LO, 0x1, 50);

	MALI_WRITE_REG((AS0_MEMATTR_HI       ), 0x88888888);
	MALI_WRITE_REG((AS0_MEMATTR_LO       ), 0x88888888);
	MALI_WRITE_REG((AS0_TRANSTAB_LO      ), 0xF2A00007);
	MALI_WRITE_REG((AS0_TRANSTAB_HI      ), 0x00000000);
	MALI_WRITE_REG((AS0_COMMAND          ), 0x00000001);

	MALI_WRITE_REG((AS1_MEMATTR_HI       ), 0x88888888);
	MALI_WRITE_REG((AS1_MEMATTR_LO       ), 0x88888888);
	MALI_WRITE_REG((AS1_TRANSTAB_LO      ), 0xF2A0F007);
	MALI_WRITE_REG((AS1_TRANSTAB_HI      ), 0x00000000);
	MALI_WRITE_REG((AS1_COMMAND          ), 0x00000001);

	MALI_WRITE_REG((AS2_MEMATTR_HI       ), 0x88888888);
	MALI_WRITE_REG((AS2_MEMATTR_LO       ), 0x88888888);
	MALI_WRITE_REG((AS2_TRANSTAB_LO      ), 0xF2A1E007);
	MALI_WRITE_REG((AS2_TRANSTAB_HI      ), 0x00000000);
	MALI_WRITE_REG((AS2_COMMAND          ), 0x00000001);

	MALI_WRITE_REG((GPU_IRQ_CLEAR        ), 0x00000081);
	MALI_WRITE_REG((GPU_IRQ_MASK         ), 0x00000000);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0xFFFFFFFF);
	MALI_WRITE_REG((JOB_IRQ_MASK         ), 0x00000000);

	MALI_WRITE_REG((JS1_HEAD_NEXT_LO     ), 0xE402F740);
	MALI_WRITE_REG((JS1_HEAD_NEXT_HI     ), 0x00000000);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_LO ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_HI ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_CONFIG_NEXT      ), 0x00083702);
	MALI_WRITE_REG((JS1_COMMAND_NEXT     ), 0x00000001);

	MALI_JOB_IRQ_WAIT(0x00000002, 150);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0x00000002);

	MALI_WRITE_REG((JS2_HEAD_NEXT_LO     ), 0x01000000);
	MALI_WRITE_REG((JS2_HEAD_NEXT_HI     ), 0x00000000);
	MALI_WRITE_REG((JS2_AFFINITY_NEXT_LO ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS2_AFFINITY_NEXT_HI ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS2_CONFIG_NEXT      ), 0x00080700);
	MALI_WRITE_REG((JS2_COMMAND_NEXT     ), 0x00000001);

	MALI_JOB_IRQ_WAIT(0x00000004, 52);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0x00000004);

	MALI_WRITE_REG((JS1_HEAD_NEXT_LO     ), 0xE4036D00);
	MALI_WRITE_REG((JS1_HEAD_NEXT_HI     ), 0x00000000);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_LO ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_HI ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_CONFIG_NEXT      ), 0x00083701);
	MALI_WRITE_REG((JS1_COMMAND_NEXT     ), 0x00000001);

	MALI_JOB_IRQ_WAIT(0x00000002, 450);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0x00000002);

	MALI_WRITE_REG((JS0_HEAD_NEXT_LO     ), 0xD8C0D3C0);
	MALI_WRITE_REG((JS0_HEAD_NEXT_HI     ), 0x00000000);
	MALI_WRITE_REG((JS0_AFFINITY_NEXT_LO ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS0_AFFINITY_NEXT_HI ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS0_CONFIG_NEXT      ), 0x00083F00);
	MALI_WRITE_REG((JS0_COMMAND_NEXT     ), 0x00000001);

	MALI_WRITE_REG((JS1_HEAD_NEXT_LO     ), 0xD8C0D340);
	MALI_WRITE_REG((JS1_HEAD_NEXT_HI     ), 0x00000000);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_LO ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_HI ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_CONFIG_NEXT      ), 0x00083F01);
	MALI_WRITE_REG((JS1_COMMAND_NEXT     ), 0x00000001);

	MALI_JOB_IRQ_WAIT(0x00000002, 220);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0x00000002);

	MALI_JOB_IRQ_WAIT(0x00000001, 15);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0x00000001);

	MALI_WRITE_REG((JS0_HEAD_NEXT_LO     ), 0xE4036D80);
	MALI_WRITE_REG((JS0_HEAD_NEXT_HI     ), 0x00000000);
	MALI_WRITE_REG((JS0_AFFINITY_NEXT_LO ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS0_AFFINITY_NEXT_HI ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS0_CONFIG_NEXT      ), 0x00083700);
	MALI_WRITE_REG((JS0_COMMAND_NEXT     ), 0x00000001);

	MALI_WRITE_REG((JS2_HEAD_NEXT_LO     ), 0x01000080);
	MALI_WRITE_REG((JS2_HEAD_NEXT_HI     ), 0x00000000);
	MALI_WRITE_REG((JS2_AFFINITY_NEXT_LO ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS2_AFFINITY_NEXT_HI ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS2_CONFIG_NEXT      ), 0x00080000);
	MALI_WRITE_REG((JS2_COMMAND_NEXT     ), 0x00000001);

	MALI_JOB_IRQ_WAIT(0x00000005, 380);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0x00000005);

	MALI_WRITE_REG((JS1_HEAD_NEXT_LO     ), 0x010000C0);
	MALI_WRITE_REG((JS1_HEAD_NEXT_HI     ), 0x00000000);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_LO ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_AFFINITY_NEXT_HI ), 0xFFFFFFFF);
	MALI_WRITE_REG((JS1_CONFIG_NEXT      ), 0x00083000);
	MALI_WRITE_REG((JS1_COMMAND_NEXT     ), 0x00000001);

	MALI_JOB_IRQ_WAIT(0x00000002, 50);
	MALI_WRITE_REG((JOB_IRQ_CLEAR        ), 0x00000002);

	if (test_checksum_all() != 0) {
		KBASE_TRACE_ADD_EXYNOS(kbdev, LSI_CHECKSUM, NULL, NULL, 0u, MALI_FALSE);
		ret = MALI_FALSE;
	}

	/* Mali reset */
	MALI_WRITE_REG((GPU_IRQ_CLEAR        ), 0x00030781);
	MALI_WRITE_REG((GPU_IRQ_MASK         ), 0x00000000);
	MALI_WRITE_REG((GPU_COMMAND          ), 0x00000001);

	/* CHECK RESET_COMPLETE @ 0x14AC0020 (GPU_IRQ_RAWSTAT) -> [8]:RESET_COMPLETED  */
	MALI_GPU_CONTROL_WAIT(GPU_IRQ_RAWSTAT, 0x100, 100);

	return ret;
}
Code example #12
int gpu_set_target_clk_vol(int clk, bool pending_is_allowed)
{
	int ret = 0, target_clk = 0, target_vol = 0;
	int prev_clk = 0;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	int level = 0;
#endif

	DVFS_ASSERT(platform);

	if (!gpu_control_is_power_on(pkbdev)) {
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock and voltage in the power-off state!\n", __func__);
		return -1;
	}

	mutex_lock(&platform->gpu_clock_lock);
#ifdef CONFIG_MALI_DVFS
	if (pending_is_allowed && platform->dvs_is_enabled) {
		if (!platform->dvfs_pending && clk < platform->cur_clock) {
			platform->dvfs_pending = clk;
			GPU_LOG(DVFS_DEBUG, DUMMY, 0u, 0u, "pending to change the clock [%d -> %d]\n", platform->cur_clock, platform->dvfs_pending);
		} else if (clk > platform->cur_clock) {
			platform->dvfs_pending = 0;
		}
		mutex_unlock(&platform->gpu_clock_lock);
		return 0;
	} else {
		platform->dvfs_pending = 0;
	}

	if (platform->dvs_is_enabled) {
		mutex_unlock(&platform->gpu_clock_lock);
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't control clock and voltage in dvs mode\n", __func__);
		return 0;
	}

#endif /* CONFIG_MALI_DVFS */

	target_clk = gpu_check_target_clock(platform, clk);
	if (target_clk < 0) {
		mutex_unlock(&platform->gpu_clock_lock);
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
				"%s: mismatch clock error (source %d, target %d)\n", __func__, clk, target_clk);
		return -1;
	}

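	/* Apply the voltage margin, then clamp between the cold-boot minimum and the first table entry's voltage */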
	target_vol = MAX(gpu_dvfs_get_voltage(target_clk) + platform->voltage_margin, platform->cold_min_vol);
	target_vol = target_vol < (int) platform->table[0].voltage ? target_vol : (int) platform->table[0].voltage;

	prev_clk = gpu_get_cur_clock(platform);

#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	level = gpu_dvfs_get_level(clk);
	exynos7420_cl_dvfs_stop(ID_G3D, level);
#endif

	GPU_SET_CLK_VOL(kbdev, prev_clk, target_clk, target_vol);
	ret = gpu_update_cur_level(platform);

#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	if (!platform->voltage_margin && platform->power_status
		&& platform->cl_dvfs_start_base && platform->cur_clock >= platform->cl_dvfs_start_base)
		exynos7420_cl_dvfs_start(ID_G3D);
#endif
	mutex_unlock(&platform->gpu_clock_lock);

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "clk[%d -> %d], vol[%d (margin : %d)]\n",
		prev_clk, gpu_get_cur_clock(platform), gpu_get_cur_voltage(platform), platform->voltage_margin);

	return ret;
}
Code example #13
int gpu_dvfs_clock_lock(gpu_dvfs_lock_command lock_command, gpu_dvfs_lock_type lock_type, int clock)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	int i;
	bool dirty = false;
	unsigned long flags;

	DVFS_ASSERT(platform);

	if (!platform->dvfs_status)
		return 0;

	if ((lock_type < TMU_LOCK) || (lock_type >= NUMBER_LOCK)) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid lock type is called (%d)\n", __func__, lock_type);
		return -1;
	}

	switch (lock_command) {
	case GPU_DVFS_MAX_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (gpu_dvfs_get_level(clock) < 0) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "max lock error: invalid clock value %d\n", clock);
			return -1;
		}

		platform->user_max_lock[lock_type] = clock;
		platform->max_lock = clock;

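		/* The effective max lock is the lowest positive per-type user max lock */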
		if (platform->max_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_max_lock[i] > 0)
					platform->max_lock = MIN(platform->max_lock, platform->user_max_lock[i]);
			}
		} else {
			platform->max_lock = clock;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->max_lock > 0) && (platform->cur_clock >= platform->max_lock))
			gpu_set_target_clk_vol(platform->max_lock, false);

		GPU_LOG(DVFS_DEBUG, LSI_GPU_MAX_LOCK, lock_type, clock,
			"lock max clk[%d], user lock[%d], current clk[%d]\n",
			platform->max_lock, platform->user_max_lock[lock_type], platform->cur_clock);
		break;
	case GPU_DVFS_MIN_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (gpu_dvfs_get_level(clock) < 0) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "min lock error: invalid clock value %d\n", clock);
			return -1;
		}

		platform->user_min_lock[lock_type] = clock;
		platform->min_lock = clock;

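		/* The effective min lock is the highest positive per-type user min lock */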
		if (platform->min_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_min_lock[i] > 0)
					platform->min_lock = MAX(platform->min_lock, platform->user_min_lock[i]);
			}
		} else {
			platform->min_lock = clock;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->min_lock > 0) && (platform->cur_clock < platform->min_lock)
						&& (platform->min_lock <= platform->max_lock))
			gpu_set_target_clk_vol(platform->min_lock, false);

		GPU_LOG(DVFS_DEBUG, LSI_GPU_MIN_LOCK, lock_type, clock,
			"lock min clk[%d], user lock[%d], current clk[%d]\n",
			platform->min_lock, platform->user_min_lock[lock_type], platform->cur_clock);
		break;
	case GPU_DVFS_MAX_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		platform->user_max_lock[lock_type] = 0;
		platform->max_lock = platform->gpu_max_clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_max_lock[i] > 0) {
				dirty = true;
				platform->max_lock = MIN(platform->user_max_lock[i], platform->max_lock);
			}
		}

		if (!dirty)
			platform->max_lock = 0;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, LSI_GPU_MAX_LOCK, lock_type, clock, "unlock max clk\n");
		break;
	case GPU_DVFS_MIN_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		platform->user_min_lock[lock_type] = 0;
		platform->min_lock = platform->gpu_min_clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_min_lock[i] > 0) {
				dirty = true;
				platform->min_lock = MAX(platform->user_min_lock[i], platform->min_lock);
			}
		}

		if (!dirty)
			platform->min_lock = 0;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, LSI_GPU_MIN_LOCK, lock_type, clock, "unlock min clk\n");
		break;
	default:
		break;
	}

	return 0;
}
Code example #14
File: mali_kbase_instr.c  Project: ColinIanKing/m576
/**
 * @brief Issue Dump command to hardware and wait for completion
 */
mali_error kbase_instr_hwcnt_dump(struct kbase_context *kctx)
{
	unsigned long flags;
	mali_error err = MALI_ERROR_FUNCTION_FAILED;
	struct kbase_device *kbdev;

#ifdef SEC_HWCNT
	if (kctx == NULL) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "kctx is NULL error in %s %d \n", __FUNCTION__, err);
		goto out;
	}
#endif

	KBASE_DEBUG_ASSERT(NULL != kctx);
	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	err = kbase_instr_hwcnt_dump_irq(kctx);
	if (MALI_ERROR_NONE != err) {
		/* Can't dump HW counters */
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "kbase_instr_hwcnt_dump_irq error in %s %d \n", __FUNCTION__, err);
		goto out;
	}

	/* Wait for dump & cacheclean to complete */
#ifdef SEC_HWCNT
	if (kbdev->hwcnt.is_init) {
		int ret = wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout);
		if ((kbdev->hwcnt.trig_exception == 1) || (ret == 0)) {
			kbdev->hwcnt.trig_exception = 0;
			kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
			err = MALI_ERROR_FUNCTION_FAILED;
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "wait_event_timeout error in %s %d \n", __FUNCTION__, err);
			goto out;
		}
	} else
#endif
	wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
		/* GPU is being reset */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
#ifdef SEC_HWCNT
		if (kbdev->hwcnt.is_init)
			wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout);
		else
#endif
		wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	}

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_FAULT) {
		err = MALI_ERROR_FUNCTION_FAILED;
		kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "hwcnt state is FAULT error in %s %d \n", __FUNCTION__, err);
	} else {
		/* Dump done */
		KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);
		err = MALI_ERROR_NONE;
	}

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 out:
	return err;
}
Code example #15
static int gpu_set_clock(struct exynos_context *platform, int clk)
{
	long g3d_rate_prev = -1;
	unsigned long g3d_rate = clk * MHZ;
	int ret = 0;

#if 0
	if (aclk_g3d == 0)
		return -1;
#endif

#ifdef CONFIG_PM_RUNTIME
	if (platform->exynos_pm_domain)
		mutex_lock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_PM_RUNTIME */

	if (!gpu_is_power_on()) {
		ret = -1;
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock in the power-off state!\n", __func__);
		goto err;
	}

	if (!gpu_is_clock_on()) {
		ret = -1;
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock in the clock-off state!\n", __func__);
		goto err;
	}

#if 0
	g3d_rate_prev = clk_get_rate(fout_g3d_pll);

	/* if changed the VPLL rate, set rate for VPLL and wait for lock time */
	if (g3d_rate != g3d_rate_prev) {
		ret = gpu_set_maximum_outstanding_req(L2CONFIG_MO_1BY8);
		if (ret < 0)
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to set MO (%d)\n", __func__, ret);

		/*change here for future stable clock changing*/
		ret = clk_set_parent(mout_g3d_pll, fin_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [mout_g3d_pll]\n", __func__);
			goto err;
		}

		/*change g3d pll*/
		ret = clk_set_rate(fout_g3d_pll, g3d_rate);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_rate [fout_g3d_pll]\n", __func__);
			goto err;
		}

		/*restore parent*/
		ret = clk_set_parent(mout_g3d_pll, fout_g3d_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [mout_g3d_pll]\n", __func__);
			goto err;
		}

#ifdef CONFIG_SOC_EXYNOS5433_REV_0
		/*restore parent*/
		ret = clk_set_parent(mout_aclk_g3d, aclk_g3d);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [mout_ack_g3d]\n", __func__);
			goto err;
		}
#endif /* CONFIG_SOC_EXYNOS5433_REV_0 */

		ret = gpu_set_maximum_outstanding_req(L2CONFIG_MO_NO_RESTRICT);
		if (ret < 0)
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to restore MO (%d)\n", __func__, ret);

		g3d_rate_prev = g3d_rate;
	}
#endif

	platform->cur_clock = gpu_get_cur_clock(platform);

	GPU_LOG(DVFS_DEBUG, LSI_CLOCK_VALUE, 0u, g3d_rate/MHZ, "clock set: %ld\n", g3d_rate/MHZ);
	GPU_LOG(DVFS_DEBUG, LSI_CLOCK_VALUE, 0u, platform->cur_clock, "clock get: %d\n", platform->cur_clock);
err:
#ifdef CONFIG_PM_RUNTIME
	if (platform->exynos_pm_domain)
		mutex_unlock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_PM_RUNTIME */
	return ret;
}
Code example #16
int gpu_register_dump(void)
{
	if (gpu_is_power_on() && !s2m_get_dvs_is_on()) {
		/* MCS Value check */
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP,  0x10051224 , __raw_readl(EXYNOS7420_VA_SYSREG + 0x1224),
				"REG_DUMP: G3D_EMA_RF2_UHD_CON %x\n", __raw_readl(EXYNOS7420_VA_SYSREG + 0x1224));
		/* G3D PMU */
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x105C4100, __raw_readl(EXYNOS_PMU_G3D_CONFIGURATION),
				"REG_DUMP: EXYNOS_PMU_G3D_CONFIGURATION %x\n", __raw_readl(EXYNOS_PMU_G3D_CONFIGURATION));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x105C4104, __raw_readl(EXYNOS_PMU_G3D_STATUS),
				"REG_DUMP: EXYNOS_PMU_G3D_STATUS %x\n", __raw_readl(EXYNOS_PMU_G3D_STATUS));
		/* G3D PLL */
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x105C6100, __raw_readl(EXYNOS_PMU_GPU_DVS_CTRL),
				"REG_DUMP: EXYNOS_PMU_GPU_DVS_CTRL %x\n", __raw_readl(EXYNOS_PMU_GPU_DVS_CTRL));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x10576104, __raw_readl(EXYNOS_PMU_GPU_DVS_STATUS),
				"REG_DUMP: GPU_DVS_STATUS %x\n", __raw_readl(EXYNOS_PMU_GPU_DVS_STATUS));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x10051234, __raw_readl(EXYNOS7420_VA_SYSREG + 0x1234),
				"REG_DUMP: G3D_G3DCFG_REG0 %x\n", __raw_readl(EXYNOS7420_VA_SYSREG + 0x1234));

#ifdef CONFIG_EXYNOS_NOC_DEBUGGING
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14A002F0, __raw_readl(g3d0_outstanding_regs + 0x2F0),
				"REG_DUMP: read outstanding %x\n", __raw_readl(g3d0_outstanding_regs + 0x2F0));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14A003F0, __raw_readl(g3d0_outstanding_regs + 0x3F0),
				"REG_DUMP: write outstanding %x\n", __raw_readl(g3d0_outstanding_regs + 0x3F0));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14A202F0, __raw_readl(g3d1_outstanding_regs + 0x2F0),
				"REG_DUMP: read outstanding %x\n", __raw_readl(g3d1_outstanding_regs + 0x2F0));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14A203F0, __raw_readl(g3d1_outstanding_regs + 0x3F0),
				"REG_DUMP: write outstanding %x\n", __raw_readl(g3d1_outstanding_regs + 0x3F0));
#endif /* CONFIG_EXYNOS_NOC_DEBUGGING */

		/* G3D PLL */
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0000, __raw_readl(G3D_LOCK),
				"REG_DUMP: EXYNOS7420_G3D_PLL_LOCK %x\n", __raw_readl(G3D_LOCK));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0100, __raw_readl(G3D_CON),
				"REG_DUMP: EXYNOS7420_G3D_PLL_CON0 %x\n", __raw_readl(G3D_CON));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0104, __raw_readl(G3D_CON1),
				"REG_DUMP: EXYNOS7420_G3D_PLL_CON1 %x\n", __raw_readl(G3D_CON1));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0108, __raw_readl(G3D_CON2),
				"REG_DUMP: EXYNOS7420_G3D_PLL_CON2 %x\n", __raw_readl(G3D_CON2));

		/* G3D SRC */
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0200, __raw_readl(EXYNOS7420_MUX_SEL_G3D),
				"REG_DUMP: EXYNOS7420_SRC_SEL_G3D %x\n", __raw_readl(EXYNOS7420_MUX_SEL_G3D));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0300, __raw_readl(EXYNOS7420_MUX_ENABLE_G3D),
				"REG_DUMP: EXYNOS7420_SRC_ENABLE_G3D %x\n", __raw_readl(EXYNOS7420_MUX_ENABLE_G3D));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0400, __raw_readl(EXYNOS7420_MUX_STAT_G3D),
				"REG_DUMP: EXYNOS7420_SRC_STAT_G3D %x\n", __raw_readl(EXYNOS7420_MUX_STAT_G3D));

		/* G3D DIV */
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0600, __raw_readl(EXYNOS7420_DIV_G3D),
				"REG_DUMP: EXYNOS7420_DIV_G3D %x\n", __raw_readl(EXYNOS7420_DIV_G3D));
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0700, __raw_readl(EXYNOS7420_DIV_STAT_G3D),
				"REG_DUMP: EXYNOS7420_DIV_STAT_G3D %x\n", __raw_readl(EXYNOS7420_DIV_STAT_G3D));

		/* G3D ENABLE */
		GPU_LOG(DVFS_WARNING, LSI_REGISTER_DUMP, 0x14AA0B00, __raw_readl(EXYNOS7420_CLK_ENABLE_IP_G3D),
				"REG_DUMP: EXYNOS7420_ENABLE_IP_G3D %x\n", __raw_readl(EXYNOS7420_CLK_ENABLE_IP_G3D));

	} else {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: Power Status %d, DVS Status %d\n", __func__, gpu_is_power_on(), s2m_get_dvs_is_on());
	}

	return 0;
}
Code example #17
static int gpu_set_clock(struct exynos_context *platform, int clk)
{
	long g3d_rate_prev = -1;
	unsigned long g3d_rate = clk * MHZ;
	int ret = 0;
	int level = 0;

	if (aclk_g3d == 0)
		return -1;

#ifdef CONFIG_MALI_RT_PM
	if (platform->exynos_pm_domain)
		mutex_lock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_MALI_RT_PM */

	if (!gpu_is_power_on()) {
		ret = -1;
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock in the power-off state!\n", __func__);
		goto err;
	}

	g3d_rate_prev = clk_get_rate(aclk_g3d);

	/* If the target rate differs, switch the mux to the oscillator, change the PLL rate, then restore the parent (waiting for PLL lock) */
	if (g3d_rate != g3d_rate_prev) {

		ret = clk_set_parent(mout_g3d, fin_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [fin_pll]\n", __func__);
			goto err;
		}

		/*change g3d pll*/
		ret = clk_set_rate(fout_g3d_pll, g3d_rate);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_rate [fout_g3d_pll]\n", __func__);
			goto err;
		}

		level = gpu_dvfs_get_level(g3d_rate/MHZ);
		if (level < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to gpu_dvfs_get_level \n", __func__);
			goto err;
		}

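		/* Set sclk_hpm_g3d to aclk_g3d divided by the per-level HPM divider */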
		ret = clk_set_rate(sclk_hpm_g3d, (clk_get_rate(aclk_g3d)/hpm_freq_table[level]));
		if(ret < 0)
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_rate [sclk_hpm_g3d]\n", __func__);

		ret = clk_set_parent(mout_g3d, fout_g3d_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [fout_g3d_pll]\n", __func__);
			goto err;
		}

		g3d_rate_prev = g3d_rate;
	}

	platform->cur_clock = gpu_get_cur_clock(platform);

	if (platform->cur_clock != clk_get_rate(fout_g3d_pll)/MHZ)
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "clock value is wrong (aclk_g3d: %d, fout_g3d_pll: %d)\n",
				platform->cur_clock, (int) clk_get_rate(fout_g3d_pll)/MHZ);
	GPU_LOG(DVFS_DEBUG, LSI_CLOCK_VALUE, g3d_rate/MHZ, platform->cur_clock,
		"clock set: %ld, clock get: %d\n", g3d_rate/MHZ, platform->cur_clock);
err:
#ifdef CONFIG_MALI_RT_PM
	if (platform->exynos_pm_domain)
		mutex_unlock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_MALI_RT_PM */
	return ret;
}
Code example #18
File: gpu_hwcnt.c  Project: ColinIanKing/m576
void exynos_hwcnt_init(struct kbase_device *kbdev)
{
	struct kbase_uk_hwcnt_setup setup_arg;
	struct kbase_context *kctx;
	struct kbase_uk_mem_alloc mem;
	struct kbase_va_region *reg;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	if (platform->hwcnt_gathering_status == false)
		goto out;

	kctx = kbase_create_context(kbdev, false);

	if (kctx) {
		kbdev->hwcnt.kctx = kctx;
	} else {
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "hwcnt error!, hwcnt_init is failed\n");
		goto out;
	}

	mem.va_pages = mem.commit_pages = mem.extent = 1;
	mem.flags = BASE_MEM_PROT_GPU_WR | BASE_MEM_PROT_CPU_RD | BASE_MEM_HINT_CPU_RD;

	reg = kbase_mem_alloc(kctx, mem.va_pages, mem.commit_pages, mem.extent, &mem.flags, &mem.gpu_va, &mem.va_alignment);

#if defined(CONFIG_64BIT)
	kbase_gpu_vm_lock(kctx);
	if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg, 0, 1, 1)) {
		kbase_gpu_vm_unlock(kctx);
		platform->hwcnt_gathering_status = false;
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "exynos_hwcnt_init error!mmap fail\n");
		kbase_mem_free(kbdev->hwcnt.kctx, kbdev->hwcnt.suspended_state.dump_buffer);
		goto out;
	}
	kbase_gpu_vm_unlock(kctx);
#endif

	kctx->kbdev->hwcnt.phy_addr = reg->alloc->pages[0];
	kctx->kbdev->hwcnt.enable_for_utilization = FALSE;
	kctx->kbdev->hwcnt.enable_for_gpr = FALSE;
	kctx->kbdev->hwcnt.suspended_kctx = NULL;
	kctx->kbdev->hwcnt.timeout = msecs_to_jiffies(100);
	kctx->kbdev->hwcnt.is_powered = FALSE;
	mutex_init(&kbdev->hwcnt.mlock);

#if defined(CONFIG_64BIT)
	setup_arg.dump_buffer = reg->start_pfn << PAGE_SHIFT;
#else
	setup_arg.dump_buffer = mem.gpu_va;
#endif
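	/* Counter-block enable bitmasks for each hardware unit (job manager, shader, tiler, L3 cache, MMU/L2) */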
	setup_arg.jm_bm =  platform->hwcnt_choose_jm;
	setup_arg.shader_bm = platform->hwcnt_choose_shader;
	setup_arg.tiler_bm =  platform->hwcnt_choose_tiler;
	setup_arg.l3_cache_bm =  platform->hwcnt_choose_l3_cache;
	setup_arg.mmu_l2_bm =  platform->hwcnt_choose_mmu_l2;
	setup_arg.padding = HWC_MODE_UTILIZATION;

	kctx->kbdev->hwcnt.kspace_addr = kbase_kmap_from_physical_address(kbdev);

	if (MALI_ERROR_NONE != hwcnt_setup(kctx, &setup_arg)) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "hwcnt_setup is failed\n");
		goto out;
	}

	kctx->kbdev->hwcnt.acc_buffer = kmalloc(HWC_ACC_BUFFER_SIZE, GFP_KERNEL);

	if (kctx->kbdev->hwcnt.acc_buffer)
		memset(kctx->kbdev->hwcnt.acc_buffer, 0, HWC_ACC_BUFFER_SIZE);
	else
		goto out;

	kbdev->hwcnt.is_init = TRUE;
	if(kbdev->pm.pm_current_policy->id == KBASE_PM_POLICY_ID_ALWAYS_ON) {
		mutex_lock(&kbdev->hwcnt.mlock);
		if (!kbdev->hwcnt.kctx)
			hwcnt_start(kbdev);
		mutex_unlock(&kbdev->hwcnt.mlock);
	}
	return;
out:
	kbdev->hwcnt.is_init = FALSE;
	return;
}
Code example #19
static int exynos_secure_mem_enable(struct kbase_device *kbdev, int ion_fd, u64 flags, struct kbase_va_region *reg)
{
	/* enable secure world mode : TZASC */
	int ret = 0;

	if (!kbdev)
		goto secure_out;

	if (!kbdev->secure_mode_support) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
		ret = -EINVAL;
		goto secure_out;
	}

	if (!reg) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong input argument, reg %p\n",
			__func__, reg);
		goto secure_out;
	}
#if defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)
#if MALI_SEC_ASP_SECURE_BUF_CTRL
	{
		struct ion_client *client;
		struct ion_handle *ion_handle;
		size_t len = 0;
		ion_phys_addr_t phys = 0;

		flush_all_cpu_caches();

		if ((flags & kbdev->sec_sr_info.secure_flags_crc_asp) == kbdev->sec_sr_info.secure_flags_crc_asp) {
			reg->flags |= KBASE_REG_SECURE_CRC | KBASE_REG_SECURE;
		} else {

			client = ion_client_create(ion_exynos, "G3D");
			if (IS_ERR(client)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_client of G3D\n",
						__func__);
				goto secure_out;
			}

			ion_handle = ion_import_dma_buf(client, ion_fd);

			if (IS_ERR(ion_handle)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_handle of G3D\n",
						__func__);
				ion_client_destroy(client);
				goto secure_out;
			}

			if (ion_phys(client, ion_handle, &phys, &len)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get phys. addr of G3D\n",
						__func__);
				ion_free(client, ion_handle);
				ion_client_destroy(client);
				goto secure_out;
			}

			ion_free(client, ion_handle);
			ion_client_destroy(client);

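			/* Ask the secure world via SMC to protect this buffer region for G3D */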
			ret = exynos_smc(SMC_DRM_SECBUF_CFW_PROT, phys, len, PROT_G3D);
			if (ret != DRMDRV_OK) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: failed to set secure buffer region of G3D buffer, phy 0x%08x, error 0x%x\n",
					__func__, (unsigned int)phys, ret);
				BUG();
			}

			reg->flags |= KBASE_REG_SECURE;
		}

		reg->phys_by_ion = phys;
		reg->len_by_ion = len;
	}
#else
	reg->flags |= KBASE_REG_SECURE;

	reg->phys_by_ion = 0;
	reg->len_by_ion = 0;
#endif
#else
	GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
	ret = -EINVAL;
#endif // defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)

	return ret;
secure_out:
	ret = -EINVAL;
	return ret;
}
Code example #20
File: gpu_dvfs_handler.c  Project: monojo/xu3
int gpu_dvfs_handler_control(struct kbase_device *kbdev, gpu_dvfs_handler_command command, int param)
{
	int ret = 0;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	int i;
	bool dirty = false;
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	switch (command) {
#ifdef CONFIG_MALI_MIDGARD_DVFS
	case GPU_HANDLER_DVFS_ON:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_OFF:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_GOVERNOR_CHANGE:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		gpu_dvfs_governor_init(kbdev, param);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_MAX_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->min_lock >= 0) && (param < platform->min_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "[G3D] max lock Error: lock is smaller than min lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = param;
		platform->max_lock = param;

		if (platform->max_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_max_lock[i] > 0)
					platform->max_lock = MIN(platform->max_lock, platform->user_max_lock[i]);
			}
		} else {
			platform->max_lock = param;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->max_lock > 0) && (platform->cur_clock > platform->max_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->max_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock max clk[%d], user lock[%d], current clk[%d]\n", platform->max_lock,
				platform->user_max_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MIN_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->max_lock > 0) && (param > platform->max_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "min lock Error: the lock is larger than max lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = param;
		platform->min_lock = param;

		if (platform->min_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_min_lock[i] > 0)
					platform->min_lock = MAX(platform->min_lock, platform->user_min_lock[i]);
			}
		} else {
			platform->min_lock = param;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->min_lock > 0) && (platform->cur_clock < platform->min_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->min_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock min clk[%d], user lock[%d], current clk[%d]\n", platform->min_lock,
				platform->user_min_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MAX_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = 0;
		platform->max_lock = platform->table[platform->table_size-1].clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_max_lock[i] > 0) {
				dirty = true;
				platform->max_lock = MIN(platform->user_max_lock[i], platform->max_lock);
			}
		}

		if (!dirty)
			platform->max_lock = 0;

		platform->target_lock_type = -1;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock max clk\n");
		break;
	case GPU_HANDLER_DVFS_MIN_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = 0;
		platform->min_lock = platform->table[0].clock;

		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_min_lock[i] > 0) {
				dirty = true;
				platform->min_lock = MAX(platform->user_min_lock[i], platform->min_lock);
			}
		}

		if (!dirty)
			platform->min_lock = 0;

		platform->target_lock_type = -1;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock min clk\n");
		break;
	case GPU_HANDLER_INIT_TIME_IN_STATE:
		gpu_dvfs_init_time_in_state(platform);
		break;
	case GPU_HANDLER_UPDATE_TIME_IN_STATE:
		gpu_dvfs_update_time_in_state(platform, param);
		break;
	case GPU_HANDLER_DVFS_GET_LEVEL:
		ret = gpu_dvfs_get_level(platform, param);
		break;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	case GPU_HANDLER_DVFS_GET_VOLTAGE:
		ret = gpu_dvfs_get_voltage(platform, param);
		break;
	default:
		break;
	}
	return ret;
}