static int mali_kbase_devfreq_target(struct device *dev, unsigned long *_freq,
			      u32 flags)
{
	struct kbase_device *kbdev = (struct kbase_device *)dev->platform_data;
	unsigned long old_freq = kbdev->devfreq->previous_freq;
	struct opp *opp = NULL;
	unsigned long freq;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, _freq, flags);
	if (IS_ERR(opp)) {
		printk("[mali-midgard]  Failed to get Operating Performance Point\n");
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	freq = opp_get_freq(opp);
	rcu_read_unlock();

	if (old_freq == freq)
		return 0;

	if (clk_set_rate(kbdev->clk, freq)) {
		printk(KERN_ERR "[mali-midgard] Failed to set gpu frequency, [%lu->%lu]\n", old_freq, freq);
		return -ENODEV;
	}


	return 0;
}
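A ->target callback like the one above only takes effect once it is registered through a struct devfreq_dev_profile and devm_devfreq_add_device(). The following is an illustrative sketch, not taken from the driver above; the kbdev->dev and kbdev->devfreq fields and the mali_devfreq_register() helper are assumptions.

#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/err.h>

/* Illustrative profile wiring the target callback into devfreq. */
static struct devfreq_dev_profile mali_devfreq_profile = {
	.polling_ms	= 100,		/* governor sampling interval */
	.target		= mali_kbase_devfreq_target,
};

static int mali_devfreq_register(struct kbase_device *kbdev)
{
	/* Start the governor from the clock's current rate. */
	mali_devfreq_profile.initial_freq = clk_get_rate(kbdev->clk);

	kbdev->devfreq = devm_devfreq_add_device(kbdev->dev,
			&mali_devfreq_profile, "simple_ondemand", NULL);
	return PTR_ERR_OR_ZERO(kbdev->devfreq);
}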
Example #2
static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
			      u32 flags)
{
	int err = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device,
						    dev);
	struct busfreq_data_int *data = platform_get_drvdata(pdev);
	struct opp *opp;
	unsigned long old_freq, freq;
	unsigned long volt;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, _freq, flags);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		dev_err(dev, "%s: Invalid OPP.\n", __func__);
		return PTR_ERR(opp);
	}

	freq = opp_get_freq(opp);
	volt = opp_get_voltage(opp);
	rcu_read_unlock();

	old_freq = data->curr_freq;

	if (old_freq == freq)
		return 0;

	dev_dbg(dev, "targetting %lukHz %luuV\n", freq, volt);

	mutex_lock(&data->lock);

	if (data->disabled)
		goto out;

	if (freq > exynos5_int_opp_table[0].clk)
		pm_qos_update_request(&data->int_req, freq * 16 / 1000);
	else
		pm_qos_update_request(&data->int_req, -1);

	if (old_freq < freq)
		err = exynos5_int_setvolt(data, volt);
	if (err)
		goto out;

	err = clk_set_rate(data->int_clk, freq * 1000);

	if (err)
		goto out;

	if (old_freq > freq)
		err = exynos5_int_setvolt(data, volt);
	if (err)
		goto out;

	data->curr_freq = freq;
out:
	mutex_unlock(&data->lock);
	return err;
}
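The function above follows the standard DVFS ordering: when raising the frequency, set the higher voltage first; when lowering it, change the clock first and drop the voltage afterwards, so the rail is never below what the running frequency needs. A generic sketch of that rule using the regulator and clk APIs; the struct dvfs_pair type and its fields are hypothetical, not from the driver above.

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

struct dvfs_pair {
	struct clk *clk;
	struct regulator *reg;
	unsigned long cur_hz;
};

/* Hypothetical helper: apply (rate, volt) in the safe order. */
static int dvfs_set_rate_volt(struct dvfs_pair *d, unsigned long hz, int uV)
{
	int err;

	if (hz > d->cur_hz) {			/* scaling up: voltage first */
		err = regulator_set_voltage(d->reg, uV, uV);
		if (err)
			return err;
	}

	err = clk_set_rate(d->clk, hz);
	if (err)
		return err;

	if (hz < d->cur_hz) {			/* scaling down: voltage last */
		err = regulator_set_voltage(d->reg, uV, uV);
		if (err)
			return err;
	}

	d->cur_hz = hz;
	return 0;
}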
static int
kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;
	unsigned long freq = 0;
	int err;


	kbdev->reset_utilization = true;

	freq = *target_freq;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, &freq, flags);
	rcu_read_unlock();
	if (IS_ERR_OR_NULL(opp)) {
		dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
		return IS_ERR(opp) ? PTR_ERR(opp) : -ENODEV;
	}

	err = clk_set_rate(kbdev->clock, freq);
	if (err) {
		dev_err(dev, "Failed to set clock %lu (target %lu)\n",
				freq, *target_freq);
		return err;
	}

	*target_freq = freq;

	return 0;
}
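The lookup above is wrapped in rcu_read_lock()/rcu_read_unlock(), which matches kernels where OPPs were RCU-protected. On later kernels devfreq_recommended_opp() instead returns a reference that must be released with dev_pm_opp_put(). A sketch of the same callback under that assumption, reusing the field names of the function above:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/pm_opp.h>

static int kbase_devfreq_target_put(struct device *dev,
				    unsigned long *target_freq, u32 flags)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;
	unsigned long freq = *target_freq;
	int err;

	opp = devfreq_recommended_opp(dev, &freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
		return PTR_ERR(opp);
	}
	/* Only the rounded frequency is needed; drop the OPP reference. */
	dev_pm_opp_put(opp);

	err = clk_set_rate(kbdev->clock, freq);
	if (err) {
		dev_err(dev, "Failed to set clock %lu (target %lu)\n",
				freq, *target_freq);
		return err;
	}

	*target_freq = freq;
	return 0;
}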
Example #4
int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
	int ret;
	struct dev_pm_opp *opp;

	if (!pfdev->regulator)
		return 0;

	ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
	if (ret)
		return ret;

	panfrost_devfreq_reset(pfdev);

	pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);

	opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
	dev_pm_opp_put(opp);

	pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
			&panfrost_devfreq_profile, "simple_ondemand", NULL);
	if (IS_ERR(pfdev->devfreq.devfreq)) {
		DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
		ret = PTR_ERR(pfdev->devfreq.devfreq);
		pfdev->devfreq.devfreq = NULL;
		return ret;
	}

	return 0;
}
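panfrost_devfreq_profile, referenced above, also needs a ->get_dev_status hook so simple_ondemand can derive a load from busy versus total time. A minimal sketch of such a hook; the busy_time/idle_time ktime counters in pfdev->devfreq are assumed bookkeeping, not taken from the driver.

#include <linux/devfreq.h>
#include <linux/ktime.h>

static int panfrost_devfreq_get_dev_status(struct device *dev,
					   struct devfreq_dev_status *status)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);

	status->current_frequency = pfdev->devfreq.cur_freq;
	/* Assumed counters accumulated by the job scheduler. */
	status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time);
	status->total_time = status->busy_time +
			     ktime_to_ns(pfdev->devfreq.idle_time);

	return 0;
}

simple_ondemand then scales the frequency roughly in proportion to busy_time/total_time on each polling interval.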
Example #5
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
				   u32 flags)
{
	struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
	struct dev_pm_opp *opp;
	unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
	unsigned long target_volt, target_rate;
	int err;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	target_rate = dev_pm_opp_get_freq(opp);
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	if (old_clk_rate == target_rate)
		return 0;

	/*
	 * If frequency scaling from low to high, adjust voltage first.
	 * If frequency scaling from high to low, adjust frequency first.
	 */
	if (old_clk_rate < target_rate) {
		err = regulator_set_voltage(pfdev->regulator, target_volt,
					    target_volt);
		if (err) {
			dev_err(dev, "Cannot set voltage %lu uV\n",
				target_volt);
			return err;
		}
	}

	err = clk_set_rate(pfdev->clock, target_rate);
	if (err) {
		dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
			err);
		regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
				      pfdev->devfreq.cur_volt);
		return err;
	}

	if (old_clk_rate > target_rate) {
		err = regulator_set_voltage(pfdev->regulator, target_volt,
					    target_volt);
		if (err)
			dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
	}

	pfdev->devfreq.cur_freq = target_rate;
	pfdev->devfreq.cur_volt = target_volt;

	return 0;
}
Example #6
static int exynos5_devfreq_isp_target(struct device *dev,
					unsigned long *target_freq,
					u32 flags)
{
	int ret = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
	struct devfreq_data_isp *data = platform_get_drvdata(pdev);
	struct devfreq *devfreq_isp = data->devfreq;
	struct opp *target_opp;
	int target_idx, old_idx;
	unsigned long target_volt;
	unsigned long old_freq;

	mutex_lock(&data->lock);

	rcu_read_lock();
	target_opp = devfreq_recommended_opp(dev, target_freq, flags);
	if (IS_ERR(target_opp)) {
		rcu_read_unlock();
		dev_err(dev, "DEVFREQ(ISP) : Invalid OPP to find\n");
		ret = PTR_ERR(target_opp);
		goto out;
	}

	*target_freq = opp_get_freq(target_opp);
	target_volt = opp_get_voltage(target_opp);
#ifdef CONFIG_EXYNOS_THERMAL
	target_volt = get_limit_voltage(target_volt, data->volt_offset);
#endif
	rcu_read_unlock();

	target_idx = exynos5_devfreq_get_idx(devfreq_isp_opp_list, data->max_state,
						*target_freq);
	old_idx = exynos5_devfreq_get_idx(devfreq_isp_opp_list, data->max_state,
						devfreq_isp->previous_freq);
	old_freq = devfreq_isp->previous_freq;

	if (target_idx < 0 ||
		old_idx < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (old_freq == *target_freq)
		goto out;

	pr_debug("ISP %lu ================> %lu\n", old_freq, *target_freq);

	if (old_freq < *target_freq) {
		if (data->isp_set_volt)
			data->isp_set_volt(data, target_volt, target_volt + VOLT_STEP, false);
		if (data->isp_set_freq)
			data->isp_set_freq(data, target_idx, old_idx);
	} else {
		if (data->isp_set_freq)
			data->isp_set_freq(data, target_idx, old_idx);
		if (data->isp_set_volt)
			data->isp_set_volt(data, target_volt, target_volt + VOLT_STEP, true);
	}
out:
	mutex_unlock(&data->lock);

	return ret;
}
Example #7
static int exynos5_devfreq_isp_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct devfreq_data_isp *data;
	struct exynos_devfreq_platdata *plat_data;
	struct opp *target_opp;
	unsigned long freq;
	unsigned long volt;

	data = kzalloc(sizeof(struct devfreq_data_isp), GFP_KERNEL);
	if (data == NULL) {
		pr_err("DEVFREQ(ISP) : Failed to allocate private data\n");
		ret = -ENOMEM;
		goto err_data;
	}

	exynos5433_devfreq_isp_init(data);

	exynos5_devfreq_isp_profile.max_state = data->max_state;
	exynos5_devfreq_isp_profile.freq_table = kzalloc(sizeof(int) * data->max_state, GFP_KERNEL);
	if (exynos5_devfreq_isp_profile.freq_table == NULL) {
		pr_err("DEVFREQ(ISP) : Failed to allocate freq table\n");
		ret = -ENOMEM;
		goto err_freqtable;
	}

	ret = exynos5_init_isp_table(&pdev->dev, data);
	if (ret)
		goto err_inittable;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);
	data->initial_freq = exynos5_devfreq_isp_profile.initial_freq;

	data->volt_offset = 0;
	isp_dev = data->dev = &pdev->dev;
	data->vdd_isp = regulator_get(NULL, "vdd_disp_cam0");

	if (IS_ERR_OR_NULL(data->vdd_isp)) {
		pr_err("DEVFREQ(ISP) : Failed to get regulator\n");
		ret = -ENODEV;
		goto err_inittable;
	}

	freq = DEVFREQ_INITIAL_FREQ;
	rcu_read_lock();
	target_opp = devfreq_recommended_opp(data->dev, &freq, 0);
	if (IS_ERR(target_opp)) {
		rcu_read_unlock();
		dev_err(data->dev, "DEVFREQ(ISP) : Invalid OPP to set voltage");
		ret = PTR_ERR(target_opp);
		goto err_opp;
	}
	volt = opp_get_voltage(target_opp);
#ifdef CONFIG_EXYNOS_THERMAL
	volt = get_limit_voltage(volt, data->volt_offset);
#endif
	rcu_read_unlock();

	if (data->isp_set_volt)
		data->isp_set_volt(data, volt, volt + VOLT_STEP, false);

	data->devfreq = devfreq_add_device(data->dev,
						&exynos5_devfreq_isp_profile,
						"simple_ondemand",
						&exynos5_devfreq_isp_governor_data);
	if (IS_ERR(data->devfreq)) {
		pr_err("DEVFREQ(ISP) : Failed to add devfreq device\n");
		ret = PTR_ERR(data->devfreq);
		data->devfreq = NULL;
		goto err_opp;
	}

	plat_data = data->dev->platform_data;

	data->devfreq->min_freq = plat_data->default_qos;
	data->devfreq->max_freq = exynos5_devfreq_isp_governor_data.cal_qos_max;

	register_reboot_notifier(&exynos5_isp_reboot_notifier);

#ifdef CONFIG_EXYNOS_THERMAL
	data->tmu_notifier.notifier_call = exynos5_devfreq_isp_tmu_notifier;
	exynos_tmu_add_notifier(&data->tmu_notifier);
#endif
	data->use_dvfs = true;

	return ret;
err_opp:
	regulator_put(data->vdd_isp);
err_inittable:
	devfreq_remove_device(data->devfreq);
	kfree(exynos5_devfreq_isp_profile.freq_table);
err_freqtable:
	kfree(data);
err_data:
	return ret;
}
static int exynos7_devfreq_disp_target(struct device *dev,
					unsigned long *target_freq,
					u32 flags)
{
	int ret = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
	struct devfreq_data_disp *data = platform_get_drvdata(pdev);
	struct devfreq *devfreq_disp = data->devfreq;
	struct opp *target_opp;
	int target_idx, old_idx;
	unsigned long target_volt;
	unsigned long old_freq;

	mutex_lock(&data->lock);

	rcu_read_lock();
	target_opp = devfreq_recommended_opp(dev, target_freq, flags);
	if (IS_ERR(target_opp)) {
		rcu_read_unlock();
		mutex_unlock(&data->lock);
		dev_err(dev, "DEVFREQ(DISP) : Invalid OPP to find\n");
		return PTR_ERR(target_opp);
	}

	*target_freq = opp_get_freq(target_opp);
	target_volt = opp_get_voltage(target_opp);
	rcu_read_unlock();

	target_idx = devfreq_get_opp_idx(devfreq_disp_opp_list, data->max_state,
						*target_freq);
	old_idx = devfreq_get_opp_idx(devfreq_disp_opp_list, data->max_state,
						devfreq_disp->previous_freq);
	old_freq = devfreq_disp->previous_freq;

	if (target_idx < 0 || old_idx < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (old_freq == *target_freq)
		goto out;

#ifdef CONFIG_EXYNOS_THERMAL
	target_volt = get_limit_voltage(target_volt, data->volt_offset, 0);
#endif

	pr_debug("DISP LV_%d(%lu) ================> LV_%d(%lu, volt: %lu)\n",
			old_idx, old_freq, target_idx, *target_freq, target_volt);

	exynos_ss_freq(ESS_FLAG_DISP, old_freq, ESS_FLAG_IN);
	if (old_freq < *target_freq) {
		if (data->disp_set_volt)
			data->disp_set_volt(data, target_volt, REGULATOR_MAX_MICROVOLT);
		if (data->disp_set_freq)
			data->disp_set_freq(data, target_idx, old_idx);
	} else {
		if (data->disp_set_freq)
			data->disp_set_freq(data, target_idx, old_idx);
		if (data->disp_set_volt)
			data->disp_set_volt(data, target_volt, REGULATOR_MAX_MICROVOLT);
	}
	exynos_ss_freq(ESS_FLAG_DISP, *target_freq, ESS_FLAG_OUT);
	data->cur_freq = *target_freq;
out:
	mutex_unlock(&data->lock);

	return ret;
}
Example #9
static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
			      u32 flags)
{
	int err = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device,
						    dev);
	struct busfreq_data *data = platform_get_drvdata(pdev);
	struct dev_pm_opp *opp;
	unsigned long freq;
	unsigned long old_freq = data->curr_oppinfo.rate;
	struct busfreq_opp_info	new_oppinfo;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, _freq, flags);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	new_oppinfo.rate = dev_pm_opp_get_freq(opp);
	new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();
	freq = new_oppinfo.rate;

	if (old_freq == freq)
		return 0;

	dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);

	mutex_lock(&data->lock);

	if (data->disabled)
		goto out;

	if (old_freq < freq)
		err = exynos4_bus_setvolt(data, &new_oppinfo,
					  &data->curr_oppinfo);
	if (err)
		goto out;

	if (old_freq != freq) {
		switch (data->type) {
		case TYPE_BUSF_EXYNOS4210:
			err = exynos4210_set_busclk(data, &new_oppinfo);
			break;
		case TYPE_BUSF_EXYNOS4x12:
			err = exynos4x12_set_busclk(data, &new_oppinfo);
			break;
		default:
			err = -EINVAL;
		}
	}
	if (err)
		goto out;

	if (old_freq > freq)
		err = exynos4_bus_setvolt(data, &new_oppinfo,
					  &data->curr_oppinfo);
	if (err)
		goto out;

	data->curr_oppinfo = new_oppinfo;
out:
	mutex_unlock(&data->lock);
	return err;
}
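The data->disabled flag checked above is how these bus drivers fence off scaling across system suspend: a PM notifier typically forces a safe OPP, sets the flag before suspend, and clears it on resume. A sketch of such a companion notifier; the pm_nb field is an assumption and the force-to-safe-OPP step is omitted here.

#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

static int busfreq_pm_notifier(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct busfreq_data *data = container_of(nb, struct busfreq_data, pm_nb);

	switch (event) {
	case PM_SUSPEND_PREPARE:
		mutex_lock(&data->lock);
		data->disabled = true;		/* block exynos4_bus_target() */
		mutex_unlock(&data->lock);
		return NOTIFY_OK;
	case PM_POST_SUSPEND:
		mutex_lock(&data->lock);
		data->disabled = false;		/* allow scaling again */
		mutex_unlock(&data->lock);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

Such a notifier would be registered with register_pm_notifier() during probe.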
static int exynos5_devfreq_int_target(struct device *dev,
					unsigned long *target_freq,
					u32 flags)
{
	int ret = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
	struct devfreq_data_int *data = platform_get_drvdata(pdev);
	struct devfreq *devfreq_int = data->devfreq;
	struct opp *target_opp;
	int target_idx, old_idx;
	unsigned long target_volt;
	unsigned long old_freq;

	mutex_lock(&data->lock);

	rcu_read_lock();
	target_opp = devfreq_recommended_opp(dev, target_freq, flags);
	if (IS_ERR(target_opp)) {
		rcu_read_unlock();
		dev_err(dev, "DEVFREQ(INT) : Invalid OPP to find\n");
		ret = PTR_ERR(target_opp);
		goto out;
	}

	*target_freq = opp_get_freq(target_opp);
	target_volt = opp_get_voltage(target_opp);
#ifdef CONFIG_EXYNOS_THERMAL
	target_volt = get_limit_voltage(target_volt, data->volt_offset);
#endif
	/* just want to save voltage before apply constraint with isp */
	data->target_volt = target_volt;
	if (target_volt < data->volt_constraint_isp)
		target_volt = data->volt_constraint_isp;
	rcu_read_unlock();

	target_idx = exynos5_devfreq_get_idx(devfreq_int_opp_list, data->max_state,
						*target_freq);
	old_idx = exynos5_devfreq_get_idx(devfreq_int_opp_list, data->max_state,
						devfreq_int->previous_freq);

	old_freq = devfreq_int->previous_freq;

	if (target_idx < 0 || old_idx < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (old_freq == *target_freq)
		goto out;

	pr_debug("INT %lu ===================> %lu\n", old_freq, *target_freq);

	if (old_freq < *target_freq) {
		if (data->int_set_volt)
			data->int_set_volt(data, target_volt, target_volt + VOLT_STEP);
		set_match_abb(ID_INT, data->int_asv_abb_table[target_idx]);
		if (data->int_set_freq)
			data->int_set_freq(data, target_idx, old_idx);
	} else {
		if (data->int_set_freq)
			data->int_set_freq(data, target_idx, old_idx);
		set_match_abb(ID_INT, data->int_asv_abb_table[target_idx]);
		if (data->int_set_volt)
			data->int_set_volt(data, target_volt, target_volt + VOLT_STEP);
	}
out:
	mutex_unlock(&data->lock);

	return ret;
}
static int exynos8890_devfreq_int_init_freq_table(struct device *dev,
						struct exynos_devfreq_data *data)
{
	u32 max_freq, min_freq;
	unsigned long tmp_max, tmp_min;
	struct dev_pm_opp *target_opp;
	u32 flags = 0;
	int i;

	max_freq = (u32)cal_dfs_get_max_freq(dvfs_int);
	if (!max_freq) {
		dev_err(dev, "failed get max frequency\n");
		return -EINVAL;
	}

	dev_info(dev, "max_freq: %uKhz, get_max_freq: %uKhz\n",
			data->max_freq, max_freq);

	if (max_freq < data->max_freq) {
		rcu_read_lock();
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND;
		tmp_max = (unsigned long)max_freq;
		target_opp = devfreq_recommended_opp(dev, &tmp_max, flags);
		if (IS_ERR(target_opp)) {
			rcu_read_unlock();
			dev_err(dev, "not found valid OPP for max_freq\n");
			return PTR_ERR(target_opp);
		}

		data->max_freq = dev_pm_opp_get_freq(target_opp);
		rcu_read_unlock();
	}

	min_freq = (u32)cal_dfs_get_min_freq(dvfs_int);
	if (!min_freq) {
		dev_err(dev, "failed get min frequency\n");
		return -EINVAL;
	}

	dev_info(dev, "min_freq: %uKhz, get_min_freq: %uKhz\n",
			data->min_freq, min_freq);

	if (min_freq > data->min_freq) {
		rcu_read_lock();
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND;
		tmp_min = (unsigned long)min_freq;
		target_opp = devfreq_recommended_opp(dev, &tmp_min, flags);
		if (IS_ERR(target_opp)) {
			rcu_read_unlock();
			dev_err(dev, "not found valid OPP for min_freq\n");
			return PTR_ERR(target_opp);
		}

		data->min_freq = dev_pm_opp_get_freq(target_opp);
		rcu_read_unlock();
	}

	dev_info(dev, "min_freq: %uKhz, max_freq: %uKhz\n",
			data->min_freq, data->max_freq);

	for (i = 0; i < data->max_state; i++) {
		if (data->opp_list[i].freq > data->max_freq ||
			data->opp_list[i].freq < data->min_freq)
			dev_pm_opp_disable(dev, (unsigned long)data->opp_list[i].freq);
	}

	return 0;
}
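After clamping min_freq and max_freq, the function above disables every OPP outside that window so the governor can never select one. The same walk can be expressed directly against the OPP table instead of a driver-private opp_list; a sketch, assuming the reference-counted OPP API and frequencies kept in Hz:

#include <linux/err.h>
#include <linux/pm_opp.h>

/* Hypothetical helper: disable every OPP outside [min_hz, max_hz]. */
static void devfreq_clamp_opps(struct device *dev,
			       unsigned long min_hz, unsigned long max_hz)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0;

	for (;;) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;			/* no more OPPs */
		dev_pm_opp_put(opp);

		if (freq < min_hz || freq > max_hz)
			dev_pm_opp_disable(dev, freq);

		freq++;				/* step past the OPP just visited */
	}
}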
static int
kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;
	unsigned long freq = 0;
	unsigned long voltage;
	int err;

	freq = *target_freq;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, &freq, flags);
	if (IS_ERR_OR_NULL(opp)) {
		rcu_read_unlock();
		dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
		return IS_ERR(opp) ? PTR_ERR(opp) : -ENODEV;
	}
	voltage = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	/*
	 * Only update if there is a change of frequency
	 */
	if (kbdev->current_freq == freq) {
		*target_freq = freq;
		return 0;
	}

#ifdef CONFIG_REGULATOR
	if (kbdev->regulator && kbdev->current_voltage != voltage
			&& kbdev->current_freq < freq) {
		err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
		if (err) {
			dev_err(dev, "Failed to increase voltage (%d)\n", err);
			return err;
		}
	}
#endif

	err = clk_set_rate(kbdev->clock, freq);
	if (err) {
		dev_err(dev, "Failed to set clock %lu (target %lu)\n",
				freq, *target_freq);
		return err;
	}

#ifdef CONFIG_REGULATOR
	if (kbdev->regulator && kbdev->current_voltage != voltage
			&& kbdev->current_freq > freq) {
		err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
		if (err) {
			dev_err(dev, "Failed to decrease voltage (%d)\n", err);
			return err;
		}
	}
#endif

	*target_freq = freq;
	kbdev->current_voltage = voltage;
	kbdev->current_freq = freq;

	kbase_pm_reset_dvfs_utilisation(kbdev);

	return err;
}
static int mali_kbase_devfreq_target(struct device *dev, unsigned long *_freq,
			      u32 flags)
{
	struct kbase_device *kbdev = (struct kbase_device *)dev->platform_data;
	unsigned long old_freq = kbdev->devfreq->previous_freq;
	struct opp *opp = NULL;
	unsigned long freq;
#if KBASE_HI3635_GPU_IRDROP_ISSUE
	struct kbase_pm_policy *cur_policy;
#endif /* KBASE_HI3635_GPU_IRDROP_ISSUE */

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, _freq, flags);
	if (IS_ERR(opp)) {
		printk("[mali-midgard]  Failed to get Operating Performance Point\n");
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	freq = opp_get_freq(opp);
	rcu_read_unlock();

	if (old_freq == freq)
		return 0;

#if KBASE_HI3635_GPU_IRDROP_ISSUE
	/* switch policy to always_on */
	if (old_freq <= KBASE_HI3635_GPU_TURBO_FREQ && freq > KBASE_HI3635_GPU_TURBO_FREQ) {
		sw_policy.freq = freq;
		strncpy(sw_policy.name, "always_on", strlen("always_on") + 1);
		schedule_work(&sw_policy.update);
		goto exit;
	}

	/* warn on work doesn't finish yet.*/
	cur_policy = kbase_pm_get_policy(kbdev);
	if (cur_policy == NULL || ((freq > KBASE_HI3635_GPU_TURBO_FREQ)
		&& strncmp(cur_policy->name, "always_on", strlen("always_on")))){
		WARN_ON(1);

		/* restore the freq */
		*_freq = old_freq;
		goto exit;
	}
#endif /* KBASE_HI3635_GPU_IRDROP_ISSUE */

	printk("[mali-midgard] dvfs call clk_set_rate, old_freq = %lu -----> new_freq = %lu \n", old_freq, freq);
	if (clk_set_rate((kbdev->clk), freq)) {
		printk("[mali-midgard]  Failed to set gpu freqency, [%lu->%lu]\n", old_freq, freq);
		return -ENODEV;
	}

#if KBASE_HI3635_GPU_IRDROP_ISSUE
	if (old_freq > KBASE_HI3635_GPU_TURBO_FREQ && freq <= KBASE_HI3635_GPU_TURBO_FREQ) {
		strncpy(sw_policy.name, "demand", strlen("demand") + 1);
		schedule_work(&sw_policy.update);
	}
#endif /* KBASE_HI3635_GPU_IRDROP_ISSUE */

exit:
	return 0;
}