Example #1
static void exynos_busfreq_timer(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct busfreq_data *data = container_of(delayed_work, struct busfreq_data,
			worker);
	struct opp *opp;
	unsigned int voltage;
	unsigned long currfreq;
	unsigned long newfreq;
	unsigned int index = 0;

	opp = data->monitor(data);

	if (bus_ctrl.opp_lock)
		opp = bus_ctrl.opp_lock;

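	/* Restart PPMU counting so the next sample covers the coming interval */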
	ppmu_start(data->dev);

	newfreq = opp_get_freq(opp);

	index = data->get_table_index(opp);

	mutex_lock(&busfreq_lock);

	if (opp == data->curr_opp || newfreq == 0 || data->use == false)
		goto out;

	currfreq = opp_get_freq(data->curr_opp);

	voltage = opp_get_voltage(opp);
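	/* Scaling up: raise the MIF and INT supplies before switching to the faster clock */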
	if (newfreq > currfreq) {
		regulator_set_voltage(data->vdd_mif, voltage,
				voltage + 25000);
		voltage = data->get_int_volt(index);
		regulator_set_voltage(data->vdd_int, voltage,
				voltage + 25000);
		/*if (data->busfreq_prepare)
			data->busfreq_prepare(index);*/
	}
	if (data->set_qos)
		data->set_qos(index);

	data->target(index);

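	/* Scaling down: drop the supplies only after data->target() has lowered the clock */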
	if (newfreq < currfreq) {
		/*if (data->busfreq_post)
			data->busfreq_post(index);*/
		regulator_set_voltage(data->vdd_mif, voltage,
				voltage + 25000);
		voltage = data->get_int_volt(index);
		regulator_set_voltage(data->vdd_int, voltage,
				voltage + 25000);
	}
	data->curr_opp = opp;

out:
	update_busfreq_stat(data, index);
	mutex_unlock(&busfreq_lock);
	queue_delayed_work(system_freezable_wq, &data->worker, data->sampling_rate);
}
static int mali_kbase_devfreq_target(struct device *dev, unsigned long *_freq,
			      u32 flags)
{
	struct kbase_device *kbdev = (struct kbase_device *)dev->platform_data;
	unsigned long old_freq = kbdev->devfreq->previous_freq;
	struct opp *opp = NULL;
	unsigned long freq;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, _freq, flags);
	if (IS_ERR(opp)) {
		printk("[mali-midgard]  Failed to get Operating Performance Point\n");
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	freq = opp_get_freq(opp);
	rcu_read_unlock();

	if (old_freq == freq)
		return 0;

	if (clk_set_rate((kbdev->clk), freq)) {
		printk("[mali-midgard]  Failed to set gpu freqency, [%lu->%lu]\n", old_freq, freq);
		return -ENODEV;
	}

	return 0;
}
static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
			      u32 flags)
{
	int err = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device,
						    dev);
	struct busfreq_data_int *data = platform_get_drvdata(pdev);
	struct opp *opp;
	unsigned long old_freq, freq;
	unsigned long volt;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, _freq, flags);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		dev_err(dev, "%s: Invalid OPP.\n", __func__);
		return PTR_ERR(opp);
	}

	freq = opp_get_freq(opp);
	volt = opp_get_voltage(opp);
	rcu_read_unlock();

	old_freq = data->curr_freq;

	if (old_freq == freq)
		return 0;

	dev_dbg(dev, "targeting %lukHz %luuV\n", freq, volt);

	mutex_lock(&data->lock);

	if (data->disabled)
		goto out;

	if (freq > exynos5_int_opp_table[0].clk)
		pm_qos_update_request(&data->int_req, freq * 16 / 1000);
	else
		pm_qos_update_request(&data->int_req, -1);

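	/* Voltage is raised before a frequency increase and lowered after a decrease */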
	if (old_freq < freq)
		err = exynos5_int_setvolt(data, volt);
	if (err)
		goto out;

	err = clk_set_rate(data->int_clk, freq * 1000);

	if (err)
		goto out;

	if (old_freq > freq)
		err = exynos5_int_setvolt(data, volt);
	if (err)
		goto out;

	data->curr_freq = freq;
out:
	mutex_unlock(&data->lock);
	return err;
}
unsigned int exynos4210_get_table_index(struct opp *opp)
{
	unsigned int index;

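	/* Returns LV_END when the frequency does not match any table entry */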
	for (index = LV_0; index < LV_END; index++)
		if (opp_get_freq(opp) == exynos4_busfreq_table[index].mem_clk)
			break;

	return index;
}
Example #5
void g3_display_read_fps(struct g3_display_data *data, struct devfreq_dev_status *stat)
{
	unsigned long fcount = msm_fb_read_frame_count();

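	/* Frames rendered since the previous poll; used below as the busy metric */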
	data->fps_data.fps = (fcount - data->fps_data.prev_fcount);
	data->fps_data.prev_fcount = fcount;

	stat->current_frequency = opp_get_freq(data->curr_opp);
	stat->total_time = msm_fb_read_frame_rate();
//	stat->total_time = g3_display_opp_table[g3_cur_level].freq;
	stat->busy_time = (data->fps_data.fps) * (1000 / g3_display_profile.polling_ms);

	/* trace("total_time=%lu, busy_time=%lu, util=%lu\n",
		stat->total_time, stat->busy_time, (stat->busy_time * 100 / stat->total_time));
	*/
}
struct opp *step_down(struct busfreq_data *data, int step)
{
	int i;
	struct opp *opp = data->curr_opp;
	unsigned long newfreq;

	if (data->min_opp == data->curr_opp)
		return data->curr_opp;

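	/* Walk down one OPP per step: the floor of (current freq - 1) is the next lower entry */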
	for (i = 0; i < step; i++) {
		newfreq = opp_get_freq(opp) - 1;
		opp = opp_find_freq_floor(data->dev, &newfreq);

		if (opp == data->min_opp)
			break;
	}

	return opp;
}
static struct opp __maybe_unused *step_up(struct busfreq_data *data, int step)
{
	int i;
	struct opp *opp = data->curr_opp;
	unsigned long newfreq;

	if (data->max_opp == data->curr_opp)
		return data->curr_opp;

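	/* Walk up one OPP per step: the ceiling of (current freq + 1) is the next higher entry */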
	for (i = 0; i < step; i++) {
		newfreq = opp_get_freq(opp) + 1;
		opp = opp_find_freq_ceil(data->dev, &newfreq);

		if (opp == data->max_opp)
			break;
	}

	return opp;
}
Example #8
static int exynos5_devfreq_isp_target(struct device *dev,
					unsigned long *target_freq,
					u32 flags)
{
	int ret = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
	struct devfreq_data_isp *data = platform_get_drvdata(pdev);
	struct devfreq *devfreq_isp = data->devfreq;
	struct opp *target_opp;
	int target_idx, old_idx;
	unsigned long target_volt;
	unsigned long old_freq;

	mutex_lock(&data->lock);

	rcu_read_lock();
	target_opp = devfreq_recommended_opp(dev, target_freq, flags);
	if (IS_ERR(target_opp)) {
		rcu_read_unlock();
		mutex_unlock(&data->lock);
		dev_err(dev, "DEVFREQ(ISP) : Invalid OPP to find\n");
		ret = PTR_ERR(target_opp);
		goto out;
	}

	*target_freq = opp_get_freq(target_opp);
	target_volt = opp_get_voltage(target_opp);
#ifdef CONFIG_EXYNOS_THERMAL
	target_volt = get_limit_voltage(target_volt, data->volt_offset);
#endif
	rcu_read_unlock();

	target_idx = exynos5_devfreq_get_idx(devfreq_isp_opp_list, data->max_state,
						*target_freq);
	old_idx = exynos5_devfreq_get_idx(devfreq_isp_opp_list, data->max_state,
						devfreq_isp->previous_freq);
	old_freq = devfreq_isp->previous_freq;

	if (target_idx < 0 || old_idx < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (old_freq == *target_freq)
		goto out;

	pr_debug("ISP %lu ================> %lu\n", old_freq, *target_freq);

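	/* Voltage before frequency when scaling up, frequency before voltage when scaling down */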
	if (old_freq < *target_freq) {
		if (data->isp_set_volt)
			data->isp_set_volt(data, target_volt, target_volt + VOLT_STEP, false);
		if (data->isp_set_freq)
			data->isp_set_freq(data, target_idx, old_idx);
	} else {
		if (data->isp_set_freq)
			data->isp_set_freq(data, target_idx, old_idx);
		if (data->isp_set_volt)
			data->isp_set_volt(data, target_volt, target_volt + VOLT_STEP, true);
	}
out:
	mutex_unlock(&data->lock);

	return ret;
}
Example #9
int exynos5250_init(struct device *dev, struct busfreq_data *data)
{
	unsigned int i, tmp;
	unsigned long maxfreq = ULONG_MAX;
	unsigned long minfreq = 0;
	unsigned long cdrexfreq;
	unsigned long lrbusfreq;
	struct clk *clk;
	int ret;

	/* Enable pause function for DREX2 DVFS */
	drex2_pause_ctrl = __raw_readl(EXYNOS5_DREX2_PAUSE);
	drex2_pause_ctrl |= DMC_PAUSE_ENABLE;
	__raw_writel(drex2_pause_ctrl, EXYNOS5_DREX2_PAUSE);

	clk = clk_get(NULL, "mclk_cdrex");
	if (IS_ERR(clk)) {
		dev_err(dev, "Fail to get mclk_cdrex clock");
		ret = PTR_ERR(clk);
		return ret;
	}
	cdrexfreq = clk_get_rate(clk) / 1000;
	clk_put(clk);

	clk = clk_get(NULL, "aclk_266");
	if (IS_ERR(clk)) {
		dev_err(dev, "Fail to get aclk_266 clock");
		ret = PTR_ERR(clk);
		return ret;
	}
	lrbusfreq = clk_get_rate(clk) / 1000;
	clk_put(clk);

	if (cdrexfreq == 800000) {
		clkdiv_cdrex = clkdiv_cdrex_for800;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for800;
		exynos5_mif_volt = exynos5_mif_volt_for800;
	} else if (cdrexfreq == 666857) {
		clkdiv_cdrex = clkdiv_cdrex_for667;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for667;
		exynos5_mif_volt = exynos5_mif_volt_for667;
	} else if (cdrexfreq == 533000) {
		clkdiv_cdrex = clkdiv_cdrex_for533;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for533;
		exynos5_mif_volt = exynos5_mif_volt_for533;
	} else if (cdrexfreq == 400000) {
		clkdiv_cdrex = clkdiv_cdrex_for400;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for400;
		exynos5_mif_volt = exynos5_mif_volt_for400;
	} else {
		dev_err(dev, "Don't support cdrex table\n");
		return -EINVAL;
	}

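	/* Pre-compute the LEX/R0X/R1X/CDREX divider register values for every DVFS level */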
	tmp = __raw_readl(EXYNOS5_CLKDIV_LEX);

	for (i = LV_0; i < LV_INT_END; i++) {
		tmp &= ~(EXYNOS5_CLKDIV_LEX_ATCLK_LEX_MASK | EXYNOS5_CLKDIV_LEX_PCLK_LEX_MASK);

		tmp |= ((clkdiv_lex[i][0] << EXYNOS5_CLKDIV_LEX_ATCLK_LEX_SHIFT) |
			(clkdiv_lex[i][1] << EXYNOS5_CLKDIV_LEX_PCLK_LEX_SHIFT));

		data->lex_divtable[i] = tmp;
	}

	tmp = __raw_readl(EXYNOS5_CLKDIV_R0X);

	for (i = LV_0; i < LV_INT_END; i++) {
		tmp &= ~EXYNOS5_CLKDIV_R0X_PCLK_R0X_MASK;

		tmp |= (clkdiv_r0x[i][0] << EXYNOS5_CLKDIV_R0X_PCLK_R0X_SHIFT);

		data->r0x_divtable[i] = tmp;
	}

	tmp = __raw_readl(EXYNOS5_CLKDIV_R1X);

	for (i = LV_0; i < LV_INT_END; i++) {
		tmp &= ~EXYNOS5_CLKDIV_R1X_PCLK_R1X_MASK;

		tmp |= (clkdiv_r1x[i][0] << EXYNOS5_CLKDIV_R1X_PCLK_R1X_SHIFT);

		data->r1x_divtable[i] = tmp;
	}

	tmp = __raw_readl(EXYNOS5_CLKDIV_CDREX);

	if (samsung_rev() < EXYNOS5250_REV_1_0) {
		for (i = LV_0; i < LV_MIF_END; i++) {
			tmp &= ~(EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_MASK |
				 EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_CLK400_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_C2C200_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_MASK);

			tmp |= ((clkdiv_cdrex[i][0] << EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_SHIFT) |
				(clkdiv_cdrex[i][1] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_SHIFT) |
				(clkdiv_cdrex[i][2] << EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][3] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][4] << EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][5] << EXYNOS5_CLKDIV_CDREX_ACLK_CLK400_SHIFT) |
				(clkdiv_cdrex[i][6] << EXYNOS5_CLKDIV_CDREX_ACLK_C2C200_SHIFT) |
				(clkdiv_cdrex[i][8] << EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_SHIFT));

			data->cdrex_divtable[i] = tmp;
		}
	} else {
		for (i = LV_0; i < LV_MIF_END; i++) {
			tmp &= ~(EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_MASK |
				 EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_MASK);

			tmp |= ((clkdiv_cdrex[i][0] << EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_SHIFT) |
				(clkdiv_cdrex[i][1] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_SHIFT) |
				(clkdiv_cdrex[i][2] << EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][3] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][4] << EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][8] << EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_SHIFT));

			data->cdrex_divtable[i] = tmp;
		}
	}

	if (samsung_rev() < EXYNOS5250_REV_1_0) {
		tmp = __raw_readl(EXYNOS5_CLKDIV_CDREX2);

		for (i = LV_0; i < LV_MIF_END; i++) {
			tmp &= ~EXYNOS5_CLKDIV_CDREX2_MCLK_EFPHY_MASK;

			tmp |= clkdiv_cdrex[i][7] << EXYNOS5_CLKDIV_CDREX2_MCLK_EFPHY_SHIFT;

			data->cdrex2_divtable[i] = tmp;
		}
	}

	exynos5250_set_bus_volt();

	data->dev[PPMU_MIF] = dev;
	data->dev[PPMU_INT] = &busfreq_for_int;

	for (i = LV_0; i < LV_MIF_END; i++) {
		ret = opp_add(data->dev[PPMU_MIF], exynos5_busfreq_table_mif[i].mem_clk,
				exynos5_busfreq_table_mif[i].volt);
		if (ret) {
			dev_err(dev, "Fail to add opp entries.\n");
			return ret;
		}
	}

#if defined(CONFIG_DP_60HZ_P11) || defined(CONFIG_DP_60HZ_P10)
	if (cdrexfreq == 666857) {
		opp_disable(data->dev[PPMU_MIF], 334000);
		opp_disable(data->dev[PPMU_MIF], 110000);
	} else if (cdrexfreq == 533000) {
		opp_disable(data->dev[PPMU_MIF], 267000);
		opp_disable(data->dev[PPMU_MIF], 107000);
	} else if (cdrexfreq == 400000) {
		opp_disable(data->dev[PPMU_MIF], 267000);
		opp_disable(data->dev[PPMU_MIF], 100000);
	}
#endif

	for (i = LV_0; i < LV_INT_END; i++) {
		ret = opp_add(data->dev[PPMU_INT], exynos5_busfreq_table_int[i].mem_clk,
				exynos5_busfreq_table_int[i].volt);
		if (ret) {
			dev_err(dev, "Fail to add opp entries.\n");
			return ret;
		}
	}

	data->target = exynos5250_target;
	data->get_table_index = exynos5250_get_table_index;
	data->monitor = exynos5250_monitor;
	data->busfreq_suspend = exynos5250_suspend;
	data->busfreq_resume = exynos5250_resume;
	data->sampling_rate = usecs_to_jiffies(100000);

	data->table[PPMU_MIF] = exynos5_busfreq_table_mif;
	data->table[PPMU_INT] = exynos5_busfreq_table_int;

	/* Find max frequency for mif */
	data->max_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_floor(data->dev[PPMU_MIF], &maxfreq));
	data->min_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &minfreq));
	data->curr_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &cdrexfreq));
	/* Find max frequency for int */
	maxfreq = ULONG_MAX;
	minfreq = 0;
	data->max_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_floor(data->dev[PPMU_INT], &maxfreq));
	data->min_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &minfreq));
	data->curr_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &lrbusfreq));

	data->vdd_reg[PPMU_INT] = regulator_get(NULL, "vdd_int");
	if (IS_ERR(data->vdd_reg[PPMU_INT])) {
		pr_err("failed to get resource %s\n", "vdd_int");
		return -ENODEV;
	}

	data->vdd_reg[PPMU_MIF] = regulator_get(NULL, "vdd_mif");
	if (IS_ERR(data->vdd_reg[PPMU_MIF])) {
		pr_err("failed to get resource %s\n", "vdd_mif");
		regulator_put(data->vdd_reg[PPMU_INT]);
		return -ENODEV;
	}

	data->busfreq_early_suspend_handler.suspend = &busfreq_early_suspend;
	data->busfreq_early_suspend_handler.resume = &busfreq_late_resume;

	/* Request min 300MHz for MIF and 150MHz for INT */
	dev_lock(dev, dev, 300150);

	register_early_suspend(&data->busfreq_early_suspend_handler);

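	/* Configure the adaptive body bias (ABB) setting for the INT domain */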
	tmp = __raw_readl(EXYNOS5_ABBG_INT_CONTROL);
	tmp &= ~(0x1f | (1 << 31) | (1 << 7));
	tmp |= ((8 + INT_RBB) | (1 << 31) | (1 << 7));
	__raw_writel(tmp, EXYNOS5_ABBG_INT_CONTROL);

	return 0;
}
int exynos5250_init(struct device *dev, struct busfreq_data *data)
{
	unsigned int i, tmp;
	unsigned long maxfreq = ULONG_MAX;
	unsigned long minfreq = 0;
	unsigned long cdrexfreq;
	unsigned long lrbusfreq;
	struct clk *clk;
	int ret;

	/* Enable pause function for DREX2 DVFS */
	dmc_pause_ctrl = __raw_readl(EXYNOS5_DMC_PAUSE_CTRL);
	dmc_pause_ctrl |= DMC_PAUSE_ENABLE;
	__raw_writel(dmc_pause_ctrl, EXYNOS5_DMC_PAUSE_CTRL);

	clk = clk_get(NULL, "mout_cdrex");
	if (IS_ERR(clk)) {
		dev_err(dev, "Fail to get mclk_cdrex clock");
		ret = PTR_ERR(clk);
		return ret;
	}
	cdrexfreq = clk_get_rate(clk) / 1000;
	clk_put(clk);

	clk = clk_get(NULL, "aclk_266");
	if (IS_ERR(clk)) {
		dev_err(dev, "Fail to get aclk_266 clock");
		ret = PTR_ERR(clk);
		return ret;
	}
	lrbusfreq = clk_get_rate(clk) / 1000;
	clk_put(clk);

	if (cdrexfreq == 800000) {
		clkdiv_cdrex = clkdiv_cdrex_for800;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for800;
		exynos5_mif_volt = exynos5_mif_volt_for800;
	} else if (cdrexfreq == 666857) {
		clkdiv_cdrex = clkdiv_cdrex_for667;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for667;
		exynos5_mif_volt = exynos5_mif_volt_for667;
	} else if (cdrexfreq == 533000) {
		clkdiv_cdrex = clkdiv_cdrex_for533;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for533;
		exynos5_mif_volt = exynos5_mif_volt_for533;
	} else if (cdrexfreq == 400000) {
		clkdiv_cdrex = clkdiv_cdrex_for400;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for400;
		exynos5_mif_volt = exynos5_mif_volt_for400;
	} else {
		dev_err(dev, "Don't support cdrex table\n");
		return -EINVAL;
	}

	exynos5250_set_bus_volt();

	data->dev[PPMU_MIF] = dev;
	data->dev[PPMU_INT] = &busfreq_for_int;

	for (i = LV_0; i < LV_MIF_END; i++) {
		ret = opp_add(data->dev[PPMU_MIF], exynos5_busfreq_table_mif[i].mem_clk,
				exynos5_busfreq_table_mif[i].volt);
		if (ret) {
			dev_err(dev, "Fail to add opp entries.\n");
			return ret;
		}
	}

	opp_disable(data->dev[PPMU_MIF], 107000);

	for (i = LV_0; i < LV_INT_END; i++) {
		ret = opp_add(data->dev[PPMU_INT], exynos5_busfreq_table_int[i].mem_clk,
				exynos5_busfreq_table_int[i].volt);
		if (ret) {
			dev_err(dev, "Fail to add opp entries.\n");
			return ret;
		}
	}

	data->target = exynos5250_target;
	data->get_table_index = exynos5250_get_table_index;
	data->monitor = exynos5250_monitor;
	data->busfreq_suspend = exynos5250_suspend;
	data->busfreq_resume = exynos5250_resume;
	data->sampling_rate = usecs_to_jiffies(100000);

	data->table[PPMU_MIF] = exynos5_busfreq_table_mif;
	data->table[PPMU_INT] = exynos5_busfreq_table_int;

	/* Find max frequency for mif */
	data->max_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_floor(data->dev[PPMU_MIF], &maxfreq));
	data->min_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &minfreq));
	data->curr_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &cdrexfreq));
	/* Find max frequency for int */
	maxfreq = ULONG_MAX;
	minfreq = 0;
	data->max_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_floor(data->dev[PPMU_INT], &maxfreq));
	data->min_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &minfreq));
	data->curr_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &lrbusfreq));

	data->vdd_reg[PPMU_INT] = regulator_get(NULL, "vdd_int");
	if (IS_ERR(data->vdd_reg[PPMU_INT])) {
		pr_err("failed to get resource %s\n", "vdd_int");
		return -ENODEV;
	}

	data->vdd_reg[PPMU_MIF] = regulator_get(NULL, "vdd_mif");
	if (IS_ERR(data->vdd_reg[PPMU_MIF])) {
		pr_err("failed to get resource %s\n", "vdd_mif");
		regulator_put(data->vdd_reg[PPMU_INT]);
		return -ENODEV;
	}

	data->busfreq_early_suspend_handler.suspend = &busfreq_early_suspend;
	data->busfreq_early_suspend_handler.resume = &busfreq_late_resume;

	/* Request min 300MHz */
	dev_lock(dev, dev, 300000);

	register_early_suspend(&data->busfreq_early_suspend_handler);

	tmp = __raw_readl(EXYNOS5_ABBG_INT_CONTROL);
	tmp &= ~(0x1f | (1 << 31) | (1 << 7));
	tmp |= ((8 + INT_RBB) | (1 << 31) | (1 << 7));
	__raw_writel(tmp, EXYNOS5_ABBG_INT_CONTROL);

	return 0;
}
static int exynos7_devfreq_disp_target(struct device *dev,
					unsigned long *target_freq,
					u32 flags)
{
	int ret = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
	struct devfreq_data_disp *data = platform_get_drvdata(pdev);
	struct devfreq *devfreq_disp = data->devfreq;
	struct opp *target_opp;
	int target_idx, old_idx;
	unsigned long target_volt;
	unsigned long old_freq;

	mutex_lock(&data->lock);

	rcu_read_lock();
	target_opp = devfreq_recommended_opp(dev, target_freq, flags);
	if (IS_ERR(target_opp)) {
		rcu_read_unlock();
		mutex_unlock(&data->lock);
		dev_err(dev, "DEVFREQ(DISP) : Invalid OPP to find\n");
		return PTR_ERR(target_opp);
	}

	*target_freq = opp_get_freq(target_opp);
	target_volt = opp_get_voltage(target_opp);
	rcu_read_unlock();

	target_idx = devfreq_get_opp_idx(devfreq_disp_opp_list, data->max_state,
						*target_freq);
	old_idx = devfreq_get_opp_idx(devfreq_disp_opp_list, data->max_state,
						devfreq_disp->previous_freq);
	old_freq = devfreq_disp->previous_freq;

	if (target_idx < 0 || old_idx < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (old_freq == *target_freq)
		goto out;

#ifdef CONFIG_EXYNOS_THERMAL
	target_volt = get_limit_voltage(target_volt, data->volt_offset, 0);
#endif

	pr_debug("DISP LV_%d(%lu) ================> LV_%d(%lu, volt: %lu)\n",
			old_idx, old_freq, target_idx, *target_freq, target_volt);

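	/* Log the transition, then order the voltage and frequency changes by scaling direction */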
	exynos_ss_freq(ESS_FLAG_DISP, old_freq, ESS_FLAG_IN);
	if (old_freq < *target_freq) {
		if (data->disp_set_volt)
			data->disp_set_volt(data, target_volt, REGULATOR_MAX_MICROVOLT);
		if (data->disp_set_freq)
			data->disp_set_freq(data, target_idx, old_idx);
	} else {
		if (data->disp_set_freq)
			data->disp_set_freq(data, target_idx, old_idx);
		if (data->disp_set_volt)
			data->disp_set_volt(data, target_volt, REGULATOR_MAX_MICROVOLT);
	}
	exynos_ss_freq(ESS_FLAG_DISP, *target_freq, ESS_FLAG_OUT);
	data->cur_freq = *target_freq;
out:
	mutex_unlock(&data->lock);

	return ret;
}
Example #12
static int exynos_buspm_notifier_event(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	struct busfreq_data *data = container_of(this, struct busfreq_data,
			exynos_buspm_notifier);

	unsigned long voltage = opp_get_voltage(data->max_opp);
	unsigned long freq = opp_get_freq(data->max_opp);
	unsigned int index = 0;

	switch (event) {
	case PM_SUSPEND_PREPARE:
		mutex_lock(&busfreq_lock);
		data->use = false;
		regulator_set_voltage(data->vdd_mif, voltage, voltage + 25000);
		voltage = data->get_int_volt(freq);
		regulator_set_voltage(data->vdd_int, voltage, voltage + 25000);
		index = data->get_table_index(data->max_opp);
		if (data->busfreq_prepare)
			data->busfreq_prepare(index);
		data->target(index);
		data->curr_opp = data->max_opp;
		mutex_unlock(&busfreq_lock);
		return NOTIFY_OK;
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		/* Re-enable bus scaling once resume has completed */
		mutex_lock(&busfreq_lock);
		data->use = true;
		mutex_unlock(&busfreq_lock);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
static int exynos5_devfreq_int_target(struct device *dev,
					unsigned long *target_freq,
					u32 flags)
{
	int ret = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
	struct devfreq_data_int *data = platform_get_drvdata(pdev);
	struct devfreq *devfreq_int = data->devfreq;
	struct opp *target_opp;
	int target_idx, old_idx;
	unsigned long target_volt;
	unsigned long old_freq;

	mutex_lock(&data->lock);

	rcu_read_lock();
	target_opp = devfreq_recommended_opp(dev, target_freq, flags);
	if (IS_ERR(target_opp)) {
		rcu_read_unlock();
		mutex_unlock(&data->lock);
		dev_err(dev, "DEVFREQ(INT) : Invalid OPP to find\n");
		ret = PTR_ERR(target_opp);
		goto out;
	}

	*target_freq = opp_get_freq(target_opp);
	target_volt = opp_get_voltage(target_opp);
#ifdef CONFIG_EXYNOS_THERMAL
	target_volt = get_limit_voltage(target_volt, data->volt_offset);
#endif
	/* just want to save voltage before apply constraint with isp */
	data->target_volt = target_volt;
	if (target_volt < data->volt_constraint_isp)
		target_volt = data->volt_constraint_isp;
	rcu_read_unlock();

	target_idx = exynos5_devfreq_get_idx(devfreq_int_opp_list, data->max_state,
						*target_freq);
	old_idx = exynos5_devfreq_get_idx(devfreq_int_opp_list, data->max_state,
						devfreq_int->previous_freq);

	old_freq = devfreq_int->previous_freq;

	if (target_idx < 0 || old_idx < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (old_freq == *target_freq)
		goto out;

	pr_debug("INT %lu ===================> %lu\n", old_freq, *target_freq);

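	/* The ABB setting is updated between the voltage and frequency changes in both directions */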
	if (old_freq < *target_freq) {
		if (data->int_set_volt)
			data->int_set_volt(data, target_volt, target_volt + VOLT_STEP);
		set_match_abb(ID_INT, data->int_asv_abb_table[target_idx]);
		if (data->int_set_freq)
			data->int_set_freq(data, target_idx, old_idx);
	} else {
		if (data->int_set_freq)
			data->int_set_freq(data, target_idx, old_idx);
		set_match_abb(ID_INT, data->int_asv_abb_table[target_idx]);
		if (data->int_set_volt)
			data->int_set_volt(data, target_volt, target_volt + VOLT_STEP);
	}
out:
	mutex_unlock(&data->lock);

	return ret;
}
static int mali_kbase_devfreq_target(struct device *dev, unsigned long *_freq,
			      u32 flags)
{
	struct kbase_device *kbdev = (struct kbase_device *)dev->platform_data;
	unsigned long old_freq = kbdev->devfreq->previous_freq;
	struct opp *opp = NULL;
	unsigned long freq;
#if KBASE_HI3635_GPU_IRDROP_ISSUE
	struct kbase_pm_policy *cur_policy;
#endif /* KBASE_HI3635_GPU_IRDROP_ISSUE */

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, _freq, flags);
	if (IS_ERR(opp)) {
		printk("[mali-midgard]  Failed to get Operating Performance Point\n");
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	freq = opp_get_freq(opp);
	rcu_read_unlock();

	if (old_freq == freq)
		return 0;

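	/* IR-drop workaround: frequencies above the turbo point require the always_on power policy */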
#if KBASE_HI3635_GPU_IRDROP_ISSUE
	/* switch policy to always_on */
	if (old_freq <= KBASE_HI3635_GPU_TURBO_FREQ && freq > KBASE_HI3635_GPU_TURBO_FREQ) {
		sw_policy.freq = freq;
		strncpy(sw_policy.name, "always_on", strlen("always_on") + 1);
		schedule_work(&sw_policy.update);
		goto exit;
	}

	/* warn on work doesn't finish yet.*/
	cur_policy = kbase_pm_get_policy(kbdev);
	if (cur_policy == NULL || ((freq > KBASE_HI3635_GPU_TURBO_FREQ)
		&& strncmp(cur_policy->name, "always_on", strlen("always_on")))){
		WARN_ON(1);

		/* restore the freq */
		*_freq = old_freq;
		goto exit;
	}
#endif /* KBASE_HI3635_GPU_IRDROP_ISSUE */

	printk("[mali-midgard] dvfs call clk_set_rate, old_freq = %lu -----> new_freq = %lu \n", old_freq, freq);
	if (clk_set_rate((kbdev->clk), freq)) {
		printk("[mali-midgard]  Failed to set gpu freqency, [%lu->%lu]\n", old_freq, freq);
		return -ENODEV;
	}

#if KBASE_HI3635_GPU_IRDROP_ISSUE
	if (old_freq > KBASE_HI3635_GPU_TURBO_FREQ && freq <= KBASE_HI3635_GPU_TURBO_FREQ) {
		strncpy(sw_policy.name, "demand", strlen("demand") + 1);
		schedule_work(&sw_policy.update);
	}
#endif /* KBASE_HI3635_GPU_IRDROP_ISSUE */

exit:
	return 0;
}