Esempio n. 1
0
/*
 * df_rgx_init_available_freq_table - register every available GFX frequency
 * with the OPP library for @dev.
 *
 * TNG A0 silicon exposes NUMBER_OF_LEVELS states; later (B0) silicon exposes
 * NUMBER_OF_LEVELS_B0.  Both use the same frequency table and a single fixed
 * graphics voltage.  Return values of opp_add() are intentionally ignored,
 * matching the original code.
 */
void df_rgx_init_available_freq_table(struct device *dev)
{
	int n_levels = is_tng_a0 ? NUMBER_OF_LEVELS : NUMBER_OF_LEVELS_B0;
	int i;

	for (i = 0; i < n_levels; i++)
		opp_add(dev, a_available_state_freq[i].freq, voltage_gfx);
}
Esempio n. 2
0
/*
 * exynos5_init_isp_table() - build and register the ISP devfreq OPP table.
 * @dev:  device the OPP entries are registered against
 * @data: per-driver state; data->max_state bounds the table walk
 *
 * For each state, prefer the ASV-matched voltage; fall back to the static
 * table voltage when ASV has no match (get_match_volt() returned 0).
 *
 * Returns 0 on success or the (negative) error from opp_add().
 *
 * Fix vs. original: @ret was declared unsigned int, silently laundering
 * opp_add()'s negative errno through an unsigned type before returning it
 * from an int-returning function.  It is now a plain int.
 */
static int exynos5_init_isp_table(struct device *dev, struct devfreq_data_isp *data)
{
	unsigned int i;
	unsigned int freq;
	unsigned int volt;
	int ret;

	for (i = 0; i < data->max_state; ++i) {
		freq = devfreq_isp_opp_list[i].freq;
		volt = get_match_volt(ID_ISP, freq);

		/* ASV miss: use the static table voltage instead. */
		if (!volt)
			volt = devfreq_isp_opp_list[i].volt;

		exynos5_devfreq_isp_profile.freq_table[i] = freq;

		ret = opp_add(dev, freq, volt);
		if (ret) {
			pr_err("DEVFREQ(ISP) : Failed to add opp entries %uKhz, %uV\n", freq, volt);
			return ret;
		}
		pr_info("DEVFREQ(ISP) : %uKhz, %uV\n", freq, volt);
	}

	return 0;
}
/*
 * exynos7_init_disp_table() - build and register the DISP devfreq OPP table.
 * @dev:  device the OPP entries are registered against
 * @data: per-driver state; data->max_state bounds the table walk
 *
 * Also writes the resolved voltage back into devfreq_disp_opp_list[] and
 * caches the voltage of the highest available frequency in @data.
 *
 * Returns 0 on success or the (negative) error from opp_add().
 *
 * Fix vs. original: @ret was declared unsigned int but carries opp_add()'s
 * negative errno; it is now a plain int.
 */
static int exynos7_init_disp_table(struct device *dev, struct devfreq_data_disp *data)
{
	unsigned int i;
	unsigned int freq;
	unsigned int volt;
	int ret;

	for (i = 0; i < data->max_state; ++i) {
		freq = devfreq_disp_opp_list[i].freq;
		/*
		 * NOTE(review): this queries the ASV table with ID_ISP even
		 * though this is the DISP table; it looks copy-pasted from the
		 * ISP init path.  Confirm whether ID_DISP was intended.
		 */
		volt = get_match_volt(ID_ISP, freq);

		/* ASV miss: use the static table voltage instead. */
		if (!volt)
			volt = devfreq_disp_opp_list[i].volt;

		devfreq_disp_opp_list[i].volt = volt;
		exynos7_devfreq_disp_profile.freq_table[i] = freq;

		ret = opp_add(dev, freq, volt);
		if (ret) {
			pr_err("DEVFREQ(DISP) : Failed to add opp entries %uKhz, %uuV\n", freq, volt);
			return ret;
		}
		pr_info("DEVFREQ(DISP) : %7uKhz, %7uuV\n", freq, volt);
	}

	/*
	 * The first two table entries are registered but immediately disabled
	 * — presumably reserved frequencies; confirm against the table layout.
	 */
	opp_disable(dev, devfreq_disp_opp_list[0].freq);
	opp_disable(dev, devfreq_disp_opp_list[1].freq);

	data->volt_of_avail_max_freq = get_volt_of_avail_max_freq(dev);
	pr_info("DEVFREQ(DISP) : voltage of available max freq : %7uuV\n",
			data->volt_of_avail_max_freq);

	return 0;
}
static int exynos5_init_int_table(struct device *dev,
				struct devfreq_data_int *data)
{
	unsigned int i;
	unsigned int ret;
	unsigned int freq;
	unsigned int volt;

	exynos5_devfreq_int_profile.max_state = data->max_state;
	data->int_asv_abb_table = kzalloc(sizeof(int) * data->max_state, GFP_KERNEL);

	for (i = 0; i < data->max_state; ++i) {
		freq = devfreq_int_opp_list[i].freq;
		volt = get_match_volt(ID_INT, freq);
		if (!volt)
			volt = devfreq_int_opp_list[i].volt;

		exynos5_devfreq_int_profile.freq_table[i] = freq;
		devfreq_int_opp_list[i].volt = volt;

		ret = opp_add(dev, freq, volt);
		if (ret) {
			pr_err("DEVFREQ(INT) : Failed to add opp entries %uKhz, %uV\n", freq, volt);
			return ret;
		} else {
			pr_info("DEVFREQ(INT) : %uKhz, %uV\n", freq, volt);
		}

		data->int_asv_abb_table[i] = get_match_abb(ID_INT, freq);

		pr_info("DEVFREQ(INT) : %uKhz, ABB %u\n", freq, data->int_asv_abb_table[i]);
	}

	return 0;
}
/*
 * exynos5250_init_int_tables - pick per-ASV-group INT voltages and register
 * the corresponding OPP entries.
 *
 * The ASV group comes from exynos_result_of_asv; the sentinel 0xff falls
 * back to group 0.  NZVPU lots use a fixed 1.025 V for every level.
 *
 * NOTE(review): voltages are written for levels LV_0..LV_4 but OPPs are only
 * registered for LV_0..LV_2 — confirm this asymmetry is intentional.
 *
 * Returns 0 on success or the error from opp_add().
 */
static int exynos5250_init_int_tables(struct busfreq_data_int *data)
{
	int lv;
	int err = 0;

	asv_group_index = exynos_result_of_asv;
	if (asv_group_index == 0xff)
		asv_group_index = 0;

	for (lv = LV_0; lv < LV_4; lv++) {
		if (exynos_lot_is_nzvpu)
			exynos5_int_opp_table[lv].volt = 1025000;
		else if (exynos_lot_id)
			exynos5_int_opp_table[lv].volt =
				exynos5_int_volt_orig[asv_group_index][lv];
		else
			exynos5_int_opp_table[lv].volt =
				exynos5_int_volt[asv_group_index][lv];
	}

	printk(KERN_INFO "VDD_INT Voltage table set with %d Group\n",
				asv_group_index);

	for (lv = LV_0; lv < LV_2; lv++) {
		err = opp_add(data->dev, exynos5_int_opp_table[lv].clk,
				exynos5_int_opp_table[lv].volt);
		if (err) {
			dev_err(data->dev, "Cannot add opp entries.\n");
			return err;
		}
	}

	return 0;
}
/*
 * df_rgx_init_available_freq_table - register one GFX OPP per SKU level.
 *
 * The number of states comes from sku_levels(); every entry shares the
 * single fixed graphics voltage.  opp_add() errors are ignored, as in the
 * original code.
 */
void df_rgx_init_available_freq_table(struct device *dev)
{
	const int n_states = sku_levels();
	int level;

	for (level = 0; level < n_states; level++)
		opp_add(dev, a_available_state_freq[level].freq, voltage_gfx);
}
/*
 * exynos5250_init_int_tables - register every INT bus-frequency level
 * (LV_0 .. _LV_END-1) with the OPP library.
 *
 * Returns 0 on success or the first error from opp_add().
 */
static int exynos5250_init_int_tables(struct busfreq_data_int *data)
{
	int lv;

	for (lv = LV_0; lv < _LV_END; lv++) {
		int err = opp_add(data->dev, exynos5_int_opp_table[lv].clk,
				  exynos5_int_opp_table[lv].volt);

		if (err) {
			dev_err(data->dev, "Cannot add opp entries.\n");
			return err;
		}
	}

	return 0;
}
Esempio n. 8
0
/*
 * vexpress_init_opp_table() - register the SPC-provided CPU frequency table
 * as OPP entries for @cpu_dev's cluster.
 *
 * Returns 0 on success, -EINVAL if the SPC returned no table, or the error
 * from opp_add().
 *
 * Fixes vs. original: the `while (++i < count)` with i = -1 is rewritten as
 * a plain for loop; the pr_err() message gained its missing newline; the
 * dev_warn() specifier for the u32 frequency product is now %u (it was %d,
 * a format/argument mismatch).
 */
static int vexpress_init_opp_table(struct device *cpu_dev)
{
	int i, count, cluster = cpu_to_cluster(cpu_dev->id);
	u32 *table;
	int ret;

	count = vexpress_spc_get_freq_table(cluster, &table);
	if (!table || !count) {
		pr_err("SPC controller returned invalid freq table\n");
		return -EINVAL;
	}

	for (i = 0; i < count; i++) {
		/* FIXME: Voltage value */
		ret = opp_add(cpu_dev, table[i] * 1000, 900000);
		if (ret) {
			dev_warn(cpu_dev, "%s: Failed to add OPP %u, err: %d\n",
				 __func__, table[i] * 1000, ret);
			return ret;
		}
	}

	return 0;
}
/**
 * omap_init_opp_table() - Initialize opp table as per the CPU type
 * @opp_def:		opp default list for this silicon
 * @opp_def_size:	number of opp entries for this silicon
 *
 * Register the initial OPP table with the OPP library based on the CPU
 * type. This is meant to be used only by SoC specific registration.
 *
 * For each entry: resolve the owning omap_hwmod/device, snap the requested
 * frequency to what the named clock can actually produce, register the OPP,
 * disable it if not available by default, and hook the device into DVFS.
 *
 * Returns 0 on success, -EINVAL on any malformed entry, -EEXIST if the
 * table was already initialized (even by a failed previous call).
 */
int __init omap_init_opp_table(struct omap_opp_def *opp_def,
		u32 opp_def_size)
{
	int i, r;
	struct clk *clk;
	long round_rate;

	if (!opp_def || !opp_def_size) {
		pr_err("%s: invalid params!\n", __func__);
		return -EINVAL;
	}

	/*
	 * Initialize only if not already initialized even if the previous
	 * call failed, because, no reason we'd succeed again.
	 */
	if (omap_table_init)
		return -EEXIST;
	omap_table_init = 1;

	/* Lets now register with OPP library */
	for (i = 0; i < opp_def_size; i++, opp_def++) {
		struct omap_hwmod *oh;
		struct device *dev;

		/* Every OPP entry must name its owning hwmod. */
		if (!opp_def->hwmod_name) {
			WARN(1, "%s: NULL name of omap_hwmod, failing"
				" [%d].\n", __func__, i);
			return -EINVAL;
		}
		oh = omap_hwmod_lookup(opp_def->hwmod_name);
		if (!oh || !oh->od) {
			WARN(1, "%s: no hwmod or odev for %s, [%d] "
				"cannot add OPPs.\n", __func__,
				opp_def->hwmod_name, i);
			return -EINVAL;
		}
		dev = &oh->od->pdev.dev;

		/*
		 * Snap the nominal OPP frequency to the nearest rate the
		 * clock framework can actually deliver; a table entry whose
		 * clock is missing or cannot round aborts the whole init.
		 */
		clk = omap_clk_get_by_name(opp_def->clk_name);
		if (clk) {
			round_rate = clk_round_rate(clk, opp_def->freq);
			if (round_rate > 0) {
				opp_def->freq = round_rate;
			} else {
				WARN(1, "%s: round_rate for clock %s failed\n",
					__func__, opp_def->clk_name);
				return -EINVAL; /* skip Bad OPP */
			}
		} else {
			WARN(1, "%s: No clock by name %s found\n", __func__,
				opp_def->clk_name);
			return -EINVAL; /* skip Bad OPP */
		}
		/* opp_add failure is logged but does not abort the loop. */
		r = opp_add(dev, opp_def->freq, opp_def->u_volt);
		if (r) {
			dev_err(dev, "%s: add OPP %ld failed for %s [%d] "
				"result=%d\n",
			       __func__, opp_def->freq,
			       opp_def->hwmod_name, i, r);
		} else {
			/* Entries not available by default start disabled. */
			if (!opp_def->default_available)
				r = opp_disable(dev, opp_def->freq);
			if (r)
				dev_err(dev, "%s: disable %ld failed for %s "
					"[%d] result=%d\n",
					__func__, opp_def->freq,
					opp_def->hwmod_name, i, r);

			r  = omap_dvfs_register_device(dev,
				opp_def->voltdm_name, opp_def->clk_name);
			if (r)
				dev_err(dev, "%s:%s:err dvfs register %d %d\n",
					__func__, opp_def->hwmod_name, r, i);
		}
	}

	return 0;
}
Esempio n. 10
0
/*
 * omap4_pm_init_opp_table() - one-shot registration of the OMAP4 OPP table
 * and per-device set_rate/get_rate hooks.
 *
 * Selects the pre-ES2.1 or the regular OPP definition list by silicon
 * revision, registers every entry, caches the DPLL clock handles in
 * file-scope variables, reparents the SGX functional clock to the PER DPLL,
 * and wires rate callbacks into each scalable device that can be found.
 *
 * Always returns 0; individual opp_add() failures are only logged.
 * NOTE(review): the clk_get() results below are not checked for IS_ERR —
 * presumably these clocks always exist on this SoC; confirm.
 */
int __init omap4_pm_init_opp_table(void)
{
    struct omap_opp_def *opp_def;
    struct device *dev;
    struct clk *gpu_fclk;
    int i, r;

    /*
     * Allow multiple calls, but initialize only if not already initialized
     * even if the previous call failed, coz, no reason we'd succeed again
     */
    if (omap4_table_init)
        return 0;
    omap4_table_init = 1;

    /* Pre-ES2.1 silicon uses a reduced OPP definition list. */
    if (omap_rev() <= OMAP4430_REV_ES2_0)
        opp_def = omap44xx_pre_es2_1_opp_def_list;
    else
        opp_def = omap44xx_opp_def_list;

    /* Register every OPP; failures are logged but not fatal. */
    for (i = 0; i < omap44xx_opp_def_size; i++) {
        r = opp_add(opp_def);
        if (r)
            pr_err("unable to add OPP %ld Hz for %s\n",
                   opp_def->freq, opp_def->hwmod_name);
        opp_def++;
    }

    /* Cache clock handles used by the rate set/get callbacks below. */
    dpll_mpu_clk = clk_get(NULL, "dpll_mpu_ck");
    iva_clk = clk_get(NULL, "dpll_iva_m5x2_ck");
    dsp_clk = clk_get(NULL, "dpll_iva_m4x2_ck");
    l3_clk = clk_get(NULL, "dpll_core_m5x2_ck");
    core_m2_clk = clk_get(NULL, "dpll_core_m2_ck");
    core_m3_clk = clk_get(NULL, "dpll_core_m3x2_ck");
    core_m6_clk = clk_get(NULL, "dpll_core_m6x2_ck");
    core_m7_clk = clk_get(NULL, "dpll_core_m7x2_ck");
    sgx_clk = clk_get(NULL, "dpll_per_m7x2_ck");
    gpu_fclk = clk_get(NULL, "gpu_fck");
    per_m3_clk = clk_get(NULL, "dpll_per_m3x2_ck");
    per_m6_clk = clk_get(NULL, "dpll_per_m6x2_ck");
    abe_clk = clk_get(NULL, "abe_clk");
    fdif_clk = clk_get(NULL, "fdif_fck");
    hsi_clk = clk_get(NULL, "hsi_fck");

    /* Set SGX parent to PER DPLL */
    clk_set_parent(gpu_fclk, sgx_clk);
    clk_put(gpu_fclk);

    /* Populate the set rate and get rate for mpu, iva, dsp and l3 device */
    dev = omap2_get_mpuss_device();
    if (dev)
        opp_populate_rate_fns(dev, omap4_mpu_set_rate,
                              omap4_mpu_get_rate);

    dev = omap2_get_iva_device();
    if (dev)
        opp_populate_rate_fns(dev, omap4_iva_set_rate,
                              omap4_iva_get_rate);

    /* DSP shares the IVA rate callbacks (both sit on the IVA DPLL). */
    dev = omap4_get_dsp_device();
    if (dev)
        opp_populate_rate_fns(dev, omap4_iva_set_rate,
                              omap4_iva_get_rate);

    dev = omap2_get_l3_device();
    if (dev)
        opp_populate_rate_fns(dev, omap4_l3_set_rate,
                              omap4_l3_get_rate);

    /*
     * This is a temporary hack since emif clocks cannot be scaled
     * on ES1.0 and ES2.0. Once everybody has migrated to ES2.1 this
     * check can be remove.
     */
    if (omap_rev() > OMAP4430_REV_ES2_0) {
        dev = find_dev_ptr("emif1");
        if (dev)
            opp_populate_rate_fns(dev, omap4_emif_set_rate,
                                  omap4_emif_get_rate);

        dev = find_dev_ptr("emif2");
        if (dev)
            opp_populate_rate_fns(dev, omap4_emif_set_rate,
                                  omap4_emif_get_rate);
    }

    dev = find_dev_ptr("omap-aess-audio");
    if (dev)
        opp_populate_rate_fns(dev, omap4_abe_set_rate,
                              omap4_abe_get_rate);

    dev = find_dev_ptr("gpu");
    if (dev)
        opp_populate_rate_fns(dev, omap4_sgx_set_rate,
                              omap4_sgx_get_rate);

    dev = find_dev_ptr("fdif");
    if (dev)
        opp_populate_rate_fns(dev, omap4_fdif_set_rate,
                              omap4_fdif_get_rate);

    dev = find_dev_ptr("hsi");
    if (dev)
        opp_populate_rate_fns(dev, omap4_hsi_set_rate,
                              omap4_hsi_get_rate);

    return 0;
}
Esempio n. 11
0
/*
 * g3_display_probe - platform probe for the G3 display devfreq device.
 *
 * Allocates the driver state, registers the display OPP table, resolves the
 * OPP for the profile's initial frequency, attaches a simple_ondemand
 * devfreq instance plus its OPP notifier, and registers a PM notifier.
 *
 * Returns 0 on success or a negative errno; on failure everything acquired
 * so far is torn down via the goto cleanup chain.
 */
int g3_display_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct g3_display_data *data;
	struct opp *opp;
	int ret = 0;
	int i;

	data = kzalloc(sizeof(struct g3_display_data), GFP_KERNEL);
	if (!data) {
		dev_err(dev, "cannot_allocate memory.\n");
		return -ENOMEM;
	}

	data->dev = dev;
	mutex_init(&data->lock);

	/* Register one OPP entry per display level. */
	for (i = 0; i < _LV_END_; i++) {
		ret = opp_add(dev, g3_display_opp_table[i].freq,
				g3_display_opp_table[i].volt);
		if (ret) {
			dev_err(dev, "cannot add opp entries.\n");
			goto err_alloc_mem;
		}
	}

	/* Resolve the OPP entry matching the profile's initial frequency. */
	opp = opp_find_freq_floor(dev, &g3_display_profile.initial_freq);
	if (IS_ERR(opp)) {
		dev_err(dev, "invalid initial frequency %lu.\n",
				g3_display_profile.initial_freq);
		ret = PTR_ERR(opp);
		goto err_alloc_mem;
	}
	data->curr_opp = opp;

	/* initialize qos */
	// TODO

	/* Attach to the devfreq framework with the simple_ondemand governor. */
	data->devfreq = devfreq_add_device(dev, &g3_display_profile,
			"simple_ondemand", &g3_display_ondemand_data);
	if (IS_ERR(data->devfreq)) {
		ret = PTR_ERR(data->devfreq);
		dev_err(dev, "failed to add devfreq: %d\n", ret);
		goto err_alloc_mem;
	}

	devfreq_register_opp_notifier(dev, data->devfreq);

	/* Subscribe to PM transitions (suspend/resume callbacks). */
	memset(&data->nb_pm, 0, sizeof(data->nb_pm));
	data->nb_pm.notifier_call = g3_display_pm_notifier_callback;
	ret = register_pm_notifier(&data->nb_pm);
	if (ret < 0) {
		dev_err(dev, "failed to get pm notifier: %d\n", ret);
		goto err_add_devfreq;
	}

	platform_set_drvdata(pdev, data);

	return 0;

err_add_devfreq:
	devfreq_remove_device(data->devfreq);
err_alloc_mem:
	kfree(data);
	return ret;
}
Esempio n. 12
0
int exynos5250_init(struct device *dev, struct busfreq_data *data)
{
	unsigned int i, tmp;
	unsigned long maxfreq = ULONG_MAX;
	unsigned long minfreq = 0;
	unsigned long cdrexfreq;
	unsigned long lrbusfreq;
	struct clk *clk;
	int ret;

	/* Enable pause function for DREX2 DVFS */
	drex2_pause_ctrl = __raw_readl(EXYNOS5_DREX2_PAUSE);
	drex2_pause_ctrl |= DMC_PAUSE_ENABLE;
	__raw_writel(drex2_pause_ctrl, EXYNOS5_DREX2_PAUSE);

	clk = clk_get(NULL, "mclk_cdrex");
	if (IS_ERR(clk)) {
		dev_err(dev, "Fail to get mclk_cdrex clock");
		ret = PTR_ERR(clk);
		return ret;
	}
	cdrexfreq = clk_get_rate(clk) / 1000;
	clk_put(clk);

	clk = clk_get(NULL, "aclk_266");
	if (IS_ERR(clk)) {
		dev_err(dev, "Fail to get aclk_266 clock");
		ret = PTR_ERR(clk);
		return ret;
	}
	lrbusfreq = clk_get_rate(clk) / 1000;
	clk_put(clk);

	if (cdrexfreq == 800000) {
		clkdiv_cdrex = clkdiv_cdrex_for800;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for800;
		exynos5_mif_volt = exynos5_mif_volt_for800;
	} else if (cdrexfreq == 666857) {
		clkdiv_cdrex = clkdiv_cdrex_for667;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for667;
		exynos5_mif_volt = exynos5_mif_volt_for667;
	} else if (cdrexfreq == 533000) {
		clkdiv_cdrex = clkdiv_cdrex_for533;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for533;
		exynos5_mif_volt = exynos5_mif_volt_for533;
	} else if (cdrexfreq == 400000) {
		clkdiv_cdrex = clkdiv_cdrex_for400;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for400;
		exynos5_mif_volt = exynos5_mif_volt_for400;
	} else {
		dev_err(dev, "Don't support cdrex table\n");
		return -EINVAL;
	}

	tmp = __raw_readl(EXYNOS5_CLKDIV_LEX);

	for (i = LV_0; i < LV_INT_END; i++) {
		tmp &= ~(EXYNOS5_CLKDIV_LEX_ATCLK_LEX_MASK | EXYNOS5_CLKDIV_LEX_PCLK_LEX_MASK);

		tmp |= ((clkdiv_lex[i][0] << EXYNOS5_CLKDIV_LEX_ATCLK_LEX_SHIFT) |
			(clkdiv_lex[i][1] << EXYNOS5_CLKDIV_LEX_PCLK_LEX_SHIFT));

		data->lex_divtable[i] = tmp;
	}

	tmp = __raw_readl(EXYNOS5_CLKDIV_R0X);

	for (i = LV_0; i < LV_INT_END; i++) {

		tmp &= ~EXYNOS5_CLKDIV_R0X_PCLK_R0X_MASK;

		tmp |= (clkdiv_r0x[i][0] << EXYNOS5_CLKDIV_R0X_PCLK_R0X_SHIFT);

		data->r0x_divtable[i] = tmp;
	}

	tmp = __raw_readl(EXYNOS5_CLKDIV_R1X);

	for (i = LV_0; i < LV_INT_END; i++) {
		tmp &= ~EXYNOS5_CLKDIV_R1X_PCLK_R1X_MASK;

		tmp |= (clkdiv_r1x[i][0] << EXYNOS5_CLKDIV_R1X_PCLK_R1X_SHIFT);

		data->r1x_divtable[i] = tmp;
	}

	tmp = __raw_readl(EXYNOS5_CLKDIV_CDREX);

	if (samsung_rev() < EXYNOS5250_REV_1_0) {
		for (i = LV_0; i < LV_MIF_END; i++) {
			tmp &= ~(EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_MASK |
				 EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_CLK400_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_C2C200_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_MASK);

			tmp |= ((clkdiv_cdrex[i][0] << EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_SHIFT) |
				(clkdiv_cdrex[i][1] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_SHIFT) |
				(clkdiv_cdrex[i][2] << EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][3] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][4] << EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][5] << EXYNOS5_CLKDIV_CDREX_ACLK_CLK400_SHIFT) |
				(clkdiv_cdrex[i][6] << EXYNOS5_CLKDIV_CDREX_ACLK_C2C200_SHIFT) |
				(clkdiv_cdrex[i][8] << EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_SHIFT));

				data->cdrex_divtable[i] = tmp;
		}
	} else {
		for (i = LV_0; i < LV_MIF_END; i++) {
			tmp &= ~(EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_MASK |
				 EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_MASK |
				 EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_MASK);

			tmp |= ((clkdiv_cdrex[i][0] << EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_SHIFT) |
				(clkdiv_cdrex[i][1] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_SHIFT) |
				(clkdiv_cdrex[i][2] << EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][3] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][4] << EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_SHIFT) |
				(clkdiv_cdrex[i][8] << EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_SHIFT));

				data->cdrex_divtable[i] = tmp;
		}
	}

	if (samsung_rev() < EXYNOS5250_REV_1_0) {
		tmp = __raw_readl(EXYNOS5_CLKDIV_CDREX2);

		for (i = LV_0; i < LV_MIF_END; i++) {
			tmp &= ~EXYNOS5_CLKDIV_CDREX2_MCLK_EFPHY_MASK;

			tmp |= clkdiv_cdrex[i][7] << EXYNOS5_CLKDIV_CDREX2_MCLK_EFPHY_SHIFT;

			data->cdrex2_divtable[i] = tmp;

		}
	}

	exynos5250_set_bus_volt();

	data->dev[PPMU_MIF] = dev;
	data->dev[PPMU_INT] = &busfreq_for_int;

	for (i = LV_0; i < LV_MIF_END; i++) {
		ret = opp_add(data->dev[PPMU_MIF], exynos5_busfreq_table_mif[i].mem_clk,
				exynos5_busfreq_table_mif[i].volt);
		if (ret) {
			dev_err(dev, "Fail to add opp entries.\n");
			return ret;
		}
	}

#if defined(CONFIG_DP_60HZ_P11) || defined(CONFIG_DP_60HZ_P10)
	if (cdrexfreq == 666857) {
		opp_disable(data->dev[PPMU_MIF], 334000);
		opp_disable(data->dev[PPMU_MIF], 110000);
	} else if (cdrexfreq == 533000) {
		opp_disable(data->dev[PPMU_MIF], 267000);
		opp_disable(data->dev[PPMU_MIF], 107000);
	} else if (cdrexfreq == 400000) {
		opp_disable(data->dev[PPMU_MIF], 267000);
		opp_disable(data->dev[PPMU_MIF], 100000);
	}
#endif

	for (i = LV_0; i < LV_INT_END; i++) {
		ret = opp_add(data->dev[PPMU_INT], exynos5_busfreq_table_int[i].mem_clk,
				exynos5_busfreq_table_int[i].volt);
		if (ret) {
			dev_err(dev, "Fail to add opp entries.\n");
			return ret;
		}
	}

	data->target = exynos5250_target;
	data->get_table_index = exynos5250_get_table_index;
	data->monitor = exynos5250_monitor;
	data->busfreq_suspend = exynos5250_suspend;
	data->busfreq_resume = exynos5250_resume;
	data->sampling_rate = usecs_to_jiffies(100000);

	data->table[PPMU_MIF] = exynos5_busfreq_table_mif;
	data->table[PPMU_INT] = exynos5_busfreq_table_int;

	/* Find max frequency for mif */
	data->max_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_floor(data->dev[PPMU_MIF], &maxfreq));
	data->min_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &minfreq));
	data->curr_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &cdrexfreq));
	/* Find max frequency for int */
	maxfreq = ULONG_MAX;
	minfreq = 0;
	data->max_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_floor(data->dev[PPMU_INT], &maxfreq));
	data->min_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &minfreq));
	data->curr_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &lrbusfreq));

	data->vdd_reg[PPMU_INT] = regulator_get(NULL, "vdd_int");
	if (IS_ERR(data->vdd_reg[PPMU_INT])) {
		pr_err("failed to get resource %s\n", "vdd_int");
		return -ENODEV;
	}

	data->vdd_reg[PPMU_MIF] = regulator_get(NULL, "vdd_mif");
	if (IS_ERR(data->vdd_reg[PPMU_MIF])) {
		pr_err("failed to get resource %s\n", "vdd_mif");
		regulator_put(data->vdd_reg[PPMU_INT]);
		return -ENODEV;
	}

        data->busfreq_early_suspend_handler.suspend = &busfreq_early_suspend;
	data->busfreq_early_suspend_handler.resume = &busfreq_late_resume;

	data->busfreq_early_suspend_handler.suspend = &busfreq_early_suspend;
	data->busfreq_early_suspend_handler.resume = &busfreq_late_resume;

	/* Request min 300MHz for MIF and 150MHz for  INT*/
	dev_lock(dev, dev, 300150);

	register_early_suspend(&data->busfreq_early_suspend_handler);

	tmp = __raw_readl(EXYNOS5_ABBG_INT_CONTROL);
	tmp &= ~(0x1f | (1 << 31) | (1 << 7));
	tmp |= ((8 + INT_RBB) | (1 << 31) | (1 << 7));
	__raw_writel(tmp, EXYNOS5_ABBG_INT_CONTROL);

	return 0;
}
int exynos5250_init(struct device *dev, struct busfreq_data *data)
{
	unsigned int i;
	unsigned long maxfreq = ULONG_MAX;
	unsigned long minfreq = 0;
	unsigned long cdrexfreq;
	unsigned long lrbusfreq;
	struct clk *clk;
	int ret;

	/* Enable pause function for DREX2 DVFS */
	dmc_pause_ctrl = __raw_readl(EXYNOS5_DMC_PAUSE_CTRL);
	dmc_pause_ctrl |= DMC_PAUSE_ENABLE;
	__raw_writel(dmc_pause_ctrl, EXYNOS5_DMC_PAUSE_CTRL);

	clk = clk_get(NULL, "mout_cdrex");
	if (IS_ERR(clk)) {
		dev_err(dev, "Fail to get mclk_cdrex clock");
		ret = PTR_ERR(clk);
		return ret;
	}
	cdrexfreq = clk_get_rate(clk) / 1000;
	clk_put(clk);

	clk = clk_get(NULL, "aclk_266");
	if (IS_ERR(clk)) {
		dev_err(dev, "Fail to get aclk_266 clock");
		ret = PTR_ERR(clk);
		return ret;
	}
	lrbusfreq = clk_get_rate(clk) / 1000;
	clk_put(clk);

	if (cdrexfreq == 800000) {
		clkdiv_cdrex = clkdiv_cdrex_for800;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for800;
		exynos5_mif_volt = exynos5_mif_volt_for800;
	} else if (cdrexfreq == 666857) {
		clkdiv_cdrex = clkdiv_cdrex_for667;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for667;
		exynos5_mif_volt = exynos5_mif_volt_for667;
	} else if (cdrexfreq == 533000) {
		clkdiv_cdrex = clkdiv_cdrex_for533;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for533;
		exynos5_mif_volt = exynos5_mif_volt_for533;
	} else if (cdrexfreq == 400000) {
		clkdiv_cdrex = clkdiv_cdrex_for400;
		exynos5_busfreq_table_mif = exynos5_busfreq_table_for400;
		exynos5_mif_volt = exynos5_mif_volt_for400;
	} else {
		dev_err(dev, "Don't support cdrex table\n");
		return -EINVAL;
	}

	exynos5250_set_bus_volt();

	data->dev[PPMU_MIF] = dev;
	data->dev[PPMU_INT] = &busfreq_for_int;

	for (i = LV_0; i < LV_MIF_END; i++) {
		ret = opp_add(data->dev[PPMU_MIF], exynos5_busfreq_table_mif[i].mem_clk,
				exynos5_busfreq_table_mif[i].volt);
		if (ret) {
			dev_err(dev, "Fail to add opp entries.\n");
			return ret;
		}
	}

	opp_disable(data->dev[PPMU_MIF], 107000);

	for (i = LV_0; i < LV_INT_END; i++) {
		ret = opp_add(data->dev[PPMU_INT], exynos5_busfreq_table_int[i].mem_clk,
				exynos5_busfreq_table_int[i].volt);
		if (ret) {
			dev_err(dev, "Fail to add opp entries.\n");
			return ret;
		}
	}

	data->target = exynos5250_target;
	data->get_table_index = exynos5250_get_table_index;
	data->monitor = exynos5250_monitor;
	data->busfreq_suspend = exynos5250_suspend;
	data->busfreq_resume = exynos5250_resume;
	data->sampling_rate = usecs_to_jiffies(100000);

	data->table[PPMU_MIF] = exynos5_busfreq_table_mif;
	data->table[PPMU_INT] = exynos5_busfreq_table_int;

	/* Find max frequency for mif */
	data->max_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_floor(data->dev[PPMU_MIF], &maxfreq));
	data->min_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &minfreq));
	data->curr_freq[PPMU_MIF] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &cdrexfreq));
	/* Find max frequency for int */
	maxfreq = ULONG_MAX;
	minfreq = 0;
	data->max_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_floor(data->dev[PPMU_INT], &maxfreq));
	data->min_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &minfreq));
	data->curr_freq[PPMU_INT] =
			opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &lrbusfreq));

	data->vdd_reg[PPMU_INT] = regulator_get(NULL, "vdd_int");
	if (IS_ERR(data->vdd_reg[PPMU_INT])) {
		pr_err("failed to get resource %s\n", "vdd_int");
		return -ENODEV;
	}

	data->vdd_reg[PPMU_MIF] = regulator_get(NULL, "vdd_mif");
	if (IS_ERR(data->vdd_reg[PPMU_MIF])) {
		pr_err("failed to get resource %s\n", "vdd_mif");
		regulator_put(data->vdd_reg[PPMU_INT]);
		return -ENODEV;
	}

        data->busfreq_early_suspend_handler.suspend = &busfreq_early_suspend;
	data->busfreq_early_suspend_handler.resume = &busfreq_late_resume;

	/* Request min 300MHz */
	dev_lock(dev, dev, 300000);

	register_early_suspend(&data->busfreq_early_suspend_handler);

	tmp = __raw_readl(EXYNOS5_ABBG_INT_CONTROL);
	tmp &= ~(0x1f | (1 << 31) | (1 << 7));
	tmp |= ((8 + INT_RBB) | (1 << 31) | (1 << 7));
	__raw_writel(tmp, EXYNOS5_ABBG_INT_CONTROL);

	return 0;
}
Esempio n. 14
0
/*
 * omap4_pm_init_opp_table() - one-shot registration of the OMAP4 OPP table
 * and per-device rate callbacks (LGE CX2 variant with optional 1.2GHz OPP).
 *
 * Selects the pre-ES2.1 or regular OPP list by silicon revision, registers
 * every entry, caches DPLL clock handles, reparents the SGX functional
 * clock to the PER DPLL, optionally enables the 1.2GHz MPU OPP when the
 * fuse indicates support, and wires rate callbacks into each scalable
 * device that can be found.  Always returns 0; failures are only logged.
 *
 * Fix vs. original: both printk()s in the 1.2GHz path printed the
 * `tnt_opp` pointer with %d (format/argument mismatch, UB); the failure
 * path now prints PTR_ERR() with %ld and the success path prints %p.
 */
int __init omap4_pm_init_opp_table(void)
{
	struct omap_opp_def *opp_def;
	struct device *dev;
	struct clk *gpu_fclk;
	int i, r;
#if defined(CONFIG_MACH_LGE_CX2)
	struct omap_opp *tnt_opp;
	int has_tnt_opp = 0;
#endif

	/*
	 * Allow multiple calls, but initialize only if not already
	 * initialized even if the previous call failed, coz, no reason we'd
	 * succeed again
	 */
	if (omap4_table_init)
		return 0;
	omap4_table_init = 1;

	/* Pre-ES2.1 silicon uses a reduced OPP definition list. */
	if (omap_rev() <= OMAP4430_REV_ES2_0)
		opp_def = omap44xx_pre_es2_1_opp_def_list;
	else
		opp_def = omap44xx_opp_def_list;

	/* Register every OPP; failures are logged but not fatal. */
	for (i = 0; i < omap44xx_opp_def_size; i++) {
		r = opp_add(opp_def);
		if (r)
			pr_err("unable to add OPP %ld Hz for %s\n",
				opp_def->freq, opp_def->hwmod_name);
		opp_def++;
	}

	/* Cache clock handles used by the rate set/get callbacks below. */
	dpll_mpu_clk = clk_get(NULL, "dpll_mpu_ck");
	iva_clk = clk_get(NULL, "dpll_iva_m5x2_ck");
	dsp_clk = clk_get(NULL, "dpll_iva_m4x2_ck");
	l3_clk = clk_get(NULL, "dpll_core_m5x2_ck");
	core_m2_clk = clk_get(NULL, "dpll_core_m2_ck");
	core_m3_clk = clk_get(NULL, "dpll_core_m3x2_ck");
	core_m6_clk = clk_get(NULL, "dpll_core_m6x2_ck");
	core_m7_clk = clk_get(NULL, "dpll_core_m7x2_ck");
	sgx_clk = clk_get(NULL, "dpll_per_m7x2_ck");
	gpu_fclk = clk_get(NULL, "gpu_fck");
	per_m3_clk = clk_get(NULL, "dpll_per_m3x2_ck");
	per_m6_clk = clk_get(NULL, "dpll_per_m6x2_ck");
	abe_clk = clk_get(NULL, "abe_clk");
	fdif_clk = clk_get(NULL, "fdif_fck");
	hsi_clk = clk_get(NULL, "hsi_fck");

	/* Set SGX parent to PER DPLL */
	clk_set_parent(gpu_fclk, sgx_clk);
	clk_put(gpu_fclk);

	/* Populate the set rate and get rate for mpu, iva, dsp and l3 device */
	dev = omap2_get_mpuss_device();
	if (dev)
		opp_populate_rate_fns(dev, omap4_mpu_set_rate,
				omap4_mpu_get_rate);

#if defined(CONFIG_MACH_LGE_CX2)
	/* Enable 1.2Gz OPP for silicon that supports it
	 * TODO: determine if FUSE_OPP_VDD_MPU_3 is a reliable source to
	 * determine 1.2Gz availability.
	 */
	has_tnt_opp = __raw_readl(OMAP2_L4_IO_ADDRESS(CTRL_FUSE_OPP_VDD_MPU_3));
	has_tnt_opp &= 0xFFFFFF;

	if (has_tnt_opp) {
		tnt_opp = opp_find_freq_exact(dev, TNT_FREQ, false);
		if (IS_ERR(tnt_opp)) {
			printk(KERN_ERR "[1.2GHz support Fail] %ld\n",
			       PTR_ERR(tnt_opp));
			pr_err("unable to find OPP for 1.2Gz\n");
		} else {
			printk(KERN_ERR "[1.2GHz support success] %p\n",
			       tnt_opp);
			opp_enable(tnt_opp);
		}
	}
#endif


	dev = omap2_get_iva_device();
	if (dev)
		opp_populate_rate_fns(dev, omap4_iva_set_rate,
				omap4_iva_get_rate);

	/* DSP shares the IVA rate callbacks (both sit on the IVA DPLL). */
	dev = omap4_get_dsp_device();
	if (dev)
		opp_populate_rate_fns(dev, omap4_iva_set_rate,
				omap4_iva_get_rate);

	dev = omap2_get_l3_device();
	if (dev)
		opp_populate_rate_fns(dev, omap4_l3_set_rate,
				omap4_l3_get_rate);

	/*
	 * This is a temporary hack since emif clocks cannot be scaled
	 * on ES1.0 and ES2.0. Once everybody has migrated to ES2.1 this
	 * check can be remove.
	 */
	if (omap_rev() > OMAP4430_REV_ES2_0) {
		dev = find_dev_ptr("emif1");
		if (dev)
			opp_populate_rate_fns(dev, omap4_emif_set_rate,
					omap4_emif_get_rate);

		dev = find_dev_ptr("emif2");
		if (dev)
			opp_populate_rate_fns(dev, omap4_emif_set_rate,
					omap4_emif_get_rate);
	}

	dev = find_dev_ptr("omap-aess-audio");
	if (dev)
		opp_populate_rate_fns(dev, omap4_abe_set_rate,
				omap4_abe_get_rate);

	dev = find_dev_ptr("gpu");
	if (dev)
		opp_populate_rate_fns(dev, omap4_sgx_set_rate,
				omap4_sgx_get_rate);

	dev = find_dev_ptr("fdif");
	if (dev)
		opp_populate_rate_fns(dev, omap4_fdif_set_rate,
				omap4_fdif_get_rate);

	dev = find_dev_ptr("hsi");
	if (dev)
		opp_populate_rate_fns(dev, omap4_hsi_set_rate,
				omap4_hsi_get_rate);

	return 0;
}
int exynos4210_init(struct device *dev, struct busfreq_data *data)
{
	unsigned int i;
	unsigned int tmp;
	unsigned long maxfreq = UINT_MAX;
	int ret;

	tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);

	for (i = 0; i <  LV_END; i++) {
		tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
			EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
			EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
			EXYNOS4_CLKDIV_DMC0_DMC_MASK |
			EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
			EXYNOS4_CLKDIV_DMC0_DMCP_MASK |
			EXYNOS4_CLKDIV_DMC0_COPY2_MASK |
			EXYNOS4_CLKDIV_DMC0_CORETI_MASK);

		tmp |= ((clkdiv_dmc0[i][0] << EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
			(clkdiv_dmc0[i][1] << EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
			(clkdiv_dmc0[i][2] << EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
			(clkdiv_dmc0[i][3] << EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
			(clkdiv_dmc0[i][4] << EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
			(clkdiv_dmc0[i][5] << EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT) |
			(clkdiv_dmc0[i][6] << EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT) |
			(clkdiv_dmc0[i][7] << EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT));

		exynos4_busfreq_table[i].clk_dmc0div = tmp;
	}

	tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);

	for (i = 0; i <  LV_END; i++) {
		tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK200_MASK |
			EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
			EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
			EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
			EXYNOS4_CLKDIV_TOP_ONENAND_MASK);

		tmp |= ((clkdiv_top[i][0] << EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT) |
			(clkdiv_top[i][1] << EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
			(clkdiv_top[i][2] << EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
			(clkdiv_top[i][3] << EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
			(clkdiv_top[i][4] << EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));

		exynos4_busfreq_table[i].clk_topdiv = tmp;
	}

	exynos4210_set_bus_volt();

	for (i = 0; i < LV_END; i++) {
		ret = opp_add(dev, exynos4_busfreq_table[i].mem_clk,
				exynos4_busfreq_table[i].volt);
		if (ret) {
			dev_err(dev, "Fail to add opp entries.\n");
			return ret;
		}
	}

	data->table = exynos4_busfreq_table;
	data->table_size = LV_END;

	/* Find max frequency */
	data->max_opp = opp_find_freq_floor(dev, &maxfreq);

	data->vdd_int = regulator_get(NULL, "vdd_int");
	if (IS_ERR(data->vdd_int)) {
		pr_err("failed to get resource %s\n", "vdd_int");
		return -ENODEV;
	}

	data->vdd_mif = ERR_PTR(-ENODEV);

	return 0;
}
Esempio n. 16
0
/**
 * omap_opp_register() - Initialize opp table as per the CPU type
 * @dev: device registering for OPP
 * @hwmod_name: hemod name of registering device
 *
 * Register the given device with the OPP/DVFS framework. Intended to
 * be called when omap_device is built.
 *
 * Walks the global opp_table and, for every default-available entry whose
 * hwmod name matches @hwmod_name, snaps the frequency to what the clock
 * framework can deliver, adds the OPP and registers the device for DVFS.
 * Bad entries are skipped with a warning; the walk always completes.
 */
int omap_opp_register(struct device *dev, const char *hwmod_name)
{
	struct omap_opp_def *opp_def = opp_table;
	u32 opp_def_size = opp_table_size;
	long round_rate;
	struct clk *clk;
	int i, r;

	if (!opp_def || !opp_def_size) {
		pr_err("%s: invalid params!\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR(dev)) {
		pr_err("%s: Unable to get dev pointer\n", __func__);
		return -EINVAL;
	}


	/* Lets now register with OPP library */
	for (i = 0; i < opp_def_size; i++, opp_def++) {
		if (!opp_def->default_available)
			continue;

		if (!opp_def->dev_info->hwmod_name) {
			WARN_ONCE(1, "%s: NULL name of omap_hwmod, failing [%d].\n",
				  __func__, i);
			return -EINVAL;
		}

		/* Only entries belonging to the requested hwmod matter. */
		if (strcmp(hwmod_name, opp_def->dev_info->hwmod_name))
			continue;

		clk = omap_clk_get_by_name(opp_def->dev_info->clk_name);
		if (!clk) {
			pr_warn("%s: No clock by name %s found\n",
				__func__, opp_def->dev_info->clk_name);
			continue; /* skip Bad OPP */
		}

		round_rate = clk_round_rate(clk, opp_def->freq);
		if (round_rate <= 0) {
			pr_warn("%s: round_rate for clock %s failed\n",
				__func__, opp_def->dev_info->clk_name);
			continue; /* skip Bad OPP */
		}
		opp_def->freq = round_rate;

		r = opp_add(dev, opp_def->freq, opp_def->u_volt);
		if (r) {
			dev_err(dev,
				"%s: add OPP %ld failed for %s [%d] result=%d\n",
				__func__, opp_def->freq,
				opp_def->dev_info->hwmod_name, i, r);
			continue;
		}

		r = omap_dvfs_register_device(dev,
				opp_def->dev_info->voltdm_name,
				opp_def->dev_info->clk_name);
		if (r)
			dev_err(dev, "%s:%s:err dvfs register %d %d\n",
				__func__, opp_def->dev_info->hwmod_name,
				r, i);
	}
	return 0;
}