Example #1
static void __init imx6q_opp_init(void)
{
	struct device_node *np;
	struct device *cpu_dev = get_cpu_device(0);

	if (!cpu_dev) {
		pr_warn("failed to get cpu0 device\n");
		return;
	}
	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		pr_warn("failed to find cpu0 node\n");
		return;
	}

	if (of_init_opp_table(cpu_dev)) {
		pr_warn("failed to init OPP table\n");
		goto put_node;
	}

	imx6q_opp_check_speed_grading(cpu_dev);

put_node:
	of_node_put(np);
}
Example #2
static int dt_init_opp_table(struct device *cpu_dev)
{
	struct device_node *np, *parent;
	int count = 0, ret;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_err("failed to find OF /cpus\n");
		return -ENOENT;
	}

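	/* Assume the Nth child of /cpus corresponds to CPU id N; skip to the child matching cpu_dev->id */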
	for_each_child_of_node(parent, np) {
		if (count++ != cpu_dev->id)
			continue;
		if (!of_get_property(np, "operating-points", NULL)) {
			ret = -ENODATA;
		} else {
			cpu_dev->of_node = np;
			ret = of_init_opp_table(cpu_dev);
		}
		of_node_put(np);
		of_node_put(parent);

		return ret;
	}

	return -ENODEV;
}
Example #3
static mali_bool kbase_platform_init(struct kbase_device *kbdev)
{
	struct device *dev = kbdev->dev;
	dev->platform_data = kbdev;

#ifdef CONFIG_REPORT_VSYNC
	kbase_dev = kbdev;
#endif

	kbdev->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(kbdev->clk)) {
		printk("[mali-midgard]  Failed to get clk\n");
		return MALI_FALSE;
	}

	kbdev->regulator = devm_regulator_get(dev, KBASE_HI3635_PLATFORM_GPU_REGULATOR_NAME);
	if (IS_ERR(kbdev->regulator)) {
		printk("[mali-midgard]  Failed to get regulator\n");
		return MALI_FALSE;
	}

#ifdef CONFIG_PM_DEVFREQ
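	/* Populate the OPP table from DT and build the devfreq frequency table from it */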
	if (of_init_opp_table(dev) ||
		opp_init_devfreq_table(dev,
			&mali_kbase_devfreq_profile.freq_table)) {
		printk("[mali-midgard]  Failed to init devfreq_table\n");
		kbdev->devfreq = NULL;
	} else {
		mali_kbase_devfreq_profile.initial_freq	= clk_get_rate(kbdev->clk);
		rcu_read_lock();
		mali_kbase_devfreq_profile.max_state = opp_get_opp_count(dev);
		rcu_read_unlock();
		kbdev->devfreq = devfreq_add_device(dev,
						&mali_kbase_devfreq_profile,
						"mali_ondemand",
						NULL);
	}

	if (IS_ERR(kbdev->devfreq)) {
		printk("[mali-midgard]  NULL pointer [kbdev->devFreq]\n");
		return MALI_FALSE;
	}

	/* make devfreq function */
	//mali_kbase_devfreq_profile.polling_ms = DEFAULT_POLLING_MS;

#if KBASE_HI3635_GPU_IRDROP_ISSUE
	/* init update work */
	sw_policy.kbdev = kbdev;
	INIT_WORK(&sw_policy.update, handle_switch_policy);
#endif /* KBASE_HI3635_GPU_IRDROP_ISSUE */
#endif
	return MALI_TRUE;
}
Example #4
static int dt_init_opp_table(struct device *cpu_dev)
{
	struct device_node *np;
	int ret;

	np = get_cpu_node_with_valid_op(cpu_dev->id);
	if (!np)
		return -ENODATA;

	cpu_dev->of_node = np;
	ret = of_init_opp_table(cpu_dev);
	of_node_put(np);

	return ret;
}
Example #5
File: mach-imx6q.c  Project: KDr2/linux
static void __init imx6q_opp_init(struct device *cpu_dev)
{
	struct device_node *np;

	np = of_find_node_by_path("/cpus/cpu@0");
	if (!np) {
		pr_warn("failed to find cpu0 node\n");
		return;
	}

	cpu_dev->of_node = np;
	if (of_init_opp_table(cpu_dev)) {
		pr_warn("failed to init OPP table\n");
		goto put_node;
	}

	imx6q_opp_check_1p2ghz(cpu_dev);

put_node:
	of_node_put(np);
}
Example #6
static int imx7d_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct dev_pm_opp *opp;
	unsigned long min_volt, max_volt;
	int num, ret;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find the cpu0 node\n");
		return -ENOENT;
	}

	arm_clk = devm_clk_get(cpu_dev, "arm");
	arm_src	= devm_clk_get(cpu_dev, "arm_root_src");
	pll_arm = devm_clk_get(cpu_dev, "pll_arm");
	pll_sys_main = devm_clk_get(cpu_dev, "pll_sys_main");

	if (IS_ERR(arm_clk) || IS_ERR(arm_src) || IS_ERR(pll_arm) ||
	    IS_ERR(pll_sys_main)) {
		dev_err(cpu_dev, "failed to get clocks\n");
		ret = -ENOENT;
		goto put_node;
	}

	arm_reg = devm_regulator_get(cpu_dev, "arm");
	if (IS_ERR(arm_reg)) {
		dev_err(cpu_dev, "failed to get the regulator\n");
		ret = -ENOENT;
		goto put_node;
	}

	/* We expect an OPP table supplied by the platform.
	 * Just in case the platform did not supply the OPP
	 * table, try to get it here.
	 */
	num = dev_pm_opp_get_opp_count(cpu_dev);
	if (num < 0) {
		ret = of_init_opp_table(cpu_dev);
		if (ret < 0) {
			dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
			goto put_node;
		}
		num = dev_pm_opp_get_opp_count(cpu_dev);
		if (num < 0) {
			ret = num;
			dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
			goto put_node;
		}
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto put_node;
	}

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	/* OPP is maintained in order of increasing frequency, and
	 * freq_table initialized from OPP is therefore sorted in the
	 * same order
	 */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[0].frequency * 1000, true);
	min_volt = dev_pm_opp_get_voltage(opp);
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[--num].frequency * 1000, true);
	max_volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();
	ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
	if (ret > 0)
		transition_latency += ret * 1000;

	ret = cpufreq_register_driver(&imx7d_cpufreq_driver);
	if (ret) {
		dev_err(cpu_dev, "failed register driver: %d\n", ret);
		goto free_freq_table;
	}

	mutex_init(&set_cpufreq_lock);

	register_pm_notifier(&imx7_cpufreq_pm_notifier);
	of_node_put(np);
	return 0;

free_freq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
	of_node_put(np);

	return ret;
}
Example #7
static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct cpufreq_dt_platform_data *pd;
	struct cpufreq_frequency_table *freq_table;
	struct thermal_cooling_device *cdev;
	struct device_node *np;
	struct private_data *priv;
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	unsigned long min_uV = ~0, max_uV = 0;
	unsigned int transition_latency;
	int ret;

	ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret) {
		pr_err("%s: Failed to allocate resources\n: %d", __func__, ret);
		return ret;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
		ret = -ENOENT;
		goto out_put_reg_clk;
	}

	/* OPPs might be populated at runtime, don't check for error here */
	of_init_opp_table(cpu_dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	if (!IS_ERR(cpu_reg)) {
		unsigned long opp_freq = 0;

		/*
		 * Disable any OPPs where the connected regulator isn't able to
		 * provide the specified voltage and record minimum and maximum
		 * voltage levels.
		 */
		while (1) {
			struct dev_pm_opp *opp;
			unsigned long opp_uV, tol_uV;

			rcu_read_lock();
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
			if (IS_ERR(opp)) {
				rcu_read_unlock();
				break;
			}
			opp_uV = dev_pm_opp_get_voltage(opp);
			rcu_read_unlock();

			tol_uV = opp_uV * priv->voltage_tolerance / 100;
			if (regulator_is_supported_voltage(cpu_reg, opp_uV,
							   opp_uV + tol_uV)) {
				if (opp_uV < min_uV)
					min_uV = opp_uV;
				if (opp_uV > max_uV)
					max_uV = opp_uV;
			} else {
				dev_pm_opp_disable(cpu_dev, opp_freq);
			}

			opp_freq++;
		}

		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	/*
	 * For now, just loading the cooling device;
	 * thermal DT code takes care of matching them.
	 */
	if (of_find_property(np, "#cooling-cells", NULL)) {
		cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
		if (IS_ERR(cdev))
			dev_err(cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(cdev));
		else
			priv->cdev = cdev;
	}

	priv->cpu_dev = cpu_dev;
	priv->cpu_reg = cpu_reg;
	policy->driver_data = priv;

	policy->clk = cpu_clk;
	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret) {
		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
			ret);
		goto out_cooling_unregister;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	pd = cpufreq_get_driver_data();
	if (!pd || !pd->independent_clocks)
		cpumask_setall(policy->cpus);

	of_node_put(np);

	return 0;

out_cooling_unregister:
	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
	kfree(priv);
out_put_node:
	of_node_put(np);
out_put_reg_clk:
	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	return ret;
}
Example #8
static int ddr_devfreq_probe(struct platform_device *pdev)
{
	struct ddr_devfreq_device *ddev = NULL;
	struct device_node *np = pdev->dev.of_node;
	struct devfreq_pm_qos_data *ddata = NULL;
	const char *type = NULL;
	int ret = 0;

#ifdef CONFIG_INPUT_PULSE_SUPPORT
	int rc = 0;
	static int inited = 0;
	struct device_node *root = NULL;

	if (inited == 0) {
		root = of_find_compatible_node(NULL, NULL, "hisilicon,ddrfreq_boost");
		if (!root) {
			pr_err("%s hisilicon,ddrfreq_boost no root node\n", __func__);
		} else {
			of_property_read_u32_array(root, "switch-value", &boost_ddrfreq_switch, 0x1);
			pr_err("switch-value: %d\n", boost_ddrfreq_switch);

			if (boost_ddrfreq_switch != 0x0) {
				of_property_read_u32_array(root, "ddrfreq_dfs_value", &boost_ddr_dfs_band, 0x1);
				of_property_read_u32_array(root, "ddrfreq_dfs_last", &boost_ddr_dfs_last, 0x1);
				pr_err("boost_ddr_dfs_band: %d, boost_ddr_dfs_last: %d\n", boost_ddr_dfs_band, boost_ddr_dfs_last);

				rc = input_register_handler(&ddrfreq_input_handler);
				if (rc)
					pr_warn("%s: failed to register input handler\n",
						__func__);

				down_wq = alloc_workqueue("ddrfreq_down", 0, 1);
				if (!down_wq)
					return -ENOMEM;

				INIT_WORK(&inputopen.inputopen_work, ddrfreq_input_open);
				ddrfreq_min_req.pm_qos_class = 0;
				atomic_set(&flag, 0x0);
				INIT_DELAYED_WORK(&ddrfreq_begin, (work_func_t)ddrfreq_begin_work);
				INIT_DELAYED_WORK(&ddrfreq_end, (work_func_t)ddrfreq_end_work);
			}
		}
		inited = 1;
	}
#endif /* CONFIG_INPUT_PULSE_SUPPORT */

	if (!np) {
		pr_err("%s: %s %d, no device node\n",
			MODULE_NAME, __func__, __LINE__);
		ret = -ENODEV;
		goto out;
	}

	ret = of_property_read_string(np, "pm_qos_class", &type);
	if (ret) {
		pr_err("%s: %s %d, no type\n",
			MODULE_NAME, __func__, __LINE__);
		ret = -EINVAL;
		goto no_type;
	}

	if (!strcmp("memory_tput", type)) {
		ret=of_property_read_u32_array(np, "pm_qos_data_reg", (u32 *)&ddr_devfreq_pm_qos_data, 0x2);
		if (ret) {
			pr_err("%s: %s %d, no type\n",
			MODULE_NAME, __func__, __LINE__);
	}
		pr_err("%s: %s %d, per_hz %d  utilization %d\n",
			MODULE_NAME, __func__, __LINE__,ddr_devfreq_pm_qos_data.bytes_per_sec_per_hz,ddr_devfreq_pm_qos_data.bd_utilization);
		ddata = &ddr_devfreq_pm_qos_data;
		dev_set_name(&pdev->dev, "ddrfreq");
	} else if (!strcmp("memory_tput_up_threshold", type)) {
		ret = of_property_read_u32_array(np, "pm_qos_data_reg", (u32 *) &ddr_devfreq_up_th_pm_qos_data, 0x2);
		if (ret) {
			pr_err("%s: %s %d, no type\n",
			MODULE_NAME, __func__, __LINE__);
		}
		pr_err("%s: %s %d, per_hz %d  utilization %d\n",
			MODULE_NAME, __func__, __LINE__,ddr_devfreq_up_th_pm_qos_data.bytes_per_sec_per_hz,ddr_devfreq_up_th_pm_qos_data.bd_utilization);
		ddata = &ddr_devfreq_up_th_pm_qos_data;
		dev_set_name(&pdev->dev, "ddrfreq_up_threshold");
	} else {
		pr_err("%s: %s %d, err type\n",
			MODULE_NAME, __func__, __LINE__);
		ret = -EINVAL;
		goto err_type;
	}

	ddev = kmalloc(sizeof(struct ddr_devfreq_device), GFP_KERNEL);
	if (!ddev) {
		pr_err("%s: %s %d, no mem\n",
			MODULE_NAME, __func__, __LINE__);
		ret = -ENOMEM;
		goto no_mem;
	}

	ddev->set = of_clk_get(np, 0);
	if (IS_ERR(ddev->set)) {
		pr_err("%s: %s %d, Failed to get set-clk\n",
			MODULE_NAME, __func__, __LINE__);
		ret = -ENODEV;
		goto no_clk1;
	}

	ddev->get = of_clk_get(np, 1);
	if (IS_ERR(ddev->get)) {
		pr_err("%s: %s %d, Failed to get get-clk\n",
			MODULE_NAME, __func__, __LINE__);
		ret = -ENODEV;
		goto no_clk2;
	}

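	/* Populate the OPP table from DT, then derive the devfreq frequency table */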
	if (of_init_opp_table(&pdev->dev) ||
		opp_init_devfreq_table(&pdev->dev,
			&ddr_devfreq_dev_profile.freq_table)) {
		ddev->devfreq = NULL;
	} else {
		ddr_devfreq_dev_profile.initial_freq = clk_get_rate(ddev->get);
		rcu_read_lock();
		ddr_devfreq_dev_profile.max_state = opp_get_opp_count(&pdev->dev);
		rcu_read_unlock();
		ddev->devfreq = devfreq_add_device(&pdev->dev,
					&ddr_devfreq_dev_profile,
					"pm_qos",
					ddata);
	}

	if (IS_ERR(ddev->devfreq)) {
		pr_err("%s: %s %d, <%s>, Failed to init ddr devfreq_table\n",
			MODULE_NAME, __func__, __LINE__, type);
		ret = -ENODEV;
		goto no_devfreq;
	}

	/*
	 * Cached frequency value.
	 * It does not necessarily reflect the actual DDR clock rate; it
	 * caches the last frequency set through this module, which is not
	 * guaranteed to match the rate currently in effect.
	 */
	ddev->freq = 0;
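	/* Both devfreq limits start at the highest frequency in the OPP-derived table */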
	if (ddev->devfreq) {
		ddev->devfreq->max_freq = ddr_devfreq_dev_profile.freq_table[ddr_devfreq_dev_profile.max_state - 1];
		ddev->devfreq->min_freq = ddr_devfreq_dev_profile.freq_table[ddr_devfreq_dev_profile.max_state - 1];
	}

	platform_set_drvdata(pdev, ddev);

	pr_info("%s: <%s> ready\n", MODULE_NAME, type);
	return ret;

no_devfreq:
	clk_put(ddev->get);
	opp_free_devfreq_table(&pdev->dev,
			&ddr_devfreq_dev_profile.freq_table);
no_clk2:
	clk_put(ddev->set);
no_clk1:
	kfree(ddev);
no_mem:
err_type:
no_type:
out:
	return ret;
}
Example #9
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct dev_pm_opp *opp;
	unsigned long min_volt, max_volt;
	int num, ret;
	const struct property *prop;
	const __be32 *val;
	u32 nr, j, i = 0;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find cpu0 node\n");
		return -ENOENT;
	}

	arm_clk = devm_clk_get(cpu_dev, "arm");
	pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
	pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
	step_clk = devm_clk_get(cpu_dev, "step");
	pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
	pll1_bypass = devm_clk_get(cpu_dev, "pll1_bypass");
	pll1 = devm_clk_get(cpu_dev, "pll1");
	pll1_bypass_src = devm_clk_get(cpu_dev, "pll1_bypass_src");

	if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
	    IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk) ||
	    IS_ERR(pll1_bypass) || IS_ERR(pll1) ||
	    IS_ERR(pll1_bypass_src)) {
		dev_err(cpu_dev, "failed to get clocks\n");
		ret = -ENOENT;
		goto put_node;
	}

	arm_reg = devm_regulator_get_optional(cpu_dev, "arm");
	pu_reg = devm_regulator_get_optional(cpu_dev, "pu");
	soc_reg = devm_regulator_get_optional(cpu_dev, "soc");
	if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
		dev_err(cpu_dev, "failed to get regulators\n");
		ret = -ENOENT;
		goto put_node;
	}

	/*
	 * Keep soc_reg in sync with arm_reg if ARM shares the same regulator
	 * with the SoC. Otherwise the regulator framework will refuse to
	 * update this consumer's voltage while another consumer's voltage
	 * still holds the old value, e.g. imx6sx-sdb with pfuze200 in
	 * ldo-bypass mode.
	 */
	of_property_read_u32(np, "fsl,arm-soc-shared", &i);
	if (i == 1)
		soc_reg = arm_reg;
	/*
	 * We expect an OPP table supplied by the platform.
	 * Just in case the platform did not supply the OPP
	 * table, try to get it here.
	 */
	num = dev_pm_opp_get_opp_count(cpu_dev);
	if (num < 0) {
		ret = of_init_opp_table(cpu_dev);
		if (ret < 0) {
			dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
			goto put_node;
		}

		num = dev_pm_opp_get_opp_count(cpu_dev);
		if (num < 0) {
			ret = num;
			dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
			goto put_node;
		}
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto put_node;
	}

	/* Make the imx6_soc_volt array the same size as the number of ARM OPPs */
	imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
	if (imx6_soc_volt == NULL) {
		ret = -ENOMEM;
		goto free_freq_table;
	}

	prop = of_find_property(np, "fsl,soc-operating-points", NULL);
	if (!prop || !prop->value)
		goto soc_opp_out;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2 || (nr / 2) < num)
		goto soc_opp_out;

	for (j = 0; j < num; j++) {
		val = prop->value;
		for (i = 0; i < nr / 2; i++) {
			unsigned long freq = be32_to_cpup(val++);
			unsigned long volt = be32_to_cpup(val++);
			if (freq_table[j].frequency == freq) {
				imx6_soc_volt[soc_opp_count++] = volt;
#ifdef CONFIG_MX6_VPU_352M
				if (freq == 792000) {
					pr_info("increase SOC/PU voltage for VPU352MHz\n");
					imx6_soc_volt[soc_opp_count - 1] = 1250000;
				}
#endif
				break;
			}
		}
	}

soc_opp_out:
	/* use fixed soc opp volt if no valid soc opp info found in dtb */
	if (soc_opp_count != num) {
		dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
		for (j = 0; j < num; j++)
			imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
		if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
			imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
	}

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	/*
	 * Calculate the ramp time for max voltage change in the
	 * VDDSOC and VDDPU regulators.
	 */
	ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
	if (ret > 0)
		transition_latency += ret * 1000;
	if (!IS_ERR(pu_reg)) {
		ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0],
			imx6_soc_volt[num - 1]);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	/*
	 * OPP is maintained in order of increasing frequency, and
	 * freq_table initialised from OPP is therefore sorted in the
	 * same order.
	 */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				  freq_table[0].frequency * 1000, true);
	min_volt = dev_pm_opp_get_voltage(opp);
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				  freq_table[--num].frequency * 1000, true);
	max_volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();
	ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
	if (ret > 0)
		transition_latency += ret * 1000;

	ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
	if (ret) {
		dev_err(cpu_dev, "failed register driver: %d\n", ret);
		goto free_freq_table;
	}

	mutex_init(&set_cpufreq_lock);
	register_pm_notifier(&imx6_cpufreq_pm_notifier);

	of_node_put(np);
	return 0;

free_freq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
	of_node_put(np);
	return ret;
}