Example No. 1
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
					unsigned long action, void *_arg)
{
	pr_debug("%s: action: %ld\n", __func__, action);

	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		cpufreq_unregister_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_ENABLE:
		set_switching_enabled(true);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_DISABLE:
		set_switching_enabled(false);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
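The callback above is only useful once it is attached to the big.LITTLE switcher's notifier chain. Below is a minimal sketch of that wiring, assuming the mainline bL_switcher_register_notifier() helper; the __bLs_register_notifier() name simply mirrors the helper invoked in Example No. 20 further down.

/*
 * Sketch only: how a callback such as bL_cpufreq_switcher_notifier() is
 * typically attached to the switcher's notifier chain. The helper name
 * mirrors __bLs_register_notifier() from Example No. 20.
 */
static struct notifier_block bL_switcher_notifier = {
	.notifier_call = bL_cpufreq_switcher_notifier,
};

static int __bLs_register_notifier(void)
{
	return bL_switcher_register_notifier(&bL_switcher_notifier);
}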
Example No. 2
static int __init qoriq_cpufreq_init(void)
{
	int ret;
	struct device_node  *np;
	const struct of_device_id *match;
	const struct soc_data *data;

	np = of_find_matching_node(NULL, node_matches);
	if (!np)
		return -ENODEV;

	match = of_match_node(node_matches, np);
	data = match->data;

	of_node_put(np);

	if (data && data->flags & SOC_BLACKLIST)
		return -ENODEV;

	ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
	if (!ret)
		pr_info("Freescale QorIQ CPU frequency scaling driver\n");

	return ret;
}
Example No. 3
static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret;

	/*
	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
	 * from ->init(). In probe(), we just need to make sure that clk and
	 * regulators are available. Else defer probe and retry.
	 *
	 * FIXME: Is checking this only for CPU0 sufficient ?
	 */
	ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
	if (ret)
		return ret;

	clk_put(cpu_clk);
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);

	dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret)
		dev_err(cpu_dev, "failed register driver: %d\n", ret);

	return ret;
}
Example No. 4
static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
	int ret;

	/*
	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
	 * from ->init(). In probe(), we just need to make sure that clk and
	 * regulators are available. Else defer probe and retry.
	 *
	 * FIXME: Is checking this only for CPU0 sufficient ?
	 */
	ret = resources_available();
	if (ret)
		return ret;

	if (data && data->have_governor_per_policy)
		dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret)
		dev_err(&pdev->dev, "failed register driver: %d\n", ret);

	return ret;
}
Example No. 5
/**
 * speedstep_init - initializes the SpeedStep CPUFreq driver
 *
 *   Initializes the SpeedStep support. Returns -ENODEV on unsupported
 * devices, -EINVAL on problems during initialization, and zero on
 * success.
 */
static int __init speedstep_init(void)
{
	if (!x86_match_cpu(ss_smi_ids))
		return -ENODEV;

	/* detect processor */
	speedstep_processor = speedstep_detect_processor();
	if (!speedstep_processor) {
		pr_debug("Intel(R) SpeedStep(TM) capable processor "
				"not found\n");
		return -ENODEV;
	}

	/* detect chipset */
	if (!speedstep_detect_chipset()) {
		pr_debug("Intel(R) SpeedStep(TM) for this chipset not "
				"(yet) available.\n");
		return -ENODEV;
	}

	/* activate speedstep support */
	if (speedstep_activate()) {
		pci_dev_put(speedstep_chipset_dev);
		return -EINVAL;
	}

	if (speedstep_find_register())
		return -ENODEV;

	return cpufreq_register_driver(&speedstep_driver);
}
Example No. 6
static int __init tegra_cpufreq_init(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	pm_notifier(tegra_cpufreq_pm_notifier, 0);
#endif
	return cpufreq_register_driver(&s_tegra_cpufreq_driver);
}
Example No. 7
static int omap_cpufreq_probe(struct platform_device *pdev)
{
	mpu_dev = get_cpu_device(0);
	if (!mpu_dev) {
		pr_warning("%s: unable to get the mpu device\n", __func__);
		return -EINVAL;
	}

	mpu_reg = regulator_get(mpu_dev, "vcc");
	if (IS_ERR(mpu_reg)) {
		pr_warning("%s: unable to get MPU regulator\n", __func__);
		mpu_reg = NULL;
	} else {
		/* 
		 * Ensure physical regulator is present.
		 * (e.g. could be dummy regulator.)
		 */
		if (regulator_get_voltage(mpu_reg) < 0) {
			pr_warn("%s: physical regulator not present for MPU\n",
				__func__);
			regulator_put(mpu_reg);
			mpu_reg = NULL;
		}
	}

	return cpufreq_register_driver(&omap_driver);
}
Example No. 8
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
{
	struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
	struct clk *asyncclk;

	if (!pdata)
		return -EINVAL;
	if (!pdata->freq_table)
		return -EINVAL;

	cpufreq.dev = &pdev->dev;

	cpufreq.armclk = clk_get(NULL, "arm");
	if (IS_ERR(cpufreq.armclk)) {
		dev_err(cpufreq.dev, "Unable to get ARM clock\n");
		return PTR_ERR(cpufreq.armclk);
	}

	asyncclk = clk_get(cpufreq.dev, "async");
	if (!IS_ERR(asyncclk)) {
		cpufreq.asyncclk = asyncclk;
		cpufreq.asyncrate = clk_get_rate(asyncclk);
	}

	return cpufreq_register_driver(&davinci_driver);
}
Example No. 9
static int __init acpi_cpufreq_init(void)
{
	pr_debug("acpi_cpufreq_init\n");

	return cpufreq_register_driver(&acpi_cpufreq_driver);
}
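For a registration routine this small, the matching teardown is usually just the inverse call. The sketch below shows the customary pairing; the exit function name and the module_init/module_exit wiring are illustrative rather than taken from the example above.

/*
 * Sketch only: the usual counterpart to cpufreq_register_driver() in a
 * modular driver. Names here are illustrative.
 */
static void __exit acpi_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&acpi_cpufreq_driver);
}

module_init(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);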
Example No. 10
static int __init sfi_cpufreq_init(void)
{
	int ret, i;

	/* parse the freq table from SFI */
	ret = sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
	if (ret)
		return ret;

	freq_table = kzalloc(sizeof(*freq_table) *
			(num_freq_table_entries + 1), GFP_KERNEL);
	if (!freq_table) {
		ret = -ENOMEM;
		goto err_free_array;
	}

	for (i = 0; i < num_freq_table_entries; i++) {
		freq_table[i].driver_data = i;
		freq_table[i].frequency = sfi_cpufreq_array[i].freq_mhz * 1000;
	}
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	ret = cpufreq_register_driver(&sfi_cpufreq_driver);
	if (ret)
		goto err_free_tbl;

	return ret;

err_free_tbl:
	kfree(freq_table);
err_free_array:
	kfree(sfi_cpufreq_array);
	return ret;
}
Example No. 11
static int mt8173_cpufreq_probe(struct platform_device *pdev)
{
	int ret;

	ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
	if (ret)
		pr_err("failed to register mtk cpufreq driver\n");

	return ret;
}
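A probe function like mt8173_cpufreq_probe() is reached through ordinary platform-driver glue. A minimal sketch of that boilerplate follows; the .name string and the variable name are illustrative.

/*
 * Sketch only: platform_driver glue that would invoke the probe above.
 * The driver name is illustrative.
 */
static struct platform_driver mt8173_cpufreq_platdrv = {
	.driver = {
		.name	= "mt8173-cpufreq",
	},
	.probe	= mt8173_cpufreq_probe,
};
module_platform_driver(mt8173_cpufreq_platdrv);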
Example No. 12
static int __init longrun_init(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
	    !cpu_has(c, X86_FEATURE_LONGRUN))
		return -ENODEV;

	return cpufreq_register_driver(&longrun_driver);
}
Example No. 13
static int exynos_cpufreq_probe(struct platform_device *pdev)
{
	int ret = -EINVAL;

	exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
	if (!exynos_info)
		return -ENOMEM;

	exynos_info->dev = &pdev->dev;

	if (of_machine_is_compatible("samsung,exynos4210")) {
		exynos_info->type = EXYNOS_SOC_4210;
		ret = exynos4210_cpufreq_init(exynos_info);
	} else if (of_machine_is_compatible("samsung,exynos4212")) {
		exynos_info->type = EXYNOS_SOC_4212;
		ret = exynos4x12_cpufreq_init(exynos_info);
	} else if (of_machine_is_compatible("samsung,exynos4412")) {
		exynos_info->type = EXYNOS_SOC_4412;
		ret = exynos4x12_cpufreq_init(exynos_info);
	} else if (of_machine_is_compatible("samsung,exynos5250")) {
		exynos_info->type = EXYNOS_SOC_5250;
		ret = exynos5250_cpufreq_init(exynos_info);
	} else {
		pr_err("%s: Unknown SoC type\n", __func__);
		kfree(exynos_info);
		return -ENODEV;
	}

	if (ret)
		goto err_vdd_arm;

	if (exynos_info->set_freq == NULL) {
		dev_err(&pdev->dev, "No set_freq function (ERR)\n");
		goto err_vdd_arm;
	}

	arm_regulator = regulator_get(NULL, "vdd_arm");
	if (IS_ERR(arm_regulator)) {
		dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
		goto err_vdd_arm;
	}

	/* Done here as we want to capture boot frequency */
	locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;

	if (!cpufreq_register_driver(&exynos_driver))
		return 0;

	dev_err(&pdev->dev, "failed to register cpufreq driver\n");
	regulator_put(arm_regulator);
err_vdd_arm:
	kfree(exynos_info);
	return -EINVAL;
}
Example No. 14
static int scpi_cpufreq_probe(struct platform_device *pdev)
{
	int ret;

	scpi_ops = get_scpi_ops();
	if (!scpi_ops)
		return -EIO;

	ret = cpufreq_register_driver(&scpi_cpufreq_driver);
	if (ret)
		dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n",
			__func__, ret);
	return ret;
}
Example No. 15
/**
 * speedstep_init - initializes the SpeedStep CPUFreq driver
 *
 *   Initializes the SpeedStep support. Returns -ENODEV on unsupported
 * BIOS, -EINVAL on problems during initialization, and zero on
 * success.
 */
static int __init speedstep_init(void)
{
	if (!x86_match_cpu(ss_smi_ids))
		return -ENODEV;

	speedstep_processor = speedstep_detect_processor();

	switch (speedstep_processor) {
	case SPEEDSTEP_CPU_PIII_T:
	case SPEEDSTEP_CPU_PIII_C:
	case SPEEDSTEP_CPU_PIII_C_EARLY:
		break;
	default:
		speedstep_processor = 0;
	}

	if (!speedstep_processor) {
		pr_debug("No supported Intel CPU detected.\n");
		return -ENODEV;
	}

	pr_debug("signature:0x%.8x, command:0x%.8x, "
		"event:0x%.8x, perf_level:0x%.8x.\n",
		ist_info.signature, ist_info.command,
		ist_info.event, ist_info.perf_level);

	/*
	 * Error if no IST-SMI BIOS or no PARM
	 * sig = 'ISGE' aka 'Intel Speedstep Gate E'
	 */
	if ((ist_info.signature !=  0x47534943) && (
	    (smi_port == 0) || (smi_cmd == 0)))
		return -ENODEV;

	if (smi_sig == 1)
		smi_sig = 0x47534943;
	else
		smi_sig = ist_info.signature;

	/* set up smi_port from the module parameter or the BIOS */
	if ((smi_port > 0xff) || (smi_port < 0))
		return -EINVAL;
	else if (smi_port == 0)
		smi_port = ist_info.command & 0xff;

	if ((smi_cmd > 0xff) || (smi_cmd < 0))
		return -EINVAL;
	else if (smi_cmd == 0)
		smi_cmd = (ist_info.command >> 16) & 0xff;

	return cpufreq_register_driver(&speedstep_driver);
}
Example No. 16
static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();

	return ret;
}
Example No. 17
static int exynos_cpufreq_probe(struct platform_device *pdev)
{
	int ret = -EINVAL;

	exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
	if (!exynos_info)
		return -ENOMEM;

	exynos_info->dev = &pdev->dev;

	if (soc_is_exynos4210())
		ret = exynos4210_cpufreq_init(exynos_info);
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		ret = exynos4x12_cpufreq_init(exynos_info);
	else if (soc_is_exynos5250())
		ret = exynos5250_cpufreq_init(exynos_info);
	else {
		kfree(exynos_info);
		return 0;
	}

	if (ret)
		goto err_vdd_arm;

	if (exynos_info->set_freq == NULL) {
		dev_err(&pdev->dev, "No set_freq function (ERR)\n");
		goto err_vdd_arm;
	}

	arm_regulator = regulator_get(NULL, "vdd_arm");
	if (IS_ERR(arm_regulator)) {
		dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
		goto err_vdd_arm;
	}

	/* Done here as we want to capture boot frequency */
	locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;

	if (!cpufreq_register_driver(&exynos_driver))
		return 0;

	dev_err(&pdev->dev, "failed to register cpufreq driver\n");
	regulator_put(arm_regulator);
err_vdd_arm:
	kfree(exynos_info);
	return -EINVAL;
}
Example No. 18
static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
		acpi_cpufreq_boost_exit();
	}
	return ret;
}
Example No. 19
static int __init sc520_freq_init(void)
{
	int err;

	if (!x86_match_cpu(sc520_ids))
		return -ENODEV;

	cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
	if (!cpuctl) {
		printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
		return -ENOMEM;
	}

	err = cpufreq_register_driver(&sc520_freq_driver);
	if (err)
		iounmap(cpuctl);

	return err;
}
Example No. 20
int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops)
{
	int ret, i;

	if (arm_bL_ops) {
		pr_debug("%s: Already registered: %s, exiting\n", __func__,
				arm_bL_ops->name);
		return -EBUSY;
	}

	if (!ops || !strlen(ops->name) || !ops->init_opp_table ||
	    !ops->get_transition_latency) {
		pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
		return -ENODEV;
	}

	arm_bL_ops = ops;

	set_switching_enabled(bL_switcher_get_enabled());

	for (i = 0; i < MAX_CLUSTERS; i++)
		mutex_init(&cluster_lock[i]);

	ret = cpufreq_register_driver(&bL_cpufreq_driver);
	if (ret) {
		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
				__func__, ops->name, ret);
		arm_bL_ops = NULL;
	} else {
		ret = __bLs_register_notifier();
		if (ret) {
			cpufreq_unregister_driver(&bL_cpufreq_driver);
			arm_bL_ops = NULL;
		} else {
			pr_info("%s: Registered platform driver: %s\n",
					__func__, ops->name);
		}
	}

	bL_switcher_put_enabled();
	return ret;
}
Example No. 21
static int __init tegra_cpufreq_init(void)
{
	cpu_clk = clk_get_sys(NULL, "cclk");
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	pll_x_clk = clk_get_sys(NULL, "pll_x");
	if (IS_ERR(pll_x_clk))
		return PTR_ERR(pll_x_clk);

	pll_p_clk = clk_get_sys(NULL, "pll_p");
	if (IS_ERR(pll_p_clk))
		return PTR_ERR(pll_p_clk);

	emc_clk = clk_get_sys("cpu", "emc");
	if (IS_ERR(emc_clk)) {
		clk_put(cpu_clk);
		return PTR_ERR(emc_clk);
	}

	return cpufreq_register_driver(&tegra_cpufreq_driver);
}
Example No. 22
unsigned int __init powernow_register_driver()
{
    unsigned int i, ret = 0;

    for_each_online_cpu(i) {
        struct cpuinfo_x86 *c = &cpu_data[i];
        if (c->x86_vendor != X86_VENDOR_AMD)
            ret = -ENODEV;
        else
        {
            u32 eax, ebx, ecx, edx;
            cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
            if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
                ret = -ENODEV;
        }
        if (ret)
            return ret;
    }

    ret = cpufreq_register_driver(&powernow_cpufreq_driver);
    return ret;
}
Example No. 23
static int __init cpufreq_init(void)
{
	int ret;

	/* Register platform stuff */
	ret = platform_driver_register(&platform_driver);
	if (ret)
		return ret;

	pr_info("Loongson-2F CPU frequency driver\n");

	cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);

	ret = cpufreq_register_driver(&loongson2_cpufreq_driver);

	if (!ret && !nowait) {
		saved_cpu_wait = cpu_wait;
		cpu_wait = loongson2_cpu_wait;
	}

	return ret;
}
Example No. 24
static int dbx500_cpufreq_probe(struct platform_device *pdev)
{
	int i = 0;

	freq_table = dev_get_platdata(&pdev->dev);
	if (!freq_table) {
		pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n");
		return -ENODEV;
	}

	armss_clk = clk_get(&pdev->dev, "armss");
	if (IS_ERR(armss_clk)) {
		pr_err("dbx500-cpufreq: Failed to get armss clk\n");
		return PTR_ERR(armss_clk);
	}

	pr_info("dbx500-cpufreq: Available frequencies:\n");
	while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
		pr_info("  %d Mhz\n", freq_table[i].frequency/1000);
		i++;
	}

	return cpufreq_register_driver(&dbx500_cpufreq_driver);
}
Example No. 25
/**
 * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
 *
 * Initializes the LongRun support.
 */
static int __init longrun_init(void)
{
	if (!x86_match_cpu(longrun_ids))
		return -ENODEV;
	return cpufreq_register_driver(&longrun_driver);
}
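Both LongRun initializers gate registration on an x86_cpu_id match table. The sketch below shows what such a table looks like in drivers of this vintage; the single entry mirrors the Transmeta vendor and X86_FEATURE_LONGRUN check from Example No. 12, with fields in the order vendor, family, model, feature.

/*
 * Sketch only: an x86_cpu_id table of the kind passed to x86_match_cpu()
 * above. The entry mirrors the checks in Example No. 12.
 */
static const struct x86_cpu_id longrun_ids[] = {
	{ X86_VENDOR_TRANSMETA, X86_FAMILY_ANY, X86_MODEL_ANY,
	  X86_FEATURE_LONGRUN },
	{}
};
MODULE_DEVICE_TABLE(x86cpu, longrun_ids);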
Example No. 26
static int ls1x_cpufreq_probe(struct platform_device *pdev)
{
	struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
	struct clk *clk;
	int ret;

	if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
		return -EINVAL;

	ls1x_cpufreq.dev = &pdev->dev;

	clk = devm_clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(clk)) {
		dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
			pdata->clk_name);
		ret = PTR_ERR(clk);
		goto out;
	}
	ls1x_cpufreq.clk = clk;

	clk = clk_get_parent(clk);
	if (IS_ERR(clk)) {
		dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
			__clk_get_name(ls1x_cpufreq.clk));
		ret = PTR_ERR(clk);
		goto out;
	}
	ls1x_cpufreq.mux_clk = clk;

	clk = clk_get_parent(clk);
	if (IS_ERR(clk)) {
		dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
			__clk_get_name(ls1x_cpufreq.mux_clk));
		ret = PTR_ERR(clk);
		goto out;
	}
	ls1x_cpufreq.pll_clk = clk;

	clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
	if (IS_ERR(clk)) {
		dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
			pdata->osc_clk_name);
		ret = PTR_ERR(clk);
		goto out;
	}
	ls1x_cpufreq.osc_clk = clk;

	ls1x_cpufreq.max_freq = pdata->max_freq;
	ls1x_cpufreq.min_freq = pdata->min_freq;

	ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
	if (ret) {
		dev_err(ls1x_cpufreq.dev,
			"failed to register cpufreq driver: %d\n", ret);
		goto out;
	}

	ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);

	if (!ret)
		goto out;

	dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
		ret);

	cpufreq_unregister_driver(&ls1x_cpufreq_driver);
out:
	return ret;
}
Example No. 27
/*
 ====================================================
  Module Initialisation registers the cpufreq driver
 ====================================================
*/
static int __init bcm2835_cpufreq_module_init(void)
{
	print_debug("IN\n");
	return cpufreq_register_driver(&bcm2835_cpufreq_driver);
}
Example No. 28
static int imx7d_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct dev_pm_opp *opp;
	unsigned long min_volt, max_volt;
	int num, ret;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find the cpu0 node\n");
		return -ENOENT;
	}

	arm_clk = devm_clk_get(cpu_dev, "arm");
	arm_src	= devm_clk_get(cpu_dev, "arm_root_src");
	pll_arm = devm_clk_get(cpu_dev, "pll_arm");
	pll_sys_main = devm_clk_get(cpu_dev, "pll_sys_main");

	if (IS_ERR(arm_clk) || IS_ERR(arm_src) || IS_ERR(pll_arm) ||
	    IS_ERR(pll_sys_main)) {
		dev_err(cpu_dev, "failed to get clocks\n");
		ret = -ENOENT;
		goto put_node;
	}

	arm_reg = devm_regulator_get(cpu_dev, "arm");
	if (IS_ERR(arm_reg)) {
		dev_err(cpu_dev, "failed to get the regulator\n");
		ret = -ENOENT;
		goto put_node;
	}

	/*
	 * We expect an OPP table supplied by the platform.
	 * In case the platform did not supply one, try to
	 * create it here.
	 */
	num = dev_pm_opp_get_opp_count(cpu_dev);
	if (num < 0) {
		ret = of_init_opp_table(cpu_dev);
		if (ret < 0) {
			dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
			goto put_node;
		}
		num = dev_pm_opp_get_opp_count(cpu_dev);
		if (num < 0) {
			ret = num;
			dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
			goto put_node;
		}
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto put_node;
	}

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	/*
	 * OPP is maintained in order of increasing frequency, and
	 * freq_table initialized from OPP is therefore sorted in the
	 * same order.
	 */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[0].frequency * 1000, true);
	min_volt = dev_pm_opp_get_voltage(opp);
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[--num].frequency * 1000, true);
	max_volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();
	ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
	if (ret > 0)
		transition_latency += ret * 1000;

	ret = cpufreq_register_driver(&imx7d_cpufreq_driver);
	if (ret) {
		dev_err(cpu_dev, "failed register driver: %d\n", ret);
		goto free_freq_table;
	}

	mutex_init(&set_cpufreq_lock);

	register_pm_notifier(&imx7_cpufreq_pm_notifier);
	of_node_put(np);
	return 0;

free_freq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
	of_node_put(np);

	return ret;
}
Example No. 29
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct dev_pm_opp *opp;
	unsigned long min_volt, max_volt;
	int num, ret;
	const struct property *prop;
	const __be32 *val;
	u32 nr, i, j;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find cpu0 node\n");
		return -ENOENT;
	}

	arm_clk = clk_get(cpu_dev, "arm");
	pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
	pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
	step_clk = clk_get(cpu_dev, "step");
	pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
	if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
	    IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
		dev_err(cpu_dev, "failed to get clocks\n");
		ret = -ENOENT;
		goto put_clk;
	}

	arm_reg = regulator_get(cpu_dev, "arm");
	pu_reg = regulator_get_optional(cpu_dev, "pu");
	soc_reg = regulator_get(cpu_dev, "soc");
	if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
		dev_err(cpu_dev, "failed to get regulators\n");
		ret = -ENOENT;
		goto put_reg;
	}

	/*
	 * We expect an OPP table supplied by the platform.
	 * In case the platform did not supply one, try to
	 * create it here.
	 */
	num = dev_pm_opp_get_opp_count(cpu_dev);
	if (num < 0) {
		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret < 0) {
			dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
			goto put_reg;
		}

		/* Because we have added the OPPs here, we must free them */
		free_opp = true;

		num = dev_pm_opp_get_opp_count(cpu_dev);
		if (num < 0) {
			ret = num;
			dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
			goto out_free_opp;
		}
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto put_reg;
	}

	/* Make imx6_soc_volt array's size same as arm opp number */
	imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
	if (imx6_soc_volt == NULL) {
		ret = -ENOMEM;
		goto free_freq_table;
	}

	prop = of_find_property(np, "fsl,soc-operating-points", NULL);
	if (!prop || !prop->value)
		goto soc_opp_out;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2 || (nr / 2) < num)
		goto soc_opp_out;

	for (j = 0; j < num; j++) {
		val = prop->value;
		for (i = 0; i < nr / 2; i++) {
			unsigned long freq = be32_to_cpup(val++);
			unsigned long volt = be32_to_cpup(val++);
			if (freq_table[j].frequency == freq) {
				imx6_soc_volt[soc_opp_count++] = volt;
				break;
			}
		}
	}

soc_opp_out:
	/* use fixed soc opp volt if no valid soc opp info found in dtb */
	if (soc_opp_count != num) {
		dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
		for (j = 0; j < num; j++)
			imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
		if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
			imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
	}

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	/*
	 * Calculate the ramp time for max voltage change in the
	 * VDDSOC and VDDPU regulators.
	 */
	ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
	if (ret > 0)
		transition_latency += ret * 1000;
	if (!IS_ERR(pu_reg)) {
		ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	/*
	 * OPP is maintained in order of increasing frequency, and
	 * freq_table initialised from OPP is therefore sorted in the
	 * same order.
	 */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				  freq_table[0].frequency * 1000, true);
	min_volt = dev_pm_opp_get_voltage(opp);
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				  freq_table[--num].frequency * 1000, true);
	max_volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();
	ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
	if (ret > 0)
		transition_latency += ret * 1000;

	ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
	if (ret) {
		dev_err(cpu_dev, "failed register driver: %d\n", ret);
		goto free_freq_table;
	}

	of_node_put(np);
	return 0;

free_freq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
	if (free_opp)
		dev_pm_opp_of_remove_table(cpu_dev);
put_reg:
	if (!IS_ERR(arm_reg))
		regulator_put(arm_reg);
	if (!IS_ERR(pu_reg))
		regulator_put(pu_reg);
	if (!IS_ERR(soc_reg))
		regulator_put(soc_reg);
put_clk:
	if (!IS_ERR(arm_clk))
		clk_put(arm_clk);
	if (!IS_ERR(pll1_sys_clk))
		clk_put(pll1_sys_clk);
	if (!IS_ERR(pll1_sw_clk))
		clk_put(pll1_sw_clk);
	if (!IS_ERR(step_clk))
		clk_put(step_clk);
	if (!IS_ERR(pll2_pfd2_396m_clk))
		clk_put(pll2_pfd2_396m_clk);
	of_node_put(np);
	return ret;
}
Example No. 30
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct resource *res;
	int err;

	priv.dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv.base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv.base))
		return PTR_ERR(priv.base);

	np = of_cpu_device_node_get(0);
	if (!np) {
		dev_err(&pdev->dev, "failed to get cpu device node\n");
		return -ENODEV;
	}

	priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
	if (IS_ERR(priv.cpu_clk)) {
		dev_err(priv.dev, "Unable to get cpuclk");
		return PTR_ERR(priv.cpu_clk);
	}

	clk_prepare_enable(priv.cpu_clk);
	kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;

	priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
	if (IS_ERR(priv.ddr_clk)) {
		dev_err(priv.dev, "Unable to get ddrclk");
		err = PTR_ERR(priv.ddr_clk);
		goto out_cpu;
	}

	clk_prepare_enable(priv.ddr_clk);
	kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;

	priv.powersave_clk = of_clk_get_by_name(np, "powersave");
	if (IS_ERR(priv.powersave_clk)) {
		dev_err(priv.dev, "Unable to get powersave");
		err = PTR_ERR(priv.powersave_clk);
		goto out_ddr;
	}
	clk_prepare_enable(priv.powersave_clk);

	of_node_put(np);
	np = NULL;

	err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
	if (!err)
		return 0;

	dev_err(priv.dev, "Failed to register cpufreq driver");

	clk_disable_unprepare(priv.powersave_clk);
out_ddr:
	clk_disable_unprepare(priv.ddr_clk);
out_cpu:
	clk_disable_unprepare(priv.cpu_clk);
	of_node_put(np);

	return err;
}
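Every example above hands cpufreq_register_driver() a struct cpufreq_driver that is defined elsewhere in the same file. For reference, a minimal sketch of such a definition is shown below, built on the generic helpers exported by the cpufreq core; the example_* callbacks are placeholders, not functions taken from any of the drivers above.

/*
 * Sketch only: the kind of cpufreq_driver definition these probe/init
 * functions register. cpufreq_generic_* are real cpufreq core helpers;
 * the example_* callbacks are placeholders.
 */
static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example-cpufreq",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,		/* placeholder */
	.get		= cpufreq_generic_get,
	.init		= example_cpu_init,		/* placeholder */
	.attr		= cpufreq_generic_attr,
};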