static int omap_cpu_init(struct cpufreq_policy *policy)
{
        int result;

        policy->clk = clk_get(NULL, "cpufreq_ck");
        if (IS_ERR(policy->clk))
                return PTR_ERR(policy->clk);

        if (!freq_table) {
                result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
                if (result) {
                        dev_err(mpu_dev,
                                "%s: cpu%d: failed creating freq table[%d]\n",
                                __func__, policy->cpu, result);
                        goto fail;
                }
        }

        atomic_inc_return(&freq_table_users);

        /* FIXME: what's the actual transition time? */
        result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
        if (!result)
                return 0;

        freq_table_free();
fail:
        clk_put(policy->clk);
        return result;
}
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
{
        struct mtk_cpu_dvfs_info *info;
        struct cpufreq_frequency_table *freq_table;
        int ret;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        ret = mtk_cpu_dvfs_info_init(info, policy->cpu);
        if (ret) {
                pr_err("%s failed to initialize dvfs info for cpu%d\n",
                       __func__, policy->cpu);
                goto out_free_dvfs_info;
        }

        ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
        if (ret) {
                pr_err("failed to init cpufreq table for cpu%d: %d\n",
                       policy->cpu, ret);
                goto out_release_dvfs_info;
        }

        ret = cpufreq_table_validate_and_show(policy, freq_table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                goto out_free_cpufreq_table;
        }

        /* CPUs in the same cluster share a clock and power domain. */
        cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling);
        policy->driver_data = info;
        policy->clk = info->cpu_clk;

        return 0;

out_free_cpufreq_table:
        dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
out_release_dvfs_info:
        mtk_cpu_dvfs_info_release(info);
out_free_dvfs_info:
        kfree(info);

        return ret;
}
/**
 * clock_cooling_register - function to create clock cooling device.
 * @dev: struct device pointer to the device used as clock cooling device.
 * @clock_name: string containing the clock used as cooling mechanism.
 *
 * This interface function registers the clock cooling device with the name
 * "thermal-clock-%x". The cooling device is based on clock frequencies.
 * The struct device is assumed to be capable of DVFS transitions.
 * The OPP layer is used to fetch and fill the available frequencies for
 * the referred device. The ordered frequency table is used to control
 * the clock cooling device cooling states and to limit clock transitions
 * based on the cooling state requested by the thermal framework.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
clock_cooling_register(struct device *dev, const char *clock_name)
{
        struct thermal_cooling_device *cdev;
        struct clock_cooling_device *ccdev = NULL;
        char dev_name[THERMAL_NAME_LENGTH];
        int ret = 0;

        ccdev = devm_kzalloc(dev, sizeof(*ccdev), GFP_KERNEL);
        if (!ccdev)
                return ERR_PTR(-ENOMEM);

        ccdev->dev = dev;
        ccdev->clk = devm_clk_get(dev, clock_name);
        if (IS_ERR(ccdev->clk))
                return ERR_CAST(ccdev->clk);

        ret = clock_cooling_get_idr(&ccdev->id);
        if (ret)
                return ERR_PTR(-EINVAL);

        snprintf(dev_name, sizeof(dev_name), "thermal-clock-%d", ccdev->id);

        cdev = thermal_cooling_device_register(dev_name, ccdev,
                                               &clock_cooling_ops);
        if (IS_ERR(cdev)) {
                release_idr(ccdev->id);
                return ERR_PTR(-EINVAL);
        }
        ccdev->cdev = cdev;
        ccdev->clk_rate_change_nb.notifier_call = clock_cooling_clock_notifier;

        /* Assuming someone has already filled the opp table for this device */
        ret = dev_pm_opp_init_cpufreq_table(dev, &ccdev->freq_table);
        if (ret) {
                release_idr(ccdev->id);
                return ERR_PTR(ret);
        }
        ccdev->clock_state = 0;
        ccdev->clock_val = clock_cooling_get_frequency(ccdev, 0);

        clk_notifier_register(ccdev->clk, &ccdev->clk_rate_change_nb);

        return cdev;
}
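For reference, a minimal sketch of how a platform driver might consume the interface documented above. The probe function, the platform_device, and the clock name "cpu" are hypothetical and only illustrate the call pattern; it assumes the device's OPP table has already been populated, as clock_cooling_register() requires.

/* Hypothetical consumer of clock_cooling_register(); all names here are
 * illustrative and not part of the clock_cooling API. */
static struct thermal_cooling_device *my_clk_cdev;

static int my_platform_probe(struct platform_device *pdev)
{
        /* Assumes the OPP table for &pdev->dev was filled in beforehand,
         * e.g. from device tree or platform code. "cpu" is an assumed
         * clock name. */
        my_clk_cdev = clock_cooling_register(&pdev->dev, "cpu");
        if (IS_ERR(my_clk_cdev)) {
                dev_err(&pdev->dev, "could not register clock cooling device\n");
                return PTR_ERR(my_clk_cdev);
        }

        return 0;
}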
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
                                           const struct cpumask *cpumask)
{
        u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
        int ret;

        if (freq_table[cluster])
                return 0;

        ret = arm_bL_ops->init_opp_table(cpumask);
        if (ret) {
                dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
                        __func__, cpu_dev->id, ret);
                goto out;
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
        if (ret) {
                dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
                        __func__, cpu_dev->id, ret);
                goto free_opp_table;
        }

        clk[cluster] = clk_get(cpu_dev, NULL);
        if (!IS_ERR(clk[cluster])) {
                dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
                        __func__, clk[cluster], freq_table[cluster], cluster);
                return 0;
        }

        dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
                __func__, cpu_dev->id, cluster);
        ret = PTR_ERR(clk[cluster]);
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

free_opp_table:
        if (arm_bL_ops->free_opp_table)
                arm_bL_ops->free_opp_table(cpumask);
out:
        dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
                cluster);
        return ret;
}
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
        u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
        char name[14] = "cpu-cluster.";
        int ret;

        if (freq_table[cluster])
                return 0;

        ret = arm_bL_ops->init_opp_table(cpu_dev);
        if (ret) {
                dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
                        __func__, cpu_dev->id, ret);
                goto out;
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
        if (ret) {
                dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
                        __func__, cpu_dev->id, ret);
                goto out;
        }

        name[12] = cluster + '0';
        clk[cluster] = clk_get(cpu_dev, name);
        if (!IS_ERR(clk[cluster])) {
                dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
                        __func__, clk[cluster], freq_table[cluster], cluster);
                return 0;
        }

        dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
                __func__, cpu_dev->id, cluster);
        ret = PTR_ERR(clk[cluster]);
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

out:
        dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
                cluster);
        return ret;
}
static int imx7d_cpufreq_probe(struct platform_device *pdev)
{
        struct device_node *np;
        struct dev_pm_opp *opp;
        unsigned long min_volt, max_volt;
        int num, ret;

        cpu_dev = get_cpu_device(0);
        if (!cpu_dev) {
                pr_err("failed to get cpu0 device\n");
                return -ENODEV;
        }

        np = of_node_get(cpu_dev->of_node);
        if (!np) {
                dev_err(cpu_dev, "failed to find the cpu0 node\n");
                return -ENOENT;
        }

        arm_clk = devm_clk_get(cpu_dev, "arm");
        arm_src = devm_clk_get(cpu_dev, "arm_root_src");
        pll_arm = devm_clk_get(cpu_dev, "pll_arm");
        pll_sys_main = devm_clk_get(cpu_dev, "pll_sys_main");
        if (IS_ERR(arm_clk) || IS_ERR(arm_src) ||
            IS_ERR(pll_arm) || IS_ERR(pll_sys_main)) {
                dev_err(cpu_dev, "failed to get clocks\n");
                ret = -ENOENT;
                goto put_node;
        }

        arm_reg = devm_regulator_get(cpu_dev, "arm");
        if (IS_ERR(arm_reg)) {
                dev_err(cpu_dev, "failed to get the regulator\n");
                ret = -ENOENT;
                goto put_node;
        }

        /*
         * We expect an OPP table supplied by platform.
         * Just in case the platform did not supply the OPP
         * table, try to get it here.
         */
        num = dev_pm_opp_get_opp_count(cpu_dev);
        if (num < 0) {
                ret = of_init_opp_table(cpu_dev);
                if (ret < 0) {
                        dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
                        goto put_node;
                }

                num = dev_pm_opp_get_opp_count(cpu_dev);
                if (num < 0) {
                        ret = num;
                        dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
                        goto put_node;
                }
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto put_node;
        }

        if (of_property_read_u32(np, "clock-latency", &transition_latency))
                transition_latency = CPUFREQ_ETERNAL;

        /*
         * OPP is maintained in order of increasing frequency, and
         * freq_table initialized from OPP is therefore sorted in the
         * same order.
         */
        rcu_read_lock();
        opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                         freq_table[0].frequency * 1000, true);
        min_volt = dev_pm_opp_get_voltage(opp);
        opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                         freq_table[--num].frequency * 1000, true);
        max_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
        if (ret > 0)
                transition_latency += ret * 1000;

        ret = cpufreq_register_driver(&imx7d_cpufreq_driver);
        if (ret) {
                dev_err(cpu_dev, "failed register driver: %d\n", ret);
                goto free_freq_table;
        }

        mutex_init(&set_cpufreq_lock);
        register_pm_notifier(&imx7_cpufreq_pm_notifier);

        of_node_put(np);

        return 0;

free_freq_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
        of_node_put(np);

        return ret;
}
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
        struct device_node *np;
        struct dev_pm_opp *opp;
        unsigned long min_volt, max_volt;
        int num, ret;
        const struct property *prop;
        const __be32 *val;
        u32 nr, i, j;

        cpu_dev = get_cpu_device(0);
        if (!cpu_dev) {
                pr_err("failed to get cpu0 device\n");
                return -ENODEV;
        }

        np = of_node_get(cpu_dev->of_node);
        if (!np) {
                dev_err(cpu_dev, "failed to find cpu0 node\n");
                return -ENOENT;
        }

        arm_clk = clk_get(cpu_dev, "arm");
        pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
        pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
        step_clk = clk_get(cpu_dev, "step");
        pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
        if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
            IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
                dev_err(cpu_dev, "failed to get clocks\n");
                ret = -ENOENT;
                goto put_clk;
        }

        arm_reg = regulator_get(cpu_dev, "arm");
        pu_reg = regulator_get_optional(cpu_dev, "pu");
        soc_reg = regulator_get(cpu_dev, "soc");
        if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
                dev_err(cpu_dev, "failed to get regulators\n");
                ret = -ENOENT;
                goto put_reg;
        }

        /*
         * We expect an OPP table supplied by platform.
         * Just in case the platform did not supply the OPP
         * table, try to get it here.
         */
        num = dev_pm_opp_get_opp_count(cpu_dev);
        if (num < 0) {
                ret = dev_pm_opp_of_add_table(cpu_dev);
                if (ret < 0) {
                        dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
                        goto put_reg;
                }

                /* Because we have added the OPPs here, we must free them */
                free_opp = true;

                num = dev_pm_opp_get_opp_count(cpu_dev);
                if (num < 0) {
                        ret = num;
                        dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
                        goto out_free_opp;
                }
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto out_free_opp;
        }

        /* Make imx6_soc_volt array's size same as arm opp number */
        imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num,
                                     GFP_KERNEL);
        if (imx6_soc_volt == NULL) {
                ret = -ENOMEM;
                goto free_freq_table;
        }

        prop = of_find_property(np, "fsl,soc-operating-points", NULL);
        if (!prop || !prop->value)
                goto soc_opp_out;

        /*
         * Each OPP is a set of tuples consisting of frequency and
         * voltage like <freq-kHz vol-uV>.
         */
        nr = prop->length / sizeof(u32);
        if (nr % 2 || (nr / 2) < num)
                goto soc_opp_out;

        for (j = 0; j < num; j++) {
                val = prop->value;
                for (i = 0; i < nr / 2; i++) {
                        unsigned long freq = be32_to_cpup(val++);
                        unsigned long volt = be32_to_cpup(val++);
                        if (freq_table[j].frequency == freq) {
                                imx6_soc_volt[soc_opp_count++] = volt;
                                break;
                        }
                }
        }

soc_opp_out:
        /* use fixed soc opp volt if no valid soc opp info found in dtb */
        if (soc_opp_count != num) {
                dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
                for (j = 0; j < num; j++)
                        imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
                if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
                        imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
        }

        if (of_property_read_u32(np, "clock-latency", &transition_latency))
                transition_latency = CPUFREQ_ETERNAL;

        /*
         * Calculate the ramp time for max voltage change in the
         * VDDSOC and VDDPU regulators.
         */
        ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0],
                                         imx6_soc_volt[num - 1]);
        if (ret > 0)
                transition_latency += ret * 1000;
        if (!IS_ERR(pu_reg)) {
                ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0],
                                                 imx6_soc_volt[num - 1]);
                if (ret > 0)
                        transition_latency += ret * 1000;
        }

        /*
         * OPP is maintained in order of increasing frequency, and
         * freq_table initialised from OPP is therefore sorted in the
         * same order.
         */
        rcu_read_lock();
        opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                         freq_table[0].frequency * 1000, true);
        min_volt = dev_pm_opp_get_voltage(opp);
        opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                         freq_table[--num].frequency * 1000, true);
        max_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
        if (ret > 0)
                transition_latency += ret * 1000;

        ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
        if (ret) {
                dev_err(cpu_dev, "failed register driver: %d\n", ret);
                goto free_freq_table;
        }

        of_node_put(np);
        return 0;

free_freq_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
        if (free_opp)
                dev_pm_opp_of_remove_table(cpu_dev);
put_reg:
        if (!IS_ERR(arm_reg))
                regulator_put(arm_reg);
        if (!IS_ERR(pu_reg))
                regulator_put(pu_reg);
        if (!IS_ERR(soc_reg))
                regulator_put(soc_reg);
put_clk:
        if (!IS_ERR(arm_clk))
                clk_put(arm_clk);
        if (!IS_ERR(pll1_sys_clk))
                clk_put(pll1_sys_clk);
        if (!IS_ERR(pll1_sw_clk))
                clk_put(pll1_sw_clk);
        if (!IS_ERR(step_clk))
                clk_put(step_clk);
        if (!IS_ERR(pll2_pfd2_396m_clk))
                clk_put(pll2_pfd2_396m_clk);
        of_node_put(np);
        return ret;
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
        struct cpufreq_dt_platform_data *pd;
        struct cpufreq_frequency_table *freq_table;
        struct thermal_cooling_device *cdev;
        struct device_node *np;
        struct private_data *priv;
        struct device *cpu_dev;
        struct regulator *cpu_reg;
        struct clk *cpu_clk;
        unsigned long min_uV = ~0, max_uV = 0;
        unsigned int transition_latency;
        int ret;

        ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
        if (ret) {
                pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
                return ret;
        }

        np = of_node_get(cpu_dev->of_node);
        if (!np) {
                dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
                ret = -ENOENT;
                goto out_put_reg_clk;
        }

        /* OPPs might be populated at runtime, don't check for error here */
        of_init_opp_table(cpu_dev);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto out_put_node;
        }

        of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);

        if (of_property_read_u32(np, "clock-latency", &transition_latency))
                transition_latency = CPUFREQ_ETERNAL;

        if (!IS_ERR(cpu_reg)) {
                unsigned long opp_freq = 0;

                /*
                 * Disable any OPPs where the connected regulator isn't able to
                 * provide the specified voltage and record minimum and maximum
                 * voltage levels.
                 */
                while (1) {
                        struct dev_pm_opp *opp;
                        unsigned long opp_uV, tol_uV;

                        rcu_read_lock();
                        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
                        if (IS_ERR(opp)) {
                                rcu_read_unlock();
                                break;
                        }
                        opp_uV = dev_pm_opp_get_voltage(opp);
                        rcu_read_unlock();

                        tol_uV = opp_uV * priv->voltage_tolerance / 100;
                        if (regulator_is_supported_voltage(cpu_reg, opp_uV,
                                                           opp_uV + tol_uV)) {
                                if (opp_uV < min_uV)
                                        min_uV = opp_uV;
                                if (opp_uV > max_uV)
                                        max_uV = opp_uV;
                        } else {
                                dev_pm_opp_disable(cpu_dev, opp_freq);
                        }

                        opp_freq++;
                }

                ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
                if (ret > 0)
                        transition_latency += ret * 1000;
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                pr_err("failed to init cpufreq table: %d\n", ret);
                goto out_free_priv;
        }

        /*
         * For now, just loading the cooling device;
         * thermal DT code takes care of matching them.
         */
        if (of_find_property(np, "#cooling-cells", NULL)) {
                cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
                if (IS_ERR(cdev))
                        dev_err(cpu_dev,
                                "running cpufreq without cooling device: %ld\n",
                                PTR_ERR(cdev));
                else
                        priv->cdev = cdev;
        }

        priv->cpu_dev = cpu_dev;
        priv->cpu_reg = cpu_reg;
        policy->driver_data = priv;

        policy->clk = cpu_clk;
        ret = cpufreq_table_validate_and_show(policy, freq_table);
        if (ret) {
                dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
                        ret);
                goto out_cooling_unregister;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        pd = cpufreq_get_driver_data();
        if (!pd || !pd->independent_clocks)
                cpumask_setall(policy->cpus);

        of_node_put(np);

        return 0;

out_cooling_unregister:
        cpufreq_cooling_unregister(priv->cdev);
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
        kfree(priv);
out_put_node:
        of_node_put(np);
out_put_reg_clk:
        clk_put(cpu_clk);
        if (!IS_ERR(cpu_reg))
                regulator_put(cpu_reg);

        return ret;
}
static int scpi_cpufreq_init(struct cpufreq_policy *policy)
{
        int ret;
        unsigned int latency;
        struct device *cpu_dev;
        struct scpi_data *priv;
        struct cpufreq_frequency_table *freq_table;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("failed to get cpu%d device\n", policy->cpu);
                return -ENODEV;
        }

        ret = scpi_ops->add_opps_to_device(cpu_dev);
        if (ret) {
                dev_warn(cpu_dev, "failed to add opps to the device\n");
                return ret;
        }

        ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
        if (ret) {
                dev_warn(cpu_dev, "failed to get sharing cpumask\n");
                return ret;
        }

        ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
        if (ret) {
                dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                        __func__, ret);
                return ret;
        }

        ret = dev_pm_opp_get_opp_count(cpu_dev);
        if (ret <= 0) {
                dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
                ret = -EPROBE_DEFER;
                goto out_free_opp;
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto out_free_opp;
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto out_free_priv;
        }

        priv->cpu_dev = cpu_dev;
        priv->clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(priv->clk)) {
                dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
                        __func__, cpu_dev->id);
                ret = PTR_ERR(priv->clk);
                goto out_free_cpufreq_table;
        }

        policy->driver_data = priv;

        ret = cpufreq_table_validate_and_show(policy, freq_table);
        if (ret) {
                dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
                        ret);
                goto out_put_clk;
        }

        /* scpi allows DVFS request for any domain from any CPU */
        policy->dvfs_possible_from_any_cpu = true;

        latency = scpi_ops->get_transition_latency(cpu_dev);
        if (!latency)
                latency = CPUFREQ_ETERNAL;

        policy->cpuinfo.transition_latency = latency;

        policy->fast_switch_possible = false;
        return 0;

out_put_clk:
        clk_put(priv->clk);
out_free_cpufreq_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
        kfree(priv);
out_free_opp:
        dev_pm_opp_cpumask_remove_table(policy->cpus);

        return ret;
}
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
        struct device_node *np;
        struct dev_pm_opp *opp;
        unsigned long min_volt, max_volt;
        int num, ret;
        const struct property *prop;
        const __be32 *val;
        u32 nr, j, i = 0;

        cpu_dev = get_cpu_device(0);
        if (!cpu_dev) {
                pr_err("failed to get cpu0 device\n");
                return -ENODEV;
        }

        np = of_node_get(cpu_dev->of_node);
        if (!np) {
                dev_err(cpu_dev, "failed to find cpu0 node\n");
                return -ENOENT;
        }

        arm_clk = devm_clk_get(cpu_dev, "arm");
        pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
        pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
        step_clk = devm_clk_get(cpu_dev, "step");
        pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
        pll1_bypass = devm_clk_get(cpu_dev, "pll1_bypass");
        pll1 = devm_clk_get(cpu_dev, "pll1");
        pll1_bypass_src = devm_clk_get(cpu_dev, "pll1_bypass_src");
        if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
            IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk) ||
            IS_ERR(pll1_bypass) || IS_ERR(pll1) || IS_ERR(pll1_bypass_src)) {
                dev_err(cpu_dev, "failed to get clocks\n");
                ret = -ENOENT;
                goto put_node;
        }

        arm_reg = devm_regulator_get_optional(cpu_dev, "arm");
        pu_reg = devm_regulator_get_optional(cpu_dev, "pu");
        soc_reg = devm_regulator_get_optional(cpu_dev, "soc");
        if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
                dev_err(cpu_dev, "failed to get regulators\n");
                ret = -ENOENT;
                goto put_node;
        }

        /*
         * Sync soc_reg with arm_reg if the ARM rail shares its regulator
         * with the SoC rail. Otherwise the regulator framework will refuse
         * to update this consumer's voltage while another consumer of the
         * same regulator still holds the old voltage, e.g. imx6sx-sdb with
         * pfuze200 in ldo-bypass mode.
         */
        of_property_read_u32(np, "fsl,arm-soc-shared", &i);
        if (i == 1)
                soc_reg = arm_reg;

        /*
         * We expect an OPP table supplied by platform.
         * Just in case the platform did not supply the OPP
         * table, try to get it here.
         */
        num = dev_pm_opp_get_opp_count(cpu_dev);
        if (num < 0) {
                ret = of_init_opp_table(cpu_dev);
                if (ret < 0) {
                        dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
                        goto put_node;
                }

                num = dev_pm_opp_get_opp_count(cpu_dev);
                if (num < 0) {
                        ret = num;
                        dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
                        goto put_node;
                }
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto put_node;
        }

        /* Make imx6_soc_volt array's size same as arm opp number */
        imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num,
                                     GFP_KERNEL);
        if (imx6_soc_volt == NULL) {
                ret = -ENOMEM;
                goto free_freq_table;
        }

        prop = of_find_property(np, "fsl,soc-operating-points", NULL);
        if (!prop || !prop->value)
                goto soc_opp_out;

        /*
         * Each OPP is a set of tuples consisting of frequency and
         * voltage like <freq-kHz vol-uV>.
         */
        nr = prop->length / sizeof(u32);
        if (nr % 2 || (nr / 2) < num)
                goto soc_opp_out;

        for (j = 0; j < num; j++) {
                val = prop->value;
                for (i = 0; i < nr / 2; i++) {
                        unsigned long freq = be32_to_cpup(val++);
                        unsigned long volt = be32_to_cpup(val++);
                        if (freq_table[j].frequency == freq) {
                                imx6_soc_volt[soc_opp_count++] = volt;
#ifdef CONFIG_MX6_VPU_352M
                                if (freq == 792000) {
                                        pr_info("increase SOC/PU voltage for VPU352MHz\n");
                                        imx6_soc_volt[soc_opp_count - 1] = 1250000;
                                }
#endif
                                break;
                        }
                }
        }

soc_opp_out:
        /* use fixed soc opp volt if no valid soc opp info found in dtb */
        if (soc_opp_count != num) {
                dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
                for (j = 0; j < num; j++)
                        imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
                if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
                        imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
        }

        if (of_property_read_u32(np, "clock-latency", &transition_latency))
                transition_latency = CPUFREQ_ETERNAL;

        /*
         * Calculate the ramp time for max voltage change in the
         * VDDSOC and VDDPU regulators.
         */
        ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0],
                                         imx6_soc_volt[num - 1]);
        if (ret > 0)
                transition_latency += ret * 1000;
        if (!IS_ERR(pu_reg)) {
                ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0],
                                                 imx6_soc_volt[num - 1]);
                if (ret > 0)
                        transition_latency += ret * 1000;
        }

        /*
         * OPP is maintained in order of increasing frequency, and
         * freq_table initialised from OPP is therefore sorted in the
         * same order.
         */
        rcu_read_lock();
        opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                         freq_table[0].frequency * 1000, true);
        min_volt = dev_pm_opp_get_voltage(opp);
        opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                         freq_table[--num].frequency * 1000, true);
        max_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
        if (ret > 0)
                transition_latency += ret * 1000;

        ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
        if (ret) {
                dev_err(cpu_dev, "failed register driver: %d\n", ret);
                goto free_freq_table;
        }

        mutex_init(&set_cpufreq_lock);
        register_pm_notifier(&imx6_cpufreq_pm_notifier);

        of_node_put(np);
        return 0;

free_freq_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
        of_node_put(np);
        return ret;
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
        struct cpufreq_frequency_table *freq_table;
        struct private_data *priv;
        struct device *cpu_dev;
        struct clk *cpu_clk;
        struct dev_pm_opp *suspend_opp;
        unsigned int transition_latency;
        bool opp_v1 = false;
        const char *name;
        int ret;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("failed to get cpu%d device\n", policy->cpu);
                return -ENODEV;
        }

        cpu_clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(cpu_clk)) {
                ret = PTR_ERR(cpu_clk);
                dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
                return ret;
        }

        /* Get OPP-sharing information from "operating-points-v2" bindings */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
        if (ret) {
                /*
                 * operating-points-v2 not supported, fallback to old method of
                 * finding shared-OPPs for backward compatibility.
                 */
                if (ret == -ENOENT)
                        opp_v1 = true;
                else
                        goto out_put_clk;
        }

        /*
         * OPP layer will be taking care of regulators now, but it needs to know
         * the name of the regulator first.
         */
        name = find_supply_name(cpu_dev);
        if (name) {
                ret = dev_pm_opp_set_regulator(cpu_dev, name);
                if (ret) {
                        dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
                                policy->cpu, ret);
                        goto out_put_clk;
                }
        }

        /*
         * Initialize OPP tables for all policy->cpus. They will be shared by
         * all CPUs which have marked their CPUs shared with OPP bindings.
         *
         * For platforms not using operating-points-v2 bindings, we do this
         * before updating policy->cpus. Otherwise, we will end up creating
         * duplicate OPPs for policy->cpus.
         *
         * OPPs might be populated at runtime, don't check for error here
         */
        dev_pm_opp_of_cpumask_add_table(policy->cpus);

        /*
         * But we need OPP table to function so if it is not there let's
         * give platform code chance to provide it for us.
         */
        ret = dev_pm_opp_get_opp_count(cpu_dev);
        if (ret <= 0) {
                dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
                ret = -EPROBE_DEFER;
                goto out_free_opp;
        }

        if (opp_v1) {
                struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();

                if (!pd || !pd->independent_clocks)
                        cpumask_setall(policy->cpus);

                /*
                 * OPP tables are initialized only for policy->cpu, do it for
                 * others as well.
                 */
                ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
                if (ret)
                        dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                                __func__, ret);
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto out_free_opp;
        }

        priv->reg_name = name;

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto out_free_priv;
        }

        priv->cpu_dev = cpu_dev;
        policy->driver_data = priv;
        policy->clk = cpu_clk;

        rcu_read_lock();
        suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
        if (suspend_opp)
                policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
        rcu_read_unlock();

        ret = cpufreq_table_validate_and_show(policy, freq_table);
        if (ret) {
                dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
                        ret);
                goto out_free_cpufreq_table;
        }

        /* Support turbo/boost mode */
        if (policy_has_boost_freq(policy)) {
                /* This gets disabled by core on driver unregister */
                ret = cpufreq_enable_boost_support();
                if (ret)
                        goto out_free_cpufreq_table;
                cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
        }

        transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
        if (!transition_latency)
                transition_latency = CPUFREQ_ETERNAL;

        policy->cpuinfo.transition_latency = transition_latency;

        return 0;

out_free_cpufreq_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
        kfree(priv);
out_free_opp:
        dev_pm_opp_of_cpumask_remove_table(policy->cpus);
        if (name)
                dev_pm_opp_put_regulator(cpu_dev);
out_put_clk:
        clk_put(cpu_clk);

        return ret;
}
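Distilled from the drivers above, a hedged minimal sketch of the common dev_pm_opp_init_cpufreq_table() pattern: with the OPP table already populated, build the cpufreq frequency table from it, hand it to the cpufreq core, and free it again on any error. The my_cpufreq_init() name and the simplified error handling are illustrative only and omit the clock, regulator, and sharing setup a real driver would need.

/* Hypothetical ->init() callback showing only the freq-table handling. */
static int my_cpufreq_init(struct cpufreq_policy *policy)
{
        struct cpufreq_frequency_table *freq_table;
        struct device *cpu_dev;
        int ret;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev)
                return -ENODEV;

        /* OPPs are normally added from DT or by platform code before this. */
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                return ret;
        }

        ret = cpufreq_table_validate_and_show(policy, freq_table);
        if (ret) {
                /* The table is only needed while the policy uses it. */
                dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
                return ret;
        }

        return 0;
}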