static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */

	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}
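/*
 * A minimal sketch of how such a builder is typically consumed: the caller
 * owns a fixed-size array, passes its capacity, and treats the return value
 * as the number of valid entries. The caller name and the table size below
 * are illustrative assumptions, not part of the driver above.
 */
static void example_fill_gmu_freqs(struct device *dev)
{
	unsigned long freqs[16];	/* capacity passed as "size" */
	int i, nr;

	/* Never returns more entries than the capacity we pass in */
	nr = a6xx_gmu_build_freq_table(dev, freqs, ARRAY_SIZE(freqs));

	for (i = 0; i < nr; i++)
		dev_dbg(dev, "GMU freq[%d] = %lu Hz\n", i, freqs[i]);
}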
/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	cpufreq table returned back to the caller
 *
 * Generate a cpufreq table for the provided device - this assumes that the
 * OPP list is already initialized and ready for usage.
 *
 * This function allocates the required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance, such as freeing
 * the table, once it is no longer needed.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (table is not
 * populated), and 0 if successful (table is populated).
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any of the mentioned functions have been invoked in the interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Since we just use the regular accessor functions to access the internal data
 * structures, we use the RCU read lock inside this function. As a result,
 * users of this function DO NOT need to use explicit locks when invoking it.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = NULL;
	int i, max_opps, ret = 0;
	unsigned long rate;

	rcu_read_lock();

	max_opps = dev_pm_opp_get_opp_count(dev);
	if (max_opps <= 0) {
		ret = max_opps ? max_opps : -ENODATA;
		goto out;
	}

	/* GFP_ATOMIC: we are inside an RCU read-side critical section */
	freq_table = kcalloc(max_opps + 1, sizeof(*freq_table), GFP_ATOMIC);
	if (!freq_table) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
		/* find next rate */
		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			goto out;
		}
		freq_table[i].driver_data = i;
		freq_table[i].frequency = rate / 1000;

		/* Is this a boost/turbo OPP ? */
		if (dev_pm_opp_is_turbo(opp))
			freq_table[i].flags = CPUFREQ_BOOST_FREQ;
	}

	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

out:
	rcu_read_unlock();
	if (ret)
		kfree(freq_table);

	return ret;
}
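/*
 * A minimal sketch of the typical driver-side pairing (the probe functions
 * below follow the same pattern): build the table once, hand it to cpufreq,
 * and free it on the teardown or error path. The function name here is
 * illustrative, not an existing API.
 */
static int example_setup_cpufreq_table(struct device *cpu_dev,
				       struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	int ret;

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		return ret;
	}

	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret)
		/* The caller owns the table, so it must release it on error */
		dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);

	return ret;
}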
static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
		struct devfreq_dev_profile *dp)
{
	int count;
	int i = 0;
	unsigned long freq = 0;
	struct dev_pm_opp *opp;

	rcu_read_lock();
	count = dev_pm_opp_get_opp_count(kbdev->dev);
	if (count < 0) {
		rcu_read_unlock();
		return count;
	}
	rcu_read_unlock();

	dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
				GFP_KERNEL);
	if (!dp->freq_table)
		return -ENOMEM;

	rcu_read_lock();
	for (i = 0; i < count; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(kbdev->dev, &freq);
		if (IS_ERR(opp))
			break;

		dp->freq_table[i] = freq;
	}
	rcu_read_unlock();

	if (count != i)
		dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d != %d)\n",
				count, i);

	dp->max_state = i;

	return 0;
}
static int imx7d_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct dev_pm_opp *opp;
	unsigned long min_volt, max_volt;
	int num, ret;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find the cpu0 node\n");
		return -ENOENT;
	}

	arm_clk = devm_clk_get(cpu_dev, "arm");
	arm_src = devm_clk_get(cpu_dev, "arm_root_src");
	pll_arm = devm_clk_get(cpu_dev, "pll_arm");
	pll_sys_main = devm_clk_get(cpu_dev, "pll_sys_main");
	if (IS_ERR(arm_clk) || IS_ERR(arm_src) ||
	    IS_ERR(pll_arm) || IS_ERR(pll_sys_main)) {
		dev_err(cpu_dev, "failed to get clocks\n");
		ret = -ENOENT;
		goto put_node;
	}

	arm_reg = devm_regulator_get(cpu_dev, "arm");
	if (IS_ERR(arm_reg)) {
		dev_err(cpu_dev, "failed to get the regulator\n");
		ret = -ENOENT;
		goto put_node;
	}

	/*
	 * We expect an OPP table supplied by the platform. Just in case the
	 * platform did not supply the OPP table, try to get one here.
	 */
	num = dev_pm_opp_get_opp_count(cpu_dev);
	if (num < 0) {
		ret = of_init_opp_table(cpu_dev);
		if (ret < 0) {
			dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
			goto put_node;
		}

		num = dev_pm_opp_get_opp_count(cpu_dev);
		if (num < 0) {
			ret = num;
			dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
			goto put_node;
		}
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto put_node;
	}

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	/*
	 * OPP is maintained in order of increasing frequency, and
	 * freq_table initialized from OPP is therefore sorted in the
	 * same order.
	 */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[0].frequency * 1000, true);
	min_volt = dev_pm_opp_get_voltage(opp);
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[--num].frequency * 1000, true);
	max_volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
	if (ret > 0)
		transition_latency += ret * 1000;

	ret = cpufreq_register_driver(&imx7d_cpufreq_driver);
	if (ret) {
		dev_err(cpu_dev, "failed register driver: %d\n", ret);
		goto free_freq_table;
	}

	mutex_init(&set_cpufreq_lock);
	register_pm_notifier(&imx7_cpufreq_pm_notifier);

	of_node_put(np);
	return 0;

free_freq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
	of_node_put(np);
	return ret;
}
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct dev_pm_opp *opp;
	unsigned long min_volt, max_volt;
	int num, ret;
	const struct property *prop;
	const __be32 *val;
	u32 nr, i, j;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find cpu0 node\n");
		return -ENOENT;
	}

	arm_clk = clk_get(cpu_dev, "arm");
	pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
	pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
	step_clk = clk_get(cpu_dev, "step");
	pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
	if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
	    IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
		dev_err(cpu_dev, "failed to get clocks\n");
		ret = -ENOENT;
		goto put_clk;
	}

	arm_reg = regulator_get(cpu_dev, "arm");
	pu_reg = regulator_get_optional(cpu_dev, "pu");
	soc_reg = regulator_get(cpu_dev, "soc");
	if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
		dev_err(cpu_dev, "failed to get regulators\n");
		ret = -ENOENT;
		goto put_reg;
	}

	/*
	 * We expect an OPP table supplied by the platform. Just in case the
	 * platform did not supply the OPP table, try to get one here.
	 */
	num = dev_pm_opp_get_opp_count(cpu_dev);
	if (num < 0) {
		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret < 0) {
			dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
			goto put_reg;
		}

		/* Because we have added the OPPs here, we must free them */
		free_opp = true;

		num = dev_pm_opp_get_opp_count(cpu_dev);
		if (num < 0) {
			ret = num;
			dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
			goto out_free_opp;
		}
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_opp;
	}

	/* Make imx6_soc_volt array's size same as arm opp number */
	imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num,
				     GFP_KERNEL);
	if (imx6_soc_volt == NULL) {
		ret = -ENOMEM;
		goto free_freq_table;
	}

	prop = of_find_property(np, "fsl,soc-operating-points", NULL);
	if (!prop || !prop->value)
		goto soc_opp_out;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2 || (nr / 2) < num)
		goto soc_opp_out;

	for (j = 0; j < num; j++) {
		val = prop->value;
		for (i = 0; i < nr / 2; i++) {
			unsigned long freq = be32_to_cpup(val++);
			unsigned long volt = be32_to_cpup(val++);
			if (freq_table[j].frequency == freq) {
				imx6_soc_volt[soc_opp_count++] = volt;
				break;
			}
		}
	}

soc_opp_out:
	/* use a fixed SoC OPP voltage if no valid SoC OPP info is found in the DTB */
	if (soc_opp_count != num) {
		dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
		for (j = 0; j < num; j++)
			imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
		if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
			imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
	}

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	/*
	 * Calculate the ramp time for max voltage change in the
	 * VDDSOC and VDDPU regulators.
	 */
	ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0],
					 imx6_soc_volt[num - 1]);
	if (ret > 0)
		transition_latency += ret * 1000;
	if (!IS_ERR(pu_reg)) {
		ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0],
						 imx6_soc_volt[num - 1]);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	/*
	 * OPP is maintained in order of increasing frequency, and
	 * freq_table initialised from OPP is therefore sorted in the
	 * same order.
	 */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[0].frequency * 1000, true);
	min_volt = dev_pm_opp_get_voltage(opp);
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[--num].frequency * 1000, true);
	max_volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
	if (ret > 0)
		transition_latency += ret * 1000;

	ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
	if (ret) {
		dev_err(cpu_dev, "failed register driver: %d\n", ret);
		goto free_freq_table;
	}

	of_node_put(np);
	return 0;

free_freq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
	if (free_opp)
		dev_pm_opp_of_remove_table(cpu_dev);
put_reg:
	if (!IS_ERR(arm_reg))
		regulator_put(arm_reg);
	if (!IS_ERR(pu_reg))
		regulator_put(pu_reg);
	if (!IS_ERR(soc_reg))
		regulator_put(soc_reg);
put_clk:
	if (!IS_ERR(arm_clk))
		clk_put(arm_clk);
	if (!IS_ERR(pll1_sys_clk))
		clk_put(pll1_sys_clk);
	if (!IS_ERR(pll1_sw_clk))
		clk_put(pll1_sw_clk);
	if (!IS_ERR(step_clk))
		clk_put(step_clk);
	if (!IS_ERR(pll2_pfd2_396m_clk))
		clk_put(pll2_pfd2_396m_clk);
	of_node_put(np);
	return ret;
}
/**
 * devfreq_cooling_gen_tables() - Generate power and freq tables.
 * @dfc: Pointer to devfreq cooling device.
 *
 * Generate power and frequency tables: the power table holds the
 * device's maximum power usage at each cooling state (OPP). The
 * static and dynamic power for each state, computed using the
 * appropriate voltage and frequency, are acquired from the struct
 * devfreq_cooling_power callbacks and summed to give the maximum
 * power draw.
 *
 * The frequency table holds the frequencies in descending order.
 * That way it is indexed by cooling device state.
 *
 * The tables are allocated, and the pointers stored in @dfc. They
 * must be freed when unregistering the devfreq cooling device.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int devfreq_cooling_gen_tables(struct devfreq_cooling_device *dfc)
{
	struct devfreq *df = dfc->devfreq;
	struct device *dev = df->dev.parent;
	int ret, num_opps;
	unsigned long freq;
	u32 *power_table = NULL;
	u32 *freq_table;
	int i;

	num_opps = dev_pm_opp_get_opp_count(dev);

	if (dfc->power_ops) {
		power_table = kcalloc(num_opps, sizeof(*power_table),
				      GFP_KERNEL);
		if (!power_table)
			return -ENOMEM;
	}

	freq_table = kcalloc(num_opps, sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table) {
		ret = -ENOMEM;
		goto free_power_table;
	}

	for (i = 0, freq = ULONG_MAX; i < num_opps; i++, freq--) {
		unsigned long power, voltage;
		struct dev_pm_opp *opp;

		opp = dev_pm_opp_find_freq_floor(dev, &freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			goto free_tables;
		}

		voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
		dev_pm_opp_put(opp);

		if (dfc->power_ops) {
			if (dfc->power_ops->get_real_power)
				power = get_total_power(dfc, freq, voltage);
			else
				power = get_dynamic_power(dfc, freq, voltage);

			dev_dbg(dev, "Power table: %lu MHz @ %lu mV: %lu = %lu mW\n",
				freq / 1000000, voltage, power, power);

			power_table[i] = power;
		}

		freq_table[i] = freq;
	}

	if (dfc->power_ops)
		dfc->power_table = power_table;

	dfc->freq_table = freq_table;
	dfc->freq_table_size = num_opps;

	return 0;

free_tables:
	kfree(freq_table);
free_power_table:
	kfree(power_table);

	return ret;
}
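/*
 * Because the frequency table is stored in descending order, the cooling
 * device state is a direct index: state 0 is the fastest OPP and the last
 * state is the slowest. A sketch of that lookup; the helper name is
 * illustrative, not part of the driver above.
 */
static unsigned long example_state_to_freq(struct devfreq_cooling_device *dfc,
					   unsigned long state)
{
	if (state >= dfc->freq_table_size)
		return 0;	/* invalid cooling state */

	/* state 0 == highest frequency, higher states throttle further */
	return dfc->freq_table[state];
}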
static int scpi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret;
	unsigned int latency;
	struct device *cpu_dev;
	struct scpi_data *priv;
	struct cpufreq_frequency_table *freq_table;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	ret = scpi_ops->add_opps_to_device(cpu_dev);
	if (ret) {
		dev_warn(cpu_dev, "failed to add opps to the device\n");
		return ret;
	}

	ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		return ret;
	}

	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
			__func__, ret);
		return ret;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	priv->clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
			__func__, cpu_dev->id);
		ret = PTR_ERR(priv->clk);
		goto out_free_cpufreq_table;
	}

	policy->driver_data = priv;

	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret) {
		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
			ret);
		goto out_put_clk;
	}

	/* scpi allows DVFS request for any domain from any CPU */
	policy->dvfs_possible_from_any_cpu = true;

	latency = scpi_ops->get_transition_latency(cpu_dev);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;

	policy->fast_switch_possible = false;
	return 0;

out_put_clk:
	clk_put(priv->clk);
out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
	kfree(priv);
out_free_opp:
	dev_pm_opp_cpumask_remove_table(policy->cpus);

	return ret;
}
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct dev_pm_opp *opp;
	unsigned long min_volt, max_volt;
	int num, ret;
	const struct property *prop;
	const __be32 *val;
	u32 nr, j, i = 0;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		dev_err(cpu_dev, "failed to find cpu0 node\n");
		return -ENOENT;
	}

	arm_clk = devm_clk_get(cpu_dev, "arm");
	pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
	pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
	step_clk = devm_clk_get(cpu_dev, "step");
	pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
	pll1_bypass = devm_clk_get(cpu_dev, "pll1_bypass");
	pll1 = devm_clk_get(cpu_dev, "pll1");
	pll1_bypass_src = devm_clk_get(cpu_dev, "pll1_bypass_src");
	if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
	    IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk) ||
	    IS_ERR(pll1_bypass) || IS_ERR(pll1) || IS_ERR(pll1_bypass_src)) {
		dev_err(cpu_dev, "failed to get clocks\n");
		ret = -ENOENT;
		goto put_node;
	}

	arm_reg = devm_regulator_get_optional(cpu_dev, "arm");
	pu_reg = devm_regulator_get_optional(cpu_dev, "pu");
	soc_reg = devm_regulator_get_optional(cpu_dev, "soc");
	if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
		dev_err(cpu_dev, "failed to get regulators\n");
		ret = -ENOENT;
		goto put_node;
	}

	/*
	 * Sync soc_reg with arm_reg if the ARM core shares the same regulator
	 * with the SoC. Otherwise, the regulator framework will refuse to
	 * update one consumer's voltage while another consumer's voltage is
	 * still at the old value. For example, imx6sx-sdb with pfuze200 in
	 * ldo-bypass mode.
	 */
	of_property_read_u32(np, "fsl,arm-soc-shared", &i);
	if (i == 1)
		soc_reg = arm_reg;

	/*
	 * We expect an OPP table supplied by the platform. Just in case the
	 * platform did not supply the OPP table, try to get one here.
	 */
	num = dev_pm_opp_get_opp_count(cpu_dev);
	if (num < 0) {
		ret = of_init_opp_table(cpu_dev);
		if (ret < 0) {
			dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
			goto put_node;
		}

		num = dev_pm_opp_get_opp_count(cpu_dev);
		if (num < 0) {
			ret = num;
			dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
			goto put_node;
		}
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto put_node;
	}

	/* Make imx6_soc_volt array's size same as arm opp number */
	imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num,
				     GFP_KERNEL);
	if (imx6_soc_volt == NULL) {
		ret = -ENOMEM;
		goto free_freq_table;
	}

	prop = of_find_property(np, "fsl,soc-operating-points", NULL);
	if (!prop || !prop->value)
		goto soc_opp_out;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2 || (nr / 2) < num)
		goto soc_opp_out;

	for (j = 0; j < num; j++) {
		val = prop->value;
		for (i = 0; i < nr / 2; i++) {
			unsigned long freq = be32_to_cpup(val++);
			unsigned long volt = be32_to_cpup(val++);
			if (freq_table[j].frequency == freq) {
				imx6_soc_volt[soc_opp_count++] = volt;
#ifdef CONFIG_MX6_VPU_352M
				if (freq == 792000) {
					pr_info("increase SOC/PU voltage for VPU352MHz\n");
					imx6_soc_volt[soc_opp_count - 1] = 1250000;
				}
#endif
				break;
			}
		}
	}

soc_opp_out:
	/* use a fixed SoC OPP voltage if no valid SoC OPP info is found in the DTB */
	if (soc_opp_count != num) {
		dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
		for (j = 0; j < num; j++)
			imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
		if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
			imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
	}

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	/*
	 * Calculate the ramp time for max voltage change in the
	 * VDDSOC and VDDPU regulators.
	 */
	ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0],
					 imx6_soc_volt[num - 1]);
	if (ret > 0)
		transition_latency += ret * 1000;
	if (!IS_ERR(pu_reg)) {
		ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0],
						 imx6_soc_volt[num - 1]);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	/*
	 * OPP is maintained in order of increasing frequency, and
	 * freq_table initialised from OPP is therefore sorted in the
	 * same order.
	 */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[0].frequency * 1000, true);
	min_volt = dev_pm_opp_get_voltage(opp);
	opp = dev_pm_opp_find_freq_exact(cpu_dev,
				freq_table[--num].frequency * 1000, true);
	max_volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
	if (ret > 0)
		transition_latency += ret * 1000;

	ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
	if (ret) {
		dev_err(cpu_dev, "failed register driver: %d\n", ret);
		goto free_freq_table;
	}

	mutex_init(&set_cpufreq_lock);
	register_pm_notifier(&imx6_cpufreq_pm_notifier);

	of_node_put(np);
	return 0;

free_freq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
	of_node_put(np);
	return ret;
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct private_data *priv;
	struct device *cpu_dev;
	struct clk *cpu_clk;
	struct dev_pm_opp *suspend_opp;
	unsigned int transition_latency;
	bool opp_v1 = false;
	const char *name;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		ret = PTR_ERR(cpu_clk);
		dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
		return ret;
	}

	/* Get OPP-sharing information from "operating-points-v2" bindings */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		/*
		 * operating-points-v2 not supported, fall back to the old
		 * method of finding shared OPPs for backward compatibility.
		 */
		if (ret == -ENOENT)
			opp_v1 = true;
		else
			goto out_put_clk;
	}

	/*
	 * The OPP layer will be taking care of regulators now, but it needs
	 * to know the name of the regulator first.
	 */
	name = find_supply_name(cpu_dev);
	if (name) {
		ret = dev_pm_opp_set_regulator(cpu_dev, name);
		if (ret) {
			dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
				policy->cpu, ret);
			goto out_put_clk;
		}
	}

	/*
	 * Initialize OPP tables for all policy->cpus. They will be shared by
	 * all CPUs which have marked their CPUs shared with OPP bindings.
	 *
	 * For platforms not using operating-points-v2 bindings, we do this
	 * before updating policy->cpus. Otherwise, we will end up creating
	 * duplicate OPPs for policy->cpus.
	 *
	 * OPPs might be populated at runtime, don't check for error here.
	 */
	dev_pm_opp_of_cpumask_add_table(policy->cpus);

	/*
	 * But we need the OPP table to function, so if it is not there, let's
	 * give platform code a chance to provide it for us.
	 */
	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	if (opp_v1) {
		struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();

		if (!pd || !pd->independent_clocks)
			cpumask_setall(policy->cpus);

		/*
		 * OPP tables are initialized only for policy->cpu, do it for
		 * others as well.
		 */
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
		if (ret)
			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
				__func__, ret);
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	priv->reg_name = name;

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	policy->driver_data = priv;
	policy->clk = cpu_clk;

	rcu_read_lock();
	suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
	if (suspend_opp)
		policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
	rcu_read_unlock();

	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret) {
		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
			ret);
		goto out_free_cpufreq_table;
	}

	/* Support turbo/boost mode */
	if (policy_has_boost_freq(policy)) {
		/* This gets disabled by core on driver unregister */
		ret = cpufreq_enable_boost_support();
		if (ret)
			goto out_free_cpufreq_table;
		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
	}

	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
	if (!transition_latency)
		transition_latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = transition_latency;

	return 0;

out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
	kfree(priv);
out_free_opp:
	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
	if (name)
		dev_pm_opp_put_regulator(cpu_dev);
out_put_clk:
	clk_put(cpu_clk);

	return ret;
}
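/*
 * A sketch of the matching ->exit() callback, unwinding cpufreq_init() in
 * reverse order of the error path above. It is written here from the visible
 * unwind sequence, so treat it as an illustration rather than the exact
 * driver code.
 */
static int example_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct private_data *priv = policy->driver_data;

	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	if (priv->reg_name)
		dev_pm_opp_put_regulator(priv->cpu_dev);

	clk_put(policy->clk);
	kfree(priv);

	return 0;
}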