Example #1
/*
 * Check that the idle state is uniform across all CPUs in the CPUidle driver
 * cpumask
 */
static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
			     const cpumask_t *cpumask)
{
	int cpu;
	struct device_node *cpu_node, *curr_state_node;
	bool valid = true;

	/*
	 * Compare idle state phandles for index idx on all CPUs in the
	 * CPUidle driver cpumask. Start from next logical cpu following
	 * cpumask_first(cpumask) since that's the CPU state_node was
	 * retrieved from. If a mismatch is found bail out straight
	 * away since we certainly hit a firmware misconfiguration.
	 */
	for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
	     cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
		cpu_node = of_cpu_device_node_get(cpu);
		curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
						   idx);
		if (state_node != curr_state_node)
			valid = false;

		of_node_put(curr_state_node);
		of_node_put(cpu_node);
		if (!valid)
			break;
	}

	return valid;
}
Example #2
/* get cpu node with valid operating-points */
static struct device_node *get_cpu_node_with_valid_op(int cpu)
{
	struct device_node *np = of_cpu_device_node_get(cpu);

	if (!of_get_property(np, "operating-points", NULL)) {
		of_node_put(np);
		np = NULL;
	}

	return np;
}
Example #3
int cpu_init_idle(unsigned int cpu)
{
	int ret = -EOPNOTSUPP;
	struct device_node *cpu_node = of_cpu_device_node_get(cpu);

	if (!cpu_node)
		return -ENODEV;

	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
		ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);

	of_node_put(cpu_node);
	return ret;
}
Example #4
/**
 * arm_cpuidle_init() - Initialize cpuidle_ops for a specific cpu
 * @cpu: the cpu to be initialized
 *
 * Initialize the cpuidle ops with the device for the cpu and then call
 * the cpu's idle initialization callback. This may fail if the underlying HW
 * is not operational.
 *
 * Returns:
 *  0 on success,
 *  -ENODEV if it fails to find the cpu node in the device tree,
 *  -EOPNOTSUPP if it does not find a registered and valid cpuidle_ops for
 *  this cpu,
 *  -ENOENT if it fails to find an 'enable-method' property,
 *  -ENXIO if the HW reports a failure or a misconfiguration,
 *  -ENOMEM if the HW reports a memory allocation failure
 */
int __init arm_cpuidle_init(int cpu)
{
	struct device_node *cpu_node = of_cpu_device_node_get(cpu);
	int ret;

	if (!cpu_node)
		return -ENODEV;

	ret = arm_cpuidle_read_ops(cpu_node, cpu);
	if (!ret)
		ret = cpuidle_ops[cpu].init(cpu_node, cpu);

	of_node_put(cpu_node);

	return ret;
}
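The cpuidle_ops[cpu] entry consulted above is normally filled by arm_cpuidle_read_ops(), which matches the CPU node's "enable-method" string against a table built with CPUIDLE_METHOD_OF_DECLARE(). A minimal sketch of such a provider follows; all my_-prefixed identifiers and the "vendor,my-enable-method" string are placeholders, not taken from any in-tree driver.

#include <linux/init.h>
#include <linux/of.h>
#include <asm/cpuidle.h>

/* Placeholder suspend callback: program the power controller for the state. */
static int my_cpu_suspend(unsigned long arg)
{
	return 0;
}

/* Placeholder init callback: parse per-CPU data, report HW failures. */
static int my_cpuidle_init(struct device_node *cpu_node, int cpu)
{
	return 0;
}

static const struct cpuidle_ops my_cpuidle_ops __initconst = {
	.suspend = my_cpu_suspend,
	.init = my_cpuidle_init,
};

/* Matched against the CPU node's "enable-method" property. */
CPUIDLE_METHOD_OF_DECLARE(my_method, "vendor,my-enable-method", &my_cpuidle_ops);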
Example #5
static int pmu_parse_irq_affinity(struct device_node *node, int i)
{
	struct device_node *dn;
	int cpu;

	/*
	 * If we don't have an interrupt-affinity property, we guess irq
	 * affinity matches our logical CPU order, as we used to assume.
	 * This is fragile, so we'll warn in pmu_parse_irqs().
	 */
	if (!pmu_has_irq_affinity(node))
		return i;

	dn = of_parse_phandle(node, "interrupt-affinity", i);
	if (!dn) {
		pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
			i, node->name);
		return -EINVAL;
	}

	/* Now look up the logical CPU number */
	for_each_possible_cpu(cpu) {
		struct device_node *cpu_dn;

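		/*
		 * Only the node pointer is compared below, so the reference
		 * taken by of_cpu_device_node_get() can be dropped right away.
		 */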
		cpu_dn = of_cpu_device_node_get(cpu);
		of_node_put(cpu_dn);

		if (dn == cpu_dn)
			break;
	}

	if (cpu >= nr_cpu_ids) {
		pr_warn("failed to find logical CPU for %s\n", dn->name);
	}

	of_node_put(dn);

	return cpu;
}
Example #6
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
	struct tegra124_cpufreq_priv *priv;
	struct device_node *np;
	struct device *cpu_dev;
	struct platform_device_info cpufreq_dt_devinfo = {};
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -ENODEV;

	np = of_cpu_device_node_get(0);
	if (!np)
		return -ENODEV;

	priv->vdd_cpu_reg = regulator_get(cpu_dev, "vdd-cpu");
	if (IS_ERR(priv->vdd_cpu_reg)) {
		ret = PTR_ERR(priv->vdd_cpu_reg);
		goto out_put_np;
	}

	priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
	if (IS_ERR(priv->cpu_clk)) {
		ret = PTR_ERR(priv->cpu_clk);
		goto out_put_vdd_cpu_reg;
	}

	priv->dfll_clk = of_clk_get_by_name(np, "dfll");
	if (IS_ERR(priv->dfll_clk)) {
		ret = PTR_ERR(priv->dfll_clk);
		goto out_put_cpu_clk;
	}

	priv->pllx_clk = of_clk_get_by_name(np, "pll_x");
	if (IS_ERR(priv->pllx_clk)) {
		ret = PTR_ERR(priv->pllx_clk);
		goto out_put_dfll_clk;
	}

	priv->pllp_clk = of_clk_get_by_name(np, "pll_p");
	if (IS_ERR(priv->pllp_clk)) {
		ret = PTR_ERR(priv->pllp_clk);
		goto out_put_pllx_clk;
	}

	ret = tegra124_cpu_switch_to_dfll(priv);
	if (ret)
		goto out_put_pllp_clk;

	cpufreq_dt_devinfo.name = "cpufreq-dt";
	cpufreq_dt_devinfo.parent = &pdev->dev;
	cpufreq_dt_devinfo.data = &cpufreq_dt_pd;
	cpufreq_dt_devinfo.size_data = sizeof(cpufreq_dt_pd);

	priv->cpufreq_dt_pdev =
		platform_device_register_full(&cpufreq_dt_devinfo);
	if (IS_ERR(priv->cpufreq_dt_pdev)) {
		ret = PTR_ERR(priv->cpufreq_dt_pdev);
		goto out_switch_to_pllx;
	}

	platform_set_drvdata(pdev, priv);

	/* The CPU node was only needed to look up the clocks. */
	of_node_put(np);

	return 0;

out_switch_to_pllx:
	tegra124_cpu_switch_to_pllx(priv);
out_put_pllp_clk:
	clk_put(priv->pllp_clk);
out_put_pllx_clk:
	clk_put(priv->pllx_clk);
out_put_dfll_clk:
	clk_put(priv->dfll_clk);
out_put_cpu_clk:
	clk_put(priv->cpu_clk);
out_put_vdd_cpu_reg:
	regulator_put(priv->vdd_cpu_reg);
out_put_np:
	of_node_put(np);

	return ret;
}
Example #7
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct resource *res;
	int err;

	priv.dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv.base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv.base))
		return PTR_ERR(priv.base);

	np = of_cpu_device_node_get(0);
	if (!np) {
		dev_err(&pdev->dev, "failed to get cpu device node\n");
		return -ENODEV;
	}

	priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
	if (IS_ERR(priv.cpu_clk)) {
		dev_err(priv.dev, "Unable to get cpuclk\n");
		err = PTR_ERR(priv.cpu_clk);
		goto out_node;
	}

	clk_prepare_enable(priv.cpu_clk);
	kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;

	priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
	if (IS_ERR(priv.ddr_clk)) {
		dev_err(priv.dev, "Unable to get ddrclk");
		err = PTR_ERR(priv.ddr_clk);
		goto out_cpu;
	}

	clk_prepare_enable(priv.ddr_clk);
	kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;

	priv.powersave_clk = of_clk_get_by_name(np, "powersave");
	if (IS_ERR(priv.powersave_clk)) {
		dev_err(priv.dev, "Unable to get powersave");
		err = PTR_ERR(priv.powersave_clk);
		goto out_ddr;
	}
	clk_prepare_enable(priv.powersave_clk);

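	/*
	 * All clock handles have been taken from the CPU node; drop the
	 * reference and clear np so the of_node_put() on the error path
	 * below becomes a no-op.
	 */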
	of_node_put(np);
	np = NULL;

	err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
	if (!err)
		return 0;

	dev_err(priv.dev, "Failed to register cpufreq driver");

	clk_disable_unprepare(priv.powersave_clk);
out_ddr:
	clk_disable_unprepare(priv.ddr_clk);
out_cpu:
	clk_disable_unprepare(priv.cpu_clk);
out_node:
	of_node_put(np);

	return err;
}
Example #8
/**
 * dt_init_idle_driver() - Parse the DT idle states and initialize the
 *			   idle driver states array
 * @drv:	  Pointer to CPU idle driver to be initialized
 * @matches:	  Array of of_device_id match structures to search in for
 *		  compatible idle state nodes. The data pointer for each valid
 *		  struct of_device_id entry in the matches array must point to
 *		  a function with the following signature, that corresponds to
 *		  the CPUidle state enter function signature:
 *
 *		  int (*)(struct cpuidle_device *dev,
 *			  struct cpuidle_driver *drv,
 *			  int index);
 *
 * @start_idx:    First idle state index to be initialized
 *
 * If DT idle states are detected and are valid the state count and states
 * array entries in the cpuidle driver are initialized accordingly starting
 * from index start_idx.
 *
 * Return: number of valid DT idle states parsed, <0 on failure
 */
int dt_init_idle_driver(struct cpuidle_driver *drv,
			const struct of_device_id *matches,
			unsigned int start_idx)
{
	struct cpuidle_state *idle_state;
	struct device_node *state_node, *cpu_node;
	int i, err = 0;
	const cpumask_t *cpumask;
	unsigned int state_idx = start_idx;

	if (state_idx >= CPUIDLE_STATE_MAX)
		return -EINVAL;
	/*
	 * We get the idle states for the first logical cpu in the
	 * driver mask (or cpu_possible_mask if the driver cpumask is not set)
	 * and we check through idle_state_valid() if they are uniform
	 * across CPUs, otherwise we hit a firmware misconfiguration.
	 */
	cpumask = drv->cpumask ? : cpu_possible_mask;
	cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));

	for (i = 0; ; i++) {
		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
		if (!state_node)
			break;

		if (!of_device_is_available(state_node)) {
			of_node_put(state_node);
			continue;
		}

		if (!idle_state_valid(state_node, i, cpumask)) {
			pr_warn("%pOF idle state not valid, bailing out\n",
				state_node);
			err = -EINVAL;
			break;
		}

		if (state_idx == CPUIDLE_STATE_MAX) {
			pr_warn("State index reached static CPU idle driver states array size\n");
			break;
		}

		idle_state = &drv->states[state_idx++];
		err = init_state_node(idle_state, matches, state_node);
		if (err) {
			pr_err("Parsing idle state node %pOF failed with err %d\n",
			       state_node, err);
			err = -EINVAL;
			break;
		}
		of_node_put(state_node);
	}

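	/*
	 * On the error paths that break out of the loop, state_node still
	 * holds a reference; otherwise it is NULL and of_node_put() is a
	 * no-op.
	 */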
	of_node_put(state_node);
	of_node_put(cpu_node);
	if (err)
		return err;
	/*
	 * Update the driver state count only if some valid DT idle states
	 * were detected
	 */
	if (i)
		drv->state_count = state_idx;

	/*
	 * Return the number of present and valid DT idle states, which can
	 * also be 0 on platforms with missing DT idle states or legacy DT
	 * configuration predating the DT idle states bindings.
	 */
	return i;
}
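The kernel-doc above requires every valid entry in the matches array to carry the idle-state enter callback in its data pointer. The following caller sketch is loosely modelled on the generic ARM CPUidle driver; the my_-prefixed identifiers are placeholders, and dt_idle_states.h is assumed to be the driver-local header declaring dt_init_idle_driver().

#include <linux/cpuidle.h>
#include <linux/of.h>

#include "dt_idle_states.h"

/* Enter the idle state described by drv->states[idx]. */
static int my_enter_idle_state(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int idx)
{
	return idx;
}

static const struct of_device_id my_idle_state_match[] = {
	{ .compatible = "arm,idle-state", .data = my_enter_idle_state },
	{ },
};

static int my_idle_driver_init(struct cpuidle_driver *drv)
{
	int ret;

	/* Fill drv->states[] from DT, starting after the built-in state 0. */
	ret = dt_init_idle_driver(drv, my_idle_state_match, 1);
	if (ret <= 0)
		return ret ? : -ENODEV;

	return 0;
}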
Example #9
static int spear_cpufreq_driver_init(void)
{
	struct device_node *np;
	const struct property *prop;
	struct cpufreq_frequency_table *freq_tbl;
	const __be32 *val;
	int cnt, i, ret;

	np = of_cpu_device_node_get(0);
	if (!np) {
		pr_err("No cpu node found");
		return -ENODEV;
	}

	if (of_property_read_u32(np, "clock-latency",
				&spear_cpufreq.transition_latency))
		spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;

	prop = of_find_property(np, "cpufreq_tbl", NULL);
	if (!prop || !prop->value) {
		pr_err("Invalid cpufreq_tbl");
		ret = -ENODEV;
		goto out_put_node;
	}

	cnt = prop->length / sizeof(u32);
	val = prop->value;

	freq_tbl = kmalloc_array(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	for (i = 0; i < cnt; i++) {
		freq_tbl[i].driver_data = i;
		freq_tbl[i].frequency = be32_to_cpup(val++);
	}

	freq_tbl[i].driver_data = i;
	freq_tbl[i].frequency = CPUFREQ_TABLE_END;

	spear_cpufreq.freq_tbl = freq_tbl;

	of_node_put(np);

	spear_cpufreq.clk = clk_get(NULL, "cpu_clk");
	if (IS_ERR(spear_cpufreq.clk)) {
		pr_err("Unable to get CPU clock\n");
		ret = PTR_ERR(spear_cpufreq.clk);
		goto out_put_mem;
	}

	ret = cpufreq_register_driver(&spear_cpufreq_driver);
	if (!ret)
		return 0;

	pr_err("failed register driver: %d\n", ret);
	clk_put(spear_cpufreq.clk);

out_put_mem:
	kfree(freq_tbl);
	return ret;

out_put_node:
	of_node_put(np);
	return ret;
}