/*
 * Build the fast and slow CPU masks, preferring the HMP Kconfig overrides
 * and falling back to the device tree "cpu" nodes. If both clusters cannot
 * be identified, every CPU ends up in the fast mask.
 */
void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
					struct cpumask *slow)
{
	struct device_node *cn = NULL;
	int cpu;

	cpumask_clear(fast);
	cpumask_clear(slow);

	/*
	 * Use the config options if they are given. This helps testing
	 * HMP scheduling on systems without a big.LITTLE architecture.
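	 * The masks are cpulist strings, e.g. "0-3", as accepted by
	 * cpulist_parse().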
	 */
	if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
		if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
			WARN(1, "Failed to parse HMP fast cpu mask!\n");
		if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
			WARN(1, "Failed to parse HMP slow cpu mask!\n");
		return;
	}

	/*
	 * Otherwise, walk the device tree "cpu" nodes and classify each
	 * core as fast or slow.
	 */
	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		const u32 *mpidr;
		int len;

		mpidr = of_get_property(cn, "reg", &len);
		if (!mpidr || len != 4) {
			pr_err("* %s missing reg property\n", cn->full_name);
			continue;
		}

		/* Map the MPIDR from the "reg" property to a logical CPU id. */
		cpu = get_logical_index(be32_to_cpup(mpidr));
		if (cpu == -EINVAL) {
			pr_err("couldn't get logical index for mpidr %x\n",
							be32_to_cpup(mpidr));
			break;
		}

		if (is_little_cpu(cn))
			cpumask_set_cpu(cpu, slow);
		else
			cpumask_set_cpu(cpu, fast);
	}

	if (!cpumask_empty(fast) && !cpumask_empty(slow))
		return;

	/*
	 * We didn't find both big and little cores, so mark every core as
	 * fast. This keeps the system running, with all cores treated
	 * equally.
	 */
	cpumask_setall(fast);
	cpumask_clear(slow);
}
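
/*
 * Count the online little CPUs whose sampled load delta is below
 * load_threshold_down, i.e. the little cores currently idle enough to be
 * candidates for going offline.
 */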
static unsigned int get_num_unloaded_little_cpus(void)
{
	unsigned int cpu;
	unsigned int unloaded_cpus = 0;

	for_each_online_cpu(cpu) {
		if (is_little_cpu(cpu)) {
			unsigned int cpu_load = get_delta_cpu_load_and_update(cpu);
			if (cpu_load < load_threshold_down)
				unloaded_cpus += 1;
		}
	}

	return unloaded_cpus;
}
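
/*
 * Bring every offline little CPU online. Marked __ref so the call to
 * cpu_up(), which is __cpuinit on older kernels, does not trigger a
 * section mismatch warning.
 */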
static void __ref enable_little_cluster(void)
{
	unsigned int cpu;
	unsigned int num_up = 0;

	for_each_present_cpu(cpu) {
		if (is_little_cpu(cpu) && !cpu_online(cpu)) {
			/* Only count CPUs that actually came online. */
			if (!cpu_up(cpu))
				num_up++;
		}
	}

	/* Log only when the cluster goes from disabled to enabled. */
	if (!little_cluster_enabled)
		pr_info("cluster_plug: %d little cpus enabled\n", num_up);

	little_cluster_enabled = true;
}
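
/*
 * Take every online little CPU offline. Does nothing if the little
 * cluster is already disabled.
 */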
static void disable_little_cluster(void)
{
	unsigned int cpu;
	unsigned int num_down = 0;

	if (!little_cluster_enabled)
		return;

	for_each_present_cpu(cpu) {
		if (is_little_cpu(cpu) && cpu_online(cpu)) {
			/* Only count CPUs that actually went offline. */
			if (!cpu_down(cpu))
				num_down++;
		}
	}

	pr_info("cluster_plug: %d little cpus disabled\n", num_down);

	little_cluster_enabled = false;
}