void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
					struct cpumask *slow)
{
	struct device_node *cn = NULL;
	int cpu;

	cpumask_clear(fast);
	cpumask_clear(slow);

	/*
	 * Use the config options if they are given. This helps testing
	 * HMP scheduling on systems without a big.LITTLE architecture.
	 */
	if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
		if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
			WARN(1, "Failed to parse HMP fast cpu mask!\n");
		if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
			WARN(1, "Failed to parse HMP slow cpu mask!\n");
		return;
	}

	/*
	 * Else, parse device tree for little cores.
	 */
	while ((cn = of_find_node_by_type(cn, "cpu"))) {

		const u32 *mpidr;
		int len;

		mpidr = of_get_property(cn, "reg", &len);
		if (!mpidr || len != 4) {
			pr_err("* %s missing reg property\n", cn->full_name);
			continue;
		}

		cpu = get_logical_index(be32_to_cpup(mpidr));
		if (cpu == -EINVAL) {
			pr_err("couldn't get logical index for mpidr %x\n",
							be32_to_cpup(mpidr));
			break;
		}

		if (is_little_cpu(cn))
			cpumask_set_cpu(cpu, slow);
		else
			cpumask_set_cpu(cpu, fast);
	}

	if (!cpumask_empty(fast) && !cpumask_empty(slow))
		return;

	/*
	 * We didn't find both big and little cores so let's call all cores
	 * fast as this will keep the system running, with all cores being
	 * treated equal.
	 */
	cpumask_setall(fast);
	cpumask_clear(slow);
}
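
/*
 * Hedged usage sketch (not from the original sources): one way a caller
 * could consume the masks built above, printing them with
 * cpulist_scnprintf() as other examples in this file do. The function
 * name hmp_dump_cpu_domains() and the buffer sizing are illustrative
 * assumptions only.
 */
static void __init hmp_dump_cpu_domains(void)
{
	struct cpumask fast, slow;
	char buf[NR_CPUS * 5];

	arch_get_fast_and_slow_cpus(&fast, &slow);

	cpulist_scnprintf(buf, sizeof(buf), &fast);
	pr_info("HMP: fast (big) cpus: %s\n", buf);

	cpulist_scnprintf(buf, sizeof(buf), &slow);
	pr_info("HMP: slow (LITTLE) cpus: %s\n", buf);
}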
static int sunxi_cpu_budget_cooling_register(struct platform_device *pdev)
{
	struct thermal_cooling_device *cdev;
	struct cpumask cluster0_mask;
	struct cpumask cluster1_mask;
	int i;
	/* make sure cpufreq driver has been initialized */
	if (!cpufreq_frequency_get_table(0))
		return -EPROBE_DEFER;
	cpumask_clear(&cluster0_mask);
	cpumask_clear(&cluster1_mask);
#if defined(CONFIG_SCHED_HMP)
	arch_get_fast_and_slow_cpus(&cluster1_mask, &cluster0_mask);
#elif defined(CONFIG_SCHED_SMP_DCMP)
	if (strlen(CONFIG_CLUSTER0_CPU_MASK) && strlen(CONFIG_CLUSTER1_CPU_MASK)) {
		if (cpulist_parse(CONFIG_CLUSTER0_CPU_MASK, &cluster0_mask)) {
			pr_err("Failed to parse cluster0 cpu mask!\n");
			return -1;
		}
		if (cpulist_parse(CONFIG_CLUSTER1_CPU_MASK, &cluster1_mask)) {
			pr_err("Failed to parse cluster1 cpu mask!\n");
			return -1;
		}
	}
#else
	cpumask_copy(&cluster0_mask, cpu_possible_mask);
#endif
	dynamic_tbl = kmalloc(sizeof(struct cpu_budget_table) * max_tbl_num,
			      GFP_KERNEL);
	if (!dynamic_tbl)
		return -ENOMEM;

	dynamic_tbl_num = 0;
	for (i = 0; i < max_tbl_num; i++) {
		if (m_current_tbl[i].online) {
			dynamic_tbl[dynamic_tbl_num].cluster0_freq = m_current_tbl[i].cluster0_freq;
			dynamic_tbl[dynamic_tbl_num].cluster0_cpunr = m_current_tbl[i].cluster0_cpunr;
			dynamic_tbl[dynamic_tbl_num].cluster1_freq = m_current_tbl[i].cluster1_freq;
			dynamic_tbl[dynamic_tbl_num].cluster1_cpunr = m_current_tbl[i].cluster1_cpunr;
			dynamic_tbl[dynamic_tbl_num].gpu_throttle = m_current_tbl[i].gpu_throttle;
			dynamic_tbl_num++;
		}
	}

	cdev = cpu_budget_cooling_register(dynamic_tbl, dynamic_tbl_num,
					   &cluster0_mask, &cluster1_mask);
	if (IS_ERR_OR_NULL(cdev)) {
		dev_err(&pdev->dev, "Failed to register cooling device\n");
		kfree(dynamic_tbl);
		dynamic_tbl = NULL;
		return cdev ? PTR_ERR(cdev) : -ENOMEM;
	}
	platform_set_drvdata(pdev, cdev);
	dev_info(&pdev->dev, "Cooling device registered: %s\n", cdev->type);
	return 0;
}
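
/*
 * Hedged sketch of how the probe above could be wired into a platform
 * driver. The driver name "sunxi-budget-cooling", the remove callback and
 * the companion cpu_budget_cooling_unregister() call are assumptions; the
 * real sunxi driver may use different names and a different teardown path.
 */
static int sunxi_cpu_budget_cooling_remove(struct platform_device *pdev)
{
	struct thermal_cooling_device *cdev = platform_get_drvdata(pdev);

	cpu_budget_cooling_unregister(cdev);	/* assumed counterpart API */
	kfree(dynamic_tbl);			/* assumes no other user of the table */
	dynamic_tbl = NULL;
	return 0;
}

static struct platform_driver sunxi_cpu_budget_cooling_driver = {
	.driver = {
		.name = "sunxi-budget-cooling",
	},
	.probe = sunxi_cpu_budget_cooling_register,
	.remove = sunxi_cpu_budget_cooling_remove,
};
module_platform_driver(sunxi_cpu_budget_cooling_driver);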
static int autohotplug_smart_get_slow_fast_cpus(struct cpumask *fast,
							struct cpumask *slow)
{
#if defined(CONFIG_SCHED_HMP)
	arch_get_fast_and_slow_cpus(fast, slow);

	if (cpumask_test_cpu(0, fast))
		hmp_cluster0_is_big = 1;
	else
		hmp_cluster0_is_big = 0;

	if (hmp_cluster0_is_big) {
		autohotplug_smart.try_up = autohotplug_smart_tryup_hmp_simple;
		autohotplug_smart.try_down = autohotplug_smart_trydown_hmp_simple;
		autohotplug_smart.update_limits = autohotplug_smart_updatelimits_hmp_simple;
	} else {
		autohotplug_smart.try_up = autohotplug_smart_tryup_hmp_normal;
		autohotplug_smart.try_down = autohotplug_smart_trydown_hmp_normal;
		autohotplug_smart.update_limits = autohotplug_smart_updatelimits_hmp_normal;
	}
#elif defined(CONFIG_SCHED_SMP_DCMP)
	if (strlen(CONFIG_CLUSTER0_CPU_MASK) && strlen(CONFIG_CLUSTER1_CPU_MASK)) {
		if (cpulist_parse(CONFIG_CLUSTER0_CPU_MASK, fast)) {
			pr_err("Failed to parse cluster0 cpu mask!\n");
			return -1;
		}

		if (cpulist_parse(CONFIG_CLUSTER1_CPU_MASK, slow)) {
			pr_err("Failed to parse cluster1 cpu mask!\n");
			return -1;
		}
	}
	autohotplug_smart.try_up = autohotplug_smart_tryup_hmp_simple;
	autohotplug_smart.try_down = autohotplug_smart_trydown_hmp_simple;
#ifndef CONFIG_ARCH_SUN8IW6
	autohotplug_smart.update_limits = autohotplug_smart_updatelimits_hmp_simple;
#endif
#else
	cpumask_copy(slow, cpu_possible_mask);
	autohotplug_smart.try_up = autohotplug_smart_tryup_smp_normal;
	autohotplug_smart.try_down = autohotplug_smart_trydown_smp_normal;
	autohotplug_smart.update_limits = autohotplug_smart_updatelimits_smp_normal;
#endif

	return 0;
}
static cpu_set_t *
path_cpuparse(int maxcpus, int islist, const char *path, va_list ap)
{
	FILE *fd;
	cpu_set_t *set;
	size_t setsize, len = maxcpus * 7;
	char buf[len];

	fd = path_vfopen("r" UL_CLOEXECSTR, 1, path, ap);

	if (!fgets(buf, len, fd))
		err(EXIT_FAILURE, _("cannot read %s"), pathbuf);
	fclose(fd);

	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	set = cpuset_alloc(maxcpus, &setsize, NULL);
	if (!set)
		err(EXIT_FAILURE, _("failed to callocate cpu set"));

	if (islist) {
		if (cpulist_parse(buf, set, setsize, 0))
			errx(EXIT_FAILURE, _("failed to parse CPU list %s"), buf);
	} else {
		if (cpumask_parse(buf, set, setsize))
			errx(EXIT_FAILURE, _("failed to parse CPU mask %s"), buf);
	}
	return set;
}
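
/*
 * Hedged sketch of the varargs wrapper that typically sits in front of
 * path_cpuparse() in util-linux's lib/path.c; the exact name and set of
 * wrappers may differ between versions.
 */
cpu_set_t *path_read_cpulist(int maxcpus, const char *path, ...)
{
	va_list ap;
	cpu_set_t *set;

	va_start(ap, path);
	set = path_cpuparse(maxcpus, 1, path, ap);	/* islist = 1 */
	va_end(ap);

	return set;
}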
static int __init setup_kdata(char *str)
{
	char buf[NR_CPUS * 5];

	if (str == NULL)
		return -EINVAL;

	if (strcmp(str, "huge") == 0) {
#if CHIP_HAS_CBOX_HOME_MAP()
		kdata_huge = 1;
#else
		pr_err("kdata=huge: only supported on TILEPro and later.\n");
#endif
		return 0;
	}

	if (strncmp(str, "small", 5) == 0) {
		kdata_huge = 0;
		str += strlen("small");
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	if (cpulist_parse(str, &kdata_mask) != 0)
		return -EINVAL;

	kdata_arg_seen = 1;
	cpulist_scnprintf(buf, sizeof(buf), &kdata_mask);
	pr_info("kdata: using caching neighborhood %s\n", buf);
	return 0;
}
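
/*
 * Sketch of the usual wiring: boot-time parsers like setup_kdata() are
 * registered with early_param(), so that "kdata=huge", "kdata=small" or
 * "kdata=<cpulist>" on the kernel command line reaches the handler above.
 */
early_param("kdata", setup_kdata);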
static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
{
	int i, ret;
	struct cpumask tmp_mask;

	if (!clusters_inited)
		return -EINVAL;

	ret = cpulist_parse(buf, &tmp_mask);

	if (ret)
		return ret;

	for (i = 0; i < num_clusters; i++) {
		if (cpumask_empty(managed_clusters[i]->cpus)) {
			mutex_lock(&managed_cpus_lock);
			cpumask_copy(managed_clusters[i]->cpus, &tmp_mask);
			cpumask_clear(managed_clusters[i]->offlined_cpus);
			mutex_unlock(&managed_cpus_lock);
			break;
		}
	}

	return ret;
}
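
/*
 * Hedged sketch of how set_managed_cpus() might be exposed as a writable
 * module parameter. The kernel_param_ops name, the omitted get handler and
 * the "managed_cpus" parameter name are illustrative assumptions.
 */
static const struct kernel_param_ops param_ops_managed_cpus = {
	.set = set_managed_cpus,
	.get = NULL,	/* read-back handler omitted in this sketch */
};
module_param_cb(managed_cpus, &param_ops_managed_cpus, NULL, 0644);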
static int __init setup_ktext(char *str)
{
	if (str == NULL)
		return -EINVAL;

	/* If you have a leading "nocache", turn off ktext caching */
	if (strncmp(str, "nocache", 7) == 0) {
		ktext_nocache = 1;
		pr_info("ktext: disabling local caching of kernel text\n");
		str += 7;
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	ktext_arg_seen = 1;

	/* Default setting on Tile64: use a huge page */
	if (strcmp(str, "huge") == 0)
		pr_info("ktext: using one huge locally cached page\n");

	/* Pay TLB cost but get no cache benefit: cache small pages locally */
	else if (strcmp(str, "local") == 0) {
		ktext_small = 1;
		ktext_local = 1;
		pr_info("ktext: using small pages with local caching\n");
	}

	/* Neighborhood cache ktext pages on all cpus. */
	else if (strcmp(str, "all") == 0) {
		ktext_small = 1;
		ktext_all = 1;
		pr_info("ktext: using maximal caching neighborhood\n");
	}


	/* Neighborhood ktext pages on specified mask */
	else if (cpulist_parse(str, &ktext_mask) == 0) {
		char buf[NR_CPUS * 5];
		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
		if (cpumask_weight(&ktext_mask) > 1) {
			ktext_small = 1;
			pr_info("ktext: using caching neighborhood %s "
			       "with small pages\n", buf);
		} else {
			pr_info("ktext: caching on cpu %s with one huge page\n",
			       buf);
		}
	}

	else if (*str)
		return -EINVAL;

	return 0;
}
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
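
/*
 * Sketch of the registration: irq_affinity_setup() is the handler for the
 * "irq_affinity=<cpulist>" boot option, hooked up with __setup() so the
 * default IRQ affinity mask is chosen before interrupts are requested.
 */
__setup("irq_affinity=", irq_affinity_setup);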
static void cpu_parse(char *cpu_string, cpu_set_t *cpu_set, size_t setsize)
{
	int rc;

	rc = cpulist_parse(cpu_string, cpu_set, setsize, 1);
	if (rc == 0)
		return;
	if (rc == 2)
		errx(EXIT_FAILURE, _("invalid CPU number in CPU list: %s"), cpu_string);
	errx(EXIT_FAILURE, _("failed to parse CPU list: %s"), cpu_string);
}
void tzdev_init_migration(void)
{
	cpumask_setall(&tzdev_cpu_mask[CLUSTER_BIG]);
	cpumask_clear(&tzdev_cpu_mask[CLUSTER_LITTLE]);

	if (strlen(CONFIG_HMP_FAST_CPU_MASK))
		cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, &tzdev_cpu_mask[CLUSTER_BIG]);
	else
		pr_notice("All CPUs are equal, core migration will do nothing.\n");
	cpumask_andnot(&tzdev_cpu_mask[CLUSTER_LITTLE], cpu_present_mask,
		       &tzdev_cpu_mask[CLUSTER_BIG]);
	register_cpu_notifier(&tzdev_cpu_notifier);
}
/* Get cpu map from device tree */
static int __init eznps_get_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return 1;

	cpulist_parse(buf, cpumask);

	return 0;
}
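
/*
 * Hedged caller sketch: eznps_get_map() pulls a cpulist string out of a
 * flat device-tree property and turns it into a cpumask. The property name
 * "present-cpus" and the init_cpu_present() hand-off below are assumptions
 * about how the platform code consumes the result.
 */
static void __init eznps_init_cpumasks(void)
{
	struct cpumask cpumask;

	if (eznps_get_map("present-cpus", &cpumask)) {
		pr_err("Failed to get present-cpus from dtb\n");
		return;
	}
	init_cpu_present(&cpumask);
}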
static int autohotplug_smart_get_slow_fast_cpus(struct cpumask *fast,
							struct cpumask *slow)
{
#if defined(CONFIG_SCHED_HMP)
	arch_get_fast_and_slow_cpus(fast, slow);
#elif defined(CONFIG_SCHED_SMP_DCMP)
	if (strlen(CONFIG_CLUSTER0_CPU_MASK) && strlen(CONFIG_CLUSTER1_CPU_MASK)) {
		if (cpulist_parse(CONFIG_CLUSTER0_CPU_MASK, fast)) {
			pr_err("Failed to parse cluster0 cpu mask!\n");
			return -1;
		}

		if (cpulist_parse(CONFIG_CLUSTER1_CPU_MASK, slow)) {
			pr_err("Failed to parse cluster1 cpu mask!\n");
			return -1;
		}
	}
#else
	cpumask_copy(slow, cpu_possible_mask);
#endif

	return 0;
}
/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
	int cpu;

	alloc_bootmem_cpumask_var(&nohz_full_mask);
	if (cpulist_parse(str, nohz_full_mask) < 0) {
		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
		return 1;
	}

	cpu = smp_processor_id();
	if (cpumask_test_cpu(cpu, nohz_full_mask)) {
		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
		cpumask_clear_cpu(cpu, nohz_full_mask);
	}
	have_nohz_full_mask = true;

	return 1;
}
int main(int argc, char **argv)
{
	cpu_set_t *new_set;
	pid_t pid = 0;
	int c, all_tasks = 0;
	int ncpus;
	size_t new_setsize, nbits;
	struct taskset ts;

	static const struct option longopts[] = {
		{ "all-tasks",	0, NULL, 'a' },
		{ "pid",	0, NULL, 'p' },
		{ "cpu-list",	0, NULL, 'c' },
		{ "help",	0, NULL, 'h' },
		{ "version",	0, NULL, 'V' },
		{ NULL,		0, NULL,  0  }
	};

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);

	memset(&ts, 0, sizeof(ts));

	while ((c = getopt_long(argc, argv, "+apchV", longopts, NULL)) != -1) {
		switch (c) {
		case 'a':
			all_tasks = 1;
			break;
		case 'p':
			pid = strtol_or_err(argv[argc - 1],
					    _("failed to parse pid"));
			break;
		case 'c':
			ts.use_list = 1;
			break;
		case 'V':
			printf("%s from %s\n", program_invocation_short_name,
			       PACKAGE_STRING);
			return EXIT_SUCCESS;
		case 'h':
			usage(stdout);
			break;
		default:
			usage(stderr);
			break;
		}
	}

	if ((!pid && argc - optind < 2)
	    || (pid && (argc - optind < 1 || argc - optind > 2)))
		usage(stderr);

	ncpus = get_max_number_of_cpus();
	if (ncpus <= 0)
		errx(EXIT_FAILURE, _("cannot determine NR_CPUS; aborting"));

	/*
	 * the ts->set is always used for the sched_getaffinity call
	 * On the sched_getaffinity the kernel demands a user mask of
	 * at least the size of its own cpumask_t.
	 */
	ts.set = cpuset_alloc(ncpus, &ts.setsize, &nbits);
	if (!ts.set)
		err(EXIT_FAILURE, _("cpuset_alloc failed"));

	/* buffer for conversion from mask to string */
	ts.buflen = 7 * nbits;
	ts.buf = xmalloc(ts.buflen);

	/*
	 * new_set is always used for the sched_setaffinity call
	 * On the sched_setaffinity the kernel will zero-fill its
	 * cpumask_t if the user's mask is shorter.
	 */
	new_set = cpuset_alloc(ncpus, &new_setsize, NULL);
	if (!new_set)
		err(EXIT_FAILURE, _("cpuset_alloc failed"));

	if (argc - optind == 1)
		ts.get_only = 1;

	else if (ts.use_list) {
		if (cpulist_parse(argv[optind], new_set, new_setsize, 0))
			errx(EXIT_FAILURE, _("failed to parse CPU list: %s"),
			     argv[optind]);
	} else if (cpumask_parse(argv[optind], new_set, new_setsize)) {
		errx(EXIT_FAILURE, _("failed to parse CPU mask: %s"),
		     argv[optind]);
	}

	if (all_tasks) {
		struct proc_tasks *tasks = proc_open_tasks(pid);
		while (!proc_next_tid(tasks, &ts.pid))
			do_taskset(&ts, new_setsize, new_set);
		proc_close_tasks(tasks);
	} else {
		ts.pid = pid;
		do_taskset(&ts, new_setsize, new_set);
	}

	free(ts.buf);
	cpuset_free(ts.set);
	cpuset_free(new_set);

	if (!pid) {
		argv += optind + 1;
		execvp(argv[0], argv);
		err(EXIT_FAILURE, _("executing %s failed"), argv[0]);
	}

	return EXIT_SUCCESS;
}