Example #1
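From util-linux (lib/path.c): path_cpuparse() reads a single line from a /sys or /proc file and parses it, as either a human-readable CPU list or a hexadecimal CPU mask, into a freshly allocated cpu_set_t.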
static cpu_set_t *
path_cpuparse(int maxcpus, int islist, const char *path, va_list ap)
{
	FILE *fd;
	cpu_set_t *set;
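	/* ~7 bytes per CPU is a generous bound for the textual mask/list */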
	size_t setsize, len = maxcpus * 7;
	char buf[len];

	fd = path_vfopen("r" UL_CLOEXECSTR, 1, path, ap);

	if (!fgets(buf, len, fd))
		err(EXIT_FAILURE, _("cannot read %s"), pathbuf);	/* pathbuf: file-scope buffer holding the formatted path */
	fclose(fd);

	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	set = cpuset_alloc(maxcpus, &setsize, NULL);
	if (!set)
		err(EXIT_FAILURE, _("failed to allocate cpu set"));

	if (islist) {
		if (cpulist_parse(buf, set, setsize, 0))
			errx(EXIT_FAILURE, _("failed to parse CPU list %s"), buf);
	} else {
		if (cpumask_parse(buf, set, setsize))
			errx(EXIT_FAILURE, _("failed to parse CPU mask %s"), buf);
	}
	return set;
}
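As a standalone illustration of the same userspace API, here is a minimal sketch (not from any of the projects excerpted here) that assumes util-linux's lib/cpuset.c helpers, cpuset_alloc(), cpumask_parse(), cpulist_create() and cpuset_free(), are available through its "cpuset.h" header:

#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include "cpuset.h"	/* util-linux lib/cpuset.c helpers (assumed available) */

int main(void)
{
	size_t setsize, nbits;
	char buf[1024];
	/* size the set for up to 1024 CPUs; setsize is the byte size
	   that the parse and convert helpers expect */
	cpu_set_t *set = cpuset_alloc(1024, &setsize, &nbits);

	if (!set)
		err(EXIT_FAILURE, "cpuset_alloc failed");

	/* "f" is the hexadecimal mask for CPUs 0-3 */
	if (cpumask_parse("f", set, setsize))
		errx(EXIT_FAILURE, "failed to parse CPU mask");

	/* convert back to list form; prints "0-3" */
	printf("%s\n", cpulist_create(buf, sizeof(buf), set, setsize));

	cpuset_free(set);
	return EXIT_SUCCESS;
}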
Example #2
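From an older Linux kernel (kernel/irq/proc.c, the irq_desc[].handler era): the write handler behind /proc/irq/<n>/smp_affinity. Here cpumask_parse() is the kernel helper that reads the mask straight from the user buffer; a mask covering no online CPU is rejected so an IRQ can never be disabled by accident.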
static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
				   unsigned long count, void *data)
{
	unsigned int irq = (int)(long)data, full_count = count, err;
	cpumask_t new_value, tmp;

	if (!irq_desc[irq].handler->set_affinity || no_irq_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq,
					cpumask_of_cpu(first_cpu(new_value)));

	return full_count;
}
Example #3
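A later revision of the same handler (irq_desc[].handler renamed to .chip): an empty mask is no longer an error but a request for the architecture code to pick a default affinity via select_smp_affinity(), and the actual update is delegated to proc_set_irq_affinity().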
static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
				   unsigned long count, void *data)
{
	unsigned int irq = (int)(long)data, full_count = count, err;
	cpumask_t new_value, tmp;

	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		/* Special case for empty set - allow the architecture
		   code to set default SMP affinity. */
		return select_smp_affinity(irq) ? -EINVAL : full_count;

	proc_set_irq_affinity(irq, new_value);

	return full_count;
}
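These two handlers are the kernel side of a plain write to /proc/irq/<n>/smp_affinity. For completeness, a minimal userspace sketch of that write (the IRQ number 19 and the mask "f" are arbitrary placeholders; requires root):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* placeholder path and mask: IRQ 19, CPUs 0-3 */
	FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* the handlers above parse exactly this hexadecimal format */
	if (fprintf(f, "f\n") < 0 || fclose(f) == EOF) {
		perror("write");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}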
Example #4
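From the kernel's profiling code: the write handler for the prof_cpu_mask /proc file. It simply parses the user-supplied mask and stores it.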
static int prof_cpu_mask_write_proc(struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data;
	unsigned long full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
Example #5
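The main() of taskset(1) from util-linux: it reads the new affinity as either a CPU list (-c) or a hexadecimal mask, then applies it via do_taskset() to a single PID (-p), to every task of that PID (-a), or to a command that it finally exec()s.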
int main(int argc, char **argv)
{
	cpu_set_t *new_set;
	pid_t pid = 0;
	int c, all_tasks = 0;
	int ncpus;
	size_t new_setsize, nbits;
	struct taskset ts;

	static const struct option longopts[] = {
		{ "all-tasks",	0, NULL, 'a' },
		{ "pid",	0, NULL, 'p' },
		{ "cpu-list",	0, NULL, 'c' },
		{ "help",	0, NULL, 'h' },
		{ "version",	0, NULL, 'V' },
		{ NULL,		0, NULL,  0  }
	};

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);

	memset(&ts, 0, sizeof(ts));

	while ((c = getopt_long(argc, argv, "+apchV", longopts, NULL)) != -1) {
		switch (c) {
		case 'a':
			all_tasks = 1;
			break;
		case 'p':
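			/* -p takes no optarg here; the PID is expected
			 * as the last command-line argument */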
			pid = strtol_or_err(argv[argc - 1],
					    _("failed to parse pid"));
			break;
		case 'c':
			ts.use_list = 1;
			break;
		case 'V':
			printf("%s from %s\n", program_invocation_short_name,
			       PACKAGE_STRING);
			return EXIT_SUCCESS;
		case 'h':
			usage(stdout);
			break;
		default:
			usage(stderr);
			break;
		}
	}

	if ((!pid && argc - optind < 2)
	    || (pid && (argc - optind < 1 || argc - optind > 2)))
		usage(stderr);

	ncpus = get_max_number_of_cpus();
	if (ncpus <= 0)
		errx(EXIT_FAILURE, _("cannot determine NR_CPUS; aborting"));

	/*
	 * ts.set is always used for the sched_getaffinity() call:
	 * there the kernel demands a user mask at least as large as
	 * its own cpumask_t.
	 */
	ts.set = cpuset_alloc(ncpus, &ts.setsize, &nbits);
	if (!ts.set)
		err(EXIT_FAILURE, _("cpuset_alloc failed"));

	/* buffer for conversion from mask to string */
	ts.buflen = 7 * nbits;
	ts.buf = xmalloc(ts.buflen);

	/*
	 * new_set is always used for the sched_setaffinity() call:
	 * there the kernel zero-fills its cpumask_t if the user's
	 * mask is shorter.
	 */
	new_set = cpuset_alloc(ncpus, &new_setsize, NULL);
	if (!new_set)
		err(EXIT_FAILURE, _("cpuset_alloc failed"));

	if (argc - optind == 1)
		ts.get_only = 1;

	else if (ts.use_list) {
		if (cpulist_parse(argv[optind], new_set, new_setsize, 0))
			errx(EXIT_FAILURE, _("failed to parse CPU list: %s"),
			     argv[optind]);
	} else if (cpumask_parse(argv[optind], new_set, new_setsize)) {
		errx(EXIT_FAILURE, _("failed to parse CPU mask: %s"),
		     argv[optind]);
	}

	if (all_tasks) {
		struct proc_tasks *tasks = proc_open_tasks(pid);
		while (!proc_next_tid(tasks, &ts.pid))
			do_taskset(&ts, new_setsize, new_set);
		proc_close_tasks(tasks);
	} else {
		ts.pid = pid;
		do_taskset(&ts, new_setsize, new_set);
	}

	free(ts.buf);
	cpuset_free(ts.set);
	cpuset_free(new_set);

	if (!pid) {
		argv += optind + 1;
		execvp(argv[0], argv);
		err(EXIT_FAILURE, _("executing %s failed"), argv[0]);
	}

	return EXIT_SUCCESS;
}