Example #1
struct numa_topology *numa_topology__new(void)
{
	struct cpu_map *node_map = NULL;
	struct numa_topology *tp = NULL;
	char path[MAXPATHLEN];
	char *buf = NULL;
	size_t len = 0;
	u32 nr, i;
	FILE *fp;
	char *c;

	scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
		  sysfs__mountpoint());

	fp = fopen(path, "r");
	if (!fp)
		return NULL;

	if (getline(&buf, &len, fp) <= 0)
		goto out;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto out;

	nr = (u32) node_map->nr;

	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
	if (!tp)
		goto out;

	tp->nr = nr;

	for (i = 0; i < nr; i++) {
		if (load_numa_node(&tp->nodes[i], node_map->map[i])) {
			numa_topology__delete(tp);
			tp = NULL;
			break;
		}
	}

out:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return tp;
}
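A minimal caller sketch for the constructor above, using only identifiers already visible here (tp->nr, tp->nodes[], and the numa_topology__delete() destructor used on the error path); the per-node members live in definitions not shown, so the loop body is only a comment:

	struct numa_topology *tp = numa_topology__new();

	if (tp != NULL) {
		u32 i;

		for (i = 0; i < tp->nr; i++) {
			/* inspect tp->nodes[i]; its members are defined elsewhere */
		}
		numa_topology__delete(tp);	/* frees per-node data and tp itself */
	}
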
Example #2
struct cpu_topology *cpu_topology__new(void)
{
	struct cpu_topology *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topology(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		cpu_topology__delete(tp);
		tp = NULL;
	}
	return tp;
}
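A matching usage sketch, assuming a NULL return is the only failure signal and that cpu_topology__delete() (already used on the error path above) is the destructor:

	struct cpu_topology *tp = cpu_topology__new();

	if (tp != NULL) {
		/*
		 * tp->core_siblings[] and tp->thread_siblings[] were filled
		 * in by build_cpu_topology(); their counts are kept in
		 * members of struct cpu_topology not shown in this snippet.
		 */
		cpu_topology__delete(tp);
	}
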
Example #3
/**
 * test__keep_tracking - test using a dummy software event to keep tracking.
 *
 * This function implements a test that checks that tracking events continue
 * when an event is disabled but a dummy software event is not disabled.  If the
 * test passes %0 is returned, otherwise %-1 is returned.
 */
int test__keep_tracking(void)
{
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int found, err = -1;
	const char *comm;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = perf_evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(evlist, cpus, threads);

	CHECK__(parse_events(evlist, "dummy:u", NULL));
	CHECK__(parse_events(evlist, "cycles:u", NULL));

	perf_evlist__config(evlist, &opts);

	evsel = perf_evlist__first(evlist);

	evsel->attr.comm = 1;
	evsel->attr.disabled = 1;
	evsel->attr.enable_on_exec = 0;

	if (perf_evlist__open(evlist) < 0) {
		fprintf(stderr, " (not supported)");
		err = 0;
		goto out_err;
	}

	CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));

	/*
	 * First, test that a 'comm' event can be found when the event is
	 * enabled.
	 */

	perf_evlist__enable(evlist);

	comm = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));

	perf_evlist__disable(evlist);

	found = find_comm(evlist, comm);
	if (found != 1) {
		pr_debug("First time, failed to find tracking event.\n");
		goto out_err;
	}

	/*
	 * Secondly, test that a 'comm' event can be found when the event is
	 * disabled with the dummy event still enabled.
	 */

	perf_evlist__enable(evlist);

	evsel = perf_evlist__last(evlist);

	CHECK__(perf_evlist__disable_event(evlist, evsel));

	comm = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));

	perf_evlist__disable(evlist);

	found = find_comm(evlist, comm);
	if (found != 1) {
		pr_debug("Seconf time, failed to find tracking event.\n");
		goto out_err;
	}

	err = 0;

out_err:
	if (evlist) {
		perf_evlist__disable(evlist);
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}

	return err;
}
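The CHECK__() and CHECK_NOT_NULL__() helpers are defined elsewhere in the perf test sources; a plausible sketch of what they expand to, under the assumption that both jump to the local out_err label on failure (hypothetical definitions, not the actual ones):

#define CHECK__(x) do {					\
	int err__ = (x);				\
	if (err__) {					\
		pr_debug(#x " failed: %d\n", err__);	\
		goto out_err;				\
	}						\
} while (0)

#define CHECK_NOT_NULL__(x) do {			\
	if ((x) == NULL) {				\
		pr_debug(#x " failed!\n");		\
		goto out_err;				\
	}						\
} while (0)
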
Example #4
/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will then receive the events via mmap and use the PERF_SAMPLE_ID
 * generated sample.id field to map each sample back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;
	char sbuf[STRERR_BUFSIZE];

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		evsels[i] = perf_evsel__newtp("syscalls", name);
		if (IS_ERR(evsels[i])) {
			pr_debug("perf_evsel__new\n");
			goto out_delete_evlist;
		}

		evsels[i]->attr.wakeup_events = 1;
		perf_evsel__set_sample_id(evsels[i], false);

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror_r(errno, sbuf, sizeof(sbuf)));
			goto out_delete_evlist;
		}

		nr_events[i] = 0;
		expected_nr_events[i] = 1 + rand() % 127;
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_delete_evlist;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_delete_evlist;
		}

		err = -1;
		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_delete_evlist;
		}
		nr_events[evsel->idx]++;
		perf_evlist__mmap_consume(evlist, 0);
	}

	err = 0;
	evlist__for_each(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	cpus	= NULL;
	threads = NULL;
out_free_cpus:
	cpu_map__put(cpus);
out_free_threads:
	thread_map__put(threads);
	return err;
}
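The test pins itself to the first CPU of the map so that the samples it later reads from mmap index 0 are generated on one known CPU. The same affinity pattern in isolation, a minimal sketch using only standard Linux APIs (independent of perf internals):

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Pin the calling thread to one CPU; returns 0 on success, -1 on error. */
static int pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set) < 0) {
		fprintf(stderr, "sched_setaffinity(%d): %s\n",
			cpu, strerror(errno));
		return -1;
	}
	return 0;
}
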
Example #5
/*
 * This test will open software clock events (cpu-clock, task-clock)
 * then check that their frequency -> period conversion shows no
 * artifact of the period being forcibly set to 1.
 */
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
	int i, err = -1;
	volatile int tmp = 0;
	u64 total_periods = 0;
	int nr_samples = 0;
	char sbuf[STRERR_BUFSIZE];
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = clock_id,
		.sample_type = PERF_SAMPLE_PERIOD,
		.exclude_kernel = 1,
		.disabled = 1,
		.freq = 1,
	};
	struct cpu_map *cpus;
	struct thread_map *threads;

	attr.sample_freq = 500;

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		return -1;
	}

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_delete_evlist;
	}
	perf_evlist__add(evlist, evsel);

	cpus = cpu_map__dummy_new();
	threads = thread_map__new_by_tid(getpid());
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	cpus	= NULL;
	threads = NULL;

	if (perf_evlist__open(evlist)) {
		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";

		err = -errno;
		pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)),
			 knob, (u64)attr.sample_freq);
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, 128);
	if (err < 0) {
		pr_debug("failed to mmap event: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	/* collect samples */
	for (i = 0; i < NR_LOOPS; i++)
		tmp++;

	perf_evlist__disable(evlist);

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE)
			goto next_event;

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err < 0) {
			pr_debug("Error during parse sample\n");
			goto out_delete_evlist;
		}

		total_periods += sample.period;
		nr_samples++;
next_event:
		perf_evlist__mmap_consume(evlist, 0);
	}

	if ((u64) nr_samples == total_periods) {
		pr_debug("All (%d) samples have period value of 1!\n",
			 nr_samples);
		err = -1;
	}

out_free_maps:
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}
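When perf_evlist__open() fails, the hint above points at the kernel's sampling-rate limit; a minimal standalone sketch for reading that knob (plain stdio, no perf helpers):

#include <stdio.h>

/* Read /proc/sys/kernel/perf_event_max_sample_rate; returns -1 on error. */
static long read_max_sample_rate(void)
{
	const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
	FILE *fp = fopen(knob, "r");
	long val = -1;

	if (fp) {
		if (fscanf(fp, "%ld", &val) != 1)
			val = -1;
		fclose(fp);
	}
	return val;
}
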

int test__sw_clock_freq(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
	if (!ret)
		ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);

	return ret;
}
Example #6
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 500,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machine = machine__new_host();

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine, false, 500);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_put;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str, NULL);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by
				 * evlist and will be freed by the following
				 * perf_evlist__set_maps call. Take a
				 * reference to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}

			if (verbose) {
				char errbuf[512];
				perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
			}

			goto out_put;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_put;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:

	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machine__delete_threads(machine);
	machine__delete(machine);

	return err;
}
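The retry path in the open loop above hinges on a reference-counting dance: per the comment in that block, the evlist owns cpus and threads after perf_evlist__set_maps(), so they must be re-referenced before the maps are detached and the evlist destroyed. Condensed, using only calls that appear above:

	/* evlist owns cpus/threads after perf_evlist__set_maps(evlist, cpus, threads) */
	cpu_map__get(cpus);				/* take our own references ... */
	thread_map__get(threads);
	perf_evlist__set_maps(evlist, NULL, NULL);	/* ... before evlist drops its own */
	perf_evlist__delete(evlist);			/* cpus/threads survive the delete */
	/* a fresh evlist can now take the same maps via perf_evlist__set_maps() */
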

int test__code_reading(int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	}
}