Example 1
int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}
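The loop above skips CPUs at or above CPU_SETSIZE because the fixed-size cpu_set_t tops out at 1024 CPUs. The restriction the XXX comment talks about can be lifted with glibc's dynamic CPU-set API; a minimal sketch of that approach (plain glibc, not perf code, with a hypothetical pin_to_cpu() helper):

/* Sketch: pin the calling thread to one CPU using the dynamic API. */
#define _GNU_SOURCE
#include <sched.h>

static int pin_to_cpu(int cpu, int max_cpus)
{
	cpu_set_t *set = CPU_ALLOC(max_cpus);
	size_t size = CPU_ALLOC_SIZE(max_cpus);
	int err;

	if (set == NULL)
		return -1;

	CPU_ZERO_S(size, set);
	CPU_SET_S(cpu, size, set);
	err = sched_setaffinity(0, size, set);
	CPU_FREE(set);
	return err;
}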
Example 2
/**
 * test__keep_tracking - test using a dummy software event to keep tracking.
 *
 * This function implements a test that checks that tracking events continue
 * when an event is disabled but a dummy software event is not disabled.  If the
 * test passes %0 is returned, otherwise %-1 is returned.
 */
int test__keep_tracking(void)
{
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int found, err = -1;
	const char *comm;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = perf_evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(evlist, cpus, threads);

	CHECK__(parse_events(evlist, "dummy:u", NULL));
	CHECK__(parse_events(evlist, "cycles:u", NULL));

	perf_evlist__config(evlist, &opts);

	evsel = perf_evlist__first(evlist);

	evsel->attr.comm = 1;
	evsel->attr.disabled = 1;
	evsel->attr.enable_on_exec = 0;

	if (perf_evlist__open(evlist) < 0) {
		fprintf(stderr, " (not supported)");
		err = 0;
		goto out_err;
	}

	CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));

	/*
	 * First, test that a 'comm' event can be found when the event is
	 * enabled.
	 */

	perf_evlist__enable(evlist);

	comm = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));

	perf_evlist__disable(evlist);

	found = find_comm(evlist, comm);
	if (found != 1) {
		pr_debug("First time, failed to find tracking event.\n");
		goto out_err;
	}

	/*
	 * Secondly, test that a 'comm' event can be found when the event is
	 * disabled with the dummy event still enabled.
	 */

	perf_evlist__enable(evlist);

	evsel = perf_evlist__last(evlist);

	CHECK__(perf_evlist__disable_event(evlist, evsel));

	comm = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));

	perf_evlist__disable(evlist);

	found = find_comm(evlist, comm);
	if (found != 1) {
		pr_debug("Seconf time, failed to find tracking event.\n");
		goto out_err;
	}

	err = 0;

out_err:
	if (evlist) {
		perf_evlist__disable(evlist);
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}

	return err;
}
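Two helpers this test leans on are not shown: the CHECK__()/CHECK_NOT_NULL__() macros and find_comm(). A sketch consistent with how they are used here: the macros log the failing expression and jump to out_err, and find_comm() drains every ring buffer, counting PERF_RECORD_COMM events for this process that carry the given name.

/* Log the failing expression and bail out via the function's out_err label. */
#define CHECK__(x) {				\
	while ((x) < 0) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

#define CHECK_NOT_NULL__(x) {			\
	while ((x) == NULL) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

/* Count PERF_RECORD_COMM events for this process matching @comm. */
static int find_comm(struct perf_evlist *evlist, const char *comm)
{
	union perf_event *event;
	int i, found = 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			if (event->header.type == PERF_RECORD_COMM &&
			    (pid_t)event->comm.pid == getpid() &&
			    (pid_t)event->comm.tid == getpid() &&
			    strcmp(event->comm.comm, comm) == 0)
				found += 1;
			perf_evlist__mmap_consume(evlist, i);
		}
	}
	return found;
}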
Example 3
int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}
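trace_event__id() is also not shown; it maps a tracepoint name to the numeric id that attr.config needs, which ultimately means reading the tracepoint's "id" file from the tracing directory. A simplified sketch assuming the usual debugfs mount point (perf's real helper discovers the mount instead of hard-coding it; the _sketch name is hypothetical):

/* Sketch: resolve e.g. "sys_enter_open" to its tracepoint id. */
static int trace_event__id_sketch(const char *evname)
{
	char path[PATH_MAX], buf[64];
	ssize_t n;
	int fd, id = -1;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/syscalls/%s/id", evname);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		id = atoi(buf);
	}
	close(fd);
	return id;
}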
Example 4
#include <api/fs/tracing_path.h>
#include <linux/err.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "thread_map.h"
#include "evsel.h"
#include "debug.h"
#include "tests.h"

int test__openat_syscall_event(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1, fd;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}
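The excerpt breaks off here. Judging by the cleanup labels already referenced above and the per-thread variant in Example 3, the remainder presumably follows the same open/trigger/read/verify pattern; a sketch under that assumption, not the verbatim file:

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	/* Trigger a known number of openat() calls, then read the count back. */
	for (i = 0; i < nr_openat_calls; ++i) {
		fd = openat(0, "/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (perf_counts(evsel->counts, 0, 0)->val != nr_openat_calls) {
		pr_debug("expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_openat_calls, perf_counts(evsel->counts, 0, 0)->val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}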
Example 5
/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
 * sample.id field to map back to its respective perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		evsels[i] = perf_evsel__newtp("syscalls", name);
		if (IS_ERR(evsels[i])) {
			pr_debug("perf_evsel__newtp\n");
			goto out_delete_evlist;
		}

		evsels[i]->attr.wakeup_events = 1;
		perf_evsel__set_sample_id(evsels[i], false);

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_delete_evlist;
		}

		nr_events[i] = 0;
		expected_nr_events[i] = 1 + rand() % 127;
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_delete_evlist;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_delete_evlist;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_delete_evlist;
		}

		err = -1;
		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_delete_evlist;
		}
		nr_events[evsel->idx]++;
		perf_evlist__mmap_consume(evlist, 0);
	}

	err = 0;
	evlist__for_each(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	cpus	= NULL;
	threads = NULL;
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
}
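The demultiplexing step above relies on perf_evlist__id2evsel() mapping the PERF_SAMPLE_ID value carried by each record back to the evsel that produced it. Conceptually it is just an id-to-evsel lookup (perf keeps a hash table filled in when the events are opened); a toy stand-in with hypothetical names, using a linear scan instead of a hash:

struct toy_id_map {
	u64 id;				/* value the kernel stamps into sample.id */
	struct perf_evsel *evsel;	/* event that id belongs to */
};

static struct perf_evsel *toy_id2evsel(struct toy_id_map *map, int n, u64 id)
{
	int i;

	for (i = 0; i < n; i++)
		if (map[i].id == id)
			return map[i].evsel;
	return NULL;
}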
Example 6
/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events.  If the test passes
 * %0 is returned, otherwise %-1 is returned.  If TSC conversion is not
 * supported then the test passes but " (not supported)" is printed.
 */
int test__perf_time_to_tsc(void)
{
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
		.sample_time	     = true,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = perf_evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(evlist, cpus, threads);

	CHECK__(parse_events(evlist, "cycles:u", NULL));

	perf_evlist__config(evlist, &opts);

	evsel = perf_evlist__first(evlist);

	evsel->attr.comm = 1;
	evsel->attr.disabled = 1;
	evsel->attr.enable_on_exec = 0;

	CHECK__(perf_evlist__open(evlist));

	CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));

	pc = evlist->mmap[0].base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			fprintf(stderr, " (not supported)");
			err = 0;	/* pass, but still release the evlist below */
		}
		goto out_err;
	}

	perf_evlist__enable(evlist);

	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	perf_evlist__disable(evlist);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK__(perf_evsel__parse_sample(evsel, event,
								 &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK__(perf_evsel__parse_sample(evsel, event,
								 &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_evlist__mmap_consume(evlist, i);
		}
	}

	if (!comm1_time || !comm2_time)
		goto out_err;

	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc          time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

	if (test_time <= comm1_time ||
	    test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc ||
	    test_tsc >= comm2_tsc)
		goto out_err;

	err = 0;

out_err:
	if (evlist) {
		perf_evlist__disable(evlist);
		perf_evlist__delete(evlist);
	}

	return err;
}
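The conversion helpers exercised above are driven by the time_shift, time_mult and time_zero fields the kernel publishes in the mmap control page (pc above). The arithmetic follows the formula documented for perf_event_mmap_page; a sketch of both directions, assuming those three fields live in struct perf_tsc_conversion (the _sketch names are ours):

/* Sketch: TSC cycles -> perf time, per the documented mmap-page formula. */
static u64 tsc_to_perf_time_sketch(u64 cyc, struct perf_tsc_conversion *tc)
{
	u64 quot = cyc >> tc->time_shift;
	u64 rem  = cyc & (((u64)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}

/* Sketch: the inverse mapping, perf time -> TSC cycles. */
static u64 perf_time_to_tsc_sketch(u64 ns, struct perf_tsc_conversion *tc)
{
	u64 t = ns - tc->time_zero;
	u64 quot = t / tc->time_mult;
	u64 rem  = t % tc->time_mult;

	return (quot << tc->time_shift) +
	       (rem << tc->time_shift) / tc->time_mult;
}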