int test__perf_evsel__tp_sched_test(void)
{
	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
	int ret = 0;

	if (IS_ERR(evsel)) {
		pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
		return -1;
	}

	if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "next_comm", 16, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "next_pid", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "next_prio", 4, true))
		ret = -1;

	perf_evsel__delete(evsel);

	evsel = perf_evsel__newtp("sched", "sched_wakeup");

	if (IS_ERR(evsel)) {
		pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
		return -1;
	}

	if (perf_evsel__test_field(evsel, "comm", 16, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "pid", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prio", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
		ret = -1;

	return ret;
}
int test__perf_evsel__tp_sched_test(void)
{
	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
	int ret = 0;

	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		return -1;
	}

	if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "next_comm", 16, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "next_pid", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "next_prio", 4, true))
		ret = -1;

	perf_evsel__delete(evsel);

	evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);

	/* Check the allocation before poking at the tracepoint fields. */
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		return -1;
	}

	if (perf_evsel__test_field(evsel, "comm", 16, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "pid", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prio", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "success", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
		ret = -1;

	return ret;
}
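/*
 * Check that the "syscalls:sys_enter_openat" tracepoint counts match the
 * number of openat() calls made while the test is pinned to each online
 * CPU in turn.
 */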
int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf),
					       "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu],
				 perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}
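/*
 * Check that the "flags" field of a "syscalls:sys_enter_open" sample, read
 * back through the mmap ring buffer, matches the flags passed to open().
 */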
int test__syscall_open_tp_fields(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay    = true,
		.freq	     = 1,
		.mmap_pages  = 256,
		.raw_samples = true,
	};
	const char *filename = "/etc/passwd";
	int flags = O_RDONLY | O_DIRECTORY;
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, nr_polls = 0;

	if (evlist == NULL) {
		pr_debug("%s: perf_evlist__new\n", __func__);
		goto out;
	}

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
	if (evsel == NULL) {
		pr_debug("%s: perf_evsel__newtp\n", __func__);
		goto out_delete_evlist;
	}

	perf_evlist__add(evlist, evsel);

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("%s: perf_evlist__create_maps\n", __func__);
		goto out_delete_evlist;
	}

	perf_evsel__config(evsel, &opts);

	evlist->threads->map[0] = getpid();

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_maps;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_close_evlist;
	}

	perf_evlist__enable(evlist);

	/*
	 * Generate the event:
	 */
	open(filename, flags);

	while (1) {
		int before = nr_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				int tp_flags;
				struct perf_sample sample;

				++nr_events;

				if (type != PERF_RECORD_SAMPLE) {
					perf_evlist__mmap_consume(evlist, i);
					continue;
				}

				err = perf_evsel__parse_sample(evsel, event, &sample);
				if (err) {
					pr_err("Can't parse sample, err = %d\n", err);
					goto out_munmap;
				}

				tp_flags = perf_evsel__intval(evsel, &sample, "flags");

				if (flags != tp_flags) {
					pr_debug("%s: Expected flags=%#x, got %#x\n",
						 __func__, flags, tp_flags);
					goto out_munmap;
				}

				goto out_ok;
			}
		}

		if (nr_events == before)
			poll(evlist->pollfd, evlist->nr_fds, 10);

		if (++nr_polls > 5) {
			pr_debug("%s: no events!\n", __func__);
			goto out_munmap;
		}
	}
out_ok:
	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_evlist:
	perf_evlist__close(evlist);
out_delete_maps:
	perf_evlist__delete_maps(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
 * sample.id field to map back to its respective perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		evsels[i] = perf_evsel__newtp("syscalls", name);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_delete_evlist;
		}

		evsels[i]->attr.wakeup_events = 1;
		perf_evsel__set_sample_id(evsels[i], false);

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_delete_evlist;
		}

		nr_events[i] = 0;
		expected_nr_events[i] = 1 + rand() % 127;
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_delete_evlist;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_delete_evlist;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_delete_evlist;
		}

		err = -1;
		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_delete_evlist;
		}
		nr_events[evsel->idx]++;
		perf_evlist__mmap_consume(evlist, 0);
	}

	err = 0;
	evlist__for_each(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	cpus	= NULL;
	threads = NULL;
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
}
			 evsel->name, name, is_signed, should_be_signed);
		ret = -1;
	}

	if (field->size != size) {
		pr_debug("%s: \"%s\" size (%d) should be %d!\n",
			 evsel->name, name, field->size, size);
		ret = -1;
	}

	return ret;
}

int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused,
				    int subtest __maybe_unused)
{
	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
	int ret = 0;

	if (IS_ERR(evsel)) {
		pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
		return -1;
	}

	if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
		ret = -1;
	if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
		ret = -1;