/*
 * Remove and free every evsel on the evlist.
 *
 * Uses the _safe iterator because perf_evsel__delete() frees the node
 * we are standing on; list_del_init() unlinks it from the evlist first.
 *
 * FIX: the function's closing brace was missing in the original text,
 * leaving the definition unterminated.
 */
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}
}
/*
 * Validate the field layout (name, size, signedness) of the
 * sched:sched_switch and sched:sched_wakeup tracepoint events that the
 * perf sched tooling relies on.
 *
 * Returns 0 when every field matches, -1 on any mismatch or if a
 * tracepoint evsel cannot be created.
 */
int test__perf_evsel__tp_sched_test(void)
{
	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
	int ret = 0;

	if (IS_ERR(evsel)) {
		pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
		return -1;
	}

	if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "next_prio", 4, true))
		ret = -1;

	perf_evsel__delete(evsel);

	evsel = perf_evsel__newtp("sched", "sched_wakeup");

	if (IS_ERR(evsel)) {
		pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
		return -1;
	}

	if (perf_evsel__test_field(evsel, "comm", 16, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "pid", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "prio", 4, true))
		ret = -1;

	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
		ret = -1;

	/* FIX: the second evsel was leaked on the return path. */
	perf_evsel__delete(evsel);

	return ret;
}
int test__perf_evsel__tp_sched_test(void) { struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); int ret = 0; if (evsel == NULL) { pr_debug("perf_evsel__new\n"); return -1; } if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) ret = -1; if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) ret = -1; if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) ret = -1; if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true)) ret = -1; if (perf_evsel__test_field(evsel, "next_comm", 16, true)) ret = -1; if (perf_evsel__test_field(evsel, "next_pid", 4, true)) ret = -1; if (perf_evsel__test_field(evsel, "next_prio", 4, true)) ret = -1; perf_evsel__delete(evsel); evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); if (perf_evsel__test_field(evsel, "comm", 16, true)) ret = -1; if (perf_evsel__test_field(evsel, "pid", 4, true)) ret = -1; if (perf_evsel__test_field(evsel, "prio", 4, true)) ret = -1; if (perf_evsel__test_field(evsel, "success", 4, true)) ret = -1; if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) ret = -1; return ret; }
/*
 * Open the syscalls:sys_enter_openat tracepoint across all online CPUs,
 * pin the current thread to each CPU in turn while issuing a distinct
 * number of openat() calls per CPU, then read the per-cpu counts back
 * and verify each one.  Returns 0 on success, -1 on any failure.
 */
int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	/* 111 + cpu index gives every CPU a unique expected count. */
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		/* errno here is presumably set by the failed newtp — NOTE(review): confirm. */
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	/* Generate a per-cpu-distinct workload while pinned to that CPU. */
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;

		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			/* dirfd 0 is ignored: the path is absolute. */
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		/* Clear this CPU so the next iteration pins to one CPU only. */
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	/* Read back and verify: CPU at index `cpu` must show 111 + cpu calls. */
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		/* Skipped above, so no count to verify here either. */
		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu],
				 perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}
int test__open_syscall_event(void) { int err = -1, fd; struct thread_map *threads; struct perf_evsel *evsel; struct perf_event_attr attr; unsigned int nr_open_calls = 111, i; int id = trace_event__id("sys_enter_open"); if (id < 0) { pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); return -1; } threads = thread_map__new(-1, getpid(), UINT_MAX); if (threads == NULL) { pr_debug("thread_map__new\n"); return -1; } memset(&attr, 0, sizeof(attr)); attr.type = PERF_TYPE_TRACEPOINT; attr.config = id; evsel = perf_evsel__new(&attr, 0); if (evsel == NULL) { pr_debug("perf_evsel__new\n"); goto out_thread_map_delete; } if (perf_evsel__open_per_thread(evsel, threads) < 0) { pr_debug("failed to open counter: %s, " "tweak /proc/sys/kernel/perf_event_paranoid?\n", strerror(errno)); goto out_evsel_delete; } for (i = 0; i < nr_open_calls; ++i) { fd = open("/etc/passwd", O_RDONLY); close(fd); } if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) { pr_debug("perf_evsel__read_on_cpu\n"); goto out_close_fd; } if (evsel->counts->cpu[0].val != nr_open_calls) { pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", nr_open_calls, evsel->counts->cpu[0].val); goto out_close_fd; } err = 0; out_close_fd: perf_evsel__close_fd(evsel, 1, threads->nr); out_evsel_delete: perf_evsel__delete(evsel); out_thread_map_delete: thread_map__delete(threads); return err; }