void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}
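/*
 * For context: the tests below use perf_evlist__new() rather than calling
 * perf_evlist__init() directly. A minimal sketch of that allocating wrapper,
 * assuming a zalloc() helper is available (illustration of the pattern, not
 * necessarily the verbatim upstream implementation):
 */
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}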
/**
 * test__keep_tracking - test using a dummy software event to keep tracking.
 *
 * This function implements a test that checks that tracking events continue
 * when an event is disabled but a dummy software event is not disabled. If the
 * test passes %0 is returned, otherwise %-1 is returned.
 */
int test__keep_tracking(void)
{
	struct record_opts opts = {
		.mmap_pages    = UINT_MAX,
		.user_freq     = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq          = 4000,
		.target        = {
			.uses_mmap = true,
		},
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int found, err = -1;
	const char *comm;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = perf_evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(evlist, cpus, threads);

	CHECK__(parse_events(evlist, "dummy:u", NULL));
	CHECK__(parse_events(evlist, "cycles:u", NULL));

	perf_evlist__config(evlist, &opts);

	evsel = perf_evlist__first(evlist);

	evsel->attr.comm = 1;
	evsel->attr.disabled = 1;
	evsel->attr.enable_on_exec = 0;

	if (perf_evlist__open(evlist) < 0) {
		fprintf(stderr, " (not supported)");
		err = 0;
		goto out_err;
	}

	CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));

	/*
	 * First, test that a 'comm' event can be found when the event is
	 * enabled.
	 */
	perf_evlist__enable(evlist);

	comm = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));

	perf_evlist__disable(evlist);

	found = find_comm(evlist, comm);
	if (found != 1) {
		pr_debug("First time, failed to find tracking event.\n");
		goto out_err;
	}

	/*
	 * Secondly, test that a 'comm' event can be found when the event is
	 * disabled with the dummy event still enabled.
	 */
	perf_evlist__enable(evlist);

	evsel = perf_evlist__last(evlist);

	CHECK__(perf_evlist__disable_event(evlist, evsel));

	comm = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));

	perf_evlist__disable(evlist);

	found = find_comm(evlist, comm);
	if (found != 1) {
		pr_debug("Second time, failed to find tracking event.\n");
		goto out_err;
	}

	err = 0;

out_err:
	if (evlist) {
		perf_evlist__disable(evlist);
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}

	return err;
}
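/*
 * find_comm() is used above but not shown here. A plausible sketch, assuming
 * it simply scans every mmap'd ring buffer for PERF_RECORD_COMM events that
 * match the current process and the given comm string (hypothetical
 * reconstruction for illustration, not the verbatim helper):
 */
static int find_comm(struct perf_evlist *evlist, const char *comm)
{
	union perf_event *event;
	int found = 0, i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			if (event->header.type == PERF_RECORD_COMM &&
			    (pid_t)event->comm.pid == getpid() &&
			    (pid_t)event->comm.tid == getpid() &&
			    strcmp(event->comm.comm, comm) == 0)
				found += 1;
			perf_evlist__mmap_consume(evlist, i);
		}
	}

	return found;
}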
static int do_test_code_reading(bool try_kcore)
{
	struct machines machines;
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages    = UINT_MAX,
		.user_freq     = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq          = 4000,
		.target        = {
			.uses_mmap = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machines__init(&machines);
	machine = &machines.host;

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine->vmlinux_maps[MAP__FUNCTION];
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_err;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_err;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_err;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_err;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}
			pr_debug("perf_evlist__open failed\n");
			goto out_err;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_err;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_err;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__delete(cpus);
		thread_map__delete(threads);
	}
	machines__destroy_kernel_maps(&machines);
	machine__delete_threads(machine);
	machines__exit(&machines);

	return err;
}

int test__code_reading(void)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		fprintf(stderr, " (no vmlinux)");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		fprintf(stderr, " (no kcore)");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		fprintf(stderr, " (no access)");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		fprintf(stderr, " (no kernel obj)");
		return 0;
	default:
		return -1;
	};
}
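/*
 * process_events() is referenced above but not shown. A sketch of its likely
 * shape, assuming a per-event helper process_event() that does the actual
 * object code comparison (that helper name and its exact signature are
 * assumptions for illustration):
 */
static int process_events(struct machine *machine, struct perf_evlist *evlist,
			  struct state *state)
{
	union perf_event *event;
	int ret, i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			ret = process_event(machine, evlist, event, state);
			perf_evlist__mmap_consume(evlist, i);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}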
/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated
 * sample.id field to map back to its respective perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void*)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		evsels[i] = perf_evsel__newtp("syscalls", name);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_delete_evlist;
		}

		evsels[i]->attr.wakeup_events = 1;
		perf_evsel__set_sample_id(evsels[i], false);

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_delete_evlist;
		}

		nr_events[i] = 0;
		expected_nr_events[i] = 1 + rand() % 127;
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_delete_evlist;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_delete_evlist;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_delete_evlist;
		}

		err = -1;
		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_delete_evlist;
		}
		nr_events[evsel->idx]++;
		perf_evlist__mmap_consume(evlist, 0);
	}

	err = 0;
	evlist__for_each(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	cpus	= NULL;
	threads = NULL;
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
}
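/*
 * The sample.id -> evsel mapping used above relies on the hash heads
 * initialized in perf_evlist__init(). A simplified sketch of how such a
 * lookup can work, assuming each opened event registered a
 * struct perf_sample_id node keyed by its kernel-assigned sample id
 * (illustrative only; the real perf_evlist__id2evsel has extra fast paths):
 */
static struct perf_evsel *id2evsel_sketch(struct perf_evlist *evlist, u64 id)
{
	int hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	struct perf_sample_id *sid;

	hlist_for_each_entry(sid, &evlist->heads[hash], node)
		if (sid->id == id)
			return sid->evsel;

	return NULL;
}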
/*
 * This test will open software clock events (cpu-clock, task-clock)
 * then check their frequency -> period conversion has no artifact of
 * setting period to 1 forcefully.
 */
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
	int i, err = -1;
	volatile int tmp = 0;
	u64 total_periods = 0;
	int nr_samples = 0;
	char sbuf[STRERR_BUFSIZE];
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = clock_id,
		.sample_type = PERF_SAMPLE_PERIOD,
		.exclude_kernel = 1,
		.disabled = 1,
		.freq = 1,
	};
	struct cpu_map *cpus;
	struct thread_map *threads;

	attr.sample_freq = 500;

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		return -1;
	}

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_delete_evlist;
	}
	perf_evlist__add(evlist, evsel);

	cpus = cpu_map__dummy_new();
	threads = thread_map__new_by_tid(getpid());
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	cpus	= NULL;
	threads = NULL;

	if (perf_evlist__open(evlist)) {
		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";

		err = -errno;
		pr_debug("Couldn't open evlist: %s\nHint: check %s, using %"
			 PRIu64 " in this test.\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)),
			 knob, (u64)attr.sample_freq);
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, 128);
	if (err < 0) {
		pr_debug("failed to mmap event: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	/* collect samples */
	for (i = 0; i < NR_LOOPS; i++)
		tmp++;

	perf_evlist__disable(evlist);

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE)
			goto next_event;

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err < 0) {
			pr_debug("Error during parse sample\n");
			goto out_delete_evlist;
		}

		total_periods += sample.period;
		nr_samples++;
next_event:
		perf_evlist__mmap_consume(evlist, 0);
	}

	if ((u64) nr_samples == total_periods) {
		pr_debug("All (%d) samples have period value of 1!\n",
			 nr_samples);
		err = -1;
	}

out_free_maps:
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}

int test__sw_clock_freq(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
	if (!ret)
		ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);

	return ret;
}
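/*
 * The final check above exploits how frequency mode works: with attr.freq = 1
 * the kernel keeps adjusting the period so that roughly sample_freq samples
 * are taken per second, so healthy samples should carry periods well above 1.
 * A small debugging helper one could add to make that visible (hypothetical
 * addition, not part of the test above):
 */
static void report_avg_period(u64 total_periods, int nr_samples)
{
	if (nr_samples)
		pr_debug("%d samples, average period %" PRIu64 "\n",
			 nr_samples, total_periods / (u64)nr_samples);
}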
/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events. If the test passes
 * %0 is returned, otherwise %-1 is returned. If TSC conversion is not
 * supported then the test passes but " (not supported)" is printed.
 */
int test__perf_time_to_tsc(void)
{
	struct record_opts opts = {
		.mmap_pages    = UINT_MAX,
		.user_freq     = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq          = 4000,
		.target        = {
			.uses_mmap = true,
		},
		.sample_time   = true,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = perf_evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(evlist, cpus, threads);

	CHECK__(parse_events(evlist, "cycles:u", NULL));

	perf_evlist__config(evlist, &opts);

	evsel = perf_evlist__first(evlist);

	evsel->attr.comm = 1;
	evsel->attr.disabled = 1;
	evsel->attr.enable_on_exec = 0;

	CHECK__(perf_evlist__open(evlist));

	CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));

	pc = evlist->mmap[0].base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			fprintf(stderr, " (not supported)");
			return 0;
		}
		goto out_err;
	}

	perf_evlist__enable(evlist);

	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	perf_evlist__disable(evlist);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK__(perf_evsel__parse_sample(evsel, event,
								 &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK__(perf_evsel__parse_sample(evsel, event,
								 &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_evlist__mmap_consume(evlist, i);
		}
	}

	if (!comm1_time || !comm2_time)
		goto out_err;

	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc          time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

	if (test_time <= comm1_time || test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc || test_tsc >= comm2_tsc)
		goto out_err;

	err = 0;

out_err:
	if (evlist) {
		perf_evlist__disable(evlist);
		perf_evlist__delete(evlist);
	}

	return err;
}
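/*
 * The tsc_to_perf_time()/perf_time_to_tsc() conversions above are driven by
 * the time_zero/time_mult/time_shift parameters read from the
 * perf_event_mmap_page. A sketch of the forward direction, assuming the usual
 * "ns = time_zero + (cyc * time_mult) >> time_shift" relation (illustrative;
 * the multiply is split so large cycle counts do not overflow 64 bits):
 */
static u64 tsc_to_perf_time_sketch(u64 cyc, struct perf_tsc_conversion *tc)
{
	u64 quot = cyc >> tc->time_shift;
	u64 rem  = cyc & (((u64)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}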
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages    = UINT_MAX,
		.user_freq     = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq          = 500,
		.target        = {
			.uses_mmap = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machine = machine__new_host();

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						false, 500);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_put;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str, NULL);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by evlist
				 * and will be freed by the following
				 * perf_evlist__set_maps call. Get a reference
				 * to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}

			if (verbose) {
				char errbuf[512];
				perf_evlist__strerror_open(evlist, errno,
							   errbuf,
							   sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n",
					 errbuf);
			}

			goto out_put;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_put;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machine__delete_threads(machine);
	machine__delete(machine);

	return err;
}

int test__code_reading(int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	};
}
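/*
 * do_something() is referenced in both do_test_code_reading() variants but
 * not shown. Its only job is to run enough user-space instructions between
 * enable and disable to generate samples whose object code can then be read
 * back and compared. A minimal stand-in under that assumption (the real
 * helper exercises more varied code paths):
 */
static void do_something(void)
{
	volatile unsigned int sum = 0;
	unsigned int i;

	for (i = 0; i < 10000000; i++)
		sum += i;
}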