struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	if (tid == -1 && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid(tid);
}
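/*
 * Illustrative caller sketch, not from the perf sources: it shows which
 * helper each argument combination of thread_map__new() dispatches to.
 * The function name and the numeric pid/tid/uid values are made up, and
 * error handling is omitted.
 */
static void thread_map__new_usage(void)
{
	struct thread_map *map;

	map = thread_map__new(1234, -1, UINT_MAX);	/* pid set: thread_map__new_by_pid(1234) */
	thread_map__delete(map);

	map = thread_map__new(-1, -1, 1000);		/* uid set, no pid/tid: thread_map__new_by_uid(1000) */
	thread_map__delete(map);

	map = thread_map__new(-1, 5678, UINT_MAX);	/* fallback: thread_map__new_by_tid(5678) */
	thread_map__delete(map);
}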
static int do_test_code_reading(bool try_kcore)
{
	struct machines machines;
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	   = UINT_MAX,
		.user_freq	   = UINT_MAX,
		.user_interval	   = ULLONG_MAX,
		.freq		   = 4000,
		.target		   = {
			.uses_mmap = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machines__init(&machines);
	machine = &machines.host;

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine->vmlinux_maps[MAP__FUNCTION];
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process,
						machine, false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_err;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_err;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_err;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_err;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}
			pr_debug("perf_evlist__open failed\n");
			goto out_err;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_err;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_err;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__delete(cpus);
		thread_map__delete(threads);
	}
	machines__destroy_kernel_maps(&machines);
	machine__delete_threads(machine);
	machines__exit(&machines);

	return err;
}

int test__code_reading(void)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		fprintf(stderr, " (no vmlinux)");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		fprintf(stderr, " (no kcore)");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		fprintf(stderr, " (no access)");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		fprintf(stderr, " (no kernel obj)");
		return 0;
	default:
		return -1;
	}
}
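/*
 * do_something() is referenced by do_test_code_reading() but its body is
 * not part of this excerpt. For illustration only, a hypothetical stand-in
 * that burns a short burst of user-space cycles so the event generates
 * samples; the real helper in code-reading.c may differ.
 */
static volatile int code_reading_work;

static void do_something(void)
{
	int i;

	for (i = 0; i < 10000000; i++)
		code_reading_work++;
}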
/*
 * This test opens the software clock events (cpu-clock, task-clock) and
 * then checks that their frequency -> period conversion shows no artifact
 * of the period being forced to 1.
 */
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
	int i, err = -1;
	volatile int tmp = 0;
	u64 total_periods = 0;
	int nr_samples = 0;
	char sbuf[STRERR_BUFSIZE];
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.config		= clock_id,
		.sample_type	= PERF_SAMPLE_PERIOD,
		.exclude_kernel	= 1,
		.disabled	= 1,
		.freq		= 1,
	};
	struct cpu_map *cpus;
	struct thread_map *threads;

	attr.sample_freq = 500;

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		return -1;
	}

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_delete_evlist;
	}
	perf_evlist__add(evlist, evsel);

	cpus = cpu_map__dummy_new();
	threads = thread_map__new_by_tid(getpid());
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	cpus = NULL;
	threads = NULL;

	if (perf_evlist__open(evlist)) {
		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";

		err = -errno;
		pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)),
			 knob, (u64)attr.sample_freq);
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, 128);
	if (err < 0) {
		pr_debug("failed to mmap event: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	/* collect samples */
	for (i = 0; i < NR_LOOPS; i++)
		tmp++;

	perf_evlist__disable(evlist);

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE)
			goto next_event;

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err < 0) {
			pr_debug("Error during parse sample\n");
			goto out_delete_evlist;
		}

		total_periods += sample.period;
		nr_samples++;
next_event:
		perf_evlist__mmap_consume(evlist, 0);
	}

	if ((u64) nr_samples == total_periods) {
		pr_debug("All (%d) samples have period value of 1!\n",
			 nr_samples);
		err = -1;
	}

out_free_maps:
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}

int test__sw_clock_freq(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
	if (!ret)
		ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);

	return ret;
}
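/*
 * NR_LOOPS and STRERR_BUFSIZE are defined elsewhere in the perf sources;
 * the values below are assumptions so the excerpt reads standalone. Note
 * the final check above: with PERF_SAMPLE_PERIOD, if the kernel had forced
 * every period to 1, total_periods would equal nr_samples exactly.
 */
#define NR_LOOPS	10000000
#define STRERR_BUFSIZE	128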
/*
 * This test starts a workload that does nothing, then checks that the
 * kernel reports exactly one exit event for it, i.e. that the kernel
 * returns the correct number of events.
 */
int test__task_exit(void)
{
	int err = -1;
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct perf_target target = {
		.uid		= UINT_MAX,
		.uses_mmap	= true,
	};
	const char *argv[] = { "true", NULL };

	signal(SIGCHLD, sig_handler);
	signal(SIGUSR1, sig_handler);

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		return -1;
	}

	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_free_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	evlist->cpus = cpu_map__dummy_new();
	evlist->threads = thread_map__new_by_tid(-1);
	if (!evlist->cpus || !evlist->threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_maps;
	}

	err = perf_evlist__prepare_workload(evlist, &target, argv, false, true);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_maps;
	}

	evsel = perf_evlist__first(evlist);
	evsel->attr.task = 1;
	evsel->attr.sample_freq = 0;
	evsel->attr.inherit = 0;
	evsel->attr.watermark = 0;
	evsel->attr.wakeup_events = 1;
	evsel->attr.exclude_kernel = 1;

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("Couldn't open the evlist: %s\n", strerror(-err));
		goto out_delete_maps;
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_evlist;
	}

	perf_evlist__start_workload(evlist);

retry:
	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		if (event->header.type != PERF_RECORD_EXIT)
			continue;

		nr_exit++;
	}

	if (!exited || !nr_exit) {
		poll(evlist->pollfd, evlist->nr_fds, -1);
		goto retry;
	}

	if (nr_exit != 1) {
		pr_debug("received %d EXIT records\n", nr_exit);
		err = -1;
	}

	perf_evlist__munmap(evlist);
out_close_evlist:
	perf_evlist__close(evlist);
out_delete_maps:
	perf_evlist__delete_maps(evlist);
out_free_evlist:
	perf_evlist__delete(evlist);
	return err;
}
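/*
 * The exited/nr_exit globals and sig_handler() used by test__task_exit()
 * are defined elsewhere in task-exit.c. A plausible reconstruction, not
 * the verbatim source: SIGCHLD marks the workload as exited so the read
 * loop stops polling; SIGUSR1 is assumed to signal a failed exec.
 */
static int exited;
static int nr_exit;

static void sig_handler(int sig)
{
	exited = 1;

	if (sig == SIGUSR1)
		nr_exit = -1;	/* assumption: workload failed to start */
}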
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	   = UINT_MAX,
		.user_freq	   = UINT_MAX,
		.user_interval	   = ULLONG_MAX,
		.freq		   = 500,
		.target		   = {
			.uses_mmap = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machine = machine__new_host();

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process,
						machine, false, 500);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_put;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str, NULL);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by
				 * evlist and will be freed by the following
				 * perf_evlist__set_maps call. Take a
				 * reference to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}

			if (verbose) {
				char errbuf[512];
				perf_evlist__strerror_open(evlist, errno,
							   errbuf, sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
			}

			goto out_put;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_put;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machine__delete_threads(machine);
	machine__delete(machine);

	return err;
}

int test__code_reading(int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	}
}
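/*
 * The get/put pairing from the retry path above, in isolation: the evlist
 * owns one reference to the maps it was given, and perf_evlist__set_maps()
 * drops the references to the old maps. A caller that wants to reuse the
 * maps after tearing the evlist down must therefore take its own reference
 * first. Sketch only; the function name is hypothetical.
 */
static void reuse_maps_across_evlist_teardown(struct perf_evlist *evlist,
					      struct cpu_map *cpus,
					      struct thread_map *threads)
{
	cpu_map__get(cpus);			/* keep alive past set_maps */
	thread_map__get(threads);
	perf_evlist__set_maps(evlist, NULL, NULL);
	perf_evlist__delete(evlist);
	/* cpus and threads remain valid; drop them when done */
	cpu_map__put(cpus);
	thread_map__put(threads);
}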