/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
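/*
 * For reference, a minimal sketch of how a per-evsel id_pos can be
 * derived from the sample type (modelled on perf_evsel__calc_id_pos()
 * in util/evsel.c; treat the exact field ordering as an assumption
 * here, and is_pos as the analogous position for non-sample events):
 * the id is either the PERF_SAMPLE_IDENTIFIER at position 0, or
 * PERF_SAMPLE_ID after whichever of IP/TID/TIME precede it.
 */
static int calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;	/* samples carry no id at all */

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	return idx;
}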
static int perf_evsel__roundtrip_cache_name_test(void)
{
	char name[128];
	int type, op, err = 0, ret = 0, i, idx;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type/op combinations */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;
			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				err = parse_events(evlist, name);
				if (err)
					ret = err;
			}
		}
	}

	idx = 0;
	evsel = perf_evlist__first(evlist);

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type/op combinations */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;
			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (evsel->idx != idx)
					continue;

				++idx;

				if (strcmp(perf_evsel__name(evsel), name)) {
					pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
					ret = -1;
				}

				evsel = perf_evsel__next(evsel);
			}
		}
	}

	perf_evlist__delete(evlist);
	return ret;
}
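/*
 * For context: __perf_evsel__hw_cache_type_op_res_name() joins the
 * cache-type, op and result string tables into event names such as
 * "L1-dcache-load-misses", which the test above feeds back through
 * parse_events() and compares against perf_evsel__name().  A
 * hypothetical illustration of the naming scheme (the three strings
 * below are example table entries, not the tables from util/evsel.c):
 */
static void example_cache_event_name(char *name, size_t len)
{
	const char *type = "L1-dcache", *op = "load", *result = "misses";

	/* The generated names join type, op and result with '-'. */
	snprintf(name, len, "%s-%s-%s", type, op, result);
	/* name is now "L1-dcache-load-misses" */
}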
/**
 * test__keep_tracking - test using a dummy software event to keep tracking.
 *
 * This function implements a test that checks that tracking events continue
 * when an event is disabled but a dummy software event is not disabled.  If
 * the test passes %0 is returned, otherwise %-1 is returned.
 */
int test__keep_tracking(void)
{
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int found, err = -1;
	const char *comm;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = perf_evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(evlist, cpus, threads);

	CHECK__(parse_events(evlist, "dummy:u", NULL));
	CHECK__(parse_events(evlist, "cycles:u", NULL));

	perf_evlist__config(evlist, &opts);

	evsel = perf_evlist__first(evlist);

	evsel->attr.comm = 1;
	evsel->attr.disabled = 1;
	evsel->attr.enable_on_exec = 0;

	if (perf_evlist__open(evlist) < 0) {
		fprintf(stderr, " (not supported)");
		err = 0;
		goto out_err;
	}

	CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));

	/*
	 * First, test that a 'comm' event can be found when the event is
	 * enabled.
	 */

	perf_evlist__enable(evlist);

	comm = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));

	perf_evlist__disable(evlist);

	found = find_comm(evlist, comm);
	if (found != 1) {
		pr_debug("First time, failed to find tracking event.\n");
		goto out_err;
	}

	/*
	 * Secondly, test that a 'comm' event can be found when the event is
	 * disabled with the dummy event still enabled.
	 */

	perf_evlist__enable(evlist);

	evsel = perf_evlist__last(evlist);

	CHECK__(perf_evlist__disable_event(evlist, evsel));

	comm = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));

	perf_evlist__disable(evlist);

	found = find_comm(evlist, comm);
	if (found != 1) {
		pr_debug("Second time, failed to find tracking event.\n");
		goto out_err;
	}

	err = 0;

out_err:
	if (evlist) {
		perf_evlist__disable(evlist);
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}

	return err;
}
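/*
 * The test above relies on two pieces not shown in this section: the
 * CHECK__()/CHECK_NOT_NULL__() error-handling macros and the
 * find_comm() helper that scans the mmap ring buffers for a matching
 * PERF_RECORD_COMM event.  A minimal sketch of both, assuming the same
 * perf_evlist mmap-read API used elsewhere in this section (the macros
 * jump to an out_err label in the enclosing function):
 */
#define CHECK__(x) {				\
	while ((x) < 0) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

#define CHECK_NOT_NULL__(x) {			\
	while ((x) == NULL) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

static int find_comm(struct perf_evlist *evlist, const char *comm)
{
	union perf_event *event;
	int i, found;

	found = 0;
	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			if (event->header.type == PERF_RECORD_COMM &&
			    (pid_t)event->comm.pid == getpid() &&
			    (pid_t)event->comm.tid == getpid() &&
			    strcmp(event->comm.comm, comm) == 0)
				found += 1;
			perf_evlist__mmap_consume(evlist, i);
		}
	}
	return found;
}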
/*
 * Create an event group that contains both a sampled hardware
 * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event.  We then
 * wait for the hardware perf counter to overflow and generate a PMI,
 * which triggers an event read for both of the events in the group.
 *
 * Since reading Intel CQM event counters requires sending SMP IPIs, the
 * CQM pmu needs to handle the above situation gracefully, and return
 * the last read counter value to avoid triggering a WARN_ON_ONCE() in
 * smp_call_function_many() caused by sending IPIs from NMI context.
 */
int test__intel_cqm_count_nmi_context(int subtest __maybe_unused)
{
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	struct perf_event_attr pe;
	int i, fd[2], flag, ret;
	size_t mmap_len;
	void *event;
	pid_t pid;
	int err = TEST_FAIL;

	flag = perf_event_open_cloexec_flag();

	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("perf_evlist__new failed\n");
		return TEST_FAIL;
	}

	ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
	if (ret) {
		pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
		err = TEST_SKIP;
		goto out;
	}

	evsel = perf_evlist__first(evlist);
	if (!evsel) {
		pr_debug("perf_evlist__first failed\n");
		goto out;
	}

	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = PERF_TYPE_HARDWARE;
	pe.config = PERF_COUNT_HW_CPU_CYCLES;
	pe.read_format = PERF_FORMAT_GROUP;

	pe.sample_period = 128;
	pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;

	pid = spawn();

	fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
	if (fd[0] < 0) {
		pr_debug("failed to open event\n");
		goto out;
	}

	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = evsel->attr.type;
	pe.config = evsel->attr.config;

	fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
	if (fd[1] < 0) {
		pr_debug("failed to open event\n");
		goto out;
	}

	/*
	 * Pick a power-of-two number of pages + 1 for the meta-data
	 * page (struct perf_event_mmap_page).  See tools/perf/design.txt.
	 */
	mmap_len = page_size * 65;

	event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
	if (event == (void *)(-1)) {
		pr_debug("failed to mmap %d\n", errno);
		goto out;
	}

	sleep(1);

	err = TEST_OK;

	munmap(event, mmap_len);

	for (i = 0; i < 2; i++)
		close(fd[i]);

	kill(pid, SIGKILL);
	wait(NULL);
out:
	perf_evlist__delete(evlist);
	return err;
}
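/*
 * spawn() is not shown above.  A minimal sketch: fork a child that just
 * stays alive so the group counters have a target to count against; the
 * parent kills it with SIGKILL once the test is done.
 */
static pid_t spawn(void)
{
	pid_t pid;

	pid = fork();
	if (pid)
		return pid;

	/* Child: stay alive until the parent kills us. */
	while (1)
		sleep(5);
	return 0;
}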
static int do_test_code_reading(bool try_kcore)
{
	struct machines machines;
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machines__init(&machines);
	machine = &machines.host;

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine->vmlinux_maps[MAP__FUNCTION];
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_err;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_err;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_err;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_err;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}
			pr_debug("perf_evlist__open failed\n");
			goto out_err;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_err;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_err;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__delete(cpus);
		thread_map__delete(threads);
	}
	machines__destroy_kernel_maps(&machines);
	machine__delete_threads(machine);
	machines__exit(&machines);

	return err;
}

int test__code_reading(void)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		fprintf(stderr, " (no vmlinux)");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		fprintf(stderr, " (no kcore)");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		fprintf(stderr, " (no access)");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		fprintf(stderr, " (no kernel obj)");
		return 0;
	default:
		return -1;
	}
}
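/*
 * do_something() and process_events() are defined elsewhere in the test
 * file: the former executes some user-space code so that samples land
 * in object code the test can then re-read and compare, the latter
 * walks the recorded events.  A hypothetical stand-in for do_something()
 * that merely gives the sampled 'cycles' event work to count (the real
 * helper exercises a wider variety of code paths):
 */
static void do_something(void)
{
	volatile unsigned long sum = 0;
	unsigned long i;

	for (i = 0; i < (1UL << 22); i++)
		sum += i;	/* busy work for the sampling event */
}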
int test__PERF_RECORD(int subtest __maybe_unused)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages   = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct perf_evlist *evlist = perf_evlist__new_dummy();
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = perf_evlist__new_default();

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor.  In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it.  This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TID);
	perf_evsel__set_sample_bit(evsel, TIME);
	perf_evlist__config(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64
						 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;
				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_evlist__mmap_consume(evlist, i);
			}
		}

		/*
		 * We don't use poll here because at least at 3.1 times the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			perf_evlist__poll(evlist, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}
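/*
 * sched__get_first_possible_cpu() is not shown above.  A minimal sketch
 * based on sched_getaffinity(): find the first CPU the workload may run
 * on and clear every other bit, so the subsequent sched_setaffinity()
 * pins the workload to exactly that CPU (the real helper also retries
 * with progressively larger masks; that is omitted here):
 */
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1;

	CPU_ZERO(maskp);
	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1)
		return -1;

	for (i = 0; i < CPU_SETSIZE; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;		/* keep the first possible CPU */
			else
				CPU_CLR(i, maskp);	/* drop the rest */
		}
	}

	return cpu;
}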
/*
 * This test starts a workload that does nothing, then checks that the
 * kernel reports exactly one exit event for it, i.e. that the kernel
 * returns the correct number of events.
 */
int test__task_exit(void)
{
	int err = -1;
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct perf_target target = {
		.uid		= UINT_MAX,
		.uses_mmap	= true,
	};
	const char *argv[] = { "true", NULL };

	signal(SIGCHLD, sig_handler);
	signal(SIGUSR1, sig_handler);

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		return -1;
	}

	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_free_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor.  In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	evlist->cpus = cpu_map__dummy_new();
	evlist->threads = thread_map__new_by_tid(-1);
	if (!evlist->cpus || !evlist->threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_maps;
	}

	err = perf_evlist__prepare_workload(evlist, &target, argv, false, true);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_maps;
	}

	evsel = perf_evlist__first(evlist);
	evsel->attr.task = 1;
	evsel->attr.sample_freq = 0;
	evsel->attr.inherit = 0;
	evsel->attr.watermark = 0;
	evsel->attr.wakeup_events = 1;
	evsel->attr.exclude_kernel = 1;

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("Couldn't open the evlist: %s\n", strerror(-err));
		goto out_delete_maps;
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_evlist;
	}

	perf_evlist__start_workload(evlist);

retry:
	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		if (event->header.type != PERF_RECORD_EXIT)
			continue;

		nr_exit++;
	}

	if (!exited || !nr_exit) {
		poll(evlist->pollfd, evlist->nr_fds, -1);
		goto retry;
	}

	if (nr_exit != 1) {
		pr_debug("received %d EXIT records\n", nr_exit);
		err = -1;
	}

	perf_evlist__munmap(evlist);
out_close_evlist:
	perf_evlist__close(evlist);
out_delete_maps:
	perf_evlist__delete_maps(evlist);
out_free_evlist:
	perf_evlist__delete(evlist);
	return err;
}
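/*
 * The exited/nr_exit flags and sig_handler() used above live at file
 * scope in the test.  A minimal sketch: SIGCHLD tells us the workload
 * has terminated, so the retry loop can stop polling.
 */
static int exited;
static int nr_exit;

static void sig_handler(int sig __maybe_unused)
{
	exited = 1;
}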
/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events.  If the test passes
 * %0 is returned, otherwise %-1 is returned.  If TSC conversion is not
 * supported then the test passes but " (not supported)" is printed.
 */
int test__perf_time_to_tsc(void)
{
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
		.sample_time	     = true,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = perf_evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(evlist, cpus, threads);

	CHECK__(parse_events(evlist, "cycles:u", NULL));

	perf_evlist__config(evlist, &opts);

	evsel = perf_evlist__first(evlist);

	evsel->attr.comm = 1;
	evsel->attr.disabled = 1;
	evsel->attr.enable_on_exec = 0;

	CHECK__(perf_evlist__open(evlist));

	CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));

	pc = evlist->mmap[0].base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			fprintf(stderr, " (not supported)");
			return 0;
		}
		goto out_err;
	}

	perf_evlist__enable(evlist);

	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	perf_evlist__disable(evlist);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK__(perf_evsel__parse_sample(evsel, event,
								 &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK__(perf_evsel__parse_sample(evsel, event,
								 &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_evlist__mmap_consume(evlist, i);
		}
	}

	if (!comm1_time || !comm2_time)
		goto out_err;

	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

	if (test_time <= comm1_time || test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc || test_tsc >= comm2_tsc)
		goto out_err;

	err = 0;

out_err:
	if (evlist) {
		perf_evlist__disable(evlist);
		perf_evlist__delete(evlist);
	}

	return err;
}
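/*
 * rdtsc() comes from the tool's TSC helpers.  A minimal x86 sketch,
 * assuming this test only runs on x86 (which is where the perf<->TSC
 * conversion applies): read the 64-bit time-stamp counter, which the
 * CPU returns split across EDX:EAX.
 */
static u64 rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((u64)high) << 32;
}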
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 500,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machine = machine__new_host();

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						false, 500);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_put;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str, NULL);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by evlist
				 * and will be freed by the following
				 * perf_evlist__set_maps call.  Get a reference
				 * to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}

			if (verbose) {
				char errbuf[512];
				perf_evlist__strerror_open(evlist, errno, errbuf,
							   sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
			}

			goto out_put;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_put;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machine__delete_threads(machine);
	machine__delete(machine);

	return err;
}

int test__code_reading(int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	}
}
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		set_term_quiet_input(&save);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
	case 'd':
		prompt_integer(&top->delay_secs, "Enter display delay");
		if (top->delay_secs < 1)
			top->delay_secs = 1;
		break;
	case 'e':
		prompt_integer(&top->print_entries, "Enter display entries (lines)");
		if (top->print_entries == 0) {
			perf_top__resize(top);
			signal(SIGWINCH, winch_sig);
		} else {
			signal(SIGWINCH, SIG_DFL);
		}
		break;
	case 'E':
		if (top->evlist->nr_entries > 1) {
			/* Select 0 as the default event: */
			int counter = 0;

			fprintf(stderr, "\nAvailable events:");

			evlist__for_each_entry(top->evlist, top->sym_evsel)
				fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx,
					perf_evsel__name(top->sym_evsel));

			prompt_integer(&counter, "Enter details event counter");

			if (counter >= top->evlist->nr_entries) {
				top->sym_evsel = perf_evlist__first(top->evlist);
				fprintf(stderr, "Sorry, no such event, using %s.\n",
					perf_evsel__name(top->sym_evsel));
				sleep(1);
				break;
			}

			evlist__for_each_entry(top->evlist, top->sym_evsel)
				if (top->sym_evsel->idx == counter)
					break;
		} else
			top->sym_evsel = perf_evlist__first(top->evlist);
		break;
	case 'f':
		prompt_integer(&top->count_filter, "Enter display event count filter");
		break;
	case 'F':
		prompt_percent(&top->annotation_opts.min_pcnt,
			       "Enter details display event filter (percent)");
		break;
	case 'K':
		top->hide_kernel_symbols = !top->hide_kernel_symbols;
		break;
	case 'q':
	case 'Q':
		printf("exiting.\n");
		if (top->dump_symtab)
			perf_session__fprintf_dsos(top->session, stderr);
		ret = false;
		break;
	case 's':
		perf_top__prompt_symbol(top, "Enter details symbol");
		break;
	case 'S':
		if (!top->sym_filter_entry)
			break;
		else {
			struct hist_entry *syme = top->sym_filter_entry;

			top->sym_filter_entry = NULL;
			__zero_source_counters(syme);
		}
		break;
	case 'U':
		top->hide_user_symbols = !top->hide_user_symbols;
		break;
	case 'z':
		top->zero = !top->zero;
		break;
	default:
		break;
	}

	return ret;
}
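/*
 * perf_top__key_mapped() is the companion predicate that decides
 * whether a keypress is handled at all.  A minimal sketch consistent
 * with the cases handled above ('E' only makes sense when more than
 * one event is being monitored):
 */
static bool perf_top__key_mapped(struct perf_top *top, int c)
{
	switch (c) {
	case 'd':
	case 'e':
	case 'f':
	case 'z':
	case 'q':
	case 'Q':
	case 'K':
	case 'U':
	case 'F':
	case 's':
	case 'S':
		return true;
	case 'E':
		return top->evlist->nr_entries > 1;
	default:
		break;
	}

	return false;
}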