int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps);
	RB_CLEAR_NODE(&machine->rb_node);
	INIT_LIST_HEAD(&machine->user_dsos);
	INIT_LIST_HEAD(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->kmaps.machine = machine;
	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, 0,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	return 0;
}
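/*
 * A minimal sketch of the usual setup/teardown around machine__init(),
 * using only helpers that appear elsewhere in this section
 * (machines__init, machine__delete_threads, machines__exit).
 * machines__init() sets up machines.host through machine__init() with
 * HOST_KERNEL_ID, so the host machine needs no explicit init call.
 * The function name below is hypothetical, for illustration only.
 */
static int example_host_machine_lifecycle(void)
{
	struct machines machines;
	struct machine *machine;

	machines__init(&machines);	/* initializes machines.host */
	machine = &machines.host;

	/* ... synthesize and process events against 'machine' ... */

	machine__delete_threads(machine);	/* drop the thread tree */
	machines__exit(&machines);		/* free per-machine state */
	return 0;
}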
static int process_sample_event(struct machine *machine,
				struct perf_evlist *evlist,
				union perf_event *event, struct state *state)
{
	struct perf_sample sample;
	struct thread *thread;
	u8 cpumode;
	int ret;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		return -1;
	}

	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	ret = read_object_code(sample.ip, READLEN, cpumode, thread, state);

	thread__put(thread);

	return ret;
}
static int mmap_events(synth_cb synth)
{
	struct machines machines;
	struct machine *machine;
	int err, i;

	/*
	 * threads_create() does not return before all threads are
	 * spawned and have created their memory maps.
	 *
	 * They loop until threads_destroy() is called, so we can
	 * safely run the synthesizing function.
	 */
	TEST_ASSERT_VAL("failed to create threads", !threads_create());

	machines__init(&machines);
	machine = &machines.host;

	dump_trace = verbose > 1 ? 1 : 0;
	err = synth(machine);
	dump_trace = 0;

	TEST_ASSERT_VAL("failed to destroy threads", !threads_destroy());
	TEST_ASSERT_VAL("failed to synthesize maps", !err);

	/*
	 * All data is synthesized; try to find the map for each
	 * thread object.
	 */
	for (i = 0; i < THREADS; i++) {
		struct thread_data *td = &threads[i];
		struct addr_location al;
		struct thread *thread;

		thread = machine__findnew_thread(machine, getpid(), td->tid);

		pr_debug("looking for map %p\n", td->map);

		thread__find_addr_map(thread,
				      PERF_RECORD_MISC_USER, MAP__FUNCTION,
				      (unsigned long) (td->map + 1), &al);

		thread__put(thread);

		if (!al.map) {
			pr_debug("failed, couldn't find map\n");
			err = -1;
			break;
		}

		pr_debug("map %p, addr %" PRIx64 "\n", al.map, al.map->start);
	}

	machine__delete_threads(machine);
	machines__exit(&machines);
	return err;
}
static size_t syscall_arg__scnprintf_pid(char *bf, size_t size,
					 struct syscall_arg *arg)
{
	int pid = arg->val;
	struct trace *trace = arg->trace;
	size_t printed = scnprintf(bf, size, "%d", pid);
	struct thread *thread = machine__findnew_thread(trace->host, pid, pid);

	if (thread != NULL) {
		if (!thread->comm_set)
			thread__set_comm_from_proc(thread);

		if (thread->comm_set)
			printed += scnprintf(bf + printed, size - printed,
					     " (%s)", thread__comm_str(thread));
		thread__put(thread);
	}

	return printed;
}
static int process_sample_event(struct machine *machine,
				struct perf_evlist *evlist,
				union perf_event *event, struct state *state)
{
	struct perf_sample sample;
	struct thread *thread;
	int ret;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		return -1;
	}

	ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);

	thread__put(thread);

	return ret;
}
	struct map_groups *other_mg;

	/*
	 * This test creates 2 process abstractions (struct thread)
	 * with several threads and checks that they properly share
	 * and maintain map groups info (struct map_groups).
	 *
	 * thread group (pid: 0, tids: 0, 1, 2, 3)
	 * other  group (pid: 4, tids: 4, 5)
	 */

	machines__init(&machines);
	machine = &machines.host;

	/* create process with 4 threads */
	leader = machine__findnew_thread(machine, 0, 0);
	t1     = machine__findnew_thread(machine, 0, 1);
	t2     = machine__findnew_thread(machine, 0, 2);
	t3     = machine__findnew_thread(machine, 0, 3);

	/* and create 1 separate process, without a thread leader */
	other  = machine__findnew_thread(machine, 4, 5);

	TEST_ASSERT_VAL("failed to create threads",
			leader && t1 && t2 && t3 && other);

	mg = leader->mg;
	TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 4);

	/* test that the map groups pointer is shared */
	TEST_ASSERT_VAL("map groups don't match", mg == t1->mg);
int test__thread_mg_share(void)
{
	struct machines machines;
	struct machine *machine;

	/* thread group */
	struct thread *leader;
	struct thread *t1, *t2, *t3;
	struct map_groups *mg;

	/* other process */
	struct thread *other, *other_leader;
	struct map_groups *other_mg;

	/*
	 * This test creates 2 process abstractions (struct thread)
	 * with several threads and checks that they properly share
	 * and maintain map groups info (struct map_groups).
	 *
	 * thread group (pid: 0, tids: 0, 1, 2, 3)
	 * other  group (pid: 4, tids: 4, 5)
	 */

	machines__init(&machines);
	machine = &machines.host;

	/* create process with 4 threads */
	leader = machine__findnew_thread(machine, 0, 0);
	t1     = machine__findnew_thread(machine, 0, 1);
	t2     = machine__findnew_thread(machine, 0, 2);
	t3     = machine__findnew_thread(machine, 0, 3);

	/* and create 1 separate process, without a thread leader */
	other  = machine__findnew_thread(machine, 4, 5);

	TEST_ASSERT_VAL("failed to create threads",
			leader && t1 && t2 && t3 && other);

	mg = leader->mg;
	TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 4);

	/* test that the map groups pointer is shared */
	TEST_ASSERT_VAL("map groups don't match", mg == t1->mg);
	TEST_ASSERT_VAL("map groups don't match", mg == t2->mg);
	TEST_ASSERT_VAL("map groups don't match", mg == t3->mg);

	/*
	 * Verify the other leader was created by the previous call.
	 * It should have shared map groups with no change in
	 * refcnt.
	 */
	other_leader = machine__find_thread(machine, 4, 4);
	TEST_ASSERT_VAL("failed to find other leader", other_leader);

	other_mg = other->mg;
	TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 2);

	TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);

	/* release thread group */
	thread__delete(leader);
	TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 3);

	thread__delete(t1);
	TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 2);

	thread__delete(t2);
	TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 1);

	thread__delete(t3);

	/* release other group */
	thread__delete(other_leader);
	TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 1);

	thread__delete(other);

	/*
	 * Cannot call machine__delete_threads(machine) now,
	 * because we've already released all the threads.
	 */
	machines__exit(&machines);
	return 0;
}
static int do_test_code_reading(bool try_kcore)
{
	struct machines machines;
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	   = UINT_MAX,
		.user_freq	   = UINT_MAX,
		.user_interval	   = ULLONG_MAX,
		.freq		   = 4000,
		.target		   = {
			.uses_mmap = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machines__init(&machines);
	machine = &machines.host;

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine->vmlinux_maps[MAP__FUNCTION];
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_err;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_err;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_err;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_err;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}
			pr_debug("perf_evlist__open failed\n");
			goto out_err;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_err;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_err;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__delete(cpus);
		thread_map__delete(threads);
	}
	machines__destroy_kernel_maps(&machines);
	machine__delete_threads(machine);
	machines__exit(&machines);

	return err;
}

int test__code_reading(void)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		fprintf(stderr, " (no vmlinux)");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		fprintf(stderr, " (no kcore)");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		fprintf(stderr, " (no access)");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		fprintf(stderr, " (no kernel obj)");
		return 0;
	default:
		return -1;
	};
}
#include "session.h" #include "tool.h" #include "header.h" #include "vdso.h" static bool no_buildid_cache; int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __maybe_unused, struct machine *machine) { struct addr_location al; struct thread *thread = machine__findnew_thread(machine, sample->pid, sample->tid); if (thread == NULL) { pr_err("problem processing %d event, skipping it.\n", event->header.type); return -1; } thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, &al); if (al.map != NULL) al.map->dso->hit = 1; thread__put(thread); return 0; }
	}

	s_alloc->alloc_cpu = -1;

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (evsel->handler.func != NULL) {
		tracepoint_handler f = evsel->handler.func;
		return f(evsel, sample);
	}

	return 0;
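/*
 * A hedged sketch, not taken from the tree above: the thread lookup
 * from the preceding handler rewritten in the two-argument
 * machine__findnew_thread()/thread__put() style used by the other
 * callers in this section. The pid/tid come from struct perf_sample,
 * as in build_id__mark_dso_hit(); the function name is hypothetical.
 */
static int resolve_sample_thread(struct machine *machine,
				 struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n",
		    thread__comm_str(thread), sample->tid);

	thread__put(thread);	/* drop the reference taken by findnew */
	return 0;
}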
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	   = UINT_MAX,
		.user_freq	   = UINT_MAX,
		.user_interval	   = ULLONG_MAX,
		.freq		   = 500,
		.target		   = {
			.uses_mmap = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machine = machine__new_host();

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map, NULL);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						false, 500);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_put;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		if (excl_kernel)
			str = "cycles:u";
		else
			str = "cycles";
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str, NULL);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		perf_evlist__config(evlist, &opts);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by
				 * evlist and will be freed by the following
				 * perf_evlist__set_maps call. Get a reference
				 * to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}

			if (verbose) {
				char errbuf[512];
				perf_evlist__strerror_open(evlist, errno,
							   errbuf, sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
			}

			goto out_put;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_put;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machine__delete_threads(machine);
	machine__delete(machine);

	return err;
}

int test__code_reading(int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	};
}