static int bp_accounting(int wp_cnt, int share) { struct perf_event_attr attr, attr_mod, attr_new; int i, fd[wp_cnt], fd_wp, ret; for (i = 0; i < wp_cnt; i++) { fd[i] = wp_event((void *)&the_var, &attr); TEST_ASSERT_VAL("failed to create wp\n", fd[i] != -1); pr_debug("wp %d created\n", i); } attr_mod = attr; attr_mod.bp_type = HW_BREAKPOINT_X; attr_mod.bp_addr = (unsigned long) test_function; ret = ioctl(fd[0], PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr_mod); TEST_ASSERT_VAL("failed to modify wp\n", ret == 0); pr_debug("wp 0 modified to bp\n"); if (!share) { fd_wp = wp_event((void *)&the_var, &attr_new); TEST_ASSERT_VAL("failed to create max wp\n", fd_wp != -1); pr_debug("wp max created\n"); } for (i = 0; i < wp_cnt; i++) close(fd[i]); return 0; }
/*
 * Spawn the worker threads, run the given synthesis callback @synth over
 * the host machine, then verify that every worker thread's mmap'ed region
 * can be resolved back to a map via thread__find_addr_map().
 *
 * Returns 0 on success, -1 on failure (via TEST_ASSERT_VAL or a missed map).
 */
static int mmap_events(synth_cb synth)
{
	struct machines machines;
	struct machine *machine;
	int err, i;

	/*
	 * The threads_create will not return before all threads
	 * are spawned and all created memory map.
	 *
	 * They will loop until threads_destroy is called, so we
	 * can safely run synthesizing function.
	 */
	TEST_ASSERT_VAL("failed to create threads", !threads_create());

	machines__init(&machines);
	machine = &machines.host;

	/* Only dump synthesized events at high verbosity. */
	dump_trace = verbose > 1 ? 1 : 0;
	err = synth(machine);
	dump_trace = 0;

	/*
	 * Tear the threads down before checking err so the workers are
	 * not left spinning if synthesis failed.
	 */
	TEST_ASSERT_VAL("failed to destroy threads", !threads_destroy());
	TEST_ASSERT_VAL("failed to synthesize maps", !err);

	/*
	 * All data is synthesized, try to find map for each
	 * thread object.
	 */
	for (i = 0; i < THREADS; i++) {
		struct thread_data *td = &threads[i];
		struct addr_location al;
		struct thread *thread;

		thread = machine__findnew_thread(machine, getpid(), td->tid);

		pr_debug("looking for map %p\n", td->map);

		/* td->map + 1: probe an address inside the mapped page. */
		thread__find_addr_map(thread,
				      PERF_RECORD_MISC_USER, MAP__FUNCTION,
				      (unsigned long) (td->map + 1), &al);

		thread__put(thread);

		if (!al.map) {
			pr_debug("failed, couldn't find map\n");
			err = -1;
			break;
		}

		pr_debug("map %p, addr %" PRIx64 "\n", al.map, al.map->start);
	}

	machine__delete_threads(machine);
	machines__exit(&machines);
	return err;
}
/*
 * This test creates 'THREADS' number of threads (including
 * main thread) and each thread creates memory map.
 *
 * When threads are created, we synthesize them with both
 * (separate tests):
 *   perf_event__synthesize_thread_map (process based)
 *   perf_event__synthesize_threads    (global)
 *
 * We test we can find all memory maps via:
 *   thread__find_addr_map
 *
 * by using all thread objects.
 */
int test__mmap_thread_lookup(int subtest __maybe_unused)
{
	/* perf_event__synthesize_threads synthesize */
	TEST_ASSERT_VAL("failed with synthesizing all",
			!mmap_events(synth_all));

	/* perf_event__synthesize_thread_map synthesize */
	TEST_ASSERT_VAL("failed with synthesizing process",
			!mmap_events(synth_process));

	return 0;
}
/*
 * Check that is_kernel_module(@path, @cpumode) agrees with @expect,
 * comparing both as normalized booleans.
 */
static int test_is_kernel_module(const char *path, int cpumode, bool expect)
{
	bool got = !!is_kernel_module(path, cpumode);

	TEST_ASSERT_VAL("is_kernel_module", got == !!expect);
	pr_debug("%s (cpumode: %d) - is_kernel_module: %s\n",
		 path, cpumode, expect ? "true" : "false");
	return 0;
}
/*
 * Parse @path with __kmod_path__parse() and verify every field of the
 * resulting struct kmod_path against the expected @kmod/@comp flags and
 * @name/@ext strings (NULL means "must not be set").
 */
static int test(const char *path, bool alloc_name, bool alloc_ext,
		bool kmod, bool comp, const char *name, const char *ext)
{
	struct kmod_path m;

	memset(&m, 0, sizeof(m));

	TEST_ASSERT_VAL("kmod_path__parse",
			!__kmod_path__parse(&m, path, alloc_name, alloc_ext));

	pr_debug("%s - alloc name %d, alloc ext %d, kmod %d, comp %d, name '%s', ext '%s'\n",
		 path, alloc_name, alloc_ext, m.kmod, m.comp, m.name, m.ext);

	TEST_ASSERT_VAL("wrong kmod", m.kmod == kmod);
	TEST_ASSERT_VAL("wrong comp", m.comp == comp);

	/* When an expected string is given it must match; otherwise unset. */
	TEST_ASSERT_VAL("wrong ext",
			ext ? (m.ext && !strcmp(ext, m.ext)) : !m.ext);
	TEST_ASSERT_VAL("wrong name",
			name ? (m.name && !strcmp(name, m.name)) : !m.name);

	free(m.name);
	free(m.ext);
	return 0;
}
int test__thread_map(int subtest __maybe_unused) { struct thread_map *map; /* test map on current pid */ map = thread_map__new_by_pid(getpid()); TEST_ASSERT_VAL("failed to alloc map", map); thread_map__read_comms(map); TEST_ASSERT_VAL("wrong nr", map->nr == 1); TEST_ASSERT_VAL("wrong pid", thread_map__pid(map, 0) == getpid()); TEST_ASSERT_VAL("wrong comm", thread_map__comm(map, 0) && !strcmp(thread_map__comm(map, 0), "perf")); TEST_ASSERT_VAL("wrong refcnt", atomic_read(&map->refcnt) == 1); thread_map__put(map); /* test dummy pid */ map = thread_map__new_dummy(); TEST_ASSERT_VAL("failed to alloc map", map); thread_map__read_comms(map); TEST_ASSERT_VAL("wrong nr", map->nr == 1); TEST_ASSERT_VAL("wrong pid", thread_map__pid(map, 0) == -1); TEST_ASSERT_VAL("wrong comm", thread_map__comm(map, 0) && !strcmp(thread_map__comm(map, 0), "dummy")); TEST_ASSERT_VAL("wrong refcnt", atomic_read(&map->refcnt) == 1); thread_map__put(map); return 0; }
/*
 * Probe whether watchpoints and breakpoints share the same slot pool:
 * fill all @wp_cnt watchpoint slots, then try to create @bp_cnt
 * breakpoints on top.  Returns 1 (shared) if any breakpoint creation
 * fails, 0 (separate pools) if all succeed.  All events are closed
 * before returning.
 */
static int detect_share(int wp_cnt, int bp_cnt)
{
	struct perf_event_attr attr;
	int fd[wp_cnt + bp_cnt];
	int i, ret;

	for (i = 0; i < wp_cnt; i++) {
		fd[i] = wp_event((void *)&the_var, &attr);
		TEST_ASSERT_VAL("failed to create wp\n", fd[i] != -1);
	}

	for (; i < (wp_cnt + bp_cnt); i++) {
		fd[i] = bp_event((void *)test_function, &attr);
		if (fd[i] == -1)
			break;
	}

	/* Shared pool iff we could not open every breakpoint. */
	ret = (i != (wp_cnt + bp_cnt));

	for (i--; i >= 0; i--)
		close(fd[i]);

	return ret;
}
int test__thread_mg_share(void)
{
	struct machines machines;
	struct machine *machine;

	/* thread group */
	struct thread *leader;
	struct thread *t1, *t2, *t3;
	struct map_groups *mg;

	/* other process */
	struct thread *other, *other_leader;
	struct map_groups *other_mg;

	/*
	 * This test create 2 processes abstractions (struct thread)
	 * with several threads and checks they properly share and
	 * maintain map groups info (struct map_groups).
	 *
	 * thread group (pid: 0, tids: 0, 1, 2, 3)
	 * other  group (pid: 4, tids: 4, 5)
	 */
	machines__init(&machines);
	machine = &machines.host;

	/* create process with 4 threads */
	leader = machine__findnew_thread(machine, 0, 0);
	t1     = machine__findnew_thread(machine, 0, 1);
	t2     = machine__findnew_thread(machine, 0, 2);
	t3     = machine__findnew_thread(machine, 0, 3);

	/* and create 1 separated process, without thread leader */
	other  = machine__findnew_thread(machine, 4, 5);

	TEST_ASSERT_VAL("failed to create threads",
			leader && t1 && t2 && t3 && other);

	/* leader + t1 + t2 + t3 each hold one reference. */
	mg = leader->mg;
	TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 4);

	/* test the map groups pointer is shared */
	TEST_ASSERT_VAL("map groups don't match", mg == t1->mg);
	TEST_ASSERT_VAL("map groups don't match", mg == t2->mg);
	TEST_ASSERT_VAL("map groups don't match", mg == t3->mg);

	/*
	 * Verify the other leader was created by previous call.
	 * It should have shared map groups with no change in
	 * refcnt.
	 */
	other_leader = machine__find_thread(machine, 4, 4);
	TEST_ASSERT_VAL("failed to find other leader", other_leader);

	/* other + the auto-created other_leader: refcnt 2. */
	other_mg = other->mg;
	TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 2);

	TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);

	/*
	 * Release thread group one by one, verifying the shared
	 * map_groups refcount drops accordingly each time.
	 */
	thread__delete(leader);
	TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 3);

	thread__delete(t1);
	TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 2);

	thread__delete(t2);
	TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 1);

	/* Last reference: mg is freed here, no check possible after. */
	thread__delete(t3);

	/* release other group */
	thread__delete(other_leader);
	TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 1);

	thread__delete(other);

	/*
	 * Cannot call machine__delete_threads(machine) now,
	 * because we've already released all the threads.
	 */
	machines__exit(&machines);
	return 0;
}