/*
 * Run one perf built-in command and tidy up stdio afterwards.
 *
 * Returns the command's status masked to 8 bits when it failed, or the
 * outcome of flushing/closing stdout when the command succeeded (write
 * errors on pipes/sockets are deliberately ignored).
 */
static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
{
	int status;
	struct stat st;
	const char *prefix;
	char sbuf[STRERR_BUFSIZE];

	prefix = NULL;
	/* RUN_SETUP would establish a prefix; currently a no-op remnant. */
	if (p->option & RUN_SETUP)
		prefix = NULL; /* setup_perf_directory(); */

	/* Resolve browser/pager choices lazily from per-command config. */
	if (use_browser == -1)
		use_browser = check_browser_config(p->cmd);

	if (use_pager == -1 && p->option & RUN_SETUP)
		use_pager = check_pager_config(p->cmd);
	if (use_pager == -1 && p->option & USE_PAGER)
		use_pager = 1;
	commit_pager_choice();

	perf_env__set_cmdline(&perf_env, argc, argv);
	status = p->fn(argc, argv, prefix);
	perf_config__exit();
	exit_browser(status);
	perf_env__exit(&perf_env);
	bpf__clear();

	if (status)
		return status & 0xff;

	/* Somebody closed stdout? */
	if (fstat(fileno(stdout), &st))
		return 0;
	/* Ignore write errors for pipes and sockets.. */
	if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode))
		return 0;

	status = 1;
	/* Check for ENOSPC and EIO errors.. */
	if (fflush(stdout)) {
		fprintf(stderr, "write failure on standard output: %s",
			str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out;
	}
	if (ferror(stdout)) {
		fprintf(stderr, "unknown write failure on standard output");
		goto out;
	}
	if (fclose(stdout)) {
		fprintf(stderr, "close failed on standard output: %s",
			str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out;
	}
	status = 0;
out:
	return status;
}
/*
 * Reap the child @pid and translate its exit state into a perf error
 * code: 0 for a clean exit, negative ERR_RUN_COMMAND_* otherwise, or
 * the child's (negated) non-zero exit code.
 */
static int wait_or_whine(pid_t pid)
{
	char sbuf[STRERR_BUFSIZE];

	while (1) {
		int wstatus;
		pid_t reaped = waitpid(pid, &wstatus, 0);

		if (reaped < 0) {
			/* Retry when the wait was merely interrupted. */
			if (errno == EINTR)
				continue;
			fprintf(stderr, " Error: waitpid failed (%s)",
				str_error_r(errno, sbuf, sizeof(sbuf)));
			return -ERR_RUN_COMMAND_WAITPID;
		}

		if (reaped != pid)
			return -ERR_RUN_COMMAND_WAITPID_WRONG_PID;
		if (WIFSIGNALED(wstatus))
			return -ERR_RUN_COMMAND_WAITPID_SIGNAL;
		if (!WIFEXITED(wstatus))
			return -ERR_RUN_COMMAND_WAITPID_NOEXIT;

		/* 127 is the shell's "command not found" convention. */
		int rc = WEXITSTATUS(wstatus);

		if (rc == 127)
			return -ERR_RUN_COMMAND_EXEC;
		return rc ? -rc : 0;
	}
}
/*
 * Format a human-friendly diagnostic into @buf for a failed attempt to
 * open the tracepoint @sys/@name (errno value in @err).  A NULL @name
 * is rendered as "*".  Always returns 0.
 */
int tracing_path__strerror_open_tp(int err, char *buf, size_t size,
				   const char *sys, const char *name)
{
	char sbuf[128];
	char filename[PATH_MAX];

	snprintf(filename, PATH_MAX, "%s/%s", sys, name ?: "*");

	switch (err) {
	case ENOENT:
		/*
		 * We will get here if we can't find the tracepoint, but one of
		 * debugfs or tracefs is configured, which means you probably
		 * want some tracepoint which wasn't compiled in your kernel.
		 * - jirka
		 */
		if (debugfs__configured() || tracefs__configured()) {
			/* sdt markers */
			if (!strncmp(filename, "sdt_", 4)) {
				snprintf(buf, size,
					"Error:\tFile %s/%s not found.\n"
					"Hint:\tSDT event cannot be directly recorded on.\n"
					"\tPlease first use 'perf probe %s:%s' before recording it.\n",
					tracing_events_path, filename, sys, name);
			} else {
				snprintf(buf, size,
					 "Error:\tFile %s/%s not found.\n"
					 "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
					 tracing_events_path, filename);
			}
			break;
		}
		/* Neither debugfs nor tracefs is usable at all. */
		snprintf(buf, size, "%s",
			 "Error:\tUnable to find debugfs/tracefs\n"
			 "Hint:\tWas your kernel compiled with debugfs/tracefs support?\n"
			 "Hint:\tIs the debugfs/tracefs filesystem mounted?\n"
			 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
		break;
	case EACCES: {
		snprintf(buf, size,
			 "Error:\tNo permissions to read %s/%s\n"
			 "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
			 tracing_events_path, filename, tracing_path_mount());
	}
		break;
	default:
		/* Anything else: fall back to the plain strerror text. */
		snprintf(buf, size, "%s", str_error_r(err, sbuf, sizeof(sbuf)));
		break;
	}

	return 0;
}
/*
 * Run a single test (optionally a subtest), normally in a forked child
 * so a crashing test cannot take down the harness.  With --dont-fork
 * the test runs in-process instead.  Returns the test's result code,
 * or -1 when the fork failed or the child was killed by a signal.
 */
static int run_test(struct test *test, int subtest)
{
	int status, err = -1, child = dont_fork ? 0 : fork();
	char sbuf[STRERR_BUFSIZE];

	if (child < 0) {
		pr_err("failed to fork test: %s\n",
			str_error_r(errno, sbuf, sizeof(sbuf)));
		return -1;
	}

	if (!child) {
		/* Child (or in-process when dont_fork): actually run the test. */
		if (!dont_fork) {
			pr_debug("test child forked, pid %d\n", getpid());
			if (!verbose) {
				int nullfd = open("/dev/null", O_WRONLY);

				if (nullfd >= 0) {
					/*
					 * Silence the test: stdout goes to
					 * /dev/null, then stderr is pointed
					 * at the (new) stdout, i.e. also
					 * /dev/null.
					 */
					close(STDERR_FILENO);
					close(STDOUT_FILENO);

					dup2(nullfd, STDOUT_FILENO);
					dup2(STDOUT_FILENO, STDERR_FILENO);
					close(nullfd);
				}
			} else {
				/* Verbose mode: dump a stack trace on crashes. */
				signal(SIGSEGV, sighandler_dump_stack);
				signal(SIGFPE, sighandler_dump_stack);
			}
		}

		err = test->func(subtest);
		if (!dont_fork)
			exit(err);
	}

	if (!dont_fork) {
		/* Parent: reap the child and decode its exit status. */
		wait(&status);

		if (WIFEXITED(status)) {
			/* Sign-extend so negative error codes survive exit(). */
			err = (signed char)WEXITSTATUS(status);
			pr_debug("test child finished with %d\n", err);
		} else if (WIFSIGNALED(status)) {
			err = -1;
			pr_debug("test child interrupted\n");
		}
	}

	return err;
}
/*
 * Open (create/truncate) the perf data file for writing, after backing
 * up any pre-existing file.  Returns the file descriptor, or a
 * negative value on failure (an error message is printed).
 */
static int open_file_write(struct perf_data *data)
{
	char errbuf[STRERR_BUFSIZE];
	int fd;

	if (check_backup(data))
		return -1;

	fd = open(data->file.path,
		  O_CREAT | O_RDWR | O_TRUNC | O_CLOEXEC,
		  S_IRUSR | S_IWUSR);
	if (fd < 0)
		pr_err("failed to open %s : %s\n", data->file.path,
		       str_error_r(errno, errbuf, sizeof(errbuf)));

	return fd;
}
/*
 * Return the number of configured CPUs, caching the answer in a
 * function-local static.  Falls back to 128 (with a warning) when
 * sysconf() cannot tell us.
 */
int llvm__get_nr_cpus(void)
{
	static int nr_cpus_avail = 0;
	char serr[STRERR_BUFSIZE];

	if (nr_cpus_avail <= 0) {
		nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF);
		if (nr_cpus_avail <= 0) {
			pr_err(
			"WARNING:\tunable to get available CPUs in this system: %s\n"
			"        \tUse 128 instead.\n",
				str_error_r(errno, serr, sizeof(serr)));
			nr_cpus_avail = 128;
		}
	}

	return nr_cpus_avail;
}
/*
 * Try to display @page with emacsclient's WoMan mode.  Only returns
 * (after warning) if the version check fails or the exec fails; on
 * success execlp() replaces this process.
 */
static void exec_woman_emacs(const char *path, const char *page)
{
	char sbuf[STRERR_BUFSIZE];

	if (check_emacsclient_version())
		return;

	/* This works only with emacsclient version >= 22. */
	char *man_page;

	if (!path)
		path = "emacsclient";

	if (asprintf(&man_page, "(woman \"%s\")", page) > 0) {
		execlp(path, "emacsclient", "-e", man_page, NULL);
		/* Only reached when execlp() failed. */
		free(man_page);
	}
	warning("failed to exec '%s': %s", path,
		str_error_r(errno, sbuf, sizeof(sbuf)));
}
/*
 * mmap the evlist (overwrite mode), run the workload between
 * enable/disable, then count the captured sample and comm events.
 * Returns TEST_FAIL on mmap failure, otherwise the result of
 * count_samples().
 */
static int do_test(struct perf_evlist *evlist, int mmap_pages,
		   int *sample_count, int *comm_count)
{
	char errbuf[STRERR_BUFSIZE];
	int rc = perf_evlist__mmap(evlist, mmap_pages, true);

	if (rc < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, errbuf, sizeof(errbuf)));
		return TEST_FAIL;
	}

	perf_evlist__enable(evlist);
	testcase();
	perf_evlist__disable(evlist);

	rc = count_samples(evlist, sample_count, comm_count);
	perf_evlist__munmap(evlist);
	return rc;
}
/*
 * Open the perf data file for reading and validate it: it must be
 * stat-able, owned by us or root (unless --force), and non-empty.
 * On success stores the file size in data->size and returns the fd;
 * returns -errno when the open fails, -1 on any other rejection.
 */
static int open_file_read(struct perf_data *data)
{
	char errstr[STRERR_BUFSIZE];
	struct stat stbuf;
	int fd = open(data->file.path, O_RDONLY);

	if (fd < 0) {
		int saved = errno;

		pr_err("failed to open %s: %s", data->file.path,
			str_error_r(saved, errstr, sizeof(errstr)));
		/* Common case: user ran a report before ever recording. */
		if (saved == ENOENT && !strcmp(data->file.path, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -saved;
	}

	if (fstat(fd, &stbuf) < 0)
		goto out_close;

	if (!data->force && stbuf.st_uid && (stbuf.st_uid != geteuid())) {
		pr_err("File %s not owned by current user or root (use -f to override)\n",
		       data->file.path);
		goto out_close;
	}

	if (!stbuf.st_size) {
		pr_info("zero-sized data (%s), nothing to do!\n",
			data->file.path);
		goto out_close;
	}

	data->size = stbuf.st_size;
	return fd;

out_close:
	close(fd);
	return -1;
}
/*
 * 'perf buildid-cache' entry point: add, remove, purge or update files
 * in the build-id cache (optionally within a target namespace), add a
 * kcore image, or report build-ids missing from a perf.data file.
 */
int cmd_buildid_cache(int argc, const char **argv)
{
	struct strlist *list;
	struct str_node *pos;
	int ret = 0;
	int ns_id = -1;
	bool force = false;
	char const *add_name_list_str = NULL,
		   *remove_name_list_str = NULL,
		   *purge_name_list_str = NULL,
		   *missing_filename = NULL,
		   *update_name_list_str = NULL,
		   *kcore_filename = NULL;
	char sbuf[STRERR_BUFSIZE];
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
	};
	struct perf_session *session = NULL;
	struct nsinfo *nsi = NULL;

	const struct option buildid_cache_options[] = {
	OPT_STRING('a', "add", &add_name_list_str, "file list", "file(s) to add"),
	OPT_STRING('k', "kcore", &kcore_filename, "file", "kcore file to add"),
	OPT_STRING('r', "remove", &remove_name_list_str, "file list", "file(s) to remove"),
	OPT_STRING('p', "purge", &purge_name_list_str, "path list", "path(s) to remove (remove old caches too)"),
	OPT_STRING('M', "missing", &missing_filename, "file", "to find missing build ids in the cache"),
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
	OPT_STRING('u', "update", &update_name_list_str, "file list", "file(s) to update"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_INTEGER(0, "target-ns", &ns_id, "target pid for namespace context"),
	OPT_END()
	};
	const char * const buildid_cache_usage[] = {
		"perf buildid-cache [<options>]",
		NULL
	};

	argc = parse_options(argc, argv, buildid_cache_options,
			     buildid_cache_usage, 0);

	/* At least one action option must be given, and no stray args. */
	if (argc || (!add_name_list_str && !kcore_filename &&
		     !remove_name_list_str && !purge_name_list_str &&
		     !missing_filename && !update_name_list_str))
		usage_with_options(buildid_cache_usage, buildid_cache_options);

	if (ns_id > 0)
		nsi = nsinfo__new(ns_id);

	if (missing_filename) {
		/* --missing needs a session over the given perf.data file. */
		data.file.path = missing_filename;
		data.force = force;

		session = perf_session__new(&data, false, NULL);
		if (session == NULL)
			return -1;
	}

	if (symbol__init(session ? &session->header.env : NULL) < 0)
		goto out;

	setup_pager();

	if (add_name_list_str) {
		list = strlist__new(add_name_list_str, NULL);
		if (list) {
			strlist__for_each_entry(pos, list)
				if (build_id_cache__add_file(pos->s, nsi)) {
					/* Duplicates are fine, just note them. */
					if (errno == EEXIST) {
						pr_debug("%s already in the cache\n",
							 pos->s);
						continue;
					}
					pr_warning("Couldn't add %s: %s\n",
						   pos->s,
						   str_error_r(errno, sbuf, sizeof(sbuf)));
				}

			strlist__delete(list);
		}
	}

	if (remove_name_list_str) {
		list = strlist__new(remove_name_list_str, NULL);
		if (list) {
			strlist__for_each_entry(pos, list)
				if (build_id_cache__remove_file(pos->s, nsi)) {
					/* Absence is fine, just note it. */
					if (errno == ENOENT) {
						pr_debug("%s wasn't in the cache\n",
							 pos->s);
						continue;
					}
					pr_warning("Couldn't remove %s: %s\n",
						   pos->s,
						   str_error_r(errno, sbuf, sizeof(sbuf)));
				}

			strlist__delete(list);
		}
	}

	if (purge_name_list_str) {
		list = strlist__new(purge_name_list_str, NULL);
		if (list) {
			strlist__for_each_entry(pos, list)
				if (build_id_cache__purge_path(pos->s, nsi)) {
					if (errno == ENOENT) {
						pr_debug("%s wasn't in the cache\n",
							 pos->s);
						continue;
					}
					pr_warning("Couldn't remove %s: %s\n",
						   pos->s,
						   str_error_r(errno, sbuf, sizeof(sbuf)));
				}

			strlist__delete(list);
		}
	}

	if (missing_filename)
		ret = build_id_cache__fprintf_missing(session, stdout);

	if (update_name_list_str) {
		list = strlist__new(update_name_list_str, NULL);
		if (list) {
			strlist__for_each_entry(pos, list)
				if (build_id_cache__update_file(pos->s, nsi)) {
					if (errno == ENOENT) {
						pr_debug("%s wasn't in the cache\n",
							 pos->s);
						continue;
					}
					pr_warning("Couldn't update %s: %s\n",
						   pos->s,
						   str_error_r(errno, sbuf, sizeof(sbuf)));
				}

			strlist__delete(list);
		}
	}

	if (kcore_filename && build_id_cache__add_kcore(kcore_filename, force))
		pr_warning("Couldn't add %s\n", kcore_filename);

out:
	perf_session__delete(session);
	nsinfo__zput(nsi);

	return ret;
}
/*
 * Load the events selected by the BPF object @obj into a fresh evlist,
 * run @func between enable/disable, and check that the number of
 * PERF_RECORD_SAMPLE events equals @expect (i.e. the BPF filter let
 * through exactly the expected calls).  Returns TEST_OK or TEST_FAIL.
 */
static int do_test(struct bpf_object *obj, int (*func)(void),
		   int expect)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq = 0,
		.mmap_pages = 256,
		.default_interval = 1,
	};

	char pid[16];
	char sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	int i, ret = TEST_FAIL, err = 0, count = 0;

	struct parse_events_evlist parse_evlist;
	struct parse_events_error parse_error;

	bzero(&parse_error, sizeof(parse_error));
	bzero(&parse_evlist, sizeof(parse_evlist));
	parse_evlist.error = &parse_error;
	INIT_LIST_HEAD(&parse_evlist.list);

	err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj, NULL);
	if (err || list_empty(&parse_evlist.list)) {
		pr_debug("Failed to add events selected by BPF\n");
		return TEST_FAIL;
	}

	/* Monitor only this process. */
	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';
	opts.target.tid = opts.target.pid = pid;

	/* Instead of perf_evlist__new_default, don't add default events */
	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/* Hand the BPF-selected events over to the evlist. */
	perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
	evlist->nr_groups = parse_evlist.nr_groups;

	perf_evlist__config(evlist, &opts, NULL);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/* Run the workload with counting enabled only around it. */
	perf_evlist__enable(evlist);
	(*func)();
	perf_evlist__disable(evlist);

	/* Count the samples that made it past the BPF filter. */
	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;

			if (type == PERF_RECORD_SAMPLE)
				count ++;
		}
	}

	if (count != expect) {
		pr_debug("BPF filter result incorrect\n");
		goto out_delete_evlist;
	}

	ret = TEST_OK;

out_delete_evlist:
	perf_evlist__delete(evlist);
	return ret;
}
/*
 * End-to-end smoke test of the perf_event record path: fork a "sleep 1"
 * workload pinned to one CPU, mmap its events, and validate that every
 * event carries the expected cpu/pid/tid, that time never goes
 * backwards, and that the expected COMM and MMAP records (command,
 * libc, ld, [vdso]) show up.  Returns 0 on success, -1 otherwise.
 */
int test__PERF_RECORD(int subtest __maybe_unused)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages   = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct perf_evlist *evlist = perf_evlist__new_dummy();
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = perf_evlist__new_default();

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TID);
	perf_evsel__set_sample_bit(evsel, TIME);
	perf_evlist__config(evlist, &opts, NULL);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				/* Timestamps must be monotonically non-decreasing. */
				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				/* Everything should come from the pinned CPU... */
				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				/* ...and from the workload process only. */
				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					/* Tick off the mappings we expect to see. */
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;
				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_evlist__mmap_consume(evlist, i);
			}
		}

		/*
		 * We don't use poll here because at least at 3.1 times the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
		 */
		/* The "&& false" keeps the poll disabled per the note above. */
		if (total_events == before && false)
			perf_evlist__poll(evlist, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}
/*
 * perf entry point: initialize libsubcmd/config/paths, handle the
 * "perf-xxxx" hardlink convention and the special "trace" alias, strip
 * global flags, then dispatch the remaining argv to the sub-command.
 * Only returns on error (run_argv execs or runs the command).
 */
int main(int argc, const char **argv)
{
	int err;
	const char *cmd;
	char sbuf[STRERR_BUFSIZE];
	int value;

	/* libsubcmd init */
	exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
	pager_init(PERF_PAGER_ENVIRONMENT);

	/* The page_size is placed in util object. */
	page_size = sysconf(_SC_PAGE_SIZE);
	cache_line_size(&cacheline_size);

	if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
		sysctl_perf_event_max_stack = value;

	if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
		sysctl_perf_event_max_contexts_per_stack = value;

	cmd = extract_argv0_path(argv[0]);
	if (!cmd)
		cmd = "perf-help";

	srandom(time(NULL));

	perf_config__init();
	err = perf_config(perf_default_config, NULL);
	if (err)
		return err;
	set_buildid_dir(NULL);

	/* get debugfs/tracefs mount point from /proc/mounts */
	tracing_path_mount();

	/*
	 * "perf-xxxx" is the same as "perf xxxx", but we obviously:
	 *
	 *  - cannot take flags in between the "perf" and the "xxxx".
	 *  - cannot execute it externally (since it would just do
	 *    the same thing over again)
	 *
	 * So we just directly call the internal command handler. If that one
	 * fails to handle this, then maybe we just run a renamed perf binary
	 * that contains a dash in its name. To handle this scenario, we just
	 * fall through and ignore the "xxxx" part of the command string.
	 */
	if (strstarts(cmd, "perf-")) {
		cmd += 5;
		argv[0] = cmd;
		handle_internal_command(argc, argv);
		/*
		 * If the command is handled, the above function does not
		 * return undo changes and fall through in such a case.
		 */
		cmd -= 5;
		argv[0] = cmd;
	}
	if (strstarts(cmd, "trace")) {
#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
		setup_path();
		argv[0] = "trace";
		return cmd_trace(argc, argv);
#else
		fprintf(stderr,
			"trace command not available: missing audit-libs devel package at build time.\n");
		goto out;
#endif
	}
	/* Look for flags.. */
	argv++;
	argc--;
	handle_options(&argv, &argc, NULL);
	commit_pager_choice();

	if (argc > 0) {
		/* Allow "--cmd" spellings; strip the leading dashes. */
		if (strstarts(argv[0], "--"))
			argv[0] += 2;
	} else {
		/* The user didn't specify a command; give them help */
		printf("\n usage: %s\n\n", perf_usage_string);
		list_common_cmds_help();
		printf("\n %s\n\n", perf_more_info_string);
		goto out;
	}
	cmd = argv[0];

	test_attr__init();

	/*
	 * We use PATH to find perf commands, but we prepend some higher
	 * precedence paths: the "--exec-path" option, the PERF_EXEC_PATH
	 * environment, and the $(perfexecdir) from the Makefile at build
	 * time.
	 */
	setup_path();
	/*
	 * Block SIGWINCH notifications so that the thread that wants it can
	 * unblock and get syscalls like select interrupted instead of waiting
	 * forever while the signal goes to some other non interested thread.
	 */
	pthread__block_sigwinch();

	perf_debug_setup();

	while (1) {
		static int done_help;

		run_argv(&argc, &argv);

		/* run_argv() returning with errno != ENOENT is fatal. */
		if (errno != ENOENT)
			break;

		/* Unknown command: suggest/resolve once, then give up. */
		if (!done_help) {
			cmd = argv[0] = help_unknown_cmd(cmd);
			done_help = 1;
		} else
			break;
	}

	fprintf(stderr, "Failed to run command '%s': %s\n",
		cmd, str_error_r(errno, sbuf, sizeof(sbuf)));
out:
	return 1;
}
/*
 * This test will open software clock events (cpu-clock, task-clock)
 * then check their frequency -> period conversion has no artifact of
 * setting period to 1 forcefully.
 */
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
	int i, err = -1;
	volatile int tmp = 0;
	u64 total_periods = 0;
	int nr_samples = 0;
	char sbuf[STRERR_BUFSIZE];
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = clock_id,
		.sample_type = PERF_SAMPLE_PERIOD,
		.exclude_kernel = 1,
		.disabled = 1,
		.freq = 1,
	};
	struct cpu_map *cpus;
	struct thread_map *threads;

	attr.sample_freq = 500;

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		return -1;
	}

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_delete_evlist;
	}
	perf_evlist__add(evlist, evsel);

	/* Monitor just this thread, on any CPU. */
	cpus = cpu_map__dummy_new();
	threads = thread_map__new_by_tid(getpid());
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	/* Ownership moved to the evlist; don't double-put below. */
	cpus = NULL;
	threads = NULL;

	if (perf_evlist__open(evlist)) {
		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";

		err = -errno;
		pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)),
			 knob, (u64)attr.sample_freq);
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, 128);
	if (err < 0) {
		pr_debug("failed to mmap event: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	/* collect samples */
	for (i = 0; i < NR_LOOPS; i++)
		tmp++;

	perf_evlist__disable(evlist);

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE)
			goto next_event;

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err < 0) {
			pr_debug("Error during parse sample\n");
			goto out_delete_evlist;
		}

		total_periods += sample.period;
		nr_samples++;
next_event:
		perf_evlist__mmap_consume(evlist, 0);
	}

	/*
	 * All periods being exactly 1 means freq->period conversion
	 * degenerated — exactly the artifact this test guards against.
	 */
	if ((u64) nr_samples == total_periods) {
		pr_debug("All (%d) samples have period value of 1!\n",
			 nr_samples);
		err = -1;
	}

out_free_maps:
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}

/* Run the check for both software clocks; stop at the first failure. */
int test__sw_clock_freq(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
	if (!ret)
		ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);

	return ret;
}
/*
 * Fork and exec the command described by @cmd, wiring its stdio to
 * freshly created pipes, /dev/null, or caller-provided fds as
 * requested.  Returns 0 on success or a negative ERR_RUN_COMMAND_*
 * code; the child exits with 127 when exec fails.
 */
int start_command(struct child_process *cmd)
{
	int need_in, need_out, need_err;
	int fdin[2], fdout[2], fderr[2];
	char sbuf[STRERR_BUFSIZE];

	/*
	 * In case of errors we must keep the promise to close FDs
	 * that have been passed in via ->in and ->out.
	 */
	need_in = !cmd->no_stdin && cmd->in < 0;
	if (need_in) {
		if (pipe(fdin) < 0) {
			if (cmd->out > 0)
				close(cmd->out);
			return -ERR_RUN_COMMAND_PIPE;
		}
		cmd->in = fdin[1];
	}

	need_out = !cmd->no_stdout
		&& !cmd->stdout_to_stderr
		&& cmd->out < 0;
	if (need_out) {
		if (pipe(fdout) < 0) {
			if (need_in)
				close_pair(fdin);
			else if (cmd->in)
				close(cmd->in);
			return -ERR_RUN_COMMAND_PIPE;
		}
		cmd->out = fdout[0];
	}

	need_err = !cmd->no_stderr && cmd->err < 0;
	if (need_err) {
		if (pipe(fderr) < 0) {
			if (need_in)
				close_pair(fdin);
			else if (cmd->in)
				close(cmd->in);
			if (need_out)
				close_pair(fdout);
			else if (cmd->out)
				close(cmd->out);
			return -ERR_RUN_COMMAND_PIPE;
		}
		cmd->err = fderr[0];
	}

	/* Flush all stdio buffers so the child doesn't inherit them. */
	fflush(NULL);
	cmd->pid = fork();
	if (!cmd->pid) {
		/* Child: point fds 0/1/2 at the requested endpoints. */
		if (cmd->no_stdin)
			dup_devnull(0);
		else if (need_in) {
			dup2(fdin[0], 0);
			close_pair(fdin);
		} else if (cmd->in) {
			dup2(cmd->in, 0);
			close(cmd->in);
		}

		if (cmd->no_stderr)
			dup_devnull(2);
		else if (need_err) {
			dup2(fderr[1], 2);
			close_pair(fderr);
		}

		if (cmd->no_stdout)
			dup_devnull(1);
		else if (cmd->stdout_to_stderr)
			dup2(2, 1);
		else if (need_out) {
			dup2(fdout[1], 1);
			close_pair(fdout);
		} else if (cmd->out > 1) {
			dup2(cmd->out, 1);
			close(cmd->out);
		}

		if (cmd->dir && chdir(cmd->dir))
			die("exec %s: cd to %s failed (%s)", cmd->argv[0],
			    cmd->dir, str_error_r(errno, sbuf, sizeof(sbuf)));
		if (cmd->env) {
			/* Entries with '=' are set, bare names are unset. */
			for (; *cmd->env; cmd->env++) {
				if (strchr(*cmd->env, '='))
					putenv((char*)*cmd->env);
				else
					unsetenv(*cmd->env);
			}
		}
		if (cmd->preexec_cb)
			cmd->preexec_cb();
		if (cmd->exec_cmd) {
			execv_cmd(cmd->argv);
		} else {
			execvp(cmd->argv[0], (char *const*) cmd->argv);
		}
		/* Exec failed: 127 mimics the shell's "command not found". */
		exit(127);
	}

	if (cmd->pid < 0) {
		/* Fork failed: undo everything we set up above. */
		int err = errno;
		if (need_in)
			close_pair(fdin);
		else if (cmd->in)
			close(cmd->in);
		if (need_out)
			close_pair(fdout);
		else if (cmd->out)
			close(cmd->out);
		if (need_err)
			close_pair(fderr);
		return err == ENOENT ?
			-ERR_RUN_COMMAND_EXEC :
			-ERR_RUN_COMMAND_FORK;
	}

	/* Parent: close the child's ends of the pipes. */
	if (need_in)
		close(fdin[0]);
	else if (cmd->in)
		close(cmd->in);

	if (need_out)
		close(fdout[1]);
	else if (cmd->out)
		close(cmd->out);

	if (need_err)
		close(fderr[1]);

	return 0;
}
/*
 * Test that an overwritable ("backward") ring buffer captures the
 * expected number of sample and comm events, both with a large
 * (256-page) and a minimal (1-page) buffer.
 *
 * Returns TEST_OK, TEST_SKIP (e.g. the tracepoint could not be parsed,
 * likely needing root) or TEST_FAIL.
 */
int test__backward_ring_buffer(int subtest __maybe_unused)
{
	int ret = TEST_SKIP, err, sample_count = 0, comm_count = 0;
	char pid[16], sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	struct perf_evsel *evsel __maybe_unused;
	struct parse_events_error parse_error;
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq	      = 0,
		.mmap_pages   = 256,
		.default_interval = 1,
	};

	/* Monitor only this process. */
	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';
	opts.target.tid = opts.target.pid = pid;

	evlist = perf_evlist__new();
	if (!evlist) {
		/* Fixed typo: was "No ehough memory to create evlist\n". */
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	bzero(&parse_error, sizeof(parse_error));
	/*
	 * Set backward bit, ring buffer should be writing from end. Record
	 * it in aux evlist
	 */
	err = parse_events(evlist, "syscalls:sys_enter_prctl/overwrite/", &parse_error);
	if (err) {
		pr_debug("Failed to parse tracepoint event, try use root\n");
		ret = TEST_SKIP;
		goto out_delete_evlist;
	}

	perf_evlist__config(evlist, &opts, NULL);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	ret = TEST_FAIL;
	/* First pass: generous 256-page ring buffer. */
	err = do_test(evlist, opts.mmap_pages, &sample_count,
		      &comm_count);
	if (err != TEST_OK)
		goto out_delete_evlist;

	if ((sample_count != NR_ITERS) || (comm_count != NR_ITERS)) {
		pr_err("Unexpected counter: sample_count=%d, comm_count=%d\n",
		       sample_count, comm_count);
		goto out_delete_evlist;
	}

	/* Second pass: smallest possible (1-page) ring buffer. */
	err = do_test(evlist, 1, &sample_count, &comm_count);
	if (err != TEST_OK)
		goto out_delete_evlist;

	ret = TEST_OK;
out_delete_evlist:
	perf_evlist__delete(evlist);
	return ret;
}
/*
 * Open the syscalls:sys_enter_openat tracepoint across all CPUs, then
 * perform a CPU-dependent number of openat() calls while pinned to
 * each CPU in turn, and verify the per-CPU counters match exactly.
 * Returns 0 on success, -1 on any failure.
 */
int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		/* Produce a user-friendly explanation of the failure. */
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		/* Distinct per-CPU call count so mixups are detectable. */
		unsigned int ncalls = nr_openat_calls + cpu;

		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu],
				 perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}
/*
 * Run @cmd via popen() and slurp its entire stdout into a malloc'd
 * buffer.  On success returns 0 and hands the buffer and its size to
 * the caller via @p_buf/@p_read_sz (either may be NULL if the caller
 * doesn't care; the caller owns and must free the buffer).  Returns a
 * negative errno-style code on failure, with outputs zeroed.
 */
static int read_from_pipe(const char *cmd, void **p_buf, size_t *p_read_sz)
{
	int err = 0;
	void *buf = NULL;
	FILE *file = NULL;
	size_t read_sz = 0, buf_sz = 0;
	char serr[STRERR_BUFSIZE];

	file = popen(cmd, "r");
	if (!file) {
		pr_err("ERROR: unable to popen cmd: %s\n",
		       str_error_r(errno, serr, sizeof(serr)));
		return -EINVAL;
	}

	while (!feof(file) && !ferror(file)) {
		/*
		 * Make buf_sz always have one byte extra space so we
		 * can put '\0' there.
		 */
		if (buf_sz - read_sz < READ_SIZE + 1) {
			void *new_buf;

			buf_sz = read_sz + READ_SIZE + 1;
			new_buf = realloc(buf, buf_sz);

			if (!new_buf) {
				pr_err("ERROR: failed to realloc memory\n");
				err = -ENOMEM;
				goto errout;
			}

			buf = new_buf;
		}
		read_sz += fread(buf + read_sz, 1, READ_SIZE, file);
	}

	if (buf_sz - read_sz < 1) {
		pr_err("ERROR: internal error\n");
		err = -EINVAL;
		goto errout;
	}

	if (ferror(file)) {
		pr_err("ERROR: error occurred when reading from pipe: %s\n",
		       str_error_r(errno, serr, sizeof(serr)));
		err = -EIO;
		goto errout;
	}

	/*
	 * NOTE(review): pclose() returns -1 on failure, and
	 * WEXITSTATUS(-1) is meaningless — confirm callers only rely on
	 * "zero vs non-zero" here.
	 */
	err = WEXITSTATUS(pclose(file));
	file = NULL;
	if (err) {
		err = -EINVAL;
		goto errout;
	}

	/*
	 * If buf is string, give it terminal '\0' to make our life
	 * easier. If buf is not string, that '\0' is out of space
	 * indicated by read_sz so caller won't even notice it.
	 */
	((char *)buf)[read_sz] = '\0';

	if (!p_buf)
		free(buf);
	else
		*p_buf = buf;

	if (p_read_sz)
		*p_read_sz = read_sz;
	return 0;

errout:
	if (file)
		pclose(file);
	free(buf);
	if (p_buf)
		*p_buf = NULL;
	if (p_read_sz)
		*p_read_sz = 0;
	return err;
}