static int __event(bool is_x, void *addr, struct perf_event_attr *attr)
{
	int fd;

	memset(attr, 0, sizeof(struct perf_event_attr));
	attr->type = PERF_TYPE_BREAKPOINT;
	attr->size = sizeof(struct perf_event_attr);

	attr->config = 0;
	attr->bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W;
	attr->bp_addr = (unsigned long) addr;
	attr->bp_len = sizeof(long);

	attr->sample_period = 1;
	attr->sample_type = PERF_SAMPLE_IP;

	attr->exclude_kernel = 1;
	attr->exclude_hv = 1;

	fd = sys_perf_event_open(attr, -1, 0, -1,
				 perf_event_open_cloexec_flag());
	if (fd < 0) {
		pr_debug("failed opening event %llx\n", attr->config);
		return TEST_FAIL;
	}

	return fd;
}
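/*
 * Usage sketch (an assumption, not part of the original file): how a
 * caller might use __event() above to arm an execution breakpoint on a
 * routine and read back how often it fired. test_function() and
 * count_breakpoint_hits() are hypothetical names; the ioctl()/read()
 * calls are the standard perf_event counter interface.
 */
static int __attribute__((noinline)) test_function(void)
{
	return 0;
}

static int count_breakpoint_hits(void)
{
	struct perf_event_attr attr;
	long long hits = 0;
	int fd, i;

	/* execution breakpoint on the entry of test_function() */
	fd = __event(true, (void *)test_function, &attr);
	if (fd < 0)
		return TEST_FAIL;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (i = 0; i < 100; i++)
		test_function();

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	/* plain read() returns the 64-bit counter value */
	if (read(fd, &hits, sizeof(hits)) != sizeof(hits))
		hits = -1;

	close(fd);

	pr_debug("breakpoint fired %lld times\n", hits);
	return hits == 100 ? TEST_OK : TEST_FAIL;
}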
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE] = "";
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       strerror_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}
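/*
 * Usage sketch (an assumption, not part of the original file): the fd
 * returned by self_open_counters() counts PERF_COUNT_SW_TASK_CLOCK for
 * the calling thread, so the CPU time it consumes, in nanoseconds, can
 * be sampled around a work section with two plain read()s. burn_cpu()
 * is a hypothetical placeholder for that work.
 */
static u64 measure_task_clock(struct perf_sched *sched, unsigned long task_nr)
{
	u64 before = 0, after = 0;
	int fd = self_open_counters(sched, task_nr);

	if (read(fd, &before, sizeof(before)) != sizeof(before))
		before = 0;

	burn_cpu();	/* the work we want to account for */

	if (read(fd, &after, sizeof(after)) != sizeof(after))
		after = before;

	close(fd);
	return after - before;	/* task-clock delta in ns */
}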
/*
 * Create an event group that contains both a sampled hardware
 * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
 * wait for the hardware perf counter to overflow and generate a PMI,
 * which triggers an event read for both of the events in the group.
 *
 * Since reading Intel CQM event counters requires sending SMP IPIs, the
 * CQM pmu needs to handle the above situation gracefully, and return
 * the last read counter value to avoid triggering a WARN_ON_ONCE() in
 * smp_call_function_many() caused by sending IPIs from NMI context.
 */
int test__intel_cqm_count_nmi_context(int subtest __maybe_unused)
{
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	struct perf_event_attr pe;
	int i, fd[2], flag, ret;
	size_t mmap_len;
	void *event;
	pid_t pid;
	int err = TEST_FAIL;

	flag = perf_event_open_cloexec_flag();

	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("perf_evlist__new failed\n");
		return TEST_FAIL;
	}

	ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
	if (ret) {
		pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
		err = TEST_SKIP;
		goto out;
	}

	evsel = perf_evlist__first(evlist);
	if (!evsel) {
		pr_debug("perf_evlist__first failed\n");
		goto out;
	}

	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = PERF_TYPE_HARDWARE;
	pe.config = PERF_COUNT_HW_CPU_CYCLES;
	pe.read_format = PERF_FORMAT_GROUP;

	pe.sample_period = 128;
	pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;

	pid = spawn();

	fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
	if (fd[0] < 0) {
		pr_debug("failed to open event\n");
		goto out;
	}

	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = evsel->attr.type;
	pe.config = evsel->attr.config;

	fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
	if (fd[1] < 0) {
		pr_debug("failed to open event\n");
		goto out;
	}

	/*
	 * Pick a power-of-two number of pages + 1 for the meta-data
	 * page (struct perf_event_mmap_page). See tools/perf/design.txt.
	 */
	mmap_len = page_size * 65;

	event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
	if (event == (void *)(-1)) {
		pr_debug("failed to mmap %d\n", errno);
		goto out;
	}

	sleep(1);

	err = TEST_OK;

	munmap(event, mmap_len);

	for (i = 0; i < 2; i++)
		close(fd[i]);

	kill(pid, SIGKILL);
	wait(NULL);
out:
	perf_evlist__delete(evlist);
	return err;
}
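/*
 * The test above relies on a spawn() helper that is not shown in this
 * excerpt. A minimal sketch of what such a helper could look like,
 * under the assumption that it only needs to fork a child for the two
 * events to attach to, which the test later kills with SIGKILL:
 */
static pid_t spawn(void)
{
	pid_t pid = fork();

	if (pid)		/* parent: hand the child's pid to the test */
		return pid;

	while (1)		/* child: idle until the parent kills it */
		sleep(5);
	return 0;
}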