/* Allocate a buffer big enough to hold one value read from the map.
 *
 * Per-CPU map types return one value per possible CPU, each slot padded
 * to an 8-byte boundary, so the buffer must cover every CPU; all other
 * map types need room for a single value only.  Caller frees.
 */
static void *alloc_value(struct bpf_map_info *info)
{
	size_t size = info->value_size;

	if (map_is_per_cpu(info->type))
		size = round_up(info->value_size, 8) * get_possible_cpus();

	return malloc(size);
}
/* Dump one key/value pair as JSON using the map's BTF type information.
 *
 * Emits an object of the form { "key": ..., "value": ... } for regular
 * maps, or { "key": ..., "values": [ { "cpu": N, "value": ... }, ... ] }
 * for per-CPU maps, with one array entry per possible CPU and each
 * per-CPU value padded to an 8-byte boundary.  The key is only dumped
 * when the map carries a BTF key type id.
 *
 * Returns 0 on success, or the first non-zero btf_dumper_type() error.
 */
static int do_dump_btf(const struct btf_dumper *d,
		       struct bpf_map_info *map_info, void *key,
		       void *value)
{
	/* Initialize so ret is never read uninitialized: with no BTF key
	 * type id and a per-CPU loop that runs zero times, the original
	 * returned an indeterminate value.
	 */
	int ret = 0;

	/* start of key-value pair */
	jsonw_start_object(d->jw);

	if (map_info->btf_key_type_id) {
		jsonw_name(d->jw, "key");
		ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
		if (ret)
			goto err_end_obj;
	}

	if (!map_is_per_cpu(map_info->type)) {
		jsonw_name(d->jw, "value");
		ret = btf_dumper_type(d, map_info->btf_value_type_id, value);
	} else {
		unsigned int i, n, step;

		jsonw_name(d->jw, "values");
		jsonw_start_array(d->jw);
		n = get_possible_cpus();
		step = round_up(map_info->value_size, 8);
		for (i = 0; i < n; i++) {
			jsonw_start_object(d->jw);
			jsonw_int_field(d->jw, "cpu", i);
			jsonw_name(d->jw, "value");
			ret = btf_dumper_type(d, map_info->btf_value_type_id,
					      value + i * step);
			/* Close the per-CPU object before bailing out so the
			 * JSON stream stays balanced even on error.
			 */
			jsonw_end_object(d->jw);
			if (ret)
				break;
		}
		jsonw_end_array(d->jw);
	}

err_end_obj:
	/* end of key-value pair */
	jsonw_end_object(d->jw);

	return ret;
}
static int bpf_perf_event_open(int map_fd, int key, int cpu) { struct perf_event_attr attr = { .sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME, .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_BPF_OUTPUT, }; int pmu_fd; pmu_fd = sys_perf_event_open(&attr, -1, cpu, -1, 0); if (pmu_fd < 0) { p_err("failed to open perf event %d for CPU %d", key, cpu); return -1; } if (bpf_map_update_elem(map_fd, &key, &pmu_fd, BPF_ANY)) { p_err("failed to update map for event %d for CPU %d", key, cpu); goto err_close; } if (ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) { p_err("failed to enable event %d for CPU %d", key, cpu); goto err_close; } return pmu_fd; err_close: close(pmu_fd); return -1; } int do_event_pipe(int argc, char **argv) { int i, nfds, map_fd, index = -1, cpu = -1; struct bpf_map_info map_info = {}; struct event_ring_info *rings; size_t tmp_buf_sz = 0; void *tmp_buf = NULL; struct pollfd *pfds; __u32 map_info_len; bool do_all = true; map_info_len = sizeof(map_info); map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len); if (map_fd < 0) return -1; if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { p_err("map is not a perf event array"); goto err_close_map; } while (argc) { if (argc < 2) BAD_ARG(); if (is_prefix(*argv, "cpu")) { char *endptr; NEXT_ARG(); cpu = strtoul(*argv, &endptr, 0); if (*endptr) { p_err("can't parse %s as CPU ID", **argv); goto err_close_map; } NEXT_ARG(); } else if (is_prefix(*argv, "index")) { char *endptr; NEXT_ARG(); index = strtoul(*argv, &endptr, 0); if (*endptr) { p_err("can't parse %s as index", **argv); goto err_close_map; } NEXT_ARG(); } else { BAD_ARG(); } do_all = false; } if (!do_all) { if (index == -1 || cpu == -1) { p_err("cpu and index must be specified together"); goto err_close_map; } nfds = 1; } else { nfds = min(get_possible_cpus(), map_info.max_entries); cpu = 0; index = 0; } rings = calloc(nfds, sizeof(rings[0])); if (!rings) goto err_close_map; pfds = calloc(nfds, sizeof(pfds[0])); if (!pfds) goto 
err_free_rings; for (i = 0; i < nfds; i++) { rings[i].cpu = cpu + i; rings[i].key = index + i; rings[i].fd = bpf_perf_event_open(map_fd, rings[i].key, rings[i].cpu); if (rings[i].fd < 0) goto err_close_fds_prev; rings[i].mem = perf_event_mmap(rings[i].fd); if (!rings[i].mem) goto err_close_fds_current; pfds[i].fd = rings[i].fd; pfds[i].events = POLLIN; } signal(SIGINT, int_exit); signal(SIGHUP, int_exit); signal(SIGTERM, int_exit); if (json_output) jsonw_start_array(json_wtr); while (!stop) { poll(pfds, nfds, 200); for (i = 0; i < nfds; i++) perf_event_read(&rings[i], &tmp_buf, &tmp_buf_sz); } free(tmp_buf); if (json_output) jsonw_end_array(json_wtr); for (i = 0; i < nfds; i++) { perf_event_unmap(rings[i].mem); close(rings[i].fd); } free(pfds); free(rings); close(map_fd); return 0; err_close_fds_prev: while (i--) { perf_event_unmap(rings[i].mem); err_close_fds_current: close(rings[i].fd); } free(pfds); err_free_rings: free(rings); err_close_map: close(map_fd); return -1; }
jsonw_name(json_wtr, "value"); print_hex_data_json(value, info->value_size); if (btf) { struct btf_dumper d = { .btf = btf, .jw = json_wtr, .is_plain_text = false, }; jsonw_name(json_wtr, "formatted"); do_dump_btf(&d, info, key, value); } } else { unsigned int i, n, step; n = get_possible_cpus(); step = round_up(info->value_size, 8); jsonw_name(json_wtr, "key"); print_hex_data_json(key, info->key_size); jsonw_name(json_wtr, "values"); jsonw_start_array(json_wtr); for (i = 0; i < n; i++) { jsonw_start_object(json_wtr); jsonw_int_field(json_wtr, "cpu", i); jsonw_name(json_wtr, "value"); print_hex_data_json(value + i * step, info->value_size);