Example 1
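This listing opens one perf event per ring of a BPF_MAP_TYPE_PERF_EVENT_ARRAY map, stores the event file descriptors in the map, mmaps each ring buffer, and then polls the rings to dump BPF_OUTPUT samples until a signal arrives; a single cpu/index pair can optionally be selected on the command line.
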
static int bpf_perf_event_open(int map_fd, int key, int cpu)
{
	struct perf_event_attr attr = {
		.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
	};
	int pmu_fd;

	pmu_fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
	if (pmu_fd < 0) {
		p_err("failed to open perf event %d for CPU %d", key, cpu);
		return -1;
	}

	if (bpf_map_update_elem(map_fd, &key, &pmu_fd, BPF_ANY)) {
		p_err("failed to update map for event %d for CPU %d", key, cpu);
		goto err_close;
	}
	if (ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
		p_err("failed to enable event %d for CPU %d", key, cpu);
		goto err_close;
	}

	return pmu_fd;

err_close:
	close(pmu_fd);
	return -1;
}

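/*
 * Open one ring per CPU (or a single cpu/index pair when given on the
 * command line), store the perf FDs in the map, mmap the rings, then poll
 * them and dump incoming samples until a signal sets 'stop'.
 */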
int do_event_pipe(int argc, char **argv)
{
	int i, nfds, map_fd, index = -1, cpu = -1;
	struct bpf_map_info map_info = {};
	struct event_ring_info *rings;
	size_t tmp_buf_sz = 0;
	void *tmp_buf = NULL;
	struct pollfd *pfds;
	__u32 map_info_len;
	bool do_all = true;

	map_info_len = sizeof(map_info);
	map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
	if (map_fd < 0)
		return -1;

	if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		p_err("map is not a perf event array");
		goto err_close_map;
	}

	while (argc) {
		if (argc < 2)
			BAD_ARG();

		if (is_prefix(*argv, "cpu")) {
			char *endptr;

			NEXT_ARG();
			cpu = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as CPU ID", **argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else if (is_prefix(*argv, "index")) {
			char *endptr;

			NEXT_ARG();
			index = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as index", **argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else {
			BAD_ARG();
		}

		do_all = false;
	}

	if (!do_all) {
		if (index == -1 || cpu == -1) {
			p_err("cpu and index must be specified together");
			goto err_close_map;
		}

		nfds = 1;
	} else {
		nfds = min(get_possible_cpus(), map_info.max_entries);
		cpu = 0;
		index = 0;
	}

	rings = calloc(nfds, sizeof(rings[0]));
	if (!rings)
		goto err_close_map;

	pfds = calloc(nfds, sizeof(pfds[0]));
	if (!pfds)
		goto err_free_rings;

	for (i = 0; i < nfds; i++) {
		rings[i].cpu = cpu + i;
		rings[i].key = index + i;

		rings[i].fd = bpf_perf_event_open(map_fd, rings[i].key,
						  rings[i].cpu);
		if (rings[i].fd < 0)
			goto err_close_fds_prev;

		rings[i].mem = perf_event_mmap(rings[i].fd);
		if (!rings[i].mem)
			goto err_close_fds_current;

		pfds[i].fd = rings[i].fd;
		pfds[i].events = POLLIN;
	}

	signal(SIGINT, int_exit);
	signal(SIGHUP, int_exit);
	signal(SIGTERM, int_exit);

	if (json_output)
		jsonw_start_array(json_wtr);

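	/* Use a short poll timeout so the signal-driven 'stop' flag is
	 * re-checked regularly even when no samples arrive. */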
	while (!stop) {
		poll(pfds, nfds, 200);
		for (i = 0; i < nfds; i++)
			perf_event_read(&rings[i], &tmp_buf, &tmp_buf_sz);
	}
	free(tmp_buf);

	if (json_output)
		jsonw_end_array(json_wtr);

	for (i = 0; i < nfds; i++) {
		perf_event_unmap(rings[i].mem);
		close(rings[i].fd);
	}
	free(pfds);
	free(rings);
	close(map_fd);

	return 0;

err_close_fds_prev:
	while (i--) {
		perf_event_unmap(rings[i].mem);
err_close_fds_current:
		close(rings[i].fd);
	}
	free(pfds);
err_free_rings:
	free(rings);
err_close_map:
	close(map_fd);
	return -1;
}
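
Example 1 relies on two helpers that are defined outside the listing:
sys_perf_event_open() and perf_event_mmap(). A minimal sketch of what they
typically look like follows; the syscall wrapper is the usual pattern for
perf_event_open(2), and the ring-buffer size (64 data pages) is an assumption
for illustration, not taken from the original code.

#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* perf_event_open(2) has no glibc wrapper, so issue the raw syscall. */
static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Map the perf ring buffer: one metadata page plus a power-of-two number
 * of data pages (64 here, an assumed value). Returns NULL on failure, as
 * the caller above expects. */
static void *perf_event_mmap(int fd)
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	size_t mmap_size = page_size * (64 + 1);
	void *base;

	base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return base == MAP_FAILED ? NULL : base;
}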
Example 2
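This listing is a self-test for TCP notifications from a sock_ops program: it loads and attaches the program to a cgroup, wires the program's perf event map to a poller thread, provokes TCP retransmissions by dropping traffic to a test port while connecting with nc, and finally compares the callback count recorded in the global map against the number of perf samples received.
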
int main(int argc, char **argv)
{
	const char *file = "test_tcpnotify_kern.o";
	int prog_fd, map_fd, perf_event_fd;
	struct tcpnotify_globals g = {0};
	const char *cg_path = "/foo";
	int error = EXIT_FAILURE;
	struct bpf_object *obj;
	int cg_fd = -1;
	__u32 key = 0;
	int rv;
	char test_script[80];
	int pmu_fd;
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	if (setup_cgroup_environment())
		goto err;

	cg_fd = create_and_get_cgroup(cg_path);
	if (!cg_fd)
		goto err;

	if (join_cgroup(cg_path))
		goto err;

	if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
		printf("FAILED: load_bpf_file failed for: %s\n", file);
		goto err;
	}

	rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
	if (rv) {
		printf("FAILED: bpf_prog_attach: %d (%s)\n",
		       error, strerror(errno));
		goto err;
	}

	perf_event_fd = bpf_find_map(__func__, obj, "perf_event_map");
	if (perf_event_fd < 0)
		goto err;

	map_fd = bpf_find_map(__func__, obj, "global_map");
	if (map_fd < 0)
		goto err;

	pmu_fd = setup_bpf_perf_event(perf_event_fd);
	if (pmu_fd < 0 || perf_event_mmap(pmu_fd) < 0)
		goto err;

	pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd);

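	/*
	 * Drop inbound packets to TESTPORT so the nc connection below keeps
	 * retransmitting; the sock_ops program reports those retransmissions
	 * through the perf event map, where poller_thread() counts them.
	 */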
	sprintf(test_script,
		"/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP",
		TESTPORT);
	system(test_script);

	sprintf(test_script,
		"/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
		TESTPORT);
	system(test_script);

	sprintf(test_script,
		"/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP",
		TESTPORT);
	system(test_script);

	rv = bpf_map_lookup_elem(map_fd, &key, &g);
	if (rv != 0) {
		printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
		goto err;
	}

	sleep(10);

	if (verify_result(&g)) {
		printf("FAILED: Wrong stats Expected %d calls, got %d\n",
			g.ncalls, rx_callbacks);
		goto err;
	}

	printf("PASSED!\n");
	error = 0;
err:
	bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
	close(cg_fd);
	cleanup_cgroup_environment();
	return error;
}
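
Example 2 also references several file-scope names defined outside the listing: tid, rx_callbacks, TESTPORT, and helpers such as setup_bpf_perf_event(), poller_thread(), verify_result() and the cgroup utilities. A minimal sketch of the data definitions it depends on, with the port number assumed for illustration:

#define TESTPORT	12877		/* assumed value; any otherwise unused port works */

static pthread_t tid;			/* poller thread started from main() */
static int rx_callbacks;		/* incremented by poller_thread() per perf sample */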