Example #1
/* Test deletion */
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
				     3 * tgt_free);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	key = 1;
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= 2 * tgt_free; key++) {
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
		assert(bpf_map_delete_elem(lru_map_fd, &key));
	}

	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				            BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
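The asserts above rely on the return convention of the libbpf map wrappers: 0 on success, -1 with errno set on failure, with BPF_NOEXIST, BPF_EXIST and BPF_ANY selecting create-only, update-only and create-or-update behaviour. A minimal stand-alone sketch of that contract, assuming the older bpf_create_map() wrapper from tools/lib/bpf that these selftests build on (sizes and values are illustrative):

#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	unsigned long long key = 1, value = 1234, out = 0;
	int fd;

	/* Small plain hash map, like the "expected" maps in the LRU tests */
	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
			    2, 0);
	assert(fd >= 0);

	/* Create-only succeeds once, then fails with EEXIST */
	assert(!bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST));
	assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
	       errno == EEXIST);

	/* Update-only and create-or-update both succeed on an existing key */
	assert(!bpf_map_update_elem(fd, &key, &value, BPF_EXIST));
	assert(!bpf_map_update_elem(fd, &key, &value, BPF_ANY));

	assert(!bpf_map_lookup_elem(fd, &key, &out) && out == 1234);

	/* Delete succeeds once, then fails with ENOENT */
	assert(!bpf_map_delete_elem(fd, &key));
	assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);

	close(fd);
	return 0;
}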
Example #2
static int bpf_do_map(const char *file, uint32_t flags, uint32_t key,
		      uint32_t value)
{
	int fd, ret;

	if (flags & BPF_F_PIN) {
		fd = bpf_map_create();
		printf("bpf: map fd:%d (%s)\n", fd, strerror(errno));
		assert(fd > 0);

		ret = bpf_obj_pin(fd, file);
		printf("bpf: pin ret:(%d,%s)\n", ret, strerror(errno));
		assert(ret == 0);
	} else {
		fd = bpf_obj_get(file);
		printf("bpf: get fd:%d (%s)\n", fd, strerror(errno));
		assert(fd > 0);
	}

	if ((flags & BPF_F_KEY_VAL) == BPF_F_KEY_VAL) {
		ret = bpf_map_update_elem(fd, &key, &value, 0);
		printf("bpf: fd:%d u->(%u:%u) ret:(%d,%s)\n", fd, key, value,
		       ret, strerror(errno));
		assert(ret == 0);
	} else if (flags & BPF_F_KEY) {
		ret = bpf_map_lookup_elem(fd, &key, &value);
		printf("bpf: fd:%d l->(%u):%u ret:(%d,%s)\n", fd, key, value,
		       ret, strerror(errno));
		assert(ret == 0);
	}

	return 0;
}
Example #3
int bpf_prog1(struct pt_regs *ctx)
{
	long rq = ctx->di;
	u64 val = bpf_ktime_get_ns();

	bpf_map_update_elem(&start_ts, &rq, &val, BPF_ANY);
	return 0;
}
Example #4
int bpf_prog1(struct pt_regs *ctx)
{
	long rq = PT_REGS_PARM1(ctx);
	u64 val = bpf_ktime_get_ns();

	bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
	return 0;
}
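Both kprobe programs above store a timestamp keyed by the request pointer into a map (start_ts / my_map) that is declared elsewhere in the same BPF object. A sketch of the classic samples/bpf-style definition such programs pair with; the name and sizes here are assumptions:

struct bpf_map_def SEC("maps") my_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(long),
	.value_size = sizeof(u64),
	.max_entries = 4096,
};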
Example #5
File: xdp.c Project: Anjali05/linux
void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	char buf[128];
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
out:
	bpf_object__close(obj);
}
Example #6
static void clear_stats(int fd)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	__u64 values[nr_cpus];
	__u32 key;

	memset(values, 0, sizeof(values));
	for (key = 0; key < SLOTS; key++)
		bpf_map_update_elem(fd, &key, values, BPF_ANY);
}
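clear_stats() writes one value per possible CPU because the map is assumed to be a per-CPU array; the matching read side passes a buffer of the same shape to bpf_map_lookup_elem() and sums the per-CPU slots. A hedged sketch of that read-back, assuming a BPF_MAP_TYPE_PERCPU_ARRAY of __u64 counters and a hypothetical helper name read_slot():

static __u64 read_slot(int fd, __u32 key)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	__u64 values[nr_cpus], sum = 0;
	unsigned int i;

	/* For per-CPU maps, one value per possible CPU is returned */
	if (bpf_map_lookup_elem(fd, &key, values))
		return 0;

	for (i = 0; i < nr_cpus; i++)
		sum += values[i];
	return sum;
}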
Example #7
static int populate_prog_array(const char *event, int prog_fd)
{
	int ind = atoi(event), err;

	err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
	if (err < 0) {
		printf("failed to store prog_fd in prog_array\n");
		return -1;
	}
	return 0;
}
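populate_prog_array() stores a program fd at an index of a BPF_MAP_TYPE_PROG_ARRAY (prog_array_fd is assumed to refer to one); on the BPF side the same slot is consumed with bpf_tail_call(). A sketch of that kernel-side counterpart; the map name, section name and sizes are assumptions:

struct bpf_map_def SEC("maps") prog_array = {
	.type = BPF_MAP_TYPE_PROG_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u32),
	.max_entries = 64,
};

SEC("kprobe/some_hook")
int bpf_dispatcher(struct pt_regs *ctx)
{
	int index = 1;	/* hypothetical slot, matching atoi(event) above */

	bpf_tail_call(ctx, &prog_array, index);

	/* Only reached if the slot is empty or the tail call fails */
	return 0;
}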
Example #8
static void populate_map(uint32_t port_key, int magic_result)
{
	int ret;

	ret = bpf_map_update_elem(PORT_A, &port_key, &magic_result, BPF_ANY);
	assert(!ret);

	ret = bpf_map_update_elem(PORT_H, &port_key, &magic_result,
				  BPF_NOEXIST);
	assert(!ret);

	ret = bpf_map_update_elem(A_OF_PORT_A, &port_key, &PORT_A, BPF_ANY);
	assert(!ret);
	check_map_id(PORT_A, A_OF_PORT_A, port_key);

	ret = bpf_map_update_elem(H_OF_PORT_A, &port_key, &PORT_A, BPF_NOEXIST);
	assert(!ret);
	check_map_id(PORT_A, H_OF_PORT_A, port_key);

	ret = bpf_map_update_elem(H_OF_PORT_H, &port_key, &PORT_H, BPF_NOEXIST);
	assert(!ret);
	check_map_id(PORT_H, H_OF_PORT_H, port_key);
}
Example #9
int main(int argc, char **argv)
{
	const char *pinned_file = NULL;
	int ifindex = -1;
	int array_key = 0;
	int array_fd = -1;
	int ret = -1;
	int opt;

	while ((opt = getopt(argc, argv, "F:U:i:")) != -1) {
		switch (opt) {
		/* General args */
		case 'U':
			pinned_file = optarg;
			break;
		case 'i':
			ifindex = atoi(optarg);
			break;
		default:
			usage();
			goto out;
		}
	}

	if (ifindex < 0 || !pinned_file) {
		usage();
		goto out;
	}

	array_fd = bpf_obj_get(pinned_file);
	if (array_fd < 0) {
		fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
			pinned_file, strerror(errno), errno);
		goto out;
	}

	/* bpf_tunnel_key.remote_ipv4 expects host byte orders */
	ret = bpf_map_update_elem(array_fd, &array_key, &ifindex, 0);
	if (ret) {
		perror("bpf_map_update_elem");
		goto out;
	}

out:
	if (array_fd != -1)
		close(array_fd);
	return ret;
}
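This loader only writes an ifindex into slot 0 of a pinned array; a tc/BPF program on the other end is expected to look that ifindex up and redirect traffic to it. A hedged sketch of such a consumer; the map definition and section name are assumptions, not the pinned map's actual layout:

struct bpf_map_def SEC("maps") tun_iface = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 1,
};

SEC("classifier")
int redirect_to_tun(struct __sk_buff *skb)
{
	int key = 0;
	int *ifindex;

	ifindex = bpf_map_lookup_elem(&tun_iface, &key);
	if (!ifindex)
		return TC_ACT_OK;

	/* TC_ACT_REDIRECT: send the packet out of *ifindex */
	return bpf_redirect(*ifindex, 0);
}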
Example #10
static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
{
	unsigned long long key, value[nr_cpus];

	/* Ensure the last key inserted by previous CPU can be found */
	assert(!bpf_map_lookup_elem(map_fd, &last_key, value));

	value[0] = 1234;

	key = last_key + 1;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_lookup_elem(map_fd, &key, value));

	/* Cannot find the last key because it was removed by LRU */
	assert(bpf_map_lookup_elem(map_fd, &last_key, value));
}
Example #11
/* Test map with only one element */
static void test_lru_sanity5(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int next_cpu = 0;
	int map_fd;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	map_fd = create_map(map_type, map_flags, 1);
	assert(map_fd != -1);

	value[0] = 1234;
	key = 0;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));

	while (sched_next_online(0, &next_cpu) != -1) {
		pid_t pid;

		pid = fork();
		if (pid == 0) {
			do_test_lru_sanity5(key, map_fd);
			exit(0);
		} else if (pid == -1) {
			printf("couldn't spawn process to test key:%llu\n",
			       key);
			exit(1);
		} else {
			int status;

			assert(waitpid(pid, &status, 0) == pid);
			assert(status == 0);
			key++;
		}
	}

	close(map_fd);
	/* At least one key should be tested */
	assert(key > 0);

	printf("Pass\n");
}
Example #12
static int setup_bpf_perf_event(int mapfd)
{
	struct perf_event_attr attr = {
		.sample_type = PERF_SAMPLE_RAW,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
	};
	int key = 0;
	int pmu_fd;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (pmu_fd < 0)
		return pmu_fd;
	bpf_map_update_elem(mapfd, &key, &pmu_fd, BPF_ANY);

	ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	return pmu_fd;
}
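setup_bpf_perf_event() stores the perf event fd at key 0 of what is assumed to be a BPF_MAP_TYPE_PERF_EVENT_ARRAY; the BPF program then pushes samples into that slot with bpf_perf_event_output(). A sketch of that side; the map name, section name and payload are assumptions:

struct bpf_map_def SEC("maps") perf_events = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 2,
};

SEC("kprobe/some_hook")
int bpf_emit(struct pt_regs *ctx)
{
	u64 ts = bpf_ktime_get_ns();
	int index = 0;	/* must match the key written on the user-space side */

	bpf_perf_event_output(ctx, &perf_events, index, &ts, sizeof(ts));
	return 0;
}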
Example #13
int bpf_prog2(struct pt_regs *ctx)
{
	long loc = 0;
	long init_val = 1;
	long *value;

	/* read ip of kfree_skb caller.
	 * non-portable version of __builtin_return_address(0)
	 */
	BPF_KPROBE_READ_RET_IP(loc, ctx);

	value = bpf_map_lookup_elem(&my_map, &loc);
	if (value)
		*value += 1;
	else
		bpf_map_update_elem(&my_map, &loc, &init_val, BPF_ANY);
	return 0;
}
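The lookup-then-increment in bpf_prog2() is not atomic: two CPUs hitting the same return address can race between the lookup and *value += 1 and lose counts. A sketch of a common alternative, reusing the same assumed my_map and BPF_KPROBE_READ_RET_IP macro, which seeds the element with BPF_NOEXIST and bumps it with an atomic add:

int bpf_prog2_atomic(struct pt_regs *ctx)
{
	long init_val = 0;
	long loc = 0;
	long *value;

	BPF_KPROBE_READ_RET_IP(loc, ctx);

	value = bpf_map_lookup_elem(&my_map, &loc);
	if (!value) {
		/* Losing this race is harmless: the other CPU created it */
		bpf_map_update_elem(&my_map, &loc, &init_val, BPF_NOEXIST);
		value = bpf_map_lookup_elem(&my_map, &loc);
		if (!value)
			return 0;
	}
	__sync_fetch_and_add(value, 1);
	return 0;
}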
Example #14
static void verify_map(int map_id)
{
	__u32 key = 0;
	__u32 val;

	if (bpf_map_lookup_elem(map_id, &key, &val) != 0) {
		fprintf(stderr, "map_lookup failed: %s\n", strerror(errno));
		return;
	}
	if (val == 0) {
		fprintf(stderr, "failed: map #%d returns value 0\n", map_id);
		return;
	}
	val = 0;
	if (bpf_map_update_elem(map_id, &key, &val, BPF_ANY) != 0) {
		fprintf(stderr, "map_update failed: %s\n", strerror(errno));
		return;
	}
}
static int run_test(int server_fd, int results_fd)
{
	int client = -1, srv_client = -1;
	int ret = 0;
	__u32 key = 0;
	__u64 value = 0;

	if (bpf_map_update_elem(results_fd, &key, &value, 0) < 0) {
		log_err("Can't clear results");
		goto err;
	}

	client = connect_to_server(server_fd);
	if (client == -1)
		goto err;

	srv_client = accept(server_fd, NULL, 0);
	if (srv_client == -1) {
		log_err("Can't accept connection");
		goto err;
	}

	if (bpf_map_lookup_elem(results_fd, &key, &value) < 0) {
		log_err("Can't lookup result");
		goto err;
	}

	if (value != 1) {
		log_err("Didn't match syncookie: %llu", value);
		goto err;
	}

	goto out;

err:
	ret = 1;
out:
	close(client);
	close(srv_client);
	return ret;
}
Example #16
/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Lookup 1 to tgt_free/2
 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
 * => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU
 */
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* The per-cpu LRU list (i.e. each CPU has its own LRU
		 * list) does not have a local free list.  Hence,
		 * it will only free old nodes when there are no free
		 * nodes left in the LRU list, so this test does not
		 * apply to BPF_F_NO_COMMON_LRU
		 */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Lookup 1 to tgt_free/2 */
	end_key = 1 + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				            BPF_NOEXIST));
	}

	/* Insert 1+tgt_free to 2*tgt_free
	 * => 1+tgt_free/2 to LOCALFREE_TARGET will be
	 * removed by LRU
	 */
	key = 1 + tgt_free;
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
Example #17
int trace_sys_connect(struct pt_regs *ctx)
{
	struct sockaddr_in6 *in6;
	u16 test_case, port, dst6[8];
	int addrlen, ret, inline_ret, ret_key = 0;
	u32 port_key;
	void *outer_map, *inner_map;
	bool inline_hash = false;

	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
	addrlen = (int)PT_REGS_PARM3(ctx);

	if (addrlen != sizeof(*in6))
		return 0;

	ret = bpf_probe_read(dst6, sizeof(dst6), &in6->sin6_addr);
	if (ret) {
		inline_ret = ret;
		goto done;
	}

	if (dst6[0] != 0xdead || dst6[1] != 0xbeef)
		return 0;

	test_case = dst6[7];

	ret = bpf_probe_read(&port, sizeof(port), &in6->sin6_port);
	if (ret) {
		inline_ret = ret;
		goto done;
	}

	port_key = port;

	ret = -ENOENT;
	if (test_case == 0) {
		outer_map = &a_of_port_a;
	} else if (test_case == 1) {
		outer_map = &h_of_port_a;
	} else if (test_case == 2) {
		outer_map = &h_of_port_h;
	} else {
		ret = __LINE__;
		inline_ret = ret;
		goto done;
	}

	inner_map = bpf_map_lookup_elem(outer_map, &port_key);
	if (!inner_map) {
		ret = __LINE__;
		inline_ret = ret;
		goto done;
	}

	ret = do_reg_lookup(inner_map, port_key);

	if (test_case == 0 || test_case == 1)
		inline_ret = do_inline_array_lookup(inner_map, port_key);
	else
		inline_ret = do_inline_hash_lookup(inner_map, port_key);

done:
	bpf_map_update_elem(&reg_result_h, &ret_key, &ret, BPF_ANY);
	bpf_map_update_elem(&inline_result_h, &ret_key, &inline_ret, BPF_ANY);

	return 0;
}
Example #18
static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
	/* +1 to test for the info_len returned by kernel */
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	int sysctl_fd, jit_enabled = 0, err = 0;
	__u64 array_value;
	uid_t my_uid = getuid();
	time_t now, load_time;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			jit_enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)
		objs[i] = NULL;

	/* Check bpf_obj_get_info_by_fd() */
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		now = time(NULL);
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */
		if (err)
			error_cnt++;
		assert(!err);

		/* Insert a magic value to the map */
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);
		assert(!err);

		/* Check getting map info */
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
					     &info_len);
		if (CHECK(err ||
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "get-map-info(fd)",
			  "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  err, errno,
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))
			goto done;

		/* Check getting prog info */
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		assert(!err);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		assert(!err);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
					     &info_len);
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
		if (CHECK(err ||
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			  (jit_enabled &&
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "get-prog-info(fd)",
			  "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  jit_enabled,
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  load_time, now,
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))
			goto done;
	}

	/* Check bpf_prog_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};
		__u32 saved_map_id;
		int prog_fd;

		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
			continue;
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		/* Negative test:
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 */
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
			  err, errno, EFAULT))
			break;
		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		saved_map_id = *(int *)(prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)prog_info.map_ids, saved_map_id);
		close(prog_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

	/* Check bpf_map_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};
		int map_fd;

		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
			continue;
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
		assert(!err);

		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);

		close(map_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

done:
	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
}
Example #19
static void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	char buf[128];
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || errno || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
out:
	bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5

static void test_l4lb(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	const char *file = "./test_l4lb.o";
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	u32 *magic = (u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}
Example #20
static int bpf_pipeline_setdel_rules(struct net_mat_rule *rule, bool set_rule)
{
	__u8 *key, *value, *tcache;
	int err;
	unsigned int i;

	/* Assert on these because we expect matchlib never to pass us
	 * ill-formed rules. Asserts were added in the interim because I'm
	 * using this to fuzz-test the middle layer and want hard errors in
	 * the lower layer when something goes wrong.
	 */
	assert(rule->table_id < BPF_MATCH_MAX_TABLES);

	/* The BPF backend uses a linear reference scheme where uid maps
	 * 1:1 with hw_rule_ids, so user ids greater than the map size
	 * are dropped.
	 *
	 * Should the middle layer catch this?
	 */
	printf("%s: rule %i max %i set:%s\n", __func__, rule->uid, table_aux[rule->table_id].max_elem, set_rule ? "yes" : "no");
	assert(rule->uid < table_aux[rule->table_id].max_elem);
	/*
	if (uid  > table_aux[rule.table_id].max_elem) {
		MAT_LOG(DEBUG, "set_rule rule uid greater than table size!\n";
		return -EINVAL;
	}
	*/

	key = calloc(1, table_aux[rule->table_id].size_key);
	if (!key)
		return -ENOMEM;

	tcache = table_cache[rule->table_id];

	/* Deleting a rule is just an update with a zeroed value,
	 * so we only set up 'value' when set_rule is true.
	 */
	if (set_rule) {
		value = calloc(1, table_aux[rule->table_id].size_value);
		if (!value) {
			free(key);
			return -ENOMEM;
		}

		err = bpf_match_to_key(rule->table_id, rule->matches, key);
		if (err)
			return err;

		err = bpf_match_to_value(rule->actions, value);
		if (err)
			return err;

		bpf_map_update_elem((__u32)table_fds[rule->table_id],
				    (__u8 *) key,
				    value, BPF_ANY);

		memcpy(&tcache[rule->uid], key, table_aux[rule->table_id].size_key);
	} else {
		key = (__u8 *)&tcache[rule->uid];

		bpf_map_delete_elem((__u32)table_fds[rule->table_id],
				    (__u8 *) key);
	}

	printf("%s: using key: ", __func__);
	for (i = 0; i < table_aux[rule->table_id].size_key; i++)
		printf("%02x\n", key[i]);
	printf("\n");

	if (!set_rule)
		memset(&tcache[rule->uid], 0, table_aux[rule->table_id].size_key);

	return 0;
}
Example #21
int main(int argc, char **argv)
{
	unsigned char opt_flags[256] = {};
	unsigned int kill_after_s = 0;
	const char *optstr = "i:a:p:s:d:m:T:P:SNh";
	int min_port = 0, max_port = 0;
	struct iptnl_info tnl = {};
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct vip vip = {};
	char filename[256];
	int opt;
	int i;

	tnl.family = AF_UNSPEC;
	vip.protocol = IPPROTO_TCP;

	for (i = 0; i < strlen(optstr); i++)
		if (optstr[i] != 'h' && 'a' <= optstr[i] && optstr[i] <= 'z')
			opt_flags[(unsigned char)optstr[i]] = 1;

	while ((opt = getopt(argc, argv, optstr)) != -1) {
		unsigned short family;
		unsigned int *v6;

		switch (opt) {
		case 'i':
			ifindex = atoi(optarg);
			break;
		case 'a':
			vip.family = parse_ipstr(optarg, vip.daddr.v6);
			if (vip.family == AF_UNSPEC)
				return 1;
			break;
		case 'p':
			if (parse_ports(optarg, &min_port, &max_port))
				return 1;
			break;
		case 'P':
			vip.protocol = atoi(optarg);
			break;
		case 's':
		case 'd':
			if (opt == 's')
				v6 = tnl.saddr.v6;
			else
				v6 = tnl.daddr.v6;

			family = parse_ipstr(optarg, v6);
			if (family == AF_UNSPEC)
				return 1;
			if (tnl.family == AF_UNSPEC) {
				tnl.family = family;
			} else if (tnl.family != family) {
				fprintf(stderr,
					"The IP version of the src and dst addresses used in the IP encapsulation does not match\n");
				return 1;
			}
			break;
		case 'm':
			if (!ether_aton_r(optarg,
					  (struct ether_addr *)tnl.dmac)) {
				fprintf(stderr, "Invalid mac address:%s\n",
					optarg);
				return 1;
			}
			break;
		case 'T':
			kill_after_s = atoi(optarg);
			break;
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'N':
			xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		default:
			usage(argv[0]);
			return 1;
		}
		opt_flags[opt] = 0;
	}

	for (i = 0; i < strlen(optstr); i++) {
		if (opt_flags[(unsigned int)optstr[i]]) {
			fprintf(stderr, "Missing argument -%c\n", optstr[i]);
			usage(argv[0]);
			return 1;
		}
	}

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK, RLIM_INFINITY)");
		return 1;
	}

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);

	if (load_bpf_file(filename)) {
		printf("%s", bpf_log_buf);
		return 1;
	}

	if (!prog_fd[0]) {
		printf("load_bpf_file: %s\n", strerror(errno));
		return 1;
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	while (min_port <= max_port) {
		vip.dport = htons(min_port++);
		if (bpf_map_update_elem(map_fd[1], &vip, &tnl, BPF_NOEXIST)) {
			perror("bpf_map_update_elem(&vip2tnl)");
			return 1;
		}
	}

	if (bpf_set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) {
		printf("link set xdp fd failed\n");
		return 1;
	}

	poll_stats(kill_after_s);

	bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);

	return 0;
}
Example #22
int bpf_testcb(struct bpf_sock_ops *skops)
{
	int rv = -1;
	int op;

	op = (int) skops->op;

	if (bpf_ntohl(skops->remote_port) != TESTPORT) {
		skops->reply = -1;
		return 0;
	}

	switch (op) {
	case BPF_SOCK_OPS_TIMEOUT_INIT:
	case BPF_SOCK_OPS_RWND_INIT:
	case BPF_SOCK_OPS_NEEDS_ECN:
	case BPF_SOCK_OPS_BASE_RTT:
	case BPF_SOCK_OPS_RTO_CB:
		rv = 1;
		break;

	case BPF_SOCK_OPS_TCP_CONNECT_CB:
	case BPF_SOCK_OPS_TCP_LISTEN_CB:
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		bpf_sock_ops_cb_flags_set(skops, (BPF_SOCK_OPS_RETRANS_CB_FLAG|
					  BPF_SOCK_OPS_RTO_CB_FLAG));
		rv = 1;
		break;
	case BPF_SOCK_OPS_RETRANS_CB: {
			__u32 key = 0;
			struct tcpnotify_globals g, *gp;
			struct tcp_notifier msg = {
				.type = 0xde,
				.subtype = 0xad,
				.source = 0xbe,
				.hash = 0xef,
			};

			rv = 1;

			/* Update results */
			gp = bpf_map_lookup_elem(&global_map, &key);
			if (!gp)
				break;
			g = *gp;
			g.total_retrans = skops->total_retrans;
			g.ncalls++;
			bpf_map_update_elem(&global_map, &key, &g,
					    BPF_ANY);
			bpf_perf_event_output(skops, &perf_event_map,
					      BPF_F_CURRENT_CPU,
					      &msg, sizeof(msg));
		}
		break;
	default:
		rv = -1;
	}
	skops->reply = rv;
	return 1;
}
char _license[] SEC("license") = "GPL";
Example #23
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup Key=1
 * Add Key=3
 *   => Key=2 will be removed by LRU
 * Iterate the map.  Only key=1 and key=3 are found
 */
static void test_lru_sanity0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */

	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
	       /* key=1 already exists */
	       && errno == EEXIST);

	assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 &&
	       errno == EINVAL);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
	       /* key=2 is not there */
	       errno == ENOENT);

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
Example #24
int main(int argc, char **argv)
{
	const char *optstr = "SN";
	char filename[256];
	int ret, opt, key = 0;

	while ((opt = getopt(argc, argv, optstr)) != -1) {
		switch (opt) {
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'N':
			xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		default:
			usage(basename(argv[0]));
			return 1;
		}
	}

	if (optind + 2 != argc) {
		printf("usage: %s IFINDEX_IN IFINDEX_OUT\n", argv[0]);
		return 1;
	}

	ifindex_in = strtoul(argv[optind], NULL, 0);
	ifindex_out = strtoul(argv[optind + 1], NULL, 0);
	printf("input: %d output: %d\n", ifindex_in, ifindex_out);

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);

	if (load_bpf_file(filename)) {
		printf("%s", bpf_log_buf);
		return 1;
	}

	if (!prog_fd[0]) {
		printf("load_bpf_file: %s\n", strerror(errno));
		return 1;
	}

	if (set_link_xdp_fd(ifindex_in, prog_fd[0], xdp_flags) < 0) {
		printf("ERROR: link set xdp fd failed on %d\n", ifindex_in);
		return 1;
	}

	/* Loading dummy XDP prog on out-device */
	if (set_link_xdp_fd(ifindex_out, prog_fd[1],
			    (xdp_flags | XDP_FLAGS_UPDATE_IF_NOEXIST)) < 0) {
		printf("WARN: link set xdp fd failed on %d\n", ifindex_out);
		ifindex_out_xdp_dummy_attached = false;
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	printf("map[0] (vports) = %i, map[1] (map) = %i, map[2] (count) = %i\n",
		map_fd[0], map_fd[1], map_fd[2]);

	/* populate virtual to physical port map */
	ret = bpf_map_update_elem(map_fd[0], &key, &ifindex_out, 0);
	if (ret) {
		perror("bpf_update_elem");
		goto out;
	}

	poll_stats(2, ifindex_out);

out:
	return 0;
}
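The "vports" map populated here (key 0 -> ifindex_out) is consumed inside the XDP program via bpf_redirect_map(). A sketch of what that kernel side typically looks like; the map name, type and sizes are assumptions for illustration:

struct bpf_map_def SEC("maps") tx_port = {
	.type = BPF_MAP_TYPE_DEVMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 1,
};

SEC("xdp_redirect_map")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	int vport = 0;	/* same key the user-space loader populated */

	/* Redirect the frame out of the ifindex stored at slot 'vport' */
	return bpf_redirect_map(&tx_port, vport, 0);
}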
Example #25
static int bpf_perf_event_open(int map_fd, int key, int cpu)
{
	struct perf_event_attr attr = {
		.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
	};
	int pmu_fd;

	pmu_fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
	if (pmu_fd < 0) {
		p_err("failed to open perf event %d for CPU %d", key, cpu);
		return -1;
	}

	if (bpf_map_update_elem(map_fd, &key, &pmu_fd, BPF_ANY)) {
		p_err("failed to update map for event %d for CPU %d", key, cpu);
		goto err_close;
	}
	if (ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
		p_err("failed to enable event %d for CPU %d", key, cpu);
		goto err_close;
	}

	return pmu_fd;

err_close:
	close(pmu_fd);
	return -1;
}

int do_event_pipe(int argc, char **argv)
{
	int i, nfds, map_fd, index = -1, cpu = -1;
	struct bpf_map_info map_info = {};
	struct event_ring_info *rings;
	size_t tmp_buf_sz = 0;
	void *tmp_buf = NULL;
	struct pollfd *pfds;
	__u32 map_info_len;
	bool do_all = true;

	map_info_len = sizeof(map_info);
	map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
	if (map_fd < 0)
		return -1;

	if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		p_err("map is not a perf event array");
		goto err_close_map;
	}

	while (argc) {
		if (argc < 2)
			BAD_ARG();

		if (is_prefix(*argv, "cpu")) {
			char *endptr;

			NEXT_ARG();
			cpu = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as CPU ID", **argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else if (is_prefix(*argv, "index")) {
			char *endptr;

			NEXT_ARG();
			index = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as index", **argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else {
			BAD_ARG();
		}

		do_all = false;
	}

	if (!do_all) {
		if (index == -1 || cpu == -1) {
			p_err("cpu and index must be specified together");
			goto err_close_map;
		}

		nfds = 1;
	} else {
		nfds = min(get_possible_cpus(), map_info.max_entries);
		cpu = 0;
		index = 0;
	}

	rings = calloc(nfds, sizeof(rings[0]));
	if (!rings)
		goto err_close_map;

	pfds = calloc(nfds, sizeof(pfds[0]));
	if (!pfds)
		goto err_free_rings;

	for (i = 0; i < nfds; i++) {
		rings[i].cpu = cpu + i;
		rings[i].key = index + i;

		rings[i].fd = bpf_perf_event_open(map_fd, rings[i].key,
						  rings[i].cpu);
		if (rings[i].fd < 0)
			goto err_close_fds_prev;

		rings[i].mem = perf_event_mmap(rings[i].fd);
		if (!rings[i].mem)
			goto err_close_fds_current;

		pfds[i].fd = rings[i].fd;
		pfds[i].events = POLLIN;
	}

	signal(SIGINT, int_exit);
	signal(SIGHUP, int_exit);
	signal(SIGTERM, int_exit);

	if (json_output)
		jsonw_start_array(json_wtr);

	while (!stop) {
		poll(pfds, nfds, 200);
		for (i = 0; i < nfds; i++)
			perf_event_read(&rings[i], &tmp_buf, &tmp_buf_sz);
	}
	free(tmp_buf);

	if (json_output)
		jsonw_end_array(json_wtr);

	for (i = 0; i < nfds; i++) {
		perf_event_unmap(rings[i].mem);
		close(rings[i].fd);
	}
	free(pfds);
	free(rings);
	close(map_fd);

	return 0;

err_close_fds_prev:
	while (i--) {
		perf_event_unmap(rings[i].mem);
err_close_fds_current:
		close(rings[i].fd);
	}
	free(pfds);
err_free_rings:
	free(rings);
err_close_map:
	close(map_fd);
	return -1;
}
Example #26
/* Size of the LRU map is 2*tgt_free
 * It is to test the active/inactive list rotation
 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
 * Lookup key 1 to tgt_free*3/2
 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
 *  => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
 */
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free * 2;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					map_size * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
	end_key = 1 + (2 * tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Lookup key 1 to tgt_free*3/2 */
	end_key = tgt_free + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				            BPF_NOEXIST));
	}

	/* Add 1+2*tgt_free to tgt_free*5/2
	 * (+tgt_free/2 keys)
	 */
	key = 2 * tgt_free + 1;
	end_key = key + batch_size;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				            BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
Example #27
static void swap(unsigned long *a, unsigned long *b)
{
	unsigned long tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}

static int create_cpu_entry(__u32 cpu, __u32 queue_size,
			    __u32 avail_idx, bool new)
{
	__u32 curr_cpus_count = 0;
	__u32 key = 0;
	int ret;

	/* Add a CPU entry to the cpumap, as this allocates a cpu entry in
	 * the kernel for the cpu.
	 */
	ret = bpf_map_update_elem(map_fd[0], &cpu, &queue_size, 0);
	if (ret) {
		fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
		exit(EXIT_FAIL_BPF);
	}

	/* Inform bpf_progs that a new CPU is available to select
	 * from via some control maps.
	 */
	/* map_fd[5] = cpus_available */
	ret = bpf_map_update_elem(map_fd[5], &avail_idx, &cpu, 0);
	if (ret) {
		fprintf(stderr, "Add to avail CPUs failed\n");
		exit(EXIT_FAIL_BPF);
	}
Example #28
static void test(void)
{
	int listen_fd, cli_fd, accept_fd, epfd, err;
	struct epoll_event ev;
	socklen_t addrlen;

	addrlen = sizeof(struct sockaddr_in6);
	ev.events = EPOLLIN;

	epfd = epoll_create(1);
	CHECK(epfd == -1, "epoll_create()", "epfd:%d errno:%d", epfd, errno);

	/* Prepare listen_fd */
	listen_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
	CHECK(listen_fd == -1, "socket()", "listen_fd:%d errno:%d",
	      listen_fd, errno);

	init_loopback6(&srv_sa6);
	err = bind(listen_fd, (struct sockaddr *)&srv_sa6, sizeof(srv_sa6));
	CHECK(err, "bind(listen_fd)", "err:%d errno:%d", err, errno);

	err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
	CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d", err, errno);

	err = listen(listen_fd, 1);
	CHECK(err, "listen(listen_fd)", "err:%d errno:%d", err, errno);

	/* Prepare cli_fd */
	cli_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
	CHECK(cli_fd == -1, "socket()", "cli_fd:%d errno:%d", cli_fd, errno);

	init_loopback6(&cli_sa6);
	err = bind(cli_fd, (struct sockaddr *)&cli_sa6, sizeof(cli_sa6));
	CHECK(err, "bind(cli_fd)", "err:%d errno:%d", err, errno);

	err = getsockname(cli_fd, (struct sockaddr *)&cli_sa6, &addrlen);
	CHECK(err, "getsockname(cli_fd)", "err:%d errno:%d",
	      err, errno);

	/* Update addr_map with srv_sa6 and cli_sa6 */
	err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0);
	CHECK(err, "map_update", "err:%d errno:%d", err, errno);

	err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0);
	CHECK(err, "map_update", "err:%d errno:%d", err, errno);

	/* Connect from cli_sa6 to srv_sa6 */
	err = connect(cli_fd, (struct sockaddr *)&srv_sa6, addrlen);
	printf("srv_sa6.sin6_port:%u cli_sa6.sin6_port:%u\n\n",
	       ntohs(srv_sa6.sin6_port), ntohs(cli_sa6.sin6_port));
	CHECK(err && errno != EINPROGRESS,
	      "connect(cli_fd)", "err:%d errno:%d", err, errno);

	ev.data.fd = listen_fd;
	err = epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
	CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, listen_fd)", "err:%d errno:%d",
	      err, errno);

	/* Accept the connection */
	/* Have some timeout in accept(listen_fd). Just in case. */
	err = epoll_wait(epfd, &ev, 1, 1000);
	CHECK(err != 1 || ev.data.fd != listen_fd,
	      "epoll_wait(listen_fd)",
	      "err:%d errno:%d ev.data.fd:%d listen_fd:%d",
	      err, errno, ev.data.fd, listen_fd);

	accept_fd = accept(listen_fd, NULL, NULL);
	CHECK(accept_fd == -1, "accept(listen_fd)", "accept_fd:%d errno:%d",
	      accept_fd, errno);
	close(listen_fd);

	/* Send some data from accept_fd to cli_fd */
	err = send(accept_fd, DATA, DATA_LEN, 0);
	CHECK(err != DATA_LEN, "send(accept_fd)", "err:%d errno:%d",
	      err, errno);

	/* Have some timeout in recv(cli_fd). Just in case. */
	ev.data.fd = cli_fd;
	err = epoll_ctl(epfd, EPOLL_CTL_ADD, cli_fd, &ev);
	CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, cli_fd)", "err:%d errno:%d",
	      err, errno);

	err = epoll_wait(epfd, &ev, 1, 1000);
	CHECK(err != 1 || ev.data.fd != cli_fd,
	      "epoll_wait(cli_fd)", "err:%d errno:%d ev.data.fd:%d cli_fd:%d",
	      err, errno, ev.data.fd, cli_fd);

	err = recv(cli_fd, NULL, 0, MSG_TRUNC);
	CHECK(err, "recv(cli_fd)", "err:%d errno:%d", err, errno);

	close(epfd);
	close(accept_fd);
	close(cli_fd);

	check_result();
}
Example #29
static int test_multiprog(void)
{
	__u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
	int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
	int drop_prog, allow_prog[6] = {}, rc = 0;
	unsigned long long value;
	int i = 0;

	for (i = 0; i < 6; i++) {
		allow_prog[i] = prog_load_cnt(1, 1 << i);
		if (!allow_prog[i])
			goto err;
	}
	drop_prog = prog_load_cnt(0, 1);
	if (!drop_prog)
		goto err;

	if (setup_cgroup_environment())
		goto err;

	cg1 = create_and_get_cgroup("/cg1");
	if (cg1 < 0)
		goto err;
	cg2 = create_and_get_cgroup("/cg1/cg2");
	if (cg2 < 0)
		goto err;
	cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
	if (cg3 < 0)
		goto err;
	cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
	if (cg4 < 0)
		goto err;
	cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
	if (cg5 < 0)
		goto err;

	if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
		goto err;

	if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
			    BPF_F_ALLOW_MULTI)) {
		log_err("Attaching prog to cg1");
		goto err;
	}
	if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
			     BPF_F_ALLOW_MULTI)) {
		log_err("Unexpected success attaching the same prog to cg1");
		goto err;
	}
	if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
			    BPF_F_ALLOW_MULTI)) {
		log_err("Attaching prog2 to cg1");
		goto err;
	}
	if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
			    BPF_F_ALLOW_OVERRIDE)) {
		log_err("Attaching prog to cg2");
		goto err;
	}
	if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
			    BPF_F_ALLOW_MULTI)) {
		log_err("Attaching prog to cg3");
		goto err;
	}
	if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
			    BPF_F_ALLOW_OVERRIDE)) {
		log_err("Attaching prog to cg4");
		goto err;
	}
	if (bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0)) {
		log_err("Attaching prog to cg5");
		goto err;
	}
	assert(system(PING_CMD) == 0);
	assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
	assert(value == 1 + 2 + 8 + 32);

	/* query the number of effective progs in cg5 */
	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
			      NULL, NULL, &prog_cnt) == 0);
	assert(prog_cnt == 4);
	/* retrieve prog_ids of effective progs in cg5 */
	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
			      &attach_flags, prog_ids, &prog_cnt) == 0);
	assert(prog_cnt == 4);
	assert(attach_flags == 0);
	saved_prog_id = prog_ids[0];
	/* check enospc handling */
	prog_ids[0] = 0;
	prog_cnt = 2;
	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
			      &attach_flags, prog_ids, &prog_cnt) == -1 &&
	       errno == ENOSPC);
	assert(prog_cnt == 4);
	/* check that prog_ids are returned even when buffer is too small */
	assert(prog_ids[0] == saved_prog_id);
	/* retrieve prog_id of single attached prog in cg5 */
	prog_ids[0] = 0;
	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
			      NULL, prog_ids, &prog_cnt) == 0);
	assert(prog_cnt == 1);
	assert(prog_ids[0] == saved_prog_id);

	/* detach bottom program and ping again */
	if (bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS)) {
		log_err("Detaching prog from cg5");
		goto err;
	}
	value = 0;
	assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
	assert(system(PING_CMD) == 0);
	assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
	assert(value == 1 + 2 + 8 + 16);

	/* detach 3rd from bottom program and ping again */
	errno = 0;
	if (!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS)) {
		log_err("Unexpected success on detach from cg3");
		goto err;
	}
	if (bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS)) {
		log_err("Detaching from cg3");
		goto err;
	}
	value = 0;
	assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
	assert(system(PING_CMD) == 0);
	assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
	assert(value == 1 + 2 + 16);

	/* detach 2nd from bottom program and ping again */
	if (bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS)) {
		log_err("Detaching prog from cg4");
		goto err;
	}
	value = 0;
	assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
	assert(system(PING_CMD) == 0);
	assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
	assert(value == 1 + 2 + 4);

	prog_cnt = 4;
	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
			      &attach_flags, prog_ids, &prog_cnt) == 0);
	assert(prog_cnt == 3);
	assert(attach_flags == 0);
	assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
			      NULL, prog_ids, &prog_cnt) == 0);
	assert(prog_cnt == 0);
	goto out;
err:
	rc = 1;

out:
	for (i = 0; i < 6; i++)
		if (allow_prog[i] > 0)
			close(allow_prog[i]);
	close(cg1);
	close(cg2);
	close(cg3);
	close(cg4);
	close(cg5);
	cleanup_cgroup_environment();
	if (!rc)
		printf("### multi:PASS\n");
	else
		printf("### multi:FAIL\n");
	return rc;
}
Example #30
/* Size of the LRU map is 1.5 * tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Keys 1+tgt_free to tgt_free*3/2
 *      will be removed from the LRU because they have never
 *      been looked up and their ref bit is not set
 */
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* The per-cpu LRU list (i.e. each CPU has its own LRU
		 * list) does not have a local free list.  Hence,
		 * it will only free old nodes when there are no free
		 * nodes left in the LRU list, so this test does not
		 * apply to BPF_F_NO_COMMON_LRU
		 */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					map_size * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Any bpf_map_update_elem will need to acquire a new node
	 * from the LRU first.
	 *
	 * The local list is running out of free nodes.
	 * It gets nodes from the global LRU list, which tries to
	 * shrink the inactive list to obtain tgt_free
	 * free nodes.
	 *
	 * Hence, the oldest keys 1 to tgt_free/2
	 * are removed from the LRU list.
	 */
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
	} else {
		assert(bpf_map_update_elem(lru_map_fd, &key, value,
					   BPF_EXIST));
	}

	/* Re-insert 1 to tgt_free/2 again and do a lookup
	 * immediately.
	 */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		assert(bpf_map_lookup_elem(lru_map_fd, &key, value));
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				            BPF_NOEXIST));
	}

	value[0] = 1234;

	/* Insert 1+tgt_free to tgt_free*3/2 */
	end_key = 1 + tgt_free + batch_size;
	for (key = 1 + tgt_free; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Insert 1+tgt_free*3/2 to  tgt_free*5/2 */
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				            BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}