/*
 * Send a packet on the given interface from inside a freshly created
 * cgroup and verify the ancestor cgroup ids seen by the BPF program
 * with the given id.
 *
 * argv[1] - network interface to send the packet on
 * argv[2] - id of the already-loaded BPF program
 *
 * Returns 0 on success (prints "[PASS]"), -1 on failure ("[FAIL]").
 */
int main(int argc, char **argv)
{
	int cgfd = -1;
	int err = 0;

	if (argc < 3) {
		fprintf(stderr, "Usage: %s iface prog_id\n", argv[0]);
		exit(EXIT_FAILURE);
	}

	if (setup_cgroup_environment())
		goto err;

	/* create_and_get_cgroup() returns an fd: failure is a negative
	 * value, and fd 0 is a valid descriptor, so check < 0 rather
	 * than !cgfd.
	 */
	cgfd = create_and_get_cgroup(CGROUP_PATH);
	if (cgfd < 0)
		goto err;

	if (join_cgroup(CGROUP_PATH))
		goto err;

	if (send_packet(argv[1]))
		goto err;

	if (check_ancestor_cgroup_ids(atoi(argv[2])))
		goto err;

	goto out;
err:
	err = -1;
out:
	/* Only close an fd we actually obtained. */
	if (cgfd >= 0)
		close(cgfd);
	cleanup_cgroup_environment();
	printf("[%s]\n", err ? "FAIL" : "PASS");
	return err;
}
int main(int argc, char **argv) { struct bpf_prog_load_attr attr = { .file = "test_sock_fields_kern.o", .prog_type = BPF_PROG_TYPE_CGROUP_SKB, .expected_attach_type = BPF_CGROUP_INET_EGRESS, }; int cgroup_fd, prog_fd, err; struct bpf_object *obj; struct bpf_map *map; err = setup_cgroup_environment(); CHECK(err, "setup_cgroup_environment()", "err:%d errno:%d", err, errno); atexit(cleanup_cgroup_environment); /* Create a cgroup, get fd, and join it */ cgroup_fd = create_and_get_cgroup(TEST_CGROUP); CHECK(cgroup_fd == -1, "create_and_get_cgroup()", "cgroup_fd:%d errno:%d", cgroup_fd, errno); err = join_cgroup(TEST_CGROUP); CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno); err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); CHECK(err, "bpf_prog_load_xattr()", "err:%d", err); err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0); CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)", "err:%d errno%d", err, errno); close(cgroup_fd); map = bpf_object__find_map_by_name(obj, "addr_map"); CHECK(!map, "cannot find addr_map", "(null)"); addr_map_fd = bpf_map__fd(map); map = bpf_object__find_map_by_name(obj, "sock_result_map"); CHECK(!map, "cannot find sock_result_map", "(null)"); sk_map_fd = bpf_map__fd(map); map = bpf_object__find_map_by_name(obj, "tcp_sock_result_map"); CHECK(!map, "cannot find tcp_sock_result_map", "(null)"); tp_map_fd = bpf_map__fd(map); map = bpf_object__find_map_by_name(obj, "linum_map"); CHECK(!map, "cannot find linum_map", "(null)"); linum_map_fd = bpf_map__fd(map); test(); bpf_object__close(obj); cleanup_cgroup_environment(); printf("PASS\n"); return 0; }
static int test_foo_bar(void) { int drop_prog, allow_prog, foo = 0, bar = 0, rc = 0; allow_prog = prog_load(1); if (!allow_prog) goto err; drop_prog = prog_load(0); if (!drop_prog) goto err; if (setup_cgroup_environment()) goto err; /* Create cgroup /foo, get fd, and join it */ foo = create_and_get_cgroup(FOO); if (foo < 0) goto err; if (join_cgroup(FOO)) goto err; if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE)) { log_err("Attaching prog to /foo"); goto err; } printf("Attached DROP prog. This ping in cgroup /foo should fail...\n"); assert(system(PING_CMD) != 0); /* Create cgroup /foo/bar, get fd, and join it */ bar = create_and_get_cgroup(BAR); if (bar < 0) goto err; if (join_cgroup(BAR)) goto err; printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n"); assert(system(PING_CMD) != 0); if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE)) { log_err("Attaching prog to /foo/bar"); goto err; } printf("Attached PASS prog. 
This ping in cgroup /foo/bar should pass...\n"); assert(system(PING_CMD) == 0); if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { log_err("Detaching program from /foo/bar"); goto err; } printf("Detached PASS from /foo/bar while DROP is attached to /foo.\n" "This ping in cgroup /foo/bar should fail...\n"); assert(system(PING_CMD) != 0); if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE)) { log_err("Attaching prog to /foo/bar"); goto err; } if (bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { log_err("Detaching program from /foo"); goto err; } printf("Attached PASS from /foo/bar and detached DROP from /foo.\n" "This ping in cgroup /foo/bar should pass...\n"); assert(system(PING_CMD) == 0); if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE)) { log_err("Attaching prog to /foo/bar"); goto err; } if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { errno = 0; log_err("Unexpected success attaching prog to /foo/bar"); goto err; } if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { log_err("Detaching program from /foo/bar"); goto err; } if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { errno = 0; log_err("Unexpected success in double detach from /foo"); goto err; } if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { log_err("Attaching non-overridable prog to /foo"); goto err; } if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { errno = 0; log_err("Unexpected success attaching non-overridable prog to /foo/bar"); goto err; } if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE)) { errno = 0; log_err("Unexpected success attaching overridable prog to /foo/bar"); goto err; } if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE)) { errno = 0; log_err("Unexpected success attaching overridable prog to /foo"); goto err; } if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { log_err("Attaching 
different non-overridable prog to /foo"); goto err; } goto out; err: rc = 1; out: close(foo); close(bar); cleanup_cgroup_environment(); if (!rc) printf("### override:PASS\n"); else printf("### override:FAIL\n"); return rc; }
static int test_multiprog(void) { __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id; int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0; int drop_prog, allow_prog[6] = {}, rc = 0; unsigned long long value; int i = 0; for (i = 0; i < 6; i++) { allow_prog[i] = prog_load_cnt(1, 1 << i); if (!allow_prog[i]) goto err; } drop_prog = prog_load_cnt(0, 1); if (!drop_prog) goto err; if (setup_cgroup_environment()) goto err; cg1 = create_and_get_cgroup("/cg1"); if (cg1 < 0) goto err; cg2 = create_and_get_cgroup("/cg1/cg2"); if (cg2 < 0) goto err; cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); if (cg3 < 0) goto err; cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); if (cg4 < 0) goto err; cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); if (cg5 < 0) goto err; if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) goto err; if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI)) { log_err("Attaching prog to cg1"); goto err; } if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI)) { log_err("Unexpected success attaching the same prog to cg1"); goto err; } if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI)) { log_err("Attaching prog2 to cg1"); goto err; } if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE)) { log_err("Attaching prog to cg2"); goto err; } if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI)) { log_err("Attaching prog to cg3"); goto err; } if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE)) { log_err("Attaching prog to cg4"); goto err; } if (bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0)) { log_err("Attaching prog to cg5"); goto err; } assert(system(PING_CMD) == 0); assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); assert(value == 1 + 2 + 8 + 32); /* query the number of effective progs in cg5 */ assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 
BPF_F_QUERY_EFFECTIVE, NULL, NULL, &prog_cnt) == 0); assert(prog_cnt == 4); /* retrieve prog_ids of effective progs in cg5 */ assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, &attach_flags, prog_ids, &prog_cnt) == 0); assert(prog_cnt == 4); assert(attach_flags == 0); saved_prog_id = prog_ids[0]; /* check enospc handling */ prog_ids[0] = 0; prog_cnt = 2; assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, &attach_flags, prog_ids, &prog_cnt) == -1 && errno == ENOSPC); assert(prog_cnt == 4); /* check that prog_ids are returned even when buffer is too small */ assert(prog_ids[0] == saved_prog_id); /* retrieve prog_id of single attached prog in cg5 */ prog_ids[0] = 0; assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL, prog_ids, &prog_cnt) == 0); assert(prog_cnt == 1); assert(prog_ids[0] == saved_prog_id); /* detach bottom program and ping again */ if (bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS)) { log_err("Detaching prog from cg5"); goto err; } value = 0; assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); assert(system(PING_CMD) == 0); assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); assert(value == 1 + 2 + 8 + 16); /* detach 3rd from bottom program and ping again */ errno = 0; if (!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS)) { log_err("Unexpected success on detach from cg3"); goto err; } if (bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS)) { log_err("Detaching from cg3"); goto err; } value = 0; assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); assert(system(PING_CMD) == 0); assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); assert(value == 1 + 2 + 16); /* detach 2nd from bottom program and ping again */ if (bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS)) { log_err("Detaching prog from cg4"); goto err; } value = 0; assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0); assert(system(PING_CMD) == 0); assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0); 
assert(value == 1 + 2 + 4); prog_cnt = 4; assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE, &attach_flags, prog_ids, &prog_cnt) == 0); assert(prog_cnt == 3); assert(attach_flags == 0); assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL, prog_ids, &prog_cnt) == 0); assert(prog_cnt == 0); goto out; err: rc = 1; out: for (i = 0; i < 6; i++) if (allow_prog[i] > 0) close(allow_prog[i]); close(cg1); close(cg2); close(cg3); close(cg4); close(cg5); cleanup_cgroup_environment(); if (!rc) printf("### multi:PASS\n"); else printf("### multi:FAIL\n"); return rc; }
int main(int argc, char **argv) { struct percpu_net_cnt *percpu_netcnt; struct bpf_cgroup_storage_key key; int map_fd, percpu_map_fd; int error = EXIT_FAILURE; struct net_cnt netcnt; struct bpf_object *obj; int prog_fd, cgroup_fd; unsigned long packets; unsigned long bytes; int cpu, nproc; __u32 prog_cnt; nproc = get_nprocs_conf(); percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc); if (!percpu_netcnt) { printf("Not enough memory for per-cpu area (%d cpus)\n", nproc); goto err; } if (bpf_prog_load(BPF_PROG, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd)) { printf("Failed to load bpf program\n"); goto out; } if (setup_cgroup_environment()) { printf("Failed to load bpf program\n"); goto err; } /* Create a cgroup, get fd, and join it */ cgroup_fd = create_and_get_cgroup(TEST_CGROUP); if (!cgroup_fd) { printf("Failed to create test cgroup\n"); goto err; } if (join_cgroup(TEST_CGROUP)) { printf("Failed to join cgroup\n"); goto err; } /* Attach bpf program */ if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) { printf("Failed to attach bpf program"); goto err; } if (system("which ping6 &>/dev/null") == 0) assert(!system("ping6 localhost -c 10000 -f -q > /dev/null")); else assert(!system("ping -6 localhost -c 10000 -f -q > /dev/null")); if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL, &prog_cnt)) { printf("Failed to query attached programs"); goto err; } map_fd = bpf_find_map(__func__, obj, "netcnt"); if (map_fd < 0) { printf("Failed to find bpf map with net counters"); goto err; } percpu_map_fd = bpf_find_map(__func__, obj, "percpu_netcnt"); if (percpu_map_fd < 0) { printf("Failed to find bpf map with percpu net counters"); goto err; } if (bpf_map_get_next_key(map_fd, NULL, &key)) { printf("Failed to get key in cgroup storage\n"); goto err; } if (bpf_map_lookup_elem(map_fd, &key, &netcnt)) { printf("Failed to lookup cgroup storage\n"); goto err; } if (bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0])) { printf("Failed to 
lookup percpu cgroup storage\n"); goto err; } /* Some packets can be still in per-cpu cache, but not more than * MAX_PERCPU_PACKETS. */ packets = netcnt.packets; bytes = netcnt.bytes; for (cpu = 0; cpu < nproc; cpu++) { if (percpu_netcnt[cpu].packets > MAX_PERCPU_PACKETS) { printf("Unexpected percpu value: %llu\n", percpu_netcnt[cpu].packets); goto err; } packets += percpu_netcnt[cpu].packets; bytes += percpu_netcnt[cpu].bytes; } /* No packets should be lost */ if (packets != 10000) { printf("Unexpected packet count: %lu\n", packets); goto err; } /* Let's check that bytes counter matches the number of packets * multiplied by the size of ipv6 ICMP packet. */ if (bytes != packets * 104) { printf("Unexpected bytes count: %lu\n", bytes); goto err; } error = 0; printf("test_netcnt:PASS\n"); err: cleanup_cgroup_environment(); free(percpu_netcnt); out: return error; }
int main(int argc, char **argv) { const char *file = "test_tcpbpf_kern.o"; int prog_fd, map_fd, sock_map_fd; struct tcpbpf_globals g = {0}; const char *cg_path = "/foo"; int error = EXIT_FAILURE; struct bpf_object *obj; int cg_fd = -1; __u32 key = 0; int rv; if (setup_cgroup_environment()) goto err; cg_fd = create_and_get_cgroup(cg_path); if (cg_fd < 0) goto err; if (join_cgroup(cg_path)) goto err; if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) { printf("FAILED: load_bpf_file failed for: %s\n", file); goto err; } rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0); if (rv) { printf("FAILED: bpf_prog_attach: %d (%s)\n", error, strerror(errno)); goto err; } if (system("./tcp_server.py")) { printf("FAILED: TCP server\n"); goto err; } map_fd = bpf_find_map(__func__, obj, "global_map"); if (map_fd < 0) goto err; sock_map_fd = bpf_find_map(__func__, obj, "sockopt_results"); if (sock_map_fd < 0) goto err; rv = bpf_map_lookup_elem(map_fd, &key, &g); if (rv != 0) { printf("FAILED: bpf_map_lookup_elem returns %d\n", rv); goto err; } if (verify_result(&g)) { printf("FAILED: Wrong stats\n"); goto err; } if (verify_sockopt_result(sock_map_fd)) { printf("FAILED: Wrong sockopt stats\n"); goto err; } printf("PASSED!\n"); error = 0; err: bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS); close(cg_fd); cleanup_cgroup_environment(); return error; }
int main(int argc, char **argv) { pid_t remote_pid, local_pid = getpid(); int cg2, idx = 0, rc = 0; char filename[256]; snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); if (load_bpf_file(filename)) { printf("%s", bpf_log_buf); return 1; } if (setup_cgroup_environment()) goto err; cg2 = create_and_get_cgroup(CGROUP_PATH); if (!cg2) goto err; if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) { log_err("Adding target cgroup to map"); goto err; } if (join_cgroup(CGROUP_PATH)) goto err; /* * The installed helper program catched the sync call, and should * write it to the map. */ sync(); bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid); if (local_pid != remote_pid) { fprintf(stderr, "BPF Helper didn't write correct PID to map, but: %d\n", remote_pid); goto err; } /* Verify the negative scenario; leave the cgroup */ if (join_cgroup("/")) goto err; remote_pid = 0; bpf_map_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY); sync(); bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid); if (local_pid == remote_pid) { fprintf(stderr, "BPF cgroup negative test did not work\n"); goto err; } goto out; err: rc = 1; out: close(cg2); cleanup_cgroup_environment(); return rc; }
int main(int argc, char **argv) { const char *file = "test_tcpnotify_kern.o"; int prog_fd, map_fd, perf_event_fd; struct tcpnotify_globals g = {0}; const char *cg_path = "/foo"; int error = EXIT_FAILURE; struct bpf_object *obj; int cg_fd = -1; __u32 key = 0; int rv; char test_script[80]; int pmu_fd; cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET(0, &cpuset); pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset); if (setup_cgroup_environment()) goto err; cg_fd = create_and_get_cgroup(cg_path); if (!cg_fd) goto err; if (join_cgroup(cg_path)) goto err; if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) { printf("FAILED: load_bpf_file failed for: %s\n", file); goto err; } rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0); if (rv) { printf("FAILED: bpf_prog_attach: %d (%s)\n", error, strerror(errno)); goto err; } perf_event_fd = bpf_find_map(__func__, obj, "perf_event_map"); if (perf_event_fd < 0) goto err; map_fd = bpf_find_map(__func__, obj, "global_map"); if (map_fd < 0) goto err; pmu_fd = setup_bpf_perf_event(perf_event_fd); if (pmu_fd < 0 || perf_event_mmap(pmu_fd) < 0) goto err; pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd); sprintf(test_script, "/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP", TESTPORT); system(test_script); sprintf(test_script, "/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ", TESTPORT); system(test_script); sprintf(test_script, "/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP", TESTPORT); system(test_script); rv = bpf_map_lookup_elem(map_fd, &key, &g); if (rv != 0) { printf("FAILED: bpf_map_lookup_elem returns %d\n", rv); goto err; } sleep(10); if (verify_result(&g)) { printf("FAILED: Wrong stats Expected %d calls, got %d\n", g.ncalls, rx_callbacks); goto err; } printf("PASSED!\n"); error = 0; err: bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS); close(cg_fd); cleanup_cgroup_environment(); return error; }