static void test_pkt_access(void)
{
    const char *file = "./test_pkt_access.o";
    struct bpf_object *obj;
    __u32 duration, retval;
    int err, prog_fd;

    err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
    if (err) {
        error_cnt++;
        return;
    }

    err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
                            NULL, NULL, &retval, &duration);
    CHECK(err || errno || retval, "ipv4",
          "err %d errno %d retval %d duration %d\n",
          err, errno, retval, duration);

    err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
                            NULL, NULL, &retval, &duration);
    CHECK(err || errno || retval, "ipv6",
          "err %d errno %d retval %d duration %d\n",
          err, errno, retval, duration);

    bpf_object__close(obj);
}
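/*
 * The pkt_v4/pkt_v6 buffers fed to bpf_prog_test_run() above come from the
 * surrounding test harness and are not shown here. A minimal sketch of
 * plausible fixtures, consistent with the length and protocol checks in the
 * tests in this collection (bpf_htons() is the constant-foldable byte-swap
 * helper from the selftests' bpf_endian.h; the MAGIC_BYTES value and the
 * urg_ptr marker are assumptions, not necessarily the harness's exact
 * definitions):
 */
#define MAGIC_BYTES 123
static struct {
    struct ethhdr eth;
    struct iphdr iph;
    struct tcphdr tcp;
} __attribute__((packed)) pkt_v4 = {
    .eth.h_proto = bpf_htons(ETH_P_IP),
    .iph.ihl = 5,                       /* 20-byte IPv4 header, no options */
    .iph.protocol = IPPROTO_TCP,
    .iph.tot_len = bpf_htons(MAGIC_BYTES),
    .tcp.urg_ptr = 123,                 /* marker the test programs can check */
};
static struct {
    struct ethhdr eth;
    struct ipv6hdr iph;
    struct tcphdr tcp;
} __attribute__((packed)) pkt_v6 = {
    .eth.h_proto = bpf_htons(ETH_P_IPV6),
    .iph.nexthdr = IPPROTO_TCP,
    .iph.payload_len = bpf_htons(MAGIC_BYTES),
    .tcp.urg_ptr = 123,
};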
static int prog_load(int map_fd, int verdict)
{
    struct bpf_insn prog[] = {
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), /* save r6 so it's not clobbered by BPF_CALL */

        /* Count packets */
        BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_PACKETS), /* r0 = 0 */
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
        BPF_LD_MAP_FD(BPF_REG_1, map_fd), /* load map fd to r1 */
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */

        /* Count bytes */
        BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
        BPF_LD_MAP_FD(BPF_REG_1, map_fd),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */

        BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
        BPF_EXIT_INSN(),
    };

    return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB, prog, sizeof(prog),
                         "GPL", 0);
}
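/*
 * A minimal sketch (not part of the original sample) of how the fd returned
 * by prog_load() above could be attached to a cgroup so the counters start
 * ticking. This uses the raw bpf(2) BPF_PROG_ATTACH command; cgroup_fd is
 * assumed to be an open fd for a cgroup v2 directory.
 */
static int attach_to_cgroup(int prog_fd, int cgroup_fd)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.target_fd = cgroup_fd;                /* cgroup to attach to */
    attr.attach_bpf_fd = prog_fd;              /* program from prog_load() */
    attr.attach_type = BPF_CGROUP_INET_EGRESS; /* count egress traffic */

    return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}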
int bpf_prog_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
                  int prog_len, const char *license, unsigned kern_version,
                  char *log_buf, unsigned log_buf_size)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.prog_type = prog_type;
    attr.insns = ptr_to_u64((void *) insns);
    attr.insn_cnt = prog_len / sizeof(struct bpf_insn);
    attr.license = ptr_to_u64((void *) license);
    attr.log_buf = ptr_to_u64(log_buf);
    attr.log_size = log_buf_size;
    attr.log_level = log_buf ? 1 : 0;
    attr.kern_version = kern_version;

    if (log_buf)
        log_buf[0] = 0;

    int ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));

    if (ret < 0 && !log_buf) {
        // caller did not specify log_buf, but the failure should be printed,
        // so call recursively and print the result to stderr
        bpf_prog_load(prog_type, insns, prog_len, license, kern_version,
                      bpf_log_buf, LOG_BUF_SIZE);
        fprintf(stderr, "bpf: %s\n%s\n", strerror(errno), bpf_log_buf);
    }
    return ret;
}
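/*
 * Hedged usage sketch for the wrapper above: load the smallest valid program
 * (r0 = 0; exit) as a socket filter. The instruction macros are the ones
 * defined later in this collection; error handling is elided.
 */
static int load_trivial_prog(void)
{
    struct bpf_insn trivial[] = {
        BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0: filter keeps 0 bytes */
        BPF_EXIT_INSN(),
    };

    /* NULL log_buf: on failure the wrapper retries with its own buffer
     * and prints the verifier log to stderr */
    return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, trivial,
                         sizeof(trivial), "GPL", 0, NULL, 0);
}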
static void test_tcp_estats(void)
{
    const char *file = "./test_tcp_estats.o";
    int err, prog_fd;
    struct bpf_object *obj;
    __u32 duration = 0;

    err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
    CHECK(err, "", "err %d errno %d\n", err, errno);
    if (err) {
        error_cnt++;
        return;
    }

    bpf_object__close(obj);
}
int bpf_prog_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
                  int prog_len, const char *license, unsigned kern_version,
                  char *log_buf, unsigned log_buf_size)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.prog_type = prog_type;
    attr.insns = ptr_to_u64((void *) insns);
    attr.insn_cnt = prog_len / sizeof(struct bpf_insn);
    attr.license = ptr_to_u64((void *) license);
    attr.log_buf = ptr_to_u64(log_buf);
    attr.log_size = log_buf_size;
    attr.log_level = log_buf ? 1 : 0;
    attr.kern_version = kern_version;

    if (log_buf)
        log_buf[0] = 0;

    int ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));

    if (ret < 0 && errno == EPERM) {
        // When EPERM is returned, two reasons are possible:
        //  1. user has no permissions for bpf()
        //  2. user has insufficient rlimit for locked memory
        // Unfortunately, there is no api to inspect the current usage of
        // locked mem for the user, so accurately calculating how much memory
        // to lock for this new program is difficult. As a hack, bump the
        // limit to unlimited. If program load fails again, return the error.
        struct rlimit rl = {};

        if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0) {
            rl.rlim_max = RLIM_INFINITY;
            rl.rlim_cur = rl.rlim_max;
            if (setrlimit(RLIMIT_MEMLOCK, &rl) == 0)
                ret = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
        }
    }

    if (ret < 0 && !log_buf) {
        // caller did not specify log_buf, but the failure should be printed,
        // so call recursively and print the result to stderr
        bpf_prog_load(prog_type, insns, prog_len, license, kern_version,
                      bpf_log_buf, LOG_BUF_SIZE);
        fprintf(stderr, "bpf: %s\n%s\n", strerror(errno), bpf_log_buf);
    }
    return ret;
}
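/*
 * The EPERM fallback above retries only after the first load fails. A common
 * alternative (a minimal sketch, not from this source) is to raise
 * RLIMIT_MEMLOCK once, up front, before any bpf(2) calls:
 */
static void bump_memlock_rlimit(void)
{
    struct rlimit rl = {
        .rlim_cur = RLIM_INFINITY,
        .rlim_max = RLIM_INFINITY,
    };

    /* Raising the hard limit requires CAP_SYS_RESOURCE (or root); ignore
     * failure and let the later BPF_PROG_LOAD report EPERM if the limit
     * still bites. */
    if (setrlimit(RLIMIT_MEMLOCK, &rl))
        perror("setrlimit(RLIMIT_MEMLOCK)");
}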
void test_xdp(void)
{
    struct vip key4 = {.protocol = 6, .family = AF_INET};
    struct vip key6 = {.protocol = 6, .family = AF_INET6};
    struct iptnl_info value4 = {.family = AF_INET};
    struct iptnl_info value6 = {.family = AF_INET6};
    const char *file = "./test_xdp.o";
    struct bpf_object *obj;
    char buf[128];
    struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
    struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
    __u32 duration, retval, size;
    int err, prog_fd, map_fd;

    err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
    if (err) {
        error_cnt++;
        return;
    }

    map_fd = bpf_find_map(__func__, obj, "vip2tnl");
    if (map_fd < 0)
        goto out;
    bpf_map_update_elem(map_fd, &key4, &value4, 0);
    bpf_map_update_elem(map_fd, &key6, &value6, 0);

    /* The program encapsulates the packet, so the 54-byte IPv4 input should
     * come back as XDP_TX with a 20-byte outer IPv4 header added (74 bytes). */
    err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
                            buf, &size, &retval, &duration);
    CHECK(err || retval != XDP_TX || size != 74 ||
          iph->protocol != IPPROTO_IPIP, "ipv4",
          "err %d errno %d retval %d size %d\n",
          err, errno, retval, size);

    /* Likewise, the 74-byte IPv6 input gains a 40-byte outer IPv6 header. */
    err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
                            buf, &size, &retval, &duration);
    CHECK(err || retval != XDP_TX || size != 114 ||
          iph6->nexthdr != IPPROTO_IPV6, "ipv6",
          "err %d errno %d retval %d size %d\n",
          err, errno, retval, size);
out:
    bpf_object__close(obj);
}
static int __kprobe_setup(node_t *probe, prog_t *prog, const char *type)
{
    kprobe_t *kp;
    char *func;

    kp = malloc(sizeof(*kp));
    assert(kp);

    kp->type = type;
    kp->efds.fds = calloc(1, sizeof(*kp->efds.fds));
    assert(kp->efds.fds);
    kp->efds.cap = 1;
    kp->efds.len = 0;
    probe->dyn.probe.pvdr_priv = kp;

    _d("");

    kp->ctrl = fopen("/sys/kernel/debug/tracing/kprobe_events", "a+");
    if (!kp->ctrl) {
        perror("unable to open kprobe_events");
        return -EIO;
    }

    kp->bfd = bpf_prog_load(prog->insns, prog->ip - prog->insns);
    if (kp->bfd < 0) {
        perror("bpf");
        fprintf(stderr, "bpf verifier:\n%s\n", bpf_log_buf);
        return -EINVAL;
    }

    func = strchr(probe->string, ':') + 1;
    if (strchr(func, '?') || strchr(func, '*'))
        return kprobe_attach_pattern(kp, func);
    else
        return kprobe_attach_one(kp, func);
}
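/*
 * kprobe_attach_one()/kprobe_attach_pattern() are not shown above. A hedged
 * sketch of the registration step they presumably perform through kp->ctrl:
 * a new kprobe is created by appending a "p:<group>/<name> <symbol>" line to
 * tracing/kprobe_events. The line format is the documented kprobe_events
 * syntax; the group name and this helper are illustrative, not the project's
 * actual code.
 */
static int kprobe_register(FILE *ctrl, const char *func)
{
    if (fprintf(ctrl, "p:myprobes/%s %s\n", func, func) < 0)
        return -errno;
    return fflush(ctrl) ? -errno : 0;
}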
int bpf_prog_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
                  int prog_len, const char *license, int kern_version)
{
    union bpf_attr attr = {
        .prog_type = prog_type,
        .insns = ptr_to_u64((void *) insns),
        .insn_cnt = prog_len / sizeof(struct bpf_insn),
        .license = ptr_to_u64((void *) license),
        .log_buf = ptr_to_u64(bpf_log_buf),
        .log_size = LOG_BUF_SIZE,
        .log_level = 1,
    };

    attr.kern_version = kern_version;
    bpf_log_buf[0] = 0;

    return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
                   int max_entries, int map_flags)
{
    union bpf_attr attr = {
        .map_type = map_type,
        .key_size = key_size,
        .value_size = value_size,
        .max_entries = max_entries,
        .map_flags = map_flags,
    };

    return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags)
{
    union bpf_attr attr = {
        .map_fd = fd,
        .key = ptr_to_u64(key),
        .value = ptr_to_u64(value),
        .flags = flags,
    };

    return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int bpf_lookup_elem(int fd, void *key, void *value)
{
    union bpf_attr attr = {
        .map_fd = fd,
        .key = ptr_to_u64(key),
        .value = ptr_to_u64(value),
    };

    return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

#define BPF_ALU64_IMM(OP, DST, IMM) \
    ((struct bpf_insn) { \
        .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
        .dst_reg = DST, \
        .src_reg = 0, \
        .off = 0, \
        .imm = IMM })

#define BPF_MOV64_REG(DST, SRC) \
    ((struct bpf_insn) { \
        .code = BPF_ALU64 | BPF_MOV | BPF_X, \
        .dst_reg = DST, \
        .src_reg = SRC, \
        .off = 0, \
        .imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
    ((struct bpf_insn) { \
        .code = BPF_ALU | BPF_MOV | BPF_X, \
        .dst_reg = DST, \
        .src_reg = SRC, \
        .off = 0, \
        .imm = 0 })

#define BPF_MOV64_IMM(DST, IMM) \
    ((struct bpf_insn) { \
        .code = BPF_ALU64 | BPF_MOV | BPF_K, \
        .dst_reg = DST, \
        .src_reg = 0, \
        .off = 0, \
        .imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
    ((struct bpf_insn) { \
        .code = BPF_ALU | BPF_MOV | BPF_K, \
        .dst_reg = DST, \
        .src_reg = 0, \
        .off = 0, \
        .imm = IMM })

#define BPF_LD_IMM64(DST, IMM) \
    BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
    ((struct bpf_insn) { \
        .code = BPF_LD | BPF_DW | BPF_IMM, \
        .dst_reg = DST, \
        .src_reg = SRC, \
        .off = 0, \
        .imm = (__u32) (IMM) }), \
    ((struct bpf_insn) { \
        .code = 0, \
        .dst_reg = 0, \
        .src_reg = 0, \
        .off = 0, \
        .imm = ((__u64) (IMM)) >> 32 })

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD 1
#endif

#define BPF_LD_MAP_FD(DST, MAP_FD) \
    BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
    ((struct bpf_insn) { \
        .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
        .dst_reg = DST, \
        .src_reg = SRC, \
        .off = OFF, \
        .imm = 0 })

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
    ((struct bpf_insn) { \
        .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
        .dst_reg = DST, \
        .src_reg = SRC, \
        .off = OFF, \
        .imm = 0 })

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
    ((struct bpf_insn) { \
        .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
        .dst_reg = DST, \
        .src_reg = 0, \
        .off = OFF, \
        .imm = IMM })

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
    ((struct bpf_insn) { \
        .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
        .dst_reg = DST, \
        .src_reg = 0, \
        .off = OFF, \
        .imm = IMM })

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
    ((struct bpf_insn) { \
        .code = CODE, \
        .dst_reg = DST, \
        .src_reg = SRC, \
        .off = OFF, \
        .imm = IMM })

#define BPF_EXIT_INSN() \
    ((struct bpf_insn) { \
        .code = BPF_JMP | BPF_EXIT, \
        .dst_reg = 0, \
        .src_reg = 0, \
        .off = 0, \
        .imm = 0 })
#define BPF_DISABLE_VERIFIER() \
    BPF_MOV32_IMM(BPF_REG_2, 0xFFFFFFFF),           /* r2 = (u32)0xFFFFFFFF */ \
    BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0xFFFFFFFF, 2), /* if (r2 == -1) {      */ \
    BPF_MOV64_IMM(BPF_REG_0, 0),                    /*   exit(0);           */ \
    BPF_EXIT_INSN()                                 /* }                    */

#define BPF_MAP_GET(idx, dst) \
    BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),    /* r1 = r9                */ \
    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   /* r2 = fp                */ \
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),  /* r2 = fp - 4            */ \
    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, idx), /* *(u32 *)(fp - 4) = idx */ \
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), \
    BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),  /* if (r0 == 0)           */ \
    BPF_EXIT_INSN(),                        /*   exit(0);             */ \
    BPF_LDX_MEM(BPF_DW, (dst), BPF_REG_0, 0) /* r_dst = *(u64 *)(r0)  */

static int load_prog()
{
    struct bpf_insn prog[] = {
        BPF_DISABLE_VERIFIER(),

        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -16), /* *(fp - 16) = r1 */

        BPF_LD_MAP_FD(BPF_REG_9, mapfd),

        BPF_MAP_GET(0, BPF_REG_6), /* r6 = op */
        BPF_MAP_GET(1, BPF_REG_7), /* r7 = address */
        BPF_MAP_GET(2, BPF_REG_8), /* r8 = value */

        /* store map slot address in r2 */
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), /* r2 = r0 */
        BPF_MOV64_IMM(BPF_REG_0, 0),         /* r0 = 0 for exit(0) */

        BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 2), /* if (op == 0) */
        /* get fp */
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, 0),
        BPF_EXIT_INSN(),

        BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 1, 3), /* else if (op == 1) */
        /* get skbuff */
        BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
        BPF_EXIT_INSN(),

        BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 2, 3), /* else if (op == 2) */
        /* read */
        BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_7, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
        BPF_EXIT_INSN(),

        /* else */
        /* write */
        BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
        BPF_EXIT_INSN(),
    };

    return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, sizeof(prog),
                         "GPL", 0);
}

void info(const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    fprintf(stdout, "[.] ");
    vfprintf(stdout, fmt, args);
    va_end(args);
}
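/*
 * load_prog() above references a global mapfd created elsewhere. A plausible
 * setup, consistent with the three BPF_MAP_GET(0..2) lookups of 8-byte
 * values, is an array map with (at least) three u64 slots; this is an
 * assumption about the omitted code, sketched with the bpf_create_map()
 * helper defined earlier in this collection:
 */
static int setup_map(void)
{
    /* key 0 = op, key 1 = address, key 2 = value/result slot */
    return bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                          sizeof(long long), 3, 0);
}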
static void test_bpf_obj_id(void)
{
    const __u64 array_magic_value = 0xfaceb00c;
    const __u32 array_key = 0;
    const int nr_iters = 2;
    const char *file = "./test_obj_id.o";
    const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
    const char *expected_prog_name = "test_obj_id";
    const char *expected_map_name = "test_map_id";
    const __u64 nsec_per_sec = 1000000000;
    struct bpf_object *objs[nr_iters];
    int prog_fds[nr_iters], map_fds[nr_iters];
    /* +1 to test for the info_len returned by kernel */
    struct bpf_prog_info prog_infos[nr_iters + 1];
    struct bpf_map_info map_infos[nr_iters + 1];
    /* Each prog only uses one map. +1 to test nr_map_ids
     * returned by kernel.
     */
    __u32 map_ids[nr_iters + 1];
    char jited_insns[128], xlated_insns[128], zeros[128];
    __u32 i, next_id, info_len, nr_id_found, duration = 0;
    struct timespec real_time_ts, boot_time_ts;
    int sysctl_fd, jit_enabled = 0, err = 0;
    __u64 array_value;
    uid_t my_uid = getuid();
    time_t now, load_time;

    sysctl_fd = open(jit_sysctl, O_RDONLY);
    if (sysctl_fd != -1) {
        char tmpc;

        if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
            jit_enabled = (tmpc != '0');
        close(sysctl_fd);
    }

    err = bpf_prog_get_fd_by_id(0);
    CHECK(err >= 0 || errno != ENOENT,
          "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

    err = bpf_map_get_fd_by_id(0);
    CHECK(err >= 0 || errno != ENOENT,
          "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

    for (i = 0; i < nr_iters; i++)
        objs[i] = NULL;

    /* Check bpf_obj_get_info_by_fd() */
    bzero(zeros, sizeof(zeros));
    for (i = 0; i < nr_iters; i++) {
        now = time(NULL);
        err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
                            &objs[i], &prog_fds[i]);
        /* test_obj_id.o is a dumb prog. It should never fail
         * to load.
         */
        if (err)
            error_cnt++;
        assert(!err);

        /* Insert a magic value to the map */
        map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
        assert(map_fds[i] >= 0);
        err = bpf_map_update_elem(map_fds[i], &array_key,
                                  &array_magic_value, 0);
        assert(!err);

        /* Check getting map info */
        info_len = sizeof(struct bpf_map_info) * 2;
        bzero(&map_infos[i], info_len);
        err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
                                     &info_len);
        if (CHECK(err ||
                  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
                  map_infos[i].key_size != sizeof(__u32) ||
                  map_infos[i].value_size != sizeof(__u64) ||
                  map_infos[i].max_entries != 1 ||
                  map_infos[i].map_flags != 0 ||
                  info_len != sizeof(struct bpf_map_info) ||
                  strcmp((char *)map_infos[i].name, expected_map_name),
                  "get-map-info(fd)",
                  "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
                  err, errno,
                  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
                  info_len, sizeof(struct bpf_map_info),
                  map_infos[i].key_size,
                  map_infos[i].value_size,
                  map_infos[i].max_entries,
                  map_infos[i].map_flags,
                  map_infos[i].name, expected_map_name))
            goto done;

        /* Check getting prog info */
        info_len = sizeof(struct bpf_prog_info) * 2;
        bzero(&prog_infos[i], info_len);
        bzero(jited_insns, sizeof(jited_insns));
        bzero(xlated_insns, sizeof(xlated_insns));
        prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
        prog_infos[i].jited_prog_len = sizeof(jited_insns);
        prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
        prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
        prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
        prog_infos[i].nr_map_ids = 2;
        err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
        assert(!err);
        err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
        assert(!err);
        err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
                                     &info_len);
        load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
                    (prog_infos[i].load_time / nsec_per_sec);
        if (CHECK(err ||
                  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
                  info_len != sizeof(struct bpf_prog_info) ||
                  (jit_enabled && !prog_infos[i].jited_prog_len) ||
                  (jit_enabled &&
                   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
                  !prog_infos[i].xlated_prog_len ||
                  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
                  load_time < now - 60 || load_time > now + 60 ||
                  prog_infos[i].created_by_uid != my_uid ||
                  prog_infos[i].nr_map_ids != 1 ||
                  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
                  strcmp((char *)prog_infos[i].name, expected_prog_name),
                  "get-prog-info(fd)",
                  "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
                  err, errno, i,
                  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
                  info_len, sizeof(struct bpf_prog_info),
                  jit_enabled,
                  prog_infos[i].jited_prog_len,
                  prog_infos[i].xlated_prog_len,
                  !!memcmp(jited_insns, zeros, sizeof(zeros)),
                  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
                  load_time, now,
                  prog_infos[i].created_by_uid, my_uid,
                  prog_infos[i].nr_map_ids, 1,
                  *(int *)prog_infos[i].map_ids, map_infos[i].id,
                  prog_infos[i].name, expected_prog_name))
            goto done;
    }

    /* Check bpf_prog_get_next_id() */
    nr_id_found = 0;
    next_id = 0;
    while (!bpf_prog_get_next_id(next_id, &next_id)) {
        struct bpf_prog_info prog_info = {};
        __u32 saved_map_id;
        int prog_fd;

        info_len = sizeof(prog_info);

        prog_fd = bpf_prog_get_fd_by_id(next_id);
        if (prog_fd < 0 && errno == ENOENT)
            /* The bpf_prog is in the dead row */
            continue;
        if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
                  "prog_fd %d next_id %d errno %d\n",
                  prog_fd, next_id, errno))
            break;

        for (i = 0; i < nr_iters; i++)
            if (prog_infos[i].id == next_id)
                break;

        if (i == nr_iters)
            continue;

        nr_id_found++;

        /* Negative test:
         * prog_info.nr_map_ids = 1
         * prog_info.map_ids = NULL
         */
        prog_info.nr_map_ids = 1;
        err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
        if (CHECK(!err || errno != EFAULT,
                  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
                  err, errno, EFAULT))
            break;
        bzero(&prog_info, sizeof(prog_info));
        info_len = sizeof(prog_info);

        saved_map_id = *(int *)(prog_infos[i].map_ids);
        prog_info.map_ids = prog_infos[i].map_ids;
        prog_info.nr_map_ids = 2;
        err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
        prog_infos[i].jited_prog_insns = 0;
        prog_infos[i].xlated_prog_insns = 0;
        CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
              memcmp(&prog_info, &prog_infos[i], info_len) ||
              *(int *)prog_info.map_ids != saved_map_id,
              "get-prog-info(next_id->fd)",
              "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
              err, errno, info_len, sizeof(struct bpf_prog_info),
              memcmp(&prog_info, &prog_infos[i], info_len),
              *(int *)prog_info.map_ids, saved_map_id);
        close(prog_fd);
    }
    CHECK(nr_id_found != nr_iters,
          "check total prog id found by get_next_id",
          "nr_id_found %u(%u)\n",
          nr_id_found, nr_iters);

    /* Check bpf_map_get_next_id() */
    nr_id_found = 0;
    next_id = 0;
    while (!bpf_map_get_next_id(next_id, &next_id)) {
        struct bpf_map_info map_info = {};
        int map_fd;

        info_len = sizeof(map_info);

        map_fd = bpf_map_get_fd_by_id(next_id);
        if (map_fd < 0 && errno == ENOENT)
            /* The bpf_map is in the dead row */
            continue;
        if (CHECK(map_fd < 0, "get-map-fd(next_id)",
                  "map_fd %d next_id %u errno %d\n",
                  map_fd, next_id, errno))
            break;

        for (i = 0; i < nr_iters; i++)
            if (map_infos[i].id == next_id)
                break;

        if (i == nr_iters)
            continue;

        nr_id_found++;

        err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
        assert(!err);

        err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
        CHECK(err || info_len != sizeof(struct bpf_map_info) ||
              memcmp(&map_info, &map_infos[i], info_len) ||
              array_value != array_magic_value,
              "check get-map-info(next_id->fd)",
              "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
              err, errno, info_len, sizeof(struct bpf_map_info),
              memcmp(&map_info, &map_infos[i], info_len),
              array_value, array_magic_value);

        close(map_fd);
    }
    CHECK(nr_id_found != nr_iters,
          "check total map id found by get_next_id",
          "nr_id_found %u(%u)\n",
          nr_id_found, nr_iters);

done:
    for (i = 0; i < nr_iters; i++)
        bpf_object__close(objs[i]);
}
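/*
 * bpf_obj_get_info_by_fd() used throughout this test wraps the
 * BPF_OBJ_GET_INFO_BY_FD command. A minimal sketch of the raw syscall,
 * assuming the uapi attr.info layout (the kernel writes back how many
 * bytes of info it actually filled in):
 */
static int obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.info.bpf_fd = bpf_fd;
    attr.info.info_len = *info_len;
    attr.info.info = ptr_to_u64(info);

    int err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
    if (!err)
        *info_len = attr.info.info_len;
    return err;
}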
static void test_xdp(void)
{
    struct vip key4 = {.protocol = 6, .family = AF_INET};
    struct vip key6 = {.protocol = 6, .family = AF_INET6};
    struct iptnl_info value4 = {.family = AF_INET};
    struct iptnl_info value6 = {.family = AF_INET6};
    const char *file = "./test_xdp.o";
    struct bpf_object *obj;
    char buf[128];
    struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
    struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
    __u32 duration, retval, size;
    int err, prog_fd, map_fd;

    err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
    if (err) {
        error_cnt++;
        return;
    }

    map_fd = bpf_find_map(__func__, obj, "vip2tnl");
    if (map_fd < 0)
        goto out;
    bpf_map_update_elem(map_fd, &key4, &value4, 0);
    bpf_map_update_elem(map_fd, &key6, &value6, 0);

    err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
                            buf, &size, &retval, &duration);
    CHECK(err || errno || retval != XDP_TX || size != 74 ||
          iph->protocol != IPPROTO_IPIP, "ipv4",
          "err %d errno %d retval %d size %d\n",
          err, errno, retval, size);

    err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
                            buf, &size, &retval, &duration);
    CHECK(err || errno || retval != XDP_TX || size != 114 ||
          iph6->nexthdr != IPPROTO_IPV6, "ipv6",
          "err %d errno %d retval %d size %d\n",
          err, errno, retval, size);
out:
    bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5

static void test_l4lb(void)
{
    unsigned int nr_cpus = bpf_num_possible_cpus();
    const char *file = "./test_l4lb.o";
    struct vip key = {.protocol = 6};
    struct vip_meta {
        __u32 flags;
        __u32 vip_num;
    } value = {.vip_num = VIP_NUM};
    __u32 stats_key = VIP_NUM;
    struct vip_stats {
        __u64 bytes;
        __u64 pkts;
    } stats[nr_cpus];
    struct real_definition {
        union {
            __be32 dst;
            __be32 dstv6[4];
        };
        __u8 flags;
    } real_def = {.dst = MAGIC_VAL};
    __u32 ch_key = 11, real_num = 3;
    __u32 duration, retval, size;
    int err, i, prog_fd, map_fd;
    __u64 bytes = 0, pkts = 0;
    struct bpf_object *obj;
    char buf[128];
    __u32 *magic = (__u32 *)buf;

    err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
    if (err) {
        error_cnt++;
        return;
    }

    map_fd = bpf_find_map(__func__, obj, "vip_map");
    if (map_fd < 0)
        goto out;
    bpf_map_update_elem(map_fd, &key, &value, 0);

    map_fd = bpf_find_map(__func__, obj, "ch_rings");
    if (map_fd < 0)
        goto out;
    bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

    map_fd = bpf_find_map(__func__, obj, "reals");
    if (map_fd < 0)
        goto out;
    bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

    err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
                            buf, &size, &retval, &duration);
    CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
          *magic != MAGIC_VAL, "ipv4",
          "err %d errno %d retval %d size %d magic %x\n",
          err, errno, retval, size, *magic);

    err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
                            buf, &size, &retval, &duration);
    CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
          *magic != MAGIC_VAL, "ipv6",
          "err %d errno %d retval %d size %d magic %x\n",
          err, errno, retval, size, *magic);

    map_fd = bpf_find_map(__func__, obj, "stats");
    if (map_fd < 0)
        goto out;
    bpf_map_lookup_elem(map_fd, &stats_key, stats);
    for (i = 0; i < nr_cpus; i++) {
        bytes += stats[i].bytes;
        pkts += stats[i].pkts;
    }
    if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
        error_cnt++;
        printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
    }
out:
    bpf_object__close(obj);
}
int main(int argc, char **argv)
{
    struct percpu_net_cnt *percpu_netcnt;
    struct bpf_cgroup_storage_key key;
    int map_fd, percpu_map_fd;
    int error = EXIT_FAILURE;
    struct net_cnt netcnt;
    struct bpf_object *obj;
    int prog_fd, cgroup_fd;
    unsigned long packets;
    unsigned long bytes;
    int cpu, nproc;
    __u32 prog_cnt;

    nproc = get_nprocs_conf();
    percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc);
    if (!percpu_netcnt) {
        printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
        goto err;
    }

    if (bpf_prog_load(BPF_PROG, BPF_PROG_TYPE_CGROUP_SKB,
                      &obj, &prog_fd)) {
        printf("Failed to load bpf program\n");
        goto out;
    }

    if (setup_cgroup_environment()) {
        printf("Failed to setup cgroup environment\n");
        goto err;
    }

    /* Create a cgroup, get fd, and join it */
    cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
    if (!cgroup_fd) {
        printf("Failed to create test cgroup\n");
        goto err;
    }

    if (join_cgroup(TEST_CGROUP)) {
        printf("Failed to join cgroup\n");
        goto err;
    }

    /* Attach bpf program */
    if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
        printf("Failed to attach bpf program\n");
        goto err;
    }

    if (system("which ping6 &>/dev/null") == 0)
        assert(!system("ping6 localhost -c 10000 -f -q > /dev/null"));
    else
        assert(!system("ping -6 localhost -c 10000 -f -q > /dev/null"));

    if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
                       &prog_cnt)) {
        printf("Failed to query attached programs\n");
        goto err;
    }

    map_fd = bpf_find_map(__func__, obj, "netcnt");
    if (map_fd < 0) {
        printf("Failed to find bpf map with net counters\n");
        goto err;
    }

    percpu_map_fd = bpf_find_map(__func__, obj, "percpu_netcnt");
    if (percpu_map_fd < 0) {
        printf("Failed to find bpf map with percpu net counters\n");
        goto err;
    }

    if (bpf_map_get_next_key(map_fd, NULL, &key)) {
        printf("Failed to get key in cgroup storage\n");
        goto err;
    }

    if (bpf_map_lookup_elem(map_fd, &key, &netcnt)) {
        printf("Failed to lookup cgroup storage\n");
        goto err;
    }

    if (bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0])) {
        printf("Failed to lookup percpu cgroup storage\n");
        goto err;
    }

    /* Some packets can be still in per-cpu cache, but not more than
     * MAX_PERCPU_PACKETS.
     */
    packets = netcnt.packets;
    bytes = netcnt.bytes;
    for (cpu = 0; cpu < nproc; cpu++) {
        if (percpu_netcnt[cpu].packets > MAX_PERCPU_PACKETS) {
            printf("Unexpected percpu value: %llu\n",
                   percpu_netcnt[cpu].packets);
            goto err;
        }

        packets += percpu_netcnt[cpu].packets;
        bytes += percpu_netcnt[cpu].bytes;
    }

    /* No packets should be lost */
    if (packets != 10000) {
        printf("Unexpected packet count: %lu\n", packets);
        goto err;
    }

    /* Let's check that bytes counter matches the number of packets
     * multiplied by the size of ipv6 ICMP packet.
     */
    if (bytes != packets * 104) {
        printf("Unexpected bytes count: %lu\n", bytes);
        goto err;
    }

    error = 0;
    printf("test_netcnt:PASS\n");

err:
    cleanup_cgroup_environment();
    free(percpu_netcnt);

out:
    return error;
}
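/*
 * bpf_prog_query() above wraps the BPF_PROG_QUERY command. A minimal sketch
 * of the underlying syscall for the "just count attached programs" case used
 * in this test; the attr.query layout is the uapi one, but this helper
 * itself is illustrative and not part of the test:
 */
static int count_attached_progs(int cgroup_fd, __u32 *prog_cnt)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.query.target_fd = cgroup_fd;
    attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
    /* prog_ids left NULL: the kernel only fills in prog_cnt */

    int err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
    if (!err)
        *prog_cnt = attr.query.prog_cnt;
    return err;
}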
int main(int argc, char **argv)
{
    const char *file = "test_tcpbpf_kern.o";
    int prog_fd, map_fd, sock_map_fd;
    struct tcpbpf_globals g = {0};
    const char *cg_path = "/foo";
    int error = EXIT_FAILURE;
    struct bpf_object *obj;
    int cg_fd = -1;
    __u32 key = 0;
    int rv;

    if (setup_cgroup_environment())
        goto err;

    cg_fd = create_and_get_cgroup(cg_path);
    if (cg_fd < 0)
        goto err;

    if (join_cgroup(cg_path))
        goto err;

    if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
        printf("FAILED: load_bpf_file failed for: %s\n", file);
        goto err;
    }

    rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
    if (rv) {
        printf("FAILED: bpf_prog_attach: %d (%s)\n", rv, strerror(errno));
        goto err;
    }

    if (system("./tcp_server.py")) {
        printf("FAILED: TCP server\n");
        goto err;
    }

    map_fd = bpf_find_map(__func__, obj, "global_map");
    if (map_fd < 0)
        goto err;

    sock_map_fd = bpf_find_map(__func__, obj, "sockopt_results");
    if (sock_map_fd < 0)
        goto err;

    rv = bpf_map_lookup_elem(map_fd, &key, &g);
    if (rv != 0) {
        printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
        goto err;
    }

    if (verify_result(&g)) {
        printf("FAILED: Wrong stats\n");
        goto err;
    }

    if (verify_sockopt_result(sock_map_fd)) {
        printf("FAILED: Wrong sockopt stats\n");
        goto err;
    }

    printf("PASSED!\n");
    error = 0;

err:
    bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
    close(cg_fd);
    cleanup_cgroup_environment();
    return error;
}
static void sanitise_bpf(struct syscallrecord *rec)
{
    union bpf_attr *attr;
    unsigned long bpf_map_types[] = {
        BPF_MAP_TYPE_HASH,
        BPF_MAP_TYPE_ARRAY,
        BPF_MAP_TYPE_PROG_ARRAY,
        BPF_MAP_TYPE_PERF_EVENT_ARRAY,
        BPF_MAP_TYPE_PERCPU_HASH,
        BPF_MAP_TYPE_PERCPU_ARRAY,
        BPF_MAP_TYPE_STACK_TRACE,
        BPF_MAP_TYPE_CGROUP_ARRAY,
        BPF_MAP_TYPE_LRU_HASH,
        BPF_MAP_TYPE_LRU_PERCPU_HASH,
        BPF_MAP_TYPE_LPM_TRIE,
    };

    attr = zmalloc(sizeof(union bpf_attr));
    rec->a2 = (unsigned long) attr;

    switch (rec->a1) {
    case BPF_MAP_CREATE:
        attr->map_type = RAND_ARRAY(bpf_map_types);
        attr->key_size = rnd() % 1024;
        attr->value_size = rnd() % (1024 * 64);
        attr->max_entries = rnd() % 1024;
        attr->map_flags = RAND_RANGE(0, 4);
        rec->a3 = 20;
        break;

    case BPF_MAP_LOOKUP_ELEM:
        attr->map_fd = get_rand_bpf_fd();
        attr->key = RAND_RANGE(0, 10);
        attr->value = rnd();
        rec->a3 = 32;
        break;

    case BPF_MAP_UPDATE_ELEM:
        attr->map_fd = get_rand_bpf_fd();
        attr->key = RAND_RANGE(0, 10);
        attr->value = rnd();
        attr->next_key = rnd();
        attr->flags = RAND_RANGE(0, 4);
        rec->a3 = 32;
        break;

    case BPF_MAP_DELETE_ELEM:
        attr->map_fd = get_rand_bpf_fd();
        attr->key = RAND_RANGE(0, 10);
        rec->a3 = 32;
        break;

    case BPF_MAP_GET_NEXT_KEY:
        attr->map_fd = get_rand_bpf_fd();
        attr->key = RAND_RANGE(0, 10);
        attr->value = rnd();
        rec->a3 = 32;
        break;

    case BPF_OBJ_PIN:
    case BPF_OBJ_GET:
        attr->map_fd = get_rand_bpf_fd();
        rec->a3 = 32;
        break;

    case BPF_PROG_LOAD:
        bpf_prog_load(attr);
        rec->a3 = 48;
        break;

    default:
        break;
    }
}
int main(int argc, char **argv)
{
    const char *file = "test_tcpnotify_kern.o";
    int prog_fd, map_fd, perf_event_fd;
    struct tcpnotify_globals g = {0};
    const char *cg_path = "/foo";
    int error = EXIT_FAILURE;
    struct bpf_object *obj;
    int cg_fd = -1;
    __u32 key = 0;
    int rv;
    char test_script[80];
    int pmu_fd;
    cpu_set_t cpuset;

    CPU_ZERO(&cpuset);
    CPU_SET(0, &cpuset);
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

    if (setup_cgroup_environment())
        goto err;

    cg_fd = create_and_get_cgroup(cg_path);
    if (!cg_fd)
        goto err;

    if (join_cgroup(cg_path))
        goto err;

    if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
        printf("FAILED: load_bpf_file failed for: %s\n", file);
        goto err;
    }

    rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
    if (rv) {
        printf("FAILED: bpf_prog_attach: %d (%s)\n", rv, strerror(errno));
        goto err;
    }

    perf_event_fd = bpf_find_map(__func__, obj, "perf_event_map");
    if (perf_event_fd < 0)
        goto err;

    map_fd = bpf_find_map(__func__, obj, "global_map");
    if (map_fd < 0)
        goto err;

    pmu_fd = setup_bpf_perf_event(perf_event_fd);
    if (pmu_fd < 0 || perf_event_mmap(pmu_fd) < 0)
        goto err;

    pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd);

    sprintf(test_script,
            "/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP",
            TESTPORT);
    system(test_script);

    sprintf(test_script,
            "/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
            TESTPORT);
    system(test_script);

    sprintf(test_script,
            "/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP",
            TESTPORT);
    system(test_script);

    rv = bpf_map_lookup_elem(map_fd, &key, &g);
    if (rv != 0) {
        printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
        goto err;
    }

    sleep(10);

    if (verify_result(&g)) {
        printf("FAILED: Wrong stats. Expected %d calls, got %d\n",
               g.ncalls, rx_callbacks);
        goto err;
    }

    printf("PASSED!\n");
    error = 0;

err:
    bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
    close(cg_fd);
    cleanup_cgroup_environment();
    return error;
}