/*
 * npfctl_bpf_icmp: code block to match ICMP type and/or code.
 * Note: suitable both for the ICMPv4 and ICMPv6.
 */

/*
 * bpf_icmp_match: emit "A <- byte at L4 offset; A == val?" plus the
 * corresponding match-record words (key, 1, val).
 */
static void
bpf_icmp_match(npf_bpf_t *ctx, u_int off, int val, uint32_t key)
{
	struct bpf_insn insns[] = {
		BPF_STMT(BPF_LD+BPF_B+BPF_IND, off),
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, val, 0, JUMP_MAGIC),
	};
	add_insns(ctx, insns, __arraycount(insns));

	uint32_t mwords[] = { key, 1, val };
	done_block(ctx, mwords, sizeof(mwords));
}

void
npfctl_bpf_icmp(npf_bpf_t *ctx, int type, int code)
{
	const u_int off_type = offsetof(struct icmp, icmp_type);
	const u_int off_code = offsetof(struct icmp, icmp_code);

	/* The type/code fields sit at the same offsets in ICMPv4 and ICMPv6. */
	assert(offsetof(struct icmp6_hdr, icmp6_type) == off_type);
	assert(offsetof(struct icmp6_hdr, icmp6_code) == off_code);

	/* At least one of the two must be matched on. */
	assert(type != -1 || code != -1);

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);

	if (type != -1) {
		bpf_icmp_match(ctx, off_type, type, BM_ICMP_TYPE);
	}
	if (code != -1) {
		bpf_icmp_match(ctx, off_code, code, BM_ICMP_CODE);
	}
}
/*
 * npfctl_bpf_tcpfl: code block to match TCP flags.
 *
 * => tf is the expected flag byte; tf_mask selects the bits to compare.
 *    When tf_mask == tf, the masking instruction is omitted entirely.
 *
 * NOTE(review): a second definition of npfctl_bpf_tcpfl (taking an extra
 * "checktcp" argument) also appears in this file; two definitions of the
 * same symbol cannot coexist — confirm which revision is intended and
 * remove the other.
 */
void
npfctl_bpf_tcpfl(npf_bpf_t *ctx, uint8_t tf, uint8_t tf_mask)
{
	const u_int tcpfl_off = offsetof(struct tcphdr, th_flags);

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);

	struct bpf_insn insns_tf[] = {
		/* A <- TCP flags */
		BPF_STMT(BPF_LD+BPF_B+BPF_IND, tcpfl_off),
	};
	add_insns(ctx, insns_tf, __arraycount(insns_tf));

	/* Only mask when mask and value differ; a full match needs no AND. */
	if (tf_mask != tf) {
		/* A <- (A & mask) */
		struct bpf_insn insns_mask[] = {
			BPF_STMT(BPF_ALU+BPF_AND+BPF_K, tf_mask),
		};
		add_insns(ctx, insns_mask, __arraycount(insns_mask));
	}

	struct bpf_insn insns_cmp[] = {
		/* A == expected-TCP-flags? */
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, tf, 0, JUMP_MAGIC),
	};
	add_insns(ctx, insns_cmp, __arraycount(insns_cmp));

	/* Record the match words: the flags and the mask applied. */
	uint32_t mwords[] = { BM_TCPFL, 2, tf, tf_mask};
	done_block(ctx, mwords, sizeof(mwords));
}
/*
 * npfctl_bpf_ports: code block to match TCP/UDP port range.
 *
 * => Port numbers shall be in the network byte order.
 */
void
npfctl_bpf_ports(npf_bpf_t *ctx, u_int opts, in_port_t from, in_port_t to)
{
	const u_int sport_off = offsetof(struct udphdr, uh_sport);
	const u_int dport_off = offsetof(struct udphdr, uh_dport);
	const bool srcmatch = (opts & MATCH_SRC) != 0;

	/* TCP and UDP port offsets are the same. */
	assert(sport_off == offsetof(struct tcphdr, th_sport));
	assert(dport_off == offsetof(struct tcphdr, th_dport));

	/* Caller must have established the L4 check; exactly one direction. */
	assert(ctx->flags & CHECKED_L4);
	assert(((opts & MATCH_SRC) != 0) ^ ((opts & MATCH_DST) != 0));

	const u_int off = srcmatch ? sport_off : dport_off;

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);

	/* A <- port */
	struct bpf_insn load_port[] = {
		BPF_STMT(BPF_LD+BPF_H+BPF_IND, off),
	};
	add_insns(ctx, load_port, __arraycount(load_port));

	/* CAUTION: BPF operates in host byte-order. */
	const uint32_t lo = ntohs(from);
	const uint32_t hi = ntohs(to);

	if (lo == hi) {
		/* Single port: A == port? */
		struct bpf_insn cmp_single[] = {
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, lo, 0, JUMP_MAGIC),
		};
		add_insns(ctx, cmp_single, __arraycount(cmp_single));
	} else {
		/* Range: lo <= A and A <= hi? */
		struct bpf_insn cmp_range[] = {
			BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, lo, 0, JUMP_MAGIC),
			BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, hi, JUMP_MAGIC, 0),
		};
		add_insns(ctx, cmp_range, __arraycount(cmp_range));
	}

	/* Record the match words (host byte-order port values). */
	uint32_t mwords[] = {
		srcmatch ? BM_SRC_PORTS : BM_DST_PORTS, 2, lo, hi
	};
	done_block(ctx, mwords, sizeof(mwords));
}
/*
 * npfctl_bpf_tcpfl: code block to match TCP flags.
 *
 * => tf: expected flag bits; tf_mask: bits to compare against (the
 *    masking instruction is omitted when tf_mask == tf).
 * => checktcp: additionally verify that the L4 protocol is TCP and jump
 *    over the flag comparison when it is not; in that mode no match
 *    record is emitted.
 */
void
npfctl_bpf_tcpfl(npf_bpf_t *ctx, uint8_t tf, uint8_t tf_mask, bool checktcp)
{
	const u_int tcpfl_off = offsetof(struct tcphdr, th_flags);
	const bool usingmask = tf_mask != tf;

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);

	if (checktcp) {
		/*
		 * Skip distance past the rest of this block: load + compare
		 * (2 insns), plus one more when the AND instruction is used.
		 */
		const u_int jf = usingmask ? 3 : 2;
		assert(ctx->ingroup == false);

		/* A <- L4 protocol; A == TCP? If not, jump out. */
		struct bpf_insn insns_tcp[] = {
			BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_L4PROTO),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, IPPROTO_TCP, 0, jf),
		};
		add_insns(ctx, insns_tcp, __arraycount(insns_tcp));
	} else {
		/* Otherwise the caller must have checked L4 already. */
		assert(ctx->flags & CHECKED_L4);
	}

	struct bpf_insn insns_tf[] = {
		/* A <- TCP flags */
		BPF_STMT(BPF_LD+BPF_B+BPF_IND, tcpfl_off),
	};
	add_insns(ctx, insns_tf, __arraycount(insns_tf));

	if (usingmask) {
		/* A <- (A & mask) */
		struct bpf_insn insns_mask[] = {
			BPF_STMT(BPF_ALU+BPF_AND+BPF_K, tf_mask),
		};
		add_insns(ctx, insns_mask, __arraycount(insns_mask));
	}

	struct bpf_insn insns_cmp[] = {
		/* A == expected-TCP-flags? */
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, tf, 0, JUMP_MAGIC),
	};
	add_insns(ctx, insns_cmp, __arraycount(insns_cmp));

	/* Emit the match record only when used as an explicit flag match. */
	if (!checktcp) {
		uint32_t mwords[] = { BM_TCPFL, 2, tf, tf_mask};
		done_block(ctx, mwords, sizeof(mwords));
	}
}
/*
 * npfctl_bpf_proto: code block to match IP version and L4 protocol.
 */
void
npfctl_bpf_proto(npf_bpf_t *ctx, sa_family_t af, int proto)
{
	/* At least one of the family or the protocol must be specified. */
	assert(af != AF_UNSPEC || proto != -1);

	/* Note: fails if IP version does not match. */
	fetch_l3(ctx, af, 0);

	if (proto != -1) {
		/* A <- L4 protocol; A == expected-protocol? */
		struct bpf_insn cmp_proto[] = {
			BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_L4PROTO),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, proto, 0, JUMP_MAGIC),
		};
		add_insns(ctx, cmp_proto, __arraycount(cmp_proto));

		/* Record the match words: the protocol number. */
		uint32_t mwords[] = { BM_PROTO, 1, proto };
		done_block(ctx, mwords, sizeof(mwords));
	}
}
/*
 * npfctl_bpf_cidr: code block to match IPv4 or IPv6 CIDR.
 *
 * => IP address shall be in the network byte order.
 * => mask is a prefix length (1..NPF_MAX_NETMASK) or NPF_NO_NETMASK
 *    for an exact-address match.
 */
void
npfctl_bpf_cidr(npf_bpf_t *ctx, u_int opts, sa_family_t af,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	const uint32_t *awords = (const uint32_t *)addr;
	u_int nwords, length, maxmask, off;

	/* Exactly one direction; mask is a valid prefix or "no netmask". */
	assert(((opts & MATCH_SRC) != 0) ^ ((opts & MATCH_DST) != 0));
	assert((mask && mask <= NPF_MAX_NETMASK) || mask == NPF_NO_NETMASK);

	switch (af) {
	case AF_INET:
		maxmask = 32;
		off = (opts & MATCH_SRC) ?
		    offsetof(struct ip, ip_src) :
		    offsetof(struct ip, ip_dst);
		nwords = sizeof(struct in_addr) / sizeof(uint32_t);
		break;
	case AF_INET6:
		maxmask = 128;
		off = (opts & MATCH_SRC) ?
		    offsetof(struct ip6_hdr, ip6_src) :
		    offsetof(struct ip6_hdr, ip6_dst);
		nwords = sizeof(struct in6_addr) / sizeof(uint32_t);
		break;
	default:
		abort();
	}

	/* Ensure address family. */
	fetch_l3(ctx, af, 0);

	/* Remaining prefix bits to consume, word by word. */
	length = (mask == NPF_NO_NETMASK) ? maxmask : mask;

	/* CAUTION: BPF operates in host byte-order. */
	for (u_int i = 0; i < nwords; i++) {
		const u_int woff = i * sizeof(uint32_t);
		uint32_t word = ntohl(awords[i]);
		uint32_t wordmask;

		if (length >= 32) {
			/* The mask is a full word - do not apply it. */
			wordmask = 0;
			length -= 32;
		} else if (length) {
			/* Partial word: keep the top "length" bits only. */
			wordmask = 0xffffffff << (32 - length);
			length = 0;
		} else {
			/* The mask became zero - skip the rest. */
			break;
		}

		/* A <- IP address (or one word of it) */
		struct bpf_insn insns_ip[] = {
			BPF_STMT(BPF_LD+BPF_W+BPF_ABS, off + woff),
		};
		add_insns(ctx, insns_ip, __arraycount(insns_ip));

		/* A <- (A & MASK) */
		if (wordmask) {
			struct bpf_insn insns_mask[] = {
				BPF_STMT(BPF_ALU+BPF_AND+BPF_K, wordmask),
			};
			add_insns(ctx, insns_mask, __arraycount(insns_mask));
		}

		/* A == expected-IP-word ? */
		struct bpf_insn insns_cmp[] = {
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, word, 0, JUMP_MAGIC),
		};
		add_insns(ctx, insns_cmp, __arraycount(insns_cmp));
	}

	/*
	 * Record the match words: family, prefix, and all four address
	 * words.  NOTE(review): reading awords[0..3] assumes npf_addr_t is
	 * always 128 bits wide, even for IPv4 — confirm its definition.
	 */
	uint32_t mwords[] = { (opts & MATCH_SRC) ?
		BM_SRC_CIDR: BM_DST_CIDR, 6,
		af, mask, awords[0], awords[1], awords[2], awords[3],
	};
	done_block(ctx, mwords, sizeof(mwords));
}