int
pflog_packet(struct pf_pdesc *pd, u_int8_t reason, struct pf_rule *rm,
    struct pf_rule *am, struct pf_ruleset *ruleset)
{
#if NBPFILTER > 0
	struct ifnet *ifn;
	struct pfloghdr hdr;

	if (rm == NULL || pd == NULL || pd->kif == NULL || pd->m == NULL)
		return (-1);

	if (rm->logif >= npflogifs || (ifn = pflogifs[rm->logif]) == NULL ||
	    !ifn->if_bpf)
		return (0);

	bzero(&hdr, sizeof(hdr));
	hdr.length = PFLOG_REAL_HDRLEN;
	hdr.action = rm->action;
	hdr.reason = reason;
	memcpy(hdr.ifname, pd->kif->pfik_name, sizeof(hdr.ifname));

	if (am == NULL) {
		hdr.rulenr = htonl(rm->nr);
		hdr.subrulenr = -1;
	} else {
		hdr.rulenr = htonl(am->nr);
		hdr.subrulenr = htonl(rm->nr);
		if (ruleset != NULL && ruleset->anchor != NULL)
			strlcpy(hdr.ruleset, ruleset->anchor->name,
			    sizeof(hdr.ruleset));
	}
	if (rm->log & PF_LOG_SOCKET_LOOKUP && !pd->lookup.done)
		pd->lookup.done = pf_socket_lookup(pd);
	if (pd->lookup.done > 0) {
		hdr.uid = pd->lookup.uid;
		hdr.pid = pd->lookup.pid;
	} else {
		hdr.uid = UID_MAX;
		hdr.pid = NO_PID;
	}
	hdr.rule_uid = rm->cuid;
	hdr.rule_pid = rm->cpid;
	hdr.dir = pd->dir;

	PF_ACPY(&hdr.saddr, &pd->nsaddr, pd->naf);
	PF_ACPY(&hdr.daddr, &pd->ndaddr, pd->naf);
	hdr.af = pd->af;
	hdr.naf = pd->naf;
	hdr.sport = pd->nsport;
	hdr.dport = pd->ndport;

	ifn->if_opackets++;
	ifn->if_obytes += pd->m->m_pkthdr.len;

	bpf_mtap_hdr(ifn->if_bpf, (caddr_t)&hdr, PFLOG_HDRLEN, pd->m,
	    BPF_DIRECTION_OUT, pflog_bpfcopy);
#endif

	return (0);
}
int
pf_get_transaddr(struct pf_rule *r, struct pf_pdesc *pd,
    struct pf_src_node **sns, struct pf_rule **nr)
{
	struct pf_addr	naddr;
	u_int16_t	nport = 0;

#ifdef INET6
	if (pd->af != pd->naf)
		return (pf_get_transaddr_af(r, pd, sns));
#endif /* INET6 */

	if (r->nat.addr.type != PF_ADDR_NONE) {
		/* XXX is this right? what if rtable is changed at the same
		 * XXX time? where do I need to figure out the sport? */
		if (pf_get_sport(pd, r, &naddr, &nport,
		    r->nat.proxy_port[0], r->nat.proxy_port[1], sns)) {
			DPFPRINTF(LOG_NOTICE,
			    "pf: NAT proxy port allocation (%u-%u) failed",
			    r->nat.proxy_port[0],
			    r->nat.proxy_port[1]);
			return (-1);
		}
		*nr = r;
		PF_ACPY(&pd->nsaddr, &naddr, pd->af);
		pd->nsport = nport;
	}
	if (r->rdr.addr.type != PF_ADDR_NONE) {
		if (pf_map_addr(pd->af, r, &pd->nsaddr, &naddr, NULL, sns,
		    &r->rdr, PF_SN_RDR))
			return (-1);
		if ((r->rdr.opts & PF_POOL_TYPEMASK) == PF_POOL_BITMASK)
			PF_POOLMASK(&naddr, &naddr, &r->rdr.addr.v.a.mask,
			    &pd->ndaddr, pd->af);

		if (r->rdr.proxy_port[1]) {
			u_int32_t tmp_nport;

			tmp_nport = ((ntohs(pd->ndport) -
			    ntohs(r->dst.port[0])) %
			    (r->rdr.proxy_port[1] - r->rdr.proxy_port[0] +
			    1)) + r->rdr.proxy_port[0];

			/* wrap around if necessary */
			if (tmp_nport > 65535)
				tmp_nport -= 65535;
			nport = htons((u_int16_t)tmp_nport);
		} else if (r->rdr.proxy_port[0])
			nport = htons(r->rdr.proxy_port[0]);

		*nr = r;
		PF_ACPY(&pd->ndaddr, &naddr, pd->af);
		if (nport)
			pd->ndport = nport;
	}

	return (0);
}
int
pf_get_transaddr(struct pf_rule *r, struct pf_pdesc *pd,
    struct pf_addr *saddr, u_int16_t *sport, struct pf_addr *daddr,
    u_int16_t *dport, struct pf_src_node **sns)
{
	struct pf_addr	naddr;
	u_int16_t	nport = 0;

	if (!TAILQ_EMPTY(&r->nat.list)) {
		/* XXX is this right? what if rtable is changed at the same
		 * XXX time? where do I need to figure out the sport? */
		if (pf_get_sport(pd->af, pd->proto, r, saddr, daddr, *dport,
		    &naddr, &nport, r->nat.proxy_port[0],
		    r->nat.proxy_port[1], sns, pd->rdomain)) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: NAT proxy port allocation "
			    "(%u-%u) failed\n",
			    r->nat.proxy_port[0],
			    r->nat.proxy_port[1]));
			return (-1);
		}
		PF_ACPY(saddr, &naddr, pd->af);
		if (nport)
			*sport = nport;
	}
	if (!TAILQ_EMPTY(&r->rdr.list)) {
		if (pf_map_addr(pd->af, r, saddr, &naddr, NULL, sns,
		    &r->rdr, PF_SN_RDR))
			return (-1);
		if ((r->rdr.opts & PF_POOL_TYPEMASK) == PF_POOL_BITMASK)
			PF_POOLMASK(&naddr, &naddr, &r->rdr.cur->addr.v.a.mask,
			    daddr, pd->af);

		if (r->rdr.proxy_port[1]) {
			u_int32_t tmp_nport;

			tmp_nport = ((ntohs(*dport) - ntohs(r->dst.port[0])) %
			    (r->rdr.proxy_port[1] - r->rdr.proxy_port[0] +
			    1)) + r->rdr.proxy_port[0];

			/* wrap around if necessary */
			if (tmp_nport > 65535)
				tmp_nport -= 65535;
			nport = htons((u_int16_t)tmp_nport);
		} else if (r->rdr.proxy_port[0])
			nport = htons(r->rdr.proxy_port[0]);

		PF_ACPY(daddr, &naddr, pd->af);
		if (nport)
			*dport = nport;
	}

	return (0);
}
int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
{
	struct pf_pool	*rpool = &r->rpool;
	struct pf_addr	*raddr = NULL, *rmask = NULL;

	/* Try to find a src_node if none was given and this
	   is a sticky-address rule. */
	if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
	    (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE)
		*sn = pf_find_src_node(saddr, r, af, 0);

	/* If a src_node was found or explicitly given and it has a non-zero
	   route address, use this address. A zeroed address is found if the
	   src node was created just a moment ago in pf_create_state and it
	   needs to be filled in with the routing decision calculated here. */
	if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
		PF_ACPY(naddr, &(*sn)->raddr, af);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf_map_addr: src tracking maps ");
			pf_print_host(saddr, 0, af);
			printf(" to ");
			pf_print_host(naddr, 0, af);
			printf("\n");
		}
		return (0);
	}

	/* Find the route using the chosen algorithm. Store the found route
	   in src_node if it was given or found. */
	if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
		return (1);
	if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
		switch (af) {
#ifdef INET
		case AF_INET:
			if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
			break;
#endif /* INET6 */
		}
	} else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
		if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
			return (1);	/* unsupported */
	} else {
		raddr = &rpool->cur->addr.v.a.addr;
		rmask = &rpool->cur->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, af);
		break;
	case PF_POOL_BITMASK:
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, af)) {
			switch (af) {
#ifdef INET
			case AF_INET:
				rpool->counter.addr32[0] = htonl(arc4random());
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (rmask->addr32[3] != 0xffffffff)
					rpool->counter.addr32[3] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[2] != 0xffffffff)
					rpool->counter.addr32[2] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[1] != 0xffffffff)
					rpool->counter.addr32[1] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[0] != 0xffffffff)
					rpool->counter.addr32[0] =
					    htonl(arc4random());
				break;
#endif /* INET6 */
			}
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
			PF_ACPY(init_addr, naddr, af);

		} else {
			PF_AINC(&rpool->counter, af);
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
		}
		break;
	case PF_POOL_SRCHASH:
	    {
		unsigned char hash[16];

		pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
		break;
	    }
	case PF_POOL_ROUNDROBIN:
	    {
		struct pf_pooladdr *acur = rpool->cur;

		/*
		 * XXXGL: in the round-robin case we need to store
		 * the round-robin machine state in the rule, thus
		 * forwarding thread needs to modify rule.
		 *
		 * This is done w/o locking, because performance is assumed
		 * more important than round-robin precision.
		 *
		 * In the simplest case we just update the "rpool->cur"
		 * pointer. However, if the pool contains tables or dynamic
		 * addresses, then "tblidx" is also used to store machine
		 * state. Since "tblidx" is int, concurrent access to it can't
		 * lead to inconsistency, only to loss of precision.
		 *
		 * Things get worse if the table contains not hosts, but
		 * prefixes. In this case the counter also stores machine
		 * state, and for an IPv6 address the counter can't be updated
		 * atomically. Probably, using round-robin on a table
		 * containing IPv6 prefixes (or even IPv4) would cause a
		 * panic.
		 */
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			if (!pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter, af))
				goto get_addr;
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter, af))
				goto get_addr;
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
			goto get_addr;

	try_next:
		if (TAILQ_NEXT(rpool->cur, entries) == NULL)
			rpool->cur = TAILQ_FIRST(&rpool->list);
		else
			rpool->cur = TAILQ_NEXT(rpool->cur, entries);
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else {
			raddr = &rpool->cur->addr.v.a.addr;
			rmask = &rpool->cur->addr.v.a.mask;
			PF_ACPY(&rpool->counter, raddr, af);
		}

	get_addr:
		PF_ACPY(naddr, &rpool->counter, af);
		if (init_addr != NULL && PF_AZERO(init_addr, af))
			PF_ACPY(init_addr, naddr, af);
		PF_AINC(&rpool->counter, af);
		break;
	    }
	}
	if (*sn != NULL)
		PF_ACPY(&(*sn)->raddr, naddr, af);

	if (V_pf_status.debug >= PF_DEBUG_MISC &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		printf("pf_map_addr: selected address ");
		pf_print_host(naddr, 0, af);
		printf("\n");
	}

	return (0);
}
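/*
 * Illustrative sketch (not part of pf): PF_POOL_BITMASK above relies on
 * PF_POOLMASK, which combines the network bits of the pool address with the
 * host bits of a second address, i.e. naddr = (raddr & rmask) | (saddr & ~rmask).
 * The helper below shows that operation for a single IPv4 word; the name
 * poolmask_v4 is hypothetical.
 */
static inline uint32_t
poolmask_v4(uint32_t raddr, uint32_t rmask, uint32_t saddr)
{
	/* keep the pool's network bits, take the host bits from saddr */
	return ((raddr & rmask) | (saddr & ~rmask));
}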
static int
pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
    struct pf_addr *saddr, uint16_t sport, struct pf_addr *daddr,
    uint16_t dport, struct pf_addr *naddr, uint16_t *nport, uint16_t low,
    uint16_t high, struct pf_src_node **sn)
{
	struct pf_state_key_cmp	key;
	struct pf_addr		init_addr;
	uint16_t		cut;

	bzero(&init_addr, sizeof(init_addr));
	if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
		return (1);

	if (proto == IPPROTO_ICMP) {
		low = 1;
		high = 65535;
	}

	bzero(&key, sizeof(key));
	key.af = af;
	key.proto = proto;
	key.port[0] = dport;
	PF_ACPY(&key.addr[0], daddr, key.af);

	do {
		PF_ACPY(&key.addr[1], naddr, key.af);

		/*
		 * port search; start random, step;
		 * similar to the portloop in in_pcbbind
		 */
		if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
		    proto == IPPROTO_ICMP) || (low == 0 && high == 0)) {
			/*
			 * XXX bug: icmp states don't use the id on both
			 * sides. (traceroute -I through nat)
			 */
			key.port[1] = sport;
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				*nport = sport;
				return (0);
			}
		} else if (low == high) {
			key.port[1] = htons(low);
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				*nport = htons(low);
				return (0);
			}
		} else {
			uint16_t tmp;

			if (low > high) {
				tmp = low;
				low = high;
				high = tmp;
			}
			/* low < high */
			cut = htonl(arc4random()) % (1 + high - low) + low;
			/* low <= cut <= high */
			for (tmp = cut; tmp <= high; ++(tmp)) {
				key.port[1] = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL) {
					*nport = htons(tmp);
					return (0);
				}
			}
			for (tmp = cut - 1; tmp >= low; --(tmp)) {
				key.port[1] = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL) {
					*nport = htons(tmp);
					return (0);
				}
			}
		}

		switch (r->rpool.opts & PF_POOL_TYPEMASK) {
		case PF_POOL_RANDOM:
		case PF_POOL_ROUNDROBIN:
			if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
				return (1);
			break;
		case PF_POOL_NONE:
		case PF_POOL_SRCHASH:
		case PF_POOL_BITMASK:
		default:
			return (1);
		}
	} while (!PF_AEQ(&init_addr, naddr, af));

	return (1);					/* none available */
}
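/*
 * Illustrative sketch (not part of pf): the port search above starts at a
 * random "cut" within [low, high], probes upward to high, then downward to
 * low. The hypothetical port_is_free() callback stands in for the
 * pf_find_state_all()/in_baddynamic() checks done by the real code; the
 * function name pick_free_port is also hypothetical.
 */
static int
pick_free_port(uint16_t low, uint16_t high, int (*port_is_free)(uint16_t),
    uint16_t *chosen)
{
	uint32_t cut, tmp;

	if (low > high) {
		uint16_t swap = low;
		low = high;
		high = swap;
	}
	/* pick a random starting point in [low, high] */
	cut = arc4random_uniform(1 + high - low) + low;
	/* probe upward: cut .. high */
	for (tmp = cut; tmp <= high; tmp++)
		if (port_is_free((uint16_t)tmp)) {
			*chosen = (uint16_t)tmp;
			return (0);
		}
	/* probe downward: cut-1 .. low */
	for (tmp = cut; tmp-- > low; )
		if (port_is_free((uint16_t)tmp)) {
			*chosen = (uint16_t)tmp;
			return (0);
		}
	return (1);	/* none available */
}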
void
pflog_bpfcopy(const void *src_arg, void *dst_arg, size_t len)
{
	struct mbuf		*m, *mp, *mhdr, *mptr;
	struct pfloghdr		*pfloghdr;
	u_int			 count;
	u_char			*dst, *mdst;
	int			 afto, hlen, mlen, off;

	union pf_headers {
		struct tcphdr		tcp;
		struct udphdr		udp;
		struct icmp		icmp;
#ifdef INET6
		struct icmp6_hdr	icmp6;
		struct mld_hdr		mld;
		struct nd_neighbor_solicit nd_ns;
#endif /* INET6 */
	} pdhdrs;

	struct pf_pdesc		 pd;
	struct pf_addr		 osaddr, odaddr;
	u_int16_t		 osport = 0, odport = 0;
	u_int8_t		 proto = 0;

	m = (struct mbuf *)src_arg;
	dst = dst_arg;

	mhdr = pflog_mhdr;
	mptr = pflog_mptr;

	if (m == NULL)
		panic("pflog_bpfcopy got no mbuf");

	/* first mbuf holds struct pfloghdr */
	pfloghdr = mtod(m, struct pfloghdr *);
	afto = pfloghdr->af != pfloghdr->naf;
	count = min(m->m_len, len);
	bcopy(pfloghdr, dst, count);
	pfloghdr = (struct pfloghdr *)dst;
	dst += count;
	len -= count;
	m = m->m_next;

	if (len <= 0)
		return;

	/* second mbuf is pkthdr */
	if (m == NULL)
		panic("no second mbuf");

	/*
	 * temporary mbuf will hold an ip/ip6 header and 8 bytes
	 * of the protocol header
	 */
	m_inithdr(mhdr);
	mhdr->m_len = 0;	/* XXX not done in m_inithdr() */

#if INET && INET6
	/* offset for a new header */
	if (afto && pfloghdr->af == AF_INET)
		mhdr->m_data += sizeof(struct ip6_hdr) - sizeof(struct ip);
#endif /* INET && INET6 */

	mdst = mtod(mhdr, char *);
	switch (pfloghdr->af) {
	case AF_INET: {
		struct ip	*h;

		if (m->m_pkthdr.len < sizeof(*h))
			goto copy;
		m_copydata(m, 0, sizeof(*h), mdst);
		h = (struct ip *)mdst;
		hlen = h->ip_hl << 2;

		if (hlen > sizeof(*h) && (m->m_pkthdr.len >= hlen))
			m_copydata(m, sizeof(*h), hlen - sizeof(*h),
			    mdst + sizeof(*h));
		break;
	    }
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr	*h;

		if (m->m_pkthdr.len < sizeof(*h))
			goto copy;
		hlen = sizeof(struct ip6_hdr);
		m_copydata(m, 0, hlen, mdst);
		h = (struct ip6_hdr *)mdst;
		proto = h->ip6_nxt;
		break;
	    }
#endif /* INET6 */
	default:
		/* shouldn't happen ever :-) */
		goto copy;
	}

	if (m->m_pkthdr.len < hlen + 8 && proto != IPPROTO_NONE)
		goto copy;
	else if (proto != IPPROTO_NONE) {
		/* copy 8 bytes of the protocol header */
		m_copydata(m, hlen, 8, mdst + hlen);
		hlen += 8;
	}

	mhdr->m_len += hlen;
	mhdr->m_pkthdr.len = mhdr->m_len;

	/* create a chain mhdr -> mptr, mptr->m_data = (m->m_data+hlen) */
	mp = m_getptr(m, hlen, &off);
	if (mp != NULL) {
		bcopy(mp, mptr, sizeof(*mptr));
		mptr->m_data += off;
		mptr->m_len -= off;
		mptr->m_flags &= ~M_PKTHDR;
		mhdr->m_next = mptr;
		mhdr->m_pkthdr.len += m->m_pkthdr.len - hlen;
	}

	/*
	 * Rewrite addresses if needed. Reason pointer must be NULL to avoid
	 * counting the packet here again.
	 */
	if (pf_setup_pdesc(&pd, &pdhdrs, pfloghdr->af, pfloghdr->dir, NULL,
	    mhdr, NULL) != PF_PASS)
		goto copy;
	pd.naf = pfloghdr->naf;

	PF_ACPY(&osaddr, pd.src, pd.af);
	PF_ACPY(&odaddr, pd.dst, pd.af);

	if (pd.sport)
		osport = *pd.sport;
	if (pd.dport)
		odport = *pd.dport;

	if (pd.virtual_proto != PF_VPROTO_FRAGMENT &&
	    (pfloghdr->rewritten = pf_translate(&pd, &pfloghdr->saddr,
	    pfloghdr->sport, &pfloghdr->daddr, pfloghdr->dport, 0,
	    pfloghdr->dir))) {
		m_copyback(pd.m, pd.off, min(pd.m->m_len - pd.off, pd.hdrlen),
		    pd.hdr.any, M_NOWAIT);
#if INET && INET6
		if (afto) {
			PF_ACPY(&pd.nsaddr, &pfloghdr->saddr, pd.naf);
			PF_ACPY(&pd.ndaddr, &pfloghdr->daddr, pd.naf);
		}
#endif /* INET && INET6 */
		PF_ACPY(&pfloghdr->saddr, &osaddr, pd.af);
		PF_ACPY(&pfloghdr->daddr, &odaddr, pd.af);
		pfloghdr->sport = osport;
		pfloghdr->dport = odport;
	}

	pd.tot_len = min(pd.tot_len, len);
	pd.tot_len -= pd.m->m_data - pd.m->m_pktdat;

#if INET && INET6
	if (afto && pfloghdr->rewritten)
		pf_translate_af(&pd);
#endif /* INET && INET6 */

	m = pd.m;

 copy:
	mlen = min(m->m_pkthdr.len, len);
	m_copydata(m, 0, mlen, dst);
	len -= mlen;
	if (len > 0)
		bzero(dst + mlen, len);
}
int
pf_get_transaddr_af(struct pf_rule *r, struct pf_pdesc *pd,
    struct pf_src_node **sns)
{
	struct pf_addr	ndaddr, nsaddr, naddr;
	u_int16_t	nport = 0;
	int		prefixlen = 96;

	if (pf_status.debug >= LOG_NOTICE) {
		log(LOG_NOTICE, "pf: af-to %s %s, ",
		    pd->naf == AF_INET ? "inet" : "inet6",
		    r->rdr.addr.type == PF_ADDR_NONE ? "nat" : "rdr");
		pf_print_host(&pd->nsaddr, pd->nsport, pd->af);
		addlog(" -> ");
		pf_print_host(&pd->ndaddr, pd->ndport, pd->af);
		addlog("\n");
	}

	if (r->nat.addr.type == PF_ADDR_NONE)
		panic("pf_get_transaddr_af: no nat pool for source address");

	/* get source address and port */
	if (pf_get_sport(pd, r, &nsaddr, &nport,
	    r->nat.proxy_port[0], r->nat.proxy_port[1], sns)) {
		DPFPRINTF(LOG_NOTICE,
		    "pf: af-to NAT proxy port allocation (%u-%u) failed",
		    r->nat.proxy_port[0],
		    r->nat.proxy_port[1]);
		return (-1);
	}
	pd->nsport = nport;

	if (pd->proto == IPPROTO_ICMPV6 && pd->naf == AF_INET) {
		if (pd->dir == PF_IN) {
			NTOHS(pd->ndport);
			if (pd->ndport == ICMP6_ECHO_REQUEST)
				pd->ndport = ICMP_ECHO;
			else if (pd->ndport == ICMP6_ECHO_REPLY)
				pd->ndport = ICMP_ECHOREPLY;
			HTONS(pd->ndport);
		} else {
			NTOHS(pd->nsport);
			if (pd->nsport == ICMP6_ECHO_REQUEST)
				pd->nsport = ICMP_ECHO;
			else if (pd->nsport == ICMP6_ECHO_REPLY)
				pd->nsport = ICMP_ECHOREPLY;
			HTONS(pd->nsport);
		}
	} else if (pd->proto == IPPROTO_ICMP && pd->naf == AF_INET6) {
		if (pd->dir == PF_IN) {
			NTOHS(pd->ndport);
			if (pd->ndport == ICMP_ECHO)
				pd->ndport = ICMP6_ECHO_REQUEST;
			else if (pd->ndport == ICMP_ECHOREPLY)
				pd->ndport = ICMP6_ECHO_REPLY;
			HTONS(pd->ndport);
		} else {
			NTOHS(pd->nsport);
			if (pd->nsport == ICMP_ECHO)
				pd->nsport = ICMP6_ECHO_REQUEST;
			else if (pd->nsport == ICMP_ECHOREPLY)
				pd->nsport = ICMP6_ECHO_REPLY;
			HTONS(pd->nsport);
		}
	}

	/* get the destination address and port */
	if (r->rdr.addr.type != PF_ADDR_NONE) {
		if (pf_map_addr(pd->naf, r, &nsaddr, &naddr, NULL, sns,
		    &r->rdr, PF_SN_RDR))
			return (-1);
		if (r->rdr.proxy_port[0])
			pd->ndport = htons(r->rdr.proxy_port[0]);

		if (pd->naf == AF_INET) {
			/* The prefix is the IPv4 rdr address */
			prefixlen = in_mask2len((struct in_addr *)
			    &r->rdr.addr.v.a.mask);
			inet_nat46(pd->naf, &pd->ndaddr, &ndaddr, &naddr,
			    prefixlen);
		} else {
			/* The prefix is the IPv6 rdr address */
			prefixlen = in6_mask2len((struct in6_addr *)
			    &r->rdr.addr.v.a.mask, NULL);
			inet_nat64(pd->naf, &pd->ndaddr, &ndaddr, &naddr,
			    prefixlen);
		}
	} else {
		if (pd->naf == AF_INET) {
			/* The prefix is the IPv6 dst address */
			prefixlen = in6_mask2len((struct in6_addr *)
			    &r->dst.addr.v.a.mask, NULL);
			if (prefixlen < 32)
				prefixlen = 96;
			inet_nat64(pd->naf, &pd->ndaddr, &ndaddr, &pd->ndaddr,
			    prefixlen);
		} else {
			/*
			 * The prefix is the IPv6 nat address
			 * (that was stored in pd->nsaddr)
			 */
			prefixlen = in6_mask2len((struct in6_addr *)
			    &r->nat.addr.v.a.mask, NULL);
			if (prefixlen > 96)
				prefixlen = 96;
			inet_nat64(pd->naf, &pd->ndaddr, &ndaddr, &nsaddr,
			    prefixlen);
		}
	}

	PF_ACPY(&pd->nsaddr, &nsaddr, pd->naf);
	PF_ACPY(&pd->ndaddr, &ndaddr, pd->naf);

	if (pf_status.debug >= LOG_NOTICE) {
		log(LOG_NOTICE, "pf: af-to %s %s done, prefixlen %d, ",
		    pd->naf == AF_INET ? "inet" : "inet6",
		    r->rdr.addr.type == PF_ADDR_NONE ? "nat" : "rdr",
		    prefixlen);
		pf_print_host(&pd->nsaddr, pd->nsport, pd->naf);
		addlog(" -> ");
		pf_print_host(&pd->ndaddr, pd->ndport, pd->naf);
		addlog("\n");
	}

	return (0);
}
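/*
 * Illustrative sketch (not part of pf): for the common prefixlen of 96 used
 * above, the IPv6 side of the translation is a /96 prefix with the 32-bit
 * IPv4 address in the last four bytes (RFC 6052 style embedding). The helper
 * below shows only that /96 case; the name embed_v4_in_v6_96 is hypothetical
 * and it is not a drop-in replacement for inet_nat64(), which also handles
 * other prefix lengths.
 */
static void
embed_v4_in_v6_96(struct in6_addr *out, const struct in6_addr *prefix,
    const struct in_addr *v4)
{
	*out = *prefix;					/* copy the /96 prefix */
	memcpy(&out->s6_addr[12], &v4->s_addr, 4);	/* append the IPv4 address */
}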
int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sns,
    struct pf_pool *rpool, enum pf_sn_types type)
{
	unsigned char	 hash[16];
	struct pf_addr	 faddr;
	struct pf_addr	*raddr = &rpool->addr.v.a.addr;
	struct pf_addr	*rmask = &rpool->addr.v.a.mask;
	u_int64_t	 states;
	u_int16_t	 weight;
	u_int64_t	 load;
	u_int64_t	 cload;

	if (sns[type] == NULL && rpool->opts & PF_POOL_STICKYADDR &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE &&
	    pf_map_addr_sticky(af, r, saddr, naddr, sns, rpool, type) == 0)
		return (0);

	if (rpool->addr.type == PF_ADDR_NOROUTE)
		return (1);
	if (rpool->addr.type == PF_ADDR_DYNIFTL) {
		switch (af) {
#ifdef INET
		case AF_INET:
			if (rpool->addr.p.dyn->pfid_acnt4 < 1 &&
			    ((rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN) &&
			    ((rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_LEASTSTATES))
				return (1);
			raddr = &rpool->addr.p.dyn->pfid_addr4;
			rmask = &rpool->addr.p.dyn->pfid_mask4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			if (rpool->addr.p.dyn->pfid_acnt6 < 1 &&
			    ((rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN) &&
			    ((rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_LEASTSTATES))
				return (1);
			raddr = &rpool->addr.p.dyn->pfid_addr6;
			rmask = &rpool->addr.p.dyn->pfid_mask6;
			break;
#endif /* INET6 */
		}
	} else if (rpool->addr.type == PF_ADDR_TABLE) {
		if (((rpool->opts & PF_POOL_TYPEMASK) !=
		    PF_POOL_ROUNDROBIN) &&
		    ((rpool->opts & PF_POOL_TYPEMASK) !=
		    PF_POOL_LEASTSTATES))
			return (1);	/* unsupported */
	} else {
		raddr = &rpool->addr.v.a.addr;
		rmask = &rpool->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, af);
		break;
	case PF_POOL_BITMASK:
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, af)) {
			switch (af) {
#ifdef INET
			case AF_INET:
				rpool->counter.addr32[0] = htonl(arc4random());
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (rmask->addr32[3] != 0xffffffff)
					rpool->counter.addr32[3] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[2] != 0xffffffff)
					rpool->counter.addr32[2] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[1] != 0xffffffff)
					rpool->counter.addr32[1] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[0] != 0xffffffff)
					rpool->counter.addr32[0] =
					    htonl(arc4random());
				break;
#endif /* INET6 */
			}
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
			PF_ACPY(init_addr, naddr, af);

		} else {
			PF_AINC(&rpool->counter, af);
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
		}
		break;
	case PF_POOL_SRCHASH:
		pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
		break;
	case PF_POOL_ROUNDROBIN:
		if (rpool->addr.type == PF_ADDR_TABLE ||
		    rpool->addr.type == PF_ADDR_DYNIFTL) {
			if (pfr_pool_get(rpool, &raddr, &rmask, af)) {
				/*
				 * reset counter in case its value
				 * has been removed from the pool.
				 */
				bzero(&rpool->counter,
				    sizeof(rpool->counter));
				if (pfr_pool_get(rpool, &raddr, &rmask, af))
					return (1);
			}
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
			return (1);

		/* iterate over table if it contains entries which are weighted */
		if ((rpool->addr.type == PF_ADDR_TABLE &&
		    rpool->addr.p.tbl->pfrkt_refcntcost > 0) ||
		    (rpool->addr.type == PF_ADDR_DYNIFTL &&
		    rpool->addr.p.dyn->pfid_kt->pfrkt_refcntcost > 0)) {
			do {
				if (rpool->addr.type == PF_ADDR_TABLE ||
				    rpool->addr.type == PF_ADDR_DYNIFTL) {
					if (pfr_pool_get(rpool,
					    &raddr, &rmask, af))
						return (1);
				} else {
					log(LOG_ERR, "pf: pf_map_addr: "
					    "weighted RR failure");
					return (1);
				}
				if (rpool->weight >= rpool->curweight)
					break;
				PF_AINC(&rpool->counter, af);
			} while (1);

			weight = rpool->weight;
		}

		PF_ACPY(naddr, &rpool->counter, af);
		if (init_addr != NULL && PF_AZERO(init_addr, af))
			PF_ACPY(init_addr, naddr, af);
		PF_AINC(&rpool->counter, af);
		break;
	case PF_POOL_LEASTSTATES:
		/* retrieve an address first */
		if (rpool->addr.type == PF_ADDR_TABLE ||
		    rpool->addr.type == PF_ADDR_DYNIFTL) {
			if (pfr_pool_get(rpool, &raddr, &rmask, af)) {
				/* see PF_POOL_ROUNDROBIN */
				bzero(&rpool->counter,
				    sizeof(rpool->counter));
				if (pfr_pool_get(rpool, &raddr, &rmask, af))
					return (1);
			}
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
			return (1);

		states = rpool->states;
		weight = rpool->weight;

		if ((rpool->addr.type == PF_ADDR_TABLE &&
		    rpool->addr.p.tbl->pfrkt_refcntcost > 0) ||
		    (rpool->addr.type == PF_ADDR_DYNIFTL &&
		    rpool->addr.p.dyn->pfid_kt->pfrkt_refcntcost > 0))
			load = ((UINT16_MAX * rpool->states) / rpool->weight);
		else
			load = states;

		PF_ACPY(&faddr, &rpool->counter, af);

		PF_ACPY(naddr, &rpool->counter, af);
		if (init_addr != NULL && PF_AZERO(init_addr, af))
			PF_ACPY(init_addr, naddr, af);

		/*
		 * iterate *once* over whole table and find destination with
		 * least connection
		 */
		do {
			PF_AINC(&rpool->counter, af);
			if (rpool->addr.type == PF_ADDR_TABLE ||
			    rpool->addr.type == PF_ADDR_DYNIFTL) {
				if (pfr_pool_get(rpool, &raddr, &rmask, af))
					return (1);
			} else if (pf_match_addr(0, raddr, rmask,
			    &rpool->counter, af))
				return (1);

			if ((rpool->addr.type == PF_ADDR_TABLE &&
			    rpool->addr.p.tbl->pfrkt_refcntcost > 0) ||
			    (rpool->addr.type == PF_ADDR_DYNIFTL &&
			    rpool->addr.p.dyn->pfid_kt->pfrkt_refcntcost > 0))
				cload = ((UINT16_MAX * rpool->states) /
				    rpool->weight);
			else
				cload = rpool->states;

			/* find lc minimum */
			if (cload < load) {
				states = rpool->states;
				weight = rpool->weight;
				load = cload;

				PF_ACPY(naddr, &rpool->counter, af);
				if (init_addr != NULL &&
				    PF_AZERO(init_addr, af))
					PF_ACPY(init_addr, naddr, af);
			}
		} while (pf_match_addr(1, &faddr, rmask, &rpool->counter, af) &&
		    (states > 0));

		if (rpool->addr.type == PF_ADDR_TABLE) {
			if (pfr_states_increase(rpool->addr.p.tbl,
			    naddr, af) == -1) {
				if (pf_status.debug >= LOG_DEBUG) {
					log(LOG_DEBUG, "pf: pf_map_addr: "
					    "selected address ");
					pf_print_host(naddr, 0, af);
					addlog(". Failed to increase count!\n");
				}
				return (1);
			}
		} else if (rpool->addr.type == PF_ADDR_DYNIFTL) {
			if (pfr_states_increase(rpool->addr.p.dyn->pfid_kt,
			    naddr, af) == -1) {
				if (pf_status.debug >= LOG_DEBUG) {
					log(LOG_DEBUG, "pf: pf_map_addr: "
					    "selected address ");
					pf_print_host(naddr, 0, af);
					addlog(". Failed to increase count!\n");
				}
				return (1);
			}
		}
		break;
	}

	if (rpool->opts & PF_POOL_STICKYADDR) {
		if (sns[type] != NULL) {
			pf_remove_src_node(sns[type]);
			sns[type] = NULL;
		}
		if (pf_insert_src_node(&sns[type], r, type, af, saddr, naddr,
		    0))
			return (1);
	}

	if (pf_status.debug >= LOG_NOTICE &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		log(LOG_NOTICE, "pf: pf_map_addr: selected address ");
		pf_print_host(naddr, 0, af);
		if ((rpool->opts & PF_POOL_TYPEMASK) == PF_POOL_LEASTSTATES)
			addlog(" with state count %llu", states);
		if ((rpool->addr.type == PF_ADDR_TABLE &&
		    rpool->addr.p.tbl->pfrkt_refcntcost > 0) ||
		    (rpool->addr.type == PF_ADDR_DYNIFTL &&
		    rpool->addr.p.dyn->pfid_kt->pfrkt_refcntcost > 0))
			addlog(" with weight %u", weight);
		addlog("\n");
	}

	return (0);
}
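/*
 * Illustrative sketch (not part of pf): the PF_POOL_LEASTSTATES comparison
 * above normalises each pool entry's state count by its weight when the
 * table is weighted, load = (UINT16_MAX * states) / weight, and otherwise
 * compares raw state counts; the entry with the lowest load wins. The name
 * leaststates_load is hypothetical.
 */
static inline u_int64_t
leaststates_load(u_int64_t states, u_int16_t weight, int weighted)
{
	/* weight is assumed non-zero for weighted tables */
	return (weighted ? (UINT16_MAX * states) / weight : states);
}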
int
pf_map_addr_sticky(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_src_node **sns, struct pf_pool *rpool,
    enum pf_sn_types type)
{
	struct pf_addr		*raddr, *rmask, *cached;
	struct pf_state		*s;
	struct pf_src_node	 k;
	int			 valid;

	k.af = af;
	k.type = type;
	PF_ACPY(&k.addr, saddr, af);
	k.rule.ptr = r;
	pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
	sns[type] = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
	if (sns[type] == NULL)
		return (-1);

	/* check if the cached entry is still valid */
	cached = &(sns[type])->raddr;
	valid = 0;
	if (PF_AZERO(cached, af)) {
		valid = 1;
	} else if (rpool->addr.type == PF_ADDR_DYNIFTL) {
		if (pfr_kentry_byaddr(rpool->addr.p.dyn->pfid_kt, cached,
		    af, 0))
			valid = 1;
	} else if (rpool->addr.type == PF_ADDR_TABLE) {
		if (pfr_kentry_byaddr(rpool->addr.p.tbl, cached, af, 0))
			valid = 1;
	} else if (rpool->addr.type != PF_ADDR_NOROUTE) {
		raddr = &rpool->addr.v.a.addr;
		rmask = &rpool->addr.v.a.mask;
		valid = pf_match_addr(0, raddr, rmask, cached, af);
	}

	if (!valid) {
		if (pf_status.debug >= LOG_DEBUG) {
			log(LOG_DEBUG, "pf: pf_map_addr: "
			    "stale src tracking (%u) ", type);
			pf_print_host(&k.addr, 0, af);
			addlog(" to ");
			pf_print_host(cached, 0, af);
			addlog("\n");
		}
		if (sns[type]->states != 0) {
			/* XXX expensive */
			RB_FOREACH(s, pf_state_tree_id, &tree_id)
				pf_state_rm_src_node(s, sns[type]);
		}
		sns[type]->expire = 1;
		pf_remove_src_node(sns[type]);
		sns[type] = NULL;
		return (-1);
	}

	if (!PF_AZERO(cached, af))
		PF_ACPY(naddr, cached, af);
	if (pf_status.debug >= LOG_DEBUG) {
		log(LOG_DEBUG, "pf: pf_map_addr: "
		    "src tracking (%u) maps ", type);
		pf_print_host(&k.addr, 0, af);
		addlog(" to ");
		pf_print_host(naddr, 0, af);
		addlog("\n");
	}

	return (0);
}
int
pf_get_sport(struct pf_pdesc *pd, struct pf_rule *r, struct pf_addr *naddr,
    u_int16_t *nport, u_int16_t low, u_int16_t high, struct pf_src_node **sn)
{
	struct pf_state_key_cmp	key;
	struct pf_addr		init_addr;
	u_int16_t		cut;

	bzero(&init_addr, sizeof(init_addr));
	if (pf_map_addr(pd->naf, r, &pd->nsaddr, naddr, &init_addr, sn,
	    &r->nat, PF_SN_NAT))
		return (1);

	if (pd->proto == IPPROTO_ICMP || pd->proto == IPPROTO_ICMPV6) {
		if (pd->ndport == htons(ICMP6_ECHO_REQUEST) ||
		    pd->ndport == htons(ICMP_ECHO)) {
			low = 1;
			high = 65535;
		} else
			return (0);	/* Don't try to modify non-echo ICMP */
	}

	do {
		key.af = pd->naf;
		key.proto = pd->proto;
		key.rdomain = pd->rdomain;
		PF_ACPY(&key.addr[0], &pd->ndaddr, key.af);
		PF_ACPY(&key.addr[1], naddr, key.af);
		key.port[0] = pd->ndport;

		/*
		 * port search; start random, step;
		 * similar to the portloop in in_pcbbind
		 */
		if (!(pd->proto == IPPROTO_TCP || pd->proto == IPPROTO_UDP ||
		    pd->proto == IPPROTO_ICMP ||
		    pd->proto == IPPROTO_ICMPV6)) {
			/* XXX bug: icmp states don't use the id on both
			 * XXX sides (traceroute -I through nat) */
			key.port[1] = pd->nsport;
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				*nport = pd->nsport;
				return (0);
			}
		} else if (low == 0 && high == 0) {
			key.port[1] = pd->nsport;
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				*nport = pd->nsport;
				return (0);
			}
		} else if (low == high) {
			key.port[1] = htons(low);
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				*nport = htons(low);
				return (0);
			}
		} else {
			u_int16_t tmp;

			if (low > high) {
				tmp = low;
				low = high;
				high = tmp;
			}
			/* low < high */
			cut = arc4random_uniform(1 + high - low) + low;
			/* low <= cut <= high */
			for (tmp = cut; tmp <= high; ++(tmp)) {
				key.port[1] = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL && !in_baddynamic(tmp, pd->proto)) {
					*nport = htons(tmp);
					return (0);
				}
			}
			for (tmp = cut - 1; tmp >= low; --(tmp)) {
				key.port[1] = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL && !in_baddynamic(tmp, pd->proto)) {
					*nport = htons(tmp);
					return (0);
				}
			}
		}

		switch (r->nat.opts & PF_POOL_TYPEMASK) {
		case PF_POOL_RANDOM:
		case PF_POOL_ROUNDROBIN:
		case PF_POOL_LEASTSTATES:
			/*
			 * pick a different source address since we're out
			 * of free port choices for the current one.
			 */
			if (pf_map_addr(pd->naf, r, &pd->nsaddr, naddr,
			    &init_addr, sn, &r->nat, PF_SN_NAT))
				return (1);
			break;
		case PF_POOL_NONE:
		case PF_POOL_SRCHASH:
		case PF_POOL_BITMASK:
		default:
			return (1);
		}
	} while (!PF_AEQ(&init_addr, naddr, pd->naf));

	return (1);					/* none available */
}
void
pflog_bpfcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf	*m;
	struct pfloghdr		*pfloghdr;
	u_int			 count;
	u_char			*dst;
	u_short			 action, reason;
	int			 off = 0, hdrlen = 0;
	union {
		struct tcphdr		tcp;
		struct udphdr		udp;
		struct icmp		icmp;
#ifdef INET6
		struct icmp6_hdr	icmp6;
#endif /* INET6 */
	} pf_hdrs;

	struct pf_pdesc		 pd;
	struct pf_addr		 osaddr, odaddr;
	u_int16_t		 osport = 0, odport = 0;

	m = src_arg;
	dst = dst_arg;

	if (m == NULL)
		panic("pflog_bpfcopy got no mbuf");

	/* first mbuf holds struct pfloghdr */
	pfloghdr = mtod(m, struct pfloghdr *);
	count = min(m->m_len, len);
	bcopy(pfloghdr, dst, count);
	pfloghdr = (struct pfloghdr *)dst;
	dst += count;
	len -= count;
	m = m->m_next;

	/* second mbuf is pkthdr */
	if (len > 0) {
		if (m == NULL)
			panic("no second mbuf");
		bcopy(m, mfake, sizeof(*mfake));
		mfake->m_flags &= ~(M_EXT|M_CLUSTER);
		mfake->m_next = NULL;
		mfake->m_nextpkt = NULL;
		mfake->m_data = dst;
		mfake->m_len = len;
	} else
		return;

	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}

	if (mfake->m_flags & M_PKTHDR)
		mfake->m_pkthdr.len = min(mfake->m_pkthdr.len, mfake->m_len);

	/* rewrite addresses if needed */
	memset(&pd, 0, sizeof(pd));
	pd.hdr.any = &pf_hdrs;
	if (pf_setup_pdesc(pfloghdr->af, pfloghdr->dir, &pd, mfake, &action,
	    &reason, NULL, NULL, NULL, NULL, &off, &hdrlen) == -1)
		return;

	PF_ACPY(&osaddr, pd.src, pd.af);
	PF_ACPY(&odaddr, pd.dst, pd.af);

	if (pd.sport)
		osport = *pd.sport;
	if (pd.dport)
		odport = *pd.dport;

	if ((pfloghdr->rewritten = pf_translate(&pd, &pfloghdr->saddr,
	    pfloghdr->sport, &pfloghdr->daddr, pfloghdr->dport, 0,
	    pfloghdr->dir))) {
		m_copyback(mfake, off, min(mfake->m_len - off, hdrlen),
		    pd.hdr.any, M_NOWAIT);
		PF_ACPY(&pfloghdr->saddr, &osaddr, pd.af);
		PF_ACPY(&pfloghdr->daddr, &odaddr, pd.af);
		pfloghdr->sport = osport;
		pfloghdr->dport = odport;
	}
}
int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sns,
    struct pf_pool *rpool, enum pf_sn_types type)
{
	unsigned char		 hash[16];
	struct pf_addr		*raddr = &rpool->cur->addr.v.a.addr;
	struct pf_addr		*rmask = &rpool->cur->addr.v.a.mask;
	struct pf_pooladdr	*acur = rpool->cur;
	struct pf_src_node	 k;

	if (sns[type] == NULL && rpool->opts & PF_POOL_STICKYADDR &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		k.af = af;
		k.type = type;
		PF_ACPY(&k.addr, saddr, af);
		k.rule.ptr = r;
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		sns[type] = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
		if (sns[type] != NULL) {
			if (!PF_AZERO(&(sns[type])->raddr, af))
				PF_ACPY(naddr, &(sns[type])->raddr, af);
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pf_map_addr: src tracking (%u) maps ",
				    type);
				pf_print_host(&k.addr, 0, af);
				printf(" to ");
				pf_print_host(naddr, 0, af);
				printf("\n");
			}
			return (0);
		}
	}

	if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
		return (1);
	if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
		switch (af) {
#ifdef INET
		case AF_INET:
			if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:
			if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
			break;
#endif /* INET6 */
		}
	} else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
		if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
			return (1);	/* unsupported */
	} else {
		raddr = &rpool->cur->addr.v.a.addr;
		rmask = &rpool->cur->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, af);
		break;
	case PF_POOL_BITMASK:
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, af)) {
			switch (af) {
#ifdef INET
			case AF_INET:
				rpool->counter.addr32[0] = htonl(arc4random());
				break;
#endif /* INET */
#ifdef INET6
			case AF_INET6:
				if (rmask->addr32[3] != 0xffffffff)
					rpool->counter.addr32[3] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[2] != 0xffffffff)
					rpool->counter.addr32[2] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[1] != 0xffffffff)
					rpool->counter.addr32[1] =
					    htonl(arc4random());
				else
					break;
				if (rmask->addr32[0] != 0xffffffff)
					rpool->counter.addr32[0] =
					    htonl(arc4random());
				break;
#endif /* INET6 */
			}
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
			PF_ACPY(init_addr, naddr, af);

		} else {
			PF_AINC(&rpool->counter, af);
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
		}
		break;
	case PF_POOL_SRCHASH:
		pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
		break;
	case PF_POOL_ROUNDROBIN:
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			if (!pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af))
				goto get_addr;
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af))
				goto get_addr;
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
			goto get_addr;

	try_next:
		if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
			rpool->cur = TAILQ_FIRST(&rpool->list);
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else {
			raddr = &rpool->cur->addr.v.a.addr;
			rmask = &rpool->cur->addr.v.a.mask;
			PF_ACPY(&rpool->counter, raddr, af);
		}

	get_addr:
		PF_ACPY(naddr, &rpool->counter, af);
		if (init_addr != NULL && PF_AZERO(init_addr, af))
			PF_ACPY(init_addr, naddr, af);
		PF_AINC(&rpool->counter, af);
		break;
	}

	if (rpool->opts & PF_POOL_STICKYADDR) {
		if (sns[type] != NULL) {
			pf_remove_src_node(sns[type]);
			sns[type] = NULL;
		}
		if (pf_insert_src_node(&sns[type], r, type, af, saddr, naddr,
		    0))
			return (1);
	}

	if (pf_status.debug >= PF_DEBUG_NOISY &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		printf("pf_map_addr: selected address ");
		pf_print_host(naddr, 0, af);
		printf("\n");
	}

	return (0);
}
int
pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
    struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
    struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
    struct pf_src_node **sn, int rdomain)
{
	struct pf_state_key_cmp	key;
	struct pf_addr		init_addr;
	u_int16_t		cut;

	bzero(&init_addr, sizeof(init_addr));
	if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn, &r->nat,
	    PF_SN_NAT))
		return (1);

	if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
		if (dport == htons(ICMP6_ECHO_REQUEST) ||
		    dport == htons(ICMP_ECHO)) {
			low = 1;
			high = 65535;
		} else
			return (0);	/* Don't try to modify non-echo ICMP */
	}

	do {
		key.af = af;
		key.proto = proto;
		key.rdomain = rdomain;
		PF_ACPY(&key.addr[1], daddr, key.af);
		PF_ACPY(&key.addr[0], naddr, key.af);
		key.port[1] = dport;

		/*
		 * port search; start random, step;
		 * similar to the portloop in in_pcbbind
		 */
		if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
		    proto == IPPROTO_ICMP)) {
			/* XXX bug: icmp states don't use the id on both sides */
			key.port[0] = dport;
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
				return (0);
		} else if (low == 0 && high == 0) {
			key.port[0] = *nport;
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
				return (0);
		} else if (low == high) {
			key.port[0] = htons(low);
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				*nport = htons(low);
				return (0);
			}
		} else {
			u_int16_t tmp;

			if (low > high) {
				tmp = low;
				low = high;
				high = tmp;
			}
			/* low < high */
			cut = arc4random_uniform(1 + high - low) + low;
			/* low <= cut <= high */
			for (tmp = cut; tmp <= high; ++(tmp)) {
				key.port[0] = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL && !in_baddynamic(tmp, proto)) {
					*nport = htons(tmp);
					return (0);
				}
			}
			for (tmp = cut - 1; tmp >= low; --(tmp)) {
				key.port[0] = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL && !in_baddynamic(tmp, proto)) {
					*nport = htons(tmp);
					return (0);
				}
			}
		}

		switch (r->nat.opts & PF_POOL_TYPEMASK) {
		case PF_POOL_RANDOM:
		case PF_POOL_ROUNDROBIN:
			if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn,
			    &r->nat, PF_SN_NAT))
				return (1);
			break;
		case PF_POOL_NONE:
		case PF_POOL_SRCHASH:
		case PF_POOL_BITMASK:
		default:
			return (1);
		}
	} while (!PF_AEQ(&init_addr, naddr, af));

	return (1);					/* none available */
}