/* Returns true if at least one bit or field is wildcarded in 'a' but not in * 'b', false otherwise. */ bool flow_wildcards_has_extra(const struct flow_wildcards *a, const struct flow_wildcards *b) { int i; struct in6_addr ipv6_masked; BUILD_ASSERT_DECL(FLOW_WC_SEQ == 8); for (i = 0; i < FLOW_N_REGS; i++) { if ((a->reg_masks[i] & b->reg_masks[i]) != b->reg_masks[i]) { return true; } } ipv6_masked = ipv6_addr_bitand(&a->ipv6_src_mask, &b->ipv6_src_mask); if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_src_mask)) { return true; } ipv6_masked = ipv6_addr_bitand(&a->ipv6_dst_mask, &b->ipv6_dst_mask); if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_dst_mask)) { return true; } return (a->wildcards & ~b->wildcards || (a->tun_id_mask & b->tun_id_mask) != b->tun_id_mask || (a->nw_src_mask & b->nw_src_mask) != b->nw_src_mask || (a->nw_dst_mask & b->nw_dst_mask) != b->nw_dst_mask || (a->vlan_tci_mask & b->vlan_tci_mask) != b->vlan_tci_mask || (a->tp_src_mask & b->tp_src_mask) != b->tp_src_mask || (a->tp_dst_mask & b->tp_dst_mask) != b->tp_dst_mask); }
static void test_ipv6_masking(void) { struct in6_addr dest; struct in6_addr mask; mask = ipv6_create_mask(0); dest = ipv6_addr_bitand(&in6addr_exact, &mask); assert(ipv6_count_cidr_bits(&dest) == 0); mask = ipv6_create_mask(1); dest = ipv6_addr_bitand(&in6addr_exact, &mask); assert(ipv6_count_cidr_bits(&dest) == 1); mask = ipv6_create_mask(13); dest = ipv6_addr_bitand(&in6addr_exact, &mask); assert(ipv6_count_cidr_bits(&dest) == 13); mask = ipv6_create_mask(127); dest = ipv6_addr_bitand(&in6addr_exact, &mask); assert(ipv6_count_cidr_bits(&dest) == 127); mask = ipv6_create_mask(128); dest = ipv6_addr_bitand(&in6addr_exact, &mask); assert(ipv6_count_cidr_bits(&dest) == 128); }
/* Initializes 'dst' as the combination of wildcards in 'src1' and 'src2'. * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded in * 'src1' or 'src2' or both. */ void flow_wildcards_combine(struct flow_wildcards *dst, const struct flow_wildcards *src1, const struct flow_wildcards *src2) { int i; BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14); dst->wildcards = src1->wildcards | src2->wildcards; dst->tun_id_mask = src1->tun_id_mask & src2->tun_id_mask; dst->nw_src_mask = src1->nw_src_mask & src2->nw_src_mask; dst->nw_dst_mask = src1->nw_dst_mask & src2->nw_dst_mask; dst->ipv6_src_mask = ipv6_addr_bitand(&src1->ipv6_src_mask, &src2->ipv6_src_mask); dst->ipv6_dst_mask = ipv6_addr_bitand(&src1->ipv6_dst_mask, &src2->ipv6_dst_mask); dst->ipv6_label_mask = src1->ipv6_label_mask & src2->ipv6_label_mask; dst->nd_target_mask = ipv6_addr_bitand(&src1->nd_target_mask, &src2->nd_target_mask); for (i = 0; i < FLOW_N_REGS; i++) { dst->reg_masks[i] = src1->reg_masks[i] & src2->reg_masks[i]; } dst->metadata_mask = src1->metadata_mask & src2->metadata_mask; dst->vlan_tci_mask = src1->vlan_tci_mask & src2->vlan_tci_mask; dst->tp_src_mask = src1->tp_src_mask & src2->tp_src_mask; dst->tp_dst_mask = src1->tp_dst_mask & src2->tp_dst_mask; dst->nw_frag_mask = src1->nw_frag_mask & src2->nw_frag_mask; eth_addr_bitand(src1->dl_src_mask, src2->dl_src_mask, dst->dl_src_mask); eth_addr_bitand(src1->dl_dst_mask, src2->dl_dst_mask, dst->dl_dst_mask); eth_addr_bitand(src1->arp_sha_mask, src2->arp_sha_mask, dst->arp_sha_mask); eth_addr_bitand(src1->arp_tha_mask, src2->arp_tha_mask, dst->arp_tha_mask); }
/* For every bit of a field that is wildcarded in 'wildcards', sets the
 * corresponding bit in 'flow' to zero.  This canonicalizes 'flow' so that
 * two flows that are equal modulo wildcarded bits compare equal. */
void
flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
{
    const flow_wildcards_t wc = wildcards->wildcards;
    int i;

    /* Update this function whenever the wildcard layout changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14);

    /* Bitwise-masked fields: keep only the bits the mask retains. */
    for (i = 0; i < FLOW_N_REGS; i++) {
        flow->regs[i] &= wildcards->reg_masks[i];
    }
    flow->tun_id &= wildcards->tun_id_mask;
    flow->metadata &= wildcards->metadata_mask;
    flow->nw_src &= wildcards->nw_src_mask;
    flow->nw_dst &= wildcards->nw_dst_mask;

    /* Whole-field wildcards controlled by FWW_* flag bits. */
    if (wc & FWW_IN_PORT) {
        flow->in_port = 0;
    }
    flow->vlan_tci &= wildcards->vlan_tci_mask;
    if (wc & FWW_DL_TYPE) {
        flow->dl_type = htons(0);
    }
    flow->tp_src &= wildcards->tp_src_mask;
    flow->tp_dst &= wildcards->tp_dst_mask;

    /* Ethernet addresses carry full per-bit masks. */
    eth_addr_bitand(flow->dl_src, wildcards->dl_src_mask, flow->dl_src);
    eth_addr_bitand(flow->dl_dst, wildcards->dl_dst_mask, flow->dl_dst);
    if (wc & FWW_NW_PROTO) {
        flow->nw_proto = 0;
    }
    flow->ipv6_label &= wildcards->ipv6_label_mask;

    /* DSCP and ECN occupy disjoint bit ranges within nw_tos, so they are
     * cleared independently. */
    if (wc & FWW_NW_DSCP) {
        flow->nw_tos &= ~IP_DSCP_MASK;
    }
    if (wc & FWW_NW_ECN) {
        flow->nw_tos &= ~IP_ECN_MASK;
    }
    if (wc & FWW_NW_TTL) {
        flow->nw_ttl = 0;
    }
    flow->nw_frag &= wildcards->nw_frag_mask;
    eth_addr_bitand(flow->arp_sha, wildcards->arp_sha_mask, flow->arp_sha);
    eth_addr_bitand(flow->arp_tha, wildcards->arp_tha_mask, flow->arp_tha);

    /* IPv6 addresses likewise carry full per-bit masks. */
    flow->ipv6_src = ipv6_addr_bitand(&flow->ipv6_src,
                                      &wildcards->ipv6_src_mask);
    flow->ipv6_dst = ipv6_addr_bitand(&flow->ipv6_dst,
                                      &wildcards->ipv6_dst_mask);
    flow->nd_target = ipv6_addr_bitand(&flow->nd_target,
                                       &wildcards->nd_target_mask);

    /* skb_priority has no mask in this structure; it is always cleared. */
    flow->skb_priority = 0;
}
int str_to_ipv6(const char *str_, struct in6_addr *addrp, struct in6_addr *maskp) { char *str = xstrdup(str_); char *save_ptr = NULL; const char *name, *netmask; struct in6_addr addr, mask; int retval; name = strtok_r(str, "/", &save_ptr); retval = name ? lookup_ipv6(name, &addr) : EINVAL; if (retval) { printf("%s: could not convert to IPv6 address\n", str); return -1; } netmask = strtok_r(NULL, "/", &save_ptr); if (netmask) { int prefix = atoi(netmask); if (prefix <= 0 || prefix > 128) { printf("%s: network prefix bits not between 1 and 128\n", str); return -1; } else { mask = ipv6_create_mask(prefix); } } else { mask = in6addr_zero ; *maskp = mask; *addrp = ipv6_addr_bitand(&addr, &in6addr_exact); return 1; } mask = in6addr_exact ; *addrp = ipv6_addr_bitand(&addr, &mask); if (maskp) { *maskp = mask; } else { if (!ipv6_mask_is_exact(&mask)) { printf("%s: netmask not allowed here", str_); return -1; } } free(str); return 1; }
/* Makes 'match' match IPv6 destination addresses that agree with 'dst' on
 * the bits that are set in 'mask'. */
void
match_set_ipv6_dst_masked(struct match *match, const struct in6_addr *dst,
                          const struct in6_addr *mask)
{
    match->wc.masks.ipv6_dst = *mask;
    /* Store the address pre-masked so unmatched bits are canonically zero. */
    match->flow.ipv6_dst = ipv6_addr_bitand(dst, mask);
}
/* Makes 'match' match IPv6 source addresses that agree with 'src' on the
 * bits that are set in 'mask'. */
void
match_set_ipv6_src_masked(struct match *match, const struct in6_addr *src,
                          const struct in6_addr *mask)
{
    match->wc.masks.ipv6_src = *mask;
    /* Store the address pre-masked so unmatched bits are canonically zero. */
    match->flow.ipv6_src = ipv6_addr_bitand(src, mask);
}
/* Makes 'match' match ND target addresses that agree with 'target' on the
 * bits that are set in 'mask'. */
void
match_set_nd_target_masked(struct match *match,
                           const struct in6_addr *target,
                           const struct in6_addr *mask)
{
    match->wc.masks.nd_target = *mask;
    /* Store the address pre-masked so unmatched bits are canonically zero. */
    match->flow.nd_target = ipv6_addr_bitand(target, mask);
}
/* Initializes 'dst' as the combination of wildcards in 'src1' and 'src2'. * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded in * 'src1' or 'src2' or both. */ void flow_wildcards_combine(struct flow_wildcards *dst, const struct flow_wildcards *src1, const struct flow_wildcards *src2) { int i; dst->wildcards = src1->wildcards | src2->wildcards; dst->tun_id_mask = src1->tun_id_mask & src2->tun_id_mask; dst->nw_src_mask = src1->nw_src_mask & src2->nw_src_mask; dst->nw_dst_mask = src1->nw_dst_mask & src2->nw_dst_mask; dst->ipv6_src_mask = ipv6_addr_bitand(&src1->ipv6_src_mask, &src2->ipv6_src_mask); dst->ipv6_dst_mask = ipv6_addr_bitand(&src1->ipv6_dst_mask, &src2->ipv6_dst_mask); for (i = 0; i < FLOW_N_REGS; i++) { dst->reg_masks[i] = src1->reg_masks[i] & src2->reg_masks[i]; } dst->vlan_tci_mask = src1->vlan_tci_mask & src2->vlan_tci_mask; }
static void add_ipv6_netaddr(struct lport_addresses *laddrs, struct in6_addr addr, unsigned int plen) { laddrs->n_ipv6_addrs++; laddrs->ipv6_addrs = xrealloc(laddrs->ipv6_addrs, laddrs->n_ipv6_addrs * sizeof *laddrs->ipv6_addrs); struct ipv6_netaddr *na = &laddrs->ipv6_addrs[laddrs->n_ipv6_addrs - 1]; memcpy(&na->addr, &addr, sizeof na->addr); na->mask = ipv6_create_mask(plen); na->network = ipv6_addr_bitand(&addr, &na->mask); na->plen = plen; in6_addr_solicited_node(&na->sn_addr, &addr); inet_ntop(AF_INET6, &addr, na->addr_s, sizeof na->addr_s); inet_ntop(AF_INET6, &na->sn_addr, na->sn_addr_s, sizeof na->sn_addr_s); inet_ntop(AF_INET6, &na->network, na->network_s, sizeof na->network_s); }
/* For every bit of a field that is wildcarded in 'wildcards', sets the
 * corresponding bit in 'flow' to zero.  This canonicalizes 'flow' so that
 * two flows that are equal modulo wildcarded bits compare equal. */
void
flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
{
    const flow_wildcards_t wc = wildcards->wildcards;
    int i;

    /* Update this function whenever the wildcard layout changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 8);

    /* Bitwise-masked fields: keep only the bits the mask retains. */
    for (i = 0; i < FLOW_N_REGS; i++) {
        flow->regs[i] &= wildcards->reg_masks[i];
    }
    flow->tun_id &= wildcards->tun_id_mask;
    flow->nw_src &= wildcards->nw_src_mask;
    flow->nw_dst &= wildcards->nw_dst_mask;

    /* Whole-field wildcards controlled by FWW_* flag bits. */
    if (wc & FWW_IN_PORT) {
        flow->in_port = 0;
    }
    flow->vlan_tci &= wildcards->vlan_tci_mask;
    if (wc & FWW_DL_TYPE) {
        flow->dl_type = htons(0);
    }
    flow->tp_src &= wildcards->tp_src_mask;
    flow->tp_dst &= wildcards->tp_dst_mask;
    if (wc & FWW_DL_SRC) {
        memset(flow->dl_src, 0, sizeof flow->dl_src);
    }
    if (wc & FWW_DL_DST) {
        /* Clear all of dl_dst except the multicast bit (0x01 of byte 0),
         * whose wildcarding is tracked separately by FWW_ETH_MCAST. */
        flow->dl_dst[0] &= 0x01;
        memset(&flow->dl_dst[1], 0, 5);
    }
    if (wc & FWW_ETH_MCAST) {
        /* Clear only the multicast bit of dl_dst. */
        flow->dl_dst[0] &= 0xfe;
    }
    if (wc & FWW_NW_PROTO) {
        flow->nw_proto = 0;
    }
    if (wc & FWW_IPV6_LABEL) {
        flow->ipv6_label = htonl(0);
    }
    /* DSCP and ECN occupy disjoint bit ranges within nw_tos, so they are
     * cleared independently. */
    if (wc & FWW_NW_DSCP) {
        flow->nw_tos &= ~IP_DSCP_MASK;
    }
    if (wc & FWW_NW_ECN) {
        flow->nw_tos &= ~IP_ECN_MASK;
    }
    if (wc & FWW_NW_TTL) {
        flow->nw_ttl = 0;
    }
    flow->nw_frag &= wildcards->nw_frag_mask;
    if (wc & FWW_ARP_SHA) {
        memset(flow->arp_sha, 0, sizeof flow->arp_sha);
    }
    if (wc & FWW_ARP_THA) {
        memset(flow->arp_tha, 0, sizeof flow->arp_tha);
    }

    /* IPv6 addresses carry full per-bit masks rather than FWW_* flags. */
    flow->ipv6_src = ipv6_addr_bitand(&flow->ipv6_src,
                                      &wildcards->ipv6_src_mask);
    flow->ipv6_dst = ipv6_addr_bitand(&flow->ipv6_dst,
                                      &wildcards->ipv6_dst_mask);
    if (wc & FWW_ND_TARGET) {
        memset(&flow->nd_target, 0, sizeof flow->nd_target);
    }

    /* skb_priority has no mask in this structure; it is always cleared. */
    flow->skb_priority = 0;
}
/* Returns true if at least one bit or field is wildcarded in 'a' but not in * 'b', false otherwise. */ bool flow_wildcards_has_extra(const struct flow_wildcards *a, const struct flow_wildcards *b) { int i; uint8_t eth_masked[ETH_ADDR_LEN]; struct in6_addr ipv6_masked; BUILD_ASSERT_DECL(FLOW_WC_SEQ == 14); for (i = 0; i < FLOW_N_REGS; i++) { if ((a->reg_masks[i] & b->reg_masks[i]) != b->reg_masks[i]) { return true; } } eth_addr_bitand(a->dl_src_mask, b->dl_src_mask, eth_masked); if (!eth_addr_equals(eth_masked, b->dl_src_mask)) { return true; } eth_addr_bitand(a->dl_dst_mask, b->dl_dst_mask, eth_masked); if (!eth_addr_equals(eth_masked, b->dl_dst_mask)) { return true; } eth_addr_bitand(a->arp_sha_mask, b->arp_sha_mask, eth_masked); if (!eth_addr_equals(eth_masked, b->arp_sha_mask)) { return true; } eth_addr_bitand(a->arp_tha_mask, b->arp_tha_mask, eth_masked); if (!eth_addr_equals(eth_masked, b->arp_tha_mask)) { return true; } ipv6_masked = ipv6_addr_bitand(&a->ipv6_src_mask, &b->ipv6_src_mask); if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_src_mask)) { return true; } ipv6_masked = ipv6_addr_bitand(&a->ipv6_dst_mask, &b->ipv6_dst_mask); if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_dst_mask)) { return true; } ipv6_masked = ipv6_addr_bitand(&a->nd_target_mask, &b->nd_target_mask); if (!ipv6_addr_equals(&ipv6_masked, &b->nd_target_mask)) { return true; } return (a->wildcards & ~b->wildcards || (a->tun_id_mask & b->tun_id_mask) != b->tun_id_mask || (a->nw_src_mask & b->nw_src_mask) != b->nw_src_mask || (a->nw_dst_mask & b->nw_dst_mask) != b->nw_dst_mask || (a->ipv6_label_mask & b->ipv6_label_mask) != b->ipv6_label_mask || (a->vlan_tci_mask & b->vlan_tci_mask) != b->vlan_tci_mask || (a->metadata_mask & b->metadata_mask) != b->metadata_mask || (a->tp_src_mask & b->tp_src_mask) != b->tp_src_mask || (a->tp_dst_mask & b->tp_dst_mask) != b->tp_dst_mask || (a->nw_frag_mask & b->nw_frag_mask) != b->nw_frag_mask); }