/**
 * nfp_flower_compile_flow_match() - serialize a TC flower rule into FW key/mask
 * @flow:	TC flower classifier offload rule to compile
 * @key_ls:	precomputed key layout: which layers are present and total size
 * @netdev:	ingress net_device, used to look up the FW port id
 * @nfp_flow:	destination payload; unmasked_data and mask_data are filled in
 *
 * Walks the layers flagged in @key_ls->key_layer in the fixed FW order and
 * writes each one twice — once as exact match data (ext cursor) and once as
 * mask data (msk cursor) — advancing both cursors by the size of the layer's
 * struct after each layer.
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported extended metadata, or a
 * negative errno propagated from nfp_flower_compile_port().
 */
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow)
{
	int err;
	u8 *ext;
	u8 *msk;

	/* Caller sized both buffers to key_size; start from a clean slate so
	 * any layer not written below matches as all-zero.
	 */
	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;
	if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
		/* Port layer present: use the two-word metadata header. */
		/* Populate Exact Metadata. */
		nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
					    flow, key_ls->key_layer, false);
		/* Populate Mask Metadata. */
		nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
					    flow, key_ls->key_layer, true);
		ext += sizeof(struct nfp_flower_meta_two);
		msk += sizeof(struct nfp_flower_meta_two);

		/* Populate Exact Port data. */
		err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
					      nfp_repr_get_port_id(netdev),
					      false);
		if (err)
			return err;

		/* Populate Mask Port Data. */
		err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
					      nfp_repr_get_port_id(netdev),
					      true);
		if (err)
			return err;

		ext += sizeof(struct nfp_flower_in_port);
		msk += sizeof(struct nfp_flower_in_port);
	} else {
		/* No port layer: the shorter one-word metadata header. */
		/* Populate Exact Metadata. */
		nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
					key_ls->key_layer);
		/* Populate Mask Metadata. */
		nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
					key_ls->key_layer);
		ext += sizeof(struct nfp_flower_meta_one);
		msk += sizeof(struct nfp_flower_meta_one);
	}

	if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
		/* Additional Metadata Fields.
		 * Currently unsupported.
		 */
		return -EOPNOTSUPP;
	}

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		/* Populate Exact MAC Data. */
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
				       flow, false);
		/* Populate Mask MAC Data. */
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
				       flow, true);
		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		/* Populate Exact TP (transport-layer ports) Data. */
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 flow, false);
		/* Populate Mask TP Data. */
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
					 flow, true);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		/* Populate Exact IPv4 Data. */
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					flow, false);
		/* Populate Mask IPv4 Data. */
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
					flow, true);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		/* Populate Exact IPv6 Data. (Comment previously said IPv4 —
		 * copy-paste error; the code correctly handles IPv6.)
		 */
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					flow, false);
		/* Populate Mask IPv6 Data. */
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
					flow, true);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	return 0;
}
int nfp_flower_compile_flow_match(struct nfp_app *app, struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type) { u32 cmsg_port = 0; int err; u8 *ext; u8 *msk; if (nfp_netdev_is_nfp_repr(netdev)) cmsg_port = nfp_repr_get_port_id(netdev); memset(nfp_flow->unmasked_data, 0, key_ls->key_size); memset(nfp_flow->mask_data, 0, key_ls->key_size); ext = nfp_flow->unmasked_data; msk = nfp_flow->mask_data; nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext, (struct nfp_flower_meta_tci *)msk, flow, key_ls->key_layer); ext += sizeof(struct nfp_flower_meta_tci); msk += sizeof(struct nfp_flower_meta_tci); /* Populate Extended Metadata if Required. */ if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) { nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext, key_ls->key_layer_two); nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk, key_ls->key_layer_two); ext += sizeof(struct nfp_flower_ext_meta); msk += sizeof(struct nfp_flower_ext_meta); } /* Populate Exact Port data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext, cmsg_port, false, tun_type); if (err) return err; /* Populate Mask Port Data. 
*/ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, cmsg_port, true, tun_type); if (err) return err; ext += sizeof(struct nfp_flower_in_port); msk += sizeof(struct nfp_flower_in_port); if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) { nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, (struct nfp_flower_mac_mpls *)msk, flow); ext += sizeof(struct nfp_flower_mac_mpls); msk += sizeof(struct nfp_flower_mac_mpls); } if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) { nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext, (struct nfp_flower_tp_ports *)msk, flow); ext += sizeof(struct nfp_flower_tp_ports); msk += sizeof(struct nfp_flower_tp_ports); } if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) { nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext, (struct nfp_flower_ipv4 *)msk, flow); ext += sizeof(struct nfp_flower_ipv4); msk += sizeof(struct nfp_flower_ipv4); } if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) { nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext, (struct nfp_flower_ipv6 *)msk, flow); ext += sizeof(struct nfp_flower_ipv6); msk += sizeof(struct nfp_flower_ipv6); } if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN || key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { __be32 tun_dst; nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow); tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst; ext += sizeof(struct nfp_flower_ipv4_udp_tun); msk += sizeof(struct nfp_flower_ipv4_udp_tun); /* Store the tunnel destination in the rule data. * This must be present and be an exact match. */ nfp_flow->nfp_tun_ipv4_addr = tun_dst; nfp_tunnel_add_ipv4_off(app, tun_dst); if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { err = nfp_flower_compile_geneve_opt(ext, msk, flow); if (err) return err; } } return 0; }