/* Install RX filters for a clustered (thc) stack on every hwport selected
 * by [hwport_mask] that has a VI set.  Each filter spreads traffic via the
 * VI set's RSS context.  On any insertion failure, previously installed
 * filters are removed via oo_hw_filter_clear() and the error is returned.
 * Returns 0 on success or a negative error code from efrm_filter_insert().
 */
int oo_hw_filter_set_thc(struct oo_hw_filter* oofilter,
                         tcp_helper_cluster_t* thc, int protocol,
                         unsigned daddr, int dport, unsigned hwport_mask)
{
  struct efx_filter_spec spec;
  int hwport, vi_base, rc;

  /* A filter is owned either by a stack (trs) or a cluster (thc), never
   * both. */
  ci_assert_equal(oofilter->trs, NULL);
  oofilter->thc = thc;

  for( hwport = 0; hwport < CI_CFG_MAX_REGISTER_INTERFACES; ++hwport ) {
    /* Skip ports not requested or without a VI set allocated. */
    if( ! (hwport_mask & (1 << hwport)) || thc->thc_vi_set[hwport] == NULL )
      continue;

    vi_base = efrm_vi_set_get_base(thc->thc_vi_set[hwport]);
    efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
                       EFX_FILTER_FLAG_RX_SCATTER | EFX_FILTER_FLAG_RX_RSS,
                       vi_base);
    spec.rss_context = efrm_vi_set_get_rss_context(thc->thc_vi_set[hwport]);
#if EFX_DRIVERLINK_API_VERSION >= 15
    {
      int stack_id = tcp_helper_cluster_vi_hw_stack_id(thc, hwport);
      ci_assert( stack_id >= 0 );
      efx_filter_set_stack_id(&spec, stack_id);
    }
#endif
    rc = efx_filter_set_ipv4_local(&spec, protocol, daddr, dport);
    ci_assert_equal(rc, 0);

    rc = efrm_filter_insert(get_client(hwport), &spec, false);
    if( rc < 0 ) {
      /* Roll back everything installed so far before reporting failure. */
      oo_hw_filter_clear(oofilter);
      return rc;
    }
    oofilter->filter_id[hwport] = rc;
  }
  return 0;
}
/* Insert a hardware filter on a single hwport for a per-stack (trs) filter.
 * Installs a full 5-tuple match when [saddr] is non-zero, otherwise a
 * local-address match; optionally matches [vlan_id] and enables TX loopback
 * when requested and supported.  Returns 0 on success (recording the filter
 * id in oofilter->filter_id[hwport]), a negative error code on insertion
 * failure, or 0 without doing anything if the stack has no RX VI on this
 * hwport. */
static int oo_hw_filter_set_hwport(struct oo_hw_filter* oofilter, int hwport,
                                   int protocol, unsigned saddr, int sport,
                                   unsigned daddr, int dport,
                                   ci_uint16 vlan_id, unsigned src_flags)
{
  struct efx_filter_spec spec;
  int rc = 0, vi_id;

  /* Per-stack filters must not also belong to a cluster, and the slot for
   * this hwport must currently be empty. */
  ci_assert_equal(oofilter->thc, NULL);
  ci_assert(oofilter->filter_id[hwport] < 0);

  if( (vi_id = tcp_helper_rx_vi_id(oofilter->trs, hwport)) >= 0 ) {
    int flags = EFX_FILTER_FLAG_RX_SCATTER;
    int hw_rx_loopback_supported =
      tcp_helper_vi_hw_rx_loopback_supported(oofilter->trs, hwport);

    ci_assert( hw_rx_loopback_supported >= 0 );
    if( hw_rx_loopback_supported && (src_flags & OO_HW_SRC_FLAG_LOOPBACK) ) {
      flags |= EFX_FILTER_FLAG_TX;
    }

    efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, flags, vi_id);
#if EFX_DRIVERLINK_API_VERSION >= 15
    {
      /* Bug fix: this was declared unsigned, making the >= 0 assertion a
       * tautology that could never catch a negative (error) return.  Use
       * int, matching the identical pattern in oo_hw_filter_set_thc(). */
      int stack_id = tcp_helper_vi_hw_stack_id(oofilter->trs, hwport);
      ci_assert( stack_id >= 0 );
      efx_filter_set_stack_id(&spec, stack_id);
    }
#endif
    if( saddr != 0 )
      rc = efx_filter_set_ipv4_full(&spec, protocol, daddr, dport,
                                    saddr, sport);
    else
      rc = efx_filter_set_ipv4_local(&spec, protocol, daddr, dport);
    ci_assert_equal(rc, 0);

    /* note: bug 42561 affecting loopback on VLAN 0 with fw <= v4_0_6_6688 */
    if( vlan_id != OO_HW_VLAN_UNSPEC ) {
      rc = efx_filter_set_eth_local(&spec, vlan_id, NULL);
      ci_assert_equal(rc, 0);
    }

    rc = efrm_filter_insert(get_client(hwport), &spec, false);
    if( rc >= 0 ) {
      /* Non-negative return is the filter id; record it and report success. */
      oofilter->filter_id[hwport] = rc;
      rc = 0;
    }
  }
  return rc;
}
/* efx_filter_rfs() - accelerated RFS callback: steer the flow carried by
 * @skb to RX queue @rxq_index by inserting a 5-tuple hardware filter.
 *
 * Returns the inserted filter id (>= 0) on success, or a negative errno:
 * -EPROTONOSUPPORT for non-IPv4/IPv6 ethertypes and IPv4 fragments, or the
 * error from the hardware insert.  The filter id is used to index
 * efx->rps_flow_id[] so the flow can be expired later.
 */
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	const __be16 *ports;
	__be16 ether_type;
	int nhoff;
	int rc;

	/* The core RPS/RFS code has already parsed and validated
	 * VLAN, IP and transport headers. We assume they are in the
	 * header area. */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		const struct vlan_hdr *vh = (const struct vlan_hdr *)skb->data;
		/* We can't filter on the IP 5-tuple and the vlan
		 * together, so just strip the vlan header and filter
		 * on the IP part. */
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
		ether_type = vh->h_vlan_encapsulated_proto;
		nhoff = sizeof(struct vlan_hdr);
	} else {
		ether_type = skb->protocol;
		nhoff = 0;
	}

	/* Only IPv4 and IPv6 flows can be expressed as hardware filters. */
	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	/* PRI_HINT: this is a steering hint the driver may evict, not a
	 * required filter. */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	/* Full 5-tuple match: ethertype + protocol + both addresses and
	 * ports. */
	spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
			   EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
			   EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	spec.ether_type = ether_type;

	if (ether_type == htons(ETH_P_IP)) {
		const struct iphdr *ip = (const struct iphdr *)(skb->data + nhoff);
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
		/* A non-first fragment has no transport header to match. */
		if (ip_is_fragment(ip))
			return -EPROTONOSUPPORT;
		spec.ip_proto = ip->protocol;
		spec.rem_host[0] = ip->saddr;
		spec.loc_host[0] = ip->daddr;
		/* Ports live right after the IP header (ihl is in 32-bit
		 * words); need at least 4 bytes for src+dst port. */
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
	} else {
		const struct ipv6hdr *ip6 = (const struct ipv6hdr *)(skb->data + nhoff);
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip6) + 4);
		spec.ip_proto = ip6->nexthdr;
		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
		/* NOTE(review): (ip6 + 1) assumes the transport header
		 * immediately follows the fixed IPv6 header, i.e. no
		 * extension headers — ports would be wrong otherwise;
		 * presumably the RFS core only hands us such packets.
		 * TODO confirm. */
		ports = (const __be16 *)(ip6 + 1);
	}

	/* ports[0]/ports[1] are the big-endian source/destination ports. */
	spec.rem_port = ports[0];
	spec.loc_port = ports[1];

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	if (ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);

	return rc;
}