/*
 * Hash a TCP 4-tuple into a single 32-bit value.
 *
 * Returns 0 on success (hash stored in *hval), -1 on NULL arguments
 * or an unsupported address family.
 */
int tcp_tup_hash(tcp_tup_t *tup, u_int32_t *hval)
{
	u_int32_t port_word;
	u_int32_t dst_lo, folded, mixed;
	struct in6_addr *src6;

	if (unlikely(!tup || !hval))
		return -1;

	/* Pack both ports into one word: destination in the high half. */
	port_word = ((u_int32_t)(tup->dst.port) << 16) + (u_int32_t)(tup->src.port);

	if (tup->src.family == AF_INET) {
		*hval = jhash_3words(tup->dst._addr4.s_addr,
				     tup->src._addr4.s_addr, port_word);
		return 0;
	}

	if (tup->src.family != AF_INET6)
		return -1;

	/* IPv6: fold the source address down to one word, then mix it
	 * with the low word of the destination and the port pair. */
	dst_lo = tup->dst._addr6.s6_addr32[3];
	src6 = &tup->src._addr6;
	folded = src6->s6_addr32[0] ^ src6->s6_addr32[1];
	mixed = jhash_3words(folded, src6->s6_addr32[2], src6->s6_addr32[3]);
	*hval = jhash_3words(dst_lo, mixed, port_word);
	return 0;
}
static u32 hash_v4(const struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); /* packets in either direction go into same queue */ if (iph->saddr < iph->daddr) return jhash_3words((__force u32)iph->saddr, (__force u32)iph->daddr, iph->protocol, jhash_initval); return jhash_3words((__force u32)iph->daddr, (__force u32)iph->saddr, iph->protocol, jhash_initval); }
/**
 * Compute connection hash (index in a connection table, see ::conntable_t).
 * Hash is an integer in interval 0..(::CONNTABLE_BUCKETS-1).
 */
int tcptable_hash(conn_t * c)
{
	/** \todo Hash the whole ip address! */
	/* Only the last 32-bit word of each address is hashed; the two
	 * 16-bit ports are packed into a single word (src in the high
	 * half, since << binds tighter than |).  The FreeBSD branch
	 * differs only in how the in6_addr words are spelled. */
#ifndef FREEBSD
	return (jhash_3words(c->ip_src.s6_addr32[3],
			     c->ip_dst.s6_addr32[3],
			     (c->port_dst | c->port_src << 16),
			     32)) % CONNTABLE_BUCKETS;
#else
	return (jhash_3words(c->ip_src.__u6_addr.__u6_addr32[3],
			     c->ip_dst.__u6_addr.__u6_addr32[3],
			     (c->port_dst | c->port_src << 16),
			     32)) % CONNTABLE_BUCKETS;
#endif
}
/**
 * Hash function for IPv6 keys from reassembly.c
 *
 * Folds each 128-bit address to 32 bits, mixes in the fragment id
 * under the caller-supplied seed, and masks to the table size.
 */
static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
				    const struct in6_addr *daddr, u32 rnd)
{
	u32 mixed = jhash_3words(ipv6_addr_hash(saddr),
				 ipv6_addr_hash(daddr),
				 (__force u32)id, rnd);

	return mixed & (INETFRAGS_HASHSZ - 1);
}
/* Hash an IPv4 fragment key (id, addresses, protocol) under a lazily
 * initialised per-boot random seed. */
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	u32 id_and_proto;

	/* Seed the hash key exactly once. */
	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));

	/* Fragment id in the high 16 bits, protocol in the low byte. */
	id_and_proto = (__force u32)id << 16 | prot;

	return jhash_3words(id_and_proto, (__force u32)saddr,
			    (__force u32)daddr, ip4_frags.rnd);
}
/*
 * Classic SFQ flow hash: mixes destination, source and (when safely
 * readable) the first transport-header word into a bucket index in
 * [0, SFQ_HASH_DIVISOR).
 */
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
	{
		struct iphdr *iph = skb->nh.iph;
		h = iph->daddr;
		h2 = iph->saddr^iph->protocol;
		/* Fold in the first transport word (ports for TCP/UDP,
		 * SPI for ESP) only for unfragmented packets, so all
		 * fragments of a datagram hash into the same bucket. */
		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
		    (iph->protocol == IPPROTO_TCP ||
		     iph->protocol == IPPROTO_UDP ||
		     iph->protocol == IPPROTO_ESP))
			h2 ^= *(((u32*)iph) + iph->ihl);
		break;
	}
	case __constant_htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph = skb->nh.ipv6h;
		/* Only the low 32 bits of each IPv6 address are used. */
		h = iph->daddr.s6_addr32[3];
		h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
		/* NOTE(review): reads the word right after the fixed
		 * IPv6 header — assumes no extension headers precede
		 * the transport header. */
		if (iph->nexthdr == IPPROTO_TCP ||
		    iph->nexthdr == IPPROTO_UDP ||
		    iph->nexthdr == IPPROTO_ESP)
			h2 ^= *(u32*)&iph[1];
		break;
	}
	default:
		/* Non-IP traffic: fall back to route and socket pointers. */
		h = (u32)(unsigned long)skb->dst^skb->protocol;
		h2 = (u32)(unsigned long)skb->sk;
	}
	//return sfq_fold_hash(q, h, h2); //SpeedMod
	return jhash_3words(0, h, h2, q->perturbation) & (SFQ_HASH_DIVISOR-1);
}
/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
				    const struct in6_addr *daddr)
{
	u32 src_fold, dst_fold;

	/* Seed the hash key exactly once per boot. */
	net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));

	src_fold = ipv6_addr_hash(saddr);
	dst_fold = ipv6_addr_hash(daddr);

	return jhash_3words(src_fold, dst_fold, (__force u32)id,
			    ip6_frags.rnd);
}
/* Hash a 6LoWPAN fragment key (tag, datagram size, link addresses)
 * under a lazily initialised random seed. */
static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
				     const struct ieee802154_addr *saddr,
				     const struct ieee802154_addr *daddr)
{
	u32 tag_and_size;

	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));

	/* Datagram size in the high half, fragment tag in the low. */
	tag_and_size = (__force u32)(tag + (d_size << 16));

	return jhash_3words(ieee802154_addr_hash(saddr),
			    ieee802154_addr_hash(daddr),
			    tag_and_size, lowpan_frags.rnd);
}
static unsigned int sfq_hash(const struct sfq_sched_data *q, const struct sk_buff *skb) { const struct flow_keys *keys = &sfq_skb_cb(skb)->keys; unsigned int hash; hash = jhash_3words((__force u32)keys->dst, (__force u32)keys->src ^ keys->ip_proto, (__force u32)keys->ports, q->perturbation); return hash & (q->divisor - 1); }
/*
 * Hash the full 128 bits of both IPv6 addresses plus the fragment id
 * by chaining three jhash_3words rounds (each round seeds the next),
 * then mask down to the fragment table size.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 acc;

	acc = jhash_3words((__force u32)saddr->s6_addr32[0],
			   (__force u32)saddr->s6_addr32[1],
			   (__force u32)saddr->s6_addr32[2], rnd);

	acc = jhash_3words((__force u32)saddr->s6_addr32[3],
			   (__force u32)daddr->s6_addr32[0],
			   (__force u32)daddr->s6_addr32[1], acc);

	acc = jhash_3words((__force u32)daddr->s6_addr32[2],
			   (__force u32)daddr->s6_addr32[3],
			   (__force u32)id, acc);

	return acc & (INETFRAGS_HASHSZ - 1);
}
static inline u32 hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) { u32 hash; if (t->dst < t->src) swap(t->src, t->dst); hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); hash = hash ^ (t->proto & info->proto_mask); return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; }
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, const struct sk_buff *skb) { struct flow_keys keys; unsigned int hash; skb_flow_dissect(skb, &keys); hash = jhash_3words((__force u32)keys.dst, (__force u32)keys.src ^ keys.ip_proto, (__force u32)keys.ports, q->perturbation); return reciprocal_scale(hash, q->flows_cnt); }
/*
 * Pick a load-balancing member index in [0, count) for an MPLS VPWS
 * frame by hashing the inner IPv4 flow.
 *
 * NOTE(review): assumes 'count' is a power of two (masks with
 * count - 1) and that a well-formed IPv4 header follows the Ethernet
 * header — no pskb_may_pull()/length validation is done before the
 * header fields are read.  Confirm callers guarantee both.
 */
s32 mpls_vpws_select(struct sk_buff * skb, u32 count)
{
	u32 sport = 0;
	u32 dport = 0;
	u32 hash_ip = 0;
	struct iphdr * iph;

	iph = (struct iphdr *)(skb->data + ETH_HLEN);

	/* Only option-less IPv4 (ihl == 5) is hashed on ports. */
	if ((4 == iph->version) && (20 == iph->ihl * 4)) {
		switch (iph->protocol) {
		case IPPROTO_TCP:
		{
			struct tcphdr * th = (struct tcphdr * )(skb->data + ETH_HLEN + iph->ihl * 4);
			sport = th->source;
			dport = th->dest;
			break;
		}
		case IPPROTO_UDP:
		{
			struct udphdr * uh = (struct udphdr * )(skb->data + ETH_HLEN + iph->ihl * 4);
			sport = uh->source;
			dport = uh->dest;
			break;
		}
		default:
			/* Other protocols hash on addresses only. */
			break;
		}
		/* Two-level hash: addresses/protocol first, then ports. */
		hash_ip = jhash_3words(iph->saddr, iph->daddr, iph->protocol, 0);
		return (jhash_3words(sport, dport, hash_ip, 0) & (count - 1));
	}

	/* Non-IPv4 (or IPv4 with options) always selects member 0. */
	return 0;
}
static unsigned int skb_hash(const struct hhf_sched_data *q, const struct sk_buff *skb) { struct flow_keys keys; unsigned int hash; if (skb->sk && skb->sk->sk_hash) return skb->sk->sk_hash; skb_flow_dissect(skb, &keys); hash = jhash_3words((__force u32)keys.dst, (__force u32)keys.src ^ keys.ip_proto, (__force u32)keys.ports, q->perturbation); return hash; }
uint32_t ptype_get_hash(struct ptype *pt) { size_t size = pt->type->info->value_size(pt); // Try to use the best hash function if (size == sizeof(uint32_t)) { // exactly one word return jhash_1word(*((uint32_t*)pt->value), INITVAL); } else if (size == 2 * sizeof(uint32_t)) { // exactly two words return jhash_2words(*((uint32_t*)pt->value), *((uint32_t*)(pt->value + sizeof(uint32_t))), INITVAL); } else if (size == 3 * sizeof(uint32_t)) { // exactly 3 words return jhash_3words(*((uint32_t*)pt->value), *((uint32_t*)(pt->value + sizeof(uint32_t))), *((uint32_t*)(pt->value + (2 * sizeof(uint32_t)))), INITVAL); } // Fallback on all size function return jhash((char*)pt->value, size, INITVAL); }
static u32 hash_v6(const struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); u32 a, b, c; if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) { a = (__force u32) ip6h->saddr.s6_addr32[3]; b = (__force u32) ip6h->daddr.s6_addr32[3]; } else { b = (__force u32) ip6h->saddr.s6_addr32[3]; a = (__force u32) ip6h->daddr.s6_addr32[3]; } if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1]) c = (__force u32) ip6h->saddr.s6_addr32[1]; else c = (__force u32) ip6h->daddr.s6_addr32[1]; return jhash_3words(a, b, c, jhash_initval); }
/*
 * Hash the forward value 'a' and (optionally) the reverse value 'b'
 * into one 32-bit key, seeded with the low bits of the parent pointer.
 * When both values are present the combination is symmetric (multiply
 * or xor), so (a,b) and (b,a) hash identically — a "reversible" hash.
 *
 * NOTE(review): returning POM_ERR from a uint32_t-returning hash makes
 * that value indistinguishable from a legitimate hash — confirm that
 * callers validate 'a' before relying on the result.
 */
uint32_t conntrack_hash(struct ptype *a, struct ptype *b, void *parent) {

	// Create a reversible hash for a and b
	if (!a)
		return POM_ERR;

	// Use the parent pointer as an init value
	uint32_t parent_initval = (uint32_t) ((uint64_t)parent & 0xFFFFFFFF);

	size_t size_a = ptype_get_value_size(a);

	if (!b) {
		// Only fwd direction
		// Try to use the best hash function
		if (size_a == sizeof(uint32_t)) { // exactly one word
			return jhash_1word(*((uint32_t*)a->value), parent_initval);
		} else if (size_a == 2 * sizeof(uint32_t)) { // exactly two words
			return jhash_2words(*((uint32_t*)a->value),
					    *((uint32_t*)(a->value + sizeof(uint32_t))),
					    parent_initval);
		} else if (size_a == 3 * sizeof(uint32_t)) { // exactly 3 words
			return jhash_3words(*((uint32_t*)a->value),
					    *((uint32_t*)(a->value + sizeof(uint32_t))),
					    *((uint32_t*)(a->value + (2 * sizeof(uint32_t)))),
					    parent_initval);
		}
		// Fallback on all size function
		return jhash((char*)a->value, size_a, parent_initval);
	}

	size_t size_b = ptype_get_value_size(b);

	// Try to use the best hash function
	if (size_a == sizeof(uint16_t) && size_b == sizeof(uint16_t)) {
		// Multiply the two 16bit values (commutative, so symmetric)
		uint32_t value_a = *((uint16_t*)a->value);
		uint32_t value_b = *((uint16_t*)b->value);
		return jhash_1word(value_a * value_b, parent_initval);
	} else if (size_a == sizeof(uint32_t) && size_b == sizeof(uint32_t)) {
		// XOR the two 32bit values before hashing
		return jhash_1word(*((uint32_t*)a->value) ^ *((uint32_t*)b->value), parent_initval);
	}

	// Mixed/odd sizes: hash each side separately, combine with XOR
	uint32_t hash_a = jhash((char*)a->value, size_a, parent_initval);
	uint32_t hash_b = jhash((char*)b->value, size_b, parent_initval);

	return hash_a ^ hash_b;
}
/*
 * SFB enqueue: hash the flow through SFB_LEVELS Bloom-filter levels,
 * update per-bucket marking probabilities and queue lengths, and drop
 * or rate-limit inelastic flows.
 *
 * NOTE(review): this snippet is truncated — the 'enqueue', 'drop' and
 * 'other_drop' labels it jumps to lie outside the visible region.
 */
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, slot, salt, sfbhash;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct flow_keys keys;

	if (unlikely(sch->q.qlen >= q->limit)) {
		sch->qstats.overlimits++;
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		/* Periodically swap hash seeds; start double-buffering
		 * (warming up the spare bin set) shortly before a swap. */
		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, q, &ret, &salt))
			goto other_drop;
		keys.src = salt;
		keys.dst = 0;
		keys.ports = 0;
	} else {
		skb_flow_dissect(skb, &keys);
	}

	slot = q->slot;

	sfbhash = jhash_3words((__force u32)keys.dst,
			       (__force u32)keys.src,
			       (__force u32)keys.ports,
			       q->bins[slot].perturbation);
	/* Never store 0 — the inactive slot is cleared to 0 below. */
	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	/* Walk the levels, consuming SFB_BUCKET_SHIFT hash bits per
	 * level, tracking the minimum queue length and mark probability. */
	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		sch->qstats.overlimits++;
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			/* Also charge the spare (warming) bin set. */
			sfbhash = jhash_3words((__force u32)keys.dst,
					       (__force u32)keys.src,
					       (__force u32)keys.ports,
					       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			sch->qstats.overlimits++;
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}
/*
 * SFQ hash with a configurable key (q->hash_kind): classic 3-tuple,
 * single address, firewall mark, or — with CONFIG_NET_SCH_SFQ_NFCT —
 * conntrack original/reply addresses.  Result is masked to the
 * 2^hash_divisor bucket range.
 */
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	struct sfq_packet_info info;
	u32 pert = q->perturbation;
	unsigned mask = (1<<q->hash_divisor) - 1;
#ifdef CONFIG_NET_SCH_SFQ_NFCT
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
#endif

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
	{
		const struct iphdr *iph = ip_hdr(skb);
		info.dst = iph->daddr;
		info.src = iph->saddr;
		/* For unfragmented packets of port-carrying protocols,
		 * the "proto" key is the first transport word (ports,
		 * or SPI for ESP); otherwise the protocol number. */
		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
		    (iph->protocol == IPPROTO_TCP ||
		     iph->protocol == IPPROTO_UDP ||
		     iph->protocol == IPPROTO_UDPLITE ||
		     iph->protocol == IPPROTO_SCTP ||
		     iph->protocol == IPPROTO_DCCP ||
		     iph->protocol == IPPROTO_ESP))
			info.proto = *(((u32*)iph) + iph->ihl);
		else
			info.proto = iph->protocol;
		break;
	}
	case __constant_htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph = ipv6_hdr(skb);
		/* Hash ipv6 addresses into a u32. This isn't ideal,
		 * but the code is simple. */
		info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
		info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
		if (iph->nexthdr == IPPROTO_TCP ||
		    iph->nexthdr == IPPROTO_UDP ||
		    iph->nexthdr == IPPROTO_UDPLITE ||
		    iph->nexthdr == IPPROTO_SCTP ||
		    iph->nexthdr == IPPROTO_DCCP ||
		    iph->nexthdr == IPPROTO_ESP)
			info.proto = *(u32*)&iph[1];
		else
			info.proto = iph->nexthdr;
		break;
	}
	default:
		/* Non-IP: key on route and socket pointers. */
		info.dst = (u32)(unsigned long)skb->dst;
		info.src = (u32)(unsigned long)skb->sk;
		info.proto = skb->protocol;
	}

	info.mark = skb->mark;

#ifdef CONFIG_NET_SCH_SFQ_NFCT
	/* defaults if there is no conntrack info */
	info.ctorigsrc = info.src;
	info.ctorigdst = info.dst;
	info.ctreplsrc = info.dst;
	info.ctrepldst = info.src;
	/* collect conntrack info */
	if (ct && !nf_ct_is_untracked(ct)) {
		if (skb->protocol == __constant_htons(ETH_P_IP)) {
			info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
			info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
			info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
			info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
		} else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
			/* Again, hash ipv6 addresses into a single u32. */
			info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, pert);
			info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, pert);
			info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, pert);
			info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, pert);
		}
	}
#endif

	switch (q->hash_kind) {
	case TCA_SFQ_HASH_CLASSIC:
		return jhash_3words(info.dst, info.src, info.proto, pert) & mask;
	case TCA_SFQ_HASH_DST:
		return jhash_1word(info.dst, pert) & mask;
	case TCA_SFQ_HASH_SRC:
		return jhash_1word(info.src, pert) & mask;
	case TCA_SFQ_HASH_FWMARK:
		return jhash_1word(info.mark, pert) & mask;
#ifdef CONFIG_NET_SCH_SFQ_NFCT
	case TCA_SFQ_HASH_CTORIGDST:
		return jhash_1word(info.ctorigdst, pert) & mask;
	case TCA_SFQ_HASH_CTORIGSRC:
		return jhash_1word(info.ctorigsrc, pert) & mask;
	case TCA_SFQ_HASH_CTREPLDST:
		return jhash_1word(info.ctrepldst, pert) & mask;
	case TCA_SFQ_HASH_CTREPLSRC:
		return jhash_1word(info.ctreplsrc, pert) & mask;
	case TCA_SFQ_HASH_CTNATCHG:
	{
		/* NOTE(review): presumably selects the NAT-rewritten
		 * address (orig-dst == reply-src means no DNAT) —
		 * confirm against the qdisc's documentation. */
		if (info.ctorigdst == info.ctreplsrc)
			return jhash_1word(info.ctorigsrc, pert) & mask;
		return jhash_1word(info.ctreplsrc, pert) & mask;
	}
#else
	case TCA_SFQ_HASH_CTORIGDST:
	case TCA_SFQ_HASH_CTORIGSRC:
	case TCA_SFQ_HASH_CTREPLDST:
	case TCA_SFQ_HASH_CTREPLSRC:
	case TCA_SFQ_HASH_CTNATCHG:
		if (net_ratelimit())
			printk(KERN_WARNING "SFQ: Conntrack support not enabled.");
#endif
	}

	/* Unknown hash_kind: warn once in a while and fall back. */
	if (net_ratelimit())
		printk(KERN_WARNING "SFQ: Unknown hash method. "
		       "Falling back to classic.\n");
	q->hash_kind = TCA_SFQ_HASH_CLASSIC;
	return jhash_3words(info.dst, info.src, info.proto, pert) & mask;
}
/* Hash an IPv4 fragment key to an index in [0, INETFRAGS_HASHSZ). */
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	/* Fragment id in the high 16 bits, protocol in the low byte. */
	u32 id_and_proto = (__force u32)id << 16 | prot;
	u32 mixed;

	mixed = jhash_3words(id_and_proto, (__force u32)saddr,
			     (__force u32)daddr, ip4_frags.rnd);

	return mixed & (INETFRAGS_HASHSZ - 1);
}
/*
 * Set-associative flow hash for CAKE: derives a host hash and a flow
 * hash according to flow_mode, reduces the flow hash to a queue index,
 * then resolves collisions inside a CAKE_SET_WAYS-wide set of queues.
 * The pre-4.2 and 4.2+ paths differ only in how flow keys are
 * dissected and hashed.
 */
static inline unsigned int cake_hash(struct cake_bin_data *q,
				     const struct sk_buff *skb, int flow_mode)
{
#if KERNEL_VERSION(4, 2, 0) > LINUX_VERSION_CODE
	struct flow_keys keys;
#else
	struct flow_keys keys, host_keys;
#endif
	u32 flow_hash, host_hash, reduced_hash;

	if (unlikely(flow_mode == CAKE_FLOW_NONE ||
		     q->flows_cnt < CAKE_SET_WAYS))
		return 0;

#if KERNEL_VERSION(4, 2, 0) > LINUX_VERSION_CODE
	skb_flow_dissect(skb, &keys);
	/* Host hash uses only the address(es) enabled by flow_mode. */
	host_hash = jhash_3words(
		(__force u32)((flow_mode & CAKE_FLOW_DST_IP) ? keys.dst : 0),
		(__force u32)((flow_mode & CAKE_FLOW_SRC_IP) ? keys.src : 0),
		(__force u32)0, q->perturbation);

	if (!(flow_mode & CAKE_FLOW_FLOWS))
		flow_hash = host_hash;
	else
		flow_hash = jhash_3words(
			(__force u32)keys.dst,
			(__force u32)keys.src ^ keys.ip_proto,
			(__force u32)keys.ports, q->perturbation);
#else
	/* Linux kernel 4.2.x have skb_flow_dissect_flow_keys which takes only 2
	 * arguments */
#if (KERNEL_VERSION(4, 2, 0) <= LINUX_VERSION_CODE) && (KERNEL_VERSION(4,3,0) > LINUX_VERSION_CODE)
	skb_flow_dissect_flow_keys(skb, &keys);
#else
	skb_flow_dissect_flow_keys(skb, &keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
#endif

	/* flow_hash_from_keys() sorts the addresses by value, so we have
	 * to preserve their order in a separate data structure to treat
	 * src and dst host addresses as independently selectable.
	 */
	host_keys = keys;
	host_keys.ports.ports = 0;
	host_keys.basic.ip_proto = 0;
	host_keys.keyid.keyid = 0;
	host_keys.tags.vlan_id = 0;
	host_keys.tags.flow_label = 0;

	if (!(flow_mode & CAKE_FLOW_SRC_IP)) {
		switch (host_keys.control.addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			host_keys.addrs.v4addrs.src = 0;
			break;
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			memset(&host_keys.addrs.v6addrs.src, 0,
			       sizeof(host_keys.addrs.v6addrs.src));
			break;
		};
	}

	if (!(flow_mode & CAKE_FLOW_DST_IP)) {
		switch (host_keys.control.addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			host_keys.addrs.v4addrs.dst = 0;
			break;
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			memset(&host_keys.addrs.v6addrs.dst, 0,
			       sizeof(host_keys.addrs.v6addrs.dst));
			break;
		};
	}

	host_hash = flow_hash_from_keys(&host_keys);

	if (!(flow_mode & CAKE_FLOW_FLOWS)) {
		flow_hash = host_hash;
	} else {
		flow_hash = flow_hash_from_keys(&keys);
	}
#endif

	reduced_hash = reciprocal_scale(flow_hash, q->flows_cnt);

	/* set-associative hashing */
	/* fast path if no hash collision (direct lookup succeeds) */
	if (likely(q->tags[reduced_hash] == flow_hash)) {
		q->way_directs++;
	} else {
		u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
		u32 outer_hash = reduced_hash - inner_hash;
		u32 i, j, k;

		/* check if any active queue in the set is reserved for
		 * this flow. count the empty queues in the set, too */
		for (i = j = 0, k = inner_hash; i < CAKE_SET_WAYS;
		     i++, k = (k + 1) % CAKE_SET_WAYS) {
			if (q->tags[outer_hash + k] == flow_hash) {
				q->way_hits++;
				goto found;
			} else if (list_empty(&q->flows[outer_hash + k].
					      flowchain)) {
				j++;
			}
		}

		/* no queue is reserved for this flow */
		if (j) {
			/* there's at least one empty queue, so find one
			 * to reserve. */
			q->way_misses++;
			for (i = 0; i < CAKE_SET_WAYS;
			     i++, k = (k + 1) % CAKE_SET_WAYS)
				if (list_empty(&q->flows[outer_hash + k].
					       flowchain))
					goto found;
		} else {
			/* With no empty queues default to the original
			 * queue and accept the collision. */
			q->way_collisions++;
		}

found:
		/* reserve queue for future packets in same flow */
		reduced_hash = outer_hash + k;
		q->tags[reduced_hash] = flow_hash;
	}
	return reduced_hash;
}
/* One-word convenience wrapper: hash a single word by padding the
 * remaining two inputs of the three-word core with zero. */
u_int32_t jhash_1word (u_int32_t a, u_int32_t initval)
{
	const u_int32_t pad = 0;

	return jhash_3words (a, pad, pad, initval);
}
/* Two-word convenience wrapper: hash two words by padding the third
 * input of the three-word core with zero. */
u_int32_t jhash_2words (u_int32_t a, u_int32_t b, u_int32_t initval)
{
	const u_int32_t pad = 0;

	return jhash_3words (a, b, pad, initval);
}
static u16 imq_hash(struct net_device *dev, struct sk_buff *skb) { unsigned int pull_len; u16 protocol = skb->protocol; u32 addr1, addr2; u32 hash, ihl = 0; union { u16 in16[2]; u32 in32; } ports; u8 ip_proto; pull_len = 0; recheck: switch (protocol) { case htons(ETH_P_8021Q): { if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL)) goto other; pull_len += VLAN_HLEN; skb->network_header += VLAN_HLEN; protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; goto recheck; } case htons(ETH_P_PPP_SES): { if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL)) goto other; pull_len += PPPOE_SES_HLEN; skb->network_header += PPPOE_SES_HLEN; protocol = pppoe_proto(skb); goto recheck; } case htons(ETH_P_IP): { const struct iphdr *iph = ip_hdr(skb); if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr)))) goto other; addr1 = iph->daddr; addr2 = iph->saddr; ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? iph->protocol : 0; ihl = ip_hdrlen(skb); break; } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case htons(ETH_P_IPV6): { const struct ipv6hdr *iph = ipv6_hdr(skb); if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr)))) goto other; addr1 = iph->daddr.s6_addr32[3]; addr2 = iph->saddr.s6_addr32[3]; ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto); if (unlikely(ihl < 0)) goto other; break; } #endif default: other: if (pull_len != 0) { skb_push(skb, pull_len); skb->network_header -= pull_len; } return (u16)(ntohs(protocol) % dev->real_num_tx_queues); } if (addr1 > addr2) swap(addr1, addr2); switch (ip_proto) { case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_DCCP: case IPPROTO_ESP: case IPPROTO_AH: case IPPROTO_SCTP: case IPPROTO_UDPLITE: { if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) { if (ports.in16[0] > ports.in16[1]) swap(ports.in16[0], ports.in16[1]); break; } /* fall-through */ } default: ports.in32 = 0; break; } if (pull_len != 0) { skb_push(skb, pull_len); skb->network_header -= pull_len; } hash = jhash_3words(addr1, addr2, 
ports.in32, imq_hashrnd ^ ip_proto); return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32); }
/* Seed-protected three-word flow hash: make sure the global random
 * seed exists, then mix the three words under it. */
static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
	__flow_hash_secret_init();

	return jhash_3words(a, b, c, hashrnd);
}