/* Hash-table key function for struct bgp_addr: one-word Jenkins hash of
 * the IPv4 address, with a zero seed. */
static unsigned int bgp_address_hash_key_make (void *p)
{
  const struct bgp_addr *a = p;
  unsigned int key;

  key = jhash_1word (a->addr.s_addr, 0);
  return key;
}
/*
 * Map an IPv4 address to one of 256 connlimit buckets: one-word Jenkins
 * hash keyed with the random value connlimit_rnd, masked with 0xFF.
 *
 * NOTE(review): the lazy seeding below is unsynchronized — two CPUs racing
 * on the first lookup can both observe !connlimit_rnd_inited and reseed,
 * so hashes computed around startup may use different seeds.  Confirm the
 * callers serialize this path, or move the seeding to checkentry/init time.
 */
static inline unsigned int connlimit_iphash(__be32 addr)
{
	if (unlikely(!connlimit_rnd_inited)) {
		get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
		connlimit_rnd_inited = true;
	}
	return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF;
}
/*
 * Bucket index for an IPv4 address in the recent-match table.  Requires
 * ip_list_hash_size to be a power of two for the mask to cover all buckets.
 * NOTE(review): first-use seeding of hash_rnd is unsynchronized — verify
 * that callers hold the recent lock around entry creation.
 */
static unsigned int recent_entry_hash(__be32 addr)
{
	u32 h;

	if (!hash_rnd_initted) {
		get_random_bytes(&hash_rnd, 4);
		hash_rnd_initted = 1;
	}
	h = jhash_1word((__force u32)addr, hash_rnd);
	return h & (ip_list_hash_size - 1);
}
/*
 * Bucket index for an IPv4 address in the recent-match table: one-word
 * Jenkins hash keyed with hash_rnd, masked to the table size.  Assumes
 * ip_list_hash_size is a power of two so the mask yields a full range.
 *
 * NOTE(review): the lazy seeding of hash_rnd is unsynchronized; two racing
 * first calls could reseed and compute inconsistent bucket indices.
 * Confirm callers serialize this path (e.g. under the recent table lock).
 */
static unsigned int recent_entry_hash(u_int32_t addr)
{
	if (!hash_rnd_initted) {
		get_random_bytes(&hash_rnd, 4);
		hash_rnd_initted = 1;
	}
	return jhash_1word(addr, hash_rnd) & (ip_list_hash_size - 1);
}
static unsigned int recent_entry_hash4(const union nf_inet_addr *addr) { if (!hash_rnd_initted) { get_random_bytes(&hash_rnd, sizeof(hash_rnd)); hash_rnd_initted = true; } return jhash_1word((__force u32)addr->ip, hash_rnd) & (ip_list_hash_size - 1); }
/*
 * Hash a sockunion by address family: one-word Jenkins hash of the IPv4
 * address, jhash2 over the four IPv6 address words, or 0 for any other
 * (or unset) family.  Seed is always 0.
 */
unsigned int sockunion_hash(const union sockunion *su)
{
	int family = sockunion_family(su);

	if (family == AF_INET)
		return jhash_1word(su->sin.sin_addr.s_addr, 0);
	if (family == AF_INET6)
		return jhash2(su->sin6.sin6_addr.s6_addr32,
			      ZEBRA_NUM_OF(su->sin6.sin6_addr.s6_addr32), 0);
	return 0;
}
uint32_t conntrack_hash(struct ptype *a, struct ptype *b, void *parent) { // Create a reversible hash for a and b if (!a) return POM_ERR; // Use the parent pointer as an init value uint32_t parent_initval = (uint32_t) ((uint64_t)parent & 0xFFFFFFFF); size_t size_a = ptype_get_value_size(a); if (!b) { // Only fwd direction // Try to use the best hash function if (size_a == sizeof(uint32_t)) { // exactly one word return jhash_1word(*((uint32_t*)a->value), parent_initval); } else if (size_a == 2 * sizeof(uint32_t)) { // exactly two words return jhash_2words(*((uint32_t*)a->value), *((uint32_t*)(a->value + sizeof(uint32_t))), parent_initval); } else if (size_a == 3 * sizeof(uint32_t)) { // exactly 3 words return jhash_3words(*((uint32_t*)a->value), *((uint32_t*)(a->value + sizeof(uint32_t))), *((uint32_t*)(a->value + (2 * sizeof(uint32_t)))), parent_initval); } // Fallback on all size function return jhash((char*)a->value, size_a, parent_initval); } size_t size_b = ptype_get_value_size(b); // Try to use the best hash function if (size_a == sizeof(uint16_t) && size_b == sizeof(uint16_t)) { // Multiply the two 16bit values uint32_t value_a = *((uint16_t*)a->value); uint32_t value_b = *((uint16_t*)b->value); return jhash_1word(value_a * value_b, parent_initval); } else if (size_a == sizeof(uint32_t) && size_b == sizeof(uint32_t)) { // XOR the two 32bit values before hashing return jhash_1word(*((uint32_t*)a->value) ^ *((uint32_t*)b->value), parent_initval); } uint32_t hash_a = jhash((char*)a->value, size_a, parent_initval); uint32_t hash_b = jhash((char*)b->value, size_b, parent_initval); return hash_a ^ hash_b; }
uint32_t ptype_get_hash(struct ptype *pt) { size_t size = pt->type->info->value_size(pt); // Try to use the best hash function if (size == sizeof(uint32_t)) { // exactly one word return jhash_1word(*((uint32_t*)pt->value), INITVAL); } else if (size == 2 * sizeof(uint32_t)) { // exactly two words return jhash_2words(*((uint32_t*)pt->value), *((uint32_t*)(pt->value + sizeof(uint32_t))), INITVAL); } else if (size == 3 * sizeof(uint32_t)) { // exactly 3 words return jhash_3words(*((uint32_t*)pt->value), *((uint32_t*)(pt->value + sizeof(uint32_t))), *((uint32_t*)(pt->value + (2 * sizeof(uint32_t)))), INITVAL); } // Fallback on all size function return jhash((char*)pt->value, size, INITVAL); }
/*
 * Record a merge point at target_pc with a snapshot of the verifier stack.
 *
 * The merge-point table is hashed by target_pc (one-word Jenkins hash,
 * masked to MERGE_POINT_TABLE_SIZE buckets; size must be a power of two).
 * If an entry for target_pc already exists, the freshly allocated node is
 * freed and the stored stack is compared against the new one; stacks that
 * differ are a verification failure.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when the
 * merge points differ.
 */
static int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc, const struct vstack *stack)
{
	struct mp_node *mp_node;
	unsigned long hash = jhash_1word(target_pc, 0);
	struct hlist_head *head;
	struct mp_node *lookup_node;
	int found = 0;

	dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n", target_pc, hash);
	/* Allocate and fill the candidate node before the lookup so the
	 * insert path below needs no further work. */
	mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
	if (!mp_node)
		return -ENOMEM;
	mp_node->target_pc = target_pc;
	memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));

	/* Walk the bucket chain for an existing entry with this target_pc. */
	head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
	lttng_hlist_for_each_entry(lookup_node, head, node) {
		if (lttng_hash_match(lookup_node, target_pc)) {
			found = 1;
			break;
		}
	}

	if (found) {
		/* Key already present */
		dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n", target_pc, hash);
		/* Discard the unused candidate, then verify the stored
		 * snapshot agrees with the new one. */
		kfree(mp_node);
		if (merge_points_compare(stack, &lookup_node->stack)) {
			printk(KERN_WARNING "Merge points differ for offset %lu\n", target_pc);
			return -EINVAL;
		}
	} else {
		hlist_add_head(&mp_node->node, head);
	}
	return 0;
}
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; int i; u32 p_min = ~0; u32 minqlen = ~0; u32 r, slot, salt, sfbhash; int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (unlikely(sch->q.qlen >= q->limit)) { sch->qstats.overlimits++; q->stats.queuedrop++; goto drop; } if (q->rehash_interval > 0) { unsigned long limit = q->rehash_time + q->rehash_interval; if (unlikely(time_after(jiffies, limit))) { sfb_swap_slot(q); q->rehash_time = jiffies; } else if (unlikely(!q->double_buffering && q->warmup_time > 0 && time_after(jiffies, limit - q->warmup_time))) { q->double_buffering = true; } } if (q->filter_list) { /* If using external classifiers, get result and record it. */ if (!sfb_classify(skb, q, &ret, &salt)) goto other_drop; } else { salt = skb_get_rxhash(skb); } slot = q->slot; sfbhash = jhash_1word(salt, q->bins[slot].perturbation); if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; sfbhash >>= SFB_BUCKET_SHIFT; if (b->qlen == 0) decrement_prob(b, q); else if (b->qlen >= q->bin_size) increment_prob(b, q); if (minqlen > b->qlen) minqlen = b->qlen; if (p_min > b->p_mark) p_min = b->p_mark; } slot ^= 1; sfb_skb_cb(skb)->hashes[slot] = 0; if (unlikely(minqlen >= q->max)) { sch->qstats.overlimits++; q->stats.bucketdrop++; goto drop; } if (unlikely(p_min >= SFB_MAX_PROB)) { /* Inelastic flow */ if (q->double_buffering) { sfbhash = jhash_1word(salt, q->bins[slot].perturbation); if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; sfbhash >>= SFB_BUCKET_SHIFT; if (b->qlen == 0) decrement_prob(b, q); else if (b->qlen >= q->bin_size) increment_prob(b, q); } } if (sfb_rate_limit(skb, q)) { 
sch->qstats.overlimits++; q->stats.penaltydrop++; goto drop; } goto enqueue; }
/* Map an IPv4 address to one of 256 connlimit buckets using a one-word
 * Jenkins hash keyed with connlimit_rnd. */
static inline unsigned int connlimit_iphash(u_int32_t addr)
{
	unsigned int h = jhash_1word(addr, connlimit_rnd);

	return h & 0xFF;
}
/* Select the bucket chain for a flow hash: re-mix the hash with the
 * per-table seed, then mask to the bucket count (a power of two). */
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	u32 mixed = jhash_1word(hash, ti->hash_seed);

	return flex_array_get(ti->buckets, mixed & (ti->n_buckets - 1));
}
// __stp_tf_map_hash(): Compute the map hash. static inline u32 __stp_tf_map_hash(struct task_struct *tsk) { return (jhash_1word(tsk->pid, 0) & (__STP_TF_TABLE_SIZE - 1)); }
/* Map an IPv4 address to one of 256 connlimit buckets using a one-word
 * Jenkins hash keyed with connlimit_rnd. */
static inline unsigned int connlimit_iphash(__be32 addr)
{
	unsigned int h = jhash_1word((__force __u32)addr, connlimit_rnd);

	return h & 0xFF;
}
/* Bucket chain for a netlink pid: Jenkins hash seeded with the table's
 * random value, masked by the table's size mask. */
static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	u32 idx = jhash_1word(pid, hash->rnd) & hash->mask;

	return &hash->table[idx];
}
/*
 * sfq_hash() - classify a packet into an SFQ flow bucket.
 *
 * Builds an sfq_packet_info from the packet (IPv4/IPv6 src/dst, transport
 * ports word when the protocol carries one and the packet is not a
 * fragment, fwmark, and — when CONFIG_NET_SCH_SFQ_NFCT is set — the
 * conntrack original/reply tuples, defaulting to src/dst when no conntrack
 * info is available).  IPv6 addresses are folded to a u32 via jhash2.
 * The selected q->hash_kind decides which fields feed the final Jenkins
 * hash, seeded with q->perturbation and masked to (1 << q->hash_divisor)-1;
 * an unknown hash_kind logs a rate-limited warning and permanently falls
 * back to TCA_SFQ_HASH_CLASSIC.
 *
 * NOTE(review): this text was extracted with newlines collapsed, so the
 * inline #ifdef/#else directives below are fused onto the code lines; the
 * code is kept byte-identical rather than reflowed to avoid changing
 * preprocessor line structure.
 */
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) { struct sfq_packet_info info; u32 pert = q->perturbation; unsigned mask = (1<<q->hash_divisor) - 1; #ifdef CONFIG_NET_SCH_SFQ_NFCT enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); #endif switch (skb->protocol) { case __constant_htons(ETH_P_IP): { const struct iphdr *iph = ip_hdr(skb); info.dst = iph->daddr; info.src = iph->saddr; if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_UDPLITE || iph->protocol == IPPROTO_SCTP || iph->protocol == IPPROTO_DCCP || iph->protocol == IPPROTO_ESP)) info.proto = *(((u32*)iph) + iph->ihl); else info.proto = iph->protocol; break; } case __constant_htons(ETH_P_IPV6): { struct ipv6hdr *iph = ipv6_hdr(skb); /* Hash ipv6 addresses into a u32. This isn't ideal, * but the code is simple. */ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation); info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation); if (iph->nexthdr == IPPROTO_TCP || iph->nexthdr == IPPROTO_UDP || iph->nexthdr == IPPROTO_UDPLITE || iph->nexthdr == IPPROTO_SCTP || iph->nexthdr == IPPROTO_DCCP || iph->nexthdr == IPPROTO_ESP) info.proto = *(u32*)&iph[1]; else info.proto = iph->nexthdr; break; } default: info.dst = (u32)(unsigned long)skb->dst; info.src = (u32)(unsigned long)skb->sk; info.proto = skb->protocol; } info.mark = skb->mark; #ifdef CONFIG_NET_SCH_SFQ_NFCT /* defaults if there is no conntrack info */ info.ctorigsrc = info.src; info.ctorigdst = info.dst; info.ctreplsrc = info.dst; info.ctrepldst = info.src; /* collect conntrack info */ if (ct && !nf_ct_is_untracked(ct)) { if (skb->protocol == __constant_htons(ETH_P_IP)) { info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip; info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip; info.ctrepldst = 
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip; } else if (skb->protocol == __constant_htons(ETH_P_IPV6)) { /* Again, hash ipv6 addresses into a single u32. */ info.ctorigsrc = jhash2( ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, pert); info.ctorigdst = jhash2( ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, pert); info.ctreplsrc = jhash2( ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, pert); info.ctrepldst = jhash2( ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, pert); } } #endif switch (q->hash_kind) { case TCA_SFQ_HASH_CLASSIC: return jhash_3words(info.dst, info.src, info.proto, pert) & mask; case TCA_SFQ_HASH_DST: return jhash_1word(info.dst, pert) & mask; case TCA_SFQ_HASH_SRC: return jhash_1word(info.src, pert) & mask; case TCA_SFQ_HASH_FWMARK: return jhash_1word(info.mark, pert) & mask; #ifdef CONFIG_NET_SCH_SFQ_NFCT case TCA_SFQ_HASH_CTORIGDST: return jhash_1word(info.ctorigdst, pert) & mask; case TCA_SFQ_HASH_CTORIGSRC: return jhash_1word(info.ctorigsrc, pert) & mask; case TCA_SFQ_HASH_CTREPLDST: return jhash_1word(info.ctrepldst, pert) & mask; case TCA_SFQ_HASH_CTREPLSRC: return jhash_1word(info.ctreplsrc, pert) & mask; case TCA_SFQ_HASH_CTNATCHG: { if (info.ctorigdst == info.ctreplsrc) return jhash_1word(info.ctorigsrc, pert) & mask; return jhash_1word(info.ctreplsrc, pert) & mask; } #else case TCA_SFQ_HASH_CTORIGDST: case TCA_SFQ_HASH_CTORIGSRC: case TCA_SFQ_HASH_CTREPLDST: case TCA_SFQ_HASH_CTREPLSRC: case TCA_SFQ_HASH_CTNATCHG: if (net_ratelimit()) printk(KERN_WARNING "SFQ: Conntrack support not enabled."); #endif } if (net_ratelimit()) printk(KERN_WARNING "SFQ: Unknown hash method. " "Falling back to classic.\n"); q->hash_kind = TCA_SFQ_HASH_CLASSIC; return jhash_3words(info.dst, info.src, info.proto, pert) & mask; }
/* Hash an address for probe i of the ipporthash set: each probe uses its
 * own 32-bit seed word taken from map->initval. */
static inline __u32 jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
{
	uint32_t seed = ((uint32_t *) map->initval)[i];

	return jhash_1word(ip, seed);
}
/* One-word Jenkins hash of an IPv4 address, seeded with the match rule's
 * configured hash_seed. */
static inline u_int32_t xt_cluster_hash_ipv4(u_int32_t ip, const struct xt_cluster_match_info *info)
{
	u_int32_t seed = info->hash_seed;

	return jhash_1word(ip, seed);
}
static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr) { return jhash_1word((__force u32)addr->ip, hash_rnd) & (ip_list_hash_size - 1); }
/* Select the bucket chain for a flow hash: re-mix the hash with the
 * per-table seed, then mask to the bucket count (a power of two). */
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	u32 mixed = jhash_1word(hash, table->hash_seed);

	return flex_array_get(table->buckets, mixed & (table->n_buckets - 1));
}
/* peer hash and peer list helpers */
/* Hash-table key function for an MSDP peer: one-word Jenkins hash of the
 * peer's IPv4 address, zero seed. */
static unsigned int pim_msdp_peer_hash_key_make(const void *p)
{
	const struct pim_msdp_peer *peer = p;

	return jhash_1word(peer->peer.s_addr, 0);
}