Example #1
/* Azure hosts don't support non-TCP port numbers in hashing yet. For
 * non-TCP traffic we compute the hash from the IP addresses alone.
 */
static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk)
{
	struct flow_keys flow;
	u32 hash;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	if (flow.basic.ip_proto == IPPROTO_TCP) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}
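A note that applies to every snippet in this collection: jhash2() takes its length in 32-bit words, not bytes, which is why netvsc_get_hash() hard-codes 2 words for the IPv4 src/dst pair and 8 for IPv6, and why other callers divide sizeof(...) by sizeof(u32). A minimal userspace sketch of that arithmetic, using hypothetical stand-in structs rather than the real <net/flow_dissector.h> types:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the flow_keys address unions above. */
struct v4addrs { uint32_t src, dst; };        /* 2 u32 words */
struct v6addrs { uint32_t src[4], dst[4]; };  /* 8 u32 words */

int main(void)
{
	/* These are the 2 and 8 passed to jhash2() in netvsc_get_hash(). */
	printf("v4 words: %zu\n", sizeof(struct v4addrs) / sizeof(uint32_t));
	printf("v6 words: %zu\n", sizeof(struct v6addrs) / sizeof(uint32_t));
	return 0;
}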
Example #2
static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}
Example #3
static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v4,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}
Example #4
static u32 flow_hash_code(struct flowi *key, int cpu)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
		(flow_hash_size - 1));
}
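flow_hash_code() relies on flow_hash_size being a power of two: & (flow_hash_size - 1) is then a cheap modulo, whereas with any other size the mask would leave some buckets permanently unused. A small self-contained check of that invariant, with a hypothetical table size:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t table_size = 1024;	/* hypothetical; must be 2^n */

	/* a power of two has exactly one bit set */
	assert((table_size & (table_size - 1)) == 0);

	uint32_t hash = 0xdeadbeefu;
	printf("bucket = %u\n", hash & (table_size - 1)); /* always < 1024 */
	return 0;
}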
Example #5
static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}
Example #6
static unsigned int recent_entry_hash6(const union nf_inet_addr *addr)
{
	if (!hash_rnd_initted) {
		get_random_bytes(&hash_rnd, sizeof(hash_rnd));
		hash_rnd_initted = true;
	}
	return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) &
	       (ip_list_hash_size - 1);
}
Example #7
unsigned int sockunion_hash(const union sockunion *su)
{
	switch (sockunion_family(su)) {
	case AF_INET:
		return jhash_1word(su->sin.sin_addr.s_addr, 0);
	case AF_INET6:
		return jhash2(su->sin6.sin6_addr.s6_addr32,
			      ZEBRA_NUM_OF(su->sin6.sin6_addr.s6_addr32), 0);
	}
	return 0;
}
Example #8
static inline unsigned int
connlimit_iphash6(const union nf_inet_addr *addr,
                  const union nf_inet_addr *mask)
{
	union nf_inet_addr res;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
		res.ip6[i] = addr->ip6[i] & mask->ip6[i];

	return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6), connlimit_rnd) & 0xFF;
}
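Applying the netmask before hashing is the point of this helper: every address inside the configured prefix produces the same masked key, so the & 0xFF bucket index aggregates connections per prefix rather than per host. A userspace sketch of the effect, with hypothetical addresses and a /64 mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* two hypothetical hosts in the same /64 */
	uint32_t a[4]    = { 0x20010db8u, 0x00000001u, 0x00000000u, 0x00000001u };
	uint32_t b[4]    = { 0x20010db8u, 0x00000001u, 0xdeadbeefu, 0x00000002u };
	uint32_t mask[4] = { 0xffffffffu, 0xffffffffu, 0, 0 };	/* /64 */
	uint32_t ra[4], rb[4];
	int i, equal = 1;

	for (i = 0; i < 4; i++) {
		ra[i] = a[i] & mask[i];
		rb[i] = b[i] & mask[i];
		equal &= (ra[i] == rb[i]);
	}
	/* identical masked keys -> identical jhash2() input -> same bucket */
	printf("masked keys equal: %d\n", equal);
	return 0;
}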
Example #9
static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
{
	const struct nf_conntrack_tuple *t;
	const struct nf_conn *ct = data;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	/* Original src, to ensure we map it consistently if possible. */

	seed ^= net_hash_mix(nf_ct_net(ct));
	return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
		      t->dst.protonum ^ seed);
}
Example #10
static u32 hash_v6(const struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__be32 addr[4];

	addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0];
	addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1];
	addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2];
	addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3];

	return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
}
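XORing the source address into the destination word-by-word makes hash_v6() direction-symmetric: a packet and its reply feed identical words to jhash2(), so both directions of a flow share a bucket. A self-contained sketch of that property:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void fold(const uint32_t *s, const uint32_t *d, uint32_t out[4])
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = s[i] ^ d[i];	/* XOR commutes: fold(s,d) == fold(d,s) */
}

int main(void)
{
	uint32_t src[4] = { 1, 2, 3, 4 }, dst[4] = { 5, 6, 7, 8 };
	uint32_t fwd[4], rev[4];

	fold(src, dst, fwd);
	fold(dst, src, rev);	/* reply direction */
	printf("symmetric: %d\n", memcmp(fwd, rev, sizeof(fwd)) == 0);
	return 0;
}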
Example #11
static u32 __ipv6_select_ident(const struct in6_addr *addr)
{
	u32 newid, oldid, hash = jhash2((u32 *)addr, 4, hashidentrnd);
	u32 *pid = &ipv6_fragmentation_id[hash % FID_HASH_SZ];

	do {
		oldid = *pid;
		newid = oldid + 1;
		if (!(hash + newid))
			newid++;
	} while (cmpxchg(pid, oldid, newid) != oldid);

	return hash + newid;
}
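The cmpxchg() loop above is a lock-free increment that also refuses to produce an identification of 0: newid is bumped past the one value that would make hash + newid wrap to zero. A userspace restatement with C11 atomics, assuming only that id 0 is reserved as in the code above:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t next_id(_Atomic uint32_t *pid, uint32_t hash)
{
	uint32_t oldid, newid;

	do {
		oldid = atomic_load(pid);
		newid = oldid + 1;
		if (!(hash + newid))	/* never hand out id 0 */
			newid++;
	} while (!atomic_compare_exchange_weak(pid, &oldid, newid));

	return hash + newid;
}

int main(void)
{
	_Atomic uint32_t counter = 0;
	uint32_t hash = 0xfffffffeu;

	/* the second call would yield hash + 2 == 0, so that id is skipped */
	printf("%#010x\n", next_id(&counter, hash));	/* 0xffffffff */
	printf("%#010x\n", next_id(&counter, hash));	/* 0x00000001 */
	return 0;
}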
Example #12
static inline unsigned int
connlimit_iphash6(const union nf_inet_addr *addr,
                  const union nf_inet_addr *mask)
{
	union nf_inet_addr res;
	unsigned int i;

	if (unlikely(!connlimit_rnd_inited)) {
		get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
		connlimit_rnd_inited = true;
	}

	for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
		res.ip6[i] = addr->ip6[i] & mask->ip6[i];

	return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6), connlimit_rnd) & 0xFF;
}
Example #13
static uint32_t hash_ipv6(struct in6_addr *addr)
{
	return jhash2(addr->s6_addr32, sizeof(*addr) / 4, 0);
}
Example #14
static inline u_int32_t
xt_cluster_hash_ipv6(const void *ip, const struct xt_cluster_match_info *info)
{
	return jhash2(ip, NF_CT_TUPLE_L3SIZE / sizeof(__u32), info->hash_seed);
}
Example #15
static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}
Example #16
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	struct sfq_packet_info info;
	u32 pert = q->perturbation;
	unsigned mask = (1<<q->hash_divisor) - 1;
#ifdef CONFIG_NET_SCH_SFQ_NFCT
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
#endif

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
	{
		const struct iphdr *iph = ip_hdr(skb);
		info.dst = iph->daddr;
		info.src = iph->saddr;
		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
		    (iph->protocol == IPPROTO_TCP ||
		     iph->protocol == IPPROTO_UDP ||
		     iph->protocol == IPPROTO_UDPLITE ||
		     iph->protocol == IPPROTO_SCTP ||
		     iph->protocol == IPPROTO_DCCP ||
		     iph->protocol == IPPROTO_ESP))
			info.proto = *(((u32*)iph) + iph->ihl);
		else
			info.proto = iph->protocol;
		break;
	}
	case __constant_htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph = ipv6_hdr(skb);
		/* Hash ipv6 addresses into a u32. This isn't ideal,
		 * but the code is simple. */
		info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
		info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
		if (iph->nexthdr == IPPROTO_TCP ||
		    iph->nexthdr == IPPROTO_UDP ||
		    iph->nexthdr == IPPROTO_UDPLITE ||
		    iph->nexthdr == IPPROTO_SCTP ||
		    iph->nexthdr == IPPROTO_DCCP ||
		    iph->nexthdr == IPPROTO_ESP)
			info.proto = *(u32*)&iph[1];
		else
			info.proto = iph->nexthdr;
		break;
	}
	default:
		info.dst   = (u32)(unsigned long)skb->dst;
		info.src   = (u32)(unsigned long)skb->sk;
		info.proto = skb->protocol;
	}

	info.mark = skb->mark;

#ifdef CONFIG_NET_SCH_SFQ_NFCT
	/* defaults if there is no conntrack info */
	info.ctorigsrc = info.src;
	info.ctorigdst = info.dst;
	info.ctreplsrc = info.dst;
	info.ctrepldst = info.src;
	/* collect conntrack info */
	if (ct && !nf_ct_is_untracked(ct)) {
		if (skb->protocol == __constant_htons(ETH_P_IP)) {
			info.ctorigsrc =
			    ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
			info.ctorigdst =
			    ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
			info.ctreplsrc =
			    ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
			info.ctrepldst =
			    ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
		}
		else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
			/* Again, hash ipv6 addresses into a single u32. */
			info.ctorigsrc = jhash2(
			    ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6,
			    4, pert);
			info.ctorigdst = jhash2(
			    ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6,
			    4, pert);
			info.ctreplsrc = jhash2(
			    ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6,
			    4, pert);
			info.ctrepldst = jhash2(
			    ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6,
			    4, pert);
		}
	}
#endif

	switch (q->hash_kind) {
	case TCA_SFQ_HASH_CLASSIC:
		return jhash_3words(info.dst, info.src, info.proto, pert) & mask;
	case TCA_SFQ_HASH_DST:
		return jhash_1word(info.dst, pert) & mask;
	case TCA_SFQ_HASH_SRC:
		return jhash_1word(info.src, pert) & mask;
	case TCA_SFQ_HASH_FWMARK:
		return jhash_1word(info.mark, pert) & mask;
#ifdef CONFIG_NET_SCH_SFQ_NFCT
	case TCA_SFQ_HASH_CTORIGDST:
		return jhash_1word(info.ctorigdst, pert) & mask;
	case TCA_SFQ_HASH_CTORIGSRC:
		return jhash_1word(info.ctorigsrc, pert) & mask;
	case TCA_SFQ_HASH_CTREPLDST:
		return jhash_1word(info.ctrepldst, pert) & mask;
	case TCA_SFQ_HASH_CTREPLSRC:
		return jhash_1word(info.ctreplsrc, pert) & mask;
	case TCA_SFQ_HASH_CTNATCHG:
	{
		if (info.ctorigdst == info.ctreplsrc)
			return jhash_1word(info.ctorigsrc, pert) & mask;
		return jhash_1word(info.ctreplsrc, pert) & mask;
	}
#else
	case TCA_SFQ_HASH_CTORIGDST:
	case TCA_SFQ_HASH_CTORIGSRC:
	case TCA_SFQ_HASH_CTREPLDST:
	case TCA_SFQ_HASH_CTREPLSRC:
	case TCA_SFQ_HASH_CTNATCHG:
		if (net_ratelimit())
			printk(KERN_WARNING "SFQ: Conntrack support not enabled.\n");
#endif
	}
	if (net_ratelimit())
		printk(KERN_WARNING "SFQ: Unknown hash method. "
		                    "Falling back to classic.\n");
	q->hash_kind = TCA_SFQ_HASH_CLASSIC;
	return jhash_3words(info.dst, info.src, info.proto, pert) & mask;
}
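The inline comments carry the trick: sfq's packet info only holds 32-bit src/dst fields, so the 128-bit IPv6 addresses are first folded through jhash2(), and reseeding q->perturbation silently re-buckets every flow. A sketch of that behavior with a toy stand-in mixer (deliberately not the real jhash2):

#include <stdint.h>
#include <stdio.h>

/* jhash2_stub() stands in for the kernel's jhash2(); any seeded
 * word hash has the property shown here. Toy mixer, NOT jhash2. */
static uint32_t jhash2_stub(const uint32_t *k, unsigned words, uint32_t seed)
{
	uint32_t h = seed;

	while (words--)
		h = h * 0x9e3779b1u + *k++;
	return h;
}

int main(void)
{
	uint32_t v6addr[4] = { 0x20010db8u, 0, 0, 1 };

	/* 128 bits folded to the 32 bits sfq_packet_info can hold;
	 * a fresh perturbation seed moves the flow to a new bucket. */
	printf("seed 1: %#010x\n", jhash2_stub(v6addr, 4, 1));
	printf("seed 2: %#010x\n", jhash2_stub(v6addr, 4, 2));
	return 0;
}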
Example #17
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct perf_callchain_entry *trace;
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 max_depth = map->value_size / 8;
	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	/* get_perf_callchain() guarantees that trace->nr >= init_nr
	 * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
	 */
	trace_nr = trace->nr - init_nr;

	if (trace_nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr -= skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip + init_nr;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

	if (bucket && bucket->hash == hash) {
		if (flags & BPF_F_FAST_STACK_CMP)
			return id;
		if (bucket->nr == trace_nr &&
		    memcmp(bucket->ip, ips, trace_len) == 0)
			return id;
	}

	/* this call stack is not in the map, try to add it */
	if (bucket && !(flags & BPF_F_REUSE_STACKID))
		return -EEXIST;

	new_bucket = (struct stack_map_bucket *)
		pcpu_freelist_pop(&smap->freelist);
	if (unlikely(!new_bucket))
		return -ENOMEM;

	memcpy(new_bucket->ip, ips, trace_len);
	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}
Example #18
/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
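reciprocal_scale(j, dist) maps the 32-bit hash j uniformly onto [0, dist) with a multiply-and-shift instead of a modulo. A userspace restatement of the helper (its kernel definition is essentially this one-liner) and a quick check that the outputs stay in range:

#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	/* val/2^32 is a fraction in [0,1); scale it by ep_ro */
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	/* e.g. spread hashes over a range of 6 addresses (dist = 6) */
	uint32_t hashes[] = { 0x00000000u, 0x40000000u, 0x80000000u, 0xffffffffu };
	unsigned i;

	for (i = 0; i < 4; i++)
		printf("hash %#010x -> offset %u\n",
		       hashes[i], reciprocal_scale(hashes[i], 6));
	return 0;	/* prints offsets 0, 1, 3, 5 -- all below 6 */
}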
Example #19
static inline unsigned int recent_entry_hash6(const union nf_inet_addr *addr)
{
	return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) &
	       (ip_list_hash_size - 1);
}
Example #20
static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}