Example #1
0
File: netvsc_drv.c  Project: mdamt/linux
/* Azure hosts don't support non-TCP port numbers in hashing yet. We compute
 * hash for non-TCP traffic with only IP numbers.
 *
 * Returns the flow hash for @skb (also stored into the skb), or 0 when the
 * packet cannot be dissected.
 *
 * NOTE(review): @sk is currently unused here — presumably kept so the
 * signature matches a caller-side hook; confirm before removing.
 */
static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk)
{
	struct flow_keys flow;
	u32 hash;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	/* TCP flows: reuse the full L4 hash the stack already computes. */
	if (flow.basic.ip_proto == IPPROTO_TCP)
		return skb_get_hash(skb);

	/* Non-TCP traffic: hash the IP addresses only (see comment above). */
	if (flow.basic.n_proto == htons(ETH_P_IP))
		hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
	else if (flow.basic.n_proto == htons(ETH_P_IPV6))
		hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
	else
		hash = 0;

	skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);

	return hash;
}
Example #2
0
/* Dissect @skb into @keys and fold them into a hash seeded with @keyval.
 *
 * FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL stops dissection early when an IPv6
 * flow label is present, so labelled flows hash without a transport-header
 * parse.
 *
 * NOTE(review): the dissector's boolean return is deliberately ignored; on
 * a failed dissection the hash is computed from whatever @keys contains —
 * confirm the dissector zero-fills @keys in that case.
 */
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}
Example #3
0
/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * Dissects the packet as far as possible and returns the offset of the
 * payload, or 0 if the packet cannot be dissected at all.  The main user
 * is currently BPF, so that packets can be truncated dynamically and
 * analyzed header-only without pushing actual payload to user space.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return __skb_get_poff(skb, skb->data, &keys,
				      skb_headlen(skb));

	return 0;
}
Example #4
0
/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}
Example #5
0
/* Compute a Toeplitz-style hash over the dissected flow of @skb and store
 * it in *@hash.  Only IPv4/IPv6 packets are hashed; TCP flows include the
 * port words (12 bytes of key material), everything else hashes addresses
 * only (8 bytes).
 *
 * Returns true when *@hash was written, false when the packet could not
 * be dissected or is not IP.
 */
static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct flow_keys flow;
	int data_len;

	if (!skb_flow_dissect_flow_keys(skb, &flow))
		return false;

	if (flow.basic.n_proto != htons(ETH_P_IP) &&
	    flow.basic.n_proto != htons(ETH_P_IPV6))
		return false;

	data_len = (flow.basic.ip_proto == IPPROTO_TCP) ? 12 : 8;

	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);

	return true;
}
Example #6
0
/* Map @skb to one of q->flows_cnt queue slots using set-associative
 * hashing: the flow hash selects a set of CAKE_SET_WAYS consecutive
 * queues, and q->tags[] remembers which flow owns each queue so that a
 * colliding flow can be diverted to an empty way within the same set.
 *
 * Returns the selected queue index, or 0 when flow isolation is disabled
 * (CAKE_FLOW_NONE) or there are too few queues to form a set.
 */
static inline unsigned int
cake_hash(struct cake_bin_data *q, const struct sk_buff *skb, int flow_mode)
{
#if KERNEL_VERSION(4, 2, 0) > LINUX_VERSION_CODE
	struct flow_keys keys;
#else
	struct flow_keys keys, host_keys;
#endif
	u32 flow_hash, host_hash, reduced_hash;

	if (unlikely(flow_mode == CAKE_FLOW_NONE ||
		     q->flows_cnt < CAKE_SET_WAYS))
		return 0;

#if KERNEL_VERSION(4, 2, 0) > LINUX_VERSION_CODE
	skb_flow_dissect(skb, &keys);

	/* Host hash covers only the address halves selected by flow_mode. */
	host_hash = jhash_3words(
		(__force u32)((flow_mode & CAKE_FLOW_DST_IP) ? keys.dst : 0),
		(__force u32)((flow_mode & CAKE_FLOW_SRC_IP) ? keys.src : 0),
		(__force u32)0, q->perturbation);

	if (!(flow_mode & CAKE_FLOW_FLOWS))
		flow_hash = host_hash;
	else
		flow_hash = jhash_3words(
			(__force u32)keys.dst,
			(__force u32)keys.src ^ keys.ip_proto,
			(__force u32)keys.ports, q->perturbation);

#else

/* Linux kernel 4.2.x have skb_flow_dissect_flow_keys which takes only 2
 * arguments
 */
#if (KERNEL_VERSION(4, 2, 0) <= LINUX_VERSION_CODE) && (KERNEL_VERSION(4, 3, 0) > LINUX_VERSION_CODE)
	skb_flow_dissect_flow_keys(skb, &keys);
#else
	skb_flow_dissect_flow_keys(skb, &keys,
				FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
#endif
	/* flow_hash_from_keys() sorts the addresses by value, so we have
	 * to preserve their order in a separate data structure to treat
	 * src and dst host addresses as independently selectable.
	 */
	host_keys = keys;
	host_keys.ports.ports     = 0;
	host_keys.basic.ip_proto  = 0;
	host_keys.keyid.keyid     = 0;
	host_keys.tags.vlan_id    = 0;
	host_keys.tags.flow_label = 0;

	if (!(flow_mode & CAKE_FLOW_SRC_IP)) {
		switch (host_keys.control.addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			host_keys.addrs.v4addrs.src = 0;
			break;

		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			memset(&host_keys.addrs.v6addrs.src, 0,
			       sizeof(host_keys.addrs.v6addrs.src));
			break;

		default:
			break;
		}
	}

	if (!(flow_mode & CAKE_FLOW_DST_IP)) {
		switch (host_keys.control.addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			host_keys.addrs.v4addrs.dst = 0;
			break;

		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			memset(&host_keys.addrs.v6addrs.dst, 0,
			       sizeof(host_keys.addrs.v6addrs.dst));
			break;

		default:
			break;
		}
	}

	host_hash = flow_hash_from_keys(&host_keys);
	if (!(flow_mode & CAKE_FLOW_FLOWS))
		flow_hash = host_hash;
	else
		flow_hash = flow_hash_from_keys(&keys);
#endif
	reduced_hash = reciprocal_scale(flow_hash, q->flows_cnt);

	/* set-associative hashing */
	/* fast path if no hash collision (direct lookup succeeds) */
	if (likely(q->tags[reduced_hash] == flow_hash)) {
		q->way_directs++;
	} else {
		u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
		u32 outer_hash = reduced_hash - inner_hash;
		u32 i, j, k;

		/* check if any active queue in the set is reserved for
		 * this flow. count the empty queues in the set, too
		 */

		for (i = j = 0, k = inner_hash; i < CAKE_SET_WAYS;
		     i++, k = (k + 1) % CAKE_SET_WAYS) {
			if (q->tags[outer_hash + k] == flow_hash) {
				q->way_hits++;
				goto found;
			} else if (list_empty(&q->flows[outer_hash + k].
					      flowchain)) {
				j++;
			}
		}

		/* no queue is reserved for this flow */
		if (j) {
			/* there's at least one empty queue, so find one
			 * to reserve.  k resumes from the scan above,
			 * which has cycled back to inner_hash.
			 */
			q->way_misses++;

			for (i = 0; i < CAKE_SET_WAYS; i++, k = (k + 1)
				     % CAKE_SET_WAYS)
				if (list_empty(&q->flows[outer_hash + k].
					       flowchain))
					goto found;
		} else {
			/* With no empty queues default to the original
			 * queue and accept the collision.
			 */
			q->way_collisions++;
		}

found:
		/* reserve queue for future packets in same flow */
		reduced_hash = outer_hash + k;
		q->tags[reduced_hash] = flow_hash;
	}

	return reduced_hash;
}