Example #1
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
	struct sock *sk = skb->sk;
	int q_idx = sk_tx_queue_get(sk);

	if (q_idx < 0 || skb->ooo_okay ||
	    q_idx >= ndev->real_num_tx_queues) {
		u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
		int new_idx;

		new_idx = nvsc_dev->send_table[hash]
			% nvsc_dev->num_chn;

		if (q_idx != new_idx && sk &&
		    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_idx);

		q_idx = new_idx;
	}

	/* If the chosen channel is not open yet, fall back to queue 0. */
	if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
		q_idx = 0;

	return q_idx;
}
Example #2
static struct netdev_queue *imq_select_queue(struct net_device *dev,
						struct sk_buff *skb)
{
	u16 queue_index = 0;
	u32 hash;

	if (likely(dev->real_num_tx_queues == 1))
		goto out;

	/* IMQ can be receiving ingress or egress packets. */

	/* Check first if the rx_queue is set */
	if (skb_rx_queue_recorded(skb)) {
		queue_index = skb_get_rx_queue(skb);
		goto out;
	}

	/* Check if socket has tx_queue set */
	if (sk_tx_queue_recorded(skb->sk)) {
		queue_index = sk_tx_queue_get(skb->sk);
		goto out;
	}

	/* Try to use the socket hash */
	if (skb->sk && skb->sk->sk_hash) {
		hash = skb->sk->sk_hash;
		queue_index =
			(u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
		goto out;
	}

	/* The original snippet is truncated here; a remaining fallback
	 * (hashing the packet data itself) is omitted. The out label and
	 * the final lookup are implied by the gotos and the return type.
	 */
out:
	return netdev_get_tx_queue(dev, queue_index);
}
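As an aside, the multiply-shift on the socket hash above maps a 32-bit hash into [0, real_num_tx_queues) without a modulo: hash / 2^32 is a fixed-point fraction in [0, 1), so multiplying by the queue count and keeping the high 32 bits scales it into range. A standalone user-space demonstration (not kernel code):

/* Demo of the multiply-shift mapping used in Example #2:
 * ((u64)hash * n) >> 32 scales a 32-bit hash into [0, n). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t scale_hash(uint32_t hash, uint16_t n_queues)
{
	/* hash / 2^32 is a fraction in [0, 1); multiply by n and truncate. */
	return (uint16_t)(((uint64_t)hash * n_queues) >> 32);
}

int main(void)
{
	const uint16_t n = 8;
	const uint32_t hashes[] = { 0x00000000u, 0x40000000u,
				    0x80000000u, 0xffffffffu };

	for (size_t i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
		printf("hash %#010" PRIx32 " -> queue %u of %u\n",
		       hashes[i], (unsigned)scale_hash(hashes[i], n),
		       (unsigned)n);
	return 0;	/* prints queues 0, 2, 4 and 7 */
}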
Example #3
static u16 mlx5e_select_queue_assigned(struct mlx5e_priv *priv,
				       struct sk_buff *skb)
{
	struct mlx5e_sq_flow_map *flow_map;
	int sk_ix = sk_tx_queue_get(skb->sk);
	u32 key_all, key_dip, key_dport;
	u16 dport;
	u32 dip;

	if (sk_ix >= priv->params.num_channels)
		return sk_ix;

	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
		dip = ip_hdr(skb)->daddr;
		/* The destination port sits at the same offset in UDP and
		 * TCP headers, so udp_hdr() works for both protocols here.
		 */
		if (ip_hdr(skb)->protocol == IPPROTO_UDP ||
		    ip_hdr(skb)->protocol == IPPROTO_TCP)
			dport = udp_hdr(skb)->dest;
		else
			goto fallback;
	} else {
		goto fallback;
	}

	key_all = dip ^ dport;
	hash_for_each_possible_rcu(priv->flow_map_hash, flow_map,
				   hlist, key_all)
		if (flow_map->dst_ip == dip && flow_map->dst_port == dport)
			return flow_map->queue_index;

	key_dip = dip;
	hash_for_each_possible_rcu(priv->flow_map_hash, flow_map,
				   hlist, key_dip)
		if (flow_map->dst_ip == dip)
			return flow_map->queue_index;

	key_dport = dport;
	hash_for_each_possible_rcu(priv->flow_map_hash, flow_map,
				   hlist, key_dport)
		if (flow_map->dst_port == dport)
			return flow_map->queue_index;

fallback:
	return 0;
}
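The three-pass lookup above only finds entries that were inserted under a matching key: dst_ip ^ dst_port when both sides are set, which collapses to just dst_ip or just dst_port when the other side is stored as zero. A minimal sketch of such an insert path, assuming a DECLARE_HASHTABLE-backed flow_map_hash and the mlx5e_sq_flow_map layout implied by the lookups; the helper name flow_map_add is hypothetical, not from the driver:

/* Hypothetical helper (assumption, not driver code): insert a flow-map
 * entry so the three lookups in Example #3 can find it. A wildcarded
 * side is stored as zero, so dst_ip ^ dst_port degenerates to the key
 * the corresponding lookup pass will use.
 */
static int flow_map_add(struct mlx5e_priv *priv,
			u32 dst_ip, u16 dst_port, u16 queue_index)
{
	struct mlx5e_sq_flow_map *flow_map;

	flow_map = kzalloc(sizeof(*flow_map), GFP_KERNEL);
	if (!flow_map)
		return -ENOMEM;

	flow_map->dst_ip = dst_ip;
	flow_map->dst_port = dst_port;
	flow_map->queue_index = queue_index;

	hash_add_rcu(priv->flow_map_hash, &flow_map->hlist,
		     dst_ip ^ dst_port);
	return 0;
}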
Example #4
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
Example #5
/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_tx_queues = ndev->real_num_tx_queues;
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	/* Clamp into range; a recorded rx queue may exceed our queue count. */
	while (unlikely(q_idx >= num_tx_queues))
		q_idx -= num_tx_queues;

	return q_idx;
}
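Example #5 delegates the hash path to netvsc_get_tx_queue(), which is not included in this collection. A plausible sketch, assuming its body mirrors the send-table lookup and socket caching that Example #6 performs inline (the field and helper names follow Example #6; the exact upstream body may differ):

/* Sketch only: assumed to match the logic Example #6 performs inline. */
static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	/* Hash the flow into the host-provided send table. */
	q_idx = ndc->tx_send_table[__skb_tx_hash(ndev, skb,
						 VRSS_SEND_TAB_SIZE)];

	/* Cache the mapping on full sockets that hold a dst reference. */
	if (q_idx != old_idx && sk && sk_fullsock(sk) &&
	    rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}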
Example #6
/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	unsigned int num_tx_queues = ndev->real_num_tx_queues;
	struct sock *sk = skb->sk;
	int q_idx = sk_tx_queue_get(sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
		u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
		int new_idx;

		new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;

		if (q_idx != new_idx && sk &&
		    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_idx);

		q_idx = new_idx;
	}

	return q_idx;
}
Example #7
static inline bool sk_tx_queue_recorded(struct sock *sk)
{
	return (sk_tx_queue_get(sk) >= 0);
}
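For context, sk_tx_queue_get() returns -1 when no queue has been recorded on the socket, and that sentinel is what this predicate tests. A sketch of the companion helpers from include/net/sock.h of the same era (the field name sk_tx_queue_mapping is taken from the upstream kernel, but treat the exact bodies as an approximation):

/* Approximate companion helpers; -1 is the "not recorded" sentinel. */
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	/* A NULL socket reads as "no queue recorded". */
	return sk ? sk->sk_tx_queue_mapping : -1;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}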