u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	int up = 0;

	if (priv->params.num_rl_txqs) {
		u16 ix = mlx5e_select_queue_assigned(priv, skb);

		if (ix) {
			sk_tx_queue_set(skb->sk, ix);
			return ix;
		}
	}

	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	if (channel_ix >= priv->params.num_channels)
		channel_ix = reciprocal_scale(channel_ix,
					      priv->params.num_channels);

	return priv->tc_to_txq_map[channel_ix][up];
}
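The reciprocal_scale() call above is the kernel's divide-free way of folding a 32-bit value into [0, num_channels): a 64-bit multiply followed by a right shift by 32. A minimal userspace sketch of the same trick (the helper body mirrors the one-liner in include/linux/kernel.h; the sample inputs are made up):

#include <stdint.h>
#include <stdio.h>

/* Multiply-and-shift fold of a 32-bit value into [0, ep_ro),
 * as in the kernel's reciprocal_scale(). */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	const uint32_t num_channels = 8;
	const uint32_t samples[] = { 0x00000000u, 0x40000000u,
				     0x9e3779b9u, 0xffffffffu };

	for (unsigned int i = 0; i < 4; i++)
		printf("0x%08x -> channel %u\n", samples[i],
		       reciprocal_scale(samples[i], num_channels));
	return 0;
}

Unlike a plain modulo, the mapping is monotonic in its input, so it only spreads traffic evenly when the input is itself well distributed across the full 32-bit range.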
Example #2
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
	struct sock *sk = skb->sk;
	int q_idx = sk_tx_queue_get(sk);

	if (q_idx < 0 || skb->ooo_okay ||
	    q_idx >= ndev->real_num_tx_queues) {
		u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
		int new_idx;

		new_idx = nvsc_dev->send_table[hash]
			% nvsc_dev->num_chn;

		if (q_idx != new_idx && sk &&
		    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_idx);

		q_idx = new_idx;
	}

	if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
		q_idx = 0;

	return q_idx;
}
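Every picker on this page follows the same skeleton: trust the queue index cached on the socket unless the skb permits reordering (ooo_okay) or the cached index has gone stale, recompute it, and cache the fresh pick back on the socket. A distilled sketch of that pattern (the helper name and the recompute callback are hypothetical; the accessors are the real kernel ones):

static u16 pick_and_cache_tx_queue(struct net_device *dev,
				   struct sk_buff *skb,
				   u16 (*recompute)(struct net_device *,
						    struct sk_buff *))
{
	struct sock *sk = skb->sk;
	int q_idx = sk_tx_queue_get(sk);	/* -1 when nothing cached */

	if (q_idx < 0 || skb->ooo_okay ||
	    q_idx >= dev->real_num_tx_queues) {
		q_idx = recompute(dev, skb);

		/* Cache only for full sockets holding a dst reference,
		 * mirroring the guards in the examples above. */
		if (sk && sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, q_idx);
	}

	return q_idx;
}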
Example #3
File: netvsc_drv.c  Project: mdamt/linux
static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) &
				   (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}
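Note that this variant masks the hash with VRSS_SEND_TAB_SIZE - 1 instead of taking it modulo the table size; the two are equivalent only because the table size is a power of two (16 in the hyperv driver headers). A quick userspace check of that equivalence:

#include <assert.h>
#include <stdint.h>

#define VRSS_SEND_TAB_SIZE 16	/* a power of two, as in hyperv_net.h */

int main(void)
{
	for (uint32_t hash = 0; hash < 4096; hash++)
		assert((hash & (VRSS_SEND_TAB_SIZE - 1)) ==
		       (hash % VRSS_SEND_TAB_SIZE));
	return 0;
}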
Example #4
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
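__netdev_pick_tx() consults XPS first and only hashes as a fallback. Ignoring traffic classes and recorded rx queues, skb_tx_hash() reduces to scaling the flow hash into the queue range, roughly like this (a simplification, not the full net/core/dev.c implementation):

static u16 skb_tx_hash_simplified(const struct net_device *dev,
				  struct sk_buff *skb)
{
	/* Fold the 32-bit flow hash into [0, real_num_tx_queues),
	 * the same reciprocal_scale() trick used in the first example. */
	return (u16)reciprocal_scale(skb_get_hash(skb),
				     dev->real_num_tx_queues);
}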
Example #5
/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	unsigned int num_tx_queues = ndev->real_num_tx_queues;
	struct sock *sk = skb->sk;
	int q_idx = sk_tx_queue_get(sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
		u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
		int new_idx;

		new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;

		if (q_idx != new_idx && sk &&
		    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_idx);

		q_idx = new_idx;
	}

	return q_idx;
}
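For completeness: a picker like this only takes effect once it is wired into the driver's net_device_ops, at which point the core calls it from netdev_pick_tx() instead of the default __netdev_pick_tx(). A sketch of the hookup, with the surrounding ops as placeholders:

static const struct net_device_ops device_ops = {
	.ndo_open		= netvsc_open,
	.ndo_stop		= netvsc_close,
	.ndo_start_xmit		= netvsc_start_xmit,
	.ndo_select_queue	= netvsc_select_queue,
	/* ... */
};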