static struct netdev_queue *imq_select_queue(struct net_device *dev,
					     struct sk_buff *skb)
{
	u16 queue_index = 0;
	u32 hash;

	if (likely(dev->real_num_tx_queues == 1))
		goto out;

	/* IMQ can be receiving ingress or egress packets. */

	/* Check first if rx_queue is set */
	if (skb_rx_queue_recorded(skb)) {
		queue_index = skb_get_rx_queue(skb);
		goto out;
	}

	/* Check if socket has tx_queue set */
	if (sk_tx_queue_recorded(skb->sk)) {
		queue_index = sk_tx_queue_get(skb->sk);
		goto out;
	}

	/* Try to use the socket hash */
	if (skb->sk && skb->sk->sk_hash) {
		hash = skb->sk->sk_hash;
		queue_index = (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
		goto out;
	}

out:
	return netdev_get_tx_queue(dev, queue_index);
}
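The (u16)(((u64)hash * dev->real_num_tx_queues) >> 32) expression above scales a 32-bit hash into the queue range by keeping the high bits of the 64-bit product, avoiding a modulo. A minimal user-space sketch of that mapping (the queue count and sample hashes are made-up illustration values):

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash into [0, num_queues) via the high half of the
 * 64-bit product, mirroring the expression used in the snippet above. */
static uint16_t hash_to_queue(uint32_t hash, uint32_t num_queues)
{
	return (uint16_t)(((uint64_t)hash * num_queues) >> 32);
}

int main(void)
{
	uint32_t num_queues = 8;	/* hypothetical queue count */
	uint32_t samples[] = { 0x12345678, 0xdeadbeef, 0xffffffff, 0 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("hash 0x%08x -> queue %u\n",
		       samples[i], hash_to_queue(samples[i], num_queues));
	return 0;
}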
/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	unsigned int num_tx_queues = ndev->real_num_tx_queues;
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	while (unlikely(q_idx >= num_tx_queues))
		q_idx -= num_tx_queues;

	return q_idx;
}
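For context, a driver callback like the one above is hooked into the stack through net_device_ops. A minimal sketch, assuming a kernel generation where ndo_select_queue takes the same accel_priv/fallback arguments used by this snippet; the names my_select_queue and my_netdev_ops are illustrative, not from the source:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative policy: reuse the recorded rx queue when present,
 * otherwise defer to the core's fallback (hash-based) selection. */
static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   void *accel_priv, select_queue_fallback_t fallback)
{
	if (skb_rx_queue_recorded(skb))
		return skb_get_rx_queue(skb) % dev->real_num_tx_queues;

	return fallback(dev, skb);
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_select_queue = my_select_queue,
	/* ... other ndo_* handlers ... */
};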
/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queues'
 * number to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
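When dev->num_tc is set, the function above confines the result to the [qoffset, qoffset + qcount) slice of queues that belongs to the packet's traffic class. A minimal user-space sketch of that final step, with reciprocal_scale written out as the multiply-and-shift the kernel uses; the per-class offsets, counts, and the sample hash are made-up example values:

#include <stdint.h>
#include <stdio.h>

/* Same reduction the kernel's reciprocal_scale() performs:
 * map a 32-bit value into [0, ep_ro) via the high half of the product. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	/* Hypothetical per-traffic-class queue ranges: offset and count. */
	struct { uint16_t offset, count; } tc_to_txq[] = {
		{ 0, 4 },	/* TC 0 -> queues 0..3 */
		{ 4, 2 },	/* TC 1 -> queues 4..5 */
		{ 6, 2 },	/* TC 2 -> queues 6..7 */
	};
	uint32_t hash = 0xdeadbeef;	/* stand-in for skb_get_hash() */

	for (unsigned tc = 0; tc < 3; tc++) {
		uint16_t q = (uint16_t)reciprocal_scale(hash, tc_to_txq[tc].count)
			     + tc_to_txq[tc].offset;
		printf("tc %u -> queue %u\n", tc, q);
	}
	return 0;
}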
u16 mpodp_select_queue(struct net_device *dev, struct sk_buff *skb
#if (LINUX_VERSION_CODE > KERNEL_VERSION (3, 13, 0))
		       , void *accel_priv, select_queue_fallback_t fallback
#endif
		       )
{
	int txq;

	/* Prefer the recorded rx queue; otherwise spread by the current CPU. */
	txq = (skb_rx_queue_recorded(skb) ?
	       skb_get_rx_queue(skb) : smp_processor_id());

	txq = txq % dev->real_num_tx_queues;

	return txq;

	/* NOTE: the early return above makes the version-dependent fallback
	 * paths below unreachable.
	 */
#if (LINUX_VERSION_CODE > KERNEL_VERSION (3, 13, 0))
	return fallback(dev, skb) % dev->real_num_tx_queues;
#else
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
#endif
}
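When no rx queue is recorded, the snippet above spreads transmissions by the CPU doing the send and reduces it modulo the queue count. A rough user-space analogue of that mapping, using sched_getcpu() in place of smp_processor_id(); the queue count is a made-up example value:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_tx_queues = 4;	/* hypothetical queue count */
	int cpu = sched_getcpu();	/* analogue of smp_processor_id() */

	if (cpu < 0) {
		perror("sched_getcpu");
		return 1;
	}
	printf("cpu %d -> tx queue %u\n", cpu, cpu % num_tx_queues);
	return 0;
}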