Example #1
static struct netdev_queue *imq_select_queue(struct net_device *dev,
						struct sk_buff *skb)
{
	u16 queue_index = 0;
	u32 hash;

	if (likely(dev->real_num_tx_queues == 1))
		goto out;

	/* IMQ can be receiving ingress or egress packets. */

	/* Check first whether an rx_queue has been recorded */
	if (skb_rx_queue_recorded(skb)) {
		queue_index = skb_get_rx_queue(skb);
		goto out;
	}

	/* Check if socket has tx_queue set */
	if (sk_tx_queue_recorded(skb->sk)) {
		queue_index = sk_tx_queue_get(skb->sk);
		goto out;
	}

	/* Try to use the socket hash */
	if (skb->sk && skb->sk->sk_hash) {
		hash = skb->sk->sk_hash;
		queue_index =
			(u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
		goto out;
	}

	/* No usable socket state: fall back to a hash of the packet contents */
	queue_index = skb_tx_hash(dev, skb);

out:
	if (unlikely(queue_index >= dev->real_num_tx_queues))
		queue_index = (u16)(queue_index % dev->real_num_tx_queues);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
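The socket-hash branch above maps a 32-bit hash onto the queue range with a multiply-and-shift rather than a modulo, the same trick the kernel exposes as reciprocal_scale(). A minimal standalone sketch of that mapping (the helper name scale_to_range is illustrative, not from the driver):

#include <stdint.h>

/* Map a 32-bit hash uniformly onto [0, range): the top 32 bits of the
 * 64-bit product hash * range give a scaled index with no division.
 */
static inline uint16_t scale_to_range(uint32_t hash, uint32_t range)
{
	return (uint16_t)(((uint64_t)hash * range) >> 32);
}

For example, scale_to_range(0x80000000, 8) yields 4: a hash halfway through the 32-bit space lands halfway through the queue range.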
Example #2
/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_tx_queues = ndev->real_num_tx_queues;
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay) {
		/* No queue is cached on the socket, or the socket can
		 * tolerate a queue change without reordering (ooo_okay),
		 * so pick again.  If forwarding a packet, we use the
		 * recorded queue when available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	while (unlikely(q_idx >= num_tx_queues))
		q_idx -= num_tx_queues;

	return q_idx;
}
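An ndo_select_queue implementation like this takes effect only once it is plugged into the driver's net_device_ops; a minimal sketch of the hookup (the struct name device_ops is illustrative, other callbacks elided):

static const struct net_device_ops device_ops = {
	/* ... .ndo_open, .ndo_start_xmit, etc. elided ... */
	.ndo_select_queue	= netvsc_select_queue,
};

The core then calls it from the transmit path for each skb whenever the device has more than one TX queue, so the function must stay cheap and lock-free.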
Example #3
/*
 * Returns a Tx hash for the given packet, using the given number of Tx
 * queues as the distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
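reciprocal_scale(), used in the return statement, is the kernel's helper for exactly the multiply-and-shift range reduction seen in Example #1; its definition in include/linux/kernel.h is essentially:

static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
	return (u32)(((u64) val * ep_ro) >> 32);
}

so the function finally returns an index inside the traffic class's [qoffset, qoffset + qcount) slice.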
Example #4
u16 mpodp_select_queue(struct net_device *dev, struct sk_buff *skb
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0))
		       , void *accel_priv, select_queue_fallback_t fallback
#endif
		       )
{
	int txq;

	/* Prefer the RX queue recorded on the skb (keeps forwarded flows
	 * on a matching TX queue); otherwise spread by the current CPU.
	 */
	txq = (skb_rx_queue_recorded(skb)
	       ? skb_get_rx_queue(skb)
	       : smp_processor_id());

	return txq % dev->real_num_tx_queues;
}
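The earlier examples fold an oversized index into range with a subtract loop, while this one uses a plain modulo; the two agree, and the loop form merely avoids a hardware divide when the index is already close to the range (at most one or two iterations for a recorded RX queue). A quick userspace check of that equivalence (the function name fold_by_subtract is illustrative):

#include <assert.h>
#include <stdint.h>

/* Same reduction as the while-loops in netvsc_select_queue and
 * __skb_tx_hash above. */
static uint32_t fold_by_subtract(uint32_t idx, uint32_t nqueues)
{
	while (idx >= nqueues)
		idx -= nqueues;
	return idx;
}

int main(void)
{
	for (uint32_t idx = 0; idx < 4096; idx++)
		assert(fold_by_subtract(idx, 8) == idx % 8);
	return 0;
}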
Example #5
File: rx.c  Project: 3bsa/linux
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	const __be16 *ports;
	__be16 ether_type;
	int nhoff;
	int rc;

	/* The core RPS/RFS code has already parsed and validated
	 * VLAN, IP and transport headers.  We assume they are in the
	 * header area.
	 */

	if (skb->protocol == htons(ETH_P_8021Q)) {
		const struct vlan_hdr *vh =
			(const struct vlan_hdr *)skb->data;

		/* We can't filter on the IP 5-tuple and the vlan
		 * together, so just strip the vlan header and filter
		 * on the IP part.
		 */
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
		ether_type = vh->h_vlan_encapsulated_proto;
		nhoff = sizeof(struct vlan_hdr);
	} else {
		ether_type = skb->protocol;
		nhoff = 0;
	}

	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	spec.ether_type = ether_type;

	if (ether_type == htons(ETH_P_IP)) {
		const struct iphdr *ip =
			(const struct iphdr *)(skb->data + nhoff);

		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
		if (ip_is_fragment(ip))
			return -EPROTONOSUPPORT;
		spec.ip_proto = ip->protocol;
		spec.rem_host[0] = ip->saddr;
		spec.loc_host[0] = ip->daddr;
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
	} else {
		const struct ipv6hdr *ip6 =
			(const struct ipv6hdr *)(skb->data + nhoff);

		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
				    nhoff + sizeof(*ip6) + 4);
		spec.ip_proto = ip6->nexthdr;
		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
		ports = (const __be16 *)(ip6 + 1);
	}

	spec.rem_port = ports[0];
	spec.loc_port = ports[1];

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	if (ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);

	return rc;
}
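The flow_id stored in rps_flow_id[] is what lets the driver later ask the RFS core whether a steering filter is still wanted. A hedged sketch of that expiry scan, built on the in-kernel helper rps_may_expire_flow(); the removal callback filter_rfs_remove is hypothetical, the real driver's hook may differ:

/* Periodically scan installed RFS filters and drop the stale ones. */
static void efx_expire_rfs_filters(struct efx_nic *efx, u16 rxq_index,
				   unsigned int nfilters)
{
	unsigned int index;

	for (index = 0; index < nfilters; index++) {
		if (rps_may_expire_flow(efx->net_dev, rxq_index,
					efx->rps_flow_id[index], index))
			/* hypothetical removal hook, for illustration */
			efx->type->filter_rfs_remove(efx, index);
	}
}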