Example #1
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget)
		INC_PERF_COUNTER(priv->pstats.napi_quota);
	else {
		/* Done for now */
		napi_complete(napi);
		mlx4_en_arm_cq(priv, cq);
	}
	return done;
}
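This is the modern NAPI form: the handler is handed the napi_struct it was registered with, reaps at most budget completions, and only re-arms the CQ interrupt once it has reported itself done via napi_complete(). A minimal sketch of the registration such a handler assumes; the weight of 64 and the helper name are illustrative, not taken from the driver:

#include <linux/netdevice.h>

/* Hypothetical setup sketch: netif_napi_add() binds the poll handler
 * and its weight to the device, napi_enable() allows it to be scheduled. */
static void example_rx_napi_setup(struct mlx4_en_cq *cq)
{
	netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
	napi_enable(&cq->napi);
}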
Example #2
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget)
{
	struct mlx4_en_cq *cq = poll_dev->priv;
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;
	int work = min(*budget, poll_dev->quota);

	done = mlx4_en_process_rx_cq(dev, cq, work);
	poll_dev->quota -= done;
	*budget -= done;

	/* If we used up all the quota - we're probably not done yet... */
	if (done == work) {
		INC_PERF_COUNTER(priv->pstats.napi_quota);
		return 1;
	}

	/* Done for now */
	netif_rx_complete(poll_dev);
	mlx4_en_arm_cq(priv, cq);
	return 0;
}
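Example #2 is the same handler written against the older net_device->poll interface: the budget is passed by pointer, the handler must decrement the polling device's quota itself, and the return value (1 = keep polling, 0 = done) replaces napi_complete(). A sketch of the wiring this variant assumes, using the pre-2.6.24 net_device fields; the helper is illustrative:

/* Hypothetical setup for the legacy polling API: the CQ hangs off a
 * polling net_device whose ->poll and ->weight drive completion work. */
static void example_legacy_poll_setup(struct net_device *poll_dev,
				      struct mlx4_en_cq *cq)
{
	poll_dev->priv   = cq;
	poll_dev->poll   = mlx4_en_poll_rx_cq;
	poll_dev->weight = 64;
}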
Example #3
void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock(&ring->comp_lock);
}
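The trylock-or-retry pattern avoids spinning against the transmit path: if comp_lock is contended the handler simply re-arms the timer and tries again later, and it re-arms it anyway while packets remain in flight so completions are reaped even when CQ interrupts stay coalesced. A sketch of the timer wiring this assumes, using the pre-4.15 setup_timer() API; the helper name is illustrative:

#include <linux/timer.h>

/* Hypothetical init sketch: point cq->timer at mlx4_en_poll_tx_cq with
 * the CQ itself as the callback argument. */
static void example_tx_cq_timer_init(struct mlx4_en_cq *cq)
{
	setup_timer(&cq->timer, mlx4_en_poll_tx_cq, (unsigned long)cq);
}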
Example #4
void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock_irq(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock_irq(&ring->comp_lock);
}
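Relative to Example #3 this variant differs in two ways: tx_ring is an embedded array rather than an array of pointers, and comp_lock is taken with the _irq variants, presumably because the same lock can also be taken from hard-irq context. Disabling local interrupts while holding the lock prevents a self-deadlock in which an interrupt arriving on the same CPU spins on a lock that CPU already holds.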
Example #5
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor "
				  "syndrome:%d syndrome:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(priv->rx_csum)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				priv->port_stats.rx_chksum_good++;
				/* This packet is eligible for LRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (mlx4_en_can_lro(cqe->status) &&
				    dev->features & NETIF_F_LRO) {

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, lro_frags,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					if (priv->vlgrp && (cqe->vlan_my_qpn &
							    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
						lro_vlan_hwaccel_receive_frags(
						       &ring->lro, lro_frags,
						       length, length,
						       priv->vlgrp,
						       be16_to_cpu(cqe->sl_vid),
						       NULL, 0);
					} else
						lro_receive_frags(&ring->lro,
								  lro_frags,
								  length,
								  length,
								  NULL, 0);

					goto next;
				}

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
				INC_PERF_COUNTER(priv->pstats.lro_misses);
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			lro_flush_all(&ring->lro);
			goto out;
		}
	}

	/* If CQ is empty flush all LRO sessions unconditionally */
	lro_flush_all(&ring->lro);

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
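The loop condition is the standard mlx4 ownership test: hardware toggles the CQE owner bit each time it wraps the CQ, so a CQE belongs to software while the bit's polarity matches the wrap parity of the consumer index. A sketch of the test; the XNOR macro mirrors the driver's logical-equality definition, while the wrapper function is illustrative:

#define XNOR(x, y)	(!(x) == !(y))

/* Illustrative wrapper: true while the CQE at cons_index is owned by
 * software. cq_size is a power of two, so (cons_index & cq_size)
 * toggles on every wrap of the ring. */
static inline int example_cqe_is_ours(struct mlx4_cqe *cqe,
				      u32 cons_index, u32 cq_size)
{
	return XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cons_index & cq_size);
}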
Example #6
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct mbuf **mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct mbuf *mb;
#ifdef INET
	struct lro_entry *queued;
#endif
	int index;
	unsigned int length;
	int polled = 0;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		mb_list = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		if (invalid_cqe(priv, cqe))
			goto next;

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		mb = mlx4_en_rx_mb(priv, rx_desc, mb_list, length);
		if (!mb) {
			ring->errors++;
			goto next;
		}

		ring->bytes += length;
		ring->packets++;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);
			goto next;
		}

		mb->m_pkthdr.flowid = cq->ring;
		mb->m_flags |= M_FLOWID;
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;
		}
		if (likely(priv->rx_csum) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags =
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);
			/* This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - TCP/IP (v4)
			 * - without IP options
			 * - not an IP fragment
			 */
#ifdef INET
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)
					goto next;
			}
#endif

			/* LRO not possible, complete processing here */
			INC_PERF_COUNTER(priv->pstats.lro_misses);
		} else {
			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;
#ifdef INET
			if (priv->ip_reasm &&
			    cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4) &&
			    !mlx4_en_rx_frags(priv, ring, mb, cqe))
				goto next;
#endif
		}

		/* Push it up the stack */
		dev->if_input(dev, mb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget)
			goto out;
	}
out:
	/* Flush all pending IP reassembly and LRO sessions */
#ifdef INET
	mlx4_en_flush_frags(priv, ring);
	while ((queued = SLIST_FIRST(&ring->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&ring->lro.lro_active, next);
		tcp_lro_flush(&ring->lro, queued);
	}
#endif
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
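Example #6 is the FreeBSD port of the same routine: mbufs instead of skbs, if_input() instead of netif_receive_skb(), and the in-kernel tcp_lro machinery instead of the Linux inet_lro layer. A sketch of the per-ring LRO state the tcp_lro_rx()/tcp_lro_flush() calls assume, per netinet/tcp_lro.h; the helper is illustrative:

#include <sys/param.h>
#include <netinet/tcp_lro.h>

/* Hypothetical per-ring init: allocate LRO state and point it at the
 * interface so flushed super-packets reach ifp->if_input(). */
static int example_rx_ring_lro_init(struct mlx4_en_rx_ring *ring,
				    struct ifnet *ifp)
{
	int error;

	error = tcp_lro_init(&ring->lro);
	if (error != 0)
		return (error);
	ring->lro.ifp = ifp;
	return (0);
}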