Example #1: mlx4_en_activate_rx_rings()
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}
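This activate routine (and the SRQ-based variant in Example #4) rounds the RX descriptor stride up to a power of two and derives log_stride from it with ffs(), so the completion loops in Examples #2 and #3 can locate a descriptor with a plain shift, ring->buf + (index << ring->log_stride). The stand-alone user-space sketch below only illustrates that arithmetic; the descriptor size and the roundup helper are made-up stand-ins, not driver code.

/* Stand-alone illustration of the stride/log_stride arithmetic used above.
 * The descriptor size is a made-up placeholder, not the real RX layout. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int roundup_pow_of_two_u(unsigned int x)
{
	unsigned int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int desc_size = 16 + 16 * 3;	/* pretend header + 3 data segments */
	unsigned int stride = roundup_pow_of_two_u(desc_size);
	unsigned int log_stride = ffs(stride) - 1;	/* log2(stride), stride is 2^n */
	unsigned int index;

	for (index = 0; index < 4; index++)
		printf("descriptor %u starts at byte offset %u\n",
		       index, index << log_stride);
	return 0;
}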
Example #2: mlx4_en_process_rx_cq() (Linux, LRO receive path)
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor "
				  "syndrom:%d syndrom:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(priv->rx_csum)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				priv->port_stats.rx_chksum_good++;
				/* This packet is eligible for LRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (mlx4_en_can_lro(cqe->status) &&
				    dev->features & NETIF_F_LRO) {

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, lro_frags,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					if (priv->vlgrp && (cqe->vlan_my_qpn &
							    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
						lro_vlan_hwaccel_receive_frags(
						       &ring->lro, lro_frags,
						       length, length,
						       priv->vlgrp,
						       be16_to_cpu(cqe->sl_vid),
						       NULL, 0);
					} else
						lro_receive_frags(&ring->lro,
								  lro_frags,
								  length,
								  length,
								  NULL, 0);

					goto next;
				}

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
				INC_PERF_COUNTER(priv->pstats.lro_misses);
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			lro_flush_all(&ring->lro);
			goto out;
		}
	}

	/* If the CQ is empty, flush all LRO sessions unconditionally */
	lro_flush_all(&ring->lro);

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
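The loop condition XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, cq->mcq.cons_index & cq->size) is a phase-bit test for a power-of-two completion queue: the hardware flips the owner bit each time it wraps the CQ, the consumer's wrap parity is the size bit of its free-running consumer index, and a CQE is ready exactly when the two agree. The user-space model below is an illustration of that idea only, assuming XNOR(x, y) compares the truth values of its operands as in the driver headers; fake_cqe and hw_complete() are invented names.

/* User-space model of the CQE ownership test (illustration only). */
#include <stdio.h>

#define CQ_SIZE	8			/* must be a power of two */
#define XNOR(x, y) (!(x) == !(y))	/* assumed equivalent to the driver macro */

struct fake_cqe {
	unsigned char owner;		/* wrap parity stamped by the producer */
};

static struct fake_cqe cq_buf[CQ_SIZE];

/* Producer side: complete one entry and record the current wrap parity. */
static void hw_complete(unsigned int prod_index)
{
	cq_buf[prod_index & (CQ_SIZE - 1)].owner = (prod_index & CQ_SIZE) ? 1 : 0;
}

/* Consumer side: drain exactly the entries whose parity matches ours,
 * mirroring the while (XNOR(...)) loop above. */
static unsigned int drain(unsigned int cons)
{
	while (XNOR(cq_buf[cons & (CQ_SIZE - 1)].owner, cons & CQ_SIZE))
		cons++;
	return cons;
}

int main(void)
{
	unsigned int prod = 0, cons = 0;
	int i;

	/* Start with every slot carrying the "wrong" parity so an empty
	 * queue is never mistaken for a full one. */
	for (i = 0; i < CQ_SIZE; i++)
		cq_buf[i].owner = 1;

	for (i = 0; i < CQ_SIZE; i++)	/* first burst fills the queue */
		hw_complete(prod++);
	cons = drain(cons);
	printf("first pass: consumed %u of %u\n", cons, prod);

	for (i = 0; i < 3; i++)		/* second burst wraps the queue */
		hw_complete(prod++);
	cons = drain(cons);
	printf("second pass: consumed %u of %u\n", cons, prod);
	return 0;
}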
Example #3: mlx4_en_process_rx_cq() (FreeBSD mbuf receive path)
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct mbuf **mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct mbuf *mb;
#ifdef INET
	struct lro_entry *queued;
#endif
	int index;
	unsigned int length;
	int polled = 0;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		mb_list = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		if (invalid_cqe(priv, cqe))
			goto next;

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		mb = mlx4_en_rx_mb(priv, rx_desc, mb_list, length);
		if (!mb) {
			ring->errors++;
			goto next;
		}

		ring->bytes += length;
		ring->packets++;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);
			goto next;
		}

		mb->m_pkthdr.flowid = cq->ring;
		mb->m_flags |= M_FLOWID;
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;
		}
		if (likely(priv->rx_csum) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags =
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);
			/* This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - TCP/IP (v4)
			 * - without IP options
			 * - not an IP fragment
			 */
#ifdef INET
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)
					goto next;
			}
#endif

			/* LRO not possible, complete processing here */
			INC_PERF_COUNTER(priv->pstats.lro_misses);
		} else {
			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;
#ifdef INET
			if (priv->ip_reasm &&
			    cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4) &&
			    !mlx4_en_rx_frags(priv, ring, mb, cqe))
				goto next;
#endif
		}

		/* Push it up the stack */
		dev->if_input(dev, mb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget)
			goto out;
	}
	/* Flush all pending IP reassembly and LRO sessions */
out:
#ifdef INET
	mlx4_en_flush_frags(priv, ring);
	while ((queued = SLIST_FIRST(&ring->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&ring->lro.lro_active, next);
		tcp_lro_flush(&ring->lro, queued);
	}
#endif
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
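Both versions of mlx4_en_process_rx_cq() (Examples #2 and #3) follow the same budget contract: process at most budget completions, return the number actually polled, and let the caller (NAPI on the Linux side) decide whether to poll again or re-enable interrupts depending on whether the budget was exhausted. The sketch below models only that control flow; queue_pop() and the pending counter are hypothetical stand-ins for the CQE ownership test.

/* Sketch of the polling-budget contract (illustration only; queue_pop()
 * and 'pending' are hypothetical stand-ins for the CQE ownership test). */
#include <stdio.h>

static int pending = 25;		/* pretend 25 completions are waiting */

static int queue_pop(void)
{
	if (pending == 0)
		return 0;
	pending--;
	return 1;
}

/* Mirrors the shape of the RX loop: stop once 'budget' completions have
 * been handled even if more work remains, and report how many were done. */
static int process_rx(int budget)
{
	int polled = 0;

	while (queue_pop()) {
		if (++polled == budget)
			break;
	}
	return polled;
}

int main(void)
{
	int budget = 8, done;

	do {
		done = process_rx(budget);
		printf("polled %d completions\n", done);
	} while (done == budget);	/* re-poll only while the budget was exhausted */
	return 0;
}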
Example #4: mlx4_en_activate_rx_rings() (SRQ-based variant)
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);
	int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
	
		ring->stride = stride;
		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;
	
		memset(ring->rx_info, 0, sizeof(*(ring->rx_info)));
		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);
	
		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
	
		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			mlx4_err(mdev, "Failed initializing ring allocator\n");
			goto err_allocator;
		}
	
		/* Fill Rx buffers */
		ring->full = 0;
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		mlx4_en_update_rx_prod_db(ring);
	
		/* Configure SRQ representing the ring */
		ring->srq.max    = ring->size;
		ring->srq.max_gs = max_gs;
		ring->srq.wqe_shift = ilog2(ring->stride);
	
		for (i = 0; i < ring->srq.max; ++i) {
			next = get_wqe(ring, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (ring->srq.max - 1));
		}
	
		err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn,
				     &ring->wqres.mtt, ring->wqres.db.dma,
				     &ring->srq);
		if (err) {
			mlx4_err(mdev, "Failed to allocate srq\n");
			ring_ind--;
			goto err_srq;
		}
		ring->srq.event = mlx4_en_srq_event;
	}

	return 0;

err_srq:
	while (ring_ind >= 0) {
		ring = &priv->rx_ring[ring_ind];
		mlx4_srq_free(mdev->dev, &ring->srq);
		ring_ind--;
	}

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}
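Both activate functions (Examples #1 and #4) unwind partial initialization with the same goto idiom: ring_ind records how far setup got, a failing step decrements it before jumping (the current ring was not set up), and the error labels release resources in reverse order as control falls through them. The minimal user-space sketch below models only that pattern; init_stage() and free_stage() are hypothetical placeholders for the per-ring setup steps.

/* Minimal model of the goto-unwind error handling above; init_stage() and
 * free_stage() are hypothetical placeholders for the per-ring setup steps. */
#include <stdio.h>

#define NSTAGES 3

static int init_stage(int i)
{
	printf("init stage %d\n", i);
	return (i == 2) ? -1 : 0;	/* pretend the last stage fails */
}

static void free_stage(int i)
{
	printf("free stage %d\n", i);
}

static int activate_all(void)
{
	int i, err;

	for (i = 0; i < NSTAGES; i++) {
		err = init_stage(i);
		if (err) {
			i--;		/* the failing stage has nothing to free */
			goto err_unwind;
		}
	}
	return 0;

err_unwind:
	while (i >= 0)
		free_stage(i--);	/* tear down completed stages in reverse order */
	return err;
}

int main(void)
{
	return activate_all() ? 1 : 0;
}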