struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct skb_frag_struct *skb_frags,
			       struct mlx4_en_rx_alloc *page_alloc,
			       unsigned int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb->dev = priv->dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
					      length, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
						 length, DMA_FROM_DEVICE);
		skb->tail += length;
	} else {
		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
						      skb_shinfo(skb)->frags,
						      page_alloc, length);
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}
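/*
 * Note: SMALL_PACKET_SIZE and HEADER_COPY_SIZE above come from mlx4_en.h.
 * The definitions below are a reference sketch of the values used by the
 * upstream driver of this period (an assumption, not quoted from this tree):
 * frames up to SMALL_PACKET_SIZE are copied entirely into the skb linear
 * buffer, while larger frames keep their page fragments and only
 * HEADER_COPY_SIZE bytes of headers are pulled into the linear part.
 *
 * #define SMALL_PACKET_SIZE	(256 - NET_IP_ALIGN)
 * #define HEADER_COPY_SIZE	(128 - NET_IP_ALIGN)
 */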
static struct mbuf *mlx4_en_rx_mb(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_desc *rx_desc,
				  struct mbuf **mb_list,
				  unsigned int length)
{
	struct mbuf *mb;

	mb = mb_list[0];
	/* Move relevant fragments to mb */
	if (unlikely(mlx4_en_complete_rx_desc(priv, rx_desc, mb_list, length)))
		return NULL;
	return mb;
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq,
			  int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor "
			       "syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				priv->port_stats.rx_chksum_good++;
				/* This packet is eligible for LRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (dev->features & NETIF_F_GRO) {
					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
					if (!gro_skb)
						goto next;

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, skb_shinfo(gro_skb)->frags,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					skb_shinfo(gro_skb)->nr_frags = nr;
					gro_skb->len = length;
					gro_skb->data_len = length;
					gro_skb->truesize += length;
					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

					if (priv->vlgrp && (cqe->vlan_my_qpn &
							    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)))
						vlan_gro_frags(&cq->napi, priv->vlgrp,
							       be16_to_cpu(cqe->sl_vid));
					else
						napi_gro_frags(&cq->napi);

					goto next;
				}

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						 be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			goto out;
		}
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
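/*
 * For context: a sketch of the NAPI poll callback that typically drives
 * mlx4_en_process_rx_cq() in this driver. The helpers INC_PERF_COUNTER()
 * and mlx4_en_arm_cq(), and the cq->dev back-pointer, are assumptions based
 * on the upstream mlx4_en driver of this period, not taken from this file.
 */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up the whole quota, more completions are likely pending -
	 * stay scheduled and let NAPI call us again */
	if (done == budget)
		INC_PERF_COUNTER(priv->pstats.napi_quota);
	else {
		/* All completions handled - re-enable CQ interrupts */
		napi_complete(napi);
		mlx4_en_arm_cq(priv, cq);
	}
	return done;
}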