static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;

	/* Service RQ (first) and WQ */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	if (rq_work_done > 0) {

		/* Replenish RQ */

		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	} else {

		/* If no work done, flush all LROs and exit polling */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		netif_rx_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return rq_work_done;
}
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;

	/* Service RQ (first) and WQ */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	if (rq_work_done > 0) {

		/* Replenish RQ */

		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	} else {

		/* If no work done, flush all LROs and exit polling */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return rq_work_done;
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor "
				  "syndrome:%d syndrome:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(priv->rx_csum)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				priv->port_stats.rx_chksum_good++;
				/* This packet is eligible for LRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (mlx4_en_can_lro(cqe->status) &&
				    dev->features & NETIF_F_LRO) {

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, lro_frags,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					if (priv->vlgrp && (cqe->vlan_my_qpn &
						cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
						lro_vlan_hwaccel_receive_frags(
						       &ring->lro, lro_frags,
						       length, length,
						       priv->vlgrp,
						       be16_to_cpu(cqe->sl_vid),
						       NULL, 0);
					} else
						lro_receive_frags(&ring->lro,
								  lro_frags,
								  length,
								  length,
								  NULL, 0);

					goto next;
				}

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
				INC_PERF_COUNTER(priv->pstats.lro_misses);
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						 be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			lro_flush_all(&ring->lro);
			goto out;
		}
	}

	/* If CQ is empty flush all LRO sessions unconditionally */
	lro_flush_all(&ring->lro);

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
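mlx4_en_process_rx_cq() is itself driven from the channel's NAPI poll callback. The following is a minimal sketch of such a wrapper, assuming the completion queue structure embeds its napi_struct and holds a net_device pointer, and that mlx4_en_arm_cq() re-arms the completion event interrupt; it illustrates the pattern rather than reproducing the driver's poll routine verbatim, and the function name my_mlx4_poll_rx_cq is hypothetical.

static int my_mlx4_poll_rx_cq(struct napi_struct *napi, int budget)
{
	/* Assumption: struct mlx4_en_cq embeds 'napi' and holds 'dev'. */
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	int done;

	/* Reap up to 'budget' completions; LRO sessions are flushed inside
	 * mlx4_en_process_rx_cq() whether or not the budget is exhausted. */
	done = mlx4_en_process_rx_cq(dev, cq, budget);

	if (done < budget) {
		/* Ring drained: stop polling and re-arm the CQ interrupt
		 * (mlx4_en_arm_cq() assumed available in the driver). */
		napi_complete(napi);
		mlx4_en_arm_cq(netdev_priv(dev), cq);
	}

	return done;
}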
void efx_flush_lro(struct efx_channel *channel)
{
	lro_flush_all(&channel->lro_mgr);
}
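All three drivers share the same contract with inet_lro: aggregated sessions must be flushed with lro_flush_all() before the poll routine calls napi_complete() and re-enables interrupts, otherwise partially aggregated packets would sit in the LRO descriptors until the next interrupt. Below is a minimal, self-contained sketch of that pattern; struct my_nic, my_rx_clean() and my_irq_unmask() are hypothetical names introduced for illustration and are not part of any of the drivers above.

#include <linux/netdevice.h>
#include <linux/inet_lro.h>

/* Hypothetical per-device state: one NAPI context and one LRO manager. */
struct my_nic {
	struct napi_struct napi;
	struct net_lro_mgr lro_mgr;
	struct net_device *netdev;
};

/* Assumed helpers: reap up to 'budget' Rx completions (feeding eligible
 * packets to lro_receive_skb()/lro_receive_frags()) and unmask the Rx IRQ. */
static int my_rx_clean(struct my_nic *nic, int budget);
static void my_irq_unmask(struct my_nic *nic);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_nic *nic = container_of(napi, struct my_nic, napi);
	int work_done = my_rx_clean(nic, budget);

	if (work_done < budget) {
		/* Ring drained: flush any LRO sessions still being
		 * aggregated, then stop polling and re-enable the IRQ. */
		if (nic->netdev->features & NETIF_F_LRO)
			lro_flush_all(&nic->lro_mgr);
		napi_complete(napi);
		my_irq_unmask(nic);
	}

	return work_done;
}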