static void lro_flush(struct net_lro_mgr *lro_mgr,
		      struct net_lro_desc *lro_desc)
{
	if (lro_desc->pkt_aggr_cnt > 1)
		lro_update_tcp_ip_header(lro_desc);

	skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;

	if (lro_desc->vgrp) {
		if (lro_mgr->features & LRO_F_NAPI)
			vlan_hwaccel_receive_skb(lro_desc->parent,
						 lro_desc->vgrp,
						 lro_desc->vlan_tag);
		else
			vlan_hwaccel_rx(lro_desc->parent,
					lro_desc->vgrp,
					lro_desc->vlan_tag);
	} else {
		if (lro_mgr->features & LRO_F_NAPI)
			netif_receive_skb(lro_desc->parent);
		else
			netif_rx(lro_desc->parent);
	}

	LRO_INC_STATS(lro_mgr, flushed);
	lro_clear_desc(lro_desc);
}
static void flush_session(struct mlx4_en_priv *priv,
			  struct mlx4_en_ipfrag *session,
			  u16 more)
{
	struct sk_buff *skb = session->fragments;
	struct iphdr *iph = ip_hdr(skb);
	struct net_device *dev = skb->dev;

	/* Update IP length and checksum */
	iph->tot_len = htons(session->total_len);
	iph->frag_off = htons(more | (session->offset >> 3));
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	if (session->vlan) {
		if (priv->mdev->profile.use_napi)
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						 be16_to_cpu(session->sl_vid));
		else
			vlan_hwaccel_rx(skb, priv->vlgrp,
					be16_to_cpu(session->sl_vid));
	} else {
		if (priv->mdev->profile.use_napi)
			netif_receive_skb(skb);
		else
			netif_rx(skb);
	}
	dev->last_rx = jiffies;
	session->fragments = NULL;
	session->last = NULL;
}
int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		     unsigned int vlan_tci, struct sk_buff *skb)
{
	if (netpoll_rx_on(skb))
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
				  struct sk_buff *skb,
				  struct vlan_group *vgrp,
				  u16 vlan_tag,
				  void *priv)
{
	if (__lro_proc_skb(lro_mgr, skb, vgrp, vlan_tag, priv)) {
		if (lro_mgr->features & LRO_F_NAPI)
			vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
		else
			vlan_hwaccel_rx(skb, vgrp, vlan_tag);
	}
}
static int
mvswitch_mangle_rx(struct sk_buff *skb, int napi)
{
	struct mvswitch_priv *priv;
	struct net_device *dev;
	int vlan = -1;
	unsigned char *buf;
	int i;

	dev = skb->dev;
	if (!dev)
		goto error;

	priv = dev->phy_ptr;
	if (!priv)
		goto error;

	if (!priv->grp)
		goto error;

#ifdef HEADER_MODE
	buf = skb->data;
	skb_pull(skb, MV_HEADER_SIZE);
#else
	buf = skb->data + skb->len - MV_TRAILER_SIZE;
	if (buf[0] != 0x80)
		goto error;
#endif

	/* look for the vlan matching the incoming port */
	for (i = 0; i < ARRAY_SIZE(priv->vlans); i++) {
		if ((1 << buf[1]) & priv->vlans[i])
			vlan = i;
	}

	if (vlan == -1)
		goto error;

	skb->protocol = eth_type_trans(skb, skb->dev);

	if (napi)
		return vlan_hwaccel_receive_skb(skb, priv->grp, vlan);
	else
		return vlan_hwaccel_rx(skb, priv->grp, vlan);

error:
	/* no vlan? eat the packet! */
	dev_kfree_skb_any(skb);
	return 0;
}
int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
		   unsigned int vlan_tci)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return NET_RX_DROP;

	if (netpoll_rx_on(skb)) {
		skb->protocol = eth_type_trans(skb, skb->dev);
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
	}

	return napi_frags_finish(napi, skb,
				 vlan_gro_common(napi, grp, vlan_tci, skb));
}
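/*
 * Hedged usage sketch, not taken from any in-tree driver: how a NAPI poll
 * handler of this kernel generation might hand a hardware-stripped VLAN tag
 * to the GRO path via vlan_gro_receive() above, falling back to
 * netif_receive_skb() for untagged frames.  The names my_priv,
 * my_rx_one_sketch() and the tag_stripped/tci parameters are hypothetical;
 * only the vlan_gro_receive()/netif_receive_skb() calls follow the
 * signatures shown in this section.
 */
#include <linux/netdevice.h>
#include <linux/if_vlan.h>

struct my_priv {			/* hypothetical driver private data */
	struct net_device *netdev;
	struct vlan_group *vlgrp;	/* set from the vlan_rx_register hook */
};

static int my_rx_one_sketch(struct napi_struct *napi, struct my_priv *priv,
			    struct sk_buff *skb, bool tag_stripped, u16 tci)
{
	skb->protocol = eth_type_trans(skb, priv->netdev);

	if (priv->vlgrp && tag_stripped)
		/* GRO-aware VLAN acceleration path */
		return vlan_gro_receive(napi, priv->vlgrp, tci, skb);

	/* No VLAN group registered or no stripped tag: plain receive */
	return netif_receive_skb(skb);
}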
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
					 be16_to_cpu(desc->opts2 & 0xffff));
	} else
#endif
		netif_receive_skb(skb);
}
void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr,
				    struct skb_frag_struct *frags,
				    int len, int true_size,
				    struct vlan_group *vgrp,
				    u16 vlan_tag, void *priv, __wsum sum)
{
	struct sk_buff *skb;

	skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
				 vlan_tag, priv, sum);
	if (!skb)
		return;

	if (lro_mgr->features & LRO_F_NAPI)
		vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
	else
		vlan_hwaccel_rx(skb, vgrp, vlan_tag);
}
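/*
 * Hedged configuration sketch, assumptions rather than code from a specific
 * driver: how the inet_lro manager that lro_flush(),
 * lro_vlan_hwaccel_receive_skb() and lro_vlan_hwaccel_receive_frags()
 * operate on is typically set up.  my_adapter, my_setup_lro(),
 * my_get_skb_header() and MY_MAX_LRO_DESCRIPTORS are hypothetical; the
 * net_lro_mgr fields, LRO_F_NAPI and the LRO_IPV4/LRO_TCP header flags come
 * from <linux/inet_lro.h>.  The header callback assumes the driver calls
 * lro_receive_skb() after eth_type_trans(), so skb->data points at the IP
 * header.
 */
#include <linux/inet_lro.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/in.h>

#define MY_MAX_LRO_DESCRIPTORS 8		/* hypothetical sizing */

struct my_adapter {				/* hypothetical driver state */
	struct net_device *netdev;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[MY_MAX_LRO_DESCRIPTORS];
};

/* Driver callback that locates the IP and TCP headers for aggregation. */
static int my_get_skb_header(struct sk_buff *skb, void **iphdr,
			     void **tcph, u64 *hdr_flags, void *priv)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
		return -1;

	*iphdr = iph;
	*tcph = (u8 *)iph + (iph->ihl << 2);
	*hdr_flags = LRO_IPV4 | LRO_TCP;
	return 0;
}

static void my_setup_lro(struct my_adapter *adapter)
{
	struct net_lro_mgr *mgr = &adapter->lro_mgr;

	mgr->dev = adapter->netdev;
	mgr->features = LRO_F_NAPI;		/* deliver via netif_receive_skb */
	mgr->ip_summed = CHECKSUM_UNNECESSARY;
	mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	mgr->max_desc = MY_MAX_LRO_DESCRIPTORS;
	mgr->max_aggr = 32;			/* cap segments per aggregated packet */
	mgr->lro_arr = adapter->lro_desc;
	mgr->get_skb_header = my_get_skb_header;
}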
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor "
				  "syndrom:%d syndrom:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				priv->port_stats.rx_chksum_good++;
				/* This packet is eligible for LRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (dev->features & NETIF_F_GRO) {
					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
					if (!gro_skb)
						goto next;

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, skb_shinfo(gro_skb)->frags,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					skb_shinfo(gro_skb)->nr_frags = nr;
					gro_skb->len = length;
					gro_skb->data_len = length;
					gro_skb->truesize += length;
					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

					if (priv->vlgrp && (cqe->vlan_my_qpn &
							    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)))
						vlan_gro_frags(&cq->napi, priv->vlgrp,
							       be16_to_cpu(cqe->sl_vid));
					else
						napi_gro_frags(&cq->napi);

					goto next;
				}

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget */
			goto out;
		}
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {
		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);
		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive: hand the frame to the stack */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (enic->vlan_group && vlan_stripped) {
			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);
		} else {
			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);
		}
	} else {

		/* Incomplete or truncated frame: drop it */

		dev_kfree_skb_any(skb);
	}
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			mlx4_err(mdev, "CQE completed in error - vendor "
				  "syndrom:%d syndrom:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(priv->rx_csum)) {
			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
			    (cqe->checksum == 0xffff)) {
				priv->port_stats.rx_chksum_good++;
				if (mdev->profile.num_lro &&
				    !mlx4_en_lro_rx(priv, ring, rx_desc,
						    skb_frags, length, cqe))
					goto next;

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
				INC_PERF_COUNTER(priv->pstats.lro_misses);
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

		dev->last_rx = jiffies;

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			if (mdev->profile.num_lro)
				mlx4_en_lro_flush(priv, ring, 0);
			goto out;
		}
	}

	/* If CQ is empty flush all LRO sessions unconditionally */
	if (mdev->profile.num_lro)
		mlx4_en_lro_flush(priv, ring, 1);

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	if (unlikely(!ring->full)) {
		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
				  ring->prod - polled, polled);
		mlx4_en_fill_rx_buf(dev, ring);
	}
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}