/*
 * inet_lro get_skb_header callback: locate the IPv4 and TCP headers of a
 * received frame so the LRO engine can decide whether to aggregate it.
 *
 * @skb:       received frame (data points at the IP header on entry)
 * @iphdr:     out: pointer to the IPv4 header
 * @tcph:      out: pointer to the TCP header
 * @hdr_flags: out: LRO_IPV4 | LRO_TCP when the frame qualifies
 * @priv:      completion descriptor for this frame (struct cq_enet_rq_desc)
 *
 * Returns 0 when the frame is an unfragmented IPv4/TCP packet with a
 * consistent IP total length, -1 otherwise (frame is not LRO-eligible).
 */
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	struct iphdr *ip;
	unsigned int ihl;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	/* Decode the completion descriptor to learn what the NIC parsed. */
	cq_enet_rq_desc_dec(cq_desc, &type, &color, &q_number,
		&completed_index, &ingress_port, &fcoe, &eop, &sop,
		&rss_type, &csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum, &fcoe_sof,
		&fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
		&tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4,
		&ipv4_fragment, &fcs_ok);

	/* Only unfragmented IPv4/TCP frames are eligible for LRO. */
	if (!ipv4 || !tcp || ipv4_fragment)
		return -1;

	skb_reset_network_header(skb);
	ip = ip_hdr(skb);
	ihl = ip_hdrlen(skb);
	skb_set_transport_header(skb, ihl);

	/* The IP total length must cover at least the IP and TCP headers. */
	if (ntohs(ip->tot_len) < ihl + tcp_hdrlen(skb))
		return -1;

	*iphdr = ip;
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;

	return 0;
}
static int enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped, void *opaque) { struct enic *enic = vnic_dev_priv(rq->vdev); struct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque; struct rte_mbuf *rx_pkt = NULL; struct rte_mbuf *hdr_rx_pkt = NULL; u8 type, color, eop, sop, ingress_port, vlan_stripped; u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; u8 packet_error; u16 q_number, completed_index, bytes_written, vlan_tci, checksum; u32 rss_hash; cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color, &q_number, &completed_index, &ingress_port, &fcoe, &eop, &sop, &rss_type, &csum_not_calc, &rss_hash, &bytes_written, &packet_error, &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, &fcs_ok); rx_pkt = (struct rte_mbuf *)buf->os_buf; buf->os_buf = NULL; if (unlikely(packet_error)) { dev_err(enic, "packet error\n"); rx_pkt->data_len = 0; return 0; } if (unlikely(skipped)) { rx_pkt->data_len = 0; return 0; } if (likely(!vnic_get_hdr_split_size(enic->vdev))) { /* No header split configured */ *rx_pkt_bucket = rx_pkt; rx_pkt->pkt_len = bytes_written; if (ipv4) { rx_pkt->ol_flags |= PKT_RX_IPV4_HDR; if (!csum_not_calc) { if (unlikely(!ipv4_csum_ok)) rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD; if ((tcp || udp) && (!tcp_udp_csum_ok)) rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD; } } else if (ipv6) rx_pkt->ol_flags |= PKT_RX_IPV6_HDR; } else { /* Header split */ if (sop && !eop) { /* This piece is header */ *rx_pkt_bucket = rx_pkt; rx_pkt->pkt_len = bytes_written; } else { if (sop && eop) { /* The packet is smaller than split_hdr_size */ *rx_pkt_bucket = rx_pkt; rx_pkt->pkt_len = bytes_written; if (ipv4) { rx_pkt->ol_flags |= PKT_RX_IPV4_HDR; if (!csum_not_calc) { if 
(unlikely(!ipv4_csum_ok)) rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD; if ((tcp || udp) && (!tcp_udp_csum_ok)) rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD; } } else if (ipv6) rx_pkt->ol_flags |= PKT_RX_IPV6_HDR; } else { /* Payload */ hdr_rx_pkt = *rx_pkt_bucket; hdr_rx_pkt->pkt_len += bytes_written; if (ipv4) { hdr_rx_pkt->ol_flags |= PKT_RX_IPV4_HDR; if (!csum_not_calc) { if (unlikely(!ipv4_csum_ok)) hdr_rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD; if ((tcp || udp) && (!tcp_udp_csum_ok)) hdr_rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD; } } else if (ipv6) hdr_rx_pkt->ol_flags |= PKT_RX_IPV6_HDR; } } } rx_pkt->data_len = bytes_written; if (rss_hash) { rx_pkt->ol_flags |= PKT_RX_RSS_HASH; rx_pkt->hash.rss = rss_hash; } if (vlan_tci) { rx_pkt->ol_flags |= PKT_RX_VLAN_PKT; rx_pkt->vlan_tci = vlan_tci; } return eop; }
/*
 * Per-buffer RQ completion callback (kernel netdev variant): unmap a
 * completed receive buffer, decode its completion descriptor, and either
 * hand the skb up the stack (via LRO, VLAN-accelerated, or plain receive
 * paths) or free it on error/partial completions.
 *
 * @rq:      receive queue the buffer came from
 * @cq_desc: completion descriptor (really a struct cq_enet_rq_desc)
 * @buf:     the RQ buffer holding the skb and its DMA mapping
 * @skipped: non-zero when this completion should be ignored entirely
 * @opaque:  unused here
 */
static void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	/* Warm the cache line the stack will touch first, then release the
	 * DMA mapping before reading any packet data.
	 */
	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
		PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color,
		&q_number, &completed_index, &ingress_port, &fcoe, &eop,
		&sop, &rss_type, &csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum, &fcoe_sof,
		&fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
		&tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4,
		&ipv4_fragment, &fcs_ok);

	if (packet_error) {
		/* Bad FCS with data means a corrupt frame; bad FCS with no
		 * data means the frame was truncated by the hardware.
		 */
		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}
		dev_kfree_skb_any(skb);
		return;
	}

	if (eop && bytes_written > 0) {
		/* Good, complete frame: deliver it. */
		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			/* NOTE(review): the hardware checksum is stored via
			 * htons() and announced as CHECKSUM_COMPLETE; confirm
			 * the descriptor's byte order matches what the stack
			 * expects in skb->csum.
			 */
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		/* Four delivery paths: {VLAN-stripped, plain} x {LRO-capable
		 * IPv4, everything else}.
		 */
		if (enic->vlan_group && vlan_stripped) {
			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group, vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);
		} else {
			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);
		}
	} else {
		/* Buffer did not contain a complete non-empty frame. */
		dev_kfree_skb_any(skb);
	}
}
/*
 * RQ completion callback for received FC/FCoE frames.  Unmaps the buffer,
 * decodes the completion (two possible descriptor formats), validates the
 * frame, special-cases FLOGI responses, and queues good frames for the
 * receive worker thread.
 *
 * @rq:      receive queue the buffer came from
 * @cq_desc: completion descriptor; its type field selects the real layout
 * @buf:     the RQ buffer holding the skb and its DMA mapping (ownership
 *           is taken here; buf->os_buf is cleared)
 * @skipped: unused
 * @opaque:  unused
 */
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped __attribute__((unused)), void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	/* Defaults chosen so the error check below passes for descriptor
	 * formats that do not report a given field.
	 */
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	/* Release the DMA mapping and take ownership of the skb. */
	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
		PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	buf->os_buf = NULL;

	/* Peek at the generic header to learn the real descriptor layout. */
	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		/* FCP descriptor: firmware already stripped the Ethernet/
		 * FCoE headers.
		 */
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc, &type,
			&color, &q_number, &completed_index, &eop, &sop,
			&fcoe_fc_crc_ok, &exchange_id, &tmpl,
			&fcp_bytes_written, &sof, &eof, &ingress_port,
			&packet_error, &fcoe_enc_error, &fcs_ok,
			&vlan_stripped, &vlan);
		eth_hdrs_stripped = 1;
	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		/* Raw Ethernet descriptor: frame still carries its
		 * Ethernet/FCoE encapsulation.
		 */
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type,
			&color, &q_number, &completed_index, &ingress_port,
			&fcoe, &eop, &sop, &rss_type, &csum_not_calc,
			&rss_hash, &bytes_written, &packet_error,
			&vlan_stripped, &vlan, &checksum, &fcoe_sof,
			&fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
			&tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
			&ipv4, &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
	} else {
		/* wrong CQ type*/
		shost_printk(KERN_ERR, fnic->lport->host,
			"fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	/* Drop frames with any hardware-reported integrity problem. */
	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic rq_cmpl fcoe x%x fcsok x%x"
			" pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			" x%x\n",
			fcoe, fcs_ok, packet_error, fcoe_fc_crc_ok,
			fcoe_enc_error);
		goto drop;
	}

	/* Normalize the skb into an FC frame for libFC. */
	if (eth_hdrs_stripped)
		fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
	else if (fnic_import_rq_eth_pkt(skb, bytes_written))
		goto drop;

	fp = (struct fc_frame *)skb;

	/*
	 * If frame is an ELS response that matches the cached FLOGI OX_ID,
	 * and is accept, issue flogi_reg_request copy wq request to firmware
	 * to register the S_ID and determine whether FC_OUI mode or GW mode.
	 */
	if (is_matching_flogi_resp_frame(fnic, fp)) {
		if (!eth_hdrs_stripped) {
			if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
				fnic_handle_flogi_resp(fnic, fp);
				return;
			}
			/*
			 * Recd. Flogi reject. No point registering
			 * with fw, but forward to libFC
			 */
			goto forward;
		}
		goto drop;
	}
	/* Non-FLOGI frames are only expected in raw Ethernet form here. */
	if (!eth_hdrs_stripped)
		goto drop;

forward:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		/* Link events stopped (e.g. teardown in progress). */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	/* Use fr_flags to indicate whether succ. flogi resp or not */
	fr_flags(fp) = 0;
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* Hand the frame to the receive worker for libFC processing. */
	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}