void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq = (struct vnic_rq *)rxq;
	struct enic *enic = vnic_dev_priv(rq->vdev);

	vnic_rq_free(rq);
	vnic_cq_free(&enic->cq[rq->index]);
}
void enic_free_wq(void *txq)
{
	struct vnic_wq *wq = (struct vnic_wq *)txq;
	struct enic *enic = vnic_dev_priv(wq->vdev);

	vnic_wq_free(wq);
	/* WQ completion queues sit after the RQ completion queues in cq[] */
	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}
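/*
 * The CQ indexing used above (RQ completions first, then WQ completions)
 * is usually captured in small inline helpers. enic_cq_rq() is referenced
 * by enic_poll() later in this collection; these bodies are a sketch of
 * the assumed mapping, not verbatim driver code.
 */
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;	/* RQ n completes on CQ n */
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;	/* WQ CQs follow the RQ CQs */
}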
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
			   __rte_unused u8 type, u16 q_number,
			   u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	return vnic_rq_service(&enic->rq[q_number], cq_desc, completed_index,
			       VNIC_RQ_RETURN_DESC, enic_rq_indicate_buf,
			       opaque);
}
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
			   __rte_unused u8 type, u16 q_number,
			   u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_wq_service(&enic->wq[q_number], cq_desc, completed_index,
			enic_wq_free_buf, opaque);
	return 0;
}
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}
void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq = (struct vnic_rq *)rxq;
	struct enic *enic = vnic_dev_priv(rq->vdev);

	enic_rxmbuf_queue_release(enic, rq);
	rte_free(rq->mbuf_ring);
	rq->mbuf_ring = NULL;
	vnic_rq_free(rq);
	vnic_cq_free(&enic->cq[rq->index]);
}
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}
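/*
 * For context, a sketch (assumed, not verbatim from this collection) of
 * the outer handler that drives the continuation above: it walks each
 * RQ's completion queue and, whenever frames were consumed, replenishes
 * the ring via fnic_alloc_rq_frame() (shown later). vnic_rq_fill() and
 * fnic->rq_count are taken to exist as used elsewhere in these snippets.
 */
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		/* Service completions on CQ i with the callback above */
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			/* Refill the RQ with fresh receive frames */
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}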
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	/* Only the first (SOP) buffer of a packet was mapped with
	 * pci_map_single(); subsequent fragments were mapped as pages.
	 */
	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
				 buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			       buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}
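/*
 * enic_wq_free_buf(), referenced by both enic_wq_service() variants in
 * this collection, is not included here. A minimal sketch of the adapter
 * it is expected to be: it discards the CQ descriptor and opaque cookie
 * and matches the callback signature vnic_wq_service() requires.
 */
static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}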
static void enic_rq_drop_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb = buf->os_buf;

	if (skipped)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_FROMDEVICE);

	dev_kfree_skb_any(skb);
}
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}
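/*
 * Sketch (assumed, not verbatim) of the outer WQ completion handler that
 * drives the continuation above. It relies on the CQ layout used by
 * enic_free_wq() earlier: WQ completion queues follow the RQ completion
 * queues in cq[]. The field name raw_wq_count is an assumption.
 */
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++)
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count + i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);

	return wq_work_done;
}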
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	/* Restart the Tx queue once enough descriptors have been
	 * reclaimed to hold a maximally-fragmented skb.
	 */
	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	unsigned int len = enic->netdev->mtu + ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = enic_rq_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
				  len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index, dma_addr, len);

	return 0;
}
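/*
 * Both enic_rq_alloc_buf() variants in this collection are driven through
 * vnic_rq_fill() (see enic_poll() below and fnic_rq_cmpl_handler() above).
 * A minimal sketch of that helper, assuming vnic_rq_desc_avail() reports
 * the number of free ring descriptors:
 */
int vnic_rq_fill(struct vnic_rq *rq, int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	/* Keep posting buffers until the ring has no free descriptors */
	while (vnic_rq_desc_avail(rq) > 0) {
		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}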
int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
	      unsigned int budget, unsigned int *work_done)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	unsigned int cq = enic_cq_rq(enic, rq->index);
	int err = 0;

	*work_done = vnic_cq_service(&enic->cq[cq],
		budget, enic_rq_service, (void *)rx_pkts);

	if (*work_done) {
		vnic_rq_fill(rq, enic_rq_alloc_buf);

		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(rq) == 0) {
			dev_err(enic, "Unable to alloc receive buffers\n");
			err = -1;
		}
	}

	return err;
}
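/*
 * A hypothetical sketch of how enic_poll() could be hooked up as a DPDK
 * rx_burst handler. The name enicpmd_recv_pkts and the decision to return
 * harvested packets even when the refill fails are assumptions, not taken
 * from this collection.
 */
static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
	unsigned int work_done = 0;

	/* enic_poll() fills rx_pkts and reports the count via work_done;
	 * a non-zero return only signals a ring-refill failure, so any
	 * packets already harvested are still valid.
	 */
	(void)enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done);

	return (uint16_t)work_done;
}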
/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;
}
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc *cq_desc,
				    struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	if (eth_hdrs_stripped)
		fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
	else if (fnic_import_rq_eth_pkt(skb, bytes_written))
		goto drop;

	fp = (struct fc_frame *)skb;

	/*
	 * If frame is an ELS response that matches the cached FLOGI OX_ID,
	 * and is accept, issue flogi_reg_request copy wq request to firmware
	 * to register the S_ID and determine whether FC_OUI mode or GW mode.
	 */
	if (is_matching_flogi_resp_frame(fnic, fp)) {
		if (!eth_hdrs_stripped) {
			if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
				fnic_handle_flogi_resp(fnic, fp);
				return;
			}
			/*
			 * Recd. Flogi reject. No point registering
			 * with fw, but forward to libFC
			 */
			goto forward;
		}
		goto drop;
	}
	if (!eth_hdrs_stripped)
		goto drop;

forward:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	/* Use fr_flags to indicate whether succ. flogi resp or not */
	fr_flags(fp) = 0;
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}
static int enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque;
	struct rte_mbuf *rx_pkt = NULL;
	struct rte_mbuf *hdr_rx_pkt = NULL;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	rx_pkt = (struct rte_mbuf *)buf->os_buf;
	buf->os_buf = NULL;

	if (unlikely(packet_error)) {
		dev_err(enic, "packet error\n");
		rx_pkt->data_len = 0;
		return 0;
	}

	if (unlikely(skipped)) {
		rx_pkt->data_len = 0;
		return 0;
	}

	if (likely(!vnic_get_hdr_split_size(enic->vdev))) {
		/* No header split configured */
		*rx_pkt_bucket = rx_pkt;
		rx_pkt->pkt_len = bytes_written;

		if (ipv4) {
			rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
			if (!csum_not_calc) {
				if (unlikely(!ipv4_csum_ok))
					rx_pkt->ol_flags |=
						PKT_RX_IP_CKSUM_BAD;

				if ((tcp || udp) && (!tcp_udp_csum_ok))
					rx_pkt->ol_flags |=
						PKT_RX_L4_CKSUM_BAD;
			}
		} else if (ipv6)
			rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
	} else {
		/* Header split */
		if (sop && !eop) {
			/* This piece is header */
			*rx_pkt_bucket = rx_pkt;
			rx_pkt->pkt_len = bytes_written;
		} else {
			if (sop && eop) {
				/* The packet is smaller than split_hdr_size */
				*rx_pkt_bucket = rx_pkt;
				rx_pkt->pkt_len = bytes_written;
				if (ipv4) {
					rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
					if (!csum_not_calc) {
						if (unlikely(!ipv4_csum_ok))
							rx_pkt->ol_flags |=
							    PKT_RX_IP_CKSUM_BAD;

						if ((tcp || udp) &&
						    (!tcp_udp_csum_ok))
							rx_pkt->ol_flags |=
							    PKT_RX_L4_CKSUM_BAD;
					}
				} else if (ipv6)
					rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
			} else {
				/* Payload */
				hdr_rx_pkt = *rx_pkt_bucket;
				hdr_rx_pkt->pkt_len += bytes_written;
				if (ipv4) {
					hdr_rx_pkt->ol_flags |=
						PKT_RX_IPV4_HDR;
					if (!csum_not_calc) {
						if (unlikely(!ipv4_csum_ok))
							hdr_rx_pkt->ol_flags |=
							    PKT_RX_IP_CKSUM_BAD;

						if ((tcp || udp) &&
						    (!tcp_udp_csum_ok))
							hdr_rx_pkt->ol_flags |=
							    PKT_RX_L4_CKSUM_BAD;
					}
				} else if (ipv6)
					hdr_rx_pkt->ol_flags |=
						PKT_RX_IPV6_HDR;
			}
		}
	}

	rx_pkt->data_len = bytes_written;

	if (rss_hash) {
		rx_pkt->ol_flags |= PKT_RX_RSS_HASH;
		rx_pkt->hash.rss = rss_hash;
	}

	if (vlan_tci) {
		rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
		rx_pkt->vlan_tci = vlan_tci;
	}

	return eop;
}
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	dma_addr_t dma_addr;
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
	uint8_t type = RQ_ENET_TYPE_ONLY_SOP;
	u16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev);
	struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);
	struct rte_mbuf *hdr_mbuf = NULL;

	if (!mbuf) {
		dev_err(enic, "mbuf alloc in enic_rq_alloc_buf failed\n");
		return -1;
	}

	if (unlikely(split_hdr_size)) {
		/* Header split posts two descriptors per packet: a SOP
		 * descriptor for the header mbuf, then a non-SOP descriptor
		 * for the data mbuf.
		 */
		if (vnic_rq_desc_avail(rq) < 2) {
			rte_mempool_put(mbuf->pool, mbuf);
			return -1;
		}
		hdr_mbuf = enic_rxmbuf_alloc(rq->mp);
		if (!hdr_mbuf) {
			rte_mempool_put(mbuf->pool, mbuf);
			dev_err(enic,
				"hdr_mbuf alloc in enic_rq_alloc_buf failed\n");
			return -1;
		}

		hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;

		hdr_mbuf->nb_segs = 2;
		hdr_mbuf->port = rq->index;
		hdr_mbuf->next = mbuf;

		dma_addr = (dma_addr_t)
		    (hdr_mbuf->buf_physaddr + hdr_mbuf->data_off);

		rq_enet_desc_enc(desc, dma_addr, type, split_hdr_size);

		vnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/,
			dma_addr, (unsigned int)split_hdr_size, 0 /*wrid*/);

		desc = vnic_rq_next_desc(rq);
		type = RQ_ENET_TYPE_NOT_SOP;
	} else {
		mbuf->nb_segs = 1;
		mbuf->port = rq->index;
	}

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->next = NULL;

	dma_addr = (dma_addr_t)
	    (mbuf->buf_physaddr + mbuf->data_off);

	rq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len);

	vnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr,
		(unsigned int)mbuf->buf_len, 0 /*wrid*/);

	return 0;
}
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {
		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {
		/* Good receive: hand the frame to the stack, using VLAN
		 * acceleration and LRO paths where enabled.
		 */
		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (enic->vlan_group && vlan_stripped) {
			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);
		} else {
			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);
		}
	} else {
		/* Truncated or empty frame */
		dev_kfree_skb_any(skb);
	}
}
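/*
 * Sketch (assumed, not part of this collection) of the kernel-side
 * dispatch wrapper that pairs with the function above, analogous to the
 * DPDK enic_rq_service() shown earlier: it feeds each RQ completion
 * through vnic_rq_service() with enic_rq_indicate_buf as the per-buffer
 * callback.
 */
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}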