/* Reclaim every in-flight descriptor via the caller-supplied buf_clean
 * callback, then reset the ring software state to the hardware's
 * current fetch_index.
 */
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;
	unsigned int count = rq->ring.desc_count;

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {

		(*buf_clean)(rq, buf);

		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) {	/* check for hardware gone */
		/* Hardware surprise removal: reset fetch_index */
		fetch_index = 0;
	}
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	vnic_dev_clear_desc_ring(&rq->ring);
}
/* Alternate variant of vnic_rq_clean: asserts the RQ is already
 * disabled before cleaning, uses a fixed per-block entry count, and
 * also resets the software buf_index.
 */
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;

	BUG_ON(ioread32(&rq->ctrl->enable));

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {

		(*buf_clean)(rq, buf);

		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;

	vnic_dev_clear_desc_ring(&rq->ring);
}
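/*
 * Hedged usage sketch (not taken from the driver source): vnic_rq_clean()
 * delegates per-buffer teardown to the callback, which must release
 * whatever each vnic_rq_buf points at.  Assuming the DPDK case where
 * buf->os_buf holds an rte_mbuf, a minimal callback could look like the
 * hypothetical example_free_rq_buf() below; the real driver's callback
 * name and any DMA-unmapping details may differ.
 */
static void example_free_rq_buf(struct vnic_rq *rq __rte_unused,
	struct vnic_rq_buf *buf)
{
	/* Release the mbuf attached to this descriptor slot, if any */
	if (buf->os_buf) {
		rte_pktmbuf_free((struct rte_mbuf *)buf->os_buf);
		buf->os_buf = NULL;
	}
}

/* ...invoked once per RQ on teardown, e.g.:
 *	vnic_rq_clean(&enic->rq[i], example_free_rq_buf);
 */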
/* Bring the port up: publish link state, fill the receive rings, enable
 * all WQs/RQs, and register and unmask the error interrupt.
 */
int enic_enable(struct enic *enic)
{
	unsigned int index;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed. "
			"Flow director feature will not work\n");

	/* Fill RQ bufs */
	for (index = 0; index < enic->rq_count; index++) {
		vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);

		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
			dev_err(enic, "Unable to alloc receive buffers\n");
			return -1;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		vnic_wq_enable(&enic->wq[index]);
	for (index = 0; index < enic->rq_count; index++)
		vnic_rq_enable(&enic->rq[index]);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	vnic_intr_unmask(&enic->intr);

	return 0;
}
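/*
 * Hedged sketch (assumption, not the driver's actual code): enic_enable()
 * is the kind of routine a PMD would invoke from its dev_start hook,
 * after queues and interrupts have been allocated and before the first
 * rx/tx burst.  The name my_dev_start and the error mapping below are
 * illustrative only.
 */
static int my_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = eth_dev->data->dev_private;

	/* enic_enable() fills the RQs, enables WQs/RQs and unmasks the
	 * error interrupt; a non-zero return means the port is unusable.
	 */
	if (enic_enable(enic) != 0)
		return -EIO;

	return 0;
}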
/* Service up to budget completions on the RQ's completion queue, then
 * refill the ring.  Returns -1 if the refill could not post a single
 * buffer; *work_done reports how many completions were processed.
 */
int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
	unsigned int budget, unsigned int *work_done)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	unsigned int cq = enic_cq_rq(enic, rq->index);
	int err = 0;

	*work_done = vnic_cq_service(&enic->cq[cq],
		budget, enic_rq_service, (void *)rx_pkts);

	if (*work_done) {
		vnic_rq_fill(rq, enic_rq_alloc_buf);

		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(rq) == 0) {
			dev_err(enic, "Unable to alloc receive buffers\n");
			err = -1;
		}
	}

	return err;
}
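/*
 * Hedged sketch (assumption): a receive-burst wrapper around enic_poll().
 * Per-packet completion handling happens inside vnic_cq_service() via the
 * enic_rq_service callback, so the burst function only needs to pass the
 * mbuf array through and report how many entries were filled in.  The
 * name my_recv_pkts is illustrative.
 */
static uint16_t my_recv_pkts(void *rx_queue,
	struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
	unsigned int work_done = 0;

	/* enic_poll() writes completed packets into rx_pkts and refills
	 * the ring; even if the refill fails (-1), the packets already
	 * harvested are valid, so report work_done either way.
	 */
	(void)enic_poll(rq, rx_pkts, nb_pkts, &work_done);

	return (uint16_t)work_done;
}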