/*
 * Program the adapter's receive, transmit, completion, and interrupt
 * resources into their initial state.
 *
 * Each RQ/WQ is bound to its completion queue (via enic_cq_rq() /
 * enic_cq_wq()) with the error interrupt enabled at offset 0.  Device
 * statistics are cleared before the CQs are brought up, and finally the
 * interrupt resource is configured from the vNIC config with
 * mask-on-assertion enabled.
 */
void enic_init_vnic_resources(struct enic *enic)
{
	unsigned int err_intr_enable = 1;
	unsigned int err_intr_offset = 0;
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_init(&enic->rq[i],
			enic_cq_rq(enic, i),
			err_intr_enable,
			err_intr_offset);
	}

	for (i = 0; i < enic->wq_count; i++) {
		vnic_wq_init(&enic->wq[i],
			enic_cq_wq(enic, i),
			err_intr_enable,
			err_intr_offset);
	}

	/* Reset device counters before the completion queues come up */
	vnic_dev_stats_clear(enic->vdev);

	for (i = 0; i < enic->cq_count; i++) {
		vnic_cq_init(&enic->cq[i],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			0 /* interrupt_enable */,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			0 /* interrupt offset */,
			0 /* cq_message_addr */);
	}

	vnic_intr_init(&enic->intr,
		enic->config.intr_timer_usec,
		enic->config.intr_timer_type,
		/* mask_on_assertion */ 1);
}
int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts, unsigned int budget, unsigned int *work_done) { struct enic *enic = vnic_dev_priv(rq->vdev); unsigned int cq = enic_cq_rq(enic, rq->index); int err = 0; *work_done = vnic_cq_service(&enic->cq[cq], budget, enic_rq_service, (void *)rx_pkts); if (*work_done) { vnic_rq_fill(rq, enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(rq) == 0) { dev_err(enic, "Unable to alloc receive buffers\n"); err = -1; } } return err; }
/*
 * Return the number of completed receive descriptors pending on the
 * completion queue that backs the given Rx queue.
 *
 * The count is the distance from the software clean index (cq->to_clean)
 * to the hardware tail index read from the CQ control registers; when
 * the hardware tail has wrapped past the clean index, the ring size is
 * added to restore the ordering before subtracting.
 */
static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
	uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(dev);
	int sop_rq = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, sop_rq)];
	uint16_t clean_idx = cq->to_clean;
	uint32_t hw_tail = ioread32(&cq->ctrl->cq_tail);

	if (hw_tail < clean_idx)
		hw_tail += cq->ring.desc_count;

	return hw_tail - clean_idx;
}