static int enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
        struct rte_mbuf *mb;
        struct rq_enet_desc *rqd = rq->ring.descs;
        unsigned i;
        dma_addr_t dma_addr;

        dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n",
                  rq->index, rq->ring.desc_count);

        for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
                mb = rte_rxmbuf_alloc(rq->mp);
                if (mb == NULL) {
                        dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
                                (unsigned)rq->index);
                        return -ENOMEM;
                }

                dma_addr = (dma_addr_t)(mb->buf_physaddr +
                                        RTE_PKTMBUF_HEADROOM);

                rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
                                 mb->buf_len - RTE_PKTMBUF_HEADROOM);
                rq->mbuf_ring[i] = mb;
        }

        /* make sure all prior writes are complete before doing the PIO write */
        rte_wmb();

        /* Post all but the last 2 cache lines' worth of descriptors */
        rq->posted_index = rq->ring.desc_count -
                (2 * RTE_CACHE_LINE_SIZE / sizeof(struct rq_enet_desc));
        rq->rx_nb_hold = 0;

        dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
                  enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
        iowrite32(rq->posted_index, &rq->ctrl->posted_index);
        rte_rmb();

        return 0;
}
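/*
 * All four routines in this section allocate through rte_rxmbuf_alloc(). At
 * this point in DPDK's history there is no shared version of the helper; each
 * PMD carries its own static inline copy, roughly as sketched below. This is
 * the common pattern only; the raw-alloc and sanity-check names are
 * assumptions that may vary slightly between releases.
 */
static inline struct rte_mbuf *rte_rxmbuf_alloc(struct rte_mempool *mp)
{
        struct rte_mbuf *m;

        /* Take an mbuf from the pool without resetting its data fields */
        m = __rte_mbuf_raw_alloc(mp);
        __rte_mbuf_sanity_check_raw(m, 0);

        return m;
}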
static uint16_t
eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct virtqueue *rxvq = q;
        struct rte_mbuf *rxm, *new_mbuf;
        uint16_t nb_used, num;
        uint32_t len[VIRTIO_MBUF_BURST_SZ];
        uint32_t i;
        struct pmd_internals *pi = rxvq->internals;

        nb_used = VIRTQUEUE_NUSED(rxvq);
        rte_compiler_barrier(); /* rmb */
        num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
        num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ?
                         num : VIRTIO_MBUF_BURST_SZ);
        if (unlikely(num == 0))
                return 0;

        num = virtqueue_dequeue_burst(rxvq, rx_pkts, len, num);
        PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
        for (i = 0; i < num; i++) {
                rxm = rx_pkts[i];
                PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
                rxm->next = NULL;
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->data_len = (uint16_t)(len[i] -
                                sizeof(struct virtio_net_hdr));
                rxm->nb_segs = 1;
                rxm->port = pi->port_id;
                rxm->pkt_len = (uint32_t)(len[i] -
                               sizeof(struct virtio_net_hdr));
        }

        /* allocate new mbufs for the used descriptors */
        while (likely(!virtqueue_full(rxvq))) {
                new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
                if (unlikely(new_mbuf == NULL))
                        break;
                if (unlikely(virtqueue_enqueue_recv_refill(rxvq, new_mbuf))) {
                        rte_pktmbuf_free_seg(new_mbuf);
                        break;
                }
        }

        pi->eth_stats.ipackets += num;

        return num;
}
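/*
 * eth_xenvirt_rx() is installed as the PMD's rx_pkt_burst callback, so an
 * application reaches it through the generic burst API. A minimal,
 * hypothetical caller (port and queue ids chosen for illustration; needs
 * rte_ethdev.h):
 */
static void poll_rx_once(uint8_t port_id)
{
        struct rte_mbuf *pkts[VIRTIO_MBUF_BURST_SZ];
        uint16_t i, nb_rx;

        nb_rx = rte_eth_rx_burst(port_id, 0, pkts, VIRTIO_MBUF_BURST_SZ);
        for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]); /* stand-in for real processing */
}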
/*
 * Allocates mbufs and clusters. Post rx descriptors with buffer details
 * so that the device can receive packets in those buffers.
 * Ring layout:
 *      Among the two rings, the 1st ring contains buffers of type 0 and
 *      type 1. bufs_per_pkt is set such that for non-LRO cases all the
 *      buffers required by a frame will fit in the 1st ring (1st buf of
 *      type 0 and rest of type 1).
 *      The 2nd ring contains buffers of type 1 alone and is mostly used
 *      only for LRO.
 */
static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
        int err = 0;
        uint32_t i = 0, val = 0;
        struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];

        if (ring_id == 0) {
                /* Usually: One HEAD type buf per packet
                 * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
                 * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
                 */

                /* We use single packet buffer so all heads here */
                val = VMXNET3_RXD_BTYPE_HEAD;
        } else {
                /* All BODY type buffers for 2nd ring */
                val = VMXNET3_RXD_BTYPE_BODY;
        }

        while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
                struct Vmxnet3_RxDesc *rxd;
                struct rte_mbuf *mbuf;
                vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];

                rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);

                /* Allocate blank mbuf for the current Rx Descriptor */
                mbuf = rte_rxmbuf_alloc(rxq->mp);
                if (unlikely(mbuf == NULL)) {
                        PMD_RX_LOG(ERR, "Error allocating mbuf in %s", __func__);
                        rxq->stats.rx_buf_alloc_failure++;
                        err = ENOMEM;
                        break;
                }

                /*
                 * Load mbuf pointer into buf_info[ring_size];
                 * the buf_info structure is the analogue of the cookie in a
                 * virtio virtqueue.
                 */
                buf_info->m = mbuf;
                buf_info->len = (uint16_t)(mbuf->buf_len -
                                           RTE_PKTMBUF_HEADROOM);
                buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);

                /* Load Rx Descriptor with the buffer's GPA */
                rxd->addr = buf_info->bufPA;

                /* After this point rxd->addr MUST not be NULL */
                rxd->btype = val;
                rxd->len = buf_info->len;
                /* Flip gen bit at the end to change ownership */
                rxd->gen = ring->gen;

                vmxnet3_cmd_ring_adv_next2fill(ring);
                i++;
        }

        /* Return an error only if no buffers are posted at present */
        if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
                return -err;
        else
                return i;
}
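/*
 * The mixed return convention above (-err when nothing at all could be
 * posted, otherwise the number of buffers posted) is consumed by the
 * queue-start/refill path roughly as sketched here. The producer-register
 * write hands the freshly filled descriptors to the device; rxprod_reg[]
 * and the register macros are assumed from the same driver and may differ
 * by release.
 */
static int vmxnet3_refill_ring(struct vmxnet3_hw *hw, vmxnet3_rx_queue_t *rxq,
                               uint8_t ring_id)
{
        if (vmxnet3_post_rx_bufs(rxq, ring_id) <= 0)
                return -1; /* nothing posted; leave the device alone */

        /* Publish next2fill as the ring's new producer index */
        VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_id] +
                               rxq->queue_id * VMXNET3_REG_ALIGN,
                               rxq->cmd_ring[ring_id].next2fill);
        return 0;
}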
static void
virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
{
        struct rte_mbuf *m;
        int i, nbufs, error, size = vq->vq_nentries;
        struct vring *vr = &vq->vq_ring;
        uint8_t *ring_mem = vq->vq_ring_virt_mem;

        PMD_INIT_FUNC_TRACE();

        /*
         * Reinitialise since virtio port might have been stopped and restarted
         */
        memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
        vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
        vq->vq_used_cons_idx = 0;
        vq->vq_desc_head_idx = 0;
        vq->vq_avail_idx = 0;
        vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
        vq->vq_free_cnt = vq->vq_nentries;
        memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

        /* Chain all the descriptors in the ring with an END */
        for (i = 0; i < size - 1; i++)
                vr->desc[i].next = (uint16_t)(i + 1);
        vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

        /*
         * Disable device (host) interrupting the guest
         */
        virtqueue_disable_intr(vq);

        /* Only rx virtqueue needs mbufs to be allocated at initialization */
        if (queue_type == VTNET_RQ) {
                if (vq->mpool == NULL)
                        rte_exit(EXIT_FAILURE,
                                 "Cannot allocate initial mbufs for rx virtqueue");

                /* Allocate blank mbufs for each rx descriptor */
                nbufs = 0;
                error = ENOSPC;
                while (!virtqueue_full(vq)) {
                        m = rte_rxmbuf_alloc(vq->mpool);
                        if (m == NULL)
                                break;

                        /* Enqueue allocated buffers */
                        error = virtqueue_enqueue_recv_refill(vq, m);
                        if (error) {
                                rte_pktmbuf_free(m);
                                break;
                        }
                        nbufs++;
                }

                /* Publish the shadow avail index to the host (this includes
                 * the write barrier) */
                vq_update_avail_idx(vq);

                PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

                VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
                                   vq->vq_queue_index);
                VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
                                   vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
        } else if (queue_type == VTNET_TQ) {