Code Example #1
File: vmxnet3_rxtx.c Project: gongleiarei/dpdk
static void
vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
{
	uint32_t avail = 0;

	if (rxq == NULL)
		return;

	PMD_RX_LOG(DEBUG,
		   "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.",
		   rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
	PMD_RX_LOG(DEBUG,
		   "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
		   (unsigned long)rxq->cmd_ring[0].basePA,
		   (unsigned long)rxq->cmd_ring[1].basePA,
		   (unsigned long)rxq->comp_ring.basePA);

	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
	PMD_RX_LOG(DEBUG,
		   "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)rxq->cmd_ring[0].size, avail,
		   rxq->comp_ring.next2proc,
		   rxq->cmd_ring[0].size - avail);

	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
	PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
		   rxq->cmd_ring[1].size - avail);

}
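The "free" and "queued" figures above come from vmxnet3_cmd_ring_desc_avail(), which measures the gap between the fill and completion cursors of a command ring. A minimal sketch of how such a helper can be computed (the struct below is a stand-in that only carries the fields needed for the calculation; it is illustrative, not the actual DPDK ring header):

#include <stdint.h>

/* Illustrative stand-in for the command ring bookkeeping used above. */
struct cmd_ring_view {
	uint32_t next2fill;	/* next descriptor the driver will post */
	uint32_t next2comp;	/* next descriptor the device will hand back */
	uint32_t size;		/* total number of descriptors in the ring */
};

/* Free descriptors between the fill and completion cursors; one slot is
 * kept unused so a completely full ring can be told apart from an empty one.
 */
static inline uint32_t
cmd_ring_desc_avail(const struct cmd_ring_view *ring)
{
	return (ring->next2comp > ring->next2fill ?
		ring->next2comp - ring->next2fill :
		ring->size - ring->next2fill + ring->next2comp) - 1;
}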
Code Example #2
File: vmxnet3_rxtx.c Project: AMildner/MoonGen
static void
vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
{
	uint32_t avail = 0;

	if (txq == NULL)
		return;

	PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p.",
		   txq->cmd_ring.base, txq->comp_ring.base);
	PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx.",
		   (unsigned long)txq->cmd_ring.basePA,
		   (unsigned long)txq->comp_ring.basePA);

	avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
	PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)txq->cmd_ring.size, avail,
		   txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
}
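Both dump helpers only produce output when the driver's debug logging is compiled in; otherwise PMD_RX_LOG/PMD_TX_LOG expand to nothing. A sketch of how such a macro is commonly wired to RTE_LOG (the config symbol and format string follow the usual vmxnet3_logs.h pattern but are shown here as an assumption, not the exact header):

#include <rte_log.h>

#ifdef RTE_LIBRTE_VMXNET3_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
#endif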
Code Example #3
File: vmxnet3_rxtx.c Project: gongleiarei/dpdk
/*
 *  Allocates mbufs and clusters. Posts Rx descriptors with buffer details
 *  so that the device can receive packets into those buffers.
 *	Ring layout:
 *      Of the two rings, the 1st ring contains buffers of type 0 and type 1.
 *      bufs_per_pkt is set such that, for non-LRO cases, all the buffers
 *      required by a frame fit in the 1st ring (the 1st buf of type 0 and
 *      the rest of type 1). The 2nd ring contains buffers of type 1 only
 *      and is mostly used for LRO.
 */
static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
	int err = 0;
	uint32_t i = 0, val = 0;
	struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];

	if (ring_id == 0) {
		/* Usually: One HEAD type buf per packet
		 * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
		 * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
		 */

		/* We use a single buffer per packet, so all descriptors here are HEAD type */
		val = VMXNET3_RXD_BTYPE_HEAD;
	} else {
		/* All BODY type buffers for 2nd ring */
		val = VMXNET3_RXD_BTYPE_BODY;
	}

	while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
		struct Vmxnet3_RxDesc *rxd;
		struct rte_mbuf *mbuf;
		vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];

		rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);

		/* Allocate blank mbuf for the current Rx Descriptor */
		mbuf = rte_rxmbuf_alloc(rxq->mp);
		if (unlikely(mbuf == NULL)) {
			PMD_RX_LOG(ERR, "Error allocating mbuf in %s", __func__);
			rxq->stats.rx_buf_alloc_failure++;
			err = ENOMEM;
			break;
		}

		/*
		 * Store the mbuf pointer in buf_info[next2fill].
		 * The buf_info entry is the equivalent of the virtio-virtqueue cookie.
		 */
		buf_info->m = mbuf;
		buf_info->len = (uint16_t)(mbuf->buf_len -
					   RTE_PKTMBUF_HEADROOM);
		buf_info->bufPA =
			rte_mbuf_data_dma_addr_default(mbuf);

		/* Load Rx Descriptor with the buffer's GPA */
		rxd->addr = buf_info->bufPA;

		/* After this point rxd->addr MUST not be NULL */
		rxd->btype = val;
		rxd->len = buf_info->len;
		/* Flip gen bit at the end to change ownership */
		rxd->gen = ring->gen;

		vmxnet3_cmd_ring_adv_next2fill(ring);
		i++;
	}

	/* Return error only if no buffers are posted at present */
	if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
		return -err;
	else
		return i;
}
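The return convention matters to callers: a negative errno comes back only when not a single buffer could be posted, otherwise the number of descriptors actually filled is returned even if an mbuf allocation failed part-way through. A hypothetical caller that refills both command rings of a queue might therefore look like this (the function name and error handling are illustrative, not the driver's actual init path):

/* Illustrative refill of the two command rings of one Rx queue:
 * ring 0 holds HEAD buffers, ring 1 holds BODY buffers (mainly for LRO). */
static int
refill_rx_queue(vmxnet3_rx_queue_t *rxq)
{
	uint8_t ring_id;

	for (ring_id = 0; ring_id < 2; ring_id++) {
		int ret = vmxnet3_post_rx_bufs(rxq, ring_id);

		/* Nothing could be posted on this ring; treat it as fatal here. */
		if (ret <= 0)
			return -1;
	}
	return 0;
}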
Code Example #4
File: vmxnet3_rxtx.c Project: gongleiarei/dpdk
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	vmxnet3_tx_queue_t *txq = tx_queue;
	struct vmxnet3_hw *hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {
		Vmxnet3_GenericDesc *gdesc;
		vmxnet3_buf_info_t *tbi;
		uint32_t first2fill, avail, dw2;
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		struct rte_mbuf *m_seg = txm;

		/* If this packet is excessively fragmented, drop it */
		if (unlikely(txm->nb_segs > VMXNET3_MAX_TXD_PER_PKT)) {
			++txq->stats.drop_too_many_segs;
			++txq->stats.drop_total;
			rte_pktmbuf_free(txm);
			++nb_tx;
			continue;
		}

		/* Is command ring full? */
		avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
		if (txm->nb_segs > avail) {
			++txq->stats.tx_ring_full;
			break;
		}

		/* use the previous gen bit for the SOP desc */
		dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
		first2fill = txq->cmd_ring.next2fill;
		do {
			/* Remember the transmit buffer for cleanup */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->m = m_seg;

			/* NB: the following assumes that the VMXNET3 maximum
			   transmit buffer size (16K) is greater than the
			   maximum mbuf segment size. */
			gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
			gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
			gdesc->dword[2] = dw2 | m_seg->data_len;
			gdesc->dword[3] = 0;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

			/* use the right gen for non-SOP desc */
			dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
		} while ((m_seg = m_seg->next) != NULL);

		/* Update the EOP descriptor */
		gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;

		/* Add VLAN tag if present */
		gdesc = txq->cmd_ring.base + first2fill;
		if (txm->ol_flags & PKT_TX_VLAN_PKT) {
			gdesc->txd.ti = 1;
			gdesc->txd.tci = txm->vlan_tci;
		}

		/* TODO: Add transmit checksum offload here */

		/* flip the GEN bit on the SOP */
		rte_compiler_barrier();
		gdesc->dword[2] ^= VMXNET3_TXD_GEN;

		txq->shared->ctrl.txNumDeferred++;
		nb_tx++;
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);

	if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {

		txq->shared->ctrl.txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}
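In an application this path is reached through the generic burst API; mbufs the driver could not queue (for example when the command ring fills up) are not counted in the return value and remain the caller's responsibility. A minimal, illustrative forwarding loop (the port ids, queue 0, and the burst size are assumptions):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Forward one burst from rx_port to tx_port on queue 0; anything the
 * transmit driver did not accept is freed so the mbufs are not leaked. */
static void
forward_burst(uint16_t rx_port, uint16_t tx_port)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, nb_tx;

	nb_rx = rte_eth_rx_burst(rx_port, 0, pkts, BURST_SIZE);
	nb_tx = rte_eth_tx_burst(tx_port, 0, pkts, nb_rx);

	while (nb_tx < nb_rx)
		rte_pktmbuf_free(pkts[nb_tx++]);
}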
Code Example #5
File: vmxnet3_rxtx.c Project: AMildner/MoonGen
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (txq->stopped) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {

		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {

			txm = tx_pkts[nb_tx];
			/* Scatter packets are not supported yet; drop them when encountered */
			if (txm->pkt.nb_segs != 1) {
				PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
				rte_pktmbuf_free(tx_pkts[nb_tx]);
				txq->stats.drop_total++;

				nb_tx++;
				continue;
			}

			/* The MTU does not include the Ethernet header, so allow for it here */
			if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(tx_pkts[nb_tx]);
				txq->stats.drop_total++;

				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			txd->addr = tbi->bufPA;
			txd->len = txm->pkt.data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* finally flip the GEN bit of the SOP desc  */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
			nb_tx++;

		} else {
			PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
			txq->stats.drop_total += (nb_pkts - nb_tx);
			break;
		}
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);

	if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {

		txq->shared->ctrl.txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}
Code Example #6
File: vmxnet3_rxtx.c Project: carriercomm/trex-core
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {

		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
			int copy_size = 0;

			txm = tx_pkts[nb_tx];
			/* Scatter packets are not supported yet; try the convert callback, else drop */
			if (txm->nb_segs != 1) {
				if (vmxnet3_xmit_convert_callback) {
					txm = vmxnet3_xmit_convert_callback(txm);
				} else {
					txq->stats.drop_total++;
					nb_tx++;
					rte_pktmbuf_free(txm);
					continue;
				}
			}

			if (!txm) {
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			/* The MTU does not include the Ethernet header, so allow for it here */
			if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(txm);
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
			if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
				struct Vmxnet3_TxDataDesc *tdd;

				tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
				copy_size = rte_pktmbuf_pkt_len(txm);
				rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
			}

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			if (copy_size)
				txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
							txq->cmd_ring.next2fill *
							sizeof(struct Vmxnet3_TxDataDesc));
			else
				txd->addr = tbi->bufPA;
			txd->len = txm->data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Add VLAN tag if requested */
			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
				txd->ti = 1;
				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
			}

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* finally flip the GEN bit of the SOP desc  */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
			nb_tx++;

		} else {