Code example #1
File: rte_eth_xenvirt.c Project: goby/dpdk
static uint16_t
eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	struct rte_mbuf *txm;
	uint16_t nb_used, nb_tx, num, i;
	int error;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *snd_pkts[VIRTIO_MBUF_BURST_SZ];
	struct pmd_internals *pi = txvq->internals;

	nb_tx = 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = VIRTQUEUE_NUSED(txvq);

	rte_compiler_barrier();   /* rmb */

	num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
	num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);

	for (i = 0; i < num; i++) {
		/* mergeable buffers not supported; one segment only */
		rte_pktmbuf_free_seg(snd_pkts[i]);
	}

	while (nb_tx < nb_pkts) {
		if (likely(!virtqueue_full(txvq))) {
			/* TODO: drop tx_pkts if it contains multiple segments */
			txm = tx_pkts[nb_tx];
			error = virtqueue_enqueue_xmit(txvq, txm);
			if (unlikely(error)) {
				if (error == ENOSPC)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0\n");
				else if (error == EMSGSIZE)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1\n");
				else
					PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d\n", error);
				break;
			}
			nb_tx++;
		} else {
			PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
			/* virtqueue_notify not needed in our para-virt solution */
			break;
		}
	}
	pi->eth_stats.opackets += nb_tx;
	return nb_tx;
}
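For orientation, a PMD burst-TX routine like eth_xenvirt_tx() is not called directly by applications; it is reached through rte_eth_tx_burst(), which may accept fewer packets than requested. A minimal caller sketch follows; PORT_ID, QUEUE_ID and the retry policy are assumptions for illustration, not part of the driver.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define PORT_ID  0   /* assumed port, for illustration only */
#define QUEUE_ID 0   /* assumed TX queue */

static void
send_burst(struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0;

	/* The PMD may take fewer than n packets (e.g. when the virtqueue
	 * is full), so resubmit the remainder. A production caller would
	 * bound the retries and free whatever could not be sent. */
	while (sent < n)
		sent += rte_eth_tx_burst(PORT_ID, QUEUE_ID,
					 pkts + sent, n - sent);
}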
Code example #2
File: vmxnet3_rxtx.c Project: gongleiarei/dpdk
static void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	struct rte_mbuf *mbuf;
	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
		(comp_ring->base + comp_ring->next2proc);

	while (tcd->gen == comp_ring->gen) {
		/* Release cmd_ring descriptor and free mbuf */
		VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
		while (txq->cmd_ring.next2comp != tcd->txdIdx) {
			mbuf = txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m;
			txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m = NULL;
			rte_pktmbuf_free_seg(mbuf);

			/* Mark the txd for which tcd was generated as completed */
			vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
			completed++;
		}

		vmxnet3_comp_ring_adv_next2proc(comp_ring);
		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
						    comp_ring->next2proc);
	}

	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
Code example #3
File: vmxnet3_rxtx.c Project: AMildner/MoonGen
static inline void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	struct rte_mbuf *mbuf;
	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
		(comp_ring->base + comp_ring->next2proc);

	while (tcd->gen == comp_ring->gen) {

		/* Release cmd_ring descriptor and free mbuf */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
		VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
#endif
		mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
		if (unlikely(mbuf == NULL))
			rte_panic("EOP desc does not point to a valid mbuf");
		else
			rte_pktmbuf_free(mbuf);

		txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;
		/* Mark the txd for which tcd was generated as completed */
		vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

		vmxnet3_comp_ring_adv_next2proc(comp_ring);
		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
						    comp_ring->next2proc);
		completed++;
	}

	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
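Both completion loops above walk the ring with the generation-bit convention: the device toggles a gen bit in each descriptor when it wraps, and the consumer only owns descriptors whose gen matches its expectation. A minimal generic sketch of that protocol, with nothing vmxnet3-specific (all names here are illustrative):

#include <stdint.h>

struct gen_desc {
	uint32_t payload;
	uint8_t gen;        /* toggled by the producer on each ring wrap */
};

struct gen_ring {
	struct gen_desc *base;
	uint32_t size;
	uint32_t next2proc; /* next descriptor to examine */
	uint8_t gen;        /* generation the consumer currently expects */
};

static void
gen_ring_drain(struct gen_ring *r)
{
	struct gen_desc *d = &r->base[r->next2proc];

	/* A descriptor belongs to the consumer only while its gen bit
	 * matches the ring's current generation; anything else is not
	 * yet written (or is stale from the previous lap). */
	while (d->gen == r->gen) {
		/* ... consume d->payload here ... */

		if (++r->next2proc == r->size) {
			r->next2proc = 0;
			r->gen ^= 1; /* flip the expectation on wrap */
		}
		d = &r->base[r->next2proc];
	}
}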
Code example #4
File: vmxnet3_rxtx.c Project: AMildner/MoonGen
static void
vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
{
	uint32_t avail = 0;

	if (txq == NULL)
		return;

	PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p.",
		   txq->cmd_ring.base, txq->comp_ring.base);
	PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx.",
		   (unsigned long)txq->cmd_ring.basePA,
		   (unsigned long)txq->comp_ring.basePA);

	avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
	PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)txq->cmd_ring.size, avail,
		   txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
}
Code example #5
File: bnx2x_rxtx.c Project: 0day-ci/dpdk
static uint16_t
bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_tx_queue *txq;
	struct bnx2x_softc *sc;
	struct bnx2x_fastpath *fp;
	uint32_t burst, nb_tx;
	struct rte_mbuf **m = tx_pkts;
	int ret;

	txq = p_txq;
	sc = txq->sc;
	fp = &sc->fp[txq->queue_id];

	nb_tx = nb_pkts;

	do {
		burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);

		ret = bnx2x_tx_encap(txq, m, burst);
		if (unlikely(ret)) {
			PMD_TX_LOG(ERR, "tx_encap failed!");
		}

		bnx2x_update_fp_sb_idx(fp);

		if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) {
			bnx2x_txeof(sc, fp);
		}

		if (unlikely(ret == -ENOMEM)) {
			break;
		}

		m += burst;
		nb_pkts -= burst;

	} while (nb_pkts);

	return nb_tx - nb_pkts;
}
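The bnx2x routine splits the caller's burst into chunks of at most RTE_PMD_BNX2X_TX_MAX_BURST and stops early on -ENOMEM, reporting how many packets were actually taken. The same idiom, reduced to its essentials (MAX_BURST and do_encap() are placeholders, not bnx2x API):

#include <errno.h>
#include <stdint.h>
#include <rte_common.h> /* RTE_MIN */

#define MAX_BURST 32 /* stand-in for RTE_PMD_BNX2X_TX_MAX_BURST */

/* Stub for illustration; the real driver calls bnx2x_tx_encap() here
 * and gets -ENOMEM back when the ring has no room left. */
static int
do_encap(void **pkts, uint32_t burst)
{
	(void)pkts;
	(void)burst;
	return 0;
}

static uint32_t
xmit_in_chunks(void **pkts, uint32_t nb_pkts)
{
	uint32_t left = nb_pkts;

	while (left) {
		uint32_t burst = RTE_MIN(left, (uint32_t)MAX_BURST);

		if (do_encap(pkts, burst) == -ENOMEM)
			break; /* out of descriptors; report partial count */

		pkts += burst;
		left -= burst;
	}
	return nb_pkts - left; /* packets actually handed to the ring */
}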
Code example #6
File: vmxnet3_rxtx.c Project: gongleiarei/dpdk
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	vmxnet3_tx_queue_t *txq = tx_queue;
	struct vmxnet3_hw *hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {
		Vmxnet3_GenericDesc *gdesc;
		vmxnet3_buf_info_t *tbi;
		uint32_t first2fill, avail, dw2;
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		struct rte_mbuf *m_seg = txm;

		/* If the packet is excessively fragmented, drop it */
		if (unlikely(txm->nb_segs > VMXNET3_MAX_TXD_PER_PKT)) {
			++txq->stats.drop_too_many_segs;
			++txq->stats.drop_total;
			rte_pktmbuf_free(txm);
			++nb_tx;
			continue;
		}

		/* Is command ring full? */
		avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
		if (txm->nb_segs > avail) {
			++txq->stats.tx_ring_full;
			break;
		}

		/* use the previous gen bit for the SOP desc */
		dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
		first2fill = txq->cmd_ring.next2fill;
		do {
			/* Remember the transmit buffer for cleanup */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->m = m_seg;

			/* NB: the following assumes that the VMXNET3 maximum
			 * transmit buffer size (16K) is greater than the
			 * maximum mbuf segment size. */
			gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
			gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
			gdesc->dword[2] = dw2 | m_seg->data_len;
			gdesc->dword[3] = 0;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

			/* use the right gen for non-SOP desc */
			dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
		} while ((m_seg = m_seg->next) != NULL);

		/* Update the EOP descriptor */
		gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;

		/* Add VLAN tag if present */
		gdesc = txq->cmd_ring.base + first2fill;
		if (txm->ol_flags & PKT_TX_VLAN_PKT) {
			gdesc->txd.ti = 1;
			gdesc->txd.tci = txm->vlan_tci;
		}

		/* TODO: Add transmit checksum offload here */

		/* flip the GEN bit on the SOP */
		rte_compiler_barrier();
		gdesc->dword[2] ^= VMXNET3_TXD_GEN;

		txq->shared->ctrl.txNumDeferred++;
		nb_tx++;
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);

	if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {

		txq->shared->ctrl.txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}
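This variant is the only one above that accepts multi-segment mbufs: the do/while walks m_seg->next, writes one descriptor per segment, and flips the SOP gen bit only after the whole chain is described. A hedged sketch of building such a chain with the mbuf API (mp is an assumed, already-created mempool; recent DPDK can also use rte_pktmbuf_chain() for the linking step):

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static struct rte_mbuf *
make_two_seg_pkt(struct rte_mempool *mp, uint16_t seg_len)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *tail = rte_pktmbuf_alloc(mp);

	if (head == NULL || tail == NULL) {
		rte_pktmbuf_free(head); /* free(NULL) is a no-op */
		rte_pktmbuf_free(tail);
		return NULL;
	}

	/* Reserve seg_len bytes of payload in each segment. */
	if (rte_pktmbuf_append(head, seg_len) == NULL ||
	    rte_pktmbuf_append(tail, seg_len) == NULL) {
		rte_pktmbuf_free(head);
		rte_pktmbuf_free(tail);
		return NULL;
	}

	/* Link the segments and fix up the chain-wide fields that the
	 * segment loop in vmxnet3_xmit_pkts() relies on. */
	head->next = tail;
	head->nb_segs = 2;
	head->pkt_len = head->data_len + tail->data_len;

	return head;
}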
Code example #7
File: vmxnet3_rxtx.c Project: AMildner/MoonGen
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (txq->stopped) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {

		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {

			txm = tx_pkts[nb_tx];
			/* Scattered packets are not supported yet; drop them */
			if (txm->pkt.nb_segs != 1) {
				PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
				rte_pktmbuf_free(tx_pkts[nb_tx]);
				txq->stats.drop_total++;

				nb_tx++;
				continue;
			}

			/* cur_mtu excludes the Ethernet header, so allow for it here */
			if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(tx_pkts[nb_tx]);
				txq->stats.drop_total++;

				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			txd->addr = tbi->bufPA;
			txd->len = txm->pkt.data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* finally flip the GEN bit of the SOP desc  */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
			nb_tx++;

		} else {
			PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
			txq->stats.drop_total += (nb_pkts - nb_tx);
			break;
		}
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);

	if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {

		txq->shared->ctrl.txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}
Code example #8
File: vmxnet3_rxtx.c Project: carriercomm/trex-core
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {

		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
			int copy_size = 0;

			txm = tx_pkts[nb_tx];
			/* Scattered packets are not supported yet; convert or drop them */
			if (txm->nb_segs != 1) {
				if (vmxnet3_xmit_convert_callback) {
					txm = vmxnet3_xmit_convert_callback(txm);
				} else {
					txq->stats.drop_total++;
					nb_tx++;
					rte_pktmbuf_free(txm);
					continue;
				}
			}

			if (!txm) {
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			/* cur_mtu excludes the Ethernet header, so allow for it here */
			if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(txm);
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
			if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
				struct Vmxnet3_TxDataDesc *tdd;

				tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
				copy_size = rte_pktmbuf_pkt_len(txm);
				rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
			}

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			if (copy_size)
				txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
							txq->cmd_ring.next2fill *
							sizeof(struct Vmxnet3_TxDataDesc));
			else
				txd->addr = tbi->bufPA;
			txd->len = txm->data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Add VLAN tag if requested */
			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
				txd->ti = 1;
				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
			}

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* finally flip the GEN bit of the SOP desc  */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
			nb_tx++;

		} else {
			PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
			txq->stats.drop_total += (nb_pkts - nb_tx);
			break;
		}
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);

	if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {
		txq->shared->ctrl.txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}