Code example #1
File: virtio_rxtx.c  Project: AMildner/MoonGen
static int
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
{
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->pkt.nb_segs;
	uint16_t needed = 1 + seg_num;
	uint16_t head_idx, idx;
	uint16_t head_size = txvq->hw->vtnet_hdr_size;

	if (unlikely(txvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(txvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	head_idx = txvq->vq_desc_head_idx;
	if (unlikely(head_idx >= txvq->vq_nentries))
		return -EFAULT;

	idx = head_idx;
	dxp = &txvq->vq_descx[idx];
	if (dxp->cookie != NULL)
		rte_pktmbuf_free(dxp->cookie);
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = txvq->vq_ring.desc;
	start_dp[idx].addr =
		txvq->virtio_net_hdr_mem + idx * head_size;
	start_dp[idx].len = (uint32_t)head_size;
	start_dp[idx].flags = VRING_DESC_F_NEXT;

	for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
		idx = start_dp[idx].next;
		start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
		start_dp[idx].len   = cookie->pkt.data_len;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		cookie = cookie->pkt.next;
	}

	start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
	idx = start_dp[idx].next;
	txvq->vq_desc_head_idx = idx;
	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		txvq->vq_desc_tail_idx = idx;
	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
	vq_update_avail_ring(txvq, head_idx);

	return 0;
}
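
For orientation, here is a minimal sketch of how an enqueue helper like the one above is typically driven from a PMD burst-transmit loop. The wrapper name and the stop-on-first-failure policy are illustrative assumptions, not part of the driver shown above; the real driver would additionally publish the new avail index and notify the device as needed.

/* Hypothetical burst-transmit wrapper around virtqueue_enqueue_xmit();
 * the function name and error policy are illustrative only. */
static uint16_t
example_virtio_xmit_burst(struct virtqueue *txvq, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;

	while (nb_tx < nb_pkts) {
		/* virtqueue_enqueue_xmit() returns 0 on success and a
		 * negative errno (-ENOSPC/-EMSGSIZE/-EFAULT) on failure. */
		if (virtqueue_enqueue_xmit(txvq, tx_pkts[nb_tx]) != 0)
			break;
		nb_tx++;
	}

	/* A real driver would now publish the avail index and notify the
	 * device unless notifications are suppressed. */
	return nb_tx;
}
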
Code example #2
File: bnxt_rxr.c  Project: ANLAB-KAIST/dpdk
static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	struct rte_mbuf *data;

	data = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!data)
		return -ENOMEM;

	rx_buf->mbuf = data;

	rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));

	return 0;
}
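
As a usage sketch, a per-slot allocator like this is normally called once for every ring slot when the RX ring is first populated, and again whenever a received buffer is handed up the stack. The loop below is a hedged illustration; the wrapper name, the ring_size parameter, and the early-return policy are assumptions, not part of the bnxt driver.

/* Hypothetical initial-fill loop built on bnxt_alloc_rx_data();
 * ring_size and the early-return policy are illustrative assumptions. */
static int
example_bnxt_fill_rx_ring(struct bnxt_rx_queue *rxq,
			  struct bnxt_rx_ring_info *rxr, uint16_t ring_size)
{
	uint16_t prod;

	for (prod = 0; prod < ring_size; prod++) {
		/* Stop at the first allocation failure and report it;
		 * the caller decides whether a partial fill is acceptable. */
		if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0)
			return -ENOMEM;
	}

	return 0;
}
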
Code example #3
File: bnxt_txr.c  Project: qoriq-open-source/dpdk
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
				struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1;
	uint32_t vlan_tag_flags, cfa_action;
	bool long_bd = false;
	uint16_t last_prod = 0;
	struct rte_mbuf *m_seg;
	struct bnxt_sw_tx_bd *tx_buf;
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};

	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
		long_bd = true;

	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
	tx_buf->mbuf = tx_pkt;
	tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
	last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) &
				txr->tx_ring_struct->ring_mask;

	if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
		return -ENOMEM;

	txbd = &txr->tx_desc_ring[txr->tx_prod];
	txbd->opaque = txr->tx_prod;
	txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->len = tx_pkt->data_len;
	if (txbd->len >= 2048)	/* 2 KB and larger use the GTE2K length hint */
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[txbd->len >> 9];
	txbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf));

	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;
		cfa_action = 0;
		if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci;
			/* Currently supports 802.1Q and 802.1ad VLAN offloads;
			 * QinQ1, QinQ2, QinQ3 VLAN headers are deprecated.
			 */
			/* DPDK only supports 802.1Q VLAN packets */
			vlan_tag_flags |=
					TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}

		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

		txbd1 = (struct tx_bd_long_hi *)
					&txr->tx_desc_ring[txr->tx_prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		txbd1->cfa_action = cfa_action;

		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
			/* TSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO;
			txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len + tx_pkt->outer_l2_len +
					tx_pkt->outer_l3_len;
			txbd1->mss = tx_pkt->tso_segsz;

		} else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) {
			/* Outer IP, Inner IP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
			txbd1->mss = 0;
		} else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
			/* IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
			txbd1->mss = 0;
		} else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
			/* IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
			txbd1->mss = 0;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	m_seg = tx_pkt->next;
	/* tx_prod points at the last BD written so far; add one BD per remaining segment */
	while (txr->tx_prod != last_prod) {
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];

		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m_seg));
		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;

		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;

	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

	return 0;
}
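
A side note on the length-hint table above: because the four lhint_arr entries cover the 512-byte buckets below 2 KB, indexing with len >> 9 picks the right hint without branching. A standalone sketch of that selection, where the helper name and parameters are hypothetical:

/* Hypothetical helper mirroring the length-hint selection above:
 * packets below 2048 bytes fall into 512-byte buckets via len >> 9,
 * larger packets use the "greater than or equal to 2K" hint. */
static uint32_t
example_select_lhint(uint16_t len, const uint32_t lhint_arr[4],
		     uint32_t lhint_gte2k)
{
	if (len >= 2048)
		return lhint_gte2k;
	return lhint_arr[len >> 9];	/* 0-511 -> 0, 512-1023 -> 1, ... */
}
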
Code example #4
File: vmxnet3_rxtx.c  Project: ATCP/mtcp
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	vmxnet3_tx_queue_t *txq = tx_queue;
	struct vmxnet3_hw *hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {
		Vmxnet3_GenericDesc *gdesc;
		vmxnet3_buf_info_t *tbi;
		uint32_t first2fill, avail, dw2;
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		struct rte_mbuf *m_seg = txm;

		/* If the packet is excessively fragmented, drop it */
		if (unlikely(txm->nb_segs > VMXNET3_MAX_TXD_PER_PKT)) {
			++txq->stats.drop_too_many_segs;
			++txq->stats.drop_total;
			rte_pktmbuf_free(txm);
			++nb_tx;
			continue;
		}

		/* Is command ring full? */
		avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
		if (txm->nb_segs > avail) {
			++txq->stats.tx_ring_full;
			break;
		}

		/* use the previous gen bit for the SOP desc */
		dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
		first2fill = txq->cmd_ring.next2fill;
		do {
			/* Remember the transmit buffer for cleanup */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->m = m_seg;

			/* NB: the following assumes that VMXNET3 maximum
			   transmit buffer size (16K) is greater than
			   maximum sizeof mbuf segment size. */
			gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
			gdesc->txd.addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
			gdesc->dword[2] = dw2 | m_seg->data_len;
			gdesc->dword[3] = 0;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

			/* use the right gen for non-SOP desc */
			dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
		} while ((m_seg = m_seg->next) != NULL);

		/* Update the EOP descriptor */
		gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;

		/* Add VLAN tag if present */
		gdesc = txq->cmd_ring.base + first2fill;
		if (txm->ol_flags & PKT_TX_VLAN_PKT) {
			gdesc->txd.ti = 1;
			gdesc->txd.tci = txm->vlan_tci;
		}

		/* TODO: Add transmit checksum offload here */

		/* flip the GEN bit on the SOP */
		rte_compiler_barrier();
		gdesc->dword[2] ^= VMXNET3_TXD_GEN;

		txq->shared->ctrl.txNumDeferred++;
		nb_tx++;
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);

	if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {

		txq->shared->ctrl.txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}
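
The generation-bit handling above is worth calling out: every descriptor of the chain is written with the ring's current gen value except the SOP descriptor, which is first written with the inverted gen and only flipped after rte_compiler_barrier(), so the device never observes a partially written chain. Below is a minimal standalone sketch of that publish step; the struct layout, field names, and the GCC fence builtin are illustrative assumptions, not the real vmxnet3 definitions.

#include <stdint.h>

/* Illustrative descriptor with an ownership/generation bit packed into
 * dword2; this layout is an assumption, not the real vmxnet3 format. */
struct example_txdesc {
	uint64_t addr;
	uint32_t dword2;	/* gen bit plus length */
	uint32_t dword3;
};

#define EXAMPLE_TXD_GEN	(1u << 31)

/* Publish a fully written SOP descriptor by flipping its gen bit last,
 * after all earlier stores to the chain have been ordered before it. */
static void
example_publish_sop(volatile struct example_txdesc *sop)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* stand-in for rte_compiler_barrier() */
	sop->dword2 ^= EXAMPLE_TXD_GEN;
}
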
Code example #5
File: vmxnet3_rxtx.c  Project: AMildner/MoonGen
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (txq->stopped) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {

		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {

			txm = tx_pkts[nb_tx];
			/* Scattered (multi-segment) packets are not supported yet; drop them */
			if (txm->pkt.nb_segs != 1) {
				PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
				rte_pktmbuf_free(tx_pkts[nb_tx]);
				txq->stats.drop_total++;

				nb_tx++;
				continue;
			}

			/* cur_mtu excludes the Ethernet header, so allow up to cur_mtu + ETHER_HDR_LEN */
			if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(tx_pkts[nb_tx]);
				txq->stats.drop_total++;

				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			txd->addr = tbi->bufPA;
			txd->len = txm->pkt.data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* finally flip the GEN bit of the SOP desc  */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
			nb_tx++;

		} else {
			PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
			txq->stats.drop_total += (nb_pkts - nb_tx);
			break;
		}
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);

	if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {

		txq->shared->ctrl.txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}
Code example #6
File: vmxnet3_rxtx.c  Project: carriercomm/trex-core
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {

		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
			int copy_size = 0;

			txm = tx_pkts[nb_tx];
			/* Scattered (multi-segment) packets are not supported yet;
			 * hand them to the convert callback if one is registered,
			 * otherwise drop them.
			 */
			if (txm->nb_segs != 1) {
				if (vmxnet3_xmit_convert_callback) {
					txm = vmxnet3_xmit_convert_callback(txm);
				} else {
					txq->stats.drop_total++;
					nb_tx++;
					rte_pktmbuf_free(txm);
					continue;
				}
			}

			if (!txm) {
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			/* cur_mtu excludes the Ethernet header, so allow up to cur_mtu + ETHER_HDR_LEN */
			if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(txm);
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
			if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
				struct Vmxnet3_TxDataDesc *tdd;

				tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
				copy_size = rte_pktmbuf_pkt_len(txm);
				rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
			}

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			if (copy_size)
				txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
							txq->cmd_ring.next2fill *
							sizeof(struct Vmxnet3_TxDataDesc));
			else
				txd->addr = tbi->bufPA;
			txd->len = txm->data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Add VLAN tag if requested */
			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
				txd->ti = 1;
				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
			}

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* finally flip the GEN bit of the SOP desc  */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
			nb_tx++;

		} else {