Example #1
static void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	struct rte_mbuf *mbuf;
	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
		(comp_ring->base + comp_ring->next2proc);

	while (tcd->gen == comp_ring->gen) {
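		/* Completions stamped by the device carry the current
		 * generation bit; this descriptor is valid only while it
		 * matches comp_ring->gen. */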
		/* Release cmd_ring descriptor and free mbuf */
		VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
		while (txq->cmd_ring.next2comp != tcd->txdIdx) {
			mbuf = txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m;
			txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m = NULL;
			rte_pktmbuf_free_seg(mbuf);

			/* Mark the txd for which tcd was generated as completed */
			vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
			completed++;
		}
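		/*
		 * next2comp now equals tcd->txdIdx: the EOP slot itself is
		 * reclaimed only when a later completion walks past it, so
		 * the most recent mbuf stays held until then.
		 */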

		vmxnet3_comp_ring_adv_next2proc(comp_ring);
		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
						    comp_ring->next2proc);
	}

	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
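
The completion loop above leans on the driver's ring-advance helpers. Below is a minimal, self-contained sketch of the gen-bit convention they implement (the type and names are illustrative stand-ins, not the driver's actual vmxnet3_comp_ring_t or vmxnet3_comp_ring_adv_next2proc):

#include <stdint.h>

/* Illustrative stand-in for the completion ring bookkeeping. */
struct demo_comp_ring {
	uint32_t next2proc;	/* next completion slot to examine */
	uint32_t size;		/* number of descriptors in the ring */
	uint8_t  gen;		/* generation bit software expects */
};

/*
 * Step past a consumed completion descriptor. On wrap-around the
 * expected gen bit flips: the device stamps each completion with the
 * current generation, so stale descriptors from the previous lap stop
 * satisfying "tcd->gen == comp_ring->gen" and the processing loop ends.
 */
static inline void
demo_comp_ring_adv_next2proc(struct demo_comp_ring *ring)
{
	if (++ring->next2proc == ring->size) {
		ring->next2proc = 0;
		ring->gen ^= 1;
	}
}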
Example #2
static inline void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	struct rte_mbuf *mbuf;
	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
		(comp_ring->base + comp_ring->next2proc);

	while (tcd->gen == comp_ring->gen) {

		/* Release cmd_ring descriptor and free mbuf */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
		VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
#endif
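		/*
		 * The transmit path records the mbuf at the descriptor this
		 * completion points to, so one rte_pktmbuf_free() here
		 * releases the whole packet.
		 */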
		mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
		if (unlikely(mbuf == NULL))
			rte_panic("EOP desc does not point to a valid mbuf");
		else
			rte_pktmbuf_free(mbuf);

		txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;
		/* Mark the txd for which tcd was generated as completed */
		vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

		vmxnet3_comp_ring_adv_next2proc(comp_ring);
		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
						    comp_ring->next2proc);
		completed++;
	}

	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
Example #3
/*
 * Process the Rx Completion Ring of given vmxnet3_rx_queue
 * for nb_pkts burst and return the number of packets received
 */
uint16_t
vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_rx;
	uint32_t nb_rxd, idx;
	uint8_t ring_idx;
	vmxnet3_rx_queue_t *rxq;
	Vmxnet3_RxCompDesc *rcd;
	vmxnet3_buf_info_t *rbi;
	Vmxnet3_RxDesc *rxd;
	struct rte_mbuf *rxm = NULL;
	struct vmxnet3_hw *hw;

	nb_rx = 0;
	ring_idx = 0;
	nb_rxd = 0;
	idx = 0;

	rxq = rx_queue;
	hw = rxq->hw;

	rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;

	if (unlikely(rxq->stopped)) {
		PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
		return 0;
	}

	while (rcd->gen == rxq->comp_ring.gen) {
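		/* The device posts each completion with the current
		 * generation bit; the descriptor is ours only while it
		 * matches the ring's gen. */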
		if (nb_rx >= nb_pkts)
			break;

		idx = rcd->rxdIdx;
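		/* rqID selects between the queue's two command rings:
		 * qid1 maps to ring 0, the second ID to ring 1. */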
		ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
		rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
		rbi = rxq->cmd_ring[ring_idx].buf_info + idx;

		if (unlikely(rcd->sop != 1 || rcd->eop != 1)) {
			rte_pktmbuf_free_seg(rbi->m);
			PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)");
			goto rcd_done;
		}

		PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);

		VMXNET3_ASSERT(rcd->len <= rxd->len);
		VMXNET3_ASSERT(rbi->m);

		if (unlikely(rcd->len == 0)) {
			PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)",
				   ring_idx, idx);
			VMXNET3_ASSERT(rcd->sop && rcd->eop);
			rte_pktmbuf_free_seg(rbi->m);
			goto rcd_done;
		}

		/* We expect the whole packet in a single HEAD buffer */
		if (unlikely(rxd->btype != VMXNET3_RXD_BTYPE_HEAD)) {
			PMD_RX_LOG(DEBUG,
				   "Alert : Misbehaving device, incorrect "
				   "buffer type used. Packet dropped.");
			rte_pktmbuf_free_seg(rbi->m);
			goto rcd_done;
		}
		VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);

		/* Get the packet buffer pointer from buf_info */
		rxm = rbi->m;

		/* Clear descriptor associated buf_info to be reused */
		rbi->m = NULL;
		rbi->bufPA = 0;

		/* Update the index that we received a packet */
		rxq->cmd_ring[ring_idx].next2comp = idx;

		/* For RCD with EOP set, check if there is frame error */
		if (unlikely(rcd->err)) {
			rxq->stats.drop_total++;
			rxq->stats.drop_err++;

			if (!rcd->fcs) {
				rxq->stats.drop_fcs++;
				PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
			}
			PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
				   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
					 rxq->comp_ring.base), rcd->rxdIdx);
			rte_pktmbuf_free_seg(rxm);
			goto rcd_done;
		}

		/* Initialize newly received packet buffer */
		rxm->port = rxq->port_id;
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = (uint16_t)rcd->len;
		rxm->data_len = (uint16_t)rcd->len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		vmxnet3_rx_offload(rcd, rxm);

		rx_pkts[nb_rx++] = rxm;
rcd_done:
		rxq->cmd_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);

		/* It's time to allocate some new buf and renew descriptors */
		vmxnet3_post_rx_bufs(rxq, ring_idx);
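		/* Publish the new fill index when the device asks for
		 * producer updates so it can use the refilled buffers. */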
		if (unlikely(rxq->shared->ctrl.updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
					       rxq->cmd_ring[ring_idx].next2fill);
		}

		/* Advance to the next descriptor in comp_ring */
		vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);

		rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
		nb_rxd++;
		if (nb_rxd > rxq->cmd_ring[0].size) {
			PMD_RX_LOG(ERR,
				   "Used up quota of receiving packets,"
				   " relinquish control.");
			break;
		}
	}

	return nb_rx;
}
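
For context, vmxnet3_recv_pkts is reached through DPDK's generic burst API once the port is configured and started. A minimal polling caller might look like the following sketch (poll_rx_queue, its port/queue parameters, and the byte counting are illustrative, not part of the driver):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Illustrative poll loop: drain one burst, count the bytes, free the mbufs. */
static uint64_t
poll_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint64_t bytes = 0;
	uint16_t i, nb_rx;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
	for (i = 0; i < nb_rx; i++) {
		bytes += rte_pktmbuf_pkt_len(pkts[i]);
		rte_pktmbuf_free(pkts[i]);
	}

	return bytes;
}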
Example #4
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (txq->stopped) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {

		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {

			txm = tx_pkts[nb_tx];
			/* Scatter packets are not supported yet; drop them */
			if (txm->pkt.nb_segs != 1) {
				PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
				rte_pktmbuf_free(tx_pkts[nb_tx]);
				txq->stats.drop_total++;

				nb_tx++;
				continue;
			}

			/* cur_mtu excludes the Ethernet header, so allow for it here */
			if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(tx_pkts[nb_tx]);
				txq->stats.drop_total++;

				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			txd->addr = tbi->bufPA;
			txd->len = txm->pkt.data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* Finally flip the GEN bit of the SOP desc: written last so
			 * the device never sees a half-filled descriptor */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
			nb_tx++;

		} else {
			PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
			txq->stats.drop_total += (nb_pkts - nb_tx);
			break;
		}
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);

	if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {
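		/* A BAR0 register write is a costly VM exit, so the TXPROD
		 * doorbell is batched: it is rung only once txThreshold
		 * descriptors have been deferred. */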

		txq->shared->ctrl.txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}
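
Since vmxnet3_xmit_pkts can accept fewer packets than requested (it stops as soon as cmd ring descriptors run out), callers must handle partial bursts. Below is a minimal sketch of such a caller (send_burst is an illustrative helper, not part of the driver):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative: retry the burst until accepted, then free any leftovers. */
static void
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0, ret;

	/* Each retry also lets the PMD reap completed descriptors. */
	while (sent < n) {
		ret = rte_eth_tx_burst(port_id, queue_id,
				       pkts + sent, n - sent);
		if (ret == 0)
			break;
		sent += ret;
	}

	/* Drop whatever the driver never accepted. */
	while (sent < n)
		rte_pktmbuf_free(pkts[sent++]);
}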
Example #5
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {

		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
			int copy_size = 0;

			txm = tx_pkts[nb_tx];
			/*
			 * No multi-segment support in this path: if a
			 * linearization callback is registered, let it rebuild
			 * the chain as a single mbuf; otherwise drop the packet.
			 */
			if (txm->nb_segs != 1) {
				if (vmxnet3_xmit_convert_callback) {
					txm = vmxnet3_xmit_convert_callback(txm);
				} else {
					txq->stats.drop_total++;
					nb_tx++;
					rte_pktmbuf_free(txm);
					continue;
				}
			}

			/* txm is NULL if the conversion callback failed */
			if (txm == NULL) {
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			/* cur_mtu excludes the Ethernet header, so allow for it here */
			if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(txm);
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
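			/* Copy small packets into the per-queue data ring so
			 * the device reads them from a single pre-mapped area
			 * instead of DMA-ing a tiny mbuf buffer. */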
			if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
				struct Vmxnet3_TxDataDesc *tdd;

				tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
				copy_size = rte_pktmbuf_pkt_len(txm);
				rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
			}

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			if (copy_size)
				txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
							txq->cmd_ring.next2fill *
							sizeof(struct Vmxnet3_TxDataDesc));
			else
				txd->addr = tbi->bufPA;
			txd->len = txm->data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Add VLAN tag if requested */
			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
				txd->ti = 1;
				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
			}

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* Finally flip the GEN bit of the SOP desc: written last so
			 * the device never sees a half-filled descriptor */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
			nb_tx++;

		} else {