Example #1
static int
test_pktmbuf_free_segment(void)
{
	unsigned i;
	struct rte_mbuf *m[NB_MBUF];
	int ret = 0;

	for (i=0; i<NB_MBUF; i++)
		m[i] = NULL;

	/* alloc NB_MBUF mbufs */
	for (i=0; i<NB_MBUF; i++) {
		m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
		if (m[i] == NULL) {
			printf("rte_pktmbuf_alloc() failed (%u)\n", i);
			ret = -1;
		}
	}

	/* free them */
	for (i=0; i<NB_MBUF; i++) {
		if (m[i] != NULL) {
			struct rte_mbuf *mb, *mt;

			mb = m[i];
			while(mb != NULL) {
				mt = mb;
				mb = mb->pkt.next;
				rte_pktmbuf_free_seg(mt);
			}
		}
	}

	return ret;
}
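Every example on this page leans on the same rule: rte_pktmbuf_free_seg() returns exactly one segment to its mempool, so a multi-segment packet has to be walked link by link, as the test above does through pkt.next. A minimal self-contained sketch of that pattern, assuming the newer mbuf layout where the link field is plain m->next (older releases, as in the example above, use m->pkt.next):

#include <rte_mbuf.h>

/* Sketch: free an entire packet chain one segment at a time.
 * Equivalent in effect to calling rte_pktmbuf_free() on the head. */
static void
free_chain_seg_by_seg(struct rte_mbuf *head)
{
	while (head != NULL) {
		struct rte_mbuf *next = head->next;

		rte_pktmbuf_free_seg(head);
		head = next;
	}
}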
Example #2
static void
ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
	unsigned i;
	struct ixgbe_tx_entry_v *txe;
	uint16_t nb_free, max_desc;

	if (txq->sw_ring != NULL) {
		/* release the used mbufs in sw_ring */
		nb_free = txq->nb_tx_free;
		max_desc = (uint16_t)(txq->nb_tx_desc - 1);
		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
		     nb_free < max_desc && i != txq->tx_tail;
		     i = (i + 1) & max_desc) {
			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
			/*
			 * Check for already freed packets.
			 * Note: ixgbe_tx_free_bufs does not NULL after free,
			 * so we actually have to check the reference count.
			 */
			if (txe->mbuf != NULL &&
					rte_mbuf_refcnt_read(txe->mbuf) != 0)
				rte_pktmbuf_free_seg(txe->mbuf);
		}
		/* reset tx_entry */
		for (i = 0; i < txq->nb_tx_desc; i++) {
			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
			txe->mbuf = NULL;
		}
	}
}
Example #3
static void
ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
	unsigned i;
	struct igb_tx_entry_v *txe;
	uint16_t nb_free, max_desc;

	if (txq->sw_ring != NULL) {
		/* release the used mbufs in sw_ring */
		nb_free = txq->nb_tx_free;
		max_desc = (uint16_t)(txq->nb_tx_desc - 1);
		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
		     nb_free < max_desc && i != txq->tx_tail;
		     i = (i + 1) & max_desc) {
			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
		}
		/* reset tx_entry */
		for (i = 0; i < txq->nb_tx_desc; i++) {
			txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
			txe->mbuf = NULL;
		}
	}
}
Example #4
static void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	struct rte_mbuf *mbuf;
	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
		(comp_ring->base + comp_ring->next2proc);

	while (tcd->gen == comp_ring->gen) {
		/* Release cmd_ring descriptor and free mbuf */
		VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
		while (txq->cmd_ring.next2comp != tcd->txdIdx) {
			mbuf = txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m;
			txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m = NULL;
			rte_pktmbuf_free_seg(mbuf);

			/* Mark the txd for which tcd was generated as completed */
			vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
			completed++;
		}

		vmxnet3_comp_ring_adv_next2proc(comp_ring);
		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
						    comp_ring->next2proc);
	}

	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
Example #5
void user_data_available_cbk(struct socket *sock)
{
	struct msghdr msg;
	struct iovec vec;
	struct sockaddr_in sockaddrin;
	struct rte_mbuf *mbuf;
	int i,dummy = 1;
	user_on_rx_opportunity_called++;
	memset(&vec,0,sizeof(vec));
	if(unlikely(sock == NULL))
	{
		return;
	}
	msg.msg_namelen = sizeof(sockaddrin);
	msg.msg_name = &sockaddrin;
	while(unlikely((i = kernel_recvmsg(sock, &msg,&vec, 1 /*num*/, 1448 /*size*/, 0 /*flags*/)) > 0))
	{
		dummy = 0;
		while(unlikely(mbuf = msg.msg_iov->head))
		{
			msg.msg_iov->head = msg.msg_iov->head->pkt.next;
			//printf("received %d\n",i);
			rte_pktmbuf_free_seg(mbuf);
		}
		//printf("received %d\n",i);
		memset(&vec,0,sizeof(vec));
		msg.msg_namelen = sizeof(sockaddrin);
		msg.msg_name = &sockaddrin;
	}
	if(dummy) {
		user_on_rx_opportunity_called_wo_result++;
	}
}
Example #6
/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to TX queue structure.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
txq_complete(struct txq *txq)
{
	unsigned int elts_comp = txq->elts_comp;
	unsigned int elts_tail = txq->elts_tail;
	unsigned int elts_free = txq->elts_tail;
	const unsigned int elts_n = txq->elts_n;
	int wcs_n;

	if (unlikely(elts_comp == 0))
		return 0;
#ifdef DEBUG_SEND
	DEBUG("%p: processing %u work requests completions",
	      (void *)txq, elts_comp);
#endif
	wcs_n = txq->poll_cnt(txq->cq, elts_comp);
	if (unlikely(wcs_n == 0))
		return 0;
	if (unlikely(wcs_n < 0)) {
		DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)",
		      (void *)txq, wcs_n);
		return -1;
	}
	elts_comp -= wcs_n;
	assert(elts_comp <= txq->elts_comp);
	/*
	 * Assume WC status is successful as nothing can be done about it
	 * anyway.
	 */
	elts_tail += wcs_n * txq->elts_comp_cd_init;
	if (elts_tail >= elts_n)
		elts_tail -= elts_n;

	while (elts_free != elts_tail) {
		struct txq_elt *elt = &(*txq->elts)[elts_free];
		unsigned int elts_free_next =
			(((elts_free + 1) == elts_n) ? 0 : elts_free + 1);
		struct rte_mbuf *tmp = elt->buf;
		struct txq_elt *elt_next = &(*txq->elts)[elts_free_next];

#ifndef NDEBUG
		/* Poisoning. */
		memset(elt, 0x66, sizeof(*elt));
#endif
		RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
		/* Faster than rte_pktmbuf_free(). */
		do {
			struct rte_mbuf *next = NEXT(tmp);

			rte_pktmbuf_free_seg(tmp);
			tmp = next;
		} while (tmp != NULL);
		elts_free = elts_free_next;
	}

	txq->elts_tail = elts_tail;
	txq->elts_comp = elts_comp;
	return 0;
}
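The header comment of txq_complete() above explains the batching trick: a completion is requested only once every MLX5_PMD_TX_PER_COMP_REQ sends, so each polled completion accounts for elts_comp_cd_init TX elements. The matching send-side countdown appears later in mlx5_tx_burst() (Example #19). As a standalone, driver-agnostic sketch of that idea (the struct and function names below are illustrative, not mlx5 or DPDK API):

/* Sketch: request a hardware TX completion once every `period` sends,
 * so the completion handler can release `period` elements per event. */
struct comp_countdown {
	unsigned int cd;      /* sends left before the next request */
	unsigned int period;  /* e.g. MLX5_PMD_TX_PER_COMP_REQ */
	unsigned int pending; /* completions requested, not yet polled */
};

static inline int
comp_countdown_tick(struct comp_countdown *c)
{
	if (--c->cd != 0)
		return 0;      /* no completion needed for this send */
	c->cd = c->period;     /* restart the countdown */
	c->pending++;          /* the completion handler consumes this */
	return 1;              /* caller sets its "signaled" send flag */
}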
Example #7
static uint16_t
eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	struct rte_mbuf *txm;
	uint16_t nb_used, nb_tx, num, i;
	int error;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *snd_pkts[VIRTIO_MBUF_BURST_SZ];
	struct pmd_internals *pi = txvq->internals;

	nb_tx = 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = VIRTQUEUE_NUSED(txvq);

	rte_compiler_barrier();   /* rmb */

	num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
	num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);

	for (i = 0; i < num ; i ++) {
		/* mergeable not supported, one segment only */
		rte_pktmbuf_free_seg(snd_pkts[i]);
	}

	while (nb_tx < nb_pkts) {
		if (likely(!virtqueue_full(txvq))) {
		/* TODO drop tx_pkts if it contains multiple segments */
			txm = tx_pkts[nb_tx];
			error = virtqueue_enqueue_xmit(txvq, txm);
			if (unlikely(error)) {
				if (error == ENOSPC)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0\n");
				else if (error == EMSGSIZE)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1\n");
				else
					PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d\n", error);
				break;
			}
			nb_tx++;
		} else {
			PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
			/* virtqueue_notify not needed in our para-virt solution */
			break;
		}
	}
	pi->eth_stats.opackets += nb_tx;
	return nb_tx;
}
Example #8
void user_transmitted_callback(struct rte_mbuf *mbuf, struct socket *sock)
{
	int last = /*(rte_mbuf_refcnt_read(mbuf) == 1)*/1;

	if ((sock) && (last)) {
		socket_satelite_data_t *socket_satelite_data = get_user_data(sock);

		if (socket_satelite_data) {
			/* printf("%s %d %p %d %d %d\n", __FILE__, __LINE__,
			 *        &g_ipaugenblick_sockets[socket_satelite_data->ringset_idx],
			 *        socket_satelite_data->ringset_idx,
			 *        rte_pktmbuf_data_len(mbuf),
			 *        rte_mbuf_refcnt_read(mbuf)); */
			user_increment_socket_tx_space(
				&g_ipaugenblick_sockets[socket_satelite_data->ringset_idx].tx_space,
				rte_pktmbuf_data_len(mbuf));
		}
	}
	rte_pktmbuf_free_seg(mbuf);
}
Example #9
void rte_pktmbuf_free(rte_mbuf_t *m)
{
	rte_mbuf_t *m_next;

	utl_rte_pktmbuf_check(m);

	while (m != NULL) {
		m_next = m->next;
		rte_pktmbuf_free_seg(m);
		m = m_next;
	}
}
Example #10
static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
{
	uint16_t next_rs, count = 0;

	next_rs = fifo_peek(&q->rs_tracker);
	if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
		return;

	/* the DONE flag is set on this descriptor so remove the ID
	 * from the RS bit tracker and free the buffers */
	fifo_remove(&q->rs_tracker);

	/* wrap around? if so, free buffers from last_free up to but NOT
	 * including nb_desc */
	if (q->last_free > next_rs) {
		count = q->nb_desc - q->last_free;
		while (q->last_free < q->nb_desc) {
			rte_pktmbuf_free_seg(q->sw_ring[q->last_free]);
			q->sw_ring[q->last_free] = NULL;
			++q->last_free;
		}
		q->last_free = 0;
	}

	/* adjust free descriptor count before the next loop */
	q->nb_free += count + (next_rs + 1 - q->last_free);

	/* free buffers from last_free, up to and including next_rs */
	while (q->last_free <= next_rs) {
		rte_pktmbuf_free_seg(q->sw_ring[q->last_free]);
		q->sw_ring[q->last_free] = NULL;
		++q->last_free;
	}

	if (q->last_free == q->nb_desc)
		q->last_free = 0;
}
Example #11
static void
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
		rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
Example #12
static void
enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < enic->config.rq_desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}
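Examples #2, #3, #11 and #12 above (and #15 below) all implement the same queue-teardown idiom: walk the software ring, free each still-valid mbuf with rte_pktmbuf_free_seg(), and clear the slot. Stripped of driver specifics, a minimal sketch (the parameters are illustrative, not a real DPDK ring structure):

#include <stddef.h>
#include <stdint.h>
#include <rte_mbuf.h>

/* Sketch: release every mbuf held in a driver's software ring and
 * NULL the slots so a later release pass cannot double-free them. */
static void
release_ring_mbufs(struct rte_mbuf **ring, uint16_t nb_desc)
{
	uint16_t i;

	if (ring == NULL)
		return;
	for (i = 0; i < nb_desc; i++) {
		if (ring[i] != NULL) {
			rte_pktmbuf_free_seg(ring[i]);
			ring[i] = NULL;
		}
	}
}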
Example #13
struct rte_mbuf *user_get_buffer(struct sock *sk,int *copy)
{
	struct rte_mbuf *mbuf;
	user_on_tx_opportunity_getbuff_called++;

	mbuf = app_glue_get_buffer();
	if (unlikely(mbuf == NULL)) {
		user_on_tx_opportunity_cannot_get_buff++;
		return NULL;
	}
	mbuf->pkt.data_len = (*copy) > 1448 ? 1448 : (*copy);
	*copy = mbuf->pkt.data_len;
	if(unlikely(mbuf->pkt.data_len == 0))
	{
		rte_pktmbuf_free_seg(mbuf);
		return NULL;
	}
	return mbuf;
}
Example #14
static uint16_t
eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *rxvq = q;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	uint32_t i;
	struct pmd_internals *pi = rxvq->internals;

	nb_used = VIRTQUEUE_NUSED(rxvq);

	rte_compiler_barrier(); /* rmb */
	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
	if (unlikely(num == 0)) return 0;

	num = virtqueue_dequeue_burst(rxvq, rx_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
	for (i = 0; i < num ; i ++) {
		rxm = rx_pkts[i];
		PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
		rxm->next = NULL;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
		rxm->nb_segs = 1;
		rxm->port = pi->port_id;
		rxm->pkt_len  = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
	}
	/* allocate new mbuf for the used descriptor */
	while (likely(!virtqueue_full(rxvq))) {
		new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			break;
		}
		if (unlikely(virtqueue_enqueue_recv_refill(rxvq, new_mbuf))) {
			rte_pktmbuf_free_seg(new_mbuf);
			break;
		}
	}
	pi->eth_stats.ipackets += num;
	return num;
}
Example #15
static void
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	unsigned int i;
	struct ixgbe_tx_entry_v *txe;
	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);

	if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
		return;

	/* release the used mbufs in sw_ring */
	for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
	     i != txq->tx_tail;
	     i = (i + 1) & max_desc) {
		txe = &txq->sw_ring_v[i];
		rte_pktmbuf_free_seg(txe->mbuf);
	}
	txq->nb_tx_free = max_desc;

	/* reset tx_entry */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txe = &txq->sw_ring_v[i];
		txe->mbuf = NULL;
	}
}
Example #16
static inline uint16_t
reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end =  rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len)
					end->data_len -= rxq->crc_len;
				else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
					end = secondlast;
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}
Example #17
/*
 * Process the Rx Completion Ring of given vmxnet3_rx_queue
 * for nb_pkts burst and return the number of packets received
 */
uint16_t
vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_rx;
	uint32_t nb_rxd, idx;
	uint8_t ring_idx;
	vmxnet3_rx_queue_t *rxq;
	Vmxnet3_RxCompDesc *rcd;
	vmxnet3_buf_info_t *rbi;
	Vmxnet3_RxDesc *rxd;
	struct rte_mbuf *rxm = NULL;
	struct vmxnet3_hw *hw;

	nb_rx = 0;
	ring_idx = 0;
	nb_rxd = 0;
	idx = 0;

	rxq = rx_queue;
	hw = rxq->hw;

	rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;

	if (unlikely(rxq->stopped)) {
		PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
		return 0;
	}

	while (rcd->gen == rxq->comp_ring.gen) {
		if (nb_rx >= nb_pkts)
			break;

		idx = rcd->rxdIdx;
		ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
		rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
		rbi = rxq->cmd_ring[ring_idx].buf_info + idx;

		if (unlikely(rcd->sop != 1 || rcd->eop != 1)) {
			rte_pktmbuf_free_seg(rbi->m);
			PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)");
			goto rcd_done;
		}

		PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);

		VMXNET3_ASSERT(rcd->len <= rxd->len);
		VMXNET3_ASSERT(rbi->m);

		if (unlikely(rcd->len == 0)) {
			PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)",
				   ring_idx, idx);
			VMXNET3_ASSERT(rcd->sop && rcd->eop);
			rte_pktmbuf_free_seg(rbi->m);
			goto rcd_done;
		}

		/* Assuming a packet is coming in a single packet buffer */
		if (unlikely(rxd->btype != VMXNET3_RXD_BTYPE_HEAD)) {
			PMD_RX_LOG(DEBUG,
				   "Alert: misbehaving device, incorrect "
				   "buffer type used. Packet dropped.");
			rte_pktmbuf_free_seg(rbi->m);
			goto rcd_done;
		}
		VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);

		/* Get the packet buffer pointer from buf_info */
		rxm = rbi->m;

		/* Clear descriptor associated buf_info to be reused */
		rbi->m = NULL;
		rbi->bufPA = 0;

		/* Update the index that we received a packet */
		rxq->cmd_ring[ring_idx].next2comp = idx;

		/* For RCD with EOP set, check if there is frame error */
		if (unlikely(rcd->err)) {
			rxq->stats.drop_total++;
			rxq->stats.drop_err++;

			if (!rcd->fcs) {
				rxq->stats.drop_fcs++;
				PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
			}
			PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
				   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
					 rxq->comp_ring.base), rcd->rxdIdx);
			rte_pktmbuf_free_seg(rxm);
			goto rcd_done;
		}


		/* Initialize newly received packet buffer */
		rxm->port = rxq->port_id;
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = (uint16_t)rcd->len;
		rxm->data_len = (uint16_t)rcd->len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		vmxnet3_rx_offload(rcd, rxm);

		rx_pkts[nb_rx++] = rxm;
rcd_done:
		rxq->cmd_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);

		/* It's time to allocate some new buf and renew descriptors */
		vmxnet3_post_rx_bufs(rxq, ring_idx);
		if (unlikely(rxq->shared->ctrl.updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
					       rxq->cmd_ring[ring_idx].next2fill);
		}

		/* Advance to the next descriptor in comp_ring */
		vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);

		rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
		nb_rxd++;
		if (nb_rxd > rxq->cmd_ring[0].size) {
			PMD_RX_LOG(ERR,
				   "Used up quota of receiving packets,"
				   " relinquish control.");
			break;
		}
	}

	return nb_rx;
}
Example #18
void
VIFHYPER_MBUF_FREECB(void *data, size_t dlen, void *arg)
{

	rte_pktmbuf_free_seg(arg);
}
Example #19
static struct tx_burst_sg_ret {
	unsigned int length;
	unsigned int num;
}
tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
	    struct rte_mbuf *buf, unsigned int elts_head,
	    struct ibv_sge (*sges)[MLX5_PMD_SGE_WR_N])
{
	unsigned int sent_size = 0;
	unsigned int j;
	int linearize = 0;

	/* When there are too many segments, extra segments are
	 * linearized in the last SGE. */
	if (unlikely(segs > RTE_DIM(*sges))) {
		segs = (RTE_DIM(*sges) - 1);
		linearize = 1;
	}
	/* Update element. */
	elt->buf = buf;
	/* Register segments as SGEs. */
	for (j = 0; (j != segs); ++j) {
		struct ibv_sge *sge = &(*sges)[j];
		uint32_t lkey;

		/* Retrieve Memory Region key for this memory pool. */
		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
		if (unlikely(lkey == (uint32_t)-1)) {
			/* MR does not exist. */
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			/* Clean up TX element. */
			elt->buf = NULL;
			goto stop;
		}
		/* Update SGE. */
		sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
		if (txq->priv->vf)
			rte_prefetch0((volatile void *)
				      (uintptr_t)sge->addr);
		sge->length = DATA_LEN(buf);
		sge->lkey = lkey;
		sent_size += sge->length;
		buf = NEXT(buf);
	}
	/* If buf is not NULL here and is not going to be linearized,
	 * nb_segs is not valid. */
	assert(j == segs);
	assert((buf == NULL) || (linearize));
	/* Linearize extra segments. */
	if (linearize) {
		struct ibv_sge *sge = &(*sges)[segs];
		linear_t *linear = &(*txq->elts_linear)[elts_head];
		unsigned int size = linearize_mbuf(linear, buf);

		assert(segs == (RTE_DIM(*sges) - 1));
		if (size == 0) {
			/* Invalid packet. */
			DEBUG("%p: packet too large to be linearized.",
			      (void *)txq);
			/* Clean up TX element. */
			elt->buf = NULL;
			goto stop;
		}
		/* If MLX5_PMD_SGE_WR_N is 1, free mbuf immediately. */
		if (RTE_DIM(*sges) == 1) {
			do {
				struct rte_mbuf *next = NEXT(buf);

				rte_pktmbuf_free_seg(buf);
				buf = next;
			} while (buf != NULL);
			elt->buf = NULL;
		}
		/* Update SGE. */
		sge->addr = (uintptr_t)&(*linear)[0];
		sge->length = size;
		sge->lkey = txq->mr_linear->lkey;
		sent_size += size;
		/* Include last segment. */
		segs++;
	}
	return (struct tx_burst_sg_ret){
		.length = sent_size,
		.num = segs,
	};
stop:
	return (struct tx_burst_sg_ret){
		.length = -1,
		.num = -1,
	};
}

#endif /* MLX5_PMD_SGE_WR_N > 1 */

/**
 * DPDK callback for TX.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	unsigned int elts_head = txq->elts_head;
	const unsigned int elts_n = txq->elts_n;
	unsigned int elts_comp_cd = txq->elts_comp_cd;
	unsigned int elts_comp = 0;
	unsigned int i;
	unsigned int max;
	int err;
	struct rte_mbuf *buf = pkts[0];

	assert(elts_comp_cd != 0);
	/* Prefetch first packet cacheline. */
	rte_prefetch0(buf);
	txq_complete(txq);
	max = (elts_n - (elts_head - txq->elts_tail));
	if (max > elts_n)
		max -= elts_n;
	assert(max >= 1);
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	--max;
	if (max == 0)
		return 0;
	if (max > pkts_n)
		max = pkts_n;
	for (i = 0; (i != max); ++i) {
		struct rte_mbuf *buf_next = pkts[i + 1];
		unsigned int elts_head_next =
			(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
		struct txq_elt *elt = &(*txq->elts)[elts_head];
		unsigned int segs = NB_SEGS(buf);
#ifdef MLX5_PMD_SOFT_COUNTERS
		unsigned int sent_size = 0;
#endif
		uint32_t send_flags = 0;
#ifdef HAVE_VERBS_VLAN_INSERTION
		int insert_vlan = 0;
#endif /* HAVE_VERBS_VLAN_INSERTION */

		if (i + 1 < max)
			rte_prefetch0(buf_next);
		/* Request TX completion. */
		if (unlikely(--elts_comp_cd == 0)) {
			elts_comp_cd = txq->elts_comp_cd_init;
			++elts_comp;
			send_flags |= IBV_EXP_QP_BURST_SIGNALED;
		}
		/* Should we enable HW CKSUM offload */
		if (buf->ol_flags &
		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
			/* HW does not support checksum offloads at arbitrary
			 * offsets but automatically recognizes the packet
			 * type. For inner L3/L4 checksums, only VXLAN (UDP)
			 * tunnels are currently supported. */
			if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
				send_flags |= IBV_EXP_QP_BURST_TUNNEL;
		}
		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
#ifdef HAVE_VERBS_VLAN_INSERTION
			if (!txq->priv->mps)
				insert_vlan = 1;
			else
#endif /* HAVE_VERBS_VLAN_INSERTION */
			{
				err = insert_vlan_sw(buf);
				if (unlikely(err))
					goto stop;
			}
		}
		if (likely(segs == 1)) {
			uintptr_t addr;
			uint32_t length;
			uint32_t lkey;
			uintptr_t buf_next_addr;

			/* Retrieve buffer information. */
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			length = DATA_LEN(buf);
			/* Update element. */
			elt->buf = buf;
			if (txq->priv->vf)
				rte_prefetch0((volatile void *)
					      (uintptr_t)addr);
			/* Prefetch next buffer data. */
			if (i + 1 < max) {
				buf_next_addr =
					rte_pktmbuf_mtod(buf_next, uintptr_t);
				rte_prefetch0((volatile void *)
					      (uintptr_t)buf_next_addr);
			}
			/* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
			if (length <= txq->max_inline) {
#ifdef HAVE_VERBS_VLAN_INSERTION
				if (insert_vlan)
					err = txq->send_pending_inline_vlan
						(txq->qp,
						 (void *)addr,
						 length,
						 send_flags,
						 &buf->vlan_tci);
				else
#endif /* HAVE_VERBS_VLAN_INSERTION */
					err = txq->send_pending_inline
						(txq->qp,
						 (void *)addr,
						 length,
						 send_flags);
			} else
#endif
			{
				/* Retrieve Memory Region key for this
				 * memory pool. */
				lkey = txq_mp2mr(txq, txq_mb2mp(buf));
				if (unlikely(lkey == (uint32_t)-1)) {
					/* MR does not exist. */
					DEBUG("%p: unable to get MP <-> MR"
					      " association", (void *)txq);
					/* Clean up TX element. */
					elt->buf = NULL;
					goto stop;
				}
#ifdef HAVE_VERBS_VLAN_INSERTION
				if (insert_vlan)
					err = txq->send_pending_vlan
						(txq->qp,
						 addr,
						 length,
						 lkey,
						 send_flags,
						 &buf->vlan_tci);
				else
#endif /* HAVE_VERBS_VLAN_INSERTION */
					err = txq->send_pending
						(txq->qp,
						 addr,
						 length,
						 lkey,
						 send_flags);
			}
			if (unlikely(err))
				goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
			sent_size += length;
#endif
		} else {
#if MLX5_PMD_SGE_WR_N > 1
			struct ibv_sge sges[MLX5_PMD_SGE_WR_N];
			struct tx_burst_sg_ret ret;

			ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
					  &sges);
			if (ret.length == (unsigned int)-1)
				goto stop;
			/* Put SG list into send queue. */
#ifdef HAVE_VERBS_VLAN_INSERTION
			if (insert_vlan)
				err = txq->send_pending_sg_list_vlan
					(txq->qp,
					 sges,
					 ret.num,
					 send_flags,
					 &buf->vlan_tci);
			else
#endif /* HAVE_VERBS_VLAN_INSERTION */
				err = txq->send_pending_sg_list
					(txq->qp,
					 sges,
					 ret.num,
					 send_flags);
			if (unlikely(err))
				goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
			sent_size += ret.length;
#endif
#else /* MLX5_PMD_SGE_WR_N > 1 */
			DEBUG("%p: TX scattered buffers support not"
			      " compiled in", (void *)txq);
			goto stop;
#endif /* MLX5_PMD_SGE_WR_N > 1 */
		}
		elts_head = elts_head_next;
		buf = buf_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += sent_size;
#endif
	}
stop:
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Ring QP doorbell. */
	err = txq->send_flush(txq->qp);
	if (unlikely(err)) {
		/* A nonzero value is not supposed to be returned.
		 * Nothing can be done about it. */
		DEBUG("%p: send_flush() failed with error %d",
		      (void *)txq, err);
	}
	txq->elts_head = elts_head;
	txq->elts_comp += elts_comp;
	txq->elts_comp_cd = elts_comp_cd;
	return i;
}

/**
 * Translate RX completion flags to packet type.
 *
 * @param flags
 *   RX completion flags returned by poll_length_flags().
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(uint32_t flags)
{
	uint32_t pkt_type;

	if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
		pkt_type =
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_INNER_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_INNER_L3_IPV6);
	else
		pkt_type =
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6);
	return pkt_type;
}