Example #1
static inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
	struct ixgbe_tx_entry_v *txep;
	uint32_t status;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	n = txq->tx_rs_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
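	 * (e.g. with tx_rs_thresh = 32 and tx_next_dd = 31 this is sw_ring[0])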
	 */
	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
			(n - 1)];
	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
Example #2
static __rte_always_inline int
fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
{
	struct rte_mbuf **txep;
	uint8_t flags;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	flags = txq->hw_ring[txq->next_dd].flags;
	if (!(flags & FM10K_TXD_FLAG_DONE))
		return 0;

	n = txq->rs_thresh;

	/* First buffer to free from S/W ring is at index
	 * next_dd - (rs_thresh-1)
	 */
	txep = &txq->sw_ring[txq->next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0]);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i]);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i]);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
	txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
	if (txq->next_dd >= txq->nb_desc)
		txq->next_dd = (uint16_t)(txq->rs_thresh - 1);

	return txq->rs_thresh;
}
Example #3
static inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
	struct i40e_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	 /* first buffer to free from S/W ring is at index
	  * tx_next_dd - (tx_rs_thresh-1)
	  */
	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void *)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
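All three tx_free_bufs() variants above batch the mbufs returned by the
prefree-seg helper per mempool and release each batch with a single
rte_mempool_put_bulk() call. A minimal sketch of that get-bulk/put-bulk round
trip is shown below; the function name, burst size and error handling are
illustrative assumptions, not taken from the drivers above.

#include <rte_mempool.h>

#define DEMO_BULK_SZ 32	/* illustrative burst size, not a DPDK constant */

static int
demo_bulk_round_trip(struct rte_mempool *mp)
{
	void *objs[DEMO_BULK_SZ];

	/* rte_mempool_get_bulk() is all-or-nothing: it returns 0 on success
	 * and a negative value if the pool cannot supply every object.
	 */
	if (rte_mempool_get_bulk(mp, objs, DEMO_BULK_SZ) != 0)
		return -1;

	/* ... use the objects ... */

	/* rte_mempool_put_bulk() cannot fail, but every object must belong to
	 * mp, which is why the drivers above flush and restart their batch
	 * whenever they meet an mbuf from a different pool.
	 */
	rte_mempool_put_bulk(mp, objs, DEMO_BULK_SZ);
	return 0;
}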
Example #4
/* benchmark alloc-build-free of ops */
static inline int
pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
		uint16_t test_burst_size)
{
	uint32_t iter_ops_left = state->opts->total_ops - cur_op;
	uint32_t iter_ops_needed =
			RTE_MIN(state->opts->nb_descriptors, iter_ops_left);
	uint32_t cur_iter_op;
	uint32_t imix_idx = 0;

	for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
			cur_iter_op += test_burst_size) {
		uint32_t burst_size = RTE_MIN(state->opts->total_ops - cur_op,
				test_burst_size);
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
					burst_size) != 0) {
			RTE_LOG(ERR, USER1,
					"Failed to allocate more crypto operations "
					"from the crypto operation pool.\n"
					"Consider increasing the pool size "
					"with --pool-sz\n");
			return -1;
		}

		/* Setup crypto op, attach mbuf etc */
		(state->ctx->populate_ops)(ops,
				state->ctx->src_buf_offset,
				state->ctx->dst_buf_offset,
				burst_size,
				state->ctx->sess, state->opts,
				state->ctx->test_vector, iv_offset,
				&imix_idx);

#ifdef CPERF_LINEARIZATION_ENABLE
		/* Check if source mbufs require coalescing */
		if (state->linearize) {
			uint8_t i;
			for (i = 0; i < burst_size; i++) {
				struct rte_mbuf *src = ops[i]->sym->m_src;
				rte_pktmbuf_linearize(src);
			}
		}
#endif /* CPERF_LINEARIZATION_ENABLE */
		rte_mempool_put_bulk(state->ctx->pool, (void **)ops,
				burst_size);
	}

	return 0;
}
Example #5
/*
 * This basic performance test just repeatedly sends in 32 packets at a time
 * to the distributor, verifies at the end that we got them all in the worker
 * threads, and finally reports how long the processing took per packet.
 */
static inline int
perf_test(struct rte_distributor *d, struct rte_mempool *p)
{
	unsigned int i;
	uint64_t start, end;
	struct rte_mbuf *bufs[BURST];

	clear_packet_count();
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("Error getting mbufs from pool\n");
		return -1;
	}
	/* ensure we have different hash value for each pkt */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = i;

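	/* time (1 << ITER_POWER) bursts of BURST packets; the >> ITER_POWER
	 * below turns the total cycle count into a per-burst figure
	 */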
	start = rte_rdtsc();
	for (i = 0; i < (1<<ITER_POWER); i++)
		rte_distributor_process(d, bufs, BURST);
	end = rte_rdtsc();

	do {
		usleep(100);
		rte_distributor_process(d, NULL, 0);
	} while (total_packet_count() < (BURST << ITER_POWER));

	rte_distributor_clear_returns(d);

	printf("Time per burst:  %"PRIu64"\n", (end - start) >> ITER_POWER);
	printf("Time per packet: %"PRIu64"\n\n",
			((end - start) >> ITER_POWER)/BURST);
	rte_mempool_put_bulk(p, (void *)bufs, BURST);

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Total packets: %u (%x)\n", total_packet_count(),
			total_packet_count());
	printf("=== Perf test done ===\n\n");

	return 0;
}
Example #6
/* Useful function which ensures that all worker functions terminate */
static void
quit_workers(struct rte_distributor *d, struct rte_mempool *p)
{
	const unsigned num_workers = rte_lcore_count() - 1;
	unsigned i;
	struct rte_mbuf *bufs[RTE_MAX_LCORE];
	rte_mempool_get_bulk(p, (void *)bufs, num_workers);

	quit = 1;
	for (i = 0; i < num_workers; i++)
		bufs[i]->hash.usr = i << 1;
	rte_distributor_process(d, bufs, num_workers);

	rte_mempool_put_bulk(p, (void *)bufs, num_workers);

	rte_distributor_process(d, NULL, 0);
	rte_eal_mp_wait_lcore();
	quit = 0;
	worker_idx = 0;
}
Example #7
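/* Drain the pool one mbuf at a time, then hand the mbufs back in random order
 * so that the pool's internal object order ends up shuffled.
 */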
static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
{
	struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id());
	uint64_t got = 0;

	while (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0)
		++got;

	while (got) {
		int idx;
		do {
			idx = rand() % nb_mbuf;	/* random slot in [0, nb_mbuf) */
		} while (pkts[idx] == 0);

		rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1);
		pkts[idx] = 0;
		--got;
	}
	prox_free(pkts);
}
Example #8
File: env.c Project: spdk/spdk
void
spdk_mempool_put_bulk(struct spdk_mempool *mp, void *const *ele_arr, size_t count)
{
	rte_mempool_put_bulk((struct rte_mempool *)mp, ele_arr, count);
}
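The wrapper above simply forwards to rte_mempool_put_bulk(). A hypothetical
round trip through the SPDK env API could look like the sketch below; the pool
name, element size and counts are made-up values, and the exact signatures
should be checked against the SPDK release in use.

#include "spdk/env.h"

static int
demo_spdk_bulk(void)
{
	/* illustrative parameters: 1024 elements of 256 bytes each */
	struct spdk_mempool *mp = spdk_mempool_create("demo_pool", 1024, 256,
			SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, SPDK_ENV_SOCKET_ID_ANY);
	void *elems[8];

	if (mp == NULL)
		return -1;

	/* grab eight elements at once; 0 means all of them were obtained */
	if (spdk_mempool_get_bulk(mp, elems, 8) != 0) {
		spdk_mempool_free(mp);
		return -1;
	}

	/* ... use the elements ... */

	/* hand them all back in one call (the wrapper shown above) */
	spdk_mempool_put_bulk(mp, elems, 8);

	spdk_mempool_free(mp);
	return 0;
}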
Example #9
/* do basic sanity testing of the distributor. This test tests the following:
 * - send 32 packets through distributor with the same tag and ensure they
 *   all go to the one worker
 * - send 32 packets through the distributor with two different tags and
 *   verify that they go equally to two different workers.
 * - send 32 packets with different tags through the distributors and
 *   just verify we get all packets back.
 * - send 1024 packets through the distributor, gathering the returned packets
 *   as we go. Then verify that we correctly got all 1024 pointers back again,
 *   not necessarily in the same order (as different flows).
 */
static int
sanity_test(struct rte_distributor *d, struct rte_mempool *p)
{
	struct rte_mbuf *bufs[BURST];
	unsigned i;

	printf("=== Basic distributor sanity tests ===\n");
	clear_packet_count();
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/* now set all hash values in all buffers to zero, so all pkts go to the
	 * one worker thread */
	for (i = 0; i < BURST; i++)
		bufs[i]->pkt.hash.rss = 0;

	rte_distributor_process(d, bufs, BURST);
	rte_distributor_flush(d);
	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with all zero hashes done.\n");
	if (worker_stats[0].handled_packets != BURST)
		return -1;

	/* pick two flows and check they go correctly */
	if (rte_lcore_count() >= 3) {
		clear_packet_count();
		for (i = 0; i < BURST; i++)
			bufs[i]->pkt.hash.rss = (i & 1) << 8;

		rte_distributor_process(d, bufs, BURST);
		rte_distributor_flush(d);
		if (total_packet_count() != BURST) {
			printf("Line %d: Error, not all packets flushed. "
					"Expected %u, got %u\n",
					__LINE__, BURST, total_packet_count());
			return -1;
		}

		for (i = 0; i < rte_lcore_count() - 1; i++)
			printf("Worker %u handled %u packets\n", i,
					worker_stats[i].handled_packets);
		printf("Sanity test with two hash values done\n");

		if (worker_stats[0].handled_packets != 16 ||
				worker_stats[1].handled_packets != 16)
			return -1;
	}

	/* give a different hash value to each packet,
	 * so load gets distributed */
	clear_packet_count();
	for (i = 0; i < BURST; i++)
		bufs[i]->pkt.hash.rss = i;

	rte_distributor_process(d, bufs, BURST);
	rte_distributor_flush(d);
	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with non-zero hashes done\n");

	rte_mempool_put_bulk(p, (void *)bufs, BURST);

	/* sanity test with BIG_BATCH packets to ensure they all arrived back
	 * from the returned packets function */
	clear_packet_count();
	struct rte_mbuf *many_bufs[BIG_BATCH], *return_bufs[BIG_BATCH];
	unsigned num_returned = 0;

	/* flush out any remaining packets */
	rte_distributor_flush(d);
	rte_distributor_clear_returns(d);
	if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}
	for (i = 0; i < BIG_BATCH; i++)
		many_bufs[i]->pkt.hash.rss = i << 2;

	for (i = 0; i < BIG_BATCH/BURST; i++) {
		rte_distributor_process(d, &many_bufs[i*BURST], BURST);
		num_returned += rte_distributor_returned_pkts(d,
				&return_bufs[num_returned],
				BIG_BATCH - num_returned);
	}
	rte_distributor_flush(d);
	num_returned += rte_distributor_returned_pkts(d,
			&return_bufs[num_returned], BIG_BATCH - num_returned);

	if (num_returned != BIG_BATCH) {
		printf("line %d: Number returned is not the same as "
				"number sent\n", __LINE__);
		return -1;
	}
	/* big check -  make sure all packets made it back!! */
	for (i = 0; i < BIG_BATCH; i++) {
		unsigned j;
		struct rte_mbuf *src = many_bufs[i];
		for (j = 0; j < BIG_BATCH; j++)
			if (return_bufs[j] == src)
				break;

		if (j == BIG_BATCH) {
			printf("Error: could not find source packet #%u\n", i);
			return -1;
		}
	}
	printf("Sanity test of returned packets done\n");

	rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);

	printf("\n");
	return 0;
}
Example #10
/* run benchmark per burst size */
static inline int
pmd_cyclecount_bench_burst_sz(
		struct pmd_cyclecount_state *state, uint16_t test_burst_size)
{
	uint64_t tsc_start;
	uint64_t tsc_end;
	uint64_t tsc_op;
	uint64_t tsc_enq;
	uint64_t tsc_deq;
	uint32_t cur_op;

	/* reset all counters */
	tsc_enq = 0;
	tsc_deq = 0;
	state->ops_enqd = 0;
	state->ops_enq_retries = 0;
	state->ops_deqd = 0;
	state->ops_deq_retries = 0;

	/*
	 * Benchmark crypto op alloc-build-free separately.
	 */
	tsc_start = rte_rdtsc_precise();

	for (cur_op = 0; cur_op < state->opts->total_ops;
			cur_op += state->opts->nb_descriptors) {
		if (unlikely(pmd_cyclecount_bench_ops(
				state, cur_op, test_burst_size)))
			return -1;
	}

	tsc_end = rte_rdtsc_precise();
	tsc_op = tsc_end - tsc_start;


	/*
	 * Hardware acceleration cyclecount benchmarking loop.
	 *
	 * We're benchmarking raw enq/deq performance by filling up the device
	 * queue, so we never get any failed enqs unless the driver won't accept
	 * the exact number of descriptors we requested, or the driver won't
	 * wrap around the end of the TX ring. However, since we're only
	 * dequeueing once we've filled up the queue, we have to benchmark it
	 * piecemeal and then average out the results.
	 */
	cur_op = 0;
	while (cur_op < state->opts->total_ops) {
		uint32_t iter_ops_left = state->opts->total_ops - cur_op;
		uint32_t iter_ops_needed = RTE_MIN(
				state->opts->nb_descriptors, iter_ops_left);
		uint32_t iter_ops_allocd = iter_ops_needed;

		/* allocate and build ops */
		if (unlikely(pmd_cyclecount_build_ops(state, iter_ops_needed,
				test_burst_size)))
			return -1;

		tsc_start = rte_rdtsc_precise();

		/* fill up TX ring */
		iter_ops_needed = pmd_cyclecount_bench_enq(state,
				iter_ops_needed, test_burst_size);

		tsc_end = rte_rdtsc_precise();

		tsc_enq += tsc_end - tsc_start;

		/* allow for HW to catch up */
		if (state->delay)
			rte_delay_us_block(state->delay);

		tsc_start = rte_rdtsc_precise();

		/* drain RX ring */
		pmd_cyclecount_bench_deq(state, iter_ops_needed,
				test_burst_size);

		tsc_end = rte_rdtsc_precise();

		tsc_deq += tsc_end - tsc_start;

		cur_op += iter_ops_needed;

		/*
		 * we may not have processed all ops that we allocated, so
		 * free everything we've allocated.
		 */
		rte_mempool_put_bulk(state->ctx->pool,
				(void **)state->ctx->ops, iter_ops_allocd);
	}

	state->cycles_per_build = (double)tsc_op / state->opts->total_ops;
	state->cycles_per_enq = (double)tsc_enq / state->ops_enqd;
	state->cycles_per_deq = (double)tsc_deq / state->ops_deqd;

	return 0;
}
Example #11
/* do basic sanity testing of the distributor. This test tests the following:
 * - send 32 packets through distributor with the same tag and ensure they
 *   all go to the one worker
 * - send 32 packets through the distributor with two different tags and
 *   verify that they go equally to two different workers.
 * - send 32 packets with different tags through the distributors and
 *   just verify we get all packets back.
 * - send 1024 packets through the distributor, gathering the returned packets
 *   as we go. Then verify that we correctly got all 1024 pointers back again,
 *   not necessarily in the same order (as different flows).
 */
static int
sanity_test(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *db = wp->dist;
	struct rte_mbuf *bufs[BURST];
	struct rte_mbuf *returns[BURST*2];
	unsigned int i, count;
	unsigned int retries;

	printf("=== Basic distributor sanity tests ===\n");
	clear_packet_count();
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/* now set all hash values in all buffers to zero, so all pkts go to the
	 * one worker thread */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 0;

	rte_distributor_process(db, bufs, BURST);
	count = 0;
	do {

		rte_distributor_flush(db);
		count += rte_distributor_returned_pkts(db,
				returns, BURST*2);
	} while (count < BURST);

	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with all zero hashes done.\n");

	/* pick two flows and check they go correctly */
	if (rte_lcore_count() >= 3) {
		clear_packet_count();
		for (i = 0; i < BURST; i++)
			bufs[i]->hash.usr = (i & 1) << 8;

		rte_distributor_process(db, bufs, BURST);
		count = 0;
		do {
			rte_distributor_flush(db);
			count += rte_distributor_returned_pkts(db,
					returns, BURST*2);
		} while (count < BURST);
		if (total_packet_count() != BURST) {
			printf("Line %d: Error, not all packets flushed. "
					"Expected %u, got %u\n",
					__LINE__, BURST, total_packet_count());
			return -1;
		}

		for (i = 0; i < rte_lcore_count() - 1; i++)
			printf("Worker %u handled %u packets\n", i,
					worker_stats[i].handled_packets);
		printf("Sanity test with two hash values done\n");
	}

	/* give a different hash value to each packet,
	 * so load gets distributed */
	clear_packet_count();
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = i+1;

	rte_distributor_process(db, bufs, BURST);
	count = 0;
	do {
		rte_distributor_flush(db);
		count += rte_distributor_returned_pkts(db,
				returns, BURST*2);
	} while (count < BURST);
	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);
	printf("Sanity test with non-zero hashes done\n");

	rte_mempool_put_bulk(p, (void *)bufs, BURST);

	/* sanity test with BIG_BATCH packets to ensure they all arrived back
	 * from the returned packets function */
	clear_packet_count();
	struct rte_mbuf *many_bufs[BIG_BATCH], *return_bufs[BIG_BATCH];
	unsigned num_returned = 0;

	/* flush out any remaining packets */
	rte_distributor_flush(db);
	rte_distributor_clear_returns(db);

	if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}
	for (i = 0; i < BIG_BATCH; i++)
		many_bufs[i]->hash.usr = i << 2;

	printf("=== testing big burst (%s) ===\n", wp->name);
	for (i = 0; i < BIG_BATCH/BURST; i++) {
		rte_distributor_process(db,
				&many_bufs[i*BURST], BURST);
		count = rte_distributor_returned_pkts(db,
				&return_bufs[num_returned],
				BIG_BATCH - num_returned);
		num_returned += count;
	}
	rte_distributor_flush(db);
	count = rte_distributor_returned_pkts(db,
		&return_bufs[num_returned],
			BIG_BATCH - num_returned);
	num_returned += count;
	retries = 0;
	do {
		rte_distributor_flush(db);
		count = rte_distributor_returned_pkts(db,
				&return_bufs[num_returned],
				BIG_BATCH - num_returned);
		num_returned += count;
		retries++;
	} while ((num_returned < BIG_BATCH) && (retries < 100));

	if (num_returned != BIG_BATCH) {
		printf("line %d: Missing packets, expected %d\n",
				__LINE__, num_returned);
		return -1;
	}

	/* big check -  make sure all packets made it back!! */
	for (i = 0; i < BIG_BATCH; i++) {
		unsigned j;
		struct rte_mbuf *src = many_bufs[i];
		for (j = 0; j < BIG_BATCH; j++) {
			if (return_bufs[j] == src)
				break;
		}

		if (j == BIG_BATCH) {
			printf("Error: could not find source packet #%u\n", i);
			return -1;
		}
	}
	printf("Sanity test of returned packets done\n");

	rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);

	printf("\n");
	return 0;
}
Example #12
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	static int only_once;

	uint64_t i;
	uint16_t ops_unused = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {

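		/* send a full burst unless fewer than max_burst_size ops remain */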
		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
					ops_needed) != 0) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, ctx->src_buf_offset,
				ctx->dst_buf_offset,
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset);


		/* Populate the mbuf with the test vector, for verification */
		for (i = 0; i < ops_needed; i++)
			cperf_mbuf_set(ops[i]->sym->m_src,
					ctx->options,
					ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;


		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Sending 0 length burst to flush sw crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	if (!ctx->options->csv) {
		if (!only_once)
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");
		only_once = 1;

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (!only_once)
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");
		only_once = 1;

		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}