Example #1
/* benchmark enqueue, returns number of ops enqueued */
static uint32_t
pmd_cyclecount_bench_enq(struct pmd_cyclecount_state *state,
		uint32_t iter_ops_needed, uint16_t test_burst_size)
{
	/* Enqueue full descriptor ring of ops on crypto device */
	uint32_t cur_iter_op = 0;
	while (cur_iter_op < iter_ops_needed) {
		uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
				test_burst_size);
		struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
		uint32_t burst_enqd;

		burst_enqd = rte_cryptodev_enqueue_burst(state->ctx->dev_id,
				state->ctx->qp_id, ops, burst_size);

		/* if we couldn't enqueue anything, the queue is full */
		if (!burst_enqd) {
			/* don't try to dequeue anything we didn't enqueue */
			return cur_iter_op;
		}

		if (burst_enqd < burst_size)
			state->ops_enq_retries++;
		state->ops_enqd += burst_enqd;
		cur_iter_op += burst_enqd;
	}
	return iter_ops_needed;
}
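
Taken on its own, the helper above only enqueues; the caller still has to drain the queue pair and return the dequeued ops to their mempool. Below is a minimal sketch of that draining step, using only the standard DPDK cryptodev/mempool calls already seen in these examples; the function name and parameters are illustrative and not part of the original benchmark.

#include <rte_cryptodev.h>
#include <rte_mempool.h>

/* Poll the queue pair until every op enqueued by the helper above has been
 * dequeued, then hand the ops back to their mempool for reuse.
 */
static void
drain_queue_pair(uint8_t dev_id, uint16_t qp_id, struct rte_mempool *op_pool,
		uint32_t ops_pending, uint16_t burst_size)
{
	struct rte_crypto_op *deqd[burst_size];

	while (ops_pending > 0) {
		uint16_t n = rte_cryptodev_dequeue_burst(dev_id, qp_id,
				deqd, burst_size);

		if (n == 0)
			continue;	/* nothing ready yet, poll again */

		rte_mempool_put_bulk(op_pool, (void **)deqd, n);
		ops_pending -= n;
	}
}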
Example #2
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct scheduler_qp_ctx *qp_ctx = qp;
	struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
	struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
	struct scheduler_session *sess;
	uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
			psd_qp_ctx->primary_slave.nb_inflight_cops,
			psd_qp_ctx->secondary_slave.nb_inflight_cops
	};
	struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
		{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
	};
	struct psd_schedule_op *p_enq_op;
	uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
	uint32_t job_len;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++) {
		rte_prefetch0(ops[i]->sym);
		rte_prefetch0(ops[i]->sym->session);
	}

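	/* Main loop: classify four ops per iteration and prefetch the next
	 * four ops (and their sessions) one iteration ahead.
	 */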
	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		rte_prefetch0(ops[i + 4]->sym);
		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 5]->sym);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 6]->sym);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 7]->sym);
		rte_prefetch0(ops[i + 7]->sym->session);

		sess = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		/* job_len is initialized to the cipher data length; if that
		 * is 0, the auth data length is used instead.
		 */
		job_len = ops[i]->sym->cipher.data.length;
		job_len += (ops[i]->sym->cipher.data.length == 0) *
				ops[i]->sym->auth.data.length;
		/* decide the target slave based on the job length */
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		/* stop scheduling ops before the slave queue becomes full;
		 * this prevents a failed enqueue later on
		 */
		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
		ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;

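		/* The three blocks below repeat the same classification for
		 * ops[i+1], ops[i+2] and ops[i+3].
		 */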
		sess = (struct scheduler_session *)
				ops[i+1]->sym->session->_private;
		job_len = ops[i+1]->sym->cipher.data.length;
		job_len += (ops[i+1]->sym->cipher.data.length == 0) *
				ops[i+1]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
		ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;

		sess = (struct scheduler_session *)
				ops[i+2]->sym->session->_private;
		job_len = ops[i+2]->sym->cipher.data.length;
		job_len += (ops[i+2]->sym->cipher.data.length == 0) *
				ops[i+2]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
		ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;

		sess = (struct scheduler_session *)
				ops[i+3]->sym->session->_private;

		job_len = ops[i+3]->sym->cipher.data.length;
		job_len += (ops[i+3]->sym->cipher.data.length == 0) *
				ops[i+3]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
		ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;
	}

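	/* Handle the remaining ops (the tail of the burst, or the whole
	 * burst when nb_ops <= 8) one at a time.
	 */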
	for (; i < nb_ops; i++) {
		sess = (struct scheduler_session *)
				ops[i]->sym->session->_private;

		job_len = ops[i]->sym->cipher.data.length;
		job_len += (ops[i]->sym->cipher.data.length == 0) *
				ops[i]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
		ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
		p_enq_op->pos++;
	}

	processed_ops_pri = rte_cryptodev_enqueue_burst(
			psd_qp_ctx->primary_slave.dev_id,
			psd_qp_ctx->primary_slave.qp_id,
			sched_ops[PRIMARY_SLAVE_IDX],
			enq_ops[PRIMARY_SLAVE_IDX].pos);
	/* the enqueue should not fail, since slave queue occupancy was
	 * checked before scheduling each op
	 */
	RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);

	psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;

	processed_ops_sec = rte_cryptodev_enqueue_burst(
			psd_qp_ctx->secondary_slave.dev_id,
			psd_qp_ctx->secondary_slave.qp_id,
			sched_ops[SECONDARY_SLAVE_IDX],
			enq_ops[SECONDARY_SLAVE_IDX].pos);
	RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);

	psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;

	return processed_ops_pri + processed_ops_sec;
}
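
The distribution decision above hinges on the expression enq_ops[!(job_len & psd_qp_ctx->threshold)]: a job whose length has bits in common with the threshold value selects index 0 (the primary slave), everything else selects index 1 (the secondary slave). The following self-contained illustration of that idiom uses a made-up mask value; it reflects a reading of this snippet only, not of the scheduler's configuration code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: pick slave index 0 for "large" jobs (length shares
 * bits with the threshold mask) and 1 for "small" ones, mirroring
 * enq_ops[!(job_len & threshold)] in the snippet above.
 */
static unsigned int
pick_slave(uint32_t job_len, uint32_t threshold_mask)
{
	return !(job_len & threshold_mask);
}

int
main(void)
{
	const uint32_t mask = 0xFFFFFF80;	/* hypothetical: jobs of 128 bytes or more */

	printf("64-byte job  -> slave %u\n", pick_slave(64, mask));	/* 1: secondary */
	printf("512-byte job -> slave %u\n", pick_slave(512, mask));	/* 0: primary */
	return 0;
}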
Example #3
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	static int only_once;

	uint64_t i;
	uint16_t ops_unused = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {

		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
					ops_needed) != 0) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, ctx->src_buf_offset,
				ctx->dst_buf_offset,
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset);


		/* Populate the mbuf with the test vector, for verification */
		for (i = 0; i < ops_needed; i++)
			cperf_mbuf_set(ops[i]->sym->m_src,
					ctx->options,
					ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;


		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Send a zero-length burst to flush the software crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	if (!ctx->options->csv) {
		if (!only_once)
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");
		only_once = 1;

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (!only_once)
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");
		only_once = 1;

		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}
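
Since cperf_verify_test_runner() matches DPDK's lcore_function_t signature (int (*)(void *)), it is normally launched on a worker lcore rather than called directly. Below is a minimal sketch of such a launcher, assuming a cperf_verify_ctx already built by the test's constructor (not shown in this example); the wrapper name is illustrative.

#include <rte_launch.h>
#include <rte_lcore.h>

/* Illustrative launcher: run the verify test on the first available
 * worker lcore and wait for its return code.  The ctx argument is a
 * struct cperf_verify_ctx prepared elsewhere.
 */
static int
run_verify_on_worker(void *ctx)
{
	unsigned int lcore_id = rte_get_next_lcore(-1, 1, 0);

	if (lcore_id == RTE_MAX_LCORE)
		return -1;	/* no worker lcore available */

	rte_eal_remote_launch(cperf_verify_test_runner, ctx, lcore_id);
	return rte_eal_wait_lcore(lcore_id);
}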