Example #1
/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
			UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
	uint32_t i;

	if (!dev_info)
		return;

	/* although scheduler_attach_init_slave may be called multiple
	 * times, only the first call performs any meaningful work.
	 */
	scheduler_attach_init_slave(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
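		/* take the minimum max_nb_sessions reported by the slaves */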
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
		max_nb_sessions = slave_info.sym.max_nb_sessions <
				max_nb_sessions ?
				slave_info.sym.max_nb_sessions :
				max_nb_sessions;
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->sym.max_nb_sessions = max_nb_sessions;
}
Example #2
/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t max_nb_sess = 0;
	uint16_t headroom_sz = 0;
	uint16_t tailroom_sz = 0;
	uint32_t i;

	if (!dev_info)
		return;

	/* although scheduler_attach_init_slave may be called multiple
	 * times, only the first call performs any meaningful work.
	 */
	scheduler_attach_init_slave(dev);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev_info slave_info;

		rte_cryptodev_info_get(slave_dev_id, &slave_info);
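		/* track the smallest non-zero session limit among the slaves */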
		uint32_t dev_max_sess = slave_info.sym.max_nb_sessions;
		if (dev_max_sess != 0) {
			if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
				max_nb_sess = slave_info.sym.max_nb_sessions;
		}

		/* Get the max headroom requirement among slave PMDs */
		headroom_sz = slave_info.min_mbuf_headroom_req >
				headroom_sz ?
				slave_info.min_mbuf_headroom_req :
				headroom_sz;

		/* Get the max tailroom requirement among slave PMDs */
		tailroom_sz = slave_info.min_mbuf_tailroom_req >
				tailroom_sz ?
				slave_info.min_mbuf_tailroom_req :
				tailroom_sz;
	}

	dev_info->driver_id = dev->driver_id;
	dev_info->feature_flags = dev->feature_flags;
	dev_info->capabilities = sched_ctx->capabilities;
	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
	dev_info->min_mbuf_headroom_req = headroom_sz;
	dev_info->min_mbuf_tailroom_req = tailroom_sz;
	dev_info->sym.max_nb_sessions = max_nb_sess;
}
static int
test_blockcipher_one_case(const struct blockcipher_test_case *t,
	struct rte_mempool *mbuf_pool,
	struct rte_mempool *op_mpool,
	uint8_t dev_id,
	enum rte_cryptodev_type cryptodev_type,
	char *test_msg)
{
	struct rte_mbuf *ibuf = NULL;
	struct rte_mbuf *obuf = NULL;
	struct rte_mbuf *iobuf;
	struct rte_crypto_sym_xform *cipher_xform = NULL;
	struct rte_crypto_sym_xform *auth_xform = NULL;
	struct rte_crypto_sym_xform *init_xform = NULL;
	struct rte_crypto_sym_op *sym_op = NULL;
	struct rte_crypto_op *op = NULL;
	struct rte_cryptodev_sym_session *sess = NULL;
	struct rte_cryptodev_info dev_info;

	int status = TEST_SUCCESS;
	const struct blockcipher_test_data *tdata = t->test_data;
	uint8_t cipher_key[tdata->cipher_key.len];
	uint8_t auth_key[tdata->auth_key.len];
	uint32_t buf_len = tdata->ciphertext.len;
	uint32_t digest_len = 0;
	char *buf_p = NULL;
	uint8_t src_pattern = 0xa5;
	uint8_t dst_pattern = 0xb6;
	uint8_t tmp_src_buf[MBUF_SIZE];
	uint8_t tmp_dst_buf[MBUF_SIZE];

	int nb_segs = 1;

	if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SG) {
		rte_cryptodev_info_get(dev_id, &dev_info);
		if (!(dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
			printf("Device doesn't support scatter-gather. "
					"Test Skipped.\n");
			return 0;
		}
		nb_segs = 3;
	}

	if (tdata->cipher_key.len)
		memcpy(cipher_key, tdata->cipher_key.data,
			tdata->cipher_key.len);
	if (tdata->auth_key.len)
		memcpy(auth_key, tdata->auth_key.data,
			tdata->auth_key.len);

	switch (cryptodev_type) {
	case RTE_CRYPTODEV_QAT_SYM_PMD:
	case RTE_CRYPTODEV_OPENSSL_PMD:
	case RTE_CRYPTODEV_ARMV8_PMD: /* Fall through */
		digest_len = tdata->digest.len;
		break;
	case RTE_CRYPTODEV_AESNI_MB_PMD:
	case RTE_CRYPTODEV_SCHEDULER_PMD:
		digest_len = tdata->digest.truncated_len;
		break;
	default:
		snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
			"line %u FAILED: %s",
			__LINE__, "Unsupported PMD type");
		status = TEST_FAILED;
		goto error_exit;
	}

	/* preparing data */
	if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
		buf_len += tdata->iv.len;
	if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
		buf_len += digest_len;

	/* for contiguous mbuf, nb_segs is 1 */
	ibuf = create_segmented_mbuf(mbuf_pool,
			tdata->ciphertext.len, nb_segs, src_pattern);
	if (ibuf == NULL) {
		snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
			"line %u FAILED: %s",
			__LINE__, "Cannot create source mbuf");
		status = TEST_FAILED;
		goto error_exit;
	}

	/* only encryption takes plaintext.data as input;
	 * decryption, digest generation and digest verification
	 * operate on ciphertext.data
	 */
	if (t->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT)
		pktmbuf_write(ibuf, 0, tdata->plaintext.len,
				tdata->plaintext.data);
	else
		pktmbuf_write(ibuf, 0, tdata->ciphertext.len,
				tdata->ciphertext.data);

	if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
		rte_memcpy(rte_pktmbuf_prepend(ibuf, tdata->iv.len),
				tdata->iv.data, tdata->iv.len);
	}
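	/* append room for the digest: expected value for verify, zeroed otherwise */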
	buf_p = rte_pktmbuf_append(ibuf, digest_len);
	if (!buf_p) {
		snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
			"line %u FAILED: %s",
			__LINE__, "No room to append digest");
		status = TEST_FAILED;
		goto error_exit;
	}
	if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY)
		rte_memcpy(buf_p, tdata->digest.data, digest_len);
	else
		memset(buf_p, 0, digest_len);

	if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
		obuf = rte_pktmbuf_alloc(mbuf_pool);
		if (!obuf) {
			snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
				"FAILED: %s", __LINE__,
				"Allocation of rte_mbuf failed");
			status = TEST_FAILED;
			goto error_exit;
		}
		memset(obuf->buf_addr, dst_pattern, obuf->buf_len);

		buf_p = rte_pktmbuf_append(obuf, buf_len);
		if (!buf_p) {
			snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
				"FAILED: %s", __LINE__,
				"No room to append mbuf");
			status = TEST_FAILED;
			goto error_exit;
		}
		memset(buf_p, 0, buf_len);
	}

	/* Generate Crypto op data structure */
	op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (!op) {
		snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
			"line %u FAILED: %s",
			__LINE__, "Failed to allocate symmetric crypto "
			"operation struct");
		status = TEST_FAILED;
		goto error_exit;
	}

	sym_op = op->sym;

	sym_op->m_src = ibuf;

	if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
		sym_op->m_dst = obuf;
		iobuf = obuf;
	} else {
		sym_op->m_dst = NULL;
		iobuf = ibuf;
	}

	/* a sessionless op requires its xforms to be allocated with
	 * rte_crypto_op_sym_xforms_alloc(); otherwise rte_zmalloc()
	 * is used
	 */
	if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS) {
		uint32_t n_xforms = 0;

		if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
			n_xforms++;
		if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
			n_xforms++;

		if (rte_crypto_op_sym_xforms_alloc(op, n_xforms)
			== NULL) {
			snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
				"FAILED: %s", __LINE__, "Failed to "
				"allocate space for crypto transforms");
			status = TEST_FAILED;
			goto error_exit;
		}
	} else {
		cipher_xform = rte_zmalloc(NULL,
			sizeof(struct rte_crypto_sym_xform), 0);

		auth_xform = rte_zmalloc(NULL,
			sizeof(struct rte_crypto_sym_xform), 0);

		if (!cipher_xform || !auth_xform) {
			snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
				"FAILED: %s", __LINE__, "Failed to "
				"allocate memory for crypto transforms");
			status = TEST_FAILED;
			goto error_exit;
		}
	}

	/* prepare the xforms; for a session-based op, init_xform is set up
	 * here and later passed to rte_cryptodev_sym_session_create()
	 */
	if (t->op_mask == BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN) {
		if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS) {
			cipher_xform = op->sym->xform;
			auth_xform = cipher_xform->next;
			auth_xform->next = NULL;
		} else {
			cipher_xform->next = auth_xform;
			auth_xform->next = NULL;
			init_xform = cipher_xform;
		}
	} else if (t->op_mask == BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC) {
		if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS) {
			auth_xform = op->sym->xform;
			cipher_xform = auth_xform->next;
			cipher_xform->next = NULL;
		} else {
			auth_xform->next = cipher_xform;
			cipher_xform->next = NULL;
			init_xform = auth_xform;
		}
	} else if ((t->op_mask == BLOCKCIPHER_TEST_OP_ENCRYPT) ||
			(t->op_mask == BLOCKCIPHER_TEST_OP_DECRYPT)) {
		if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS)
			cipher_xform = op->sym->xform;
		else
			init_xform = cipher_xform;
		cipher_xform->next = NULL;
	} else if ((t->op_mask == BLOCKCIPHER_TEST_OP_AUTH_GEN) ||
			(t->op_mask == BLOCKCIPHER_TEST_OP_AUTH_VERIFY)) {
		if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SESSIONLESS)
			auth_xform = op->sym->xform;
		else
			init_xform = auth_xform;
		auth_xform->next = NULL;
	} else {
		snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
			"line %u FAILED: %s",
			__LINE__, "Unrecognized operation");
		status = TEST_FAILED;
		goto error_exit;
	}

	/* configure xforms & sym_op cipher and auth data */
	if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
		cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher_xform->cipher.algo = tdata->crypto_algo;
		if (t->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT)
			cipher_xform->cipher.op =
				RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		else
			cipher_xform->cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
		cipher_xform->cipher.key.data = cipher_key;
		cipher_xform->cipher.key.length = tdata->cipher_key.len;

		sym_op->cipher.data.offset = tdata->iv.len;
		sym_op->cipher.data.length = tdata->ciphertext.len;
		sym_op->cipher.iv.data = rte_pktmbuf_mtod(sym_op->m_src,
			uint8_t *);
		sym_op->cipher.iv.length = tdata->iv.len;
		sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(
			sym_op->m_src);
	}
int
cperf_pmd_cyclecount_test_runner(void *test_ctx)
{
	struct pmd_cyclecount_state state = {0};
	const struct cperf_options *opts;
	uint16_t test_burst_size;
	uint8_t burst_size_idx = 0;

	state.ctx = test_ctx;
	opts = state.ctx->options;
	state.opts = opts;
	state.lcore = rte_lcore_id();
	state.linearize = 0;

	static int only_once;
	static bool warmup = true;

	/*
	 * We need a small delay to allow the hardware to process all the
	 * crypto operations. We can't automatically figure out what the delay
	 * should be, so we leave it up to the user (by default it's 0).
	 */
	state.delay = 1000 * opts->pmdcc_delay;

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;

	/* Check if source mbufs require coalescing */
	if (opts->segments_sz < opts->max_buffer_size) {
		rte_cryptodev_info_get(state.ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				    RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) ==
				0) {
			state.linearize = 1;
		}
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	state.ctx->lcore_id = state.lcore;

	/* Get first burst size from range or list */
	if (opts->inc_burst_size != 0)
		test_burst_size = opts->min_burst_size;
	else
		test_burst_size = opts->burst_size_list[0];

	while (test_burst_size <= opts->max_burst_size) {
		/* do a benchmark run */
		if (pmd_cyclecount_bench_burst_sz(&state, test_burst_size))
			return -1;

		/*
		 * First run is always a warm up run.
		 */
		if (warmup) {
			warmup = false;
			continue;
		}

		if (!opts->csv) {
			if (!only_once)
				printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
						"Burst Size", "Enqueued",
						"Dequeued", "Enq Retries",
						"Deq Retries", "Cycles/Op",
						"Cycles/Enq", "Cycles/Deq");
			only_once = 1;

			printf(PRETTY_LINE_FMT, state.ctx->lcore_id,
					opts->test_buffer_size, test_burst_size,
					state.ops_enqd, state.ops_deqd,
					state.ops_enq_retries,
					state.ops_deq_retries,
					state.cycles_per_build,
					state.cycles_per_enq,
					state.cycles_per_deq);
		} else {
			if (!only_once)
				printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
						"Burst Size", "Enqueued",
						"Dequeued", "Enq Retries",
						"Deq Retries", "Cycles/Op",
						"Cycles/Enq", "Cycles/Deq");
			only_once = 1;

			printf(CSV_LINE_FMT, state.ctx->lcore_id,
					opts->test_buffer_size, test_burst_size,
					state.ops_enqd, state.ops_deqd,
					state.ops_enq_retries,
					state.ops_deq_retries,
					state.cycles_per_build,
					state.cycles_per_enq,
					state.cycles_per_deq);
		}

		/* Get next burst size from range or list */
		if (opts->inc_burst_size != 0)
			test_burst_size += opts->inc_burst_size;
		else {
			if (++burst_size_idx == opts->burst_size_count)
				break;
			test_burst_size = opts->burst_size_list[burst_size_idx];
		}
	}

	return 0;
}
Example #5
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores need to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if there are
	 * more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate number of needed queue pairs, based on the amount
	 * of available number of logical cores and crypto devices.
	 * For instance, if there are 4 cores and 2 crypto devices,
	 * 2 queue pairs will be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the min headroom and tailroom
		 * requirements for the crypto PMD. The application needs
		 * to honour them when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count *
						opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		ret = fill_session_pool_socket(socket_id, max_sess_size,
				sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u",	j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking on or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
					"\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
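	/* create one test context per queue pair and bind it to a worker lcore */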
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
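		/* IMIX mode: pre-compute a randomized sequence of buffer sizes */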
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate IMIX buffer size list\n");
			goto err;
		}
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}
	} else {

		/* Get first size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				rte_eal_wait_lcore(lcore_id);
				i++;
			}

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}
Example #6
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	static int only_once;

	uint64_t i;
	uint16_t ops_unused = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {
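		/* enqueue up to max_burst_size ops, capped by the remaining total */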

		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
					ops_needed) != 0) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Setup crypto op, attach mbuf etc */
		(ctx->populate_ops)(ops, ctx->src_buf_offset,
				ctx->dst_buf_offset,
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset);


		/* Populate the mbuf with the test vector, for verification */
		for (i = 0; i < ops_needed; i++)
			cperf_mbuf_set(ops[i]->sym->m_src,
					ctx->options,
					ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;


		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Sending 0 length burst to flush sw crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
					(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	if (!ctx->options->csv) {
		if (!only_once)
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");
		only_once = 1;

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (!only_once)
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");
		only_once = 1;

		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}
Example #7
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
			struct rte_mempool *session_pool_socket[])
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores need to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if there are
	 * more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate number of needed queue pairs, based on the amount
	 * of available number of logical cores and crypto devices.
	 * For instance, if there are 4 cores and 2 crypto devices,
	 * 2 queue pairs will be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		if (session_pool_socket[socket_id] == NULL) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%u", socket_id);

			sess_mp = rte_mempool_create(mp_name,
						NUM_SESSIONS,
						max_sess_size,
						SESS_MEMPOOL_CACHE_SIZE,
						0, NULL, NULL, NULL,
						NULL, socket_id,
						0);

			if (sess_mp == NULL) {
				printf("Cannot create session pool on socket %d\n",
					socket_id);
				return -ENOMEM;
			}

			printf("Allocated session pool on socket %d\n", socket_id);
			session_pool_socket[socket_id] = sess_mp;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id,
				session_pool_socket[socket_id]);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u",	j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;

	void *ctx[RTE_MAX_LCORE] = { };
	struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking on or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
			session_pool_socket);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
					"\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
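	/* create one test context per queue pair and bind it to a worker lcore */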
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id], cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	/* Get first size from range or list */
	if (opts.inc_buffer_size != 0)
		opts.test_buffer_size = opts.min_buffer_size;
	else
		opts.test_buffer_size = opts.buffer_size_list[0];

	while (opts.test_buffer_size <= opts.max_buffer_size) {
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size += opts.inc_buffer_size;
		else {
			if (++buffer_size_idx == opts.buffer_size_count)
				break;
			opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[i];

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}