Example 1
/*
 * TODO: we could "error proof" these as done in test_hash_perf.c ln 165:
 *
 * The current setup may give errors if too full in some cases which we check
 * for. However, since EFD allows for ~99% capacity, these errors are rare for
 * #"KEYS_TO_ADD" which is 75% capacity.
 */
static int
setup_keys_and_data(struct efd_perf_params *params, unsigned int cycle)
{
	unsigned int i, j;
	int num_duplicates;

	params->key_size = hashtest_key_lens[cycle];
	params->cycle = cycle;

	/* Zero the first key slot (all slots are rewritten below) */
	for (i = 0; i < params->key_size; i++)
		keys[0][i] = 0;

	/* Generate a list of keys, some of which may be duplicates */
	for (i = 0; i < KEYS_TO_ADD; i++) {
		for (j = 0; j < params->key_size; j++)
			keys[i][j] = rte_rand() & 0xFF;

		data[i] = rte_rand() & VALUE_BITMASK;
	}

	/* Remove duplicates from the keys array */
	do {
		num_duplicates = 0;

		/* Sort the list of keys to make it easier to find duplicates */
		qsort(keys, KEYS_TO_ADD, MAX_KEYSIZE, key_compare);

		/* Sift through the list of keys and look for duplicates */
		for (i = 0; i < KEYS_TO_ADD - 1; i++) {
			if (memcmp(keys[i], keys[i + 1], params->key_size) == 0) {
				/* This key already exists, try again */
				num_duplicates++;
				for (j = 0; j < params->key_size; j++)
					keys[i][j] = rte_rand() & 0xFF;
			}
		}
	} while (num_duplicates != 0);

	/* Shuffle the random values again */
	shuffle_input_keys(params);

	params->efd_table = rte_efd_create("test_efd_perf",
			MAX_ENTRIES, params->key_size,
			efd_get_all_sockets_bitmask(), test_socket_id);
	TEST_ASSERT_NOT_NULL(params->efd_table, "Error creating the efd table\n");

	return 0;
}
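
/*
 * The key_compare() comparator passed to qsort() above is not shown in
 * this snippet. A minimal sketch, assuming each qsort element is one
 * MAX_KEYSIZE-byte key slot (the test suite's actual comparator may
 * differ):
 */
static int
key_compare(const void *key1, const void *key2)
{
	return memcmp(key1, key2, MAX_KEYSIZE);
}
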
/* Shuffle the keys that have been added, so lookups will be totally random */
static void
shuffle_input_keys(unsigned table_index)
{
	unsigned i;
	uint32_t swap_idx;
	uint8_t temp_key[MAX_KEYSIZE];
	hash_sig_t temp_signature;
	int32_t temp_position;

	for (i = KEYS_TO_ADD - 1; i > 0; i--) {
		swap_idx = rte_rand() % i;

		memcpy(temp_key, keys[i], hashtest_key_lens[table_index]);
		temp_signature = signatures[i];
		temp_position = positions[i];

		memcpy(keys[i], keys[swap_idx], hashtest_key_lens[table_index]);
		signatures[i] = signatures[swap_idx];
		positions[i] = positions[swap_idx];

		memcpy(keys[swap_idx], temp_key, hashtest_key_lens[table_index]);
		signatures[swap_idx] = temp_signature;
		positions[swap_idx] = temp_position;
	}
}
Example 3
static int
avf_init_rss(struct avf_adapter *adapter)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
		       AVF_MAX_NUM_QUEUES);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all lut items to default queue */
		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
			vf->rss_lut[i] = 0;
		ret = avf_configure_rss_lut(adapter);
		return ret;
	}

	/* In AVF, RSS enablement is set by PF driver. It is not supported
	 * to set based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vf->vf_res->rss_key_size; i++)
			vf->rss_key[i] = (uint8_t)rte_rand();
	} else
		rte_memcpy(vf->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vf->vf_res->rss_key_size));

	/* init RSS LUT table */
	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		vf->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret)
		return ret;
	ret = avf_configure_rss_key(adapter);
	if (ret)
		return ret;

	return 0;
}
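
/*
 * Worked example of the LUT loop above: with rss_lut_size = 64 and
 * nb_q = 4, the table is filled with the repeating pattern
 * 0, 1, 2, 3, 0, 1, 2, 3, ..., spreading flows round-robin across the
 * four RX queues.
 */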
Example 4
static int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
				   u8 *mac)
{
	int err = 0;
	u32 h = 0U;
	u32 l = 0U;
	u32 mac_addr[2];

	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
		unsigned int rnd = (uint32_t)rte_rand();
		unsigned int ucp_0x370 = 0;

		//get_random_bytes(&rnd, sizeof(unsigned int));
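		/* Presumably: 0x02020202 sets the locally-administered bit
		 * and 0xFEFEFEFE clears the multicast bit in every byte of
		 * the random seed before it is written to the register.
		 */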

		ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	err = hw_atl_utils_fw_downld_dwords(self,
					    aq_hw_read_reg(self, 0x00000374U) +
					    (40U * 4U),
					    mac_addr,
					    ARRAY_SIZE(mac_addr));
	if (err < 0) {
		mac_addr[0] = 0U;
		mac_addr[1] = 0U;
		err = 0;
	} else {
		mac_addr[0] = rte_constant_bswap32(mac_addr[0]);
		mac_addr[1] = rte_constant_bswap32(mac_addr[1]);
	}

	ether_addr_copy((struct ether_addr *)mac_addr,
			(struct ether_addr *)mac);

	if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
		/* chip revision */
		l = 0xE3000000U
			| (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG))
			| (0x00 << 16);
		h = 0x8001300EU;

		mac[5] = (u8)(0xFFU & l);
		l >>= 8;
		mac[4] = (u8)(0xFFU & l);
		l >>= 8;
		mac[3] = (u8)(0xFFU & l);
		l >>= 8;
		mac[2] = (u8)(0xFFU & l);
		mac[1] = (u8)(0xFFU & h);
		h >>= 8;
		mac[0] = (u8)(0xFFU & h);
	}

	return err;
}
Example 5
/*
 * Do a single performance test, of one type of operation.
 *
 * @param h
 *   hash table to run test on
 * @param func
 *   function to call (add, delete or lookup function)
 * @param params
 *   test parameters (key length, table size, number of iterations etc.)
 * @param avg_occupancy
 *   The average number of entries in each bucket of the hash table
 * @param invalid_pos_count
 *   The number of errors (e.g. due to a full bucket).
 * @return
 *   The average number of ticks per hash function call. A negative number
 *   signifies failure.
 */
static double
run_single_tbl_perf_test(const struct rte_hash *h, hash_operation func,
		const struct tbl_perf_test_params *params, double *avg_occupancy,
		uint32_t *invalid_pos_count)
{
	uint64_t begin, end, ticks = 0;
	uint8_t *key = NULL;
	uint32_t *bucket_occupancies = NULL;
	uint32_t num_buckets, i, j;
	int32_t pos;

	/* Initialise */
	num_buckets = params->entries / params->bucket_entries;
	key = (uint8_t *) rte_zmalloc("hash key",
			params->key_len * sizeof(uint8_t), 16);
	if (key == NULL)
		return -1;

	bucket_occupancies = (uint32_t *) rte_zmalloc("bucket occupancies",
			num_buckets * sizeof(uint32_t), 16);
	if (bucket_occupancies == NULL) {
		rte_free(key);
		return -1;
	}

	ticks = 0;
	*invalid_pos_count = 0;

	for (i = 0; i < params->num_iterations; i++) {
		/* Prepare inputs for the current iteration */
		for (j = 0; j < params->key_len; j++)
			key[j] = (uint8_t) rte_rand();

		/* Perform operation, and measure time it takes */
		begin = rte_rdtsc();
		pos = func(h, key);
		end = rte_rdtsc();
		ticks += end - begin;

		/* Other work per iteration */
		if (pos < 0)
			*invalid_pos_count += 1;
		else
			bucket_occupancies[pos / params->bucket_entries]++;
	}
	*avg_occupancy = get_avg(bucket_occupancies, num_buckets);

	rte_free(bucket_occupancies);
	rte_free(key);

	return (double)ticks / params->num_iterations;
}
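
/*
 * get_avg() is referenced above but not shown. A minimal sketch, assuming
 * it is a plain arithmetic mean over the bucket occupancy counters (the
 * test suite's actual helper may differ):
 */
static double
get_avg(const uint32_t *array, uint32_t dims)
{
	double sum = 0;
	uint32_t i;

	for (i = 0; i < dims; i++)
		sum += array[i];

	return sum / (double)dims;
}
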
Example 6
/* Initialise data buffers. */
static int
init_buffers(void)
{
	unsigned i;

	large_buf_read = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
	if (large_buf_read == NULL)
		goto error_large_buf_read;

	large_buf_write = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
	if (large_buf_write == NULL)
		goto error_large_buf_write;

	small_buf_read = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
	if (small_buf_read == NULL)
		goto error_small_buf_read;

	small_buf_write = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
	if (small_buf_write == NULL)
		goto error_small_buf_write;

	for (i = 0; i < LARGE_BUFFER_SIZE; i++)
		large_buf_read[i] = rte_rand();
	for (i = 0; i < SMALL_BUFFER_SIZE; i++)
		small_buf_read[i] = rte_rand();

	return 0;

error_small_buf_write:
	rte_free(small_buf_read);
error_small_buf_read:
	rte_free(large_buf_write);
error_large_buf_write:
	rte_free(large_buf_read);
error_large_buf_read:
	printf("ERROR: not enough memory\n");
	return -1;
}
Example 7
int
rte_red_config_init(struct rte_red_config *red_cfg,
	const uint16_t wq_log2,
	const uint16_t min_th,
	const uint16_t max_th,
	const uint16_t maxp_inv)
{
	if (red_cfg == NULL) {
		return -1;
	}
	if (max_th > RTE_RED_MAX_TH_MAX) {
		return -2;
	}
	if (min_th >= max_th) {
		return -3;
	}
	if (wq_log2 > RTE_RED_WQ_LOG2_MAX) {
		return -4;
	}
	if (wq_log2 < RTE_RED_WQ_LOG2_MIN) {
		return -5;
	}
	if (maxp_inv < RTE_RED_MAXP_INV_MIN) {
		return -6;
	}
	if (maxp_inv > RTE_RED_MAXP_INV_MAX) {
		return -7;
	}
	
	/**
	 *  Initialize the RED module if not already done
	 */
	if (!rte_red_init_done) {
		rte_red_rand_seed = rte_rand();
		rte_red_rand_val = rte_fast_rand();
		__rte_red_init_tables();
		rte_red_init_done = 1;
	}

	red_cfg->min_th = ((uint32_t) min_th) << (wq_log2 + RTE_RED_SCALING);
	red_cfg->max_th = ((uint32_t) max_th) << (wq_log2 + RTE_RED_SCALING);
	red_cfg->pa_const = (2 * (max_th - min_th) * maxp_inv) << RTE_RED_SCALING;
	red_cfg->maxp_inv = maxp_inv;
	red_cfg->wq_log2 = wq_log2;

	return 0;
}
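
/*
 * An illustrative call, not a recommendation; the threshold values below
 * are hypothetical. With wq_log2 = 9 the queue-average weight is 1/2^9,
 * and the drop probability ramps from 0 at min_th = 32 packets up to
 * 1/maxp_inv = 1/10 at max_th = 128 packets.
 */
static int
example_red_setup(void)
{
	struct rte_red_config cfg;

	return rte_red_config_init(&cfg, 9, 32, 128, 10);
}
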
Example 8
/* Shuffle the keys that have been added, so lookups will be totally random */
static void
shuffle_input_keys(struct efd_perf_params *params)
{
	efd_value_t temp_data;
	unsigned int i;
	uint32_t swap_idx;
	uint8_t temp_key[MAX_KEYSIZE];

	for (i = KEYS_TO_ADD - 1; i > 0; i--) {
		swap_idx = rte_rand() % i;

		memcpy(temp_key, keys[i], hashtest_key_lens[params->cycle]);
		temp_data = data[i];

		memcpy(keys[i], keys[swap_idx], hashtest_key_lens[params->cycle]);
		data[i] = data[swap_idx];

		memcpy(keys[swap_idx], temp_key, hashtest_key_lens[params->cycle]);
		data[swap_idx] = temp_data;
	}
}
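
/*
 * Note: rte_rand() % i draws from [0, i - 1], so an element is never
 * swapped with itself (Sattolo's variant, which yields only cyclic
 * permutations), and the modulo adds a slight bias. Harmless for a perf
 * test; a uniform Fisher-Yates draw would be (assuming DPDK >= 19.08 for
 * rte_rand_max()):
 *
 *	swap_idx = (uint32_t)rte_rand_max(i + 1);
 */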
Example 9
/*
 * Test a hash function.
 */
static void run_hash_func_test(rte_hash_function f, uint32_t init_val,
		uint32_t key_len)
{
	static uint8_t key[RTE_HASH_KEY_LENGTH_MAX];
	uint64_t ticks = 0, start, end;
	unsigned i, j;

	for (i = 0; i < HASHTEST_ITERATIONS; i++) {

		for (j = 0; j < key_len; j++)
			key[j] = (uint8_t) rte_rand();

		start = rte_rdtsc();
		f(key, key_len, init_val);
		end = rte_rdtsc();
		ticks += end - start;
	}

	printf("%-12s, %-18u, %-13u, %.02f\n", get_hash_name(f), (unsigned) key_len,
			(unsigned) init_val, (double)ticks / HASHTEST_ITERATIONS);
}
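
/*
 * A sketch of how such a micro-benchmark is typically driven. The lookup
 * tables hashtest_funcs, hashtest_initvals and hashtest_key_lens are
 * assumed to be defined elsewhere in the test suite; this is not the
 * verbatim driver.
 */
static void
run_hash_func_tests(void)
{
	unsigned i, j, k;

	for (i = 0; i < RTE_DIM(hashtest_funcs); i++)
		for (j = 0; j < RTE_DIM(hashtest_initvals); j++)
			for (k = 0; k < RTE_DIM(hashtest_key_lens); k++)
				run_hash_func_test(hashtest_funcs[i],
						hashtest_initvals[j],
						hashtest_key_lens[k]);
}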
Example 10
static int hw_atl_utils_init_ucp(struct aq_hw_s *self)
{
	int err = 0;

	if (!aq_hw_read_reg(self, 0x370U)) {
		unsigned int rnd = (uint32_t)rte_rand();
		unsigned int ucp_0x370 = 0U;

		ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);

	/* poll every 1 ms: up to 10 tries for the mailbox address,
	 * up to 100 for the RPC address
	 */
	AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
		       aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
	AQ_HW_WAIT_FOR(0U != (self->rpc_addr =
		       aq_hw_read_reg(self, 0x334U)), 1000U, 100U);

	return err;
}
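
/*
 * AQ_HW_WAIT_FOR is not shown here. A hedged sketch of the atlantic PMD's
 * poll-with-timeout helper (the real macro may differ in detail): re-test
 * condition _B_ up to _N_ times, sleeping _US_ microseconds between
 * tries, and set the caller's err to -ETIME on timeout.
 */
#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
do { \
	unsigned int wait_i; \
	for (wait_i = _N_; !(_B_) && wait_i != 0U; --wait_i) \
		rte_delay_us(_US_); \
	if (wait_i == 0U) \
		err = -ETIME; \
} while (0)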
Example 11
static int
test_memzone_reserve_max_aligned(void)
{
	const struct rte_memzone *mz;
	const struct rte_config *config;
	const struct rte_memseg *ms;
	int memseg_idx = 0;
	int memzone_idx = 0;
	uintptr_t addr_offset;
	size_t len = 0;
	void* last_addr;
	size_t maxlen = 0;

	/* random alignment */
	rte_srand((unsigned)rte_rdtsc());
	const unsigned align = 1 << ((rte_rand() % 8) + 5); /* from 32 up to 4k alignment */

	/* get pointer to global configuration */
	config = rte_eal_get_configuration();

	ms = rte_eal_get_physmem_layout();

	addr_offset = 0;

	for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){

		/* ignore smaller memsegs as they can only get smaller */
		if (ms[memseg_idx].len < maxlen)
			continue;

		/* align everything */
		last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, RTE_CACHE_LINE_SIZE);
		len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
		len &= ~((size_t) RTE_CACHE_LINE_MASK);

		/* cycle through all memzones */
		for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {

			/* stop when reaching last allocated memzone */
			if (config->mem_config->memzone[memzone_idx].addr == NULL)
				break;

			/* check if the memzone is in our memseg and subtract length */
			if ((config->mem_config->memzone[memzone_idx].addr >=
					ms[memseg_idx].addr) &&
					(config->mem_config->memzone[memzone_idx].addr <
					(RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
				/* since the zones can now be aligned and occasionally skip
				 * some space, we should calculate the length based on
				 * reported length and start addresses difference.
				 */
				len -= (uintptr_t) RTE_PTR_SUB(
						config->mem_config->memzone[memzone_idx].addr,
						(uintptr_t) last_addr);
				len -= config->mem_config->memzone[memzone_idx].len;
				last_addr =
						RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
						(size_t) config->mem_config->memzone[memzone_idx].len);
			}
		}

		/* make sure we get the alignment offset */
		if (len > maxlen) {
			addr_offset = RTE_PTR_ALIGN_CEIL((uintptr_t) last_addr, align) - (uintptr_t) last_addr;
			maxlen = len;
		}
	}

	if (maxlen == 0 || maxlen == addr_offset) {
		printf("There is no space left for biggest %u-aligned memzone!\n", align);
		return 0;
	}

	maxlen -= addr_offset;

	mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
			SOCKET_ID_ANY, 0, align);
	if (mz == NULL){
		printf("Failed to reserve a big chunk of memory\n");
		rte_dump_physmem_layout(stdout);
		rte_memzone_dump(stdout);
		return -1;
	}

	if (mz->len != maxlen) {
		printf("Memzone reserve with 0 size and alignment %u did not return"
				" bigest block\n", align);
		printf("Expected size = %zu, actual size = %zu\n",
				maxlen, mz->len);
		rte_dump_physmem_layout(stdout);
		rte_memzone_dump(stdout);

		return -1;
	}
	return 0;
}
Example 12
static int
fbk_hash_perf_test(void)
{
	struct rte_fbk_hash_params params = {
		.name = "fbk_hash_test",
		.entries = ENTRIES,
		.entries_per_bucket = 4,
		.socket_id = rte_socket_id(),
	};
	struct rte_fbk_hash_table *handle = NULL;
	uint32_t *keys = NULL;
	unsigned indexes[TEST_SIZE];
	uint64_t lookup_time = 0;
	unsigned added = 0;
	unsigned value = 0;
	uint32_t key;
	uint16_t val;
	unsigned i, j;

	handle = rte_fbk_hash_create(&params);
	if (handle == NULL) {
		printf("Error creating table\n");
		return -1;
	}

	keys = rte_zmalloc(NULL, ENTRIES * sizeof(*keys), 0);
	if (keys == NULL) {
		printf("fbk hash: memory allocation for key store failed\n");
		rte_fbk_hash_free(handle);
		return -1;
	}

	/* Generate random keys and values. */
	for (i = 0; i < ENTRIES; i++) {
		/* keys are 32 bits wide; one rte_rand() draw is enough */
		key = (uint32_t)rte_rand();
		val = (uint16_t)rte_rand();

		if (rte_fbk_hash_add_key(handle, key, val) == 0) {
			keys[added] = key;
			added++;
		}
		if (added > (LOAD_FACTOR * ENTRIES))
			break;
	}

	for (i = 0; i < TEST_ITERATIONS; i++) {
		uint64_t begin;
		uint64_t end;

		/* Generate random indexes into keys[] array. */
		for (j = 0; j < TEST_SIZE; j++)
			indexes[j] = rte_rand() % added;

		begin = rte_rdtsc();
		/* Do lookups */
		for (j = 0; j < TEST_SIZE; j++)
			value += rte_fbk_hash_lookup(handle, keys[indexes[j]]);

		end = rte_rdtsc();
		lookup_time += end - begin;
	}

	printf("\n\n *** FBK Hash function performance test results ***\n");
	/*
	 * The use of the 'value' variable ensures that the hash lookup is not
	 * being optimised out by the compiler.
	 */
	if (value != 0)
		printf("Number of ticks per lookup = %g\n",
			(double)lookup_time /
			((double)TEST_ITERATIONS * (double)TEST_SIZE));

	rte_fbk_hash_free(handle);
	rte_free(keys);

	return 0;
}

static int
test_hash_perf(void)
{
	unsigned with_pushes;

	for (with_pushes = 0; with_pushes <= 1; with_pushes++) {
		if (with_pushes == 0)
			printf("\nALL ELEMENTS IN PRIMARY LOCATION\n");
		else
			printf("\nELEMENTS IN PRIMARY OR SECONDARY LOCATION\n");
		if (run_all_tbl_perf_tests(with_pushes) < 0)
			return -1;
	}
	if (fbk_hash_perf_test() < 0)
		return -1;

	return 0;
}
Example 13
File: main.c Project: btw616/dpdk
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores need to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if there are
	 * more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate number of needed queue pairs, based on the amount
	 * of available number of logical cores and crypto devices.
	 * For instance, if there are 4 cores and 2 crypto devices,
	 * 2 queue pairs will be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/**
		 * Device info specifies the min headroom and tailroom
		 * requirements of the crypto PMD. These need to be honoured
		 * by the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count *
						opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		ret = fill_session_pool_socket(socket_id, max_sess_size,
				sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u",	j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking on or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
					"\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate IMIX buffer size array\n");
			goto err;
		}
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];
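
		/*
		 * Worked example with illustrative numbers: buffer sizes
		 * {64, 512, 1500} with weights {7, 2, 1} give
		 * distribution_total = {7, 9, 10}. A draw in [0, 10) picks
		 * 64 with probability 0.7, 512 with 0.2 and 1500 with 0.1,
		 * and the average above is
		 * (64*7 + 512*2 + 1500*1) / 10 = 297 (integer division).
		 */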

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}
	} else {

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				rte_eal_wait_lcore(lcore_id);
				i++;
			}

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}
Example 14
static int
fbk_hash_perf_test(void)
{
	struct rte_fbk_hash_params params = {
		.name = "fbk_hash_test",
		.entries = ENTRIES,
		.entries_per_bucket = 4,
		.socket_id = rte_socket_id(),
	};
	struct rte_fbk_hash_table *handle;
	uint32_t keys[ENTRIES] = {0};
	unsigned indexes[TEST_SIZE];
	uint64_t lookup_time = 0;
	unsigned added = 0;
	unsigned value = 0;
	unsigned i, j;

	handle = rte_fbk_hash_create(&params);
	RETURN_IF_ERROR_FBK(handle == NULL, "fbk hash creation failed");

	/* Generate random keys and values. */
	for (i = 0; i < ENTRIES; i++) {
		/* keys are 32 bits wide; one rte_rand() draw is enough */
		uint32_t key = (uint32_t)rte_rand();
		uint16_t val = (uint16_t)rte_rand();

		if (rte_fbk_hash_add_key(handle, key, val) == 0) {
			keys[added] = key;
			added++;
		}
		if (added > (LOAD_FACTOR * ENTRIES)) {
			break;
		}
	}

	for (i = 0; i < TEST_ITERATIONS; i++) {
		uint64_t begin;
		uint64_t end;

		/* Generate random indexes into keys[] array. */
		for (j = 0; j < TEST_SIZE; j++) {
			indexes[j] = rte_rand() % added;
		}

		begin = rte_rdtsc();
		/* Do lookups */
		for (j = 0; j < TEST_SIZE; j++) {
			value += rte_fbk_hash_lookup(handle, keys[indexes[j]]);
		}
		end = rte_rdtsc();
		lookup_time += end - begin;
	}

	printf("\n\n *** FBK Hash function performance test results ***\n");
	/*
	 * The use of the 'value' variable ensures that the hash lookup is not
	 * being optimised out by the compiler.
	 */
	if (value != 0)
		printf("Number of ticks per lookup = %g\n",
			(double)lookup_time /
			((double)TEST_ITERATIONS * (double)TEST_SIZE));

	rte_fbk_hash_free(handle);

	return 0;
}
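
/*
 * A minimal sketch of the error macro used above; the test suite's actual
 * RETURN_IF_ERROR_FBK may also free the table handle before returning.
 */
#define RETURN_IF_ERROR_FBK(cond, str, ...) do { \
	if (cond) { \
		printf("ERROR line %d: " str "\n", __LINE__, ##__VA_ARGS__); \
		return -1; \
	} \
} while (0)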

/*
 * Do all unit and performance tests.
 */
int test_hash_perf(void)
{
	if (run_all_tbl_perf_tests() < 0)
		return -1;
	run_hash_func_tests();

	if (fbk_hash_perf_test() < 0)
		return -1;
	return 0;
}
#else /* RTE_LIBRTE_HASH */

int
test_hash_perf(void)
{
	printf("The Hash library is not included in this build\n");
	return 0;
}

#endif /* RTE_LIBRTE_HASH */
Example 15
static int
test_reciprocal(void)
{
	int result = 0;
	uint32_t divisor_u32 = 0;
	uint32_t dividend_u32;
	uint32_t nresult_u32;
	uint32_t rresult_u32;
	uint64_t i, j;
	uint64_t divisor_u64 = 0;
	uint64_t dividend_u64;
	uint64_t nresult_u64;
	uint64_t rresult_u64;
	struct rte_reciprocal reci_u32 = {0};
	struct rte_reciprocal_u64 reci_u64 = {0};

	rte_srand(rte_rdtsc());
	printf("Validating unsigned 32bit division.\n");
	for (i = 0; i < MAX_ITERATIONS; i++) {
		/* Change divisor every DIVIDE_ITER iterations. */
		if (i % DIVIDE_ITER == 0) {
			divisor_u32 = rte_rand();
			reci_u32 = rte_reciprocal_value(divisor_u32);
		}

		dividend_u32 = rte_rand();
		nresult_u32 = dividend_u32 / divisor_u32;
		rresult_u32 = rte_reciprocal_divide(dividend_u32,
				reci_u32);
		if (nresult_u32 != rresult_u32) {
			printf("Division failed, %"PRIu32"/%"PRIu32" = "
					"expected %"PRIu32" result %"PRIu32"\n",
					dividend_u32, divisor_u32,
					nresult_u32, rresult_u32);
			result = 1;
			break;
		}
	}

	printf("Validating unsigned 64bit division.\n");
	for (i = 0; i < MAX_ITERATIONS; i++) {
		/* Change divisor every DIVIDE_ITER iterations. */
		if (i % DIVIDE_ITER == 0) {
			divisor_u64 = rte_rand();
			reci_u64 = rte_reciprocal_value_u64(divisor_u64);
		}

		dividend_u64 = rte_rand();
		nresult_u64 = dividend_u64 / divisor_u64;
		rresult_u64 = rte_reciprocal_divide_u64(dividend_u64,
				&reci_u64);
		if (nresult_u64 != rresult_u64) {
			printf("Division failed,  %"PRIu64"/%"PRIu64" = "
					"expected %"PRIu64" result %"PRIu64"\n",
					dividend_u64, divisor_u64,
					nresult_u64, rresult_u64);
			result = 1;
			break;
		}
	}

	printf("Validating unsigned 64bit division with 32bit divisor.\n");
	for (i = 0; i < MAX_ITERATIONS; i++) {
		/* Change divisor every DIVIDE_ITER iterations. */
		if (i % DIVIDE_ITER == 0) {
			divisor_u64 = rte_rand() >> 32;
			reci_u64 = rte_reciprocal_value_u64(divisor_u64);
		}

		dividend_u64 = rte_rand();

		nresult_u64 = dividend_u64 / divisor_u64;
		rresult_u64 = rte_reciprocal_divide_u64(dividend_u64,
				&reci_u64);

		if (nresult_u64 != rresult_u64) {
			printf("Division failed, %"PRIu64"/%"PRIu64" = "
					"expected %"PRIu64" result %"PRIu64"\n",
					dividend_u64, divisor_u64,
					nresult_u64, rresult_u64);
			result = 1;
			break;
		}
	}

	return result;
}
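
/*
 * Latent hazard, left as in the original: rte_rand() can return 0, and a
 * zero divisor makes both the plain division and the reciprocal divide
 * undefined. A hypothetical guard for each divisor draw:
 *
 *	do {
 *		divisor_u32 = rte_rand();
 *	} while (divisor_u32 == 0);
 */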
Example 16
int32_t
perf_test(void)
{
	struct rte_lpm *lpm = NULL;
	uint64_t begin, total_time, lpm_used_entries = 0;
	unsigned i, j;
	uint8_t next_hop_add = 0xAA, next_hop_return = 0;
	int status = 0;
	uint64_t cache_line_counter = 0;
	int64_t count = 0;

	rte_srand(rte_rdtsc());

	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);

	print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Measure add. */
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		if (rte_lpm_add(lpm, large_route_table[i].ip,
				large_route_table[i].depth, next_hop_add) == 0)
			status++;
	}
	/* End Timer. */
	total_time = rte_rdtsc() - begin;

	printf("Unique added entries = %d\n", status);
	/* Obtain add statistics. */
	for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
		if (lpm->tbl24[i].valid)
			lpm_used_entries++;

		if (i % 32 == 0){
			if ((uint64_t)count < lpm_used_entries) {
				cache_line_counter++;
				count = lpm_used_entries;
			}
		}
	}

	printf("Used table 24 entries = %u (%g%%)\n",
			(unsigned) lpm_used_entries,
			(lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
	printf("64 byte Cache entries used = %u (%u bytes)\n",
			(unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);

	printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);

	/* Measure single Lookup */
	total_time = 0;
	count = 0;

	for (i = 0; i < ITERATIONS; i ++) {
		static uint32_t ip_batch[BATCH_SIZE];

		for (j = 0; j < BATCH_SIZE; j ++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();

		for (j = 0; j < BATCH_SIZE; j ++) {
			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
				count++;
		}

		total_time += rte_rdtsc() - begin;

	}
	printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

	/* Measure bulk Lookup */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i ++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint16_t next_hops[BULK_SIZE];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j ++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
			unsigned k;
			rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
			for (k = 0; k < BULK_SIZE; k++)
				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

	/* Measure LookupX4 */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint16_t next_hops[4];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
			unsigned k;
			__m128i ipx4;

			ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
			for (k = 0; k < RTE_DIM(next_hops); k++)
				if (unlikely(next_hops[k] == UINT16_MAX))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

	/* Delete */
	status = 0;
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		/* rte_lpm_delete(lpm, ip, depth) */
		status += rte_lpm_delete(lpm, large_route_table[i].ip,
				large_route_table[i].depth);
	}

	total_time = rte_rdtsc() - begin;

	printf("Average LPM Delete: %g cycles\n",
			(double)total_time / NUM_ROUTE_ENTRIES);

	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);

	return PASS;
}
Example 17
/*
 * Get a random offset into large array, with enough space needed to perform
 * max copy size. Offset is aligned.
 */
static inline size_t
get_rand_offset(void)
{
	return ((rte_rand() % (LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE)) &
	                ~(ALIGNMENT_UNIT - 1));
}
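
/*
 * A hedged usage sketch: draw independent aligned offsets into the large
 * buffers from the init_buffers() example above and time one copy.
 * copy_len is a hypothetical parameter (must be <= SMALL_BUFFER_SIZE so
 * the offsets leave enough room).
 */
static uint64_t
time_one_copy(size_t copy_len)
{
	const size_t src_off = get_rand_offset();
	const size_t dst_off = get_rand_offset();
	const uint64_t start = rte_rdtsc();

	rte_memcpy(large_buf_write + dst_off, large_buf_read + src_off,
			copy_len);
	return rte_rdtsc() - start;
}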