Example #1
/* this is really a sanity check */
static int
test_macros(int __rte_unused unused_parm)
{
#define SMALLER 0x1000U
#define BIGGER 0x2000U
#define PTR_DIFF (BIGGER - SMALLER)
#define FAIL_MACRO(x)\
	{printf(#x "() test failed!\n");\
	return -1;}

	uintptr_t unused = 0;

	RTE_SET_USED(unused);

	if ((uintptr_t)RTE_PTR_ADD(SMALLER, PTR_DIFF) != BIGGER)
		FAIL_MACRO(RTE_PTR_ADD);
	if ((uintptr_t)RTE_PTR_SUB(BIGGER, PTR_DIFF) != SMALLER)
		FAIL_MACRO(RTE_PTR_SUB);
	if (RTE_PTR_DIFF(BIGGER, SMALLER) != PTR_DIFF)
		FAIL_MACRO(RTE_PTR_DIFF);
	if (RTE_MAX(SMALLER, BIGGER) != BIGGER)
		FAIL_MACRO(RTE_MAX);
	if (RTE_MIN(SMALLER, BIGGER) != SMALLER)
		FAIL_MACRO(RTE_MIN);

	if (strncmp(RTE_STR(test), "test", sizeof("test")))
		FAIL_MACRO(RTE_STR);

	return 0;
}
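
Every example on this page exercises RTE_STR, DPDK's stringification macro from rte_common.h. Below is a minimal standalone sketch of the check above, assuming the usual one-level definition of RTE_STR so it compiles without the DPDK headers:

#include <stdio.h>
#include <string.h>

/* one-level stringification, as defined in DPDK's rte_common.h */
#define RTE_STR(x) #x

int
main(void)
{
	/* the token test becomes the string literal "test" */
	if (strncmp(RTE_STR(test), "test", sizeof("test")) != 0) {
		printf("RTE_STR() test failed!\n");
		return -1;
	}
	printf("RTE_STR(test) -> \"%s\"\n", RTE_STR(test));
	return 0;
}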
Example #2
static inline int
create_unique_device_name(char *name, size_t size)
{
	int ret;

	if (name == NULL)
		return -EINVAL;

	ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD),
			unique_name_id++);
	if (ret < 0)
		return ret;
	/* snprintf returns the would-be length, so a value >= size
	 * means the name was truncated */
	if ((size_t)ret >= size)
		return -EINVAL;
	return 0;
}
}
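
A minimal usage sketch for the helper above; the caller context is hypothetical, and RTE_CRYPTODEV_NAME_MAX_LEN is DPDK's bound on cryptodev name lengths:

	/* hypothetical caller: build a unique name for a new vdev instance */
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];

	if (create_unique_device_name(name, sizeof(name)) < 0)
		return -EINVAL;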
Example #3
static int
test_lpm(void)
{
	unsigned i;
	int status, global_status = 0;

	for (i = 0; i < NUM_LPM_TESTS; i++) {
		status = tests[i]();
		if (status < 0) {
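			/* note: RTE_STR(tests[i]) stringifies the expression
			 * itself, so this always prints the literal string
			 * "tests[i]" rather than the failing test's name */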
			printf("ERROR: LPM Test %s: FAIL\n", RTE_STR(tests[i]));
			global_status = status;
		}
	}

	return global_status;
}
Example #4
static int
update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (sched_ctx->reordering_enabled) {
		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
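		/* rte_ring sizes must be a power of two, so round up */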
		uint32_t buff_size = rte_align32pow2(
			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);

		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}

		if (!buff_size)
			return 0;

		int ret = snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
			dev->data->dev_id, qp_id);
		/* snprintf reports both errors and truncation via its
		 * return value */
		if (ret < 0 || ret >= RTE_CRYPTODEV_NAME_MAX_LEN) {
			CS_LOG_ERR("failed to create unique reorder buffer "
					"name");
			return -ENOMEM;
		}

		qp_ctx->order_ring = rte_ring_create(order_ring_name,
				buff_size, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qp_ctx->order_ring) {
			CS_LOG_ERR("failed to create order ring");
			return -ENOMEM;
		}
	} else {
		if (qp_ctx->order_ring) {
			rte_ring_free(qp_ctx->order_ring);
			qp_ctx->order_ring = NULL;
		}
	}

	return 0;
}
Example #5
static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
}
Example #6
	RTE_CRYPTODEV_VDEV_SLAVE,
	RTE_CRYPTODEV_VDEV_MODE,
	RTE_CRYPTODEV_VDEV_ORDERING,
	RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
	RTE_CRYPTODEV_VDEV_SOCKET_ID,
	RTE_CRYPTODEV_VDEV_COREMASK,
	RTE_CRYPTODEV_VDEV_CORELIST
};

struct scheduler_parse_map {
	const char *name;
	uint32_t val;
};

const struct scheduler_parse_map scheduler_mode_map[] = {
	{RTE_STR(SCHEDULER_MODE_NAME_ROUND_ROBIN),
			CDEV_SCHED_MODE_ROUNDROBIN},
	{RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
			CDEV_SCHED_MODE_PKT_SIZE_DISTR},
	{RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
			CDEV_SCHED_MODE_FAILOVER},
	{RTE_STR(SCHEDULER_MODE_NAME_MULTI_CORE),
			CDEV_SCHED_MODE_MULTICORE}
};

const struct scheduler_parse_map scheduler_ordering_map[] = {
		{"enable", 1},
		{"disable", 0}
};
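
A name taken from a vdev argument string would be resolved against such a map with a linear scan. A hedged sketch, assuming a hypothetical lookup helper (RTE_DIM is DPDK's array-size macro; <string.h> and <errno.h> are assumed to be included):

/* hypothetical helper: map a scheduler mode name to its numeric
 * value, returning -EINVAL when the name is unknown */
static int
scheduler_mode_lookup(const char *name, uint32_t *val)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
		if (strcmp(name, scheduler_mode_map[i].name) == 0) {
			*val = scheduler_mode_map[i].val;
			return 0;
		}
	}
	return -EINVAL;
}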
Example #7
/*
 * Prepare physical memory mapping: fill the configuration structure with
 * this information and return 0 on success.
 *  1. map N huge pages in separate files in hugetlbfs
 *  2. find associated physical addr
 *  3. find associated NUMA socket ID
 *  4. sort all huge pages by physical address
 *  5. remap these N huge pages in the correct order
 *  6. unmap the first mapping
 *  7. fill memsegs in configuration with contiguous zones
 */
static int
rte_eal_hugepage_init(void)
{
	struct rte_mem_config *mcfg;
	struct hugepage *hugepage, *tmp_hp = NULL;
	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];

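	/* per-socket memory request in bytes, filled from socket_mem below */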
	uint64_t memory[RTE_MAX_NUMA_NODES];

	unsigned hp_offset;
	int i, j, new_memseg;
	int nrpages, total_pages = 0;
	void *addr;

	memset(used_hp, 0, sizeof(used_hp));

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* for debug purposes, hugetlbfs can be disabled */
	if (internal_config.no_hugetlbfs) {
		addr = malloc(internal_config.memory);
		if (addr == NULL)
			goto fail;
		mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
		mcfg->memseg[0].addr = addr;
		mcfg->memseg[0].len = internal_config.memory;
		mcfg->memseg[0].socket_id = 0;
		return 0;
	}

	/* calculate total number of hugepages available. at this point we haven't
	 * yet started sorting them so they all are on socket 0 */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
		used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;

		total_pages += internal_config.hugepage_info[i].num_pages[0];
	}

	/*
	 * allocate a memory area for hugepage table.
	 * this isn't shared memory yet. due to the fact that we need some
	 * processing done on these pages, shared memory will be created
	 * at a later stage.
	 */
	tmp_hp = malloc(total_pages * sizeof(struct hugepage));
	if (tmp_hp == NULL)
		goto fail;

	memset(tmp_hp, 0, total_pages * sizeof(struct hugepage));

	hp_offset = 0; /* where we start the current page size entries */

	/* map all hugepages and sort them */
	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi;

		/*
		 * we don't yet mark hugepages as used at this stage, so
		 * we just map all hugepages available to the system
		 * all hugepages are still located on socket 0
		 */
		hpi = &internal_config.hugepage_info[i];

		if (hpi->num_pages[0] == 0)
			continue;

		/* map all hugepages available */
		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 1) < 0) {
			RTE_LOG(DEBUG, EAL, "Failed to mmap %u MB hugepages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		/* find physical addresses and sockets for each hugepage */
		if (find_physaddr(&tmp_hp[hp_offset], hpi) < 0) {
			RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0) {
			RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		if (sort_by_physaddr(&tmp_hp[hp_offset], hpi) < 0)
			goto fail;

		/* remap all hugepages */
		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) < 0) {
			RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		/* unmap original mappings */
		if (unmap_all_hugepages_orig(&tmp_hp[hp_offset], hpi) < 0)
			goto fail;

		/* we have processed a num of hugepages of this size, so inc offset */
		hp_offset += hpi->num_pages[0];
	}

	/* clean out the numbers of pages */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
			internal_config.hugepage_info[i].num_pages[j] = 0;

	/* get hugepages for each socket */
	for (i = 0; i < total_pages; i++) {
		int socket = tmp_hp[i].socket_id;

		/* find a hugepage info with right size and increment num_pages */
		for (j = 0; j < (int) internal_config.num_hugepage_sizes; j++) {
			if (tmp_hp[i].size ==
					internal_config.hugepage_info[j].hugepage_sz) {
				internal_config.hugepage_info[j].num_pages[socket]++;
			}
		}
	}

	/* make a copy of socket_mem, needed for number of pages calculation */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		memory[i] = internal_config.socket_mem[i];

	/* calculate final number of pages */
	nrpages = calc_num_pages_per_socket(memory,
			internal_config.hugepage_info, used_hp,
			internal_config.num_hugepage_sizes);

	/* error if not enough memory available */
	if (nrpages < 0)
		goto fail;

	/* report the number of pages requested from each socket */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
			if (used_hp[i].num_pages[j] > 0) {
				RTE_LOG(INFO, EAL,
						"Requesting %u pages of size %uMB"
						" from socket %i\n",
						used_hp[i].num_pages[j],
						(unsigned)
							(used_hp[i].hugepage_sz / 0x100000),
						j);
			}
		}
	}

	/* create shared memory */
	hugepage = create_shared_memory(eal_hugepage_info_path(),
					nrpages * sizeof(struct hugepage));

	if (hugepage == NULL) {
		RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
		goto fail;
	}

	/*
	 * unmap pages that we won't need (looks at used_hp).
	 * also, sets final_va to NULL on pages that were unmapped.
	 */
	if (unmap_unneeded_hugepages(tmp_hp, used_hp,
			internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
		goto fail;
	}

	/*
	 * copy entries from the malloc'd hugepage table into the shared
	 * memory, copying only those hugepages whose final_va is not NULL.
	 * has overflow protection.
	 */
	if (copy_hugepages_to_shared_mem(hugepage, nrpages,
			tmp_hp, total_pages) < 0) {
		RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
		goto fail;
	}

	/* free the temporary hugepage table */
	free(tmp_hp);
	tmp_hp = NULL;

	memset(mcfg->memseg, 0, sizeof(mcfg->memseg));
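	/* j indexes the memseg being filled; -1 means none created yet */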
	j = -1;
	for (i = 0; i < nrpages; i++) {
		new_memseg = 0;

		/* if this is a new section, create a new memseg */
		if (i == 0)
			new_memseg = 1;
		else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
			new_memseg = 1;
		else if (hugepage[i].size != hugepage[i-1].size)
			new_memseg = 1;
		else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
		    hugepage[i].size)
			new_memseg = 1;
		else if (((unsigned long)hugepage[i].final_va -
		    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
			new_memseg = 1;

		if (new_memseg) {
			j += 1;
			if (j == RTE_MAX_MEMSEG)
				break;

			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
			mcfg->memseg[j].addr = hugepage[i].final_va;
			mcfg->memseg[j].len = hugepage[i].size;
			mcfg->memseg[j].socket_id = hugepage[i].socket_id;
			mcfg->memseg[j].hugepage_sz = hugepage[i].size;
		}
		/* continuation of previous memseg */
		else {
			mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
		}
		hugepage[i].memseg_id = j;
	}

	if (i < nrpages) {
		RTE_LOG(ERR, EAL, "Can only reserve %d pages "
			"from %d requested\n"
			"Current %s=%d is not enough\n"
			"Please either increase it or request less memory.\n",
			i, nrpages, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
			RTE_MAX_MEMSEG);
		return (-ENOMEM);
	}

	return 0;

fail:
	if (tmp_hp)
		free(tmp_hp);
	return -1;
}