Example #1
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}
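Example #2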
static int
rte_event_eth_rx_adapter_init(void)
{
	const char *name = "rte_event_eth_rx_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_eth_rx_adapter) *
	    RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_eth_rx_adapter = mz->addr;
	return 0;
}
Example #3
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}
Example #4
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
			int socket_id)
{
	const struct rte_memzone *mz;
	unsigned memzone_flags = 0;
	const struct rte_memseg *ms;

	PMD_INIT_FUNC_TRACE();
	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			PMD_DRV_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		PMD_DRV_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %u",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
					queue_name, queue_size, socket_id);
	ms = rte_eal_get_physmem_layout();
	switch (ms[0].hugepage_sz) {
	case RTE_PGSIZE_2M:
		memzone_flags = RTE_MEMZONE_2MB;
		break;
	case RTE_PGSIZE_1G:
		memzone_flags = RTE_MEMZONE_1GB;
		break;
	case RTE_PGSIZE_16M:
		memzone_flags = RTE_MEMZONE_16MB;
		break;
	case RTE_PGSIZE_16G:
		memzone_flags = RTE_MEMZONE_16GB;
		break;
	default:
		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
	}
#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(queue_name, queue_size,
		socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
		memzone_flags, queue_size);
#endif
}
Example #5
File: env.c Project: spdk/spdk
void *
spdk_memzone_lookup(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return mz->addr;
	} else {
		return NULL;
	}
}
Example #6
static const struct rte_memzone *
kni_memzone_reserve(const char *name, size_t len, int socket_id,
						unsigned flags)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz == NULL)
		mz = rte_memzone_reserve(name, len, socket_id, flags);

	return mz;
}
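
This helper is the tersest statement of the lookup-or-reserve idiom that runs through these examples: if the zone already exists, the lookup returns it instead of letting a second reserve fail, so the call is safe to repeat across restarts and across primary/secondary processes. A minimal sketch of a call site, with a hypothetical zone name and size:

static void *
get_shared_state(void)
{
	/* First caller reserves the zone; later callers (or a restarted
	 * process) get the existing zone back via the lookup. */
	const struct rte_memzone *mz =
		kni_memzone_reserve("app_shared_state", 4096, SOCKET_ID_ANY, 0);

	return mz ? mz->addr : NULL;
}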
Example #7
File: env.c Project: spdk/spdk
int
spdk_memzone_free(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return rte_memzone_free(mz);
	}

	return -1;
}
Example #8
static void
setup_shared_variables(void)
{
    const struct rte_memzone *qw_memzone;

    qw_memzone = rte_memzone_lookup(QUOTA_WATERMARK_MEMZONE_NAME);
    if (qw_memzone == NULL)
        rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");

    quota = qw_memzone->addr;
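    /* Note: the cast binds tighter than '+', so this advances by sizeof(int)
     * unsigned-int elements past ->addr, not by sizeof(int) bytes; the
     * process that populates the zone must use the same offset. */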
    low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
}
Example #9
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
			dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
}
Example #10
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
		const char *post_string, int socket_id, uint16_t align)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	rte_snprintf(z_name, sizeof(z_name), "%s_%d_%s",
					dev->driver->pci_drv.name, dev->data->port_id, post_string);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, size,
			socket_id, 0, align);
}
Example #11
static int
test_memzone_invalid_alignment(void)
{
	const struct rte_memzone * mz;

	mz = rte_memzone_lookup("invalid_alignment");
	if (mz != NULL) {
		printf("Zone with invalid alignment has been reserved\n");
		return -1;
	}

	mz = rte_memzone_reserve_aligned("invalid_alignment", 100,
			SOCKET_ID_ANY, 0, 100);
	if (mz != NULL) {
		printf("Zone with invalid alignment has been reserved\n");
		return -1;
	}
	return 0;
}
Example #12
static int
test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
{
	const struct rte_memzone * mz;

	mz = rte_memzone_lookup("zone_size_bigger_than_the_maximum");
	if (mz != NULL) {
		printf("zone_size_bigger_than_the_maximum has been reserved\n");
		return -1;
	}

	mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", (size_t)-1,
			SOCKET_ID_ANY, 0);
	if (mz != NULL) {
		printf("It is impossible to reserve such big a memzone\n");
		return -1;
	}

	return 0;
}
Example #13
struct rte_kni *
rte_kni_get(const char *name)
{
	struct rte_kni *kni;
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];

	if (!name || !name[0])
		return NULL;

	rte_snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%s", name);
	mz = rte_memzone_lookup(mz_name);
	if (!mz)
		return NULL;

	kni = mz->addr;
	if (!kni->in_use)
		return NULL;

	return kni;
}
Example #14
static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		PMD_DRV_LOG(DEBUG, "Invalid queue");
		return;
	}
	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL)	{
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}
Example #15
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 */
int
init(int argc, char *argv[])
{
	int retval;
	const struct rte_memzone *mz;
	uint8_t i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;

	/* get total number of ports */
	total_ports = rte_eth_dev_count();

	/* set up array for port data */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
	{
		mz = rte_memzone_lookup(VM_MZ_PORT_INFO);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
		ports = mz->addr;
	}
	else
	{
		mz = rte_memzone_reserve(VM_MZ_PORT_INFO, sizeof(*ports),
					rte_socket_id(), NO_FLAGS);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
		memset(mz->addr, 0, sizeof(*ports));
		ports = mz->addr;
	}

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* now initialise the ports we will use */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
	{
		for (i = 0; i < ports->num_ports; i++) {
			retval = init_port(ports->id[i]);
			if (retval != 0)
				rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
						(unsigned)i);
		}
	}
	check_all_ports_link_status(ports->num_ports, (~0x0));

	/*
	 * initialise the client queues/rings for inter-EU comms
	 * init_shm_rings();
	 */

	return 0;
}
Example #16
/*
 * This function is run in the secondary instance to test that creation of
 * objects fails in a secondary
 */
static int
run_object_creation_tests(void)
{
	const unsigned flags = 0;
	const unsigned size = 1024;
	const unsigned elt_size = 64;
	const unsigned cache_size = 64;
	const unsigned priv_data_size = 32;

	printf("### Testing object creation - expect lots of mz reserve errors!\n");

	rte_errno = 0;
	if ((rte_memzone_reserve("test_mz", size, rte_socket_id(),
				 flags) == NULL) &&
	    (rte_memzone_lookup("test_mz") == NULL)) {
		printf("Error: unexpected return value from rte_memzone_reserve\n");
		return -1;
	}
	printf("# Checked rte_memzone_reserve() OK\n");

	rte_errno = 0;
	if ((rte_ring_create(
		     "test_ring", size, rte_socket_id(), flags) == NULL) &&
		    (rte_ring_lookup("test_ring") == NULL)){
		printf("Error: unexpected return value from rte_ring_create()\n");
		return -1;
	}
	printf("# Checked rte_ring_create() OK\n");

	rte_errno = 0;
	if ((rte_mempool_create("test_mp", size, elt_size, cache_size,
				priv_data_size, NULL, NULL, NULL, NULL,
				rte_socket_id(), flags) == NULL) &&
	     (rte_mempool_lookup("test_mp") == NULL)){
		printf("Error: unexpected return value from rte_mempool_create()\n");
		return -1;
	}
	printf("# Checked rte_mempool_create() OK\n");

#ifdef RTE_LIBRTE_HASH
	const struct rte_hash_parameters hash_params = { .name = "test_mp_hash" };
	rte_errno = 0;
	if ((rte_hash_create(&hash_params) != NULL) &&
	    (rte_hash_find_existing(hash_params.name) == NULL)){
		printf("Error: unexpected return value from rte_hash_create()\n");
		return -1;
	}
	printf("# Checked rte_hash_create() OK\n");

	const struct rte_fbk_hash_params fbk_params = { .name = "test_fbk_mp_hash" };
	rte_errno = 0;
	if ((rte_fbk_hash_create(&fbk_params) != NULL) &&
	    (rte_fbk_hash_find_existing(fbk_params.name) == NULL)){
		printf("Error: unexpected return value from rte_fbk_hash_create()\n");
		return -1;
	}
	printf("# Checked rte_fbk_hash_create() OK\n");
#endif

#ifdef RTE_LIBRTE_LPM
	rte_errno = 0;
	struct rte_lpm_config config;

	config.max_rules = rte_socket_id();
	config.number_tbl8s = 256;
	config.flags = 0;
	if ((rte_lpm_create("test_lpm", size, &config) != NULL) &&
	    (rte_lpm_find_existing("test_lpm") == NULL)){
		printf("Error: unexpected return value from rte_lpm_create()\n");
		return -1;
	}
	printf("# Checked rte_lpm_create() OK\n");
#endif

	/* Run a test_pci call */
	if (test_pci() != 0) {
		printf("PCI scan failed in secondary\n");
		if (getuid() == 0) /* pci scans can fail as non-root */
			return -1;
	} else
		printf("PCI scan succeeded in secondary\n");

	return 0;
}

/* if called in a primary process, just spawns off a secondary process to
 * run validation tests - which brings us right back here again...
 * if called in a secondary process, this runs a series of API tests to check
 * how things run in a secondary instance.
 */
int
test_mp_secondary(void)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (!test_pci_run) {
			printf("=== Running pre-requisite test of test_pci\n");
			test_pci();
			printf("=== Requisite test done\n");
		}
		return run_secondary_instances();
	}

	printf("IN SECONDARY PROCESS\n");

	return run_object_creation_tests();
}

static struct test_command multiprocess_cmd = {
	.command = "multiprocess_autotest",
	.callback = test_mp_secondary,
};
REGISTER_TEST_COMMAND(multiprocess_cmd);
Example #17
static int
test_memzone(void)
{
	const struct rte_memzone *memzone1;
	const struct rte_memzone *memzone2;
	const struct rte_memzone *memzone3;
	const struct rte_memzone *memzone4;
	const struct rte_memzone *mz;

	memzone1 = rte_memzone_reserve("testzone1", 100,
				SOCKET_ID_ANY, 0);

	memzone2 = rte_memzone_reserve("testzone2", 1000,
				0, 0);

	memzone3 = rte_memzone_reserve("testzone3", 1000,
				1, 0);

	memzone4 = rte_memzone_reserve("testzone4", 1024,
				SOCKET_ID_ANY, 0);

	/* memzone3 may be NULL if we don't have NUMA */
	if (memzone1 == NULL || memzone2 == NULL || memzone4 == NULL)
		return -1;

	rte_memzone_dump(stdout);

	/* check cache-line alignments */
	printf("check alignments and lengths\n");

	if ((memzone1->phys_addr & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if ((memzone2->phys_addr & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if (memzone3 != NULL && (memzone3->phys_addr & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if ((memzone1->len & RTE_CACHE_LINE_MASK) != 0 || memzone1->len == 0)
		return -1;
	if ((memzone2->len & RTE_CACHE_LINE_MASK) != 0 || memzone2->len == 0)
		return -1;
	if (memzone3 != NULL && ((memzone3->len & RTE_CACHE_LINE_MASK) != 0 ||
			memzone3->len == 0))
		return -1;
	if (memzone4->len != 1024)
		return -1;

	/* check that zones don't overlap */
	printf("check overlapping\n");

	if (is_memory_overlap(memzone1->phys_addr, memzone1->len,
			memzone2->phys_addr, memzone2->len))
		return -1;
	if (memzone3 != NULL &&
			is_memory_overlap(memzone1->phys_addr, memzone1->len,
					memzone3->phys_addr, memzone3->len))
		return -1;
	if (memzone3 != NULL &&
			is_memory_overlap(memzone2->phys_addr, memzone2->len,
					memzone3->phys_addr, memzone3->len))
		return -1;

	printf("check socket ID\n");

	/* memzone2 must be on socket id 0 and memzone3 on socket 1 */
	if (memzone2->socket_id != 0)
		return -1;
	if (memzone3 != NULL && memzone3->socket_id != 1)
		return -1;

	printf("test zone lookup\n");
	mz = rte_memzone_lookup("testzone1");
	if (mz != memzone1)
		return -1;

	printf("test duplcate zone name\n");
	mz = rte_memzone_reserve("testzone1", 100,
			SOCKET_ID_ANY, 0);
	if (mz != NULL)
		return -1;

	printf("test reserving memzone with bigger size than the maximum\n");
	if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
		return -1;

	printf("test reserving memory in smallest segments\n");
	if (test_memzone_reserve_memory_in_smallest_segment() < 0)
		return -1;

	printf("test reserving memory in segments with smallest offsets\n");
	if (test_memzone_reserve_memory_with_smallest_offset() < 0)
		return -1;

	printf("test memzone_reserve flags\n");
	if (test_memzone_reserve_flags() < 0)
		return -1;

	printf("test alignment for memzone_reserve\n");
	if (test_memzone_aligned() < 0)
		return -1;

	printf("test boundary alignment for memzone_reserve\n");
	if (test_memzone_bounded() < 0)
		return -1;

	printf("test invalid alignment for memzone_reserve\n");
	if (test_memzone_invalid_alignment() < 0)
		return -1;

	printf("test reserving amounts of memory equal to segment's length\n");
	if (test_memzone_reserve_remainder() < 0)
		return -1;

	printf("test reserving the largest size memzone possible\n");
	if (test_memzone_reserve_max() < 0)
		return -1;

	printf("test reserving the largest size aligned memzone possible\n");
	if (test_memzone_reserve_max_aligned() < 0)
		return -1;

	return 0;
}
Example #18
static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args *targ)
{
	const uint8_t socket = rte_lcore_to_socket_id(lconf->id);
	struct prox_port_cfg *port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
	const struct rte_memzone *mz;
	struct rte_mempool *mp = NULL;
	uint32_t flags = 0;
	char memzone_name[64];
	char name[64];

	/* mbuf size can be set
	 *  - from the config file (highest priority, overriding any other config) - should only be used as a workaround
	 *  - through each 'mode', overriding the default mbuf_size
	 *  - defaulted to MBUF_SIZE, i.e. 1518 bytes
	 * Unless set explicitly, ensure that the size is big enough for the vmxnet3 driver
	 */
	if (targ->mbuf_size_set_explicitely) {
		flags = MEMPOOL_F_NO_SPREAD;
		/* targ->mbuf_size already set */
	}
	else if (targ->task_init->mbuf_size != 0) {
		/* mbuf_size not set through config file but set through mode */
		targ->mbuf_size = targ->task_init->mbuf_size;
	}
	else if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
		if (targ->mbuf_size < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
			targ->mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
	}

	/* allocate memory pool for packets */
	PROX_ASSERT(targ->nb_mbuf != 0);

	if (targ->pool_name[0] == '\0') {
		sprintf(name, "core_%u_port_%u_pool", lconf->id, targ->id);
	}
	else {
		/* Otherwise 'name' would be used uninitialized below when the
		 * configured pool does not exist yet and has to be created. */
		strncpy(name, targ->pool_name, sizeof(name) - 1);
		name[sizeof(name) - 1] = '\0';
	}

	snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
	mz = rte_memzone_lookup(memzone_name);

	if (mz != NULL) {
		mp = (struct rte_mempool*)mz->addr;

		targ->nb_mbuf = mp->size;
		targ->pool = mp;
	}

#ifdef RTE_LIBRTE_IVSHMEM_FALSE
	if (mz != NULL && mp != NULL && mp->phys_addr != mz->ioremap_addr) {
		/* Init mbufs with ioremap_addr for dma */
		mp->phys_addr = mz->ioremap_addr;
		mp->elt_pa[0] = mp->phys_addr + (mp->elt_va_start - (uintptr_t)mp);

		struct prox_pktmbuf_reinit_args init_args;
		init_args.mp = mp;
		init_args.lconf = lconf;

		uint32_t elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
		rte_mempool_obj_iter((void*)mp->elt_va_start, mp->size, elt_sz, 1,
				     mp->elt_pa, mp->pg_num, mp->pg_shift, prox_pktmbuf_reinit, &init_args);
	}
#endif

	/* Use this pool for the interface that the core is
	   receiving from if one core receives from multiple
	   ports, all the ports use the same mempool */
	if (targ->pool == NULL) {
		plog_info("\t\tCreating mempool with name '%s'\n", name);
		targ->pool = rte_mempool_create(name,
						targ->nb_mbuf - 1, targ->mbuf_size,
						targ->nb_cache_mbuf,
						sizeof(struct rte_pktmbuf_pool_private),
						rte_pktmbuf_pool_init, NULL,
						prox_pktmbuf_init, lconf,
						socket, flags);
	}

	PROX_PANIC(targ->pool == NULL,
		   "\t\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));

	plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
		  targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket);
	if (prox_cfg.flags & DSF_SHUFFLE) {
		shuffle_mempool(targ->pool, targ->nb_mbuf);
	}
}
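
Examples #18, #24 and #25 all rely on the same naming convention: a mempool named X is backed by a memzone named "MP_X" (RTE_MEMPOOL_MZ_FORMAT), which is what makes a pool created by one process discoverable from another via rte_memzone_lookup(). A minimal sketch of that recovery on the older DPDK releases these snippets target, where the mempool header still lives at the start of its memzone (the helper and pool name are hypothetical):

static struct rte_mempool *
lookup_pool_by_memzone(const char *pool_name)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "MP_%s", pool_name);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return NULL;

	/* Valid only while the mempool header is stored in its memzone. */
	return (struct rte_mempool *)mz->addr;
}

On current releases rte_mempool_lookup() is the supported way to find a pool by name; Example #25 hints at why, since the mbuf memory moved into separate chunks reached through mp->mem_list.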
Example #19
/*
 * Allocates a completion ring with vmem and stats optionally also allocating
 * a TX and/or RX ring.  Passing NULL as tx_ring_info and/or rx_ring_info
 * to not allocate them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
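/*
 * Worked example of the running-sum layout (hypothetical sizes, LRO off):
 * stats occupy RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) bytes
 * at offset 0, and every later section starts where the previous one ends,
 * e.g. tx_vmem_start = cp_vmem_start + cp_vmem_len. total_alloc_len is the
 * sum after the last section, so a single memzone reservation (looked up
 * first, in case a restarted process already created it) covers every ring,
 * bitmap and table at cache-line-aligned offsets.
 */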
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
			    struct bnxt_tx_ring_info *tx_ring_info,
			    struct bnxt_rx_ring_info *rx_ring_info,
			    struct bnxt_cp_ring_info *cp_ring_info,
			    const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_ring *tx_ring;
	struct bnxt_ring *rx_ring;
	struct rte_pci_device *pdev = bp->pdev;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	rte_iova_t mz_phys_addr;
	int sz;

	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;

	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);

	int tx_vmem_start = cp_vmem_start + cp_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
						tx_ring_struct->vmem_size) : 0;

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
						rx_ring_struct->vmem_size) : 0;
	int ag_vmem_start = 0;
	int ag_vmem_len = 0;
	int cp_ring_start =  0;

	ag_vmem_start = rx_vmem_start + rx_vmem_len;
	ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
				rx_ring_info->ag_ring_struct->vmem_size) : 0;
	cp_ring_start = ag_vmem_start + ag_vmem_len;

	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));

	int tx_ring_start = cp_ring_start + cp_ring_len;
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;

	int rx_ring_start = tx_ring_start + tx_ring_len;
	int rx_ring_len =  rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
		sizeof(struct rx_prod_pkt_bd)) : 0;

	int ag_ring_start = rx_ring_start + rx_ring_len;
	int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;

	int ag_bitmap_start = ag_ring_start + ag_ring_len;
	int ag_bitmap_len =  rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
			rx_ring_info->rx_ring_struct->ring_size *
			AGG_RING_SIZE_FACTOR)) : 0;

	int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	int tpa_info_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
				       sizeof(struct bnxt_tpa_info)) : 0;

	int total_alloc_len = tpa_info_start;
	if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
		total_alloc_len += tpa_info_len;

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
					 SOCKET_ID_ANY,
					 RTE_MEMZONE_2MB |
					 RTE_MEMZONE_SIZE_HINT_ONLY,
					 getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr = mz->iova;
	if ((unsigned long)mz->addr == mz_phys_addr) {
		RTE_LOG(WARNING, PMD,
			"Memzone physical address same as virtual.\n");
		RTE_LOG(WARNING, PMD,
			"Using rte_mem_virt2iova()\n");
		for (sz = 0; sz < total_alloc_len; sz += getpagesize())
			rte_mem_lock_page(((char *)mz->addr) + sz);
		mz_phys_addr = rte_mem_virt2iova(mz->addr);
		if (mz_phys_addr == 0) {
			RTE_LOG(ERR, PMD,
			"unable to map ring address to physical memory\n");
			return -ENOMEM;
		}
	}

	if (tx_ring_info) {
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}

		rx_ring = rx_ring_info->ag_ring_struct;

		rx_ring->bd = ((char *)mz->addr + ag_ring_start);
		rx_ring_info->ag_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz->iova + ag_ring_start;
		rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + ag_vmem_start);
			rx_ring_info->ag_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}

		rx_ring_info->ag_bitmap =
		    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
				    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
				    ag_bitmap_start, ag_bitmap_len);

		/* TPA info */
		if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
			rx_ring_info->tpa_info =
				((struct bnxt_tpa_info *)((char *)mz->addr +
							  tpa_info_start));
	}

	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	if (stats_len) {
		cp_ring_info->hw_stats = mz->addr;
		cp_ring_info->hw_stats_map = mz_phys_addr;
	}
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	return 0;
}
Example #20
struct rte_kni *
rte_kni_create(uint8_t port_id,
		unsigned mbuf_size,
		struct rte_mempool *pktmbuf_pool,
		struct rte_kni_ops *ops)
{
	struct rte_kni_device_info dev_info;
	struct rte_eth_dev_info eth_dev_info;
	struct rte_kni *ctx;
	char itf_name[IFNAMSIZ];
#define OBJNAMSIZ 32
	char obj_name[OBJNAMSIZ];
	const struct rte_memzone *mz;

	if (port_id >= RTE_MAX_ETHPORTS || pktmbuf_pool == NULL || !ops)
		return NULL;

	/* Check FD and open once */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI, "Can not open /dev/%s\n",
							KNI_DEVICE);
			return NULL;
		}
	}

	rte_eth_dev_info_get(port_id, &eth_dev_info);
	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
					eth_dev_info.pci_dev->addr.bus,
					eth_dev_info.pci_dev->addr.devid,
					eth_dev_info.pci_dev->addr.function,
					eth_dev_info.pci_dev->id.vendor_id,
					eth_dev_info.pci_dev->id.device_id);
	dev_info.bus = eth_dev_info.pci_dev->addr.bus;
	dev_info.devid = eth_dev_info.pci_dev->addr.devid;
	dev_info.function = eth_dev_info.pci_dev->addr.function;
	dev_info.vendor_id = eth_dev_info.pci_dev->id.vendor_id;
	dev_info.device_id = eth_dev_info.pci_dev->id.device_id;

	ctx = rte_zmalloc("kni devs", sizeof(struct rte_kni), 0);
	if (ctx == NULL)
		rte_panic("Cannot allocate memory for kni dev\n");
	memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	rte_snprintf(itf_name, IFNAMSIZ, "vEth%u", port_id);
	rte_snprintf(ctx->name, IFNAMSIZ, itf_name);
	rte_snprintf(dev_info.name, IFNAMSIZ, itf_name);

	/* TX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_tx_%d queue\n", port_id);
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_rx_%d queue\n", port_id);
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_alloc_%d queue\n", port_id);
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_free_%d queue\n", port_id);
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;

	/* Request RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_req_%d ring\n", port_id);
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_resp_%d ring\n", port_id);
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_sync_%d mem\n", port_id);
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;

	/* MBUF mempool */
	mz = rte_memzone_lookup("MP_mbuf_pool");
	if (mz == NULL) {
		RTE_LOG(ERR, KNI, "Can not find MP_mbuf_pool\n");
		goto fail;
	}
	dev_info.mbuf_va = mz->addr;
	dev_info.mbuf_phys = mz->phys_addr;
	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->port_id = port_id;
	ctx->mbuf_size = mbuf_size;

	/* Configure the buffer size which will be checked in kernel module */
	dev_info.mbuf_size = ctx->mbuf_size;

	if (ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Fail to create kni device\n");
		goto fail;
	}

	return ctx;

fail:
	if (ctx != NULL)
		rte_free(ctx);

	return NULL;
}
Example #21
/*
 * Allocates a completion ring with vmem and stats optionally also allocating
 * a TX and/or RX ring.  Passing NULL as tx_ring_info and/or rx_ring_info
 * to not allocate them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
			    struct bnxt_tx_ring_info *tx_ring_info,
			    struct bnxt_rx_ring_info *rx_ring_info,
			    struct bnxt_cp_ring_info *cp_ring_info,
			    const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_ring *tx_ring;
	struct bnxt_ring *rx_ring;
	struct rte_pci_device *pdev = bp->pdev;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];

	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;

	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);

	int tx_vmem_start = cp_vmem_start + cp_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
						tx_ring_struct->vmem_size) : 0;

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
						rx_ring_struct->vmem_size) : 0;

	int cp_ring_start = rx_vmem_start + rx_vmem_len;
	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));

	int tx_ring_start = cp_ring_start + cp_ring_len;
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;

	int rx_ring_start = tx_ring_start + tx_ring_len;
	int rx_ring_len =  rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
		sizeof(struct rx_prod_pkt_bd)) : 0;

	int total_alloc_len = rx_ring_start + rx_ring_len;

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve(mz_name, total_alloc_len,
					 SOCKET_ID_ANY,
					 RTE_MEMZONE_2MB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	if (tx_ring_info) {
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}
	}

	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	if (stats_len) {
		cp_ring_info->hw_stats = mz->addr;
		cp_ring_info->hw_stats_map = mz->phys_addr;
	}
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	return 0;
}
Example #22
/**
 * CALLED BY NF:
 * Initialises everything we need
 *
 * Returns the number of arguments parsed by both rte_eal_init and
 * parse_nflib_args offset by 1.  This is used by getopt in the NF's
 * code.  The offsetting by one accounts for getopt parsing "--" which
 * increments optind by 1 each time.
 */
int
onvm_nf_init(int argc, char *argv[], const char *nf_tag) {
        const struct rte_memzone *mz;
	const struct rte_memzone *mz_scp;
        struct rte_mempool *mp;
	struct onvm_service_chain **scp;
        int retval_eal, retval_parse, retval_final;

        if ((retval_eal = rte_eal_init(argc, argv)) < 0)
                return -1;

        /* Modify argc and argv to conform to getopt rules for parse_nflib_args */
        argc -= retval_eal; argv += retval_eal;

        /* Reset getopt global variables opterr and optind to their default values */
        opterr = 0; optind = 1;

        if ((retval_parse = parse_nflib_args(argc, argv)) < 0)
                rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

        /*
         * Calculate the offset that the nf will use to modify argc and argv for its
         * getopt call. This is the sum of the number of arguments parsed by
         * rte_eal_init and parse_nflib_args. This will be decremented by 1 to assure
         * getopt is looking at the correct index since optind is incremented by 1 each
         * time "--" is parsed.
         * This is the value that will be returned if initialization succeeds.
         */
        retval_final = (retval_eal + retval_parse) - 1;

        /* Reset getopt global variables opterr and optind to their default values */
        opterr = 0; optind = 1;

        /* Lookup mempool for nf_info struct */
        nf_info_mp = rte_mempool_lookup(_NF_MEMPOOL_NAME);
        if (nf_info_mp == NULL)
                rte_exit(EXIT_FAILURE, "No Client Info mempool - bye\n");

        /* Initialize the info struct */
        nf_info = ovnm_nf_info_init(nf_tag);

        mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
        if (mp == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

        mz = rte_memzone_lookup(MZ_CLIENT_INFO);
        if (mz == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get tx info structure\n");
        tx_stats = mz->addr;

	mz_scp = rte_memzone_lookup(MZ_SCP_INFO);
	if (mz_scp == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get service chain info structre\n");
	scp = mz_scp->addr;
	default_chain = *scp;

	onvm_sc_print(default_chain);

        nf_info_ring = rte_ring_lookup(_NF_QUEUE_NAME);
        if (nf_info_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get nf_info ring");

        /* Put this NF's info struct onto queue for manager to process startup */
        if (rte_ring_enqueue(nf_info_ring, nf_info) < 0) {
                rte_mempool_put(nf_info_mp, nf_info); // give back memory
                rte_exit(EXIT_FAILURE, "Cannot send nf_info to manager");
        }

        /* Wait for a client id to be assigned by the manager */
        RTE_LOG(INFO, APP, "Waiting for manager to assign an ID...\n");
        while (nf_info->status == (uint16_t)NF_WAITING_FOR_ID) {
                sleep(1);
        }

        /* This NF is trying to declare an ID already in use. */
        if (nf_info->status == NF_ID_CONFLICT) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(NF_ID_CONFLICT, "Selected ID already in use. Exiting...\n");
        } else if(nf_info->status == NF_NO_IDS) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(NF_NO_IDS, "There are no ids available for this NF\n");
        } else if(nf_info->status != NF_STARTING) {
                rte_mempool_put(nf_info_mp, nf_info);
                rte_exit(EXIT_FAILURE, "Error occurred during manager initialization\n");
        }
        RTE_LOG(INFO, APP, "Using Instance ID %d\n", nf_info->instance_id);
        RTE_LOG(INFO, APP, "Using Service ID %d\n", nf_info->service_id);

        /* Now, map rx and tx rings into client space */
        rx_ring = rte_ring_lookup(get_rx_queue_name(nf_info->instance_id));
        if (rx_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n");

        tx_ring = rte_ring_lookup(get_tx_queue_name(nf_info->instance_id));
        if (tx_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get TX ring - is server process running?\n");

        /* Tell the manager we're ready to receive packets */
        nf_info->status = NF_RUNNING;

        RTE_LOG(INFO, APP, "Finished Process Init.\n");
        return retval_final;
}
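
The docstring above is easiest to see with a concrete caller: the return value is the number of arguments consumed by rte_eal_init and parse_nflib_args, offset by one to account for getopt's handling of "--", so an NF shifts argc/argv by it before running its own getopt loop. A minimal hypothetical sketch, with an illustrative tag and option letter:

int
main(int argc, char *argv[])
{
        int offset, c;

        if ((offset = onvm_nf_init(argc, argv, "example_nf")) < 0)
                return -1;

        /* Skip the arguments consumed by rte_eal_init and parse_nflib_args. */
        argc -= offset;
        argv += offset;

        while ((c = getopt(argc, argv, "p:")) != -1) {
                /* NF-specific options start here. */
        }
        return 0;
}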
Example #23
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 */
int
init(int argc, char *argv[])
{
	int retval;
	const struct rte_memzone *mz;
	unsigned i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;

	/* get total number of ports */
	total_ports = rte_eth_dev_count();

	/* set up array for port data */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
	{
		mz = rte_memzone_lookup(MZ_PORT_INFO);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
		ports = mz->addr;
	}
	else /* RTE_PROC_PRIMARY */
	{
		mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports),
					rte_socket_id(), NO_FLAGS);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
		memset(mz->addr, 0, sizeof(*ports));
		ports = mz->addr;
	}

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* now initialise the ports we will use */	
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
	{
		for (i = 0; i < ports->num_ports; i++) {
			retval = init_port(ports->id[i]);
			if (retval != 0)
				rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
						(unsigned)i);
		}
	}
	check_all_ports_link_status(ports->num_ports, (~0x0));

	/* initialise the client queues/rings for inter-EU comms */
	init_shm_rings();

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
	{
		RTE_LOG(INFO, APP, "HOST SHARE MEM Init.\n");
		/* create metadata, output cmdline 
		if (rte_hostshmem_metadata_create(HOSTSHMEM_METADATA_NAME) < 0)
			rte_exit(EXIT_FAILURE, "Cannot create HOSTSHMEM metadata\n");
		if (rte_hostshmem_metadata_add_memzone(mz, HOSTSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add memzone to HOSTSHMEM metadata\n");					
		if (rte_hostshmem_metadata_add_mempool(pktmbuf_pool, HOSTSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to HOSTSHMEM metadata\n");	
		for (i = 0; i < num_clients; i++) 
		{
			if (rte_hostshmem_metadata_add_ring(clients[i].rx_q,
					HOSTSHMEM_METADATA_NAME) < 0)
				rte_exit(EXIT_FAILURE, "Cannot add ring client %d to HOSTSHMEM metadata\n", i);
		}
		generate_hostshmem_cmdline(HOSTSHMEM_METADATA_NAME);
		*/
		
		const struct rte_mem_config *mcfg;
		/* get pointer to global configuration */
		mcfg = rte_eal_get_configuration()->mem_config;

		for (i = 0; i < RTE_MAX_MEMSEG; i++) {
			if (mcfg->memseg[i].addr == NULL)
				break;

			printf("Segment %u: phys:0x%"PRIx64", len:%zu, "
				   "virt:%p, socket_id:%"PRId32", "
				   "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
				   "nrank:%"PRIx32"\n", i,
				   mcfg->memseg[i].phys_addr,
				   mcfg->memseg[i].len,
				   mcfg->memseg[i].addr,
				   mcfg->memseg[i].socket_id,
				   mcfg->memseg[i].hugepage_sz,
				   mcfg->memseg[i].nchannel,
				   mcfg->memseg[i].nrank);
		}
		
		RTE_LOG(INFO, APP, "HOST SHARE MEM Init. done\n");
		
		RTE_LOG(INFO, APP, "IV SHARE MEM Init.\n");
		/* create metadata, output cmdline */
		if (rte_ivshmem_metadata_create(IVSHMEM_METADATA_NAME) < 0)
			rte_exit(EXIT_FAILURE, "Cannot create IVSHMEM metadata\n");	
		if (rte_ivshmem_metadata_add_memzone(mz, IVSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add memzone to IVSHMEM metadata\n");					
		if (rte_ivshmem_metadata_add_mempool(pktmbuf_pool, IVSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to IVSHMEM metadata\n");				
		
		for (i = 0; i < num_clients; i++) 
		{
			if (rte_ivshmem_metadata_add_ring(clients[i].rx_q,
					IVSHMEM_METADATA_NAME) < 0)
				rte_exit(EXIT_FAILURE, "Cannot add ring client %d to IVSHMEM metadata\n", i);
		}
		generate_ivshmem_cmdline(IVSHMEM_METADATA_NAME);
		RTE_LOG(INFO, APP, "IV SHARE MEM Done.\n");
	}
	
	
	return 0;
}
Example #24
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *ctx;
	char intf_name[RTE_KNI_NAMESIZE];
#define OBJNAMSIZ 32
	char obj_name[OBJNAMSIZ];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check FD and open once */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI, "Can not open /dev/%s\n",
							KNI_DEVICE);
			return NULL;
		}
	}

	rte_snprintf(intf_name, RTE_KNI_NAMESIZE, conf->name);
	rte_snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%s", intf_name);
	mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni), 
				SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx = mz->addr;

	if (ctx->in_use) {
		RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
		goto fail;
	}
	memset(ctx, 0, sizeof(struct rte_kni));
	if (ops)
		memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.bus = conf->addr.bus;
	dev_info.devid = conf->addr.devid;
	dev_info.function = conf->addr.function;
	dev_info.vendor_id = conf->id.vendor_id;
	dev_info.device_id = conf->id.device_id;
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;

	rte_snprintf(ctx->name, RTE_KNI_NAMESIZE, intf_name);
	rte_snprintf(dev_info.name, RTE_KNI_NAMESIZE, intf_name);

	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
		dev_info.bus, dev_info.devid, dev_info.function,
			dev_info.vendor_id, dev_info.device_id);

	/* TX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_tx_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_rx_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_free_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;

	/* Request RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_req_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_resp_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_sync_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;

	/* MBUF mempool */
	rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", pktmbuf_pool->name);
	mz = rte_memzone_lookup(mz_name);
	KNI_MZ_CHECK(mz == NULL);
	dev_info.mbuf_va = mz->addr;
	dev_info.mbuf_phys = mz->phys_addr;
	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->group_id = conf->group_id;
	ctx->mbuf_size = conf->mbuf_size;

	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	KNI_MZ_CHECK(ret < 0);

	ctx->in_use = 1;

	return ctx;

fail:

	return NULL;
}
Example #25
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *ctx;
	char intf_name[RTE_KNI_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	const struct rte_mempool *mp;
	struct rte_kni_memzone_slot *slot = NULL;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check if KNI subsystem has been initialized */
	if (kni_memzone_pool.initialized != 1) {
		RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
		return NULL;
	}

	/* Get an available slot from the pool */
	slot = kni_memzone_pool_alloc();
	if (!slot) {
		RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unusued ones.\n",
			kni_memzone_pool.max_ifaces);
		return NULL;
	}

	/* Recover ctx */
	ctx = slot->m_ctx->addr;
	snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);

	if (ctx->in_use) {
		RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
		return NULL;
	}
	memset(ctx, 0, sizeof(struct rte_kni));
	if (ops)
		memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.bus = conf->addr.bus;
	dev_info.devid = conf->addr.devid;
	dev_info.function = conf->addr.function;
	dev_info.vendor_id = conf->id.vendor_id;
	dev_info.device_id = conf->id.device_id;
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;
#ifdef RTE_LIBRW_PIOT
        dev_info.no_data = conf->no_data;
        dev_info.no_pci     = conf->no_pci;
        dev_info.ifindex    = conf->ifindex;
        dev_info.always_up  = conf->always_up;
        dev_info.no_tx  = conf->no_tx;
        dev_info.loopback = conf->loopback;
        dev_info.no_user_ring = conf->no_user_ring;
        dev_info.mtu = conf->mtu;
        dev_info.vlanid = conf->vlanid;
        memcpy(dev_info.mac, conf->mac, 6);
        strncpy(dev_info.netns_name, conf->netns_name, sizeof(dev_info.netns_name));
        dev_info.netns_fd = conf->netns_fd;
        dev_info.pid      = getpid();
#ifdef RTE_LIBRW_NOHUGE
        dev_info.nohuge = conf->nohuge;
        dev_info.nl_pid = conf->nl_pid;
#endif
#endif
	snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
	snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);

	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
		dev_info.bus, dev_info.devid, dev_info.function,
			dev_info.vendor_id, dev_info.device_id);
	/* TX RING */
	mz = slot->m_tx_q;
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	mz = slot->m_rx_q;
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	mz = slot->m_alloc_q;
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	mz = slot->m_free_q;
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;
#ifndef RTE_LIBRW_PIOT
	/* Request RING */
	mz = slot->m_req_q;
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	mz = slot->m_resp_q;
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	mz = slot->m_sync_addr;
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;
#endif

	/* MBUF mempool */
	snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT,
		pktmbuf_pool->name);
	mz = rte_memzone_lookup(mz_name);
	KNI_MEM_CHECK(mz == NULL);
	mp = (struct rte_mempool *)mz->addr;
	/* KNI currently requires to have only one memory chunk */
	if (mp->nb_mem_chunks != 1)
		goto kni_fail;

	dev_info.mbuf_va = STAILQ_FIRST(&mp->mem_list)->addr;
	dev_info.mbuf_phys = STAILQ_FIRST(&mp->mem_list)->phys_addr;
	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->group_id = conf->group_id;
	ctx->slot_id = slot->id;
	ctx->mbuf_size = conf->mbuf_size;

	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	KNI_MEM_CHECK(ret < 0);

	ctx->in_use = 1;

	/* Allocate mbufs and then put them into alloc_q */
	kni_allocate_mbufs(ctx);

	return ctx;

kni_fail:
	if (slot)
		kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);

	return NULL;
}
Example #26
/*
 * Application main function - loops through
 * receiving and processing packets. Never returns
 */
int
main(int argc, char *argv[])
{
    const struct rte_memzone *mz;
    struct rte_ring *rx_ring;
    struct rte_mempool *mp;
    struct port_info *ports;
    int need_flush = 0; /* indicates whether we have unsent packets */
    int retval;
    void *pkts[PKT_READ_SIZE];
    uint16_t sent;

    if ((retval = rte_eal_init(argc, argv)) < 0)
        return -1;
    argc -= retval;
    argv += retval;

    if (parse_app_args(argc, argv) < 0)
        rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

    if (rte_eth_dev_count() == 0)
        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

    rx_ring = rte_ring_lookup(get_rx_queue_name(client_id));
    if (rx_ring == NULL)
        rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n");

    mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
    if (mp == NULL)
        rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

    mz = rte_memzone_lookup(MZ_PORT_INFO);
    if (mz == NULL)
        rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
    ports = mz->addr;
    tx_stats = &(ports->tx_stats[client_id]);

    configure_output_ports(ports);

    RTE_LOG(INFO, APP, "Finished Process Init.\n");

    printf("\nClient process %d handling packets\n", client_id);
    printf("[Press Ctrl-C to quit ...]\n");

    for (;;) {
        uint16_t i, rx_pkts = PKT_READ_SIZE;
        uint8_t port;

        /* try dequeuing max possible packets first, if that fails, get the
         * most we can. Loop body should only execute once, maximum */
        while (rx_pkts > 0 &&
                unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
            rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);

        if (unlikely(rx_pkts == 0)) {
            if (need_flush)
                for (port = 0; port < ports->num_ports; port++) {
                    sent = rte_eth_tx_buffer_flush(ports->id[port], client_id,
                                                   tx_buffer[port]);
                    if (unlikely(sent))
                        tx_stats->tx[port] += sent;
                }
            need_flush = 0;
            continue;
        }

        for (i = 0; i < rx_pkts; i++)
            handle_packet(pkts[i]);

        need_flush = 1;
    }
}