Example #1
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 */
int
init(int argc, char *argv[])
{
	int retval;
	const struct rte_memzone *mz;
	uint8_t i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;
	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	/* initialise the nic drivers */
	retval = init_drivers();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot initialise drivers\n");

	/* get total number of ports */
	total_ports = rte_eth_dev_count();

	/* set up array for port data */
	mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports),
				rte_socket_id(), NO_FLAGS);
	if (mz == NULL)
		rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
	memset(mz->addr, 0, sizeof(*ports));
	ports = mz->addr;
	RTE_LOG(INFO, APP, "memzone address is 0x%" PRIx64 "\n", mz->phys_addr);

	/* set up array for statistics */
	mz = rte_memzone_reserve(MZ_STATS_INFO, VPORT_STATS_SIZE, rte_socket_id(), NO_FLAGS);
	if (mz == NULL)
		rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for statistics\n");
	memset(mz->addr, 0, VPORT_STATS_SIZE);
	vport_stats = mz->addr;

	/* set up array for flow table data */
	mz = rte_memzone_reserve(MZ_FLOW_TABLE, sizeof(*flow_table),
				rte_socket_id(), NO_FLAGS);
	if (mz == NULL)
		rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for flow table\n");
	memset(mz->addr, 0, sizeof(*flow_table));
	flow_table = mz->addr;

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* now initialise the ports we will use */
	for (i = 0; i < ports->num_ports; i++) {
		retval = init_port(ports->id[i]);
		if (retval != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
					(unsigned)i);
	}

	/* initialise the client queues/rings for inter process comms */
	init_shm_rings();

	/* initialise kni queues */
	init_kni();

	return 0;
}
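
Everything init() shares with the client processes lives in named memzones, which is what lets a secondary DPDK process attach to the same memory by name. A minimal sketch of that client-side lookup (not part of the original example; struct port_info and MZ_PORT_INFO are the type and name assumed by this app):

/* Hypothetical client-side counterpart, run in a secondary DPDK process. */
static int
init_client_port_info(struct port_info **ports_out)
{
	const struct rte_memzone *mz;

	/* look up the zone the primary process reserved in init() above */
	mz = rte_memzone_lookup(MZ_PORT_INFO);
	if (mz == NULL)
		return -1;	/* server has not initialised yet */
	*ports_out = mz->addr;	/* same shared memory as the server */
	return 0;
}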
Example #2
int
main(int argc, char **argv)
{
	int ret;
	unsigned nb_ports;
	unsigned int lcore_id, last_lcore_id, master_lcore_id;
	uint8_t port_id;
	uint8_t nb_ports_available;
	struct worker_thread_args worker_args = {NULL, NULL};
	struct send_thread_args send_args = {NULL, NULL};
	struct rte_ring *rx_to_workers;
	struct rte_ring *workers_to_tx;

	/* catch ctrl-c so we can print on exit */
	signal(SIGINT, int_handler);

	/* Initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	argc -= ret;
	argv += ret;

	/* Parse the application specific arguments */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;

	/* Check that we have enough cores */
	if (rte_lcore_count() < 3)
		rte_exit(EXIT_FAILURE, "Error, This application needs at "
				"least 3 logical cores to run:\n"
				"1 lcore for packet RX\n"
				"1 lcore for packet TX\n"
				"and at least 1 lcore for worker threads\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
	if (nb_ports != 1 && (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
				"when using a single port\n");

	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
			MBUF_POOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	nb_ports_available = nb_ports;

	/* initialize all ports */
	for (port_id = 0; port_id < nb_ports; port_id++) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			nb_ports_available--;
			continue;
		}
		/* init port */
		printf("Initializing port %u... done\n", (unsigned) port_id);

		if (configure_eth_port(port_id) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
					port_id);
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	/* Create rings for inter core communication */
	rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE, rte_socket_id(),
			RING_F_SP_ENQ);
	if (rx_to_workers == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE, rte_socket_id(),
			RING_F_SC_DEQ);
	if (workers_to_tx == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	if (!disable_reorder) {
		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
				REORDER_BUFFER_SIZE);
		if (send_args.buffer == NULL)
			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	}

	last_lcore_id   = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();

	worker_args.ring_in  = rx_to_workers;
	worker_args.ring_out = workers_to_tx;

	/* Start worker_thread() on all the available slave cores except the last one */
	for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id); lcore_id++)
		if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id)
			rte_eal_remote_launch(worker_thread, (void *)&worker_args,
					lcore_id);

	if (disable_reorder) {
		/* Start tx_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
				last_lcore_id);
	} else {
		send_args.ring_in = workers_to_tx;
		/* Start send_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)send_thread,
				(void *)&send_args, last_lcore_id);
	}

	/* Start rx_thread() on the master core */
	rx_thread(rx_to_workers);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	print_stats();
	return 0;
}
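
int_handler is registered above but not shown in this snippet. A minimal sketch of what it is assumed to do (raise a flag that the RX, worker and TX loops poll, so print_stats() runs on the normal exit path):

static volatile int quit_signal;

static void
int_handler(int sig_num)
{
	printf("Exiting on signal %d\n", sig_num);
	/* assumed: rx_thread, worker_thread and tx_thread poll this flag */
	quit_signal = 1;
}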
Example #3
static int
vmxnet3_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_memzone *mz;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	size_t size;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->nb_rx_queues > UINT8_MAX ||
	    dev->data->nb_tx_queues > UINT8_MAX)
		return -EINVAL;

	size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);

	if (size > UINT16_MAX)
		return -EINVAL;

	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;

	/*
	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
	 * on current socket
	 */
	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
			      "shared", rte_socket_id(), 8);

	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->shared = mz->addr;
	hw->sharedPA = mz->phys_addr;

	/*
	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
	 * on current socket
	 */
	mz = gpa_zone_reserve(dev, size, "queuedesc",
			      rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);

	hw->queueDescPA = mz->phys_addr;
	hw->queue_desc_len = (uint16_t)size;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {

		/* Allocate memory structure for UPT1_RSSConf and configure */
		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
				      rte_socket_id(), RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR,
				     "ERROR: Creating rss_conf structure zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);

		hw->rss_conf = mz->addr;
		hw->rss_confPA = mz->phys_addr;
	}

	return 0;
}
Example #4
/* Create a new flow table made of an rte_hash table and a fixed size
 * data array for storing values. Only supports IPv4 5-tuple lookups. */
struct onvm_ft*
onvm_ft_create(int cnt, int entry_size) {
        struct rte_hash* hash;
        struct onvm_ft* ft;
        struct rte_hash_parameters ipv4_hash_params = {
            .name = NULL,
            .entries = cnt,
            .key_len = sizeof(struct onvm_ft_ipv4_5tuple),
            .hash_func = NULL,
            .hash_func_init_val = 0,
        };

        char s[64];
        /* create ipv4 hash table. use core number and cycle counter to get a unique name. */
        ipv4_hash_params.name = s;
        ipv4_hash_params.socket_id = rte_socket_id();
        snprintf(s, sizeof(s), "onvm_ft_%d-%"PRIu64, rte_lcore_id(), rte_get_tsc_cycles());
        hash = rte_hash_create(&ipv4_hash_params);
        if (hash == NULL) {
                return NULL;
        }
        ft = (struct onvm_ft*)rte_calloc("table", 1, sizeof(struct onvm_ft), 0);
        if (ft == NULL) {
                rte_hash_free(hash);
                return NULL;
        }
        ft->hash = hash;
        ft->cnt = cnt;
        ft->entry_size = entry_size;
        /* Create data array for storing values */
        ft->data = rte_calloc("entry", cnt, entry_size, 0);
        if (ft->data == NULL) {
                rte_hash_free(hash);
                rte_free(ft);
                return NULL;
        }
        return ft;
}

/* Add an entry to the flow table and set data to point to the new value.
Returns:
 index in the array on success
 -EPROTONOSUPPORT if the packet is not IPv4.
 -EINVAL if the parameters are invalid.
 -ENOSPC if there is no space in the hash for this key.
*/
int
onvm_ft_add_pkt(struct onvm_ft* table, struct rte_mbuf *pkt, char** data) {
        int32_t tbl_index;
        struct onvm_ft_ipv4_5tuple key;
        int err;

        err = onvm_ft_fill_key(&key, pkt);
        if (err < 0) {
                return err;
        }
        tbl_index = rte_hash_add_key_with_hash(table->hash, (const void *)&key, pkt->hash.rss);
        if (tbl_index >= 0) {
                *data = &table->data[tbl_index * table->entry_size];
        }
        return tbl_index;
}
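
The two functions above are the whole fast path of this flow table: onvm_ft_create() sizes the hash and the value array together, and onvm_ft_add_pkt() hands back a pointer into that array. A minimal usage sketch (the flow_entry layout and the table size are illustrative assumptions, not from the original source):

struct flow_entry {
        uint64_t packet_count;
};

static struct onvm_ft *ft;

static void
count_pkt(struct rte_mbuf *pkt) {
        struct flow_entry *entry = NULL;
        int ret;

        if (ft == NULL) {
                /* 1024 flows, one counter per flow; sizes are arbitrary here */
                ft = onvm_ft_create(1024, sizeof(struct flow_entry));
                if (ft == NULL)
                        return;
        }
        /* index of the entry on success, negative errno as documented above */
        ret = onvm_ft_add_pkt(ft, pkt, (char **)&entry);
        if (ret < 0)
                return;
        entry->packet_count++;
}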
Example #5
static int
test_distributor_perf(void)
{
	static struct rte_distributor *ds;
	static struct rte_distributor *db;
	static struct rte_mempool *p;

	if (rte_lcore_count() < 2) {
		printf("ERROR: not enough cores to test distributor\n");
		return -1;
	}

	/* first, time how long it takes to round-trip a cache line */
	time_cache_line_switch();

	if (ds == NULL) {
		ds = rte_distributor_create("Test_perf", rte_socket_id(),
				rte_lcore_count() - 1,
				RTE_DIST_ALG_SINGLE);
		if (ds == NULL) {
			printf("Error creating distributor\n");
			return -1;
		}
	} else {
		rte_distributor_clear_returns(ds);
	}

	if (db == NULL) {
		db = rte_distributor_create("Test_burst", rte_socket_id(),
				rte_lcore_count() - 1,
				RTE_DIST_ALG_BURST);
		if (db == NULL) {
			printf("Error creating burst distributor\n");
			return -1;
		}
	} else {
		rte_distributor_clear_returns(db);
	}

	const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
			(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
	if (p == NULL) {
		p = rte_pktmbuf_pool_create("DPT_MBUF_POOL", nb_bufs, BURST,
			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
		if (p == NULL) {
			printf("Error creating mempool\n");
			return -1;
		}
	}

	printf("=== Performance test of distributor (single mode) ===\n");
	rte_eal_mp_remote_launch(handle_work, ds, SKIP_MASTER);
	if (perf_test(ds, p) < 0)
		return -1;
	quit_workers(ds, p);

	printf("=== Performance test of distributor (burst mode) ===\n");
	rte_eal_mp_remote_launch(handle_work, db, SKIP_MASTER);
	if (perf_test(db, p) < 0)
		return -1;
	quit_workers(db, p);

	return 0;
}
Example #6
int main(int argc, char *argv[])
{
	int err, ret;
	uint32_t i, pmsk;
	struct nmreq req;
	struct pollfd pollfd[MAX_PORT_NUM];
	struct rte_mempool *pool;
	struct netmap_ring *rx_ring, *tx_ring;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");

	argc -= ret;
	argv += ret;

	parse_args(argc, argv);

	if (ports.num == 0)
		rte_exit(EXIT_FAILURE, "no ports specified\n");

	if (rte_eth_dev_count() < 1)
		rte_exit(EXIT_FAILURE, "Not enough ethernet ports available\n");

	pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
		MBUF_DATA_SIZE, rte_socket_id());
	if (pool == NULL)
		rte_exit(EXIT_FAILURE, "Couldn't create mempool\n");

	netmap_conf.socket_id = rte_socket_id();
	err = rte_netmap_init(&netmap_conf);

	if (err < 0)
		rte_exit(EXIT_FAILURE,
			"Couldn't initialize librte_compat_netmap\n");
	else
		printf("librte_compat_netmap initialized\n");

	port_conf.pool = pool;
	port_conf.socket_id = rte_socket_id();

	for (i = 0; i != ports.num; i++) {

		err = rte_netmap_init_port(ports.p[i].id, &port_conf);
		if (err < 0)
			rte_exit(EXIT_FAILURE, "Couldn't setup port %hhu\n",
				ports.p[i].id);

		rte_eth_promiscuous_enable(ports.p[i].id);
	}

	for (i = 0; i != ports.num; i++) {

		err = netmap_port_open(i);
		if (err) {
			rte_exit(EXIT_FAILURE, "Couldn't set port %hhu "
				"under NETMAP control\n",
				ports.p[i].id);
		}
		else
			printf("Port %hhu now in Netmap mode\n", ports.p[i].id);
	}

	memset(pollfd, 0, sizeof(pollfd));

	for (i = 0; i != ports.num; i++) {
		pollfd[i].fd = ports.p[i].fd;
		pollfd[i].events = POLLIN | POLLOUT;
	}

	signal(SIGINT, sigint_handler);

	pmsk = ports.num - 1;

	printf("Bridge up and running!\n");

	while (!stop) {
		uint32_t n_pkts;

		pollfd[0].revents = 0;
		pollfd[1].revents = 0;

		ret = rte_netmap_poll(pollfd, ports.num, 0);
		if (ret < 0) {
			stop = 1;
			printf("[E] poll returned with error %d\n", ret);
		}

		if (((pollfd[0].revents | pollfd[1].revents) & POLLERR) != 0) {
			printf("POLLERR!\n");
		}

		if ((pollfd[0].revents & POLLIN) != 0 &&
				(pollfd[pmsk].revents & POLLOUT) != 0) {

			rx_ring = ports.p[0].rx_ring;
			tx_ring = ports.p[pmsk].tx_ring;

			n_pkts = RTE_MIN(rx_ring->avail, tx_ring->avail);
			move(n_pkts, rx_ring, tx_ring);
		}

		if (pmsk != 0 && (pollfd[pmsk].revents & POLLIN) != 0 &&
				(pollfd[0].revents & POLLOUT) != 0) {

			rx_ring = ports.p[pmsk].rx_ring;
			tx_ring = ports.p[0].tx_ring;

			n_pkts = RTE_MIN(rx_ring->avail, tx_ring->avail);
			move(n_pkts, rx_ring, tx_ring);
		}
	}

	printf("Bridge stopped!\n");

	for (i = 0; i != ports.num; i++) {
		err = rte_netmap_ioctl(ports.p[i].fd, NIOCUNREGIF, &req);
		if (err) {
			printf("[E] NIOCUNREGIF ioctl failed (error %d)\n",
				err);
		}
		else
			printf("Port %hhu unregistered from Netmap mode\n", ports.p[i].id);

		rte_netmap_close(ports.p[i].fd);
	}
	return 0;
}
Example #7
void
app_main_loop_pipeline_passthrough(void) {
	struct rte_pipeline_params pipeline_params = {
		.name = "pipeline",
		.socket_id = rte_socket_id(),
	};

	struct rte_pipeline *p;
	uint32_t port_in_id[APP_MAX_PORTS];
	uint32_t port_out_id[APP_MAX_PORTS];
	uint32_t table_id[APP_MAX_PORTS];
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing pass-through\n", core_id);

	/* Pipeline configuration */
	p = rte_pipeline_create(&pipeline_params);
	if (p == NULL)
		rte_panic("%s: Unable to configure the pipeline\n", __func__);

	/* Input port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_reader_params port_ring_params = {
			.ring = app.rings[core_params->swq_in[i]],
		};

		struct rte_pipeline_port_in_params port_params = {
			.ops = &rte_port_ring_reader_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.arg_ah = NULL,
			.burst_size = app.bsz_swq_rd,
		};

		if (rte_pipeline_port_in_create(p, &port_params,
			&port_in_id[i])) {
			rte_panic("%s: Unable to configure input port for "
				"ring %d\n", __func__, i);
		}
	}

	/* Output port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_writer_params port_ring_params = {
			.ring = app.rings[core_params->swq_out[i]],
			.tx_burst_sz = app.bsz_swq_wr,
		};

		struct rte_pipeline_port_out_params port_params = {
			.ops = &rte_port_ring_writer_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.f_action_bulk = NULL,
			.arg_ah = NULL,
		};

		if (rte_pipeline_port_out_create(p, &port_params,
			&port_out_id[i])) {
			rte_panic("%s: Unable to configure output port for "
				"ring %d\n", __func__, i);
		}
	}

	/* Table configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_stub_ops,
			.arg_create = NULL,
			.f_action_hit = NULL,
			.f_action_miss = NULL,
			.arg_ah = NULL,
			.action_data_size = 0,
		};

		if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
			rte_panic("%s: Unable to configure table %u\n",
				__func__, i);
	}

	/* Interconnecting ports and tables */
	for (i = 0; i < app.n_ports; i++) {
		if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
			table_id[i])) {
			rte_panic("%s: Unable to connect input port %u to "
				"table %u\n", __func__, port_in_id[i],
				table_id[i]);
		}
	}

	/* Add entries to tables */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_entry default_entry = {
			.action = RTE_PIPELINE_ACTION_PORT,
			{.port_id = port_out_id[i]},
		};

		struct rte_pipeline_table_entry *default_entry_ptr;

		if (rte_pipeline_table_default_entry_add(p, table_id[i],
			&default_entry, &default_entry_ptr))
			rte_panic("%s: Unable to add default entry to "
				"table %u\n", __func__, table_id[i]);
	}

	/* Enable input ports */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_enable(p, port_in_id[i]))
			rte_panic("Unable to enable input port %u\n",
				port_in_id[i]);

	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("%s: Pipeline consistency check failed\n", __func__);

	/* Run-time */
	for (i = 0; ; i++) {
		rte_pipeline_run(p);

		if ((i & APP_FLUSH) == 0)
			rte_pipeline_flush(p);
	}
}

void
app_main_loop_passthrough(void) {
	struct app_mbuf_array *m;
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing pass-through (no pipeline)\n",
		core_id);

	m = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (m == NULL)
		rte_panic("%s: cannot allocate buffer space\n", __func__);

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		int ret;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings[core_params->swq_in[i]],
			(void **) m->array,
			app.bsz_swq_rd);

		if (ret == -ENOENT)
			continue;

		do {
			ret = rte_ring_sp_enqueue_bulk(
				app.rings[core_params->swq_out[i]],
				(void **) m->array,
				app.bsz_swq_wr);
		} while (ret < 0);
	}
}
Example #8
static int
fs_eth_dev_create(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev;
	struct ether_addr *mac;
	struct fs_priv *priv;
	struct sub_device *sdev;
	const char *params;
	unsigned int socket_id;
	uint8_t i;
	int ret;

	dev = NULL;
	priv = NULL;
	socket_id = rte_socket_id();
	INFO("Creating fail-safe device on NUMA socket %u", socket_id);
	params = rte_vdev_device_args(vdev);
	if (params == NULL) {
		ERROR("This PMD requires sub-devices, none provided");
		return -1;
	}
	dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
	if (dev == NULL) {
		ERROR("Unable to allocate rte_eth_dev");
		return -1;
	}
	priv = PRIV(dev);
	priv->dev = dev;
	dev->dev_ops = &failsafe_ops;
	dev->data->mac_addrs = &PRIV(dev)->mac_addrs[0];
	dev->data->dev_link = eth_link;
	PRIV(dev)->nb_mac_addr = 1;
	TAILQ_INIT(&PRIV(dev)->flow_list);
	dev->rx_pkt_burst = (eth_rx_burst_t)&failsafe_rx_burst;
	dev->tx_pkt_burst = (eth_tx_burst_t)&failsafe_tx_burst;
	ret = fs_sub_device_alloc(dev, params);
	if (ret) {
		ERROR("Could not allocate sub_devices");
		goto free_dev;
	}
	ret = failsafe_args_parse(dev, params);
	if (ret)
		goto free_subs;
	ret = rte_eth_dev_owner_new(&priv->my_owner.id);
	if (ret) {
		ERROR("Failed to get unique owner identifier");
		goto free_args;
	}
	snprintf(priv->my_owner.name, sizeof(priv->my_owner.name),
		 FAILSAFE_OWNER_NAME);
	DEBUG("Failsafe port %u owner info: %s_%016"PRIX64, dev->data->port_id,
	      priv->my_owner.name, priv->my_owner.id);
	ret = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					    failsafe_eth_new_event_callback,
					    dev);
	if (ret) {
		ERROR("Failed to register NEW callback");
		goto free_args;
	}
	ret = failsafe_eal_init(dev);
	if (ret)
		goto unregister_new_callback;
	ret = fs_mutex_init(priv);
	if (ret)
		goto unregister_new_callback;
	ret = failsafe_hotplug_alarm_install(dev);
	if (ret) {
		ERROR("Could not set up plug-in event detection");
		goto unregister_new_callback;
	}
	mac = &dev->data->mac_addrs[0];
	if (failsafe_mac_from_arg) {
		/*
		 * If MAC address was provided as a parameter,
		 * apply to all probed slaves.
		 */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev),
							       mac);
			if (ret) {
				ERROR("Failed to set default MAC address");
				goto cancel_alarm;
			}
		}
	} else {
Example #9
/* Main function */
int main(int argc, char **argv)
{
        int ret;
        int i;

        /* Create handlers for SIGINT (CTRL+C closing) and SIGALRM (periodic stats printing) */
        signal(SIGINT, sig_handler);
        signal(SIGALRM, alarm_routine);

        /* Initialize the DPDK environment with the EAL args, then shift argc and argv to get the application parameters */
        ret = rte_eal_init(argc, argv);
        if (ret < 0) FATAL_ERROR("Cannot init EAL\n");
        argc -= ret;
        argv += ret;

        /* Check that this application has exactly two cores to use */
        ret = rte_lcore_count();
        if (ret != 2) PRINT_INFO("This application needs exactly two (2) cores.");

        /* Parse arguments */
        ret = parse_args(argc, argv);
        if (ret < 0) FATAL_ERROR("Wrong arguments\n");

        /* Probe PCI bus for ethernet devices, mandatory only in DPDK < 1.8.0 */
        #if RTE_VER_MAJOR == 1 && RTE_VER_MINOR < 8
                ret = rte_eal_pci_probe();
                if (ret < 0) FATAL_ERROR("Cannot probe PCI\n");
        #endif

        /* Get number of ethernet devices */
        nb_sys_ports = rte_eth_dev_count();
         //if (nb_sys_ports <= 0) FATAL_ERROR("Cannot find ETH devices\n");

        /* Create a mempool with a per-core cache, initializing every element to be used as an mbuf, allocated on the current NUMA node */
       // pktmbuf_pool = rte_mempool_create(MEMPOOL_NAME, buffer_size-1, MEMPOOL_ELEM_SZ, MEMPOOL_CACHE_SZ, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,rte_socket_id(), 0);
        pktmbuf_pool = rte_pktmbuf_pool_create(MEMPOOL_NAME,buffer_size-1, MEMPOOL_CACHE_SZ, 0, snaplen + RTE_PKTMBUF_HEADROOM, rte_socket_id());
        if (pktmbuf_pool == NULL)
                FATAL_ERROR("Cannot create cluster_mem_pool. Errno: %d [ENOMEM: %d, ENOSPC: %d, "
                        "E_RTE_NO_TAILQ: %d, E_RTE_NO_CONFIG: %d, E_RTE_SECONDARY: %d, "
                        "EINVAL: %d, EEXIST: %d]\n",
                        rte_errno, ENOMEM, ENOSPC, RTE_MAX_TAILQ /*E_RTE_NO_TAILQ*/,
                        E_RTE_NO_CONFIG, E_RTE_SECONDARY, EINVAL, EEXIST);

        /* Init the intermediate queue data structure: the ring */
        intermediate_ring = rte_ring_create(INTERMEDIATERING_NAME, buffer_size, rte_socket_id(),
                        RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (intermediate_ring == NULL) FATAL_ERROR("Cannot create ring");

        /* Operations needed for each ethernet device */
        for(i=0; i < nb_sys_ports; i++)
                init_port(i);

        /* Start Pcap  */
        //PcapStartUp();        

        /* Start producer and consumer routines on 2 different cores: producer launched first on the slave core... */
        ret = rte_eal_mp_remote_launch(main_loop_producer, NULL, SKIP_MASTER);
        if (ret != 0) FATAL_ERROR("Cannot start producer thread\n");
        /*RTE_LCORE_FOREACH_SLAVE(i) {
                if (rte_eal_wait_lcore(i) < 0)
                        return -1;
        }*/

        /* ... and then loop in consumer */
        //main_loop_producer ( NULL );
        main_loop_consumer(NULL);

        return 0;
}
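
main_loop_producer() and main_loop_consumer() are not shown in this snippet; the comments above imply their shape. A minimal sketch under those assumptions (port 0 and the burst size are illustrative, and the real consumer presumably dumps packets, e.g. to pcap, rather than just freeing them):

static int main_loop_producer(__rte_unused void *arg)
{
        struct rte_mbuf *pkts[32];
        uint16_t nb_rx, i;

        while (1) {
                nb_rx = rte_eth_rx_burst(0, 0, pkts, 32);
                for (i = 0; i < nb_rx; i++)
                        if (rte_ring_enqueue(intermediate_ring, pkts[i]) < 0)
                                rte_pktmbuf_free(pkts[i]); /* ring full: drop */
        }
        return 0;
}

static int main_loop_consumer(__rte_unused void *arg)
{
        void *pkt;

        while (1) {
                if (rte_ring_dequeue(intermediate_ring, &pkt) < 0)
                        continue; /* ring empty */
                /* the real consumer would process/dump the packet here */
                rte_pktmbuf_free((struct rte_mbuf *)pkt);
        }
        return 0;
}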
Example #10
int
test_rw_piot(int argc, char **argv)
{
  int ret;
  rw_piot_open_request_info_t req_param;
  rw_piot_open_response_info_t rsp_param;
  char devname[256];

  /* Init PIOT */
  ret = rw_piot_init(argc, argv, (void *)&my_instance, NULL);
  ASSERT(ret >= 0);
  printf("PIOT Init Successful\n");

  idx = rw_piot_dev_count();
  /* No devices opened yet */
  ASSERT(idx == 0);

  /* Open first Ring device */
  bzero(&req_param, sizeof(req_param));
  req_param.num_rx_queues = 1;
  req_param.num_tx_queues = 1;
  strcpy(devname, "eth_ring:name=ring0");
  piot_handle[idx] = rw_piot_open(devname, &req_param, &rsp_param);
  ASSERT(piot_handle[idx] != 0);
  ASSERT(rw_piot_dev_count() == 1);
  printf("PIOT device open %s - Successful\n", devname);


  /* Open second Ring device */
  idx++;
  bzero(&req_param, sizeof(req_param));
  req_param.num_rx_queues = 4;
  req_param.num_tx_queues = 4;
  strcpy(devname, "eth_ring:name=ring1");
  piot_handle[idx] = rw_piot_open(devname, &req_param, &rsp_param);
  ASSERT(piot_handle[idx] != 0);
  ASSERT(rw_piot_dev_count() == 2);
  printf("PIOT device open %s - Successful\n", devname);

  /* Open third Ring device */
  idx++;
  bzero(&req_param, sizeof(req_param));
  req_param.num_rx_queues = 16;
  req_param.num_tx_queues = 16;
  strcpy(devname, "eth_ring:name=ring2");
  piot_handle[idx] = rw_piot_open(devname, &req_param, &rsp_param);
  ASSERT(piot_handle[idx] != 0);
  ASSERT(rw_piot_dev_count() == 3);
  printf("PIOT device open %s - Successful\n", devname);

  mp = rte_mempool_create("mbuf_pool", NB_MBUF,
      MBUF_SIZE, 32,
      sizeof(struct rte_pktmbuf_pool_private),
      rte_pktmbuf_pool_init, NULL,
      rte_pktmbuf_init, NULL,
      rte_socket_id(), 0);

  ASSERT(mp);

  ASSERT(test_rw_piot_setup_queues() >= 0);
  printf("PIOT Queue setup Successful\n");

  ret = rw_piot_dev_start(piot_handle[0]);
  ASSERT(ret == 0);
  ret = rw_piot_dev_start(piot_handle[1]);
  ASSERT(ret == 0);
  ret = rw_piot_dev_start(piot_handle[2]);
  ASSERT(ret == 0);
  printf("PIOT device start Successful\n");

  ASSERT(test_rw_piot_send_packets() >= 0);
  printf("PIOT RX/TX  - Successful\n");

  ASSERT(test_get_stats() >= 0);
  printf("PIOT Get Stats  - Successful\n");

  ASSERT(test_stats_reset() >= 0);
  printf("PIOT Reset Stats  - Successful\n");

  rw_piot_dev_stop(piot_handle[0]);
  rw_piot_dev_stop(piot_handle[1]);
  rw_piot_dev_stop(piot_handle[2]);
  printf("PIOT Stopping Devices  - Successful\n");

  return 0;
}
Example #11
static int
fbk_hash_perf_test(void)
{
	struct rte_fbk_hash_params params = {
		.name = "fbk_hash_test",
		.entries = ENTRIES,
		.entries_per_bucket = 4,
		.socket_id = rte_socket_id(),
	};
	struct rte_fbk_hash_table *handle = NULL;
	uint32_t *keys = NULL;
	unsigned indexes[TEST_SIZE];
	uint64_t lookup_time = 0;
	unsigned added = 0;
	unsigned value = 0;
	uint32_t key;
	uint16_t val;
	unsigned i, j;

	handle = rte_fbk_hash_create(&params);
	if (handle == NULL) {
		printf("Error creating table\n");
		return -1;
	}

	keys = rte_zmalloc(NULL, ENTRIES * sizeof(*keys), 0);
	if (keys == NULL) {
		printf("fbk hash: memory allocation for key store failed\n");
		rte_fbk_hash_free(handle);
		return -1;
	}

	/* Generate random keys and values. */
	for (i = 0; i < ENTRIES; i++) {
		key = (uint32_t)rte_rand();
		val = (uint16_t)rte_rand();

		if (rte_fbk_hash_add_key(handle, key, val) == 0) {
			keys[added] = key;
			added++;
		}
		if (added > (LOAD_FACTOR * ENTRIES))
			break;
	}

	for (i = 0; i < TEST_ITERATIONS; i++) {
		uint64_t begin;
		uint64_t end;

		/* Generate random indexes into keys[] array. */
		for (j = 0; j < TEST_SIZE; j++)
			indexes[j] = rte_rand() % added;

		begin = rte_rdtsc();
		/* Do lookups */
		for (j = 0; j < TEST_SIZE; j++)
			value += rte_fbk_hash_lookup(handle, keys[indexes[j]]);

		end = rte_rdtsc();
		lookup_time += (double)(end - begin);
	}

	printf("\n\n *** FBK Hash function performance test results ***\n");
	/*
	 * The use of the 'value' variable ensures that the hash lookup is not
	 * being optimised out by the compiler.
	 */
	if (value != 0)
		printf("Number of ticks per lookup = %g\n",
			(double)lookup_time /
			((double)TEST_ITERATIONS * (double)TEST_SIZE));

	rte_fbk_hash_free(handle);
	rte_free(keys);

	return 0;
}

static int
test_hash_perf(void)
{
	unsigned with_pushes;

	for (with_pushes = 0; with_pushes <= 1; with_pushes++) {
		if (with_pushes == 0)
			printf("\nALL ELEMENTS IN PRIMARY LOCATION\n");
		else
			printf("\nELEMENTS IN PRIMARY OR SECONDARY LOCATION\n");
		if (run_all_tbl_perf_tests(with_pushes) < 0)
			return -1;
	}
	if (fbk_hash_perf_test() < 0)
		return -1;

	return 0;
}
Example #12
/*
 * Initialize a given port using default settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 * FIXME: Starting with assumption of one thread/core per port
 */
static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port,
                                     struct rte_mempool *rx_mbuf_pool,
                                     unsigned int mtu)
{
    int retval;

    /* Check for a valid port */
    if (port->id >= rte_eth_dev_count())
        return -ENODEV;

    /* Set up Ethernet device with defaults (1 RX ring, 1 TX ring) */
    /* FIXME: Check if hw_ip_checksum is possible */
    struct rte_eth_conf port_conf = {
        .rxmode = {
            .max_rx_pkt_len = mtu,
            .jumbo_frame = 1,
            .hw_ip_checksum = 1,
        }
    };
    retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf);
    if (retval != 0)
        return retval;

    retval = rte_eth_rx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL, rx_mbuf_pool);
    if (retval < 0)
        return retval;

    retval = rte_eth_tx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL);
    if (retval < 0)
        goto port_init_fail;

    /* Create the hash table for the RX sockets */
    char name[32];
    snprintf(name, sizeof(name), "rx_table_%u", port->id);
    struct rte_hash_parameters hash_params = {
        .name = name,
        .entries = UHD_DPDK_MAX_SOCKET_CNT,
        .key_len = sizeof(struct uhd_dpdk_ipv4_5tuple),
        .hash_func = NULL,
        .hash_func_init_val = 0,
    };
    port->rx_table = rte_hash_create(&hash_params);
    if (port->rx_table == NULL) {
        retval = rte_errno;
        goto port_init_fail;
    }

    /* Create ARP table */
    snprintf(name, sizeof(name), "arp_table_%u", port->id);
    hash_params.name = name;
    hash_params.entries = UHD_DPDK_MAX_SOCKET_CNT;
    hash_params.key_len = sizeof(uint32_t);
    hash_params.hash_func = NULL;
    hash_params.hash_func_init_val = 0;
    port->arp_table = rte_hash_create(&hash_params);
    if (port->arp_table == NULL) {
        retval = rte_errno;
        goto free_rx_table;
    }

    /* Set up list for TX queues */
    LIST_INIT(&port->txq_list);

    /* Start the Ethernet port. */
    retval = rte_eth_dev_start(port->id);
    if (retval < 0) {
        goto free_arp_table;
    }

    /* Display the port MAC address. */
    rte_eth_macaddr_get(port->id, &port->mac_addr);
    RTE_LOG(INFO, EAL, "Port %u MAC: %02x %02x %02x %02x %02x %02x\n",
                (unsigned)port->id,
                port->mac_addr.addr_bytes[0], port->mac_addr.addr_bytes[1],
                port->mac_addr.addr_bytes[2], port->mac_addr.addr_bytes[3],
                port->mac_addr.addr_bytes[4], port->mac_addr.addr_bytes[5]);

    struct rte_eth_link link;
    rte_eth_link_get(port->id, &link);
    RTE_LOG(INFO, EAL, "Port %u UP: %d\n", port->id, link.link_status);

    return 0;

free_arp_table:
    rte_hash_free(port->arp_table);
free_rx_table:
    rte_hash_free(port->rx_table);
port_init_fail:
    return rte_errno;
}

static int uhd_dpdk_thread_init(struct uhd_dpdk_thread *thread, unsigned int id)
{
    if (!ctx || !thread)
        return -EINVAL;

    unsigned int socket_id = rte_lcore_to_socket_id(id);
    thread->id = id;
    thread->rx_pktbuf_pool = ctx->rx_pktbuf_pools[socket_id];
    thread->tx_pktbuf_pool = ctx->tx_pktbuf_pools[socket_id];
    LIST_INIT(&thread->port_list);

    char name[32];
    snprintf(name, sizeof(name), "sockreq_ring_%u", id);
    thread->sock_req_ring = rte_ring_create(
                               name,
                               UHD_DPDK_MAX_PENDING_SOCK_REQS,
                               socket_id,
                               RING_F_SC_DEQ
                            );
    if (!thread->sock_req_ring)
        return -ENOMEM;
    return 0;
}


int uhd_dpdk_init(int argc, char **argv, unsigned int num_ports,
                  int *port_thread_mapping, int num_mbufs, int mbuf_cache_size,
                  int mtu)
{
    /* Init context only once */
    if (ctx)
        return 1;

    if ((num_ports == 0) || (port_thread_mapping == NULL)) {
        return -EINVAL;
    }

    /* Grabs arguments intended for DPDK's EAL */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    ctx = (struct uhd_dpdk_ctx *) rte_zmalloc_socket("uhd_dpdk_ctx", sizeof(*ctx), 0, rte_socket_id());
    if (!ctx)
        return -ENOMEM;

    ctx->num_threads = rte_lcore_count();
    if (ctx->num_threads <= 1)
        rte_exit(EXIT_FAILURE, "Error: No worker threads enabled\n");

    /* Check that we have ports to send/receive on */
    ctx->num_ports = rte_eth_dev_count();
    if (ctx->num_ports < 1)
        rte_exit(EXIT_FAILURE, "Error: Found no ports\n");
    if (ctx->num_ports < num_ports)
        rte_exit(EXIT_FAILURE, "Error: User requested more ports than available\n");

    /* Get memory for thread and port data structures */
    ctx->threads = rte_zmalloc("uhd_dpdk_thread", RTE_MAX_LCORE*sizeof(struct uhd_dpdk_thread), 0);
    if (!ctx->threads)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for thread data\n");
    ctx->ports = rte_zmalloc("uhd_dpdk_port", ctx->num_ports*sizeof(struct uhd_dpdk_port), 0);
    if (!ctx->ports)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for port data\n");

    /* Initialize the thread data structures */
    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        /* Do one mempool of RX/TX per socket */
        unsigned int socket_id = rte_lcore_to_socket_id(i);
        /* FIXME Probably want to take into account actual number of ports per socket */
        if (ctx->tx_pktbuf_pools[socket_id] == NULL) {
            /* Creates a new mempool in memory to hold the mbufs.
             * This is done for each CPU socket
             */
            const int mbuf_size = mtu + 2048 + RTE_PKTMBUF_HEADROOM;
            char name[32];
            snprintf(name, sizeof(name), "rx_mbuf_pool_%u", socket_id);
            ctx->rx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            snprintf(name, sizeof(name), "tx_mbuf_pool_%u", socket_id);
            ctx->tx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            if ((ctx->rx_pktbuf_pools[socket_id]== NULL) ||
                (ctx->tx_pktbuf_pools[socket_id]== NULL))
                rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
        }

        if (uhd_dpdk_thread_init(&ctx->threads[i], i) < 0)
            rte_exit(EXIT_FAILURE, "Error initializing thread %i\n", i);
    }

    unsigned master_lcore = rte_get_master_lcore();

    /* Assign ports to threads and initialize the port data structures */
    for (unsigned int i = 0; i < num_ports; i++) {
        int thread_id = port_thread_mapping[i];
        if (thread_id < 0)
            continue;
        if (((unsigned int) thread_id) == master_lcore)
            RTE_LOG(WARNING, EAL, "User requested master lcore for port %u\n", i);
        if (ctx->threads[thread_id].id != (unsigned int) thread_id)
            rte_exit(EXIT_FAILURE, "Requested inactive lcore %u for port %u\n", (unsigned int) thread_id, i);

        struct uhd_dpdk_port *port = &ctx->ports[i];
        port->id = i;
        port->parent = &ctx->threads[thread_id];
        ctx->threads[thread_id].num_ports++;
        LIST_INSERT_HEAD(&ctx->threads[thread_id].port_list, port, port_entry);

        /* Initialize port. */
        if (uhd_dpdk_port_init(port, port->parent->rx_pktbuf_pool, mtu) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
                    i);
    }

    RTE_LOG(INFO, EAL, "Init DONE!\n");

    /* FIXME: Create functions to do this */
    RTE_LOG(INFO, EAL, "Starting I/O threads!\n");

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];
        if (!LIST_EMPTY(&t->port_list)) {
            rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, ctx->threads[i].id);
        }
    }
    return 0;
}

/* FIXME: This will be changed once we have functions to handle the threads */
int uhd_dpdk_destroy(void)
{
    if (!ctx)
        return -ENODEV;

    struct uhd_dpdk_config_req *req = (struct uhd_dpdk_config_req *) rte_zmalloc(NULL, sizeof(*req), 0);
    if (!req)
        return -ENOMEM;

    req->req_type = UHD_DPDK_LCORE_TERM;

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];

        if (LIST_EMPTY(&t->port_list))
            continue;

        if (rte_eal_get_lcore_state(t->id) == FINISHED)
            continue;

        pthread_mutex_init(&req->mutex, NULL);
        pthread_cond_init(&req->cond, NULL);
        pthread_mutex_lock(&req->mutex);
        if (rte_ring_enqueue(t->sock_req_ring, req)) {
            pthread_mutex_unlock(&req->mutex);
            RTE_LOG(ERR, USER2, "Failed to terminate thread %d\n", i);
            rte_free(req);
            return -ENOSPC;
        }
        struct timespec timeout = {
            .tv_sec = 1,
            .tv_nsec = 0
        };
        pthread_cond_timedwait(&req->cond, &req->mutex, &timeout);
        pthread_mutex_unlock(&req->mutex);
    }

    rte_free(req);
    return 0;
}
Example #13
void
app_main_loop_worker_pipeline_lpm_ipv6(void) {
	struct rte_pipeline_params pipeline_params = {
		.name = "pipeline",
		.socket_id = rte_socket_id(),
	};

	struct rte_pipeline *p;
	uint32_t port_in_id[APP_MAX_PORTS];
	uint32_t port_out_id[APP_MAX_PORTS];
	uint32_t table_id;
	uint32_t i;

	RTE_LOG(INFO, USER1,
		"Core %u is doing work (pipeline with IPv6 LPM table)\n",
		rte_lcore_id());

	/* Pipeline configuration */
	p = rte_pipeline_create(&pipeline_params);
	if (p == NULL)
		rte_panic("Unable to configure the pipeline\n");

	/* Input port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_reader_params port_ring_params = {
			.ring = app.rings_rx[i],
		};

		struct rte_pipeline_port_in_params port_params = {
			.ops = &rte_port_ring_reader_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.arg_ah = NULL,
			.burst_size = app.burst_size_worker_read,
		};

		if (rte_pipeline_port_in_create(p, &port_params,
			&port_in_id[i]))
			rte_panic("Unable to configure input port for "
				"ring %d\n", i);
	}

	/* Output port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_writer_params port_ring_params = {
			.ring = app.rings_tx[i],
			.tx_burst_sz = app.burst_size_worker_write,
		};

		struct rte_pipeline_port_out_params port_params = {
			.ops = &rte_port_ring_writer_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.f_action_bulk = NULL,
			.arg_ah = NULL,
		};

		if (rte_pipeline_port_out_create(p, &port_params,
			&port_out_id[i]))
			rte_panic("Unable to configure output port for "
				"ring %d\n", i);
	}

	/* Table configuration */
	{
		struct rte_table_lpm_ipv6_params table_lpm_ipv6_params = {
			.name = "LPM",
			.n_rules = 1 << 24,
			.number_tbl8s = 1 << 21,
			.entry_unique_size =
				sizeof(struct rte_pipeline_table_entry),
			.offset = APP_METADATA_OFFSET(32),
		};

		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_lpm_ipv6_ops,
			.arg_create = &table_lpm_ipv6_params,
			.f_action_hit = NULL,
			.f_action_miss = NULL,
			.arg_ah = NULL,
			.action_data_size = 0,
		};

		if (rte_pipeline_table_create(p, &table_params, &table_id))
			rte_panic("Unable to configure the IPv6 LPM table\n");
	}

	/* Interconnecting ports and tables */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
			table_id))
			rte_panic("Unable to connect input port %u to "
				"table %u\n", port_in_id[i],  table_id);

	/* Add entries to tables */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_entry entry = {
			.action = RTE_PIPELINE_ACTION_PORT,
			{.port_id = port_out_id[i & (app.n_ports - 1)]},
		};

		struct rte_table_lpm_ipv6_key key;
		struct rte_pipeline_table_entry *entry_ptr;
		uint32_t ip;
		int key_found, status;

		memset(&key, 0, sizeof(key));
		key.depth = 8 + __builtin_popcount(app.n_ports - 1);

		ip = rte_bswap32(i << (24 -
			__builtin_popcount(app.n_ports - 1)));
		memcpy(key.ip, &ip, sizeof(uint32_t));

		printf("Adding rule to IPv6 LPM table (IPv6 destination = "
			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u => "
			"port out = %u)\n",
			key.ip[0], key.ip[1], key.ip[2], key.ip[3],
			key.ip[4], key.ip[5], key.ip[6], key.ip[7],
			key.ip[8], key.ip[9], key.ip[10], key.ip[11],
			key.ip[12], key.ip[13], key.ip[14], key.ip[15],
			key.depth, i);

		status = rte_pipeline_table_entry_add(p, table_id, &key, &entry,
			&key_found, &entry_ptr);
		if (status < 0)
			rte_panic("Unable to add entry to table %u (%d)\n",
				table_id, status);
	}

	/* Enable input ports */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_enable(p, port_in_id[i]))
			rte_panic("Unable to enable input port %u\n",
				port_in_id[i]);

	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("Pipeline consistency check failed\n");

	/* Run-time */
#if APP_FLUSH == 0
	for ( ; ; )
		rte_pipeline_run(p);
#else
	for (i = 0; ; i++) {
		rte_pipeline_run(p);

		if ((i & APP_FLUSH) == 0)
			rte_pipeline_flush(p);
	}
#endif
}
Example #14
static int
test_distributor(void)
{
	static struct rte_distributor *ds;
	static struct rte_distributor *db;
	static struct rte_distributor *dist[2];
	static struct rte_mempool *p;
	int i;

	if (rte_lcore_count() < 2) {
		printf("ERROR: not enough cores to test distributor\n");
		return -1;
	}

	if (db == NULL) {
		db = rte_distributor_create("Test_dist_burst", rte_socket_id(),
				rte_lcore_count() - 1,
				RTE_DIST_ALG_BURST);
		if (db == NULL) {
			printf("Error creating burst distributor\n");
			return -1;
		}
	} else {
		rte_distributor_flush(db);
		rte_distributor_clear_returns(db);
	}

	if (ds == NULL) {
		ds = rte_distributor_create("Test_dist_single",
				rte_socket_id(),
				rte_lcore_count() - 1,
			RTE_DIST_ALG_SINGLE);
		if (ds == NULL) {
			printf("Error creating single distributor\n");
			return -1;
		}
	} else {
		rte_distributor_flush(ds);
		rte_distributor_clear_returns(ds);
	}

	const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
			(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
	if (p == NULL) {
		p = rte_pktmbuf_pool_create("DT_MBUF_POOL", nb_bufs, BURST,
			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
		if (p == NULL) {
			printf("Error creating mempool\n");
			return -1;
		}
	}

	dist[0] = ds;
	dist[1] = db;

	for (i = 0; i < 2; i++) {

		worker_params.dist = dist[i];
		if (i)
			sprintf(worker_params.name, "burst");
		else
			sprintf(worker_params.name, "single");

		rte_eal_mp_remote_launch(handle_work,
				&worker_params, SKIP_MASTER);
		if (sanity_test(&worker_params, p) < 0)
			goto err;
		quit_workers(&worker_params, p);

		rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
				&worker_params, SKIP_MASTER);
		if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
			goto err;
		quit_workers(&worker_params, p);

		if (rte_lcore_count() > 2) {
			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
					&worker_params,
					SKIP_MASTER);
			if (sanity_test_with_worker_shutdown(&worker_params,
					p) < 0)
				goto err;
			quit_workers(&worker_params, p);

			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
					&worker_params,
					SKIP_MASTER);
			if (test_flush_with_worker_shutdown(&worker_params,
					p) < 0)
				goto err;
			quit_workers(&worker_params, p);

		} else {
			printf("Too few cores to run worker shutdown test\n");
		}

	}

	if (test_error_distributor_create_numworkers() == -1 ||
			test_error_distributor_create_name() == -1) {
		printf("rte_distributor_create parameter check tests failed");
		return -1;
	}

	return 0;

err:
	quit_workers(&worker_params, p);
	return -1;
}
Example #15
static int
paxos_rx_process(struct rte_mbuf *pkt, struct proposer* proposer)
{
    int ret = 0;
    uint8_t l4_proto = 0;
    uint16_t outer_header_len;
    union tunnel_offload_info info = { .data = 0 };
    struct udp_hdr *udp_hdr;
    struct paxos_hdr *paxos_hdr;
    struct ether_hdr *phdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);

    parse_ethernet(phdr, &info, &l4_proto);

    if (l4_proto != IPPROTO_UDP)
        return -1;

    udp_hdr = (struct udp_hdr *)((char *)phdr +
                                 info.outer_l2_len + info.outer_l3_len);

    /* if UDP dst port is not either PROPOSER or LEARNER port */
    if (!(udp_hdr->dst_port == rte_cpu_to_be_16(PROPOSER_PORT) ||
            udp_hdr->dst_port == rte_cpu_to_be_16(LEARNER_PORT)) &&
            (pkt->packet_type & RTE_PTYPE_TUNNEL_MASK) == 0)
        return -1;

    paxos_hdr = (struct paxos_hdr *)((char *)udp_hdr + sizeof(struct udp_hdr));

    if (rte_get_log_level() == RTE_LOG_DEBUG) {
        //rte_hexdump(stdout, "udp", udp_hdr, sizeof(struct udp_hdr));
        //rte_hexdump(stdout, "paxos", paxos_hdr, sizeof(struct paxos_hdr));
        print_paxos_hdr(paxos_hdr);
    }

    int value_len = rte_be_to_cpu_16(paxos_hdr->value_len);
    struct paxos_value *v = paxos_value_new((char *)paxos_hdr->paxosval, value_len);
    switch(rte_be_to_cpu_16(paxos_hdr->msgtype)) {
    case PAXOS_PROMISE: {
        struct paxos_promise promise = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_promise(proposer, &promise);
        break;
    }
    case PAXOS_ACCEPT: {
        if (first_time) {
            proposer_preexecute(proposer);
            first_time = false;
        }
        struct paxos_accept acpt = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_accept(proposer, &acpt);
        break;
    }
    case PAXOS_ACCEPTED: {
        struct paxos_accepted ack = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_accepted(proposer, &ack);
        break;
    }
    default:
        break;
    }
    outer_header_len = info.outer_l2_len + info.outer_l3_len
                       + sizeof(struct udp_hdr) + sizeof(struct paxos_hdr);

    rte_pktmbuf_adj(pkt, outer_header_len);

    return ret;

}

static uint16_t
add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
               struct rte_mbuf **pkts, uint16_t nb_pkts,
               uint16_t max_pkts __rte_unused, void *user_param)
{
    struct proposer* proposer = (struct proposer *)user_param;
    unsigned i;
    uint64_t now = rte_rdtsc();

    for (i = 0; i < nb_pkts; i++) {
        pkts[i]->udata64 = now;
        paxos_rx_process(pkts[i], proposer);
    }
    return nb_pkts;
}


static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool, struct proposer* proposer)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf *txconf;
    struct rte_eth_rxconf *rxconf;
    struct rte_eth_conf port_conf = port_conf_default;
    const uint16_t rx_rings = 1, tx_rings = 1;
    int retval;
    uint16_t q;

    rte_eth_dev_info_get(port, &dev_info);

    rxconf = &dev_info.default_rxconf;
    txconf = &dev_info.default_txconf;

    txconf->txq_flags &= PKT_TX_IPV4;
    txconf->txq_flags &= PKT_TX_UDP_CKSUM;
    if (port >= rte_eth_dev_count())
        return -1;

    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0)
        return retval;

    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
                                        rte_eth_dev_socket_id(port), rxconf, mbuf_pool);
        if (retval < 0)
            return retval;
    }

    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
                                        rte_eth_dev_socket_id(port), txconf);
        if (retval < 0)
            return retval;
    }

    retval = rte_eth_dev_start(port);
    if (retval < 0)
        return retval;

    struct ether_addr addr;
    rte_eth_macaddr_get(port, &addr);
    rte_eth_promiscuous_enable(port);

    rte_eth_add_rx_callback(port, 0, add_timestamps, proposer);
    rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
    return 0;
}


static void
lcore_main(uint8_t port, __rte_unused struct proposer *p)
{
    proposer_preexecute(p);

    for (;;) {
        // Check if signal is received
        if (force_quit)
            break;
        struct rte_mbuf *bufs[BURST_SIZE];
        const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
        if (unlikely(nb_rx == 0))
            continue;
        uint16_t buf;
        for (buf = 0; buf < nb_rx; buf++)
            rte_pktmbuf_free(bufs[buf]);
    }
}



static __attribute__((noreturn)) int
lcore_mainloop(__attribute__((unused)) void *arg)
{
    uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
    unsigned lcore_id;

    lcore_id = rte_lcore_id();

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_TIMER,
            "Starting mainloop on core %u\n", lcore_id);

    while(1) {
        cur_tsc = rte_rdtsc();
        diff_tsc = cur_tsc - prev_tsc;
        if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
            rte_timer_manage();
            prev_tsc = cur_tsc;
        }
    }
}

static void
report_stat(struct rte_timer *tim, __attribute((unused)) void *arg)
{
    /* print stat */
    uint32_t count = rte_atomic32_read(&stat);
    rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER8,
            "Throughput = %8u msg/s\n", count);
    /* reset stat */
    rte_atomic32_set(&stat, 0);
    /* this timer is automatically reloaded until we decide to stop it */
    if (force_quit)
        rte_timer_stop(tim);
}


static void
check_timeout(struct rte_timer *tim, void *arg)
{
    struct proposer* p = (struct proposer *) arg;
    unsigned lcore_id = rte_lcore_id();

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "%s() on lcore_id %i\n", __func__, lcore_id);

    struct paxos_message out;
    out.type = PAXOS_PREPARE;
    struct timeout_iterator* iter = proposer_timeout_iterator(p);
    while(timeout_iterator_prepare(iter, &out.u.prepare)) {
        rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
                "%s Send PREPARE inst %d ballot %d\n",
                __func__, out.u.prepare.iid, out.u.prepare.ballot);
        send_paxos_message(&out);
    }
    out.type = PAXOS_ACCEPT;
    while(timeout_iterator_accept(iter, &out.u.accept)) {
        rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
                "%s: Send ACCEPT inst %d ballot %d\n",
                __func__, out.u.accept.iid, out.u.accept.ballot);
        send_paxos_message(&out);
    }
    timeout_iterator_free(iter);

    /* this timer is automatically reloaded until we decide to stop it */
    if (force_quit)
        rte_timer_stop(tim);
}

int
main(int argc, char *argv[])
{
    uint8_t portid = 0;
    unsigned master_core, lcore_id;
    signal(SIGTERM, signal_handler);
    signal(SIGINT, signal_handler);
    force_quit = false;
    int proposer_id = 0;

    if (rte_get_log_level() == RTE_LOG_DEBUG) {
        paxos_config.verbosity = PAXOS_LOG_DEBUG;
    }

    struct proposer *proposer = proposer_new(proposer_id, NUM_ACCEPTORS);
    first_time = true;
    /* init EAL */
    int ret = rte_eal_init(argc, argv);

    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    /* init the RTE timer library, then the timer structures */
    rte_timer_subsystem_init();
    rte_timer_init(&timer);
    rte_timer_init(&stat_timer);

    /* load the timeout-check timer, every 1 s, on a slave lcore, reloaded automatically */
    uint64_t hz = rte_get_timer_hz();

    /* Call rte_timer_manage every 10ms */
    TIMER_RESOLUTION_CYCLES = hz / 100;
    rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER1, "Clock: %"PRIu64"\n", hz);

    /* master core */
    master_core = rte_lcore_id();
    /* slave core */
    lcore_id = rte_get_next_lcore(master_core, 0, 1);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER1, "lcore_id: %d\n", lcore_id);
    rte_timer_reset(&timer, hz, PERIODICAL, lcore_id, check_timeout, proposer);
    /* run the timer-management loop on that slave core */
    rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);

    /* stat core */
    lcore_id = rte_get_next_lcore(lcore_id , 0, 1);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER1, "lcore_id: %d\n", lcore_id);
    rte_timer_reset(&stat_timer, hz, PERIODICAL, lcore_id,
                    report_stat, NULL);

    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
                                        NUM_MBUFS, MBUF_CACHE_SIZE, 0,
                                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf_pool\n");
    /* run the timer-management loop on the stat core */
    rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);

    if (port_init(portid, mbuf_pool, proposer) != 0)
        rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8"\n", portid);


    lcore_main(portid, proposer);

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Free proposer\n");
    proposer_free(proposer);
    return 0;
}
Example #16
void
app_main_loop_worker_pipeline_acl(void) {
    struct rte_pipeline_params pipeline_params = {
        .name = "pipeline",
        .socket_id = rte_socket_id(),
    };

    struct rte_pipeline *p;
    uint32_t port_in_id[APP_MAX_PORTS];
    uint32_t port_out_id[APP_MAX_PORTS];
    uint32_t table_id;
    uint32_t i;

    RTE_LOG(INFO, USER1,
            "Core %u is doing work (pipeline with ACL table)\n",
            rte_lcore_id());

    /* Pipeline configuration */
    p = rte_pipeline_create(&pipeline_params);
    if (p == NULL)
        rte_panic("Unable to configure the pipeline\n");

    /* Input port configuration */
    for (i = 0; i < app.n_ports; i++) {
        struct rte_port_ring_reader_params port_ring_params = {
            .ring = app.rings_rx[i],
        };

        struct rte_pipeline_port_in_params port_params = {
            .ops = &rte_port_ring_reader_ops,
            .arg_create = (void *) &port_ring_params,
            .f_action = NULL,
            .arg_ah = NULL,
            .burst_size = app.burst_size_worker_read,
        };

        if (rte_pipeline_port_in_create(p, &port_params,
                                        &port_in_id[i]))
            rte_panic("Unable to configure input port for "
                      "ring %d\n", i);
    }

    /* Output port configuration */
    for (i = 0; i < app.n_ports; i++) {
        struct rte_port_ring_writer_params port_ring_params = {
            .ring = app.rings_tx[i],
            .tx_burst_sz = app.burst_size_worker_write,
        };

        struct rte_pipeline_port_out_params port_params = {
            .ops = &rte_port_ring_writer_ops,
            .arg_create = (void *) &port_ring_params,
            .f_action = NULL,
            .arg_ah = NULL,
        };

        if (rte_pipeline_port_out_create(p, &port_params,
                                         &port_out_id[i]))
            rte_panic("Unable to configure output port for "
                      "ring %d\n", i);
    }

    /* Table configuration */
    {
        struct rte_table_acl_params table_acl_params = {
            .name = "test", /* unique identifier for acl contexts */
            .n_rules = 1 << 5,
            .n_rule_fields = DIM(ipv4_field_formats),
        };

        /* Copy in the rule meta-data defined above into the params */
        memcpy(table_acl_params.field_format, ipv4_field_formats,
               sizeof(ipv4_field_formats));

        struct rte_pipeline_table_params table_params = {
            .ops = &rte_table_acl_ops,
            .arg_create = &table_acl_params,
            .f_action_hit = NULL,
            .f_action_miss = NULL,
            .arg_ah = NULL,
            .action_data_size = 0,
        };

        if (rte_pipeline_table_create(p, &table_params, &table_id))
            rte_panic("Unable to configure the ACL table\n");
    }

    /* Interconnecting ports and tables */
    for (i = 0; i < app.n_ports; i++)
        if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
                table_id))
            rte_panic("Unable to connect input port %u to "
                      "table %u\n", port_in_id[i],  table_id);

    /* Add entries to tables */
    for (i = 0; i < app.n_ports; i++) {
        struct rte_pipeline_table_entry table_entry = {
            .action = RTE_PIPELINE_ACTION_PORT,
            {.port_id = port_out_id[i & (app.n_ports - 1)]},
        };
        struct rte_table_acl_rule_add_params rule_params;
        struct rte_pipeline_table_entry *entry_ptr;
        int key_found, ret;

        memset(&rule_params, 0, sizeof(rule_params));

        /* Set the rule values */
        rule_params.field_value[SRC_FIELD_IPV4].value.u32 = 0;
        rule_params.field_value[SRC_FIELD_IPV4].mask_range.u32 = 0;
        rule_params.field_value[DST_FIELD_IPV4].value.u32 =
            i << (24 - __builtin_popcount(app.n_ports - 1));
        rule_params.field_value[DST_FIELD_IPV4].mask_range.u32 =
            8 + __builtin_popcount(app.n_ports - 1);
        rule_params.field_value[SRCP_FIELD_IPV4].value.u16 = 0;
        rule_params.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
            UINT16_MAX;
        rule_params.field_value[DSTP_FIELD_IPV4].value.u16 = 0;
        rule_params.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
            UINT16_MAX;
        rule_params.field_value[PROTO_FIELD_IPV4].value.u8 = 0;
        rule_params.field_value[PROTO_FIELD_IPV4].mask_range.u8 = 0;

        rule_params.priority = 0;

        uint32_t dst_addr = rule_params.field_value[DST_FIELD_IPV4].
                            value.u32;
        uint32_t dst_mask =
            rule_params.field_value[DST_FIELD_IPV4].mask_range.u32;

        printf("Adding rule to ACL table (IPv4 destination = "
               "%u.%u.%u.%u/%u => port out = %u)\n",
               (dst_addr & 0xFF000000) >> 24,
               (dst_addr & 0x00FF0000) >> 16,
               (dst_addr & 0x0000FF00) >> 8,
               dst_addr & 0x000000FF,
               dst_mask,
               table_entry.port_id);

        /* For ACL, add needs an rte_table_acl_rule_add_params struct */
        ret = rte_pipeline_table_entry_add(p, table_id, &rule_params,
                                           &table_entry, &key_found, &entry_ptr);
        if (ret < 0)
            rte_panic("Unable to add entry to table %u (%d)\n",
                      table_id, ret);
    }

    /* Enable input ports */
    for (i = 0; i < app.n_ports; i++)
        if (rte_pipeline_port_in_enable(p, port_in_id[i]))
            rte_panic("Unable to enable input port %u\n",
                      port_in_id[i]);

    /* Check pipeline consistency */
    if (rte_pipeline_check(p) < 0)
        rte_panic("Pipeline consistency check failed\n");

    /* Run-time */
#if APP_FLUSH == 0
    for ( ; ; )
        rte_pipeline_run(p);
#else
    for (i = 0; ; i++) {
        rte_pipeline_run(p);

        if ((i & APP_FLUSH) == 0)
            rte_pipeline_flush(p);
    }
#endif
}
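To make the rule arithmetic above concrete: with app.n_ports = 4 (a power of two, which the indexing with n_ports - 1 already assumes), __builtin_popcount(3) is 2, so port i receives destination prefix i << 22 with prefix length 8 + 2 = 10. A tiny standalone check (hypothetical values, not part of the application):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const unsigned n_ports = 4;  /* assumed power of two */
    const unsigned shift = 24 - __builtin_popcount(n_ports - 1);
    const unsigned plen = 8 + __builtin_popcount(n_ports - 1);
    unsigned i;

    /* prints 0.0.0.0/10, 0.64.0.0/10, 0.128.0.0/10, 0.192.0.0/10 */
    for (i = 0; i < n_ports; i++) {
        uint32_t dst = (uint32_t)i << shift;
        printf("port %u: %u.%u.%u.%u/%u\n", i,
               (dst >> 24) & 0xFF, (dst >> 16) & 0xFF,
               (dst >> 8) & 0xFF, dst & 0xFF, plen);
    }
    return 0;
}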
Exemple #17
0
/**
 * Set up the DPDK rings which will be used to pass packets, via
 * pointers, between the multi-process server and client processes.
 * Each client needs one RX queue.
 */
static int
init_shm_rings(void)
{
    unsigned i;
    unsigned socket_id;
    const char * q_name;

#ifdef INTERRUPT_FIFO
    const char * fifo_name;
#endif

#ifdef INTERRUPT_SEM
    const char * sem_name;
    sem_t *mutex;
#endif

#if defined(INTERRUPT_FIFO) || defined(INTERRUPT_SEM)

#ifdef DPDK_FLAG
    const char *irq_flag_name;
    const unsigned flagsize = 4;
#else
    key_t key;
    int shmid;
    char *shm;
#endif

#endif

    const unsigned ringsize = CLIENT_QUEUE_RINGSIZE;

    clients = rte_malloc("client details",
                         sizeof(*clients) * num_clients, 0);
    if (clients == NULL)
        rte_exit(EXIT_FAILURE, "Cannot allocate memory for client program details\n");

    for (i = 0; i < num_clients; i++) {
        /* Create an RX queue for each client */
        socket_id = rte_socket_id();
        q_name = get_rx_queue_name(i);
        clients[i].rx_q = rte_ring_create(q_name,
                                          ringsize, socket_id,
                                          RING_F_SP_ENQ | RING_F_SC_DEQ); /* single prod, single cons */
        if (clients[i].rx_q == NULL)
            rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for client %u\n", i);

        /* sanity check: a freshly created ring must be empty */
        unsigned ring_cur_entries = rte_ring_count(clients[i].rx_q);
        unsigned ring_free_entries = rte_ring_free_count(clients[i].rx_q);
        fprintf(stderr, "ring_cur_entries=%u, ring_free_entries=%u\n",
                ring_cur_entries, ring_free_entries);

        /* create a named FIFO per client for interrupt signalling */
#ifdef INTERRUPT_FIFO
        umask(0);
        fifo_name = get_fifo_name(i);
        clients[i].fifo_name = fifo_name;
        mknod(fifo_name, S_IFIFO|0666, 0);

        clients[i].fifo_fp = fopen(fifo_name, "w");
        if(clients[i].fifo_fp == NULL) {
            fprintf(stderr, "can not create FIFO for client %d\n", i);
            exit(1);
        }
#endif

#ifdef INTERRUPT_SEM
        sem_name = get_sem_name(i);
        clients[i].sem_name = sem_name;

        fprintf(stderr, "sem_name=%s for client %d\n", sem_name, i);
        mutex = sem_open(sem_name, O_CREAT, 0666, 0);
        if(mutex == SEM_FAILED) {
            fprintf(stderr, "can not create semaphore for client %d\n", i);
            sem_unlink(sem_name);
            exit(1);
        }
        clients[i].mutex = mutex;
#endif

#if defined(INTERRUPT_FIFO) || defined(INTERRUPT_SEM)

#ifdef DPDK_FLAG
        irq_flag_name = get_irq_flag_name(i);
        clients[i].irq_flag = (int *)rte_ring_create(irq_flag_name,
                              flagsize, socket_id,
                              RING_F_SP_ENQ | RING_F_SC_DEQ ); /* single prod, single cons */
#else
        key = get_rx_shmkey(i);
        if ((shmid = shmget(key, SHMSZ, IPC_CREAT | 0666)) < 0) {
            fprintf(stderr, "can not create the shared memory segment for client %d\n", i);
            exit(1);
        }

        if ((shm = shmat(shmid, NULL, 0)) == (char *) -1) {
            fprintf(stderr, "can not attach the shared segment to the server space for client %d\n", i);
            exit(1);
        }

        clients[i].shm_server = (int *)shm;
#endif

#endif
    }
    return 0;
}
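get_rx_queue_name() is not shown in this excerpt; in the stock DPDK multi-process example it simply formats a per-client ring name. A sketch under that assumption (the exact name template is an assumption and may differ):

/* assumed name template, mirroring the stock multi-process example */
#define MP_CLIENT_RXQ_NAME "MProc_Client_%u_RX"

static inline const char *
get_rx_queue_name(unsigned id)
{
    /* rte_ring_create() copies the name, so a static buffer is enough */
    static char buffer[sizeof(MP_CLIENT_RXQ_NAME) + 10];

    snprintf(buffer, sizeof(buffer), MP_CLIENT_RXQ_NAME, id);
    return buffer;
}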
Exemple #18
0
int dpdk_init(void)
{
	int ret;
	/* -m stands for memory in MBs that DPDK will allocate. Must be enough
	 * to accommodate the pool_size defined below. */
	char *argv[] = { "./ix", "-m", "148" };
	const int pool_buffer_size = 0;
	const int pool_cache_size = 0;
	/* pool_size sets an implicit limit on cores * NICs that DPDK allows */
	const int pool_size = 32768;

	optind = 0;
	internal_config.no_hugetlbfs = 1;
	ret = rte_eal_init(sizeof(argv) / sizeof(argv[0]), argv);
	if (ret < 0)
		return ret;

	dpdk_pool = rte_pktmbuf_pool_create("mempool", pool_size, pool_cache_size, 0, pool_buffer_size, rte_socket_id());
	if (dpdk_pool == NULL)
		panic("Cannot create DPDK pool\n");

	return 0;
}
Exemple #19
0
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 */
int
init(int argc, char *argv[])
{
	int retval;
	const struct rte_memzone *mz;
	uint8_t i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;

	/* get total number of ports */
	total_ports = rte_eth_dev_count();

	/* set up array for port data */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
	{
		mz = rte_memzone_lookup(VM_MZ_PORT_INFO);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
		ports = mz->addr;
	}
	else
	{
		mz = rte_memzone_reserve(VM_MZ_PORT_INFO, sizeof(*ports),
					rte_socket_id(), NO_FLAGS);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
		memset(mz->addr, 0, sizeof(*ports));
		ports = mz->addr;
	}

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* now initialise the ports we will use (primary process only) */
	if (rte_eal_process_type() != RTE_PROC_SECONDARY)
	{
		for (i = 0; i < ports->num_ports; i++) {
			retval = init_port(ports->id[i]);
			if (retval != 0)
				rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
						(unsigned)i);
		}
	}
	check_all_ports_link_status(ports->num_ports, (~0x0));

	/* initialise the client queues/rings for inter-process comms
	init_shm_rings();
	*/

	return 0;
}
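The lookup-in-secondary / reserve-in-primary branching above is a standard DPDK multi-process idiom and recurs for every shared structure. It can be factored into a small helper, sketched here with the same NO_FLAGS constant and error handling as the surrounding code:

static void *
memzone_get(const char *name, size_t len)
{
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		/* the primary has already reserved and zeroed the zone */
		mz = rte_memzone_lookup(name);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot look up memzone %s\n", name);
	} else {
		mz = rte_memzone_reserve(name, len, rte_socket_id(), NO_FLAGS);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot reserve memzone %s\n", name);
		memset(mz->addr, 0, len);
	}
	return mz->addr;
}

With this, the port-info setup above collapses to ports = memzone_get(VM_MZ_PORT_INFO, sizeof(*ports));.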
Exemple #20
0
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 */
int
init(int argc, char *argv[])
{
	int retval;
	const struct rte_memzone *mz;
	unsigned i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;

	/* get total number of ports */
	total_ports = rte_eth_dev_count();

	/* set up array for port data */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
	{
		mz = rte_memzone_lookup(MZ_PORT_INFO);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
		ports = mz->addr;
	}
	else /* RTE_PROC_PRIMARY */
	{
		mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports),
					rte_socket_id(), NO_FLAGS);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
		memset(mz->addr, 0, sizeof(*ports));
		ports = mz->addr;
	}

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* now initialise the ports we will use */	
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
	{
		for (i = 0; i < ports->num_ports; i++) {
			retval = init_port(ports->id[i]);
			if (retval != 0)
				rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
						(unsigned)i);
		}
	}
	check_all_ports_link_status(ports->num_ports, (~0x0));

	/* initialise the client queues/rings for inter-process comms */
	init_shm_rings();

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
	{
		RTE_LOG(INFO, APP, "HOST SHARE MEM Init.\n");
		/* create metadata, output cmdline 
		if (rte_hostshmem_metadata_create(HOSTSHMEM_METADATA_NAME) < 0)
			rte_exit(EXIT_FAILURE, "Cannot create HOSTSHMEM metadata\n");
		if (rte_hostshmem_metadata_add_memzone(mz, HOSTSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add memzone to HOSTSHMEM metadata\n");					
		if (rte_hostshmem_metadata_add_mempool(pktmbuf_pool, HOSTSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to HOSTSHMEM metadata\n");	
		for (i = 0; i < num_clients; i++) 
		{
			if (rte_hostshmem_metadata_add_ring(clients[i].rx_q,
					HOSTSHMEM_METADATA_NAME) < 0)
				rte_exit(EXIT_FAILURE, "Cannot add ring client %d to HOSTSHMEM metadata\n", i);
		}
		generate_hostshmem_cmdline(HOSTSHMEM_METADATA_NAME);
		*/
		
		const struct rte_mem_config *mcfg;
		/* get pointer to global configuration */
		mcfg = rte_eal_get_configuration()->mem_config;

		for (i = 0; i < RTE_MAX_MEMSEG; i++) {
			if (mcfg->memseg[i].addr == NULL)
				break;

			printf("Segment %u: phys:0x%"PRIx64", len:%zu, "
				   "virt:%p, socket_id:%"PRId32", "
				   "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
				   "nrank:%"PRIx32"\n", i,
				   mcfg->memseg[i].phys_addr,
				   mcfg->memseg[i].len,
				   mcfg->memseg[i].addr,
				   mcfg->memseg[i].socket_id,
				   mcfg->memseg[i].hugepage_sz,
				   mcfg->memseg[i].nchannel,
				   mcfg->memseg[i].nrank);
		}
		
		RTE_LOG(INFO, APP, "HOST SHARE MEM Init. done\n");
		
		RTE_LOG(INFO, APP, "IV SHARE MEM Init.\n");
		/* create metadata, output cmdline */
		if (rte_ivshmem_metadata_create(IVSHMEM_METADATA_NAME) < 0)
			rte_exit(EXIT_FAILURE, "Cannot create IVSHMEM metadata\n");	
		if (rte_ivshmem_metadata_add_memzone(mz, IVSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add memzone to IVSHMEM metadata\n");					
		if (rte_ivshmem_metadata_add_mempool(pktmbuf_pool, IVSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to IVSHMEM metadata\n");				
		
		for (i = 0; i < num_clients; i++) 
		{
			if (rte_ivshmem_metadata_add_ring(clients[i].rx_q,
					IVSHMEM_METADATA_NAME) < 0)
				rte_exit(EXIT_FAILURE, "Cannot add ring client %d to IVSHMEM metadata\n", i);
		}
		generate_ivshmem_cmdline(IVSHMEM_METADATA_NAME);
		RTE_LOG(INFO, APP, "IV SHARE MEM Done.\n");
	}
	
	
	return 0;
}
/* Main function */
int main(int argc, char **argv)
{
	int ret;
	int i;

	/* Create handlers for SIGINT (Ctrl+C closing) and SIGALRM (periodic stats printing) */
	signal(SIGINT, sig_handler);
	signal(SIGALRM, alarm_routine);

	/* Initialize the DPDK environment with args, then shift argc and argv to get the application parameters */
	ret = rte_eal_init(argc, argv);
	if (ret < 0) FATAL_ERROR("Cannot init EAL\n");
	argc -= ret;
	argv += ret;

	/* Check that exactly one lcore is available */
	ret = rte_lcore_count();
	if (ret != 1) FATAL_ERROR("This application needs exactly 1 core.");

	/* Parse arguments */
	ret = parse_args(argc, argv);
	if (ret < 0) FATAL_ERROR("Wrong arguments\n");

	/* Probe PCI bus for ethernet devices, mandatory only in DPDK < 1.8.0 */
	#if RTE_VER_MAJOR == 1 && RTE_VER_MINOR < 8
		ret = rte_eal_pci_probe();
		if (ret < 0) FATAL_ERROR("Cannot probe PCI\n");
	#endif

	/* Get number of ethernet devices */
	nb_sys_ports = rte_eth_dev_count();
	if (nb_sys_ports <= 0) FATAL_ERROR("Cannot find ETH devices\n");
	
	/* Create a mempool with a per-core cache, initialising every element to be
	 * usable as an mbuf, allocated on the current NUMA node */
	pktmbuf_pool = rte_mempool_create(MEMPOOL_NAME, buffer_size-1,
			MEMPOOL_ELEM_SZ, MEMPOOL_CACHE_SZ,
			sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
			rte_socket_id(), 0);
	if (pktmbuf_pool == NULL)
		FATAL_ERROR("Cannot create cluster_mem_pool. Errno: %d [ENOMEM: %d, "
			"ENOSPC: %d, E_RTE_NO_TAILQ: %d, E_RTE_NO_CONFIG: %d, "
			"E_RTE_SECONDARY: %d, EINVAL: %d, EEXIST: %d]\n",
			rte_errno, ENOMEM, ENOSPC, E_RTE_NO_TAILQ, E_RTE_NO_CONFIG,
			E_RTE_SECONDARY, EINVAL, EEXIST);
	
	/* Operations needed for each ethernet device */
	for (i = 0; i < nb_sys_ports; i++)
		init_port(i);

	/* ... and then loop in the producer */
	main_loop_producer(NULL);

	return 0;
}
Exemple #22
0
/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE,"This program can only run on an even"
				" number of cores(1-%d)\n\n", RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n");
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every slave lcore */
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id);
	}
	/* call on master too */
	(void) lcore_main((void*)i);

	return 0;
}
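Note how the launch loop above packs the worker index straight into the void * argument ((void*)i) instead of passing a pointer to shared state; the receiving side reverses the cast. A sketch of the matching entry point (the real lcore_main body is not shown here):

static int
lcore_main(void *arg)
{
	/* the launcher stored a small integer in the pointer value,
	 * so recover it via uintptr_t -- do not dereference it */
	const uintptr_t worker_idx = (uintptr_t)arg;

	printf("lcore %u running as worker %lu\n",
			rte_lcore_id(), (unsigned long)worker_idx);
	/* ... per-worker RX/TX loop ... */
	return 0;
}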
Exemple #23
0
void
app_main_loop_pipeline_tx(void) {
	struct rte_pipeline *p;
	uint32_t port_in_id[APP_MAX_PORTS];
	uint32_t port_out_id[APP_MAX_PORTS];
	uint32_t table_id[APP_MAX_PORTS];
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_TX))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing TX\n", core_id);

	/* Pipeline configuration */
	struct rte_pipeline_params pipeline_params = {
		.name = "pipeline",
		.socket_id = rte_socket_id(),
	};

	p = rte_pipeline_create(&pipeline_params);
	if (p == NULL)
		rte_panic("%s: Unable to configure the pipeline\n", __func__);

	/* Input port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_reader_params port_ring_params = {
			.ring = app.rings[core_params->swq_in[i]],
		};

		struct rte_pipeline_port_in_params port_params = {
			.ops = &rte_port_ring_reader_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = (app.ether_hdr_pop_push) ?
				app_pipeline_tx_port_in_action_handler : NULL,
			.arg_ah = NULL,
			.burst_size = app.bsz_swq_rd,
		};

		if (rte_pipeline_port_in_create(p, &port_params,
			&port_in_id[i])) {
			rte_panic("%s: Unable to configure input port for "
				"ring TX %i\n", __func__, i);
		}
	}

	/* Output port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ethdev_writer_params port_ethdev_params = {
			.port_id = app.ports[i],
			.queue_id = 0,
			.tx_burst_sz = app.bsz_hwq_wr,
		};

		struct rte_pipeline_port_out_params port_params = {
			.ops = &rte_port_ethdev_writer_ops,
			.arg_create = (void *) &port_ethdev_params,
			.f_action = NULL,
			.f_action_bulk = NULL,
			.arg_ah = NULL,
		};

		if (rte_pipeline_port_out_create(p, &port_params,
			&port_out_id[i])) {
			rte_panic("%s: Unable to configure output port for "
				"port %d\n", __func__, app.ports[i]);
		}
	}

	/* Table configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_stub_ops,
			.arg_create = NULL,
			.f_action_hit = NULL,
			.f_action_miss = NULL,
			.arg_ah = NULL,
			.action_data_size = 0,
		};

		if (rte_pipeline_table_create(p, &table_params, &table_id[i])) {
			rte_panic("%s: Unable to configure table %u\n",
				__func__, table_id[i]);
		}
	}

	/* Interconnecting ports and tables */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
			table_id[i]))
			rte_panic("%s: Unable to connect input port %u to "
				"table %u\n", __func__, port_in_id[i],
				table_id[i]);

	/* Add entries to tables */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_entry default_entry = {
			.action = RTE_PIPELINE_ACTION_PORT,
			{.port_id = port_out_id[i]},
		};

		struct rte_pipeline_table_entry *default_entry_ptr;

		if (rte_pipeline_table_default_entry_add(p, table_id[i],
			&default_entry, &default_entry_ptr))
			rte_panic("%s: Unable to add default entry to "
				"table %u\n", __func__, table_id[i]);
	}

	/* Enable input ports */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_enable(p, port_in_id[i]))
			rte_panic("Unable to enable input port %u\n",
				port_in_id[i]);

	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("%s: Pipeline consistency check failed\n", __func__);

	/* Run-time */
	for (i = 0; ; i++) {
		rte_pipeline_run(p);

		if ((i & APP_FLUSH) == 0)
			rte_pipeline_flush(p);
	}
}

void
app_main_loop_tx(void) {
	struct app_mbuf_array *m[APP_MAX_PORTS];
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_TX))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing TX (no pipeline)\n", core_id);

	for (i = 0; i < APP_MAX_PORTS; i++) {
		m[i] = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
			CACHE_LINE_SIZE, rte_socket_id());
		if (m[i] == NULL)
			rte_panic("%s: Cannot allocate buffer space\n",
				__func__);
	}

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		uint32_t n_mbufs, n_pkts;
		int ret;

		n_mbufs = m[i]->n_mbufs;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings[core_params->swq_in[i]],
			(void **) &m[i]->array[n_mbufs],
			app.bsz_swq_rd);

		if (ret == -ENOENT)
			continue;

		n_mbufs += app.bsz_swq_rd;

		if (n_mbufs < app.bsz_hwq_wr) {
			m[i]->n_mbufs = n_mbufs;
			continue;
		}

		n_pkts = rte_eth_tx_burst(
			app.ports[i],
			0,
			m[i]->array,
			n_mbufs);

		if (n_pkts < n_mbufs) {
			uint32_t k;

			for (k = n_pkts; k < n_mbufs; k++) {
				struct rte_mbuf *pkt_to_free;

				pkt_to_free = m[i]->array[k];
				rte_pktmbuf_free(pkt_to_free);
			}
		}

		m[i]->n_mbufs = 0;
	}
}
void
pktgen_config_ports(void)
{
	uint32_t lid, pid, i, s, q, sid;
	rxtx_t rt;
	pkt_seq_t   *pkt;
	port_info_t     *info;
	char buff[RTE_MEMZONE_NAMESIZE];
	int32_t ret, cache_size;
	char output_buff[256] = { 0 };

	/* Find out the total number of ports in the system. */
	/* We have already blacklisted the ones we needed to in main routine. */
	pktgen.nb_ports = rte_eth_dev_count();
	if (pktgen.nb_ports > RTE_MAX_ETHPORTS)
		pktgen.nb_ports = RTE_MAX_ETHPORTS;

	if (pktgen.nb_ports == 0)
		pktgen_log_panic("*** Did not find any ports to use ***");

	pktgen.starting_port = 0;

	/* Setup the number of ports to display at a time */
	if (pktgen.nb_ports > pktgen.nb_ports_per_page)
		pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports_per_page;
	else
		pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports;

	wr_port_matrix_dump(pktgen.l2p);

	pktgen_log_info("Configuring %d ports, MBUF Size %d, MBUF Cache Size %d",
	                pktgen.nb_ports, MBUF_SIZE, MBUF_CACHE_SIZE);

	/* For each lcore setup each port that is handled by that lcore. */
	for (lid = 0; lid < RTE_MAX_LCORE; lid++) {

		if (wr_get_map(pktgen.l2p, RTE_MAX_ETHPORTS, lid) == 0)
			continue;

		/* For each port attached or handled by the lcore */
		for (pid = 0; pid < pktgen.nb_ports; pid++) {

			/* If non-zero then this port is handled by this lcore. */
			if (wr_get_map(pktgen.l2p, pid, lid) == 0)
				continue;
			wr_set_port_private(pktgen.l2p, pid, &pktgen.info[pid]);
			pktgen.info[pid].pid = pid;
		}
	}
	wr_dump_l2p(pktgen.l2p);

	pktgen.total_mem_used = 0;

	for (pid = 0; pid < pktgen.nb_ports; pid++) {
		/* Skip if we do not have any lcores attached to a port. */
		if ( (rt.rxtx = wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE)) == 0)
			continue;

		pktgen.port_cnt++;
		snprintf(output_buff, sizeof(output_buff),
		         "Initialize Port %d -- TxQ %d, RxQ %d", pid, rt.tx, rt.rx);

		info = wr_get_port_private(pktgen.l2p, pid);

		info->fill_pattern_type  = ABC_FILL_PATTERN;
		strncpy(info->user_pattern, "0123456789abcdef", USER_PATTERN_SIZE);

		rte_spinlock_init(&info->port_lock);

		/* Create the pkt header structures for transmitting sequence of packets. */
		snprintf(buff, sizeof(buff), "seq_hdr_%d", pid);
		info->seq_pkt = (pkt_seq_t *)rte_zmalloc_socket(buff, (sizeof(pkt_seq_t) * NUM_TOTAL_PKTS),
														RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (info->seq_pkt == NULL)
			pktgen_log_panic("Unable to allocate %d pkt_seq_t headers", NUM_TOTAL_PKTS);

		info->seqIdx    = 0;
		info->seqCnt    = 0;

		info->nb_mbufs  = MAX_MBUFS_PER_PORT;
		cache_size = (info->nb_mbufs > RTE_MEMPOOL_CACHE_MAX_SIZE) ?
		        RTE_MEMPOOL_CACHE_MAX_SIZE : info->nb_mbufs;

		pktgen_port_conf_setup(pid, &rt, &default_port_conf);

		if ( (ret = rte_eth_dev_configure(pid, rt.rx, rt.tx, &info->port_conf)) < 0)
			pktgen_log_panic("Cannot configure device: port=%d, Num queues %d,%d (%d)%s",
			                 pid, rt.rx, rt.tx, errno, rte_strerror(-ret));

		pkt = &info->seq_pkt[SINGLE_PKT];

		/* Grab the source MAC address */
		rte_eth_macaddr_get(pid, &pkt->eth_src_addr);
		pktgen_log_info("%s,  Src MAC %02x:%02x:%02x:%02x:%02x:%02x", output_buff,
		                pkt->eth_src_addr.addr_bytes[0],
		                pkt->eth_src_addr.addr_bytes[1],
		                pkt->eth_src_addr.addr_bytes[2],
		                pkt->eth_src_addr.addr_bytes[3],
		                pkt->eth_src_addr.addr_bytes[4],
		                pkt->eth_src_addr.addr_bytes[5]);

		/* Copy the first Src MAC address in SINGLE_PKT to the rest of the sequence packets. */
		for (i = 0; i < NUM_SEQ_PKTS; i++)
			ethAddrCopy(&info->seq_pkt[i].eth_src_addr, &pkt->eth_src_addr);

		pktgen.mem_used = 0;

		for (q = 0; q < rt.rx; q++) {
			/* grab the socket id value based on the lcore being used. */
			sid     = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q));

			/* Create and initialize the default Receive buffers. */
			info->q[q].rx_mp = pktgen_mbuf_pool_create("Default RX", pid, q, info->nb_mbufs, sid, cache_size);
			if (info->q[q].rx_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Default RX mbufs", pid);

			ret = rte_eth_rx_queue_setup(pid, q, pktgen.nb_rxd, sid, &info->rx_conf, pktgen.info[pid].q[q].rx_mp);
			if (ret < 0)
				pktgen_log_panic("rte_eth_rx_queue_setup: err=%d, port=%d, %s", ret, pid, rte_strerror(-ret));
		}
		pktgen_log_info("");

		for (q = 0; q < rt.tx; q++) {
			/* grab the socket id value based on the lcore being used. */
			sid     = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q));

			/* Create and initialize the default Transmit buffers. */
			info->q[q].tx_mp = pktgen_mbuf_pool_create("Default TX", pid, q, MAX_MBUFS_PER_PORT, sid, cache_size);
			if (info->q[q].tx_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Default TX mbufs", pid);

			/* Create and initialize the range Transmit buffers. */
			info->q[q].range_mp = pktgen_mbuf_pool_create("Range TX", pid, q, MAX_MBUFS_PER_PORT,   sid, 0);
			if (info->q[q].range_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Range TX mbufs", pid);

			/* Create and initialize the sequence Transmit buffers. */
			info->q[q].seq_mp = pktgen_mbuf_pool_create("Sequence TX", pid, q, MAX_MBUFS_PER_PORT, sid, cache_size);
			if (info->q[q].seq_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Sequence TX mbufs", pid);

			/* Used for sending special packets like ARP requests */
			info->q[q].special_mp = pktgen_mbuf_pool_create("Special TX", pid, q, MAX_SPECIAL_MBUFS, sid, 0);
			if (info->q[q].special_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Special TX mbufs", pid);

			/* Setup the PCAP file for each port */
			if (pktgen.info[pid].pcap != NULL)
				if (pktgen_pcap_parse(pktgen.info[pid].pcap, info, q) == -1)
					pktgen_log_panic("Cannot load PCAP file for port %d", pid);
			/* Find out the link speed to program the WTHRESH value correctly. */
			pktgen_get_link_status(info, pid, 0);

			ret = rte_eth_tx_queue_setup(pid, q, pktgen.nb_txd, sid, &info->tx_conf);
			if (ret < 0)
				pktgen_log_panic("rte_eth_tx_queue_setup: err=%d, port=%d, %s", ret, pid, rte_strerror(-ret));
			pktgen_log_info("");
		}
		pktgen_log_info("%*sPort memory used = %6lu KB", 71, " ", (pktgen.mem_used + 1023) / 1024);
	}
	pktgen_log_info("%*sTotal memory used = %6lu KB", 70, " ", (pktgen.total_mem_used + 1023) / 1024);

	/* Start up the ports and display the port Link status */
	for (pid = 0; pid < pktgen.nb_ports; pid++) {
		if (wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE) == 0)
			continue;

		info = wr_get_port_private(pktgen.l2p, pid);

		/* Start device */
		if ( (ret = rte_eth_dev_start(pid)) < 0)
			pktgen_log_panic("rte_eth_dev_start: port=%d, %s", pid, rte_strerror(-ret));

		pktgen_get_link_status(info, pid, 1);

		if (info->link.link_status)
			snprintf(output_buff, sizeof(output_buff), "Port %2d: Link Up - speed %u Mbps - %s", pid,
			         (uint32_t)info->link.link_speed,
			         (info->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			         ("full-duplex") : ("half-duplex"));
		else
			snprintf(output_buff, sizeof(output_buff), "Port %2d: Link Down", pid);

		/* If enabled, put device in promiscuous mode. */
		if (pktgen.flags & PROMISCUOUS_ON_FLAG) {
			strncatf(output_buff, " <Enable promiscuous mode>");
			rte_eth_promiscuous_enable(pid);
		}

		pktgen_log_info("%s", output_buff);
		pktgen.info[pid].seq_pkt[SINGLE_PKT].pktSize = MIN_PKT_SIZE;

		/* Setup the port and packet defaults. (must be after link speed is found) */
		for (s = 0; s < NUM_TOTAL_PKTS; s++)
			pktgen_port_defaults(pid, s);

		pktgen_range_setup(info);

		rte_eth_stats_get(pid, &info->init_stats);

		pktgen_rnd_bits_init(&pktgen.info[pid].rnd_bitfields);
	}

	/* Clear the log information by putting a blank line */
	pktgen_log_info("");

	/* Setup the packet capture per port if needed. */
	for (sid = 0; sid < wr_coremap_cnt(pktgen.core_info, pktgen.core_cnt, 0); sid++)
		pktgen_packet_capture_init(&pktgen.capture[sid], sid);
}
Exemple #25
0
int
main(int argc, char **argv)
{
    //struct lcore_queue_conf *qconf = NULL;
    //struct rte_eth_dev_info dev_info;
    struct lcore_env** envs;
    int ret;
    uint8_t n_ports;
    unsigned lcore_count;

    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
    argc -= ret;
    argv += ret;

    ret = l2sw_parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid MARIO arguments\n");

    lcore_count = rte_lcore_count();
    n_ports = rte_eth_dev_count();
    //RTE_LOG(INFO, MARIO, "Find %u logical cores\n" , lcore_count);

    mbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SIZE,
                                   32, sizeof(struct rte_pktmbuf_pool_private),
                                   rte_pktmbuf_pool_init, NULL,
                                   rte_pktmbuf_init, NULL,
                                   rte_socket_id(), 0);

    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");

    /* init route_table */
    route_table = create_route_table(ROUTE_ENTRY_SIZE);
    add_staticroute(route_table);

    /* init arp_table */
    arp_table = create_arp_table(ARP_ENTRY_SIZE);

    if (n_ports == 0)
        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
    //RTE_LOG(INFO, MARIO, "Find %u ethernet ports\n", n_ports);

    if (n_ports > RTE_MAX_ETHPORTS)
        n_ports = RTE_MAX_ETHPORTS;

    /* Each logical core is assigned a dedicated TX queue on each port. */
    /*
    for(uint8_t port_id = 0; port_id < n_ports; port_id++) {
      rte_eth_dev_info_get(port_id, &dev_info);
    }
    */
    /* Initialize the port/queue configuration of each logical core */
    /*
    for(uint8_t port_id = 0; port_id < n_ports; port_id++) {
      ;
    }
    */

    /* Initialize lcore_env */
    envs = (struct lcore_env**) rte_malloc(NULL,
                                           sizeof(struct lcore_env*) * lcore_count, 0);
    if (envs == NULL)
        rte_exit(EXIT_FAILURE, "Cannot allocate memory for core envs\n");

    uint8_t lcore_id;
    for (lcore_id = 0; lcore_id < lcore_count; lcore_id++) {
        struct lcore_env* env;
        env = (struct lcore_env*) rte_malloc(NULL,sizeof(struct lcore_env) +
                                             sizeof(struct mbuf_table) *n_ports,0);
        if (env == NULL)
            rte_exit(EXIT_FAILURE,
                     "Cannot allocate memory for %u core env\n", lcore_id);
        env->n_port = n_ports;
        env->lcore_id = lcore_id;
        memset(env->tx_mbufs, 0, sizeof(struct mbuf_table) * n_ports);
        envs[lcore_id] = env;
    }

    /* Initialise each port */
    uint8_t port_id;
    for(port_id = 0; port_id < n_ports; port_id++) {
        //RTE_LOG(INFO, MARIO, "Initializing port %u...", port_id);
        fflush(stdout);
        ret = rte_eth_dev_configure(port_id, lcore_count, lcore_count, &port_conf);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
                     ret, (unsigned)port_id);
        //RTE_LOG(INFO, MARIO, "done\n");
        rte_eth_macaddr_get(port_id, &port2eth[port_id]);

        /* init one RX queue per lcore */
        uint8_t core_id;
        for (core_id = 0; core_id < lcore_count; core_id++) {
            ret = rte_eth_rx_queue_setup(port_id, core_id, nb_rxd,
                                         rte_eth_dev_socket_id(port_id),
                                         NULL,
                                         mbuf_pool);
            if (ret < 0)
                rte_exit(EXIT_FAILURE,
                         "rte_eth_rx_queue_setup:err=%d, port=%u queue=%u\n",
                         ret, (unsigned) port_id, (unsigned) core_id);
        }

        /* init one TX queue per lcore */
        for (core_id = 0; core_id < lcore_count; core_id++) {
            ret = rte_eth_tx_queue_setup(port_id, core_id, nb_txd,
                                         rte_eth_dev_socket_id(port_id), NULL);
            if (ret < 0)
                rte_exit(EXIT_FAILURE,
                         "rte_eth_tx_queue_setup:err=%d, port=%u queue=%u\n",
                         ret, (unsigned) port_id, (unsigned) core_id);
        }

        /* Start device */
        ret = rte_eth_dev_start(port_id);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
                     ret, (unsigned) port_id);

        rte_eth_promiscuous_enable(port_id);

        /*RTE_LOG(INFO, MARIO,
                "Port %u, MAC address %02x:%02x:%02x:%02x:%02x:%02x\n\n",
                port_id,
                port2eth[port_id].addr_bytes[0],
                port2eth[port_id].addr_bytes[1],
                port2eth[port_id].addr_bytes[2],
                port2eth[port_id].addr_bytes[3],
                port2eth[port_id].addr_bytes[4],
                port2eth[port_id].addr_bytes[5]);
        */
        memset(&port_statistics, 0, sizeof(port_statistics));
    }

    check_all_ports_link_status(n_ports);


    /* launch per-lcore init on every lcore */
    rte_eal_mp_remote_launch(l2sw_launch_one_lcore, envs, CALL_MASTER);
    {
        uint8_t lcore_id;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
            if (rte_eal_wait_lcore(lcore_id) < 0)
                return -1;
        }
    }

    rte_free(arp_table);
    rte_free(route_table);

    return 0;
}
Exemple #26
0
int
test_distributor(void)
{
	static struct rte_distributor *d;
	static struct rte_mempool *p;

	if (rte_lcore_count() < 2) {
		printf("ERROR: not enough cores to test distributor\n");
		return -1;
	}

	if (d == NULL) {
		d = rte_distributor_create("Test_distributor", rte_socket_id(),
				rte_lcore_count() - 1);
		if (d == NULL) {
			printf("Error creating distributor\n");
			return -1;
		}
	} else {
		rte_distributor_flush(d);
		rte_distributor_clear_returns(d);
	}

	const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
			(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
	if (p == NULL) {
		p = rte_mempool_create("DT_MBUF_POOL", nb_bufs,
				MBUF_SIZE, BURST,
				sizeof(struct rte_pktmbuf_pool_private),
				rte_pktmbuf_pool_init, NULL,
				rte_pktmbuf_init, NULL,
				rte_socket_id(), 0);
		if (p == NULL) {
			printf("Error creating mempool\n");
			return -1;
		}
	}

	rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER);
	if (sanity_test(d, p) < 0)
		goto err;
	quit_workers(d, p);

	rte_eal_mp_remote_launch(handle_work_with_free_mbufs, d, SKIP_MASTER);
	if (sanity_test_with_mbuf_alloc(d, p) < 0)
		goto err;
	quit_workers(d, p);

	if (rte_lcore_count() > 2) {
		rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d,
				SKIP_MASTER);
		if (sanity_test_with_worker_shutdown(d, p) < 0)
			goto err;
		quit_workers(d, p);

		rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d,
				SKIP_MASTER);
		if (test_flush_with_worker_shutdown(d, p) < 0)
			goto err;
		quit_workers(d, p);

	} else {
		printf("Not enough cores to run tests for worker shutdown\n");
	}

	if (test_error_distributor_create_numworkers() == -1 ||
			test_error_distributor_create_name() == -1) {
		printf("rte_distributor_create parameter check tests failed");
		return -1;
	}

	return 0;

err:
	quit_workers(d, p);
	return -1;
}
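handle_work() is the worker side of these tests; with the single-packet distributor API of this DPDK generation it is essentially the loop below. This is a sketch: the worker-id assignment and quit flag are simplified stand-ins, where the real test coordinates both through shared state.

static volatile int worker_quit; /* hypothetical quit flag */

static int
handle_work(void *arg)
{
	struct rte_distributor *d = arg;
	struct rte_mbuf *buf = NULL;
	const unsigned id = rte_lcore_id() - 1; /* simplified worker id */

	while (!worker_quit) {
		/* return the previous mbuf (if any) and block for the next one */
		buf = rte_distributor_get_pkt(d, id, buf);
		/* ... count or mutate the packet here ... */
	}
	return 0;
}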
static struct vr_usocket *
usock_alloc(unsigned short proto, unsigned short type)
{
    int sock_fd = -1, domain, ret;
    /* socket TX buffer size = (hold flow table entries * size of jumbo frame) */
    int setsocksndbuff = vr_flow_hold_limit * VR_DPDK_MAX_PACKET_SZ;
    int getsocksndbuff;
    socklen_t getsocksndbufflen = sizeof(getsocksndbuff);
    int error = 0, flags;
    unsigned int buf_len;
    struct vr_usocket *usockp = NULL, *child;
    bool is_socket = true;
    unsigned short sock_type;

    RTE_SET_USED(child);

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: proto %u type %u\n", __func__,
                pthread_self(), proto, type);
    switch (type) {
    case TCP:
        domain = AF_INET;
        sock_type = SOCK_STREAM;
        break;

    case UNIX:
    case RAW:
        domain = AF_UNIX;
        sock_type = SOCK_DGRAM;
        break;

    default:
        return NULL;
    }

    if (proto == EVENT) {
        is_socket = false;
        sock_fd = eventfd(0, 0);
        RTE_LOG(DEBUG, USOCK, "%s[%lx]: new event FD %d\n", __func__,
                pthread_self(), sock_fd);
        if (sock_fd < 0)
            return NULL;
    }

    if (is_socket) {
        sock_fd = socket(domain, sock_type, 0);
        RTE_LOG(INFO, USOCK, "%s[%lx]: new socket FD %d\n", __func__,
                pthread_self(), sock_fd);
        if (sock_fd < 0)
            return NULL;

        /* set socket send buffer size */
        ret = setsockopt(sock_fd, SOL_SOCKET, SO_SNDBUF, &setsocksndbuff,
                         sizeof(setsocksndbuff));
        if (ret == 0) {
            /* check if setting buffer succeeded */
            ret = getsockopt(sock_fd, SOL_SOCKET, SO_SNDBUF, &getsocksndbuff,
                             &getsocksndbufflen);
            if (ret == 0) {
                if (getsocksndbuff >= setsocksndbuff) {
                    RTE_LOG(INFO, USOCK, "%s[%lx]: setting socket FD %d send buff size.\n"
                            "Buffer size set to %d (requested %d)\n", __func__,
                            pthread_self(), sock_fd, getsocksndbuff, setsocksndbuff);
                } else { /* set other than requested */
                    RTE_LOG(ERR, USOCK, "%s[%lx]: setting socket FD %d send buff size failed.\n"
                            "Buffer size set to %d (requested %d)\n", __func__,
                            pthread_self(), sock_fd, getsocksndbuff, setsocksndbuff);
                }
            } else { /* requesting buffer size failed */
                RTE_LOG(ERR, USOCK, "%s[%lx]: getting socket FD %d send buff size failed (%d)\n",
                         __func__, pthread_self(), sock_fd, errno);
            }
        } else { /* setting buffer size failed */
            RTE_LOG(ERR, USOCK, "%s[%lx]: setting socket FD %d send buff size %d failed (%d)\n",
                     __func__, pthread_self(), sock_fd, setsocksndbuff, errno);
        }
    }

    usockp = vr_zalloc(sizeof(*usockp), VR_USOCK_OBJECT);
    if (!usockp)
        goto error_exit;

    usockp->usock_type = type;
    usockp->usock_proto = proto;
    usockp->usock_fd = sock_fd;
    usockp->usock_state = INITED;

    if (is_socket) {
        error = vr_usocket_bind(usockp);
        if (error < 0)
            goto error_exit;

        if (usockp->usock_proto == PACKET) {
            error = vr_usocket_connect(usockp);
            if (error < 0)
                goto error_exit;
        }
    }

    switch (proto) {
    case NETLINK:
        usockp->usock_max_cfds = USOCK_MAX_CHILD_FDS;
        buf_len = 0;
        break;

    case PACKET:
        usockp->usock_max_cfds = USOCK_MAX_CHILD_FDS;
        buf_len = 0;
        break;

    case EVENT:
        /* TODO: we don't need the buf since we use stack to send an event */
        buf_len = USOCK_EVENT_BUF_LEN;
        break;

    default:
        buf_len = 0;
        break;
    }

    if (buf_len) {
        usockp->usock_rx_buf = vr_zalloc(buf_len, VR_USOCK_BUF_OBJECT);
        if (!usockp->usock_rx_buf)
            goto error_exit;

        usockp->usock_buf_len = buf_len;
        usock_read_init(usockp);
    }

    if (proto == PACKET) {
        usockp->usock_mbuf_pool = rte_mempool_lookup("packet_mbuf_pool");
        if (!usockp->usock_mbuf_pool) {
            usockp->usock_mbuf_pool = rte_mempool_create("packet_mbuf_pool",
                    PKT0_MBUF_POOL_SIZE, PKT0_MBUF_PACKET_SIZE,
                    PKT0_MBUF_POOL_CACHE_SZ, sizeof(struct rte_pktmbuf_pool_private),
                    vr_dpdk_pktmbuf_pool_init, NULL, vr_dpdk_pktmbuf_init, NULL,
                    rte_socket_id(), 0);
            if (!usockp->usock_mbuf_pool)
                goto error_exit;
        }

        usockp->usock_iovec = vr_zalloc(sizeof(struct iovec) *
                PKT0_MAX_IOV_LEN, VR_USOCK_IOVEC_OBJECT);
        if (!usockp->usock_iovec)
            goto error_exit;

        usock_read_init(usockp);
    }

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d F_GETFL\n", __func__, pthread_self(),
                usockp->usock_fd);
    flags = fcntl(usockp->usock_fd, F_GETFL);
    if (flags == -1)
        goto error_exit;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d F_SETFL\n", __func__, pthread_self(),
                usockp->usock_fd);
    error = fcntl(usockp->usock_fd, F_SETFL, flags | O_NONBLOCK);
    if (error == -1)
        goto error_exit;

    usockp->usock_poll_block = 1;

    return usockp;

error_exit:

    error = errno;
    if (sock_fd >= 0) {
        close(sock_fd);
        sock_fd = -1;
    }

    usock_close(usockp);
    usockp = NULL;
    errno = error;

    return usockp;
}
Exemple #28
0
			fprintf(stderr, "packet get payload failed!\n");
			continue;
		}
		ret = dpi_engine_exec(payload, len);
#endif
	}
}

static int fwd_loop(__attribute__((__unused__))void *arg)
{
	int lcoreid, portid, queueid;
	int nb_rx, nb_tx;
	int pl_len, i;
	lcoreid = rte_lcore_id();
	pl_len = lcore_args[lcoreid].pl_len;
	printf("lcore %d from socket %d\n", rte_lcore_id(), rte_socket_id());
    for(i = 0; i < pl_len; i++)
	{
		portid = lcore_args[lcoreid].pl[i].portid;
		queueid = lcore_args[lcoreid].pl[i].queueid;
		printf("lcore %d enter loop port %d queue %d\n", lcoreid, portid, queueid);
	}
#ifdef EXEC_MBUF_PA_CNT
	lcore_args[lcoreid].pa_ht = pacnt_hash_create(NB_MBUF << 1);
	if(lcore_args[lcoreid].pa_ht == NULL)
	{
		rte_exit(EINVAL, "pacnt hash created failed on lcore %d\n", lcoreid);
	}
#endif
	while(!is_stop)
	{
		/* ... forwarding loop body elided in this excerpt ... */
	}
}

/* must be called per process */
int ipaugenblick_app_init(int argc,char **argv,char *app_unique_id)
{
    int i;
    char ringname[1024];

    openlog(NULL, 0, LOG_USER);

    if(rte_eal_init(argc, argv) < 0) {
        syslog(LOG_ERR,"cannot initialize rte_eal");
	return -1;
    }
    syslog(LOG_INFO,"EAL initialized\n");

    free_clients_ring = rte_ring_lookup(FREE_CLIENTS_RING);
    if(!free_clients_ring) {
        syslog(LOG_ERR,"cannot find ring %s %d\n",__FILE__,__LINE__);
        exit(0);
    }

    free_connections_ring = rte_ring_lookup(FREE_CONNECTIONS_RING);

    if(!free_connections_ring) {
        syslog(LOG_ERR,"cannot find free connections ring\n");
        return -1;
    }

    free_connections_pool = rte_mempool_lookup(FREE_CONNECTIONS_POOL_NAME);

    if(!free_connections_pool) {
        syslog(LOG_ERR,"cannot find free connections pool\n");
        return -1;
    }

    memset(local_socket_descriptors,0,sizeof(local_socket_descriptors));
    for(i = 0;i < IPAUGENBLICK_CONNECTION_POOL_SIZE;i++) {
        sprintf(ringname,RX_RING_NAME_BASE"%d",i);
        local_socket_descriptors[i].rx_ring = rte_ring_lookup(ringname);
        if(!local_socket_descriptors[i].rx_ring) {
            syslog(LOG_ERR,"%s %d\n",__FILE__,__LINE__);
            exit(0);
        }
        sprintf(ringname,TX_RING_NAME_BASE"%d",i);
        local_socket_descriptors[i].tx_ring = rte_ring_lookup(ringname);
        if(!local_socket_descriptors[i].tx_ring) {
            syslog(LOG_ERR,"%s %d\n",__FILE__,__LINE__);
            exit(0);
        }
        local_socket_descriptors[i].select = -1;
        local_socket_descriptors[i].socket = NULL;
        sprintf(ringname,"lrxcache%s_%d",app_unique_id,i);
        syslog(LOG_DEBUG,"local cache name %s\n",ringname);
        local_socket_descriptors[i].local_cache = rte_ring_create(ringname, 16384,rte_socket_id(), RING_F_SC_DEQ|RING_F_SP_ENQ);
        if(!local_socket_descriptors[i].local_cache) {
           syslog(LOG_WARNING,"cannot create local cache\n");
	   local_socket_descriptors[i].local_cache = rte_ring_lookup(ringname);
	   if(!local_socket_descriptors[i].local_cache) {
		syslog(LOG_ERR,"and cannot find\n");
		exit(0);
	   } 
        }
	local_socket_descriptors[i].any_event_received = 0;
    }
    tx_bufs_pool = rte_mempool_lookup("mbufs_mempool");
    if(!tx_bufs_pool) {
        syslog(LOG_ERR,"cannot find tx bufs pool\n");
        return -1;
    }
    
    free_command_pool = rte_mempool_lookup(FREE_COMMAND_POOL_NAME);
    if(!free_command_pool) {
        syslog(LOG_ERR,"cannot find free command pool\n");
        return -1;
    }
    
    command_ring = rte_ring_lookup(COMMAND_RING_NAME);
    if(!command_ring) {
        syslog(LOG_ERR,"cannot find command ring\n");
        return -1;
    }
    rx_bufs_ring = rte_ring_lookup("rx_mbufs_ring");
    if(!rx_bufs_ring) {
        syslog(LOG_ERR,"cannot find rx bufs ring\n");
        return -1;
    }
    selectors_ring = rte_ring_lookup(SELECTOR_RING_NAME);
    
    for(i = 0;i < IPAUGENBLICK_SELECTOR_POOL_SIZE;i++) {
        sprintf(ringname,"SELECTOR_RING_NAME%d",i);
        selectors[i].ready_connections = rte_ring_lookup(ringname);
        if(!selectors[i].ready_connections) {
            syslog(LOG_ERR,"cannot find ring %s %d\n",__FILE__,__LINE__);
            exit(0);
        } 
    }
    
    signal(SIGHUP, sig_handler);
    signal(SIGINT, sig_handler);
    signal(SIGILL, sig_handler);
    signal(SIGABRT, sig_handler);
    signal(SIGFPE, sig_handler);
    signal(SIGSEGV, sig_handler);
    signal(SIGTERM, sig_handler);
    signal(SIGUSR1, sig_handler);
//    pthread_create(&stats_thread,NULL,print_stats,NULL);
    return ((tx_bufs_pool == NULL)||(command_ring == NULL)||(free_command_pool == NULL));
}
Exemple #30
0
static int configure_eth_port(int32_t id) {

	struct port_configure* pconf = &ports_conf[id];

	struct ether_addr* addr = &pconf->addr;

	const uint16_t rxRings = 1, txRings = 1;
	const uint8_t nb_ports = rte_eth_dev_count();
	int ret;
	uint16_t q;
	int32_t port_id = pconf->portid;
	char buf[1024];
	pconf->out_buf.count = 0;

	if (port_id >= nb_ports)
		return -1;

	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf_default);

	if (ret != 0)
		return ret;

	for (q = 0; q < rxRings; q++) {
		ret = rte_eth_rx_queue_setup(port_id, q, RX_DESC_PER_QUEUE,
				rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
		if (ret < 0)
			return ret;
	}

	for (q = 0; q < txRings; q++) {
		ret = rte_eth_tx_queue_setup(port_id, q, TX_DESC_PER_QUEUE,
				rte_eth_dev_socket_id(port_id), NULL);
		if (ret < 0)
			return ret;
	}

	snprintf(buf, sizeof(buf), "ring_in_%d", port_id);
	pconf->ring_in = rte_ring_create(buf, RING_SIZE, rte_socket_id(),
			RING_F_SP_ENQ);
	if (pconf->ring_in == NULL) {
		return -1;
	}
	snprintf(buf, sizeof(buf), "ring_out_%d", port_id);
	pconf->ring_out = rte_ring_create(buf, RING_SIZE, rte_socket_id(),
			RING_F_SC_DEQ);
	if (pconf->ring_out == NULL) {
		return -1;
	}

	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	rte_eth_macaddr_get(port_id, addr);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
	" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n", (unsigned) port_id,
			addr->addr_bytes[0], addr->addr_bytes[1], addr->addr_bytes[2],
			addr->addr_bytes[3], addr->addr_bytes[4], addr->addr_bytes[5]);

	rte_eth_promiscuous_enable(port_id);

	return 0;
}
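A typical caller would run configure_eth_port() over every port it intends to use and abort on the first failure, e.g. (nb_ports_used is a hypothetical count of configured entries in ports_conf):

	int32_t id;

	for (id = 0; id < nb_ports_used; id++)
		if (configure_eth_port(id) != 0)
			rte_exit(EXIT_FAILURE, "Cannot configure eth port %d\n", id);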