Example #1
/* open_ll_socket */
ll_socket_t *open_ll_socket
	(	const bool is_transmitter, const int tx_delay,
		const char* ll_if_name, const int ll_sap,
		const int frame_type	)
{

	// 1) create RAW socket
	ll_socket_t *ll_socket = init_ll_socket
			(is_transmitter, tx_delay, ll_if_name, ll_sap, frame_type);
printf("volvo de init_ll_socket\n");
//print_eth_address(ll_socket->if_mac);
	#ifdef KERNEL_RING

	// 2) initialize rings for frames tx+rx
	if ( init_rings(ll_socket) < 0 )
		{ handle_app_error("Could not initialize TX/RX rings.\n"); }
	log_app_msg("IO rings iniatialized.\n");

	#endif
	
	// 3) bind RAW socket	

	if ( bind_ll_socket(ll_socket, is_transmitter) < 0 )
		{ handle_sys_error("Could not bind socket"); }

	log_app_msg("ll_socket bound, ll_sap = %d.\n", ll_socket->ll_sap);

	return(ll_socket);

}
Example #2
/* Initialize cores and allocate mempools */
static void init_lcores(void)
{
	struct lcore_cfg *lconf = 0;
	uint32_t lcore_id = -1;

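	/* sanity check: every enabled core must sit on a socket index below MAX_SOCKETS */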
	while(prox_core_next(&lcore_id, 0) == 0) {
		uint8_t socket = rte_lcore_to_socket_id(lcore_id);
		PROX_PANIC(socket + 1 > MAX_SOCKETS, "Can't configure core %u (on socket %u). MAX_SOCKETS is set to %d\n", lcore_id, socket, MAX_SOCKETS);
	}

	/* need to allocate mempools as the first thing to use the lowest possible address range */
	plog_info("=== Initializing mempools ===\n");
	setup_mempools();

	lcore_cfg_alloc_hp();

	set_dest_threads();
	set_task_lconf();

	plog_info("=== Initializing port addresses ===\n");
	init_port_addr();

	plog_info("=== Initializing queue numbers on cores ===\n");
	configure_if_queues();

	plog_info("=== Initializing rings on cores ===\n");
	init_rings();

	plog_info("=== Checking configuration consistency ===\n");
	check_cfg_consistent();

	plog_all_rings();

	setup_all_task_structs_early_init();
	plog_info("=== Initializing tasks ===\n");
	setup_all_task_structs();
}
Example #3
/* Initialize cores and allocate mempools */
static void init_lcores(void)
{
	char name[64];
	struct lcore_cfg *lconf = 0;
	static uint8_t *worker_thread_table[MAX_SOCKETS] = {0};
	static uint16_t *user_table[MAX_SOCKETS] = {0};
	struct rte_lpm *ipv4_lpm[MAX_SOCKETS] = {0};
	struct rte_hash *qinq_to_gre_lookup[MAX_SOCKETS] = {0};
	struct next_hop_struct *next_hop[MAX_SOCKETS] = {0};

	/* need to allocate mempools as the first thing to use the lowest possible address range */
	setup_mempools(lcore_cfg_init);

	lcore_cfg = rte_zmalloc_socket("lcore_cfg_hp", RTE_MAX_LCORE * sizeof(struct lcore_cfg), CACHE_LINE_SIZE, rte_socket_id());
	TGEN_PANIC(lcore_cfg == NULL, "Could not allocate memory for core control structures\n");
	rte_memcpy(lcore_cfg, lcore_cfg_init, RTE_MAX_LCORE * sizeof(struct lcore_cfg));

	init_lcore_info();
	check_no_mode_core();

	mprintf("=== Initializing rings on cores ===\n");
	init_rings();

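	/* per NUMA socket, allocate only the lookup tables that the configured tasks actually need */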
	for (uint8_t socket_id = 0; socket_id < MAX_SOCKETS; ++socket_id) {
		uint16_t data_structs_flags = data_structs_needed(lconf, socket_id);
		if (data_structs_flags & DATA_STRUCTS_NEED_WT_TABLE) {
			worker_thread_table[socket_id] = rte_zmalloc_socket(NULL, 0x1000000, CACHE_LINE_SIZE, socket_id);
			TGEN_PANIC(worker_thread_table[socket_id] == NULL, "Error creating worker thread table\n");
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_GRE_TABLE) {
			mprintf("=== user <-> QinQ table configuration ===\n");
			qinq_to_gre_lookup[socket_id] = read_gre_table_config(config_path, "gre_table.cfg", worker_thread_table[socket_id], lb_nb_txrings, socket_id);
			TGEN_PANIC(NULL == qinq_to_gre_lookup[socket_id], "Failed to allocate qinq to gre lookup table\n");
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_USER_TABLE) {
			mprintf("=== User table configuration ===\n");
			user_table[socket_id] = read_user_table_config(config_path, "user_table.cfg", &qinq_to_gre_lookup[socket_id], socket_id);
			TGEN_PANIC(NULL == user_table[socket_id], "Failed to allocate user lookup table\n");
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_NEXT_HOP) {
			mprintf("=== Next hop configuration ===\n");
			next_hop[socket_id] = read_next_hop_config(config_path, "next_hop.cfg", &tgen_used_port_mask, socket_id);
			init_routing_ports();
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_LPM_V4) {
			mprintf("=== IPv4 routing configuration ===\n");
			ipv4_lpm[socket_id] = read_lpm_v4_config(config_path, "ipv4.cfg", socket_id);
			TGEN_PANIC(NULL == ipv4_lpm[socket_id], "Failed to allocate IPv4 LPM\n");
		}

		if (data_structs_flags & DATA_STRUCTS_NEED_LPM_V6) {
			mprintf("=== IPv6 routing configuration ===\n");
			read_lpm_v6_config(config_path, "ipv6.cfg", socket_id);
		}
	}

	check_consistent_cfg();

	mprintf("=== Initializing tables, mempools and queue numbers on cores ===\n");
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}

		lconf = &lcore_cfg[lcore_id];
		uint8_t socket = rte_lcore_to_socket_id(lcore_id);

		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			struct task_startup_cfg *startup_cfg = &lconf->startup_cfg[task_id];

			if (QOS == startup_cfg->mode) {
				rte_snprintf(name, sizeof(name), "qos_sched_port_%u_%u", lcore_id, task_id);

				startup_cfg->qos_conf.port_params.name = name;
				startup_cfg->qos_conf.port_params.socket = socket;
				startup_cfg->qos_conf.port_params.rate = TEN_GIGABIT;
				startup_cfg->sched_port = rte_sched_port_config(&startup_cfg->qos_conf.port_params);

				TGEN_PANIC(startup_cfg->sched_port == NULL, "failed to create sched_port");

				mprintf("number of pipes: %d\n\n", startup_cfg->qos_conf.port_params.n_pipes_per_subport);
				int err = rte_sched_subport_config(startup_cfg->sched_port, 0, startup_cfg->qos_conf.subport_params);
				TGEN_PANIC(err != 0, "Failed setting up sched_port subport, error: %d", err);

				/* only single subport and single pipe profile is supported */
				for (uint32_t pipe = 0; pipe < startup_cfg->qos_conf.port_params.n_pipes_per_subport; ++pipe) {
					err = rte_sched_pipe_config(startup_cfg->sched_port, 0, pipe, 0);
					TGEN_PANIC(err != 0, "failed setting up sched port pipe, error: %d", err);
				}
			}
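			/* point each task at the per-socket tables that match its mode */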
			if (LB_QINQ == startup_cfg->mode) {
				startup_cfg->worker_thread_table = worker_thread_table[rte_socket_id()];
			}
			if (QINQ_DECAP_ARP == startup_cfg->mode || QINQ_DECAP_V4 == startup_cfg->mode) {
				startup_cfg->qinq_gre = qinq_to_gre_lookup[rte_socket_id()];
			}
			if (QOS == startup_cfg->mode || CLASSIFY == startup_cfg->mode || QINQ_DECAP_V6 == startup_cfg->mode) {
				startup_cfg->user_table = user_table[rte_socket_id()];
			}
			if (ROUTING == startup_cfg->mode || FWD == startup_cfg->mode || QINQ_DECAP_V4 == startup_cfg->mode) {
				startup_cfg->next_hop = next_hop[rte_socket_id()];
			}
			if (QINQ_DECAP_V4 == startup_cfg->mode || FWD == startup_cfg->mode || ROUTING == startup_cfg->mode) {
				startup_cfg->ipv4_lpm = ipv4_lpm[rte_socket_id()];
			}

		}

		mprintf("\t*** Initializing core %u ***\n", lcore_id);
		if (lconf->flags & PCFG_CPETABLEv4) {
			sprintf(name, "core_%u_CPEv4Table", lcore_id);

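			/* divide the CPE table entries across slave threads; round the divisor down to a power of two */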
			uint8_t table_part = lconf->startup_cfg[0].nb_slave_threads;
			if (!rte_is_power_of_2(table_part)) {
				table_part = rte_align32pow2(table_part) >> 1;
			}

			struct rte_hash_parameters hash_params = {
				.name = name,
				.entries = MAX_GRE / table_part,
				.bucket_entries = GRE_BUCKET_ENTRIES,
				.key_len = sizeof(struct hash_gre_struct),
				.entry_len = sizeof(struct cpe_table_hash_entry),
				.hash_func_init_val = 0,
				.socket_id = socket
			};
			lconf->cpe_v4_table = rte_hash_ext_create(&hash_params);
			TGEN_PANIC(lconf->cpe_v4_table == NULL, "Unable to allocate memory for IPv4 hash table on core %u\n", lcore_id);

			/* set all entries to expire at MAX_TSC (i.e. never) so that we don't waste cycles at startup going through all the empty entries */
			setup_arp_entries(lconf->cpe_v4_table);

			/* for locality, copy the pointer to the port structure where it is needed at packet handling time */
			for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
				if (lconf->startup_cfg[task_id].flags & PORT_STARTUP_CPEv4) {
					lconf->startup_cfg[task_id].cpe_table = lconf->cpe_v4_table;
				}
			}
		}

		if (lconf->flags & PCFG_CPETABLEv6) {
			sprintf(name, "core_%u_CPEv6Table", lcore_id);

			uint8_t table_part = lconf->startup_cfg[0].nb_slave_threads;
			if (!rte_is_power_of_2(table_part)) {
				table_part = rte_align32pow2(table_part) >> 1;
			}