Example #1
static int file_read_cached(const char *file_name, uint8_t **mem, uint32_t beg, uint32_t len, uint32_t socket, struct hash_set *hs)
{
	if (len == 0) {
		*mem = NULL;
		return 0;
	}

	uint8_t *data_mem;

	/* Since the configuration can reference the same file from
	   multiple places, use prox_shared infrastructure to detect
	   this and return previously loaded data. */
	char name[256];

	snprintf(name, sizeof(name), "%u-%u:%s", beg, len, file_name);
	*mem = prox_sh_find_socket(socket, name);
	if (*mem)
		return 0;

	/* Check if the file has been loaded on the other socket. */
	if (socket == 1 && (data_mem = prox_sh_find_socket(0, name))) {
		uint8_t *data_find = hash_set_find(hs, data_mem, len);
		if (!data_find) {
			data_find = prox_zmalloc(len, socket);
			PROX_PANIC(data_find == NULL, "Failed to allocate memory (%u bytes) to hold header for peer\n", len);

			rte_memcpy(data_find, data_mem, len);
			hash_set_add(hs, data_find, len);
		}
		*mem = data_find;
		prox_sh_add_socket(socket, name, *mem);
		return 0;
	}

	/* It is possible that a file with a different name contains
	   the same data. In that case, search all loaded files and
	   compare the data to reduce memory utilization. */
	data_mem = malloc(len);
	PROX_PANIC(data_mem == NULL, "Failed to allocate temporary memory to hold data\n");

	if (file_read_content(file_name, data_mem, beg, len)) {
		plog_err("%s\n", file_get_error());
		return -1;
	}

	uint8_t *data_find = hash_set_find(hs, data_mem, len);
	if (!data_find) {
		data_find = prox_zmalloc(len, socket);
		PROX_PANIC(data_find == NULL, "Failed to allocate memory (%u bytes) to hold file data\n", len);

		rte_memcpy(data_find, data_mem, len);
		hash_set_add(hs, data_find, len);
	}

	free(data_mem);

	*mem = data_find;
	prox_sh_add_socket(socket, name, *mem);
	return 0;
}
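
/* Usage sketch (hypothetical caller, not part of the original source):
   because the cache key above is "beg-len:file_name", two configuration
   entries that reference the same byte range of the same file share a
   single per-socket copy of the data. */
static uint8_t *load_shared_header(const char *path, uint32_t len,
				   uint32_t socket, struct hash_set *hs)
{
	uint8_t *hdr;

	if (file_read_cached(path, &hdr, 0, len, socket, hs))
		return NULL; /* the error was already logged via plog_err() */
	return hdr; /* shared with other callers; must not be freed */
}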
struct qinq_gre_map *get_qinq_gre_map(struct task_args *targ)
{
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	struct qinq_gre_map *ret = prox_sh_find_socket(socket_id, "qinq_gre_map");

	if (!ret) {
		PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n");
		int rv = lua_to_qinq_gre_map(prox_lua(), GLOBAL, targ->user_table, socket_id, &ret);
		PROX_PANIC(rv, "Error reading mapping between qinq and gre from qinq_gre_map:\n%s\n",
			   get_lua_to_errors());
		prox_sh_add_socket(socket_id, "qinq_gre_map", ret);
	}
	return ret;
}
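
/* The find-or-build idiom used above recurs throughout this code. A
   minimal generic sketch (hypothetical helper, assuming only the
   prox_sh_find_socket()/prox_sh_add_socket() calls already seen above): */
static void *sh_find_or_build(int socket_id, const char *key,
			      void *(*build)(int socket_id))
{
	void *obj = prox_sh_find_socket(socket_id, key);

	if (!obj) {
		obj = build(socket_id);
		prox_sh_add_socket(socket_id, key, obj);
	}
	return obj;
}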
static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)(tbase);
	int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->qinq_tag = targ->qinq_tag;
	task->cpe_table = targ->cpe_table;
	task->cpe_timeout = rte_get_tsc_hz()/1000*targ->cpe_table_timeout_ms;

	if (!strcmp(targ->task_init->sub_mode_str, "pe")) {
		PROX_PANIC(!strcmp(targ->cpe_table_name, ""), "CPE table not configured\n");
		fill_table(targ, task->cpe_table);
	}

#ifdef ENABLE_EXTRA_USER_STATISTICS
	task->n_users = targ->n_users;
	task->stats_per_user = rte_zmalloc_socket(NULL, targ->n_users * sizeof(uint32_t),
						  RTE_CACHE_LINE_SIZE, socket_id);
#endif
	if (targ->runtime_flags & TASK_CLASSIFY) {
		PROX_PANIC(!strcmp(targ->dscp, ""), "DSCP table not specified\n");
		task->dscp = prox_sh_find_socket(socket_id, targ->dscp);
		if (!task->dscp) {
			int ret = lua_to_dscp(prox_lua(), GLOBAL, targ->dscp, socket_id, &task->dscp);
			PROX_PANIC(ret, "Failed to create dscp table from config:\n%s\n",
				   get_lua_to_errors());
			prox_sh_add_socket(socket_id, targ->dscp, task->dscp);
		}
	}

	task->runtime_flags = targ->runtime_flags;

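	/* Lay out a fake mbuf pointer for each key so that the key sits
	   exactly sizeof(struct rte_mbuf) bytes after the pointed-to
	   address; presumably lookup code that expects mbufs can then read
	   the keys at a fixed offset without any real packets. */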
	for (uint32_t i = 0; i < 64; ++i) {
		task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof(struct rte_mbuf));
	}

	targ->lconf->ctrl_timeout = rte_get_tsc_hz()/targ->ctrl_freq;
	targ->lconf->ctrl_func_m[targ->task] = arp_msg;

	/* TODO: check whether the reverse mapping should be limited to
	   the elements that actually changed in the mapping. */

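	/* Record each physical port's source MAC, indexed through the
	   configured port mapping (mapping[i] is presumably the logical
	   index under which physical port i is exposed). */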
	for (uint32_t i = 0; i < sizeof(targ->mapping)/sizeof(targ->mapping[0]); ++i) {
		task->src_mac[targ->mapping[i]] = *(uint64_t*)&prox_port_cfg[i].eth_addr;
	}

	/* task->src_mac[entry->port_idx] = *(uint64_t*)&prox_port_cfg[entry->port_idx].eth_addr; */
}
Example #4
static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	static char name[] = "server_mempool";
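	/* Bump the first character so that each instantiation gets a unique
	   mempool name; rte_mempool_create() fails on duplicate names. */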
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   4*1024 - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket_id, 0);
	PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);
	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_listen = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_listen == 0, "No services specified to listen on\n");

	task->bundle_cfgs = prox_zmalloc(n_listen * sizeof(task->bundle_cfgs[0]), socket_id);

	plogx_info("n_listen = %u\n", n_listen);

	struct hash_set *hs = prox_sh_find_socket(socket_id, "genl4_streams");
	if (hs == NULL) {
		/* Expected number of streams per bundle = 1, hash_set
		   will grow if full. */
		hs = hash_set_create(n_listen, socket_id);
		prox_sh_add_socket(socket_id, "genl4_streams", hs);
	}

	const struct rte_hash_parameters listen_table = {
		.name = name,
		.entries = n_listen * 4,
		.key_len = sizeof(struct new_tuple),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = socket_id,
	};
	name[0]++;

	task->listen_hash = rte_hash_create(&listen_table);
	task->listen_entries = prox_zmalloc(listen_table.entries * sizeof(task->listen_entries[0]), socket_id);

	int idx = 0;
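	/* Standard Lua C API table walk: lua_next() pops a key and pushes
	   the next key/value pair; each iteration reads the value (a stream
	   config) and pops it, leaving the key for the next call. */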
	lua_pushnil(prox_lua());
	while (lua_next(prox_lua(), -2)) {
		task->bundle_cfgs[idx].n_stream_cfgs = 1;
		task->bundle_cfgs[idx].stream_cfgs = prox_zmalloc(sizeof(*task->bundle_cfgs[idx].stream_cfgs), socket_id);
		int ret = lua_to_stream_cfg(prox_lua(), STACK, NULL, socket_id, &task->bundle_cfgs[idx].stream_cfgs[0], hs);
		PROX_PANIC(ret, "Failed to load stream cfg\n");
		struct stream_cfg *stream = task->bundle_cfgs[idx].stream_cfgs[0];

		// TODO: check mask and add to hash for each host
		struct new_tuple nt = {
			.dst_addr = stream->servers.ip,
			.proto_id = stream->proto,
			.dst_port = stream->servers.port,
			.l2_types[0] = 0x0008,
		};

		ret = rte_hash_add_key(task->listen_hash, &nt);
		PROX_PANIC(ret < 0, "Failed to add key to listen_hash\n");

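		/* On success, rte_hash_add_key() returns the key's position
		   in the table, which is reused here as the index into the
		   parallel listen_entries[] array. */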
		task->listen_entries[ret] = &task->bundle_cfgs[idx];

		plogx_dbg("Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&nt.dst_addr)), rte_bswap16(nt.dst_port));
		++idx;
		lua_pop(prox_lua(), 1);
	}

	static char name2[] = "task_gen_hash2";

	name2[0]++;
	plogx_dbg("Creating bundle ctx pool\n");
	if (bundle_ctx_pool_create(name2, targ->n_concur_conn * 2, &task->bundle_ctx_pool, NULL, 0, NULL, socket_id)) {
		cmd_mem_stats();
		PROX_PANIC(1, "Failed to create bundle_ctx_pool\n");
	}

	task->heap = heap_create(targ->n_concur_conn * 2, socket_id);
	task->seed = rte_rdtsc();

	/* TODO: calculate the CDF of the reply distribution and the
	   number of replies as the number needed to cover 99% of the
	   replies. For now, assume that this number is 2. */
	uint32_t queue_size = rte_align32pow2(targ->n_concur_conn * 2);

	PROX_PANIC(queue_size == 0, "Overflow resulted in queue size 0\n");
	task->fqueue = fqueue_create(queue_size, socket_id);
	PROX_PANIC(task->fqueue == NULL, "Failed to allocate local queue\n");

	uint32_t n_descriptors;

	if (targ->nb_txports) {
		PROX_PANIC(targ->nb_txports != 1, "Need exactly one TX port for L4 generation\n");
		n_descriptors = prox_port_cfg[targ->tx_port_queue[0].port].n_txd;
	} else {
		PROX_PANIC(targ->nb_txrings != 1, "Need exactly one TX ring for L4 generation\n");
		n_descriptors = 256;
	}

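	/* Cap the token bucket at one TX queue's worth of minimum-size
	   frames, each with 20 bytes of wire overhead; see the fuller
	   rationale in init_task_gen_client() below. */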
	struct token_time_cfg tt_cfg = {
		.bpp = targ->rate_bps,
		.period = rte_get_tsc_hz(),
		.bytes_max = n_descriptors * (ETHER_MIN_LEN + 20),
	};

	token_time_init(&task->token_time, &tt_cfg);
}

static void init_task_gen_client(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	static char name[] = "gen_pool";
	const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id);
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   4*1024 - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket, 0);
	PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);

	/* streams contains a Lua table. Iterate over it and read each
	   stream together with its associated imix_fraction. */
	uint32_t imix;
	uint32_t i = 0;

	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_bundle_cfgs = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_bundle_cfgs == 0, "No configs specified\n");
	plogx_info("loading %u bundle_cfgs\n", n_bundle_cfgs);

	struct hash_set *hs = prox_sh_find_socket(socket, "genl4_streams");
	if (hs == NULL) {
		/* Expected number of streams per bundle = 8, hash_set
		   will grow if full. */
		hs = hash_set_create(n_bundle_cfgs * 8, socket);
		prox_sh_add_socket(socket, "genl4_streams", hs);
	}

	task->bundle_cfgs = prox_zmalloc(n_bundle_cfgs * sizeof(task->bundle_cfgs[0]), socket);
	lua_pushnil(prox_lua());

	int total_imix = 0;

	uint32_t *occur = prox_zmalloc(n_bundle_cfgs * sizeof(*occur), socket);
	struct cdf *cdf = cdf_create(n_bundle_cfgs, socket);

	while (lua_next(prox_lua(), -2)) {
		PROX_PANIC(lua_to_int(prox_lua(), TABLE, "imix_fraction", &imix) ||
			   lua_to_bundle_cfg(prox_lua(), TABLE, "bundle", socket, &task->bundle_cfgs[i], hs),
			   "Failed to load bundle cfg:\n%s\n", get_lua_to_errors());
		cdf_add(cdf, imix);
		occur[i] = imix;
		total_imix += imix;
		++i;
		lua_pop(prox_lua(), 1);
	}

	lua_pop(prox_lua(), pop);
	cdf_setup(cdf);

	PROX_PANIC(targ->max_setup_rate == 0, "Max setup rate not set\n");

	task->new_conn_cost = rte_get_tsc_hz()/targ->max_setup_rate;

	static char name2[] = "task_gen_hash";
	name2[0]++;
	plogx_dbg("Creating bundle ctx pool\n");
	if (bundle_ctx_pool_create(name2, targ->n_concur_conn, &task->bundle_ctx_pool, occur, n_bundle_cfgs, task->bundle_cfgs, socket)) {
		cmd_mem_stats();
		PROX_PANIC(1, "Failed to create bundle_ctx_pool\n");
	}

	task->heap = heap_create(targ->n_concur_conn, socket);
	task->seed = rte_rdtsc();
	/* task->token_time.bytes_max = MAX_PKT_BURST * (ETHER_MAX_LEN + 20); */

	/* To avoid overflowing the tx descriptors, the token bucket
	   size needs to be limited. The descriptors are filled most
	   quickly with the smallest packets. For that reason, the
	   token bucket size is given by "number of tx descriptors" *
	   "smallest Ethernet packet". */
	PROX_ASSERT(targ->nb_txports == 1);

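	/* Illustrative numbers: with 256 TX descriptors, bytes_max =
	   256 * (ETHER_MIN_LEN (64) + 20 bytes of preamble/SFD plus
	   inter-frame gap) = 21504 bytes. */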
	struct token_time_cfg tt_cfg = {
		.bpp = targ->rate_bps,
		.period = rte_get_tsc_hz(),
		.bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (ETHER_MIN_LEN + 20),
	};

	token_time_init(&task->token_time, &tt_cfg);
}
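
/* Sketch of how a CDF built from the imix fractions is typically consumed
   (self-contained and illustrative only; the cdf_* API used above may
   differ): bundle i is selected with probability occur[i]/total_imix. */
static uint32_t pick_weighted(const uint32_t *occur, uint32_t n_cfgs,
			      uint32_t total_imix, uint32_t rnd)
{
	uint32_t r = rnd % total_imix;
	uint32_t acc = 0;

	for (uint32_t i = 0; i < n_cfgs; ++i) {
		acc += occur[i];
		if (r < acc)
			return i;
	}
	return n_cfgs - 1; /* not reached when total_imix == sum of occur[] */
}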

static void start_task_gen_client(struct task_base *tbase)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;

	token_time_reset(&task->token_time, rte_rdtsc(), 0);

	task->new_conn_tokens = 0;
	task->new_conn_last_tsc = rte_rdtsc();
}

static void stop_task_gen_client(struct task_base *tbase)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	struct bundle_ctx *bundle;

	while (!heap_is_empty(task->heap)) {
		bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
		bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
	}
}

static void start_task_gen_server(struct task_base *tbase)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;

	token_time_reset(&task->token_time, rte_rdtsc(), 0);
}

static void stop_task_gen_server(struct task_base *tbase)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	struct bundle_ctx *bundle;
	uint8_t out[MAX_PKT_BURST];

	while (!heap_is_empty(task->heap)) {
		bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
		bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
	}

	if (task->cancelled) {
		struct rte_mbuf *mbuf = task->mbuf_saved;

		out[0] = OUT_DISCARD;
		task->cancelled = 0;
		task->base.tx_pkt(&task->base, &mbuf, 1, out);
	}

	do {
		if (task->cur_mbufs_beg == task->cur_mbufs_end) {
			task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST);
			task->cur_mbufs_beg = 0;
			if (task->cur_mbufs_end == 0)
				break;
		}
		uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg;
		struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg;

		if (n_pkts) {
			for (uint16_t j = 0; j < n_pkts; ++j) {
				out[j] = OUT_DISCARD;
			}
			task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
		}
	} while (1);
}

static struct task_init task_init_gen1 = {
	.mode_str = "genl4",
	.sub_mode_str = "server",
	.init = init_task_gen,
	.handle = handle_gen_bulk,
	.start = start_task_gen_server,
	.stop = stop_task_gen_server,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_gen_server),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

static struct task_init task_init_gen2 = {
	.mode_str = "genl4",
	.init = init_task_gen_client,
	.handle = handle_gen_bulk_client,
	.start = start_task_gen_client,
	.stop = stop_task_gen_client,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_gen_client),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

__attribute__((constructor)) static void reg_task_gen(void)
{
	reg_task(&task_init_gen1);
	reg_task(&task_init_gen2);
}
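
/* The constructor attribute makes reg_task_gen() run at program startup,
   before main(), so both task types self-register with the task registry.
   A minimal sketch of such a registry (illustrative only; not the actual
   reg_task() implementation): */
#define MAX_TASK_TYPES 64
static struct task_init *registered_tasks[MAX_TASK_TYPES];
static unsigned n_registered_tasks;

static void reg_task_sketch(struct task_init *t)
{
	if (n_registered_tasks < MAX_TASK_TYPES)
		registered_tasks[n_registered_tasks++] = t;
}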