static struct cpe_table_data *read_cpe_table_config(const char *name, uint8_t socket)
{
	struct lua_State *L = prox_lua();
	struct cpe_table_data *ret = NULL;

	lua_getglobal(L, name);
	PROX_PANIC(lua_isnil(L, -1), "Couldn't find cpe_table data\n");

	return ret;
}
Example #2
static int lua_to_bundle_cfg(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct bundle_cfg *bundle)
{
	int pop, pop2, idx;

	if ((pop = lua_getfrom(L, from, name)) < 0)
		return -1;

	if (!lua_istable(L, -1))
		return -1;

	if (lua_to_host_set(L, TABLE, "clients", &bundle->clients))
		return -1;

	/* Read streams array */
	if ((pop2 = lua_getfrom(L, TABLE, "streams")) < 0)
		return -1;

	if (!lua_istable(L, -1))
		return -1;

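	/* lua_len() pushes the length of the table at the given stack index;
	   read the stream count off the stack and pop it again. */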
	lua_len(prox_lua(), -1);
	bundle->n_stream_cfgs = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);

	if (bundle->n_stream_cfgs >= sizeof(bundle->stream_cfgs)/sizeof(bundle->stream_cfgs[0]))
		return -1;
	plogx_dbg("loading bundle cfg with %d streams\n", bundle->n_stream_cfgs);
	idx = 0;
	lua_pushnil(L);
	while (lua_next(L, -2)) {
		if (lua_to_stream_cfg(L, STACK, NULL, socket, &bundle->stream_cfgs[idx]))
			return -1;

		++idx;
		lua_pop(L, 1);
	}
	lua_pop(L, pop2);

	lua_pop(L, pop);
	return 0;
}
Example #3
static int lua_to_bundle_cfg(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct bundle_cfg *bundle, struct hash_set *hs)
{
	int pop, idx;
	int clients_loaded = 0;

	if ((pop = lua_getfrom(L, from, name)) < 0)
		return -1;

	if (!lua_istable(L, -1))
		return -1;

	lua_len(prox_lua(), -1);
	bundle->n_stream_cfgs = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);

	bundle->stream_cfgs = prox_zmalloc(sizeof(*bundle->stream_cfgs) * bundle->n_stream_cfgs, socket);

	plogx_dbg("loading bundle cfg with %d streams\n", bundle->n_stream_cfgs);
	idx = 0;
	lua_pushnil(L);
	while (lua_next(L, -2)) {
		if (!clients_loaded) {
			if (lua_to_host_set(L, TABLE, "clients", &bundle->clients)) {
				return -1;
			}
			clients_loaded = 1;
		}
		if (lua_to_stream_cfg(L, STACK, NULL, socket, &bundle->stream_cfgs[idx], hs)) {
			return -1;
		}

		++idx;
		lua_pop(L, 1);
	}

	lua_pop(L, pop);
	return 0;
}
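
/* The qinq-to-gre mapping is read from Lua at most once per socket: the
   parsed table is cached via the prox_sh_* shared-state helpers so that
   every task on the same socket reuses it. */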
struct qinq_gre_map *get_qinq_gre_map(struct task_args *targ)
{
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	struct qinq_gre_map *ret = prox_sh_find_socket(socket_id, "qinq_gre_map");

	if (!ret) {
		PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n");
		int rv = lua_to_qinq_gre_map(prox_lua(), GLOBAL, targ->user_table, socket_id, &ret);
		PROX_PANIC(rv, "Error reading mapping between qinq and gre from qinq_gre_map: \n%s\n",
			   get_lua_to_errors());
		prox_sh_add_socket(socket_id, "qinq_gre_map", ret);
	}
	return ret;
}
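
/* Task init for QinQ encapsulation: cache the CPE table and its timeout,
   load the per-socket DSCP table from Lua when classification is enabled,
   and record one source MAC per mapped port. */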
static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *targ)
{
	struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)(tbase);
	int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->qinq_tag = targ->qinq_tag;
	task->cpe_table = targ->cpe_table;
	task->cpe_timeout = rte_get_tsc_hz()/1000*targ->cpe_table_timeout_ms;

	if (!strcmp(targ->task_init->sub_mode_str, "pe")) {
		PROX_PANIC(!strcmp(targ->cpe_table_name, ""), "CPE table not configured\n");
		fill_table(targ, task->cpe_table);
	}

#ifdef ENABLE_EXTRA_USER_STATISTICS
	task->n_users = targ->n_users;
	task->stats_per_user = rte_zmalloc_socket(NULL, targ->n_users * sizeof(uint32_t),
						  RTE_CACHE_LINE_SIZE, rte_lcore_to_socket_id(targ->lconf->id));
#endif
	if (targ->runtime_flags & TASK_CLASSIFY) {
		PROX_PANIC(!strcmp(targ->dscp, ""), "DSCP table not specified\n");
		task->dscp = prox_sh_find_socket(socket_id, targ->dscp);
		if (!task->dscp) {
			int ret = lua_to_dscp(prox_lua(), GLOBAL, targ->dscp, socket_id, &task->dscp);
			PROX_PANIC(ret, "Failed to create dscp table from config:\n%s\n",
				   get_lua_to_errors());
			prox_sh_add_socket(socket_id, targ->dscp, task->dscp);
		}
	}

	task->runtime_flags = targ->runtime_flags;

	for (uint32_t i = 0; i < 64; ++i) {
		task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof (struct rte_mbuf));
	}

	targ->lconf->ctrl_timeout = rte_get_tsc_hz()/targ->ctrl_freq;
	targ->lconf->ctrl_func_m[targ->task] = arp_msg;

	/* TODO: check whether the reverse mapping needs to be limited to the
	   elements whose mapping has changed. */

	for (uint32_t i = 0; i < sizeof(targ->mapping)/sizeof(targ->mapping[0]); ++i) {
		task->src_mac[targ->mapping[i]] = *(uint64_t*)&prox_port_cfg[i].eth_addr;
	}

	/* task->src_mac[entry->port_idx] = *(uint64_t*)&prox_port_cfg[entry->port_idx].eth_addr; */
}
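
/* Populate this worker's share of the CPE table: entries are partitioned
   across workers by taking the (byte-swapped) IP address modulo the number
   of slave threads. */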
static void fill_table(struct task_args *targ, struct rte_table_hash *table)
{
	struct cpe_table_data *cpe_table_data;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	int ret = lua_to_cpe_table_data(prox_lua(), GLOBAL, targ->cpe_table_name, socket_id, &cpe_table_data);
	const uint8_t n_slaves = targ->nb_slave_threads;
	const uint8_t worker_id = targ->worker_thread_id;

	PROX_PANIC(ret, "Failed to read cpe table '%s' from lua:\n%s\n",
		   targ->cpe_table_name, get_lua_to_errors());

	for (uint32_t i = 0; i < cpe_table_data->n_entries; ++i) {
		if (rte_bswap32(cpe_table_data->entries[i].ip) % n_slaves != worker_id) {
			continue;
		}
		struct cpe_table_entry *entry = &cpe_table_data->entries[i];

		uint32_t port_idx = prox_cfg.cpe_table_ports[entry->port_idx];
		PROX_PANIC(targ->mapping[port_idx] == 255, "Error reading cpe table: Mapping for port %d is missing\n", port_idx);

		struct cpe_key key = {
			.ip = entry->ip,
			.gre_id = entry->gre_id,
		};

		struct cpe_data data = {
			.qinq_svlan = entry->svlan,
			.qinq_cvlan = entry->cvlan,
			.user = entry->user,
			.mac_port = {
				.mac = entry->eth_addr,
				.out_idx = targ->mapping[port_idx],
			},
			.tsc = UINT64_MAX,
		};

		int key_found;
		void* entry_in_hash;
		rte_table_hash_key8_ext_dosig_ops.f_add(table, &key, &data, &key_found, &entry_in_hash);
	}
}
Example #7
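/* Resolve a single "$name" variable: first look it up among the user-defined
   config variables, then fall back to a Lua global of the same name (without
   the leading '$'). */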
static int parse_single_var(char *val, size_t len, const char *name)
{
	struct var *match;

	match = var_lookup(name);
	if (match) {
		if (strlen(match->val) + 1 > len) {
			set_errf("Variable '%s' with value '%s' is too long\n",
				 match->name, match->val);
			return -1;
		}
		strncpy(val, match->val, len);
		return 0;
	}
	else {
		/* name + 1 to skip leading '$' */
		if (lua_to_string(prox_lua(), GLOBAL, name + 1, val, len) >= 0)
			return 0;
	}

	set_errf("Variable '%s' not defined!", name);
	return 1;
}
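
/* Illustrative usage sketch (assumed call pattern, not taken from the
   original sources): expand a "$ip" reference found while parsing a
   config value.

	char buf[64];
	if (parse_single_var(buf, sizeof(buf), "$ip") == 0)
		plog_info("resolved $ip to %s\n", buf);
*/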
Example #8
static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	static char name[] = "server_mempool";
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   4*1024 - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket_id, 0);
	PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);
	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_listen = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_listen == 0, "No services specified to listen on\n");

	task->bundle_cfgs = prox_zmalloc(n_listen * sizeof(task->bundle_cfgs[0]), socket_id);

	plogx_info("n_listen = %d\n", n_listen);

	struct hash_set *hs = prox_sh_find_socket(socket_id, "genl4_streams");
	if (hs == NULL) {
		/* Expected number of streams per bundle = 1, hash_set
		   will grow if full. */
		hs = hash_set_create(n_listen, socket_id);
		prox_sh_add_socket(socket_id, "genl4_streams", hs);
	}

	const struct rte_hash_parameters listen_table = {
		.name = name,
		.entries = n_listen * 4,
		.key_len = sizeof(struct new_tuple),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = socket_id,
	};
	name[0]++;

	task->listen_hash = rte_hash_create(&listen_table);
	task->listen_entries = prox_zmalloc(listen_table.entries * sizeof(task->listen_entries[0]), socket_id);

	int idx = 0;
	lua_pushnil(prox_lua());
	while (lua_next(prox_lua(), -2)) {
		task->bundle_cfgs[idx].n_stream_cfgs = 1;
		task->bundle_cfgs[idx].stream_cfgs = prox_zmalloc(sizeof(*task->bundle_cfgs[idx].stream_cfgs), socket_id);
		int ret = lua_to_stream_cfg(prox_lua(), STACK, NULL, socket_id, &task->bundle_cfgs[idx].stream_cfgs[0], hs);
		PROX_PANIC(ret, "Failed to load stream cfg\n");
		struct stream_cfg *stream = task->bundle_cfgs[idx].stream_cfgs[0];

		// TODO: check mask and add to hash for each host
		struct new_tuple nt = {
			.dst_addr = stream->servers.ip,
			.proto_id = stream->proto,
			.dst_port = stream->servers.port,
			.l2_types[0] = 0x0008,
		};
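		/* .l2_types[0] = 0x0008 is the IPv4 ethertype (0x0800) as stored
		   in network byte order on a little-endian host. */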

		ret = rte_hash_add_key(task->listen_hash, &nt);
		PROX_PANIC(ret < 0, "Failed to add\n");

		task->listen_entries[ret] = &task->bundle_cfgs[idx];

		plogx_dbg("Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&nt.dst_addr)), rte_bswap16(nt.dst_port));
		++idx;
		lua_pop(prox_lua(), 1);
	}

	static char name2[] = "task_gen_hash2";

	name2[0]++;
	plogx_dbg("Creating bundle ctx pool\n");
	if (bundle_ctx_pool_create(name2, targ->n_concur_conn * 2, &task->bundle_ctx_pool, NULL, 0, NULL, socket_id)) {
		cmd_mem_stats();
		PROX_PANIC(1, "Failed to create conn_ctx_pool\n");
	}

	task->heap = heap_create(targ->n_concur_conn * 2, socket_id);
	task->seed = rte_rdtsc();

	/* TODO: calculate the CDF of the reply distribution and the
	   number of replies as the number to cover for 99% of the
	   replies. For now, assume that this number is 2. */
	uint32_t queue_size = rte_align32pow2(targ->n_concur_conn * 2);
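	/* e.g. (illustrative) n_concur_conn = 5000 gives 2 * 5000 = 10000,
	   which rte_align32pow2() rounds up to 16384. */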

	PROX_PANIC(queue_size == 0, "Overflow resulted in queue size 0\n");
	task->fqueue = fqueue_create(queue_size, socket_id);
	PROX_PANIC(task->fqueue == NULL, "Failed to allocate local queue\n");

	uint32_t n_descriptors;

	if (targ->nb_txports) {
		PROX_PANIC(targ->nb_txports != 1, "Need exactly one TX port for L4 generation\n");
		n_descriptors = prox_port_cfg[targ->tx_port_queue[0].port].n_txd;
	} else {
		PROX_PANIC(targ->nb_txrings != 1, "Need exactly one TX ring for L4 generation\n");
		n_descriptors = 256;
	}

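	/* The extra 20 bytes on top of ETHER_MIN_LEN presumably account for the
	   per-frame wire overhead (preamble, SFD and inter-frame gap), so that
	   bytes_max covers a burst of worst-case small packets. */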
	struct token_time_cfg tt_cfg = {
		.bpp = targ->rate_bps,
		.period = rte_get_tsc_hz(),
		.bytes_max = n_descriptors * (ETHER_MIN_LEN + 20),
	};

	token_time_init(&task->token_time, &tt_cfg);
}

static void init_task_gen_client(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	static char name[] = "gen_pool";
	const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id);
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   4*1024 - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket, 0);
	PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);

	/* streams contains a lua table. Go through it and read each
	   stream with associated imix_fraction. */
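	/* Illustrative only: each array element is expected to be a Lua table of
	   the form { imix_fraction = <weight>, bundle = { ... } }; the field
	   names come from the lua_to_int()/lua_to_bundle_cfg() calls below,
	   everything else about the Lua file is assumed. */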
	uint32_t imix;
	uint32_t i = 0;

	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_bundle_cfgs = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_bundle_cfgs == 0, "No configs specified\n");
	plogx_info("loading %d bundle_cfgs\n", n_bundle_cfgs);

	struct hash_set *hs = prox_sh_find_socket(socket, "genl4_streams");
	if (hs == NULL) {
		/* Expected number of streams per bundle = 8, hash_set
		   will grow if full. */
		hs = hash_set_create(n_bundle_cfgs * 8, socket);
		prox_sh_add_socket(socket, "genl4_streams", hs);
	}

	task->bundle_cfgs = prox_zmalloc(n_bundle_cfgs * sizeof(task->bundle_cfgs[0]), socket);
	lua_pushnil(prox_lua());

	int total_imix = 0;

	uint32_t *occur = prox_zmalloc(n_bundle_cfgs * sizeof(*occur), socket);
	struct cdf *cdf = cdf_create(n_bundle_cfgs, socket);

	while (lua_next(prox_lua(), -2)) {
		PROX_PANIC(lua_to_int(prox_lua(), TABLE, "imix_fraction", &imix) ||
			   lua_to_bundle_cfg(prox_lua(), TABLE, "bundle", socket, &task->bundle_cfgs[i], hs),
			   "Failed to load bundle cfg:\n%s\n", get_lua_to_errors());
		cdf_add(cdf, imix);
		occur[i] = imix;
		total_imix += imix;
		++i;
		lua_pop(prox_lua(), 1);
	}

	lua_pop(prox_lua(), pop);
	cdf_setup(cdf);

	PROX_PANIC(targ->max_setup_rate == 0, "Max setup rate not set\n");

	task->new_conn_cost = rte_get_tsc_hz()/targ->max_setup_rate;

	static char name2[] = "task_gen_hash";
	name2[0]++;
	plogx_dbg("Creating bundle ctx pool\n");
	if (bundle_ctx_pool_create(name2, targ->n_concur_conn, &task->bundle_ctx_pool, occur, n_bundle_cfgs, task->bundle_cfgs, socket)) {
		cmd_mem_stats();
		PROX_PANIC(1, "Failed to create conn_ctx_pool\n");
	}

	task->heap = heap_create(targ->n_concur_conn, socket);
	task->seed = rte_rdtsc();
	/* task->token_time.bytes_max = MAX_PKT_BURST * (ETHER_MAX_LEN + 20); */

	/* To avoid overflowing the tx descriptors, the token bucket
	   size needs to be limited. The descriptors are filled most
	   quickly with the smallest packets. For that reason, the
	   token bucket size is given by "number of tx descriptors" *
	   "smallest Ethernet packet". */
	PROX_ASSERT(targ->nb_txports == 1);
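	/* e.g. (illustrative) 256 tx descriptors * (64 + 20) bytes = 21504
	   bytes of credit at most. */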

	struct token_time_cfg tt_cfg = {
		.bpp = targ->rate_bps,
		.period = rte_get_tsc_hz(),
		.bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (ETHER_MIN_LEN + 20),
	};

	token_time_init(&task->token_time, &tt_cfg);
}

static void start_task_gen_client(struct task_base *tbase)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;

	token_time_reset(&task->token_time, rte_rdtsc(), 0);

	task->new_conn_tokens = 0;
	task->new_conn_last_tsc = rte_rdtsc();
}

static void stop_task_gen_client(struct task_base *tbase)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	struct bundle_ctx *bundle;

	while (!heap_is_empty(task->heap)) {
		bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
		bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
	}
}

static void start_task_gen_server(struct task_base *tbase)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;

	token_time_reset(&task->token_time, rte_rdtsc(), 0);
}

static void stop_task_gen_server(struct task_base *tbase)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	struct bundle_ctx *bundle;
	uint8_t out[MAX_PKT_BURST];

	while (!heap_is_empty(task->heap)) {
		bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
		bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
	}

	if (task->cancelled) {
		struct rte_mbuf *mbuf = task->mbuf_saved;

		out[0] = OUT_DISCARD;
		task->cancelled = 0;
		task->base.tx_pkt(&task->base, &mbuf, 1, out);
	}

	do {
		if (task->cur_mbufs_beg == task->cur_mbufs_end) {
			task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST);
			task->cur_mbufs_beg = 0;
			if (task->cur_mbufs_end == 0)
				break;
		}
		uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg;
		struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg;

		if (n_pkts) {
			for (uint16_t j = 0; j < n_pkts; ++j) {
				out[j] = OUT_DISCARD;
			}
			task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
		}
	} while (1);
}

static struct task_init task_init_gen1 = {
	.mode_str = "genl4",
	.sub_mode_str = "server",
	.init = init_task_gen,
	.handle = handle_gen_bulk,
	.start = start_task_gen_server,
	.stop = stop_task_gen_server,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_gen_server),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

static struct task_init task_init_gen2 = {
	.mode_str = "genl4",
	.init = init_task_gen_client,
	.handle = handle_gen_bulk_client,
	.start = start_task_gen_client,
	.stop = stop_task_gen_client,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_gen_client),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

__attribute__((constructor)) static void reg_task_gen(void)
{
	reg_task(&task_init_gen1);
	reg_task(&task_init_gen2);
}
Example #9
static int lua_to_stream_cfg(struct lua_State *L, enum lua_place from, const char *name, uint32_t socket, struct stream_cfg **stream_cfg, struct hash_set *hs)
{
	int pop;
	struct stream_cfg *ret;

	if ((pop = lua_getfrom(L, from, name)) < 0)
		return -1;

	if (lua_getfrom(L, TABLE, "actions") < 0)
		return -1;

	lua_len(prox_lua(), -1);
	uint32_t n_actions = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);

	lua_pop(L, 1);

	size_t mem_size = 0;
	mem_size += sizeof(*ret);
	/* one additional action is allocated to allow inserting an
	   additional "default" action to close down TCP sessions from
	   the client side. */
	mem_size += sizeof(ret->actions[0]) * (n_actions + 1);
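	/* e.g. a stream configured with 4 actions reserves room for 5; the spare
	   slot is used further down to append the implicit client-side close. */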

	ret = prox_zmalloc(sizeof(*ret) + mem_size, socket);
	ret->n_actions = n_actions;

	size_t client_contents_len, server_contents_len;
	char proto[16];
	uint32_t timeout_us, timeout_time_wait_us;
	plogx_dbg("loading stream\n");
	if (lua_to_host_set(L, TABLE, "servers", &ret->servers))
		return -1;
	if (lua_to_string(L, TABLE, "l4_proto", proto, sizeof(proto)))
		return -1;
	if (lua_to_peer_data(L, TABLE, "client_data", socket, &ret->data[PEER_CLIENT], &client_contents_len, hs))
		return -1;
	if (lua_to_peer_data(L, TABLE, "server_data", socket, &ret->data[PEER_SERVER], &server_contents_len, hs))
		return -1;

	if (lua_to_int(L, TABLE, "timeout", &timeout_us)) {
		timeout_us = 1000000;
	}

	ret->tsc_timeout = usec_to_tsc(timeout_us);

	double up, dn;

	if (lua_to_double(L, TABLE, "up_bps", &up))
		up = 5000;// Default rate is 40 Mbps

	if (lua_to_double(L, TABLE, "dn_bps", &dn))
		dn = 5000;// Default rate is 40 Mbps

	const uint64_t hz = rte_get_tsc_hz();

	ret->tt_cfg[PEER_CLIENT] = token_time_cfg_create(up, hz, ETHER_MAX_LEN + 20);
	ret->tt_cfg[PEER_SERVER] = token_time_cfg_create(dn, hz, ETHER_MAX_LEN + 20);

	if (!strcmp(proto, "tcp")) {
		ret->proto = IPPROTO_TCP;
		ret->proc = stream_tcp_proc;
		ret->is_ended = stream_tcp_is_ended;

		if (lua_to_int(L, TABLE, "timeout_time_wait", &timeout_time_wait_us)) {
			timeout_time_wait_us = 2000000;
		}

		ret->tsc_timeout_time_wait = usec_to_tsc(timeout_time_wait_us);
	}
	else if (!strcmp(proto, "udp")) {
		plogx_dbg("loading UDP\n");
		ret->proto = IPPROTO_UDP;
		ret->proc = stream_udp_proc;
		ret->is_ended = stream_udp_is_ended;
	}
	else
		return -1;

	/* get all actions */
	if (lua_getfrom(L, TABLE, "actions") < 0)
		return -1;

	uint32_t idx = 0;
	lua_pushnil(L);
	while (lua_next(L, -2)) {
		if (lua_to_peer_action(L, STACK, NULL, &ret->actions[idx], client_contents_len, server_contents_len))
			return -1;

		stream_cfg_verify_action(ret, &ret->actions[idx]);

		idx++;

		lua_pop(L, 1);
	}
	lua_pop(L, 1);

	/* For TCP, one of the peers initiates closing down the
	   connection. This is signified by the last action having
	   zero length. If such an action is not specified in the
	   configuration file, the default is for the client to close
	   the connection. This means that the TCP connection at the
	   client will go into a TIME_WAIT state and the server
	   releases all the resources avoiding resource starvation at
	   the server. */
	if (ret->proto == IPPROTO_TCP && ret->actions[ret->n_actions - 1].len != 0) {
		ret->actions[ret->n_actions].len = 0;
		ret->actions[ret->n_actions].beg = 0;
		ret->actions[ret->n_actions].peer = PEER_CLIENT;
		ret->n_actions++;
	}

	if (IPPROTO_TCP == ret->proto)
		stream_tcp_calc_len(ret, &ret->n_pkts, &ret->n_bytes);
	else
		stream_udp_calc_len(ret, &ret->n_pkts, &ret->n_bytes);

	lua_pop(L, pop);
	*stream_cfg = ret;
	return 0;
}
Example #10
static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	static char name[] = "server_mempool";
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   targ->nb_mbuf - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket_id, 0);
	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_listen = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_listen == 0, "No services specified to listen on\n");

	task->bundle_cfgs = rte_zmalloc_socket(NULL, n_listen * sizeof(task->bundle_cfgs[0]), RTE_CACHE_LINE_SIZE, socket_id);

	plogx_info("n_listen = %d\n", n_listen);
	const struct rte_hash_parameters listen_table = {
		.name = name,
		.entries = n_listen * 4,
		.key_len = sizeof(struct new_tuple),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = socket_id,
	};
	name[0]++;

	task->listen_hash = rte_hash_create(&listen_table);
	task->listen_entries = rte_zmalloc_socket(NULL, listen_table.entries * sizeof(task->listen_entries[0]), RTE_CACHE_LINE_SIZE, socket_id);

	int idx = 0;
	lua_pushnil(prox_lua());
	while (lua_next(prox_lua(), -2)) {
		task->bundle_cfgs[idx].n_stream_cfgs = 1;
		int ret = lua_to_stream_cfg(prox_lua(), STACK, NULL, socket_id, &task->bundle_cfgs[idx].stream_cfgs[0]);
		PROX_PANIC(ret, "Failed to load stream cfg\n");
		struct stream_cfg *stream = task->bundle_cfgs[idx].stream_cfgs[0];

		// TODO: check mask and add to hash for each host
		struct new_tuple nt = {
			.dst_addr = stream->servers.ip,
			.proto_id = stream->proto,
			.dst_port = stream->servers.port,
			.l2_types[0] = 0x0008,
		};

		ret = rte_hash_add_key(task->listen_hash, &nt);
		PROX_PANIC(ret < 0, "Failed to add\n");

		task->listen_entries[ret] = &task->bundle_cfgs[idx];

		plogx_dbg("Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&nt.dst_addr)), rte_bswap16(nt.dst_port));
		++idx;
		lua_pop(prox_lua(), 1);
	}

	static char name2[] = "task_gen_hash2";
	name2[0]++;
	PROX_PANIC(bundle_ctx_pool_create(name2, targ->n_concur_conn*2, &task->bundle_ctx_pool, socket_id), "Failed to create conn_ctx_pool");
	task->heap = heap_create(targ->n_concur_conn*2, socket_id);
	task->seed = rte_rdtsc();
}

static void init_task_gen_client(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	static char name[] = "gen_pool";
	const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id);
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   targ->nb_mbuf - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket, 0);

	/* streams contains a lua table. Go through it and read each
	   stream with associated imix_fraction. */
	uint32_t imix;
	int i = 0;

	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_bundle_cfgs = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_bundle_cfgs == 0, "No configs specified\n");
	plogx_info("loading %d bundle_cfgs\n", n_bundle_cfgs);
	task->bundle_cfgs = rte_zmalloc_socket(NULL, n_bundle_cfgs * sizeof(task->bundle_cfgs[0]), RTE_CACHE_LINE_SIZE, socket);
	lua_pushnil(prox_lua());

	int total_imix = 0;
	struct cdf *cdf = cdf_create(n_bundle_cfgs, socket);

	while (lua_next(prox_lua(), -2)) {
		PROX_PANIC(lua_to_int(prox_lua(), TABLE, "imix_fraction", &imix) ||
			   lua_to_bundle_cfg(prox_lua(), TABLE, "bundle", socket, &task->bundle_cfgs[i]),
			   "Failed to load bundle cfg:\n%s\n", get_lua_to_errors());
		cdf_add(cdf, imix);
		total_imix += imix;
		++i;
		lua_pop(prox_lua(), 1);
	}
	lua_pop(prox_lua(), pop);
	cdf_setup(cdf);

	task->tot_imix = total_imix;
	task->cdf = cdf;
	static char name2[] = "task_gen_hash";
	name2[0]++;
	PROX_PANIC(bundle_ctx_pool_create(name2, targ->n_concur_conn, &task->bundle_ctx_pool, socket), "Failed to create conn_ctx_pool");
	task->heap = heap_create(targ->n_concur_conn, socket);
	task->seed = rte_rdtsc();
}

static struct task_init task_init_gen1 = {
	.mode_str = "genl4",
	.sub_mode_str = "server",
	.init = init_task_gen,
	.handle = handle_gen_bulk,
	.flag_features = TASK_ZERO_RX,
	.size = sizeof(struct task_gen_server),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

static struct task_init task_init_gen2 = {
	.mode_str = "genl4",
	.init = init_task_gen_client,
	.handle = handle_gen_bulk_client,
	.flag_features = TASK_ZERO_RX,
	.size = sizeof(struct task_gen_client),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

__attribute__((constructor)) static void reg_task_gen(void)
{
	reg_task(&task_init_gen1);
	reg_task(&task_init_gen2);
}
Example #11
static int lua_to_stream_cfg(struct lua_State *L, enum lua_place from, const char *name, uint32_t socket, struct stream_cfg **stream_cfg)
{
	int pop;
	struct stream_cfg *ret;

	if ((pop = lua_getfrom(L, from, name)) < 0)
		return -1;

	if (lua_getfrom(L, TABLE, "actions") < 0)
		return -1;

	lua_len(prox_lua(), -1);
	uint32_t n_actions = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);

	lua_pop(L, 1);

	size_t mem_size = 0;
	mem_size += sizeof(*ret);
	mem_size += sizeof(ret->actions[0])*n_actions;

	ret = rte_zmalloc_socket(NULL, sizeof(*ret) + mem_size, RTE_CACHE_LINE_SIZE, socket);
	ret->n_actions = n_actions;

	size_t client_contents_len, server_contents_len;
	char proto[16];
	uint32_t timeout_us, timeout_time_wait_us;

	plogx_dbg("loading stream\n");
	if (lua_to_host_set(L, TABLE, "servers", &ret->servers) ||
	    lua_to_string(L, TABLE, "l4_proto", proto, sizeof(proto)) ||
	    lua_to_peer_data(L, TABLE, "client_data", socket, &ret->data[PEER_CLIENT], &client_contents_len) ||
	    lua_to_peer_data(L, TABLE, "server_data", socket, &ret->data[PEER_SERVER], &server_contents_len)) {
		return -1;
	}

	if (lua_to_int(L, TABLE, "timeout", &timeout_us)) {
		timeout_us = 1000000;
	}

	ret->tsc_timeout = (uint64_t)timeout_us * rte_get_tsc_hz()/1000000;

	if (!strcmp(proto, "tcp")) {
		ret->proto = IPPROTO_TCP;
		ret->proc = stream_tcp_proc;
		ret->is_ended = stream_tcp_is_ended;

		if (lua_to_int(L, TABLE, "timeout_time_wait", &timeout_time_wait_us)) {
			timeout_time_wait_us = 2000000;
		}

		ret->tsc_timeout_time_wait = (uint64_t)timeout_time_wait_us * rte_get_tsc_hz()/1000000;
	}
	else if (!strcmp(proto, "udp")) {
		plogx_dbg("loading UDP\n");
		ret->proto = IPPROTO_UDP;
		ret->proc = stream_udp_proc;
		ret->is_ended = stream_udp_is_ended;
	}
	else
		return -1;

	/* get all actions */
	if (lua_getfrom(L, TABLE, "actions") < 0)
		return -1;

	uint32_t idx = 0;
	lua_pushnil(L);
	while (lua_next(L, -2)) {
		if (lua_to_peer_action(L, STACK, NULL, &ret->actions[idx], client_contents_len, server_contents_len))
			return -1;
		idx++;

		lua_pop(L, 1);
	}
	lua_pop(L, 1);

	lua_pop(L, pop);
	*stream_cfg = ret;
	return 0;
}