Example #1
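/* Log the packet's IPv6 source/destination addresses and L4 ports, then print the ACL rule identified by the match signature. */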
void
dump_acl6_rule(struct rte_mbuf *m, uint32_t sig)
{
	unsigned i;
	uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
	    (rte_pktmbuf_mtod(m, unsigned char *) + sizeof(struct ether_hdr));

	acl_log("Packet Src");
	for (i = 0; i < RTE_DIM(ipv6_hdr->src_addr); i += sizeof(uint16_t))
		acl_log(":%.2x%.2x", ipv6_hdr->src_addr[i],
			ipv6_hdr->src_addr[i + 1]);

	acl_log("\nDst");
	for (i = 0; i < RTE_DIM(ipv6_hdr->dst_addr); i += sizeof(uint16_t))
		acl_log(":%.2x%.2x", ipv6_hdr->dst_addr[i],
			ipv6_hdr->dst_addr[i + 1]);

	acl_log("\nSrc port:%hu,Dst port:%hu ",
		rte_bswap16(*(uint16_t *)(ipv6_hdr + 1)),
		rte_bswap16(*((uint16_t *)(ipv6_hdr + 1) + 1)));
	acl_log("hit ACL %d - ", offset);

	print_one_ipv6_rule(acl_config.rule_ipv6 + offset, 1);

	acl_log("\n\n");
}
Example #2
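/* Add one QinQ/GRE mapping entry to the lookup table, keyed either by the QinQ tags or by the svlan/cvlan-derived IP, and bump the entry counter. */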
static inline void add_key(struct task_args *targ, struct qinq_gre_map *qinq_gre_map, struct rte_table_hash* qinq_gre_table, uint32_t i, uint32_t *count)
{
	struct qinq_gre_data entry = {
		.gre_id = qinq_gre_map->entries[i].gre_id,
		.user = qinq_gre_map->entries[i].user,
	};

#ifdef USE_QINQ
	struct vlans qinq2 = {
		.svlan = {.eth_proto = targ->qinq_tag, .vlan_tci = qinq_gre_map->entries[i].svlan},
		.cvlan = {.eth_proto = ETYPE_VLAN,     .vlan_tci = qinq_gre_map->entries[i].cvlan}
	};

	int key_found = 0;
	void* entry_in_hash = NULL;
	rte_table_hash_key8_ext_dosig_ops.f_add(qinq_gre_table, &qinq2, &entry, &key_found, &entry_in_hash);

	plog_dbg("Core %u adding user %u (tag %x svlan %x cvlan %x), rss=%x\n",
		 targ->lconf->id, qinq2.svlan.eth_proto, qinq_gre_map->entries[i].user,
		 rte_bswap16(qinq_gre_map->entries[i].svlan),
		 rte_bswap16(qinq_gre_map->entries[i].cvlan),
		 qinq_gre_map->entries[i].rss);
#else
	/* lower 3 bytes of IPv4 address contain svlan/cvlan. */
	uint64_t ip = ((uint32_t)rte_bswap16(qinq_gre_map->entries[i].svlan) << 12) |
		rte_bswap16(qinq_gre_map->entries[i].cvlan);
	int key_found = 0;
	void* entry_in_hash = NULL;
	rte_table_hash_key8_ext_dosig_ops.f_add(qinq_gre_table, &ip, &entry, &key_found, &entry_in_hash);

	plog_dbg("Core %u hash table add: key = %016"PRIx64"\n",
		 targ->lconf->id, ip);
#endif
	(*count)++;
}
Example #3
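/* Read an {ip, port, ip_mask, port_mask} table from Lua into a host_set; missing masks default to 0 and all values are byte-swapped before use. */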
static int lua_to_host_set(struct lua_State *L, enum lua_place from, const char *name, struct host_set *h)
{
	int pop;
	if ((pop = lua_getfrom(L, from, name)) < 0)
		return -1;

	if (!lua_istable(L, -1))
		return -1;

	uint32_t port = 0, port_mask = 0;

	if (lua_to_ip(L, TABLE, "ip", &h->ip) || lua_to_int(L, TABLE, "port", &port))
		return -1;

	if (lua_to_int(L, TABLE, "ip_mask", &h->ip_mask))
		h->ip_mask = 0;
	if (lua_to_int(L, TABLE, "port_mask", &port_mask))
		h->port_mask = 0;

	h->port = rte_bswap16(port);
	h->port_mask = rte_bswap16(port_mask);
	h->ip = rte_bswap32(h->ip);
	h->ip_mask = rte_bswap32(h->ip_mask);

	lua_pop(L, pop);
	return 0;
}
Example #4
void
dump_acl4_rule(struct rte_mbuf *m, uint32_t sig)
{
	uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
	unsigned char a, b, c, d;
	struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
	    (rte_pktmbuf_mtod(m, unsigned char *) + sizeof(struct ether_hdr));

	uint32_t_to_char(rte_bswap32(ipv4_hdr->src_addr), &a, &b, &c, &d);
	acl_log("Packet Src:%hhu.%hhu.%hhu.%hhu ", a, b, c, d);
	uint32_t_to_char(rte_bswap32(ipv4_hdr->dst_addr), &a, &b, &c, &d);
	acl_log("Dst:%hhu.%hhu.%hhu.%hhu ", a, b, c, d);

	acl_log("Src port:%hu,Dst port:%hu ",
		rte_bswap16(*(uint16_t *)(ipv4_hdr + 1)),
		rte_bswap16(*((uint16_t *)(ipv4_hdr + 1) + 1)));
	acl_log("hit ACL %d - ", offset);

	print_one_ipv4_rule(acl_config.rule_ipv4 + offset, 1);

	acl_log("\n\n");
}
Example #5
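/* Prepend an Ethernet header to the packet: destination MAC from the per-packet metadata (next-hop ARP entry), source MAC from local_ether_addr, EtherType IPv4. */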
static inline void
app_pkt_metadata_flush(struct rte_mbuf *pkt)
{
	struct app_pkt_metadata *pkt_meta = (struct app_pkt_metadata *)
		RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
	struct ether_hdr *ether_hdr = (struct ether_hdr *)
		rte_pktmbuf_prepend(pkt, (uint16_t) sizeof(struct ether_hdr));

	ether_addr_copy(&pkt_meta->nh_arp, &ether_hdr->d_addr);
	ether_addr_copy(&local_ether_addr, &ether_hdr->s_addr);
	ether_hdr->ether_type = rte_bswap16(ETHER_TYPE_IPv4);
	pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
}
Example #6
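/* Sanity-check rte_bswap16/32/64 against known results, both for runtime variables and for compile-time constants. */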
static int
test_byteorder(void)
{
	uint16_t res_u16;
	uint32_t res_u32;
	uint64_t res_u64;

	res_u16 = rte_bswap16(u16);
	printf("%"PRIx16" -> %"PRIx16"\n", u16, res_u16);
	if (res_u16 != 0x3713)
		return -1;

	res_u32 = rte_bswap32(u32);
	printf("%"PRIx32" -> %"PRIx32"\n", u32, res_u32);
	if (res_u32 != 0xefbeaddeUL)
		return -1;

	res_u64 = rte_bswap64(u64);
	printf("%"PRIx64" -> %"PRIx64"\n", u64, res_u64);
	if (res_u64 != 0xcefabebafecaaddeULL)
		return -1;

	res_u16 = rte_bswap16(0x1337);
	printf("const %"PRIx16" -> %"PRIx16"\n", 0x1337, res_u16);
	if (res_u16 != 0x3713)
		return -1;

	res_u32 = rte_bswap32(0xdeadbeefUL);
	printf("const %"PRIx32" -> %"PRIx32"\n", (uint32_t) 0xdeadbeef, res_u32);
	if (res_u32 != 0xefbeaddeUL)
		return -1;

	res_u64 = rte_bswap64(0xdeadcafebabefaceULL);
	printf("const %"PRIx64" -> %"PRIx64"\n", (uint64_t) 0xdeadcafebabefaceULL, res_u64);
	if (res_u64 != 0xcefabebafecaaddeULL)
		return -1;

	return 0;
}
Example #7
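/* Initialize the genl4 server task: create its mempool, load the listen streams from Lua into single-stream bundles and a listen hash, and set up the bundle context pool, heap, flush queue and TX token bucket. */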
static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	static char name[] = "server_mempool";
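	/* Mempool names must be unique; bumping the first character gives each task instance its own name. */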
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   4*1024 - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket_id, 0);
	PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);
	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_listen = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_listen == 0, "No services specified to listen on\n");

	task->bundle_cfgs = prox_zmalloc(n_listen * sizeof(task->bundle_cfgs[0]), socket_id);

	plogx_info("n_listen = %d\n", n_listen);

	struct hash_set *hs = prox_sh_find_socket(socket_id, "genl4_streams");
	if (hs == NULL) {
		/* Expected number of streams per bundle = 1, hash_set
		   will grow if full. */
		hs = hash_set_create(n_listen, socket_id);
		prox_sh_add_socket(socket_id, "genl4_streams", hs);
	}

	const struct rte_hash_parameters listen_table = {
		.name = name,
		.entries = n_listen * 4,
		.key_len = sizeof(struct new_tuple),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = socket_id,
	};
	name[0]++;

	task->listen_hash = rte_hash_create(&listen_table);
	task->listen_entries = prox_zmalloc(listen_table.entries * sizeof(task->listen_entries[0]), socket_id);

	int idx = 0;
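	/* Each Lua stream entry becomes a single-stream bundle and an entry in the listen hash keyed by its destination tuple. */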
	lua_pushnil(prox_lua());
	while (lua_next(prox_lua(), -2)) {
		task->bundle_cfgs[idx].n_stream_cfgs = 1;
		task->bundle_cfgs[idx].stream_cfgs = prox_zmalloc(sizeof(*task->bundle_cfgs[idx].stream_cfgs), socket_id);
		int ret = lua_to_stream_cfg(prox_lua(), STACK, NULL, socket_id, &task->bundle_cfgs[idx].stream_cfgs[0], hs);
		PROX_PANIC(ret, "Failed to load stream cfg\n");
		struct stream_cfg *stream = task->bundle_cfgs[idx].stream_cfgs[0];

		// TODO: check mask and add to hash for each host
		struct new_tuple nt = {
			.dst_addr = stream->servers.ip,
			.proto_id = stream->proto,
			.dst_port = stream->servers.port,
			.l2_types[0] = 0x0008,
		};

		ret = rte_hash_add_key(task->listen_hash, &nt);
		PROX_PANIC(ret < 0, "Failed to add\n");

		task->listen_entries[ret] = &task->bundle_cfgs[idx];

		plogx_dbg("Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&nt.dst_addr)), rte_bswap16(nt.dst_port));
		++idx;
		lua_pop(prox_lua(), 1);
	}

	static char name2[] = "task_gen_hash2";

	name2[0]++;
	plogx_dbg("Creating bundle ctx pool\n");
	if (bundle_ctx_pool_create(name2, targ->n_concur_conn * 2, &task->bundle_ctx_pool, NULL, 0, NULL, socket_id)) {
		cmd_mem_stats();
		PROX_PANIC(1, "Failed to create conn_ctx_pool\n");
	}

	task->heap = heap_create(targ->n_concur_conn * 2, socket_id);
	task->seed = rte_rdtsc();

	/* TODO: calculate the CDF of the reply distribution and the
	   number of replies as the number to cover for 99% of the
	   replies. For now, assume that this is number is 2. */
	uint32_t queue_size = rte_align32pow2(targ->n_concur_conn * 2);

	PROX_PANIC(queue_size == 0, "Overflow resulted in queue size 0\n");
	task->fqueue = fqueue_create(queue_size, socket_id);
	PROX_PANIC(task->fqueue == NULL, "Failed to allocate local queue\n");

	uint32_t n_descriptors;

	if (targ->nb_txports) {
		PROX_PANIC(targ->nb_txports != 1, "Need exactly one TX port for L4 generation\n");
		n_descriptors = prox_port_cfg[targ->tx_port_queue[0].port].n_txd;
	} else {
		PROX_PANIC(targ->nb_txrings != 1, "Need exactly one TX ring for L4 generation\n");
		n_descriptors = 256;
	}

	struct token_time_cfg tt_cfg = {
		.bpp = targ->rate_bps,
		.period = rte_get_tsc_hz(),
		.bytes_max = n_descriptors * (ETHER_MIN_LEN + 20),
	};

	token_time_init(&task->token_time, &tt_cfg);
}

static void init_task_gen_client(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	static char name[] = "gen_pool";
	const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id);
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   4*1024 - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket, 0);
	PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);

	/* streams contains a lua table. Go through it and read each
	   stream with associated imix_fraction. */
	uint32_t imix;
	uint32_t i = 0;

	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_bundle_cfgs = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_bundle_cfgs == 0, "No configs specified\n");
	plogx_info("loading %d bundle_cfgs\n", n_bundle_cfgs);

	struct hash_set *hs = prox_sh_find_socket(socket, "genl4_streams");
	if (hs == NULL) {
		/* Expected number of streams per bundle = 8, hash_set
		   will grow if full. */
		hs = hash_set_create(n_bundle_cfgs * 8, socket);
		prox_sh_add_socket(socket, "genl4_streams", hs);
	}

	task->bundle_cfgs = prox_zmalloc(n_bundle_cfgs * sizeof(task->bundle_cfgs[0]), socket);
	lua_pushnil(prox_lua());

	int total_imix = 0;

	uint32_t *occur = prox_zmalloc(n_bundle_cfgs * sizeof(*occur), socket);
	struct cdf *cdf = cdf_create(n_bundle_cfgs, socket);

	while (lua_next(prox_lua(), -2)) {
		PROX_PANIC(lua_to_int(prox_lua(), TABLE, "imix_fraction", &imix) ||
			   lua_to_bundle_cfg(prox_lua(), TABLE, "bundle", socket, &task->bundle_cfgs[i], hs),
			   "Failed to load bundle cfg:\n%s\n", get_lua_to_errors());
		cdf_add(cdf, imix);
		occur[i] = imix;
		total_imix += imix;
		++i;
		lua_pop(prox_lua(), 1);
	}

	lua_pop(prox_lua(), pop);
	cdf_setup(cdf);

	PROX_PANIC(targ->max_setup_rate == 0, "Max setup rate not set\n");

	task->new_conn_cost = rte_get_tsc_hz()/targ->max_setup_rate;

	static char name2[] = "task_gen_hash";
	name2[0]++;
	plogx_dbg("Creating bundle ctx pool\n");
	if (bundle_ctx_pool_create(name2, targ->n_concur_conn, &task->bundle_ctx_pool, occur, n_bundle_cfgs, task->bundle_cfgs, socket)) {
		cmd_mem_stats();
		PROX_PANIC(1, "Failed to create conn_ctx_pool\n");
	}

	task->heap = heap_create(targ->n_concur_conn, socket);
	task->seed = rte_rdtsc();
	/* task->token_time.bytes_max = MAX_PKT_BURST * (ETHER_MAX_LEN + 20); */

	/* To avoid overflowing the tx descriptors, the token bucket
	   size needs to be limited. The descriptors are filled most
	   quickly with the smallest packets. For that reason, the
	   token bucket size is given by "number of tx descriptors" *
	   "smallest Ethernet packet". */
	PROX_ASSERT(targ->nb_txports == 1);

	struct token_time_cfg tt_cfg = {
		.bpp = targ->rate_bps,
		.period = rte_get_tsc_hz(),
		.bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (ETHER_MIN_LEN + 20),
	};

	token_time_init(&task->token_time, &tt_cfg);
}

static void start_task_gen_client(struct task_base *tbase)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;

	token_time_reset(&task->token_time, rte_rdtsc(), 0);

	task->new_conn_tokens = 0;
	task->new_conn_last_tsc = rte_rdtsc();
}

static void stop_task_gen_client(struct task_base *tbase)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	struct bundle_ctx *bundle;

	while (!heap_is_empty(task->heap)) {
		bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
		bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
	}
}

static void start_task_gen_server(struct task_base *tbase)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;

	token_time_reset(&task->token_time, rte_rdtsc(), 0);
}

static void stop_task_gen_server(struct task_base *tbase)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	struct bundle_ctx *bundle;
	uint8_t out[MAX_PKT_BURST];

	while (!heap_is_empty(task->heap)) {
		bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
		bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
	}

	if (task->cancelled) {
		struct rte_mbuf *mbuf = task->mbuf_saved;

		out[0] = OUT_DISCARD;
		task->cancelled = 0;
		task->base.tx_pkt(&task->base, &mbuf, 1, out);
	}

	do {
		if (task->cur_mbufs_beg == task->cur_mbufs_end) {
			task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST);
			task->cur_mbufs_beg = 0;
			if (task->cur_mbufs_end == 0)
				break;
		}
		uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg;
		struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg;

		if (n_pkts) {
			for (uint16_t j = 0; j < n_pkts; ++j) {
				out[j] = OUT_DISCARD;
			}
			task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
		}
	} while (1);
}

static struct task_init task_init_gen1 = {
	.mode_str = "genl4",
	.sub_mode_str = "server",
	.init = init_task_gen,
	.handle = handle_gen_bulk,
	.start = start_task_gen_server,
	.stop = stop_task_gen_server,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_gen_server),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

static struct task_init task_init_gen2 = {
	.mode_str = "genl4",
	.init = init_task_gen_client,
	.handle = handle_gen_bulk_client,
	.start = start_task_gen_client,
	.stop = stop_task_gen_client,
	.flag_features = TASK_FEATURE_ZERO_RX,
	.size = sizeof(struct task_gen_client),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

__attribute__((constructor)) static void reg_task_gen(void)
{
	reg_task(&task_init_gen1);
	reg_task(&task_init_gen2);
}
Example #8
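/* Process mbufs queued for the server task: match each packet to an existing connection or accept a new one, and transmit until the TX token bucket runs out, in which case processing resumes on the next call. */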
static int handle_gen_queued(struct task_gen_server *task)
{
	uint8_t out[MAX_PKT_BURST];
	struct bundle_ctx *conn;
	struct pkt_tuple pkt_tuple;
	struct l4_meta l4_meta;
	uint16_t j;
	uint16_t cancelled = 0;
	int ret;

	if (task->cur_mbufs_beg == task->cur_mbufs_end) {
		task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST);
		task->cur_mbufs_beg = 0;
	}
	uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg;
	struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg;

	j = task->cancelled;
	if (task->cancelled) {
		uint16_t pkt_len = mbuf_wire_size(mbufs[0]);

		if (token_time_take(&task->token_time, pkt_len) != 0)
			return -1;

		out[0] = task->out_saved;
		task->cancelled = 0;
	}

	/* Main proc loop */
	for (; j < n_pkts; ++j) {

		if (parse_pkt(mbufs[j], &pkt_tuple, &l4_meta)) {
			plogdx_err(mbufs[j], "Unknown packet, parsing failed\n");
			out[j] = OUT_DISCARD;
		}

		conn = NULL;
		ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple);

		if (ret >= 0)
			conn = task->bundle_ctx_pool.hash_entries[ret];
		else {
			/* If not part of existing connection, try to create a connection */
			struct new_tuple nt;
			nt.dst_addr = pkt_tuple.dst_addr;
			nt.proto_id = pkt_tuple.proto_id;
			nt.dst_port = pkt_tuple.dst_port;
			rte_memcpy(nt.l2_types, pkt_tuple.l2_types, sizeof(nt.l2_types));
			const struct bundle_cfg *n;

			if (NULL != (n = server_accept(task, &nt))) {
				conn = bundle_ctx_pool_get(&task->bundle_ctx_pool);
				if (!conn) {
					out[j] = OUT_DISCARD;
					plogx_err("No more free bundles to accept new connection\n");
					continue;
				}
				ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple);
				if (ret < 0) {
					out[j] = OUT_DISCARD;
					bundle_ctx_pool_put(&task->bundle_ctx_pool, conn);
					plog_err("Adding key failed while trying to accept connection\n");
					continue;
				}

				task->bundle_ctx_pool.hash_entries[ret] = conn;

				bundle_init_w_cfg(conn, n, task->heap, PEER_SERVER, &task->seed);
				conn->tuple = pkt_tuple;

				if (conn->ctx.stream_cfg->proto == IPPROTO_TCP)
					task->l4_stats.tcp_created++;
				else
					task->l4_stats.udp_created++;
			}
			else {
				plog_err("Packet received for service that does not exist :\n"
					 "source ip = %0x:%u\n"
					 "dst ip    = %0x:%u\n",
					 pkt_tuple.src_addr, rte_bswap16(pkt_tuple.src_port),
					 pkt_tuple.dst_addr, rte_bswap16(pkt_tuple.dst_port));
			}
		}

		/* bundle contains either an active connection or a
		   newly created connection. If it is NULL, then not
		   listening. */
		if (NULL != conn) {
			ret = bundle_proc_data(conn, mbufs[j], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			out[j] = ret == 0? 0: OUT_HANDLED;

			if (ret == 0) {
				uint16_t pkt_len = mbuf_wire_size(mbufs[j]);

				if (token_time_take(&task->token_time, pkt_len) != 0) {
					task->out_saved = out[j];
					task->cancelled = 1;
					task->base.tx_pkt(&task->base, mbufs, j, out);
					task->cur_mbufs_beg += j;
					return -1;
				}
			}
		}
		else {
			pkt_tuple_debug(&pkt_tuple);
			plogd_dbg(mbufs[j], NULL);
			out[j] = OUT_DISCARD;
		}
	}

	task->base.tx_pkt(&task->base, mbufs, j, out);

	task->cur_mbufs_beg += j;
	return 0;
}
Example #9
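/* Client-side handler: feed received packets into their connections (sending a TCP RST for unknown TCP packets), service due retransmit/delayed-transmit callbacks from the heap, then open new connections, rate-limited by a small token counter. */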
static int handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	uint8_t out[MAX_PKT_BURST] = {0};
	struct bundle_ctx *conn;
	int ret;

	if (n_pkts) {
		for (int i = 0; i < n_pkts; ++i) {
			struct pkt_tuple pt;
			struct l4_meta l4_meta;

			if (parse_pkt(mbufs[i], &pt, &l4_meta)) {
				plogdx_err(mbufs[i], "Parsing failed\n");
				out[i] = OUT_DISCARD;
				continue;
			}

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt);

			if (ret < 0) {
				plogx_dbg("Client: packet RX that does not belong to connection:"
					  "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n",
					  IPv4_BYTES(((uint8_t*)&pt.dst_addr)),
					  rte_bswap16(pt.dst_port),
					  IPv4_BYTES(((uint8_t*)&pt.src_addr)),
					  rte_bswap16(pt.src_port));

				plogdx_dbg(mbufs[i], NULL);

				if (pt.proto_id == IPPROTO_TCP) {
					stream_tcp_create_rst(mbufs[i], &l4_meta, &pt);
					out[i] = 0;
					continue;
				}
				else {
					out[i] = OUT_DISCARD;
					continue;
				}
			}

			conn = task->bundle_ctx_pool.hash_entries[ret];
			ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
			out[i] = ret == 0? 0: OUT_HANDLED;
		}
		task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
	}

	/* If there is at least one callback to handle, handle at most MAX_PKT_BURST */
	if (heap_top_is_lower(task->heap, rte_rdtsc())) {
		if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs))
			return 0;

		uint16_t n_called_back = 0;
		while (heap_top_is_lower(task->heap, rte_rdtsc()) && n_called_back < MAX_PKT_BURST) {
			conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

			/* handle packet TX (retransmit or delayed transmit) */
			ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			if (ret == 0) {
				out[n_called_back] = 0;
				n_called_back++;
			}
		}
		plogx_dbg("During callback, will send %d packets\n", n_called_back);

		task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
		task->n_new_mbufs -= n_called_back;
	}

	uint32_t n_new = task->bundle_ctx_pool.n_free_bundles;
	n_new = n_new > MAX_PKT_BURST? MAX_PKT_BURST : n_new;

	uint64_t diff = (rte_rdtsc() - task->new_conn_last_tsc)/task->new_conn_cost;
	task->new_conn_last_tsc += diff * task->new_conn_cost;
	task->new_conn_tokens += diff;

	if (task->new_conn_tokens > 16)
		task->new_conn_tokens = 16;
	if (n_new > task->new_conn_tokens)
		n_new = task->new_conn_tokens;
	task->new_conn_tokens -= n_new;
	if (n_new == 0)
		return 0;

	if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs))
		return 0;

	for (uint32_t i = 0; i < n_new; ++i) {
		struct bundle_ctx *bundle_ctx = bundle_ctx_pool_get_w_cfg(&task->bundle_ctx_pool);
		PROX_ASSERT(bundle_ctx);

		struct pkt_tuple *pt = &bundle_ctx->tuple;

		int n_retries = 0;
		do {
			/* Note that the actual packet sent will
			   contain swapped addresses and ports
			   (i.e. pkt.src <=> tuple.dst). The incoming
			   packet will match this struct. */
			bundle_init(bundle_ctx, task->heap, PEER_CLIENT, &task->seed);

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt);
			if (ret >= 0) {
				if (n_retries++ == 1000) {
					plogx_err("Already tried 1K times\n");
				}
			}
		} while (ret >= 0);

		ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt);

		if (ret < 0) {
			plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles);
			bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx);

			pkt_tuple_debug2(pt);
			out[i] = OUT_DISCARD;
			continue;
		}

		task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx;

		if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP)
			task->l4_stats.tcp_created++;
		else
			task->l4_stats.udp_created++;

		task->l4_stats.bundles_created++;

		ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
		out[i] = ret == 0? 0: OUT_HANDLED;
	}

	int ret2 = task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out);
	task->n_new_mbufs -= n_new;
	return ret2;
}
Example #10
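/* Set up the server task: mempool, listen hash built from the Lua stream list, bundle context pool and heap. */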
static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	static char name[] = "server_mempool";
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   targ->nb_mbuf - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket_id, 0);
	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_listen = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_listen == 0, "No services specified to listen on\n");

	task->bundle_cfgs = rte_zmalloc_socket(NULL, n_listen * sizeof(task->bundle_cfgs[0]), RTE_CACHE_LINE_SIZE, socket_id);

	plogx_info("n_listen = %d\n", n_listen);
	const struct rte_hash_parameters listen_table = {
		.name = name,
		.entries = n_listen * 4,
		.key_len = sizeof(struct new_tuple),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = socket_id,
	};
	name[0]++;

	task->listen_hash = rte_hash_create(&listen_table);
	task->listen_entries = rte_zmalloc_socket(NULL, listen_table.entries * sizeof(task->listen_entries[0]), RTE_CACHE_LINE_SIZE, socket_id);

	int idx = 0;
	lua_pushnil(prox_lua());
	while (lua_next(prox_lua(), -2)) {
		task->bundle_cfgs[idx].n_stream_cfgs = 1;
		int ret = lua_to_stream_cfg(prox_lua(), STACK, NULL, socket_id, &task->bundle_cfgs[idx].stream_cfgs[0]);
		PROX_PANIC(ret, "Failed to load stream cfg\n");
		struct stream_cfg *stream = task->bundle_cfgs[idx].stream_cfgs[0];

		// TODO: check mask and add to hash for each host
		struct new_tuple nt = {
			.dst_addr = stream->servers.ip,
			.proto_id = stream->proto,
			.dst_port = stream->servers.port,
			.l2_types[0] = 0x0008,
		};

		ret = rte_hash_add_key(task->listen_hash, &nt);
		PROX_PANIC(ret < 0, "Failed to add\n");

		task->listen_entries[ret] = &task->bundle_cfgs[idx];

		plogx_dbg("Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&nt.dst_addr)), rte_bswap16(nt.dst_port));
		++idx;
		lua_pop(prox_lua(), 1);
	}

	static char name2[] = "task_gen_hash2";
	name2[0]++;
	PROX_PANIC(bundle_ctx_pool_create(name2, targ->n_concur_conn*2, &task->bundle_ctx_pool, socket_id), "Failed to create conn_ctx_pool");
	task->heap = heap_create(targ->n_concur_conn*2, socket_id);
	task->seed = rte_rdtsc();
}

static void init_task_gen_client(struct task_base *tbase, struct task_args *targ)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	static char name[] = "gen_pool";
	const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id);
	name[0]++;
	task->mempool = rte_mempool_create(name,
					   targ->nb_mbuf - 1, MBUF_SIZE,
					   targ->nb_cache_mbuf,
					   sizeof(struct rte_pktmbuf_pool_private),
					   rte_pktmbuf_pool_init, NULL,
					   rte_pktmbuf_init, 0,
					   socket, 0);

	/* streams contains a lua table. Go through it and read each
	   stream with associated imix_fraction. */
	uint32_t imix;
	int i = 0;

	int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
	PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

	lua_len(prox_lua(), -1);
	uint32_t n_bundle_cfgs = lua_tointeger(prox_lua(), -1);
	lua_pop(prox_lua(), 1);
	PROX_PANIC(n_bundle_cfgs == 0, "No configs specified\n");
	plogx_info("loading %d bundle_cfgs\n", n_bundle_cfgs);
	task->bundle_cfgs = rte_zmalloc_socket(NULL, n_bundle_cfgs * sizeof(task->bundle_cfgs[0]), RTE_CACHE_LINE_SIZE, socket);
	lua_pushnil(prox_lua());

	int total_imix = 0;
	struct cdf *cdf = cdf_create(n_bundle_cfgs, socket);

	while (lua_next(prox_lua(), -2)) {
		PROX_PANIC(lua_to_int(prox_lua(), TABLE, "imix_fraction", &imix) ||
			   lua_to_bundle_cfg(prox_lua(), TABLE, "bundle", socket, &task->bundle_cfgs[i]),
			   "Failed to load bundle cfg:\n%s\n", get_lua_to_errors());
		cdf_add(cdf, imix);
		total_imix += imix;
		++i;
		lua_pop(prox_lua(), 1);
	}
	lua_pop(prox_lua(), pop);
	cdf_setup(cdf);

	task->tot_imix = total_imix;
	task->cdf = cdf;
	static char name2[] = "task_gen_hash";
	name2[0]++;
	PROX_PANIC(bundle_ctx_pool_create(name2, targ->n_concur_conn, &task->bundle_ctx_pool, socket), "Failed to create conn_ctx_pool");
	task->heap = heap_create(targ->n_concur_conn, socket);
	task->seed = rte_rdtsc();
}

static struct task_init task_init_gen1 = {
	.mode_str = "genl4",
	.sub_mode_str = "server",
	.init = init_task_gen,
	.handle = handle_gen_bulk,
	.flag_features = TASK_ZERO_RX,
	.size = sizeof(struct task_gen_server),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

static struct task_init task_init_gen2 = {
	.mode_str = "genl4",
	.init = init_task_gen_client,
	.handle = handle_gen_bulk_client,
	.flag_features = TASK_ZERO_RX,
	.size = sizeof(struct task_gen_client),
	.mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

__attribute__((constructor)) static void reg_task_gen(void)
{
	reg_task(&task_init_gen1);
	reg_task(&task_init_gen2);
}
Example #11
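/* Client-side handler: match received packets to their connections, flush due callbacks from the heap, then start one new connection per free bundle (up to a full burst), picking each bundle config from the imix CDF. */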
static void handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	uint8_t out[MAX_PKT_BURST] = {0};
	struct bundle_ctx *conn;
	int ret;

	if (n_pkts) {
		for (int i = 0; i < n_pkts; ++i) {
			struct pkt_tuple pt;
			struct l4_meta l4_meta;

			if (parse_pkt(mbufs[i], &pt, &l4_meta)) {
				plogdx_err(mbufs[i], "Parsing failed\n");
				out[i] = NO_PORT_AVAIL;
				continue;
			}

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt);

			if (ret < 0) {
				plogx_dbg("Client: packet RX that does not belong to connection:"
					  "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&pt.dst_addr)), rte_bswap16(pt.dst_port), IPv4_BYTES(((uint8_t*)&pt.src_addr)), rte_bswap16(pt.src_port));
				plogdx_dbg(mbufs[i], NULL);
				// if tcp, send RST
				/* pkt_tuple_debug2(&pt); */
				out[i] = NO_PORT_AVAIL;
				continue;
			}

			conn = task->bundle_ctx_pool.hash_entries[ret];
			ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
			out[i] = ret == 0? 0: NO_PORT_AVAIL;
		}
		task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
	}

	if (task->n_new_mbufs < MAX_PKT_BURST) {
		if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
			plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST);
			return ;
		}

		for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
			init_mbuf_seg(task->new_mbufs[i]);
		}

		task->n_new_mbufs = MAX_PKT_BURST;
	}

	/* If there is at least one callback to handle, handle at most MAX_PKT_BURST */
	if (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)) {
		uint16_t n_called_back = 0;
		while (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap) && n_called_back < MAX_PKT_BURST) {
			conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

			/* handle packet TX (retransmit or delayed transmit) */
			ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			if (ret == 0) {
				out[n_called_back] = 0;
				n_called_back++;
			}
		}
		plogx_dbg("During callback, will send %d packets\n", n_called_back);

		task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
		task->n_new_mbufs -= n_called_back;
	}

	int n_new = task->bundle_ctx_pool.n_free_bundles;
	n_new = n_new > MAX_PKT_BURST? MAX_PKT_BURST : n_new;

	if (n_new == 0)
		return ;

	if (task->n_new_mbufs < MAX_PKT_BURST) {
		if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
			plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST);
			return ;
		}

		for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
			init_mbuf_seg(task->new_mbufs[i]);
		}

		task->n_new_mbufs = MAX_PKT_BURST;
	}

	for (int i = 0; i < n_new; ++i) {
		int32_t ret = cdf_sample(task->cdf, &task->seed);
		/* Select a new bundle_cfg according to imix */
		struct bundle_cfg *bundle_cfg = &task->bundle_cfgs[ret];
		struct bundle_ctx *bundle_ctx;

		bundle_ctx = bundle_ctx_pool_get(&task->bundle_ctx_pool);

		/* Should be an assert: */
		if (!bundle_ctx) {
			plogx_err("No more available bundles\n");
			exit(-1);
		}

		struct pkt_tuple *pt = &bundle_ctx->tuple;

		int n_retries = 0;
		do {
			/* Note that the actual packet sent will
			   contain swapped addresses and ports
			   (i.e. pkt.src <=> tuple.dst). The incoming
			   packet will match this struct. */
			bundle_init(bundle_ctx, bundle_cfg, task->heap, PEER_CLIENT, &task->seed);

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt);
			if (n_retries == 1000) {
				plogx_err("Already tried 1K times\n");
			}
			if (ret >= 0) {
				n_retries++;
			}
		} while (ret >= 0);

		ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt);

		if (ret < 0) {
			plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles);
			bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx);

			pkt_tuple_debug2(pt);
			out[i] = NO_PORT_AVAIL;
			continue;
		}

		task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx;

		if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP)
			task->l4_stats.tcp_created++;
		else
			task->l4_stats.udp_created++;

		ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
		out[i] = ret == 0? 0: NO_PORT_AVAIL;
	}

	task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out);
	task->n_new_mbufs -= n_new;
}
Example #12
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. In addition to full and
 *   empty masks of destination address, individual/group mask is
 *   also supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |=
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |=
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}