Example #1
static inline int wait_command_handled(struct lcore_cfg *lconf)
{
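	/* Busy-wait until the worker lcore has handled the pending request;
	   give up after roughly 5 seconds, measured in TSC cycles. */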
	uint64_t t1 = rte_rdtsc(), t2;
	while (lconf_is_req(lconf)) {
		t2 = rte_rdtsc();
		if (t2 - t1 > 5 * rte_get_tsc_hz()) {
			// Failed to handle command ...
			for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
				struct task_args *targs = &lconf->targs[task_id];
				if (!(targs->flags & TASK_ARG_DROP)) {
					plogx_err("Failed to handle command - task is in NO_DROP and might be stuck...\n");
					return -1;
				}
			}
			plogx_err("Failed to handle command\n");
			return -1;
		}
	}
	return 0;
}
Example #2
static int refill_mbufs(uint32_t *n_new_mbufs, struct rte_mempool *mempool, struct rte_mbuf **mbufs)
{
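	/* The last *n_new_mbufs entries still hold unused mbufs from the
	   previous refill; allocate replacements into the consumed slots at
	   the front of the array so a full burst is available again. */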
	if (*n_new_mbufs == MAX_PKT_BURST)
		return 0;

	if (rte_mempool_get_bulk(mempool, (void **)mbufs, MAX_PKT_BURST - *n_new_mbufs) < 0) {
		plogx_err("4Mempool alloc failed for %d mbufs\n", MAX_PKT_BURST - *n_new_mbufs);
		return -1;
	}

	for (uint32_t i = 0; i < MAX_PKT_BURST - *n_new_mbufs; ++i) {
		init_mbuf_seg(mbufs[i]);
	}

	*n_new_mbufs = MAX_PKT_BURST;

	return 0;
}
Example #3
/* Encapsulate IPv6 packet in QinQ where the QinQ is derived from the IPv6 address */
static inline uint8_t handle_qinq_encap6(struct rte_mbuf *mbuf, struct task_qinq_encap6 *task)
{
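	/* Make room for the two VLAN tags; the QinQ header overlays the
	   original Ethernet header plus the newly prepended bytes. */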
	struct qinq_hdr *pqinq = (struct qinq_hdr *)rte_pktmbuf_prepend(mbuf, 2 * sizeof(struct vlan_hdr));

	PROX_RUNTIME_ASSERT(pqinq);
	struct ipv6_hdr *pip6 = (struct ipv6_hdr *)(pqinq + 1);

	if (pip6->hop_limits) {
		pip6->hop_limits--;
	}
	else {
		plog_info("TTL = 0 => Dropping\n");
		return NO_PORT_AVAIL;
	}

	// TODO: optimize to use bulk as intended with the rte_table_library
	uint64_t pkts_mask = RTE_LEN2MASK(1, uint64_t);
	uint64_t lookup_hit_mask;
	struct cpe_data *entries[64]; // TODO: use bulk size
	rte_table_hash_ext_dosig_ops.f_lookup(task->cpe_table, &mbuf, pkts_mask, &lookup_hit_mask, (void**)entries);

	if (lookup_hit_mask == 0x1) {
		/* will also overwrite part of the destination addr */
		(*(uint64_t *)pqinq) = entries[0]->mac_port_8bytes;
		pqinq->svlan.eth_proto = task->qinq_tag;
		pqinq->cvlan.eth_proto = ETYPE_VLAN;
		pqinq->svlan.vlan_tci = entries[0]->qinq_svlan;
		pqinq->cvlan.vlan_tci = entries[0]->qinq_cvlan;
		pqinq->ether_type = ETYPE_IPv6;

		/* classification can only be done from this point */
		if (task->runtime_flags & TASK_CLASSIFY) {
			rte_sched_port_pkt_write(mbuf, 0, entries[0]->user, 0, 0, 0);
		}
		return 0;
	}
	else {
		plogx_err("Unknown IP " IPv6_BYTES_FMT "\n", IPv6_BYTES(pip6->dst_addr));
		return NO_PORT_AVAIL;
	}
}
Example #4
static int handle_gen_queued(struct task_gen_server *task)
{
	uint8_t out[MAX_PKT_BURST];
	struct bundle_ctx *conn;
	struct pkt_tuple pkt_tuple;
	struct l4_meta l4_meta;
	uint16_t j;
	uint16_t cancelled = 0;
	int ret;

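	/* Refill the local staging array from the fqueue only once the
	   previous batch has been consumed completely. */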
	if (task->cur_mbufs_beg == task->cur_mbufs_end) {
		task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST);
		task->cur_mbufs_beg = 0;
	}
	uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg;
	struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg;

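	/* If the previous call ran out of TX budget, mbufs[0] was already
	   processed but not yet sent; retry charging its wire size and
	   resume the loop at j = 1. */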
	j = task->cancelled;
	if (task->cancelled) {
		uint16_t pkt_len = mbuf_wire_size(mbufs[0]);

		if (token_time_take(&task->token_time, pkt_len) != 0)
			return -1;

		out[0] = task->out_saved;
		task->cancelled = 0;
	}

	/* Main proc loop */
	for (; j < n_pkts; ++j) {

		if (parse_pkt(mbufs[j], &pkt_tuple, &l4_meta)) {
			plogdx_err(mbufs[j], "Unknown packet, parsing failed\n");
			out[j] = OUT_DISCARD;
			continue;
		}

		conn = NULL;
		ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple);

		if (ret >= 0)
			conn = task->bundle_ctx_pool.hash_entries[ret];
		else {
			/* If not part of existing connection, try to create a connection */
			struct new_tuple nt;
			nt.dst_addr = pkt_tuple.dst_addr;
			nt.proto_id = pkt_tuple.proto_id;
			nt.dst_port = pkt_tuple.dst_port;
			rte_memcpy(nt.l2_types, pkt_tuple.l2_types, sizeof(nt.l2_types));
			const struct bundle_cfg *n;

			if (NULL != (n = server_accept(task, &nt))) {
				conn = bundle_ctx_pool_get(&task->bundle_ctx_pool);
				if (!conn) {
					out[j] = OUT_DISCARD;
					plogx_err("No more free bundles to accept new connection\n");
					continue;
				}
				ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple);
				if (ret < 0) {
					out[j] = OUT_DISCARD;
					bundle_ctx_pool_put(&task->bundle_ctx_pool, conn);
					plog_err("Adding key failed while trying to accept connection\n");
					continue;
				}

				task->bundle_ctx_pool.hash_entries[ret] = conn;

				bundle_init_w_cfg(conn, n, task->heap, PEER_SERVER, &task->seed);
				conn->tuple = pkt_tuple;

				if (conn->ctx.stream_cfg->proto == IPPROTO_TCP)
					task->l4_stats.tcp_created++;
				else
					task->l4_stats.udp_created++;
			}
			else {
				plog_err("Packet received for service that does not exist :\n"
					 "source ip = %0x:%u\n"
					 "dst ip    = %0x:%u\n",
					 pkt_tuple.src_addr, rte_bswap16(pkt_tuple.src_port),
					 pkt_tuple.dst_addr, rte_bswap16(pkt_tuple.dst_port));
			}
		}

		/* bundle contains either an active connection or a
		   newly created connection. If it is NULL, then not
		   listening. */
		if (NULL != conn) {
			ret = bundle_proc_data(conn, mbufs[j], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			out[j] = ret == 0? 0: OUT_HANDLED;

			if (ret == 0) {
				uint16_t pkt_len = mbuf_wire_size(mbufs[j]);

				if (token_time_take(&task->token_time, pkt_len) != 0) {
					task->out_saved = out[j];
					task->cancelled = 1;
					task->base.tx_pkt(&task->base, mbufs, j, out);
					task->cur_mbufs_beg += j;
					return -1;
				}
			}
		}
		else {
			pkt_tuple_debug(&pkt_tuple);
			plogd_dbg(mbufs[j], NULL);
			out[j] = OUT_DISCARD;
		}
	}

	task->base.tx_pkt(&task->base, mbufs, j, out);

	task->cur_mbufs_beg += j;
	return 0;
}
Example #5
static int handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	uint8_t out[MAX_PKT_BURST] = {0};
	struct bundle_ctx *conn;
	int ret;

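	/* First handle the received packets: each one either advances an
	   existing connection, triggers a RST (unknown TCP), or is dropped. */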
	if (n_pkts) {
		for (int i = 0; i < n_pkts; ++i) {
			struct pkt_tuple pt;
			struct l4_meta l4_meta;

			if (parse_pkt(mbufs[i], &pt, &l4_meta)) {
				plogdx_err(mbufs[i], "Parsing failed\n");
				out[i] = OUT_DISCARD;
				continue;
			}

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt);

			if (ret < 0) {
				plogx_dbg("Client: packet RX that does not belong to connection:"
					  "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n",
					  IPv4_BYTES(((uint8_t*)&pt.dst_addr)),
					  rte_bswap16(pt.dst_port),
					  IPv4_BYTES(((uint8_t*)&pt.src_addr)),
					  rte_bswap16(pt.src_port));

				plogdx_dbg(mbufs[i], NULL);

				if (pt.proto_id == IPPROTO_TCP) {
					stream_tcp_create_rst(mbufs[i], &l4_meta, &pt);
					out[i] = 0;
					continue;
				}
				else {
					out[i] = OUT_DISCARD;
					continue;
				}
			}

			conn = task->bundle_ctx_pool.hash_entries[ret];
			ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
			out[i] = ret == 0? 0: OUT_HANDLED;
		}
		task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
	}

	/* If there is at least one callback to handle, handle at most MAX_PKT_BURST */
	if (heap_top_is_lower(task->heap, rte_rdtsc())) {
		if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs))
			return 0;

		uint16_t n_called_back = 0;
		while (heap_top_is_lower(task->heap, rte_rdtsc()) && n_called_back < MAX_PKT_BURST) {
			conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

			/* handle packet TX (retransmit or delayed transmit) */
			ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			if (ret == 0) {
				out[n_called_back] = 0;
				n_called_back++;
			}
		}
		plogx_dbg("During callback, will send %d packets\n", n_called_back);

		task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
		task->n_new_mbufs -= n_called_back;
	}

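	/* Open new connections: bounded by the number of free bundles, by
	   one burst, and by the token bucket below. */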
	uint32_t n_new = task->bundle_ctx_pool.n_free_bundles;
	n_new = n_new > MAX_PKT_BURST? MAX_PKT_BURST : n_new;

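	/* Token bucket for the connection-setup rate: one token accrues
	   every new_conn_cost cycles, capped at a burst of 16. */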
	uint64_t diff = (rte_rdtsc() - task->new_conn_last_tsc)/task->new_conn_cost;
	task->new_conn_last_tsc += diff * task->new_conn_cost;
	task->new_conn_tokens += diff;

	if (task->new_conn_tokens > 16)
		task->new_conn_tokens = 16;
	if (n_new > task->new_conn_tokens)
		n_new = task->new_conn_tokens;
	task->new_conn_tokens -= n_new;
	if (n_new == 0)
		return 0;

	if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs))
		return 0;

	for (uint32_t i = 0; i < n_new; ++i) {
		struct bundle_ctx *bundle_ctx = bundle_ctx_pool_get_w_cfg(&task->bundle_ctx_pool);
		PROX_ASSERT(bundle_ctx);

		struct pkt_tuple *pt = &bundle_ctx->tuple;

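		/* Keep re-initializing the bundle until its tuple no longer
		   collides with an existing connection in the hash. */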
		int n_retries = 0;
		do {
			/* Note that the actual packet sent will
			   contain swapped addresses and ports
			   (i.e. pkt.src <=> tuple.dst). The incoming
			   packet will match this struct. */
			bundle_init(bundle_ctx, task->heap, PEER_CLIENT, &task->seed);

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt);
			if (ret >= 0) {
				if (n_retries++ == 1000) {
					plogx_err("Already tried 1K times\n");
				}
			}
		} while (ret >= 0);

		ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt);

		if (ret < 0) {
			plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles);
			bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx);

			pkt_tuple_debug2(pt);
			out[i] = OUT_DISCARD;
			continue;
		}

		task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx;

		if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP)
			task->l4_stats.tcp_created++;
		else
			task->l4_stats.udp_created++;

		task->l4_stats.bundles_created++;

		ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
		out[i] = ret == 0? 0: OUT_HANDLED;
	}

	int ret2 = task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out);
	task->n_new_mbufs -= n_new;
	return ret2;
}
Example #6
static void handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	struct pkt_tuple pkt_tuple[MAX_PKT_BURST];
	uint8_t out[MAX_PKT_BURST];
	struct l4_meta l4_meta[MAX_PKT_BURST];
	struct bundle_ctx *conn;
	int ret;

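	/* Parse all packets up front; the main loop below works on the
	   pre-extracted tuples. */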
	for (uint16_t j = 0; j < n_pkts; ++j) {
		if (parse_pkt(mbufs[j], &pkt_tuple[j], &l4_meta[j]))
			plogdx_err(mbufs[j], "Unknown packet, parsing failed\n");
	}
	/* Main proc loop */
	for (uint16_t j = 0; j < n_pkts; ++j) {
		conn = NULL;
		ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple[j]);

		if (ret >= 0)
			conn = task->bundle_ctx_pool.hash_entries[ret];

		/* If not part of existing connection, try to create a connection */
		if (NULL == conn) {
			struct new_tuple nt;
			nt.dst_addr = pkt_tuple[j].dst_addr;
			nt.proto_id = pkt_tuple[j].proto_id;
			nt.dst_port = pkt_tuple[j].dst_port;
			rte_memcpy(nt.l2_types, pkt_tuple[j].l2_types, sizeof(nt.l2_types));

			const struct bundle_cfg *n;

			if (NULL != (n = server_accept(task, &nt))) {
				conn = bundle_ctx_pool_get(&task->bundle_ctx_pool);
				if (!conn) {
					out[j] = NO_PORT_AVAIL;
					plogx_err("No more free bundles to accept new connection\n");
					continue;
				}
				ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple[j]);
				if (ret < 0) {
					out[j] = NO_PORT_AVAIL;
					bundle_ctx_pool_put(&task->bundle_ctx_pool, conn);
					plog_err("Adding key failed while trying to accept connection\n");
					continue;
				}

				task->bundle_ctx_pool.hash_entries[ret] = conn;

				bundle_init(conn, n, task->heap, PEER_SERVER, &task->seed);
				conn->tuple = pkt_tuple[j];

				if (conn->ctx.stream_cfg->proto == IPPROTO_TCP)
					task->l4_stats.tcp_created++;
				else
					task->l4_stats.udp_created++;
			}
		}

		/* bundle contains either an active connection or a
		   newly created connection. If it is NULL, then not
		   listening. */
		if (NULL != conn) {
			int ret = bundle_proc_data(conn, mbufs[j], &l4_meta[j], &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			out[j] = ret == 0? 0: NO_PORT_AVAIL;
		}
		else {
			plog_err("Packet received for service that does not exist\n");
			pkt_tuple_debug(&pkt_tuple[j]);
			plogd_dbg(mbufs[j], NULL);
			out[j] = NO_PORT_AVAIL;
		}
	}
	conn = NULL;

	task->base.tx_pkt(&task->base, mbufs, n_pkts, out);

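	/* Nothing more to do unless some connection's timer (retransmit or
	   delayed transmit) has already expired. */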
	if (!(task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)))
		return;

	if (task->n_new_mbufs < MAX_PKT_BURST) {
		if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
			return;
		}

		for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
			init_mbuf_seg(task->new_mbufs[i]);
		}

		task->n_new_mbufs = MAX_PKT_BURST;
	}

	if (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)) {
		uint16_t n_called_back = 0;
		while (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap) && n_called_back < MAX_PKT_BURST) {
			conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

			/* handle packet TX (retransmit or delayed transmit) */
			ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			if (ret == 0) {
				out[n_called_back] = 0;
				n_called_back++;
			}
		}

		task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
		task->n_new_mbufs -= n_called_back;
	}
}
Example #7
static void handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	uint8_t out[MAX_PKT_BURST] = {0};
	struct bundle_ctx *conn;
	int ret;

	if (n_pkts) {
		for (int i = 0; i < n_pkts; ++i) {
			struct pkt_tuple pt;
			struct l4_meta l4_meta;

			if (parse_pkt(mbufs[i], &pt, &l4_meta)) {
				plogdx_err(mbufs[i], "Parsing failed\n");
				out[i] = NO_PORT_AVAIL;
				continue;
			}

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt);

			if (ret < 0) {
				plogx_dbg("Client: packet RX that does not belong to connection:"
					  "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&pt.dst_addr)), rte_bswap16(pt.dst_port), IPv4_BYTES(((uint8_t*)&pt.src_addr)), rte_bswap16(pt.src_port));
				plogdx_dbg(mbufs[i], NULL);
				// if tcp, send RST
				/* pkt_tuple_debug2(&pt); */
				out[i] = NO_PORT_AVAIL;
				continue;
			}

			conn = task->bundle_ctx_pool.hash_entries[ret];
			ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
			out[i] = ret == 0? 0: NO_PORT_AVAIL;
		}
		task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
	}

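	/* Top up the scratch mbuf array; the same refill logic is factored
	   out as refill_mbufs() in Example #2. */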
	if (task->n_new_mbufs < MAX_PKT_BURST) {
		if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
			plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST);
			return ;
 		}

		for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
			init_mbuf_seg(task->new_mbufs[i]);
		}

		task->n_new_mbufs = MAX_PKT_BURST;
	}

	/* If there is at least one callback to handle, handle at most MAX_PKT_BURST */
	if (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)) {
		uint16_t n_called_back = 0;
		while (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap) && n_called_back < MAX_PKT_BURST) {
			conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

			/* handle packet TX (retransmit or delayed transmit) */
			ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			if (ret == 0) {
				out[n_called_back] = 0;
				n_called_back++;
			}
		}
		plogx_dbg("During callback, will send %d packets\n", n_called_back);

		task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
		task->n_new_mbufs -= n_called_back;
	}

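	/* Open as many new client connections as there are free bundles,
	   at most one burst per call. */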
	int n_new = task->bundle_ctx_pool.n_free_bundles;
	n_new = n_new > MAX_PKT_BURST? MAX_PKT_BURST : n_new;

	if (n_new == 0)
		return;

	if (task->n_new_mbufs < MAX_PKT_BURST) {
		if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
			plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST);
			return ;
		}

		for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
			init_mbuf_seg(task->new_mbufs[i]);
		}

		task->n_new_mbufs = MAX_PKT_BURST;
	}

	for (int i = 0; i < n_new; ++i) {
		/* Select a new bundle_cfg according to imix */
		int32_t cfg_idx = cdf_sample(task->cdf, &task->seed);
		struct bundle_cfg *bundle_cfg = &task->bundle_cfgs[cfg_idx];
		struct bundle_ctx *bundle_ctx;

		bundle_ctx = bundle_ctx_pool_get(&task->bundle_ctx_pool);

		/* Should be an assert: */
		if (!bundle_ctx) {
			plogx_err("No more available bundles\n");
			exit(-1);
		}

		struct pkt_tuple *pt = &bundle_ctx->tuple;

		int n_retries = 0;
		do {
			/* Note that the actual packet sent will
			   contain swapped addresses and ports
			   (i.e. pkt.src <=> tuple.dst). The incoming
			   packet will match this struct. */
			bundle_init(bundle_ctx, bundle_cfg, task->heap, PEER_CLIENT, &task->seed);

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt);
			if (n_retries == 1000) {
				plogx_err("Already tried 1K times\n");
			}
			if (ret >= 0) {
				n_retries++;
			}
		} while (ret >= 0);

		ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt);

		if (ret < 0) {
			plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles);
			bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx);

			pkt_tuple_debug2(pt);
			out[i] = NO_PORT_AVAIL;
			continue;
		}

		task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx;

		if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP)
			task->l4_stats.tcp_created++;
		else
			task->l4_stats.udp_created++;

		ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
		out[i] = ret == 0? 0: NO_PORT_AVAIL;
	}

	task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out);
	task->n_new_mbufs -= n_new;
}