Example 1
static void handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	struct pkt_tuple pkt_tuple[MAX_PKT_BURST];
	uint8_t out[MAX_PKT_BURST];
	struct l4_meta l4_meta[MAX_PKT_BURST];
	struct bundle_ctx *conn;
	int ret;

	for (uint16_t j = 0; j < n_pkts; ++j) {
		if (parse_pkt(mbufs[j], &pkt_tuple[j], &l4_meta[j]))
			plogdx_err(mbufs[j], "Unknown packet, parsing failed\n");
	}
	/* Main proc loop */
	for (uint16_t j = 0; j < n_pkts; ++j) {
		conn = NULL;
		ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple[j]);

		if (ret >= 0)
			conn = task->bundle_ctx_pool.hash_entries[ret];

		/* If not part of existing connection, try to create a connection */
		if (NULL == conn) {
			struct new_tuple nt;
			nt.dst_addr = pkt_tuple[j].dst_addr;
			nt.proto_id = pkt_tuple[j].proto_id;
			nt.dst_port = pkt_tuple[j].dst_port;
			rte_memcpy(nt.l2_types, pkt_tuple[j].l2_types, sizeof(nt.l2_types));

			const struct bundle_cfg *n;

			if (NULL != (n = server_accept(task, &nt))) {
				conn = bundle_ctx_pool_get(&task->bundle_ctx_pool);
				if (!conn) {
					out[j] = NO_PORT_AVAIL;
					plogx_err("No more free bundles to accept new connection\n");
					continue;
				}
				ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple[j]);
				if (ret < 0) {
					out[j] = NO_PORT_AVAIL;
					bundle_ctx_pool_put(&task->bundle_ctx_pool, conn);
					plog_err("Adding key failed while trying to accept connection\n");
					continue;
				}

				task->bundle_ctx_pool.hash_entries[ret] = conn;

				bundle_init(conn, n, task->heap, PEER_SERVER, &task->seed);
				conn->tuple = pkt_tuple[j];

				if (conn->ctx.stream_cfg->proto == IPPROTO_TCP)
					task->l4_stats.tcp_created++;
				else
					task->l4_stats.udp_created++;
			}
		}

		/* bundle contains either an active connection or a
		   newly created connection. If it is NULL, then not
		   listening. */
		if (NULL != conn) {
			int ret = bundle_proc_data(conn, mbufs[j], &l4_meta[j], &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			out[j] = ret == 0 ? 0 : NO_PORT_AVAIL;
		}
		else {
			plog_err("Packet received for service that does not exist\n");
			pkt_tuple_debug(&pkt_tuple[j]);
			plogd_dbg(mbufs[j], NULL);
			out[j] = NO_PORT_AVAIL;
		}
	}
	conn = NULL;

	task->base.tx_pkt(&task->base, mbufs, n_pkts, out);

	if (!(task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)))
		return;

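	/* Top up the pool of pre-allocated mbufs used for timer-driven (re)transmission */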
	if (task->n_new_mbufs < MAX_PKT_BURST) {
		if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
			return;
		}

		for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
			init_mbuf_seg(task->new_mbufs[i]);
		}

		task->n_new_mbufs = MAX_PKT_BURST;
	}

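	/* Service connections whose scheduled TSC deadline has expired */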
	if (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)) {
		uint16_t n_called_back = 0;
		while (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap) && n_called_back < MAX_PKT_BURST) {
			conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

			/* handle packet TX (retransmit or delayed transmit) */
			ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			if (ret == 0) {
				out[n_called_back] = 0;
				n_called_back++;
			}
		}

		task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
		task->n_new_mbufs -= n_called_back;
	}
}
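
Both the lookup and the insert above are keyed on a struct pkt_tuple through the DPDK hash library, and the returned slot index is used to index hash_entries[]. The sketch below shows how such a table could be created with rte_hash_create(); the example_pkt_tuple layout and the sizing are illustrative assumptions, not the actual PROX definitions.

#include <rte_common.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_lcore.h>

/* Hypothetical key layout; the real struct pkt_tuple in PROX also
 * carries the L2 types that are copied into struct new_tuple above. */
struct example_pkt_tuple {
	uint32_t src_addr;
	uint32_t dst_addr;
	uint8_t  proto_id;
	uint16_t src_port;
	uint16_t dst_port;
} __rte_packed;

static struct rte_hash *create_conn_hash(uint32_t n_conns)
{
	struct rte_hash_parameters params = {
		.name = "conn_hash",          /* must be unique system-wide */
		.entries = n_conns,
		.key_len = sizeof(struct example_pkt_tuple),
		.hash_func = rte_hash_crc,    /* CRC32-based hash function */
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Returns NULL on failure. rte_hash_add_key()/rte_hash_lookup()
	 * then return the slot index (>= 0) that the handlers above use
	 * to index hash_entries[]. */
	return rte_hash_create(&params);
}
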
Example 2
static int handle_gen_queued(struct task_gen_server *task)
{
	uint8_t out[MAX_PKT_BURST];
	struct bundle_ctx *conn;
	struct pkt_tuple pkt_tuple;
	struct l4_meta l4_meta;
	uint16_t j;
	uint16_t cancelled = 0;
	int ret;

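	/* Once the previous burst is fully consumed, fetch the next one from the fqueue */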
	if (task->cur_mbufs_beg == task->cur_mbufs_end) {
		task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST);
		task->cur_mbufs_beg = 0;
	}
	uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg;
	struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg;

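	/* Resume the packet that was cancelled on the previous call when the token budget ran out */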
	j = task->cancelled;
	if (task->cancelled) {
		uint16_t pkt_len = mbuf_wire_size(mbufs[0]);

		if (token_time_take(&task->token_time, pkt_len) != 0)
			return -1;

		out[0] = task->out_saved;
		task->cancelled = 0;
	}

	/* Main proc loop */
	for (; j < n_pkts; ++j) {

		if (parse_pkt(mbufs[j], &pkt_tuple, &l4_meta)) {
			plogdx_err(mbufs[j], "Unknown packet, parsing failed\n");
			out[j] = OUT_DISCARD;
			continue;
		}

		conn = NULL;
		ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple);

		if (ret >= 0)
			conn = task->bundle_ctx_pool.hash_entries[ret];
		else {
			/* If not part of existing connection, try to create a connection */
			struct new_tuple nt;
			nt.dst_addr = pkt_tuple.dst_addr;
			nt.proto_id = pkt_tuple.proto_id;
			nt.dst_port = pkt_tuple.dst_port;
			rte_memcpy(nt.l2_types, pkt_tuple.l2_types, sizeof(nt.l2_types));
			const struct bundle_cfg *n;

			if (NULL != (n = server_accept(task, &nt))) {
				conn = bundle_ctx_pool_get(&task->bundle_ctx_pool);
				if (!conn) {
					out[j] = OUT_DISCARD;
					plogx_err("No more free bundles to accept new connection\n");
					continue;
				}
				ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple);
				if (ret < 0) {
					out[j] = OUT_DISCARD;
					bundle_ctx_pool_put(&task->bundle_ctx_pool, conn);
					plog_err("Adding key failed while trying to accept connection\n");
					continue;
				}

				task->bundle_ctx_pool.hash_entries[ret] = conn;

				bundle_init_w_cfg(conn, n, task->heap, PEER_SERVER, &task->seed);
				conn->tuple = pkt_tuple;

				if (conn->ctx.stream_cfg->proto == IPPROTO_TCP)
					task->l4_stats.tcp_created++;
				else
					task->l4_stats.udp_created++;
			}
			else {
				plog_err("Packet received for service that does not exist :\n"
					 "source ip = %0x:%u\n"
					 "dst ip    = %0x:%u\n",
					 pkt_tuple.src_addr, rte_bswap16(pkt_tuple.src_port),
					 pkt_tuple.dst_addr, rte_bswap16(pkt_tuple.dst_port));
			}
		}

		/* bundle contains either an active connection or a
		   newly created connection. If it is NULL, then not
		   listening. */
		if (NULL != conn) {
			ret = bundle_proc_data(conn, mbufs[j], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			out[j] = ret == 0 ? 0 : OUT_HANDLED;

			if (ret == 0) {
				uint16_t pkt_len = mbuf_wire_size(mbufs[j]);

				if (token_time_take(&task->token_time, pkt_len) != 0) {
					task->out_saved = out[j];
					task->cancelled = 1;
					task->base.tx_pkt(&task->base, mbufs, j, out);
					task->cur_mbufs_beg += j;
					return -1;
				}
			}
		}
		else {
			pkt_tuple_debug(&pkt_tuple);
			plogd_dbg(mbufs[j], NULL);
			out[j] = OUT_DISCARD;
		}
	}

	task->base.tx_pkt(&task->base, mbufs, j, out);

	task->cur_mbufs_beg += j;
	return 0;
}
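
handle_gen_queued() paces its output through token_time_take(): each transmitted packet consumes budget proportional to its wire size, and when the budget is exhausted the current packet is remembered (cancelled, out_saved) and retried on the next invocation. A minimal token-bucket sketch of that calling convention follows; struct tt_example and its fields are hypothetical and not PROX's actual token_time implementation.

#include <stdint.h>
#include <rte_cycles.h>

/* Hypothetical token bucket measured in bytes. */
struct tt_example {
	uint64_t bytes_avail;     /* current budget, in bytes               */
	uint64_t bytes_max;       /* burst size cap                         */
	uint64_t bytes_per_tsc;   /* refill rate, bytes/cycle in 44.20 fp   */
	uint64_t last_tsc;        /* timestamp of the previous refill       */
};

/* Refill the budget based on elapsed TSC cycles. */
static void tt_example_update(struct tt_example *tt, uint64_t now)
{
	uint64_t elapsed = now - tt->last_tsc;

	tt->bytes_avail += (elapsed * tt->bytes_per_tsc) >> 20;
	if (tt->bytes_avail > tt->bytes_max)
		tt->bytes_avail = tt->bytes_max;
	tt->last_tsc = now;
}

/* Mirrors the calling convention used above: 0 on success,
 * non-zero when the packet does not fit in the remaining budget. */
static int tt_example_take(struct tt_example *tt, uint16_t pkt_len)
{
	tt_example_update(tt, rte_rdtsc());
	if (tt->bytes_avail < pkt_len)
		return -1;
	tt->bytes_avail -= pkt_len;
	return 0;
}
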
Example 3
static void handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	uint8_t out[MAX_PKT_BURST] = {0};
	struct bundle_ctx *conn;
	int ret;

	if (n_pkts) {
		for (int i = 0; i < n_pkts; ++i) {
			struct pkt_tuple pt;
			struct l4_meta l4_meta;

			if (parse_pkt(mbufs[i], &pt, &l4_meta)) {
				plogdx_err(mbufs[i], "Parsing failed\n");
				out[i] = NO_PORT_AVAIL;
				continue;
			}

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt);

			if (ret < 0) {
				plogx_dbg("Client: packet RX that does not belong to connection:"
					  "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&pt.dst_addr)), rte_bswap16(pt.dst_port), IPv4_BYTES(((uint8_t*)&pt.src_addr)), rte_bswap16(pt.src_port));
				plogdx_dbg(mbufs[i], NULL);
				// if tcp, send RST
				/* pkt_tuple_debug2(&pt); */
				out[i] = NO_PORT_AVAIL;
				continue;
			}

			conn = task->bundle_ctx_pool.hash_entries[ret];
			ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
			out[i] = ret == 0 ? 0 : NO_PORT_AVAIL;
		}
		task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
	}

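	/* Top up the pre-allocated mbufs used for retransmits and for opening new connections */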
	if (task->n_new_mbufs < MAX_PKT_BURST) {
		if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
			plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST);
			return ;
 		}

		for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
			init_mbuf_seg(task->new_mbufs[i]);
		}

		task->n_new_mbufs = MAX_PKT_BURST;
	}

	/* If there is at least one callback to handle, handle at most MAX_PKT_BURST */
	if (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)) {
		uint16_t n_called_back = 0;
		while (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap) && n_called_back < MAX_PKT_BURST) {
			conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

			/* handle packet TX (retransmit or delayed transmit) */
			ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			if (ret == 0) {
				out[n_called_back] = 0;
				n_called_back++;
			}
		}
		plogx_dbg("During callback, will send %d packets\n", n_called_back);

		task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
		task->n_new_mbufs -= n_called_back;
	}

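	/* Open up to MAX_PKT_BURST new client connections, limited by the number of free bundles */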
	int n_new = task->bundle_ctx_pool.n_free_bundles;
	n_new = n_new > MAX_PKT_BURST ? MAX_PKT_BURST : n_new;

	if (n_new == 0)
		return;

	if (task->n_new_mbufs < MAX_PKT_BURST) {
		if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
			plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST);
			return ;
		}

		for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
			init_mbuf_seg(task->new_mbufs[i]);
		}

		task->n_new_mbufs = MAX_PKT_BURST;
	}

	for (int i = 0; i < n_new; ++i) {
		int32_t ret = cdf_sample(task->cdf, &task->seed);
		/* Select a new bundle_cfg according to imix */
		struct bundle_cfg *bundle_cfg = &task->bundle_cfgs[ret];
		struct bundle_ctx *bundle_ctx;

		bundle_ctx = bundle_ctx_pool_get(&task->bundle_ctx_pool);

		/* Should be an assert: */
		if (!bundle_ctx) {
			plogx_err("No more available bundles\n");
			exit(-1);
		}

		struct pkt_tuple *pt = &bundle_ctx->tuple;

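		/* Re-initialize the bundle until its tuple is not already present in the connection hash */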
		int n_retries = 0;
		do {
			/* Note that the actual packet sent will
			   contain swapped addresses and ports
			   (i.e. pkt.src <=> tuple.dst). The incoming
			   packet will match this struct. */
			bundle_init(bundle_ctx, bundle_cfg, task->heap, PEER_CLIENT, &task->seed);

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt);
			if (n_retries == 1000) {
				plogx_err("Already tried 1K times\n");
			}
			if (ret >= 0) {
				n_retries++;
			}
		} while (ret >= 0);

		ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt);

		if (ret < 0) {
			plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles);
			bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx);

			pkt_tuple_debug2(pt);
			out[i] = NO_PORT_AVAIL;
			continue;
		}

		task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx;

		if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP)
			task->l4_stats.tcp_created++;
		else
			task->l4_stats.udp_created++;

		ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
		out[i] = ret == 0 ? 0 : NO_PORT_AVAIL;
	}

	task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out);
	task->n_new_mbufs -= n_new;
}
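
All three handlers repeat the same refill pattern for new_mbufs[]: request the missing mbufs from the mempool in one bulk call, initialize them, and mark the array full. A hypothetical helper that factors this out is sketched below; refill_new_mbufs() is not part of PROX, and the init_mbuf_seg() prototype is assumed from its use in the listings.

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* PROX helper used in the listings above; prototype assumed here. */
void init_mbuf_seg(struct rte_mbuf *mbuf);

/* Hypothetical helper: top new_mbufs[] up to `burst` entries. Fresh mbufs
 * are written to the front of the array, which is where the handlers
 * consume them from; leftovers from the previous call sit behind them.
 * Returns 0 on success, -1 if the mempool ran dry. */
static int refill_new_mbufs(struct rte_mempool *pool, struct rte_mbuf **new_mbufs,
			    uint16_t *n_new_mbufs, uint16_t burst)
{
	uint16_t missing = burst - *n_new_mbufs;

	if (missing == 0)
		return 0;

	/* All-or-nothing: rte_mempool_get_bulk() fails without allocating
	 * anything when fewer than `missing` objects are available. */
	if (rte_mempool_get_bulk(pool, (void **)new_mbufs, missing) < 0)
		return -1;

	for (uint16_t i = 0; i < missing; ++i)
		init_mbuf_seg(new_mbufs[i]);

	*n_new_mbufs = burst;
	return 0;
}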