Example #1
/*
 * Function receives messages from the daemon.
 */
static void
receive_request_from_vswitchd(void)
{
	int j = 0;
	uint16_t dq_pkt = PKT_BURST_SIZE;
	struct client *vswd = NULL;
	struct statistics *vswd_stat = NULL;
	struct rte_mbuf *buf[PKT_BURST_SIZE] = {0};

	vswd = &clients[VSWITCHD];
	vswd_stat = &vport_stats[VSWITCHD];

	/* Attempt to dequeue maximum available number of mbufs from ring */
	while (dq_pkt > 0 &&
			unlikely(rte_ring_sc_dequeue_bulk(
					vswd->tx_q, (void **)buf, dq_pkt) != 0))
		dq_pkt = (uint16_t)RTE_MIN(
				rte_ring_count(vswd->tx_q), PKT_BURST_SIZE);

	/* Update number of packets transmitted by daemon */
	vswd_stat->rx += dq_pkt;

	for (j = 0; j < dq_pkt; j++) {
		handle_vswitchd_cmd(buf[j]);
	}
}
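The shrink-and-retry loop above depends on the classic three-argument rte_ring bulk API these examples are written against (pre-17.05 DPDK), where rte_ring_sc_dequeue_bulk() is all-or-nothing: it returns 0 only when every requested object was dequeued, and nonzero otherwise. A minimal sketch of the same pattern against a standalone ring; the helper name, ring pointer, and burst size are illustrative, not from the original sources:

#include <stdint.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ring.h>

#define BURST 32 /* illustrative burst size */

/*
 * Dequeue up to BURST mbufs from a single-consumer ring using the
 * all-or-nothing bulk API: keep shrinking the request to what the ring
 * currently holds until the bulk call succeeds or the ring drains to
 * empty. Returns the number of mbufs placed in buf.
 */
static uint16_t
drain_ring(struct rte_ring *r, struct rte_mbuf **buf)
{
	uint16_t n = BURST;

	while (n > 0 && rte_ring_sc_dequeue_bulk(r, (void **)buf, n) != 0)
		n = (uint16_t)RTE_MIN(rte_ring_count(r), BURST);

	return n;
}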
Example #2
/*
 * Receive burst of packets from client
 */
static void
receive_from_client(uint16_t client)
{
	int j = 0;
	uint16_t dq_pkt = PKT_BURST_SIZE;
	struct rte_mbuf *buf[PKT_BURST_SIZE] = {0};
	struct client *cl = NULL;
	struct statistics *s = NULL;

	cl = &clients[client];
	s = &vport_stats[client];

	/* Attempt to dequeue maximum available number of mbufs from ring */
	while (dq_pkt > 0 &&
			unlikely(rte_ring_sc_dequeue_bulk(
					cl->tx_q, (void **)buf, dq_pkt) != 0))
		dq_pkt = (uint16_t)RTE_MIN(
				rte_ring_count(cl->tx_q), PKT_BURST_SIZE);

	/* Update number of packets transmitted by client */
	s->tx += dq_pkt;

	for (j = 0; j < dq_pkt; j++) {
		switch_packet(buf[j], client);
	}
}
Example #3
void
app_main_loop_worker(void) {
	struct app_mbuf_array *worker_mbuf;
	uint32_t i;

	RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n",
		rte_lcore_id());

	worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (worker_mbuf == NULL)
		rte_panic("Worker thread: cannot allocate buffer space\n");

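	/* Round-robin across ports; the mask assumes app.n_ports is a power of two */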
	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		int ret;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings_rx[i],
			(void **) worker_mbuf->array,
			app.burst_size_worker_read);

		if (ret == -ENOENT)
			continue;

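		/* Blocking hand-off: sp_enqueue_bulk is all-or-nothing, so spin
		 * until the paired TX ring has room for the whole burst */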
		do {
			ret = rte_ring_sp_enqueue_bulk(
				app.rings_tx[i ^ 1],
				(void **) worker_mbuf->array,
				app.burst_size_worker_write);
		} while (ret < 0);
	}
}
Example #4
uint16_t rx_pkt_sw(struct rte_mbuf **rx_mbuf, struct task_base *ptask)
{
	START_EMPTY_MEASSURE();
#ifdef BRAS_RX_BULK
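	/* Bulk path: all-or-nothing dequeue of exactly MAX_RING_BURST mbufs;
	 * on failure, rotate to the next RX ring and record an empty cycle */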
	if (unlikely(rte_ring_sc_dequeue_bulk(ptask->rx_params_sw.rx_rings[ptask->rx_params_sw.last_read_ring], (void **)rx_mbuf, MAX_RING_BURST) < 0)) {
		++ptask->rx_params_sw.last_read_ring;
		if (unlikely(ptask->rx_params_sw.last_read_ring == ptask->rx_params_sw.nb_rxrings)) {
			ptask->rx_params_sw.last_read_ring = 0;
		}
		INCR_EMPTY_CYCLES(ptask->stats, rte_rdtsc() - cur_tsc);
		return 0;
	}
	else {
		return MAX_RING_BURST;
	}
#else
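	/* Burst path: dequeue whatever is available, up to MAX_RING_BURST,
	 * then rotate to the next RX ring unconditionally */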
	uint16_t nb_rx = rte_ring_sc_dequeue_burst(ptask->rx_params_sw.rx_rings[ptask->rx_params_sw.last_read_ring], (void **)rx_mbuf, MAX_RING_BURST);
	++ptask->rx_params_sw.last_read_ring;
	if (unlikely(ptask->rx_params_sw.last_read_ring == ptask->rx_params_sw.nb_rxrings)) {
		ptask->rx_params_sw.last_read_ring = 0;
	}

	if (nb_rx != 0) {
		return nb_rx;
	}
	else {
		INCR_EMPTY_CYCLES(ptask->stats, rte_rdtsc() - cur_tsc);
		return 0;
	}
#endif
}
Example #5
static uint16_t ring_deq(struct rte_ring* r, struct rte_mbuf **mbufs)
{
	void** v_mbufs = (void **)mbufs;
#ifdef BRAS_RX_BULK
	return rte_ring_sc_dequeue_bulk(r, v_mbufs, MAX_RING_BURST) < 0 ? 0 : MAX_RING_BURST;
#else
	return rte_ring_sc_dequeue_burst(r, v_mbufs, MAX_RING_BURST);
#endif
}
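A sketch of how a wrapper like ring_deq() might sit in a polling loop that services several RX rings round-robin; the ring array, ring count, and process_mbufs() handler are hypothetical stand-ins, not part of the original task structures:

/* Hypothetical downstream handler for dequeued mbufs */
static void process_mbufs(struct rte_mbuf **mbufs, uint16_t n);

/*
 * Hypothetical polling loop: visit nb_rings single-consumer rings in
 * round-robin order and hand whatever ring_deq() returns to a
 * processing routine.
 */
static void
poll_rings(struct rte_ring **rings, unsigned int nb_rings)
{
	struct rte_mbuf *mbufs[MAX_RING_BURST];
	unsigned int i;

	for (i = 0; ; i = (i + 1) % nb_rings) {
		uint16_t nb_rx = ring_deq(rings[i], mbufs);

		if (nb_rx != 0)
			process_mbufs(mbufs, nb_rx);
	}
}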
Example #6
void
app_main_loop_tx(void) {
	uint32_t i;

	RTE_LOG(INFO, USER1, "Core %u is doing TX\n", rte_lcore_id());

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		uint16_t n_mbufs, n_pkts;
		int ret;

		n_mbufs = app.mbuf_tx[i].n_mbufs;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings_tx[i],
			(void **) &app.mbuf_tx[i].array[n_mbufs],
			app.burst_size_tx_read);

		if (ret == -ENOENT)
			continue;

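		/* Bulk dequeue is all-or-nothing, so on success exactly
		 * burst_size_tx_read mbufs were appended to the staging array */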
		n_mbufs += app.burst_size_tx_read;

		if (n_mbufs < app.burst_size_tx_write) {
			app.mbuf_tx[i].n_mbufs = n_mbufs;
			continue;
		}

		n_pkts = rte_eth_tx_burst(
			app.ports[i],
			0,
			app.mbuf_tx[i].array,
			n_mbufs);

		if (n_pkts < n_mbufs) {
			uint16_t k;

			for (k = n_pkts; k < n_mbufs; k++) {
				struct rte_mbuf *pkt_to_free;

				pkt_to_free = app.mbuf_tx[i].array[k];
				rte_pktmbuf_free(pkt_to_free);
			}
		}

		app.mbuf_tx[i].n_mbufs = 0;
	}
}
Example #7
/*
 * Function handles messages from the daemon.
 */
void
handle_request_from_vswitchd(void)
{
	int j = 0;
	uint16_t dq_pkt = PKT_BURST_SIZE;
	struct rte_mbuf *buf[PKT_BURST_SIZE] = {0};

	/* Attempt to dequeue maximum available number of mbufs from ring */
	while (dq_pkt > 0 &&
	       unlikely(rte_ring_sc_dequeue_bulk(
	       vswitchd_message_ring, (void **)buf, dq_pkt) != 0))
		dq_pkt = (uint16_t)RTE_MIN(
		   rte_ring_count(vswitchd_message_ring), PKT_BURST_SIZE);

	/* Update number of packets transmitted by daemon */
	stats_vport_rx_increment(VSWITCHD, dq_pkt);

	for (j = 0; j < dq_pkt; j++) {
		handle_vswitchd_cmd(buf[j]);
	}
}
Example #8
void
app_main_loop_pipeline_tx(void) {
	struct rte_pipeline *p;
	uint32_t port_in_id[APP_MAX_PORTS];
	uint32_t port_out_id[APP_MAX_PORTS];
	uint32_t table_id[APP_MAX_PORTS];
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_TX))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing TX\n", core_id);

	/* Pipeline configuration */
	struct rte_pipeline_params pipeline_params = {
		.name = "pipeline",
		.socket_id = rte_socket_id(),
	};

	p = rte_pipeline_create(&pipeline_params);
	if (p == NULL)
		rte_panic("%s: Unable to configure the pipeline\n", __func__);

	/* Input port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_reader_params port_ring_params = {
			.ring = app.rings[core_params->swq_in[i]],
		};

		struct rte_pipeline_port_in_params port_params = {
			.ops = &rte_port_ring_reader_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = (app.ether_hdr_pop_push) ?
				app_pipeline_tx_port_in_action_handler : NULL,
			.arg_ah = NULL,
			.burst_size = app.bsz_swq_rd,
		};

		if (rte_pipeline_port_in_create(p, &port_params,
			&port_in_id[i])) {
			rte_panic("%s: Unable to configure input port for "
				"ring TX %i\n", __func__, i);
		}
	}

	/* Output port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ethdev_writer_params port_ethdev_params = {
			.port_id = app.ports[i],
			.queue_id = 0,
			.tx_burst_sz = app.bsz_hwq_wr,
		};

		struct rte_pipeline_port_out_params port_params = {
			.ops = &rte_port_ethdev_writer_ops,
			.arg_create = (void *) &port_ethdev_params,
			.f_action = NULL,
			.f_action_bulk = NULL,
			.arg_ah = NULL,
		};

		if (rte_pipeline_port_out_create(p, &port_params,
			&port_out_id[i])) {
			rte_panic("%s: Unable to configure output port for "
				"port %d\n", __func__, app.ports[i]);
		}
	}

	/* Table configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_stub_ops,
			.arg_create = NULL,
			.f_action_hit = NULL,
			.f_action_miss = NULL,
			.arg_ah = NULL,
			.action_data_size = 0,
		};

		if (rte_pipeline_table_create(p, &table_params, &table_id[i])) {
			rte_panic("%s: Unable to configure table %u\n",
				__func__, table_id[i]);
		}
	}

	/* Interconnecting ports and tables */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
			table_id[i]))
			rte_panic("%s: Unable to connect input port %u to "
				"table %u\n", __func__, port_in_id[i],
				table_id[i]);

	/* Add entries to tables */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_entry default_entry = {
			.action = RTE_PIPELINE_ACTION_PORT,
			{.port_id = port_out_id[i]},
		};

		struct rte_pipeline_table_entry *default_entry_ptr;

		if (rte_pipeline_table_default_entry_add(p, table_id[i],
			&default_entry, &default_entry_ptr))
			rte_panic("%s: Unable to add default entry to "
				"table %u\n", __func__, table_id[i]);
	}

	/* Enable input ports */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_enable(p, port_in_id[i]))
			rte_panic("Unable to enable input port %u\n",
				port_in_id[i]);

	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("%s: Pipeline consistency check failed\n", __func__);

	/* Run-time */
	for (i = 0; ; i++) {
		rte_pipeline_run(p);

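		/* Flush pending output once every (APP_FLUSH + 1) iterations;
		 * APP_FLUSH is assumed to be a power of two minus one */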
		if ((i & APP_FLUSH) == 0)
			rte_pipeline_flush(p);
	}
}

void
app_main_loop_tx(void) {
	struct app_mbuf_array *m[APP_MAX_PORTS];
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_TX))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing TX (no pipeline)\n", core_id);

	for (i = 0; i < APP_MAX_PORTS; i++) {
		m[i] = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (m[i] == NULL)
			rte_panic("%s: Cannot allocate buffer space\n",
				__func__);
	}

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		uint32_t n_mbufs, n_pkts;
		int ret;

		n_mbufs = m[i]->n_mbufs;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings[core_params->swq_in[i]],
			(void **) &m[i]->array[n_mbufs],
			app.bsz_swq_rd);

		if (ret == -ENOENT)
			continue;

		n_mbufs += app.bsz_swq_rd;

		if (n_mbufs < app.bsz_hwq_wr) {
			m[i]->n_mbufs = n_mbufs;
			continue;
		}

		n_pkts = rte_eth_tx_burst(
			app.ports[i],
			0,
			m[i]->array,
			n_mbufs);

		if (n_pkts < n_mbufs) {
			uint32_t k;

			for (k = n_pkts; k < n_mbufs; k++) {
				struct rte_mbuf *pkt_to_free;

				pkt_to_free = m[i]->array[k];
				rte_pktmbuf_free(pkt_to_free);
			}
		}

		m[i]->n_mbufs = 0;
	}
}
Example #9
void
app_main_loop_pipeline_passthrough(void) {
	struct rte_pipeline_params pipeline_params = {
		.name = "pipeline",
		.socket_id = rte_socket_id(),
	};

	struct rte_pipeline *p;
	uint32_t port_in_id[APP_MAX_PORTS];
	uint32_t port_out_id[APP_MAX_PORTS];
	uint32_t table_id[APP_MAX_PORTS];
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing pass-through\n", core_id);

	/* Pipeline configuration */
	p = rte_pipeline_create(&pipeline_params);
	if (p == NULL)
		rte_panic("%s: Unable to configure the pipeline\n", __func__);

	/* Input port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_reader_params port_ring_params = {
			.ring = app.rings[core_params->swq_in[i]],
		};

		struct rte_pipeline_port_in_params port_params = {
			.ops = &rte_port_ring_reader_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.arg_ah = NULL,
			.burst_size = app.bsz_swq_rd,
		};

		if (rte_pipeline_port_in_create(p, &port_params,
			&port_in_id[i])) {
			rte_panic("%s: Unable to configure input port for "
				"ring %d\n", __func__, i);
		}
	}

	/* Output port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_writer_params port_ring_params = {
			.ring = app.rings[core_params->swq_out[i]],
			.tx_burst_sz = app.bsz_swq_wr,
		};

		struct rte_pipeline_port_out_params port_params = {
			.ops = &rte_port_ring_writer_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.f_action_bulk = NULL,
			.arg_ah = NULL,
		};

		if (rte_pipeline_port_out_create(p, &port_params,
			&port_out_id[i])) {
			rte_panic("%s: Unable to configure output port for "
				"ring %d\n", __func__, i);
		}
	}

	/* Table configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_stub_ops,
			.arg_create = NULL,
			.f_action_hit = NULL,
			.f_action_miss = NULL,
			.arg_ah = NULL,
			.action_data_size = 0,
		};

		if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
			rte_panic("%s: Unable to configure table %u\n",
				__func__, i);
	}

	/* Interconnecting ports and tables */
	for (i = 0; i < app.n_ports; i++) {
		if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
			table_id[i])) {
			rte_panic("%s: Unable to connect input port %u to "
				"table %u\n", __func__, port_in_id[i],
				table_id[i]);
		}
	}

	/* Add entries to tables */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_entry default_entry = {
			.action = RTE_PIPELINE_ACTION_PORT,
			{.port_id = port_out_id[i]},
		};

		struct rte_pipeline_table_entry *default_entry_ptr;

		if (rte_pipeline_table_default_entry_add(p, table_id[i],
			&default_entry, &default_entry_ptr))
			rte_panic("%s: Unable to add default entry to "
				"table %u\n", __func__, table_id[i]);
	}

	/* Enable input ports */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_enable(p, port_in_id[i]))
			rte_panic("Unable to enable input port %u\n",
				port_in_id[i]);

	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("%s: Pipeline consistency check failed\n", __func__);

	/* Run-time */
	for (i = 0; ; i++) {
		rte_pipeline_run(p);

		if ((i & APP_FLUSH) == 0)
			rte_pipeline_flush(p);
	}
}

void
app_main_loop_passthrough(void) {
	struct app_mbuf_array *m;
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing pass-through (no pipeline)\n",
		core_id);

	m = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (m == NULL)
		rte_panic("%s: cannot allocate buffer space\n", __func__);

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		int ret;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings[core_params->swq_in[i]],
			(void **) m->array,
			app.bsz_swq_rd);

		if (ret == -ENOENT)
			continue;

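		/* Blocking hand-off: spin until the output ring accepts the
		 * whole burst */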
		do {
			ret = rte_ring_sp_enqueue_bulk(
				app.rings[core_params->swq_out[i]],
				(void **) m->array,
				app.bsz_swq_wr);
		} while (ret < 0);
	}
}
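All of the loops above assume rings created as single-producer/single-consumer, which is what makes the rte_ring_sp_enqueue_bulk()/rte_ring_sc_dequeue_bulk() variants safe to use without locks. A minimal setup sketch under that assumption; the helper name and ring size are illustrative:

#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_ring.h>

#define RING_SIZE 1024 /* illustrative; rte_ring sizes must be a power of two */

/*
 * Create a ring that exactly one lcore enqueues to and exactly one
 * dequeues from, matching the sp_/sc_ calls used throughout these
 * examples.
 */
static struct rte_ring *
create_spsc_ring(const char *name, int socket_id)
{
	struct rte_ring *r = rte_ring_create(name, RING_SIZE, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);

	if (r == NULL)
		rte_panic("Cannot create ring %s: %s\n",
				name, rte_strerror(rte_errno));

	return r;
}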