Example #1 — RX loop: reads a burst of packets from an Ethernet port with rte_eth_rx_burst() and pushes it into the port's software ring with rte_ring_sp_enqueue_bulk().
void
app_main_loop_rx(void) {
	uint32_t i;
	int ret;

	RTE_LOG(INFO, USER1, "Core %u is doing RX\n", rte_lcore_id());

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		uint16_t n_mbufs;

		n_mbufs = rte_eth_rx_burst(
			app.ports[i],
			0,
			app.mbuf_rx.array,
			app.burst_size_rx_read);

		if (n_mbufs == 0)
			continue;

		do {
			ret = rte_ring_sp_enqueue_bulk(
				app.rings_rx[i],
				(void **) app.mbuf_rx.array,
				n_mbufs);
		} while (ret < 0);
	}
}
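The loop above relies on the pre-17.05 ring API, where rte_ring_sp_enqueue_bulk() returns 0 on success and a negative errno when the burst does not fit, so the enqueue is simply retried until it succeeds; the index arithmetic (i + 1) & (app.n_ports - 1) also assumes the port count is a power of two. A minimal sketch of how such a single-producer/single-consumer ring could be created (the ring name and size below are illustrative assumptions, not values taken from the example's app struct):

#include <stdio.h>

#include <rte_lcore.h>
#include <rte_ring.h>

#define APP_RX_RING_SIZE 1024	/* ring sizes must be a power of two */

/* Hypothetical helper: create the SP/SC ring that feeds one RX port. */
static struct rte_ring *
app_create_rx_ring(unsigned port_id)
{
	char name[RTE_RING_NAMESIZE];

	snprintf(name, sizeof(name), "app_ring_rx_%u", port_id);

	/* RING_F_SP_ENQ / RING_F_SC_DEQ match the sp_enqueue/sc_dequeue
	 * calls used throughout these examples. */
	return rte_ring_create(name, APP_RX_RING_SIZE, rte_socket_id(),
		RING_F_SP_ENQ | RING_F_SC_DEQ);
}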
Example #2 — worker loop: dequeues a burst from a port's RX ring and re-enqueues it onto the paired port's TX ring.
void
app_main_loop_worker(void) {
	struct app_mbuf_array *worker_mbuf;
	uint32_t i;

	RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n",
		rte_lcore_id());

	worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (worker_mbuf == NULL)
		rte_panic("Worker thread: cannot allocate buffer space\n");

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		int ret;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings_rx[i],
			(void **) worker_mbuf->array,
			app.burst_size_worker_read);

		if (ret == -ENOENT)
			continue;

		do {
			ret = rte_ring_sp_enqueue_bulk(
				app.rings_tx[i ^ 1],
				(void **) worker_mbuf->array,
				app.burst_size_worker_write);
		} while (ret < 0);
	}
}
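The worker dequeues a burst from port i's RX ring and spins on the single-producer enqueue until it lands in the paired port's TX ring (i ^ 1). Both loops carry their bursts in a small scratch structure; a sketch of what struct app_mbuf_array is assumed to look like (the array size is an illustrative guess, the real definition lives in the application's headers):

#include <stdint.h>

#include <rte_mbuf.h>

#define APP_MBUF_ARRAY_SIZE 256	/* assumed maximum burst size */

struct app_mbuf_array {
	struct rte_mbuf *array[APP_MBUF_ARRAY_SIZE];
	uint16_t n_mbufs;
};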
Example #3 — dpdk_link_send_bulk(): copies a batch of requests (and optional packet payloads) into mbufs and enqueues them onto a message ring toward the datapath.
/* Sends 'num_pkts' 'packets' and 'request' data to datapath. */
int
dpdk_link_send_bulk(struct dpif_dpdk_message *request,
                    const struct ofpbuf *const *packets, size_t num_pkts)
{
    struct rte_mbuf *mbufs[PKT_BURST_SIZE] = {NULL};
    uint8_t *mbuf_data = NULL;
    int i = 0;
    int ret = 0;

    if (num_pkts > PKT_BURST_SIZE) {
        return EINVAL;
    }

    DPDK_DEBUG()

    for (i = 0; i < num_pkts; i++) {
        mbufs[i] = rte_pktmbuf_alloc(mp);

        if (!mbufs[i]) {
            /* Free any mbufs already allocated for this burst before
             * bailing out. */
            while (i--) {
                rte_pktmbuf_free(mbufs[i]);
            }
            return ENOBUFS;
        }

        mbuf_data = rte_pktmbuf_mtod(mbufs[i], uint8_t *);
        rte_memcpy(mbuf_data, &request[i], sizeof(request[i]));

        if (request->type == DPIF_DPDK_PACKET_FAMILY) {
            mbuf_data = mbuf_data + sizeof(request[i]);
            if (likely(packets[i]->size <= (mbufs[i]->buf_len - sizeof(request[i])))) {
                rte_memcpy(mbuf_data, packets[i]->data, packets[i]->size);
                rte_pktmbuf_data_len(mbufs[i]) =
                    sizeof(request[i]) + packets[i]->size;
                rte_pktmbuf_pkt_len(mbufs[i]) = rte_pktmbuf_data_len(mbufs[i]);
            } else {
                RTE_LOG(ERR, APP, "%s, %d: %s", __FUNCTION__, __LINE__,
                        "memcpy prevented: packet size exceeds available mbuf space");
                for (i = 0; i < num_pkts; i++) {
                    rte_pktmbuf_free(mbufs[i]);
                }
                return ENOMEM;
            }
        } else {
            rte_pktmbuf_data_len(mbufs[i]) = sizeof(request[i]);
            rte_pktmbuf_pkt_len(mbufs[i]) = rte_pktmbuf_data_len(mbufs[i]);
        }
    }

    ret = rte_ring_sp_enqueue_bulk(message_ring, (void * const *)mbufs, num_pkts);
    if (ret == -ENOBUFS) {
        for (i = 0; i < num_pkts; i++) {
            rte_pktmbuf_free(mbufs[i]);
        }
        ret = ENOBUFS;
    } else if (unlikely(ret == -EDQUOT)) {
        ret = EDQUOT;
    }

    return ret;
}
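dpdk_link_send_bulk() copies each request, plus the packet payload for packet-family messages, into a freshly allocated mbuf and enqueues the whole batch on message_ring, mapping the ring's negative return codes to positive errno values. A hypothetical single-message wrapper on top of it could look like this (dpdk_link_send() is an illustrative name, not part of the example above):

#include <errno.h>

/* Hypothetical convenience wrapper around dpdk_link_send_bulk(). */
static int
dpdk_link_send(struct dpif_dpdk_message *request,
               const struct ofpbuf *const packet)
{
    int error = dpdk_link_send_bulk(request, &packet, 1);

    /* EDQUOT only means the ring's high-water mark was crossed; the
     * message was still enqueued, so treat it as a soft warning. */
    return (error == EDQUOT) ? 0 : error;
}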
Example #4 — I/O RX buffering: accumulates mbufs per worker and enqueues a full burst onto the worker's ring once it is complete.
File: runtime.c Project: ATCP/mtcp
static inline void
app_lcore_io_rx_buffer_to_send (
	struct app_lcore_params_io *lp,
	uint32_t worker,
	struct rte_mbuf *mbuf,
	uint32_t bsz)
{
	uint32_t pos;
	int ret;

	pos = lp->rx.mbuf_out[worker].n_mbufs;
	lp->rx.mbuf_out[worker].array[pos ++] = mbuf;
	if (likely(pos < bsz)) {
		lp->rx.mbuf_out[worker].n_mbufs = pos;
		return;
	}

	ret = rte_ring_sp_enqueue_bulk(
		lp->rx.rings[worker],
		(void **) lp->rx.mbuf_out[worker].array,
		bsz);

	if (unlikely(ret == -ENOBUFS)) {
		uint32_t k;
		for (k = 0; k < bsz; k ++) {
			struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
			rte_pktmbuf_free(m);
		}
	}

	lp->rx.mbuf_out[worker].n_mbufs = 0;
	lp->rx.mbuf_out_flush[worker] = 0;

#if APP_STATS
	lp->rx.rings_iters[worker] ++;
	if (likely(ret == 0)) {
		lp->rx.rings_count[worker] ++;
	}
	if (unlikely(lp->rx.rings_iters[worker] == APP_STATS)) {
		unsigned lcore = rte_lcore_id();

		printf("\tI/O RX %u out (worker %u): enq success rate = %.2f\n",
			lcore,
			(unsigned)worker,
			((double) lp->rx.rings_count[worker]) / ((double) lp->rx.rings_iters[worker]));
		lp->rx.rings_iters[worker] = 0;
		lp->rx.rings_count[worker] = 0;
	}
#endif
}
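The function only enqueues once a full burst of bsz mbufs has accumulated; partially filled buffers are pushed out by a separate flush path driven by the mbuf_out_flush flag. A sketch of that companion flush step, assuming the same field layout as above (the real application also decides when to call it from its main loop and skips workers whose buffers were recently sent):

static inline void
app_lcore_io_rx_flush_one(struct app_lcore_params_io *lp, uint32_t worker)
{
	uint32_t n_mbufs = lp->rx.mbuf_out[worker].n_mbufs;
	int ret;

	if (n_mbufs == 0)
		return;

	ret = rte_ring_sp_enqueue_bulk(
		lp->rx.rings[worker],
		(void **) lp->rx.mbuf_out[worker].array,
		n_mbufs);

	/* Pre-17.05 ring API: 0 on success, negative errno when nothing
	 * could be enqueued, in which case the burst is dropped. */
	if (unlikely(ret < 0)) {
		uint32_t k;

		for (k = 0; k < n_mbufs; k++)
			rte_pktmbuf_free(lp->rx.mbuf_out[worker].array[k]);
	}

	lp->rx.mbuf_out[worker].n_mbufs = 0;
	lp->rx.mbuf_out_flush[worker] = 1;	/* mark the buffer as flushed */
}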
Example #5 — pass-through loops: one built on the Packet Framework pipeline (rte_pipeline) and one plain ring-to-ring version.
void
app_main_loop_pipeline_passthrough(void) {
	struct rte_pipeline_params pipeline_params = {
		.name = "pipeline",
		.socket_id = rte_socket_id(),
	};

	struct rte_pipeline *p;
	uint32_t port_in_id[APP_MAX_PORTS];
	uint32_t port_out_id[APP_MAX_PORTS];
	uint32_t table_id[APP_MAX_PORTS];
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing pass-through\n", core_id);

	/* Pipeline configuration */
	p = rte_pipeline_create(&pipeline_params);
	if (p == NULL)
		rte_panic("%s: Unable to configure the pipeline\n", __func__);

	/* Input port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_reader_params port_ring_params = {
			.ring = app.rings[core_params->swq_in[i]],
		};

		struct rte_pipeline_port_in_params port_params = {
			.ops = &rte_port_ring_reader_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.arg_ah = NULL,
			.burst_size = app.bsz_swq_rd,
		};

		if (rte_pipeline_port_in_create(p, &port_params,
			&port_in_id[i])) {
			rte_panic("%s: Unable to configure input port for "
				"ring %d\n", __func__, i);
		}
	}

	/* Output port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_writer_params port_ring_params = {
			.ring = app.rings[core_params->swq_out[i]],
			.tx_burst_sz = app.bsz_swq_wr,
		};

		struct rte_pipeline_port_out_params port_params = {
			.ops = &rte_port_ring_writer_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.f_action_bulk = NULL,
			.arg_ah = NULL,
		};

		if (rte_pipeline_port_out_create(p, &port_params,
			&port_out_id[i])) {
			rte_panic("%s: Unable to configure output port for "
				"ring %d\n", __func__, i);
		}
	}

	/* Table configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_stub_ops,
			.arg_create = NULL,
			.f_action_hit = NULL,
			.f_action_miss = NULL,
			.arg_ah = NULL,
			.action_data_size = 0,
		};

		if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
			rte_panic("%s: Unable to configure table %u\n",
				__func__, i);
	}

	/* Interconnecting ports and tables */
	for (i = 0; i < app.n_ports; i++) {
		if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
			table_id[i])) {
			rte_panic("%s: Unable to connect input port %u to "
				"table %u\n", __func__, port_in_id[i],
				table_id[i]);
		}
	}

	/* Add entries to tables */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_entry default_entry = {
			.action = RTE_PIPELINE_ACTION_PORT,
			{.port_id = port_out_id[i]},
		};

		struct rte_pipeline_table_entry *default_entry_ptr;

		if (rte_pipeline_table_default_entry_add(p, table_id[i],
			&default_entry, &default_entry_ptr))
			rte_panic("%s: Unable to add default entry to "
				"table %u\n", __func__, table_id[i]);
	}

	/* Enable input ports */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_enable(p, port_in_id[i]))
			rte_panic("Unable to enable input port %u\n",
				port_in_id[i]);

	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("%s: Pipeline consistency check failed\n", __func__);

	/* Run-time */
	for (i = 0; ; i++) {
		rte_pipeline_run(p);

		if ((i & APP_FLUSH) == 0)
			rte_pipeline_flush(p);
	}
}

void
app_main_loop_passthrough(void) {
	struct app_mbuf_array *m;
	uint32_t i;

	uint32_t core_id = rte_lcore_id();
	struct app_core_params *core_params = app_get_core_params(core_id);

	if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
		rte_panic("Core %u misconfiguration\n", core_id);

	RTE_LOG(INFO, USER1, "Core %u is doing pass-through (no pipeline)\n",
		core_id);

	m = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (m == NULL)
		rte_panic("%s: cannot allocate buffer space\n", __func__);

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		int ret;

		ret = rte_ring_sc_dequeue_bulk(
			app.rings[core_params->swq_in[i]],
			(void **) m->array,
			app.bsz_swq_rd);

		if (ret == -ENOENT)
			continue;

		do {
			ret = rte_ring_sp_enqueue_bulk(
				app.rings[core_params->swq_out[i]],
				(void **) m->array,
				app.bsz_swq_wr);
		} while (ret < 0);
	}
}
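All of these loops run forever on dedicated lcores. As a final, minimal sketch (the app_lcore_main_loop() wrapper and the init placeholder are illustrative assumptions, not part of the examples above), this is roughly how such loops are dispatched after EAL initialization:

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>

/* Hypothetical per-lcore dispatcher: each core runs the loop that
 * matches its configured role. */
static int
app_lcore_main_loop(__rte_unused void *arg)
{
	app_main_loop_passthrough();	/* or the RX / worker / pipeline loop */
	return 0;
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		rte_panic("Cannot init EAL\n");

	/* Application-specific init of ports, mbuf pools, rings and the
	 * app/core_params structures goes here. */

	/* Launch the loop on every lcore; the loops never return. */
	rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MASTER);
	rte_eal_mp_wait_lcore();

	return 0;
}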