Example no. 1
static void
slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
{
	int retval;
	uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TX_DESC_DEFAULT;

	if (portid >= rte_eth_dev_count())
		rte_exit(EXIT_FAILURE, "Invalid port\n");

	retval = rte_eth_dev_configure(portid, 1, 1, &port_conf);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
				portid, retval);

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
				"failed (res=%d)\n", portid, retval);

	/* RX setup */
	retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					rte_eth_dev_socket_id(portid), NULL,
					mbuf_pool);
	if (retval < 0)
		rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
				portid, retval);

	/* TX setup */
	retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid), NULL);

	if (retval < 0)
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
				portid, retval);

	retval  = rte_eth_dev_start(portid);
	if (retval < 0)
		rte_exit(retval,
				"Start port %d failed (res=%d)",
				portid, retval);

	struct ether_addr addr;

	rte_eth_macaddr_get(portid, &addr);
	printf("Port %u MAC: ", portid);
	PRINT_MAC(addr);
	printf("\n");
}
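The function above relies on a few symbols defined elsewhere in the real application (port_conf, the descriptor defaults and the PRINT_MAC macro). A minimal sketch of plausible stand-in definitions, assuming the same DPDK 17.x/18.x API as the example, is shown below; the values and the macro body are assumptions, not the original source.

/*
 * Assumed support definitions for the example above (port_conf, descriptor
 * defaults and PRINT_MAC are defined elsewhere in the original program).
 */
#define RTE_RX_DESC_DEFAULT 1024
#define RTE_TX_DESC_DEFAULT 1024

#define PRINT_MAC(addr) printf("%02"PRIx8":%02"PRIx8":%02"PRIx8 \
	":%02"PRIx8":%02"PRIx8":%02"PRIx8, \
	(addr).addr_bytes[0], (addr).addr_bytes[1], (addr).addr_bytes[2], \
	(addr).addr_bytes[3], (addr).addr_bytes[4], (addr).addr_bytes[5])

static struct rte_eth_conf port_conf = {
	.rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN },
};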
Example no. 2
static void
app_init_nics(void)
{
	unsigned socket;
	uint32_t lcore;
	uint16_t port;
	uint8_t queue;
	int ret;
	uint32_t n_rx_queues, n_tx_queues;

	/* Init NIC ports and queues, then start the ports */
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		struct rte_mempool *pool;
		uint16_t nic_rx_ring_size;
		uint16_t nic_tx_ring_size;
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_txconf txq_conf;
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		n_rx_queues = app_get_nic_rx_queues_per_port(port);
		n_tx_queues = app.nic_tx_port_mask[port];

		if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
			continue;
		}

		/* Init port */
		printf("Initializing NIC port %u ...\n", port);
		rte_eth_dev_info_get(port, &dev_info);
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on hardware support,"
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				port,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		ret = rte_eth_dev_configure(
			port,
			(uint8_t) n_rx_queues,
			(uint8_t) n_tx_queues,
			&local_port_conf);
		if (ret < 0) {
			rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
		}
		rte_eth_promiscuous_enable(port);

		nic_rx_ring_size = app.nic_rx_ring_size;
		nic_tx_ring_size = app.nic_tx_ring_size;
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(
			port, &nic_rx_ring_size, &nic_tx_ring_size);
		if (ret < 0) {
			rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
				  port, ret);
		}
		app.nic_rx_ring_size = nic_rx_ring_size;
		app.nic_tx_ring_size = nic_tx_ring_size;

		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		/* Init RX queues */
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
			if (app.nic_rx_queue_mask[port][queue] == 0) {
				continue;
			}

			app_get_lcore_for_nic_rx(port, queue, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			pool = app.lcore_params[lcore].pool;

			printf("Initializing NIC port %u RX queue %u ...\n",
				port, queue);
			ret = rte_eth_rx_queue_setup(
				port,
				queue,
				(uint16_t) app.nic_rx_ring_size,
				socket,
				&rxq_conf,
				pool);
			if (ret < 0) {
				rte_panic("Cannot init RX queue %u for port %u (%d)\n",
					  queue, port, ret);
			}
		}

		txq_conf = dev_info.default_txconf;
		txq_conf.offloads = local_port_conf.txmode.offloads;
		/* Init TX queues */
		if (app.nic_tx_port_mask[port] == 1) {
			app_get_lcore_for_nic_tx(port, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			printf("Initializing NIC port %u TX queue 0 ...\n",
				port);
			ret = rte_eth_tx_queue_setup(
				port,
				0,
				(uint16_t) app.nic_tx_ring_size,
				socket,
				&txq_conf);
			if (ret < 0) {
				rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
					port,
					ret);
			}
		}

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0) {
			rte_panic("Cannot start port %d (%d)\n", port, ret);
		}
	}

	check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0));
}
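app_init_nics() ends by calling check_all_ports_link_status(), which is not part of this listing. A hedged sketch of a typical implementation, modeled on the usual DPDK sample-application helper, could look like the following; the timing constants are assumptions, and the code needs <rte_ethdev.h>, <rte_cycles.h> and <string.h>.

/* Approximation of the link-status poll loop called above (assumed, not the
 * original helper). */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* ms between polls */
#define MAX_CHECK_TIME 90  /* give up after roughly 9 s */
	struct rte_eth_link link;
	uint16_t portid;
	uint8_t count, all_ports_up;

	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1u << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		if (all_ports_up)
			break;
		rte_delay_ms(CHECK_INTERVAL);
	}
}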
Example no. 3
static void
bond_port_init(struct rte_mempool *mbuf_pool)
{
	int retval;
	uint8_t i;
	uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TX_DESC_DEFAULT;

	retval = rte_eth_bond_create("net_bonding0", BONDING_MODE_ALB,
			0 /*SOCKET_ID_ANY*/);
	if (retval < 0)
		rte_exit(EXIT_FAILURE,
				"Faled to create bond port\n");

	BOND_PORT = retval;

	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &port_conf);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
				BOND_PORT, retval);

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(BOND_PORT, &nb_rxd, &nb_txd);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
				"failed (res=%d)\n", BOND_PORT, retval);

	/* RX setup */
	retval = rte_eth_rx_queue_setup(BOND_PORT, 0, nb_rxd,
					rte_eth_dev_socket_id(BOND_PORT), NULL,
					mbuf_pool);
	if (retval < 0)
		rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
				BOND_PORT, retval);

	/* TX setup */
	retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
				rte_eth_dev_socket_id(BOND_PORT), NULL);

	if (retval < 0)
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
				BOND_PORT, retval);

	for (i = 0; i < slaves_count; i++) {
		if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1)
			rte_exit(-1, "Oooops! adding slave (%u) to bond (%u) failed!\n",
					slaves[i], BOND_PORT);

	}

	retval  = rte_eth_dev_start(BOND_PORT);
	if (retval < 0)
		rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval);

	rte_eth_promiscuous_enable(BOND_PORT);

	struct ether_addr addr;

	rte_eth_macaddr_get(BOND_PORT, &addr);
	printf("Port %u MAC: ", (unsigned)BOND_PORT);
	PRINT_MAC(addr);
	printf("\n");
}
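For context, a hedged sketch of how the slave ports and the bonded port could be tied together. The globals and the ports_init() wrapper are assumptions standing in for the originals, and slave_port_init() is assumed to be the routine shown in Example no. 1.

/* Assumed glue code: initialize each physical port as a slave, record it,
 * then build the bonded port on top of them. */
static uint16_t slaves[RTE_MAX_ETHPORTS];
static uint16_t slaves_count;

static void
ports_init(struct rte_mempool *mbuf_pool)
{
	uint16_t i, nb_ports = rte_eth_dev_count();

	for (i = 0; i < nb_ports; i++) {
		slave_port_init(i, mbuf_pool);   /* see Example no. 1 */
		slaves[slaves_count++] = i;
	}
	bond_port_init(mbuf_pool);
}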
Example no. 4
File: init.c Project: emmericp/dpdk
static inline int
app_link_filter_arp_add(struct app_link_params *link)
{
	struct rte_eth_ethertype_filter filter = {
		.ether_type = ETHER_TYPE_ARP,
		.flags = 0,
		.queue = link->arp_q,
	};

	return rte_eth_dev_filter_ctrl(link->pmd_id,
		RTE_ETH_FILTER_ETHERTYPE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

static inline int
app_link_filter_tcp_syn_add(struct app_link_params *link)
{
	struct rte_eth_syn_filter filter = {
		.hig_pri = 1,
		.queue = link->tcp_syn_q,
	};

	return rte_eth_dev_filter_ctrl(link->pmd_id,
		RTE_ETH_FILTER_SYN,
		RTE_ETH_FILTER_ADD,
		&filter);
}

static inline int
app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = 0,
		.proto_mask = 0, /* Disable */
		.tcp_flags = 0,
		.priority = 1, /* Lowest */
		.queue = l1->ip_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

static inline int
app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = 0,
		.proto_mask = 0, /* Disable */
		.tcp_flags = 0,
		.priority = 1, /* Lowest */
		.queue = l1->ip_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

static inline int
app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->tcp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

static inline int
app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->tcp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

static inline int
app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->udp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

static inline int
app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->udp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

static inline int
app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_SCTP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->sctp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

static inline int
app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_SCTP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->sctp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

static void
app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
{
	if (cp->arp_q != 0) {
		int status = app_link_filter_arp_add(cp);

		APP_LOG(app, LOW, "%s (%" PRIu32 "): "
			"Adding ARP filter (queue = %" PRIu32 ")",
			cp->name, cp->pmd_id, cp->arp_q);

		if (status)
			rte_panic("%s (%" PRIu32 "): "
				"Error adding ARP filter "
				"(queue = %" PRIu32 ") (%" PRId32 ")\n",
				cp->name, cp->pmd_id, cp->arp_q, status);
	}
}

static void
app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
{
	if (cp->tcp_syn_q != 0) {
		int status = app_link_filter_tcp_syn_add(cp);

		APP_LOG(app, LOW, "%s (%" PRIu32 "): "
			"Adding TCP SYN filter (queue = %" PRIu32 ")",
			cp->name, cp->pmd_id, cp->tcp_syn_q);

		if (status)
			rte_panic("%s (%" PRIu32 "): "
				"Error adding TCP SYN filter "
				"(queue = %" PRIu32 ") (%" PRId32 ")\n",
				cp->name, cp->pmd_id, cp->tcp_syn_q,
				status);
	}
}

void
app_link_up_internal(struct app_params *app, struct app_link_params *cp)
{
	uint32_t i;
	int status;

	/* For each link, add filters for IP of current link */
	if (cp->ip != 0) {
		for (i = 0; i < app->n_links; i++) {
			struct app_link_params *p = &app->link_params[i];

			/* IP */
			if (p->ip_local_q != 0) {
				int status = app_link_filter_ip_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32 "): "
					"Adding IP filter (queue= %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->ip_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding IP "
						"filter (queue= %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->ip_local_q, cp->ip, status);
			}

			/* TCP */
			if (p->tcp_local_q != 0) {
				int status = app_link_filter_tcp_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32 "): "
					"Adding TCP filter "
					"(queue = %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->tcp_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding TCP "
						"filter (queue = %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->tcp_local_q, cp->ip, status);
			}

			/* UDP */
			if (p->udp_local_q != 0) {
				int status = app_link_filter_udp_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32 "): "
					"Adding UDP filter "
					"(queue = %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->udp_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding UDP "
						"filter (queue = %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->udp_local_q, cp->ip, status);
			}

			/* SCTP */
			if (p->sctp_local_q != 0) {
				int status = app_link_filter_sctp_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32
					"): Adding SCTP filter "
					"(queue = %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->sctp_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding SCTP "
						"filter (queue = %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->sctp_local_q, cp->ip,
						status);
			}
		}
	}

	/* PMD link up */
	status = rte_eth_dev_set_link_up(cp->pmd_id);
	/* Do not panic if PMD does not provide link up functionality */
	if (status < 0 && status != -ENOTSUP)
		rte_panic("%s (%" PRIu32 "): PMD set link up error %"
			PRId32 "\n", cp->name, cp->pmd_id, status);

	/* Mark link as UP */
	cp->state = 1;
}

void
app_link_down_internal(struct app_params *app, struct app_link_params *cp)
{
	uint32_t i;
	int status;

	/* PMD link down */
	status = rte_eth_dev_set_link_down(cp->pmd_id);
	/* Do not panic if PMD does not provide link down functionality */
	if (status < 0 && status != -ENOTSUP)
		rte_panic("%s (%" PRIu32 "): PMD set link down error %"
			PRId32 "\n", cp->name, cp->pmd_id, status);

	/* Mark link as DOWN */
	cp->state = 0;

	/* Return if current link IP is not valid */
	if (cp->ip == 0)
		return;

	/* For each link, remove filters for IP of current link */
	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p = &app->link_params[i];

		/* IP */
		if (p->ip_local_q != 0) {
			int status = app_link_filter_ip_del(p, cp);

			APP_LOG(app, LOW, "%s (%" PRIu32
				"): Deleting IP filter "
				"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->ip_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting IP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32
					") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->ip_local_q,
					cp->ip, status);
		}

		/* TCP */
		if (p->tcp_local_q != 0) {
			int status = app_link_filter_tcp_del(p, cp);

			APP_LOG(app, LOW, "%s (%" PRIu32
				"): Deleting TCP filter "
				"(queue = %" PRIu32
				", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->tcp_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting TCP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32
					") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->tcp_local_q,
					cp->ip, status);
		}

		/* UDP */
		if (p->udp_local_q != 0) {
			int status = app_link_filter_udp_del(p, cp);

			APP_LOG(app, LOW, "%s (%" PRIu32
				"): Deleting UDP filter "
				"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->udp_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting UDP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32
					") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->udp_local_q,
					cp->ip, status);
		}

		/* SCTP */
		if (p->sctp_local_q != 0) {
			int status = app_link_filter_sctp_del(p, cp);

			APP_LOG(app, LOW, "%s (%" PRIu32
				"): Deleting SCTP filter "
				"(queue = %" PRIu32
				", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->sctp_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting SCTP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32
					") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->sctp_local_q,
					cp->ip, status);
		}
	}
}

static void
app_check_link(struct app_params *app)
{
	uint32_t all_links_up, i;

	all_links_up = 1;

	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p = &app->link_params[i];
		struct rte_eth_link link_params;

		memset(&link_params, 0, sizeof(link_params));
		rte_eth_link_get(p->pmd_id, &link_params);

		APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
			p->name,
			p->pmd_id,
			link_params.link_speed / 1000,
			link_params.link_status ? "UP" : "DOWN");

		if (link_params.link_status == ETH_LINK_DOWN)
			all_links_up = 0;
	}

	if (all_links_up == 0)
		rte_panic("Some links are DOWN\n");
}

static uint32_t
is_any_swq_frag_or_ras(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_swq; i++) {
		struct app_pktq_swq_params *p = &app->swq_params[i];

		if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
			(p->ipv4_ras == 1) || (p->ipv6_ras == 1))
			return 1;
	}

	return 0;
}

static void
app_init_link_frag_ras(struct app_params *app)
{
	uint32_t i;

	if (is_any_swq_frag_or_ras(app)) {
		for (i = 0; i < app->n_pktq_hwq_out; i++) {
			struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i];

			p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
		}
	}
}

static inline int
app_get_cpu_socket_id(uint32_t pmd_id)
{
	int status = rte_eth_dev_socket_id(pmd_id);

	return (status != SOCKET_ID_ANY) ? status : 0;
}

static inline int
app_link_rss_enabled(struct app_link_params *cp)
{
	return (cp->n_rss_qs) ? 1 : 0;
}

static void
app_link_rss_setup(struct app_link_params *cp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
	uint32_t i;
	int status;

	/* Get RETA size */
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(cp->pmd_id, &dev_info);

	if (dev_info.reta_size == 0)
		rte_panic("%s (%u): RSS setup error (null RETA size)\n",
			cp->name, cp->pmd_id);

	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
		rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
			cp->name, cp->pmd_id);

	/* Setup RETA contents */
	memset(reta_conf, 0, sizeof(reta_conf));

	for (i = 0; i < dev_info.reta_size; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < dev_info.reta_size; i++) {
		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
		uint32_t rss_qs_pos = i % cp->n_rss_qs;

		reta_conf[reta_id].reta[reta_pos] =
			(uint16_t) cp->rss_qs[rss_qs_pos];
	}

	/* RETA update */
	status = rte_eth_dev_rss_reta_update(cp->pmd_id,
		reta_conf,
		dev_info.reta_size);
	if (status != 0)
		rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
			cp->name, cp->pmd_id);
}

static void
app_init_link_set_config(struct app_link_params *p)
{
	if (p->n_rss_qs) {
		p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
		p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
			p->rss_proto_ipv6 |
			p->rss_proto_l2;
	}
}

static void
app_init_link(struct app_params *app)
{
	uint32_t i;

	app_init_link_frag_ras(app);

	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p_link = &app->link_params[i];
		uint32_t link_id, n_hwq_in, n_hwq_out, j;
		int status;

		sscanf(p_link->name, "LINK%" PRIu32, &link_id);
		n_hwq_in = app_link_get_n_rxq(app, p_link);
		n_hwq_out = app_link_get_n_txq(app, p_link);
		app_init_link_set_config(p_link);

		APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
			"(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
			p_link->name,
			p_link->pmd_id,
			n_hwq_in,
			n_hwq_out);

		/* LINK */
		status = rte_eth_dev_configure(
			p_link->pmd_id,
			n_hwq_in,
			n_hwq_out,
			&p_link->conf);
		if (status < 0)
			rte_panic("%s (%" PRId32 "): "
				"init error (%" PRId32 ")\n",
				p_link->name, p_link->pmd_id, status);

		rte_eth_macaddr_get(p_link->pmd_id,
			(struct ether_addr *) &p_link->mac_addr);

		if (p_link->promisc)
			rte_eth_promiscuous_enable(p_link->pmd_id);

		/* RXQ */
		for (j = 0; j < app->n_pktq_hwq_in; j++) {
			struct app_pktq_hwq_in_params *p_rxq =
				&app->hwq_in_params[j];
			uint32_t rxq_link_id, rxq_queue_id;
			uint16_t nb_rxd = p_rxq->size;

			sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
				&rxq_link_id, &rxq_queue_id);
			if (rxq_link_id != link_id)
				continue;

			status = rte_eth_dev_adjust_nb_rx_tx_desc(
				p_link->pmd_id,
				&nb_rxd,
				NULL);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s adjust number of Rx descriptors "
					"error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_rxq->name,
					status);

			status = rte_eth_rx_queue_setup(
				p_link->pmd_id,
				rxq_queue_id,
				nb_rxd,
				app_get_cpu_socket_id(p_link->pmd_id),
				&p_rxq->conf,
				app->mempool[p_rxq->mempool_id]);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s init error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_rxq->name,
					status);
		}

		/* TXQ */
		for (j = 0; j < app->n_pktq_hwq_out; j++) {
			struct app_pktq_hwq_out_params *p_txq =
				&app->hwq_out_params[j];
			uint32_t txq_link_id, txq_queue_id;
			uint16_t nb_txd = p_txq->size;

			sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
				&txq_link_id, &txq_queue_id);
			if (txq_link_id != link_id)
				continue;

			status = rte_eth_dev_adjust_nb_rx_tx_desc(
				p_link->pmd_id,
				NULL,
				&nb_txd);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s adjust number of Tx descriptors "
					"error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_txq->name,
					status);

			status = rte_eth_tx_queue_setup(
				p_link->pmd_id,
				txq_queue_id,
				nb_txd,
				app_get_cpu_socket_id(p_link->pmd_id),
				&p_txq->conf);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s init error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_txq->name,
					status);
		}

		/* LINK START */
		status = rte_eth_dev_start(p_link->pmd_id);
		if (status < 0)
			rte_panic("Cannot start %s (error %" PRId32 ")\n",
				p_link->name, status);

		/* LINK FILTERS */
		app_link_set_arp_filter(app, p_link);
		app_link_set_tcp_syn_filter(app, p_link);
		if (app_link_rss_enabled(p_link))
			app_link_rss_setup(p_link);

		/* LINK UP */
		app_link_up_internal(app, p_link);
	}

	app_check_link(app);
}

static void
app_init_swq(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_swq; i++) {
		struct app_pktq_swq_params *p = &app->swq_params[i];
		unsigned flags = 0;

		if (app_swq_get_readers(app, p) == 1)
			flags |= RING_F_SC_DEQ;
		if (app_swq_get_writers(app, p) == 1)
			flags |= RING_F_SP_ENQ;

		APP_LOG(app, HIGH, "Initializing %s...", p->name);
		app->swq[i] = rte_ring_create(
				p->name,
				p->size,
				p->cpu_socket_id,
				flags);

		if (app->swq[i] == NULL)
			rte_panic("%s init error\n", p->name);
	}
}

static void
app_init_tm(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_tm; i++) {
		struct app_pktq_tm_params *p_tm = &app->tm_params[i];
		struct app_link_params *p_link;
		struct rte_eth_link link_eth_params;
		struct rte_sched_port *sched;
		uint32_t n_subports, subport_id;
		int status;

		p_link = app_get_link_for_tm(app, p_tm);
		/* LINK */
		rte_eth_link_get(p_link->pmd_id, &link_eth_params);

		/* TM */
		p_tm->sched_port_params.name = p_tm->name;
		p_tm->sched_port_params.socket =
			app_get_cpu_socket_id(p_link->pmd_id);
		p_tm->sched_port_params.rate =
			(uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;

		APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
		sched = rte_sched_port_config(&p_tm->sched_port_params);
		if (sched == NULL)
			rte_panic("%s init error\n", p_tm->name);
		app->tm[i] = sched;

		/* Subport */
		n_subports = p_tm->sched_port_params.n_subports_per_port;
		for (subport_id = 0; subport_id < n_subports; subport_id++) {
			uint32_t n_pipes_per_subport, pipe_id;

			status = rte_sched_subport_config(sched,
				subport_id,
				&p_tm->sched_subport_params[subport_id]);
			if (status)
				rte_panic("%s subport %" PRIu32
					" init error (%" PRId32 ")\n",
					p_tm->name, subport_id, status);

			/* Pipe */
			n_pipes_per_subport =
				p_tm->sched_port_params.n_pipes_per_subport;
			for (pipe_id = 0;
				pipe_id < n_pipes_per_subport;
				pipe_id++) {
				int profile_id = p_tm->sched_pipe_to_profile[
					subport_id * APP_MAX_SCHED_PIPES +
					pipe_id];

				if (profile_id == -1)
					continue;

				status = rte_sched_pipe_config(sched,
					subport_id,
					pipe_id,
					profile_id);
				if (status)
					rte_panic("%s subport %" PRIu32
						" pipe %" PRIu32
						" (profile %" PRId32 ") "
						"init error (% " PRId32 ")\n",
						p_tm->name, subport_id, pipe_id,
						profile_id, status);
			}
		}
	}
}

#ifndef RTE_EXEC_ENV_LINUXAPP
static void
app_init_tap(struct app_params *app) {
	if (app->n_pktq_tap == 0)
		return;

	rte_panic("TAP device not supported.\n");
}
#else
static void
app_init_tap(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_tap; i++) {
		struct app_pktq_tap_params *p_tap = &app->tap_params[i];
		struct ifreq ifr;
		int fd, status;

		APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);

		fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
		if (fd < 0)
			rte_panic("Cannot open file /dev/net/tun\n");

		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
		snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);

		status = ioctl(fd, TUNSETIFF, (void *) &ifr);
		if (status < 0)
			rte_panic("TAP setup error\n");

		app->tap[i] = fd;
	}
}
#endif
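The TAP descriptors opened above are later used to move raw frames in and out of the pipeline. As an illustration only, a minimal sketch (an assumed helper, not part of init.c) of pulling one frame from a non-blocking TAP fd into an mbuf; it needs <unistd.h> and <rte_mbuf.h>.

static struct rte_mbuf *
tap_rx_one(int tap_fd, struct rte_mempool *pool)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
	ssize_t n;

	if (m == NULL)
		return NULL;

	/* O_NONBLOCK: read() returns -1 with EAGAIN when no frame is pending */
	n = read(tap_fd, rte_pktmbuf_mtod(m, void *),
		rte_pktmbuf_tailroom(m));
	if (n <= 0) {
		rte_pktmbuf_free(m);
		return NULL;
	}

	m->data_len = (uint16_t)n;
	m->pkt_len = (uint32_t)n;
	return m;
}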
Example no. 5
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = {0};
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow processing only a subset of the VMDQ pools, as specified
	 * by num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d >max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * The assumption here is that all ports have the same configuration!
	 */
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (vmdq_pool_base == 0) {
		num_vmdq_queues = dev_info.max_rx_queues;
		num_queues = dev_info.max_rx_queues;
		if (num_tcs != num_vmdq_queues / num_pools) {
			printf("nb_tcs %d is invalid considering with"
				" nb_pools %d, nb_tcs * nb_pools should = %d\n",
				num_tcs, num_pools, num_vmdq_queues);
			return -1;
		}
	} else {
		queues_per_pool = dev_info.vmdq_queue_num /
				  dev_info.max_vmdq_pools;
		if (num_tcs > queues_per_pool) {
			printf("num_tcs %d > num of queues per pool %d\n",
				num_tcs, queues_per_pool);
			return -1;
		}
		num_vmdq_queues = num_pools * queues_per_pool;
		num_queues = vmdq_queue_base + num_vmdq_queues;
		printf("Configured vmdq pool num: %u,"
			" each vmdq pool has %u queues\n",
			num_pools, queues_per_pool);
	}

	if (port >= rte_eth_dev_count())
		return -1;

	retval = get_eth_conf(&port_conf);
	if (retval < 0)
		return retval;

	/*
	 * In this example all queues, including the pf queues, are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising only part of
	 * the rx/tx queues.
	 */
	retval = rte_eth_dev_configure(port, num_queues, num_queues, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) >
	    RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					NULL,
					mbuf_pool);
		if (retval < 0) {
			printf("initialize rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < num_queues; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					NULL);
		if (retval < 0) {
			printf("initialize tx queue %d failed\n", q);
			return retval;
		}
	}

	retval  = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/* Set mac for each pool.*/
	for (q = 0; q < num_pools; q++) {
		struct ether_addr mac;

		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}
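The per-pool MAC loop above builds addresses from pool_addr_template, which is defined elsewhere in the program. A plausible definition is sketched below; the prefix is an assumption (a locally administered 52:54:00 range), not the original value.

/* Assumed template: bytes 4 and 5 are overwritten with the port and pool
 * ids in the loop above. */
static struct ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x00, 0x00, 0x00}
};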
Example no. 6
/**
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
int
vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
	uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	struct rte_eth_udp_tunnel tunnel_udp;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	struct vxlan_conf *pconf = &vxdev;

	pconf->dst_port = udp_port;

	rte_eth_dev_info_get(port, &dev_info);

	if (dev_info.max_rx_queues > MAX_QUEUES) {
		rte_exit(EXIT_FAILURE,
			"please define MAX_QUEUES no less than %u in %s\n",
			dev_info.max_rx_queues, __FILE__);
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	txconf->txq_flags = 0;

	if (port >= rte_eth_dev_count())
		return -1;

	rx_rings = nb_devices;

	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
			&tx_ring_size);
	if (retval != 0)
		return retval;

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0)
			return retval;
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the device. */
	retval  = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Configure UDP port for UDP tunneling */
	tunnel_udp.udp_port = udp_port;
	tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
	retval = rte_eth_dev_udp_tunnel_port_add(port, &tunnel_udp);
	if (retval < 0)
		return retval;
	rte_eth_macaddr_get(port, &ports_eth_addr[port]);
	RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			port,
			ports_eth_addr[port].addr_bytes[0],
			ports_eth_addr[port].addr_bytes[1],
			ports_eth_addr[port].addr_bytes[2],
			ports_eth_addr[port].addr_bytes[3],
			ports_eth_addr[port].addr_bytes[4],
			ports_eth_addr[port].addr_bytes[5]);

	if (tso_segsz != 0) {
		struct rte_eth_dev_info dev_info;
		rte_eth_dev_info_get(port, &dev_info);
		if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
			RTE_LOG(WARNING, PORT,
				"hardware TSO offload is not supported\n");
	}
	return 0;
}
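A hedged usage sketch for vxlan_port_init(): initialize every available port with the same mempool. The wrapper name and the error handling are assumptions, not part of the original file.

static void
init_all_vxlan_ports(struct rte_mempool *mbuf_pool)
{
	uint16_t portid, nb_ports = rte_eth_dev_count();

	for (portid = 0; portid < nb_ports; portid++)
		if (vxlan_port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %u\n", portid);
}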
Example no. 7
static void
bond_port_init(struct rte_mempool *mbuf_pool)
{
	int retval;
	uint8_t i;
	uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;

	retval = rte_eth_bond_create("net_bonding0", BONDING_MODE_ALB,
			0 /*SOCKET_ID_ANY*/);
	if (retval < 0)
		rte_exit(EXIT_FAILURE,
				"Faled to create bond port\n");

	BOND_PORT = retval;

	rte_eth_dev_info_get(BOND_PORT, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
				BOND_PORT, retval);

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(BOND_PORT, &nb_rxd, &nb_txd);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
				"failed (res=%d)\n", BOND_PORT, retval);

	/* RX setup */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	retval = rte_eth_rx_queue_setup(BOND_PORT, 0, nb_rxd,
					rte_eth_dev_socket_id(BOND_PORT),
					&rxq_conf, mbuf_pool);
	if (retval < 0)
		rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
				BOND_PORT, retval);

	/* TX setup */
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
				rte_eth_dev_socket_id(BOND_PORT), &txq_conf);

	if (retval < 0)
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
				BOND_PORT, retval);

	for (i = 0; i < slaves_count; i++) {
		if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1)
			rte_exit(-1, "Oooops! adding slave (%u) to bond (%u) failed!\n",
					slaves[i], BOND_PORT);

	}

	retval  = rte_eth_dev_start(BOND_PORT);
	if (retval < 0)
		rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval);

	rte_eth_promiscuous_enable(BOND_PORT);

	struct ether_addr addr;

	rte_eth_macaddr_get(BOND_PORT, &addr);
	printf("Port %u MAC: ", (unsigned)BOND_PORT);
	PRINT_MAC(addr);
	printf("\n");
}
Example no. 8
static void
slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
{
	int retval;
	uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;

	if (!rte_eth_dev_is_valid_port(portid))
		rte_exit(EXIT_FAILURE, "Invalid port\n");

	rte_eth_dev_info_get(portid, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support,"
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			portid,
			port_conf.rx_adv_conf.rss_conf.rss_hf,
			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
				portid, retval);

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
				"failed (res=%d)\n", portid, retval);

	/* RX setup */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					rte_eth_dev_socket_id(portid),
					&rxq_conf,
					mbuf_pool);
	if (retval < 0)
		rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
				portid, retval);

	/* TX setup */
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid), &txq_conf);

	if (retval < 0)
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
				portid, retval);

	retval  = rte_eth_dev_start(portid);
	if (retval < 0)
		rte_exit(retval,
				"Start port %d failed (res=%d)",
				portid, retval);

	struct ether_addr addr;

	rte_eth_macaddr_get(portid, &addr);
	printf("Port %u MAC: ", portid);
	PRINT_MAC(addr);
	printf("\n");
}
Example no. 9
File: main.c Project: emmericp/dpdk
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_conf port_conf;
	uint16_t rxRings, txRings;
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;

	/*
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified in cmd line
	 */
	rte_eth_dev_info_get(port, &dev_info);
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	/*
	 * We allow processing only a subset of the VMDQ pools, as specified
	 * by num_pools on the command line.
	 */
	if (num_pools > max_nb_pools) {
		printf("num_pools %d >max_nb_pools %d\n",
			num_pools, max_nb_pools);
		return -1;
	}
	retval = get_eth_conf(&port_conf, max_nb_pools);
	if (retval < 0)
		return retval;

	/*
	 * NIC queues are divided into pf queues and vmdq queues.
	 * The assumption here is that all ports have the same configuration!
	 */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_pools * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base  = dev_info.vmdq_pool_base;

	printf("pf queue num: %u, configured vmdq pool num: %u,"
		" each vmdq pool has %u queues\n",
		num_pf_queues, num_pools, queues_per_pool);
	printf("vmdq queue base: %d pool base %d\n",
		vmdq_queue_base, vmdq_pool_base);
	if (port >= rte_eth_dev_count())
		return -1;

	/*
	 * Though in this example we only receive packets from the first queue
	 * of each pool and send packets through the first rte_lcore_count()
	 * tx queues of the vmdq queues, all queues, including pf queues, are
	 * set up. This is because VMDQ queues don't always start from zero,
	 * and the PMD layer doesn't support selectively initialising only
	 * part of the rx/tx queues.
	 */
	rxRings = (uint16_t)dev_info.max_rx_queues;
	txRings = (uint16_t)dev_info.max_tx_queues;
	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
				&txRingSize);
	if (retval != 0)
		return retval;
	if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
			RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
			port);
		return -1;
	}

	rte_eth_dev_info_get(port, &dev_info);
	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1;
	for (q = 0; q < rxRings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
					rxconf,
					mbuf_pool);
		if (retval < 0) {
			printf("initialise rx queue %d failed\n", q);
			return retval;
		}
	}

	for (q = 0; q < txRings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
					NULL);
		if (retval < 0) {
			printf("initialise tx queue %d failed\n", q);
			return retval;
		}
	}

	retval  = rte_eth_dev_start(port);
	if (retval < 0) {
		printf("port %d start failed\n", port);
		return retval;
	}

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	/*
	 * Set a MAC for each pool.
	 * There is no default MAC for the pools in i40e.
	 * Remove this after i40e fixes this issue.
	 */
	for (q = 0; q < num_pools; q++) {
		struct ether_addr mac;
		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			port, q,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
				q + vmdq_pool_base);
		if (retval) {
			printf("mac addr add failed at pool %d\n", q);
			return retval;
		}
	}

	return 0;
}
Example no. 10
/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	const uint16_t rx_rings = 1, tx_rings = 1;
	uint16_t nb_rxd = RX_RING_SIZE;
	uint16_t nb_txd = TX_RING_SIZE;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, nb_txd,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			   " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}
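A hedged sketch of the usual caller for this port_init(): initialize the EAL, create one shared mbuf pool, then call port_init() on every port. The constants, names and pool sizing below are assumptions, not the original main().

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250

int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	uint16_t portid, nb_ports;

	if (rte_eal_init(argc, argv) < 0)
		rte_exit(EXIT_FAILURE, "EAL initialization failed\n");

	nb_ports = rte_eth_dev_count();
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
			NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	for (portid = 0; portid < nb_ports; portid++)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %u\n", portid);

	return 0;
}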
Example no. 11
/**
 * Initialise an individual port:
 * - configure number of rx and tx rings
 * - set up each rx ring, to pull from the main mbuf pool
 * - set up each tx ring
 * - start the port and report its status to stdout
 */
static int
init_port(uint8_t port_num) {
        const uint16_t rx_rings = ONVM_NUM_RX_THREADS;
        uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
        /* Set the number of tx_rings equal to the tx threads. This mimics the onvm_mgr tx thread calculation. */
        const uint16_t tx_rings = rte_lcore_count() - rx_rings - ONVM_NUM_MGR_AUX_THREADS;
        uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;

        struct rte_eth_rxconf rxq_conf;
        struct rte_eth_txconf txq_conf;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf local_port_conf = port_conf;

        uint16_t q;
        int retval;

        printf("Port %u init ... \n", (unsigned)port_num);
        printf("Port %u socket id %u ... \n", (unsigned)port_num, (unsigned)rte_eth_dev_socket_id(port_num));
        printf("Port %u Rx rings %u ... \n", (unsigned)port_num, (unsigned)rx_rings);
        printf("Port %u Tx rings %u ... \n", (unsigned)port_num, (unsigned)tx_rings);
        fflush(stdout);

        /* Standard DPDK port initialisation - config port, then set up
         * rx and tx rings */
        rte_eth_dev_info_get(port_num, &dev_info);
        if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
                dev_info.flow_type_rss_offloads;
        if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
                        port_conf.rx_adv_conf.rss_conf.rss_hf) {
                printf("Port %u modified RSS hash function based on hardware support,"
                        "requested:%#"PRIx64" configured:%#"PRIx64"\n",
                        port_num,
                        port_conf.rx_adv_conf.rss_conf.rss_hf,
                        local_port_conf.rx_adv_conf.rss_conf.rss_hf);
        }

        if ((retval = rte_eth_dev_configure(port_num, rx_rings, tx_rings,
                &local_port_conf)) != 0)
                return retval;

        /* Adjust rx/tx ring sizes if not allowed by the ethernet device.
         * TODO: if this is adjusted, store the new values for future reference */
        retval = rte_eth_dev_adjust_nb_rx_tx_desc(
                port_num, &rx_ring_size, &tx_ring_size);
        if (retval < 0) {
                rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
                          port_num, retval);
        }

        rxq_conf = dev_info.default_rxconf;
        rxq_conf.offloads = local_port_conf.rxmode.offloads;
        for (q = 0; q < rx_rings; q++) {
                retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
                                rte_eth_dev_socket_id(port_num),
                                &rxq_conf, pktmbuf_pool);
                if (retval < 0) return retval;
        }

        txq_conf = dev_info.default_txconf;
        txq_conf.offloads = local_port_conf.txmode.offloads;
        for (q = 0; q < tx_rings; q++) {
                retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size,
                                rte_eth_dev_socket_id(port_num),
                                &txq_conf);
                if (retval < 0) return retval;
        }

        rte_eth_promiscuous_enable(port_num);

        retval = rte_eth_dev_start(port_num);
        if (retval < 0) return retval;

        printf("done: \n");

        return 0;
}