All six snippets below are taken from DPDK sources and show NUMA-aware allocation with rte_calloc_socket() and its relatives (rte_zmalloc_socket(), rte_zmalloc(), rte_realloc()).

Example #1
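From DPDK's lthread (performance-thread) library: creating a per-lcore scheduler. rte_calloc_socket() allocates the cache-line-aligned scheduler structure on the caller's NUMA socket before it is published via schedcore[] and rte_wmb().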
/*
 * Create a scheduler on the current lcore
 */
struct lthread_sched *_lthread_sched_create(size_t stack_size)
{
	int status;
	struct lthread_sched *new_sched;
	unsigned lcoreid = rte_lcore_id();

	RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);

	if (stack_size == 0)
		stack_size = LTHREAD_MAX_STACK_SIZE;

	new_sched =
	     rte_calloc_socket(NULL, 1, sizeof(struct lthread_sched),
				RTE_CACHE_LINE_SIZE,
				rte_socket_id());
	if (new_sched == NULL) {
		RTE_LOG(CRIT, LTHREAD,
			"Failed to allocate memory for scheduler\n");
		return NULL;
	}

	_lthread_key_pool_init();

	new_sched->stack_size = stack_size;
	new_sched->birth = rte_rdtsc();
	THIS_SCHED = new_sched;

	status = _lthread_sched_alloc_resources(new_sched);
	if (status != SCHED_ALLOC_OK) {
		RTE_LOG(CRIT, LTHREAD,
			"Failed to allocate resources for scheduler code = %d\n",
			status);
		rte_free(new_sched);
		return NULL;
	}

	bzero(&new_sched->ctx, sizeof(struct ctx));

	new_sched->lcore_id = lcoreid;

	schedcore[lcoreid] = new_sched;

	new_sched->run_flag = 1;

	DIAG_EVENT(new_sched, LT_DIAG_SCHED_CREATE, rte_lcore_id(), 0);

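	/* Ensure all prior stores to the scheduler are visible to other lcores. */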
	rte_wmb();
	return new_sched;
}
Example #2
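From the Solarflare (sfc) PMD: EFX datapath Tx queue creation. rte_zmalloc_socket() and rte_calloc_socket() place the queue structure, pending-descriptor array, and software ring on the requested socket, with a goto-based unwind chain releasing everything on failure.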
static int
sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr,
		   int socket_id,
		   const struct sfc_dp_tx_qcreate_info *info,
		   struct sfc_dp_txq **dp_txqp)
{
	struct sfc_efx_txq *txq;
	struct sfc_txq *ctrl_txq;
	int rc;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
					   EFX_TXQ_LIMIT(info->txq_entries),
					   sizeof(*txq->pend_desc), 0,
					   socket_id);
	if (txq->pend_desc == NULL)
		goto fail_pend_desc_alloc;

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
	if (ctrl_txq->evq->sa->tso) {
		rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
						 info->txq_entries, socket_id);
		if (rc != 0)
			goto fail_alloc_tsoh_objs;
	}

	txq->evq = ctrl_txq->evq;
	txq->ptr_mask = info->txq_entries - 1;
	txq->free_thresh = info->free_thresh;
	txq->dma_desc_size_max = info->dma_desc_size_max;

	*dp_txqp = &txq->dp;
	return 0;

fail_alloc_tsoh_objs:
	rte_free(txq->sw_ring);

fail_sw_ring_alloc:
	rte_free(txq->pend_desc);

fail_pend_desc_alloc:
	rte_free(txq);

fail_txq_alloc:
	return rc;
}
Example #3
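Also from the sfc PMD: (re)configuring the adapter's Tx queue info array. The first allocation uses rte_calloc_socket(); later reconfigurations shrink or grow the array with rte_realloc() and zero any newly added entries.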
int
sfc_tx_configure(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
	int rc = 0;

	sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
		     nb_tx_queues, sa->txq_count);

	/*
	 * The datapath implementation assumes absence of boundary
	 * limits on Tx DMA descriptors. Addition of these checks on
	 * datapath would simply make the datapath slower.
	 */
	if (encp->enc_tx_dma_desc_boundary != 0) {
		rc = ENOTSUP;
		goto fail_tx_dma_desc_boundary;
	}

	rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_tx_queues == sa->txq_count)
		goto done;

	if (sa->txq_info == NULL) {
		sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
						 sizeof(sa->txq_info[0]), 0,
						 sa->socket_id);
		if (sa->txq_info == NULL) {
			rc = ENOMEM;
			goto fail_txqs_alloc;
		}
	} else {
		struct sfc_txq_info *new_txq_info;

		if (nb_tx_queues < sa->txq_count)
			sfc_tx_fini_queues(sa, nb_tx_queues);

		new_txq_info =
			rte_realloc(sa->txq_info,
				    nb_tx_queues * sizeof(sa->txq_info[0]), 0);
		if (new_txq_info == NULL && nb_tx_queues > 0) {
			rc = ENOMEM;
			goto fail_txqs_realloc;
		}

		sa->txq_info = new_txq_info;
		if (nb_tx_queues > sa->txq_count)
			memset(&sa->txq_info[sa->txq_count], 0,
			       (nb_tx_queues - sa->txq_count) *
			       sizeof(sa->txq_info[0]));
	}

	while (sa->txq_count < nb_tx_queues) {
		rc = sfc_tx_qinit_info(sa, sa->txq_count);
		if (rc != 0)
			goto fail_tx_qinit_info;

		sa->txq_count++;
	}

done:
	return 0;

fail_tx_qinit_info:
fail_txqs_realloc:
fail_txqs_alloc:
	sfc_tx_close(sa);

fail_check_mode:
fail_tx_dma_desc_boundary:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}
Example #4
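From the mlx5 PMD (older multi-process support): a secondary process builds local copies of the shared device data. Queue pointer arrays come from rte_zmalloc(), and each Tx queue object is allocated with rte_calloc_socket() on the same socket as the primary's queue.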
/**
 * Configure secondary process queues from a private data pointer (primary
 * or secondary) and update burst callbacks. Can take place only once.
 *
 * All queues must have been previously created by the primary process to
 * avoid undefined behavior.
 *
 * @param priv
 *   Private data pointer from either primary or secondary process.
 *
 * @return
 *   Private data pointer from secondary process, NULL in case of error.
 */
struct priv *
mlx5_secondary_data_setup(struct priv *priv)
{
	unsigned int port_id = 0;
	struct mlx5_secondary_data *sd;
	void **tx_queues;
	void **rx_queues;
	unsigned int nb_tx_queues;
	unsigned int nb_rx_queues;
	unsigned int i;

	/* priv must be valid at this point. */
	assert(priv != NULL);
	/* priv->dev must also be valid, but it may point to local memory
	 * from another process (possibly at the same address), so it must
	 * not be dereferenced yet. */
	assert(priv->dev != NULL);
	/* Determine port ID by finding out where priv comes from. */
	while (1) {
		sd = &mlx5_secondary_data[port_id];
		rte_spinlock_lock(&sd->lock);
		/* Primary process? */
		if (sd->primary_priv == priv)
			break;
		/* Secondary process? */
		if (sd->data.dev_private == priv)
			break;
		rte_spinlock_unlock(&sd->lock);
		if (++port_id == RTE_DIM(mlx5_secondary_data))
			port_id = 0;
	}
	/* Switch to secondary private structure. If private data has already
	 * been updated by another thread, there is nothing else to do. */
	priv = sd->data.dev_private;
	if (priv->dev->data == &sd->data)
		goto end;
	/* Sanity checks. Secondary private structure is supposed to point
	 * to local eth_dev, itself still pointing to the shared device data
	 * structure allocated by the primary process. */
	assert(sd->shared_dev_data != &sd->data);
	assert(sd->data.nb_tx_queues == 0);
	assert(sd->data.tx_queues == NULL);
	assert(sd->data.nb_rx_queues == 0);
	assert(sd->data.rx_queues == NULL);
	assert(priv != sd->primary_priv);
	assert(priv->dev->data == sd->shared_dev_data);
	assert(priv->txqs_n == 0);
	assert(priv->txqs == NULL);
	assert(priv->rxqs_n == 0);
	assert(priv->rxqs == NULL);
	nb_tx_queues = sd->shared_dev_data->nb_tx_queues;
	nb_rx_queues = sd->shared_dev_data->nb_rx_queues;
	/* Allocate local storage for queues. */
	tx_queues = rte_zmalloc("secondary ethdev->tx_queues",
				sizeof(sd->data.tx_queues[0]) * nb_tx_queues,
				RTE_CACHE_LINE_SIZE);
	rx_queues = rte_zmalloc("secondary ethdev->rx_queues",
				sizeof(sd->data.rx_queues[0]) * nb_rx_queues,
				RTE_CACHE_LINE_SIZE);
	if (tx_queues == NULL || rx_queues == NULL)
		goto error_unlocked;
	/* Lock to prevent control operations during setup. */
	priv_lock(priv);
	/* TX queues. */
	for (i = 0; i != nb_tx_queues; ++i) {
		struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
		struct txq *txq;

		if (primary_txq == NULL)
			continue;
		txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0,
					primary_txq->socket);
		if (txq != NULL) {
			if (txq_setup(priv->dev,
				      txq,
				      primary_txq->elts_n * MLX5_PMD_SGE_WR_N,
				      primary_txq->socket,
				      NULL) == 0) {
				txq->stats.idx = primary_txq->stats.idx;
				tx_queues[i] = txq;
				continue;
			}
			rte_free(txq);
		}
		while (i) {
			txq = tx_queues[--i];
			/* Skipped queues leave NULL entries in the array. */
			if (txq == NULL)
				continue;
			txq_cleanup(txq);
			rte_free(txq);
		}
		goto error;
	}
	/* RX queues. */
	for (i = 0; i != nb_rx_queues; ++i) {
		struct rxq *primary_rxq = (*sd->primary_priv->rxqs)[i];

		if (primary_rxq == NULL)
			continue;
		/* Not supported yet. */
		rx_queues[i] = NULL;
	}
	/* Update everything. */
	priv->txqs = (void *)tx_queues;
	priv->txqs_n = nb_tx_queues;
	priv->rxqs = (void *)rx_queues;
	priv->rxqs_n = nb_rx_queues;
	sd->data.rx_queues = rx_queues;
	sd->data.tx_queues = tx_queues;
	sd->data.nb_rx_queues = nb_rx_queues;
	sd->data.nb_tx_queues = nb_tx_queues;
	sd->data.dev_link = sd->shared_dev_data->dev_link;
	sd->data.mtu = sd->shared_dev_data->mtu;
	memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state,
	       sizeof(sd->data.rx_queue_state));
	memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state,
	       sizeof(sd->data.tx_queue_state));
	sd->data.dev_flags = sd->shared_dev_data->dev_flags;
	/* Use local data from now on. */
	rte_mb();
	priv->dev->data = &sd->data;
	rte_mb();
	priv->dev->tx_pkt_burst = mlx5_tx_burst;
	priv->dev->rx_pkt_burst = removed_rx_burst;
	priv_unlock(priv);
end:
	/* More sanity checks. */
	assert(priv->dev->tx_pkt_burst == mlx5_tx_burst);
	assert(priv->dev->rx_pkt_burst == removed_rx_burst);
	assert(priv->dev->data == &sd->data);
	rte_spinlock_unlock(&sd->lock);
	return priv;
error:
	priv_unlock(priv);
error_unlocked:
	rte_free(tx_queues);
	rte_free(rx_queues);
	rte_spinlock_unlock(&sd->lock);
	return NULL;
}
Example #5
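From DPDK's PMD performance test: ports are configured, queues set up, and devices started; the tx_burst array of mbuf pointers is then allocated with rte_calloc_socket() on the socket under test.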
static int
test_pmd_perf(void)
{
	uint16_t nb_ports, num, nb_lcores, slave_id = (uint16_t)-1;
	uint16_t nb_rxd = MAX_TRAFFIC_BURST;
	uint16_t nb_txd = MAX_TRAFFIC_BURST;
	uint16_t portid;
	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
	int socketid = -1;
	int ret;

	printf("Start PMD RXTX cycles cost test.\n");

	signal(SIGUSR1, signal_handler);
	signal(SIGUSR2, signal_handler);

	nb_ports = rte_eth_dev_count();
	if (nb_ports < NB_ETHPORTS_USED) {
		printf("At least %u port(s) used for perf. test\n",
		       NB_ETHPORTS_USED);
		return -1;
	}

	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	nb_lcores = rte_lcore_count();

	memset(lcore_conf, 0, sizeof(lcore_conf));
	init_lcores();

	init_mbufpool(NB_MBUF);

	if (sc_flag == SC_CONTINUOUS) {
		nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
		nb_txd = RTE_TEST_TX_DESC_DEFAULT;
	}
	printf("CONFIG RXD=%d TXD=%d\n", nb_rxd, nb_txd);

	reset_count();
	num = 0;
	for (portid = 0; portid < nb_ports; portid++) {
		if (socketid == -1) {
			socketid = rte_eth_dev_socket_id(portid);
			slave_id = alloc_lcore(socketid);
			if (slave_id == (uint16_t)-1) {
				printf("No avail lcore to run test\n");
				return -1;
			}
			printf("Performance test runs on lcore %u socket %u\n",
			       slave_id, socketid);
		}

		if (socketid != rte_eth_dev_socket_id(portid)) {
			printf("Skip port %d\n", portid);
			continue;
		}

		/* configure the port */
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					    nb_tx_queue, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%d\n",
				 ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		printf("Port %u ", portid);
		print_ethaddr("Address:", &ports_eth_addr[portid]);
		printf("\n");

		/* tx queue setup */
		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
					     socketid, &tx_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_tx_queue_setup: err=%d, "
				"port=%d\n", ret, portid);

		/* rx queue setup */
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
						socketid, &rx_conf,
						mbufpool[socketid]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
				 "port=%d\n", ret, portid);

		/* Start device */
		stop = 0;
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		/* always enable promiscuous mode */
		rte_eth_promiscuous_enable(portid);

		lcore_conf[slave_id].portlist[num++] = portid;
		lcore_conf[slave_id].nb_ports++;
	}
	check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	if (tx_burst == NULL) {
		tx_burst = (struct rte_mbuf **)
			rte_calloc_socket("tx_buff",
					  MAX_TRAFFIC_BURST * nb_ports,
					  sizeof(void *),
					  RTE_CACHE_LINE_SIZE, socketid);
		if (!tx_burst)
			return -1;
	}

	init_traffic(mbufpool[socketid],
		     tx_burst, MAX_TRAFFIC_BURST * nb_ports);

	printf("Generate %d packets @socket %d\n",
	       MAX_TRAFFIC_BURST * nb_ports, socketid);

	if (sc_flag == SC_CONTINUOUS) {
		/* do both rxtx by default */
		if (NULL == do_measure)
			do_measure = measure_rxtx;

		rte_eal_remote_launch(main_loop, NULL, slave_id);

		if (rte_eal_wait_lcore(slave_id) < 0)
			return -1;
	} else if (sc_flag == SC_BURST_POLL_FIRST ||
		   sc_flag == SC_BURST_XMIT_FIRST)
		if (exec_burst(sc_flag, slave_id) < 0)
			return -1;

	/* port tear down */
	for (portid = 0; portid < nb_ports; portid++) {
		if (socketid != rte_eth_dev_socket_id(portid))
			continue;

		rte_eth_dev_stop(portid);
	}

	return 0;
}
Example #6
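The receive half of the same performance test: rte_calloc_socket() allocates a per-lcore array large enough for all expected packets, which the polling loop fills via rte_eth_rx_burst() until everything arrives or the idle budget runs out.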
static inline int
poll_burst(void *args)
{
#define MAX_IDLE           (10000)
	unsigned lcore_id;
	struct rte_mbuf **pkts_burst;
	uint64_t diff_tsc, cur_tsc;
	uint16_t next[RTE_MAX_ETHPORTS];
	struct lcore_conf *conf;
	uint32_t pkt_per_port = *((uint32_t *)args);
	unsigned i, portid, nb_rx = 0;
	uint64_t total;
	uint64_t timeout = MAX_IDLE;

	lcore_id = rte_lcore_id();
	conf = &lcore_conf[lcore_id];
	if (conf->status != LCORE_USED)
		return 0;

	total = pkt_per_port * conf->nb_ports;
	printf("start to receive total expect %"PRIu64"\n", total);

	pkts_burst = (struct rte_mbuf **)
		rte_calloc_socket("poll_burst",
				  total, sizeof(void *),
				  RTE_CACHE_LINE_SIZE, conf->socketid);
	if (!pkts_burst)
		return -1;

	for (i = 0; i < conf->nb_ports; i++) {
		portid = conf->portlist[i];
		next[portid] = i * pkt_per_port;
	}

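	/* Busy-wait until the start flag is set. */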
	while (!rte_atomic64_read(&start))
		;

	cur_tsc = rte_rdtsc();
	while (total) {
		for (i = 0; i < conf->nb_ports; i++) {
			portid = conf->portlist[i];
			nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
						 &pkts_burst[next[portid]],
						 MAX_PKT_BURST);
			if (unlikely(nb_rx == 0)) {
				timeout--;
				if (unlikely(timeout == 0))
					goto timeout;
				continue;
			}
			next[portid] += nb_rx;
			total -= nb_rx;
		}
	}
timeout:
	diff_tsc = rte_rdtsc() - cur_tsc;

	printf("%"PRIu64" packets lost, IDLE %"PRIu64" times\n",
	       total, MAX_IDLE - timeout);

	/* clean up */
	total = pkt_per_port * conf->nb_ports - total;
	for (i = 0; i < total; i++)
		rte_pktmbuf_free(pkts_burst[i]);

	rte_free(pkts_burst);

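	/* Return average TSC cycles per received packet, or -1 if none arrived. */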
	if (total > 0)
		return diff_tsc / total;
	else
		return -1;
}