Example #1
void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets)
{
	plog_info("dump %u %u %u\n", lcore_id, task_id, nb_packets);
	if (lcore_id >= RTE_MAX_LCORE) {
		plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE - 1);
	}
	else if (task_id >= lcore_cfg[lcore_id].nb_tasks) {
		plog_warn("task_id to high, should be in [0, %u]\n", lcore_cfg[lcore_id].nb_tasks - 1);
	}
	else {
		rte_atomic32_set(&lcore_cfg[lcore_id].task[task_id]->aux->task_dump.n_print, nb_packets);
		lconf_set_dump_req(&lcore_cfg[lcore_id], 1);
	}
}
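
A minimal usage sketch (the values are illustrative, not from the original source): ask task 0 on lcore 2 to dump its next 32 packets.

	cmd_dump(2, 0, 32);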
Example #2
void start_cores(uint32_t *cores, int count)
{
	for (int i = 0; i < count; ++i) {
		if (!dppd_core_active(cores[i], 0)) {
			plog_warn("Can't start core %u: core is not active\n", cores[i]);
		}
		else if (rte_eal_get_lcore_state(cores[i]) != RUNNING) {
			plog_info("Starting core %u\n", cores[i]);
			lconf_set_terminated(&lcore_cfg[cores[i]], 0);
			rte_eal_remote_launch(dppd_work_thread, NULL, cores[i]);
		}
		else {
			plog_warn("Core %u is already running\n", cores[i]);
		}
	}
}
Example #3
void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id)
{
	struct lcore_cfg *lconf;
	struct rte_ring *ring;
	struct task_args *targ;
	uint32_t count;

	if (!dppd_core_active(lcore_id, 0)) {
		plog_info("lcore %u is not active\n", lcore_id);
		return;
	}
	lconf = &lcore_cfg[lcore_id];
	if (task_id >= lconf->nb_tasks) {
		plog_warn("Invalid task index %u: lcore %u has %u tasks\n", task_id, lcore_id, lconf->nb_tasks);
		return;
	}

	targ = &lconf->targs[task_id];
	plog_info("Core %u task %u: %u rings\n", lcore_id, task_id, targ->nb_rxrings);
	for (uint8_t i = 0; i < targ->nb_rxrings; ++i) {
		ring = targ->rx_rings[i];
		count = ring->prod.mask + 1;
		plog_info("\tRing %u:\n", i);
		plog_info("\t\tFlags: %s,%s\n", ring->flags & RING_F_SP_ENQ? "sp":"mp", ring->flags & RING_F_SC_DEQ? "sc":"mc");
		plog_info("\t\tMemory size: %zu bytes\n", rte_ring_get_memsize(count));
		plog_info("\t\tOccupied: %u/%u\n", rte_ring_count(ring), count);
	}
}
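
For context, a minimal sketch (not part of the original source) of creating a ring that cmd_ringinfo would report as "sp,sc": a single-producer, single-consumer ring of 4096 slots on NUMA socket 0, using the standard DPDK API.

	struct rte_ring *ring = rte_ring_create("core2_task0_rx", 4096, 0,
						RING_F_SP_ENQ | RING_F_SC_DEQ);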
Example #4
static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
{
	for (int i = 0; i < targ->nb_rxports; i++) {
		uint8_t if_port = targ->rx_port_queue[i].port;

		if (if_port == OUT_DISCARD) {
			return;
		}

		PROX_PANIC(!prox_port_cfg[if_port].active, "Port %u not used, aborting...\n", if_port);

		/* Ring-backed port: reset the RX queue count so the
		   queue index assigned below starts from 0. */
		if (prox_port_cfg[if_port].rx_ring[0] != '\0') {
			prox_port_cfg[if_port].n_rxq = 0;
		}

		targ->rx_port_queue[i].queue = prox_port_cfg[if_port].n_rxq;
		prox_port_cfg[if_port].pool[targ->rx_port_queue[i].queue] = targ->pool;
		prox_port_cfg[if_port].pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
		prox_port_cfg[if_port].n_rxq++;

		int dsocket = prox_port_cfg[if_port].socket;
		if (dsocket != -1 && dsocket != socket) {
			plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket);
		}
	}
}
Example #5
static void warn_inactive_cores(uint32_t *cores, int count, const char *prefix)
{
	for (int i = 0; i < count; ++i) {
		if (!prox_core_active(cores[i], 0)) {
			plog_warn("%s %u: core is not active\n", prefix, cores[i]);
		}
	}
}
Example #6
static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
{
	uint8_t if_port;

	for (uint8_t i = 0; i < targ->nb_txports; ++i) {
		if_port = targ->tx_port_queue[i].port;

		PROX_PANIC(if_port == OUT_DISCARD, "port misconfigured, exiting\n");

		PROX_PANIC(!prox_port_cfg[if_port].active, "\tPort %u not used, skipping...\n", if_port);

		int dsocket = prox_port_cfg[if_port].socket;
		if (dsocket != -1 && dsocket != socket) {
			plog_warn("TX core on socket %d while device on socket %d\n", socket, dsocket);
		}

		if (prox_port_cfg[if_port].tx_ring[0] == '\0') {
			/* Physical port: allocate a dedicated TX queue. */
			targ->tx_port_queue[i].queue = prox_port_cfg[if_port].n_txq;
			prox_port_cfg[if_port].n_txq++;
		} else {
			/* Ring-backed port: a single queue is enough. */
			prox_port_cfg[if_port].n_txq = 1;
			targ->tx_port_queue[i].queue = 0;
		}
		/* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
		   the tasks up to the task transmitting to the port
		   uses refcnt. */
		if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1)) {
			prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
			plog_info("\t\tEnabling No refcnt on port %d\n", if_port);
		}
		else {
			plog_info("\t\tRefcnt used on port %d\n", if_port);
		}

		/* By default OFFLOAD is enabled, but if the whole
		   chain has NOOFFLOADS set all the way until the
		   first task that receives from a port, it will be
		   disabled for the destination port. */
		if (chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, 1)) {
			prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
			plog_info("\t\tDisabling TX offloads on port %d\n", if_port);
		} else {
			plog_info("\t\tEnabling TX offloads on port %d\n", if_port);
		}

		/* By default NOMULTSEGS is disabled, as drivers/NICs might split packets on RX.
		   It should only be enabled when we know for sure that the RX does not split packets.
		   Set the ETH_TXQ_FLAGS_NOMULTSEGS flag if none of the tasks up to the task
		   transmitting to the port uses multsegs. */
		if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, 0)) {
			prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
			plog_info("\t\tEnabling No MultiSegs on port %d\n", if_port);
		}
		else {
			plog_info("\t\tMultiSegs used on port %d\n", if_port);
		}
	}
}
Example #7
void cmd_thread_info(uint8_t lcore_id, uint8_t task_id)
{
	plog_info("thread_info %u %u \n", lcore_id, task_id);
	if (lcore_id > RTE_MAX_LCORE) {
		plog_warn("core_id to high, maximum allowed is: %u\n", RTE_MAX_LCORE);
	}
	if (!dppd_core_active(lcore_id, 0)) {
		plog_warn("lcore %u is not active\n", lcore_id);
		return;
	}
	if (task_id >= lcore_cfg[lcore_id].nb_tasks) {
		plog_warn("task_id to high, should be in [0, %u]\n", lcore_cfg[lcore_id].nb_tasks - 1);
		return;
	}
	if (lcore_cfg[lcore_id].thread_x == thread_qos) {
		struct task_qos *task;
		if (task_id > 0) {
			plog_warn("for QoS only one port per core\n");
			return;
		}
		task = (struct task_qos *)(lcore_cfg[lcore_id].task[task_id]);
		plog_info("core %d, task %d: %d mbufs stored in QoS\n", lcore_id, task_id, task->nb_buffered_pkts);
#ifdef ENABLE_EXTRA_USER_STATISTICS
	}
	else if (lcore_cfg[lcore_id].targs[task_id].mode == QINQ_ENCAP4) {
		struct task_qinq_encap4 *task;
		task = (struct task_qinq_encap4 *)(lcore_cfg[lcore_id].task[task_id]);
		for (int i = 0; i < task->n_users; i++) {
			if (task->stats_per_user[i])
				plog_info("User %d: %d packets\n", i, task->stats_per_user[i]);
		}
#endif
	}
	else {
		// Only QoS thread info so far
		plog_info("core %d, task %d: not a qos core(%p != %p)\n", lcore_id, task_id, lcore_cfg[lcore_id].thread_x,  thread_qos);
	}
}
Example #8
void cmd_port_down(uint8_t port_id)
{
	int err;

	if (!port_is_valid(port_id)) {
		return;
	}

	if ((err = rte_eth_dev_set_link_down(port_id)) == 0) {
		plog_info("Bringing port %d down\n", port_id);
	}
	else {
		plog_warn("Failed to bring port %d down with error %d\n", port_id, err);
	}
}
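
A symmetric "port up" command is the natural counterpart; a minimal sketch, assuming the same port_is_valid() helper and using the standard rte_eth_dev_set_link_up() API.

void cmd_port_up(uint8_t port_id)
{
	int err;

	if (!port_is_valid(port_id)) {
		return;
	}

	if ((err = rte_eth_dev_set_link_up(port_id)) == 0) {
		plog_info("Bringing port %d up\n", port_id);
	}
	else {
		plog_warn("Failed to bring port %d up with error %d\n", port_id, err);
	}
}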
Example #9
void start_cores(uint32_t *cores, int count, int task_id)
{
	int n_started_cores = 0;
	uint32_t started_cores[RTE_MAX_LCORE];
	struct task_args *targ;

	warn_inactive_cores(cores, count, "Can't start core");

	for (int i = 0; i < count; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[cores[i]];

		if (lconf->n_tasks_run != lconf->n_tasks_all) {
			if (task_id == -1) {
				for (uint8_t tid = 0; tid < lconf->n_tasks_all; ++tid) {
					targ = &lconf->targs[tid];
					start_l3(targ);
				}
			} else {
				targ = &lconf->targs[task_id];
				start_l3(targ);
			}
			lconf->msg.type = LCONF_MSG_START;
			lconf->msg.task_id = task_id;
			lconf_set_req(lconf);
			if (task_id == -1)
				plog_info("Starting core %u (all tasks)\n", cores[i]);
			else
				plog_info("Starting core %u task %u\n", cores[i], task_id);
			started_cores[n_started_cores++] = cores[i];
			lconf->flags |= LCONF_FLAG_RUNNING;
			rte_eal_remote_launch(lconf_run, NULL, cores[i]);
		}
		else {
			plog_warn("Core %u is already running all its tasks\n", cores[i]);
		}
	}

	/* This function is expected to block, so wait here until each
	   started core has consumed its message. */
	for (int i = 0; i < n_started_cores; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[started_cores[i]];
		plog_info("Waiting for core %u to start...", started_cores[i]);
		if (wait_command_handled(lconf) == -1) return;
		plog_info(" OK\n");
	}
}
Example #10
void stop_cores(uint32_t *cores, int count)
{
	for (int i = 0; i < count; ++i) {
		if (!dppd_core_active(cores[i], 0)) {
			plog_warn("Can't stop core %u: core is not active\n", cores[i]);
		} else {
			lconf_set_terminated(&lcore_cfg[cores[i]], 1);
		}
	}

	for (int i = 0; i < count; ++i) {
		if (dppd_core_active(cores[i], 0)) {
			if ((rte_eal_get_lcore_state(cores[i]) == RUNNING) ||
			    (rte_eal_get_lcore_state(cores[i]) == FINISHED)) {
				plog_info("Stopping core %u...", cores[i]);
				rte_eal_wait_lcore(cores[i]);
				plog_info(" OK\n");
			}
			else {
				plog_info("core %u in state %d\n", cores[i], rte_eal_get_lcore_state(cores[i]));
			}
		}
	}
}
Example #11
static void args_default_callback(const char *string)
{
	plog_warn("No handler for: %s! Run the program with the -h argument for help\n", string);
}