Code Example #1
File: main.c Project: nvf-crucio/PROX
static void setup_all_task_structs(void)
{
	struct lcore_cfg *lconf;
	uint32_t lcore_id = -1;
	struct task_base *tmaster = NULL;

	while(prox_core_next(&lcore_id, 1) == 0) {
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			if (task_is_master(&lconf->targs[task_id])) {
				plog_info("\tInitializing MASTER struct for core %d task %d\n", lcore_id, task_id);
				lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
				tmaster = lconf->tasks_all[task_id];
			}
		}
	}
	PROX_PANIC(tmaster == NULL, "Can't initialize master task\n");
	lcore_id = -1;

	while(prox_core_next(&lcore_id, 1) == 0) {
		lconf = &lcore_cfg[lcore_id];
		plog_info("\tInitializing struct for core %d with %d task\n", lcore_id, lconf->n_tasks_all);
		for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
			if (!task_is_master(&lconf->targs[task_id])) {
				plog_info("\tInitializing struct for core %d task %d\n", lcore_id, task_id);
				lconf->targs[task_id].tmaster = tmaster;
				lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
			}
		}
	}
}
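
All of the PROX and DPPD examples on this page use plog_info as a printf-style logger. A minimal sketch of the calling convention, assuming a project log header that declares plog_info(fmt, ...) with printf semantics and no automatic newline (the header name and helper below are illustrative, not taken from PROX):

#include <stdint.h>
#include "plog.h"	/* assumption: project log header declaring plog_info(fmt, ...) */

/* Illustrative helper: log a one-line summary for a core.
   plog_info() does not append a newline, so the format string ends with \n. */
static void log_core_summary(uint32_t lcore_id, uint8_t n_tasks)
{
	plog_info("Core %u: %u task(s) configured\n", lcore_id, n_tasks);
}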
Code Example #2
File: commands.c Project: gonzopancho/dppd-BNG
void cmd_rx_distr_show(uint32_t lcore_id)
{
	for (uint32_t i = 0; i < lcore_cfg[lcore_id].nb_tasks; ++i) {
		struct task_base *t = lcore_cfg[lcore_id].task[i];
		plog_info("t[%u]: ", i);
		for (uint32_t j = 0; j < sizeof(t->aux->rx_bucket)/sizeof(t->aux->rx_bucket[0]); ++j) {
			plog_info("%u ", t->aux->rx_bucket[j]);
		}
		plog_info("\n");
	}
}
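
The bucket count above is computed with the usual sizeof(array)/sizeof(array[0]) idiom. A self-contained sketch of the same pattern outside of DPPD, with ARRAY_LEN and plain printf as illustrative stand-ins:

#include <stdio.h>

#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0]))	/* illustrative helper, not from DPPD */

int main(void)
{
	unsigned rx_bucket[8] = {1, 2, 3, 4, 0, 0, 0, 0};

	/* Print one histogram row, mirroring cmd_rx_distr_show() above. */
	for (size_t j = 0; j < ARRAY_LEN(rx_bucket); ++j)
		printf("%u ", rx_bucket[j]);
	printf("\n");
	return 0;
}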
Code Example #3
File: handle_lb_qinq.c Project: nvf-crucio/PROX
static void init_task_lb_qinq(struct task_base *tbase, struct task_args *targ)
{
	struct task_lb_qinq *task = (struct task_lb_qinq *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->qinq_tag = targ->qinq_tag;
	task->nb_worker_threads = targ->nb_worker_threads;
	task->bit_mask = rte_is_power_of_2(targ->nb_worker_threads) ? targ->nb_worker_threads - 1 : 0xff;

	/* The load distributor sends to a set of cores. These cores
	   are responsible for handling a set of flows identified by
	   a QinQ tag. The load distributor identifies the flows and
	   forwards them to the appropriate worker. The mapping from
	   flow to worker is stored in the worker_table. Build the
	   worker_table by asking each worker which flows it handles. */

	task->worker_table = prox_zmalloc(0x1000000, socket_id);
	for (int i = 0; i < targ->nb_worker_threads; ++i) {
		struct core_task ct = targ->core_task_set[0].core_task[i];
		struct task_args *t = core_targ_get(ct.core, ct.task);

		PROX_PANIC(t->task_init->flow_iter.beg == NULL,
			   "Load distributor can't find flows owned by destination worker %d\n", i);

		struct flow_iter *it = &t->task_init->flow_iter;

		int cnt = 0;
		for (it->beg(it, t); !it->is_end(it, t); it->next(it, t)) {
			uint16_t svlan = it->get_svlan(it, t);
			uint16_t cvlan = it->get_cvlan(it, t);

			task->worker_table[PKT_TO_LUTQINQ(svlan, cvlan)] = i;
		}

	}

	/* Check which protocols we are allowed to send to worker tasks */
	for (int i = 0; i < MAX_PROTOCOLS; ++i) {
		int is_active = !!targ->core_task_set[i].n_elems;
		task->protocols_mask |= is_active << i;
	}
	plog_info("\t\ttask_lb_qinq protocols_mask = 0x%x\n", task->protocols_mask);

	if (targ->task_init->flag_features & TASK_FEATURE_LUT_QINQ_RSS)
		tbase->flags |=  BASE_FLAG_LUT_QINQ_RSS;
	if (targ->task_init->flag_features & TASK_FEATURE_LUT_QINQ_HASH)
		tbase->flags |=  BASE_FLAG_LUT_QINQ_HASH;
	plog_info("\t\ttask_lb_qinq flags = 0x%x\n", tbase->flags);
}
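
The loop above walks each worker's flows through the flow_iter callback protocol (beg, is_end, next, plus the per-field getters). As a hedged sketch of the same iteration pattern, here is a hypothetical helper that only counts the flows a worker owns; count_worker_flows is illustrative and not part of PROX:

/* Hypothetical helper: count the flows owned by one worker, using the
   same flow_iter protocol as init_task_lb_qinq() above. */
static int count_worker_flows(struct flow_iter *it, struct task_args *t)
{
	int cnt = 0;

	for (it->beg(it, t); !it->is_end(it, t); it->next(it, t))
		cnt++;
	return cnt;
}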
Code Example #4
File: commands.c Project: gonzopancho/dppd-BNG
static int port_is_valid(uint8_t port_id)
{
	if (port_id > DPPD_MAX_PORTS) {
		plog_info("requested port is higher than highest supported port ID (%u)\n", DPPD_MAX_PORTS);
		return 0;
	}

	struct dppd_port_cfg* port_cfg = &dppd_port_cfg[port_id];
	if (!port_cfg->active) {
		plog_info("Port %u is not active\n", port_id);
		return 0;
	}
	return 1;
}
Code Example #5
File: commands.c Project: gonzopancho/dppd-BNG
void cmd_mem_layout(void)
{
	const struct rte_memseg* memseg = rte_eal_get_physmem_layout();

	for (uint32_t i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (memseg[i].addr == NULL)
			break;

		const char *sz_str;
		switch (memseg[i].hugepage_sz >> 20) {
		case 2:
			sz_str = "2MB";
			break;
		case 1024:
			sz_str = "1GB";
			break;
		default:
			sz_str = "??";
		}

		plog_info("Segment %u: [%#lx-%#lx] at %p using %zu pages of %s\n",
			i,
			memseg[i].phys_addr,
			memseg[i].phys_addr + memseg[i].len,
			memseg[i].addr,
			memseg[i].len/memseg[i].hugepage_sz, sz_str);
	}
}
Code Example #6
File: main.c Project: nvf-crucio/PROX
static void __attribute__((noreturn)) prox_usage(const char *prgname)
{
	plog_info("\nUsage: %s [-f CONFIG_FILE] [-a|-e] [-m|-s|-i] [-w DEF] [-u] [-t]\n"
		  "\t-f CONFIG_FILE : configuration file to load, ./prox.cfg by default\n"
		  "\t-l LOG_FILE : log file name, ./prox.log by default\n"
		  "\t-p : include PID in log file name if default log file is used\n"
		  "\t-o DISPLAY: Set display to use, can be 'curses' (default), 'cli' or 'none'\n"
		  "\t-v verbosity : initial logging verbosity\n"
		  "\t-a : autostart all cores (by default)\n"
		  "\t-e : don't autostart\n"
		  "\t-n : Create NULL devices instead of using PCI devices, useful together with -i\n"
		  "\t-m : list supported task modes and exit\n"
		  "\t-s : check configuration file syntax and exit\n"
		  "\t-i : check initialization sequence and exit\n"
		  "\t-u : Listen on UDS /tmp/prox.sock\n"
		  "\t-t : Listen on TCP port 8474\n"
		  "\t-q : Pass argument to Lua interpreter, useful to define variables\n"
		  "\t-w : define variable using syntax varname=value\n"
		  "\t     takes precedence over variables defined in CONFIG_FILE\n"
		  "\t-k : Log statistics to file \"stats_dump\" in current directory\n"
		  "\t-d : Run as daemon, the parent process will block until PROX is not initialized\n"
		  "\t-z : Ignore CPU topology, implies -i\n"
		  "\t-r : Change initial screen refresh rate. If set to a lower than 0.001 seconds,\n"
		  "\t	  screen refreshing will be disabled\n"
		  , prgname);
	exit(EXIT_FAILURE);
}
Code Example #7
File: commands.c Project: nvf-crucio/PROX
void start_cores(uint32_t *cores, int count, int task_id)
{
	int n_started_cores = 0;
	uint32_t started_cores[RTE_MAX_LCORE];
	struct task_args *targ;

	warn_inactive_cores(cores, count, "Can't start core");

	for (int i = 0; i < count; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[cores[i]];

		if (lconf->n_tasks_run != lconf->n_tasks_all) {
			if (task_id == -1) {
				for (uint8_t tid = 0; tid < lconf->n_tasks_all; ++tid) {
					targ = &lconf->targs[tid];
					start_l3(targ);
				}
			} else {
				targ = &lconf->targs[task_id];
				start_l3(targ);
			}
			lconf->msg.type = LCONF_MSG_START;
			lconf->msg.task_id = task_id;
			lconf_set_req(lconf);
			if (task_id == -1)
				plog_info("Starting core %u (all tasks)\n", cores[i]);
			else
				plog_info("Starting core %u task %u\n", cores[i], task_id);
			started_cores[n_started_cores++] = cores[i];
			lconf->flags |= LCONF_FLAG_RUNNING;
			rte_eal_remote_launch(lconf_run, NULL, cores[i]);
		}
		else {
			plog_warn("Core %u is already running all its tasks\n", cores[i]);
		}
	}

	/* This function is blocking, so detect when each core has
	   consumed the message. */
	for (int i = 0; i < n_started_cores; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[started_cores[i]];
		plog_info("Waiting for core %u to start...", started_cores[i]);
		if (wait_command_handled(lconf) == -1) return;
		plog_info(" OK\n");
	}
}
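
A hypothetical invocation of start_cores() as defined above; the core IDs are illustrative, and task_id == -1 requests all tasks on each listed core, as handled in the loop:

/* Illustrative usage, not from PROX: start every task on cores 1 and 2. */
static void start_two_cores_example(void)
{
	uint32_t cores_to_start[] = {1, 2};

	start_cores(cores_to_start, 2, -1);
}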
Code Example #8
File: bgp_fsm.c Project: antonywcl/AR-5315u_PLD
/* This function is the first starting point of all BGP connections. It
   tries to connect to the remote peer with non-blocking IO. */
int
bgp_start (struct peer *peer)
{
  int status;

  /* If the peer is passive mode, force to move to Active mode. */
  if (CHECK_FLAG (peer->flags, PEER_FLAG_PASSIVE))
    {
      BGP_EVENT_ADD (peer, TCP_connection_open_failed);
      return 0;
    }

  status = bgp_connect (peer);

  switch (status)
    {
    case connect_error:
      if (BGP_DEBUG (fsm, FSM))
	plog_info (peer->log, "%s [FSM] Connect error", peer->host);
      BGP_EVENT_ADD (peer, TCP_connection_open_failed);
      break;
    case connect_success:
      if (BGP_DEBUG (fsm, FSM))
	plog_info (peer->log, "%s [FSM] Connect immediately success",
		   peer->host);
      BGP_EVENT_ADD (peer, TCP_connection_open);
      break;
    case connect_in_progress:
      /* To check nonblocking connect, we wait until socket is
         readable or writable. */
      if (BGP_DEBUG (fsm, FSM))
	plog_info (peer->log, "%s [FSM] Non blocking connect waiting result",
		   peer->host);
      if (peer->fd < 0)
	{
	  zlog_err ("bgp_start peer's fd is negative value %d",
		    peer->fd);
	  return -1;
	}
      BGP_READ_ON (peer->t_read, bgp_read, peer->fd);
      BGP_WRITE_ON (peer->t_write, bgp_write, peer->fd);
      break;
    }
  return 0;
}
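
Note that the Quagga/bgpd variant of plog_info used here takes the peer's zlog handle as its first argument, unlike the PROX plog_info in the other examples. A minimal sketch of that calling convention (the message text is illustrative):

  /* Quagga-style logging: the first argument is the peer's log handle. */
  if (BGP_DEBUG (fsm, FSM))
    plog_info (peer->log, "%s [FSM] illustrative message", peer->host);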
Code Example #9
File: run.c Project: gonzopancho/dppd-BNG
static void print_rx_tx_info(void)
{
    uint32_t lcore_id = -1;
    while(dppd_core_next(&lcore_id, 0) == 0) {
        for (uint8_t task_id = 0; task_id < lcore_cfg[lcore_id].nb_tasks; ++task_id) {
            struct task_args *targ = &lcore_cfg[lcore_id].targs[task_id];

            plog_info("Core %u:", lcore_id);
            if (targ->rx_ports[0] != NO_PORT_AVAIL) {
                for (int i=0; i<targ->nb_rxports; i++) {
                    plog_info(" RX port %u (queue %u)", targ->rx_ports[i], targ->rx_queues[i]);
                }
            }
            else {
                for (uint8_t j = 0; j < targ->nb_rxrings; ++j) {
                    plog_info(" RX ring[%u,%u] %p", task_id, j, targ->rx_rings[j]);
                }
            }
            plog_info(" ==>");
            for (uint8_t j = 0; j < targ->nb_txports; ++j) {
                plog_info(" TX port %u (queue %u)", targ->tx_port_queue[j].port,
                          targ->tx_port_queue[j].queue);
            }

            for (uint8_t j = 0; j < targ->nb_txrings; ++j) {
                plog_info(" TX ring %p", targ->tx_rings[j]);
            }

            plog_info("\n");
        }
    }
}
Code Example #10
File: main.c Project: nvf-crucio/PROX
static void init_rings(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *starg;
	struct ring_init_stats ris = {0};

	while (core_targ_next(&lconf, &starg, 1) == 0) {
		plog_info("\t*** Initializing rings on core %u, task %u ***\n", lconf->id, starg->id);
		for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
			for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) {
				PROX_ASSERT(ring_idx < MAX_WT_PER_LB);
				PROX_ASSERT(starg->tot_n_txrings_inited < MAX_RINGS_PER_TASK);

				struct core_task ct = starg->core_task_set[idx].core_task[ring_idx];
				init_ring_between_tasks(lconf, starg, ct, ring_idx, idx, &ris);
			}
		}
	}

	plog_info("\tInitialized %d rings:\n"
		  "\t\tNumber of packet rings: %u\n"
		  "\t\tNumber of control rings: %u\n"
		  "\t\tNumber of optimized rings: %u\n",
		  ring_init_stats_total(&ris),
		  ris.n_pkt_rings,
		  ris.n_ctrl_rings,
		  ris.n_opt_rings);

	lconf = NULL;
	struct prox_port_cfg *port;
	while (core_targ_next(&lconf, &starg, 1) == 0) {
		if ((starg->task_init) && (starg->task_init->flag_features & TASK_FEATURE_L3)) {
			struct core_task ct;
			ct.core = prox_cfg.master;
			ct.task = 0;
			ct.type = CTRL_TYPE_PKT;
			struct rte_ring *rx_ring = init_ring_between_tasks(lconf, starg, ct, 0, 0, &ris);

			ct.core = lconf->id;
			ct.task = starg->id;
			struct rte_ring *tx_ring = init_ring_between_tasks(lcore_cfg, lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
		}
	}
}
Code Example #11
File: main.c Project: nvf-crucio/PROX
static void check_missing_rx(void)
{
	struct lcore_cfg *lconf = NULL, *rx_lconf = NULL;
	struct task_args *targ, *rx_targ = NULL;
	struct prox_port_cfg *port;
	uint8_t port_id, rx_port_id, ok;

	while (core_targ_next(&lconf, &targ, 0) == 0) {
		PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task,
			   "Configuration Error - Core %u task %u Receiving from ring, but nobody xmitting to this ring\n", lconf->id, targ->id);
		if (targ->nb_rxports == 0 && targ->nb_rxrings == 0) {
			PROX_PANIC(!task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX),
				   "\tCore %u task %u: no rx_ports and no rx_rings configured while required by mode %s\n", lconf->id, targ->id, targ->task_init->mode_str);
		}
	}

	lconf = NULL;
	while (core_targ_next(&lconf, &targ, 0) == 0) {
		if (strcmp(targ->task_init->sub_mode_str, "l3") != 0)
			continue;
		port = find_reachable_port(targ);
		if (port == NULL)
			continue;
		port_id = port - prox_port_cfg;
		rx_lconf = NULL;
		ok = 0;
		plog_info("\tCore %d task %d transmitting to port %d in L3 mode\n", lconf->id, targ->id, port_id);
		while (core_targ_next(&rx_lconf, &rx_targ, 0) == 0) {
			for (uint8_t i = 0; i < rx_targ->nb_rxports; ++i) {
				rx_port_id = rx_targ->rx_port_queue[i].port;
				if ((rx_port_id == port_id) && (rx_targ->task_init->flag_features & TASK_FEATURE_L3)){
					ok = 1;
					break;
				}
			}
			if (ok == 1) {
				plog_info("\tCore %d task %d has found core %d task %d receiving from port %d\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id);
				break;
			}
		}
		PROX_PANIC(ok == 0, "L3 sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", port_id, lconf->id, targ->id);
	}
}
Code Example #12
File: main.c Project: nvf-crucio/PROX
static void plog_all_rings(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;

	while (core_targ_next(&lconf, &targ, 0) == 0) {
		for (uint8_t ring_idx = 0; ring_idx < targ->nb_rxrings; ++ring_idx) {
			plog_info("\tCore %u, task %u, rx_ring[%u] %p\n", lconf->id, targ->id, ring_idx, targ->rx_rings[ring_idx]);
		}
	}
}
Code Example #13
File: main.c Project: nvf-crucio/PROX
static void setup_all_task_structs_early_init(void)
{
	struct lcore_cfg *lconf = NULL;
	struct task_args *targ;

	plog_info("\t*** Calling early init on all tasks ***\n");
	while (core_targ_next(&lconf, &targ, 0) == 0) {
		if (targ->task_init->early_init) {
			targ->task_init->early_init(targ);
		}
	}
}
Code Example #14
File: commands.c Project: gonzopancho/dppd-BNG
void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id)
{
	struct lcore_cfg *lconf;
	struct rte_ring *ring;
	struct task_args* targ;
	uint32_t count;

	if (!dppd_core_active(lcore_id, 0)) {
		plog_info("lcore %u is not active\n", lcore_id);
		return;
	}
	lconf = &lcore_cfg[lcore_id];
	if (task_id >= lconf->nb_tasks) {
		plog_warn("Invalid task index %u: lcore %u has %u tasks\n", task_id, lcore_id, lconf->nb_tasks);
		return;
	}

	targ = &lconf->targs[task_id];
	plog_info("Core %u task %u: %u rings\n", lcore_id, task_id, targ->nb_rxrings);
	for (uint8_t i = 0; i < targ->nb_rxrings; ++i) {
		ring = targ->rx_rings[i];
		count = ring->prod.mask + 1;
		plog_info("\tRing %u:\n", i);
		plog_info("\t\tFlags: %s,%s\n", ring->flags & RING_F_SP_ENQ? "sp":"mp", ring->flags & RING_F_SC_DEQ? "sc":"mc");
		plog_info("\t\tMemory size: %zu bytes\n", rte_ring_get_memsize(count));
		plog_info("\t\tOccupied: %u/%u\n", rte_ring_count(ring), count);
	}
}
Code Example #15
File: main.c Project: nvf-crucio/PROX
static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
{
	uint8_t if_port;

	for (uint8_t i = 0; i < targ->nb_txports; ++i) {
		if_port = targ->tx_port_queue[i].port;

		PROX_PANIC(if_port == OUT_DISCARD, "port misconfigured, exiting\n");

		PROX_PANIC(!prox_port_cfg[if_port].active, "\tPort %u not used, skipping...\n", if_port);

		int dsocket = prox_port_cfg[if_port].socket;
		if (dsocket != -1 && dsocket != socket) {
			plog_warn("TX core on socket %d while device on socket %d\n", socket, dsocket);
		}

		if (prox_port_cfg[if_port].tx_ring[0] == '\0') {
			targ->tx_port_queue[i].queue = prox_port_cfg[if_port].n_txq;
			prox_port_cfg[if_port].n_txq++;
		} else {  // Rings-backed port can use a single queue
			prox_port_cfg[if_port].n_txq = 1;
			targ->tx_port_queue[i].queue = 0;
		}
		/* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
		   the tasks up to the task transmitting to the port
		   uses refcnt. */
		if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1)) {
			prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
			plog_info("\t\tEnabling No refcnt on port %d\n", if_port);
		}
		else {
			plog_info("\t\tRefcnt used on port %d\n", if_port);
		}

		/* By default OFFLOAD is enabled, but if the whole
		   chain has NOOFFLOADS set all the way until the
		   first task that receives from a port, it will be
		   disabled for the destination port. */
		if (chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, 1)) {
			prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
			plog_info("\t\tDisabling TX offloads on port %d\n", if_port);
		} else {
			plog_info("\t\tEnabling TX offloads on port %d\n", if_port);
		}

		/* By default NOMULTSEGS is disabled, as drivers/NICs might split packets on RX.
		   It should only be enabled when we know for sure that the RX does not split packets.
		   Set the ETH_TXQ_FLAGS_NOMULTSEGS flag if none of the tasks up to the task
		   transmitting to the port uses multsegs. */
		if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, 0)) {
			prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
			plog_info("\t\tEnabling No MultiSegs on port %d\n", if_port);
		}
		else {
			plog_info("\t\tMultiSegs used on port %d\n", if_port);
		}
	}
}
Code Example #16
File: commands.c Project: gonzopancho/dppd-BNG
void cmd_thread_info(uint8_t lcore_id, uint8_t task_id)
{
	plog_info("thread_info %u %u \n", lcore_id, task_id);
	if (lcore_id > RTE_MAX_LCORE) {
		plog_warn("core_id to high, maximum allowed is: %u\n", RTE_MAX_LCORE);
	}
	if (!dppd_core_active(lcore_id, 0)) {
		plog_warn("lcore %u is not active\n", lcore_id);
		return;
	}
	if (task_id >= lcore_cfg[lcore_id].nb_tasks) {
		plog_warn("task_id to high, should be in [0, %u]\n", lcore_cfg[lcore_id].nb_tasks - 1);
		return;
	}
	if (lcore_cfg[lcore_id].thread_x == thread_qos) {
		struct task_qos *task;
		if (task_id > 0) {
			plog_warn("for QoS only one port per core\n");
			return;
		}
		task = (struct task_qos *)(lcore_cfg[lcore_id].task[task_id]);
		plog_info("core %d, task %d: %d mbufs stored in QoS\n", lcore_id, task_id, task->nb_buffered_pkts);
#ifdef ENABLE_EXTRA_USER_STATISTICS
	}
	else if (lcore_cfg[lcore_id].targs[task_id].mode == QINQ_ENCAP4) {
		struct task_qinq_encap4 *task;
		task = (struct task_qinq_encap4 *)(lcore_cfg[lcore_id].task[task_id]);
		for (int i=0;i<task->n_users;i++) {
			if (task->stats_per_user[i])
				plog_info("User %d: %d packets\n", i, task->stats_per_user[i]);
		}
#endif
	}
	else {
		// Only QoS thread info so far
		plog_info("core %d, task %d: not a qos core(%p != %p)\n", lcore_id, task_id, lcore_cfg[lcore_id].thread_x,  thread_qos);
	}
}
Code Example #17
File: commands.c Project: gonzopancho/dppd-BNG
void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets)
{
	plog_info("dump %u %u %u\n", lcore_id, task_id, nb_packets);
	if (lcore_id > RTE_MAX_LCORE) {
		plog_warn("core_id to high, maximum allowed is: %u\n", RTE_MAX_LCORE);
	}
	else if (task_id >= lcore_cfg[lcore_id].nb_tasks) {
		plog_warn("task_id to high, should be in [0, %u]\n", lcore_cfg[lcore_id].nb_tasks - 1);
	}
	else {
		rte_atomic32_set(&lcore_cfg[lcore_id].task[task_id]->aux->task_dump.n_print, nb_packets);
		lconf_set_dump_req(&lcore_cfg[lcore_id], 1);
	}
}
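
A hypothetical call to the command above; the arguments are illustrative and, judging by n_print, request that core 1, task 0 dump its next 32 packets:

/* Illustrative usage, not from DPPD. */
static void dump_example(void)
{
	cmd_dump(1, 0, 32);
}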
Code Example #18
File: commands.c Project: nvf-crucio/PROX
void stop_cores(uint32_t *cores, int count, int task_id)
{
	int n_stopped_cores = 0;
	uint32_t stopped_cores[RTE_MAX_LCORE];
	uint32_t c;

	warn_inactive_cores(cores, count, "Can't stop core");

	for (int i = 0; i < count; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[cores[i]];
		if (lconf->n_tasks_run) {
			if (wait_command_handled(lconf) == -1) return;

			lconf->msg.type = LCONF_MSG_STOP;
			lconf->msg.task_id = task_id;
			lconf_set_req(lconf);
			stopped_cores[n_stopped_cores++] = cores[i];
		}
	}

	for (int i = 0; i < n_stopped_cores; ++i) {
		c = stopped_cores[i];
		struct lcore_cfg *lconf = &lcore_cfg[c];
		if (wait_command_handled(lconf) == -1) return;

		if (lconf->n_tasks_run == 0) {
			plog_info("All tasks stopped on core %u, waiting for core to stop...", c);
			rte_eal_wait_lcore(c);
			plog_info(" OK\n");
			lconf->flags &= ~LCONF_FLAG_RUNNING;
		}
		else {
			plog_info("Stopped task %u on core %u\n", task_id, c);
		}
	}
}
Code Example #19
File: commands.c Project: gonzopancho/dppd-BNG
void stop_cores(uint32_t *cores, int count)
{
	for (int i = 0; i < count; ++i) {
		if (!dppd_core_active(cores[i], 0)) {
			plog_warn("Can't stop core %u: core is not active\n", cores[i]);
		} else
			lconf_set_terminated(&lcore_cfg[cores[i]], 1);
	}

	for (int i = 0; i < count; ++i) {
		if (dppd_core_active(cores[i], 0)) {
			if ((rte_eal_get_lcore_state(cores[i]) == RUNNING) ||
			(rte_eal_get_lcore_state(cores[i]) == FINISHED)) {
				plog_info("stopping core %u...", cores[i]);
				rte_eal_wait_lcore(cores[i]);
				plog_info(" OK\n");
			}
			else {
				plog_info("core %u in state %d\n", cores[i], rte_eal_get_lcore_state(cores[i]));
			}
		}
	}

}
Code Example #20
File: commands.c Project: gonzopancho/dppd-BNG
void cmd_port_down(uint8_t port_id)
{
	int err;

	if (!port_is_valid(port_id)) {
		return ;
	}

	if ((err = rte_eth_dev_set_link_down(port_id)) == 0) {
		plog_info("Bringing port %d down\n", port_id);
	}
	else {
		plog_warn("Failed to bring port %d down with error %d\n", port_id, err);
	}
}
Code Example #21
File: commands.c Project: gonzopancho/dppd-BNG
void start_cores(uint32_t *cores, int count)
{
	for (int i = 0; i < count; ++i) {
		if (!dppd_core_active(cores[i], 0)) {
			plog_warn("Can't start core %u: core is not active\n", cores[i]);
		}
		else if (rte_eal_get_lcore_state(cores[i]) != RUNNING) {
			plog_info("Starting core %u\n", cores[i]);
			lconf_set_terminated(&lcore_cfg[cores[i]], 0);
			rte_eal_remote_launch(dppd_work_thread, NULL, cores[i]);
		}
		else {
			plog_warn("Core %u is already running\n", cores[i]);
		}
	}
}
Code Example #22
File: commands.c Project: gonzopancho/dppd-BNG
void start_core_all(void)
{
	uint32_t cores[RTE_MAX_LCORE];
	uint32_t lcore_id;
	char tmp[256];
	int cnt = 0;

	dppd_core_to_str(tmp, sizeof(tmp), 0);
	plog_info("Starting cores: %s\n", tmp);

	lcore_id = -1;
	while (dppd_core_next(&lcore_id, 0) == 0) {
		cores[cnt++] = lcore_id;
	}
	start_cores(cores, cnt);
}
Code Example #23
File: commands.c Project: nvf-crucio/PROX
void stop_core_all(int task_id)
{
	uint32_t cores[RTE_MAX_LCORE];
	uint32_t lcore_id;
	char tmp[256];
	int cnt = 0;

	prox_core_to_str(tmp, sizeof(tmp), 0);
	plog_info("Stopping cores: %s\n", tmp);

	lcore_id = -1;
	while (prox_core_next(&lcore_id, 0) == 0) {
		cores[cnt++] = lcore_id;
	}

	stop_cores(cores, cnt, task_id);
}
Code Example #24
File: bgp_fsm.c Project: antonywcl/AR-5315u_PLD
/* Execute event process. */
int
bgp_event (struct thread *thread)
{
  int ret;
  int event;
  int next;
  struct peer *peer;

  peer = THREAD_ARG (thread);
  event = THREAD_VAL (thread);

  /* Logging this event. */
  next = FSM [peer->status -1][event - 1].next_state;

  if (BGP_DEBUG (fsm, FSM))
    plog_info (peer->log, "%s [FSM] %s (%s->%s)", peer->host, 
	       bgp_event_str[event],
	       LOOKUP (bgp_status_msg, peer->status),
	       LOOKUP (bgp_status_msg, next));
  if (BGP_DEBUG (normal, NORMAL)
      && strcmp (LOOKUP (bgp_status_msg, peer->status), LOOKUP (bgp_status_msg, next)))
    zlog_info ("%s went from %s to %s",
	       peer->host,
	       LOOKUP (bgp_status_msg, peer->status),
	       LOOKUP (bgp_status_msg, next));

  /* Call function. */
  ret = (*(FSM [peer->status - 1][event - 1].func))(peer);

  /* When the function does not want to proceed to the next job, it returns -1. */
  if (ret < 0)
    return ret;
    
  /* If status is changed. */
  if (next != peer->status)
    bgp_fsm_change_status (peer, next);

  /* Make sure timer is set. */
  bgp_timer_set (peer);

  return 0;
}
Code Example #25
/* Encapsulate IPv6 packet in QinQ where the QinQ is derived from the IPv6 address */
static inline uint8_t handle_qinq_encap6(struct rte_mbuf *mbuf, struct task_qinq_encap6 *task)
{
	struct qinq_hdr *pqinq = (struct qinq_hdr *)rte_pktmbuf_prepend(mbuf, 2 * sizeof(struct vlan_hdr));

	PROX_RUNTIME_ASSERT(pqinq);
	struct ipv6_hdr *pip6 = (struct ipv6_hdr *)(pqinq + 1);

	if (pip6->hop_limits) {
		pip6->hop_limits--;
	}
	else {
		plog_info("TTL = 0 => Dropping\n");
		return NO_PORT_AVAIL;
	}

	// TODO: optimize to use bulk as intended with the rte_table_library
	uint64_t pkts_mask = RTE_LEN2MASK(1, uint64_t);
	uint64_t lookup_hit_mask;
	struct cpe_data* entries[64]; // TODO: use bulk size
	rte_table_hash_ext_dosig_ops.f_lookup(task->cpe_table, &mbuf, pkts_mask, &lookup_hit_mask, (void**)entries);

	if (lookup_hit_mask == 0x1) {
		/* will also overwrite part of the destination addr */
		(*(uint64_t *)pqinq) = entries[0]->mac_port_8bytes;
		pqinq->svlan.eth_proto = task->qinq_tag;
		pqinq->cvlan.eth_proto = ETYPE_VLAN;
		pqinq->svlan.vlan_tci = entries[0]->qinq_svlan;
		pqinq->cvlan.vlan_tci = entries[0]->qinq_cvlan;
		pqinq->ether_type = ETYPE_IPv6;

		/* classification can only be done from this point */
		if (task->runtime_flags & TASK_CLASSIFY) {
			rte_sched_port_pkt_write(mbuf, 0, entries[0]->user, 0, 0, 0);
		}
		return 0;
	}
	else {
		plogx_err("Unknown IP " IPv6_BYTES_FMT "\n", IPv6_BYTES(pip6->dst_addr));
		return NO_PORT_AVAIL;
	}
}
Code Example #26
File: bgp_network.c Project: AllardJ/Tomato
/* BGP try to connect to the peer.  */
int
bgp_connect (struct peer *peer)
{
  unsigned int ifindex = 0;

  /* Make socket for the peer. */
  peer->fd = sockunion_socket (&peer->su);
  if (peer->fd < 0)
    return -1;

  /* If we can get a socket for the peer, adjust TTL and make the connection. */
  if (peer_sort (peer) == BGP_PEER_EBGP)
    sockopt_ttl (peer->su.sa.sa_family, peer->fd, peer->ttl);

  sockopt_reuseaddr (peer->fd);
  sockopt_reuseport (peer->fd);

  /* Bind socket. */
  bgp_bind (peer);

  /* Update source bind. */
  bgp_update_source (peer);

#ifdef HAVE_IPV6
  if (peer->ifname)
    ifindex = if_nametoindex (peer->ifname);
#endif /* HAVE_IPV6 */

  if (BGP_DEBUG (events, EVENTS))
    plog_info (peer->log, "%s [Event] Connect start to %s fd %d",
	       peer->host, peer->host, peer->fd);

#ifdef HAVE_TCP_SIGNATURE
  if (CHECK_FLAG (peer->flags, PEER_FLAG_PASSWORD))
    bgp_tcpsig_set (peer->fd, peer);
#endif /* HAVE_TCP_SIGNATURE */

  /* Connect to the remote peer. */
  return sockunion_connect (peer->fd, &peer->su, htons (peer->port), ifindex);
}
Code Example #27
File: main.c Project: nvf-crucio/PROX
static int setup_prox(int argc, char **argv)
{
	if (prox_read_config_file() != 0 ||
	    prox_setup_rte(argv[0]) != 0) {
		return -1;
	}

	if (prox_cfg.flags & DSF_CHECK_SYNTAX) {
		plog_info("=== Configuration file syntax has been checked ===\n\n");
		exit(EXIT_SUCCESS);
	}

	init_port_activate();
	plog_info("=== Initializing rte devices ===\n");
	if (!(prox_cfg.flags & DSF_USE_DUMMY_DEVICES))
		init_rte_ring_dev();
	init_rte_dev(prox_cfg.flags & DSF_USE_DUMMY_DEVICES);
	plog_info("=== Calibrating TSC overhead ===\n");
	clock_init();
	plog_info("\tTSC running at %"PRIu64" Hz\n", rte_get_tsc_hz());

	init_lcores();
	plog_info("=== Initializing ports ===\n");
	init_port_all();

	if (prox_cfg.logbuf_size) {
		prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
		PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
	}

	if (prox_cfg.flags & DSF_CHECK_INIT) {
		plog_info("=== Initialization sequence completed ===\n\n");
		exit(EXIT_SUCCESS);
	}

	/* Current way that works to disable DPDK logging */
	FILE *f = fopen("/dev/null", "r");
	rte_openlog_stream(f);
	plog_info("=== PROX started ===\n");
	return 0;
}
Code Example #28
File: main.c Project: nvf-crucio/PROX
/* Initialize cores and allocate mempools */
static void init_lcores(void)
{
	struct lcore_cfg *lconf = 0;
	uint32_t lcore_id = -1;

	while(prox_core_next(&lcore_id, 0) == 0) {
		uint8_t socket = rte_lcore_to_socket_id(lcore_id);
		PROX_PANIC(socket + 1 > MAX_SOCKETS, "Can't configure core %u (on socket %u). MAX_SOCKET is set to %d\n", lcore_id, socket, MAX_SOCKETS);
	}

	/* need to allocate mempools as the first thing to use the lowest possible address range */
	plog_info("=== Initializing mempools ===\n");
	setup_mempools();

	lcore_cfg_alloc_hp();

	set_dest_threads();
	set_task_lconf();

	plog_info("=== Initializing port addresses ===\n");
	init_port_addr();

	plog_info("=== Initializing queue numbers on cores ===\n");
	configure_if_queues();

	plog_info("=== Initializing rings on cores ===\n");
	init_rings();

	plog_info("=== Checking configuration consistency ===\n");
	check_cfg_consistent();

	plog_all_rings();

	setup_all_task_structs_early_init();
	plog_info("=== Initializing tasks ===\n");
	setup_all_task_structs();
}
Code Example #29
File: main.c Project: nvf-crucio/PROX
static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args *targ)
{
	const uint8_t socket = rte_lcore_to_socket_id(lconf->id);
	struct prox_port_cfg *port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
	const struct rte_memzone *mz;
	struct rte_mempool *mp = NULL;
	uint32_t flags = 0;
	char memzone_name[64];
	char name[64];

	/* mbuf size can be set
	 *  - from config file (highest priority, overwriting any other config) - should only be used as workaround
	 *  - through each 'mode', overwriting the default mbuf_size
	 *  - defaulted to MBUF_SIZE i.e. 1518 Bytes
	 * Unless set explicitly, ensure that the size is big enough for the vmxnet3 driver
	 */
	if (targ->mbuf_size_set_explicitely) {
		flags = MEMPOOL_F_NO_SPREAD;
		/* targ->mbuf_size already set */
	}
	else if (targ->task_init->mbuf_size != 0) {
		/* mbuf_size not set through config file but set through mode */
		targ->mbuf_size = targ->task_init->mbuf_size;
	}
	else if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
		if (targ->mbuf_size < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
			targ->mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
	}

	/* allocate memory pool for packets */
	PROX_ASSERT(targ->nb_mbuf != 0);

	if (targ->pool_name[0] == '\0') {
		sprintf(name, "core_%u_port_%u_pool", lconf->id, targ->id);
	}

	snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
	mz = rte_memzone_lookup(memzone_name);

	if (mz != NULL) {
		mp = (struct rte_mempool*)mz->addr;

		targ->nb_mbuf = mp->size;
		targ->pool = mp;
	}

#ifdef RTE_LIBRTE_IVSHMEM_FALSE
	if (mz != NULL && mp != NULL && mp->phys_addr != mz->ioremap_addr) {
		/* Init mbufs with ioremap_addr for dma */
		mp->phys_addr = mz->ioremap_addr;
		mp->elt_pa[0] = mp->phys_addr + (mp->elt_va_start - (uintptr_t)mp);

		struct prox_pktmbuf_reinit_args init_args;
		init_args.mp = mp;
		init_args.lconf = lconf;

		uint32_t elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
		rte_mempool_obj_iter((void*)mp->elt_va_start, mp->size, elt_sz, 1,
				     mp->elt_pa, mp->pg_num, mp->pg_shift, prox_pktmbuf_reinit, &init_args);
	}
#endif

	/* Use this pool for the interface that the core is
	   receiving from if one core receives from multiple
	   ports, all the ports use the same mempool */
	if (targ->pool == NULL) {
		plog_info("\t\tCreating mempool with name '%s'\n", name);
		targ->pool = rte_mempool_create(name,
						targ->nb_mbuf - 1, targ->mbuf_size,
						targ->nb_cache_mbuf,
						sizeof(struct rte_pktmbuf_pool_private),
						rte_pktmbuf_pool_init, NULL,
						prox_pktmbuf_init, lconf,
						socket, flags);
	}

	PROX_PANIC(targ->pool == NULL,
		   "\t\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));

	plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
		  targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket);
	if (prox_cfg.flags & DSF_SHUFFLE) {
		shuffle_mempool(targ->pool, targ->nb_mbuf);
	}
}
Code Example #30
File: commands.c Project: gonzopancho/dppd-BNG
void cmd_portinfo(uint8_t port_id)
{
	if (!port_is_valid(port_id)) {
		return ;
	}
	struct dppd_port_cfg* port_cfg = &dppd_port_cfg[port_id];

	plog_info("Port info for port %u\n", port_id);
	plog_info("\tName: %s\n", port_cfg->name);
	plog_info("\tDriver: %s\n", port_cfg->driver_name);
	plog_info("\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
	plog_info("\tLink speed: %u Mbps\n", port_cfg->link_speed);
	plog_info("\tLink status: %s\n", port_cfg->link_up? "up" : "down");
	plog_info("\tSocket: %u\n", port_cfg->socket);
	plog_info("\tPromiscuous: %s\n", port_cfg->promiscuous? "yes" : "no");
	plog_info("\tNumber of RX/TX descriptors: %u/%u\n", port_cfg->n_rxd, port_cfg->n_txd);
	plog_info("\tNumber of RX/TX queues: %u/%u (max: %u/%u)\n", port_cfg->n_rxq, port_cfg->n_txq, port_cfg->max_rxq, port_cfg->max_txq);
	plog_info("\tMemory pools:\n");
	for (uint8_t i = 0; i < 32; ++i) {
		if (port_cfg->pool[i]) {
			plog_info("\t\tname: %s (%p)\n", port_cfg->pool[i]->name, port_cfg->pool[i]);
		}
	}
}