Example #1
/**********************************************************************
*@description:
*  check the lcore parameters in the user configuration: validate the
*  queue ids, make sure every lcore is enabled in the lcore mask, and
*  warn when an lcore sits on a non-zero NUMA socket while NUMA is off
*
*@parameters:
* [in]: user_conf - user configuration to check
*
*@return values:
*  0 on success, -1 on error
*
**********************************************************************/
int odp_check_lcore_params(struct odp_user_config *user_conf)
{
    uint8_t queue, lcore;
    uint16_t i;
    int socketid;

    for (i = 0; i < user_conf->lcore_param_nb; ++i)
    {
        queue = user_conf->lcore_param[i].queue_id;
        if (queue >= MAX_RX_QUEUE_PER_PORT)
        {
            printf("invalid queue number: %hhu\n", queue);
            return -1;
        }

        lcore = user_conf->lcore_param[i].lcore_id;
        if (!rte_lcore_is_enabled(lcore))
        {
            printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
            return -1;
        }

        /* assign the socket id first, then compare it against 0 */
        if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) && (user_conf->numa_on == 0))
        {
            printf("warning: lcore %hhu is on socket %d with numa off\n", lcore, socketid);
        }
    }
    return 0;
}
Example #2
static void setup_mempools(struct lcore_cfg* lcore_cfg)
{
	char name[64];
	struct lcore_cfg *lconf = 0;
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}
		lconf = &lcore_cfg[lcore_id];
		uint8_t socket = rte_lcore_to_socket_id(lcore_id);
		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			struct task_startup_cfg *startup_cfg = &lconf->startup_cfg[task_id];

			if (startup_cfg->rx_port != NO_PORT_AVAIL) {
				/* allocate memory pool for packets */
				if (startup_cfg->nb_mbuf == 0) {
					startup_cfg->nb_mbuf = tgen_cfg.nb_mbuf;
				}

				/* use this pool for the interface that the core is receiving from */
				sprintf(name, "core_%u_port_%u_pool", lcore_id, task_id);
				startup_cfg->pool = rte_mempool_create(name,
								       startup_cfg->nb_mbuf - 1, MBUF_SIZE,
								       MAX_PKT_BURST * 4,
								       sizeof(struct rte_pktmbuf_pool_private),
								       rte_pktmbuf_pool_init, NULL,
								       tgen_pktmbuf_init, lconf,
								       socket, 0);
				TGEN_PANIC(startup_cfg->pool == NULL, "\t\tError: cannot create mempool for core %u port %u\n", lcore_id, task_id);
				mprintf("\t\tMempool %p size = %u * %u cache %u, socket %d\n", startup_cfg->pool,
					startup_cfg->nb_mbuf, MBUF_SIZE, MAX_PKT_BURST * 4, socket);
			}
		}
	}
}
Example #3
static int
kni_validate_parameters(uint32_t portmask)
{
	uint32_t i;

	if (!portmask) {
		RTE_LOG(ERR, KNI, "No port configured in port mask\n");
		return -1;
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (((portmask & (1 << i)) && !kni_port_params_array[i]) ||
		    (!(portmask & (1 << i)) && kni_port_params_array[i]))
			rte_exit(EXIT_FAILURE,
				 "portmask is not consistent "
				 "to port ids specified in --config\n");
		if (kni_port_params_array[i] &&
		    !rte_lcore_is_enabled(
			(unsigned)(kni_port_params_array[i]->lcore_tx)))
			rte_exit(EXIT_FAILURE,
				 "lcore id %u for "
				 "port %d transmitting not enabled\n",
				 kni_port_params_array[i]->lcore_tx,
				 kni_port_params_array[i]->port_id);
	}

	return 0;
}
Example #4
static int
init_mbufpool(unsigned nb_mbuf)
{
	int socketid;
	unsigned lcore_id;
	char s[64];

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		socketid = rte_lcore_to_socket_id(lcore_id);
		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE,
				"Socket %d of lcore %u is out of range %d\n",
				socketid, lcore_id, NB_SOCKETS);
		}
		if (mbufpool[socketid] == NULL) {
			snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			mbufpool[socketid] =
				rte_pktmbuf_pool_create(s, nb_mbuf,
					MEMPOOL_CACHE_SIZE, 0,
					RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
			if (mbufpool[socketid] == NULL)
				rte_exit(EXIT_FAILURE,
					"Cannot init mbuf pool on socket %d\n",
					socketid);
			else
				printf("Allocated mbuf pool on socket %d\n",
					socketid);
		}
	}
	return 0;
}
Example #5
/*
 * @brief alloc configuration structs for numa and cpu
 * @note this function panics on error
 */
int pal_cpu_init(void)
{
	int cpu, numa;

	memset(&g_pal_config, 0, sizeof(g_pal_config));

	/* record used numa nodes */
	for(cpu = 0; cpu < PAL_MAX_CPU; cpu++) {
		if(!rte_lcore_is_enabled(cpu))
			continue;

		numa = (int)rte_lcore_to_socket_id(cpu);
		/* numa indexes g_pal_config.numa[], so it must stay below PAL_MAX_NUMA */
		if(numa >= PAL_MAX_NUMA)
			PAL_PANIC("Numa id %d >= PAL_MAX_NUMA(%d)\n", numa, PAL_MAX_NUMA);

		/* alloc cpu configuration struct */
		g_pal_config.cpu[cpu] = (struct cpu_conf *)
		               pal_zalloc_numa(sizeof(struct cpu_conf), numa);
		if(g_pal_config.cpu[cpu] == NULL)
			PAL_PANIC("alloc cpu config failed\n");
		g_pal_config.cpu[cpu]->numa = numa;

		if(g_pal_config.numa[numa] != NULL)
			continue;

		/* alloc numa configuration struct */
		g_pal_config.numa[numa] = (struct numa_conf *)
		               pal_zalloc_numa(sizeof(struct numa_conf), numa);
		if(g_pal_config.numa[numa] == NULL)
			PAL_PANIC("malloc numa config failed\n");
	}

	return 0;
}
Example #6
static int
parse_arg_w(const char *arg)
{
	const char *p = arg;
	uint32_t n_tuples;

	if (strnlen(arg, APP_ARG_W_MAX_CHARS + 1) == APP_ARG_W_MAX_CHARS + 1) {
		return -1;
	}

	n_tuples = 0;
	while (*p != 0) {
		struct app_lcore_params *lp;
		uint32_t lcore;

		errno = 0;
		lcore = strtoul(p, NULL, 0);
		if ((errno != 0)) {
			return -2;
		}

		/* Check and enable worker lcore */
		if (rte_lcore_is_enabled(lcore) == 0) {
			return -3;
		}

		if (lcore >= APP_MAX_LCORES) {
			return -4;
		}
		lp = &app.lcore_params[lcore];
		if (lp->type == e_APP_LCORE_IO) {
			return -5;
		}
		lp->type = e_APP_LCORE_WORKER;

		n_tuples ++;
		if (n_tuples > APP_ARG_W_MAX_TUPLES) {
			return -6;
		}

		p = strchr(p, ',');
		if (p == NULL) {
			break;
		}
		p ++;
	}

	if (n_tuples == 0) {
		return -7;
	}

	if ((n_tuples & (n_tuples - 1)) != 0) {
		return -8;
	}

	return 0;
}
Example #7
static void init_lcore_info(void)
{
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}
		lcore_cfg[lcore_id].corenb = lcore_id;
	}
}
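
Most of the loops in these examples filter lcores by hand with rte_lcore_is_enabled(). As a sketch only: DPDK's rte_lcore.h also ships iteration macros that do this filtering internally, so Example #7 could plausibly be written as below, assuming tgen_cfg.master is the EAL master lcore (RTE_LCORE_FOREACH_SLAVE visits every enabled lcore except the master).

/* Minimal sketch, not from the original source: Example #7 rewritten
 * with DPDK's iteration macro instead of the manual enabled/master
 * checks. */
#include <rte_lcore.h>

static void init_lcore_info_foreach(void)
{
	unsigned lcore_id;

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		lcore_cfg[lcore_id].corenb = lcore_id;
	}
}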
Example #8
/**
 * Get the last enabled lcore ID
 *
 * @return
 *   The last enabled lcore ID, or 0 if no lcore is enabled.
 */
static unsigned int
get_last_lcore_id(void)
{
	int i;

	for (i = RTE_MAX_LCORE - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return 0;
}
Example #9
/**
 * Get the previous enabled lcore ID
 * @param id
 *  The current lcore ID
 * @return
 *   The previous enabled lcore ID or the current lcore
 *   ID if it is the first available core.
 */
static unsigned int
get_previous_lcore_id(unsigned int id)
{
	int i;

	for (i = id - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return id;
}
Example #10
static void check_no_mode_core(void)
{
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}

		struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
		TGEN_PANIC((lconf->flags & PCFG_MODE) == 0,
		           "No mode assigned for core %u. Add mode= in configuration file\n", lcore_id);
	}
}
Example #11
int initDpdk(char* progname)
{
    int ret;
    static char *eal_args[] = {progname, "-c0xf", "-n1", "-m128", "--file-prefix=drone"};

    // TODO: read env var DRONE_RTE_EAL_ARGS to override defaults

    ret = rte_eal_init(sizeof(eal_args)/sizeof(char*), eal_args);
    if (ret < 0)
        rte_panic("Cannot init EAL\n");

    mbufPool_ = rte_mempool_create("DpktPktMbuf",
                                   16*1024, // # of mbufs
                                   2048, // sz of mbuf
                                   32,   // per-lcore cache sz
                                   sizeof(struct rte_pktmbuf_pool_private),
                                   rte_pktmbuf_pool_init, // pool ctor
                                   NULL, // pool ctor arg
                                   rte_pktmbuf_init, // mbuf ctor
                                   NULL, // mbuf ctor arg
                                   SOCKET_ID_ANY,
                                   0     // flags
                                  );

    if (!mbufPool_)
        rte_exit(EXIT_FAILURE, "cannot init mbuf pool\n");

    if (rte_pmd_init_all() < 0)
        rte_exit(EXIT_FAILURE, "cannot init pmd\n");

    if (rte_eal_pci_probe() < 0)
        rte_exit(EXIT_FAILURE, "cannot probe PCI\n");

    // init lcore information
    lcoreCount_ = rte_lcore_count();
    lcoreFreeMask_ = 0;
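    // Note: iterating 0..lcoreCount_-1 assumes the enabled lcores are
    // contiguous from 0 (true for the -c0xf mask above); a sparse
    // coremask would need RTE_MAX_LCORE as the loop bound instead.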
    for (int i = 0; i < lcoreCount_; i++) {
        if (rte_lcore_is_enabled(i) && (unsigned(i) != rte_get_master_lcore()))
            lcoreFreeMask_ |= (1ULL << i);
    }
    qDebug("lcore_count = %d, lcore_free_mask = 0x%llx",
           lcoreCount_, lcoreFreeMask_);

    // assign a lcore for Rx polling
    rxLcoreId_ = getFreeLcore();
    if (rxLcoreId_ < 0)
        rte_exit(EXIT_FAILURE, "not enough cores for Rx polling");

    stopRxPoll_ = false;

    return 0;
}
Example #12
static uint64_t
app_get_core_mask(void)
{
	uint64_t core_mask = 0;
	uint32_t i;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (rte_lcore_is_enabled(i) == 0)
			continue;

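		/* note: a 64-bit mask can only represent lcore ids 0..63; on
		 * builds where RTE_MAX_LCORE is larger, the shift below would
		 * overflow for higher ids */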
		core_mask |= 1LLU << i;
	}

	return core_mask;
}
Example #13
static void setup_all_task_structs(void)
{
	struct lcore_cfg *lconf;

	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			lconf->startup_cfg[task_id].lconf = lconf;
			lconf->task[task_id] = init_task_struct(&lconf->startup_cfg[task_id]);
		}
	}
}
Example #14
static int
init_lcores(void)
{
	unsigned lcore_id;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		lcore_conf[lcore_id].socketid =
			rte_lcore_to_socket_id(lcore_id);
		if (rte_lcore_is_enabled(lcore_id) == 0) {
			lcore_conf[lcore_id].status = LCORE_INVALID;
			continue;
		} else
			lcore_conf[lcore_id].status = LCORE_AVAIL;
	}
	return 0;
}
Example #15
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (! rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
Example #16
static void configure_if_queues(void)
{
	struct lcore_cfg *lconf;
	uint8_t socket;
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}

		socket = rte_lcore_to_socket_id(lcore_id);
		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			struct task_startup_cfg *startup_cfg = &lconf->startup_cfg[task_id];
			configure_if_tx_queues(startup_cfg, socket);
			configure_if_rx_queues(startup_cfg, socket);
		}
	}
}
Example #17
/**********************************************************************
*@description:
*  create one packet mbuf pool per NUMA socket that has at least one
*  enabled lcore (or a single pool on socket 0 when NUMA is off)
*
*@parameters:
* [in]: nb_mbuf   - number of mbufs to put in each pool
* [in]: user_conf - user configuration (NUMA flag, socket count)
*
*@return values:
*  0 on success; exits on failure
*
**********************************************************************/
static int odp_init_mbuf_pool(unsigned nb_mbuf, struct odp_user_config  *user_conf)
{
    int socketid;
    unsigned lcore_id;
    char str[64];

    memset(odp_pktmbuf_pool, 0, sizeof(odp_pktmbuf_pool));

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) 
    {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;

        if (user_conf->numa_on)
            socketid = rte_lcore_to_socket_id(lcore_id);
        else
            socketid = 0;

        if (socketid >= MAX_NB_SOCKETS)
        {
            rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n", socketid, lcore_id, MAX_NB_SOCKETS);
        }
        
        if (odp_pktmbuf_pool[socketid] == NULL) 
        {
            snprintf(str, sizeof(str), "odp_mbuf_pool_%d", socketid);
            odp_pktmbuf_pool[socketid] = rte_mempool_create(str, nb_mbuf, MAX_MBUF_SIZE, MEMPOOL_CACHE_SIZE,
                sizeof(struct rte_pktmbuf_pool_private),
                rte_pktmbuf_pool_init, NULL,
                rte_pktmbuf_init, NULL,
                socketid, 0);
            
            if (odp_pktmbuf_pool[socketid] == NULL)
                rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n", socketid);
            else
                printf("Allocated mbuf pool on socket %d, mbuf number: %d \n", socketid, nb_mbuf);

            user_conf->socket_nb++;

        }
    }
    return 0;
}
Example #18
/**
 * @brief           Map port TX to a logical core
 *
 * @param devId     Port number
 * @param coreId    Core number
 *
 * @return          true on success
 */
bool DPDKAdapter::portTxCoreMap(uint8_t devId, uint8_t coreId)
{
    if (devId >= RTE_MAX_ETHPORTS)
    {
        qCritical("devId %u is not supported", devId);
        return false;
    }

    if (!rte_lcore_is_enabled(coreId))
    {
        qCritical("lcore %u is not enabled", coreId);
        return false;
    }

    LcoreInfo& coreInfo = cores[coreId];
    coreInfo.txPortList.insert(devId);

    qWarning("TX port %u is mapped to lcore %u", devId, coreId);

    return true;
}
Example #19
static void check_consistent_cfg(void)
{
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}
		const struct lcore_cfg *lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			const struct task_startup_cfg *startup_cfg = &lconf->startup_cfg[task_id];
			TGEN_PANIC((startup_cfg->flags & PORT_STARTUP_RX_RING) && (startup_cfg->rx_rings[0] == 0),
			           "Configuration Error - Core %u task %u Receiving from ring, but nobody xmitting to this ring\n", lcore_id, task_id);

			for (uint8_t ring_idx = 0; ring_idx < startup_cfg->nb_rxrings; ++ring_idx) {
				mprintf("\t\tCore %u, task %u, rx_ring[%u] %p\n", lcore_id, task_id, ring_idx, startup_cfg->rx_rings[ring_idx]);
			}
			if (startup_cfg->nb_txports == 0 && startup_cfg->nb_txrings == 0) {
				TGEN_PANIC(!(startup_cfg->mode & QINQ_DECAP_ARP) && !(lconf->flags & PCFG_DROP),
				           "\t\tCore %u task %u does not transmit or drop packet: no tx_ports and no tx_rings\n", lcore_id, task_id);
			}
		}
	}
}
Example #20
static void init_routing_ports(void)
{
	struct lcore_cfg *lconf;
	struct task_startup_cfg *startup_cfg;
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}

		lconf = &lcore_cfg[lcore_id];
		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			startup_cfg = &lconf->startup_cfg[task_id];
			/* need to add the actual i/f numbers to the tx ports of all the ports that are routing */
			if (startup_cfg->runtime_flags & TASK_ROUTING) {
				startup_cfg->nb_txports = get_nb_hop_ports();
				TGEN_ASSERT(startup_cfg->nb_txports < TGEN_MAX_PORTS);
				for (uint8_t i = 0; i < startup_cfg->nb_txports; ++i) {
					startup_cfg->tx_port_queue[i].port = get_hop_port(i);
				}
			}
		}
	}
}
Example #21
/* Decide if there are cores that need specific configurations to be loaded. */
static uint16_t data_structs_needed(struct lcore_cfg* lconf, uint8_t socket_id)
{
	uint16_t config_files = 0;
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master ||
		    rte_lcore_to_socket_id(lcore_id) != socket_id) {
			continue;
		}

		lconf = &lcore_cfg[lcore_id];

		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			struct task_startup_cfg *startup_cfg = &lconf->startup_cfg[task_id];

			if (QINQ_DECAP_ARP == startup_cfg->mode || QINQ_DECAP_V4 == startup_cfg->mode) {
				config_files |= DATA_STRUCTS_NEED_GRE_TABLE;
			}
			if (QOS == startup_cfg->mode || CLASSIFY == startup_cfg->mode || QINQ_DECAP_V6 == startup_cfg->mode) {
				config_files |= DATA_STRUCTS_NEED_USER_TABLE;
			}
			if (ROUTING == startup_cfg->mode || FWD == startup_cfg->mode || QINQ_DECAP_V4 == startup_cfg->mode) {
				config_files |= DATA_STRUCTS_NEED_NEXT_HOP;
			}
			if (QINQ_DECAP_V4 == startup_cfg->mode || FWD == startup_cfg->mode || ROUTING == startup_cfg->mode) {
				config_files |= DATA_STRUCTS_NEED_LPM_V4;
			}
			if (QINQ_DECAP_V6 == startup_cfg->mode) {
				config_files |= DATA_STRUCTS_NEED_LPM_V6;
			}
			if (LB_QINQ == startup_cfg->mode) {
				config_files |= DATA_STRUCTS_NEED_WT_TABLE;
			}
		}
	}
	return config_files;
}
Example #22
/* Parse the argument given in the command line of the application */
int
app_parse_args(int argc, const char *argv[]) {
  int opt, ret;
  char **argvopt;
  int option_index;
  const char *prgname = argv[0];
  static const struct option lgopts[] = {
    {"rx", 1, 0, 0},
    {"tx", 1, 0, 0},
    {"w", 1, 0, 0},
    {"rsz", 1, 0, 0},
    {"bsz", 1, 0, 0},
    {"no-cache", 0, 0, 0},
    {"core-assign", 1, 0, 0},
    {"kvstype", 1, 0, 0},
#ifdef __SSE4_2__
    {"hashtype", 1, 0, 0},
#endif /* __SSE4_2__ */
    {"fifoness", 1, 0, 0},
    {"show-core-config", 0, 0, 0},
    {NULL, 0, 0, 0}
  };
  uint32_t arg_p = 0;
  uint32_t arg_w = 0;
  uint32_t arg_rx = 0;
  uint32_t arg_tx = 0;
  uint32_t arg_rsz = 0;
  uint32_t arg_bsz = 0;

  uint64_t portmask = 0;
  char *end = NULL;
  struct app_lcore_params *lp, *htlp;
  unsigned lcore, htcore, lcore_count, i, wk_lcore_count = 0;
  unsigned rx_lcore_start, tx_lcore_start, wk_lcore_start;
  int rx_lcore_inc, tx_lcore_inc, wk_lcore_inc;
  uint8_t portid, port_count, count, n;
  bool show_core_assign = false;

  argvopt = (char **)argv;

  while ((opt = getopt_long(argc, argvopt, "p:w:",
                            lgopts, &option_index)) != EOF) {
    switch (opt) {
      case 'p':
        if (optarg[0] == '\0') {
          printf("Require value for -p argument\n");
          return -1;
        }
        portmask = (uint64_t)strtoull(optarg, &end, 16);
        if (end == NULL || *end != '\0') {
          printf("Non-numerical value for -p argument\n");
          return -1;
        }
        if (portmask == 0) {
          printf("Incorrect value for -p argument\n");
          return -1;
        }
        arg_p = 1;
        break;
      case 'w':
        if (optarg[0] == '\0') {
          printf("Require value for -w argument\n");
          return -1;
        }
        wk_lcore_count = (unsigned)strtoul(optarg, &end, 10);
        if (end == NULL || *end != '\0') {
          printf("Non-numerical value for -w argument\n");
          return -1;
        }
        if (wk_lcore_count == 0) {
          printf("Incorrect value for -w argument\n");
          return -1;
        }
        break;
        /* long options */
      case 0:
        if (!strcmp(lgopts[option_index].name, "rx")) {
          arg_rx = 1;
          ret = parse_arg_rx(optarg);
          if (ret) {
            printf("Incorrect value for --rx argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "tx")) {
          arg_tx = 1;
          ret = parse_arg_tx(optarg);
          if (ret) {
            printf("Incorrect value for --tx argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "w")) {
          arg_w = 1;
          ret = parse_arg_w(optarg);
          if (ret) {
            printf("Incorrect value for --w argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "rsz")) {
          arg_rsz = 1;
          ret = parse_arg_rsz(optarg);
          if (ret) {
            printf("Incorrect value for --rsz argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "bsz")) {
          arg_bsz = 1;
          ret = parse_arg_bsz(optarg);
          if (ret) {
            printf("Incorrect value for --bsz argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "core-assign")) {
          ret = parse_arg_core_assign(optarg);
          if (ret) {
            printf("Incorrect value for --core-assign argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "no-cache")) {
          app.no_cache = 1;
        }
        if (!strcmp(lgopts[option_index].name, "kvstype")) {
          ret = parse_arg_kvstype(optarg);
          if (ret) {
            printf("Incorrect value for --ksvtype argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "hashtype")) {
          ret = parse_arg_hashtype(optarg);
          if (ret) {
            printf("Incorrect value for --hashtype argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "fifoness")) {
          ret = parse_arg_fifoness(optarg);
          if (ret) {
            printf("Incorrect value for --fifoness argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "show-core-config")) {
          show_core_assign = true;
        }
        break;

      default:
        printf("Incorrect option\n");
        return -1;
    }
  }

  /* Check that all mandatory arguments are provided */
  if ((arg_rx == 0 || arg_tx == 0 || arg_w == 0) && arg_p == 0) {
    lagopus_exit_error(EXIT_FAILURE,
                       "Not all mandatory arguments are present\n");
  }

  port_count = 0;
  if (arg_p != 0) {
    /**
     * Assign lcore for each thread automatically.
     */
    for (i = 0; i < 32; i++) {
      if ((portmask & (uint64_t)(1ULL << i)) != 0) {
        port_count++;
      }
    }
    if (port_count == 0) {
      lagopus_exit_error(EXIT_FAILURE,
                         "error: port is not specified.  use -p HEXNUM or --rx, --tx.\n");
    }
  }
  for (lcore_count = 0, lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
    if (lcore == rte_get_master_lcore()) {
      continue;
    }
    if (!rte_lcore_is_enabled(lcore)) {
      continue;
    }
    lp = &app.lcore_params[lcore];
    lp->socket_id = lcore_config[lcore].socket_id;
    lp->core_id = lcore_config[lcore].core_id;

    /* add lcore id except for hyper-threading core. */
    for (htcore = 0; htcore < lcore; htcore++) {
      if (!rte_lcore_is_enabled(htcore)) {
        continue;
      }
      htlp = &app.lcore_params[htcore];
      if (app.core_assign == CORE_ASSIGN_PERFORMANCE) {
        if (lp->socket_id == htlp->socket_id &&
            lp->core_id == htlp->core_id) {
          break;
        }
      }
    }
    if (htcore == lcore) {
      lcores[lcore_count++] = lcore;
    }
  }

  if (lcore_count == 0) {
    lagopus_exit_error(
      EXIT_FAILURE,
      "Not enough active core "
      "(need at least 2 active core%s)\n",
      app.core_assign == CORE_ASSIGN_PERFORMANCE ?
      " except for HTT core" : "");
  }
  if (app.core_assign == CORE_ASSIGN_MINIMUM) {
    lcore_count = 1;
  }
  if (lcore_count == 1) {
    /*
     * I/O and worker shares single lcore.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = 0;
    rx_lcore_inc = 0;
    tx_lcore_inc = 0;
    wk_lcore_inc = 0;
  } else if (port_count * 4 <= lcore_count) {
    /*
     * each port's rx has its own lcore.
     * each port's tx has its own lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = rx_lcore_start + port_count;
    wk_lcore_start = tx_lcore_start + port_count;
    rx_lcore_inc = 1;
    tx_lcore_inc = 1;
    wk_lcore_inc = 1;
  } else if (port_count * 2 <= lcore_count) {
    /*
     * each port (rx/tx) has its own lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = rx_lcore_start + port_count;
    rx_lcore_inc = 1;
    tx_lcore_inc = 1;
    wk_lcore_inc = 1;
  } else if (port_count <= lcore_count) {
    /*
     * odd ports and even ports (rx/tx) shared lcore.
     * all other lcores are assigned as worker.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = rx_lcore_start + (port_count + 1) / 2;
    rx_lcore_inc = 2;
    tx_lcore_inc = 2;
    wk_lcore_inc = 1;
  } else if (port_count <= lcore_count * 2) {
    /*
     * every four ports (rx/tx) share an lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = rx_lcore_start + (port_count + 3) / 4;
    rx_lcore_inc = 4;
    tx_lcore_inc = 4;
    wk_lcore_inc = 1;
  } else if (lcore_count >= 4) {
    /*
     * rx for all ports shares one dedicated lcore.
     * tx for all ports shares another dedicated lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 1;
    wk_lcore_start = 2;
    rx_lcore_inc = 0;
    tx_lcore_inc = 0;
    wk_lcore_inc = 1;
  } else {
    /*
     * I/O has own lcore.
     * all other lcores are assigned as worker.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = 1;
    rx_lcore_inc = 0;
    tx_lcore_inc = 0;
    wk_lcore_inc = 1;
  }
  /* assign core automatically */
  if (arg_rx == 0) {
    lcore = rx_lcore_start;
    count = rx_lcore_inc;
    for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
      if ((portmask & (uint64_t)(1ULL << portid)) == 0) {
        continue;
      }
      if (assign_rx_lcore(portid, lcore) != 0) {
        return -5;
      }
      if (--count == 0) {
        lcore++;
        count = rx_lcore_inc;
      }
    }
  }
  if (arg_tx == 0) {
    lcore = tx_lcore_start;
    count = tx_lcore_inc;
    for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
      if ((portmask & (uint64_t)(1ULL << portid)) == 0) {
        continue;
      }
      if (assign_tx_lcore(portid, lcore) != 0) {
        return -5;
      }
      if (--count == 0) {
        lcore++;
        count = tx_lcore_inc;
      }
    }
  }
  if (arg_w == 0) {
    if (wk_lcore_count == 0) {
      wk_lcore_count = lcore_count - wk_lcore_start;
    }
    for (lcore = wk_lcore_start;
         lcore < wk_lcore_start + wk_lcore_count;
         lcore++) {
      lp = &app.lcore_params[lcores[lcore]];
      if (lp->type == e_APP_LCORE_IO) {
        /* core is shared by I/O and worker. */
        lp->type = e_APP_LCORE_IO_WORKER;
      } else {
        lp->type = e_APP_LCORE_WORKER;
      }
      if (wk_lcore_inc != 1) {
        break;
      }
    }
  }

  /* Assign default values for the optional arguments not provided */
  if (arg_rsz == 0) {
    app.nic_rx_ring_size = APP_DEFAULT_NIC_RX_RING_SIZE;
    app.nic_tx_ring_size = APP_DEFAULT_NIC_TX_RING_SIZE;
    app.ring_rx_size = APP_DEFAULT_RING_RX_SIZE;
    app.ring_tx_size = APP_DEFAULT_RING_TX_SIZE;
  }

  if (arg_bsz == 0) {
    app.burst_size_io_rx_read = APP_DEFAULT_BURST_SIZE_IO_RX_READ;
    app.burst_size_io_rx_write = APP_DEFAULT_BURST_SIZE_IO_RX_WRITE;
    app.burst_size_io_tx_read = APP_DEFAULT_BURST_SIZE_IO_TX_READ;
    app.burst_size_io_tx_write = APP_DEFAULT_BURST_SIZE_IO_TX_WRITE;
    app.burst_size_worker_read = APP_DEFAULT_BURST_SIZE_WORKER_READ;
    app.burst_size_worker_write = APP_DEFAULT_BURST_SIZE_WORKER_WRITE;
  }

  /* Check cross-consistency of arguments */
  if (app_check_every_rx_port_is_tx_enabled() < 0) {
    lagopus_msg_error("At least one RX port is not enabled for TX.\n");
    return -2;
  }

  if (show_core_assign == true) {
    printf("core assign:\n");
    for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
      if (lcore_config[lcore].detected != true) {
        continue;
      }
      lp = &app.lcore_params[lcore];
      printf("  lcore %d:\n", lcore);
      if (lp->type == e_APP_LCORE_IO) {
        printf("    type: I/O\n");
      } else if (lp->type == e_APP_LCORE_WORKER) {
        printf("    type: WORKER\n");
      } else if (lp->type == e_APP_LCORE_IO_WORKER) {
        printf("    type: I/O WORKER\n");
      } else {
        printf("    type: not used\n");
      }
      for (n = 0; n < lp->io.rx.n_nic_queues; n++) {
        printf("    RX port %d (queue %d)\n",
               lp->io.rx.nic_queues[n].port,
               lp->io.rx.nic_queues[n].queue);
      }
      for (n = 0; n < lp->io.tx.n_nic_ports; n++) {
        printf("    TX port %d\n",
               lp->io.tx.nic_ports[n]);
      }
    }
    exit(0);
  }

  if (optind >= 0) {
    argv[optind - 1] = prgname;
  }
  ret = optind - 1;
  optind = 0; /* reset getopt lib */
  return ret;
}
Example #23
/**********************************************************************
*@description:
*  initialize all enabled ports: configure each device, set up one TX
*  queue per (lcore, port) pair, create the mbuf pools, and set up the
*  RX queues assigned to each lcore
*
*@parameters:
* [in]: nb_ports   - total number of ports
* [in]: user_conf  - user configuration (port mask, NUMA flag)
* [in]: lcore_conf - per-lcore queue configuration
*
*@return values:
*  0 on success; exits on failure
*
**********************************************************************/
static int odp_init_ports(unsigned short nb_ports, struct odp_user_config  *user_conf, struct odp_lcore_config *lcore_conf)
{
    int ret;
    uint8_t portid; 
    uint16_t queueid;
    unsigned lcore_id;
    uint8_t nb_rx_queue =0;
    uint8_t max_rx_queue =0;
    uint8_t queue, socketid;
    uint32_t n_tx_queue, nb_lcores, nb_mbuf;
    struct ether_addr eth_addr;
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf *txconf;


    nb_lcores = rte_lcore_count();
    n_tx_queue = nb_lcores;
    if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
    	n_tx_queue = MAX_TX_QUEUE_PER_PORT;

    printf("\nStart to Init port \n" );

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) 
    {
        /* skip ports that are not enabled */
        if ((user_conf->port_mask & (1 << portid)) == 0) 
        {
            printf("\nSkipping disabled port %d\n", portid);
            continue;
        }

        /* init port */
        printf("\t port %d:  \n", portid );

        nb_rx_queue = odp_get_port_rx_queues_nb(portid, user_conf);

        if(max_rx_queue < nb_rx_queue)
            max_rx_queue = nb_rx_queue;
        
        printf("\t Creating queues: rx queue number=%d tx queue number=%u... \n", nb_rx_queue, (unsigned)n_tx_queue );

        ret = rte_eth_dev_configure(portid, nb_rx_queue, (uint16_t)n_tx_queue, &odp_port_conf);
        if (ret < 0)
        	rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", ret, portid);

        rte_eth_macaddr_get(portid, &eth_addr);

        printf ("\t MAC Address:%02X:%02X:%02X:%02X:%02X:%02X \n", 
        	eth_addr.addr_bytes[0], eth_addr.addr_bytes[1],
        	eth_addr.addr_bytes[2], eth_addr.addr_bytes[3],
        	eth_addr.addr_bytes[4], eth_addr.addr_bytes[5]);

        /* init one TX queue per couple (lcore,port) */
        queueid = 0;
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
        {
            if (rte_lcore_is_enabled(lcore_id) == 0)
            	continue;

            if (user_conf->numa_on)
            	socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
            	socketid = 0;

            printf("\t lcore id:%u, tx queue id:%d, socket id:%d \n", lcore_id, queueid, socketid);
            
            ret = rte_eth_tx_queue_setup(portid, queueid, ODP_TX_DESC_DEFAULT, socketid, &odp_tx_conf);
            if (ret < 0)
            	rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " "port=%d\n", ret, portid);

            lcore_conf[lcore_id].tx_queue_id[portid] = queueid;
            
            queueid++;
        }
        
        printf("\n");

    }

    nb_mbuf = RTE_MAX((nb_ports*nb_rx_queue*ODP_RX_DESC_DEFAULT +	
				nb_ports*nb_lcores*MAX_PKT_BURST +					
				nb_ports*n_tx_queue*ODP_TX_DESC_DEFAULT +	
				nb_lcores*MEMPOOL_CACHE_SIZE), (unsigned)8192);
				
    /* init memory */
    ret = odp_init_mbuf_pool(nb_mbuf, user_conf);
    if (ret < 0)
    	rte_exit(EXIT_FAILURE, "init_mem failed\n");

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) 
    {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;
        
        printf("\nInitializing rx queues on lcore %u ... \n", lcore_id );

        /* init RX queues */
        for(queue = 0; queue < lcore_conf[lcore_id].n_rx_queue; ++queue) 
        {
            portid = lcore_conf[lcore_id].rx_queue_list[queue].port_id;
            queueid = lcore_conf[lcore_id].rx_queue_list[queue].queue_id;

            if (user_conf->numa_on)
                socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
                socketid = 0;

            printf("port id:%d, rx queue id: %d, socket id:%d \n", portid, queueid, socketid);

            ret = rte_eth_rx_queue_setup(portid, queueid, ODP_RX_DESC_DEFAULT, socketid, &odp_rx_conf, odp_pktmbuf_pool[socketid]);
            if (ret < 0)
                rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," "port=%d\n", ret, portid);
        }
    }

    return 0;
}
Example #24
int
main(int argc, char **argv)
{
	int ret;
	unsigned nb_ports;
	unsigned int lcore_id, last_lcore_id, master_lcore_id;
	uint8_t port_id;
	uint8_t nb_ports_available;
	struct worker_thread_args worker_args = {NULL, NULL};
	struct send_thread_args send_args = {NULL, NULL};
	struct rte_ring *rx_to_workers;
	struct rte_ring *workers_to_tx;

	/* catch ctrl-c so we can print on exit */
	signal(SIGINT, int_handler);

	/* Initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	argc -= ret;
	argv += ret;

	/* Parse the application specific arguments */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;

	/* Check if we have enough cores */
	if (rte_lcore_count() < 3)
		rte_exit(EXIT_FAILURE, "Error, This application needs at "
				"least 3 logical cores to run:\n"
				"1 lcore for packet RX\n"
				"1 lcore for packet TX\n"
				"and at least 1 lcore for worker threads\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
	if (nb_ports != 1 && (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
				"when using a single port\n");

	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
			MBUF_POOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	nb_ports_available = nb_ports;

	/* initialize all ports */
	for (port_id = 0; port_id < nb_ports; port_id++) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			nb_ports_available--;
			continue;
		}
		/* init port */
		printf("Initializing port %u... done\n", (unsigned) port_id);

		if (configure_eth_port(port_id) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
					port_id);
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	/* Create rings for inter core communication */
	rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE, rte_socket_id(),
			RING_F_SP_ENQ);
	if (rx_to_workers == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE, rte_socket_id(),
			RING_F_SC_DEQ);
	if (workers_to_tx == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	if (!disable_reorder) {
		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
				REORDER_BUFFER_SIZE);
		if (send_args.buffer == NULL)
			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	}

	last_lcore_id   = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();

	worker_args.ring_in  = rx_to_workers;
	worker_args.ring_out = workers_to_tx;

	/* Start worker_thread() on all the available slave cores but the last 1 */
	for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id); lcore_id++)
		if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id)
			rte_eal_remote_launch(worker_thread, (void *)&worker_args,
					lcore_id);

	if (disable_reorder) {
		/* Start tx_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
				last_lcore_id);
	} else {
		send_args.ring_in = workers_to_tx;
		/* Start send_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)send_thread,
				(void *)&send_args, last_lcore_id);
	}

	/* Start rx_thread() on the master core */
	rx_thread(rx_to_workers);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	print_stats();
	return 0;
}
Example #25
int32_t populateNodeInfo (void)
{
    int32_t i = 0, socketId = -1, lcoreIndex = 0, enable = 0;
    uint8_t coreCount, portCount;
    struct rte_eth_dev_info devInfo;

    /* fetch total lcore count under DPDK */
    coreCount = rte_lcore_count();
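    /* Note: looping 0..coreCount-1 assumes lcore ids are contiguous from 0;
     * with a sparse coremask, enabled lcores above coreCount would be missed
     * and the rte_panic below could fire for a disabled id. */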
    for (i = 0; i < coreCount; i++)
    {
       socketId = rte_lcore_to_socket_id(i);
       lcoreIndex = rte_lcore_index(i);
       enable = rte_lcore_is_enabled(i);

       //printf ("\n Logical %d Physical %d Socket %d Enabled %d \n",
       //        i, lcoreIndex, socketId, enable);

       if (likely(enable)) {
           /* classify the lcore info per NUMA node */
           numaNodeInfo[socketId].lcoreAvail = numaNodeInfo[socketId].lcoreAvail | (1 << lcoreIndex);
           numaNodeInfo[socketId].lcoreTotal += 1;
       }
       else {
            rte_panic("ERROR: Lcore %d Socket %d not enabled\n", lcoreIndex, socketId);
            exit(EXIT_FAILURE);
       }
    }

    /* Create mempool per numa node based on interface available */
    portCount = rte_eth_dev_count();
    for (i =0; i < portCount; i++)
    {
        rte_eth_dev_info_get(i, &devInfo);
        printf("\n Inteface %d", i);
        printf("\n - driver: %s", devInfo.driver_name);
        printf("\n - if_index: %d", devInfo.if_index);
        if (devInfo.pci_dev) {
            printf("\n - PCI INFO ");
            printf("\n -- ADDR - domain:bus:devid:function %x:%x:%x:%x",
                  devInfo.pci_dev->addr.domain,
                  devInfo.pci_dev->addr.bus,
                  devInfo.pci_dev->addr.devid,
                  devInfo.pci_dev->addr.function);
            printf("\n == PCI ID - vendor:device:sub-vendor:sub-device %x:%x:%x:%x",
                  devInfo.pci_dev->id.vendor_id,
                  devInfo.pci_dev->id.device_id,
                  devInfo.pci_dev->id.subsystem_vendor_id,
                  devInfo.pci_dev->id.subsystem_device_id);
            printf("\n -- numa node: %d", devInfo.pci_dev->numa_node);
        }

        socketId = (devInfo.pci_dev->numa_node == -1)?0:devInfo.pci_dev->numa_node;
        numaNodeInfo[socketId].intfAvail = numaNodeInfo[socketId].intfAvail | (1 << i);
        numaNodeInfo[socketId].intfTotal += 1;
    }

    /* allocate mempool for numa which has NIC interfaces */
    for (i = 0; i < MAX_NUMANODE; i++)
    {
        if (likely(numaNodeInfo[i].intfAvail)) {
            /* ToDo: per interface */
            uint8_t portIndex = 0;
            char mempoolName[25];

            /* create mempool for TX */
            sprintf(mempoolName, "mbuf_pool-%d-%d-tx", i, portIndex);
            numaNodeInfo[i].tx[portIndex] = rte_mempool_create(
                        mempoolName, NB_MBUF,
                        MBUF_SIZE, 64,
                        sizeof(struct rte_pktmbuf_pool_private),
                        rte_pktmbuf_pool_init, NULL,
                        rte_pktmbuf_init, NULL,
                        i,/*SOCKET_ID_ANY*/
                         0/*MEMPOOL_F_SP_PUT*/);
            if (unlikely(numaNodeInfo[i].tx[portIndex] == NULL)) {
                rte_panic("\n ERROR: failed to get mem-pool for tx on node %d intf %d\n", i, portIndex);
                exit(EXIT_FAILURE);
            }

            /* create mempool for RX */
            sprintf(mempoolName, "mbuf_pool-%d-%d-rx", i, portIndex);
            numaNodeInfo[i].rx[portIndex] = rte_mempool_create(
                        mempoolName, NB_MBUF,
                        MBUF_SIZE, 64,
                        sizeof(struct rte_pktmbuf_pool_private),
                        rte_pktmbuf_pool_init, NULL,
                        rte_pktmbuf_init, NULL,
                        i,/*SOCKET_ID_ANY*/
                         0/*MEMPOOL_F_SP_PUT*/);
            if (unlikely(numaNodeInfo[i].rx[portIndex] == NULL)) {
                rte_panic("\n ERROR: failed to get mem-pool for rx on node %d intf %d\n", i, portIndex);
                exit(EXIT_FAILURE);
            }

        }
    }

    return 0;
}
Example #26
void
pktj_stats_display(struct cmdline *cl, int option, int delay)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	uint64_t total_packets_kni_tx, total_packets_kni_rx,
	    total_packets_kni_dropped;
	uint64_t total_packets_ratel_dropped, total_packets_acl_dropped;
	unsigned lcoreid;
	time_t _time;
	const char *fmt_pre, *fmt_lcore, *fmt_mid, *fmt_total;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;
	total_packets_kni_tx = 0;
	total_packets_kni_rx = 0;
	total_packets_kni_dropped = 0;
	total_packets_acl_dropped = 0;
	total_packets_ratel_dropped = 0;

	if (option == CMD_STATS_JSON) { // json
		fmt_pre = STATS_JSON_PRE;
		fmt_lcore = STATS_JSON_LCORE;
		fmt_mid = STATS_JSON_MID;
		fmt_total = STATS_JSON_TOTAL;
	} else if (option == CMD_STATS_CSV) { // csv
		fmt_pre = STATS_CSV_PRE;
		fmt_lcore = STATS_CSV_LCORE;
		fmt_mid = STATS_CSV_MID;
		fmt_total = STATS_CSV_TOTAL;
	} else {
		fmt_pre = STATS_HUM_PRE;
		fmt_lcore = STATS_HUM_LCORE;
		fmt_mid = STATS_HUM_MID;
		fmt_total = STATS_HUM_TOTAL;
	}

	_time = time(NULL);

	for (lcoreid = 0; lcoreid < CMDLINE_MAX_CLIENTS; lcoreid++) {
		if (cmdline_clients[RTE_PER_LCORE(g_socket_id)][lcoreid].cl ==
		    cl) {
			cmdline_clients[RTE_PER_LCORE(g_socket_id)][lcoreid]
			    .csv_delay = delay;
			cmdline_clients[RTE_PER_LCORE(g_socket_id)][lcoreid]
			    .delay_timer = _time;
			break;
		}
	}

	cmdline_printf(cl, "%s", fmt_pre);

	for (lcoreid = 0; lcoreid < RTE_MAX_LCORE; lcoreid++) {
		if (!rte_lcore_is_enabled(lcoreid))
			continue;

		cmdline_printf(
		    cl, fmt_lcore, _time, lcoreid, stats[lcoreid].port_id,
		    stats[lcoreid].nb_iteration_looped, stats[lcoreid].nb_tx,
		    stats[lcoreid].nb_rx, stats[lcoreid].nb_kni_tx,
		    stats[lcoreid].nb_kni_rx, stats[lcoreid].nb_dropped,
		    stats[lcoreid].nb_kni_dropped,
		    stats[lcoreid].nb_acl_dropped,
		    stats[lcoreid].nb_ratel_dropped);

		total_packets_dropped += stats[lcoreid].nb_dropped;
		total_packets_tx += stats[lcoreid].nb_tx;
		total_packets_rx += stats[lcoreid].nb_rx;
		total_packets_kni_tx += stats[lcoreid].nb_kni_tx;
		total_packets_kni_rx += stats[lcoreid].nb_kni_rx;
		total_packets_kni_dropped += stats[lcoreid].nb_kni_dropped;
		total_packets_acl_dropped += stats[lcoreid].nb_acl_dropped;
		total_packets_ratel_dropped += stats[lcoreid].nb_ratel_dropped;
	}

	// add a null object to end the array
	cmdline_printf(cl, "%s", fmt_mid);

	cmdline_printf(cl, fmt_total, total_packets_tx, total_packets_rx,
		       total_packets_kni_tx, total_packets_kni_rx,
		       total_packets_dropped, total_packets_kni_dropped,
		       total_packets_acl_dropped, total_packets_ratel_dropped);
}
Example #27
int
main(int argc, char **argv)
{
	uint32_t i;
	int32_t ret;

	printf("\n%s %s\n", wr_copyright_msg(), wr_powered_by()); fflush(stdout);

	wr_scrn_setw(1);/* Reset the window size */

	/* call before the rte_eal_init() */
	(void)rte_set_application_usage_hook(pktgen_usage);

	memset(&pktgen, 0, sizeof(pktgen));

	pktgen.flags            = PRINT_LABELS_FLAG;
	pktgen.ident            = 0x1234;
	pktgen.nb_rxd           = DEFAULT_RX_DESC;
	pktgen.nb_txd           = DEFAULT_TX_DESC;
	pktgen.nb_ports_per_page = DEFAULT_PORTS_PER_PAGE;

	if ( (pktgen.l2p = wr_l2p_create()) == NULL)
		pktgen_log_panic("Unable to create l2p");

	pktgen.portdesc_cnt = wr_get_portdesc(pktgen.portlist, pktgen.portdesc, RTE_MAX_ETHPORTS, 0);

	/* Initialize the screen and logging */
	pktgen_init_log();
	pktgen_cpu_init();

	/* initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;
	argc -= ret;
	argv += ret;

	pktgen.hz = rte_get_timer_hz();	/* Get the starting HZ value. */

	/* parse application arguments (after the EAL ones) */
	ret = pktgen_parse_args(argc, argv);
	if (ret < 0)
		return -1;

	pktgen_init_screen((pktgen.flags & ENABLE_THEME_FLAG) ? THEME_ON : THEME_OFF);

	rte_delay_ms(100);	/* Wait a bit for things to settle. */

	wr_print_copyright(PKTGEN_APP_NAME, PKTGEN_CREATED_BY);

	lua_newlib_add(_lua_openlib);

	/* Open the Lua script handler. */
	if ( (pktgen.L = lua_create_instance()) == NULL) {
		pktgen_log_error("Failed to open Lua pktgen support library");
		return -1;
	}

	pktgen_log_info(">>> Packet Burst %d, RX Desc %d, TX Desc %d, mbufs/port %d, mbuf cache %d",
	                DEFAULT_PKT_BURST, DEFAULT_RX_DESC, DEFAULT_TX_DESC, MAX_MBUFS_PER_PORT, MBUF_CACHE_SIZE);

	/* Configure and initialize the ports */
	pktgen_config_ports();

	pktgen_log_info("");
	pktgen_log_info("=== Display processing on lcore %d", rte_lcore_id());

	/* launch per-lcore init on every lcore except the master */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if ( (i == rte_get_master_lcore()) || !rte_lcore_is_enabled(i) )
			continue;
		ret = rte_eal_remote_launch(pktgen_launch_one_lcore, NULL, i);
		if (ret != 0)
			pktgen_log_error("Failed to start lcore %d, return %d", i, ret);
	}
	rte_delay_ms(1000);	/* Wait for the lcores to start up. */

	/* Disable printing log messages of level info and below to screen, */
	/* erase the screen and start updating the screen again. */
	pktgen_log_set_screen_level(LOG_LEVEL_WARNING);
	wr_scrn_erase(pktgen.scrn->nrows);

	wr_logo(3, 16, PKTGEN_APP_NAME);
	wr_splash_screen(3, 16, PKTGEN_APP_NAME, PKTGEN_CREATED_BY);

	wr_scrn_resume();

	pktgen_redisplay(1);

	rte_timer_setup();

	if (pktgen.flags & ENABLE_GUI_FLAG) {
		if (!wr_scrn_is_paused() ) {
			wr_scrn_pause();
			wr_scrn_cls();
			wr_scrn_setw(1);
			wr_scrn_pos(pktgen.scrn->nrows, 1);
		}

		lua_init_socket(pktgen.L, &pktgen.thread, pktgen.hostname, pktgen.socket_port);
	}

	pktgen_cmdline_start();

	execute_lua_close(pktgen.L);
	pktgen_stop_running();

	wr_scrn_pause();

	wr_scrn_setw(1);
	wr_scrn_printf(100, 1, "\n");	/* Put the cursor on the last row and do a newline. */

	/* Wait for all of the cores to stop running and exit. */
	rte_eal_mp_wait_lcore();

	return 0;
}
Example #28
static int dpdk_main(int port_id, int argc, char* argv[])
{
    struct rte_eth_dev_info dev_info;
    unsigned nb_queues;
    FILE* lfile;
    uint8_t core_id;
    int ret;

    printf("In dpdk_main\n");

    // Open the log file and redirect the rte log stream to it
    lfile = fopen("./vrouter.log", "w");
    if (!lfile) {
        printf("Failed to open ./vrouter.log\n");
        return -1;
    }
    rte_openlog_stream(lfile);

    ret = rte_eal_init(argc, argv);
    if (ret < 0) {
		log_crit( "Invalid EAL parameters\n");
        return -1;
    }

    log_info( "Programming cmd rings now!\n");
    rx_event_fd = (int *) malloc(sizeof(int) * rte_lcore_count());
    if (!rx_event_fd) {
        log_crit("Failed to allocate memory for rx event fd arrays\n");
        return -ENOMEM;
    }

    rte_eth_macaddr_get(port_id, &port_eth_addr);
    log_info("Port%d: MAC Address: ", port_id);
    print_ethaddr(&port_eth_addr);


    /* Determine the number of RX/TX pairs supported by NIC */
    rte_eth_dev_info_get(port_id, &dev_info);

    dev_info.pci_dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
    dev_info.pci_dev->intr_handle.max_intr =
                    dev_info.max_rx_queues + dev_info.max_tx_queues;
    ret = rte_intr_efd_enable(&dev_info.pci_dev->intr_handle,
            dev_info.max_rx_queues);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to enable rx interrupts\n");
    }

    ret = rte_intr_enable(&dev_info.pci_dev->intr_handle);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to enable interrupts\n");
    }

    ret = rte_eth_dev_configure(port_id, dev_info.max_rx_queues,
                dev_info.max_tx_queues, &port_conf);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to configure ethernet device\n");
    }

    /* For each RX/TX pair */
    nb_queues = dev_info.max_tx_queues;
    for (core_id = 0; core_id < nb_queues; core_id++) {
        char s[64];
        if (rte_lcore_is_enabled(core_id) == 0)
            continue;

        /* NUMA socket number */
        unsigned socketid = rte_lcore_to_socket_id(core_id);
        if (socketid >= NB_SOCKETS) {
            log_crit( "Socket %d of lcore %u is out of range %d\n",
				socketid, core_id, NB_SOCKETS);
            return -EBADF;
        }

        /* Create memory pool */
        if (pktmbuf_pool[socketid] == NULL) {
            log_info("Creating mempool on %d of ~%lx bytes\n",
                            socketid, NB_MBUF * MBUF_SIZE);
            printf("Creating mempool on %d of ~%lx bytes\n",
                        socketid, NB_MBUF * MBUF_SIZE);
            snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
            pktmbuf_pool[socketid] = rte_mempool_create(s,
                                                        NB_MBUF,
                                                        MBUF_SIZE,
                                                        MEMPOOL_CACHE_SIZE,
                                                        PKTMBUF_PRIV_SZ,
                                                        rte_pktmbuf_pool_init,
                                                        NULL,
                                                        rte_pktmbuf_init,
                                                        NULL,
                                                        socketid,
                                                        0);
            if (!pktmbuf_pool[socketid]) {
                log_crit( "Cannot init mbuf pool on socket %d\n", socketid);
                return -ENOMEM;
            }
        }

        /* Setup the TX queue */
        ret = rte_eth_tx_queue_setup(port_id,
                                     core_id,
                                     RTE_TX_DESC_DEFAULT,
                                     socketid,
                                     &tx_conf);
        if (ret < 0) {
            log_crit( "Cannot initialize TX queue (%d)\n", core_id);
            return -ENODEV;
        }

        /* Setup the RX queue */
        ret = rte_eth_rx_queue_setup(port_id,
                                     core_id,
                                     RTE_RX_DESC_DEFAULT,
                                     socketid,
                                     &rx_conf,
                                     pktmbuf_pool[socketid]);
        if (ret < 0) {
            log_crit( "Cannot initialize RX queue (%d)\n", core_id);
            return -ENODEV;
        }

        /* Create the event fds for event notification */
        lcore_cmd_event_fd[core_id] = eventfd(0, 0);
    }

    // Start the eth device
    ret = rte_eth_dev_start(port_id);
    if (ret < 0) {
        log_crit( "rte_eth_dev_start: err=%d, port=%d\n", ret, core_id);
        return -ENODEV;
    }

    // Put the device in promiscuous mode
    rte_eth_promiscuous_enable(port_id);

    // Wait for link up
    //check_all_ports_link_status(1, 1u << port_id);

    log_info( "Starting engines on every core\n");

    rte_eal_mp_remote_launch(engine_loop, &dev_info, CALL_MASTER);

    return 0;
}
Example #29
int
acl_init(int is_ipv4)
{
	unsigned int i;
	struct rte_acl_rule *acl_base_ipv4 = NULL, *acl_base_ipv6 = NULL;
	unsigned int acl_num_ipv4 = 0, acl_num_ipv6 = 0;
	struct rte_acl_ctx *acl_ctx;

	if (check_acl_config() != 0) {
		acl_log("Failed to get valid ACL options\n");
		return -1;
	}

	dump_acl_config();

	if (is_ipv4) {
		/* Load  rules from the input file */
		if (add_rules(acl_parm_config.rule_ipv4_name, &acl_base_ipv4,
			      &acl_num_ipv4, sizeof(struct acl4_rule),
			      &parse_cb_ipv4vlan_rule) < 0) {
			acl_log("Failed to add ipv4 rules\n");
			return -1;
		}

		acl_log("IPv4 ACL entries %u:\n", acl_num_ipv4);
		dump_ipv4_rules((struct acl4_rule *)acl_base_ipv4, acl_num_ipv4,
				1);
		for (i = 0; i < NB_SOCKETS; i++) {
			if ((acl_ctx = setup_acl(acl_base_ipv4, acl_num_ipv4, 0,
						 i)) != NULL) {
				ipv4_acx[i] = acl_ctx;
			} else if (acl_num_ipv4 == 0) {
				ipv4_acx[i] = NULL;
			} else {
				acl_log("setup_acl failed for ipv4 with "
					"socketid %d, keeping previous rules "
					"for that socket\n",
					i);
			}
		}
#ifdef L3FWDACL_DEBUG
		if (acl_base_ipv4) {
			acl_config.rule_ipv4 =
			    (struct acl4_rule *)acl_base_ipv4;
		}
#else
		free(acl_base_ipv4);
#endif
	} else {
		if (add_rules(acl_parm_config.rule_ipv6_name, &acl_base_ipv6,
			      &acl_num_ipv6, sizeof(struct acl6_rule),
			      &parse_cb_ipv6_rule) < 0) {
			acl_log("Failed to add ipv6 rules\n");
			return -1;
		}

		acl_log("IPv6 ACL entries %u:\n", acl_num_ipv6);
		dump_ipv6_rules((struct acl6_rule *)acl_base_ipv6, acl_num_ipv6,
				1);
		for (i = 0; i < NB_SOCKETS; i++) {
			if ((acl_ctx = setup_acl(acl_base_ipv6, acl_num_ipv6, 1,
						 i)) != NULL) {
				ipv6_acx[i] = acl_ctx;
			} else if (acl_num_ipv6 == 0) {
				ipv6_acx[i] = NULL;
			} else {
				acl_log("setup_acl failed for ipv6 with "
					"socketid %d, keeping previous rules "
					"for that socket\n",
					i);
			}
		}
#ifdef L3FWDACL_DEBUG
		if (acl_base_ipv6) {
			acl_config.rule_ipv6 =
			    (struct acl6_rule *)acl_base_ipv6;
		}
#else
		free(acl_base_ipv6);
#endif
	}

	int socketid, lcore_id;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socketid = rte_lcore_to_socket_id(lcore_id);
		else
			socketid = 0;

		rte_atomic64_cmpset(
		    (uintptr_t *)&lcore_conf[lcore_id].new_acx_ipv4,
		    (uintptr_t)lcore_conf[lcore_id].new_acx_ipv4,
		    (uintptr_t)ipv4_acx[socketid]);
		rte_atomic64_cmpset(
		    (uintptr_t *)&lcore_conf[lcore_id].new_acx_ipv6,
		    (uintptr_t)lcore_conf[lcore_id].new_acx_ipv6,
		    (uintptr_t)ipv6_acx[socketid]);
	}

	return 0;
}
Example #30
static int
parse_arg_tx(const char *arg)
{
	const char *p0 = arg, *p = arg;
	uint32_t n_tuples;

	if (strnlen(arg, APP_ARG_TX_MAX_CHARS + 1) == APP_ARG_TX_MAX_CHARS + 1) {
		return -1;
	}

	n_tuples = 0;
	while ((p = strchr(p0,'(')) != NULL) {
		struct app_lcore_params *lp;
		uint32_t port, lcore, i;

		p0 = strchr(p++, ')');
		if ((p0 == NULL) ||
		    (str_to_unsigned_vals(p, p0 - p, ',', 2, &port, &lcore) !=  2)) {
			return -2;
		}

		/* Enable port and queue for later initialization */
		if (port >= APP_MAX_NIC_PORTS) {
			return -3;
		}
		if (app.nic_tx_port_mask[port] != 0) {
			return -4;
		}
		app.nic_tx_port_mask[port] = 1;

		/* Check and assign (port, queue) to I/O lcore */
		if (rte_lcore_is_enabled(lcore) == 0) {
			return -5;
		}

		if (lcore >= APP_MAX_LCORES) {
			return -6;
		}
		lp = &app.lcore_params[lcore];
		if (lp->type == e_APP_LCORE_WORKER) {
			return -7;
		}
		lp->type = e_APP_LCORE_IO;
		for (i = 0; i < lp->io.tx.n_nic_ports; i ++) {
			if (lp->io.tx.nic_ports[i] == port) {
				return -8;
			}
		}
		if (lp->io.tx.n_nic_ports >= APP_MAX_NIC_TX_PORTS_PER_IO_LCORE) {
			return -9;
		}
		lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = (uint8_t) port;
		lp->io.tx.n_nic_ports ++;

		n_tuples ++;
		if (n_tuples > APP_ARG_TX_MAX_TUPLES) {
			return -10;
		}
	}

	if (n_tuples == 0) {
		return -11;
	}

	return 0;
}
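
The examples above repeat one pattern: walk every possible lcore ID, skip the ones rte_lcore_is_enabled() rejects, map each survivor to its NUMA socket with rte_lcore_to_socket_id(), and allocate per-socket resources exactly once. The sketch below distills that pattern as seen in Examples #4, #17, and #28; it is illustrative only, and the names MAX_SOCKETS, pools, and create_per_socket_pools are hypothetical rather than taken from any example above.

/* Minimal sketch of the shared pattern: one mbuf pool per NUMA socket
 * that hosts at least one enabled lcore. Hypothetical names throughout. */
#include <stdio.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define MAX_SOCKETS 8 /* hypothetical upper bound on NUMA sockets */

static struct rte_mempool *pools[MAX_SOCKETS]; /* one pool per socket */

static int create_per_socket_pools(unsigned nb_mbuf)
{
	unsigned lcore_id;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		unsigned socket;

		if (!rte_lcore_is_enabled(lcore_id))
			continue;

		socket = rte_lcore_to_socket_id(lcore_id);
		if (socket >= MAX_SOCKETS)
			return -1;

		/* allocate the pool the first time this socket is seen */
		if (pools[socket] == NULL) {
			char name[RTE_MEMPOOL_NAMESIZE];

			snprintf(name, sizeof(name), "pool_socket_%u", socket);
			pools[socket] = rte_pktmbuf_pool_create(name, nb_mbuf,
					256 /* cache */, 0 /* priv size */,
					RTE_MBUF_DEFAULT_BUF_SIZE, socket);
			if (pools[socket] == NULL) {
				printf("cannot create pool on socket %u\n", socket);
				return -rte_errno;
			}
		}
	}
	return 0;
}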