Example #1
/*
 * Check that every SLAVE lcore is in WAIT state, then call
 * rte_eal_remote_launch() for all of them. If call_master is true
 * (set to CALL_MASTER), also call the function on the master lcore.
 */
int
rte_eal_mp_remote_launch(int (*f)(void *), void *arg,
			 enum rte_rmt_call_master_t call_master)
{
	int lcore_id;
	int master = rte_get_master_lcore();

	/* check state of lcores */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (lcore_config[lcore_id].state != WAIT)
			return -EBUSY;
	}

	/* send messages to cores */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(f, arg, lcore_id);
	}

	if (call_master == CALL_MASTER) {
		lcore_config[master].ret = f(arg);
		lcore_config[master].state = FINISHED;
	}

	return 0;
}
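
Note: a minimal caller-side sketch of how this API is typically driven (illustration only, not from any project below; main_loop is a placeholder): launch the function on every lcore, master included, then collect all lcores back into WAIT state.

static int
main_loop(void *arg)
{
	(void)arg;
	printf("hello from lcore %u\n", rte_lcore_id());
	return 0;
}

/* ... after rte_eal_init() ... */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
rte_eal_mp_wait_lcore();	/* blocks until every slave lcore is done */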
Example #2
static void
l2sw_main_process(struct lcore_env *env)
{
    struct rte_mbuf *pkt_burst[MAX_PKT_BURST];
    uint8_t n_ports = rte_eth_dev_count();
    unsigned lcore_id = rte_lcore_id();
    uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S
                               * BURST_TX_DRAIN_US;

    //RTE_LOG(INFO, MARIO, "[%u] Starting main processing.\n", lcore_id);

    prev_tsc = 0;
    timer_tsc = 0;
    while(1) {
        cur_tsc = rte_rdtsc();

        diff_tsc = cur_tsc - prev_tsc;
        if (unlikely(diff_tsc > drain_tsc)) {
            uint8_t port_id;
            for(port_id = 0; port_id < n_ports; port_id++) {
                if (env->tx_mbufs[port_id].len == 0)
                    continue;
                l2sw_send_burst(env, port_id, env->tx_mbufs[port_id].len);
                env->tx_mbufs[port_id].len = 0;
            }

            /* if timer is enabled */
            if (timer_period > 0) {
                /* advance the timer */
                timer_tsc += diff_tsc;
                /* if timer has reached its timeout */
                if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
                    /* do this only on master core */
                    if (lcore_id == rte_get_master_lcore()) {
                        //print_stats(env);
                        /* reset the timer */
                        timer_tsc = 0;
                    }
                }
            }
            prev_tsc = cur_tsc;
        }

        /* RX */
        uint8_t port_id;
        for (port_id = 0; port_id < n_ports; port_id++) {
            unsigned n_rx = rte_eth_rx_burst(port_id, lcore_id,
                                             pkt_burst, MAX_PKT_BURST);
            if (n_rx != 0) {
                //RTE_LOG(INFO, MARIO, "[%u-%u] %u packet(s) came.\n",
                //        lcore_id, port_id,  n_rx);
                __sync_fetch_and_add(&port_statistics[port_id].rx, n_rx);
            }

            ether_in(env, pkt_burst, n_rx, port_id);
        }
    }
}
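
Note: a quick sanity check of the drain_tsc arithmetic above (illustrative numbers, not the project's): with a 2 GHz TSC, rte_get_tsc_hz() returns 2,000,000,000, the rounded-up division (hz + US_PER_S - 1) / US_PER_S gives 2,000 cycles per microsecond, and with BURST_TX_DRAIN_US = 100 that makes drain_tsc = 200,000 cycles, i.e. pending TX buffers are flushed at least every 100 µs.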
Example #3
static int
register_workers(void)
{
    unsigned lcore;
    struct worker_thread *worker;
    struct worker_thread *prev_worker;

    worker = malloc(sizeof(struct worker_thread));
    if (worker == NULL) {
        perror("worker_thread malloc");
        return -1;
    }

    memset(worker, 0, sizeof(struct worker_thread));
    worker->lcore = rte_get_master_lcore();

    g_workers = worker;
    g_num_workers = 1;

    RTE_LCORE_FOREACH_SLAVE(lcore) {
        prev_worker = worker;
        worker = malloc(sizeof(struct worker_thread));
        if (worker == NULL) {
            perror("worker_thread malloc");
            return -1;
        }

        memset(worker, 0, sizeof(struct worker_thread));
        worker->lcore = lcore;
        prev_worker->next = worker;
        g_num_workers++;
    }

    return 0;
}
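
Note: a hedged sketch of how such a list is typically consumed afterwards (work_fn is a placeholder, not part of this example): the master's entry sits at the head of the list, so it runs in place while the slave entries are launched remotely.

struct worker_thread *w;

for (w = g_workers->next; w != NULL; w = w->next)
    rte_eal_remote_launch(work_fn, w, w->lcore);

work_fn(g_workers);       /* the master lcore runs its own share in place */
rte_eal_mp_wait_lcore();  /* wait for the slave lcores to finish */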
Example #4
int main(int argc, char **argv)
{
    int c;
    int ret;
    int sp_sc;
    unsigned socket_io;

    /* initialize EAL first */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_panic("Cannot init EAL\n");

    argc -= ret;
    argv += ret;

    sp_sc = 1;
    bulk_size = 1;
    while ((c = getopt(argc, argv, "sm:b:w:")) != -1) {
        switch (c) {
        case 's':
            sp_sc = 1;
            break;
        case 'm':
            sp_sc = 0;
            nb_producers = atoi(optarg);
            break;
        case 'b':
            bulk_size = atoi(optarg);
            break;
        case 'w':
            work_cycles = atoi(optarg);
            break;
        case '?':
            break;
        }
    }

    setlocale(LC_NUMERIC, "");

    socket_io = rte_lcore_to_socket_id(rte_get_master_lcore());

    ring = rte_ring_create(ring_name,
                           ring_size, socket_io, RING_F_SP_ENQ | RING_F_SC_DEQ);

    if (ring == NULL) {
        rte_panic("Cannot create ring");
    }

    if (sp_sc) {
        printf("[MASTER] Single Producer/Consumer\n");
        printf("[MASTER] Bulk size: %d\n", bulk_size);
        driver_sp_sc();
    } else {
        printf("[MASTER] Number of Producers/Consumers: %d\n", nb_producers);
        printf("[MASTER] Bulk size: %d\n", bulk_size);
        driver_mp_mc();
    }
    rte_eal_mp_wait_lcore();

    return 0;
}
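
Note on the ring flags above: RING_F_SP_ENQ | RING_F_SC_DEQ permanently marks the ring as single-producer/single-consumer, so plain enqueue/dequeue calls skip the atomic compare-and-set path. A ring meant for the multi-producer path would be created with flags 0, or accessed through the explicit rte_ring_mp_enqueue()/rte_ring_mc_dequeue() variants; how driver_mp_mc() handles this is not shown here.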
Example #5
void
wr_port_matrix_dump(l2p_t * l2p)
{
    uint32_t pid, lid;
    uint8_t first, last;
    rxtx_t cnt;

    first = last = 0;

    printf("\n=== port to lcore mapping table (# lcores %d) ===\n",
           wr_lcore_mask(&first, &last));

    printf("   lcore: ");
    for (lid = first; lid <= last; lid++)
        printf("   %2d ", lid);
    printf("\n");

    for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
        cnt.rxtx = wr_get_map(l2p, pid, RTE_MAX_LCORE);
        if (cnt.rxtx == 0)
            continue;
        printf("port  %2d:", pid);
        for (lid = first; lid <= last; lid++) {
            cnt.rxtx = wr_get_map(l2p, pid, lid);
            if (lid == rte_get_master_lcore())
                printf(" %s:%s", " D", " T");
            else
                printf(" %2d:%2d", cnt.rx, cnt.tx);
        }
        cnt.rxtx = wr_get_map(l2p, pid, RTE_MAX_LCORE);
        printf(" = %2d:%2d\n", cnt.rx, cnt.tx);
    }

    printf("Total   :");
    for (lid = first; lid <= last; lid++) {
        cnt.rxtx = wr_get_map(l2p, RTE_MAX_ETHPORTS, lid);
        printf(" %2d:%2d", cnt.rx, cnt.tx);
    }

    printf("\n    Display and Timer on lcore %d, rx:tx counts per port/lcore\n\n",
           rte_get_master_lcore());
    fflush(stdout);
}
Example #6
/*
 * This function is called in the primary, i.e. the main test, to spawn off
 * secondary processes that run the actual multi-process tests. Uses a
 * fork() and exec pair.
 */
static int
run_secondary_instances(void)
{
	int ret = 0;
	char coremask[10];

#ifdef RTE_EXEC_ENV_LINUXAPP
	char tmp[PATH_MAX] = {0};
	char prefix[PATH_MAX] = {0};

	get_current_prefix(tmp, sizeof(tmp));

	snprintf(prefix, sizeof(prefix), "--file-prefix=%s", tmp);
#else
	const char *prefix = "";
#endif

	/* good case, using secondary */
	const char *argv1[] = {
			prgname, "-c", coremask, "--proc-type=secondary",
			prefix
	};
	/* good case, using auto */
	const char *argv2[] = {
			prgname, "-c", coremask, "--proc-type=auto",
			prefix
	};
	/* bad case, using invalid type */
	const char *argv3[] = {
			prgname, "-c", coremask, "--proc-type=ERROR",
			prefix
	};
#ifdef RTE_EXEC_ENV_LINUXAPP
	/* bad case, using invalid file prefix */
	const char *argv4[]  = {
			prgname, "-c", coremask, "--proc-type=secondary",
					"--file-prefix=ERROR"
	};
#endif

	snprintf(coremask, sizeof(coremask), "%x",
			(1 << rte_get_master_lcore()));

	ret |= launch_proc(argv1);
	ret |= launch_proc(argv2);

	ret |= !(launch_proc(argv3));
#ifdef RTE_EXEC_ENV_LINUXAPP
	ret |= !(launch_proc(argv4));
#endif

	return ret;
}
Example #7
int initDpdk(char* progname)
{
    int ret;
    static char *eal_args[] = {progname, "-c0xf", "-n1", "-m128", "--file-prefix=drone"};

    // TODO: read env var DRONE_RTE_EAL_ARGS to override defaults

    ret = rte_eal_init(sizeof(eal_args)/sizeof(char*), eal_args);
    if (ret < 0)
        rte_panic("Cannot init EAL\n");

    mbufPool_ = rte_mempool_create("DpktPktMbuf",
                                   16*1024, // # of mbufs
                                   2048, // sz of mbuf
                                   32,   // per-lcore cache sz
                                   sizeof(struct rte_pktmbuf_pool_private),
                                   rte_pktmbuf_pool_init, // pool ctor
                                   NULL, // pool ctor arg
                                   rte_pktmbuf_init, // mbuf ctor
                                   NULL, // mbuf ctor arg
                                   SOCKET_ID_ANY,
                                   0     // flags
                                  );

    if (!mbufPool_)
        rte_exit(EXIT_FAILURE, "cannot init mbuf pool\n");

    if (rte_pmd_init_all() < 0)
        rte_exit(EXIT_FAILURE, "cannot init pmd\n");

    if (rte_eal_pci_probe() < 0)
        rte_exit(EXIT_FAILURE, "cannot probe PCI\n");

    // init lcore information
    lcoreCount_ = rte_lcore_count();
    lcoreFreeMask_ = 0;
    for (int i = 0; i < lcoreCount_; i++) {
        if (rte_lcore_is_enabled(i) && (unsigned(i) != rte_get_master_lcore()))
            lcoreFreeMask_ |= (1 << i);
    }
    qDebug("lcore_count = %d, lcore_free_mask = 0x%llx",
           lcoreCount_, lcoreFreeMask_);

    // assign a lcore for Rx polling
    rxLcoreId_ = getFreeLcore();
    if (rxLcoreId_ < 0)
        rte_exit(EXIT_FAILURE, "not enough cores for Rx polling");

    stopRxPoll_ = false;

    return 0;
}
Example #8
static inline uint8_t efd_get_all_sockets_bitmask(void)
{
	uint8_t all_cpu_sockets_bitmask = 0;
	unsigned int i;
	unsigned int next_lcore = rte_get_master_lcore();
	const int val_true = 1, val_false = 0;
	for (i = 0; i < rte_lcore_count(); i++) {
		all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
		next_lcore = rte_get_next_lcore(next_lcore, val_false, val_true);
	}

	return all_cpu_sockets_bitmask;
}
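
Note: the wrap-around walk above visits every enabled lcore exactly once, starting from the master. The same result can be written with DPDK's iteration macro; a minimal equivalent sketch (illustration only):

uint8_t mask = 0;
unsigned int lcore;

RTE_LCORE_FOREACH(lcore)	/* every enabled lcore, master included */
	mask |= 1 << rte_lcore_to_socket_id(lcore);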
Example #9
static uint16_t
alloc_lcore(uint16_t socketid)
{
	unsigned lcore_id;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (LCORE_AVAIL != lcore_conf[lcore_id].status ||
		    lcore_conf[lcore_id].socketid != socketid ||
		    lcore_id == rte_get_master_lcore())
			continue;
		lcore_conf[lcore_id].status = LCORE_USED;
		lcore_conf[lcore_id].nb_ports = 0;
		return lcore_id;
	}

	return (uint16_t)-1;
}
Example #10
File: app.c Project: lkpdn/spdk
int
spdk_app_start(spdk_event_fn start_fn, void *arg1, void *arg2)
{
	spdk_event_t event;

	g_spdk_app.rc = 0;

	event = spdk_event_allocate(rte_get_master_lcore(), start_fn,
				    arg1, arg2, NULL);
	/* Queues up the event, but can't run it until the reactors start */
	spdk_event_call(event);

	/* This blocks until spdk_app_stop is called */
	spdk_reactors_start();

	return g_spdk_app.rc;
}
Example #11
File: reset.c Project: ceph/spdk
static int
register_workers(void)
{
	struct worker_thread *worker;

	worker = malloc(sizeof(struct worker_thread));
	if (worker == NULL) {
		perror("worker_thread malloc");
		return -1;
	}

	memset(worker, 0, sizeof(struct worker_thread));
	worker->lcore = rte_get_master_lcore();

	g_workers = worker;

	return 0;
}
Example #12
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (! rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
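
Note: the enabled-and-not-master filter in this loop is exactly what RTE_LCORE_FOREACH_SLAVE (used in several examples above) provides, so the body could equivalently read:

	nb_lc = 0;
	RTE_LCORE_FOREACH_SLAVE(i)
		fwd_lcores_cpuids[nb_lc++] = i;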
Example #13
void spdk_shutdown_nvmf_conns(void)
{
	struct spdk_nvmf_conn	*conn;
	int				i;

	pthread_mutex_lock(&g_conns_mutex);

	for (i = 0; i < g_max_conns; i++) {
		conn = &g_conns_array[i];
		if (!conn->is_valid)
			continue;
		SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Set conn %d state to exiting\n", i);
		conn->state = CONN_STATE_EXITING;
	}

	pthread_mutex_unlock(&g_conns_mutex);
	rte_timer_init(&g_shutdown_timer);
	rte_timer_reset(&g_shutdown_timer, rte_get_timer_hz() / 1000, PERIODICAL,
			rte_get_master_lcore(), spdk_nvmf_conn_check_shutdown, NULL);
}
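
Note: rte_timer_reset() only arms the timer on the chosen lcore (here the master); the callback actually fires from rte_timer_manage(), which that lcore must invoke periodically. A minimal sketch of the loop this relies on (an assumption about the surrounding application, not SPDK code):

while (!done) {
	rte_timer_manage();	/* runs any expired timers on this lcore */
	/* ... the rest of the master lcore's event loop ... */
}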
Example #14
static int
register_workers(void)
{
	unsigned lcore;
	struct worker_thread *worker;
	struct worker_thread *prev_worker;

	worker = malloc(sizeof(struct worker_thread));
	if (worker == NULL) {
		perror("worker_thread malloc");
		return -1;
	}
	memset(worker, 0, sizeof(struct worker_thread));
	worker->lcore = rte_get_master_lcore();

	g_workers = g_current_worker = worker;

	RTE_LCORE_FOREACH_SLAVE(lcore) {
		prev_worker = worker;
		worker = malloc(sizeof(struct worker_thread));
		if (worker == NULL) {
			perror("worker_thread malloc");
			return -1;
		}
		memset(worker, 0, sizeof(struct worker_thread));
		worker->lcore = lcore;
		prev_worker->next = worker;
	}

	return 0;
}
Example #15
/*
 * Initializes the EAL, parses the application arguments given on the
 * command line, calculates the mask of used cores and checks it against
 * the core mask the EAL was started with.
 */
int
app_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	const char *optname;
	char *prgname = argv[0];
	uint32_t i, nb_lcores;

	static struct option lgopts[] = {
		{ "pfc", 1, 0, 0 },
		{ "mst", 1, 0, 0 },
		{ "rsz", 1, 0, 0 },
		{ "bsz", 1, 0, 0 },
		{ "msz", 1, 0, 0 },
		{ "rth", 1, 0, 0 },
		{ "tth", 1, 0, 0 },
		{ "cfg", 1, 0, 0 },
		{ NULL,  0, 0, 0 }
	};

	/* initialize EAL first */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	argc -= ret;
	argv += ret;

	/* set en_US locale to print big numbers with ',' */
	setlocale(LC_NUMERIC, "en_US.utf-8");

	while ((opt = getopt_long(argc, argv, "i",
		lgopts, &option_index)) != EOF) {

			switch (opt) {
			case 'i':
				printf("Interactive-mode selected\n");
				interactive = 1;
				break;
			/* long options */
			case 0:
				optname = lgopts[option_index].name;
				if (str_is(optname, "pfc")) {
					ret = app_parse_flow_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid pipe configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "mst")) {
					app_master_core = (uint32_t)atoi(optarg);
					break;
				}
				if (str_is(optname, "rsz")) {
					ret = app_parse_ring_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid ring configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "bsz")) {
					ret = app_parse_burst_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid burst configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "msz")) {
					mp_size = atoi(optarg);
					if (mp_size <= 0) {
						RTE_LOG(ERR, APP, "Invalid mempool size %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "rth")) {
					ret = app_parse_rth_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid RX threshold configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "tth")) {
					ret = app_parse_tth_conf(optarg);
					if (ret) {
						RTE_LOG(ERR, APP, "Invalid TX threshold configuration %s\n", optarg);
						return -1;
					}
					break;
				}
				if (str_is(optname, "cfg")) {
					cfg_profile = optarg;
					break;
				}
				break;

			default:
				app_usage(prgname);
				return -1;
			}
	}

	/* check master core index validity: it must not already be in the used-core mask */
	if (app_used_core_mask & (1u << app_master_core)) {
		RTE_LOG(ERR, APP, "Master core index is not configured properly\n");
		app_usage(prgname);
		return -1;
	}
	app_used_core_mask |= 1u << app_master_core;

	if ((app_used_core_mask != app_eal_core_mask()) ||
			(app_master_core != rte_get_master_lcore())) {
		RTE_LOG(ERR, APP, "EAL core mask not configured properly, must be %" PRIx64
				" instead of %" PRIx64 "\n" , app_used_core_mask, app_eal_core_mask());
		return -1;
	}

	if (nb_pfc == 0) {
		RTE_LOG(ERR, APP, "Packet flow not configured!\n");
		app_usage(prgname);
		return -1;
	}

	/* sanity check for cores assignment */
	nb_lcores = app_cpu_core_count();

	for(i = 0; i < nb_pfc; i++) {
		if (qos_conf[i].rx_core >= nb_lcores) {
			RTE_LOG(ERR, APP, "pfc %u: invalid RX lcore index %u\n", i + 1,
					qos_conf[i].rx_core);
			return -1;
		}
		if (qos_conf[i].wt_core >= nb_lcores) {
			RTE_LOG(ERR, APP, "pfc %u: invalid WT lcore index %u\n", i + 1,
					qos_conf[i].wt_core);
			return -1;
		}
		uint32_t rx_sock = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		uint32_t wt_sock = rte_lcore_to_socket_id(qos_conf[i].wt_core);
		if (rx_sock != wt_sock) {
			RTE_LOG(ERR, APP, "pfc %u: RX and WT must be on the same socket\n", i + 1);
			return -1;
		}
		app_numa_mask |= 1 << rte_lcore_to_socket_id(qos_conf[i].rx_core);
	}

	return 0;
}
Example #16
static int
nvmf_allocate_reactor(uint64_t cpumask)
{
	int i, selected_core;
	enum rte_lcore_state_t state;
	int master_lcore = rte_get_master_lcore();
	int32_t num_pollers, min_pollers;

	cpumask &= spdk_app_get_core_mask();
	if (cpumask == 0) {
		return 0;
	}

	min_pollers = INT_MAX;
	selected_core = 0;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!((1ULL << i) & cpumask)) {
			continue;
		}

		/*
		 * DPDK returns WAIT for the master lcore instead of RUNNING.
		 * So we always treat the reactor on master core as RUNNING.
		 */
		if (i == master_lcore) {
			state = RUNNING;
		} else {
			state = rte_eal_get_lcore_state(i);
		}
		if (state == FINISHED) {
			rte_eal_wait_lcore(i);
		}

		switch (state) {
		case WAIT:
		case FINISHED:
			/* Idle cores have 0 pollers */
			if (0 < min_pollers) {
				selected_core = i;
				min_pollers = 0;
			}
			break;
		case RUNNING:
			/* This lcore is running, check how many pollers it already has */
			num_pollers = rte_atomic32_read(&g_num_connections[i]);

			/* Fill each lcore to target minimum, else select least loaded lcore */
			if (num_pollers < (SPDK_NVMF_DEFAULT_NUM_SESSIONS_PER_LCORE *
					   g_nvmf_tgt.MaxConnectionsPerSession)) {
				/* If fewer than the target number of session connections
				 * exist then add to this lcore
				 */
				return i;
			} else if (num_pollers < min_pollers) {
				/* Track the lcore that has the minimum number of pollers
				 * to be used if no lcores have already met our criteria
				 */
				selected_core = i;
				min_pollers = num_pollers;
			}
			break;
		}
	}

	return selected_core;
}
Example #17
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + 1 master when producer cores are used
	 * Else N worker + 1 master when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test need minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlaps with master lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlaps producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more workers lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_master_lcore())) {
			evt_err("producer lcores overlaps with master lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("minimum one producer is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if (opt->nb_stages == 1 && opt->fwd_latency) {
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}
	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}
Example #18
int
main(int argc, char **argv)
{
	uint32_t i;
	int32_t ret;

	printf("\n%s %s\n", wr_copyright_msg(), wr_powered_by()); fflush(stdout);

	wr_scrn_setw(1);/* Reset the window size */

	/* call before the rte_eal_init() */
	(void)rte_set_application_usage_hook(pktgen_usage);

	memset(&pktgen, 0, sizeof(pktgen));

	pktgen.flags            = PRINT_LABELS_FLAG;
	pktgen.ident            = 0x1234;
	pktgen.nb_rxd           = DEFAULT_RX_DESC;
	pktgen.nb_txd           = DEFAULT_TX_DESC;
	pktgen.nb_ports_per_page = DEFAULT_PORTS_PER_PAGE;

	if ( (pktgen.l2p = wr_l2p_create()) == NULL)
		pktgen_log_panic("Unable to create l2p");

	pktgen.portdesc_cnt = wr_get_portdesc(pktgen.portlist, pktgen.portdesc, RTE_MAX_ETHPORTS, 0);

	/* Initialize the screen and logging */
	pktgen_init_log();
	pktgen_cpu_init();

	/* initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;
	argc -= ret;
	argv += ret;

	pktgen.hz = rte_get_timer_hz();	/* Get the starting HZ value. */

	/* parse application arguments (after the EAL ones) */
	ret = pktgen_parse_args(argc, argv);
	if (ret < 0)
		return -1;

	pktgen_init_screen((pktgen.flags & ENABLE_THEME_FLAG) ? THEME_ON : THEME_OFF);

	rte_delay_ms(100);	/* Wait a bit for things to settle. */

	wr_print_copyright(PKTGEN_APP_NAME, PKTGEN_CREATED_BY);

	lua_newlib_add(_lua_openlib);

	/* Open the Lua script handler. */
	if ( (pktgen.L = lua_create_instance()) == NULL) {
		pktgen_log_error("Failed to open Lua pktgen support library");
		return -1;
	}

	pktgen_log_info(">>> Packet Burst %d, RX Desc %d, TX Desc %d, mbufs/port %d, mbuf cache %d",
	                DEFAULT_PKT_BURST, DEFAULT_RX_DESC, DEFAULT_TX_DESC, MAX_MBUFS_PER_PORT, MBUF_CACHE_SIZE);

	/* Configure and initialize the ports */
	pktgen_config_ports();

	pktgen_log_info("");
	pktgen_log_info("=== Display processing on lcore %d", rte_lcore_id());

	/* launch per-lcore init on every enabled lcore except the master */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if ( (i == rte_get_master_lcore()) || !rte_lcore_is_enabled(i) )
			continue;
		ret = rte_eal_remote_launch(pktgen_launch_one_lcore, NULL, i);
		if (ret != 0)
			pktgen_log_error("Failed to start lcore %d, return %d", i, ret);
	}
	rte_delay_ms(1000);	/* Wait for the lcores to start up. */

	/* Disable printing log messages of level info and below to screen, */
	/* erase the screen and start updating the screen again. */
	pktgen_log_set_screen_level(LOG_LEVEL_WARNING);
	wr_scrn_erase(pktgen.scrn->nrows);

	wr_logo(3, 16, PKTGEN_APP_NAME);
	wr_splash_screen(3, 16, PKTGEN_APP_NAME, PKTGEN_CREATED_BY);

	wr_scrn_resume();

	pktgen_redisplay(1);

	rte_timer_setup();

	if (pktgen.flags & ENABLE_GUI_FLAG) {
		if (!wr_scrn_is_paused() ) {
			wr_scrn_pause();
			wr_scrn_cls();
			wr_scrn_setw(1);
			wr_scrn_pos(pktgen.scrn->nrows, 1);
		}

		lua_init_socket(pktgen.L, &pktgen.thread, pktgen.hostname, pktgen.socket_port);
	}

	pktgen_cmdline_start();

	execute_lua_close(pktgen.L);
	pktgen_stop_running();

	wr_scrn_pause();

	wr_scrn_setw(1);
	wr_scrn_printf(100, 1, "\n");	/* Put the cursor on the last row and do a newline. */

	/* Wait for all of the cores to stop running and exit. */
	rte_eal_mp_wait_lcore();

	return 0;
}
Example #19
/* Parse the arguments given on the command line of the application */
int
app_parse_args(int argc, const char *argv[]) {
  int opt, ret;
  char **argvopt;
  int option_index;
  const char *prgname = argv[0];
  static const struct option lgopts[] = {
    {"rx", 1, 0, 0},
    {"tx", 1, 0, 0},
    {"w", 1, 0, 0},
    {"rsz", 1, 0, 0},
    {"bsz", 1, 0, 0},
    {"no-cache", 0, 0, 0},
    {"core-assign", 1, 0, 0},
    {"kvstype", 1, 0, 0},
#ifdef __SSE4_2__
    {"hashtype", 1, 0, 0},
#endif /* __SSE4_2__ */
    {"fifoness", 1, 0, 0},
    {"show-core-config", 0, 0, 0},
    {NULL, 0, 0, 0}
  };
  uint32_t arg_p = 0;
  uint32_t arg_w = 0;
  uint32_t arg_rx = 0;
  uint32_t arg_tx = 0;
  uint32_t arg_rsz = 0;
  uint32_t arg_bsz = 0;

  uint64_t portmask = 0;
  char *end = NULL;
  struct app_lcore_params *lp, *htlp;
  unsigned lcore, htcore, lcore_count, i, wk_lcore_count = 0;
  unsigned rx_lcore_start, tx_lcore_start, wk_lcore_start;
  int rx_lcore_inc, tx_lcore_inc, wk_lcore_inc;
  uint8_t portid, port_count, count, n;
  bool show_core_assign = false;

  argvopt = (char **)argv;

  while ((opt = getopt_long(argc, argvopt, "p:w:",
                            lgopts, &option_index)) != EOF) {
    switch (opt) {
      case 'p':
        if (optarg[0] == '\0') {
          printf("Require value for -p argument\n");
          return -1;
        }
        portmask = (uint64_t)strtoull(optarg, &end, 16);
        if (end == NULL || *end != '\0') {
          printf("Non-numerical value for -p argument\n");
          return -1;
        }
        if (portmask == 0) {
          printf("Incorrect value for -p argument\n");
          return -1;
        }
        arg_p = 1;
        break;
      case 'w':
        if (optarg[0] == '\0') {
          printf("Require value for -w argument\n");
          return -1;
        }
        wk_lcore_count = (unsigned)strtoul(optarg, &end, 10);
        if (end == NULL || *end != '\0') {
          printf("Non-numerical value for -w argument\n");
          return -1;
        }
        if (wk_lcore_count == 0) {
          printf("Incorrect value for -w argument\n");
          return -1;
        }
        break;
        /* long options */
      case 0:
        if (!strcmp(lgopts[option_index].name, "rx")) {
          arg_rx = 1;
          ret = parse_arg_rx(optarg);
          if (ret) {
            printf("Incorrect value for --rx argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "tx")) {
          arg_tx = 1;
          ret = parse_arg_tx(optarg);
          if (ret) {
            printf("Incorrect value for --tx argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "w")) {
          arg_w = 1;
          ret = parse_arg_w(optarg);
          if (ret) {
            printf("Incorrect value for --w argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "rsz")) {
          arg_rsz = 1;
          ret = parse_arg_rsz(optarg);
          if (ret) {
            printf("Incorrect value for --rsz argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "bsz")) {
          arg_bsz = 1;
          ret = parse_arg_bsz(optarg);
          if (ret) {
            printf("Incorrect value for --bsz argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "core-assign")) {
          ret = parse_arg_core_assign(optarg);
          if (ret) {
            printf("Incorrect value for --core-assign argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "no-cache")) {
          app.no_cache = 1;
        }
        if (!strcmp(lgopts[option_index].name, "kvstype")) {
          ret = parse_arg_kvstype(optarg);
          if (ret) {
            printf("Incorrect value for --ksvtype argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "hashtype")) {
          ret = parse_arg_hashtype(optarg);
          if (ret) {
            printf("Incorrect value for --hashtype argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "fifoness")) {
          ret = parse_arg_fifoness(optarg);
          if (ret) {
            printf("Incorrect value for --fifoness argument (%d)\n", ret);
            return -1;
          }
        }
        if (!strcmp(lgopts[option_index].name, "show-core-config")) {
          show_core_assign = true;
        }
        break;

      default:
        printf("Incorrect option\n");
        return -1;
    }
  }

  /* Check that all mandatory arguments are provided */
  if ((arg_rx == 0 || arg_tx == 0 || arg_w == 0) && arg_p == 0) {
    lagopus_exit_error(EXIT_FAILURE,
                       "Not all mandatory arguments are present\n");
  }

  port_count = 0;
  if (arg_p != 0) {
    /**
     * Assign lcore for each thread automatically.
     */
    for (i = 0; i < 32; i++) {
      if ((portmask & (uint64_t)(1ULL << i)) != 0) {
        port_count++;
      }
    }
    if (port_count == 0) {
      lagopus_exit_error(EXIT_FAILURE,
                         "error: port is not specified.  use -p HEXNUM or --rx, --tx.\n");
    }
  }
  for (lcore_count = 0, lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
    if (lcore == rte_get_master_lcore()) {
      continue;
    }
    if (!rte_lcore_is_enabled(lcore)) {
      continue;
    }
    lp = &app.lcore_params[lcore];
    lp->socket_id = lcore_config[lcore].socket_id;
    lp->core_id = lcore_config[lcore].core_id;

    /* add lcore id, skipping hyper-threading sibling cores. */
    for (htcore = 0; htcore < lcore; htcore++) {
      if (!rte_lcore_is_enabled(htcore)) {
        continue;
      }
      htlp = &app.lcore_params[htcore];
      if (app.core_assign == CORE_ASSIGN_PERFORMANCE) {
        if (lp->socket_id == htlp->socket_id &&
            lp->core_id == htlp->core_id) {
          break;
        }
      }
    }
    if (htcore == lcore) {
      lcores[lcore_count++] = lcore;
    }
  }

  if (lcore_count == 0) {
    lagopus_exit_error(
      EXIT_FAILURE,
      "Not enough active core "
      "(need at least 2 active core%s)\n",
      app.core_assign == CORE_ASSIGN_PERFORMANCE ?
      " except for HTT core" : "");
  }
  if (app.core_assign == CORE_ASSIGN_MINIMUM) {
    lcore_count = 1;
  }
  if (lcore_count == 1) {
    /*
     * I/O and worker share a single lcore.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = 0;
    rx_lcore_inc = 0;
    tx_lcore_inc = 0;
    wk_lcore_inc = 0;
  } else if (port_count * 4 <= lcore_count) {
    /*
     * each port's rx has its own lcore.
     * each port's tx has its own lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = rx_lcore_start + port_count;
    wk_lcore_start = tx_lcore_start + port_count;
    rx_lcore_inc = 1;
    tx_lcore_inc = 1;
    wk_lcore_inc = 1;
  } else if (port_count * 2 <= lcore_count) {
    /*
     * each port (rx/tx) has its own lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = rx_lcore_start + port_count;
    rx_lcore_inc = 1;
    tx_lcore_inc = 1;
    wk_lcore_inc = 1;
  } else if (port_count <= lcore_count) {
    /*
     * odd and even ports (rx/tx) share an lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = rx_lcore_start + (port_count + 1) / 2;
    rx_lcore_inc = 2;
    tx_lcore_inc = 2;
    wk_lcore_inc = 1;
  } else if (port_count <= lcore_count * 2) {
    /*
     * groups of four ports (rx/tx) share an lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = rx_lcore_start + (port_count + 3) / 4;
    rx_lcore_inc = 4;
    tx_lcore_inc = 4;
    wk_lcore_inc = 1;
  } else if (lcore_count >= 4) {
    /*
     * rx for all ports has its own lcore.
     * tx for all ports has its own lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 1;
    wk_lcore_start = 2;
    rx_lcore_inc = 0;
    tx_lcore_inc = 0;
    wk_lcore_inc = 1;
  } else {
    /*
     * I/O has its own lcore.
     * all other lcores are assigned as workers.
     */
    rx_lcore_start = 0;
    tx_lcore_start = 0;
    wk_lcore_start = 1;
    rx_lcore_inc = 0;
    tx_lcore_inc = 0;
    wk_lcore_inc = 1;
  }
  /* assign core automatically */
  if (arg_rx == 0) {
    lcore = rx_lcore_start;
    count = rx_lcore_inc;
    for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
      if ((portmask & (uint64_t)(1ULL << portid)) == 0) {
        continue;
      }
      if (assign_rx_lcore(portid, lcore) != 0) {
        return -5;
      }
      if (--count == 0) {
        lcore++;
        count = rx_lcore_inc;
      }
    }
  }
  if (arg_tx == 0) {
    lcore = tx_lcore_start;
    count = tx_lcore_inc;
    for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
      if ((portmask & (uint64_t)(1ULL << portid)) == 0) {
        continue;
      }
      if (assign_tx_lcore(portid, lcore) != 0) {
        return -5;
      }
      if (--count == 0) {
        lcore++;
        count = tx_lcore_inc;
      }
    }
  }
  if (arg_w == 0) {
    if (wk_lcore_count == 0) {
      wk_lcore_count = lcore_count - wk_lcore_start;
    }
    for (lcore = wk_lcore_start;
         lcore < wk_lcore_start + wk_lcore_count;
         lcore++) {
      lp = &app.lcore_params[lcores[lcore]];
      if (lp->type == e_APP_LCORE_IO) {
        /* core is shared by I/O and worker. */
        lp->type = e_APP_LCORE_IO_WORKER;
      } else {
        lp->type = e_APP_LCORE_WORKER;
      }
      if (wk_lcore_inc != 1) {
        break;
      }
    }
  }

  /* Assign default values for the optional arguments not provided */
  if (arg_rsz == 0) {
    app.nic_rx_ring_size = APP_DEFAULT_NIC_RX_RING_SIZE;
    app.nic_tx_ring_size = APP_DEFAULT_NIC_TX_RING_SIZE;
    app.ring_rx_size = APP_DEFAULT_RING_RX_SIZE;
    app.ring_tx_size = APP_DEFAULT_RING_TX_SIZE;
  }

  if (arg_bsz == 0) {
    app.burst_size_io_rx_read = APP_DEFAULT_BURST_SIZE_IO_RX_READ;
    app.burst_size_io_rx_write = APP_DEFAULT_BURST_SIZE_IO_RX_WRITE;
    app.burst_size_io_tx_read = APP_DEFAULT_BURST_SIZE_IO_TX_READ;
    app.burst_size_io_tx_write = APP_DEFAULT_BURST_SIZE_IO_TX_WRITE;
    app.burst_size_worker_read = APP_DEFAULT_BURST_SIZE_WORKER_READ;
    app.burst_size_worker_write = APP_DEFAULT_BURST_SIZE_WORKER_WRITE;
  }

  /* Check cross-consistency of arguments */
  if (app_check_every_rx_port_is_tx_enabled() < 0) {
    lagopus_msg_error("At least one RX port is not enabled for TX.\n");
    return -2;
  }

  if (show_core_assign == true) {
    printf("core assign:\n");
    for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
      if (lcore_config[lcore].detected != true) {
        continue;
      }
      lp = &app.lcore_params[lcore];
      printf("  lcore %d:\n", lcore);
      if (lp->type == e_APP_LCORE_IO) {
        printf("    type: I/O\n");
      } else if (lp->type == e_APP_LCORE_WORKER) {
        printf("    type: WORKER\n");
      } else if (lp->type == e_APP_LCORE_IO_WORKER) {
        printf("    type: I/O WORKER\n");
      } else {
        printf("    type: not used\n");
      }
      for (n = 0; n < lp->io.rx.n_nic_queues; n++) {
        printf("    RX port %d (queue %d)\n",
               lp->io.rx.nic_queues[n].port,
               lp->io.rx.nic_queues[n].queue);
      }
      for (n = 0; n < lp->io.tx.n_nic_ports; n++) {
        printf("    TX port %d\n",
               lp->io.tx.nic_ports[n]);
      }
    }
    exit(0);
  }

  if (optind >= 0) {
    argv[optind - 1] = prgname;
  }
  ret = optind - 1;
  optind = 0; /* reset getopt lib */
  return ret;
}
Example #20
/*
 * Initialize a given port using default settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 * FIXME: Starting with assumption of one thread/core per port
 */
static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port,
                                     struct rte_mempool *rx_mbuf_pool,
                                     unsigned int mtu)
{
    int retval;

    /* Check for a valid port */
    if (port->id >= rte_eth_dev_count())
        return -ENODEV;

    /* Set up Ethernet device with defaults (1 RX ring, 1 TX ring) */
    /* FIXME: Check if hw_ip_checksum is possible */
    struct rte_eth_conf port_conf = {
        .rxmode = {
            .max_rx_pkt_len = mtu,
            .jumbo_frame = 1,
            .hw_ip_checksum = 1,
        }
    };
    retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf);
    if (retval != 0)
        return retval;

    retval = rte_eth_rx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL, rx_mbuf_pool);
    if (retval < 0)
        return retval;

    retval = rte_eth_tx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL);
    if (retval < 0)
        goto port_init_fail;

    /* Create the hash table for the RX sockets */
    char name[32];
    snprintf(name, sizeof(name), "rx_table_%u", port->id);
    struct rte_hash_parameters hash_params = {
        .name = name,
        .entries = UHD_DPDK_MAX_SOCKET_CNT,
        .key_len = sizeof(struct uhd_dpdk_ipv4_5tuple),
        .hash_func = NULL,
        .hash_func_init_val = 0,
    };
    port->rx_table = rte_hash_create(&hash_params);
    if (port->rx_table == NULL) {
        retval = rte_errno;
        goto port_init_fail;
    }

    /* Create ARP table */
    snprintf(name, sizeof(name), "arp_table_%u", port->id);
    hash_params.name = name;
    hash_params.entries = UHD_DPDK_MAX_SOCKET_CNT;
    hash_params.key_len = sizeof(uint32_t);
    hash_params.hash_func = NULL;
    hash_params.hash_func_init_val = 0;
    port->arp_table = rte_hash_create(&hash_params);
    if (port->arp_table == NULL) {
        retval = rte_errno;
        goto free_rx_table;
    }

    /* Set up list for TX queues */
    LIST_INIT(&port->txq_list);

    /* Start the Ethernet port. */
    retval = rte_eth_dev_start(port->id);
    if (retval < 0) {
        goto free_arp_table;
    }

    /* Display the port MAC address. */
    rte_eth_macaddr_get(port->id, &port->mac_addr);
    RTE_LOG(INFO, EAL, "Port %u MAC: %02x %02x %02x %02x %02x %02x\n",
                (unsigned)port->id,
                port->mac_addr.addr_bytes[0], port->mac_addr.addr_bytes[1],
                port->mac_addr.addr_bytes[2], port->mac_addr.addr_bytes[3],
                port->mac_addr.addr_bytes[4], port->mac_addr.addr_bytes[5]);

    struct rte_eth_link link;
    rte_eth_link_get(port->id, &link);
    RTE_LOG(INFO, EAL, "Port %u UP: %d\n", port->id, link.link_status);

    return 0;

free_arp_table:
    rte_hash_free(port->arp_table);
free_rx_table:
    rte_hash_free(port->rx_table);
port_init_fail:
    return rte_errno;
}

static int uhd_dpdk_thread_init(struct uhd_dpdk_thread *thread, unsigned int id)
{
    if (!ctx || !thread)
        return -EINVAL;

    unsigned int socket_id = rte_lcore_to_socket_id(id);
    thread->id = id;
    thread->rx_pktbuf_pool = ctx->rx_pktbuf_pools[socket_id];
    thread->tx_pktbuf_pool = ctx->tx_pktbuf_pools[socket_id];
    LIST_INIT(&thread->port_list);

    char name[32];
    snprintf(name, sizeof(name), "sockreq_ring_%u", id);
    thread->sock_req_ring = rte_ring_create(
                               name,
                               UHD_DPDK_MAX_PENDING_SOCK_REQS,
                               socket_id,
                               RING_F_SC_DEQ
                            );
    if (!thread->sock_req_ring)
        return -ENOMEM;
    return 0;
}


int uhd_dpdk_init(int argc, char **argv, unsigned int num_ports,
                  int *port_thread_mapping, int num_mbufs, int mbuf_cache_size,
                  int mtu)
{
    /* Init context only once */
    if (ctx)
        return 1;

    if ((num_ports == 0) || (port_thread_mapping == NULL)) {
        return -EINVAL;
    }

    /* Grabs arguments intended for DPDK's EAL */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    ctx = (struct uhd_dpdk_ctx *) rte_zmalloc("uhd_dpdk_ctx", sizeof(*ctx), rte_socket_id());
    if (!ctx)
        return -ENOMEM;

    ctx->num_threads = rte_lcore_count();
    if (ctx->num_threads <= 1)
        rte_exit(EXIT_FAILURE, "Error: No worker threads enabled\n");

    /* Check that we have ports to send/receive on */
    ctx->num_ports = rte_eth_dev_count();
    if (ctx->num_ports < 1)
        rte_exit(EXIT_FAILURE, "Error: Found no ports\n");
    if (ctx->num_ports < num_ports)
        rte_exit(EXIT_FAILURE, "Error: User requested more ports than available\n");

    /* Get memory for thread and port data structures */
    ctx->threads = rte_zmalloc("uhd_dpdk_thread", RTE_MAX_LCORE*sizeof(struct uhd_dpdk_thread), 0);
    if (!ctx->threads)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for thread data\n");
    ctx->ports = rte_zmalloc("uhd_dpdk_port", ctx->num_ports*sizeof(struct uhd_dpdk_port), 0);
    if (!ctx->ports)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for port data\n");

    /* Initialize the thread data structures */
    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        /* Do one mempool of RX/TX per socket */
        unsigned int socket_id = rte_lcore_to_socket_id(i);
        /* FIXME Probably want to take into account actual number of ports per socket */
        if (ctx->tx_pktbuf_pools[socket_id] == NULL) {
            /* Creates a new mempool in memory to hold the mbufs.
             * This is done for each CPU socket
             */
            const int mbuf_size = mtu + 2048 + RTE_PKTMBUF_HEADROOM;
            char name[32];
            snprintf(name, sizeof(name), "rx_mbuf_pool_%u", socket_id);
            ctx->rx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            snprintf(name, sizeof(name), "tx_mbuf_pool_%u", socket_id);
            ctx->tx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            if ((ctx->rx_pktbuf_pools[socket_id]== NULL) ||
                (ctx->tx_pktbuf_pools[socket_id]== NULL))
                rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
        }

        if (uhd_dpdk_thread_init(&ctx->threads[i], i) < 0)
            rte_exit(EXIT_FAILURE, "Error initializing thread %i\n", i);
    }

    unsigned master_lcore = rte_get_master_lcore();

    /* Assign ports to threads and initialize the port data structures */
    for (unsigned int i = 0; i < num_ports; i++) {
        int thread_id = port_thread_mapping[i];
        if (thread_id < 0)
            continue;
        if (((unsigned int) thread_id) == master_lcore)
            RTE_LOG(WARNING, EAL, "User requested master lcore for port %u\n", i);
        if (ctx->threads[thread_id].id != (unsigned int) thread_id)
            rte_exit(EXIT_FAILURE, "Requested inactive lcore %u for port %u\n", (unsigned int) thread_id, i);

        struct uhd_dpdk_port *port = &ctx->ports[i];
        port->id = i;
        port->parent = &ctx->threads[thread_id];
        ctx->threads[thread_id].num_ports++;
        LIST_INSERT_HEAD(&ctx->threads[thread_id].port_list, port, port_entry);

        /* Initialize port. */
        if (uhd_dpdk_port_init(port, port->parent->rx_pktbuf_pool, mtu) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
                    i);
    }

    RTE_LOG(INFO, EAL, "Init DONE!\n");

    /* FIXME: Create functions to do this */
    RTE_LOG(INFO, EAL, "Starting I/O threads!\n");

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];
        if (!LIST_EMPTY(&t->port_list)) {
            rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, ctx->threads[i].id);
        }
    }
    return 0;
}

/* FIXME: This will be changed once we have functions to handle the threads */
int uhd_dpdk_destroy(void)
{
    if (!ctx)
        return -ENODEV;

    struct uhd_dpdk_config_req *req = (struct uhd_dpdk_config_req *) rte_zmalloc(NULL, sizeof(*req), 0);
    if (!req)
        return -ENOMEM;

    req->req_type = UHD_DPDK_LCORE_TERM;

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];

        if (LIST_EMPTY(&t->port_list))
            continue;

        if (rte_eal_get_lcore_state(t->id) == FINISHED)
            continue;

        pthread_mutex_init(&req->mutex, NULL);
        pthread_cond_init(&req->cond, NULL);
        pthread_mutex_lock(&req->mutex);
        if (rte_ring_enqueue(t->sock_req_ring, req)) {
            pthread_mutex_unlock(&req->mutex);
            RTE_LOG(ERR, USER2, "Failed to terminate thread %d\n", i);
            rte_free(req);
            return -ENOSPC;
        }
        /* pthread_cond_timedwait() takes an absolute deadline, not a
         * relative timeout: wait at most one second from now. */
        struct timespec timeout;
        clock_gettime(CLOCK_REALTIME, &timeout);
        timeout.tv_sec += 1;
        pthread_cond_timedwait(&req->cond, &req->mutex, &timeout);
        pthread_mutex_unlock(&req->mutex);
    }

    rte_free(req);
    return 0;
}
Example #21
SystemCore DpdkDeviceList::getDpdkMasterCore()
{
	return SystemCores::IdToSystemCore[rte_get_master_lcore()];
}
Example #22
File: main.c Project: Cosios/dpdk
int
main(int argc, char **argv)
{
	int ret;
	unsigned nb_ports;
	unsigned int lcore_id, last_lcore_id, master_lcore_id;
	uint8_t port_id;
	uint8_t nb_ports_available;
	struct worker_thread_args worker_args = {NULL, NULL};
	struct send_thread_args send_args = {NULL, NULL};
	struct rte_ring *rx_to_workers;
	struct rte_ring *workers_to_tx;

	/* catch ctrl-c so we can print on exit */
	signal(SIGINT, int_handler);

	/* Initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	argc -= ret;
	argv += ret;

	/* Parse the application specific arguments */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;

	/* Check if we have enough cores */
	if (rte_lcore_count() < 3)
		rte_exit(EXIT_FAILURE, "Error, This application needs at "
				"least 3 logical cores to run:\n"
				"1 lcore for packet RX\n"
				"1 lcore for packet TX\n"
				"and at least 1 lcore for worker threads\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
	if (nb_ports != 1 && (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
				"when using a single port\n");

	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
			MBUF_POOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	nb_ports_available = nb_ports;

	/* initialize all ports */
	for (port_id = 0; port_id < nb_ports; port_id++) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			nb_ports_available--;
			continue;
		}
		/* init port */
		printf("Initializing port %u... done\n", (unsigned) port_id);

		if (configure_eth_port(port_id) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
					port_id);
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	/* Create rings for inter core communication */
	rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE, rte_socket_id(),
			RING_F_SP_ENQ);
	if (rx_to_workers == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE, rte_socket_id(),
			RING_F_SC_DEQ);
	if (workers_to_tx == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	if (!disable_reorder) {
		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
				REORDER_BUFFER_SIZE);
		if (send_args.buffer == NULL)
			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	}

	last_lcore_id   = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();

	worker_args.ring_in  = rx_to_workers;
	worker_args.ring_out = workers_to_tx;

	/* Start worker_thread() on all the available slave cores but the last 1 */
	for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id); lcore_id++)
		if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id)
			rte_eal_remote_launch(worker_thread, (void *)&worker_args,
					lcore_id);

	if (disable_reorder) {
		/* Start tx_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
				last_lcore_id);
	} else {
		send_args.ring_in = workers_to_tx;
		/* Start send_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)send_thread,
				(void *)&send_args, last_lcore_id);
	}

	/* Start rx_thread() on the master core */
	rx_thread(rx_to_workers);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	print_stats();
	return 0;
}
Example #23
int main(int argc, char ** argv)
{
    int ret, socket;
    unsigned pid, nb_ports, lcore_id, rx_lcore_id;
    struct sock_parameter sk_param;
    struct sock *sk;
    struct txrx_queue *rxq;
    struct port_queue_conf *port_q;
    struct lcore_queue_conf *lcore_q;

    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        return -1;
    argc -= ret;
    argv += ret;

    /*parse gw ip and mac from cmdline*/
    if (argc > 1) {
        default_host_addr = argv[1];
        if (argc == 3)
            default_gw_addr = argv[2];
        else if (argc == 4)
            default_gw_mac = argv[3];
        else
            rte_exit(EXIT_FAILURE, "invalid arguments\n");
    }

    /*config nic*/
    nb_ports = rte_eth_dev_count();
    if (nb_ports == 0)
        rte_exit(EXIT_FAILURE, "No available NIC\n");
    for (pid = 0; pid < nb_ports; pid++) {
        ret = net_device_init(pid);
        if (ret) {
            RTE_LOG(WARNING, LDNS, "fail to initialize port %u\n", pid);
            goto release_net_device;
        }
    }
    pkt_rx_pool = rte_pktmbuf_pool_create("ldns rx pkt pool",
            PKT_RX_NB,
            32,
            0,
            RTE_MBUF_DEFAULT_BUF_SIZE,
            rte_socket_id());
    if (pkt_rx_pool == NULL)
        rte_exit(EXIT_FAILURE, "cannot alloc rx_mbuf_pool");
    
    /*sock create*/
    sk_param.mode = SOCK_MODE_COMPLETE;
    sk_param.func = dns_process;
    sk = create_sock(0, SOCK_PTOTO_IPPROTO_UDP, &sk_param);
    if (sk == NULL)
        rte_exit(EXIT_FAILURE, "cannot create sock\n");
    if (sock_bind(sk, inet_network(default_host_addr), DNS_PORT))
        rte_exit(EXIT_FAILURE, "cannot bind addr:%s port:%u",
                default_host_addr, DNS_PORT);

    /*init ethdev*/
    rx_lcore_id = 0;
    lcore_q = lcore_q_conf_get(rx_lcore_id);
    for (pid = 0; pid < nb_ports; pid++) {
        port_q = port_q_conf_get(pid);
        ret = rte_eth_dev_configure(pid, rx_rings, tx_rings, &default_rte_eth_conf);
        if (ret != 0)
            rte_exit(EXIT_FAILURE, "port %u configure error\n", pid);

        while (rx_lcore_id == rte_get_master_lcore()
                || !rte_lcore_is_enabled(rx_lcore_id)
                || lcore_q->nb_rxq == nb_rx_queue_per_core) {
            rx_lcore_id++;
            if (rx_lcore_id == RTE_MAX_LCORE)
                rte_exit(EXIT_FAILURE, "not enough core for port %u\n", pid);
            lcore_q = lcore_q_conf_get(rx_lcore_id);
        }

        rxq = &lcore_q->rxq[lcore_q->nb_rxq];
        rxq->port = pid;
        rxq->lcore = rx_lcore_id;
        rxq->qid = port_q->nb_rxq;
        lcore_q->nb_rxq++;
        port_q->nb_rxq++;

        socket = rte_lcore_to_socket_id(rx_lcore_id);
        if (socket == SOCKET_ID_ANY)
            socket = 0;

        ret = rte_eth_tx_queue_setup(pid, rxq->qid, nb_txd, socket, NULL);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "fail to setup txq %u on port %u",
                    rxq->qid, pid);
        ret = rte_eth_rx_queue_setup(pid, rxq->qid, nb_rxd, socket, NULL, pkt_rx_pool);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "failt to setup rxq %u on port %u",
                    rxq->qid, pid);

        ret = rte_eth_dev_start(pid);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "fail to start port %u\n", pid);
    }

    if (dns_set_cfg(&default_dns_cfg))
        rte_exit(EXIT_FAILURE, "fail to set dns configuration %u\n", pid);

    rte_eal_mp_remote_launch(packet_launch_one_lcore, NULL, SKIP_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    return 0;

release_net_device:
    for (; pid != 0; pid--) {
        net_device_release(pid - 1);
    }
    return -1;
}