Example #1
/*
 * Check that every SLAVE lcore is in a WAIT state, then call
 * rte_eal_remote_launch() for each of them. If call_master is true
 * (set to CALL_MASTER), also call the function on the master lcore.
 */
int
rte_eal_mp_remote_launch(int (*f)(void *), void *arg,
			 enum rte_rmt_call_master_t call_master)
{
	int lcore_id;
	int master = rte_get_master_lcore();

	/* check state of lcores */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (lcore_config[lcore_id].state != WAIT)
			return -EBUSY;
	}

	/* send messages to cores */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(f, arg, lcore_id);
	}

	if (call_master == CALL_MASTER) {
		lcore_config[master].ret = f(arg);
		lcore_config[master].state = FINISHED;
	}

	return 0;
}
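
A minimal usage sketch of this API, modeled on DPDK's helloworld sample (the trivial lcore_hello worker here is illustrative, not part of the source above):

#include <stdio.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_debug.h>

/* trivial worker: report which lcore it runs on */
static int
lcore_hello(__attribute__((unused)) void *arg)
{
	printf("hello from lcore %u\n", rte_lcore_id());
	return 0;
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		rte_exit(EXIT_FAILURE, "Cannot init EAL\n");

	/* run the worker on every slave lcore and on the master itself */
	rte_eal_mp_remote_launch(lcore_hello, NULL, CALL_MASTER);

	/* block until every lcore is back in WAIT state */
	rte_eal_mp_wait_lcore();
	return 0;
}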
/* test case to time the number of cycles taken to round-trip a cache line
 * between two cores
 */
static void
time_cache_line_switch(void)
{
	/* allocate three cache lines' worth of data; only the first byte is used */
	uint64_t data[RTE_CACHE_LINE_SIZE*3 / sizeof(uint64_t)];

	unsigned i, slaveid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
	volatile uint64_t *pdata = &data[0];
	*pdata = 1;
	rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], slaveid);
	while (*pdata)
		rte_pause();

	const uint64_t start_time = rte_rdtsc();
	for (i = 0; i < (1 << ITER_POWER); i++) {
		while (*pdata)
			rte_pause();
		*pdata = 1;
	}
	const uint64_t end_time = rte_rdtsc();

	while (*pdata)
		rte_pause();
	*pdata = 2;
	rte_eal_wait_lcore(slaveid);
	printf("==== Cache line switch test ===\n");
	printf("Time for %u iterations = %"PRIu64" ticks\n", (1<<ITER_POWER),
			end_time-start_time);
	printf("Ticks per iteration = %"PRIu64"\n\n",
			(end_time-start_time) >> ITER_POWER);
}
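
The flip_bit() worker launched above is not part of this excerpt; a sketch consistent with the handshake visible in the test (the slave zeroes the flag each time the master raises it, and a value of 2 means quit) would be:

/* slave loop: clear the flag whenever the master raises it; 2 terminates */
static int
flip_bit(volatile uint64_t *arg)
{
	uint64_t old_val = 0;
	while (old_val != 2) {
		while (!*arg)
			rte_pause();
		old_val = *arg;
		*arg = 0;
	}
	return 0;
}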
Example #3
int dpdkpcap_transmit_in_loop(pcap_t *p, const u_char *buf, int size, int number)
{
    int transmitLcoreId = 0;
    int i = 0;

    if (p == NULL || buf == NULL ||
        p->deviceId < 0 || p->deviceId >= RTE_MAX_ETHPORTS)
    {
        snprintf (errbuf_g, PCAP_ERRBUF_SIZE, "Invalid parameter");
        return DPDKPCAP_FAILURE;
    }

    for (i = 0; i < DEF_PKT_BURST; i++)
    {
        mbuf_g[i] = rte_pktmbuf_alloc(txPool);
        if (mbuf_g[i] == NULL)
        {
            snprintf (errbuf_g, PCAP_ERRBUF_SIZE, "Could not allocate buffer on port %d\n", p->deviceId);
            return DPDKPCAP_FAILURE;
        }

        struct rte_mbuf* mbuf = mbuf_g[i];

        if (mbuf->buf_len < size)
        {
            snprintf (errbuf_g, PCAP_ERRBUF_SIZE, "Can not copy packet data : packet size %d, mbuf length %d, port %d\n",
                   size, mbuf->buf_len, p->deviceId);
            return DPDKPCAP_FAILURE;
        }

        rte_memcpy(mbuf->pkt.data, buf, size);
        mbuf->pkt.data_len = size;
        mbuf->pkt.pkt_len = size;
        mbuf->pkt.nb_segs = 1;

        rte_pktmbuf_refcnt_update(mbuf, 1);
    }

    dpdkpcap_tx_args_t args;
    args.number = number;
    args.portId = p->deviceId;
    transmitLcoreId = p->deviceId + 1;

    debug("Transferring TX loop to the core %u\n", transmitLcoreId);

    if (rte_eal_remote_launch(txLoop, &args, transmitLcoreId) < 0)
    {
        snprintf (errbuf_g, PCAP_ERRBUF_SIZE, "Can not run TX on a slave core: transmitLcoreId %d\n",
                  transmitLcoreId);
        return DPDKPCAP_FAILURE;
    }

    rte_eal_wait_lcore(transmitLcoreId);

    return DPDKPCAP_OK;
}
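
The txLoop() worker and dpdkpcap_tx_args_t structure referenced above are not shown; a hypothetical sketch of the shape such a worker could take (the field names are assumptions taken from the caller):

/* hypothetical worker matching the launch above; args fields assumed */
static int txLoop(void *arg)
{
    dpdkpcap_tx_args_t *args = (dpdkpcap_tx_args_t *)arg;
    int i;

    for (i = 0; i < args->number; i++)
    {
        /* transmit the pre-built burst on queue 0; the refcount bump done
           by the caller keeps the mbufs alive across iterations */
        rte_eth_tx_burst(args->portId, 0, mbuf_g, DEF_PKT_BURST);
    }
    return 0;
}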
Example #4
/*****************************************************************************
 * start_cores()
 ****************************************************************************/
static void start_cores(void)
{
    uint32_t core;

    /*
     * Fire up the packet processing cores
     */
    RTE_LCORE_FOREACH_SLAVE(core) {
        int index = rte_lcore_index(core);

        switch (index) {
        case TPG_CORE_IDX_CLI:
            assert(false);
        break;
        case TPG_CORE_IDX_TEST_MGMT:
            rte_eal_remote_launch(test_mgmt_loop, NULL, core);
        break;
        default:
            assert(index >= TPG_NR_OF_NON_PACKET_PROCESSING_CORES);
            rte_eal_remote_launch(pkt_receive_loop, NULL, core);
        }
    }

    /*
     * Wait for packet cores to finish initialization.
     */
    RTE_LCORE_FOREACH_SLAVE(core) {
        int   error;
        msg_t msg;

        if (!cfg_is_pkt_core(core))
            continue;

        msg_init(&msg, MSG_PKTLOOP_INIT_WAIT, core, 0);
        /* BLOCK waiting for msg to be processed */
        error = msg_send(&msg, 0);
        if (error)
            TPG_ERROR_ABORT("ERROR: Failed to send pktloop init wait msg: %s(%d)!\n",
                            rte_strerror(-error), -error);
    }
}
Example #5
static int
exec_burst(uint32_t flags, int lcore)
{
	unsigned i, portid, nb_tx = 0;
	struct lcore_conf *conf;
	uint32_t pkt_per_port;
	int num, idx = 0;
	int diff_tsc;

	conf = &lcore_conf[lcore];

	pkt_per_port = MAX_TRAFFIC_BURST;
	num = pkt_per_port;

	rte_atomic64_init(&start);

	/* start polling thread, but do not actually poll yet */
	rte_eal_remote_launch(poll_burst,
			      (void *)&pkt_per_port, lcore);

	/* Only when polling first */
	if (flags == SC_BURST_POLL_FIRST)
		rte_atomic64_set(&start, 1);

	/* start xmit */
	while (num) {
		nb_tx = RTE_MIN(MAX_PKT_BURST, num);
		for (i = 0; i < conf->nb_ports; i++) {
			portid = conf->portlist[i];
			rte_eth_tx_burst(portid, 0,
					 &tx_burst[idx], nb_tx);
			idx += nb_tx;
		}
		num -= nb_tx;
	}

	sleep(5);

	/* only when polling second  */
	if (flags == SC_BURST_XMIT_FIRST)
		rte_atomic64_set(&start, 1);

	/* wait for polling to finish */
	diff_tsc = rte_eal_wait_lcore(lcore);
	if (diff_tsc < 0) {
		printf("exec_burst: Failed to measure cycles per packet\n");
		return -1;
	}

	printf("Result: %d cycles per packet\n", diff_tsc);

	return 0;
}
Example #6
void start_cores(uint32_t *cores, int count)
{
	for (int i = 0; i < count; ++i) {
		if (!dppd_core_active(cores[i], 0)) {
			plog_warn("Can't start core %u: core is not active\n", cores[i]);
		}
		else if (rte_eal_get_lcore_state(cores[i]) != RUNNING) {
			plog_info("Starting core %u\n", cores[i]);
			lconf_set_terminated(&lcore_cfg[cores[i]], 0);
			rte_eal_remote_launch(dppd_work_thread, NULL, cores[i]);
		}
		else {
			plog_warn("Core %u is already running\n", cores[i]);
		}
	}
}
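
rte_eal_remote_launch() returns -EBUSY when the target lcore is not in the WAIT state; a defensive variant of the launch above could surface that (a sketch reusing this example's dppd_work_thread and plog helpers):

/* sketch: launch one worker and report the only documented failure, -EBUSY */
static void launch_worker_checked(uint32_t core)
{
	int ret = rte_eal_remote_launch(dppd_work_thread, NULL, core);

	if (ret == -EBUSY)
		plog_warn("Core %u refused launch: not in WAIT state\n", core);
}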
Example #7
void start_cores(uint32_t *cores, int count, int task_id)
{
	int n_started_cores = 0;
	uint32_t started_cores[RTE_MAX_LCORE];
	struct task_args *targ;

	warn_inactive_cores(cores, count, "Can't start core");

	for (int i = 0; i < count; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[cores[i]];

		if (lconf->n_tasks_run != lconf->n_tasks_all) {
			if (task_id == -1) {
				for (uint8_t tid = 0; tid < lconf->n_tasks_all; ++tid) {
					targ = &lconf->targs[tid];
					start_l3(targ);
				}
			} else {
				targ = &lconf->targs[task_id];
				start_l3(targ);
			}
			lconf->msg.type = LCONF_MSG_START;
			lconf->msg.task_id = task_id;
			lconf_set_req(lconf);
			if (task_id == -1)
				plog_info("Starting core %u (all tasks)\n", cores[i]);
			else
				plog_info("Starting core %u task %u\n", cores[i], task_id);
			started_cores[n_started_cores++] = cores[i];
			lconf->flags |= LCONF_FLAG_RUNNING;
			rte_eal_remote_launch(lconf_run, NULL, cores[i]);
		}
		else {
			plog_warn("Core %u is already running all its tasks\n", cores[i]);
		}
	}

	/* This function is blocking, so detect when each core has
	   consumed the message. */
	for (int i = 0; i < n_started_cores; ++i) {
		struct lcore_cfg *lconf = &lcore_cfg[started_cores[i]];
		plog_info("Waiting for core %u to start...", started_cores[i]);
		if (wait_command_handled(lconf) == -1) return;
		plog_info(" OK\n");
	}
}
Example #8
int main(int argc, char **argv)
{
    int32_t ret;
    uint8_t lcore_id;

    /* Signal */
    signal(SIGINT, (void (*)(int))netflow_print);
 

    clrscr();
    // call before the rte_eal_init()
    (void)rte_set_application_usage_hook(netflow_usage);

    init_probe(&probe);
    
    netflow_logo(8, 0, NETFLOW_APP_NAME); 
    sleep(2);
    
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Failed in rte_eal_init\n");
    argc -= ret;
    argv += ret;
    
    ret = netflow_parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid arguments\n");
  
    netflow_init(&probe);

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        rte_eal_remote_launch(launch_probe, NULL, lcore_id);
    }
    rte_delay_ms(5000);     // wait for the lcores to start up

    // Wait for all of the cores to stop running and exit.

    process_hashtable();
    rte_eal_mp_wait_lcore(); 

    return 0;
}
Example #9
int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate == RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch; otherwise the launched
	 * function would return immediately, as the runstate is what keeps it
	 * in the service poll loop
	 */
	lcore_states[lcore].runstate = RUNSTATE_RUNNING;

	int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}
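
For context, a caller typically has to register the lcore as a service core and map a service to it before this start call does anything useful; a minimal sketch using the public rte_service API (the service_id is assumed to come from rte_service_get_by_name() or the registration step):

#include <errno.h>
#include <rte_service.h>

/* sketch: prepare an lcore as a service core and run one service on it */
static int
launch_service_on_lcore(uint32_t service_id, uint32_t lcore)
{
	int ret;

	/* take the lcore out of normal EAL duty and mark it as a service core */
	ret = rte_service_lcore_add(lcore);
	if (ret && ret != -EALREADY)
		return ret;

	/* map the service to this lcore and set the service itself runnable */
	ret = rte_service_map_lcore_set(service_id, lcore, 1);
	if (ret)
		return ret;
	ret = rte_service_runstate_set(service_id, 1);
	if (ret)
		return ret;

	/* the function shown above: flips the lcore to RUNNING and launches it */
	return rte_service_lcore_start(lcore);
}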
Example #10
err_t iperf_server_init(void)
{
    int i;
    err_t ret = ERR_OK;
    sys_init();
    lwip_init();
    ticks_sec = rte_get_timer_hz();
    ///////////////////////////////////////////
    ip_addr_t ipaddr, netmask, gateway;

    dpdkif_get_if_params(&ipaddr, &netmask, &gateway, netif.hwaddr);

    netif_set_default(netif_add(&netif, &ipaddr, &netmask, &gateway, NULL, dpdkif_init, ethernet_input));

    netif_set_up(&netif);

    rte_eal_remote_launch(dpdkif_rx_thread_func, NULL, 1);
   ////////////////////////////////////////////

    for (i = 0; i < sizeof(send_data) / sizeof(*send_data); i++)
        send_data[i] = i;

    struct tcp_pcb *pcb;
    pcb = tcp_new();

    if (pcb == NULL)
        return ERR_MEM;

    if ((ret = tcp_bind(pcb, IP_ADDR_ANY, IPERF_SERVER_PORT)) != ERR_OK) {
        tcp_close(pcb);
        return ret;
    }

    pcb = tcp_listen(pcb);
    tcp_accept(pcb, accept);

    return ret;
}
Example #11
File: perf.c Project: ninataki/spdk
int main(int argc, char **argv)
{
    int rc;
    struct worker_thread *worker;

    rc = parse_args(argc, argv);
    if (rc != 0) {
        return rc;
    }

    ealargs[1] = sprintf_alloc("-c %s", g_core_mask ? g_core_mask : "0x1");
    if (ealargs[1] == NULL) {
        perror("ealargs sprintf_alloc");
        return 1;
    }

    rc = rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]), ealargs);

    free(ealargs[1]);

    if (rc < 0) {
        fprintf(stderr, "could not initialize dpdk\n");
        return 1;
    }

    request_mempool = rte_mempool_create("nvme_request", 8192,
                                         nvme_request_size(), 128, 0,
                                         NULL, NULL, NULL, NULL,
                                         SOCKET_ID_ANY, 0);

    if (request_mempool == NULL) {
        fprintf(stderr, "could not initialize request mempool\n");
        return 1;
    }

    task_pool = rte_mempool_create("task_pool", 8192,
                                   sizeof(struct perf_task),
                                   64, 0, NULL, NULL, task_ctor, NULL,
                                   SOCKET_ID_ANY, 0);

    g_tsc_rate = rte_get_timer_hz();

    if (register_workers() != 0) {
        return 1;
    }

    if (register_aio_files(argc, argv) != 0) {
        return 1;
    }

    if (register_controllers() != 0) {
        return 1;
    }

    if (associate_workers_with_ns() != 0) {
        return 1;
    }

    printf("Initialization complete. Launching workers.\n");

    /* Launch all of the slave workers */
    worker = g_workers->next;
    while (worker != NULL) {
        rte_eal_remote_launch(work_fn, worker, worker->lcore);
        worker = worker->next;
    }

    rc = work_fn(g_workers);

    worker = g_workers->next;
    while (worker != NULL) {
        if (rte_eal_wait_lcore(worker->lcore) < 0) {
            rc = -1;
        }
        worker = worker->next;
    }

    print_stats();

    unregister_controllers();

    if (rc != 0) {
        fprintf(stderr, "%s: errors occured\n", argv[0]);
    }

    return rc;
}
Example #12
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				 &t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining  =  total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining  = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
					t->done = true;
					rte_smp_wmb();
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
				opt->prod_type == EVT_PROD_TYPE_SYNT) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for seconds, deadlock");
				t->done = true;
				rte_smp_wmb();
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}
Example #13
static int
paxos_rx_process(struct rte_mbuf *pkt, struct proposer* proposer)
{
    int ret = 0;
    uint8_t l4_proto = 0;
    uint16_t outer_header_len;
    union tunnel_offload_info info = { .data = 0 };
    struct udp_hdr *udp_hdr;
    struct paxos_hdr *paxos_hdr;
    struct ether_hdr *phdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);

    parse_ethernet(phdr, &info, &l4_proto);

    if (l4_proto != IPPROTO_UDP)
        return -1;

    udp_hdr = (struct udp_hdr *)((char *)phdr +
                                 info.outer_l2_len + info.outer_l3_len);

    /* if the UDP dst port is neither the PROPOSER nor the LEARNER port */
    if (!(udp_hdr->dst_port == rte_cpu_to_be_16(PROPOSER_PORT) ||
            udp_hdr->dst_port == rte_cpu_to_be_16(LEARNER_PORT)) &&
            (pkt->packet_type & RTE_PTYPE_TUNNEL_MASK) == 0)
        return -1;

    paxos_hdr = (struct paxos_hdr *)((char *)udp_hdr + sizeof(struct udp_hdr));

    if (rte_get_log_level() == RTE_LOG_DEBUG) {
        //rte_hexdump(stdout, "udp", udp_hdr, sizeof(struct udp_hdr));
        //rte_hexdump(stdout, "paxos", paxos_hdr, sizeof(struct paxos_hdr));
        print_paxos_hdr(paxos_hdr);
    }

    int value_len = rte_be_to_cpu_16(paxos_hdr->value_len);
    struct paxos_value *v = paxos_value_new((char *)paxos_hdr->paxosval, value_len);
    switch(rte_be_to_cpu_16(paxos_hdr->msgtype)) {
    case PAXOS_PROMISE: {
        struct paxos_promise promise = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_promise(proposer, &promise);
        break;
    }
    case PAXOS_ACCEPT: {
        if (first_time) {
            proposer_preexecute(proposer);
            first_time = false;
        }
        struct paxos_accept acpt = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_accept(proposer, &acpt);
        break;
    }
    case PAXOS_ACCEPTED: {
        struct paxos_accepted ack = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_accepted(proposer, &ack);
        break;
    }
    default:
        break;
    }
    outer_header_len = info.outer_l2_len + info.outer_l3_len
                       + sizeof(struct udp_hdr) + sizeof(struct paxos_hdr);

    rte_pktmbuf_adj(pkt, outer_header_len);

    return ret;

}

static uint16_t
add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
               struct rte_mbuf **pkts, uint16_t nb_pkts,
               uint16_t max_pkts __rte_unused, void *user_param)
{
    struct proposer* proposer = (struct proposer *)user_param;
    unsigned i;
    uint64_t now = rte_rdtsc();

    for (i = 0; i < nb_pkts; i++) {
        pkts[i]->udata64 = now;
        paxos_rx_process(pkts[i], proposer);
    }
    return nb_pkts;
}


static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool, struct proposer* proposer)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf *txconf;
    struct rte_eth_rxconf *rxconf;
    struct rte_eth_conf port_conf = port_conf_default;
    const uint16_t rx_rings = 1, tx_rings = 1;
    int retval;
    uint16_t q;

    rte_eth_dev_info_get(port, &dev_info);

    rxconf = &dev_info.default_rxconf;
    txconf = &dev_info.default_txconf;

    txconf->txq_flags &= PKT_TX_IPV4;
    txconf->txq_flags &= PKT_TX_UDP_CKSUM;
    if (port >= rte_eth_dev_count())
        return -1;

    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0)
        return retval;

    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
                                        rte_eth_dev_socket_id(port), rxconf, mbuf_pool);
        if (retval < 0)
            return retval;
    }

    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
                                        rte_eth_dev_socket_id(port), txconf);
        if (retval < 0)
            return retval;
    }

    retval = rte_eth_dev_start(port);
    if (retval < 0)
        return retval;

    struct ether_addr addr;
    rte_eth_macaddr_get(port, &addr);
    rte_eth_promiscuous_enable(port);

    rte_eth_add_rx_callback(port, 0, add_timestamps, proposer);
    rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
    return 0;
}


static void
lcore_main(uint8_t port, __rte_unused struct proposer *p)
{
    proposer_preexecute(p);

    for (;;) {
        // Check if signal is received
        if (force_quit)
            break;
        struct rte_mbuf *bufs[BURST_SIZE];
        const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
        if (unlikely(nb_rx == 0))
            continue;
        uint16_t buf;
        for (buf = 0; buf < nb_rx; buf++)
            rte_pktmbuf_free(bufs[buf]);
    }
}



static __attribute__((noreturn)) int
lcore_mainloop(__attribute__((unused)) void *arg)
{
    uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
    unsigned lcore_id;

    lcore_id = rte_lcore_id();

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_TIMER,
            "Starting mainloop on core %u\n", lcore_id);

    while(1) {
        cur_tsc = rte_rdtsc();
        diff_tsc = cur_tsc - prev_tsc;
        if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
            rte_timer_manage();
            prev_tsc = cur_tsc;
        }
    }
}

static void
report_stat(struct rte_timer *tim, __attribute__((unused)) void *arg)
{
    /* print stat */
    uint32_t count = rte_atomic32_read(&stat);
    rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER8,
            "Throughput = %8u msg/s\n", count);
    /* reset stat */
    rte_atomic32_set(&stat, 0);
    /* this timer is automatically reloaded until we decide to stop it */
    if (force_quit)
        rte_timer_stop(tim);
}


static void
check_timeout(struct rte_timer *tim, void *arg)
{
    struct proposer* p = (struct proposer *) arg;
    unsigned lcore_id = rte_lcore_id();

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "%s() on lcore_id %i\n", __func__, lcore_id);

    struct paxos_message out;
    out.type = PAXOS_PREPARE;
    struct timeout_iterator* iter = proposer_timeout_iterator(p);
    while(timeout_iterator_prepare(iter, &out.u.prepare)) {
        rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
                "%s Send PREPARE inst %d ballot %d\n",
                __func__, out.u.prepare.iid, out.u.prepare.ballot);
        send_paxos_message(&out);
    }
    out.type = PAXOS_ACCEPT;
    while(timeout_iterator_accept(iter, &out.u.accept)) {
        rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
                "%s: Send ACCEPT inst %d ballot %d\n",
                __func__, out.u.prepare.iid, out.u.prepare.ballot);
        send_paxos_message(&out);
    }
    timeout_iterator_free(iter);

    /* this timer is automatically reloaded until we decide to stop it */
    if (force_quit)
        rte_timer_stop(tim);
}

int
main(int argc, char *argv[])
{
    uint8_t portid = 0;
    unsigned master_core, lcore_id;
    signal(SIGTERM, signal_handler);
    signal(SIGINT, signal_handler);
    force_quit = false;
    int proposer_id = 0;

    if (rte_get_log_level() == RTE_LOG_DEBUG) {
        paxos_config.verbosity = PAXOS_LOG_DEBUG;
    }

    struct proposer *proposer = proposer_new(proposer_id, NUM_ACCEPTORS);
    first_time = true;
    /* init EAL */
    int ret = rte_eal_init(argc, argv);

    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    /* init RTE timer library (must be called before any other timer API) */
    rte_timer_subsystem_init();

    /* init timer structures */
    rte_timer_init(&timer);
    rte_timer_init(&stat_timer);

    /* load the timers, every 1 s, on slave lcores, reloaded automatically */
    uint64_t hz = rte_get_timer_hz();

    /* Call rte_timer_manage every 10ms */
    TIMER_RESOLUTION_CYCLES = hz / 100;
    rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER1, "Clock: %"PRIu64"\n", hz);

    /* master core */
    master_core = rte_lcore_id();
    /* slave core */
    lcore_id = rte_get_next_lcore(master_core, 0, 1);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER1, "lcore_id: %d\n", lcore_id);
    rte_timer_reset(&timer, hz, PERIODICAL, lcore_id, check_timeout, proposer);
    /* launch the timer management loop on the slave lcore */
    rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);

    /* stat core */
    lcore_id = rte_get_next_lcore(lcore_id , 0, 1);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER1, "lcore_id: %d\n", lcore_id);
    rte_timer_reset(&stat_timer, hz, PERIODICAL, lcore_id,
                    report_stat, NULL);

    /* init RTE timer library */
    rte_timer_subsystem_init();

    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
                                        NUM_MBUFS, MBUF_CACHE_SIZE, 0,
                                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf_pool\n");
    /* launch the timer management loop on the stat lcore */
    rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);

    if (port_init(portid, mbuf_pool, proposer) != 0)
        rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8"\n", portid);


    lcore_main(portid, proposer);

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Free proposer\n");
    proposer_free(proposer);
    return 0;
}
Example #14
int
main(int argc, char **argv)
{
	uint32_t i;
	int32_t ret;

	printf("\n%s %s\n", wr_copyright_msg(), wr_powered_by()); fflush(stdout);

	wr_scrn_setw(1);/* Reset the window size */

	/* call before the rte_eal_init() */
	(void)rte_set_application_usage_hook(pktgen_usage);

	memset(&pktgen, 0, sizeof(pktgen));

	pktgen.flags            = PRINT_LABELS_FLAG;
	pktgen.ident            = 0x1234;
	pktgen.nb_rxd           = DEFAULT_RX_DESC;
	pktgen.nb_txd           = DEFAULT_TX_DESC;
	pktgen.nb_ports_per_page = DEFAULT_PORTS_PER_PAGE;

	if ( (pktgen.l2p = wr_l2p_create()) == NULL)
		pktgen_log_panic("Unable to create l2p");

	pktgen.portdesc_cnt = wr_get_portdesc(pktgen.portlist, pktgen.portdesc, RTE_MAX_ETHPORTS, 0);

	/* Initialize the screen and logging */
	pktgen_init_log();
	pktgen_cpu_init();

	/* initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;
	argc -= ret;
	argv += ret;

	pktgen.hz = rte_get_timer_hz();	/* Get the starting HZ value. */

	/* parse application arguments (after the EAL ones) */
	ret = pktgen_parse_args(argc, argv);
	if (ret < 0)
		return -1;

	pktgen_init_screen((pktgen.flags & ENABLE_THEME_FLAG) ? THEME_ON : THEME_OFF);

	rte_delay_ms(100);	/* Wait a bit for things to settle. */

	wr_print_copyright(PKTGEN_APP_NAME, PKTGEN_CREATED_BY);

	lua_newlib_add(_lua_openlib);

	/* Open the Lua script handler. */
	if ( (pktgen.L = lua_create_instance()) == NULL) {
		pktgen_log_error("Failed to open Lua pktgen support library");
		return -1;
	}

	pktgen_log_info(">>> Packet Burst %d, RX Desc %d, TX Desc %d, mbufs/port %d, mbuf cache %d",
	                DEFAULT_PKT_BURST, DEFAULT_RX_DESC, DEFAULT_TX_DESC, MAX_MBUFS_PER_PORT, MBUF_CACHE_SIZE);

	/* Configure and initialize the ports */
	pktgen_config_ports();

	pktgen_log_info("");
	pktgen_log_info("=== Display processing on lcore %d", rte_lcore_id());

	/* launch per-lcore init on every enabled lcore except the master */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if ( (i == rte_get_master_lcore()) || !rte_lcore_is_enabled(i) )
			continue;
		ret = rte_eal_remote_launch(pktgen_launch_one_lcore, NULL, i);
		if (ret != 0)
			pktgen_log_error("Failed to start lcore %d, return %d", i, ret);
	}
	rte_delay_ms(1000);	/* Wait for the lcores to start up. */

	/* Disable printing log messages of level info and below to screen, */
	/* erase the screen and start updating the screen again. */
	pktgen_log_set_screen_level(LOG_LEVEL_WARNING);
	wr_scrn_erase(pktgen.scrn->nrows);

	wr_logo(3, 16, PKTGEN_APP_NAME);
	wr_splash_screen(3, 16, PKTGEN_APP_NAME, PKTGEN_CREATED_BY);

	wr_scrn_resume();

	pktgen_redisplay(1);

	rte_timer_setup();

	if (pktgen.flags & ENABLE_GUI_FLAG) {
		if (!wr_scrn_is_paused() ) {
			wr_scrn_pause();
			wr_scrn_cls();
			wr_scrn_setw(1);
			wr_scrn_pos(pktgen.scrn->nrows, 1);
		}

		lua_init_socket(pktgen.L, &pktgen.thread, pktgen.hostname, pktgen.socket_port);
	}

	pktgen_cmdline_start();

	execute_lua_close(pktgen.L);
	pktgen_stop_running();

	wr_scrn_pause();

	wr_scrn_setw(1);
	wr_scrn_printf(100, 1, "\n");	/* Put the cursor on the last row and do a newline. */

	/* Wait for all of the cores to stop running and exit. */
	rte_eal_mp_wait_lcore();

	return 0;
}
Example #15
File: main.c Project: btw616/dpdk
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores need to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if more are available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate number of needed queue pairs, based on the amount
	 * of available number of logical cores and crypto devices.
	 * For instance, if there are 4 cores and 2 crypto devices,
	 * 2 queue pairs will be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/**
		 * Device info specifies the min headroom and tailroom
		 * requirements for the crypto PMD. These need to be honoured
		 * by the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count *
						opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		ret = fill_session_pool_socket(socket_id, max_sess_size,
				sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u",	j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking on or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
					"\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}
	} else {

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				rte_eal_wait_lcore(lcore_id);
				i++;
			}

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}
Example #16
int MAIN(int argc, char **argv) {

	int ret;
	int i=0;
	int nb_lcore;
	struct user_params params;

	struct lcore_config lcore[APP_MAX_LCORES];
	uint8_t lcore_id;

	/* Associate the signal_handler function with the appropriate signals */
	signal(SIGUSR1, signal_handler);
	signal(SIGUSR2, signal_handler);

	/* Parse EAL arguments and init DPDK EAL
	 *
	 * After this function call, all lcores are initialized in WAIT state and
	 * ready to receive functions to execute
	 */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	argc -= ret;
	argv += ret;

	nb_lcore = rte_lcore_count();
	if (nb_lcore < 2) 
		rte_exit(EXIT_FAILURE, "Too few locres. At least 2 required (one for packet fwd, one for control plane), %d given\n", nb_lcore);
	/*
	 * This call sets the log level in the sense that log messages for a
	 * level lower than this will not be shown but will still cost CPU
	 * cycles. To actually remove logging code from the program, set the
	 * log level in the DPDK config files located in $RTE_SDK/config.
	 */
	rte_set_log_level(RTE_LOG_DEBUG);

	/*
	 * Parse application-specific arguments, which come after the EAL ones
	 * and are separated from the latter by a double dash (--)
	 */
	ret = parse_args(argc, argv, &params);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid content router arguments\n");

	// Configure the app config object
	app_conf.fib_num_buckets = FIB_NUM_BUCKETS;
	app_conf.fib_max_elements = FIB_MAX_ELEMENTS;

	app_conf.pit_num_buckets = PIT_NUM_BUCKETS;
	app_conf.pit_max_elements = PIT_MAX_ELEMENTS;
	app_conf.pit_ttl_us = PIT_TTL_US;

	/* CS settings */
	app_conf.cs_num_buckets = CS_NUM_BUCKETS;
	app_conf.cs_max_elements = CS_MAX_ELEMENTS;

	/* Packet burst settings */
	app_conf.tx_burst_size = MAX_PKT_BURST;
	app_conf.rx_burst_size = MAX_PKT_BURST;

	/* Packet pool settings */
	app_conf.nb_mbuf = NB_MBUF;
	app_conf.mbuf_size = MBUF_SIZE;
	app_conf.mempool_cache_size = MEMPOOL_CACHE_SIZE;

	/* Other config */
	app_conf.promic_mode = params.promisc_mode;
	app_conf.portmask = params.portmask;
	app_conf.numa_on = params.numa_on;
	
	for(i=0; i< APP_MAX_ETH_PORTS;i++)
		memcpy(app_conf.config_remote_addr[i],params.config_remote_addr[i] ,18);

	init_app(&app_conf, lcore_conf);
	reset_stats();

	MAIN_LOG("All configuration done. Launching worker lcores\n");

	/* launch per-lcore init on every lcore but lcore 0 and the control plane lcore */
	for (lcore_id = 1; lcore_id < nb_lcore; lcore_id++) {
		if (lcore_id == CONTROL_PLANE_LCORE)
			continue;
		ret = rte_eal_remote_launch(pkt_fwd_loop, NULL, lcore_id);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "lcore %u busy\n", lcore_id);
	}
	/* launch the control plane loop on its own lcore if it is not the MASTER (lcore 0, current) */
	if (CONTROL_PLANE_LCORE != 0) {
		ret = rte_eal_remote_launch(ctrl_loop, NULL, CONTROL_PLANE_LCORE);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "lcore %u busy\n", CONTROL_PLANE_LCORE);
		MAIN_LOG("Fwd and Ctrl loops launched\n");
		pkt_fwd_loop(NULL);
	}
	else
		ctrl_loop(NULL); /* the master lcore runs the control plane loop itself */
	
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
	{
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}
	return 0;
}
Example #17
File: main.c Project: Cosios/dpdk
int
main(int argc, char **argv)
{
	int ret;
	unsigned nb_ports;
	unsigned int lcore_id, last_lcore_id, master_lcore_id;
	uint8_t port_id;
	uint8_t nb_ports_available;
	struct worker_thread_args worker_args = {NULL, NULL};
	struct send_thread_args send_args = {NULL, NULL};
	struct rte_ring *rx_to_workers;
	struct rte_ring *workers_to_tx;

	/* catch ctrl-c so we can print on exit */
	signal(SIGINT, int_handler);

	/* Initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	argc -= ret;
	argv += ret;

	/* Parse the application specific arguments */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;

	/* Check if we have enough cores */
	if (rte_lcore_count() < 3)
		rte_exit(EXIT_FAILURE, "Error, This application needs at "
				"least 3 logical cores to run:\n"
				"1 lcore for packet RX\n"
				"1 lcore for packet TX\n"
				"and at least 1 lcore for worker threads\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
	if (nb_ports != 1 && (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
				"when using a single port\n");

	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
			MBUF_POOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	nb_ports_available = nb_ports;

	/* initialize all ports */
	for (port_id = 0; port_id < nb_ports; port_id++) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			nb_ports_available--;
			continue;
		}
		/* init port */
		printf("Initializing port %u... done\n", (unsigned) port_id);

		if (configure_eth_port(port_id) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
					port_id);
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	/* Create rings for inter core communication */
	rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE, rte_socket_id(),
			RING_F_SP_ENQ);
	if (rx_to_workers == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE, rte_socket_id(),
			RING_F_SC_DEQ);
	if (workers_to_tx == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	if (!disable_reorder) {
		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
				REORDER_BUFFER_SIZE);
		if (send_args.buffer == NULL)
			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	}

	last_lcore_id   = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();

	worker_args.ring_in  = rx_to_workers;
	worker_args.ring_out = workers_to_tx;

	/* Start worker_thread() on all the available slave cores except the last one */
	for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id); lcore_id++)
		if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id)
			rte_eal_remote_launch(worker_thread, (void *)&worker_args,
					lcore_id);

	if (disable_reorder) {
		/* Start tx_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
				last_lcore_id);
	} else {
		send_args.ring_in = workers_to_tx;
		/* Start send_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)send_thread,
				(void *)&send_args, last_lcore_id);
	}

	/* Start rx_thread() on the master core */
	rx_thread(rx_to_workers);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	print_stats();
	return 0;
}
Example #18
QList<AbstractPort*> createDpdkPorts(int baseId)
{
    QList<AbstractPort*> portList;
    int ret, count = rte_eth_dev_count();

    DpdkPort::setBaseId(baseId);

    for (int i = 0; i < count ; i++) {
        struct rte_eth_dev_info info;
        char if_name[IF_NAMESIZE];
        DpdkPort *port;
        int lcore_id;

        rte_eth_dev_info_get(i, &info);

        // Use Predictable Interface Naming Convention
        // <http://www.freedesktop.org/wiki/Software/systemd/PredictableNetworkInterfaceNames/>
        if (info.pci_dev->addr.domain)
            snprintf(if_name, sizeof(if_name), "enP%up%us%u",
                     info.pci_dev->addr.domain,
                     info.pci_dev->addr.bus,
                     info.pci_dev->addr.devid);
        else
            snprintf(if_name, sizeof(if_name), "enp%us%u",
                     info.pci_dev->addr.bus,
                     info.pci_dev->addr.devid);

        qDebug("%d. %s", baseId, if_name);
        qDebug("dpdk %d: %u "
               "min_rx_buf = %u, max_rx_pktlen = %u, "
               "maxq rx/tx = %u/%u",
               i, info.if_index,
               info.min_rx_bufsize, info.max_rx_pktlen,
               info.max_rx_queues, info.max_tx_queues);
        port = new DpdkPort(baseId++, if_name, mbufPool_);
        if (!port->isUsable())
        {
            qDebug("%s: unable to open %s. Skipping!", __FUNCTION__,
                   if_name);
            delete port;
            baseId--;
            continue;
        }

        lcore_id = getFreeLcore();
        if (lcore_id >=  0) {
            port->setTransmitLcoreId(lcore_id);
        }
        else {
            qWarning("Not enough cores - port %d.%s cannot transmit",
                     baseId, if_name);
        }

        portList.append(port);
    }

    ret = rte_eal_remote_launch(pollRxRings, NULL, rxLcoreId_);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Cannot launch poll-rx-rings\n");

    return portList;
}
Example #19
/*
 * Initialize a given port using default settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 * FIXME: Starting with assumption of one thread/core per port
 */
static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port,
                                     struct rte_mempool *rx_mbuf_pool,
                                     unsigned int mtu)
{
    int retval;

    /* Check for a valid port */
    if (port->id >= rte_eth_dev_count())
        return -ENODEV;

    /* Set up Ethernet device with defaults (1 RX ring, 1 TX ring) */
    /* FIXME: Check if hw_ip_checksum is possible */
    struct rte_eth_conf port_conf = {
        .rxmode = {
            .max_rx_pkt_len = mtu,
            .jumbo_frame = 1,
            .hw_ip_checksum = 1,
        }
    };
    retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf);
    if (retval != 0)
        return retval;

    retval = rte_eth_rx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL, rx_mbuf_pool);
    if (retval < 0)
        return retval;

    retval = rte_eth_tx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL);
    if (retval < 0)
        goto port_init_fail;

    /* Create the hash table for the RX sockets */
    char name[32];
    snprintf(name, sizeof(name), "rx_table_%u", port->id);
    struct rte_hash_parameters hash_params = {
        .name = name,
        .entries = UHD_DPDK_MAX_SOCKET_CNT,
        .key_len = sizeof(struct uhd_dpdk_ipv4_5tuple),
        .hash_func = NULL,
        .hash_func_init_val = 0,
    };
    port->rx_table = rte_hash_create(&hash_params);
    if (port->rx_table == NULL) {
        retval = rte_errno;
        goto port_init_fail;
    }

    /* Create ARP table */
    snprintf(name, sizeof(name), "arp_table_%u", port->id);
    hash_params.name = name;
    hash_params.entries = UHD_DPDK_MAX_SOCKET_CNT;
    hash_params.key_len = sizeof(uint32_t);
    hash_params.hash_func = NULL;
    hash_params.hash_func_init_val = 0;
    port->arp_table = rte_hash_create(&hash_params);
    if (port->arp_table == NULL) {
        retval = rte_errno;
        goto free_rx_table;
    }

    /* Set up list for TX queues */
    LIST_INIT(&port->txq_list);

    /* Start the Ethernet port. */
    retval = rte_eth_dev_start(port->id);
    if (retval < 0) {
        goto free_arp_table;
    }

    /* Display the port MAC address. */
    rte_eth_macaddr_get(port->id, &port->mac_addr);
    RTE_LOG(INFO, EAL, "Port %u MAC: %02x %02x %02x %02x %02x %02x\n",
                (unsigned)port->id,
                port->mac_addr.addr_bytes[0], port->mac_addr.addr_bytes[1],
                port->mac_addr.addr_bytes[2], port->mac_addr.addr_bytes[3],
                port->mac_addr.addr_bytes[4], port->mac_addr.addr_bytes[5]);

    struct rte_eth_link link;
    rte_eth_link_get(port->id, &link);
    RTE_LOG(INFO, EAL, "Port %u UP: %d\n", port->id, link.link_status);

    return 0;

free_arp_table:
    rte_hash_free(port->arp_table);
free_rx_table:
    rte_hash_free(port->rx_table);
port_init_fail:
    return rte_errno;
}

static int uhd_dpdk_thread_init(struct uhd_dpdk_thread *thread, unsigned int id)
{
    if (!ctx || !thread)
        return -EINVAL;

    unsigned int socket_id = rte_lcore_to_socket_id(id);
    thread->id = id;
    thread->rx_pktbuf_pool = ctx->rx_pktbuf_pools[socket_id];
    thread->tx_pktbuf_pool = ctx->tx_pktbuf_pools[socket_id];
    LIST_INIT(&thread->port_list);

    char name[32];
    snprintf(name, sizeof(name), "sockreq_ring_%u", id);
    thread->sock_req_ring = rte_ring_create(
                               name,
                               UHD_DPDK_MAX_PENDING_SOCK_REQS,
                               socket_id,
                               RING_F_SC_DEQ
                            );
    if (!thread->sock_req_ring)
        return -ENOMEM;
    return 0;
}
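
sock_req_ring is created with RING_F_SC_DEQ, i.e. exactly one consumer: the I/O thread that owns it. A hedged sketch of that consumer side, reusing the request struct seen in uhd_dpdk_destroy() below; the handler body is ours:

static void service_sock_reqs(struct uhd_dpdk_thread *t)
{
    struct uhd_dpdk_config_req *req = NULL;

    /* Single-consumer dequeue: no locking needed around the ring itself */
    while (rte_ring_dequeue(t->sock_req_ring, (void **)&req) == 0) {
        /* ... dispatch on req->req_type here ... */
        pthread_mutex_lock(&req->mutex);
        pthread_cond_signal(&req->cond);  /* wake the blocked requester */
        pthread_mutex_unlock(&req->mutex);
    }
}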


int uhd_dpdk_init(int argc, char **argv, unsigned int num_ports,
                  int *port_thread_mapping, int num_mbufs, int mbuf_cache_size,
                  int mtu)
{
    /* Init context only once */
    if (ctx)
        return 1;

    if ((num_ports == 0) || (port_thread_mapping == NULL)) {
        return -EINVAL;
    }

    /* Grabs arguments intended for DPDK's EAL */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    ctx = (struct uhd_dpdk_ctx *) rte_zmalloc("uhd_dpdk_ctx", sizeof(*ctx), rte_socket_id());
    if (!ctx)
        return -ENOMEM;

    ctx->num_threads = rte_lcore_count();
    if (ctx->num_threads <= 1)
        rte_exit(EXIT_FAILURE, "Error: No worker threads enabled\n");

    /* Check that we have ports to send/receive on */
    ctx->num_ports = rte_eth_dev_count();
    if (ctx->num_ports < 1)
        rte_exit(EXIT_FAILURE, "Error: Found no ports\n");
    if (ctx->num_ports < num_ports)
        rte_exit(EXIT_FAILURE, "Error: User requested more ports than available\n");

    /* Get memory for thread and port data structures */
    ctx->threads = rte_zmalloc("uhd_dpdk_thread", RTE_MAX_LCORE*sizeof(struct uhd_dpdk_thread), 0);
    if (!ctx->threads)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for thread data\n");
    ctx->ports = rte_zmalloc("uhd_dpdk_port", ctx->num_ports*sizeof(struct uhd_dpdk_port), 0);
    if (!ctx->ports)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for port data\n");

    /* Initialize the thread data structures */
    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        /* Do one mempool of RX/TX per socket */
        unsigned int socket_id = rte_lcore_to_socket_id(i);
        /* FIXME Probably want to take into account actual number of ports per socket */
        if (ctx->tx_pktbuf_pools[socket_id] == NULL) {
            /* Creates a new mempool in memory to hold the mbufs.
             * This is done for each CPU socket
             */
            const int mbuf_size = mtu + 2048 + RTE_PKTMBUF_HEADROOM;
            char name[32];
            snprintf(name, sizeof(name), "rx_mbuf_pool_%u", socket_id);
            ctx->rx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            snprintf(name, sizeof(name), "tx_mbuf_pool_%u", socket_id);
            ctx->tx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            if ((ctx->rx_pktbuf_pools[socket_id]== NULL) ||
                (ctx->tx_pktbuf_pools[socket_id]== NULL))
                rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
        }

        if (uhd_dpdk_thread_init(&ctx->threads[i], i) < 0)
            rte_exit(EXIT_FAILURE, "Error initializing thread %i\n", i);
    }

    unsigned master_lcore = rte_get_master_lcore();

    /* Assign ports to threads and initialize the port data structures */
    for (unsigned int i = 0; i < num_ports; i++) {
        int thread_id = port_thread_mapping[i];
        if (thread_id < 0)
            continue;
        if (((unsigned int) thread_id) == master_lcore)
            RTE_LOG(WARNING, EAL, "User requested master lcore for port %u\n", i);
        if (ctx->threads[thread_id].id != (unsigned int) thread_id)
            rte_exit(EXIT_FAILURE, "Requested inactive lcore %u for port %u\n", (unsigned int) thread_id, i);

        struct uhd_dpdk_port *port = &ctx->ports[i];
        port->id = i;
        port->parent = &ctx->threads[thread_id];
        ctx->threads[thread_id].num_ports++;
        LIST_INSERT_HEAD(&ctx->threads[thread_id].port_list, port, port_entry);

        /* Initialize port. */
        if (uhd_dpdk_port_init(port, port->parent->rx_pktbuf_pool, mtu) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
                    i);
    }

    RTE_LOG(INFO, EAL, "Init DONE!\n");

    /* FIXME: Create functions to do this */
    RTE_LOG(INFO, EAL, "Starting I/O threads!\n");

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];
        if (!LIST_EMPTY(&t->port_list)) {
            rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, ctx->threads[i].id);
        }
    }
    return 0;
}
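
The explicit rte_get_next_lcore(-1, 1, 0) loops above (skip_master = 1, wrap = 0) are exactly what DPDK's RTE_LCORE_FOREACH_SLAVE macro expands to, so the launch pass could equivalently be written as:

unsigned int lcore_id;

RTE_LCORE_FOREACH_SLAVE(lcore_id) {
    struct uhd_dpdk_thread *t = &ctx->threads[lcore_id];

    if (!LIST_EMPTY(&t->port_list))
        rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, lcore_id);
}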

/* FIXME: This will be changed once we have functions to handle the threads */
int uhd_dpdk_destroy(void)
{
    if (!ctx)
        return -ENODEV;

    struct uhd_dpdk_config_req *req = (struct uhd_dpdk_config_req *) rte_zmalloc(NULL, sizeof(*req), 0);
    if (!req)
        return -ENOMEM;

    req->req_type = UHD_DPDK_LCORE_TERM;

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];

        if (LIST_EMPTY(&t->port_list))
            continue;

        if (rte_eal_get_lcore_state(t->id) == FINISHED)
            continue;

        pthread_mutex_init(&req->mutex, NULL);
        pthread_cond_init(&req->cond, NULL);
        pthread_mutex_lock(&req->mutex);
        if (rte_ring_enqueue(t->sock_req_ring, req)) {
            pthread_mutex_unlock(&req->mutex);
            RTE_LOG(ERR, USER2, "Failed to terminate thread %d\n", i);
            rte_free(req);
            return -ENOSPC;
        }
        /* pthread_cond_timedwait() takes an absolute deadline, not a
         * relative interval, so build one a second from now */
        struct timespec timeout;
        clock_gettime(CLOCK_REALTIME, &timeout);
        timeout.tv_sec += 1;
        pthread_cond_timedwait(&req->cond, &req->mutex, &timeout);
        pthread_mutex_unlock(&req->mutex);
    }

    rte_free(req);
    return 0;
}
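
uhd_dpdk_destroy() only asks the I/O threads to stop; it never joins them. A speculative follow-up (not in the original) that would return every slave lcore to the WAIT state:

/* Block until all slave lcores have returned from the function they were
 * launched with; only then is EAL teardown safe. */
rte_eal_mp_wait_lcore();
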
bool DpdkDeviceList::startDpdkWorkerThreads(CoreMask coreMask, std::vector<DpdkWorkerThread*>& workerThreadsVec)
{
	if (!isInitialized())
	{
		LOG_ERROR("DpdkDeviceList not initialized");
		return false;
	}

	CoreMask tempCoreMask = coreMask;
	size_t numOfCoresInMask = 0;
	int coreNum = 0;
	while (tempCoreMask > 0)
	{
		if (tempCoreMask & 1)
		{
			if (!rte_lcore_is_enabled(coreNum))
			{
				LOG_ERROR("Trying to use core #%d which isn't initialized by DPDK", coreNum);
				return false;
			}

			numOfCoresInMask++;
		}
		tempCoreMask = tempCoreMask >> 1;
		coreNum++;
	}

	if (numOfCoresInMask == 0)
	{
		LOG_ERROR("Number of cores in mask is 0");
		return false;
	}

	if (numOfCoresInMask != workerThreadsVec.size())
	{
		LOG_ERROR("Number of cores in core mask different from workerThreadsVec size");
		return false;
	}

	if (coreMask & getDpdkMasterCore().Mask)
	{
		LOG_ERROR("Cannot run worker thread on DPDK master core");
		return false;
	}

	m_WorkerThreads.clear();
	uint32_t index = 0;
	std::vector<DpdkWorkerThread*>::iterator iter = workerThreadsVec.begin();
	while (iter != workerThreadsVec.end())
	{
		SystemCore core = SystemCores::IdToSystemCore[index];
		if (!(coreMask & core.Mask))
		{
			index++;
			continue;
		}

		int err = rte_eal_remote_launch(dpdkWorkerThreadStart, *iter, core.Id);
		if (err != 0)
		{
			for (std::vector<DpdkWorkerThread*>::iterator iter2 = workerThreadsVec.begin(); iter2 != iter; iter2++)
			{
				(*iter2)->stop();
				rte_eal_wait_lcore((*iter2)->getCoreId());
				LOG_DEBUG("Thread on core [%d] stopped", (*iter2)->getCoreId());
			}
			LOG_ERROR("Cannot create worker thread #%d. Error was: [%s]", core.Id, strerror(err));
			return false;
		}
		m_WorkerThreads.push_back(*iter);

		index++;
		iter++;
	}

	return true;
}
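
The error branch above rolls back by stopping and joining every thread launched so far before reporting failure. A minimal C sketch of the same pattern; worker_fn, args, lcore_ids, n and stop_flag are hypothetical placeholders, and the workers are assumed to poll stop_flag and return once it is set:

static volatile int stop_flag;

static int launch_all_or_rollback(lcore_function_t *worker_fn, void **args,
                                  unsigned int *lcore_ids, int n)
{
    int launched;

    for (launched = 0; launched < n; launched++) {
        if (rte_eal_remote_launch(worker_fn, args[launched],
                                  lcore_ids[launched]) != 0) {
            stop_flag = 1;                 /* ask earlier workers to exit */
            while (launched-- > 0)
                rte_eal_wait_lcore(lcore_ids[launched]); /* join them */
            return -1;
        }
    }
    return 0;
}
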
Example #21
0
static int
test_pmd_perf(void)
{
	uint16_t nb_ports, num, nb_lcores, slave_id = (uint16_t)-1;
	uint16_t nb_rxd = MAX_TRAFFIC_BURST;
	uint16_t nb_txd = MAX_TRAFFIC_BURST;
	uint16_t portid;
	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
	int socketid = -1;
	int ret;

	printf("Start PMD RXTX cycles cost test.\n");

	signal(SIGUSR1, signal_handler);
	signal(SIGUSR2, signal_handler);

	nb_ports = rte_eth_dev_count();
	if (nb_ports < NB_ETHPORTS_USED) {
		printf("At least %u port(s) used for perf. test\n",
		       NB_ETHPORTS_USED);
		return -1;
	}

	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	nb_lcores = rte_lcore_count();

	memset(lcore_conf, 0, sizeof(lcore_conf));
	init_lcores();

	init_mbufpool(NB_MBUF);

	if (sc_flag == SC_CONTINUOUS) {
		nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
		nb_txd = RTE_TEST_TX_DESC_DEFAULT;
	}
	printf("CONFIG RXD=%d TXD=%d\n", nb_rxd, nb_txd);

	reset_count();
	num = 0;
	for (portid = 0; portid < nb_ports; portid++) {
		if (socketid == -1) {
			socketid = rte_eth_dev_socket_id(portid);
			slave_id = alloc_lcore(socketid);
			if (slave_id == (uint16_t)-1) {
				printf("No avail lcore to run test\n");
				return -1;
			}
			printf("Performance test runs on lcore %u socket %u\n",
			       slave_id, socketid);
		}

		if (socketid != rte_eth_dev_socket_id(portid)) {
			printf("Skip port %d\n", portid);
			continue;
		}

		/* port configure */
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					    nb_tx_queue, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%d\n",
				 ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		printf("Port %u ", portid);
		print_ethaddr("Address:", &ports_eth_addr[portid]);
		printf("\n");

		/* tx queue setup */
		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
					     socketid, &tx_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_tx_queue_setup: err=%d, "
				"port=%d\n", ret, portid);

		/* rx queue setup */
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
						socketid, &rx_conf,
						mbufpool[socketid]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
				 "port=%d\n", ret, portid);

		/* Start device */
		stop = 0;
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		/* always enable promiscuous mode */
		rte_eth_promiscuous_enable(portid);

		lcore_conf[slave_id].portlist[num++] = portid;
		lcore_conf[slave_id].nb_ports++;
	}
	check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

	if (tx_burst == NULL) {
		tx_burst = (struct rte_mbuf **)
			rte_calloc_socket("tx_buff",
					  MAX_TRAFFIC_BURST * nb_ports,
					  sizeof(void *),
					  RTE_CACHE_LINE_SIZE, socketid);
		if (!tx_burst)
			return -1;
	}

	init_traffic(mbufpool[socketid],
		     tx_burst, MAX_TRAFFIC_BURST * nb_ports);

	printf("Generate %d packets @socket %d\n",
	       MAX_TRAFFIC_BURST * nb_ports, socketid);

	if (sc_flag == SC_CONTINUOUS) {
		/* do both rxtx by default */
		if (NULL == do_measure)
			do_measure = measure_rxtx;

		rte_eal_remote_launch(main_loop, NULL, slave_id);

		if (rte_eal_wait_lcore(slave_id) < 0)
			return -1;
	} else if (sc_flag == SC_BURST_POLL_FIRST ||
		   sc_flag == SC_BURST_XMIT_FIRST)
		if (exec_burst(sc_flag, slave_id) < 0)
			return -1;

	/* port tear down */
	for (portid = 0; portid < nb_ports; portid++) {
		if (socketid != rte_eth_dev_socket_id(portid))
			continue;

		rte_eth_dev_stop(portid);
	}

	return 0;
}
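
The test reports raw TSC cycle counts. To relate those to wall-clock time, divide by the TSC frequency; a small sketch (the helper name is ours):

#include <rte_cycles.h>

/* Convert a TSC cycle total into nanoseconds per packet */
static double cycles_to_ns_per_pkt(uint64_t cycles, uint64_t pkts)
{
    return (double)cycles * 1e9 / (double)rte_get_tsc_hz() / (double)pkts;
}
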
Example #22
0
/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	unsigned cores;
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id;
	uintptr_t i;
	int ret;
	unsigned nb_ports, valid_num_ports;
	uint16_t portid;

	signal(SIGHUP, sighup_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");

	cores = rte_lcore_count();
	if ((cores & (cores - 1)) != 0 || cores > RTE_MAX_LCORE) {
		rte_exit(EXIT_FAILURE,"This program can only run on an even"
				" number of cores(1-%d)\n\n", RTE_MAX_LCORE);
	}

	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global variable NUM_PORTS and the global array PORTS,
	 * and derive VALID_NUM_PORTS from the number of ports in the system.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n");
	}

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	}

	/* call lcore_main() on every slave lcore */
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id);
	}
	/* call on master too */
	(void) lcore_main((void*)i);

	return 0;
}
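
Note that the launch loop smuggles a small integer through the void * argument instead of passing a pointer. On the receiving side the index is recovered by casting back through uintptr_t; a sketch, since the real lcore_main is not shown here:

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical skeleton of the receiving side */
static int lcore_main_sketch(void *arg)
{
    uintptr_t core_num = (uintptr_t)arg;  /* an index, not a real pointer */

    printf("lcore %u assigned logical index %" PRIuPTR "\n",
           rte_lcore_id(), core_num);
    return 0;
}
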
Example #23
0
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
			struct rte_mempool *session_pool_socket[])
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores need to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if more are available
	 * than there are worker cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate number of needed queue pairs, based on the amount
	 * of available number of logical cores and crypto devices.
	 * For instance, if there are 4 cores and 2 crypto devices,
	 * 2 queue pairs will be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		if (session_pool_socket[socket_id] == NULL) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%u", socket_id);

			sess_mp = rte_mempool_create(mp_name,
						NUM_SESSIONS,
						max_sess_size,
						SESS_MEMPOOL_CACHE_SIZE,
						0, NULL, NULL, NULL,
						NULL, socket_id,
						0);

			if (sess_mp == NULL) {
				printf("Cannot create session pool on socket %d\n",
					socket_id);
				return -ENOMEM;
			}

			printf("Allocated session pool on socket %d\n", socket_id);
			session_pool_socket[socket_id] = sess_mp;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id,
				session_pool_socket[socket_id]);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u",	j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
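
The nb_qps computation above is a ceiling division: with 5 worker lcores and 2 crypto devices it yields 3 queue pairs per device. The same expression in its usual one-liner form (the helper name is ours):

/* ceil(nb_lcores / nb_cdevs) without floating point */
static inline unsigned int qps_per_cdev(unsigned int nb_lcores,
                                        unsigned int nb_cdevs)
{
    return (nb_lcores + nb_cdevs - 1) / nb_cdevs;
}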

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;

	void *ctx[RTE_MAX_LCORE] = { };
	struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking on or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
			session_pool_socket);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
					"\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id], cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	/* Get first size from range or list */
	if (opts.inc_buffer_size != 0)
		opts.test_buffer_size = opts.min_buffer_size;
	else
		opts.test_buffer_size = opts.buffer_size_list[0];

	while (opts.test_buffer_size <= opts.max_buffer_size) {
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size += opts.inc_buffer_size;
		else {
			if (++buffer_size_idx == opts.buffer_size_count)
				break;
			opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[i];

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}