Example #1
static int
globalinit(struct virtif_user *viu)
{
	int rv;

	if ((rv = rte_eal_init(sizeof(ealargs)/sizeof(ealargs[0]),
	    /*UNCONST*/(void *)(uintptr_t)ealargs)) < 0)
		OUT("eal init");

	if ((mbpool_tx = rte_mempool_create("mbuf_pool_tx", NMBUF_TX, MBSIZE, 0/*MBCACHE*/,
	    sizeof(struct rte_pktmbuf_pool_private),
	    rte_pktmbuf_pool_init, NULL,
	    rte_pktmbuf_init, NULL, 0, 0)) == NULL) {
		rv = -EINVAL;
		OUT("mbuf pool tx");
	}
	if ((mbpool_rx = rte_mempool_create("mbuf_pool_rx", NMBUF_RX, MBSIZE, 0/*MBCACHE*/,
	    sizeof(struct rte_pktmbuf_pool_private),
	    rte_pktmbuf_pool_init, NULL,
	    rte_pktmbuf_init, NULL, 0, 0)) == NULL) {
		rv = -EINVAL;
		OUT("mbuf pool tx");
	}

	if (rte_eth_dev_count() == 0) {
		rv = -1;
		OUT("no ports");
	}
	rv = 0;

 out:
 	return rv;
}
Example #2
static void
app_init_mbuf_pools(void)
{
	/* Init the buffer pool */
	RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
	app.pool = rte_mempool_create(
		"mempool",
		app.pool_size,
		app.pool_buffer_size,
		app.pool_cache_size,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.pool == NULL)
		rte_panic("Cannot create mbuf pool\n");

	/* Init the indirect buffer pool */
	RTE_LOG(INFO, USER1, "Creating the indirect mbuf pool ...\n");
	app.indirect_pool = rte_mempool_create(
		"indirect mempool",
		app.pool_size,
		sizeof(struct rte_mbuf) + sizeof(struct app_pkt_metadata),
		app.pool_cache_size,
		0,
		NULL, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.indirect_pool == NULL)
		rte_panic("Cannot create mbuf pool\n");

	/* Init the message buffer pool */
	RTE_LOG(INFO, USER1, "Creating the message pool ...\n");
	app.msg_pool = rte_mempool_create(
		"mempool msg",
		app.msg_pool_size,
		app.msg_pool_buffer_size,
		app.msg_pool_cache_size,
		0,
		NULL, NULL,
		rte_ctrlmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.msg_pool == NULL)
		rte_panic("Cannot create message pool\n");
}
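For context, the global `app` structure these calls rely on is not shown in the snippet; a hypothetical sketch, with field names taken from the calls above and everything else assumed, might look like this:

#include <stdint.h>
#include <rte_mempool.h>

/* Hypothetical layout of the `app` context used above; field names come
 * from the calls, the comments and grouping are assumptions. */
static struct app_params {
	struct rte_mempool *pool;           /* direct mbuf pool */
	struct rte_mempool *indirect_pool;  /* indirect mbuf pool */
	struct rte_mempool *msg_pool;       /* control-message pool */
	uint32_t pool_size;                 /* number of elements */
	uint32_t pool_buffer_size;          /* bytes per element */
	uint32_t pool_cache_size;           /* per-lcore cache size */
	uint32_t msg_pool_size;
	uint32_t msg_pool_buffer_size;
	uint32_t msg_pool_cache_size;
} app;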
Example #3
static int
globalinit(void)
{
    int rv;

    if ((rv = rte_eal_init(sizeof(ealargs)/sizeof(ealargs[0]),
                     /*UNCONST*/(void *)(uintptr_t)ealargs)) < 0)
        OUT("eal init\n");

    if ((mbpool = rte_mempool_create("mbuf_pool", NMBUF, MBSIZE, MBALIGN,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, 0, 0)) == NULL) {
        rv = -EINVAL;
        OUT("mbuf pool\n");
    }

    if ((rv = PMD_INIT()) < 0)
        OUT("wm driver\n");
    if ((rv = rte_eal_pci_probe()) < 0)
        OUT("PCI probe\n");
    if (rte_eth_dev_count() == 0) {
        rv = -1;
        OUT("no ports\n");
    }
    rv = 0;

out:
    return rv;
}
Example #4
File: nvmf.c Project: famz/spdk
static int
spdk_nvmf_initialize_pools(void)
{
	SPDK_NOTICELOG("\n*** NVMf Pool Creation ***\n");

	g_num_requests = MAX_SUBSYSTEMS * g_nvmf_tgt.MaxConnectionsPerSession * g_nvmf_tgt.MaxQueueDepth;

	/* create NVMe backend request pool */
	request_mempool = rte_mempool_create("NVMe_Pool",
					     g_num_requests,
					     spdk_nvme_request_size(),
					     128, 0,
					     NULL, NULL, NULL, NULL,
					     SOCKET_ID_ANY, 0);
	if (!request_mempool) {
		SPDK_ERRLOG("create NVMe request pool failed\n");
		return -1;
	}

	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "NVMe request_mempool %p, size %" PRIu64 " bytes\n",
		      request_mempool,
		      (uint64_t)g_num_requests * spdk_nvme_request_size());

	return 0;
}
Example #5
/**
 * @brief           Initialize MBuf pool for device
 * 
 * @param name      const char*, name of MemPool object
 * 
 * @return          true if success and false otherwise
 */
bool DPDKAdapter::initDevMBufPool(const char* name)
{
    if(!name)
        return false;
        
    // Don't create the MemPool if it already exists
    MemPool_t* pool = rte_mempool_lookup(name);
    if(pool)
        return true;
    
    pool = rte_mempool_create(name,
                DPDK_MEMPOOL_SIZE,
                MBUF_SIZE,
                DPDK_MEMPOOL_CACHE_SIZE,
                sizeof(struct rte_pktmbuf_pool_private),
                rte_pktmbuf_pool_init, NULL,
                rte_pktmbuf_init, NULL,
                SOCKET_ID_ANY,
                MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);

    if(pool == NULL)
    {
        qCritical("Can not init memory pool");
        return false;
    }

    if(rte_mempool_lookup(name) != pool)
    {
        qCritical("Can not lookup memory pool by its name");
        return false;
    }

    return true;
}
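A minimal usage sketch for a pool created this way (the helper below is hypothetical, not part of DPDKAdapter): look the pool up by name, cycle one mbuf through it, and return it.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Hypothetical helper: allocate and release one mbuf from a named pool. */
static int use_dev_pool(const char *name)
{
    struct rte_mempool *pool = rte_mempool_lookup(name);
    if (pool == NULL)
        return -1;                 /* pool was never created */

    struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
    if (m == NULL)
        return -1;                 /* pool exhausted */

    /* ... fill in packet data here ... */

    rte_pktmbuf_free(m);           /* hand the buffer back to the pool */
    return 0;
}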
Example #6
static void setup_mempools(struct lcore_cfg* lcore_cfg)
{
	char name[64];
	struct lcore_cfg *lconf = 0;
	for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) {
		if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) {
			continue;
		}
		lconf = &lcore_cfg[lcore_id];
		uint8_t socket = rte_lcore_to_socket_id(lcore_id);
		for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) {
			struct task_startup_cfg *startup_cfg = &lconf->startup_cfg[task_id];

			if (startup_cfg->rx_port != NO_PORT_AVAIL) {
				/* allocate memory pool for packets */
				if (startup_cfg->nb_mbuf == 0) {
					startup_cfg->nb_mbuf = tgen_cfg.nb_mbuf;
				}

				/* use this pool for the interface that the core is receiving from */
				snprintf(name, sizeof(name), "core_%u_port_%u_pool", lcore_id, task_id);
				startup_cfg->pool = rte_mempool_create(name,
								       startup_cfg->nb_mbuf - 1, MBUF_SIZE,
								       MAX_PKT_BURST * 4,
								       sizeof(struct rte_pktmbuf_pool_private),
								       rte_pktmbuf_pool_init, NULL,
								       tgen_pktmbuf_init, lconf,
								       socket, 0);
				TGEN_PANIC(startup_cfg->pool == NULL, "\t\tError: cannot create mempool for core %u port %u\n", lcore_id, task_id);
				mprintf("\t\tMempool %p size = %u * %u cache %u, socket %d\n", startup_cfg->pool,
					startup_cfg->nb_mbuf, MBUF_SIZE, MAX_PKT_BURST * 4, socket);
			}
		}
	}
}
Example #7
/**
 * Initialise the mbuf pool for packet reception for the NIC, and any other
 * buffer pools needed by the app - currently none.
 */
static int
init_mbuf_pools(void)
{
	const unsigned num_mbufs = (num_rings * MBUFS_PER_RING);

	/* don't pass single-producer/single-consumer flags to mbuf create as it
	 * seems faster to use a cache instead */
	printf("Creating mbuf pool '%s' [%u mbufs] ...\n",
			HSM_POOL_NAME, num_mbufs);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
	{
		pktmbuf_pool = rte_mempool_lookup(HSM_POOL_NAME);
		if (pktmbuf_pool == NULL)
			rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");
	}
	else
	{
		pktmbuf_pool = rte_mempool_create(HSM_POOL_NAME, num_mbufs,
				MBUF_SIZE, MBUF_CACHE_SIZE,
				sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
				NULL, rte_pktmbuf_init, NULL, rte_socket_id(), NO_FLAGS);
	}

	return (pktmbuf_pool == NULL); /* 0  on success */
}
Example #8
static struct rte_mempool *
pktgen_mbuf_pool_create(const char * type, uint8_t pid, uint8_t queue_id,
		uint32_t nb_mbufs, int socket_id, int cache_size )
{
    struct rte_mempool * mp;
    char    name[RTE_MEMZONE_NAMESIZE];

    snprintf(name, sizeof(name), "%-12s%u:%u", type, pid, queue_id);
    pktgen_log_info("    Create: %-*s - Memory used (MBUFs %4u x (size %u + Hdr %lu)) + %lu = %6lu KB",
            16, name, nb_mbufs, MBUF_SIZE, sizeof(struct rte_mbuf), sizeof(struct rte_mempool),
            (((nb_mbufs * (MBUF_SIZE + sizeof(struct rte_mbuf)) + sizeof(struct rte_mempool))) + 1023)/1024);
    pktgen.mem_used += ((nb_mbufs * (MBUF_SIZE + sizeof(struct rte_mbuf)) + sizeof(struct rte_mempool)));
    pktgen.total_mem_used += ((nb_mbufs * (MBUF_SIZE + sizeof(struct rte_mbuf)) + sizeof(struct rte_mempool)));

    /* create the mbuf pool */
    mp = rte_mempool_create(name, nb_mbufs, MBUF_SIZE, cache_size,
                   sizeof(struct rte_pktmbuf_pool_private),
                   rte_pktmbuf_pool_init, NULL,
                   rte_pktmbuf_init, NULL,
                   socket_id, MEMPOOL_F_DMA);
    if (mp == NULL)
        pktgen_log_panic("Cannot create mbuf pool (%s) port %d, queue %d, nb_mbufs %d, socket_id %d: %s",
        		name, pid, queue_id, nb_mbufs, socket_id, rte_strerror(errno));

    return mp;
}
Example #9
static int
globalinit(struct virtif_user *viu)
{
	int rv;

	if ((rv = rte_eal_init(sizeof(ealargs)/sizeof(ealargs[0]),
	    /*UNCONST*/(void *)(uintptr_t)ealargs)) < 0)
		OUT("eal init\n");

	/* disable mempool cache due to DPDK bug, not thread safe */
	if ((mbpool = rte_mempool_create("mbuf_pool", NMBUF, MBSIZE, 0/*MBCACHE*/,
	    sizeof(struct rte_pktmbuf_pool_private),
	    rte_pktmbuf_pool_init, NULL,
	    rte_pktmbuf_init, NULL, 0, 0)) == NULL) {
		rv = -EINVAL;
		OUT("mbuf pool\n");
	}

	if ((rv = PMD_INIT()) < 0)
		OUT("pmd init\n");
	if ((rv = rte_eal_pci_probe()) < 0)
		OUT("PCI probe\n");
	if (rte_eth_dev_count() == 0) {
		rv = -1;
		OUT("no ports\n");
	}
	rv = 0;

 out:
 	return rv;
}
Example #10
/* Creates mempool for VIRTIO TXQ */
static struct rte_mempool* create_mempool(int core_id, struct virtio_net* dev,
                                    int q_no)
{
    unsigned socketid = rte_lcore_to_socket_id(core_id);
    struct rte_mempool *pool;
    uint32_t mp_size;
    char name[32];

    /* Create memory pool */
    mp_size = VIRTIO_MAX_NB_BUF;
    snprintf(name, 32, "virtio_%ld_%d", dev->device_fh, q_no);
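    /* On ENOMEM, retry with half the element count until creation
     * succeeds or the pool would drop below VIRTIO_MIN_NB_BUF. */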
    do {
        pool = rte_mempool_create(name,
                mp_size,
                VIRTIO_MBUF_SIZE,
                VIRTIO_MP_CACHE_SIZE,
                sizeof(struct rte_pktmbuf_pool_private),
                rte_pktmbuf_pool_init,
                NULL,
                rte_pktmbuf_init,
                NULL,
                socketid,
                0);
    } while(!pool &&
        rte_errno == ENOMEM &&
        (mp_size /= 2) >= VIRTIO_MIN_NB_BUF);

    return pool;
}
Example #11
File: init.c Project: exuuwen/study
static void udpi_init_mbuf_pools(void)
{
	/* Init the buffer pool */
	RTE_LOG(INFO, MEMPOOL, "Creating the mbuf pool ...\n");
	udpi.pool = rte_mempool_create("mempool", 
		udpi.pool_size,
		udpi.pool_buffer_size, 
		udpi.pool_cache_size, 
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, 
		NULL, 
		rte_pktmbuf_init, 
		NULL, 
		rte_socket_id(), 
		0);
	if(NULL == udpi.pool)
	{
		rte_panic("Cannot create mbuf pool\n");
	}

	/* Init the indirect buffer pool */
	/*RTE_LOG(INFO, MEMPOOL, "Creating the indirect mbuf pool ...\n");
	udpi.indirect_pool = rte_mempool_create("indirect mempool", 
		udpi.pool_size, 
		sizeof(struct rte_mbuf), udpi.pool_cache_size, 0, NULL, NULL,
		rte_pktmbuf_init, NULL, rte_socket_id(), 0);
	if(NULL == udpi.indirect_pool)
	{
		rte_panic("Cannot create indirect mbuf pool\n");
	}*/
	
	/* Init the message buffer pool */
	RTE_LOG(INFO, MEMPOOL, "Creating the message mbuf pool ...\n");
	udpi.msg_pool = rte_mempool_create("msg mempool",
		udpi.msg_pool_size,
		udpi.msg_pool_buffer_size,
		udpi.msg_pool_cache_size,
		0, NULL, NULL, 
		rte_ctrlmbuf_init, NULL,
		rte_socket_id(), 0);
	if(NULL == udpi.msg_pool)
	{
		rte_panic("Cannot create message mbuf pool\n");
	}

	return;
}
Example #12
int main(int argc, char **argv)
{
	int rc;

	/*
	 * By default, the SPDK NVMe driver uses DPDK for huge page-based
	 *  memory management and NVMe request buffer pools.  Huge pages can
	 *  be either 2MB or 1GB in size (instead of 4KB) and are pinned in
	 *  memory.  Pinned memory is important to ensure DMA operations
	 *  never target swapped out memory.
	 *
	 * So first we must initialize DPDK.  "-c 0x1" indicates to only use
	 *  core 0.
	 */
	rc = rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]), ealargs);
	if (rc < 0) {
		fprintf(stderr, "could not initialize dpdk\n");
		return 1;
	}

	/*
	 * Create the NVMe request buffer pool.  This will be used internally
	 *  by the SPDK NVMe driver to allocate an spdk_nvme_request data
	 *  structure for each I/O request.  This is implicitly passed to
	 *  the SPDK NVMe driver via an extern declaration in nvme_impl.h.
	 */
	request_mempool = rte_mempool_create("nvme_request", 8192,
					     spdk_nvme_request_size(), 128, 0,
					     NULL, NULL, NULL, NULL,
					     SOCKET_ID_ANY, 0);

	if (request_mempool == NULL) {
		fprintf(stderr, "could not initialize request mempool\n");
		return 1;
	}

	printf("Initializing NVMe Controllers\n");

	/*
	 * Start the SPDK NVMe enumeration process.  probe_cb will be called
	 *  for each NVMe controller found, giving our application a choice on
	 *  whether to attach to each controller.  attach_cb will then be
	 *  called for each controller after the SPDK NVMe driver has completed
	 *  initializing the controller we chose to attach.
	 */
	rc = spdk_nvme_probe(NULL, probe_cb, attach_cb, NULL);
	if (rc != 0) {
		fprintf(stderr, "spdk_nvme_probe() failed\n");
		cleanup();
		return 1;
	}

	printf("Initialization complete.\n");
	hello_world();
	cleanup();
	return 0;
}
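The `ealargs` array passed to rte_eal_init() is declared elsewhere in this example; a minimal sketch of what it could contain follows. Only the "-c 0x1" core mask is confirmed by the comment above; the other entries are assumptions.

/* Assumed definition of ealargs; values are illustrative. */
static char *ealargs[] = {
	"hello_world",	/* argv[0]: program name placeholder */
	"-c 0x1",	/* core mask: use core 0 only */
	"-n 4",		/* memory channels (assumed) */
};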
Example #13
static void pg_alloc_mempool(void)
{
	mp = rte_mempool_create("test_mempool", NUM_MBUFS, MBUF_SIZE,
				MBUF_CACHE_SIZE,
				sizeof(struct rte_pktmbuf_pool_private),
				rte_pktmbuf_pool_init, NULL,
				rte_pktmbuf_init, NULL,
				rte_socket_id(), 0);
	g_assert(mp);
}
Example #14
int main(int argc, char **argv)
{
	struct dev			*iter;
	int				rc, i;

	printf("NVMe Write/Read with End-to-End data protection test\n");

	rc = rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]),
			  (char **)(void *)(uintptr_t)ealargs);

	if (rc < 0) {
		fprintf(stderr, "could not initialize dpdk\n");
		exit(1);
	}

	request_mempool = rte_mempool_create("nvme_request", 8192,
					     spdk_nvme_request_size(), 128, 0,
					     NULL, NULL, NULL, NULL,
					     SOCKET_ID_ANY, 0);

	if (request_mempool == NULL) {
		fprintf(stderr, "could not initialize request mempool\n");
		exit(1);
	}

	if (spdk_nvme_probe(NULL, probe_cb, attach_cb, NULL) != 0) {
		fprintf(stderr, "nvme_probe() failed\n");
		exit(1);
	}

	rc = 0;
	foreach_dev(iter) {
#define TEST(x) write_read_e2e_dp_tests(iter, x, #x)
		if (TEST(dp_with_pract_test)
		    || TEST(dp_without_pract_extended_lba_test)
		    || TEST(dp_without_flags_extended_lba_test)
		    || TEST(dp_without_pract_separate_meta_test)
		    || TEST(dp_without_pract_separate_meta_apptag_test)
		    || TEST(dp_without_flags_separate_meta_test)) {
#undef TEST
			rc = 1;
			printf("%s: failed End-to-End data protection tests\n", iter->name);
		}
	}

	printf("Cleaning up...\n");

	for (i = 0; i < num_devs; i++) {
		struct dev *dev = &devs[i];

		spdk_nvme_detach(dev->ctrlr);
	}

	return rc;
}
Example #15
/* Main function */
int main(int argc, char **argv)
{
	int ret;
	int i;

	/* Create handler for SIGINT for CTRL + C closing and SIGALRM to print stats*/
	signal(SIGINT, sig_handler);
	signal(SIGALRM, alarm_routine);

	/* Initialize DPDK environment with args, then shift argc and argv to get application parameters */
	ret = rte_eal_init(argc, argv);
	if (ret < 0) FATAL_ERROR("Cannot init EAL\n");
	argc -= ret;
	argv += ret;

	/* Check that this application has exactly 2 cores to use */
	ret = rte_lcore_count();
	if (ret != 2) FATAL_ERROR("This application needs exactly 2 cores.");

	/* Parse arguments */
	ret = parse_args(argc, argv);
	if (ret < 0) FATAL_ERROR("Wrong arguments\n");

	/* Probe PCI bus for ethernet devices, mandatory only in DPDK < 1.8.0 */
	#if RTE_VER_MAJOR == 1 && RTE_VER_MINOR < 8
		ret = rte_eal_pci_probe();
		if (ret < 0) FATAL_ERROR("Cannot probe PCI\n");
	#endif

	/* Get number of ethernet devices */
	nb_sys_ports = rte_eth_dev_count();
	if (nb_sys_ports <= 0) FATAL_ERROR("Cannot find ETH devices\n");
	
	/* Create a mempool with per-core cache, initializing every element to be used as an mbuf, and allocating on the current NUMA node */
	pktmbuf_pool = rte_mempool_create(MEMPOOL_NAME, buffer_size-1, MEMPOOL_ELEM_SZ, MEMPOOL_CACHE_SZ, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,rte_socket_id(), 0);
	if (pktmbuf_pool == NULL) FATAL_ERROR("Cannot create cluster_mem_pool. Errno: %d [ENOMEM: %d, ENOSPC: %d, E_RTE_NO_TAILQ: %d, E_RTE_NO_CONFIG: %d, E_RTE_SECONDARY: %d, EINVAL: %d, EEXIST: %d]\n", rte_errno, ENOMEM, ENOSPC, E_RTE_NO_TAILQ, E_RTE_NO_CONFIG, E_RTE_SECONDARY, EINVAL, EEXIST  );
	
	/* Create a ring for exchanging packets between cores, and allocating on the current NUMA node */
	intermediate_ring = rte_ring_create(RING_NAME, buffer_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (intermediate_ring == NULL) FATAL_ERROR("Cannot create ring");


	/* Operations needed for each ethernet device */			
	for(i=0; i < nb_sys_ports; i++)
		init_port(i);

	/* Start consumer and producer routine on 2 different cores: producer launched first... */
	ret = rte_eal_mp_remote_launch(main_loop_producer, NULL, SKIP_MASTER);
	if (ret != 0) FATAL_ERROR("Cannot start producer thread\n");

	/* ... and then loop in consumer */
	main_loop_consumer(NULL);

	return 0;
}
Example #16
static struct rte_mempool* make_mempool() {
	static int pool_id = 0;
	char pool_name[32];
	snprintf(pool_name, sizeof(pool_name), "pool%d", __sync_fetch_and_add(&pool_id, 1));
	return rte_mempool_create(pool_name, NB_MBUF, MBUF_SIZE, 32,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(), 0
	);
}
Example #17
struct rte_mempool * __rte_experimental
rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t user_size, int socket_id)
{
	struct rte_comp_op_pool_private *priv;

	unsigned int elt_size = sizeof(struct rte_comp_op) + user_size;

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_comp_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->user_size <  user_size) {
			mp = NULL;
			COMPRESSDEV_LOG(ERR,
		"Mempool %s already exists but with incompatible parameters",
					name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_comp_op_pool_private),
			NULL,
			NULL,
			rte_comp_op_init,
			NULL,
			socket_id,
			0);

	if (mp == NULL) {
		COMPRESSDEV_LOG(ERR, "Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_comp_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->user_size = user_size;

	return mp;
}
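A hedged usage sketch for this helper: the pool name, sizes, and flow below are illustrative, not taken from the DPDK sources.

#include <rte_comp.h>
#include <rte_lcore.h>

/* Illustrative only: create (or reuse) an op pool, take one op, return it. */
static int comp_op_pool_demo(void)
{
	struct rte_mempool *op_pool = rte_comp_op_pool_create("comp_op_pool",
			1024 /* nb_elts */, 64 /* cache_size */,
			16 /* user_size */, rte_socket_id());
	if (op_pool == NULL)
		return -1;

	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);
	if (op == NULL)
		return -1;

	/* ... set op->m_src / op->m_dst and enqueue to a compressdev ... */

	rte_comp_op_free(op);
	return 0;
}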
Example #18
/**
 * Set up a mempool to store nf_info structs
 */
static int
init_nf_info_pool(void)
{
        /* don't pass single-producer/single-consumer flags to mempool
         * create as it seems faster to use a cache instead */
        printf("Creating nf_info pool '%s' ...\n", _NF_MEMPOOL_NAME);
        nf_info_pool = rte_mempool_create(_NF_MEMPOOL_NAME, MAX_NFS,
                        NF_INFO_SIZE, 0,
                        0, NULL, NULL, NULL, NULL, rte_socket_id(), NO_FLAGS);

        return (nf_info_pool == NULL); /* 0 on success */
}
Example #19
int initDpdk(char* progname)
{
    int ret;
    static char *eal_args[] = {progname, "-c0xf", "-n1", "-m128", "--file-prefix=drone"};

    // TODO: read env var DRONE_RTE_EAL_ARGS to override defaults

    ret = rte_eal_init(sizeof(eal_args)/sizeof(char*), eal_args);
    if (ret < 0)
        rte_panic("Cannot init EAL\n");

    mbufPool_ = rte_mempool_create("DpktPktMbuf",
                                   16*1024, // # of mbufs
                                   2048, // sz of mbuf
                                   32,   // per-lcore cache sz
                                   sizeof(struct rte_pktmbuf_pool_private),
                                   rte_pktmbuf_pool_init, // pool ctor
                                   NULL, // pool ctor arg
                                   rte_pktmbuf_init, // mbuf ctor
                                   NULL, // mbuf ctor arg
                                   SOCKET_ID_ANY,
                                   0     // flags
                                  );

    if (!mbufPool_)
        rte_exit(EXIT_FAILURE, "cannot init mbuf pool\n");

    if (rte_pmd_init_all() < 0)
        rte_exit(EXIT_FAILURE, "cannot init pmd\n");

    if (rte_eal_pci_probe() < 0)
        rte_exit(EXIT_FAILURE, "cannot probe PCI\n");

    // init lcore information
    lcoreCount_ = rte_lcore_count();
    lcoreFreeMask_ = 0;
    for (int i = 0; i < lcoreCount_; i++) {
        if (rte_lcore_is_enabled(i) && (unsigned(i) != rte_get_master_lcore()))
            lcoreFreeMask_ |= (1 << i);
    }
    qDebug("lcore_count = %d, lcore_free_mask = 0x%llx",
           lcoreCount_, lcoreFreeMask_);

    // assign a lcore for Rx polling
    rxLcoreId_ = getFreeLcore();
    if (rxLcoreId_ < 0)
        rte_exit(EXIT_FAILURE, "not enough cores for Rx polling");

    stopRxPoll_ = false;

    return 0;
}
Example #20
int main(int argc, char **argv)
{
	struct dev			*iter;
	int				rc, i;

	rc = rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]),
			  (char **)(void *)(uintptr_t)ealargs);

	if (rc < 0) {
		fprintf(stderr, "could not initialize dpdk\n");
		exit(1);
	}

	request_mempool = rte_mempool_create("nvme_request", 8192,
					     spdk_nvme_request_size(), 128, 0,
					     NULL, NULL, NULL, NULL,
					     SOCKET_ID_ANY, 0);

	if (request_mempool == NULL) {
		fprintf(stderr, "could not initialize request mempool\n");
		exit(1);
	}

	if (spdk_nvme_probe(NULL, probe_cb, attach_cb, NULL) != 0) {
		fprintf(stderr, "spdk_nvme_probe() failed\n");
		return 1;
	}

	rc = 0;

	foreach_dev(iter) {
		struct spdk_nvme_qpair *qpair;

		qpair = spdk_nvme_ctrlr_alloc_io_qpair(iter->ctrlr, 0);
		if (!qpair) {
			fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
			rc = 1;
		} else {
			reserve_controller(iter->ctrlr, qpair, iter->pci_dev);
		}
	}

	printf("Cleaning up...\n");

	for (i = 0; i < num_devs; i++) {
		struct dev *dev = &devs[i];
		spdk_nvme_detach(dev->ctrlr);
	}

	return rc;
}
Example #21
File: main.c Project: btw616/dpdk
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"priv_sess_mp_%u", socket_id);

		sess_mp = rte_mempool_create(mp_name,
					nb_sessions,
					session_priv_size,
					0, 0, NULL, NULL, NULL,
					NULL, socket_id,
					0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {

		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%u", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
					nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}
Example #22
/**
 * Initialise the mbuf pool for packet reception for the NIC, and any other
 * buffer pools needed by the app - currently none.
 */
static int
init_mbuf_pools(void)
{
	const unsigned num_mbufs = (num_clients * MBUFS_PER_CLIENT) \
			+ (ports->num_ports * MBUFS_PER_PORT);

	/* don't pass single-producer/single-consumer flags to mbuf create as it
	 * seems faster to use a cache instead */
	printf("Lookup mbuf pool '%s' [%u mbufs] ...\n",
			VM_PKTMBUF_POOL_NAME, num_mbufs);

	pktmbuf_pool = rte_mempool_lookup(VM_PKTMBUF_POOL_NAME);
	if (pktmbuf_pool == NULL)
	{
		printf("Creating mbuf pool '%s' [%u mbufs] ...\n",
			VM_PKTMBUF_POOL_NAME, num_mbufs);

		pktmbuf_pool = rte_mempool_create(VM_PKTMBUF_POOL_NAME, num_mbufs,
				MBUF_SIZE, MBUF_CACHE_SIZE,
				sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
				NULL, rte_pktmbuf_init, NULL, rte_socket_id(), NO_FLAGS );

	}

	return (pktmbuf_pool == NULL); /* 0  on success */
}
Example #23
static int
u2_init(void)
{
	if (rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]),ealargs) < 0) {
		fprintf(stderr, "failed to initialize DPDK EAL!\n");
		return 1;
	}

	printf("\n========================================\n");
	printf(  "  nvme_lat/u2_lat - ict.ncic.syssw.ufo"    );
	printf("\n========================================\n");

	request_mempool = rte_mempool_create("nvme_request",
	                                     U2_REQUEST_POOL_SIZE, spdk_nvme_request_size(),
	                                     U2_REQUEST_CACHE_SIZE, U2_REQUEST_PRIVATE_SIZE,
	                                     NULL, NULL, NULL, NULL,
	                                     SOCKET_ID_ANY, 0);
	if (request_mempool == NULL) {
		fprintf(stderr, "failed to create request pool!\n");
		return 1;
	}

	if (spdk_nvme_probe(NULL, probe_cb, attach_cb)) {
		fprintf(stderr, "failed to probe and attach to NVMe device!\n");
		return 1;
	}

	if (!u2_ctrlr) {
		fprintf(stderr, "failed to probe a suitable controller!\n");
		return 1;
	}

	if (!spdk_nvme_ns_is_active(u2_ns)) {
		fprintf(stderr, "namespace %d is IN-ACTIVE!\n", u2_ns_id);
		return 1;
	}

	if (u2_ns_size < io_size) {
		fprintf(stderr, "invalid I/O size %"PRIu32"!\n", io_size);
		return 1;
	}

	if (!u2_qpair) {
		fprintf(stderr, "failed to allocate queue pair!\n");
		return 1;
	}

	return 0;
}
Example #24
static int
test_distributor_perf(void)
{
	static struct rte_distributor *d;
	static struct rte_mempool *p;

	if (rte_lcore_count() < 2) {
		printf("ERROR: not enough cores to test distributor\n");
		return -1;
	}

	/* first, time how long it takes to round-trip a cache line */
	time_cache_line_switch();

	if (d == NULL) {
		d = rte_distributor_create("Test_perf", rte_socket_id(),
				rte_lcore_count() - 1);
		if (d == NULL) {
			printf("Error creating distributor\n");
			return -1;
		}
	} else {
		rte_distributor_flush(d);
		rte_distributor_clear_returns(d);
	}

	const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
			(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
	if (p == NULL) {
		p = rte_mempool_create("DPT_MBUF_POOL", nb_bufs,
				MBUF_SIZE, BURST,
				sizeof(struct rte_pktmbuf_pool_private),
				rte_pktmbuf_pool_init, NULL,
				rte_pktmbuf_init, NULL,
				rte_socket_id(), 0);
		if (p == NULL) {
			printf("Error creating mempool\n");
			return -1;
		}
	}

	rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER);
	if (perf_test(d, p) < 0)
		return -1;
	quit_workers(d, p);

	return 0;
}
Example #25
/*
 * The main function, which does initialization and calls the per-lcore
 * functions.
 */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	unsigned nb_ports;
	uint8_t portid;

	/* Initialize the Environment Abstraction Layer (EAL). */
	int ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	argc -= ret;
	argv += ret;

	/* Check that there is an even number of ports to send/receive on. */
	nb_ports = rte_eth_dev_count();
	if (nb_ports < 2 || (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

	/* Creates a new mempool in memory to hold the mbufs. */
	mbuf_pool = rte_mempool_create("MBUF_POOL",
				       NUM_MBUFS * nb_ports,
				       MBUF_SIZE,
				       MBUF_CACHE_SIZE,
				       sizeof(struct rte_pktmbuf_pool_private),
				       rte_pktmbuf_pool_init, NULL,
				       rte_pktmbuf_init,      NULL,
				       rte_socket_id(),
				       0);

	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* Initialize all ports. */
	for (portid = 0; portid < nb_ports; portid++)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
					portid);

	if (rte_lcore_count() > 1)
		printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

	/* Call lcore_main on the master core only. */
	lcore_main();

	return 0;
}
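The port_init() called above is not shown; a minimal sketch of what such a function typically does in this skeleton follows (queue and descriptor counts, and the zeroed device config, are assumptions).

#include <string.h>
#include <rte_ethdev.h>

/* Assumed port_init(): one RX and one TX queue, default config,
 * promiscuous mode enabled. */
static int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));

	ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port, 0, 128 /* descriptors, assumed */,
			rte_eth_dev_socket_id(port), NULL, mbuf_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port, 0, 512 /* descriptors, assumed */,
			rte_eth_dev_socket_id(port), NULL);
	if (ret < 0)
		return ret;

	ret = rte_eth_dev_start(port);
	if (ret < 0)
		return ret;

	rte_eth_promiscuous_enable(port);
	return 0;
}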
Example #26
/* Modify packet ethernet header */
static void
test_action_execute_set_ethernet(int argc, char *argv[])
{
	struct rte_mempool *pktmbuf_pool;
	struct action action_multiple[MAX_ACTIONS] = {0};

	pktmbuf_pool = rte_mempool_create("MProc_pktmbuf_pool",
                    20, /* num mbufs */
                    2048 + sizeof(struct rte_mbuf) + 128, /*pktmbuf size */
                    32, /*cache size */
                    sizeof(struct rte_pktmbuf_pool_private),
                    rte_pktmbuf_pool_init,
                    NULL, rte_pktmbuf_init, NULL, 0, 0);

	struct rte_mbuf *ethernet_buf = rte_pktmbuf_alloc(pktmbuf_pool);

	struct ovs_key_ethernet set_ethernet;
	__u8 eth_src_set[6] = {0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE};
	__u8 eth_dst_set[6] = {0xCA, 0xFE, 0xDE, 0xAD, 0xBE, 0xEF};
	memcpy(&set_ethernet.eth_src, &eth_src_set, sizeof(eth_src_set));
	memcpy(&set_ethernet.eth_dst, &eth_dst_set, sizeof(eth_dst_set));

	struct ovs_key_ethernet ethernet_orig;
	__u8 eth_src_orig[6] = {0xFF, 0xFF, 0xFF, 0xCC, 0xCC, 0xCC};
	__u8 eth_dst_orig[6] = {0xAA, 0xAA, 0xAA, 0xEE, 0xEE, 0xEE};
	memcpy(&ethernet_orig.eth_src, &eth_src_orig, sizeof(eth_src_orig));
	memcpy(&ethernet_orig.eth_dst, &eth_dst_orig, sizeof(eth_dst_orig));

	vport_init();
	action_multiple[0].type = ACTION_SET_ETHERNET;
	action_multiple[0].data.ethernet = set_ethernet;
	action_null_build(&action_multiple[1]);

	struct ovs_key_ethernet *pktmbuf_data =
		rte_pktmbuf_mtod(ethernet_buf, struct ovs_key_ethernet *);
	memcpy(pktmbuf_data, &ethernet_orig, sizeof(ethernet_orig));

	action_execute(action_multiple, ethernet_buf);
	pktmbuf_data = rte_pktmbuf_mtod(ethernet_buf, struct ovs_key_ethernet *);
	/* Can't compare struct directly as ovs_key_ethernet has src first then
	 * dst whereas the real ethernet header has dst first then source
	 */
	assert(memcmp(pktmbuf_data, &set_ethernet.eth_dst, sizeof(eth_dst_set)) == 0);
	assert(memcmp((uint8_t *)pktmbuf_data + sizeof(eth_dst_set),
	              &set_ethernet.eth_src, sizeof(eth_src_set)) == 0);
	rte_pktmbuf_free(ethernet_buf);
}
Example #27
/* Try to execute action with a pop vlan and output action, which should succeed */
static void
test_action_execute_multiple_actions__pop_vlan_and_output(int argc, char *argv[])
{
	/* We write some data into the place where a VLAN tag would be and the 4
	 * bytes after. We then call action execute and make sure the fake VLAN tag
	 * is gone and has been replaced by the data in the 4 bytes after
	 *
	 * We then output the packet to a port and make the same checks
	 */
	struct rte_mempool *pktmbuf_pool;
	struct action action_multiple[MAX_ACTIONS] = {0};
	int count = 0;

	pktmbuf_pool = rte_mempool_create("MProc_pktmbuf_pool",
                    20, /* num mbufs */
                    2048 + sizeof(struct rte_mbuf) + 128, /*pktmbuf size */
                    32, /*cache size */
                    sizeof(struct rte_pktmbuf_pool_private),
                    rte_pktmbuf_pool_init,
                    NULL, rte_pktmbuf_init, NULL, 0, 0);

	struct rte_mbuf *vlan_output_buf = rte_pktmbuf_alloc(pktmbuf_pool);

	vport_init();

	/* We have no real packet but the function which pops the VLAN does
	 * some checks of pkt len so we define a fake one here
	 */
	vlan_output_buf->pkt.pkt_len = 20;
	action_pop_vlan_build(&action_multiple[0]);
	action_output_build(&action_multiple[1], 17);
	action_null_build(&action_multiple[2]);
	int *pktmbuf_data = rte_pktmbuf_mtod(vlan_output_buf, int *);
	*(pktmbuf_data + 2) = 0xCAFED00D;
	/* Note last 2 bytes must be 0081, ie 8100 in network format */
	*(pktmbuf_data + 3) = 0x00000081; /* 12 bytes after src/dst MAC is vlan */
	*(pktmbuf_data + 4) = 0xBABEFACE;
	action_execute(action_multiple, vlan_output_buf);
	pktmbuf_data = rte_pktmbuf_mtod(vlan_output_buf, int *);
	assert(*(pktmbuf_data + 3) != 0x00000081);
	assert(*(pktmbuf_data + 3) == 0xBABEFACE);
	count = receive_from_vport(17, &vlan_output_buf);
	pktmbuf_data = rte_pktmbuf_mtod(vlan_output_buf, int *);
	assert(count == 1);
	assert(*(pktmbuf_data + 3) != 0x00000081);
	assert(*(pktmbuf_data + 3) == 0xBABEFACE);
}
Example #28
static void
app_init_mbuf_pools(void)
{
	/* Init the buffer pool */
	RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
	app.pool = rte_mempool_create(
		"mempool",
		app.pool_size,
		app.pool_buffer_size,
		app.pool_cache_size,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.pool == NULL)
		rte_panic("Cannot create mbuf pool\n");
}
Example #29
/* Try to execute action with the push vlan (PCP) action, which should
 * succeed */
static void
test_action_execute_push_vlan__pcp(int argc, char *argv[])
{
	/* Write Ethertype value of 0x0800 to byte 11 of the packet,
	 * where it is expected, and assign a length to the packet.
	 * After call to action_execute:
	 * - the length of the packet should have increased by 4 bytes
	 * - the value of byte 11 should be 0x8100 (0081 in network format)
	 * - the value of byte 15 should be 0x0800 (0008 in network format)
	 * - the value of the TCI field should be equal to the assigned
	 *   value
	 */
	struct rte_mempool *pktmbuf_pool;
	struct action action_multiple[MAX_ACTIONS] = {0};

	pktmbuf_pool = rte_mempool_create("MProc_pktmbuf_pool",
                    20, /* num mbufs */
                    2048 + sizeof(struct rte_mbuf) + 128, /*pktmbuf size */
                    32, /*cache size */
                    sizeof(struct rte_pktmbuf_pool_private),
                    rte_pktmbuf_pool_init,
                    NULL, rte_pktmbuf_init, NULL, 0, 0);

	struct rte_mbuf *pcp_buf = rte_pktmbuf_alloc(pktmbuf_pool);
	uint16_t pcp_tci = htons(0x2000); /* PCP is the upper 3 bits of the TCI */

	vport_init();

	/* Set the packet length - after the VLAN tag has been inserted,
	 * the value should increase by 4 bytes (i.e. the length of the tag)
	 */
	pcp_buf->pkt.pkt_len = 64;
	action_push_vlan_build(&action_multiple[0], pcp_tci);
	action_null_build(&action_multiple[1]);
	short *pkt_data = rte_pktmbuf_mtod(pcp_buf, short *);
	*(pkt_data + 6) = 0x0008; /* Set Ethertype to 0008, i.e. 0800 in network format */
	action_execute(action_multiple, pcp_buf);
	pkt_data = rte_pktmbuf_mtod(pcp_buf, short *);
	assert(*(pkt_data + 6) == 0x0081); /* 802.1Q Ethertype has been inserted */
	assert(*(pkt_data + 7) == 0x0020); /* TCI value has been inserted */
	assert(*(pkt_data + 8) == 0x0008); /* Ethertype has been shifted by 4 bytes */
	assert(pcp_buf->pkt.pkt_len == 68);/* Packet length has increased by 4 bytes */
	rte_pktmbuf_free(pcp_buf);
}
Example #30
void InitIpToEtherRing(void )
{
 //  socket_tcb_ring_send = rte_ring_create(TCB_TO_SOCKET, socket_tcb_ring_size, SOCKET_ID_ANY, 0);
   int buffer_size = sizeof(struct rte_mbuf *);
   buffer_message_pool = rte_mempool_create(_MSG_POOL, pool_size,
            buffer_size, 32, 0,
            NULL, NULL, NULL, NULL,
            SOCKET_ID_ANY, 0);
   if(buffer_message_pool == NULL) {
      printf("ERROR: ip-to-ether message pool creation failed\n");
   }
   else {
      printf("ip-to-ether message pool OK.\n");
   }
   ip_to_ether_ring_send = rte_ring_create(IP_ETHER_RING_NAME, ring_size, SOCKET_ID_ANY, 0);
   if(ip_to_ether_ring_send) {
      printf("ip_to_ether_ring_send ring OK\n");
   }
   else {
      printf("ERROR * ring ip_to_ether_ring_send failed\n");
   }
   ether_to_ip_ring_send = rte_ring_create(ETHER_IP_RING_NAME, ring_size, SOCKET_ID_ANY, 0);
   if(ether_to_ip_ring_send) {
      printf("ether_to_ip_ring_send ring OK\n");
   }
   else {
      printf("ERROR * ring ether_to_ip_ring_send failed\n");
   }
   ether_to_ip_ring_recv = rte_ring_lookup(ETHER_IP_RING_NAME);
   if(ether_to_ip_ring_recv) {
      printf("ether_to_ip_ring_recv ring OK\n");
   }
   else {
      printf("ERROR * ring ether_to_ip_ring_recv failed\n");
   }
   ip_to_ether_ring_recv = rte_ring_lookup(IP_ETHER_RING_NAME);
   if(ip_to_ether_ring_recv) {
      printf("ip_to_ether_ring_recv ring OK\n");
   }
   else {
      printf("ERROR * ring ip_to_ether_ring_recv failed\n");
   }
}