Example #1
/*
 * Clear flow table statistics at entry pos
 */
static void
flow_table_clear_stats(int32_t pos)
{
	/* Cast the stats lock to the rte_spinlock_t expected by the init API. */
	rte_spinlock_init((rte_spinlock_t *)&flow_table->stats[pos].lock);

	flow_table->stats[pos].used = 0;
	flow_table->stats[pos].tcp_flags = 0;
	flow_table->stats[pos].packet_count = 0;
	flow_table->stats[pos].byte_count = 0;
}
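
The per-entry lock initialized above only pays off if every writer takes it. A minimal sketch of an update path, assuming the same flow_table layout (flow_table_update_stats is a hypothetical helper, not part of the original source):

/* Hypothetical update path: take the per-entry lock set up in
 * flow_table_clear_stats() before touching the counters, so that
 * concurrent lcores do not race on the same entry.
 */
static void
flow_table_update_stats(int32_t pos, uint32_t bytes, uint8_t tcp_flags)
{
	rte_spinlock_t *lock = (rte_spinlock_t *)&flow_table->stats[pos].lock;

	rte_spinlock_lock(lock);
	flow_table->stats[pos].used = 1;
	flow_table->stats[pos].tcp_flags |= tcp_flags;
	flow_table->stats[pos].packet_count++;
	flow_table->stats[pos].byte_count += bytes;
	rte_spinlock_unlock(lock);
}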
Example #2
int
sfc_mcdi_init(struct sfc_adapter *sa)
{
	struct sfc_mcdi *mcdi;
	size_t max_msg_size;
	efx_mcdi_transport_t *emtp;
	int rc;

	sfc_log_init(sa, "entry");

	mcdi = &sa->mcdi;

	SFC_ASSERT(mcdi->state == SFC_MCDI_UNINITIALIZED);

	rte_spinlock_init(&mcdi->lock);

	mcdi->state = SFC_MCDI_INITIALIZED;

	max_msg_size = sizeof(uint32_t) + MCDI_CTL_SDU_LEN_MAX_V2;
	rc = sfc_dma_alloc(sa, "mcdi", 0, max_msg_size, sa->socket_id,
			   &mcdi->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	/* Parse the mcdi_logging kvarg to enable/disable MCDI message logging */
	rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING,
				sfc_kvarg_bool_handler, &mcdi->logging);
	if (rc != 0)
		goto fail_kvargs_process;

	emtp = &mcdi->transport;
	emtp->emt_context = sa;
	emtp->emt_dma_mem = &mcdi->mem;
	emtp->emt_execute = sfc_mcdi_execute;
	emtp->emt_ev_cpl = sfc_mcdi_ev_cpl;
	emtp->emt_exception = sfc_mcdi_exception;
	emtp->emt_logger = sfc_mcdi_logger;

	sfc_log_init(sa, "init MCDI");
	rc = efx_mcdi_init(sa->nic, emtp);
	if (rc != 0)
		goto fail_mcdi_init;

	return 0;

fail_mcdi_init:
	memset(emtp, 0, sizeof(*emtp));

fail_kvargs_process:
	sfc_dma_free(sa, &mcdi->mem);

fail_dma_alloc:
	mcdi->state = SFC_MCDI_UNINITIALIZED;
	return rc;
}
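
The lock initialized in sfc_mcdi_init() is what serializes management controller requests. A hedged sketch of the locking discipline in the execute callback; the real sfc_mcdi_execute() also handles completion polling, which is elided here:

/* Sketch only: serialize MCDI requests with mcdi->lock. The request
 * start call matches the efx_mcdi_request_start() prototype; the
 * completion-polling loop of the real driver is omitted.
 */
static void
sfc_mcdi_execute_sketch(void *arg, efx_mcdi_req_t *emrp)
{
	struct sfc_adapter *sa = (struct sfc_adapter *)arg;
	struct sfc_mcdi *mcdi = &sa->mcdi;

	rte_spinlock_lock(&mcdi->lock);
	efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
	/* ... poll for request completion ... */
	rte_spinlock_unlock(&mcdi->lock);
}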
Example #3
static struct h_scalar *ftp_cache_create_fn(struct h_table *ht, void *key)
{
	struct ftp_cache_key *k = (struct ftp_cache_key *)key;
	struct ftp_cache *fc;
	uint32_t lcore_id = rte_lcore_id();

	if (rte_mempool_mc_get(ftp_cache_tbl.mem_cache[lcore_id], (void **)&fc) < 0)
		return NULL;

	fc->sip = k->sip;
	fc->dip = k->dip;
	fc->dport = k->dport;
	fc->proto_mark = k->proto_mark;
	fc->lcore_id = lcore_id;
	rte_spinlock_init(&fc->lock);

	return &fc->h_scalar;
}
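
The create callback pulls entries from a per-lcore mempool and records the owning lcore; a matching release path would return the entry to the same pool. A hypothetical sketch (ftp_cache_release_fn and the container_of() usage are assumptions, not part of the original source):

/* Hypothetical counterpart to ftp_cache_create_fn(): return the entry
 * to the per-lcore mempool it was taken from (fc->lcore_id records it).
 */
static void ftp_cache_release_fn(struct h_table *ht, struct h_scalar *s)
{
	struct ftp_cache *fc = container_of(s, struct ftp_cache, h_scalar);

	(void)ht;
	rte_mempool_mp_put(ftp_cache_tbl.mem_cache[fc->lcore_id], fc);
}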
Example #4
/**
 * Give the library a mempool of rte_mbufs with which it can do the
 * rte_mbuf <--> netmap slot conversions.
 */
int
rte_netmap_init(const struct rte_netmap_conf *conf)
{
	size_t buf_ofs, nmif_sz, sz;
	size_t port_rings, port_slots, port_bufs;
	uint32_t i, port_num;

	port_num = RTE_MAX_ETHPORTS;
	port_rings = 2 * conf->max_rings;
	port_slots = port_rings * conf->max_slots;
	port_bufs = port_slots;

	nmif_sz = NETMAP_IF_RING_OFS(port_rings, port_rings, port_slots);
	sz = nmif_sz * port_num;

	buf_ofs = RTE_ALIGN_CEIL(sz, CACHE_LINE_SIZE);
	sz = buf_ofs + port_bufs * conf->max_bufsz * port_num;

	if (sz > UINT32_MAX ||
			(netmap.mem = rte_zmalloc_socket(__func__, sz,
			CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
		RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
			__func__, sz);
		return (-ENOMEM);
	}

	netmap.mem_sz = sz;
	netmap.netif_memsz = nmif_sz;
	netmap.buf_start = (uintptr_t)netmap.mem + buf_ofs;
	netmap.conf = *conf;

	rte_spinlock_init(&netmap_lock);

	/* Mark all ports as unused and set NETIF pointer. */
	for (i = 0; i != RTE_DIM(ports); i++) {
		ports[i].fd = UINT32_MAX;
		ports[i].nmif = (struct netmap_if *)
			((uintptr_t)netmap.mem + nmif_sz * i);
	}

	/* Mark all fd_ports as unused. */
	for (i = 0; i != RTE_DIM(fd_port); i++) {
		fd_port[i].port = FD_PORT_FREE;
	}

	return (0);
}
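
An illustrative initialization call; the field names follow their use in rte_netmap_init() above, and the values are arbitrary:

/* Illustrative only: sizes are arbitrary, field names are taken from
 * their use in rte_netmap_init() above.
 */
static int
netmap_compat_setup(void)
{
	struct rte_netmap_conf conf = {
		.socket_id = SOCKET_ID_ANY,
		.max_rings = 4,
		.max_slots = 1024,
		.max_bufsz = 2048,
	};

	return rte_netmap_init(&conf);
}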
Example #5
static int
cn23xx_vf_setup_mbox(struct lio_device *lio_dev)
{
	struct lio_mbox *mbox;

	PMD_INIT_FUNC_TRACE();

	if (lio_dev->mbox == NULL) {
		lio_dev->mbox = rte_zmalloc(NULL, sizeof(void *), 0);
		if (lio_dev->mbox == NULL)
			return -ENOMEM;
	}

	mbox = rte_zmalloc(NULL, sizeof(struct lio_mbox), 0);
	if (mbox == NULL) {
		rte_free(lio_dev->mbox);
		lio_dev->mbox = NULL;
		return -ENOMEM;
	}

	rte_spinlock_init(&mbox->lock);

	mbox->lio_dev = lio_dev;

	mbox->q_no = 0;

	mbox->state = LIO_MBOX_STATE_IDLE;

	/* VF mbox interrupt reg */
	mbox->mbox_int_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_VF_SLI_PKT_MBOX_INT(0);
	/* VF reads from SIG0 reg */
	mbox->mbox_read_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
	/* VF writes into SIG1 reg */
	mbox->mbox_write_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);

	lio_dev->mbox[0] = mbox;

	rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);

	return 0;
}
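
Any path that posts a message through this mailbox should hold mbox->lock so the LIO_MBOX_STATE_* transitions stay consistent. A hypothetical sketch (lio_mbox_post_sketch is not part of the driver):

/* Sketch: serialize mailbox writes with the lock initialized in
 * cn23xx_vf_setup_mbox(). Error handling is reduced to a busy check.
 */
static int
lio_mbox_post_sketch(struct lio_device *lio_dev, uint64_t data)
{
	struct lio_mbox *mbox = lio_dev->mbox[0];

	rte_spinlock_lock(&mbox->lock);
	if (mbox->state != LIO_MBOX_STATE_IDLE) {
		rte_spinlock_unlock(&mbox->lock);
		return -EBUSY;
	}
	rte_write64(data, mbox->mbox_write_reg);
	rte_spinlock_unlock(&mbox->lock);

	return 0;
}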
Example #6
int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;

	vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
	if (vq == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for vring:%u.\n", vring_idx);
		return -1;
	}

	dev->virtqueue[vring_idx] = vq;
	init_vring_queue(dev, vring_idx);
	rte_spinlock_init(&vq->access_lock);

	dev->nr_vring += 1;

	return 0;
}
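
The access_lock initialized here is the usual control-path/datapath fence: control code takes it while changing ring state so the datapath never observes a half-updated virtqueue. A minimal sketch, assuming the vq layout above (vring_quiesce is hypothetical):

/* Sketch of the access_lock discipline; the actual ring updates are
 * elided.
 */
static void
vring_quiesce(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq = dev->virtqueue[vring_idx];

	rte_spinlock_lock(&vq->access_lock);
	/* ... update ring addresses / enabled state ... */
	rte_spinlock_unlock(&vq->access_lock);
}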
Example #7
int
octeontx_mbox_set_reg(uint8_t *reg)
{
	struct mbox *m = &octeontx_mbox;

	if (m->init_once)
		return -EALREADY;

	if (reg == NULL) {
		mbox_log_err("Invalid reg=%p", reg);
		return -EINVAL;
	}

	m->reg = reg;

	if (m->ram_mbox_base != NULL) {
		rte_spinlock_init(&m->lock);
		m->init_once = 1;
	}

	return 0;
}
Example #8
int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
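
The management event queue can be polled from more than one context, which is why sfc_ev_attach() pairs it with a lock. A hedged sketch of a poll wrapper (sfc_ev_mgmt_qpoll_sketch is illustrative; the real poll body is elided):

/* Sketch: skip the poll rather than block if another context already
 * holds the management EVQ lock.
 */
static void
sfc_ev_mgmt_qpoll_sketch(struct sfc_adapter *sa)
{
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		/* ... poll sa->mgmt_evq for pending events ... */
		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}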
Example #9
static int
stack_alloc(struct rte_mempool *mp)
{
	struct rte_mempool_stack *s;
	unsigned n = mp->size;
	int size = sizeof(*s) + (n+16)*sizeof(void *);

	/* Allocate our local memory structure */
	s = rte_zmalloc_socket("mempool-stack",
			size,
			RTE_CACHE_LINE_SIZE,
			mp->socket_id);
	if (s == NULL) {
		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
		return -ENOMEM;
	}

	rte_spinlock_init(&s->sl);

	s->size = n;
	mp->pool_data = s;

	return 0;
}
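
A sketch of the matching enqueue, pushing object pointers under s->sl. The objs[] array and len counter are assumptions about the rte_mempool_stack layout implied by the (n + 16) * sizeof(void *) allocation above:

/* Sketch: bounded push under the stack lock. Layout assumptions:
 * struct rte_mempool_stack ends in a len counter and an objs[] array.
 */
static int
stack_enqueue_sketch(struct rte_mempool *mp, void * const *obj_table,
		unsigned int n)
{
	struct rte_mempool_stack *s = mp->pool_data;
	unsigned int i;

	rte_spinlock_lock(&s->sl);
	if (s->len + n > s->size + 16) {
		rte_spinlock_unlock(&s->sl);
		return -ENOBUFS;
	}
	for (i = 0; i < n; i++)
		s->objs[s->len + i] = obj_table[i];
	s->len += n;
	rte_spinlock_unlock(&s->sl);

	return 0;
}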
Example #10
/* Shall be called before any allocation happens */
void
rte_kni_init(unsigned int max_kni_ifaces)
{
	uint32_t i;
	struct rte_kni_memzone_slot *it;
	const struct rte_memzone *mz;
#define OBJNAMSIZ 32
	char obj_name[OBJNAMSIZ];
	char mz_name[RTE_MEMZONE_NAMESIZE];

	/* Immediately return if KNI is already initialized */
	if (kni_memzone_pool.initialized) {
		RTE_LOG(WARNING, KNI, "Double call to rte_kni_init()\n");
		return;
	}

	if (max_kni_ifaces == 0) {
		RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
							max_kni_ifaces);
		rte_panic("Unable to initialize KNI\n");
	}

	/* Check FD and open */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0)
			rte_panic("Can not open /dev/%s\n", KNI_DEVICE);
	}
#ifdef RTE_LIBRW_PIOT
	else {
		return;
	}
#endif
	/* Allocate slot objects */
	kni_memzone_pool.slots = (struct rte_kni_memzone_slot *)
					rte_malloc(NULL,
					sizeof(struct rte_kni_memzone_slot) *
					max_kni_ifaces,
					0);
	KNI_MEM_CHECK(kni_memzone_pool.slots == NULL);

	/* Initialize general pool variables */
	kni_memzone_pool.initialized = 1;
	kni_memzone_pool.max_ifaces = max_kni_ifaces;
	kni_memzone_pool.free = &kni_memzone_pool.slots[0];
	rte_spinlock_init(&kni_memzone_pool.mutex);

	/* Pre-allocate all memzones of all the slots; panic on error */
	for (i = 0; i < max_kni_ifaces; i++) {

		/* Recover current slot */
		it = &kni_memzone_pool.slots[i];
		it->id = i;

		/* Allocate KNI context */
		snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", i);
		mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
					SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_ctx = mz;

		/* TX RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_tx_q = mz;

		/* RX RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_rx_q = mz;

		/* ALLOC RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_alloc_q = mz;

		/* FREE RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_free_q = mz;
#ifndef RTE_LIBRW_PIOT
		/* Request RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_req_q = mz;

		/* Response RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_resp_q = mz;

		/* Req/Resp sync mem area */
		snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_sync_addr = mz;
#endif
		if ((i+1) == max_kni_ifaces) {
			it->next = NULL;
			kni_memzone_pool.free_tail = it;
		} else
			it->next = &kni_memzone_pool.slots[i+1];
	}

	return;

kni_fail:
	rte_panic("Unable to allocate memory for max_kni_ifaces:%d. Increase the amount of hugepages memory\n",
			 max_kni_ifaces);
}
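
A sketch of how the free list built above would be consumed: pop the head slot under the pool mutex (kni_memzone_pool_alloc_sketch is hypothetical; the real allocator also maintains free_tail):

/* Sketch: pop one pre-allocated slot from the free list under
 * kni_memzone_pool.mutex.
 */
static struct rte_kni_memzone_slot *
kni_memzone_pool_alloc_sketch(void)
{
	struct rte_kni_memzone_slot *it;

	rte_spinlock_lock(&kni_memzone_pool.mutex);
	it = kni_memzone_pool.free;
	if (it != NULL)
		kni_memzone_pool.free = it->next;
	rte_spinlock_unlock(&kni_memzone_pool.mutex);

	return it;
}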
Example #11
int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
	struct rte_pci_device *pci_dev = NULL;
	struct bond_dev_private *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct eth_driver *eth_drv = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *pci_id_table = NULL;
	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */

	if (name == NULL) {
		RTE_BOND_LOG(ERR, "Invalid name specified");
		goto err;
	}

	if (socket_id >= number_of_sockets()) {
		RTE_BOND_LOG(ERR,
				"Invalid socket id specified to create bonded device on.");
		goto err;
	}

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc pci dev on socket");
		goto err;
	}

	eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
	if (eth_drv == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc eth_drv on socket");
		goto err;
	}

	pci_drv = &eth_drv->pci_drv;

	pci_id_table = rte_zmalloc_socket(name, sizeof(*pci_id_table), 0, socket_id);
	if (pci_id_table == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc pci_id_table on socket");
		goto err;
	}

	pci_drv->id_table = pci_id_table;

	pci_drv->id_table->device_id = PCI_ANY_ID;
	pci_drv->id_table->subsystem_device_id = PCI_ANY_ID;
	pci_drv->id_table->vendor_id = PCI_ANY_ID;
	pci_drv->id_table->subsystem_vendor_id = PCI_ANY_ID;

	pci_drv->drv_flags = RTE_PCI_DRV_INTR_LSC;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
	if (internals == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
		goto err;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
		goto err;
	}

	pci_dev->numa_node = socket_id;
	pci_drv->name = driver_name;

	eth_dev->driver = eth_drv;
	eth_dev->data->dev_private = internals;
	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	eth_dev->data->dev_link.link_status = 0;

	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
			socket_id);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs on socket");
		goto err;
	}

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->dev_ops = &default_dev_ops;
	eth_dev->pci_dev = pci_dev;

	rte_spinlock_init(&internals->lock);

	internals->port_id = eth_dev->data->port_id;
	internals->mode = BONDING_MODE_INVALID;
	internals->current_primary_port = 0;
	internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
	internals->xmit_hash = xmit_l2_hash;
	internals->user_defined_mac = 0;
	internals->link_props_set = 0;

	internals->link_status_polling_enabled = 0;

	internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
	internals->link_down_delay_ms = 0;
	internals->link_up_delay_ms = 0;

	internals->slave_count = 0;
	internals->active_slave_count = 0;
	internals->rx_offload_capa = 0;
	internals->tx_offload_capa = 0;

	memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
	memset(internals->slaves, 0, sizeof(internals->slaves));

	/* Set mode 4 default configuration */
	bond_mode_8023ad_setup(eth_dev, NULL);
	if (bond_ethdev_mode_set(eth_dev, mode)) {
		RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
				 eth_dev->data->port_id, mode);
		goto err;
	}

	return eth_dev->data->port_id;

err:
	if (pci_dev)
		rte_free(pci_dev);
	if (pci_id_table)
		rte_free(pci_id_table);
	if (eth_drv)
		rte_free(eth_drv);
	if (internals)
		rte_free(internals);
	return -1;
}
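
An illustrative call; BONDING_MODE_ROUND_ROBIN is the usual mode 0 constant from rte_eth_bond.h, but check the headers of the DPDK version in use:

/* Illustrative usage: create a bonded device in round-robin mode on
 * socket 0 and fail hard if it cannot be created.
 */
static int
setup_bonding(void)
{
	int port_id;

	port_id = rte_eth_bond_create("bond0", BONDING_MODE_ROUND_ROBIN, 0);
	if (port_id < 0)
		rte_exit(EXIT_FAILURE, "Failed to create bonded device\n");

	return port_id;
}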
Example #12
int
rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_eth_rx_adapter_conf_cb conf_cb,
				void *conf_arg)
{
	struct rte_event_eth_rx_adapter *rx_adapter;
	int ret;
	int socket_id;
	uint8_t i;
	char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
	const uint8_t default_rss_key[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	};

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_eth_rx_adapter == NULL) {
		ret = rte_event_eth_rx_adapter_init();
		if (ret)
			return ret;
	}

	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter != NULL) {
		RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, sizeof(mem_name),
		"rte_event_eth_rx_adapter_%d",
		id);

	rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (rx_adapter == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
		return -ENOMEM;
	}

	rx_adapter->eventdev_id = dev_id;
	rx_adapter->socket_id = socket_id;
	rx_adapter->conf_cb = conf_cb;
	rx_adapter->conf_arg = conf_arg;
	strcpy(rx_adapter->mem_name, mem_name);
	rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
					rte_eth_dev_count() *
					sizeof(struct eth_device_info), 0,
					socket_id);
	rte_convert_rss_key((const uint32_t *)default_rss_key,
			(uint32_t *)rx_adapter->rss_key_be,
			    RTE_DIM(default_rss_key));

	if (rx_adapter->eth_devices == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
		rte_free(rx_adapter);
		return -ENOMEM;
	}
	rte_spinlock_init(&rx_adapter->rx_lock);
	for (i = 0; i < rte_eth_dev_count(); i++)
		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];

	event_eth_rx_adapter[id] = rx_adapter;
	if (conf_cb == default_conf_cb)
		rx_adapter->default_cb_arg = 1;
	return 0;
}
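
The conf_cb passed to rte_event_eth_rx_adapter_create_ext() must fill in the adapter's event port configuration. A hedged sketch of such a callback (my_rx_adapter_conf is hypothetical; the event_port_id and max_nb_rx fields follow the rte_event_eth_rx_adapter_conf definition):

/* Sketch of a conf_cb: hand the adapter a pre-created event port and
 * cap the packets dequeued per poll.
 */
static int
my_rx_adapter_conf(uint8_t id, uint8_t dev_id,
		struct rte_event_eth_rx_adapter_conf *conf, void *arg)
{
	uint8_t *event_port_id = arg;

	RTE_SET_USED(id);
	RTE_SET_USED(dev_id);

	conf->event_port_id = *event_port_id;
	conf->max_nb_rx = 128;

	return 0;
}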
Example #13
void
pktgen_config_ports(void)
{
	uint32_t lid, pid, i, s, q, sid;
	rxtx_t rt;
	pkt_seq_t   *pkt;
	port_info_t     *info;
	char buff[RTE_MEMZONE_NAMESIZE];
	int32_t ret, cache_size;
	char output_buff[256] = { 0 };

	/* Find out the total number of ports in the system. */
	/* We have already blacklisted the ones we needed to in main routine. */
	pktgen.nb_ports = rte_eth_dev_count();
	if (pktgen.nb_ports > RTE_MAX_ETHPORTS)
		pktgen.nb_ports = RTE_MAX_ETHPORTS;

	if (pktgen.nb_ports == 0)
		pktgen_log_panic("*** Did not find any ports to use ***");

	pktgen.starting_port = 0;

	/* Setup the number of ports to display at a time */
	if (pktgen.nb_ports > pktgen.nb_ports_per_page)
		pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports_per_page;
	else
		pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports;

	wr_port_matrix_dump(pktgen.l2p);

	pktgen_log_info("Configuring %d ports, MBUF Size %d, MBUF Cache Size %d",
	                pktgen.nb_ports, MBUF_SIZE, MBUF_CACHE_SIZE);

	/* For each lcore setup each port that is handled by that lcore. */
	for (lid = 0; lid < RTE_MAX_LCORE; lid++) {

		if (wr_get_map(pktgen.l2p, RTE_MAX_ETHPORTS, lid) == 0)
			continue;

		/* For each port attached or handled by the lcore */
		for (pid = 0; pid < pktgen.nb_ports; pid++) {

			/* If non-zero then this port is handled by this lcore. */
			if (wr_get_map(pktgen.l2p, pid, lid) == 0)
				continue;
			wr_set_port_private(pktgen.l2p, pid, &pktgen.info[pid]);
			pktgen.info[pid].pid = pid;
		}
	}
	wr_dump_l2p(pktgen.l2p);

	pktgen.total_mem_used = 0;

	for (pid = 0; pid < pktgen.nb_ports; pid++) {
		/* Skip if we do not have any lcores attached to a port. */
		if ( (rt.rxtx = wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE)) == 0)
			continue;

		pktgen.port_cnt++;
		snprintf(output_buff, sizeof(output_buff),
		         "Initialize Port %d -- TxQ %d, RxQ %d", pid, rt.tx, rt.rx);

		info = wr_get_port_private(pktgen.l2p, pid);

		info->fill_pattern_type  = ABC_FILL_PATTERN;
		strncpy(info->user_pattern, "0123456789abcdef", USER_PATTERN_SIZE);

		rte_spinlock_init(&info->port_lock);

		/* Create the pkt header structures for transmitting sequence of packets. */
		snprintf(buff, sizeof(buff), "seq_hdr_%d", pid);
		info->seq_pkt = (pkt_seq_t *)rte_zmalloc_socket(buff,
				(sizeof(pkt_seq_t) * NUM_TOTAL_PKTS),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (info->seq_pkt == NULL)
			pktgen_log_panic("Unable to allocate %d pkt_seq_t headers", NUM_TOTAL_PKTS);

		info->seqIdx    = 0;
		info->seqCnt    = 0;

		info->nb_mbufs  = MAX_MBUFS_PER_PORT;
		cache_size = (info->nb_mbufs > RTE_MEMPOOL_CACHE_MAX_SIZE) ?
		        RTE_MEMPOOL_CACHE_MAX_SIZE : info->nb_mbufs;

		pktgen_port_conf_setup(pid, &rt, &default_port_conf);

		if ( (ret = rte_eth_dev_configure(pid, rt.rx, rt.tx, &info->port_conf)) < 0)
			pktgen_log_panic("Cannot configure device: port=%d, Num queues %d,%d (%d)%s",
			                 pid, rt.rx, rt.tx, errno, rte_strerror(-ret));

		pkt = &info->seq_pkt[SINGLE_PKT];

		/* Grab the source MAC address */
		rte_eth_macaddr_get(pid, &pkt->eth_src_addr);
		pktgen_log_info("%s,  Src MAC %02x:%02x:%02x:%02x:%02x:%02x", output_buff,
		                pkt->eth_src_addr.addr_bytes[0],
		                pkt->eth_src_addr.addr_bytes[1],
		                pkt->eth_src_addr.addr_bytes[2],
		                pkt->eth_src_addr.addr_bytes[3],
		                pkt->eth_src_addr.addr_bytes[4],
		                pkt->eth_src_addr.addr_bytes[5]);

		/* Copy the first Src MAC address in SINGLE_PKT to the rest of the sequence packets. */
		for (i = 0; i < NUM_SEQ_PKTS; i++)
			ethAddrCopy(&info->seq_pkt[i].eth_src_addr, &pkt->eth_src_addr);

		pktgen.mem_used = 0;

		for (q = 0; q < rt.rx; q++) {
			/* grab the socket id value based on the lcore being used. */
			sid     = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q));

			/* Create and initialize the default Receive buffers. */
			info->q[q].rx_mp = pktgen_mbuf_pool_create("Default RX", pid, q, info->nb_mbufs, sid, cache_size);
			if (info->q[q].rx_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Default RX mbufs", pid);

			ret = rte_eth_rx_queue_setup(pid, q, pktgen.nb_rxd, sid, &info->rx_conf, pktgen.info[pid].q[q].rx_mp);
			if (ret < 0)
				pktgen_log_panic("rte_eth_rx_queue_setup: err=%d, port=%d, %s", ret, pid, rte_strerror(-ret));
		}
		pktgen_log_info("");

		for (q = 0; q < rt.tx; q++) {
			/* grab the socket id value based on the lcore being used. */
			sid     = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q));

			/* Create and initialize the default Transmit buffers. */
			info->q[q].tx_mp = pktgen_mbuf_pool_create("Default TX", pid, q, MAX_MBUFS_PER_PORT, sid, cache_size);
			if (info->q[q].tx_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Default TX mbufs", pid);

			/* Create and initialize the range Transmit buffers. */
			info->q[q].range_mp = pktgen_mbuf_pool_create("Range TX", pid, q, MAX_MBUFS_PER_PORT,   sid, 0);
			if (info->q[q].range_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Range TX mbufs", pid);

			/* Create and initialize the sequence Transmit buffers. */
			info->q[q].seq_mp = pktgen_mbuf_pool_create("Sequence TX", pid, q, MAX_MBUFS_PER_PORT, sid, cache_size);
			if (info->q[q].seq_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Sequence TX mbufs", pid);

			/* Used for sending special packets like ARP requests */
			info->q[q].special_mp = pktgen_mbuf_pool_create("Special TX", pid, q, MAX_SPECIAL_MBUFS, sid, 0);
			if (info->q[q].special_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Special TX mbufs", pid);

			/* Setup the PCAP file for each port */
			if (pktgen.info[pid].pcap != NULL)
				if (pktgen_pcap_parse(pktgen.info[pid].pcap, info, q) == -1)
					pktgen_log_panic("Cannot load PCAP file for port %d", pid);
			/* Find out the link speed to program the WTHRESH value correctly. */
			pktgen_get_link_status(info, pid, 0);

			ret = rte_eth_tx_queue_setup(pid, q, pktgen.nb_txd, sid, &info->tx_conf);
			if (ret < 0)
				pktgen_log_panic("rte_eth_tx_queue_setup: err=%d, port=%d, %s", ret, pid, rte_strerror(-ret));
			pktgen_log_info("");
		}
		pktgen_log_info("%*sPort memory used = %6lu KB", 71, " ", (pktgen.mem_used + 1023) / 1024);
	}
	pktgen_log_info("%*sTotal memory used = %6lu KB", 70, " ", (pktgen.total_mem_used + 1023) / 1024);

	/* Start up the ports and display the port Link status */
	for (pid = 0; pid < pktgen.nb_ports; pid++) {
		if (wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE) == 0)
			continue;

		info = wr_get_port_private(pktgen.l2p, pid);

		/* Start device */
		if ( (ret = rte_eth_dev_start(pid)) < 0)
			pktgen_log_panic("rte_eth_dev_start: port=%d, %s", pid, rte_strerror(-ret));

		pktgen_get_link_status(info, pid, 1);

		if (info->link.link_status)
			snprintf(output_buff, sizeof(output_buff), "Port %2d: Link Up - speed %u Mbps - %s", pid,
			         (uint32_t)info->link.link_speed,
			         (info->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			         ("full-duplex") : ("half-duplex"));
		else
			snprintf(output_buff, sizeof(output_buff), "Port %2d: Link Down", pid);

		/* If enabled, put device in promiscuous mode. */
		if (pktgen.flags & PROMISCUOUS_ON_FLAG) {
			strncatf(output_buff, " <Enable promiscuous mode>");
			rte_eth_promiscuous_enable(pid);
		}

		pktgen_log_info("%s", output_buff);
		pktgen.info[pid].seq_pkt[SINGLE_PKT].pktSize = MIN_PKT_SIZE;

		/* Setup the port and packet defaults. (must be after link speed is found) */
		for (s = 0; s < NUM_TOTAL_PKTS; s++)
			pktgen_port_defaults(pid, s);

		pktgen_range_setup(info);

		rte_eth_stats_get(pid, &info->init_stats);

		pktgen_rnd_bits_init(&pktgen.info[pid].rnd_bitfields);
	}

	/* Clear the log information by putting a blank line */
	pktgen_log_info("");

	/* Setup the packet capture per port if needed. */
	for (sid = 0; sid < wr_coremap_cnt(pktgen.core_info, pktgen.core_cnt, 0); sid++)
		pktgen_packet_capture_init(&pktgen.capture[sid], sid);
}
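
Paths that rewrite a port's packet templates at runtime are expected to hold the per-port lock initialized in pktgen_config_ports(). A hypothetical sketch:

/* Hypothetical helper: update the single-packet size under the
 * per-port lock so a transmitting lcore never sees a torn update.
 */
static void
pktgen_update_single_pkt_size(port_info_t *info, uint16_t size)
{
	rte_spinlock_lock(&info->port_lock);
	info->seq_pkt[SINGLE_PKT].pktSize = size;
	rte_spinlock_unlock(&info->port_lock);
}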
Example #14
int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
	struct bond_dev_private *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	uint32_t vlan_filter_bmp_size;

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */

	if (name == NULL) {
		RTE_BOND_LOG(ERR, "Invalid name specified");
		goto err;
	}

	if (socket_id >= number_of_sockets()) {
		RTE_BOND_LOG(ERR,
				"Invalid socket id specified to create bonded device on.");
		goto err;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
	if (internals == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
		goto err;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
		goto err;
	}

	eth_dev->data->dev_private = internals;
	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
			socket_id);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
		goto err;
	}

	eth_dev->dev_ops = &default_dev_ops;
	eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
		RTE_ETH_DEV_DETACHABLE;
	eth_dev->driver = NULL;
	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->data->drv_name = pmd_bond_drv.driver.name;
	eth_dev->data->numa_node = socket_id;

	rte_spinlock_init(&internals->lock);

	internals->port_id = eth_dev->data->port_id;
	internals->mode = BONDING_MODE_INVALID;
	internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
	internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
	internals->xmit_hash = xmit_l2_hash;
	internals->user_defined_mac = 0;
	internals->link_props_set = 0;

	internals->link_status_polling_enabled = 0;

	internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
	internals->link_down_delay_ms = 0;
	internals->link_up_delay_ms = 0;

	internals->slave_count = 0;
	internals->active_slave_count = 0;
	internals->rx_offload_capa = 0;
	internals->tx_offload_capa = 0;
	internals->candidate_max_rx_pktlen = 0;
	internals->max_rx_pktlen = 0;

	/* Initially allow to choose any offload type */
	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;

	memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
	memset(internals->slaves, 0, sizeof(internals->slaves));

	/* Set mode 4 default configuration */
	bond_mode_8023ad_setup(eth_dev, NULL);
	if (bond_ethdev_mode_set(eth_dev, mode)) {
		RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
				 eth_dev->data->port_id, mode);
		goto err;
	}

	vlan_filter_bmp_size =
		rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
	internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
						   RTE_CACHE_LINE_SIZE);
	if (internals->vlan_filter_bmpmem == NULL) {
		RTE_BOND_LOG(ERR,
			     "Failed to allocate vlan bitmap for bonded device %u\n",
			     eth_dev->data->port_id);
		goto err;
	}

	internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
			internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
	if (internals->vlan_filter_bmp == NULL) {
		RTE_BOND_LOG(ERR,
			     "Failed to init vlan bitmap for bonded device %u\n",
			     eth_dev->data->port_id);
		rte_free(internals->vlan_filter_bmpmem);
		goto err;
	}

	return eth_dev->data->port_id;

err:
	rte_free(internals);
	if (eth_dev != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		rte_eth_dev_release_port(eth_dev);
	}
	return -1;
}
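
A hedged sketch of the matching teardown, releasing what the function above allocated in reverse order (bond_free_sketch is hypothetical; DPDK's own cleanup goes through rte_eth_bond_free()):

/* Sketch only: undo the allocations from rte_eth_bond_create() in
 * reverse order.
 */
static void
bond_free_sketch(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;

	rte_bitmap_free(internals->vlan_filter_bmp);
	rte_free(internals->vlan_filter_bmpmem);
	rte_free(eth_dev->data->mac_addrs);
	rte_free(internals);
	rte_eth_dev_release_port(eth_dev);
}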