Example #1
/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	CXGBE_FUNC_TRACE();

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = eth_dev->pci_dev;

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -1;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	err = cxgbe_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}
Example #2
/* map the PCI resource of a PCI device in virtual memory */
int
pci_uio_map_resource(struct rte_pci_device *dev)
{
	int i, map_idx = 0, ret;
	uint64_t phaddr;
	struct mapped_pci_resource *uio_res = NULL;
	struct mapped_pci_res_list *uio_res_list =
		RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);

	dev->intr_handle.fd = -1;
	dev->intr_handle.uio_cfg_fd = -1;
	dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;

	/* secondary processes - use already recorded details */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return pci_uio_map_secondary(dev);

	/* allocate uio resource */
	ret = pci_uio_alloc_resource(dev, &uio_res);
	if (ret)
		return ret;

	/* Map all BARs */
	for (i = 0; i != PCI_MAX_RESOURCE; i++) {
		/* skip empty BAR */
		phaddr = dev->mem_resource[i].phys_addr;
		if (phaddr == 0)
			continue;

		ret = pci_uio_map_resource_by_index(dev, i,
				uio_res, map_idx);
		if (ret)
			goto error;

		map_idx++;
	}

	uio_res->nb_maps = map_idx;

	TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);

	return 0;
error:
	for (i = 0; i < map_idx; i++) {
		pci_unmap_resource(uio_res->maps[i].addr,
				(size_t)uio_res->maps[i].size);
		rte_free(uio_res->maps[i].path);
	}
	pci_uio_free_resource(dev, uio_res);
	return -1;
}
Example #3
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);
}
Example #4
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;

	return 0;
}
Example #5
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 */
int
init(int argc, char *argv[])
{
	int retval;
	uint8_t i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;
	
	/* get total number of ports */
	total_ports = 0;

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	
	init_shm_rings();
	
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
	{
		/* create metadata, output cmdline */
		if (rte_ivshmem_metadata_create(IVSHMEN_METADATA_NAME) < 0)
			rte_exit(EXIT_FAILURE, "Cannot create IVSHMEM metadata\n");	
		if (rte_ivshmem_metadata_add_mempool(pktmbuf_pool, IVSHMEN_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to IVSHMEM metadata\n");				
		
		for (i = 0; i < num_rings; i++) 
		{
			if (rte_ivshmem_metadata_add_ring(clients[i].rx_q,
					IVSHMEN_METADATA_NAME) < 0)
				rte_exit(EXIT_FAILURE, "Cannot add ring client %d to IVSHMEM metadata\n", i);
		}
		generate_ivshmem_cmdline(IVSHMEN_METADATA_NAME);
	}

	return 0;
}
Example #6
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	RTE_LOG(INFO, EAL, "Setting up physically contiguous memory...\n");
	const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_contigmem_init() :
			rte_eal_contigmem_attach();
	if (retval < 0)
		return -1;

	if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
		return -1;

	return 0;
}
Example #7
int
test_ivshmem(void)
{
	int testid;

	/* We want to have a clean execution for every test without exposing
	 * private global data structures in rte_ivshmem so we launch each test
	 * on a different secondary process. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {

		/* first, create metadata */
		ASSERT(create_duplicate() == 0, "Creating metadata failed");

		return launch_all_tests_on_secondary_processes();
	}

	testid = *(getenv(RTE_IVSHMEM_TEST_ID)) - FIRST_TEST;

	printf("Secondary process running test %d \n", testid);

	switch (testid) {
	case _test_ivshmem_api_test:
		return test_ivshmem_api_test();

	case _test_ivshmem_create_metadata_config:
		return test_ivshmem_create_metadata_config();

	case _test_ivshmem_create_multiple_metadata_configs:
		return test_ivshmem_create_multiple_metadata_configs();

	case _test_ivshmem_create_too_many_metadata_configs:
		return test_ivshmem_create_too_many_metadata_configs();

	case _test_ivshmem_create_duplicate_metadata:
		return test_ivshmem_create_duplicate_metadata();

	case _test_ivshmem_create_lots_of_memzones:
		return test_ivshmem_create_lots_of_memzones();

	case _test_ivshmem_create_duplicate_memzone:
		return test_ivshmem_create_duplicate_memzone();

	default:
		break;
	}

	return -1;
}
Example #8
static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->schedule      = NULL;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
		eventdev->data->dev_id, eventdev->data->socket_id,
		skel->vendor_id, skel->device_id);

fail:
	return ret;
}
Example #9
/**
 * Set up the DPDK rings which will be used to pass packets, via
 * pointers, between the multi-process server and client processes.
 * Each client needs one RX queue.
 */
static int
init_shm_rings(void)
{
	unsigned i;
	unsigned socket_id;
	const char * q_name;
	const unsigned ringsize = CLIENT_QUEUE_RINGSIZE;
	int retval = 0; /* set only by the commented-out init_port() call below */

	clients = rte_malloc("client details",
		sizeof(*clients) * num_clients, 0);
	if (clients == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate memory for client program details\n");

	for (i = 0; i < num_clients; i++) {
		/* Create an RX queue for each client */
		socket_id = rte_socket_id();
		q_name = get_rx_queue_name(i);
		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		{
			clients[i].rx_q = rte_ring_lookup(get_rx_queue_name(i));
		}
		else
		{
			clients[i].rx_q = rte_ring_create(q_name,
					ringsize, socket_id,
					RING_F_SP_ENQ | RING_F_SC_DEQ ); /* single prod, single cons */
		}
		if (clients[i].rx_q == NULL)
			rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for client %u\n", i);
		
		/*
		int port = rte_eth_from_rings("PORT1", &clients[i].rx_q, 1, &clients[i].rx_q, 1, rte_socket_id());
		RTE_LOG(INFO, APP, "Ring port Number: %d\n", port);
		retval = init_port(port);
		*/
	}
	if (retval < 0) return retval;
	
	return 0;
}
Example #10
static int
eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (hw->adapter_stopped == 0)
		vmxnet3_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
Example #11
/* unmap the PCI resource of a PCI device in virtual memory */
void
pci_uio_unmap_resource(struct rte_pci_device *dev)
{
	struct mapped_pci_resource *uio_res;
	struct mapped_pci_res_list *uio_res_list =
			RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);

	if (dev == NULL)
		return;

	/* find an entry for the device */
	uio_res = pci_uio_find_resource(dev);
	if (uio_res == NULL)
		return;

	/* secondary processes - just free maps */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return pci_uio_unmap(uio_res);

	TAILQ_REMOVE(uio_res_list, uio_res, next);

	/* unmap all resources */
	pci_uio_unmap(uio_res);

	/* free uio resource */
	rte_free(uio_res);

	/* close fd if in primary process */
	close(dev->intr_handle.fd);
	if (dev->intr_handle.uio_cfg_fd >= 0) {
		close(dev->intr_handle.uio_cfg_fd);
		dev->intr_handle.uio_cfg_fd = -1;
	}

	dev->intr_handle.fd = -1;
	dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
}
Example #12
static int
avf_dev_uninit(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	if (hw->adapter_stopped == 0)
		avf_dev_close(dev);

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	if (vf->rss_lut) {
		rte_free(vf->rss_lut);
		vf->rss_lut = NULL;
	}
	if (vf->rss_key) {
		rte_free(vf->rss_key);
		vf->rss_key = NULL;
	}

	return 0;
}
Example #13
File: env.c  Project: spdk/spdk
bool
spdk_process_is_primary(void)
{
	return (rte_eal_process_type() == RTE_PROC_PRIMARY);
}
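The SPDK wrapper above simply re-exports the EAL process type as a boolean. A minimal usage sketch follows; the app_* helpers are hypothetical placeholders, only spdk_process_is_primary() comes from the example itself:

static int
app_setup(void)
{
	/* Only the primary process creates shared resources; secondaries
	 * attach to what the primary already set up. app_create_shared_state()
	 * and app_attach_shared_state() are illustrative names, not SPDK APIs.
	 */
	if (spdk_process_is_primary())
		return app_create_shared_state();
	return app_attach_shared_state();
}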
Example #14
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 */
int
init(int argc, char *argv[])
{
	int retval;
	const struct rte_memzone *mz;
	uint8_t i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;

	/* get total number of ports */
	total_ports = rte_eth_dev_count();

	/* set up array for port data */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
	{
		mz = rte_memzone_lookup(VM_MZ_PORT_INFO);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
		ports = mz->addr;
	}
	else
	{
		mz = rte_memzone_reserve(VM_MZ_PORT_INFO, sizeof(*ports),
					rte_socket_id(), NO_FLAGS);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
		memset(mz->addr, 0, sizeof(*ports));
		ports = mz->addr;
	}

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* now initialise the ports we will use */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
	{
		;
	}
	else
	{
		for (i = 0; i < ports->num_ports; i++) {
			retval = init_port(ports->id[i]);
			if (retval != 0)
				rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
						(unsigned)i);
		}
	}
	check_all_ports_link_status(ports->num_ports, (~0x0));

	/* initialise the client queues/rings for inter-eu comms 
	init_shm_rings();
	*/

	return 0;
}
Example #15
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
	 * ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	device_info->rx_offload_capa = enic->rx_offload_capa;
	device_info->tx_offload_capa = enic->tx_offload_capa;
	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
	device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.rq_desc_count,
		.nb_min = ENIC_MIN_RQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
	};
	device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.wq_desc_count,
		.nb_min = ENIC_MIN_WQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
		.nb_seg_max = ENIC_TX_XMIT_MAX,
		.nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
	};
	device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_RX_BURST,
		.ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
			ENIC_DEFAULT_RX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_RX_RINGS,
	};
	device_info->default_txportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_TX_BURST,
		.ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
			ENIC_DEFAULT_TX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_TX_RINGS,
	};
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ptypes_overlay[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
	    dev->rx_pkt_burst != NULL) {
		struct enic *enic = pmd_priv(dev);
		if (enic->overlay_offload)
			return ptypes_overlay;
		else
			return ptypes;
	}
	return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
				struct ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}

static void debug_log_add_del_addr(struct ether_addr *addr, bool add)
{
	char mac_str[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
	PMD_INIT_LOG(DEBUG, " %s address %s\n",
		     add ? "add" : "remove", mac_str);
}

static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	char mac_str[ETHER_ADDR_FMT_SIZE];
	struct ether_addr *addr;
	uint32_t i, j;
	int ret;

	ENICPMD_FUNC_TRACE();

	/* Validate the given addresses first */
	for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
		addr = &mc_addr_set[i];
		if (!is_multicast_ether_addr(addr) ||
		    is_broadcast_ether_addr(addr)) {
			ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
			PMD_INIT_LOG(ERR, " invalid multicast address %s\n",
				     mac_str);
			return -EINVAL;
		}
	}

	/* Flush all if requested */
	if (nb_mc_addr == 0 || mc_addr_set == NULL) {
		PMD_INIT_LOG(DEBUG, " flush multicast addresses\n");
		for (i = 0; i < enic->mc_count; i++) {
			addr = &enic->mc_addrs[i];
			debug_log_add_del_addr(addr, false);
			ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
			if (ret)
				return ret;
		}
		enic->mc_count = 0;
		return 0;
	}

	if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
		PMD_INIT_LOG(ERR, " too many multicast addresses: max=%d\n",
			     ENIC_MULTICAST_PERFECT_FILTERS);
		return -ENOSPC;
	}
	/*
	 * devcmd is slow, so apply the difference instead of flushing and
	 * adding everything.
	 * 1. Delete addresses on the NIC but not on the host
	 */
	for (i = 0; i < enic->mc_count; i++) {
		addr = &enic->mc_addrs[i];
		for (j = 0; j < nb_mc_addr; j++) {
			if (is_same_ether_addr(addr, &mc_addr_set[j]))
				break;
		}
		if (j < nb_mc_addr)
			continue;
		debug_log_add_del_addr(addr, false);
		ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* 2. Add addresses on the host but not on the NIC */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		for (j = 0; j < enic->mc_count; j++) {
			if (is_same_ether_addr(addr, &enic->mc_addrs[j]))
				break;
		}
		if (j < enic->mc_count)
			continue;
		debug_log_add_del_addr(addr, true);
		ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* Keep a copy so we can flush/apply later on. */
	memcpy(enic->mc_addrs, mc_addr_set,
	       nb_mc_addr * sizeof(struct ether_addr));
	enic->mc_count = nb_mc_addr;
	return 0;
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}

static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64
				      *reta_conf,
				      uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_reta_entry64
				       *reta_conf,
				       uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}
	return enic_set_rss_reta(enic, &rss_cpu);
}

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					 struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
		int i;
		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}

static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id,
				     struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = enic->rx_offload_capa;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id,
				     struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_wq *wq = &enic->wq[tx_queue_id];

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = wq->ring.desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = wq->offloads;
	/* tx_thresh, and all the other fields are not applicable for enic */
}

static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
					    uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
					     uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int udp_tunnel_common_check(struct enic *enic,
				   struct rte_eth_udp_tunnel *tnl)
{
	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
		return -ENOTSUP;
	if (!enic->overlay_offload) {
		PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
			     "supported\n");
		return -ENOTSUP;
	}
	return 0;
}

static int update_vxlan_port(struct enic *enic, uint16_t port)
{
	if (vnic_dev_overlay_offload_cfg(enic->vdev,
					 OVERLAY_CFG_VXLAN_PORT_UPDATE,
					 port)) {
		PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
		return -EINVAL;
	}
	PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
	enic->vxlan_port = port;
	return 0;
}

static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * The NIC has 1 configurable VXLAN port number. "Adding" a new port
	 * number replaces it.
	 */
	if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
		PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, tnl->udp_port);
}

static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * Clear the previously set port number and restore the
	 * hardware default port number. Some drivers disable VXLAN
	 * offloads when there are no configured port numbers. But
	 * enic does not do that as VXLAN is part of overlay offload,
	 * which is tied to inner RSS and TSO.
	 */
	if (tnl->udp_port != enic->vxlan_port) {
		PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
}

static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
				      char *fw_version, size_t fw_size)
{
	struct vnic_devcmd_fw_info *info;
	struct enic *enic;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (fw_version == NULL || fw_size <= 0)
		return -EINVAL;
	enic = pmd_priv(eth_dev);
	ret = vnic_dev_fw_info(enic->vdev, &info);
	if (ret)
		return ret;
	snprintf(fw_version, fw_size, "%s %s",
		 info->fw_version, info->fw_build);
	fw_version[fw_size - 1] = '\0';
	return 0;
}

static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure        = enicpmd_dev_configure,
	.dev_start            = enicpmd_dev_start,
	.dev_stop             = enicpmd_dev_stop,
	.dev_set_link_up      = NULL,
	.dev_set_link_down    = NULL,
	.dev_close            = enicpmd_dev_close,
	.promiscuous_enable   = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable  = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable  = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update          = enicpmd_dev_link_update,
	.stats_get            = enicpmd_dev_stats_get,
	.stats_reset          = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get        = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set              = enicpmd_mtu_set,
	.vlan_filter_set      = NULL,
	.vlan_tpid_set        = NULL,
	.vlan_offload_set     = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start       = enicpmd_dev_rx_queue_start,
	.rx_queue_stop        = enicpmd_dev_rx_queue_stop,
	.tx_queue_start       = enicpmd_dev_tx_queue_start,
	.tx_queue_stop        = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup       = enicpmd_dev_rx_queue_setup,
	.rx_queue_release     = enicpmd_dev_rx_queue_release,
	.rx_queue_count       = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done   = NULL,
	.tx_queue_setup       = enicpmd_dev_tx_queue_setup,
	.tx_queue_release     = enicpmd_dev_tx_queue_release,
	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
	.rxq_info_get         = enicpmd_dev_rxq_info_get,
	.txq_info_get         = enicpmd_dev_txq_info_get,
	.dev_led_on           = NULL,
	.dev_led_off          = NULL,
	.flow_ctrl_get        = NULL,
	.flow_ctrl_set        = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add         = enicpmd_add_mac_addr,
	.mac_addr_remove      = enicpmd_remove_mac_addr,
	.mac_addr_set         = enicpmd_set_mac_addr,
	.set_mc_addr_list     = enicpmd_set_mc_addr_list,
	.filter_ctrl          = enicpmd_dev_filter_ctrl,
	.reta_query           = enicpmd_dev_rss_reta_query,
	.reta_update          = enicpmd_dev_rss_reta_update,
	.rss_hash_conf_get    = enicpmd_dev_rss_hash_conf_get,
	.rss_hash_update      = enicpmd_dev_rss_hash_update,
	.udp_tunnel_port_add  = enicpmd_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del  = enicpmd_dev_udp_tunnel_port_del,
	.fw_version_get       = enicpmd_dev_fw_version_get,
};

static int enic_parse_zero_one(const char *key,
			       const char *value,
			       void *opaque)
{
	struct enic *enic;
	bool b;

	enic = (struct enic *)opaque;
	if (strcmp(value, "0") == 0) {
		b = false;
	} else if (strcmp(value, "1") == 0) {
		b = true;
	} else {
		dev_err(enic, "Invalid value for %s"
			": expected=0|1 given=%s\n", key, value);
		return -EINVAL;
	}
	if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
		enic->disable_overlay = b;
	if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
		enic->enable_avx2_rx = b;
	return 0;
}

static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
				      const char *value,
				      void *opaque)
{
	struct enic *enic;

	enic = (struct enic *)opaque;
	if (strcmp(value, "trunk") == 0) {
		/* Trunk mode: always tag */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
	} else if (strcmp(value, "untag") == 0) {
		/* Untag default VLAN mode: untag if VLAN = default VLAN */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
	} else if (strcmp(value, "priority") == 0) {
		/*
		 * Priority-tag default VLAN mode: priority tag (VLAN header
		 * with ID=0) if VLAN = default
		 */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
	} else if (strcmp(value, "pass") == 0) {
		/* Pass through mode: do not touch tags */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	} else {
		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
			": expected=trunk|untag|priority|pass given=%s\n",
			value);
		return -EINVAL;
	}
	return 0;
}

static int enic_check_devargs(struct rte_eth_dev *dev)
{
	static const char *const valid_keys[] = {
		ENIC_DEVARG_DISABLE_OVERLAY,
		ENIC_DEVARG_ENABLE_AVX2_RX,
		ENIC_DEVARG_IG_VLAN_REWRITE,
		NULL};
	struct enic *enic = pmd_priv(dev);
	struct rte_kvargs *kvlist;

	ENICPMD_FUNC_TRACE();

	enic->disable_overlay = false;
	enic->enable_avx2_rx = false;
	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	if (!dev->device->devargs)
		return 0;
	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;
	if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
			       enic_parse_ig_vlan_rewrite, enic) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}
	rte_kvargs_free(kvlist);
	return 0;
}

/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	eth_dev->tx_pkt_prepare = &enic_prep_pkts;
	/* Let rte_eth_dev_close() release the port resources */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	err = enic_check_devargs(eth_dev);
	if (err)
		return err;
	return enic_probe(enic);
}

static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
	ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
	ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
Example #16
int
pktgen_save(char * path)
{
	port_info_t	  * info;
	pkt_seq_t	  * pkt;
	range_info_t  * range;
	uint32_t		flags;
	char		buff[64];
	FILE	  * fd;
	int			i, j;
	uint32_t	lcore;
	struct ether_addr eaddr;

	fd = fopen(path, "w");
	if ( fd == NULL ) {
		return -1;
	}

	for(i=0, lcore=0; i<RTE_MAX_LCORE; i++)
		if ( rte_lcore_is_enabled(i) )
			lcore |= (1 << i);

	fprintf(fd, "#\n# Pktgen - %s\n", pktgen_version());
	fprintf(fd, "# %s, %s\n\n", wr_copyright_msg(), wr_powered_by());

	// TODO: Determine DPDK arguments for rank and memory, default for now.
	fprintf(fd, "# Command line arguments: (DPDK args are defaults)\n");
	fprintf(fd, "# %s -c %x -n 3 -m 512 --proc-type %s -- ", pktgen.argv[0], lcore, (rte_eal_process_type() == RTE_PROC_PRIMARY)? "primary" : "secondary");
	for(i=1; i < pktgen.argc; i++)
		fprintf(fd, "%s ", pktgen.argv[i]);
	fprintf(fd, "\n\n");

	fprintf(fd, "#######################################################################\n");
	fprintf(fd, "# Pktgen Configuration script information:\n");
	fprintf(fd, "#   GUI socket is %s\n", (pktgen.flags & ENABLE_GUI_FLAG)? "Enabled" : "Not Enabled");
	fprintf(fd, "#   Enabled Port mask: %08x\n", pktgen.enabled_port_mask);
	fprintf(fd, "#   Flags %08x\n", pktgen.flags);
	fprintf(fd, "#   Number of ports: %d\n", pktgen.nb_ports);
	fprintf(fd, "#   Number ports per page: %d\n", pktgen.nb_ports_per_page);
	fprintf(fd, "#   Coremask 0x%08x\n", pktgen.coremask);
	fprintf(fd, "#   Number descriptors: RX %d TX: %d\n", pktgen.nb_rxd, pktgen.nb_txd);
	fprintf(fd, "#   Promiscuous mode is %s\n\n", (pktgen.flags & PROMISCUOUS_ON_FLAG)? "Enabled" : "Disabled");

	fprintf(fd, "# Port Descriptions (-- = blacklisted port):\n");
	for(i=0; i < RTE_MAX_ETHPORTS; i++) {
		if ( strlen(pktgen.portdesc[i]) ) {
			if ( (pktgen.enabled_port_mask & (1 << i)) == 0 )
				strcpy(buff, "--");
			else
				strcpy(buff, "++");

			fprintf(fd, "#   %s %s\n", buff, pktgen.portdesc[i]);
		}
	}
	fprintf(fd, "\n#######################################################################\n");

	fprintf(fd, "# Global configuration:\n");
	fprintf(fd, "geometry %dx%d\n", pktgen.scrn->ncols, pktgen.scrn->nrows);
	fprintf(fd, "mac_from_arp %s\n\n", (pktgen.flags & MAC_FROM_ARP_FLAG)? "enable" : "disable");

	for(i=0; i < RTE_MAX_ETHPORTS; i++) {
		info = &pktgen.info[i];
		pkt = &info->seq_pkt[SINGLE_PKT];
		range = &info->range;

		if ( info->tx_burst == 0 )
			continue;

		fprintf(fd, "######################### Port %2d ##################################\n", i);
		if ( info->transmit_count == 0 )
			strcpy(buff, "Forever");
		else
			snprintf(buff, sizeof(buff), "%ld", info->transmit_count);
		fprintf(fd, "#\n");
		flags = rte_atomic32_read(&info->port_flags);
		fprintf(fd, "# Port: %2d, Burst:%3d, Rate:%3d%%, Flags:%08x, TX Count:%s\n",
				info->pid, info->tx_burst, info->tx_rate, flags, buff);
		fprintf(fd, "#           SeqCnt:%d, Prime:%d VLAN ID:%04x, ",
				info->seqCnt, info->prime_cnt, info->vlanid);
		pktgen_link_state(info->pid, buff, sizeof(buff));
		fprintf(fd, "Link: %s\n", buff);

		fprintf(fd, "#\n# Set up the primary port information:\n");
		fprintf(fd, "set %d count %ld\n", info->pid, info->transmit_count);
		fprintf(fd, "set %d size %d\n", info->pid, pkt->pktSize+FCS_SIZE);
		fprintf(fd, "set %d rate %d\n", info->pid, info->tx_rate);
		fprintf(fd, "set %d burst %d\n", info->pid, info->tx_burst);
		fprintf(fd, "set %d sport %d\n", info->pid, pkt->sport);
		fprintf(fd, "set %d dport %d\n", info->pid, pkt->dport);
		fprintf(fd, "set %d prime %d\n", info->pid, info->prime_cnt);
		fprintf(fd, "set %s %d\n",
				(pkt->ethType == ETHER_TYPE_IPv4)? "ipv4" :
				(pkt->ethType == ETHER_TYPE_IPv6)? "ipv6" :
				(pkt->ethType == ETHER_TYPE_VLAN)? "vlan" : "unknown", i);
		fprintf(fd, "set %s %d\n",
				(pkt->ipProto == PG_IPPROTO_TCP)? "tcp" :
				(pkt->ipProto == PG_IPPROTO_ICMP)? "icmp" : "udp", i);
		fprintf(fd, "set ip dst %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(pkt->ip_dst_addr), 0xFFFFFFFF));
		fprintf(fd, "set ip src %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(pkt->ip_src_addr), pkt->ip_mask));
		fprintf(fd, "set mac %d %s\n", info->pid, inet_mtoa(buff, sizeof(buff), &pkt->eth_dst_addr));
		fprintf(fd, "vlanid %d %d\n\n", i, pkt->vlanid);

		fprintf(fd, "#\n# Port flag values:\n");
		fprintf(fd, "icmp.echo %d %sable\n", i, (flags & ICMP_ECHO_ENABLE_FLAG)? "en" : "dis");
		fprintf(fd, "pcap %d %sable\n", i, (flags & SEND_PCAP_PKTS)? "en" : "dis");
		fprintf(fd, "range %d %sable\n", i, (flags & SEND_RANGE_PKTS)? "en" : "dis");
		fprintf(fd, "process %d %sable\n", i, (flags & PROCESS_INPUT_PKTS)? "en" : "dis");
		fprintf(fd, "tap %d %sable\n", i, (flags & PROCESS_TAP_PKTS)? "en" : "dis");
		fprintf(fd, "vlan %d %sable\n\n", i, (flags & SEND_VLAN_ID)? "en" : "dis");

		fprintf(fd, "#\n# Range packet information:\n");
		fprintf(fd, "src.mac start %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->src_mac, &eaddr)));
		fprintf(fd, "src.mac min %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->src_mac_min, &eaddr)));
		fprintf(fd, "src.mac max %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->src_mac_max, &eaddr)));
        fprintf(fd, "src.mac inc %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->src_mac_inc, &eaddr)));

		fprintf(fd, "dst.mac start %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->dst_mac, &eaddr)));
		fprintf(fd, "dst.mac min %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->dst_mac_min, &eaddr)));
		fprintf(fd, "dst.mac max %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->dst_mac_max, &eaddr)));
		fprintf(fd, "dst.mac inc %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->dst_mac_inc, &eaddr)));

		fprintf(fd, "\n");
		fprintf(fd, "src.ip start %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->src_ip), 0xFFFFFFFF));
		fprintf(fd, "src.ip min %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->src_ip_min), 0xFFFFFFFF));
		fprintf(fd, "src.ip max %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->src_ip_max), 0xFFFFFFFF));
		fprintf(fd, "src.ip inc %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->src_ip_inc), 0xFFFFFFFF));

		fprintf(fd, "\n");
		fprintf(fd, "dst.ip start %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->dst_ip), 0xFFFFFFFF));
		fprintf(fd, "dst.ip min %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->dst_ip_min), 0xFFFFFFFF));
		fprintf(fd, "dst.ip max %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->dst_ip_max), 0xFFFFFFFF));
		fprintf(fd, "dst.ip inc %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->dst_ip_inc), 0xFFFFFFFF));

		fprintf(fd, "\n");
		fprintf(fd, "src.port start %d %d\n", i, range->src_port);
		fprintf(fd, "src.port min %d %d\n", i, range->src_port_min);
		fprintf(fd, "src.port max %d %d\n", i, range->src_port_max);
		fprintf(fd, "src.port inc %d %d\n", i, range->src_port_inc);

		fprintf(fd, "\n");
		fprintf(fd, "dst.port start %d %d\n", i, range->dst_port);
		fprintf(fd, "dst.port min %d %d\n", i, range->dst_port_min);
		fprintf(fd, "dst.port max %d %d\n", i, range->dst_port_max);
		fprintf(fd, "dst.port inc %d %d\n", i, range->dst_port_inc);

		fprintf(fd, "\n");
		fprintf(fd, "vlan.id start %d %d\n", i, range->vlan_id);
		fprintf(fd, "vlan.id min %d %d\n", i, range->vlan_id_min);
		fprintf(fd, "vlan.id max %d %d\n", i, range->vlan_id_max);
		fprintf(fd, "vlan.id inc %d %d\n", i, range->vlan_id_inc);

		fprintf(fd, "\n");
		fprintf(fd, "pkt.size start %d %d\n", i, range->pkt_size + FCS_SIZE);
		fprintf(fd, "pkt.size min %d %d\n", i, range->pkt_size_min + FCS_SIZE);
		fprintf(fd, "pkt.size max %d %d\n", i, range->pkt_size_max + FCS_SIZE);
		fprintf(fd, "pkt.size inc %d %d\n\n", i, range->pkt_size_inc);

		fprintf(fd, "#\n# Set up the sequence data for the port.\n");
		fprintf(fd, "set %d seqCnt %d\n", info->pid, info->seqCnt);
		for(j=0; j<info->seqCnt; j++) {
			pkt = &info->seq_pkt[j];
			fprintf(fd, "seq %d %d %s ", j, i, inet_mtoa(buff, sizeof(buff), &pkt->eth_dst_addr));
			fprintf(fd, "%s ", inet_mtoa(buff, sizeof(buff), &pkt->eth_src_addr));
			fprintf(fd, "%s ", inet_ntop4(buff, sizeof(buff), htonl(pkt->ip_dst_addr), 0xFFFFFFFF));
			fprintf(fd, "%s ", inet_ntop4(buff, sizeof(buff), htonl(pkt->ip_src_addr), pkt->ip_mask));
			fprintf(fd, "%d %d %s %s %d %d\n",
					pkt->sport,
					pkt->dport,
					(pkt->ethType == ETHER_TYPE_IPv4)? "ipv4" :
							(pkt->ethType == ETHER_TYPE_IPv6)? "ipv6" :
							(pkt->ethType == ETHER_TYPE_VLAN)? "vlan" : "Other",
					(pkt->ipProto == PG_IPPROTO_TCP)? "tcp" :
							(pkt->ipProto == PG_IPPROTO_ICMP)? "icmp" : "udp",
					pkt->vlanid,
					pkt->pktSize+FCS_SIZE);
		}

		if ( pktgen.info[i].pcap ) {
			fprintf(fd, "#\n# PCAP port %d\n", i);
			fprintf(fd, "#    Packet count: %d\n", pktgen.info[i].pcap->pkt_count);
			fprintf(fd, "#    Filename    : %s\n", pktgen.info[i].pcap->filename);
		}
		fprintf(fd, "\n");
	}
	fprintf(fd, "################################ Done #################################\n");

	fclose(fd);
	return 0;
}
Example #17
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
		size_t page_sz, int flags, int mmap_flags)
{
	bool addr_is_hint, allow_shrink, unmap, no_align;
	uint64_t map_sz;
	void *mapped_addr, *aligned_addr;

	if (system_page_sz == 0)
		system_page_sz = sysconf(_SC_PAGESIZE);

	mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
	allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
	unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;

	if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) internal_config.base_virtaddr;

	if (requested_addr == NULL && next_baseaddr != NULL) {
		requested_addr = next_baseaddr;
		requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
		addr_is_hint = true;
	}

	/* we don't need alignment of resulting pointer in the following cases:
	 *
	 * 1. page size is equal to system size
	 * 2. we have a requested address, and it is page-aligned, and we will
	 *    be discarding the address if we get a different one.
	 *
	 * for all other cases, alignment is potentially necessary.
	 */
	no_align = (requested_addr != NULL &&
		requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
		!addr_is_hint) ||
		page_sz == system_page_sz;

	do {
		map_sz = no_align ? *size : *size + page_sz;
		if (map_sz > SIZE_MAX) {
			RTE_LOG(ERR, EAL, "Map size too big\n");
			rte_errno = E2BIG;
			return NULL;
		}

		mapped_addr = mmap(requested_addr, (size_t)map_sz, PROT_READ,
				mmap_flags, -1, 0);
		if (mapped_addr == MAP_FAILED && allow_shrink)
			*size -= page_sz;
	} while (allow_shrink && mapped_addr == MAP_FAILED && *size > 0);

	/* align resulting address - if map failed, we will ignore the value
	 * anyway, so no need to add additional checks.
	 */
	aligned_addr = no_align ? mapped_addr :
			RTE_PTR_ALIGN(mapped_addr, page_sz);

	if (*size == 0) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
			strerror(errno));
		rte_errno = errno;
		return NULL;
	} else if (mapped_addr == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		/* pass errno up the call chain */
		rte_errno = errno;
		return NULL;
	} else if (requested_addr != NULL && !addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
			requested_addr, aligned_addr);
		munmap(mapped_addr, map_sz);
		rte_errno = EADDRNOTAVAIL;
		return NULL;
	} else if (requested_addr != NULL && addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
			requested_addr, aligned_addr);
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory into secondary processes\n");
	} else if (next_baseaddr != NULL) {
		next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);
	}

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		aligned_addr, *size);

	if (unmap) {
		munmap(mapped_addr, map_sz);
	} else if (!no_align) {
		void *map_end, *aligned_end;
		size_t before_len, after_len;

		/* when we reserve space with alignment, we add alignment to
		 * mapping size. On 32-bit, if 1GB alignment was requested, this
		 * would waste 1GB of address space, which is a luxury we cannot
		 * afford. so, if alignment was performed, check if any unneeded
		 * address space can be unmapped back.
		 */

		map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
		aligned_end = RTE_PTR_ADD(aligned_addr, *size);

		/* unmap space before aligned mmap address */
		before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
		if (before_len > 0)
			munmap(mapped_addr, before_len);

		/* unmap space after aligned end mmap address */
		after_len = RTE_PTR_DIFF(map_end, aligned_end);
		if (after_len > 0)
			munmap(aligned_end, after_len);
	}

	return aligned_addr;
}
Example #18
/* map the PCI resource of a PCI device in virtual memory */
int
pci_uio_map_resource(struct rte_pci_device *dev)
{
	int i, map_idx;
	char dirname[PATH_MAX];
	char cfgname[PATH_MAX];
	char devname[PATH_MAX]; /* contains the /dev/uioX */
	void *mapaddr;
	int uio_num;
	uint64_t phaddr;
	struct rte_pci_addr *loc = &dev->addr;
	struct mapped_pci_resource *uio_res;
	struct mapped_pci_res_list *uio_res_list = RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
	struct pci_map *maps;

	dev->intr_handle.fd = -1;
	dev->intr_handle.uio_cfg_fd = -1;
	dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;

	/* secondary processes - use already recorded details */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return pci_uio_map_secondary(dev);

	/* find uio resource */
	uio_num = pci_get_uio_dev(dev, dirname, sizeof(dirname));
	if (uio_num < 0) {
		RTE_LOG(WARNING, EAL, "  "PCI_PRI_FMT" not managed by UIO driver, "
				"skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
		return 1;
	}
	snprintf(devname, sizeof(devname), "/dev/uio%u", uio_num);

	/* save fd if in primary process */
	dev->intr_handle.fd = open(devname, O_RDWR);
	if (dev->intr_handle.fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			devname, strerror(errno));
		return -1;
	}
	dev->intr_handle.type = RTE_INTR_HANDLE_UIO;

	snprintf(cfgname, sizeof(cfgname),
			"/sys/class/uio/uio%u/device/config", uio_num);
	dev->intr_handle.uio_cfg_fd = open(cfgname, O_RDWR);
	if (dev->intr_handle.uio_cfg_fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			cfgname, strerror(errno));
		return -1;
	}

	/* set bus master that is not done by uio_pci_generic */
	if (pci_uio_set_bus_master(dev->intr_handle.uio_cfg_fd)) {
		RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
		return -1;
	}

	/* allocate the mapping details for secondary processes*/
	uio_res = rte_zmalloc("UIO_RES", sizeof(*uio_res), 0);
	if (uio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store uio mmap details\n", __func__);
		return -1;
	}

	snprintf(uio_res->path, sizeof(uio_res->path), "%s", devname);
	memcpy(&uio_res->pci_addr, &dev->addr, sizeof(uio_res->pci_addr));

	/* Map all BARs */
	maps = uio_res->maps;
	for (i = 0, map_idx = 0; i != PCI_MAX_RESOURCE; i++) {
		int fd;
		int fail = 0;

		/* skip empty BAR */
		phaddr = dev->mem_resource[i].phys_addr;
		if (phaddr == 0)
			continue;

		/* update devname for mmap  */
		snprintf(devname, sizeof(devname),
				SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/resource%d",
				loc->domain, loc->bus, loc->devid, loc->function,
				i);

		/*
		 * open resource file, to mmap it
		 */
		fd = open(devname, O_RDWR);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
					devname, strerror(errno));
			return -1;
		}

		/* try mapping somewhere close to the end of hugepages */
		if (pci_map_addr == NULL)
			pci_map_addr = pci_find_max_end_va();

		mapaddr = pci_map_resource(pci_map_addr, fd, 0,
				(size_t)dev->mem_resource[i].len, 0);
		if (mapaddr == MAP_FAILED)
			fail = 1;

		pci_map_addr = RTE_PTR_ADD(mapaddr,
				(size_t)dev->mem_resource[i].len);

		maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
		if (maps[map_idx].path == NULL)
			fail = 1;

		if (fail) {
			rte_free(uio_res);
			close(fd);
			return -1;
		}
		close(fd);

		maps[map_idx].phaddr = dev->mem_resource[i].phys_addr;
		maps[map_idx].size = dev->mem_resource[i].len;
		maps[map_idx].addr = mapaddr;
		maps[map_idx].offset = 0;
		strcpy(maps[map_idx].path, devname);
		map_idx++;
		dev->mem_resource[i].addr = mapaddr;
	}

	uio_res->nb_maps = map_idx;

	TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);

	return 0;
}
Example #19
/*
 * This function is run in the secondary instance to test that creation of
 * objects fails in a secondary
 */
static int
run_object_creation_tests(void)
{
	const unsigned flags = 0;
	const unsigned size = 1024;
	const unsigned elt_size = 64;
	const unsigned cache_size = 64;
	const unsigned priv_data_size = 32;

	printf("### Testing object creation - expect lots of mz reserve errors!\n");

	rte_errno = 0;
	if ((rte_memzone_reserve("test_mz", size, rte_socket_id(),
				 flags) == NULL) &&
	    (rte_memzone_lookup("test_mz") == NULL)) {
		printf("Error: unexpected return value from rte_memzone_reserve\n");
		return -1;
	}
	printf("# Checked rte_memzone_reserve() OK\n");

	rte_errno = 0;
	if ((rte_ring_create(
		     "test_ring", size, rte_socket_id(), flags) == NULL) &&
		    (rte_ring_lookup("test_ring") == NULL)){
		printf("Error: unexpected return value from rte_ring_create()\n");
		return -1;
	}
	printf("# Checked rte_ring_create() OK\n");

	rte_errno = 0;
	if ((rte_mempool_create("test_mp", size, elt_size, cache_size,
				priv_data_size, NULL, NULL, NULL, NULL,
				rte_socket_id(), flags) == NULL) &&
	     (rte_mempool_lookup("test_mp") == NULL)){
		printf("Error: unexpected return value from rte_mempool_create()\n");
		return -1;
	}
	printf("# Checked rte_mempool_create() OK\n");

#ifdef RTE_LIBRTE_HASH
	const struct rte_hash_parameters hash_params = { .name = "test_mp_hash" };
	rte_errno = 0;
	if ((rte_hash_create(&hash_params) != NULL) &&
	    (rte_hash_find_existing(hash_params.name) == NULL)){
		printf("Error: unexpected return value from rte_hash_create()\n");
		return -1;
	}
	printf("# Checked rte_hash_create() OK\n");

	const struct rte_fbk_hash_params fbk_params = { .name = "test_fbk_mp_hash" };
	rte_errno = 0;
	if ((rte_fbk_hash_create(&fbk_params) != NULL) &&
	    (rte_fbk_hash_find_existing(fbk_params.name) == NULL)){
		printf("Error: unexpected return value from rte_fbk_hash_create()\n");
		return -1;
	}
	printf("# Checked rte_fbk_hash_create() OK\n");
#endif

#ifdef RTE_LIBRTE_LPM
	rte_errno = 0;
	struct rte_lpm_config config;

	config.max_rules = rte_socket_id();
	config.number_tbl8s = 256;
	config.flags = 0;
	if ((rte_lpm_create("test_lpm", size, &config) != NULL) &&
	    (rte_lpm_find_existing("test_lpm") == NULL)){
		printf("Error: unexpected return value from rte_lpm_create()\n");
		return -1;
	}
	printf("# Checked rte_lpm_create() OK\n");
#endif

	/* Run a test_pci call */
	if (test_pci() != 0) {
		printf("PCI scan failed in secondary\n");
		if (getuid() == 0) /* pci scans can fail as non-root */
			return -1;
	} else
		printf("PCI scan succeeded in secondary\n");

	return 0;
}

/* if called in a primary process, just spawns off a secondary process to
 * run validation tests - which brings us right back here again...
 * if called in a secondary process, this runs a series of API tests to check
 * how things run in a secondary instance.
 */
int
test_mp_secondary(void)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (!test_pci_run) {
			printf("=== Running pre-requisite test of test_pci\n");
			test_pci();
			printf("=== Requisite test done\n");
		}
		return run_secondary_instances();
	}

	printf("IN SECONDARY PROCESS\n");

	return run_object_creation_tests();
}

static struct test_command multiprocess_cmd = {
	.command = "multiprocess_autotest",
	.callback = test_mp_secondary,
};
REGISTER_TEST_COMMAND(multiprocess_cmd);
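As the comment above test_mp_secondary() notes, the primary instance re-launches the same binary as a secondary process and the actual object-creation checks run there. A minimal sketch of that split, assuming the standard EAL --proc-type flag; run_primary_side() and run_secondary_side() are illustrative names, not DPDK APIs:

int
main(int argc, char **argv)
{
	/* Start once as primary, then again with --proc-type=secondary */
	int ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return run_primary_side();	/* e.g. spawn and wait for secondaries */
	return run_secondary_side();		/* e.g. look up objects the primary created */
}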
Example #20
static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct octeontx_ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	ret = octeontx_ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events =  info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}
Example #21
0
static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &avf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avf_recv_pkts;
	eth_dev->tx_pkt_burst = &avf_xmit_pkts;
	eth_dev->tx_pkt_prepare = &avf_prep_pkts;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check if we need a different RX
	 * and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		avf_set_rx_function(eth_dev);
		avf_set_tx_function(eth_dev);
		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->eth_dev = eth_dev;

	if (avf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc(
					"avf_mac",
					ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
					0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
	if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		eth_random_addr(hw->mac.addr);
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   avf_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	avf_enable_irq0(hw);

	return 0;
}
Example #22
0
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 */
int
init(int argc, char *argv[])
{
	int retval;
	const struct rte_memzone *mz;
	unsigned i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;

	/* get total number of ports */
	total_ports = rte_eth_dev_count();

	/* set up array for port data */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
	{
		mz = rte_memzone_lookup(MZ_PORT_INFO);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
		ports = mz->addr;
	}
	else /* RTE_PROC_PRIMARY */
	{
		mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports),
					rte_socket_id(), NO_FLAGS);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
		memset(mz->addr, 0, sizeof(*ports));
		ports = mz->addr;
	}

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* now initialise the ports we will use */	
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
	{
		for (i = 0; i < ports->num_ports; i++) {
			retval = init_port(ports->id[i]);
			if (retval != 0)
				rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
						(unsigned)i);
		}
	}
	check_all_ports_link_status(ports->num_ports, (~0x0));

	/* initialise the client queues/rings for inter-eu comms */
	init_shm_rings();

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
	{
		RTE_LOG(INFO, APP, "HOST SHARE MEM Init.\n");
		/* create metadata, output cmdline 
		if (rte_hostshmem_metadata_create(HOSTSHMEM_METADATA_NAME) < 0)
			rte_exit(EXIT_FAILURE, "Cannot create HOSTSHMEM metadata\n");
		if (rte_hostshmem_metadata_add_memzone(mz, HOSTSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add memzone to HOSTSHMEM metadata\n");					
		if (rte_hostshmem_metadata_add_mempool(pktmbuf_pool, HOSTSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to HOSTSHMEM metadata\n");	
		for (i = 0; i < num_clients; i++) 
		{
			if (rte_hostshmem_metadata_add_ring(clients[i].rx_q,
					HOSTSHMEM_METADATA_NAME) < 0)
				rte_exit(EXIT_FAILURE, "Cannot add ring client %d to HOSTSHMEM metadata\n", i);
		}
		generate_hostshmem_cmdline(HOSTSHMEM_METADATA_NAME);
		*/
		
		const struct rte_mem_config *mcfg;
		/* get pointer to global configuration */
		mcfg = rte_eal_get_configuration()->mem_config;

		for (i = 0; i < RTE_MAX_MEMSEG; i++) {
			if (mcfg->memseg[i].addr == NULL)
				break;

			printf("Segment %u: phys:0x%"PRIx64", len:%zu, "
				   "virt:%p, socket_id:%"PRId32", "
				   "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
				   "nrank:%"PRIx32"\n", i,
				   mcfg->memseg[i].phys_addr,
				   mcfg->memseg[i].len,
				   mcfg->memseg[i].addr,
				   mcfg->memseg[i].socket_id,
				   mcfg->memseg[i].hugepage_sz,
				   mcfg->memseg[i].nchannel,
				   mcfg->memseg[i].nrank);
		}
		
		RTE_LOG(INFO, APP, "HOST SHARE MEM Init. done\n");
		
		RTE_LOG(INFO, APP, "IV SHARE MEM Init.\n");
		/* create metadata, output cmdline */
		if (rte_ivshmem_metadata_create(IVSHMEM_METADATA_NAME) < 0)
			rte_exit(EXIT_FAILURE, "Cannot create IVSHMEM metadata\n");	
		if (rte_ivshmem_metadata_add_memzone(mz, IVSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add memzone to IVSHMEM metadata\n");					
		if (rte_ivshmem_metadata_add_mempool(pktmbuf_pool, IVSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to IVSHMEM metadata\n");				
		
		for (i = 0; i < num_clients; i++) 
		{
			if (rte_ivshmem_metadata_add_ring(clients[i].rx_q,
					IVSHMEM_METADATA_NAME) < 0)
				rte_exit(EXIT_FAILURE, "Cannot add ring client %d to IVSHMEM metadata\n", i);
		}
		generate_ivshmem_cmdline(IVSHMEM_METADATA_NAME);
		RTE_LOG(INFO, APP, "IV SHARE MEM Done.\n");
	}

	return 0;
}
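Example #22 shows the primary/secondary split for the shared port-info memzone: the primary reserves MZ_PORT_INFO while a secondary merely looks it up by name. A client process would attach to the rings the primary created in init_shm_rings() the same way. The sketch below is illustrative only; the ring name "MProc_Client_0_RX" is an assumption, not necessarily the name the server actually uses.

/* Hypothetical client-side sketch: a secondary process attaching by name
 * to objects created by the primary above. The ring name is illustrative.
 */
#include <rte_memzone.h>
#include <rte_ring.h>

static int
attach_to_shared_state(void)
{
	const struct rte_memzone *mz = rte_memzone_lookup(MZ_PORT_INFO);
	struct rte_ring *rx_ring = rte_ring_lookup("MProc_Client_0_RX");

	if (mz == NULL || rx_ring == NULL)
		return -1;	/* primary has not set things up yet */

	/* mz->addr holds the shared port info; rx_ring carries packets */
	return 0;
}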
Example #23
0
/*
 * It returns 0 on success.
 */
static int
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	pci_dev = eth_dev->pci_dev;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;

	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;

	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1");
		return -EIO;
	}

	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
		return -EIO;
	}

	/* Getting MAC Address */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr  , &mac_lo, 4);
	memcpy(hw->perm_addr+4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	return 0;
}
Example #24
0
/**
 * Check if running as a secondary process.
 *
 * @return
 *   Nonzero if running as a secondary process.
 */
inline int
mlx5_is_secondary(void)
{
	return rte_eal_process_type() != RTE_PROC_PRIMARY;
}
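A short usage sketch for a helper like this: control-path entry points typically bail out early in secondary processes. The function name mlx5_example_configure() is hypothetical; E_RTE_SECONDARY is the standard EAL errno for this situation.

/* Hypothetical usage sketch, not an actual mlx5 entry point. */
#include <rte_errno.h>

static int
mlx5_example_configure(struct rte_eth_dev *dev)
{
	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;	/* control path is primary-only */

	(void)dev;	/* primary-only configuration would go here */
	return 0;
}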
Example #25
0
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);

	pci_dev = eth_dev->pci_dev;

	eth_dev->dev_ops = &eth_em_ops;
	eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
	eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst =
				(eth_rx_burst_t)&eth_em_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;
	adapter->stopped = 0;

	/* For ICH8 support we'll need to map the flash memory BAR */

	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
			em_hw_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
			"failed to init HW",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
		return -(ENODEV);
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
			hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -(ENOMEM);
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
		eth_dev->data->mac_addrs);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&(pci_dev->intr_handle),
		eth_em_interrupt_handler, (void *)eth_dev);

	return (0);
}
Example #26
0
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_update_vport_params params = {
		.vport_id = 0,
		.accept_any_vlan = action,
		.update_accept_any_vlan_flg = 1,
	};
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (qdev->accept_any_vlan == action)
		return;

	rc = qdev->ops->vport_update(edev, &params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		qdev->accept_any_vlan = action;
	}
}

void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	/* TODO: - QED_FILTER_TYPE_UCAST */
	enum qed_filter_rx_mode_type accept_flags =
			QED_FILTER_RX_MODE_TYPE_REGULAR;
	struct qed_filter_params rx_mode;
	int rc;

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
				   eth_dev->data->mac_addrs[0].addr_bytes);
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	} else {
		rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
					   eth_dev->data->
					   mac_addrs[0].addr_bytes);
		if (rc) {
			DP_ERR(edev, "Unable to add filter\n");
			return;
		}
	}

	/* take care of VLAN mode */
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
		qede_config_accept_any_vlan(qdev, true);
	} else if (!qdev->non_configured_vlans) {
		/* If we don't have non-configured VLANs and promisc
		 * is not set, then check if we need to disable
		 * accept_any_vlan mode, because in that case
		 * accept_any_vlan mode is set as part of IFF_PROMISC
		 * flag handling.
		 */
		qede_config_accept_any_vlan(qdev, false);
	}
	rx_mode.filter.accept_flags = accept_flags;
	rc = qdev->ops->filter_config(edev, &rx_mode);
	if (rc)
		DP_ERR(edev, "Filter config failed rc=%d\n", rc);
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_inner_vlan_removal_flg = 1;
	vport_update_params.inner_vlan_removal_flg = set_stripping;
	rc = qdev->ops->vport_update(edev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}

static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
		mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
				  enum qed_filter_xcast_params_type opcode,
				  uint16_t vid)
{
	struct qed_filter_params filter_cmd;
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return qdev->ops->filter_config(edev, &filter_cmd);
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	int rc;

	if (vlan_id != 0 &&
	    qdev->configured_vlans == dev_info->num_vlan_filters) {
		DP_NOTICE(edev, false, "Reached max VLAN filter limit,"
				     " enabling accept_any_vlan\n");
		qede_config_accept_any_vlan(qdev, true);
		return 0;
	}

	if (on) {
		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan_id);
		if (rc)
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
		else
			if (vlan_id != 0)
				qdev->configured_vlans++;
	} else {
		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
					    vlan_id);
		if (rc)
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		else
			if (vlan_id != 0)
				qdev->configured_vlans--;
	}

	DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
			vlan_id, on, rc, qdev->configured_vlans);

	return rc;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
		DP_NOTICE(edev, false,
			  "Unequal number of rx/tx queues "
			  "is not supported RX=%u TX=%u\n",
			  eth_dev->data->nb_rx_queues,
			  eth_dev->data->nb_tx_queues);
		return -EINVAL;
	}

	qdev->num_rss = eth_dev->data->nb_rx_queues;

	/* Initial state */
	qdev->state = QEDE_CLOSE;

	/* Sanity checks and throw warnings */

	if (rxmode->enable_scatter == 1) {
		DP_ERR(edev, "Scattered RX packets are not supported\n");
		return -EINVAL;
	}

	if (rxmode->enable_lro == 1) {
		DP_INFO(edev, "LRO is not supported\n");
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc)
		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

	if (!rxmode->hw_ip_checksum)
		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
			      "in hw\n");


	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
		QEDE_RSS_CNT(qdev), qdev->num_tc);

	DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
		" port %u first_on_engine %d\n",
		edev->hwfns[0].my_id,
		edev->hwfns[0].rel_pf_id,
		edev->hwfns[0].abs_pf_id,
		edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = NUM_RX_BDS_MAX,
	.nb_min = 128,
	.nb_align = 128	/* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = NUM_TX_BDS_MAX,
	.nb_min = 256,
	.nb_align = 256
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
					      QEDE_ETH_OVERHEAD);
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;
	dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
	dev_info->max_tx_queues = dev_info->max_rx_queues;
	dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
	if (IS_VF(edev))
		dev_info->max_vfs = 0;
	else
		dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
	dev_info->driver_name = qdev->drv_ver;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = QEDE_TXQ_FLAGS,
	};

	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM);
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM);

	dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
}
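The qede_rx_desc_lim/qede_tx_desc_lim tables above are exported to applications through qede_dev_info_get(). A hedged application-side sketch of consuming those limits is shown below; clamp_rx_descs() is an illustrative helper, not part of the driver.

/* Hypothetical application-side sketch: clamp a requested descriptor
 * count to the limits the PMD reports before calling rte_eth_rx_queue_setup().
 */
#include <rte_ethdev.h>

static uint16_t
clamp_rx_descs(uint8_t port_id, uint16_t requested)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (requested > dev_info.rx_desc_lim.nb_max)
		requested = dev_info.rx_desc_lim.nb_max;
	if (requested < dev_info.rx_desc_lim.nb_min)
		requested = dev_info.rx_desc_lim.nb_min;
	/* round down to the alignment the hardware requires */
	if (dev_info.rx_desc_lim.nb_align)
		requested -= requested % dev_info.rx_desc_lim.nb_align;
	return requested;
}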

/* return 0 means link status changed, -1 means not changed */
static int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t link_duplex;
	struct qed_link_output link;
	struct rte_eth_link *curr = &eth_dev->data->dev_link;

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);

	/* Link Speed */
	curr->link_speed = link.speed;

	/* Link Mode */
	switch (link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	curr->link_duplex = link_duplex;

	/* Link Status */
	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		curr->link_speed, curr->link_duplex,
		curr->link_autoneg, curr->link_status);

	/* return 0 means link status changed, -1 means not changed */
	return ((curr->link_status == link.link_up) ? -1 : 0);
}

static void
qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
		     enum qed_filter_rx_mode_type accept_flags)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_filter_params rx_mode;

	DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);

	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
	rx_mode.filter.accept_flags = accept_flags;
	qdev->ops->filter_config(edev, &rx_mode);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qede_rx_mode_setting(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qede_rx_mode_setting(eth_dev,
				     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall clean up fastpath resources in hw, but without
	 * releasing dma memories and sw structures, so that dev_start() can
	 * be called by the app without reconfiguration. In dev_close(),
	 * however, all resources are released and the device must be fully
	 * re-initialised before it can be brought up again.
	 */
	if (qdev->state != QEDE_STOP)
		qede_dev_stop(eth_dev);
	else
		DP_INFO(edev, "Device is already stopped\n");

	qede_free_mem_load(qdev);

	qede_free_fp_arrays(qdev);

	qede_dev_set_link_state(eth_dev, false);

	qdev->ops->common->slowpath_stop(edev);

	qdev->ops->common->remove(edev);

	rte_intr_disable(&eth_dev->pci_dev->intr_handle);

	rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);

	qdev->state = QEDE_CLOSE;
}
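From the application's point of view the comment above translates into the usual teardown order: stop the port first (fast-path resources are torn down but the port can still be restarted), then close it to free everything. A minimal illustrative sketch, with the port number as a parameter:

/* Illustrative application-side teardown, matching the stop/close
 * semantics described for qede_dev_close() above.
 */
#include <rte_ethdev.h>

static void
teardown_port(uint8_t port_id)
{
	rte_eth_dev_stop(port_id);	/* reversible: dev_start() may follow */
	rte_eth_dev_close(port_id);	/* final: releases all port resources */
}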

static void
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;

	qdev->ops->get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.rx_ucast_pkts +
	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;

	eth_stats->ibytes = stats.rx_ucast_bytes +
	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;

	eth_stats->ierrors = stats.rx_crc_errors +
	    stats.rx_align_errors +
	    stats.rx_carrier_errors +
	    stats.rx_oversize_packets +
	    stats.rx_jabbers + stats.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.no_buff_discards;

	eth_stats->imissed = stats.mftag_filter_discards +
	    stats.mac_filter_discards +
	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.tx_ucast_pkts +
	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;

	eth_stats->obytes = stats.tx_ucast_bytes +
	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;

	eth_stats->oerrors = stats.tx_err_drop_pkts;

	DP_INFO(edev,
		"no_buff_discards=%" PRIu64 ""
		" mac_filter_discards=%" PRIu64 ""
		" brb_truncates=%" PRIu64 ""
		" brb_discards=%" PRIu64 "\n",
		stats.no_buff_discards,
		stats.mac_filter_discards,
		stats.brb_truncates, stats.brb_discards);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qede_rx_mode_setting(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
};

static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;
	/* 32 rx + 32 tx */
	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = 64;
	qed_ops->common->update_pf_params(edev, &pf_params);
}

static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	uint32_t qed_ver;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t max_mac_addrs;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	edev = &adapter->edev;
	pci_addr = eth_dev->pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_NOTICE(edev, false,
			  "Skipping device init from secondary process\n");
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");

	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
				    dp_module, dp_level, is_vf);

	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}

	qede_update_pf_params(edev);

	rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);

	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_MAJOR_VERSION;
	params.drv_minor = QEDE_MINOR_VERSION;
	params.drv_rev = QEDE_REVISION_VERSION;
	params.drv_eng = QEDE_ENGINEERING_VERSION;
	strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);

	if (!is_vf)
		adapter->dev_info.num_mac_addrs =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					    ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
					     &adapter->dev_info.num_mac_addrs);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_addrs),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_NOTICE(edev, false,
					  "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		  adapter->primary_mac.addr_bytes[0],
		  adapter->primary_mac.addr_bytes[1],
		  adapter->primary_mac.addr_bytes[2],
		  adapter->primary_mac.addr_bytes[3],
		  adapter->primary_mac.addr_bytes[4],
		  adapter->primary_mac.addr_bytes[5]);

	return rc;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
	},
	{.vendor_id = 0,}
};

static struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
	},
	{.vendor_id = 0,}
};

static struct eth_driver rte_qedevf_pmd = {
	.pci_drv = {
		    .name = "rte_qedevf_pmd",
		    .id_table = pci_id_qedevf_map,
		    .drv_flags =
		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		    },
	.eth_dev_init = qedevf_eth_dev_init,
	.eth_dev_uninit = qedevf_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

static struct eth_driver rte_qede_pmd = {
	.pci_drv = {
		    .name = "rte_qede_pmd",
		    .id_table = pci_id_qede_map,
		    .drv_flags =
		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		    },
	.eth_dev_init = qede_eth_dev_init,
	.eth_dev_uninit = qede_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

static int
rte_qedevf_pmd_init(const char *name __rte_unused,
		    const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_qedevf_pmd);

	return 0;
}

static int
rte_qede_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_qede_pmd);

	return 0;
}

static struct rte_driver rte_qedevf_driver = {
	.type = PMD_PDEV,
	.init = rte_qedevf_pmd_init
};

static struct rte_driver rte_qede_driver = {
	.type = PMD_PDEV,
	.init = rte_qede_pmd_init
};

PMD_REGISTER_DRIVER(rte_qede_driver);
PMD_REGISTER_DRIVER(rte_qedevf_driver);