Code example #1
File: vmxnet3_ethdev.c  Project: olgk/dpdk.org
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already closed.");
		return;
	}

	/* disable interrupts */
	vmxnet3_disable_intr(hw);

	/* quiesce the device first */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");
	hw->adapter_stopped = 0;

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	vmxnet3_dev_atomic_write_link_status(dev, &link);
}
Code example #2
File: dpaa2_hw_dpni.c  Project: Leon555/dpdk
int
dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
		     void *blist)
{
	/* Function to attach a buffer pool list to a DPNI. The buffer pool
	 * list handle is passed in blist.
	 */
	int32_t retcode;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_pools_cfg bpool_cfg;
	struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
	struct dpni_buffer_layout layout;
	int tot_size;

	/* Rx buffer layout: check alignment for the buffer layout first */
	tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
	tot_size = RTE_ALIGN_CEIL(tot_size,
				  DPAA2_PACKET_LAYOUT_ALIGN);

	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;

	layout.data_head_room =
		tot_size - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
	retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
					 DPNI_QUEUE_RX, &layout);
	if (retcode) {
		PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout\n",
			     retcode);
		return retcode;
	}

	/* Attach buffer pool to the network interface as described by the user */
	bpool_cfg.num_dpbp = 1;
	bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
	bpool_cfg.pools[0].backup_pool = 0;
	bpool_cfg.pools[0].buffer_size =
		RTE_ALIGN_CEIL(bp_list->buf_pool.size,
			       256 /*DPAA2_PACKET_LAYOUT_ALIGN*/);

	retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR, "Error in attaching the buffer pool list"
				" bpid = %d Error code = %d\n",
				bpool_cfg.pools[0].dpbp_id, retcode);
		return retcode;
	}

	priv->bp_list = bp_list;
	return 0;
}
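
The buffer layout above rounds the reserved headroom up with RTE_ALIGN_CEIL before programming it into the DPNI. Below is a minimal, standalone sketch of that power-of-two ceiling alignment; the input values are only illustrative, not the driver's actual constants.

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of align (align must be a power of two),
 * which is what RTE_ALIGN_CEIL does for the packet layout above. */
static inline uint32_t
align_ceil(uint32_t x, uint32_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int
main(void)
{
	/* e.g. reserve + headroom = 192 bytes, aligned up to 256 */
	printf("%u\n", (unsigned)align_ceil(192, 256));	/* prints 256 */
	return 0;
}
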
Code example #3
File: dpaa2_hw_dpbp.c  Project: Leon555/dpdk
int
dpaa2_create_dpbp_device(
		int dpbp_id)
{
	struct dpaa2_dpbp_dev *dpbp_node;
	int ret;

	if (!dpbp_dev_list) {
		dpbp_dev_list = malloc(sizeof(struct dpbp_device_list));
		if (!dpbp_dev_list) {
			PMD_INIT_LOG(ERR, "Memory alloc failed in DPBP list\n");
			return -1;
		}
		/* Initialize the DPBP List */
		TAILQ_INIT(dpbp_dev_list);
	}

	/* Allocate DPAA2 dpbp handle */
	dpbp_node = (struct dpaa2_dpbp_dev *)
			malloc(sizeof(struct dpaa2_dpbp_dev));
	if (!dpbp_node) {
		PMD_INIT_LOG(ERR, "Memory allocation failed for DPBP Device");
		return -1;
	}

	/* Open the dpbp object */
	dpbp_node->dpbp.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
	ret = dpbp_open(&dpbp_node->dpbp,
			CMD_PRI_LOW, dpbp_id, &dpbp_node->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
			     ret);
		free(dpbp_node);
		return -1;
	}

	/* Clean the device first */
	ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure cleaning dpbp device with"
					" error code %d\n", ret);
		dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
		free(dpbp_node);
		return -1;
	}

	dpbp_node->dpbp_id = dpbp_id;
	rte_atomic16_init(&dpbp_node->in_use);

	TAILQ_INSERT_HEAD(dpbp_dev_list, dpbp_node, next);

	PMD_INIT_LOG(DEBUG, "Buffer pool resource initialized %d", dpbp_id);

	return 0;
}
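
The DPBP handles above are kept on a lazily initialized TAILQ from <sys/queue.h>. Here is a self-contained sketch of the same list pattern; the node type is a toy stand-in, not the driver's structure.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int id;
	TAILQ_ENTRY(node) next;	/* linkage, like the dpbp node's 'next' */
};

TAILQ_HEAD(node_list, node);

int
main(void)
{
	struct node_list head;
	struct node *n;
	int i;

	TAILQ_INIT(&head);	/* mirrors the lazy TAILQ_INIT above */

	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		if (n == NULL)
			return 1;
		n->id = i;
		TAILQ_INSERT_HEAD(&head, n, next);
	}

	TAILQ_FOREACH(n, &head, next)
		printf("node %d\n", n->id);	/* prints 2, 1, 0 */

	while ((n = TAILQ_FIRST(&head)) != NULL) {
		TAILQ_REMOVE(&head, n, next);
		free(n);
	}
	return 0;
}
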
Code example #4
File: vmxnet3_ethdev.c  Project: olgk/dpdk.org
/*
 * Configure device link speed and setup link.
 * Must be called after eth_vmxnet3_dev_init(); otherwise it might fail.
 * It returns 0 on success.
 */
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
	int status, ret;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)
		return ret;

	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(hw->sharedPA));

	/* Activate device by register write */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (status != 0) {
		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
		return -1;
	}

	/* Disable interrupts */
	vmxnet3_disable_intr(hw);

	/*
	 * Load RX queues with blank mbufs and update next2fill index for device
	 * Update RxMode of the device
	 */
	ret = vmxnet3_dev_rxtx_init(dev);
	if (ret != VMXNET3_SUCCESS) {
		PMD_INIT_LOG(ERR, "Device receive init: UNSUCCESSFUL");
		return ret;
	}

	/* Setting proper Rx Mode and issue Rx Mode Update command */
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);

	/*
	 * Don't need to handle events for now
	 */
#if PROCESS_SYS_EVENTS == 1
	events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
	PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
	vmxnet3_process_events(hw);
#endif
	return status;
}
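
The shared-area handoff above writes one 64-bit guest-physical address through two 32-bit registers: DSAL takes the low half and DSAH the high half. A small sketch of that split, assuming VMXNET3_GET_ADDR_LO/HI reduce to plain truncation and a 32-bit shift (a reading of the macro names, not a verified definition):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t addr_lo(uint64_t pa) { return (uint32_t)pa; }
static inline uint32_t addr_hi(uint64_t pa) { return (uint32_t)(pa >> 32); }

int
main(void)
{
	uint64_t shared_pa = 0x0000000123456000ULL;	/* example address only */

	/* The driver writes these two values to DSAL and DSAH respectively. */
	printf("DSAL=0x%08" PRIx32 " DSAH=0x%08" PRIx32 "\n",
	       addr_lo(shared_pa), addr_hi(shared_pa));
	return 0;
}
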
Code example #5
File: vmxnet3_ethdev.c  Project: olgk/dpdk.org
static void
vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
{
	uint32_t val;

	PMD_INIT_LOG(DEBUG,
		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     addr[0], addr[1], addr[2],
		     addr[3], addr[4], addr[5]);

	val = *(const uint32_t *)addr;
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);

	val = (addr[5] << 8) | addr[4];
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
}
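
vmxnet3 programs the MAC as a 32-bit word holding the first four bytes plus a 16-bit value holding the last two. A standalone sketch of that packing, without the register macros:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const uint8_t addr[6] = { 0x00, 0x50, 0x56, 0xaa, 0xbb, 0xcc };
	uint32_t macl, mach;

	/* Low register: bytes 0..3 copied as-is (the driver reads them
	 * through a uint32_t pointer, so the layout follows host memory). */
	memcpy(&macl, addr, 4);

	/* High register: byte 5 in bits 15:8 and byte 4 in bits 7:0,
	 * mirroring the (addr[5] << 8) | addr[4] expression above. */
	mach = ((uint32_t)addr[5] << 8) | addr[4];

	printf("MACL=0x%08" PRIx32 " MACH=0x%04" PRIx32 "\n", macl, mach);
	return 0;
}
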
Code example #6
static int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
			    uint32_t opcode,
			    uint32_t retval,
			    uint8_t *msg,
			    uint16_t msglen)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
	int ret;

	ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
						msg, msglen, NULL);
	if (ret) {
		PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
			     hw->aq.asq_last_status);
	}

	return ret;
}
Code example #7
File: vmxnet3_ethdev.c  Project: olgk/dpdk.org
static int
vmxnet3_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_memzone *mz;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	size_t size;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->nb_rx_queues > UINT8_MAX ||
	    dev->data->nb_tx_queues > UINT8_MAX)
		return -EINVAL;

	size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);

	if (size > UINT16_MAX)
		return -EINVAL;

	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;

	/*
	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
	 * on current socket
	 */
	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
			      "shared", rte_socket_id(), 8);

	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->shared = mz->addr;
	hw->sharedPA = mz->phys_addr;

	/*
	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
	 * on current socket
	 */
	mz = gpa_zone_reserve(dev, size, "queuedesc",
			      rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);

	hw->queueDescPA = mz->phys_addr;
	hw->queue_desc_len = (uint16_t)size;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {

		/* Allocate memory structure for UPT1_RSSConf and configure */
		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
				      rte_socket_id(), RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR,
				     "ERROR: Creating rss_conf structure zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);

		hw->rss_conf = mz->addr;
		hw->rss_confPA = mz->phys_addr;
	}

	return 0;
}
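
vmxnet3_dev_configure() places the Tx queue descriptors first and the Rx queue descriptors immediately after them in one zeroed memzone, deriving rqd_start from tqd_start by pointer arithmetic. A toy sketch of that layout; the struct sizes are made up, only the arithmetic matters.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the two descriptor types. */
struct txq_desc { unsigned char bytes[256]; };
struct rxq_desc { unsigned char bytes[256]; };

int
main(void)
{
	const unsigned int num_tx = 2, num_rx = 2;
	size_t size = num_tx * sizeof(struct txq_desc) +
		      num_rx * sizeof(struct rxq_desc);
	void *zone = calloc(1, size);	/* stands in for the zeroed memzone */
	struct txq_desc *tqd_start;
	struct rxq_desc *rqd_start;

	if (zone == NULL)
		return 1;

	/* Tx descriptors first, Rx descriptors right after them, like
	 * tqd_start / rqd_start above. */
	tqd_start = zone;
	rqd_start = (struct rxq_desc *)(tqd_start + num_tx);

	printf("rqd offset from zone start: %zu bytes\n",
	       (size_t)((char *)rqd_start - (char *)zone));	/* 512 */
	free(zone);
	return 0;
}
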
Code example #8
File: vmxnet3_ethdev.c  Project: olgk/dpdk.org
/*
 * It returns 0 on success.
 */
static int
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	pci_dev = eth_dev->pci_dev;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;

	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;

	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1");
		return -EIO;
	}

	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
		return -EIO;
	}

	/* Getting MAC Address */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr, &mac_lo, 4);
	memcpy(hw->perm_addr + 4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	return 0;
}
Code example #9
File: qat_qp.c  Project: 0day-ci/dpdk
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
	const struct rte_cryptodev_qp_conf *qp_conf,
	int socket_id)
{
	struct qat_qp *qp;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[queue_pair_id] != NULL) {
		ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
		if (ret < 0)
			return ret;
	}

	if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
		(qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
		PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
				qp_conf->nb_descriptors);
		return -EINVAL;
	}

	if (dev->pci_dev->mem_resource[0].addr == NULL) {
		PMD_DRV_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	if (queue_pair_id >=
			(ADF_NUM_SYM_QPS_PER_BUNDLE *
					ADF_NUM_BUNDLES_PER_DEV)) {
		PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
				queue_pair_id);
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc("qat PMD qp metadata",
			sizeof(*qp), RTE_CACHE_LINE_SIZE);
	if (qp == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
	rte_atomic16_init(&qp->inflights16);

	if (qat_tx_queue_create(dev, &(qp->tx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_INIT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	if (qat_rx_queue_create(dev, &(qp->rx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_DRV_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}
	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
	dev->data->queue_pairs[queue_pair_id] = qp;
	return 0;

create_err:
	rte_free(qp);
	return -EFAULT;
}
Code example #10
File: avf_ethdev.c  Project: bisdn/dpdk-dev
static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &avf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avf_recv_pkts;
	eth_dev->tx_pkt_burst = &avf_xmit_pkts;
	eth_dev->tx_pkt_prepare = &avf_prep_pkts;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check if we need a different RX
	 * and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		avf_set_rx_function(eth_dev);
		avf_set_tx_function(eth_dev);
		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->eth_dev = eth_dev;

	if (avf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc(
					"avf_mac",
					ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
					0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
	if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		eth_random_addr(hw->mac.addr);
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   avf_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	avf_enable_irq0(hw);

	return 0;
}
Code example #11
File: virtio_rxtx.c  Project: P4-vSwitch/dpdk
static void
virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
{
	struct rte_mbuf *m;
	int i, nbufs, error, size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Reinitialise since virtio port might have been stopped and restarted
	 */
	memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	/* Chain all the descriptors in the ring with an END */
	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = (uint16_t)(i + 1);
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

	/*
	 * Disable device(host) interrupting guest
	 */
	virtqueue_disable_intr(vq);

	/* Only rx virtqueue needs mbufs to be allocated at initialization */
	if (queue_type == VTNET_RQ) {
		if (vq->mpool == NULL)
			rte_exit(EXIT_FAILURE,
			"Cannot allocate initial mbufs for rx virtqueue");

		/* Allocate blank mbufs for each rx descriptor */
		nbufs = 0;
		error = ENOSPC;
		while (!virtqueue_full(vq)) {
			m = rte_rxmbuf_alloc(vq->mpool);
			if (m == NULL)
				break;

			/******************************************
			*         Enqueue allocated buffers        *
			*******************************************/
			error = virtqueue_enqueue_recv_refill(vq, m);

			if (error) {
				rte_pktmbuf_free(m);
				break;
			}
			nbufs++;
		}

		vq_update_avail_idx(vq);

		PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

		VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
			vq->vq_queue_index);
		VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
			vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	} else if (queue_type == VTNET_TQ) {
Code example #12
File: bnx2x_rxtx.c  Project: 0day-ci/dpdk
int
bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx,
		       uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	uint16_t j, idx;
	const struct rte_memzone *dma;
	struct bnx2x_rx_queue *rxq;
	uint32_t dma_size;
	struct rte_mbuf *mbuf;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
	struct eth_rx_cqe_next_page *nextpg;
	phys_addr_t *rx_bd;
	phys_addr_t busaddr;

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}
	rxq->sc = sc;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);

	rxq->nb_rx_pages = 1;
	while (USABLE_RX_BD(rxq) < nb_desc)
		rxq->nb_rx_pages <<= 1;

	rxq->nb_rx_desc  = TOTAL_RX_BD(rxq);
	sc->rx_ring_size = USABLE_RX_BD(rxq);
	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);

	rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		       "total_bd=%lu, rx_pages=%u, cq_pages=%u",
		       queue_idx, nb_desc, rxq->rx_free_thresh,
		       (unsigned long)USABLE_RX_BD(rxq),
		       (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
		       rxq->nb_cq_pages);

	/* Allocate RX ring hardware descriptors */
	dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
	dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->rx_ring = (uint64_t*)dma->addr;
	memset((void *)rxq->rx_ring, 0, dma_size);

	/* Link the RX chain pages. */
	for (j = 1; j <= rxq->nb_rx_pages; j++) {
		rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2];
		busaddr = rxq->rx_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages);
		*rx_bd = busaddr;
	}

	/* Allocate software ring */
	dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (NULL == rxq->sw_ring) {
		PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Initialize software ring entries */
	rxq->rx_mbuf_alloc = 0;
	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
		mbuf = bnx2x_rxmbuf_alloc(mp);
		if (NULL == mbuf) {
			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
				   (unsigned)rxq->queue_id, idx);
			bnx2x_rx_queue_release(rxq);
			return -ENOMEM;
		}
		rxq->sw_ring[idx] = mbuf;
		rxq->rx_ring[idx] = mbuf->buf_physaddr;
		rxq->rx_mbuf_alloc++;
	}
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->rx_bd_head = 0;
	rxq->rx_bd_tail = rxq->nb_rx_desc;

	/* Allocate CQ chain. */
	dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
	dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "RCQ  alloc failed");
		return -ENOMEM;
	}
	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->cq_ring = (union eth_rx_cqe*)dma->addr;

	/* Link the CQ chain pages. */
	for (j = 1; j <= rxq->nb_cq_pages; j++) {
		nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe;
		busaddr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages);
		nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
	}
	rxq->rx_cq_head = 0;
	rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	if (!sc->rx_queues) sc->rx_queues = dev->data->rx_queues;

	return 0;
}
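
The RX ring above spans several hardware pages; the last two buffer descriptors of each page are repurposed to hold the physical address of the next page, and the (j % nb_rx_pages) term makes the final page wrap back to page 0. A small sketch of just that wrap arithmetic, with an illustrative base address and page size:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096U	/* stands in for BNX2X_PAGE_SIZE */

int
main(void)
{
	const unsigned int nb_pages = 4;
	const uint64_t ring_pa = 0x10000000ULL;	/* example base address */
	unsigned int j;

	/* Page j-1 chains to page (j % nb_pages); the last page wraps to 0. */
	for (j = 1; j <= nb_pages; j++)
		printf("page %u -> next page at 0x%llx (page %u)\n",
		       j - 1,
		       (unsigned long long)(ring_pa + PAGE_SZ * (j % nb_pages)),
		       j % nb_pages);
	return 0;
}
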
Code example #13
File: bnx2x_rxtx.c  Project: 0day-ci/dpdk
int
bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx,
		       uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	uint16_t i;
	unsigned int tsize;
	const struct rte_memzone *tz;
	struct bnx2x_tx_queue *txq;
	struct eth_tx_next_bd *tx_n_bd;
	uint64_t busaddr;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	txq->sc = sc;

	txq->nb_tx_pages = 1;
	while (USABLE_TX_BD(txq) < nb_desc)
		txq->nb_tx_pages <<= 1;

	txq->nb_tx_desc  = TOTAL_TX_BD(txq);
	sc->tx_ring_size = TOTAL_TX_BD(txq);

	txq->tx_free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, tx_pages=%u",
		     queue_idx, nb_desc, txq->tx_free_thresh,
		     (unsigned long)USABLE_TX_BD(txq),
		     (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages);

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
	tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
	if (tz == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}
	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
	txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
	memset(txq->tx_ring, 0, tsize);

	/* Allocate software ring */
	tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
	   txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */

	/* Link TX pages */
	for (i = 1; i <= txq->nb_tx_pages; i++) {
		tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;
		busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
		tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
		/* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
	}

	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->tx_pkt_tail = 0;
	txq->tx_pkt_head = 0;
	txq->tx_bd_tail = 0;
	txq->tx_bd_head = 0;
	txq->nb_tx_avail = txq->nb_tx_desc;
	dev->tx_pkt_burst = bnx2x_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;
	if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues;

	return 0;
}
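
Both bnx2x queue setups size their rings by doubling the page count until the usable descriptor count covers the request, because USABLE_TX_BD/USABLE_RX_BD grow with the page count. A standalone sketch with made-up per-page constants (assuming one descriptor per page is reserved for the next-page pointer):

#include <stdio.h>

#define BD_PER_PAGE	256U	/* stand-in for TOTAL_TX_BD_PER_PAGE */
#define USABLE_PER_PAGE	255U	/* one BD per page assumed reserved for chaining */

int
main(void)
{
	const unsigned int nb_desc = 1024;	/* requested ring size */
	unsigned int nb_pages = 1;

	/* Double until the usable descriptor count covers the request,
	 * like the while loops in the queue setup functions above. */
	while (nb_pages * USABLE_PER_PAGE < nb_desc)
		nb_pages <<= 1;

	printf("nb_desc=%u -> pages=%u usable=%u total=%u\n",
	       nb_desc, nb_pages, nb_pages * USABLE_PER_PAGE,
	       nb_pages * BD_PER_PAGE);	/* pages=8 */
	return 0;
}
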
Code example #14
File: avf_ethdev.c  Project: bisdn/dpdk-dev
static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
	dev_info->hash_key_size = vf->vf_res->rss_key_size;
	dev_info->reta_size = vf->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};
}

static const uint32_t *
avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}

int
avf_dev_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	struct rte_eth_link new_link;
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	/* Only read the link status info stored in the VF; it is updated
	 * when a LINK_CHANGE event is received from the PF via virtchnl.
	 */
	switch (vf->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? ETH_LINK_UP :
					     ETH_LINK_DOWN;
	new_link.link_autoneg = !!(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

	if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
				*(uint64_t *)&dev->data->dev_link,
				*(uint64_t *)&new_link) == 0)
		return -1;

	return 0;
}
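
avf_dev_link_update() publishes the new link state with rte_atomic64_cmpset(): the 64-bit dev_link is replaced only if it still equals the value read beforehand. A generic sketch of that compare-and-swap publish using the GCC/Clang __atomic builtin; it illustrates the pattern, it is not the DPDK helper itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Replace *dst with 'desired' only if it still holds 'expected'. */
static bool
publish_u64(uint64_t *dst, uint64_t expected, uint64_t desired)
{
	return __atomic_compare_exchange_n(dst, &expected, desired, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int
main(void)
{
	uint64_t link = 0;		/* stands in for dev->data->dev_link */
	uint64_t snapshot = link;	/* value read before building the update */
	uint64_t new_link = 0x2500;	/* stands in for the packed new state */

	if (publish_u64(&link, snapshot, new_link))
		printf("link published: 0x%llx\n", (unsigned long long)link);
	else
		printf("link changed underneath us; caller would retry/fail\n");
	return 0;
}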

static void
avf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (vf->promisc_unicast_enabled)
		return;

	ret = avf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = TRUE;
}

static void
avf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (!vf->promisc_unicast_enabled)
		return;

	ret = avf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = FALSE;
}

static void
avf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (vf->promisc_multicast_enabled)
		return;

	ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
	if (!ret)
		vf->promisc_multicast_enabled = TRUE;
}

static void
avf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (!vf->promisc_multicast_enabled)
		return;

	ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
	if (!ret)
		vf->promisc_multicast_enabled = FALSE;
}

static int
avf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	err = avf_add_del_eth_addr(adapter, addr, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to add MAC address");
		return -EIO;
	}

	vf->mac_num++;

	return 0;
}

static void
avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct ether_addr *addr;
	int err;

	addr = &dev->data->mac_addrs[index];

	err = avf_add_del_eth_addr(adapter, addr, FALSE);
	if (err)
		PMD_DRV_LOG(ERR, "fail to delete MAC address");

	vf->mac_num--;
}

static int
avf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	err = avf_add_del_vlan(adapter, vlan_id, on);
	if (err)
		return -EIO;
	return 0;
}

static int
avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	int err;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	/* Vlan stripping setting */
	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.hw_vlan_strip)
			err = avf_enable_vlan_strip(adapter);
		else
			err = avf_disable_vlan_strip(adapter);

		if (err)
			return -EIO;
	}
	return 0;
}

static int
avf_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	uint8_t *lut;
	uint16_t i, idx, shift;
	int ret;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number of hardware can "
			"support (%d)", reta_size, vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc("rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	/* store the old lut table temporarily */
	rte_memcpy(lut, vf->rss_lut, reta_size);

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}

	rte_memcpy(vf->rss_lut, lut, reta_size);
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret) /* revert back */
		rte_memcpy(vf->rss_lut, lut, reta_size);
	rte_free(lut);

	return ret;
}

static int
avf_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	uint16_t i, idx, shift;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number of hardware can "
			"support (%d)", reta_size, vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = vf->rss_lut[i];
	}

	return 0;
}
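
Both RETA handlers above translate a flat LUT index i into a reta_conf[] group (i / RTE_RETA_GROUP_SIZE) and a bit within that group's 64-bit mask (i % RTE_RETA_GROUP_SIZE). A standalone sketch of that index math, assuming the usual group size of 64:

#include <stdint.h>
#include <stdio.h>

#define RETA_GROUP_SIZE 64	/* same value as RTE_RETA_GROUP_SIZE */

int
main(void)
{
	const uint16_t samples[] = { 0, 63, 64, 127, 200 };
	unsigned int k;

	for (k = 0; k < sizeof(samples) / sizeof(samples[0]); k++) {
		uint16_t i = samples[k];
		uint16_t idx = i / RETA_GROUP_SIZE;	/* which reta_conf[] group */
		uint16_t shift = i % RETA_GROUP_SIZE;	/* which bit of its mask */

		printf("lut[%3u] -> reta_conf[%u], mask bit %2u\n", i, idx, shift);
	}
	return 0;
}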

static int
avf_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	/* HENA setting, it is enabled by default, no change */
	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
		PMD_DRV_LOG(ERR, "The size of hash key configured "
			"(%d) doesn't match the size of hardware can "
			"support (%d)", rss_conf->rss_key_len,
			vf->vf_res->rss_key_size);
		return -EINVAL;
	}

	rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);

	return avf_configure_rss_key(adapter);
}

static int
avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	 /* Just set it to default value now. */
	rss_conf->rss_hf = AVF_RSS_OFFLOAD_ALL;

	if (!rss_conf->rss_key)
		return 0;

	rss_conf->rss_key_len = vf->vf_res->rss_key_size;
	rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);

	return 0;
}

static int
avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint32_t frame_size = mtu + AVF_ETH_OVERHEAD;
	int ret = 0;

	if (mtu < ETHER_MIN_MTU || frame_size > AVF_FRAME_SIZE_MAX)
		return -EINVAL;

	/* MTU setting is forbidden while the port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port must be stopped before configuration");
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return ret;
}

static void
avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct ether_addr *perm_addr, *old_addr;
	int ret;

	old_addr = (struct ether_addr *)hw->mac.addr;
	perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	if (is_same_ether_addr(mac_addr, old_addr))
		return;

	/* If the MAC address is configured by host, skip the setting */
	if (is_valid_assigned_ether_addr(perm_addr))
		return;

	ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    old_addr->addr_bytes[0],
			    old_addr->addr_bytes[1],
			    old_addr->addr_bytes[2],
			    old_addr->addr_bytes[3],
			    old_addr->addr_bytes[4],
			    old_addr->addr_bytes[5]);

	ret = avf_add_del_eth_addr(adapter, mac_addr, TRUE);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to add new MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    mac_addr->addr_bytes[0],
			    mac_addr->addr_bytes[1],
			    mac_addr->addr_bytes[2],
			    mac_addr->addr_bytes[3],
			    mac_addr->addr_bytes[4],
			    mac_addr->addr_bytes[5]);

	ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
}

static int
avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct virtchnl_eth_stats *pstats = NULL;
	int ret;

	ret = avf_query_stats(adapter, &pstats);
	if (ret == 0) {
		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
						pstats->rx_broadcast;
		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
						pstats->tx_unicast;
		stats->imissed = pstats->rx_discards;
		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
		stats->ibytes = pstats->rx_bytes;
		stats->obytes = pstats->tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
		return -EIO;
	}
	return 0;
}

static int
avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	uint16_t msix_intr;

	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
	if (msix_intr == AVF_MISC_VEC_ID) {
		PMD_DRV_LOG(INFO, "MISC is also enabled for control");
		AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
			      AVFINT_DYN_CTL01_INTENA_MASK |
			      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	} else {
		AVF_WRITE_REG(hw,
			      AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
			      AVFINT_DYN_CTLN1_INTENA_MASK |
			      AVFINT_DYN_CTLN1_ITR_INDX_MASK);
	}

	AVF_WRITE_FLUSH(hw);

	rte_intr_enable(&pci_dev->intr_handle);

	return 0;
}

static int
avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
	if (msix_intr == AVF_MISC_VEC_ID) {
		PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
		return -EIO;
	}

	AVF_WRITE_REG(hw,
		      AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
		      0);

	AVF_WRITE_FLUSH(hw);
	return 0;
}

static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
	int i, reset;

	for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
		reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
			AVFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
		rte_delay_ms(20);
	}

	if (i >= AVF_RESET_WAIT_CNT)
		return -1;

	return 0;
}
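
avf_check_vf_reset_done() is a bounded poll: read a status field up to AVF_RESET_WAIT_CNT times with a fixed delay and fail if the loop runs out. A generic sketch of that shape; the register read is a hypothetical stand-in and the count/delay are illustrative.

#include <stdio.h>
#include <unistd.h>

#define RESET_WAIT_CNT 10	/* illustrative stand-in for AVF_RESET_WAIT_CNT */

/* Hypothetical stand-in for reading the reset-state register;
 * here it simply reports "done" on the third poll. */
static int
read_reset_state(void)
{
	static int calls;

	return ++calls >= 3;
}

int
main(void)
{
	int i;

	for (i = 0; i < RESET_WAIT_CNT; i++) {
		if (read_reset_state())
			break;
		usleep(20 * 1000);	/* stands in for rte_delay_ms(20) */
	}

	if (i >= RESET_WAIT_CNT) {
		printf("reset did not complete\n");
		return 1;
	}
	printf("reset done after %d polls\n", i + 1);
	return 0;
}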

static int
avf_init_vf(struct rte_eth_dev *dev)
{
	int i, err, bufsz;
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = avf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = avf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	avf_init_adminq_parameter(hw);
	err = avf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}
	if (avf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}
	if (avf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
		goto err_alloc;
	}
	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}
	return 0;
err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	avf_shutdown_adminq(hw);
err:
	return -1;
}
Code example #15
File: enic_ethdev.c  Project: btw616/dpdk
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
	 * ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	device_info->rx_offload_capa = enic->rx_offload_capa;
	device_info->tx_offload_capa = enic->tx_offload_capa;
	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
	device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.rq_desc_count,
		.nb_min = ENIC_MIN_RQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
	};
	device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.wq_desc_count,
		.nb_min = ENIC_MIN_WQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
		.nb_seg_max = ENIC_TX_XMIT_MAX,
		.nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
	};
	device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_RX_BURST,
		.ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
			ENIC_DEFAULT_RX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_RX_RINGS,
	};
	device_info->default_txportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_TX_BURST,
		.ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
			ENIC_DEFAULT_TX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_TX_RINGS,
	};
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ptypes_overlay[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
	    dev->rx_pkt_burst != NULL) {
		struct enic *enic = pmd_priv(dev);
		if (enic->overlay_offload)
			return ptypes_overlay;
		else
			return ptypes;
	}
	return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
				struct ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}

static void debug_log_add_del_addr(struct ether_addr *addr, bool add)
{
	char mac_str[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
	PMD_INIT_LOG(DEBUG, " %s address %s\n",
		     add ? "add" : "remove", mac_str);
}

static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	char mac_str[ETHER_ADDR_FMT_SIZE];
	struct ether_addr *addr;
	uint32_t i, j;
	int ret;

	ENICPMD_FUNC_TRACE();

	/* Validate the given addresses first */
	for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
		addr = &mc_addr_set[i];
		if (!is_multicast_ether_addr(addr) ||
		    is_broadcast_ether_addr(addr)) {
			ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
			PMD_INIT_LOG(ERR, " invalid multicast address %s\n",
				     mac_str);
			return -EINVAL;
		}
	}

	/* Flush all if requested */
	if (nb_mc_addr == 0 || mc_addr_set == NULL) {
		PMD_INIT_LOG(DEBUG, " flush multicast addresses\n");
		for (i = 0; i < enic->mc_count; i++) {
			addr = &enic->mc_addrs[i];
			debug_log_add_del_addr(addr, false);
			ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
			if (ret)
				return ret;
		}
		enic->mc_count = 0;
		return 0;
	}

	if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
		PMD_INIT_LOG(ERR, " too many multicast addresses: max=%d\n",
			     ENIC_MULTICAST_PERFECT_FILTERS);
		return -ENOSPC;
	}
	/*
	 * devcmd is slow, so apply the difference instead of flushing and
	 * adding everything.
	 * 1. Delete addresses on the NIC but not on the host
	 */
	for (i = 0; i < enic->mc_count; i++) {
		addr = &enic->mc_addrs[i];
		for (j = 0; j < nb_mc_addr; j++) {
			if (is_same_ether_addr(addr, &mc_addr_set[j]))
				break;
		}
		if (j < nb_mc_addr)
			continue;
		debug_log_add_del_addr(addr, false);
		ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* 2. Add addresses on the host but not on the NIC */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		for (j = 0; j < enic->mc_count; j++) {
			if (is_same_ether_addr(addr, &enic->mc_addrs[j]))
				break;
		}
		if (j < enic->mc_count)
			continue;
		debug_log_add_del_addr(addr, true);
		ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* Keep a copy so we can flush/apply later on. */
	memcpy(enic->mc_addrs, mc_addr_set,
	       nb_mc_addr * sizeof(struct ether_addr));
	enic->mc_count = nb_mc_addr;
	return 0;
}
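
enicpmd_set_mc_addr_list() applies only the difference between the currently programmed list and the requested one, because each devcmd is slow. A toy sketch of the same two-pass diff over simplified one-byte "addresses" (the real code compares 6-byte MACs and issues vnic_dev_del_addr/vnic_dev_add_addr):

#include <stdbool.h>
#include <stdio.h>

static bool
contains(const unsigned char *set, unsigned int n, unsigned char a)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (set[i] == a)
			return true;
	return false;
}

int
main(void)
{
	const unsigned char on_nic[] = { 1, 2, 3 };	/* currently programmed */
	const unsigned char wanted[] = { 2, 3, 4 };	/* new list from the app */
	unsigned int i;

	/* 1. Delete entries on the NIC that are not in the new list. */
	for (i = 0; i < sizeof(on_nic); i++)
		if (!contains(wanted, sizeof(wanted), on_nic[i]))
			printf("del %d\n", on_nic[i]);	/* del 1 */

	/* 2. Add entries in the new list that are not on the NIC yet. */
	for (i = 0; i < sizeof(wanted); i++)
		if (!contains(on_nic, sizeof(on_nic), wanted[i]))
			printf("add %d\n", wanted[i]);	/* add 4 */

	return 0;
}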

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}

static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64
				      *reta_conf,
				      uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_reta_entry64
				       *reta_conf,
				       uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}
	return enic_set_rss_reta(enic, &rss_cpu);
}

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					 struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
		int i;
		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}

static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id,
				     struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = enic->rx_offload_capa;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id,
				     struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_wq *wq = &enic->wq[tx_queue_id];

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = wq->ring.desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = wq->offloads;
	/* tx_thresh, and all the other fields are not applicable for enic */
}

static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
					    uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
					     uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int udp_tunnel_common_check(struct enic *enic,
				   struct rte_eth_udp_tunnel *tnl)
{
	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
		return -ENOTSUP;
	if (!enic->overlay_offload) {
		PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
			     "supported\n");
		return -ENOTSUP;
	}
	return 0;
}

static int update_vxlan_port(struct enic *enic, uint16_t port)
{
	if (vnic_dev_overlay_offload_cfg(enic->vdev,
					 OVERLAY_CFG_VXLAN_PORT_UPDATE,
					 port)) {
		PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
		return -EINVAL;
	}
	PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
	enic->vxlan_port = port;
	return 0;
}

static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * The NIC has 1 configurable VXLAN port number. "Adding" a new port
	 * number replaces it.
	 */
	if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
		PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, tnl->udp_port);
}

static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * Clear the previously set port number and restore the
	 * hardware default port number. Some drivers disable VXLAN
	 * offloads when there are no configured port numbers. But
	 * enic does not do that as VXLAN is part of overlay offload,
	 * which is tied to inner RSS and TSO.
	 */
	if (tnl->udp_port != enic->vxlan_port) {
		PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
}

static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
				      char *fw_version, size_t fw_size)
{
	struct vnic_devcmd_fw_info *info;
	struct enic *enic;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (fw_version == NULL || fw_size <= 0)
		return -EINVAL;
	enic = pmd_priv(eth_dev);
	ret = vnic_dev_fw_info(enic->vdev, &info);
	if (ret)
		return ret;
	snprintf(fw_version, fw_size, "%s %s",
		 info->fw_version, info->fw_build);
	fw_version[fw_size - 1] = '\0';
	return 0;
}

static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure        = enicpmd_dev_configure,
	.dev_start            = enicpmd_dev_start,
	.dev_stop             = enicpmd_dev_stop,
	.dev_set_link_up      = NULL,
	.dev_set_link_down    = NULL,
	.dev_close            = enicpmd_dev_close,
	.promiscuous_enable   = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable  = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable  = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update          = enicpmd_dev_link_update,
	.stats_get            = enicpmd_dev_stats_get,
	.stats_reset          = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get        = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set              = enicpmd_mtu_set,
	.vlan_filter_set      = NULL,
	.vlan_tpid_set        = NULL,
	.vlan_offload_set     = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start       = enicpmd_dev_rx_queue_start,
	.rx_queue_stop        = enicpmd_dev_rx_queue_stop,
	.tx_queue_start       = enicpmd_dev_tx_queue_start,
	.tx_queue_stop        = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup       = enicpmd_dev_rx_queue_setup,
	.rx_queue_release     = enicpmd_dev_rx_queue_release,
	.rx_queue_count       = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done   = NULL,
	.tx_queue_setup       = enicpmd_dev_tx_queue_setup,
	.tx_queue_release     = enicpmd_dev_tx_queue_release,
	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
	.rxq_info_get         = enicpmd_dev_rxq_info_get,
	.txq_info_get         = enicpmd_dev_txq_info_get,
	.dev_led_on           = NULL,
	.dev_led_off          = NULL,
	.flow_ctrl_get        = NULL,
	.flow_ctrl_set        = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add         = enicpmd_add_mac_addr,
	.mac_addr_remove      = enicpmd_remove_mac_addr,
	.mac_addr_set         = enicpmd_set_mac_addr,
	.set_mc_addr_list     = enicpmd_set_mc_addr_list,
	.filter_ctrl          = enicpmd_dev_filter_ctrl,
	.reta_query           = enicpmd_dev_rss_reta_query,
	.reta_update          = enicpmd_dev_rss_reta_update,
	.rss_hash_conf_get    = enicpmd_dev_rss_hash_conf_get,
	.rss_hash_update      = enicpmd_dev_rss_hash_update,
	.udp_tunnel_port_add  = enicpmd_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del  = enicpmd_dev_udp_tunnel_port_del,
	.fw_version_get       = enicpmd_dev_fw_version_get,
};

static int enic_parse_zero_one(const char *key,
			       const char *value,
			       void *opaque)
{
	struct enic *enic;
	bool b;

	enic = (struct enic *)opaque;
	if (strcmp(value, "0") == 0) {
		b = false;
	} else if (strcmp(value, "1") == 0) {
		b = true;
	} else {
		dev_err(enic, "Invalid value for %s"
			": expected=0|1 given=%s\n", key, value);
		return -EINVAL;
	}
	if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
		enic->disable_overlay = b;
	if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
		enic->enable_avx2_rx = b;
	return 0;
}

static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
				      const char *value,
				      void *opaque)
{
	struct enic *enic;

	enic = (struct enic *)opaque;
	if (strcmp(value, "trunk") == 0) {
		/* Trunk mode: always tag */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
	} else if (strcmp(value, "untag") == 0) {
		/* Untag default VLAN mode: untag if VLAN = default VLAN */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
	} else if (strcmp(value, "priority") == 0) {
		/*
		 * Priority-tag default VLAN mode: priority tag (VLAN header
		 * with ID=0) if VLAN = default
		 */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
	} else if (strcmp(value, "pass") == 0) {
		/* Pass through mode: do not touch tags */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	} else {
		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
			": expected=trunk|untag|priority|pass given=%s\n",
			value);
		return -EINVAL;
	}
	return 0;
}

static int enic_check_devargs(struct rte_eth_dev *dev)
{
	static const char *const valid_keys[] = {
		ENIC_DEVARG_DISABLE_OVERLAY,
		ENIC_DEVARG_ENABLE_AVX2_RX,
		ENIC_DEVARG_IG_VLAN_REWRITE,
		NULL};
	struct enic *enic = pmd_priv(dev);
	struct rte_kvargs *kvlist;

	ENICPMD_FUNC_TRACE();

	enic->disable_overlay = false;
	enic->enable_avx2_rx = false;
	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	if (!dev->device->devargs)
		return 0;
	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;
	if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
			       enic_parse_ig_vlan_rewrite, enic) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}
	rte_kvargs_free(kvlist);
	return 0;
}
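
/*
 * Hedged sketch (not part of the driver): the same rte_kvargs pattern in
 * isolation. The key name "example-flag" and both helpers are hypothetical;
 * the driver itself uses the ENIC_DEVARG_* strings and the handlers above.
 */
#include <errno.h>
#include <string.h>
#include <rte_common.h>
#include <rte_kvargs.h>

static int parse_flag(const char *key __rte_unused, const char *value,
		      void *opaque)
{
	*(int *)opaque = (strcmp(value, "1") == 0);
	return 0;
}

static int parse_example_devargs(const char *args, int *flag)
{
	static const char *const keys[] = { "example-flag", NULL };
	struct rte_kvargs *kvlist;
	int ret;

	kvlist = rte_kvargs_parse(args, keys);
	if (!kvlist)
		return -EINVAL;
	ret = rte_kvargs_process(kvlist, "example-flag", parse_flag, flag);
	rte_kvargs_free(kvlist);
	return ret;
}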

/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	eth_dev->tx_pkt_prepare = &enic_prep_pkts;
	/* Let rte_eth_dev_close() release the port resources */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	err = enic_check_devargs(eth_dev);
	if (err)
		return err;
	return enic_probe(enic);
}

static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
	ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
	ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
コード例 #16
0
ファイル: i40e_fdir.c プロジェクト: RIFTIO/dpdk
/*
 * i40e_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
int
i40e_fdir_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int err = I40E_SUCCESS;
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;

	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return I40E_NOT_SUPPORTED;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
			" num_filters_best_effort = %u.",
			hw->func_caps.fd_filters_guaranteed,
			hw->func_caps.fd_filters_best_effort);

	vsi = pf->fdir.fdir_vsi;
	if (vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return I40E_SUCCESS;
	}
	/* make new FDIR VSI */
	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}
	pf->fdir.fdir_vsi = vsi;

	/* FDIR TX queue setup */
	err = i40e_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = i40e_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = i40e_tx_queue_init(pf->fdir.txq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
		goto fail_mem;
	}

	/* need to switch on the TX queue before dev start */
	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
		goto fail_mem;
	}

	/* Init the rx queue in hardware */
	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
		goto fail_mem;
	}

	/* switch on rx queue */
	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
			eth_dev->driver->pci_drv.name,
			I40E_FDIR_MZ_NAME,
			eth_dev->data->port_id);
	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
				 "flow director program packet.");
		err = I40E_ERR_NO_MEMORY;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);

	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return I40E_SUCCESS;

fail_mem:
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
コード例 #17
0
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);

	pci_dev = eth_dev->pci_dev;

	eth_dev->dev_ops = &eth_em_ops;
	eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
	eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst =
				(eth_rx_burst_t)&eth_em_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;
	adapter->stopped = 0;

	/* For ICH8 support we'll need to map the flash memory BAR */

	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
			em_hw_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
			"failed to init HW",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
		return -(ENODEV);
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
			hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -(ENOMEM);
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
		eth_dev->data->mac_addrs);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&(pci_dev->intr_handle),
		eth_em_interrupt_handler, (void *)eth_dev);

	return (0);
}