Example #1
static void
cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)
{
	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
	uint64_t pkt_in_done = 0;

	PMD_INIT_FUNC_TRACE();

	/* Write the start of the input queue's ring and its size */
	lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
			iq->base_addr_dma);
	lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
	lio_dev_dbg(lio_dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		    iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = rte_read64(iq->inst_cnt_reg);

	/* Clear the count by writing back what we read, but don't
	 * enable data traffic here
	 */
	rte_write64(pkt_in_done, iq->inst_cnt_reg);
}
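Every example on this page enters through PMD_INIT_FUNC_TRACE(). For context, below is a minimal sketch of how such a trace macro is commonly defined in a DPDK poll-mode driver; the RTE_LIBRTE_FOO_DEBUG_INIT guard and the PMD_INIT_LOG helper name are illustrative assumptions, not taken from any of the drivers quoted here.

/* Hypothetical sketch: function-entry tracing is compiled in only when the
 * driver's init-debug option is set, and becomes a no-op otherwise.
 */
#ifdef RTE_LIBRTE_FOO_DEBUG_INIT
#define PMD_INIT_LOG(level, fmt, args...) \
	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
#else
#define PMD_INIT_LOG(level, fmt, args...) do { } while (0)
#define PMD_INIT_FUNC_TRACE() do { } while (0)
#endif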
Example #2
static int
cn23xx_vf_setup_global_input_regs(struct lio_device *lio_dev)
{
	uint64_t q_no;
	uint64_t d64;

	PMD_INIT_FUNC_TRACE();

	if (cn23xx_vf_reset_io_queues(lio_dev,
				      lio_dev->sriov_info.rings_per_vf))
		return -1;

	for (q_no = 0; q_no < (lio_dev->sriov_info.rings_per_vf); q_no++) {
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_DOORBELL(q_no),
				0xFFFFFFFF);

		d64 = lio_read_csr64(lio_dev,
				     CN23XX_SLI_IQ_INSTR_COUNT64(q_no));

		d64 &= 0xEFFFFFFFFFFFFFFFL;

		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_INSTR_COUNT64(q_no),
				d64);

		/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
		 * the Input Queues
		 */
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				CN23XX_PKT_INPUT_CTL_MASK);
	}

	return 0;
}
Example #3
int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * return if SRIOV not enabled, VF number not configured or
	 * no queue assigned.
	 */
	if ((!hw->func_caps.sr_iov_1_1) ||
		(pf->vf_num == 0) ||
		(pf->vf_nb_qps == 0))
		return I40E_SUCCESS;

	/* free memory to store VF structure */
	rte_free(pf->vfs);
	pf->vfs = NULL;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
Example #4
static void
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  uint32_t index, __rte_unused uint32_t pool)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_addrs) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_addrs);
		return;
	}

	/* Adding macaddr even though promiscuous mode is set */
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		DP_INFO(edev, "Port is in promisc mode, yet adding it\n");

	/* Add MAC filters according to the unicast secondary macs */
	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
				   mac_addr->addr_bytes);
	if (rc)
		DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
}
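As a usage note, a callback such as qede_mac_addr_add is normally registered through the driver's eth_dev_ops table. The sketch below shows that wiring under the assumption of an ops table named qede_eth_dev_ops; only the .mac_addr_add field name comes from the rte_ethdev API.

/* Hypothetical registration of the callback above in the dev_ops table. */
static const struct eth_dev_ops qede_eth_dev_ops = {
	.mac_addr_add = qede_mac_addr_add,
	/* ...remaining callbacks omitted... */
};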
Example #5
void
vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
{
	unsigned i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

		if (txq != NULL) {
			txq->stopped = TRUE;
			vmxnet3_dev_tx_queue_reset(txq);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq != NULL) {
			rxq->stopped = TRUE;
			vmxnet3_dev_rx_queue_reset(rxq);
		}
	}
}
Example #6
static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	ret = bnx2x_init(sc);
	if (ret) {
		PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
		return -1;
	}

	if (IS_PF(sc)) {
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
				bnx2x_interrupt_handler, (void *)dev);

		if (rte_intr_enable(&(dev->pci_dev->intr_handle)))
			PMD_DRV_LOG(ERR, "rte_intr_enable failed");
	}

	ret = bnx2x_dev_rx_init(dev);
	if (ret != 0) {
		PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
		return -3;
	}

	/* Print important adapter info for the user. */
	bnx2x_print_adapter_info(sc);

	DELAY_MS(2500);

	return ret;
}
Example #7
static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.jumbo_frame)
		sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
		return -EINVAL;
	}

	sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (sc->num_queues > mp_ncpus) {
		PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
		       sc->num_queues, sc->mtu);

	/* allocate ilt */
	if (bnx2x_alloc_ilt_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
		return -ENXIO;
	}

	/* allocate the host hardware/software hsi structures */
	if (bnx2x_alloc_hsi_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
		bnx2x_free_ilt_mem(sc);
		return -ENXIO;
	}

	if (IS_VF(sc)) {
		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
				  &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
				  RTE_CACHE_LINE_SIZE) != 0)
			return -ENOMEM;

		sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)sc->vf2pf_mbox_mapping.vaddr;
		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
				  &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
				  RTE_CACHE_LINE_SIZE) != 0)
			return -ENOMEM;

		sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)sc->pf2vf_bulletin_mapping.vaddr;

		ret = bnx2x_vf_get_resources(sc, sc->num_queues, sc->num_queues);
		if (ret)
			return ret;
	}

	return 0;
}
Example #8
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already closed.");
		return;
	}

	/* disable interrupts */
	vmxnet3_disable_intr(hw);

	/* quiesce the device first */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");
	hw->adapter_stopped = 0;

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	vmxnet3_dev_atomic_write_link_status(dev, &link);
}
Example #9
static void
avf_dev_stop(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1)
		return;

	avf_stop_queues(dev);

	/* Disable the interrupt for Rx */
	rte_intr_efd_disable(intr_handle);
	/* Rx interrupt vector mapping free */
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	/* remove all mac addrs */
	avf_add_del_all_mac_addr(adapter, FALSE);
	hw->adapter_stopped = 1;
}
Example #10
static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	pci_dev = eth_dev->pci_dev;

	if (adapter->stopped == 0)
		eth_em_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(&(pci_dev->intr_handle));
	rte_intr_callback_unregister(&(pci_dev->intr_handle),
		eth_em_interrupt_handler, (void *)eth_dev);

	return 0;
}
Example #11
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
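	/* The queue base must be naturally aligned to its size: for a
	 * power-of-two queue_size_bytes, any low-order address bit being set
	 * means the ring memory is misaligned.
	 */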
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
Example #12
static void
bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
	bnx2x_set_rx_mode(sc);
}
Example #13
static void
bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_PROMISC;
	bnx2x_set_rx_mode(sc);
}
Example #14
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	struct qat_session *session = session_private;

	int qat_cmd_id;

	PMD_INIT_FUNC_TRACE();

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		goto error_out;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		session = qat_crypto_sym_configure_session_auth(dev, xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
		session = qat_crypto_sym_configure_session_auth(dev, xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		session = qat_crypto_sym_configure_session_auth(dev, xform, session);
		session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		PMD_DRV_LOG(ERR, "Unsupported Service %u", session->qat_cmd);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u", session->qat_cmd);
		goto error_out;
	}
	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}
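For orientation, a rough sketch of how a caller might obtain the session_private object this function expects: the sess_mp mempool mirrors the error_out path above, but the surrounding wrapper code is an assumption rather than the actual QAT dev_ops glue.

/* Hypothetical caller: take an uninitialised session object from the same
 * mempool the error path returns it to, then let the function fill it in.
 */
void *sess_private = NULL;

if (rte_mempool_get(internals->sess_mp, &sess_private) == 0) {
	if (qat_crypto_sym_configure_session(dev, xform, sess_private) == NULL)
		sess_private = NULL; /* already returned to the pool on error */
}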
Example #15
static int
cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues)
{
	uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
	uint64_t d64, q_no;
	int ret_val = 0;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < num_queues; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = lio_read_csr64(lio_dev,
				     CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				d64);
	}

	/* wait until the RST bit is clear or the RST and QUIET bits are set */
	for (q_no = 0; q_no < num_queues; q_no++) {
		volatile uint64_t reg_val;

		reg_val	= lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
				!(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
				loop) {
			reg_val = lio_read_csr64(
					lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop = loop - 1;
		}

		if (loop == 0) {
			lio_dev_err(lio_dev,
				    "clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n",
				    (unsigned long)q_no);
			return -1;
		}

		reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		reg_val = lio_read_csr64(
		    lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(lio_dev,
				    "clearing the reset failed for qno: %lu\n",
				    (unsigned long)q_no);
			ret_val = -1;
		}
	}

	return ret_val;
}
Example #16
/*
 * Reset and stop device.
 */
static void
vmxnet3_dev_close(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	vmxnet3_dev_stop(dev);
	hw->adapter_stopped = 1;
}
Example #17
static void
cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev)
{
	uint32_t reg_val;
	uint32_t q_no;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
		lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
			      0xFFFFFFFF);

		reg_val =
		    lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no));

		reg_val &= 0xEFFFFFFFFFFFFFFFL;

		lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no), reg_val);

		reg_val =
		    lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no));

		/* set IPTR & DPTR */
		reg_val |=
		    (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);

		/* reset BMODE */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Scatter List
		 * reset ROR_P, NSR_P
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Data
		 * reset ROR, NSR
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
			      reg_val);
	}
}
Example #18
/*
 * This function is based on vmxnet3_disable_intr()
 */
static void
vmxnet3_disable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
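	/* Writing 1 to each interrupt mask register (IMR) masks that vector. */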
	for (i = 0; i < VMXNET3_MAX_INTRS; i++)
			VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}
Example #19
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
			int socket_id)
{
	const struct rte_memzone *mz;
	unsigned memzone_flags = 0;
	const struct rte_memseg *ms;

	PMD_INIT_FUNC_TRACE();
	mz = rte_memzone_lookup(queue_name);
	if (mz != 0) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			PMD_DRV_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		PMD_DRV_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %u",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
					queue_name, queue_size, socket_id);
	ms = rte_eal_get_physmem_layout();
	switch (ms[0].hugepage_sz) {
	case RTE_PGSIZE_2M:
		memzone_flags = RTE_MEMZONE_2MB;
		break;
	case RTE_PGSIZE_1G:
		memzone_flags = RTE_MEMZONE_1GB;
		break;
	case RTE_PGSIZE_16M:
		memzone_flags = RTE_MEMZONE_16MB;
		break;
	case RTE_PGSIZE_16G:
		memzone_flags = RTE_MEMZONE_16GB;
		break;
	default:
		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
	}
#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(queue_name, queue_size,
		socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
		memzone_flags, queue_size);
#endif
}
Example #20
static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();

	int old_link_status = dev->data->dev_link.link_status;

	bnx2x_link_update(dev);
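	/* Old rte_eth link_update convention: return 0 when the link status
	 * changed, -1 when it is unchanged.
	 */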

	return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
}
Example #21
static void
cn23xx_vf_free_mbox(struct lio_device *lio_dev)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(lio_dev->mbox[0]);
	lio_dev->mbox[0] = NULL;

	rte_free(lio_dev->mbox);
	lio_dev->mbox = NULL;
}
Example #22
/*
 * Configure device link speed and setup link.
 * Must be called after eth_vmxnet3_dev_init, otherwise it might fail.
 * It returns 0 on success.
 */
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
	int status, ret;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)
		return ret;

	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(hw->sharedPA));

	/* Activate device by register write */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (status != 0) {
		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
		return -1;
	}

	/* Disable interrupts */
	vmxnet3_disable_intr(hw);

	/*
	 * Load RX queues with blank mbufs and update next2fill index for device
	 * Update RxMode of the device
	 */
	ret = vmxnet3_dev_rxtx_init(dev);
	if (ret != VMXNET3_SUCCESS) {
		PMD_INIT_LOG(ERR, "Device receive init: UNSUCCESSFUL");
		return ret;
	}

	/* Setting proper Rx Mode and issue Rx Mode Update command */
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);

	/*
	 * Don't need to handle events for now
	 */
#if PROCESS_SYS_EVENTS == 1
	uint32_t events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
	PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
	vmxnet3_process_events(hw);
#endif
	return status;
}
Example #23
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
							txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
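	/* Toggle this queue's bit in the bundle's ring-arbitration CSR; on a
	 * ring whose arbitration is currently enabled, the XOR clears it.
	 */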
	value ^= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}
Example #24
static int
cn23xx_vf_setup_device_regs(struct lio_device *lio_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (cn23xx_vf_setup_global_input_regs(lio_dev))
		return -1;

	cn23xx_vf_setup_global_output_regs(lio_dev);

	return 0;
}
Example #25
int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret, i;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * return if SRIOV not enabled, VF number not configured or
	 * no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Allocate memory to store VF structure */
	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
	if (pf->vfs == NULL)
		return -ENOMEM;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < pf->vf_num; i++) {
		pf->vfs[i].pf = pf;
		pf->vfs[i].state = I40E_VF_INACTIVE;
		pf->vfs[i].vf_idx = i;
		ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
		if (ret != I40E_SUCCESS)
			goto fail;
		eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
	}

	/* restore irq0 */
	i40e_pf_enable_irq0(hw);

	return I40E_SUCCESS;

fail:
	rte_free(pf->vfs);
	i40e_pf_enable_irq0(hw);

	return ret;
}
Example #26
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
		void *session)
{
	struct qat_session *sess = session;

	PMD_INIT_FUNC_TRACE();
	if (sess != NULL) {
		/* Preserve the content-descriptor physical address across the
		 * clear; only read it once the session is known to be non-NULL.
		 */
		phys_addr_t cd_paddr = sess->cd_paddr;

		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
		sess->cd_paddr = cd_paddr;
	}
}
Example #27
void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
                                 int alg, const uint8_t *key,
                                 unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
	struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	PMD_INIT_FUNC_TRACE();
	qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}
Example #28
static int
cn23xx_vf_enable_io_queues(struct lio_device *lio_dev)
{
	uint32_t q_no;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < lio_dev->num_iqs; q_no++) {
		uint64_t reg_val;

		/* set the corresponding IQ IS_64B bit */
		if (lio_dev->io_qmask.iq64B & (1ULL << q_no)) {
			reg_val = lio_read_csr64(
					lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
			lio_write_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}

		/* set the corresponding IQ ENB bit */
		if (lio_dev->io_qmask.iq & (1ULL << q_no)) {
			reg_val = lio_read_csr64(
					lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
			lio_write_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
	}
	for (q_no = 0; q_no < lio_dev->num_oqs; q_no++) {
		uint32_t reg_val;

		/* set the corresponding OQ ENB bit */
		if (lio_dev->io_qmask.oq & (1ULL << q_no)) {
			reg_val = lio_read_csr(
					lio_dev,
					CN23XX_SLI_OQ_PKT_CONTROL(q_no));
			reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			lio_write_csr(lio_dev,
				      CN23XX_SLI_OQ_PKT_CONTROL(q_no),
				      reg_val);
		}
	}

	return 0;
}
Example #29
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	PMD_INIT_FUNC_TRACE();
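	/* Walk the valid ring-size encodings and pick the one whose size in
	 * bytes matches the requested msg_size * msg_num.
	 */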
	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
	return -EINVAL;
}
Example #30
static int qat_rx_queue_create(struct rte_cryptodev *dev,
		struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
		int socket_id)
{
	PMD_INIT_FUNC_TRACE();
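	/* Map the logical qp_id onto its hardware bundle and onto the RX ring
	 * slot within that bundle.
	 */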
	queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
	queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
						ADF_SYM_RX_QUEUE_STARTOFF;

	PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
		nb_desc, qp_id, queue->hw_bundle_number,
		queue->hw_queue_number);
	return qat_queue_create(dev, queue, nb_desc,
				ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}