Code Example #1
int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * Return if SR-IOV is not enabled, no VFs are configured, or
	 * no queues are assigned.
	 */
	if ((!hw->func_caps.sr_iov_1_1) ||
		(pf->vf_num == 0) ||
		(pf->vf_nb_qps == 0))
		return I40E_SUCCESS;

	/* free memory to store VF structure */
	rte_free(pf->vfs);
	pf->vfs = NULL;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
Code Example #2
int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret, i;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * Return if SR-IOV is not enabled, no VFs are configured, or
	 * no queues are assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Allocate memory to store VF structure */
	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
	if (pf->vfs == NULL)
		return -ENOMEM;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < pf->vf_num; i++) {
		pf->vfs[i].pf = pf;
		pf->vfs[i].state = I40E_VF_INACTIVE;
		pf->vfs[i].vf_idx = i;
		ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
		if (ret != I40E_SUCCESS)
			goto fail;
		eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
	}

	/* restore irq0 */
	i40e_pf_enable_irq0(hw);

	return I40E_SUCCESS;

fail:
	rte_free(pf->vfs);
	pf->vfs = NULL; /* avoid a dangling pointer if uninit runs later */
	i40e_pf_enable_irq0(hw);

	return ret;
}
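
Examples #1 and #2 form a matching pair: i40e_pf_host_init() allocates the per-VF array and resets each VF, and i40e_pf_host_uninit() releases that state. A minimal sketch of how the pair might be wired into the PMD device lifecycle follows; example_dev_init/example_dev_uninit are hypothetical names, and the surrounding hardware setup is elided.

static int
example_dev_init(struct rte_eth_dev *dev)
{
	int ret;

	/* ... adminq and shared-code setup elided ... */

	/* Set up PF host state for SR-IOV; a no-op when SR-IOV is off */
	ret = i40e_pf_host_init(dev);
	if (ret != I40E_SUCCESS)
		return ret; /* e.g. -ENOMEM from the VF array allocation */

	return 0;
}

static int
example_dev_uninit(struct rte_eth_dev *dev)
{
	/* Mirror of init: frees the VF array and masks the VF link-status
	 * interrupt, as shown in example #1 */
	return i40e_pf_host_uninit(dev);
}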
Code Example #3
File: i40e_pf.c  Project: AMildner/MoonGen
/**
 * Bind PF queues with VSI and VF.
 **/
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t vsi_id = vf->vsi->vsi_id;
	uint16_t vf_id  = vf->vf_idx;
	uint16_t nb_qps = vf->vsi->nb_qps;
	uint16_t qbase  = vf->vsi->base_queue;
	uint16_t q1, q2;
	uint32_t val;

	/*
	 * The VF uses the scattered queue range, so there is no need
	 * to set QBASE in this register.
	 */
	I40E_WRITE_REG(hw, I40E_VSILAN_QBASE(vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable VPLAN_MAPENA so the VPLAN_QTABLE[] mappings take effect */
	I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
		I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* map PF queues to VF */
	for (i = 0; i < nb_qps; i++) {
		val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
		I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
	}

	/* map PF queues to VSI */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
		if (2 * i > nb_qps - 1)
			q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q1 = qbase + 2 * i;

		if (2 * i + 1 > nb_qps - 1)
			q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q2 = qbase + 2 * i + 1;

		val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
		I40E_WRITE_REG(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
	}
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
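
The second loop above packs two queue indices into each 32-bit VSILAN_QTABLE register, marking any slot beyond nb_qps as unused. A standalone sketch of just that packing logic follows; QINDEX_1_SHIFT and QINDEX_UNUSED are illustrative stand-ins, not the hardware-defined constants.

#include <stdint.h>
#include <stdio.h>

#define QINDEX_1_SHIFT 16      /* assumed bit position of the second index */
#define QINDEX_UNUSED  0x7FF   /* assumed "no queue" marker */

static uint32_t
pack_qtable_entry(uint16_t qbase, uint16_t nb_qps, int i)
{
	/* Each register holds two queue indices; slots past nb_qps get
	 * the unused marker, mirroring the else-branches above. */
	uint32_t q1 = (2 * i <= nb_qps - 1) ?
			(uint32_t)(qbase + 2 * i) : QINDEX_UNUSED;
	uint32_t q2 = (2 * i + 1 <= nb_qps - 1) ?
			(uint32_t)(qbase + 2 * i + 1) : QINDEX_UNUSED;

	return (q2 << QINDEX_1_SHIFT) | q1;
}

int
main(void)
{
	/* 4 queues starting at base 64: entries 0-1 are used, 2-3 are empty */
	for (int i = 0; i < 4; i++)
		printf("VSILAN_QTABLE[%d] = 0x%08x\n", i,
		       pack_qtable_entry(64, 4, i));
	return 0;
}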
Code Example #4
File: i40e_pf.c  Project: AMildner/MoonGen
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct i40e_virtchnl_txq_info *txq)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_txq tx_ctx;
	uint32_t qtx_ctl;
	uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.new_context = 1;
	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->ring_len;
	tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
	err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;

	err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
	if (err != I40E_SUCCESS)
		return err;

	/* Bind the queue to the VF function; since TX/RX queues come in
	 * pairs, only QTX_CTL needs to be set.
	 */
	qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
				((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
				I40E_QTX_CTL_PF_INDX_MASK) |
				(((vf->vf_idx + hw->func_caps.vf_base_id) <<
				I40E_QTX_CTL_VFVM_INDX_SHIFT) &
				I40E_QTX_CTL_VFVM_INDX_MASK);
	I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
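
The tx_ctx.base assignment above converts the ring's DMA address from bytes into the fixed-size units the HMC context expects. A small sketch of that conversion; the 128-byte unit below is an assumption standing in for I40E_QUEUE_BASE_ADDR_UNIT.

#include <assert.h>
#include <stdint.h>

#define QUEUE_BASE_ADDR_UNIT 128  /* assumed unit size in bytes */

static uint64_t
ring_base_to_hmc_units(uint64_t dma_ring_addr)
{
	/* The ring base must be unit-aligned: the context field stores a
	 * unit count, so any byte remainder would be silently dropped. */
	assert(dma_ring_addr % QUEUE_BASE_ADDR_UNIT == 0);
	return dma_ring_addr / QUEUE_BASE_ADDR_UNIT;
}

int
main(void)
{
	/* a unit-aligned example address: 0x100000 bytes -> 0x2000 units */
	return ring_base_to_hmc_units(0x100000) == 0x2000 ? 0 : 1;
}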
Code Example #5
File: i40e_pf.c  Project: AMildner/MoonGen
/**
 * Perform the VF reset operation.
 */
int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw;
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct i40e_virtchnl_queue_select qsel;

	/* check for NULL before dereferencing vf */
	if (vf == NULL)
		return -EINVAL;

	hw = I40E_PF_TO_HW(vf->pf);
	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify VF that we are in VFR progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);

	/*
	 * Requesting a SW VF reset generates a VFLR interrupt, which would
	 * cause this function to be called again. To avoid that, interrupts
	 * are disabled first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);
	}

#define VFRESET_MAX_WAIT_CNT 100
	/* Wait until VF reset is done */
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(10);
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
		if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "VF reset timeout\n");
		return -ETIMEDOUT;
	}

	/* If this is not the first reset, do the cleanup job first */
	if (vf->vsi) {
		/* Disable queues */
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed\n");
			return -EFAULT;
		}

		/* Disable VF interrupt setting */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
		for (i = 0; i < vf_msix_num; i++) {
			if (!i)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							(vf_id)) + (i - 1));
			I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* remove VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed\n");
			return -EFAULT;
		}
	}

#define I40E_VF_PCI_ADDR  0xAA
#define I40E_VF_PEND_MASK 0x20
	/* Check the pending transactions of this VF */
	/* Use absolute VF id, refer to datasheet for details */
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		(abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(1);
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout\n");
		return -ETIMEDOUT;
	}

	/* Reset is done; set the COMPLETED flag and clear the reset trigger bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
	vf->reset_cnt++;
	I40E_WRITE_FLUSH(hw);

	/* Allocate resource again */
	vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
			vf->pf->main_vsi, vf->vf_idx);
	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add vsi failed\n");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error\n");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	return ret;
}
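
Both wait loops in the reset path follow the same bounded-polling pattern: delay, read a status register, test a mask, and give up after VFRESET_MAX_WAIT_CNT tries. A generic sketch of that pattern; read_reg/delay_us are hypothetical callbacks standing in for I40E_READ_REG and rte_delay_us.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical accessors; the driver uses I40E_READ_REG and rte_delay_us. */
typedef uint32_t (*read_reg_fn)(void *ctx, uint32_t reg);
typedef void (*delay_us_fn)(unsigned int us);

/*
 * Poll `reg` until (val & mask) matches the wanted state or the retry
 * budget runs out. Returns true on success and false on timeout, which
 * the caller would map to -ETIMEDOUT as the reset path above does.
 */
static bool
poll_reg_mask(void *ctx, read_reg_fn read_reg, delay_us_fn delay_us,
	      uint32_t reg, uint32_t mask, bool wait_for_set,
	      unsigned int max_tries, unsigned int step_us)
{
	for (unsigned int i = 0; i < max_tries; i++) {
		delay_us(step_us);
		bool set = (read_reg(ctx, reg) & mask) != 0;
		if (set == wait_for_set)
			return true;
	}
	return false;
}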