Example no. 1
/* check whether the flow director table is empty */
static inline int
i40e_fdir_empty(struct i40e_hw *hw)
{
	uint32_t guarant_cnt, best_cnt;

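	/*
	 * FDSTAT exposes two usage counters: entries allocated from the
	 * guaranteed pool and from the shared best-effort pool. The
	 * table is empty only when both are zero.
	 */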
	guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
				 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
			      I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (best_cnt + guarant_cnt > 0)
		return -1;

	return 0;
}
int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Return if SR-IOV is not enabled, no VFs are configured or
	 * no queues are assigned to them.
	 */
	if (!hw->func_caps.sr_iov_1_1 ||
	    pf->vf_num == 0 ||
	    pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Free the memory used to store the VF structures */
	rte_free(pf->vfs);
	pf->vfs = NULL;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret, i;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Return if SR-IOV is not enabled, no VFs are configured or
	 * no queues are assigned to them.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Allocate memory to store the VF structures */
	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
	if (pf->vfs == NULL)
		return -ENOMEM;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < pf->vf_num; i++) {
		pf->vfs[i].pf = pf;
		pf->vfs[i].state = I40E_VF_INACTIVE;
		pf->vfs[i].vf_idx = i;
		ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
		if (ret != I40E_SUCCESS)
			goto fail;
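		/* Assign a random, locally administered MAC address */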
		eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
	}

	/* restore irq0 */
	i40e_pf_enable_irq0(hw);

	return I40E_SUCCESS;

fail:
	rte_free(pf->vfs);
	pf->vfs = NULL;
	i40e_pf_enable_irq0(hw);

	return ret;
}
/*
 * Configure flow director related settings
 */
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_fdir_flex_conf *conf;
	enum i40e_filter_pctype pctype;
	uint32_t val;
	uint8_t i;
	int ret = 0;

	/*
	 * Configuration must be done before flow director filters are
	 * added. If filters already exist, flush them first.
	 */
	if (i40e_fdir_empty(hw) < 0) {
		ret = i40e_fdir_flush(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
			return ret;
		}
	}

	/* enable FDIR filter */
	val = I40E_READ_REG(hw, I40E_PFQF_CTL_0);
	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val);

	i40e_init_flx_pld(pf); /* set flex config to default value */

	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
	ret = i40e_check_fdir_flex_conf(conf);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, " invalid configuration arguments.");
		return -EINVAL;
	}
	/* configure flex payload */
	for (i = 0; i < conf->nb_payloads; i++)
		i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
	/* configure flex mask */
	for (i = 0; i < conf->nb_flexmasks; i++) {
		pctype = i40e_flowtype_to_pctype(
			conf->flex_mask[i].flow_type);
		i40e_set_flex_mask_on_pctype(pf,
				pctype,
				&conf->flex_mask[i]);
	}

	return ret;
}
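
For context, here is a minimal usage sketch of the application side (an assumption, not driver code): the flex_conf validated above is taken from the port configuration passed to rte_eth_dev_configure(), so an application of this DPDK generation would populate it roughly as follows. setup_fdir_port() and the field values are illustrative only.

#include <string.h>
#include <rte_ethdev.h>

static int
setup_fdir_port(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* Perfect-match filters, 64K packet-buffer allocation */
	conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
	conf.fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;
	conf.fdir_conf.status = RTE_FDIR_REPORT_STATUS;
	/* conf.fdir_conf.flex_conf (nb_payloads, nb_flexmasks, ...)
	 * would be filled in here; i40e_fdir_configure() validates it
	 * via i40e_check_fdir_flex_conf() above. */

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}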
Example no. 5
/**
 * Perform the VF reset operation.
 */
int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw;
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct i40e_virtchnl_queue_select qsel;

	if (vf == NULL)
		return -EINVAL;

	hw = I40E_PF_TO_HW(vf->pf);
	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify the VF that VFR is in progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);

	/*
	 * If a SW VF reset is required, a VFLR interrupt will be
	 * generated and this function will be called again. To avoid
	 * that, the interrupt is disabled first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);
	}

#define VFRESET_MAX_WAIT_CNT 100
	/* Wait until the VF reset is done (up to 100 x 10 us) */
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(10);
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
		if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "VF reset timeout\n");
		return -ETIMEDOUT;
	}

	/* Not the first reset for this VF: do the cleanup first */
	if (vf->vsi) {
		/* Disable queues */
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed\n");
			return -EFAULT;
		}

		/* Disable VF interrupt setting */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
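		/*
		 * Vector 0 has a dedicated register (VFINT_DYN_CTL0);
		 * the remaining vectors of all VFs live in one shared
		 * VFINT_DYN_CTLN array, laid out per VF as
		 * (vf_msix_num - 1) * vf_id + (i - 1). E.g. with 5
		 * vectors per VF, vector 3 of VF 2 is at index
		 * 4 * 2 + 2 = 10.
		 */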
		for (i = 0; i < vf_msix_num; i++) {
			if (!i)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							(vf_id)) + (i - 1));
			I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* remove VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed\n");
			return -EFAULT;
		}
	}

#define I40E_VF_PCI_ADDR  0xAA
#define I40E_VF_PEND_MASK 0x20
	/*
	 * Check the pending transactions of this VF, using the absolute
	 * VF id; refer to the datasheet for details.
	 */
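	/*
	 * CIAA/CIAD give indirect access to the VF's PCI config space:
	 * the target address (with the VF number in the upper bits) is
	 * written to CIAA, the data is read back through CIAD, and its
	 * pending-transactions bit (I40E_VF_PEND_MASK) is tested.
	 */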
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		(abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(1);
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout\n");
		return -ETIMEDOUT;
	}

	/* Reset done: set the COMPLETED flag and clear the reset bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
	vf->reset_cnt++;
	I40E_WRITE_FLUSH(hw);

	/* Allocate resources again */
	vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
			vf->pf->main_vsi, vf->vf_idx);
	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add vsi failed\n");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error\n");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	return ret;
}