/**
 * Bind PF queues with VSI and VF.
 **/
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
	int i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t vsi_id = vf->vsi->vsi_id;
	uint16_t vf_id = vf->vf_idx;
	uint16_t nb_qps = vf->vsi->nb_qps;
	uint16_t qbase = vf->vsi->base_queue;
	uint16_t q1, q2;
	uint32_t val;

	/*
	 * The VF uses a scattered range of queues, so there is no need
	 * to program QBASE in this register.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Set to enable VFLAN_QTABLE[] registers valid */
	I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
		       I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* map PF queues to VF */
	for (i = 0; i < nb_qps; i++) {
		val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
		I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
	}

	/* map PF queues to VSI */
	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
		if (2 * i > nb_qps - 1)
			q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q1 = qbase + 2 * i;

		if (2 * i + 1 > nb_qps - 1)
			q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
		else
			q2 = qbase + 2 * i + 1;

		val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
	}
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
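/*
 * Illustrative note (not part of the upstream driver): each 32-bit
 * VSILAN_QTABLE entry written above carries two PF queue indexes, the
 * even-numbered queue in the QINDEX_0 field and the odd-numbered queue
 * shifted into the QINDEX_1 field; slots beyond nb_qps are filled with
 * I40E_VSILAN_QTABLE_QINDEX_0_MASK to mark them unused. A hypothetical
 * helper sketching that packing, assuming the shift/mask macros from
 * i40e_register.h (i40e_pack_vsilan_qtable() is a made-up name used only
 * for illustration):
 *
 *	static inline uint32_t
 *	i40e_pack_vsilan_qtable(uint16_t q_even, uint16_t q_odd)
 *	{
 *		return ((uint32_t)q_odd << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) |
 *		       (q_even & I40E_VSILAN_QTABLE_QINDEX_0_MASK);
 *	}
 *
 * For example, with qbase = 16 and nb_qps = 4, entry 0 packs queues 16
 * and 17, entry 1 packs queues 18 and 19, and all later entries are
 * marked unused.
 */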
/*
 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
 * @pf: board private structure
 * @pctype: packet classify type
 * @mask_cfg: mask for flexible payload
 */
static void
i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
			     enum i40e_filter_pctype pctype,
			     const struct rte_eth_fdir_flex_mask *mask_cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint16_t mask_tmp;
	uint8_t i, nb_bitmask = 0;

	flex_mask = &pf->fdir.flex_mask[pctype];
	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
		if (mask_tmp != 0x0) {
			flex_mask->word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
			if (mask_tmp != UINT16_MAX) {
				/* set bit mask */
				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
				flex_mask->bitmask[nb_bitmask].offset =
					i / sizeof(uint16_t);
				nb_bitmask++;
			}
		}
	}

	/* write mask to hw */
	flxinset = (flex_mask->word_mask <<
		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		    I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	for (i = 0; i < nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			   I40E_PRTQF_FD_MSK_MASK_MASK;
		fd_mask |= ((flex_mask->bitmask[i].offset +
			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			    I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}
}
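/*
 * Worked example (illustrative, not taken from the upstream sources),
 * assuming I40E_WORD() places the first byte in the high half of the
 * 16-bit word: with a flex mask of { 0xFF, 0x00, 0xFF, 0xFF, 0x00, ... },
 * the first word becomes 0xFF00, so word 0 is enabled in word_mask and,
 * because it is not all-ones, a per-word bit mask of ~0xFF00 = 0x00FF at
 * offset 0 is recorded and later written to PRTQF_FD_MSK. The second
 * word, 0xFFFF, only sets its bit in word_mask (the whole word is
 * compared), and the third word, 0x0000, is skipped entirely (that word
 * is ignored by the filter).
 */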
/*
 * Initialize the configuration of the byte stream extracted as flexible
 * payload, and the associated mask settings.
 */
static inline void
i40e_init_flx_pld(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint8_t pctype;
	int i, index;

	/*
	 * Define the byte stream extracted as flexible payload in the
	 * field vector. By default, select 8 words from the beginning
	 * of the payload as flexible payload.
	 */
	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
		index = i * I40E_MAX_FLXPLD_FIED;
		pf->fdir.flex_set[index].src_offset = 0;
		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
		pf->fdir.flex_set[index].dst_offset = 0;
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29); /* unused */
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A); /* unused */
	}

	/* initialize the masks */
	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype))
			continue;
		pf->fdir.flex_mask[pctype].word_mask = 0;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
			/* clear any previously programmed per-word bit masks */
			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
		}
	}
}
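/*
 * Note on the magic FLX_PIT values above (an illustrative reading, based
 * on the assumption that PRTQF_FLX_PIT packs the source offset in the low
 * bits, the field size above it and the destination offset in the high
 * bits): 0x0000C900 appears to select 8 words starting at payload offset
 * 0, which matches the flex_set defaults programmed just before it, while
 * 0x0000FC29/0x0000FC2A point the two remaining per-layer entries at the
 * non-used destination offset so that only the first entry of each layer
 * is active by default.
 */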
/*
 * Configure flow director related settings.
 */
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_fdir_flex_conf *conf;
	enum i40e_filter_pctype pctype;
	uint32_t val;
	uint8_t i;
	int ret = 0;

	/*
	 * Configuration needs to be done before flow director filters
	 * are added. If filters already exist, flush them.
	 */
	if (i40e_fdir_empty(hw) < 0) {
		ret = i40e_fdir_flush(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
			return ret;
		}
	}

	/* enable FDIR filter */
	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);

	i40e_init_flx_pld(pf); /* set flex config to default value */

	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
	ret = i40e_check_fdir_flex_conf(conf);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "invalid configuration arguments.");
		return -EINVAL;
	}
	/* configure flex payload */
	for (i = 0; i < conf->nb_payloads; i++)
		i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
	/* configure flex mask */
	for (i = 0; i < conf->nb_flexmasks; i++) {
		pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type);
		i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
	}

	return ret;
}
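/*
 * Usage sketch (illustrative only, assuming the legacy rte_eth_fdir API
 * and that port_id, nb_rxq, nb_txq, i and ret are declared elsewhere):
 * an application would typically describe the flexible payload and its
 * mask in the device configuration before configuring the port, e.g.
 *
 *	struct rte_eth_conf conf = { 0 };
 *	struct rte_eth_fdir_flex_conf *flex = &conf.fdir_conf.flex_conf;
 *
 *	conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *	flex->nb_payloads = 1;
 *	flex->flex_set[0].type = RTE_ETH_L4_PAYLOAD;
 *	for (i = 0; i < RTE_ETH_FDIR_MAX_FLEXLEN; i++)
 *		flex->flex_set[0].src_offset[i] = i;
 *	flex->nb_flexmasks = 1;
 *	flex->flex_mask[0].flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	flex->flex_mask[0].mask[0] = 0xFF;
 *	flex->flex_mask[0].mask[1] = 0xFF;
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * i40e_fdir_configure() then picks this up from
 * dev->data->dev_conf.fdir_conf.flex_conf, validates it via
 * i40e_check_fdir_flex_conf() and programs it into the hardware through
 * the helpers above.
 */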