/*
 * i40e_set_flx_pld_cfg - configure how the byte stream is extracted as
 * flexible payload and program the result into the PRTQF_FLX_PIT registers.
 * @pf: board private structure
 * @cfg: the rule describing which payload bytes become flexible payload
 *
 * NOTE(review): an unrecognized cfg->type leaves layer_idx at 0
 * (I40E_FLXPLD_L2_IDX) and silently programs the L2 group — presumably
 * the caller (i40e_check_fdir_flex_conf) has validated the type; confirm.
 */
static void
i40e_set_flx_pld_cfg(struct i40e_pf *pf,
		     const struct rte_eth_flex_payload_cfg *cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
	uint32_t flx_pit;
	uint16_t num, min_next_off;  /* in words */
	uint8_t field_idx = 0;
	uint8_t layer_idx = 0;
	uint16_t i;

	/* map payload type to the per-layer group of FLX_PIT registers */
	if (cfg->type == RTE_ETH_L2_PAYLOAD)
		layer_idx = I40E_FLXPLD_L2_IDX;
	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
		layer_idx = I40E_FLXPLD_L3_IDX;
	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
		layer_idx = I40E_FLXPLD_L4_IDX;

	memset(flex_pit, 0, sizeof(flex_pit));
	/* collapse the byte-granular src_offset array into
	 * (src, size, dst) tuples; num is the tuple count
	 */
	num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);

	for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		/* record the info in fdir structure; the hardware fields
		 * are in word (16-bit) units, hence the divisions
		 */
		pf->fdir.flex_set[field_idx].src_offset =
			flex_pit[i].src_offset / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].size =
			flex_pit[i].size / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].dst_offset =
			flex_pit[i].dst_offset / sizeof(uint16_t);
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				pf->fdir.flex_set[field_idx].size,
				pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
	}

	/* Park the remaining (unused) registers in this layer with strictly
	 * increasing source offsets, as the increment below demonstrates.
	 * NOTE(review): if num == 0, field_idx is still 0 here, so
	 * min_next_off is derived from entry 0 of the L2 group — confirm
	 * callers never pass an empty src_offset configuration.
	 */
	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
				pf->fdir.flex_set[field_idx].size;
	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
			    NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
			flx_pit);
		min_next_off++;
	}
}
/** * Bind PF queues with VSI and VF. **/ static int i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf) { int i; struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); uint16_t vsi_id = vf->vsi->vsi_id; uint16_t vf_id = vf->vf_idx; uint16_t nb_qps = vf->vsi->nb_qps; uint16_t qbase = vf->vsi->base_queue; uint16_t q1, q2; uint32_t val; /* * VF should use scatter range queues. So, it needn't * to set QBASE in this register. */ I40E_WRITE_REG(hw, I40E_VSILAN_QBASE(vsi_id), I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); /* Set to enable VFLAN_QTABLE[] registers valid */ I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id), I40E_VPLAN_MAPENA_TXRX_ENA_MASK); /* map PF queues to VF */ for (i = 0; i < nb_qps; i++) { val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK); I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val); } /* map PF queues to VSI */ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) { if (2 * i > nb_qps - 1) q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK; else q1 = qbase + 2 * i; if (2 * i + 1 > nb_qps - 1) q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK; else q2 = qbase + 2 * i + 1; val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1; I40E_WRITE_REG(hw, I40E_VSILAN_QTABLE(i, vsi_id), val); } I40E_WRITE_FLUSH(hw); return I40E_SUCCESS; }
int i40e_pf_host_uninit(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint32_t val; PMD_INIT_FUNC_TRACE(); /** * return if SRIOV not enabled, VF number not configured or * no queue assigned. */ if ((!hw->func_caps.sr_iov_1_1) || (pf->vf_num == 0) || (pf->vf_nb_qps == 0)) return I40E_SUCCESS; /* free memory to store VF structure */ rte_free(pf->vfs); pf->vfs = NULL; /* Disable irq0 for VFR event */ i40e_pf_disable_irq0(hw); /* Disable VF link status interrupt */ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val); I40E_WRITE_FLUSH(hw); return I40E_SUCCESS; }
/*
 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
 * @pf: board private structure
 * @pctype: packet classify type
 * @mask_cfg: mask for flexible payload (byte-granular)
 *
 * Converts the byte mask to per-word masks, records them in the pf fdir
 * state, and writes them to the FD_FLXINSET / FD_MSK registers.
 */
static void
i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
		enum i40e_filter_pctype pctype,
		const struct rte_eth_fdir_flex_mask *mask_cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint16_t mask_tmp;
	uint8_t i, nb_bitmask = 0;

	flex_mask = &pf->fdir.flex_mask[pctype];
	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	/* Walk the byte mask one 16-bit word at a time.
	 * NOTE(review): assumes I40E_FDIR_MAX_FLEX_LEN is even so that
	 * mask[i + 1] stays in bounds — confirm the constant's value.
	 */
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
		if (mask_tmp != 0x0) {
			/* word participates in matching */
			flex_mask->word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
			if (mask_tmp != UINT16_MAX) {
				/* partially-masked word: record the inverted
				 * bit mask and its word offset.
				 * NOTE(review): nb_bitmask is not bounded here
				 * against the bitmask[] array capacity —
				 * presumably the caller has already validated
				 * the mask (i40e_check_fdir_flex_conf);
				 * confirm before reusing this elsewhere.
				 */
				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
				flex_mask->bitmask[nb_bitmask].offset =
					i / sizeof(uint16_t);
				nb_bitmask++;
			}
		}
	}
	/* write mask to hw */
	flxinset = (flex_mask->word_mask <<
		I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	I40E_WRITE_REG(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	/* one FD_MSK register per partially-masked word */
	for (i = 0; i < nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			I40E_PRTQF_FD_MSK_MASK_MASK;
		fd_mask |= ((flex_mask->bitmask[i].offset +
			I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			I40E_PRTQF_FD_MSK_OFFSET_MASK;
		I40E_WRITE_REG(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}
}
int i40e_pf_host_init(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); int ret, i; uint32_t val; PMD_INIT_FUNC_TRACE(); /** * return if SRIOV not enabled, VF number not configured or * no queue assigned. */ if(!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0) return I40E_SUCCESS; /* Allocate memory to store VF structure */ pf->vfs = rte_zmalloc("i40e_pf_vf",sizeof(*pf->vfs) * pf->vf_num, 0); if(pf->vfs == NULL) return -ENOMEM; /* Disable irq0 for VFR event */ i40e_pf_disable_irq0(hw); /* Disable VF link status interrupt */ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val); I40E_WRITE_FLUSH(hw); for (i = 0; i < pf->vf_num; i++) { pf->vfs[i].pf = pf; pf->vfs[i].state = I40E_VF_INACTIVE; pf->vfs[i].vf_idx = i; ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); if (ret != I40E_SUCCESS) goto fail; eth_random_addr(pf->vfs[i].mac_addr.addr_bytes); } /* restore irq0 */ i40e_pf_enable_irq0(hw); return I40E_SUCCESS; fail: rte_free(pf->vfs); i40e_pf_enable_irq0(hw); return ret; }
/* * Configure flow director related setting */ int i40e_fdir_configure(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_fdir_flex_conf *conf; enum i40e_filter_pctype pctype; uint32_t val; uint8_t i; int ret = 0; /* * configuration need to be done before * flow director filters are added * If filters exist, flush them. */ if (i40e_fdir_empty(hw) < 0) { ret = i40e_fdir_flush(dev); if (ret) { PMD_DRV_LOG(ERR, "failed to flush fdir table."); return ret; } } /* enable FDIR filter */ val = I40E_READ_REG(hw, I40E_PFQF_CTL_0); val |= I40E_PFQF_CTL_0_FD_ENA_MASK; I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val); i40e_init_flx_pld(pf); /* set flex config to default value */ conf = &dev->data->dev_conf.fdir_conf.flex_conf; ret = i40e_check_fdir_flex_conf(conf); if (ret < 0) { PMD_DRV_LOG(ERR, " invalid configuration arguments."); return -EINVAL; } /* configure flex payload */ for (i = 0; i < conf->nb_payloads; i++) i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]); /* configure flex mask*/ for (i = 0; i < conf->nb_flexmasks; i++) { pctype = i40e_flowtype_to_pctype( conf->flex_mask[i].flow_type); i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]); } return ret; }
/*
 * i40e_init_flx_pld - initialize the flexible payload extraction and
 * flex mask settings to their default values.
 * @pf: board private structure
 */
static inline void
i40e_init_flx_pld(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint8_t pctype;
	int i, index;

	/*
	 * Define the bytes stream extracted as flexible payload in
	 * field vector. By default, select 8 words from the beginning
	 * of payload as flexible payload.
	 */
	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
		index = i * I40E_MAX_FLXPLD_FIED;
		pf->fdir.flex_set[index].src_offset = 0;
		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
		pf->fdir.flex_set[index].dst_offset = 0;
		/* NOTE(review): magic register values — presumably
		 * 0x0000C900 encodes the default 8-word extraction and
		 * the FC29/FC2A values park the two unused entries with
		 * increasing offsets; confirm against the MK_FLX_PIT
		 * encoding / datasheet before changing.
		 */
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
	}

	/* initialize the masks: clear both the software record and the
	 * hardware FLXINSET / FD_MSK registers for every valid pctype
	 */
	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype))
			continue;
		pf->fdir.flex_mask[pctype].word_mask = 0;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
		}
	}
}
static int i40e_pf_host_hmc_config_txq(struct i40e_hw *hw, struct i40e_pf_vf *vf, struct i40e_virtchnl_txq_info *txq) { int err = I40E_SUCCESS; struct i40e_hmc_obj_txq tx_ctx; uint32_t qtx_ctl; uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id; /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(tx_ctx)); tx_ctx.new_context = 1; tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT; tx_ctx.qlen = txq->ring_len; tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]); err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id); if (err != I40E_SUCCESS) return err; err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx); if (err != I40E_SUCCESS) return err; /* bind queue with VF function, since TX/QX will appear in pair, * so only has QTX_CTL to set. */ qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) | ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK) | (((vf->vf_idx + hw->func_caps.vf_base_id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & I40E_QTX_CTL_VFVM_INDX_MASK); I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl); I40E_WRITE_FLUSH(hw); return I40E_SUCCESS; }
/** * Proceed VF reset operation. */ int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) { uint32_t val, i; struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); uint16_t vf_id, abs_vf_id, vf_msix_num; int ret; struct i40e_virtchnl_queue_select qsel; if (vf == NULL) return -EINVAL; vf_id = vf->vf_idx; abs_vf_id = vf_id + hw->func_caps.vf_base_id; /* Notify VF that we are in VFR progress */ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS); /* * If require a SW VF reset, a VFLR interrupt will be generated, * this function will be called again. To avoid it, * disable interrupt first. */ if (do_hw_reset) { vf->state = I40E_VF_INRESET; val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id)); val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val); I40E_WRITE_FLUSH(hw); } #define VFRESET_MAX_WAIT_CNT 100 /* Wait until VF reset is done */ for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) { rte_delay_us(10); val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id)); if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK) break; } if (i >= VFRESET_MAX_WAIT_CNT) { PMD_DRV_LOG(ERR, "VF reset timeout\n"); return -ETIMEDOUT; } /* This is not first time to do reset, do cleanup job first */ if (vf->vsi) { /* Disable queues */ memset(&qsel, 0, sizeof(qsel)); for (i = 0; i < vf->vsi->nb_qps; i++) qsel.rx_queues |= 1 << i; qsel.tx_queues = qsel.rx_queues; ret = i40e_pf_host_switch_queues(vf, &qsel, false); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Disable VF queues failed\n"); return -EFAULT; } /* Disable VF interrupt setting */ vf_msix_num = hw->func_caps.num_msix_vectors_vf; for (i = 0; i < vf_msix_num; i++) { if (!i) val = I40E_VFINT_DYN_CTL0(vf_id); else val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) * (vf_id)) + (i - 1)); I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); } I40E_WRITE_FLUSH(hw); /* remove VSI */ ret = i40e_vsi_release(vf->vsi); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Release VSI failed\n"); return -EFAULT; } } #define 
I40E_VF_PCI_ADDR 0xAA #define I40E_VF_PEND_MASK 0x20 /* Check the pending transactions of this VF */ /* Use absolute VF id, refer to datasheet for details */ I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR | (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) { rte_delay_us(1); val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD); if ((val & I40E_VF_PEND_MASK) == 0) break; } if (i >= VFRESET_MAX_WAIT_CNT) { PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout\n"); return -ETIMEDOUT; } /* Reset done, Set COMPLETE flag and clear reset bit */ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED); val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id)); val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val); vf->reset_cnt++; I40E_WRITE_FLUSH(hw); /* Allocate resource again */ vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV, vf->pf->main_vsi, vf->vf_idx); if (vf->vsi == NULL) { PMD_DRV_LOG(ERR, "Add vsi failed\n"); return -EFAULT; } ret = i40e_pf_vf_queues_mapping(vf); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "queue mapping error\n"); i40e_vsi_release(vf->vsi); return -EFAULT; } return ret; }