/*
 * i40e_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
int
i40e_fdir_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int err = I40E_SUCCESS;
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;

	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return I40E_NOT_SUPPORTED;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
			" num_filters_best_effort = %u.",
			hw->func_caps.fd_filters_guaranteed,
			hw->func_caps.fd_filters_best_effort);

	vsi = pf->fdir.fdir_vsi;
	if (vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return I40E_SUCCESS;
	}

	/* make a new FDIR VSI */
	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}
	pf->fdir.fdir_vsi = vsi;

	/* FDIR TX queue setup */
	err = i40e_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = i40e_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = i40e_tx_queue_init(pf->fdir.txq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
		goto fail_mem;
	}

	/* need to switch the TX queue on before dev start */
	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR TX switch on.");
		goto fail_mem;
	}

	/* Init the RX queue in hardware */
	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
		goto fail_mem;
	}

	/* switch on the RX queue */
	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
		goto fail_mem;
	}

	/* reserve memory for the FDIR programming packet */
	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
			eth_dev->driver->pci_drv.name,
			I40E_FDIR_MZ_NAME,
			eth_dev->data->port_id);
	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
				"flow director program packet.");
		err = I40E_ERR_NO_MEMORY;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return I40E_SUCCESS;

fail_mem:
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
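/*
 * A minimal caller sketch (illustration only, not part of this file): how a
 * driver init path might invoke i40e_fdir_setup(). The wrapper name
 * example_enable_fdir and the fdir_conf mode check are assumptions; only
 * i40e_fdir_setup() and the I40E_DEV_PRIVATE_TO_PF() macro come from the
 * i40e PMD itself.
 */
static int
example_enable_fdir(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* Only reserve FDIR resources when flow director is configured. */
	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
		return I40E_SUCCESS;

	return i40e_fdir_setup(pf);
}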
/**
 * Perform the VF reset operation.
 */
int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw;
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct i40e_virtchnl_queue_select qsel;

	if (vf == NULL)
		return -EINVAL;

	/* Only dereference vf after the NULL check above */
	hw = I40E_PF_TO_HW(vf->pf);
	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify the VF that VFR is in progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);

	/*
	 * If a SW VF reset is required, a VFLR interrupt will be generated
	 * and this function will be called again. To avoid that, disable
	 * the interrupt first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);
	}

#define VFRESET_MAX_WAIT_CNT 100
	/* Wait until the VF reset is done */
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(10);
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
		if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "VF reset timeout");
		return -ETIMEDOUT;
	}

	/* If this is not the first reset, do the cleanup job first */
	if (vf->vsi) {
		/* Disable queues */
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed");
			return -EFAULT;
		}

		/* Disable VF interrupt setting */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
		for (i = 0; i < vf_msix_num; i++) {
			/* val holds the register offset here */
			if (!i)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							vf_id) + (i - 1));
			I40E_WRITE_REG(hw, val,
				       I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* remove the VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed");
			return -EFAULT;
		}
	}

#define I40E_VF_PCI_ADDR 0xAA
#define I40E_VF_PEND_MASK 0x20
	/*
	 * Check the pending transactions of this VF.
	 * Use the absolute VF id; refer to the datasheet for details.
	 */
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		       (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(1);
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
		return -ETIMEDOUT;
	}

	/* Reset done, set the COMPLETE flag and clear the reset bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
	vf->reset_cnt++;
	I40E_WRITE_FLUSH(hw);

	/* Allocate resources again */
	vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
				 vf->pf->main_vsi, vf->vf_idx);
	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add VSI failed");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	return ret;
}
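/*
 * A minimal caller sketch (assumption, not from this file): how the PF-side
 * mailbox handler might trigger this reset when a VF requests one over
 * virtchnl (e.g. I40E_VIRTCHNL_OP_RESET_VF). The wrapper name
 * example_handle_reset_vf is hypothetical; passing do_hw_reset = true asks
 * i40e_pf_host_vf_reset() to trigger the SW reset via VPGEN_VFRTRIG itself
 * rather than assume one is already in flight.
 */
static int
example_handle_reset_vf(struct i40e_pf_vf *vf)
{
	/* No response is sent back; the VF polls VFGEN_RSTAT for completion */
	return i40e_pf_host_vf_reset(vf, true);
}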