int i40e_pf_host_uninit(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint32_t val; PMD_INIT_FUNC_TRACE(); /** * return if SRIOV not enabled, VF number not configured or * no queue assigned. */ if ((!hw->func_caps.sr_iov_1_1) || (pf->vf_num == 0) || (pf->vf_nb_qps == 0)) return I40E_SUCCESS; /* free memory to store VF structure */ rte_free(pf->vfs); pf->vfs = NULL; /* Disable irq0 for VFR event */ i40e_pf_disable_irq0(hw); /* Disable VF link status interrupt */ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val); I40E_WRITE_FLUSH(hw); return I40E_SUCCESS; }
static int rte_pmd_i40e_get_vf_native_stats(uint16_t port, uint16_t vf_id, struct i40e_eth_stats *stats) { struct rte_eth_dev *dev; struct i40e_pf *pf; struct i40e_vsi *vsi; RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); dev = &rte_eth_devices[port]; if (!is_i40e_supported(dev)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); if (vf_id >= pf->vf_num || !pf->vfs) { PMD_DRV_LOG(ERR, "Invalid VF ID."); return -EINVAL; } vsi = pf->vfs[vf_id].vsi; if (!vsi) { PMD_DRV_LOG(ERR, "Invalid VSI."); return -EINVAL; } i40e_update_vsi_stats(vsi); memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats)); return 0; }
int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params) { struct i40e_vf_representor *representor = ethdev->data->dev_private; struct i40e_pf *pf; struct i40e_pf_vf *vf; struct rte_eth_link *link; representor->vf_id = ((struct i40e_vf_representor *)init_params)->vf_id; representor->switch_domain_id = ((struct i40e_vf_representor *)init_params)->switch_domain_id; representor->adapter = ((struct i40e_vf_representor *)init_params)->adapter; pf = I40E_DEV_PRIVATE_TO_PF( representor->adapter->eth_dev->data->dev_private); if (representor->vf_id >= pf->vf_num) return -ENODEV; /* Set representor device ops */ ethdev->dev_ops = &i40e_representor_dev_ops; /* No data-path, but need stub Rx/Tx functions to avoid crash * when testing with the likes of testpmd. */ ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst; ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst; vf = &pf->vfs[representor->vf_id]; if (!vf->vsi) { PMD_DRV_LOG(ERR, "Invalid VSI."); return -ENODEV; } ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; ethdev->data->representor_id = representor->vf_id; /* Setting the number queues allocated to the VF */ ethdev->data->nb_rx_queues = vf->vsi->nb_qps; ethdev->data->nb_tx_queues = vf->vsi->nb_qps; ethdev->data->mac_addrs = &vf->mac_addr; /* Link state. Inherited from PF */ link = &representor->adapter->eth_dev->data->dev_link; ethdev->data->dev_link.link_speed = link->link_speed; ethdev->data->dev_link.link_duplex = link->link_duplex; ethdev->data->dev_link.link_status = link->link_status; ethdev->data->dev_link.link_autoneg = link->link_autoneg; return 0; }
int i40e_pf_host_init(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_PF_TO_HW(pf); int ret, i; uint32_t val; PMD_INIT_FUNC_TRACE(); /** * return if SRIOV not enabled, VF number not configured or * no queue assigned. */ if(!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0) return I40E_SUCCESS; /* Allocate memory to store VF structure */ pf->vfs = rte_zmalloc("i40e_pf_vf",sizeof(*pf->vfs) * pf->vf_num, 0); if(pf->vfs == NULL) return -ENOMEM; /* Disable irq0 for VFR event */ i40e_pf_disable_irq0(hw); /* Disable VF link status interrupt */ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val); I40E_WRITE_FLUSH(hw); for (i = 0; i < pf->vf_num; i++) { pf->vfs[i].pf = pf; pf->vfs[i].state = I40E_VF_INACTIVE; pf->vfs[i].vf_idx = i; ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); if (ret != I40E_SUCCESS) goto fail; eth_random_addr(pf->vfs[i].mac_addr.addr_bytes); } /* restore irq0 */ i40e_pf_enable_irq0(hw); return I40E_SUCCESS; fail: rte_free(pf->vfs); i40e_pf_enable_irq0(hw); return ret; }
static int i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask) { struct i40e_vf_representor *representor = ethdev->data->dev_private; struct rte_eth_dev *pdev; struct i40e_pf_vf *vf; struct i40e_vsi *vsi; struct i40e_pf *pf; uint32_t vfid; pdev = representor->adapter->eth_dev; vfid = representor->vf_id; if (!is_i40e_supported(pdev)) { PMD_DRV_LOG(ERR, "Invalid PF dev."); return -EINVAL; } pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private); if (vfid >= pf->vf_num || !pf->vfs) { PMD_DRV_LOG(ERR, "Invalid VF ID."); return -EINVAL; } vf = &pf->vfs[vfid]; vsi = vf->vsi; if (!vsi) { PMD_DRV_LOG(ERR, "Invalid VSI."); return -EINVAL; } if (mask & ETH_VLAN_FILTER_MASK) { /* Enable or disable VLAN filtering offload */ if (ethdev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) return i40e_vsi_config_vlan_filter(vsi, TRUE); else return i40e_vsi_config_vlan_filter(vsi, FALSE); } if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping offload */ if (ethdev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) return i40e_vsi_config_vlan_stripping(vsi, TRUE); else return i40e_vsi_config_vlan_stripping(vsi, FALSE); } return -EINVAL; }
/* * Configure flow director related setting */ int i40e_fdir_configure(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_fdir_flex_conf *conf; enum i40e_filter_pctype pctype; uint32_t val; uint8_t i; int ret = 0; /* * configuration need to be done before * flow director filters are added * If filters exist, flush them. */ if (i40e_fdir_empty(hw) < 0) { ret = i40e_fdir_flush(dev); if (ret) { PMD_DRV_LOG(ERR, "failed to flush fdir table."); return ret; } } /* enable FDIR filter */ val = I40E_READ_REG(hw, I40E_PFQF_CTL_0); val |= I40E_PFQF_CTL_0_FD_ENA_MASK; I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val); i40e_init_flx_pld(pf); /* set flex config to default value */ conf = &dev->data->dev_conf.fdir_conf.flex_conf; ret = i40e_check_fdir_flex_conf(conf); if (ret < 0) { PMD_DRV_LOG(ERR, " invalid configuration arguments."); return -EINVAL; } /* configure flex payload */ for (i = 0; i < conf->nb_payloads; i++) i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]); /* configure flex mask*/ for (i = 0; i < conf->nb_flexmasks; i++) { pctype = i40e_flowtype_to_pctype( conf->flex_mask[i].flow_type); i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]); } return ret; }
/**
 * Dispatch a virtchnl message received from a VF.
 *
 * AdminQ delivers the absolute VF id; it is converted to the PF-local
 * index before lookup. Unknown or unsupported opcodes are answered with
 * I40E_ERR_PARAM.
 *
 * Fix: the original dereferenced dev->data in the variable initializers
 * and only afterwards checked "!dev", so the NULL guard could never
 * fire before a crash. The guard now runs first. Also, the range check
 * "vf_id > pf->vf_num - 1" is rewritten as "vf_id >= pf->vf_num" to
 * avoid the subtle integer-promotion reasoning when vf_num == 0, and the
 * intentional OP_FCOE fall-through into default is marked explicitly.
 *
 * @param dev        PF ethdev the message arrived on (must be non-NULL).
 * @param abs_vf_id  Absolute VF id as reported by AdminQ.
 * @param opcode     Virtchnl operation code.
 * @param retval     Unused.
 * @param msg        Message payload.
 * @param msglen     Payload length in bytes.
 */
void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
			   uint16_t abs_vf_id, uint32_t opcode,
			   __rte_unused uint32_t retval,
			   uint8_t *msg, uint16_t msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	struct i40e_pf_vf *vf;
	uint16_t vf_id;

	/* Validate dev BEFORE any dereference. */
	if (!dev) {
		PMD_DRV_LOG(ERR, "invalid argument\n");
		return;
	}

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* AdminQ will pass absolute VF id, transfer to internal vf id */
	vf_id = abs_vf_id - hw->func_caps.vf_base_id;

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "invalid argument\n");
		return;
	}

	vf = &pf->vfs[vf_id];
	if (!vf->vsi) {
		PMD_DRV_LOG(ERR, "NO VSI associated with VF found\n");
		i40e_pf_host_send_msg_to_vf(vf, opcode,
					    I40E_ERR_NO_AVAILABLE_VSI,
					    NULL, 0);
		return;
	}

	switch (opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		PMD_DRV_LOG(INFO, "OP_VERSION received\n");
		i40e_pf_host_process_cmd_version(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		PMD_DRV_LOG(INFO, "OP_RESET_VF received\n");
		i40e_pf_host_process_cmd_reset_vf(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received\n");
		i40e_pf_host_process_cmd_get_vf_resource(vf);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received\n");
		i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received\n");
		i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received\n");
		i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received\n");
		i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received\n");
		i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received\n");
		i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		PMD_DRV_LOG(INFO, "OP_ADD_VLAN received\n");
		i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		PMD_DRV_LOG(INFO, "OP_DEL_VLAN received\n");
		i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received\n");
		i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		PMD_DRV_LOG(INFO, "OP_GET_STATS received\n");
		i40e_pf_host_process_cmd_get_stats(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_LINK_STAT:
		PMD_DRV_LOG(INFO, "OP_GET_LINK_STAT received\n");
		i40e_pf_host_process_cmd_get_link_status(vf);
		break;
	case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received\n");
		i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received\n");
		i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
		break;
	/* Don't add command supported below, which will
	 * return an error code.
	 */
	case I40E_VIRTCHNL_OP_FCOE:
		PMD_DRV_LOG(ERR, "OP_FCOE received, not supported\n");
		/* fall through: report I40E_ERR_PARAM back to the VF */
	default:
		PMD_DRV_LOG(ERR, "%u received, not supported\n", opcode);
		i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
					    NULL, 0);
		break;
	}
}