/* pci_driver->sriov_configure callback for bnxt: enable or disable SR-IOV
 * VFs via the "sriov_numvfs" sysfs attribute.
 *
 * @pdev:    PF PCI device.
 * @num_vfs: requested VF count; 0 means disable SR-IOV.
 *
 * Returns the number of VFs actually enabled (0 on refusal or disable).
 * Sets bp->sriov_cfg under rtnl_lock so other paths can detect an SR-IOV
 * reconfiguration in progress; waiters on bp->sriov_cfg_wait are woken
 * when the flag is cleared.
 */
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	/* VF interrupts require MSI-X; refuse otherwise. */
	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	/* The netdev must be up; the running check and the setting of
	 * sriov_cfg are both done under rtnl_lock so they are atomic with
	 * respect to ifup/ifdown.
	 */
	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	/* Cannot reconfigure while any VF is passed through to a VM. */
	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	/* bnxt_sriov_enable() may lower num_vfs to what HW resources allow. */
	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	/* Clear the in-progress flag and wake anyone waiting on it. */
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}
/* Tear down SR-IOV on an NFP PF.
 *
 * Refuses (-EPERM) while any VF is still assigned to a guest, since
 * disabling SR-IOV underneath an assigned VF would break that guest;
 * the hardware is left enabled but the VFs are not deallocated.
 * Compiled to a no-op returning 0 when CONFIG_PCI_IOV is not set.
 */
static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
	struct nfp_pf *pf = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Disabling while VFs assigned - VFs will not be deallocated\n");
		return -EPERM;
	}

	/* Record zero VFs before actually turning SR-IOV off. */
	pf->num_vfs = 0;

	pci_disable_sriov(pdev);
	dev_dbg(&pdev->dev, "Removed VFs.\n");
#endif
	return 0;
}
/* Final SR-IOV teardown for an EF10 PF being removed.
 *
 * If no per-VF state was ever allocated (nic_data->vf == NULL), only
 * disables SR-IOV for any orphaned, un-assigned VFs and returns.  When
 * the kcompat pci_vfs_assigned() support is absent, the guard condition
 * is compiled out and pci_disable_sriov() runs unconditionally.
 *
 * Otherwise each host-side VF netdev is detached, stopped (under rtnl),
 * its filter table removed (under its filter_sem), its vadaptor freed,
 * and its driver remove() invoked — in that order — before the PF-level
 * SR-IOV disable.  The disable is forced (force=true); failure is only
 * logged.  No-op without CONFIG_SFC_SRIOV.
 */
void efx_ef10_sriov_fini(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int i;
	int rc;

	if (!nic_data->vf) {
		/* Remove any un-assigned orphaned VFs */
#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED)
		/* NOTE: this 'if' guards the pci_disable_sriov() below only
		 * when the kcompat check is compiled in.
		 */
		if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
#endif
			pci_disable_sriov(efx->pci_dev);
		return;
	}

	/* Remove any VFs in the host */
	for (i = 0; i < efx->vf_count; ++i) {
		struct efx_nic *vf_efx = nic_data->vf[i].efx;

		if (vf_efx) {
			efx_device_detach_sync(vf_efx);
			rtnl_lock();
			efx_net_stop(vf_efx->net_dev);
			rtnl_unlock();
			down_write(&vf_efx->filter_sem);
			vf_efx->type->filter_table_remove(vf_efx);
			up_write(&vf_efx->filter_sem);
			efx_ef10_vadaptor_free(vf_efx, EVB_PORT_ID_ASSIGNED);
			vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
		}
	}

	rc = efx_ef10_pci_sriov_disable(efx, true);
	if (rc)
		netif_dbg(efx, drv, efx->net_dev,
			  "Disabling SRIOV was not successful rc=%d\n", rc);
	else
		netif_dbg(efx, drv, efx->net_dev, "SRIOV disabled\n");
#endif
}
static int nitrox_sriov_disable(struct pci_dev *pdev) { struct nitrox_device *ndev = pci_get_drvdata(pdev); if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags)) return 0; if (pci_vfs_assigned(pdev)) { dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n"); return -EPERM; } pci_disable_sriov(pdev); /* clear bit in flags */ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags); ndev->num_vfs = 0; ndev->mode = __NDEV_MODE_PF; config_nps_core_vfcfg_mode(ndev, ndev->mode); return pf_sriov_init(ndev); }
/* Release all SR-IOV VFs on a bnxt PF.
 *
 * If any VF is assigned to a VM, SR-IOV is left enabled in the PCI core
 * (only a warning is printed); otherwise SR-IOV is disabled and the HW
 * resources reserved for the VFs are returned to firmware.  In either
 * case the driver-side VF bookkeeping is freed, the active VF count is
 * zeroed, and the PF re-queries its capabilities to reclaim resources.
 */
void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 nr_vfs = pci_num_vf(bp->pdev);

	if (!nr_vfs)
		return;

	if (pci_vfs_assigned(bp->pdev)) {
		/* Guests hold VFs: leave SR-IOV itself untouched. */
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    nr_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, nr_vfs);
	}

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	bnxt_hwrm_func_qcaps(bp);
}
/* Disable PCI SR-IOV on an EF10 PF and free the VF vswitching state.
 *
 * @efx:   the PF NIC.
 * @force: when true, proceed with the software teardown even if VFs are
 *         still assigned to guests (the PCI-level disable is then
 *         skipped); when false, assigned VFs make this fail with -EBUSY.
 *
 * NOTE: vfs_assigned stays 0 when the kcompat pci_vfs_assigned() check
 * is compiled out, so in that configuration pci_disable_sriov() always
 * runs and the @force/-EBUSY path is unreachable.
 */
static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
{
	struct pci_dev *dev = efx->pci_dev;
	unsigned int vfs_assigned = 0;

#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED)
	vfs_assigned = pci_vfs_assigned(dev);

	if (vfs_assigned && !force) {
		netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
			   "please detach them before disabling SR-IOV\n");
		return -EBUSY;
	}
#endif

	/* Only touch the PCI core when no guest owns a VF. */
	if (!vfs_assigned)
		pci_disable_sriov(dev);

	efx_ef10_sriov_free_vf_vswitching(efx);
	efx->vf_count = 0;
	return 0;
}
/* pci_driver->sriov_configure callback for mlx5: enable or disable VFs.
 *
 * @pdev:    PF PCI device.
 * @num_vfs: requested VF count; 0 disables SR-IOV.
 *
 * Returns num_vfs on successful enable, 0 on disable, or a negative
 * errno (-EPERM if called on a non-PF; the mlx5_core_sriov_enable()
 * error otherwise).
 *
 * Disable path: the eswitch SR-IOV mode is turned off and vfs_ctx freed
 * before the PCI-level disable; if VFs are still assigned to guests the
 * PCI disable is skipped and the VFs are left orphaned (logged).
 */
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;

	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
	if (!mlx5_core_is_pf(dev))
		return -EPERM;

	/* Existing VF state is always torn down first, whether this is a
	 * disable or a reconfiguration to a new VF count.
	 */
	mlx5_core_cleanup_vfs(dev);

	if (!num_vfs) {
#ifdef CONFIG_MLX5_CORE_EN
		mlx5_eswitch_disable_sriov(dev->priv.eswitch);
#endif
		kfree(sriov->vfs_ctx);
		sriov->vfs_ctx = NULL;
		if (!pci_vfs_assigned(pdev))
			pci_disable_sriov(pdev);
		else
			pr_info("unloading PF driver while leaving orphan VFs\n");
		return 0;
	}

	err = mlx5_core_sriov_enable(pdev, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err);
		return err;
	}

	/* VF contexts first, then hand the VFs to the eswitch. */
	mlx5_core_init_vfs(dev, num_vfs);
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
#endif

	return num_vfs;
}
/* On top of the default firmware vswitch setup, create a VEB vswitch and
 * expansion vport for use by this function.
 */
/* Probe-time vswitching setup for a PF.
 *
 * When no VFs are possible and the module's vswitch option is off, only a
 * vadaptor on the default vport is allocated.  Otherwise a VEB vswitch,
 * a normal vport, its MAC, and a vadaptor are allocated in order, with a
 * goto ladder unwinding each step on failure (fail4..fail1).  Returns 0
 * or the first failing rc; always 0 without CONFIG_SFC_SRIOV.
 */
int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct net_device *net_dev = efx->net_dev;
	int rc;

#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SRIOV_GET_TOTALVFS)
	if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0 && !enable_vswitch) {
#else
	if (efx->max_vfs <= 0 && !enable_vswitch) {
#endif
		/* vswitch not needed as we have no VFs */
		/* NOTE(review): the rc of this vadaptor_alloc is discarded;
		 * confirm that failure here is intentionally ignored.
		 */
		efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
		return 0;
	}

	rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED,
				    MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
	if (rc)
		goto fail1;

	rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
				  MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
				  EFX_EF10_NO_VLAN, &nic_data->vport_id);
	if (rc)
		goto fail2;
	efx->ef10_resources.vport_id = nic_data->vport_id;

	rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
	if (rc)
		goto fail3;
	/* Remember which MAC was installed so it can be removed later. */
	ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);

	rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
	if (rc)
		goto fail4;

	return 0;

	/* Unwind in reverse order of allocation. */
fail4:
	efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
	eth_zero_addr(nic_data->vport_mac);
fail3:
	efx_ef10_vport_free(efx, nic_data->vport_id);
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
fail2:
	efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
fail1:
	return rc;
#else
	return 0;
#endif
}

/* Probe-time vswitching setup for a VF: just allocate a vadaptor on the
 * vport the PF assigned to it.  Always 0 without CONFIG_SFC_SRIOV.
 */
int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
#else
	return 0;
#endif
}

/* Re-create PF vswitching after an MC reboot/reset, if flagged as needed.
 *
 * must_probe_vswitching is cleared only after both the PF probe and the
 * VF vswitching restore succeed; on success execution falls through the
 * fail label returning rc == 0.
 */
int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (!nic_data->must_probe_vswitching)
		return 0;

	rc = efx_ef10_vswitching_probe_pf(efx);
	if (rc)
		goto fail;

	rc = efx_ef10_sriov_restore_vf_vswitching(efx);
	if (rc)
		goto fail;

	nic_data->must_probe_vswitching = false;
fail:
	return rc;
#else
	return 0;
#endif
}

/* Re-create VF vswitching after a reset: free the stale vadaptor on the
 * assigned port so it can be re-allocated.  Clears must_probe_vswitching
 * on success.
 */
int efx_ef10_vswitching_restore_vf(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (!nic_data->must_probe_vswitching)
		return 0;

	rc = efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
	if (rc)
		return rc;

	nic_data->must_probe_vswitching = false;
#endif
	return 0;
}

/* Remove-time teardown of PF vswitching: free VF vswitching state and the
 * PF vadaptor, then — only if a dedicated vport was created — remove the
 * vport MAC, free the vport, and free the vswitch (the vswitch only when
 * no VFs remain assigned, where that check is available).
 */
void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	efx_ef10_sriov_free_vf_vswitching(efx);

	efx_ef10_vadaptor_free(efx, nic_data->vport_id);

	if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
		return; /* No vswitch was ever created */

	if (!is_zero_ether_addr(nic_data->vport_mac)) {
		/* NOTE(review): the guard tests vport_mac but the deletion
		 * passes dev_addr; if the netdev MAC changed after probe
		 * these may differ — confirm this asymmetry is intended.
		 */
		efx_ef10_vport_del_mac(efx, nic_data->vport_id,
				       efx->net_dev->dev_addr);
		eth_zero_addr(nic_data->vport_mac);
	}
	efx_ef10_vport_free(efx, nic_data->vport_id);
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED)
	/* Only free the vswitch if no VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
#endif
		efx_ef10_vswitch_free(efx, nic_data->vport_id);
#endif
}

/* Remove-time teardown of VF vswitching: free the VF's vadaptor. */
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
	efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
#endif
}