int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        struct pci_dev *pdev = dev->pdev;
        int cur_vfs;

        if (!mlx5_core_is_pf(dev))
                return 0;

        /* nothing to do if the driver state already matches the HW */
        if (!sync_required(pdev))
                return 0;

        cur_vfs = pci_num_vf(pdev);
        sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
        if (!sriov->vfs_ctx)
                return -ENOMEM;

        /* adopt the VFs that are already enabled in hardware */
        sriov->enabled_vfs = cur_vfs;
        mlx5_core_init_vfs(dev, cur_vfs);
#ifdef CONFIG_MLX5_CORE_EN
        if (cur_vfs)
                mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
                                          SRIOV_LEGACY);
#endif
        enable_vfs(dev, cur_vfs);

        return 0;
}
static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
        struct nitrox_device *ndev = pci_get_drvdata(pdev);
        int err;

        if (!num_vfs_valid(num_vfs)) {
                dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
                return -EINVAL;
        }

        if (pci_num_vf(pdev) == num_vfs)
                return num_vfs;

        err = pci_enable_sriov(pdev, num_vfs);
        if (err) {
                dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err);
                return err;
        }
        dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);

        ndev->num_vfs = num_vfs;
        ndev->mode = num_vfs_to_mode(num_vfs);
        /* set bit in flags */
        set_bit(__NDEV_SRIOV_BIT, &ndev->flags);

        /* cleanup PF resources */
        pf_sriov_cleanup(ndev);

        config_nps_core_vfcfg_mode(ndev, ndev->mode);

        return num_vfs;
}
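For context, enable paths like the one above are normally reached through the PCI core's sriov_configure hook, which is driven by writes to the sriov_numvfs sysfs attribute. A minimal sketch of that dispatch, assuming a companion nitrox_sriov_disable() teardown helper that is not shown in these snippets:

/* Sketch: sriov_configure dispatch. nitrox_sriov_disable() is an
 * assumed counterpart to nitrox_sriov_enable() above.
 */
static int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        /* num_vfs == 0 is the PCI core's request to tear SR-IOV down */
        if (!num_vfs)
                return nitrox_sriov_disable(pdev);

        return nitrox_sriov_enable(pdev, num_vfs);
}

static struct pci_driver nitrox_driver = {
        .name            = "nitrox",
        /* ... id_table, probe, remove ... */
        .sriov_configure = nitrox_sriov_configure,
};

With this wiring, "echo 4 > /sys/bus/pci/devices/.../sriov_numvfs" reaches nitrox_sriov_enable(), and writing 0 reaches the disable path.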
static int sync_required(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        int cur_vfs = pci_num_vf(pdev);

        if (cur_vfs != sriov->num_vfs) {
                pr_info("current VFs %d, registered %d - sync needed\n",
                        cur_vfs, sriov->num_vfs);
                return 1;
        }

        return 0;
}
static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err = 0;

        if (pci_num_vf(pdev)) {
                mlx5_core_warn(dev,
                               "Unable to enable pci sriov, already enabled\n");
                return -EBUSY;
        }

        err = pci_enable_sriov(pdev, num_vfs);
        if (err)
                mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);

        return err;
}
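The matching disable helper in this driver family typically reduces to a single PCI-core call; a minimal sketch, assuming no extra per-VF state needs tearing down at this layer:

static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
{
        /* callers are expected to check pci_vfs_assigned() before
         * tearing VFs down, as bnxt_sriov_disable() below does
         */
        pci_disable_sriov(pdev);
}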
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        struct pci_dev *pdev = dev->pdev;
        int total_vfs;

        if (!mlx5_core_is_pf(dev))
                return 0;

        /* size the context array for the maximum VFs the device supports */
        total_vfs = pci_sriov_get_totalvfs(pdev);
        sriov->num_vfs = pci_num_vf(pdev);
        sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
        if (!sriov->vfs_ctx)
                return -ENOMEM;

        return 0;
}
static ssize_t store_max_vfs(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
{
        int err = 0;
        unsigned long max_vfs;
        struct pci_dev *pdev = to_pci_dev(dev);

        if (kstrtoul(buf, 0, &max_vfs))
                return -EINVAL;

        if (max_vfs == 0)
                pci_disable_sriov(pdev);
        else if (pci_num_vf(pdev) == 0)
                err = pci_enable_sriov(pdev, max_vfs);
        else
                /* changing the VF count while VFs are enabled is not supported */
                err = -EINVAL;

        return err ? err : count;
}
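For the store handler above to be reachable, the attribute must be declared and registered against the device. A minimal sketch using the standard device-attribute helpers; the show_max_vfs counterpart is an assumption, not part of the snippets above:

/* Sketch: assumed read-side counterpart reporting the current VF count */
static ssize_t show_max_vfs(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "%d\n", pci_num_vf(to_pci_dev(dev)));
}
static DEVICE_ATTR(max_vfs, 0644, show_max_vfs, store_max_vfs);

/* in the driver's probe(): */
        err = device_create_file(&pdev->dev, &dev_attr_max_vfs);

/* in the driver's remove(): */
        device_remove_file(&pdev->dev, &dev_attr_max_vfs);

Note that most in-tree drivers have since moved from driver-private attributes like this to the PCI core's generic sriov_numvfs interface.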
void efx_ef10_sriov_fini(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        unsigned int i;
        int rc;

        if (!nic_data->vf) {
                /* Remove any un-assigned orphaned VFs */
#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED)
                if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
#endif
                        pci_disable_sriov(efx->pci_dev);
                return;
        }

        /* Remove any VFs in the host */
        for (i = 0; i < efx->vf_count; ++i) {
                struct efx_nic *vf_efx = nic_data->vf[i].efx;

                if (vf_efx) {
                        efx_device_detach_sync(vf_efx);
                        rtnl_lock();
                        efx_net_stop(vf_efx->net_dev);
                        rtnl_unlock();
                        down_write(&vf_efx->filter_sem);
                        vf_efx->type->filter_table_remove(vf_efx);
                        up_write(&vf_efx->filter_sem);
                        efx_ef10_vadaptor_free(vf_efx, EVB_PORT_ID_ASSIGNED);
                        vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
                }
        }

        rc = efx_ef10_pci_sriov_disable(efx, true);
        if (rc)
                netif_dbg(efx, drv, efx->net_dev,
                          "Disabling SRIOV was not successful rc=%d\n", rc);
        else
                netif_dbg(efx, drv, efx->net_dev, "SRIOV disabled\n");
#endif
}
void bnxt_sriov_disable(struct bnxt *bp)
{
        u16 num_vfs = pci_num_vf(bp->pdev);

        if (!num_vfs)
                return;

        if (pci_vfs_assigned(bp->pdev)) {
                netdev_warn(bp->dev,
                            "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {
                pci_disable_sriov(bp->pdev);
                /* Free the HW resources reserved for various VF's */
                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
        }

        bnxt_free_vf_resources(bp);

        bp->pf.active_vfs = 0;
        /* Reclaim all resources for the PF. */
        bnxt_hwrm_func_qcaps(bp);
}
static int mlx5_core_create_vfs(struct pci_dev *pdev, int num_vfs)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err;

        /* start from a clean state if VFs are already enabled */
        if (pci_num_vf(pdev))
                pci_disable_sriov(pdev);

        enable_vfs(dev, num_vfs);

        err = pci_enable_sriov(pdev, num_vfs);
        if (err) {
                dev_warn(&pdev->dev, "enable sriov failed %d\n", err);
                goto ex;
        }

        return 0;

ex:
        /* roll back the firmware-side VF setup on failure */
        disable_vfs(dev, num_vfs);
        return err;
}