/*
 * mlx5_core_sriov_configure() - PCI sysfs callback to enable/disable SR-IOV.
 * @pdev:    the PF PCI device
 * @num_vfs: number of VFs to enable, or 0 to disable SR-IOV
 *
 * LAG and SR-IOV are mutually exclusive, so LAG is forbidden before
 * enabling VFs and allowed again when VFs are disabled.
 *
 * Return: @num_vfs on successful enable, 0 on disable, negative errno
 * on failure.
 */
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
	if (!mlx5_core_is_pf(dev))
		return -EPERM;

	if (num_vfs) {
		int ret;

		/* -ENODEV means the LAG layer isn't present; that is fine */
		ret = mlx5_lag_forbid(dev);
		if (ret && (ret != -ENODEV))
			return ret;

		err = mlx5_sriov_enable(pdev, num_vfs);
		/* Fix: don't leave LAG forbidden forever if enabling the
		 * VFs failed — undo the mlx5_lag_forbid() taken above.
		 */
		if (err)
			mlx5_lag_allow(dev);
	} else {
		mlx5_sriov_disable(pdev);
		mlx5_lag_allow(dev);
	}

	return err ? err : num_vfs;
}
/*
 * mlx5_sriov_detach() - tear down device-level SR-IOV state on detach.
 * @dev: mlx5 core device
 *
 * Only the physical function manages SR-IOV; VF devices are a no-op.
 */
void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_pf(dev))
		mlx5_device_disable_sriov(dev);
}
/*
 * mlx5_sriov_init() - sync driver SR-IOV state with VFs already enabled
 * at the PCI level (e.g. after a driver reload while VFs were active).
 * @dev: mlx5 core device
 *
 * Return: 0 on success or when there is nothing to do, -ENOMEM if the
 * per-VF context array cannot be allocated.
 */
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct pci_dev *pdev = dev->pdev;
	int cur_vfs;

	/* SR-IOV is managed only by the physical function */
	if (!mlx5_core_is_pf(dev))
		return 0;

	/* Nothing to sync unless the PCI layer reports pre-existing VFs */
	if (!sync_required(dev->pdev))
		return 0;

	/* Allocate one context slot per currently-enabled VF */
	cur_vfs = pci_num_vf(pdev);
	sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
	if (!sriov->vfs_ctx)
		return -ENOMEM;

	sriov->enabled_vfs = cur_vfs;

	/* Order matters: initialize VF contexts, bring up the eswitch
	 * (legacy mode) if networking support is compiled in, and only
	 * then enable the VFs at the device level.
	 */
	mlx5_core_init_vfs(dev, cur_vfs);
#ifdef CONFIG_MLX5_CORE_EN
	if (cur_vfs)
		mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs, SRIOV_LEGACY);
#endif
	enable_vfs(dev, cur_vfs);

	return 0;
}
/*
 * mlx5_sriov_cleanup() - free per-VF context storage on the PF.
 * @dev: mlx5 core device
 *
 * VF devices own no SR-IOV state, so there is nothing to release.
 */
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_pf(dev))
		kfree(dev->priv.sriov.vfs_ctx);
}
/*
 * mlx5_sriov_attach() - re-enable device-level SR-IOV on attach.
 * @dev: mlx5 core device
 *
 * If VFs already exist at the PCI level, enable them at the device
 * level as well. VF devices and a zero VF count are no-ops.
 *
 * Return: 0 when nothing to do, otherwise the result of
 * mlx5_device_enable_sriov().
 */
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int vf_count = sriov->num_vfs;

	if (!mlx5_core_is_pf(dev) || !vf_count)
		return 0;

	return mlx5_device_enable_sriov(dev, vf_count);
}
/*
 * mlx5_sriov_cleanup() - disable SR-IOV as part of driver teardown.
 * @dev: mlx5 core device
 *
 * Return: 0 on success (or on VF devices, which have nothing to do),
 * negative errno if disabling failed.
 */
int mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_pf(dev))
		return 0;

	/* configure(..., 0) disables SR-IOV and returns 0 on success */
	return mlx5_core_sriov_configure(dev->pdev, 0);
}
int mlx5_sriov_init(struct mlx5_core_dev *dev) { struct mlx5_core_sriov *sriov = &dev->priv.sriov; struct pci_dev *pdev = dev->pdev; int total_vfs; if (!mlx5_core_is_pf(dev)) return 0; total_vfs = pci_sriov_get_totalvfs(pdev); sriov->num_vfs = pci_num_vf(pdev); sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL); if (!sriov->vfs_ctx) return -ENOMEM; return 0; }
/*
 * mlx5_core_sriov_configure() - PCI sysfs callback to enable/disable SR-IOV.
 * @pdev:    the PF PCI device
 * @num_vfs: number of VFs to enable, or 0 to disable SR-IOV
 *
 * SR-IOV cannot be enabled while LAG is active, so that combination is
 * rejected up front.
 *
 * Return: @num_vfs on successful enable, 0 on disable, negative errno
 * on failure.
 */
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
	if (!mlx5_core_is_pf(dev))
		return -EPERM;

	if (num_vfs && mlx5_lag_is_active(dev)) {
		/* Fix: kernel log messages must end with a newline */
		mlx5_core_warn(dev, "can't turn sriov on while LAG is active\n");
		return -EINVAL;
	}

	if (num_vfs)
		err = mlx5_sriov_enable(pdev, num_vfs);
	else
		mlx5_sriov_disable(pdev);

	return err ? err : num_vfs;
}
/*
 * mlx5_core_sriov_configure() - PCI sysfs callback to enable/disable SR-IOV.
 * @pdev:    the PF PCI device
 * @num_vfs: number of VFs to enable, or 0 to disable SR-IOV
 *
 * Return: @num_vfs on successful enable, 0 on successful disable,
 * negative errno on failure.
 */
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;

	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
	if (!mlx5_core_is_pf(dev))
		return -EPERM;

	/* Existing VF state is torn down first in both the enable and the
	 * disable path; a non-zero @num_vfs effectively reconfigures.
	 */
	mlx5_core_cleanup_vfs(dev);

	if (!num_vfs) {
		/* Disable path: eswitch down, contexts freed, then the PCI
		 * SR-IOV capability — but only if no VF is still assigned
		 * to a guest, otherwise leave the VFs orphaned and warn.
		 */
#ifdef CONFIG_MLX5_CORE_EN
		mlx5_eswitch_disable_sriov(dev->priv.eswitch);
#endif
		kfree(sriov->vfs_ctx);
		sriov->vfs_ctx = NULL;
		if (!pci_vfs_assigned(pdev))
			pci_disable_sriov(pdev);
		else
			pr_info("unloading PF driver while leaving orphan VFs\n");
		return 0;
	}

	/* Enable path: PCI-level enable first, then per-VF device init,
	 * then bring up the eswitch in legacy mode.
	 */
	err = mlx5_core_sriov_enable(pdev, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err);
		return err;
	}

	mlx5_core_init_vfs(dev, num_vfs);
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
#endif

	return num_vfs;
}