int mlx5_sriov_init(struct mlx5_core_dev *dev) { struct mlx5_core_sriov *sriov = &dev->priv.sriov; struct pci_dev *pdev = dev->pdev; int cur_vfs; if (!mlx5_core_is_pf(dev)) return 0; if (!sync_required(dev->pdev)) return 0; cur_vfs = pci_num_vf(pdev); sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL); if (!sriov->vfs_ctx) return -ENOMEM; sriov->enabled_vfs = cur_vfs; mlx5_core_init_vfs(dev, cur_vfs); #ifdef CONFIG_MLX5_CORE_EN if (cur_vfs) mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs, SRIOV_LEGACY); #endif enable_vfs(dev, cur_vfs); return 0; }
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) { struct mlx5_core_sriov *sriov = &dev->priv.sriov; int err; int vf; if (sriov->enabled_vfs) { mlx5_core_warn(dev, "failed to enable SRIOV on device, already enabled with %d vfs\n", sriov->enabled_vfs); return -EBUSY; } if (!MLX5_ESWITCH_MANAGER(dev)) goto enable_vfs_hca; err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); if (err) { mlx5_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); return err; } enable_vfs_hca: for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); if (err) { mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err); continue; } sriov->vfs_ctx[vf].enabled = 1; sriov->enabled_vfs++; if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) { err = sriov_restore_guids(dev, vf); if (err) { mlx5_core_warn(dev, "failed to restore VF %d settings, err %d\n", vf, err); continue; } } mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf); } return 0; }
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_core_sriov *sriov = &dev->priv.sriov; int err; mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs); if (!mlx5_core_is_pf(dev)) return -EPERM; mlx5_core_cleanup_vfs(dev); if (!num_vfs) { #ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_disable_sriov(dev->priv.eswitch); #endif kfree(sriov->vfs_ctx); sriov->vfs_ctx = NULL; if (!pci_vfs_assigned(pdev)) pci_disable_sriov(pdev); else pr_info("unloading PF driver while leaving orphan VFs\n"); return 0; } err = mlx5_core_sriov_enable(pdev, num_vfs); if (err) { dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err); return err; } mlx5_core_init_vfs(dev, num_vfs); #ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); #endif return num_vfs; }
/* Enable SR-IOV on the device for @num_vfs virtual functions.
 *
 * With CONFIG_MLX5_CORE_EN the eswitch is put into legacy SR-IOV mode
 * first; failure there aborts the operation.  Each VF HCA is then
 * enabled individually — a per-VF failure is logged and skipped so the
 * remaining VFs still come up.
 *
 * Returns 0 on success, -EBUSY if SR-IOV is already enabled, or the
 * eswitch enable error code.
 */
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int vf_idx;
	int ret;

	if (sriov->enabled_vfs) {
		mlx5_core_warn(dev,
			       "failed to enable SRIOV on device, already enabled with %d vfs\n",
			       sriov->enabled_vfs);
		return -EBUSY;
	}

#ifdef CONFIG_MLX5_CORE_EN
	ret = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs,
					SRIOV_LEGACY);
	if (ret) {
		mlx5_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n",
			       ret);
		return ret;
	}
#endif

	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) {
		ret = mlx5_core_enable_hca(dev, vf_idx + 1);
		if (ret) {
			/* Best effort: skip this VF, keep enabling the rest */
			mlx5_core_warn(dev, "failed to enable VF %d (%d)\n",
				       vf_idx, ret);
			continue;
		}
		sriov->vfs_ctx[vf_idx].enabled = 1;
		sriov->enabled_vfs++;
		mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf_idx);
	}

	return 0;
}