/*
 * Disable all enabled VFs on the device and tear down SR-IOV resources.
 *
 * Fix: the old control flow used an #ifdef'd goto, so when
 * CONFIG_MLX5_CORE_EN was not set and no VFs were enabled, the function
 * returned early and skipped the wait for the firmware to give back the
 * VF pages.  The eswitch teardown and the page-reclaim wait are now
 * reached on every path, making cleanup consistent regardless of the
 * kernel configuration (matches the later upstream form of this
 * function).
 */
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;
	int vf;

	if (!sriov->enabled_vfs)
		goto out;

	for (vf = 0; vf < sriov->num_vfs; vf++) {
		if (!sriov->vfs_ctx[vf].enabled)
			continue;
		err = mlx5_core_disable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
			continue;
		}
		sriov->vfs_ctx[vf].enabled = 0;
		sriov->enabled_vfs--;
	}

out:
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
#endif
	/* Wait for the firmware to release the pages given to the VFs. */
	if (mlx5_wait_for_vf_pages(dev))
		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
/*
 * Disable every active VF HCA, tear down the SR-IOV eswitch state when
 * this device manages the eswitch, and wait for the firmware to hand
 * back the pages it was given on behalf of the VFs.
 *
 * VFs that fail to disable are left marked enabled so the bookkeeping
 * in sriov->enabled_vfs stays truthful.
 */
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int idx;
	int ret;

	/* Nothing to disable - go straight to the shared teardown. */
	if (!sriov->enabled_vfs)
		goto out;

	for (idx = 0; idx < sriov->num_vfs; idx++) {
		if (!sriov->vfs_ctx[idx].enabled)
			continue;

		ret = mlx5_core_disable_hca(dev, idx + 1);
		if (ret) {
			mlx5_core_warn(dev, "failed to disable VF %d\n", idx);
			continue;
		}

		sriov->vfs_ctx[idx].enabled = 0;
		sriov->enabled_vfs--;
	}

out:
	if (MLX5_ESWITCH_MANAGER(dev))
		mlx5_eswitch_disable_sriov(dev->priv.eswitch);

	if (mlx5_wait_for_vf_pages(dev))
		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
/*
 * Final SR-IOV cleanup for the device: disable all VFs, wait for the
 * firmware to return the pages allocated on their behalf, then reset
 * the recorded VF count.
 *
 * Fix: the warning said "timeout claiming VFs pages"; every other
 * caller of mlx5_wait_for_vf_pages() in this file warns "reclaiming",
 * so the message is corrected for consistency.
 */
static void mlx5_core_cleanup_vfs(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;

	disable_vfs(dev, sriov->num_vfs);

	if (mlx5_wait_for_vf_pages(dev))
		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");

	sriov->num_vfs = 0;
}