/**
 * nfp_pcie_sriov_enable() - Enable SR-IOV virtual functions on the NFP PF.
 * @pdev:    PCI device of the physical function
 * @num_vfs: number of VFs requested by the caller
 *
 * Rejects requests above the firmware-advertised VF limit, then enables
 * the VFs at the PCI level and records the count in the PF state.
 *
 * Return: number of VFs enabled on success, negative errno on failure,
 *         or 0 when the kernel is built without CONFIG_PCI_IOV.
 */
static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct nfp_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* Firmware imposes an upper bound on the number of VFs. */
	if (num_vfs > pf->limit_vfs) {
		nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
			 pf->limit_vfs);
		return -EINVAL;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err);
		return err;
	}

	pf->num_vfs = num_vfs;

	dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);

	return num_vfs;
#else
	/* No IOV support compiled in - nothing to do.  Previously this
	 * "return 0" sat after #endif and was unreachable dead code in
	 * CONFIG_PCI_IOV builds; the #else makes the intent explicit.
	 */
	return 0;
#endif
}
static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs) { struct nitrox_device *ndev = pci_get_drvdata(pdev); int err; if (!num_vfs_valid(num_vfs)) { dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs); return -EINVAL; } if (pci_num_vf(pdev) == num_vfs) return num_vfs; err = pci_enable_sriov(pdev, num_vfs); if (err) { dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err); return err; } dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs); ndev->num_vfs = num_vfs; ndev->mode = num_vfs_to_mode(num_vfs); /* set bit in flags */ set_bit(__NDEV_SRIOV_BIT, &ndev->flags); /* cleanup PF resources */ pf_sriov_cleanup(ndev); config_nps_core_vfcfg_mode(ndev, ndev->mode); return num_vfs; }
/*
 * Enable SR-IOV on an EF10 NIC: allocate the per-VF vswitching state
 * first, then turn on the PCI VFs.  Any failure unwinds partial state
 * and resets the recorded VF count.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	int err;

	efx->vf_count = num_vfs;

	err = efx_ef10_sriov_alloc_vf_vswitching(efx);
	if (err)
		goto err_vf_count;

	err = pci_enable_sriov(pci_dev, num_vfs);
	if (err)
		goto err_vswitching;

	return 0;

err_vswitching:
	efx_ef10_sriov_free_vf_vswitching(efx);
err_vf_count:
	efx->vf_count = 0;
	netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n");
	return err;
}
/**
 * genwqe_sriov_configure() - PCI sriov_configure callback for GenWQE.
 * @dev:    PCI device of the physical function
 * @numvfs: requested VF count; 0 requests SR-IOV disable
 *
 * Return: number of VFs enabled on success, 0 on disable,
 *         negative errno on failure.
 */
static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
{
	struct genwqe_dev *cd = dev_get_drvdata(&dev->dev);
	int err;

	if (numvfs > 0) {
		genwqe_setup_vf_jtimer(cd);
		/* Fix: the result of pci_enable_sriov() was previously
		 * ignored, reporting success even when enabling failed.
		 */
		err = pci_enable_sriov(dev, numvfs);
		if (err)
			return err;
		return numvfs;
	}
	if (numvfs == 0) {
		pci_disable_sriov(dev);
		return 0;
	}
	return 0;
}
/*
 * Enable SR-IOV at the PCI level for an mlx5 device.  Re-enabling while
 * VFs are already active is refused with -EBUSY.
 */
static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int ret;

	if (pci_num_vf(pdev)) {
		mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
		return -EBUSY;
	}

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", ret);

	return ret;
}
/**
 * store_max_vfs() - sysfs write handler controlling the VF count.
 * @dev:   device the attribute belongs to
 * @attr:  the sysfs attribute (unused)
 * @buf:   user-supplied string with the requested VF count
 * @count: length of @buf
 *
 * Writing 0 disables SR-IOV; a non-zero value enables that many VFs,
 * but only when none are currently active (changing the count on the
 * fly is rejected).
 *
 * Return: @count on success, negative errno on failure.
 */
static ssize_t store_max_vfs(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int err = 0;
	unsigned long max_vfs;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &max_vfs))
		return -EINVAL;

	/* Fix: pci_enable_sriov() takes an int; reject untrusted sysfs
	 * values that would truncate or overflow the conversion.
	 */
	if (max_vfs > INT_MAX)
		return -ERANGE;

	if (max_vfs == 0)
		pci_disable_sriov(pdev);
	else if (pci_num_vf(pdev) == 0)
		err = pci_enable_sriov(pdev, max_vfs);
	else /* do nothing if change max_vfs number */
		err = -EINVAL;

	return err ? err : count;
}
/*
 * Tear down any currently active VFs, prepare @num_vfs in the mlx5 core
 * device state, then enable them at the PCI level.  On failure the core
 * VF state is rolled back.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int mlx5_core_create_vfs(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int ret;

	/* Start from a clean slate if SR-IOV is already on. */
	if (pci_num_vf(pdev))
		pci_disable_sriov(pdev);

	enable_vfs(dev, num_vfs);

	ret = pci_enable_sriov(pdev, num_vfs);
	if (!ret)
		return 0;

	dev_warn(&pdev->dev, "enable sriov failed %d\n", ret);
	disable_vfs(dev, num_vfs);
	return ret;
}
/**
 * bnxt_sriov_enable() - Enable SR-IOV VFs, scaling the request down to
 * whatever the hardware resources can support.
 * @bp:      driver private state of the PF
 * @num_vfs: in: requested VF count; out: VF count actually enabled
 *
 * Return: 0 on success, negative errno on failure.
 */
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;

	/* Check if we can enable requested num of vf's. At a mininum
	 * we require 1 RX 1 TX rings for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	/* Walk the requested count downwards until the RX/TX ring and
	 * RSS-context budgets all fit.  The *_ok flags are deliberately
	 * sticky: once a resource fits at some count it also fits at
	 * every smaller count, so they never need resetting.
	 */
	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		/* With aggregation rings the PF consumes two RX rings
		 * per configured ring, hence the doubled reservation.
		 */
		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	/* Report the reduced count back to the caller through *num_vfs. */
	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}