/* nfp_pcie_sriov_enable() - sriov_configure callback to create VFs.
 * @pdev:	PCI device of the PF
 * @num_vfs:	number of VFs requested by the administrator
 *
 * Validates the request against the firmware-imposed VF limit and enables
 * SR-IOV on the device.
 *
 * Return: number of VFs enabled on success, negative errno on failure.
 * When the kernel is built without CONFIG_PCI_IOV this is a no-op
 * reporting zero VFs.
 */
static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct nfp_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* The application firmware caps how many VFs it can service */
	if (num_vfs > pf->limit_vfs) {
		nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
			 pf->limit_vfs);
		return -EINVAL;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err);
		return err;
	}

	pf->num_vfs = num_vfs;

	dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);

	return num_vfs;
#else
	return 0;
#endif
}
void nfp_repr_clean_and_free(struct nfp_repr *repr) { nfp_info(repr->app->cpp, "Destroying Representor(%s)\n", repr->netdev->name); nfp_repr_clean(repr); __nfp_repr_free(repr); }
/* bpe_lookup() - Read the NBI DMA BPE credit configuration from firmware.
 * @nfp:	NFP device handle
 * @nbi:	NBI island index, patched into the "nbi<N>_dma_bpe_credits"
 *		runtime symbol name
 * @bpe:	output array receiving the BPE credit words
 * @bpe_max:	capacity of @bpe in entries
 *
 * The symbol's first 32-bit word carries a magic marker plus the number
 * of BPE entries that follow; the entries are then read sequentially.
 *
 * Return: number of BPE entries read, 0 if the symbol is absent (nothing
 * to configure), or -errno on failure.
 */
static int bpe_lookup(struct nfp_device *nfp, int nbi, u32 *bpe, int bpe_max)
{
	char buff[] = "nbi0_dma_bpe_credits";
	const struct nfp_rtsym *sym;
	struct nfp_cpp_area *area;
	u32 __iomem *ptr;
	int err, count, i;
	u32 id, tmp;

	/* Patch the island number into the symbol name */
	buff[3] += nbi;

	sym = nfp_rtsym_lookup(nfp, buff);
	if (!sym) {
		nfp_info(nfp, "%s: Symbol not present\n", buff);
		return 0;
	}

	id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
	area = nfp_cpp_area_alloc_acquire(nfp_device_cpp(nfp), id,
					  sym->addr, sym->size);
	if (IS_ERR_OR_NULL(area)) {
		nfp_err(nfp, "%s: Can't acquire area\n", buff);
		return area ? PTR_ERR(area) : -ENOMEM;
	}

	ptr = nfp_cpp_area_iomem(area);
	if (IS_ERR_OR_NULL(ptr)) {
		nfp_err(nfp, "%s: Can't map area\n", buff);
		err = ptr ? PTR_ERR(ptr) : -ENOMEM;
		goto exit;
	}

	/* Header word: magic marker + entry count */
	tmp = readl(ptr++);
	if (!BPECFG_MAGIC_CHECK(tmp)) {
		nfp_err(nfp, "%s: Magic value (0x%08x) unrecognized\n",
			buff, tmp);
		err = -EINVAL;
		goto exit;
	}

	count = BPECFG_MAGIC_COUNT(tmp);
	if (count > bpe_max) {
		nfp_err(nfp, "%s: Magic count (%d) too large (> %d)\n",
			count > bpe_max ? buff : buff, count, bpe_max);
		err = -EINVAL;
		goto exit;
	}

	/* Only read the entries the firmware advertises - unconditionally
	 * reading bpe_max words could run past the end of the symbol's data.
	 */
	for (i = 0; i < count; i++)
		bpe[i] = readl(ptr++);

	err = count;
exit:
	nfp_cpp_area_release_free(area);
	return err;
}
/**
 * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
 * @mutex: NFP CPP Mutex handle
 *
 * Polls the hardware mutex with short sleeps until it is acquired,
 * warning periodically and giving up after a hard timeout.  The wait is
 * interruptible: a pending signal aborts it with -ERESTARTSYS.
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
	/* Deadlines (in jiffies) for the first warning and the hard error */
	unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ;
	unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ;
	unsigned int timeout_ms = 1;
	int err;

	/* We can't use a waitqueue here, because the unlocker
	 * might be on a separate CPU.
	 *
	 * So just wait for now.
	 */
	for (;;) {
		err = nfp_cpp_mutex_trylock(mutex);
		/* Anything but -EBUSY means acquired (0) or a hard error */
		if (err != -EBUSY)
			break;

		/* Back off briefly; a non-zero return means a signal
		 * interrupted the sleep.
		 */
		err = msleep_interruptible(timeout_ms);
		if (err != 0) {
			nfp_info(mutex->cpp,
				 "interrupted waiting for NFP mutex\n");
			return -ERESTARTSYS;
		}

		/* Once the warn deadline has passed, log and re-arm with
		 * the (longer) NEXT_WARN interval.
		 */
		if (time_is_before_eq_jiffies(warn_at)) {
			warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
			nfp_warn(mutex->cpp,
				 "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
				 mutex->depth, mutex->target,
				 mutex->address, mutex->key);
		}
		/* Give up entirely after the hard timeout */
		if (time_is_before_eq_jiffies(err_at)) {
			nfp_err(mutex->cpp, "Error: mutex wait timed out\n");
			return -EBUSY;
		}
	}

	return err;
}
static int nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; int err, reify_cnt; unsigned int i; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) return -ENOMEM; reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); if (!reprs) { err = -ENOMEM; goto err_free_ctrl_skb; } for (i = 0; i < eth_tbl->count; i++) { unsigned int phys_port = eth_tbl->ports[i].index; struct net_device *repr; struct nfp_port *port; u32 cmsg_port_id; repr = nfp_repr_alloc(app); if (!repr) { err = -ENOMEM; goto err_reprs_clean; } repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; goto err_reprs_clean; } nfp_repr = netdev_priv(repr); nfp_repr->app_priv = repr_priv; port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); nfp_repr_free(repr); goto err_reprs_clean; } err = nfp_port_init_phy_port(app->pf, app, port, i); if (err) { nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); nfp_net_get_mac_addr(app->pf, repr, port); cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); err = nfp_repr_init(app, repr, cmsg_port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } nfp_flower_cmsg_mac_repr_add(ctrl_skb, i, eth_tbl->ports[i].nbi, eth_tbl->ports[i].base, phys_port); RCU_INIT_POINTER(reprs->reprs[phys_port], repr); nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n", phys_port, repr->name); } nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); /* The REIFY/MAC_REPR control messages should be sent after the MAC * representors are registered using nfp_app_reprs_set(). 
This is * because the firmware may respond with control messages for the * MAC representors, f.e. to provide the driver with information * about their state, and without registration the driver will drop * any such messages. */ atomic_set(replies, 0); reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true); if (reify_cnt < 0) { err = reify_cnt; nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); goto err_reprs_remove; } err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); if (err) goto err_reprs_remove; nfp_ctrl_tx(app->ctrl, ctrl_skb); return 0; err_reprs_remove: reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL); err_reprs_clean: nfp_reprs_clean_and_free(app, reprs); err_free_ctrl_skb: kfree_skb(ctrl_skb); return err; }
static int nfp_flower_spawn_vnic_reprs(struct nfp_app *app, enum nfp_flower_cmsg_port_vnic_type vnic_type, enum nfp_repr_type repr_type, unsigned int cnt) { u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; enum nfp_port_type port_type; struct nfp_repr *nfp_repr; struct nfp_reprs *reprs; int i, err, reify_cnt; const u8 queue = 0; port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT : NFP_PORT_VF_PORT; reprs = nfp_reprs_alloc(cnt); if (!reprs) return -ENOMEM; for (i = 0; i < cnt; i++) { struct net_device *repr; struct nfp_port *port; u32 port_id; repr = nfp_repr_alloc(app); if (!repr) { err = -ENOMEM; goto err_reprs_clean; } repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; goto err_reprs_clean; } nfp_repr = netdev_priv(repr); nfp_repr->app_priv = repr_priv; /* For now we only support 1 PF */ WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); port = nfp_port_alloc(app, port_type, repr); if (IS_ERR(port)) { err = PTR_ERR(port); nfp_repr_free(repr); goto err_reprs_clean; } if (repr_type == NFP_REPR_TYPE_PF) { port->pf_id = i; port->vnic = priv->nn->dp.ctrl_bar; } else { port->pf_id = 0; port->vf_id = i; port->vnic = app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; } eth_hw_addr_random(repr); port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type, i, queue); err = nfp_repr_init(app, repr, port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; } RCU_INIT_POINTER(reprs->reprs[i], repr); nfp_info(app->cpp, "%s%d Representor(%s) created\n", repr_type == NFP_REPR_TYPE_PF ? 
"PF" : "VF", i, repr->name); } nfp_app_reprs_set(app, repr_type, reprs); atomic_set(replies, 0); reify_cnt = nfp_flower_reprs_reify(app, repr_type, true); if (reify_cnt < 0) { err = reify_cnt; nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); goto err_reprs_remove; } err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); if (err) goto err_reprs_remove; return 0; err_reprs_remove: reprs = nfp_app_reprs_set(app, repr_type, NULL); err_reprs_clean: nfp_reprs_clean_and_free(app, reprs); return err; }