/**
 * nfp_reset_soft() - Perform a soft reset of the NFP
 * @nfp:	NFP Device handle
 *
 * Return: 0, or -ERRNO
 */
int nfp_reset_soft(struct nfp_device *nfp)
{
        struct nfp_cpp *cpp = nfp_device_cpp(nfp);
        struct nfp_cpp_area *area;
        struct nfp_resource *res;
        u32 model;
        int i, err;

        model = nfp_cpp_model(cpp);

        /* Claim the nfp.nffw resource page */
        res = nfp_resource_acquire(nfp, NFP_RESOURCE_NFP_NFFW);
        if (IS_ERR(res)) {
                nfp_err(nfp, "Can't acquire %s resource\n",
                        NFP_RESOURCE_NFP_NFFW);
                return -EBUSY;
        }

        if (NFP_CPP_MODEL_IS_3200(model))
                err = nfp3200_reset_soft(nfp);
        else if (NFP_CPP_MODEL_IS_6000(model))
                err = nfp6000_reset_soft(nfp);
        else
                err = -EINVAL;

        if (err < 0)
                goto exit;

        /* Clear the entire NFP NFFW page */
        area = nfp_cpp_area_alloc_acquire(cpp, nfp_resource_cpp_id(res),
                                          nfp_resource_address(res),
                                          nfp_resource_size(res));
        if (!area) {
                nfp_err(nfp, "Can't acquire area for %s resource\n",
                        NFP_RESOURCE_NFP_NFFW);
                err = -ENOMEM;
                goto exit;
        }

        for (i = 0; i < nfp_resource_size(res); i += 8) {
                err = nfp_cpp_area_writeq(area, i, 0);
                if (err < 0)
                        break;
        }
        nfp_cpp_area_release_free(area);

        if (err < 0) {
                nfp_err(nfp, "Can't erase area of %s resource\n",
                        NFP_RESOURCE_NFP_NFFW);
                goto exit;
        }

        err = 0;
exit:
        nfp_resource_release(res);
        return err;
}
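/* Example (illustrative only): a caller would typically soft-reset the
 * device before loading new firmware. nfp_device_open()/nfp_device_close()
 * are assumed helpers for this sketch, not functions shown in this file.
 */
static int example_reset_before_fw_load(unsigned int devnum)
{
        struct nfp_device *nfp;
        int err;

        nfp = nfp_device_open(devnum);	/* assumed open helper */
        if (!nfp)
                return -ENODEV;

        err = nfp_reset_soft(nfp);
        if (err < 0)
                nfp_err(nfp, "Soft reset failed: %d\n", err);

        nfp_device_close(nfp);	/* assumed close helper */
        return err;
}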
static struct nfp_hwinfo *
hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
{
        struct nfp_hwinfo *header;
        struct nfp_resource *res;
        u64 cpp_addr;
        u32 cpp_id;
        int err;
        u8 *db;

        res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
        if (!IS_ERR(res)) {
                cpp_id = nfp_resource_cpp_id(res);
                cpp_addr = nfp_resource_address(res);
                *cpp_size = nfp_resource_size(res);

                nfp_resource_release(res);

                if (*cpp_size < HWINFO_SIZE_MIN)
                        return NULL;
        } else if (PTR_ERR(res) == -ENOENT) {
                /* Try getting the HWInfo table from the 'classic' location */
                cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
                                           NFP_CPP_ACTION_RW, 0, 1);
                cpp_addr = 0x30000;
                *cpp_size = 0x0e000;
        } else {
                return NULL;
        }

        db = kmalloc(*cpp_size + 1, GFP_KERNEL);
        if (!db)
                return NULL;

        err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
        if (err != *cpp_size)
                goto exit_free;

        header = (void *)db;
        if (nfp_hwinfo_is_updating(header))
                goto exit_free;

        if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) {
                nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n",
                        le32_to_cpu(header->version));
                goto exit_free;
        }

        /* NULL-terminate for safety */
        db[*cpp_size] = '\0';

        return (void *)db;
exit_free:
        kfree(db);
        return NULL;
}
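/* Sketch of a polling wrapper around hwinfo_try_fetch(). The table may be
 * mid-update when first read (nfp_hwinfo_is_updating() above), so a caller
 * can retry for a bounded time. The 10s budget and 100ms poll interval are
 * illustrative assumptions, not values taken from this file.
 */
static struct nfp_hwinfo *
hwinfo_fetch_polled(struct nfp_cpp *cpp, size_t *hwdb_size)
{
        const unsigned long wait_until = jiffies + 10 * HZ;
        struct nfp_hwinfo *db;

        for (;;) {
                db = hwinfo_try_fetch(cpp, hwdb_size);
                if (db)
                        return db;

                if (time_after(jiffies, wait_until)) {
                        nfp_err(cpp, "HWInfo table fetch timed out\n");
                        return NULL;
                }
                msleep(100);
        }
}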
/*
 * nfp_nffw_info_open() - Acquire the lock on the NFFW table
 * @cpp:	NFP CPP handle
 *
 * Return: NFFW info state handle, or NULL on failure
 */
struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
{
        struct nfp_nffw_info_data *fwinf;
        struct nfp_nffw_info *state;
        uint32_t info_ver;
        int err;

        state = calloc(1, sizeof(*state));
        if (!state)
                return NULL;

        state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
        if (!state->res)
                goto err_free;

        fwinf = &state->fwinf;

        if (sizeof(*fwinf) > nfp_resource_size(state->res))
                goto err_release;

        err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
                           nfp_resource_address(state->res),
                           fwinf, sizeof(*fwinf));
        if (err < (int)sizeof(*fwinf))
                goto err_release;

        if (!nffw_res_flg_init_get(fwinf))
                goto err_release;

        info_ver = nffw_res_info_version_get(fwinf);
        if (info_ver > NFFW_INFO_VERSION_CURRENT)
                goto err_release;

        state->cpp = cpp;
        return state;

err_release:
        nfp_resource_release(state->res);
err_free:
        free(state);
        return NULL;
}
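/* A matching teardown for nfp_nffw_info_open() would release the claimed
 * nfp.nffw resource and free the state, mirroring the error path above.
 * A minimal sketch, assuming no other cleanup is needed:
 */
void nfp_nffw_info_close(struct nfp_nffw_info *state)
{
        if (!state)
                return;

        nfp_resource_release(state->res);
        free(state);
}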
static void *nfp_nsp_con(struct nfp_device *nfp)
{
        struct nfp_resource *res;
        struct nfp_nsp *priv;

        res = nfp_resource_acquire(nfp, NSP_RESOURCE);
        if (IS_ERR(res))
                return NULL;

        priv = nfp_device_private_alloc(nfp, sizeof(*priv), nfp_nsp_des);
        if (!priv) {
                nfp_resource_release(res);
                return NULL;
        }

        priv->res = res;

        return priv;
}
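/* The nfp_nsp_des destructor passed to nfp_device_private_alloc() above is
 * not shown in this excerpt. A plausible minimal body would just drop the
 * resource claim taken by the constructor; the void-pointer callback
 * signature is an assumption:
 */
static void nfp_nsp_des(void *ptr)
{
        struct nfp_nsp *priv = ptr;

        nfp_resource_release(priv->res);
}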
/* Other debug dumps */
static int
nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer)
{
        struct nfp_resource *res;
        int ret;

        if (!nn->cpp)
                return -EOPNOTSUPP;

        dump->version = 1;
        dump->flag = NFP_DUMP_NSP_DIAG;

        res = nfp_resource_acquire(nn->cpp, NFP_RESOURCE_NSP_DIAG);
        if (IS_ERR(res))
                return PTR_ERR(res);

        if (buffer) {
                if (dump->len != nfp_resource_size(res)) {
                        ret = -EINVAL;
                        goto exit_release;
                }

                ret = nfp_cpp_read(nn->cpp, nfp_resource_cpp_id(res),
                                   nfp_resource_address(res),
                                   buffer, dump->len);
                if (ret != dump->len)
                        ret = ret < 0 ? ret : -EIO;
                else
                        ret = 0;
        } else {
                dump->len = nfp_resource_size(res);
                ret = 0;
        }
exit_release:
        nfp_resource_release(res);
        return ret;
}
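/* Illustrative call sequence: ethtool drives the dump in two phases. It
 * first calls with buffer == NULL so dump->len is set from the resource
 * size, then allocates a buffer of exactly that length and calls again.
 * A hypothetical caller-side sketch (example_fetch_nsp_diag is not part
 * of the driver):
 */
static int example_fetch_nsp_diag(struct nfp_net *nn, void **out)
{
        struct ethtool_dump dump = {};
        void *buf;
        int err;

        /* Phase 1: query the required length */
        err = nfp_dump_nsp_diag(nn, &dump, NULL);
        if (err)
                return err;

        buf = vmalloc(dump.len);
        if (!buf)
                return -ENOMEM;

        /* Phase 2: fetch the dump payload */
        err = nfp_dump_nsp_diag(nn, &dump, buf);
        if (err) {
                vfree(buf);
                return err;
        }

        *out = buf;
        return 0;
}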
/* Perform a soft reset of the NFP6000:
 * - Disable traffic ingress
 * - Verify all NBI MAC packet buffers have returned
 * - Wait for PCIE DMA Queues to empty
 * - Stop all MEs
 * - Clear all PCIe DMA Queues
 * - Reset MAC NBI gaskets
 * - Verify that all NBI/MAC buffers/credits have returned
 * - Soft reset subcomponents relevant to this model
 * - TODO: Crypto reset
 */
static int nfp6000_reset_soft(struct nfp_device *nfp)
{
        struct nfp_cpp *cpp = nfp_device_cpp(nfp);
        struct nfp_nbi_dev *nbi[2] = {};
        struct nfp_resource *res;
        int mac_enable[2];
        int i, p, err, nbi_mask = 0;
        u32 bpe[2][32];
        int bpes[2];

        /* Lock out the MAC from any stats updaters,
         * such as the NSP
         */
        res = nfp_resource_acquire(nfp, NFP_RESOURCE_MAC_STATISTICS);
        if (IS_ERR(res))
                return -EBUSY;

        for (i = 0; i < 2; i++) {
                u32 tmp;
                int state;

                err = nfp_power_get(nfp, NFP6000_DEVICE_NBI(i, 0), &state);
                if (err < 0) {
                        if (err == -ENODEV) {
                                nbi[i] = NULL;
                                continue;
                        }
                        goto exit;
                }

                if (state != NFP_DEVICE_STATE_ON) {
                        nbi[i] = NULL;
                        continue;
                }

                nbi[i] = nfp_nbi_open(nfp, i);
                if (!nbi[i])
                        continue;

                nbi_mask |= BIT(i);

                err = nfp_nbi_mac_regr(nbi[i], NFP_NBI_MACX_CSR,
                                       NFP_NBI_MACX_MAC_BLOCK_RST, &tmp);
                if (err < 0)
                        goto exit;

                mac_enable[i] = 0;
                if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST))
                        mac_enable[i] |= BIT(0);
                if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST))
                        mac_enable[i] |= BIT(1);

                /* No MACs at all? Then we don't care. */
                if (mac_enable[i] == 0) {
                        nfp_nbi_close(nbi[i]);
                        nbi[i] = NULL;
                        continue;
                }

                /* Make sure we have the BPE list */
                err = bpe_lookup(nfp, i, &bpe[i][0], ARRAY_SIZE(bpe[i]));
                if (err < 0)
                        goto exit;

                bpes[i] = err;
        }

        /* Verify that traffic ingress is disabled */
        for (i = 0; i < 2; i++) {
                if (!nbi[i])
                        continue;

                for (p = 0; p < 24; p++) {
                        u32 r, mask, tmp;

                        mask = NFP_NBI_MACX_ETH_SEG_CMD_CONFIG_ETH_RX_ENA;
                        r = NFP_NBI_MACX_ETH_SEG_CMD_CONFIG(p % 12);

                        err = nfp_nbi_mac_regr(nbi[i],
                                               NFP_NBI_MACX_ETH(p / 12),
                                               r, &tmp);
                        if (err < 0) {
                                nfp_err(nfp, "Can't verify RX is disabled for port %d.%d\n",
                                        i, p);
                                goto exit;
                        }

                        if (tmp & mask) {
                                nfp_warn(nfp, "HAZARD: RX for traffic was not disabled by firmware for port %d.%d\n",
                                         i, p);
                        }

                        err = nfp_nbi_mac_regw(nbi[i],
                                               NFP_NBI_MACX_ETH(p / 12),
                                               r, mask, 0);
                        if (err < 0) {
                                nfp_err(nfp, "Can't disable RX traffic for port %d.%d\n",
                                        i, p);
                                goto exit;
                        }
                }
        }

        /* Wait for packets to drain from NBI to NFD or to be freed.
         * Worst case guess is:
         *   512 pkts per CTM, 12 MEs per CTM, 800MHz clock rate,
         *   ~1000 cycles to sink a single packet.
         *   512/12 = 42 pkts per ME, therefore 1000*42 = 42,000 cycles.
         *   42K cycles at 800MHz = 52.5us. Round up to 60us.
         *
         * TODO: Account for cut-through traffic.
         */
        usleep_range(60, 100);

        /* Verify all NBI MAC packet buffers have returned */
        for (i = 0; i < 2; i++) {
                if (!nbi[i])
                        continue;

                err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
                if (err < 0)
                        goto exit;
        }

        /* Wait for PCIE DMA Queues to empty.
         *
         * How we calculate the wait time for DMA Queues to be empty:
         *
         * Max CTM buffers that could be enqueued to one island:
         *   512 x (7 ME islands + 2 other islands) = 4608 CTM buffers
         *
         * The minimum rate at which NFD would process that ring would
         * occur if NFD records the queues as "up" so that it DMAs the
         * whole packet to the host, and if the CTM buffers in the ring
         * are all associated with jumbo frames.
         *
         * Jumbo frames are <10kB, and NFD 3.0 processes ToPCI jumbo
         * frames at ~35Gbps (measured on star fighter card).
         *   35e9 / (10 x 1024 x 8) = 427kpps.
         *
         * The time to empty a ring holding 4608 packets at 427kpps
         * is 10.79ms.
         *
         * To be conservative we round up to the nearest whole number,
         * i.e. 11ms.
         */
        mdelay(11);

        /* Check all PCIE DMA Queues are empty. */
        for (i = 0; i < 4; i++) {
                int state;
                int empty;
                unsigned int subdev = NFP6000_DEVICE_PCI(i,
                                                NFP6000_DEVICE_PCI_PCI);

                err = nfp_power_get(nfp, subdev, &state);
                if (err < 0) {
                        if (err == -ENODEV)
                                continue;
                        goto exit;
                }

                if (state != NFP_DEVICE_STATE_ON)
                        continue;

                err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
                if (err < 0)
                        goto exit;

                if (!empty) {
                        nfp_err(nfp, "PCI%d DMA queues did not drain\n", i);
                        err = -ETIMEDOUT;
                        goto exit;
                }

                /* Set ARM PCIe Monitor to defaults */
                err = nfp6000_pcie_monitor_set(cpp, i, 0);
                if (err < 0)
                        goto exit;
        }

        /* Stop all MEs */
        for (i = 0; i < 64; i++) {
                err = nfp6000_stop_me_island(nfp, i);
                if (err < 0)
                        goto exit;
        }

        /* Verify again that PCIe DMA Queues are now empty */
        for (i = 0; i < 4; i++) {
                int state;
                int empty;
                unsigned int subdev = NFP6000_DEVICE_PCI(i,
                                                NFP6000_DEVICE_PCI_PCI);

                err = nfp_power_get(nfp, subdev, &state);
                if (err < 0) {
                        if (err == -ENODEV)
                                continue;
                        goto exit;
                }

                if (state != NFP_DEVICE_STATE_ON)
                        continue;

                err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
                if (err < 0)
                        goto exit;

                if (!empty) {
                        nfp_err(nfp, "PCI%d DMA queue is not empty\n", i);
                        err = -ETIMEDOUT;
                        goto exit;
                }
        }

        /* Clear all PCIe DMA Queues */
        for (i = 0; i < 4; i++) {
                unsigned int subdev = NFP6000_DEVICE_PCI(i,
                                                NFP6000_DEVICE_PCI_PCI);
                int state;
                const u32 pci = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_PCIE,
                                                  3, 0, i + 4);

                err = nfp_power_get(nfp, subdev, &state);
                if (err < 0) {
                        if (err == -ENODEV)
                                continue;
                        goto exit;
                }

                if (state != NFP_DEVICE_STATE_ON)
                        continue;

                for (p = 0; p < 256; p++) {
                        u32 q = NFP_PCIE_Q(p);

                        err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_LO,
                                             NFP_QCTLR_STS_LO_RPTR_ENABLE);
                        if (err < 0)
                                goto exit;

                        err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_HI,
                                             NFP_QCTLR_STS_HI_EMPTY);
                        if (err < 0)
                                goto exit;
                }
        }

        /* Reset MAC NBI gaskets */
        for (i = 0; i < 2; i++) {
                u32 mask = NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_MPB |
                           NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_MPB |
                           NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_CORE |
                           NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_CORE |
                           NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST |
                           NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST;

                if (!nbi[i])
                        continue;

                err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
                                       NFP_NBI_MACX_MAC_BLOCK_RST,
                                       mask, mask);
                if (err < 0)
                        goto exit;

                err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
                                       NFP_NBI_MACX_MAC_BLOCK_RST,
                                       mask, 0);
                if (err < 0)
                        goto exit;
        }

        /* Wait for the reset to propagate */
        usleep_range(60, 100);

        /* Verify all NBI MAC packet buffers have returned */
        for (i = 0; i < 2; i++) {
                if (!nbi[i])
                        continue;

                err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
                if (err < 0)
                        goto exit;
        }

        /* Verify that all NBI/MAC credits have returned */
        for (i = 0; i < 2; i++) {
                if (!nbi[i])
                        continue;

                err = nfp6000_nbi_check_dma_credits(nfp, nbi[i],
                                                    &bpe[i][0], bpes[i]);
                if (err < 0)
                        goto exit;
        }

        /* Soft reset subcomponents relevant to this model */
        err = nfp6000_island_reset(nfp, nbi_mask);
        if (err < 0)
                goto exit;

        err = nfp6000_island_on(nfp, nbi_mask);
        if (err < 0)
                goto exit;

exit:
        /* No need for NBI access anymore... */
        for (i = 0; i < 2; i++) {
                if (nbi[i])
                        nfp_nbi_close(nbi[i]);
        }

        nfp_resource_release(res);

        return err;
}
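/* The mdelay(11) in nfp6000_reset_soft() encodes the worst-case drain
 * arithmetic from the comment above. A small helper making that
 * derivation explicit (illustrative only; the driver hard-codes the
 * rounded result; needs <linux/math64.h> for div_u64()):
 */
static unsigned int nfp6000_dma_drain_ms(void)
{
        const u64 ctm_buffers = 512 * (7 + 2);	/* 4608 CTM bufs max */
        const u32 jumbo_bits = 10 * 1024 * 8;	/* <10kB jumbo frame */
        const u64 nfd_bps = 35000000000ULL;	/* ~35Gbps ToPCI rate */
        u32 pps = div_u64(nfd_bps, jumbo_bits);	/* ~427kpps */
        u64 ms = div_u64(ctm_buffers * 1000, pps);	/* 10.79 -> 10 */

        return ms + 1;				/* round up: 11ms */
}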