/* Determine if a given PCIe's DMA Queues are empty */
static int nfp6000_check_empty_pcie_dma_queues(struct nfp_device *nfp,
					       int pci_island, int *empty)
{
	u32 tmp;
	const int dma_low = 128, dma_med = 64, dma_hi = 64;
	int hi, med, low, ok, err;
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	const u32 pci = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_PCIE,
					  2, 0, pci_island + 4);

	ok = 1;

	err = nfp_cpp_readl(cpp, pci,
			    NFP_PCIE_DMA + NFP_PCIE_DMA_QSTS0_TOPCI, &tmp);
	if (err < 0)
		return err;
	low = NFP_PCIE_DMA_DMAQUEUESTATUS0_DMA_LO_AVAIL_of(tmp);

	err = nfp_cpp_readl(cpp, pci,
			    NFP_PCIE_DMA + NFP_PCIE_DMA_QSTS1_TOPCI, &tmp);
	if (err < 0)
		return err;
	med = NFP_PCIE_DMA_DMAQUEUESTATUS1_DMA_MED_AVAIL_of(tmp);
	hi = NFP_PCIE_DMA_DMAQUEUESTATUS1_DMA_HI_AVAIL_of(tmp);

	/* The queues are empty when all descriptor credits are available */
	ok &= low == dma_low;
	ok &= med == dma_med;
	ok &= hi == dma_hi;

	*empty = ok;

	return 0;
}
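/* Illustrative sketch, not part of the original driver: one way a caller
 * could poll nfp6000_check_empty_pcie_dma_queues() until the queues drain
 * or a deadline passes. The helper name and the 100us poll interval are
 * assumptions made for this example only.
 */
static __maybe_unused int
nfp6000_wait_empty_pcie_dma_queues(struct nfp_device *nfp, int pci_island,
				   unsigned int timeout_ms)
{
	unsigned int elapsed_us = 0;
	int empty, err;

	do {
		err = nfp6000_check_empty_pcie_dma_queues(nfp, pci_island,
							  &empty);
		if (err < 0)
			return err;
		if (empty)
			return 0;

		usleep_range(100, 200);
		elapsed_us += 100;
	} while (elapsed_us < timeout_ms * 1000);

	return -ETIMEDOUT;
}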
static struct nfp_hwinfo *
hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
{
	struct nfp_hwinfo *header;
	struct nfp_resource *res;
	u64 cpp_addr;
	u32 cpp_id;
	int err;
	u8 *db;

	res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
	if (!IS_ERR(res)) {
		cpp_id = nfp_resource_cpp_id(res);
		cpp_addr = nfp_resource_address(res);
		*cpp_size = nfp_resource_size(res);

		nfp_resource_release(res);

		if (*cpp_size < HWINFO_SIZE_MIN)
			return NULL;
	} else if (PTR_ERR(res) == -ENOENT) {
		/* Try getting the HWInfo table from the 'classic' location */
		cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
					   NFP_CPP_ACTION_RW, 0, 1);
		cpp_addr = 0x30000;
		*cpp_size = 0x0e000;
	} else {
		return NULL;
	}

	db = kmalloc(*cpp_size + 1, GFP_KERNEL);
	if (!db)
		return NULL;

	err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
	if (err != *cpp_size)
		goto exit_free;

	header = (void *)db;
	if (nfp_hwinfo_is_updating(header))
		goto exit_free;

	if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) {
		nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n",
			le32_to_cpu(header->version));
		goto exit_free;
	}

	/* NULL-terminate for safety */
	db[*cpp_size] = '\0';

	return (void *)db;
exit_free:
	kfree(db);
	return NULL;
}
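/* Illustrative sketch of consuming the fetched table, mirroring the
 * upstream nfp driver's nfp_hwinfo_lookup(). It assumes the v2 table body
 * is a run of NUL-terminated "key\0value\0" string pairs (which is why the
 * fetch above NUL-terminates the buffer), that struct nfp_hwinfo exposes
 * 'size' and a trailing 'data[]' member, and that the final u32 of the
 * table is a CRC rather than string data.
 */
static __maybe_unused const char *
hwinfo_lookup_example(struct nfp_hwinfo *hwinfo, const char *lookup)
{
	const char *key, *val, *end;

	if (!hwinfo || !lookup)
		return NULL;

	/* Exclude the trailing CRC word from the string walk */
	end = hwinfo->data + le32_to_cpu(hwinfo->size) - sizeof(u32);

	for (key = hwinfo->data; *key && key < end;
	     key = val + strlen(val) + 1) {
		val = key + strlen(key) + 1;
		if (strcmp(key, lookup) == 0)
			return val;
	}

	return NULL;
}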
static int bpe_lookup(struct nfp_device *nfp, int nbi, u32 *bpe, int bpe_max)
{
	int err, i;
	const struct nfp_rtsym *sym;
	u32 id, tmp;
	u32 __iomem *ptr;
	struct nfp_cpp_area *area;
	char buff[] = "nbi0_dma_bpe_credits";

	/* Encode the NBI number into the symbol name */
	buff[3] += nbi;

	sym = nfp_rtsym_lookup(nfp, buff);
	if (!sym) {
		nfp_info(nfp, "%s: Symbol not present\n", buff);
		return 0;
	}

	id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
	area = nfp_cpp_area_alloc_acquire(nfp_device_cpp(nfp), id,
					  sym->addr, sym->size);
	if (IS_ERR_OR_NULL(area)) {
		nfp_err(nfp, "%s: Can't acquire area\n", buff);
		return area ? PTR_ERR(area) : -ENOMEM;
	}

	ptr = nfp_cpp_area_iomem(area);
	if (IS_ERR_OR_NULL(ptr)) {
		nfp_err(nfp, "%s: Can't map area\n", buff);
		err = ptr ? PTR_ERR(ptr) : -ENOMEM;
		goto exit;
	}

	tmp = readl(ptr++);
	if (!BPECFG_MAGIC_CHECK(tmp)) {
		nfp_err(nfp, "%s: Magic value (0x%08x) unrecognized\n",
			buff, tmp);
		err = -EINVAL;
		goto exit;
	}

	if (BPECFG_MAGIC_COUNT(tmp) > bpe_max) {
		nfp_err(nfp, "%s: Magic count (%d) too large (> %d)\n",
			buff, BPECFG_MAGIC_COUNT(tmp), bpe_max);
		err = -EINVAL;
		goto exit;
	}

	for (i = 0; i < bpe_max; i++)
		bpe[i] = readl(ptr++);

	err = BPECFG_MAGIC_COUNT(tmp);

exit:
	nfp_cpp_area_release_free(area);
	return err;
}
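/* Usage sketch, illustrative only: how a caller might fetch NBI0's BPE
 * credit list, as nfp6000_reset_soft() below does. A negative return is
 * an error; zero or a positive count is the number of valid entries.
 */
static __maybe_unused int bpe_lookup_example(struct nfp_device *nfp)
{
	u32 bpe[32];
	int bpes;

	bpes = bpe_lookup(nfp, 0, bpe, ARRAY_SIZE(bpe));
	if (bpes < 0)
		return bpes;	/* lookup, acquire, or mapping failed */

	/* bpe[0..bpes-1] now holds the buffer pool entry credits */
	return 0;
}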
/**
 * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
 * @cpp:	NFP CPP handle
 * @name:	Symbol name
 * @error:	Pointer to error code (optional)
 *
 * Lookup a symbol, map, read it and return its value. The value of the
 * symbol will be interpreted as a simple little-endian unsigned value.
 * The symbol can be 4 or 8 bytes in size.
 *
 * Return: value read, on error sets the error and returns ~0ULL.
 */
u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
{
	const struct nfp_rtsym *sym;
	u32 val32, id;
	u64 val;
	int err;

	sym = nfp_rtsym_lookup(cpp, name);
	if (!sym) {
		err = -ENOENT;
		goto exit;
	}

	id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);

	switch (sym->size) {
	case 4:
		err = nfp_cpp_readl(cpp, id, sym->addr, &val32);
		val = val32;
		break;
	case 8:
		err = nfp_cpp_readq(cpp, id, sym->addr, &val);
		break;
	default:
		nfp_err(cpp,
			"rtsym '%s' unsupported or non-scalar size: %lld\n",
			name, sym->size);
		err = -EINVAL;
		break;
	}

	/* nfp_cpp_read{l,q}() return the number of bytes transferred */
	if (err == sym->size)
		err = 0;
	else if (err >= 0)
		err = -EIO;
exit:
	if (error)
		*error = err;

	if (err)
		return ~0ULL;
	return val;
}
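/* Usage sketch, illustrative only; the symbol name "_fw_version" is a
 * made-up example, not a real firmware symbol. Scalar rtsyms are read
 * in one call, with the error code returned out-of-band.
 */
static __maybe_unused u64 rtsym_read_example(struct nfp_cpp *cpp)
{
	int err;
	u64 ver;

	ver = nfp_rtsym_read_le(cpp, "_fw_version", &err);
	if (err)
		return 0;	/* symbol missing, wrong size, or I/O error */

	return ver;
}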
/* Perform a soft reset of the NFP6000:
 * - Disable traffic ingress
 * - Verify all NBI MAC packet buffers have returned
 * - Wait for PCIE DMA Queues to empty
 * - Stop all MEs
 * - Clear all PCIe DMA Queues
 * - Reset MAC NBI gaskets
 * - Verify that all NBI/MAC buffers/credits have returned
 * - Soft reset subcomponents relevant to this model
 * - TODO: Crypto reset
 */
static int nfp6000_reset_soft(struct nfp_device *nfp)
{
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	struct nfp_nbi_dev *nbi[2] = {};
	struct nfp_resource *res;
	int mac_enable[2];
	int i, p, err, nbi_mask = 0;
	u32 bpe[2][32];
	int bpes[2];

	/* Lock out the MAC from any stats updaters,
	 * such as the NSP
	 */
	res = nfp_resource_acquire(nfp, NFP_RESOURCE_MAC_STATISTICS);
	if (!res)
		return -EBUSY;

	for (i = 0; i < 2; i++) {
		u32 tmp;
		int state;

		err = nfp_power_get(nfp, NFP6000_DEVICE_NBI(i, 0), &state);
		if (err < 0) {
			if (err == -ENODEV) {
				nbi[i] = NULL;
				continue;
			}
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON) {
			nbi[i] = NULL;
			continue;
		}

		nbi[i] = nfp_nbi_open(nfp, i);
		if (!nbi[i])
			continue;

		nbi_mask |= BIT(i);

		err = nfp_nbi_mac_regr(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST, &tmp);
		if (err < 0)
			goto exit;

		mac_enable[i] = 0;
		if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST))
			mac_enable[i] |= BIT(0);
		if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST))
			mac_enable[i] |= BIT(1);

		/* No MACs at all? Then we don't care. */
		if (mac_enable[i] == 0) {
			nfp_nbi_close(nbi[i]);
			nbi[i] = NULL;
			continue;
		}

		/* Make sure we have the BPE list */
		err = bpe_lookup(nfp, i, &bpe[i][0], ARRAY_SIZE(bpe[i]));
		if (err < 0)
			goto exit;

		bpes[i] = err;
	}

	/* Verify that traffic ingress is disabled */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		for (p = 0; p < 24; p++) {
			u32 r, mask, tmp;

			mask = NFP_NBI_MACX_ETH_SEG_CMD_CONFIG_ETH_RX_ENA;
			r = NFP_NBI_MACX_ETH_SEG_CMD_CONFIG(p % 12);

			err = nfp_nbi_mac_regr(nbi[i],
					       NFP_NBI_MACX_ETH(p / 12),
					       r, &tmp);
			if (err < 0) {
				nfp_err(nfp, "Can't verify RX is disabled for port %d.%d\n",
					i, p);
				goto exit;
			}

			if (tmp & mask) {
				nfp_warn(nfp, "HAZARD: RX for traffic was not disabled by firmware for port %d.%d\n",
					 i, p);
			}

			err = nfp_nbi_mac_regw(nbi[i],
					       NFP_NBI_MACX_ETH(p / 12),
					       r, mask, 0);
			if (err < 0) {
				nfp_err(nfp, "Can't disable RX traffic for port %d.%d\n",
					i, p);
				goto exit;
			}
		}
	}

	/* Wait for packets to drain from NBI to NFD or to be freed.
	 * Worst case guess is:
	 *   512 pkts per CTM, 12 MEs per CTM, 800MHz clock rate,
	 *   ~1000 cycles to sink a single packet.
	 *   512/12 = 42 pkts per ME, therefore 1000*42 = 42,000 cycles.
	 *   42K cycles at 800MHz = 52.5us. Round up to 60us.
	 *
	 * TODO: Account for cut-through traffic.
	 */
	usleep_range(60, 100);

	/* Verify all NBI MAC packet buffers have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
		if (err < 0)
			goto exit;
	}

	/* Wait for PCIE DMA Queues to empty.
	 *
	 * How we calculate the wait time for DMA Queues to be empty:
	 *
	 * Max CTM buffers that could be enqueued to one island:
	 *   512 x (7 ME islands + 2 other islands) = 4608 CTM buffers
	 *
	 * The minimum rate at which NFD would process that ring would
	 * occur if NFD records the queues as "up" so that it DMAs the
	 * whole packet to the host, and if the CTM buffers in the ring
	 * are all associated with jumbo frames.
	 *
	 * Jumbo frames are <10kB, and NFD 3.0 processes ToPCI jumbo
	 * frames at ~35Gbps (measured on star fighter card).
	 *   35e9 / (10 x 1024 x 8) = 427kpps.
	 *
	 * The time to empty a ring holding 4608 packets at 427kpps
	 * is 10.79ms.
	 *
	 * To be conservative we round up to the nearest whole number,
	 * i.e. 11ms.
	 */
	mdelay(11);

	/* Check all PCIE DMA Queues are empty. */
	for (i = 0; i < 4; i++) {
		int state;
		int empty;
		unsigned int subdev =
			NFP6000_DEVICE_PCI(i, NFP6000_DEVICE_PCI_PCI);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}
		if (state != NFP_DEVICE_STATE_ON)
			continue;

		err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
		if (err < 0)
			goto exit;

		if (!empty) {
			nfp_err(nfp, "PCI%d DMA queues did not drain\n", i);
			err = -ETIMEDOUT;
			goto exit;
		}

		/* Set ARM PCIe Monitor to defaults */
		err = nfp6000_pcie_monitor_set(cpp, i, 0);
		if (err < 0)
			goto exit;
	}

	/* Stop all MEs */
	for (i = 0; i < 64; i++) {
		err = nfp6000_stop_me_island(nfp, i);
		if (err < 0)
			goto exit;
	}

	/* Verify again that PCIe DMA Queues are now empty */
	for (i = 0; i < 4; i++) {
		int state;
		int empty;
		unsigned int subdev =
			NFP6000_DEVICE_PCI(i, NFP6000_DEVICE_PCI_PCI);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}
		if (state != NFP_DEVICE_STATE_ON)
			continue;

		err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
		if (err < 0)
			goto exit;

		if (!empty) {
			nfp_err(nfp, "PCI%d DMA queue is not empty\n", i);
			err = -ETIMEDOUT;
			goto exit;
		}
	}

	/* Clear all PCIe DMA Queues */
	for (i = 0; i < 4; i++) {
		unsigned int subdev =
			NFP6000_DEVICE_PCI(i, NFP6000_DEVICE_PCI_PCI);
		int state;
		const u32 pci = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_PCIE,
						  3, 0, i + 4);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}
		if (state != NFP_DEVICE_STATE_ON)
			continue;

		for (p = 0; p < 256; p++) {
			u32 q = NFP_PCIE_Q(p);

			err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_LO,
					     NFP_QCTLR_STS_LO_RPTR_ENABLE);
			if (err < 0)
				goto exit;

			err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_HI,
					     NFP_QCTLR_STS_HI_EMPTY);
			if (err < 0)
				goto exit;
		}
	}

	/* Reset MAC NBI gaskets */
	for (i = 0; i < 2; i++) {
		u32 mask = NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_MPB |
			   NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_MPB |
			   NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_CORE |
			   NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_CORE |
			   NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST |
			   NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST;

		if (!nbi[i])
			continue;

		err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST,
				       mask, mask);
		if (err < 0)
			goto exit;

		err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST,
				       mask, 0);
		if (err < 0)
			goto exit;
	}

	/* Wait for the reset to propagate */
	usleep_range(60, 100);

	/* Verify all NBI MAC packet buffers have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
		if (err < 0)
			goto exit;
	}

	/* Verify that all NBI/MAC credits have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_check_dma_credits(nfp, nbi[i],
						    &bpe[i][0], bpes[i]);
		if (err < 0)
			goto exit;
	}

	/* Soft reset subcomponents relevant to this model */
	err = nfp6000_island_reset(nfp, nbi_mask);
	if (err < 0)
		goto exit;

	err = nfp6000_island_on(nfp, nbi_mask);
	if (err < 0)
		goto exit;

exit:
	/* No need for NBI access anymore... */
	for (i = 0; i < 2; i++) {
		if (nbi[i])
			nfp_nbi_close(nbi[i]);
	}

	nfp_resource_release(res);

	return err;
}
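/* Illustrative restatement, not part of the driver, of the drain-time
 * arithmetic in the comments above. The macro names are invented for this
 * sketch; the figures are the ones quoted in the comments.
 */
#define EXAMPLE_MAX_CTM_BUFFERS	(512 * (7 + 2))	/* 4608 buffers */
#define EXAMPLE_NFD_JUMBO_KPPS	427		/* ~35Gbps / 10kB frames */
/* ceil(4608 / 427) = 11, matching the mdelay(11) above */
#define EXAMPLE_DMA_DRAIN_MS \
	DIV_ROUND_UP(EXAMPLE_MAX_CTM_BUFFERS, EXAMPLE_NFD_JUMBO_KPPS)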