Example #1
/**
 * nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 *
 * Release lock if held by local system.  Extreme care is advised, call only
 * when no local lock users can exist.
 *
 * Return: 0 if the lock was not held by this interface, 1 if it was held
 * by us and has now been released, or -errno on an invalid mutex
 */
int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
			  unsigned long long address)
{
	const u32 mur = NFP_CPP_ID(target, 3, 0);	/* atomic_read */
	const u32 muw = NFP_CPP_ID(target, 4, 0);	/* atomic_write */
	u16 interface = nfp_cpp_interface(cpp);
	int err;
	u32 tmp;

	err = nfp_cpp_mutex_validate(interface, &target, address);
	if (err)
		return err;

	/* Check lock */
	err = nfp_cpp_readl(cpp, mur, address, &tmp);
	if (err < 0)
		return err;

	if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface)
		return 0;

	/* Bust the lock */
	err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface));
	if (err < 0)
		return err;

	return 1;
}
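A hypothetical usage sketch (not from this driver): during bring-up, reclaim a mutex that a previous crash of the same host interface may have left locked, acting on the tri-state return value. The target and address are placeholders.

/* Hypothetical sketch: reclaim a possibly stale lock during bring-up.
 * The target and address are placeholders, not values from this driver.
 */
static int example_reclaim_stale_lock(struct nfp_cpp *cpp,
				      unsigned long long addr)
{
	int err;

	err = nfp_cpp_mutex_reclaim(cpp, NFP_CPP_TARGET_MU, addr);
	if (err < 0)
		return err;	/* invalid mutex or CPP I/O error */

	if (err == 1)
		pr_info("reclaimed a lock held by this interface\n");

	return 0;
}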
Example #2
/**
 * nfp_cpp_mutex_init() - Initialize a mutex location
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 * @key:	Unique 32-bit value for this mutex
 *
 * The CPP target:address must point to a 64-bit aligned location, and
 * will initialize 64 bits of data at the location.
 *
 * This creates the initial mutex state, as locked by this
 * nfp_cpp_interface().
 *
 * This function should only be called when setting up
 * the initial lock state upon boot-up of the system.
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
		       int target, unsigned long long address, u32 key)
{
	const u32 muw = NFP_CPP_ID(target, 4, 0);    /* atomic_write */
	u16 interface = nfp_cpp_interface(cpp);
	int err;

	err = nfp_cpp_mutex_validate(interface, &target, address);
	if (err)
		return err;

	err = nfp_cpp_writel(cpp, muw, address + 4, key);
	if (err)
		return err;

	err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
	if (err)
		return err;

	return 0;
}
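A hedged boot-time setup sketch, assuming nfp_cpp_mutex_alloc() and nfp_cpp_mutex_free() from the same CPP mutex API (nfp_cpp_mutex_free() also appears in example #4): initialize the location, which leaves it locked by this interface, then take a handle and unlock it so other endpoints can contend for it.

/* Hypothetical sketch: create a mutex location at boot and leave it
 * unlocked. nfp_cpp_mutex_alloc() is assumed to come from the same CPP
 * mutex API; key and address are caller-supplied placeholders.
 */
static int example_setup_shared_mutex(struct nfp_cpp *cpp, int target,
				      unsigned long long address, u32 key)
{
	struct nfp_cpp_mutex *mutex;
	int err;

	err = nfp_cpp_mutex_init(cpp, target, address, key);
	if (err)
		return err;

	/* nfp_cpp_mutex_init() leaves the lock held by this interface */
	mutex = nfp_cpp_mutex_alloc(cpp, target, address, key);
	if (!mutex)
		return -ENOMEM;

	err = nfp_cpp_mutex_unlock(mutex);
	nfp_cpp_mutex_free(mutex);

	return err;
}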
Example #3
static int nfp6000_stop_me(struct nfp_device *nfp, int island, int menum)
{
	int err;
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	u32 tmp;
	u32 me_r = NFP_CPP_ID(NFP_CPP_TARGET_CT_XPB, 2, 1);
	u32 me_w = NFP_CPP_ID(NFP_CPP_TARGET_CT_XPB, 3, 1);
	u64 mecsr = (island << 24) | NFP_CT_ME(menum);

	err = nfp_cpp_readl(cpp, me_r, mecsr + NFP_ME_CTXENABLES, &tmp);
	if (err < 0)
		return err;

	tmp &= ~(NFP_ME_CTXENABLES_INUSECONTEXTS |
		 NFP_ME_CTXENABLES_CTXENABLES(0xff));
	tmp &= ~NFP_ME_CTXENABLES_CSECCERROR;
	tmp &= ~NFP_ME_CTXENABLES_BREAKPOINT;
	tmp &= ~NFP_ME_CTXENABLES_REGISTERPARITYERR;

	err = nfp_cpp_writel(cpp, me_w, mecsr + NFP_ME_CTXENABLES, tmp);
	if (err < 0)
		return err;

	mdelay(1);

	/* This may seem like a rushed test, but in the 1 ms sleep the ME has
	 * executed on the order of a million instructions, and even more
	 * during the time it took the host to execute this code and for the
	 * CPP command to reach the CSR in the test read anyway.
	 *
	 * If one of those instructions did not swap out, the code is a very
	 * inefficient single-threaded sequence of instructions, which would
	 * be very rare or very specific.
	 */

	err = nfp_cpp_readl(cpp, me_r, mecsr + NFP_ME_ACTCTXSTATUS, &tmp);
	if (err < 0)
		return err;

	if (tmp & NFP_ME_ACTCTXSTATUS_AB0) {
		nfp_err(nfp, "ME%d.%d did not stop after 1000us\n",
			island, menum);
		return -EIO;
	}

	return 0;
}
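A minimal sketch of a per-island wrapper, assuming 12 MEs per ME island (the figure used in the drain-time estimate of the reset path in example #8); this is an illustration, not the driver's actual nfp6000_stop_me_island().

/* Hypothetical sketch: stop every ME in one island by iterating the
 * per-ME helper above. The ME count of 12 per island is an assumption.
 */
static int example_stop_island_mes(struct nfp_device *nfp, int island)
{
	int menum, err;

	for (menum = 0; menum < 12; menum++) {
		err = nfp6000_stop_me(nfp, island, menum);
		if (err < 0)
			return err;
	}

	return 0;
}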
Example #4
/**
 * nfp_cpp_resource_init() - Construct a new NFP Resource table
 * @cpp:		NFP CPP handle
 * @mutexp:		Location to place the resource table's mutex
 *
 * NOTE: If mutexp is NULL, the mutex of the resource table is
 * implicitly unlocked.
 *
 * Return: 0, or -errno on failure
 */
int nfp_cpp_resource_init(struct nfp_cpp *cpp, struct nfp_cpp_mutex **mutexp)
{
	u32 cpp_id;
	struct nfp_cpp_mutex *mutex;
	int err;
	int target, i, entries;
	u64 base;
	size_t size;
	struct nfp_resource_entry_region region = {
		.name = { NFP_RESOURCE_TABLE_NAME },
		.cpp_action = NFP_CPP_ACTION_RW,
		.cpp_token  = 1
	};

	entries = nfp_cpp_resource_table(cpp, &target, &base, &size);
	if (entries < 0)
		return entries;

	region.cpp_target = target;
	region.page_offset = base >> 8;
	region.page_size   = size >> 8;

	cpp_id = NFP_CPP_ID(target, 4, 0);  /* Atomic write */

	err = __nfp_resource_entry_init(cpp, 0, &region, &mutex);
	if (err < 0)
		return err;

	entries = size / sizeof(struct nfp_resource_entry);

	/* We have the lock, initialize all entries after entry 0. */
	for (i = sizeof(struct nfp_resource_entry); i < size; i += 4) {
		err = nfp_cpp_writel(cpp, cpp_id, base + i, 0);
		if (err < 0)
			return err;
	}

	if (mutexp) {
		*mutexp = mutex;
	} else {
		nfp_cpp_mutex_unlock(mutex);
		nfp_cpp_mutex_free(mutex);
	}

	return 0;
}
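A hedged usage sketch: a caller that wants to keep the table locked while it populates entries can pass a mutexp and release the mutex itself; a caller that only needs an empty table can pass NULL and rely on the implicit unlock. The populate step below is a placeholder.

/* Hypothetical sketch: build the resource table, hold its mutex while
 * adding entries, then release it.
 */
static int example_build_resource_table(struct nfp_cpp *cpp)
{
	struct nfp_cpp_mutex *mutex;
	int err;

	err = nfp_cpp_resource_init(cpp, &mutex);
	if (err < 0)
		return err;

	/* ... populate resource entries here while the table is locked ... */

	err = nfp_cpp_mutex_unlock(mutex);
	nfp_cpp_mutex_free(mutex);

	return err;
}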
Example #5
static int nfp6000_pcie_monitor_set(struct nfp_cpp *cpp, int pci, u32 flags)
{
	u32 cls = NFP_CPP_ID(NFP_CPP_TARGET_CLS, NFP_CPP_ACTION_RW, 0);
	u64 base = (1ULL << 34) | 0x4000;
	u32 tmp;
	int err;

	/* Get PCIe Monitor ABI */
	err = nfp_cpp_readl(cpp, cls, base + NFP_MON_PCIE_MAGIC, &tmp);
	if (err < 0)
		return err;

	/* Mask off ABI minor */
	tmp &= ~0xf;

	if (tmp != NFP_MON_PCIE_ABI(0))
		return 0;

	return nfp_cpp_writel(cpp, cls, base + NFP_MON_PCIE_CTL(pci), flags);
}
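A hypothetical caller sketch: restore the ARM PCIe monitor to its default flags on all four PCIe islands, as the soft-reset path in example #8 does per island. Unlike that path, this sketch omits the nfp_power_get() state check.

/* Hypothetical sketch: reset the PCIe monitor flags on every PCIe island.
 * Skips the power-state check performed by the full reset path.
 */
static int example_pcie_monitor_defaults(struct nfp_cpp *cpp)
{
	int pci, err;

	for (pci = 0; pci < 4; pci++) {
		err = nfp6000_pcie_monitor_set(cpp, pci, 0);
		if (err < 0)
			return err;
	}

	return 0;
}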
Example #6
/**
 * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
 * @mutex:	NFP CPP Mutex handle
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
{
	const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
	const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
	struct nfp_cpp *cpp = mutex->cpp;
	u32 key, value;
	u16 interface;
	int err;

	interface = nfp_cpp_interface(cpp);

	if (mutex->depth > 1) {
		mutex->depth--;
		return 0;
	}

	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		return err;

	if (key != mutex->key)
		return -EPERM;

	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
	if (err < 0)
		return err;

	if (value != nfp_mutex_locked(interface))
		return -EACCES;

	err = nfp_cpp_writel(cpp, muw, mutex->address,
			     nfp_mutex_unlocked(interface));
	if (err < 0)
		return err;

	mutex->depth = 0;
	return 0;
}
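A hedged sketch of a short critical section guarded by a CPP mutex handle, pairing the trylock of example #7 with the unlock above; the work done inside is a placeholder.

/* Hypothetical sketch: guard access to a shared resource with a CPP mutex. */
static int example_critical_section(struct nfp_cpp_mutex *mutex)
{
	int err;

	err = nfp_cpp_mutex_trylock(mutex);
	if (err)
		return err;	/* -EBUSY if another endpoint holds the lock */

	/* ... access the shared resource here ... */

	return nfp_cpp_mutex_unlock(mutex);
}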
Example #7
/**
 * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
 * @mutex:	NFP CPP Mutex handle
 *
 * Return:      0 if the lock succeeded, -errno on failure
 */
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
{
	const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
	const u32 mus = NFP_CPP_ID(mutex->target, 5, 3);    /* test_set_imm */
	const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
	struct nfp_cpp *cpp = mutex->cpp;
	u32 key, value, tmp;
	int err;

	if (mutex->depth > 0) {
		if (mutex->depth == NFP_MUTEX_DEPTH_MAX)
			return -E2BIG;
		mutex->depth++;
		return 0;
	}

	/* Verify that the lock marker is not damaged */
	err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		return err;

	if (key != mutex->key)
		return -EPERM;

	/* Compare against the unlocked state, and if true,
	 * write the interface id into the top 16 bits, and
	 * mark as locked.
	 */
	value = nfp_mutex_locked(nfp_cpp_interface(cpp));

	/* We use test_set_imm here, as it implies a read
	 * of the current state, and sets the bits in the
	 * bytemask of the command to 1s. Since the mutex
	 * is guaranteed to be 64-bit aligned, the bytemask
	 * of this 32-bit command is ensured to be 8'b00001111,
	 * which implies that the lower 4 bits will be set to
	 * ones regardless of the initial state.
	 *
	 * Since this is a 'Readback' operation, with no Pull
	 * data, we can treat this as a normal Push (read)
	 * atomic, which returns the original value.
	 */
	err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
	if (err < 0)
		return err;

	/* Was it unlocked? */
	if (nfp_mutex_is_unlocked(tmp)) {
		/* The read value can only be 0x....0000 in the unlocked state.
		 * If there was another contending for this lock, then
		 * the lock state would be 0x....000f
		 */

		/* Write our owner ID into the lock
		 * While not strictly necessary, this helps with
		 * debug and bookkeeping.
		 */
		err = nfp_cpp_writel(cpp, muw, mutex->address, value);
		if (err < 0)
			return err;

		mutex->depth = 1;
		return 0;
	}

	return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}
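A minimal sketch of a blocking acquire built on top of the trylock primitive, polling with a sleep between attempts; the timeout and polling period are arbitrary placeholders, and this is not presented as the driver's own blocking lock.

/* Hypothetical sketch: poll nfp_cpp_mutex_trylock() until the lock is
 * acquired or a caller-supplied timeout expires.
 */
static int example_mutex_lock_timeout(struct nfp_cpp_mutex *mutex,
				      unsigned int timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);
	int err;

	for (;;) {
		err = nfp_cpp_mutex_trylock(mutex);
		if (err != -EBUSY)
			return err;	/* 0 on success, other -errno fatal */

		if (time_after(jiffies, end))
			return -ETIMEDOUT;

		msleep(10);	/* arbitrary polling period */
	}
}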
Example #8
/* Perform a soft reset of the NFP6000:
 *   - Disable traffic ingress
 *   - Verify all NBI MAC packet buffers have returned
 *   - Wait for PCIE DMA Queues to empty
 *   - Stop all MEs
 *   - Clear all PCIe DMA Queues
 *   - Reset MAC NBI gaskets
 *   - Verify that all NBI/MAC buffers/credits have returned
 *   - Soft reset subcomponents relevant to this model
 *     - TODO: Crypto reset
 */
static int nfp6000_reset_soft(struct nfp_device *nfp)
{
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	struct nfp_nbi_dev *nbi[2] = {};
	struct nfp_resource *res;
	int mac_enable[2];
	int i, p, err, nbi_mask = 0;
	u32 bpe[2][32];
	int bpes[2];

	/* Lock out the MAC from any stats updaters,
	 * such as the NSP
	 */
	res = nfp_resource_acquire(nfp, NFP_RESOURCE_MAC_STATISTICS);
	if (!res)
		return -EBUSY;

	for (i = 0; i < 2; i++) {
		u32 tmp;
		int state;

		err = nfp_power_get(nfp, NFP6000_DEVICE_NBI(i, 0), &state);
		if (err < 0) {
			if (err == -ENODEV) {
				nbi[i] = NULL;
				continue;
			}
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON) {
			nbi[i] = NULL;
			continue;
		}

		nbi[i] = nfp_nbi_open(nfp, i);
		if (!nbi[i])
			continue;

		nbi_mask |= BIT(i);

		err = nfp_nbi_mac_regr(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST,
				       &tmp);
		if (err < 0)
			goto exit;

		mac_enable[i] = 0;
		if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST))
			mac_enable[i] |= BIT(0);
		if (!(tmp & NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST))
			mac_enable[i] |= BIT(1);

		/* No MACs at all? Then we don't care. */
		if (mac_enable[i] == 0) {
			nfp_nbi_close(nbi[i]);
			nbi[i] = NULL;
			continue;
		}

		/* Make sure we have the BPE list */
		err = bpe_lookup(nfp, i, &bpe[i][0], ARRAY_SIZE(bpe[i]));
		if (err < 0)
			goto exit;

		bpes[i] = err;
	}

	/* Verify that traffic ingress is disabled */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		for (p = 0; p < 24; p++) {
			u32 r, mask, tmp;

			mask =  NFP_NBI_MACX_ETH_SEG_CMD_CONFIG_ETH_RX_ENA;
			r =  NFP_NBI_MACX_ETH_SEG_CMD_CONFIG(p % 12);

			err = nfp_nbi_mac_regr(nbi[i],
					       NFP_NBI_MACX_ETH(p / 12),
					       r, &tmp);
			if (err < 0) {
				nfp_err(nfp, "Can't verify RX is disabled for port %d.%d\n",
					i, p);
				goto exit;
			}

			if (tmp & mask) {
				nfp_warn(nfp, "HAZARD: RX for traffic was not disabled by firmware for port %d.%d\n",
					 i, p);
			}

			err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_ETH(p / 12),
					       r, mask, 0);
			if (err < 0) {
				nfp_err(nfp, "Can't disable RX traffic for port %d.%d\n",
					i, p);
				goto exit;
			}
		}
	}

	/* Wait for packets to drain from NBI to NFD or to be freed.
	 * Worst case guess is:
	 *      512 pkts per CTM, 12 MEs per CTM, 800MHz clock rate
	 *      ~1000 cycles to sink a single packet.
	 *      512/12 = 42 pkts per ME, therefore 1000*42=42,000 cycles
	 *      42K cycles at 800MHz = 52.5us. Round up to 60us.
	 *
	 * TODO: Account for cut-through traffic.
	 */
	usleep_range(60, 100);

	/* Verify all NBI MAC packet buffers have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
		if (err < 0)
			goto exit;
	}

	/* Wait for PCIE DMA Queues to empty.
	 *
	 *  How we calculate the wait time for DMA Queues to be empty:
	 *
	 *  Max CTM buffers that could be enqueued to one island:
	 *  512 x (7 ME islands + 2 other islands) = 4608 CTM buffers
	 *
	 *  The minimum rate at which NFD would process that ring would
	 *  occur if NFD records the queues as "up" so that it DMAs the
	 *  whole packet to the host, and if the CTM buffers in the ring
	 *  are all associated with jumbo frames.
	 *
	 *  Jumbo frames are <10kB, and NFD 3.0 processes ToPCI jumbo
	 *  frames at ~35Gbps (measured on star fighter card).
	 *  35e9 / (10 x 1024 x 8) = 427kpps.
	 *
	 *  The time to empty a ring holding 4608 packets at 427kpps
	 *  is 10.79ms.
	 *
	 *  To be conservative we round up to nearest whole number, i.e. 11ms.
	 */
	mdelay(11);

	/* Check all PCIE DMA Queues are empty. */
	for (i = 0; i < 4; i++) {
		int state;
		int empty;
		unsigned int subdev = NFP6000_DEVICE_PCI(i,
					NFP6000_DEVICE_PCI_PCI);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON)
			continue;

		err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
		if (err < 0)
			goto exit;

		if (!empty) {
			nfp_err(nfp, "PCI%d DMA queues did not drain\n", i);
			err = -ETIMEDOUT;
			goto exit;
		}

		/* Set ARM PCIe Monitor to defaults */
		err = nfp6000_pcie_monitor_set(cpp, i, 0);
		if (err < 0)
			goto exit;
	}

	/* Stop all MEs */
	for (i = 0; i < 64; i++) {
		err = nfp6000_stop_me_island(nfp, i);
		if (err < 0)
			goto exit;
	}

	/* Verify again that PCIe DMA Queues are now empty */
	for (i = 0; i < 4; i++) {
		int state;
		int empty;
		unsigned int subdev = NFP6000_DEVICE_PCI(i,
					NFP6000_DEVICE_PCI_PCI);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON)
			continue;

		err = nfp6000_check_empty_pcie_dma_queues(nfp, i, &empty);
		if (err < 0)
			goto exit;

		if (!empty) {
			nfp_err(nfp, "PCI%d DMA queue is not empty\n", i);
			err = -ETIMEDOUT;
			goto exit;
		}
	}

	/* Clear all PCIe DMA Queues */
	for (i = 0; i < 4; i++) {
		unsigned int subdev = NFP6000_DEVICE_PCI(i,
					NFP6000_DEVICE_PCI_PCI);
		int state;
		const u32 pci = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_PCIE,
						  3, 0, i + 4);

		err = nfp_power_get(nfp, subdev, &state);
		if (err < 0) {
			if (err == -ENODEV)
				continue;
			goto exit;
		}

		if (state != NFP_DEVICE_STATE_ON)
			continue;

		for (p = 0; p < 256; p++) {
			u32 q = NFP_PCIE_Q(p);

			err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_LO,
					     NFP_QCTLR_STS_LO_RPTR_ENABLE);
			if (err < 0)
				goto exit;

			err = nfp_cpp_writel(cpp, pci, q + NFP_QCTLR_STS_HI,
					     NFP_QCTLR_STS_HI_EMPTY);
			if (err < 0)
				goto exit;
		}
	}

	/* Reset MAC NBI gaskets */
	for (i = 0; i < 2; i++) {
		u32 mask = NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_MPB |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_MPB |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_TX_RST_CORE |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_RX_RST_CORE |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY0_STAT_RST |
			NFP_NBI_MACX_MAC_BLOCK_RST_MAC_HY1_STAT_RST;

		if (!nbi[i])
			continue;

		err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST, mask, mask);
		if (err < 0)
			goto exit;

		err = nfp_nbi_mac_regw(nbi[i], NFP_NBI_MACX_CSR,
				       NFP_NBI_MACX_MAC_BLOCK_RST, mask, 0);
		if (err < 0)
			goto exit;
	}

	/* Wait for the reset to propagate */
	usleep_range(60, 100);

	/* Verify all NBI MAC packet buffers have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_mac_check_freebufs(nfp, nbi[i]);
		if (err < 0)
			goto exit;
	}

	/* Verify that all NBI/MAC credits have returned */
	for (i = 0; i < 2; i++) {
		if (!nbi[i])
			continue;

		err = nfp6000_nbi_check_dma_credits(nfp, nbi[i],
						    &bpe[i][0], bpes[i]);
		if (err < 0)
			goto exit;
	}

	/* Soft reset subcomponents relevant to this model */
	err = nfp6000_island_reset(nfp, nbi_mask);
	if (err < 0)
		goto exit;

	err = nfp6000_island_on(nfp, nbi_mask);
	if (err < 0)
		goto exit;

exit:
	/* No need for NBI access anymore. */
	for (i = 0; i < 2; i++) {
		if (nbi[i])
			nfp_nbi_close(nbi[i]);
	}

	nfp_resource_release(res);

	return err;
}
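A hypothetical caller sketch: run the soft reset and retry once if the PCIe DMA queues failed to drain (-ETIMEDOUT). The retry policy is illustrative only, not the driver's actual behaviour.

/* Hypothetical sketch: retry the soft reset once on a DMA-queue drain
 * timeout. The single-retry policy is an assumption for illustration.
 */
static int example_reset_with_retry(struct nfp_device *nfp)
{
	int err;

	err = nfp6000_reset_soft(nfp);
	if (err == -ETIMEDOUT)
		err = nfp6000_reset_soft(nfp);

	return err;
}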