Esempio n. 1
0
/**
 * nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 *
 * Release lock if held by local system.  Extreme care is advised, call only
 * when no local lock users can exist.
 *
 * Return:      0 if the lock was OK, 1 if locked by us, -errno on invalid mutex
 */
int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
			  unsigned long long address)
{
	const u32 mur = NFP_CPP_ID(target, 3, 0);	/* atomic_read */
	const u32 muw = NFP_CPP_ID(target, 4, 0);	/* atomic_write */
	u16 interface = nfp_cpp_interface(cpp);
	u32 lock_word;
	int ret;

	ret = nfp_cpp_mutex_validate(interface, &target, address);
	if (ret)
		return ret;

	/* Inspect the current state of the lock word */
	ret = nfp_cpp_readl(cpp, mur, address, &lock_word);
	if (ret < 0)
		return ret;

	/* Nothing to reclaim unless the local interface is the owner */
	if (nfp_mutex_is_unlocked(lock_word) ||
	    nfp_mutex_owner(lock_word) != interface)
		return 0;

	/* Forcibly release the lock on behalf of this interface */
	ret = nfp_cpp_writel(cpp, muw, address,
			     nfp_mutex_unlocked(interface));
	return ret < 0 ? ret : 1;
}
Esempio n. 2
0
/**
 * nfp_cpp_mutex_alloc() - Create a mutex handle
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 * @key:	32-bit unique key (must match the key at this location)
 *
 * The CPP target:address must point to a 64-bit aligned location, and
 * reserve 64 bits of data at the location for use by the handle.
 *
 * Only target/address pairs that point to entities that support the
 * MU Atomic Engine's CmpAndSwap32 command are supported.
 *
 * Return:	A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
 */
struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
					  unsigned long long address, u32 key)
{
	const u32 mur = NFP_CPP_ID(target, 3, 0);    /* atomic_read */
	u16 interface = nfp_cpp_interface(cpp);
	struct nfp_cpp_mutex *mutex;
	u32 stored_key;

	/* Reject invalid target/address combinations up front */
	if (nfp_cpp_mutex_validate(interface, &target, address))
		return NULL;

	/* The second word of the mutex must already hold the expected key */
	if (nfp_cpp_readl(cpp, mur, address + 4, &stored_key) < 0)
		return NULL;

	if (stored_key != key)
		return NULL;

	mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
	if (!mutex)
		return NULL;

	mutex->cpp = cpp;
	mutex->target = target;
	mutex->address = address;
	mutex->key = key;
	mutex->depth = 0;

	return mutex;
}
Esempio n. 3
0
/* Halt a single microengine (ME) on @island by clearing its context
 * enables and latched error bits, then verify all contexts swapped out.
 * Returns 0 on success, -EIO if the ME is still running, or a negative
 * errno on a failed CPP access.
 */
static int nfp6000_stop_me(struct nfp_device *nfp, int island, int menum)
{
	int err;
	struct nfp_cpp *cpp = nfp_device_cpp(nfp);
	u32 tmp;
	/* XPB CSR access IDs: action 2 = read, action 3 = write, token 1 */
	u32 me_r = NFP_CPP_ID(NFP_CPP_TARGET_CT_XPB, 2, 1);
	u32 me_w = NFP_CPP_ID(NFP_CPP_TARGET_CT_XPB, 3, 1);
	/* CSR base for this ME: island number in bits 31:24 */
	u64 mecsr = (island << 24) | NFP_CT_ME(menum);

	err = nfp_cpp_readl(cpp, me_r, mecsr + NFP_ME_CTXENABLES, &tmp);
	if (err < 0)
		return err;

	/* Disable all contexts and clear latched error/breakpoint bits so
	 * the ME can come to rest cleanly.
	 */
	tmp &= ~(NFP_ME_CTXENABLES_INUSECONTEXTS |
		 NFP_ME_CTXENABLES_CTXENABLES(0xff));
	tmp &= ~NFP_ME_CTXENABLES_CSECCERROR;
	tmp &= ~NFP_ME_CTXENABLES_BREAKPOINT;
	tmp &= ~NFP_ME_CTXENABLES_REGISTERPARITYERR;

	err = nfp_cpp_writel(cpp, me_w, mecsr + NFP_ME_CTXENABLES, tmp);
	if (err < 0)
		return err;

	mdelay(1);	/* 1 millisecond busy-wait */

	/* This may seem like a rushed test, but in the 1 millisecond sleep
	 * (mdelay(1); an earlier comment said "microsecond", which did not
	 * match the code) the ME has executed about a 1000 instructions and
	 * even more during the time it took the host to execute this code
	 * and for the CPP command to reach the CSR in the test read anyway.
	 *
	 * If one of those instructions did not swap out, the code is a very
	 * inefficient single-threaded sequence of instructions which would
	 * be very rare or very specific.
	*/

	err = nfp_cpp_readl(cpp, me_r, mecsr + NFP_ME_ACTCTXSTATUS, &tmp);
	if (err < 0)
		return err;

	/* AB0 set means a context is still active: the ME did not stop */
	if (tmp & NFP_ME_ACTCTXSTATUS_AB0) {
		nfp_err(nfp, "ME%d.%d did not stop after 1000us\n",
			island, menum);
		return -EIO;
	}

	return 0;
}
Esempio n. 4
0
/* Initialize resource table entry @entry from @region's description and
 * take its mutex.  On success the (still locked) mutex handle is either
 * stored in @resource_mutex or, when @resource_mutex is NULL, unlocked
 * and freed.  Returns 0 on success or a negative errno.
 */
static int __nfp_resource_entry_init(struct nfp_cpp *cpp, int entry,
				     const struct nfp_resource_entry_region
				     *region,
				     struct nfp_cpp_mutex **resource_mutex)
{
	struct nfp_cpp_mutex *mutex;
	int target, entries;
	size_t size;
	u32 cpp_id;
	u32 key;
	int err;
	u64 base;

	entries = nfp_cpp_resource_table(cpp, &target, &base, &size);
	if (entries < 0)
		return entries;

	if (entry >= entries)
		return -EINVAL;

	/* Locate this entry within the table */
	base += sizeof(struct nfp_resource_entry) * entry;

	/* Entry 0 (the table itself) uses the well-known key; all other
	 * entries are keyed by a CRC of their region name.
	 */
	key = entry ? crc32_posix(region->name, 8) : NFP_RESOURCE_TABLE_KEY;

	err = nfp_cpp_mutex_init(cpp, target, base, key);
	if (err < 0)
		return err;

	/* The mutex was created locked by us */
	mutex = nfp_cpp_mutex_alloc(cpp, target, base, key);
	if (!mutex)
		return -ENOMEM;

	/* Owner and key fields are already valid; write the region data */
	cpp_id = NFP_CPP_ID(target, 4, 0);  /* Atomic write */
	err = nfp_cpp_write(cpp, cpp_id,
			    base + offsetof(struct nfp_resource_entry, region),
			    region, sizeof(*region));
	if (err < 0) {
		/* Try to unlock in the face of adversity */
		nfp_cpp_mutex_unlock(mutex);
		nfp_cpp_mutex_free(mutex);
		return err;
	}

	if (!resource_mutex) {
		nfp_cpp_mutex_unlock(mutex);
		nfp_cpp_mutex_free(mutex);
		return 0;
	}

	*resource_mutex = mutex;
	return 0;
}
Esempio n. 5
0
static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
{
	char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};
	struct nfp_resource_entry entry;
	u32 cpp_id, key;
	int ret, i;

	cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0);  /* Atomic read */

	strncpy(name_pad, res->name, sizeof(name_pad));

	/* Search for a matching entry */
	key = NFP_RESOURCE_TBL_KEY;
	if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8))
		key = crc32_posix(name_pad, sizeof(name_pad));

	for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
		u64 addr = NFP_RESOURCE_TBL_BASE +
			sizeof(struct nfp_resource_entry) * i;

		ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry));
		if (ret != sizeof(entry))
			return -EIO;

		if (entry.mutex.key != key)
			continue;

		/* Found key! */
		res->mutex =
			nfp_cpp_mutex_alloc(cpp,
					    NFP_RESOURCE_TBL_TARGET, addr, key);
		res->cpp_id = NFP_CPP_ID(entry.region.cpp_target,
					 entry.region.cpp_action,
					 entry.region.cpp_token);
		res->addr = (u64)entry.region.page_offset << 8;
		res->size = (u64)entry.region.page_size << 8;

		return 0;
	}

	return -ENOENT;
}
Esempio n. 6
0
/**
 * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
 * @mutex:	NFP CPP Mutex handle
 *
 * Drops one level of lock depth; the hardware lock word is only released
 * once the outermost hold is dropped.
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
{
	const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
	const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
	struct nfp_cpp *cpp = mutex->cpp;
	u32 stored_key, lock_word;
	u16 iface;
	int err;

	iface = nfp_cpp_interface(cpp);

	/* Recursive hold: just decrement the depth counter */
	if (mutex->depth > 1) {
		mutex->depth--;
		return 0;
	}

	/* Verify that the key marker is not damaged */
	err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &stored_key);
	if (err < 0)
		return err;

	if (stored_key != mutex->key)
		return -EPERM;

	/* Only the local interface may release the lock */
	err = nfp_cpp_readl(cpp, mur, mutex->address, &lock_word);
	if (err < 0)
		return err;

	if (lock_word != nfp_mutex_locked(iface))
		return -EACCES;

	err = nfp_cpp_writel(cpp, muw, mutex->address,
			     nfp_mutex_unlocked(iface));
	if (err < 0)
		return err;

	mutex->depth = 0;
	return 0;
}
Esempio n. 7
0
/**
 * nfp_cpp_resource_init() - Construct a new NFP Resource table
 * @cpp:		NFP CPP handle
 * @mutexp:		Location to place the resource table's mutex
 *
 * NOTE: If mutexp is NULL, the mutex of the resource table is
 * implicitly unlocked.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_resource_init(struct nfp_cpp *cpp, struct nfp_cpp_mutex **mutexp)
{
	u32 cpp_id;
	struct nfp_cpp_mutex *mutex;
	int err;
	int target, i, entries;
	u64 base;
	size_t size;
	struct nfp_resource_entry_region region = {
		.name = { NFP_RESOURCE_TABLE_NAME },
		.cpp_action = NFP_CPP_ACTION_RW,
		.cpp_token  = 1
	};

	entries = nfp_cpp_resource_table(cpp, &target, &base, &size);
	if (entries < 0)
		return entries;

	region.cpp_target = target;
	region.page_offset = base >> 8;
	region.page_size   = size >> 8;

	cpp_id = NFP_CPP_ID(target, 4, 0);  /* Atomic write */

	/* Claims entry 0 and leaves us holding its (locked) mutex */
	err = __nfp_resource_entry_init(cpp, 0, &region, &mutex);
	if (err < 0)
		return err;

	/* We have a lock, initialize entries after 0. */
	for (i = sizeof(struct nfp_resource_entry); i < size; i += 4) {
		err = nfp_cpp_writel(cpp, cpp_id, base + i, 0);
		if (err < 0) {
			/* Don't leak the held table lock on failure */
			nfp_cpp_mutex_unlock(mutex);
			nfp_cpp_mutex_free(mutex);
			return err;
		}
	}

	if (mutexp) {
		*mutexp = mutex;
	} else {
		nfp_cpp_mutex_unlock(mutex);
		nfp_cpp_mutex_free(mutex);
	}

	return 0;
}
Esempio n. 8
0
/* Write @flags to the PCIe monitor control word for port @pci, provided
 * the monitor firmware reports a compatible ABI magic.  An incompatible
 * (or absent) ABI is silently skipped and treated as success.
 */
static int nfp6000_pcie_monitor_set(struct nfp_cpp *cpp, int pci, u32 flags)
{
	u32 cls = NFP_CPP_ID(NFP_CPP_TARGET_CLS, NFP_CPP_ACTION_RW, 0);
	u64 base = (1ULL << 34) | 0x4000;
	u32 magic;
	int err;

	/* Fetch the PCIe Monitor ABI magic word */
	err = nfp_cpp_readl(cpp, cls, base + NFP_MON_PCIE_MAGIC, &magic);
	if (err < 0)
		return err;

	/* Ignore the ABI minor revision in the low nibble */
	magic &= ~0xf;

	if (magic != NFP_MON_PCIE_ABI(0))
		return 0;

	return nfp_cpp_writel(cpp, cls, base + NFP_MON_PCIE_CTL(pci), flags);
}
Esempio n. 9
0
/**
 * nfp_cpp_mutex_init() - Initialize a mutex location
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 * @key:	Unique 32-bit value for this mutex
 *
 * The CPP target:address must point to a 64-bit aligned location, and
 * will initialize 64 bits of data at the location.
 *
 * This creates the initial mutex state, as locked by this
 * nfp_cpp_interface().
 *
 * This function should only be called when setting up
 * the initial lock state upon boot-up of the system.
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
		       int target, unsigned long long address, u32 key)
{
	const u32 muw = NFP_CPP_ID(target, 4, 0);    /* atomic_write */
	u16 interface = nfp_cpp_interface(cpp);
	int ret;

	ret = nfp_cpp_mutex_validate(interface, &target, address);
	if (ret)
		return ret;

	/* Store the key word first... */
	ret = nfp_cpp_writel(cpp, muw, address + 4, key);
	if (ret)
		return ret;

	/* ...then mark the lock word as held by the local interface */
	return nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
}
Esempio n. 10
0
/**
 * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
 * @mutex:	NFP CPP Mutex handle
 *
 * The lock is recursive for this handle: when already held, the depth
 * counter is bumped (up to NFP_MUTEX_DEPTH_MAX) without touching the
 * hardware lock word.
 *
 * Return:      0 if the lock succeeded, -errno on failure
 */
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
{
	const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
	const u32 mus = NFP_CPP_ID(mutex->target, 5, 3);    /* test_set_imm */
	const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
	struct nfp_cpp *cpp = mutex->cpp;
	u32 key, value, tmp;
	int err;

	/* Already held by this handle: just increase the recursion depth */
	if (mutex->depth > 0) {
		if (mutex->depth == NFP_MUTEX_DEPTH_MAX)
			return -E2BIG;
		mutex->depth++;
		return 0;
	}

	/* Verify that the lock marker is not damaged */
	err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		return err;

	if (key != mutex->key)
		return -EPERM;

	/* Compare against the unlocked state, and if true,
	 * write the interface id into the top 16 bits, and
	 * mark as locked.
	 */
	value = nfp_mutex_locked(nfp_cpp_interface(cpp));

	/* We use test_set_imm here, as it implies a read
	 * of the current state, and sets the bits in the
	 * bytemask of the command to 1s. Since the mutex
	 * is guaranteed to be 64-bit aligned, the bytemask
	 * of this 32-bit command is ensured to be 8'b00001111,
	 * which implies that the lower 4 bits will be set to
	 * ones regardless of the initial state.
	 *
	 * Since this is a 'Readback' operation, with no Pull
	 * data, we can treat this as a normal Push (read)
	 * atomic, which returns the original value.
	 */
	err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
	if (err < 0)
		return err;

	/* Was it unlocked? */
	if (nfp_mutex_is_unlocked(tmp)) {
		/* The read value can only be 0x....0000 in the unlocked state.
		 * If there was another contending for this lock, then
		 * the lock state would be 0x....000f
		 */

		/* Write our owner ID into the lock
		 * While not strictly necessary, this helps with
		 * debug and bookkeeping.
		 */
		err = nfp_cpp_writel(cpp, muw, mutex->address, value);
		if (err < 0)
			return err;

		mutex->depth = 1;
		return 0;
	}

	/* Held by someone else -> busy; any other value is a corrupt word */
	return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}
Esempio n. 11
0
/* Read the MIP-described run-time symbol and string tables from external
 * memory and build the driver's nfp_rtsym cache.
 * Returns 0 on success, or a negative errno on failure.
 */
static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
{
	const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
		NFP_ISL_EMEM0;
	u32 strtab_addr, symtab_addr, strtab_size, symtab_size;
	struct nfp_rtsym_entry *rtsymtab;
	struct nfp_rtsym_cache *cache;
	const struct nfp_mip *mip;
	int err, n, size;

	mip = nfp_mip_open(cpp);
	if (!mip)
		return -EIO;

	nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
	nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
	nfp_mip_close(mip);

	if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
		return -ENXIO;

	/* Align to 64 bits */
	symtab_size = round_up(symtab_size, 8);
	strtab_size = round_up(strtab_size, 8);

	rtsymtab = kmalloc(symtab_size, GFP_KERNEL);
	if (!rtsymtab)
		return -ENOMEM;

	/* Cache layout: header, symbol array, then strtab (+1 for NUL) */
	size = sizeof(*cache);
	size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
	size +=	strtab_size + 1;
	cache = kmalloc(size, GFP_KERNEL);
	if (!cache) {
		err = -ENOMEM;
		goto err_free_rtsym_raw;
	}

	cache->num = symtab_size / sizeof(*rtsymtab);
	cache->strtab = (void *)&cache->symtab[cache->num];

	err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
	if (err != symtab_size) {
		/* A short (positive) read must not escape as a success code */
		err = err < 0 ? err : -EIO;
		goto err_free_cache;
	}

	err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
	if (err != strtab_size) {
		err = err < 0 ? err : -EIO;
		goto err_free_cache;
	}
	cache->strtab[strtab_size] = '\0';

	for (n = 0; n < cache->num; n++)
		nfp_rtsym_sw_entry_init(cache, strtab_size,
					&cache->symtab[n], &rtsymtab[n]);

	kfree(rtsymtab);
	nfp_rtsym_cache_set(cpp, cache);
	return 0;

err_free_cache:
	kfree(cache);
err_free_rtsym_raw:
	kfree(rtsymtab);
	return err;
}
Esempio n. 12
0
/* Find resource @name in the resource table and return its location.
 *
 * Takes the resource table lock (entry 0) while scanning.  On success,
 * optionally returns the entry's CPP ID, address, size and an
 * (unacquired) mutex handle for the entry itself.
 *
 * Return: 0 on success, -ENOENT if no entry matches, or -errno.
 */
static int nfp_cpp_resource_acquire(struct nfp_cpp *cpp, const char *name,
				    u32 *r_cpp, u64 *r_addr, u64 *r_size,
				    struct nfp_cpp_mutex **resource_mutex)
{
	struct nfp_resource_entry_region region;
	struct nfp_resource_entry tmp;
	struct nfp_cpp_mutex *mutex;
	int target, err, i, entries;
	u64 base;
	u32 key;
	u32 cpp_id;

	/* Zero-pad the name into the fixed-width region field */
	for (i = 0; i < sizeof(region.name); i++) {
		if (*name != 0)
			region.name[i] = *(name++);
		else
			region.name[i] = 0;
	}

	entries = nfp_cpp_resource_table(cpp, &target, &base, NULL);
	if (entries < 0)
		return entries;

	cpp_id = NFP_CPP_ID(target, 3, 0);  /* Atomic read */

	key = NFP_RESOURCE_TABLE_KEY;
	mutex = nfp_cpp_mutex_alloc(cpp, target, base, key);
	if (!mutex)
		return -ENOMEM;

	/* Wait for the lock.. */
	err = nfp_cpp_mutex_lock(mutex);
	if (err < 0) {
		nfp_cpp_mutex_free(mutex);
		return err;
	}

	/* Search for a matching entry */
	if (memcmp(region.name,
		   NFP_RESOURCE_TABLE_NAME "\0\0\0\0\0\0\0\0", 8) != 0)
		key = crc32_posix(&region.name[0], sizeof(region.name));
	for (i = 0; i < entries; i++) {
		u64 addr = base + sizeof(struct nfp_resource_entry) * i;

		err = nfp_cpp_read(cpp, cpp_id, addr, &tmp, sizeof(tmp));
		if (err < 0) {
			/* Unlikely to work if the read failed,
			 * but we should at least try... */
			nfp_cpp_mutex_unlock(mutex);
			nfp_cpp_mutex_free(mutex);
			return err;
		}

		if (tmp.mutex.key == key) {
			/* Found key! */
			if (resource_mutex) {
				*resource_mutex = nfp_cpp_mutex_alloc(cpp,
							target, addr, key);
				/* Don't report success with a NULL handle */
				if (!*resource_mutex) {
					nfp_cpp_mutex_unlock(mutex);
					nfp_cpp_mutex_free(mutex);
					return -ENOMEM;
				}
			}

			if (r_cpp)
				*r_cpp = NFP_CPP_ID(tmp.region.cpp_target,
						tmp.region.cpp_action,
						tmp.region.cpp_token);

			if (r_addr)
				*r_addr = (u64)tmp.region.page_offset << 8;

			if (r_size)
				*r_size = (u64)tmp.region.page_size << 8;

			nfp_cpp_mutex_unlock(mutex);
			nfp_cpp_mutex_free(mutex);

			return 0;
		}
	}

	nfp_cpp_mutex_unlock(mutex);
	nfp_cpp_mutex_free(mutex);

	return -ENOENT;
}
Esempio n. 13
0
/**
 * nfp_cpp_resource_add() - Construct a new NFP Resource entry
 * @cpp:		NFP CPP handle
 * @name:		Name of the resource
 * @cpp_id:		NFP CPP ID of the resource
 * @address:		NFP CPP address of the resource
 * @size:		Size, in bytes, of the resource area
 * @resource_mutex:	Location to place the resource's mutex
 *
 * NOTE: If resource_mutex is NULL, the mutex of the resource is
 * implicitly unlocked.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_resource_add(struct nfp_cpp *cpp, const char *name,
			 u32 cpp_id, u64 address, u64 size,
			 struct nfp_cpp_mutex **resource_mutex)
{
	int target, err, i, entries, minfree;
	u64 base;
	u32 key;
	struct nfp_resource_entry_region region = {
		.cpp_action = NFP_CPP_ID_ACTION_of(cpp_id),
		.cpp_token  = NFP_CPP_ID_TOKEN_of(cpp_id),
		.cpp_target = NFP_CPP_ID_TARGET_of(cpp_id),
		.page_offset = (u32)(address >> 8),
		.page_size  = (u32)(size >> 8),
	};
	struct nfp_cpp_mutex *mutex;
	u32 tmp;

	/* Copy the name, zero-padded to the full field width.  This loop
	 * advances @name, so later uses must go through region.name.
	 */
	for (i = 0; i < sizeof(region.name); i++) {
		if (*name != 0)
			region.name[i] = *(name++);
		else
			region.name[i] = 0;
	}

	entries = nfp_cpp_resource_table(cpp, &target, &base, NULL);
	if (entries < 0)
		return entries;

	cpp_id = NFP_CPP_ID(target, 3, 0);  /* Atomic read */

	/* Take the resource table lock (entry 0) */
	key = NFP_RESOURCE_TABLE_KEY;
	mutex = nfp_cpp_mutex_alloc(cpp, target, base, key);
	if (!mutex)
		return -ENOMEM;

	/* Wait for the lock.. */
	err = nfp_cpp_mutex_lock(mutex);
	if (err < 0) {
		nfp_cpp_mutex_free(mutex);
		return err;
	}

	/* Search for a free entry, or a duplicate.
	 * Use the padded copy for the CRC: @name has been advanced past
	 * the copied characters by the loop above, so hashing it here
	 * would compute the wrong key (and could read out of bounds).
	 */
	minfree = 0;
	key = crc32_posix(region.name, 8);
	for (i = 1; i < entries; i++) {
		u64 addr = base + sizeof(struct nfp_resource_entry) * i;

		err = nfp_cpp_readl(cpp, cpp_id, addr +
				offsetof(struct nfp_resource_entry, mutex.key),
				&tmp);
		if (err < 0) {
			/* Unlikely to work if the read failed,
			 * but we should at least try... */
			nfp_cpp_mutex_unlock(mutex);
			nfp_cpp_mutex_free(mutex);
			return err;
		}

		if (tmp == key) {
			/* Duplicate key! */
			nfp_cpp_mutex_unlock(mutex);
			nfp_cpp_mutex_free(mutex);
			return -EEXIST;
		}

		if (tmp == 0 && minfree == 0)
			minfree = i;
	}

	/* No available space in the table!  Release the table lock. */
	if (minfree == 0) {
		nfp_cpp_mutex_unlock(mutex);
		nfp_cpp_mutex_free(mutex);
		return -ENOSPC;
	}

	err = __nfp_resource_entry_init(cpp, minfree, &region, resource_mutex);
	nfp_cpp_mutex_unlock(mutex);
	nfp_cpp_mutex_free(mutex);

	return err;
}