/* Attempt (without blocking on the resource itself) to locate and lock a
 * resource.  Returns 0 with res->mutex held, or a negative errno.
 */
static int nfp_resource_try_acquire(struct nfp_cpp *cpp,
				    struct nfp_resource *res,
				    struct nfp_cpp_mutex *dev_mutex)
{
	int err;

	/* The device-level mutex serializes access to the resource table */
	if (nfp_cpp_mutex_lock(dev_mutex))
		return -EINVAL;

	err = nfp_cpp_resource_find(cpp, res);
	if (!err) {
		/* Entry found - try to take the resource's own lock;
		 * on failure the handle is released again.
		 */
		err = nfp_cpp_mutex_trylock(res->mutex);
		if (err)
			nfp_cpp_mutex_free(res->mutex);
	}

	nfp_cpp_mutex_unlock(dev_mutex);
	return err;
}
/** * nfp_resource_acquire() - Acquire a resource handle * @nfp: NFP Device handle * @name: Name of the resource * * NOTE: This function implictly locks the acquired resource * * Return: NFP Resource handle, or ERR_PTR() */ struct nfp_resource *nfp_resource_acquire(struct nfp_device *nfp, const char *name) { struct nfp_cpp *cpp = nfp_device_cpp(nfp); struct nfp_cpp_mutex *mutex; struct nfp_resource *res; u64 addr, size; u32 cpp_id; int err; err = nfp_cpp_resource_acquire(cpp, name, &cpp_id, &addr, &size, &mutex); if (err < 0) return ERR_PTR(err); err = nfp_cpp_mutex_lock(mutex); if (err < 0) { nfp_cpp_mutex_free(mutex); return ERR_PTR(err); } res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) { nfp_cpp_mutex_free(mutex); return ERR_PTR(-ENOMEM); } strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ); res->cpp_id = cpp_id; res->addr = addr; res->size = size; res->mutex = mutex; return res; }
/* Look up @name in the device resource table.
 *
 * On success, returns 0 and fills in the resource's CPP ID, address,
 * size, and (if @resource_mutex is non-NULL) an unlocked handle to the
 * resource's own mutex.  The table mutex (entry 0) is held only for the
 * duration of the scan.
 *
 * Return: 0, -ENOENT if no entry matches, or another negative errno.
 */
static int nfp_cpp_resource_acquire(struct nfp_cpp *cpp, const char *name,
				    u32 *r_cpp, u64 *r_addr, u64 *r_size,
				    struct nfp_cpp_mutex **resource_mutex)
{
	struct nfp_resource_entry_region region;
	struct nfp_resource_entry tmp;
	struct nfp_cpp_mutex *mutex;
	int target, err, i, entries;
	u64 base;
	u32 key;
	u32 cpp_id;

	/* Copy @name into the fixed-width (not necessarily
	 * NUL-terminated) name field, zero-padding the remainder.
	 */
	for (i = 0; i < sizeof(region.name); i++) {
		if (*name != 0)
			region.name[i] = *(name++);
		else
			region.name[i] = 0;
	}

	entries = nfp_cpp_resource_table(cpp, &target, &base, NULL);
	if (entries < 0)
		return entries;

	cpp_id = NFP_CPP_ID(target, 3, 0);	/* Atomic read */

	/* Entry 0 of the table is the table's own mutex, keyed by
	 * NFP_RESOURCE_TABLE_KEY; take it before scanning.
	 */
	key = NFP_RESOURCE_TABLE_KEY;
	mutex = nfp_cpp_mutex_alloc(cpp, target, base, key);
	if (!mutex)
		return -ENOMEM;

	/* Wait for the lock.. */
	err = nfp_cpp_mutex_lock(mutex);
	if (err < 0) {
		nfp_cpp_mutex_free(mutex);
		return err;
	}

	/* Every entry except the table's own uses the CRC of its padded
	 * name as its key.  (BUG FIX: "&region" was mojibake-corrupted
	 * to "(R)ion" here and would not compile.)
	 */
	if (memcmp(region.name,
		   NFP_RESOURCE_TABLE_NAME "\0\0\0\0\0\0\0\0", 8) != 0)
		key = crc32_posix(&region.name[0], sizeof(region.name));

	/* Search for a matching entry */
	for (i = 0; i < entries; i++) {
		u64 addr = base + sizeof(struct nfp_resource_entry) * i;

		err = nfp_cpp_read(cpp, cpp_id, addr, &tmp, sizeof(tmp));
		if (err < 0) {
			/* Unlikely to work if the read failed,
			 * but we should at least try...
			 */
			goto err_unlock;
		}

		if (tmp.mutex.key != key)
			continue;

		/* Found key! */
		if (resource_mutex) {
			*resource_mutex = nfp_cpp_mutex_alloc(cpp, target,
							      addr, key);
			/* BUG FIX: a failed allocation used to be
			 * reported as success, handing the caller a
			 * NULL mutex it would immediately lock.
			 */
			if (!*resource_mutex) {
				err = -ENOMEM;
				goto err_unlock;
			}
		}
		if (r_cpp)
			*r_cpp = NFP_CPP_ID(tmp.region.cpp_target,
					    tmp.region.cpp_action,
					    tmp.region.cpp_token);
		if (r_addr)
			*r_addr = (u64)tmp.region.page_offset << 8;
		if (r_size)
			*r_size = (u64)tmp.region.page_size << 8;

		nfp_cpp_mutex_unlock(mutex);
		nfp_cpp_mutex_free(mutex);
		return 0;
	}

	err = -ENOENT;
err_unlock:
	nfp_cpp_mutex_unlock(mutex);
	nfp_cpp_mutex_free(mutex);
	return err;
}
/**
 * nfp_cpp_resource_add() - Construct a new NFP Resource entry
 * @cpp:	NFP CPP handle
 * @name:	Name of the resource
 * @cpp_id:	NFP CPP ID of the resource
 * @address:	NFP CPP address of the resource
 * @size:	Size, in bytes, of the resource area
 * @resource_mutex: Location to place the resource's mutex
 *
 * NOTE: If resource_mutex is NULL, the mutex of the resource is
 * implicitly unlocked.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_resource_add(struct nfp_cpp *cpp, const char *name,
			 u32 cpp_id, u64 address, u64 size,
			 struct nfp_cpp_mutex **resource_mutex)
{
	int target, err, i, entries, minfree;
	u64 base;
	u32 key;
	struct nfp_resource_entry_region region = {
		.cpp_action = NFP_CPP_ID_ACTION_of(cpp_id),
		.cpp_token = NFP_CPP_ID_TOKEN_of(cpp_id),
		.cpp_target = NFP_CPP_ID_TARGET_of(cpp_id),
		.page_offset = (u32)(address >> 8),
		.page_size = (u32)(size >> 8),
	};
	struct nfp_cpp_mutex *mutex;
	u32 tmp;

	/* Copy @name into the fixed-width (not necessarily
	 * NUL-terminated) name field, zero-padding the remainder.
	 * Note this loop advances @name.
	 */
	for (i = 0; i < sizeof(region.name); i++) {
		if (*name != 0)
			region.name[i] = *(name++);
		else
			region.name[i] = 0;
	}

	entries = nfp_cpp_resource_table(cpp, &target, &base, NULL);
	if (entries < 0)
		return entries;

	cpp_id = NFP_CPP_ID(target, 3, 0);	/* Atomic read */

	/* Entry 0 of the table is the table's own mutex, keyed by
	 * NFP_RESOURCE_TABLE_KEY; take it before scanning.
	 */
	key = NFP_RESOURCE_TABLE_KEY;
	mutex = nfp_cpp_mutex_alloc(cpp, target, base, key);
	if (!mutex)
		return -ENOMEM;

	/* Wait for the lock.. */
	err = nfp_cpp_mutex_lock(mutex);
	if (err < 0) {
		nfp_cpp_mutex_free(mutex);
		return err;
	}

	/* BUG FIX: the key was computed as crc32_posix(name, 8), but the
	 * copy loop above has already advanced @name to its terminator,
	 * so the hash covered the wrong bytes and could never match the
	 * key the lookup path derives from region.name.  Hash the padded
	 * region.name, exactly as nfp_cpp_resource_acquire() does.
	 */
	key = crc32_posix(&region.name[0], sizeof(region.name));

	/* Search for a free entry, or a duplicate.  Entry 0 is the
	 * table's own mutex, so start scanning at index 1.
	 */
	minfree = 0;
	for (i = 1; i < entries; i++) {
		u64 addr = base + sizeof(struct nfp_resource_entry) * i;

		err = nfp_cpp_readl(cpp, cpp_id,
				    addr + offsetof(struct nfp_resource_entry,
						    mutex.key),
				    &tmp);
		if (err < 0) {
			/* Unlikely to work if the read failed,
			 * but we should at least try...
			 */
			goto err_unlock;
		}

		if (tmp == key) {
			/* Duplicate key! */
			err = -EEXIST;
			goto err_unlock;
		}

		if (tmp == 0 && minfree == 0)
			minfree = i;
	}

	/* No available space in the table! */
	if (minfree == 0) {
		/* BUG FIX: this path used to return without releasing
		 * the table mutex, deadlocking all later table users
		 * and leaking the mutex handle.
		 */
		err = -ENOSPC;
		goto err_unlock;
	}

	err = __nfp_resource_entry_init(cpp, minfree, &region,
					resource_mutex);

err_unlock:
	nfp_cpp_mutex_unlock(mutex);
	nfp_cpp_mutex_free(mutex);
	return err;
}