static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct smq_policy *mq = to_smq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_alloc.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}
static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = to_cblock(mq->cache_alloc.nr_allocated);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
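/*
 * Both variants report residency as the allocator's nr_allocated count,
 * taken under the policy lock; the second uses an irq-safe spinlock so it
 * can be called from contexts that must not sleep.  They rely on the
 * to_cblock()/from_cblock() conversion helpers.  A minimal sketch of those
 * helpers, assuming dm_cblock_t follows the kernel's __bitwise wrapper
 * convention (the exact definitions may differ):
 */
static inline dm_cblock_t to_cblock(uint32_t b)
{
	/* wrap a plain block number in the opaque type */
	return (__force dm_cblock_t) b;
}

static inline uint32_t from_cblock(dm_cblock_t b)
{
	/* unwrap back to a plain integer for arithmetic */
	return (__force uint32_t) b;
}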
/*
 * Fills result out with a cache block that isn't in use, or returns
 * -ENOSPC.  This does _not_ mark the cblock as allocated, the caller is
 * responsible for that.
 */
static int __find_free_cblock(struct mq_policy *mq, unsigned begin,
			      unsigned end, dm_cblock_t *result,
			      unsigned *last_word)
{
	int r = -ENOSPC;
	unsigned w;

	for (w = begin; w < end; w++) {
		/*
		 * ffz is undefined if no zero exists
		 */
		if (mq->allocation_bitset[w] != ~0UL) {
			*last_word = w;
			*result = to_cblock((w * BITS_PER_LONG) +
					    ffz(mq->allocation_bitset[w]));
			if (from_cblock(*result) < from_cblock(mq->cache_size))
				r = 0;

			break;
		}
	}

	return r;
}
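/*
 * A caller is expected to claim the returned block itself, e.g. with
 * set_bit(), and can feed last_word back in to resume scanning where the
 * previous search stopped.  A hedged sketch of such a caller --
 * find_free_cblock() and the find_free_* fields are illustrative names,
 * not confirmed API:
 */
static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
{
	/* start from the word that satisfied the last search */
	int r = __find_free_cblock(mq, mq->find_free_last_word,
				   mq->find_free_nr_words, result,
				   &mq->find_free_last_word);
	if (r == -ENOSPC && mq->find_free_last_word)
		/* wrap around and rescan the words that were skipped */
		r = __find_free_cblock(mq, 0, mq->find_free_last_word,
				       result, &mq->find_free_last_word);

	if (!r)
		/* __find_free_cblock() does not claim the block; do it here */
		set_bit(from_cblock(*result), mq->allocation_bitset);

	return r;
}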
static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}
static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
{
	return to_cblock(get_index(&mq->cache_alloc, e));
}
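/*
 * The older version recovers the cblock by raw pointer arithmetic against
 * the pool's backing array; the smq version asks the entry allocator,
 * which lets several logical pools share one contiguous entry array.  A
 * minimal sketch of get_index() under that assumption -- the entry_alloc
 * field names here are illustrative, not confirmed:
 */
static unsigned get_index(struct entry_alloc *ea, struct entry *e)
{
	/* position within the whole entry array ... */
	unsigned index = e - ea->es->begin;

	/* ... minus the offset where this allocator's slice starts */
	return index - ea->begin;
}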