static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
        BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
        BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));

        clear_bit(from_cblock(cblock), mq->allocation_bitset);
        mq->nr_cblocks_allocated--;
}
/*
 * Mark cache blocks allocated or not in the bitset.
 */
static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
        BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
        BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));

        set_bit(from_cblock(cblock), mq->allocation_bitset);
        mq->nr_cblocks_allocated++;
}
static bool clean_target_met(struct smq_policy *mq, bool critical)
{
        if (critical) {
                /*
                 * Cache entries may not be populated.  So we cannot rely on
                 * the size of the clean queue.
                 */
                unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
                unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_CRITICAL / 100u;

                return nr_clean >= target;
        } else
                return !q_size(&mq->dirty);
}
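/*
 * For a feel of the critical-path arithmetic above, the stand-alone sketch
 * below can be compiled on its own.  The 5% value for CLEAN_TARGET_CRITICAL
 * and the example cache sizes are assumptions made purely for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define CLEAN_TARGET_CRITICAL 5u        /* assumed value, illustration only */

static bool critical_clean_target_met(unsigned cache_size, unsigned nr_dirty)
{
        unsigned nr_clean = cache_size - nr_dirty;
        unsigned target = cache_size * CLEAN_TARGET_CRITICAL / 100u;

        return nr_clean >= target;
}

int main(void)
{
        /* 1000-block cache, 960 dirty: 40 clean blocks vs a target of 50. */
        printf("met: %d\n", critical_clean_target_met(1000, 960));     /* 0 */

        /* 1000-block cache, 900 dirty: 100 clean blocks vs a target of 50. */
        printf("met: %d\n", critical_clean_target_met(1000, 900));     /* 1 */

        return 0;
}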
/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
        struct entry *e = ep->entries + from_cblock(cblock);

        list_del_init(&e->list);
        INIT_HLIST_NODE(&e->hlist);
        ep->nr_allocated++;

        return e;
}
static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
{
        struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

        if (!e || !e->allocated)
                return -ENODATA;

        del(mq, e);
        free_entry(&mq->cache_alloc, e);

        return 0;
}
static void end_cache_period(struct smq_policy *mq)
{
        if (time_after(jiffies, mq->next_cache_period)) {
                clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));

                q_redistribute(&mq->dirty);
                q_redistribute(&mq->clean);
                stats_reset(&mq->cache_stats);

                mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
        }
}
/*
 * Fills result out with a cache block that isn't in use, or returns
 * -ENOSPC.  This does _not_ mark the cblock as allocated, the caller is
 * responsible for that.
 */
static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
                              dm_cblock_t *result, unsigned *last_word)
{
        int r = -ENOSPC;
        unsigned w;

        for (w = begin; w < end; w++) {
                /*
                 * ffz is undefined if no zero exists
                 */
                if (mq->allocation_bitset[w] != ~0UL) {
                        *last_word = w;
                        *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
                        if (from_cblock(*result) < from_cblock(mq->cache_size))
                                r = 0;

                        break;
                }
        }

        return r;
}
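/*
 * A successful search is expected to be paired with alloc_cblock() so the
 * block is actually claimed.  The wrapper below is only a sketch of how a
 * caller might do that: the function name and the find_free_last_word /
 * find_free_nr_words fields are assumptions, not part of the excerpts above.
 */
static int find_and_alloc_cblock(struct mq_policy *mq, dm_cblock_t *result)
{
        int r;

        if (!any_free_cblocks(mq))
                return -ENOSPC;

        /* Scan from the word that last yielded a free bit, then wrap around. */
        r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words,
                               result, &mq->find_free_last_word);
        if (r == -ENOSPC)
                r = __find_free_cblock(mq, 0, mq->find_free_last_word,
                                       result, &mq->find_free_last_word);

        if (!r)
                alloc_cblock(mq, *result);      /* mark the block as in use */

        return r;
}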
static void requeue(struct smq_policy *mq, struct entry *e)
{
        struct entry *sentinel;

        if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
                if (e->dirty) {
                        sentinel = writeback_sentinel(mq, e->level);
                        q_requeue_before(&mq->dirty, sentinel, e, 1u);
                } else {
                        sentinel = demote_sentinel(mq, e->level);
                        q_requeue_before(&mq->clean, sentinel, e, 1u);
                }
        }
}
static int smq_load_mapping(struct dm_cache_policy *p,
                            dm_oblock_t oblock, dm_cblock_t cblock,
                            uint32_t hint, bool hint_valid)
{
        struct smq_policy *mq = to_smq_policy(p);
        struct entry *e;

        e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
        e->oblock = oblock;
        e->dirty = false;       /* this gets corrected in a minute */
        e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : 1;
        push(mq, e);

        return 0;
}
static void check_generation(struct mq_policy *mq)
{
        unsigned total = 0, nr = 0, count = 0, level;
        struct list_head *head;
        struct entry *e;

        if ((mq->hit_count >= mq->generation_period) &&
            (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {

                mq->hit_count = 0;
                mq->generation++;

                for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
                        head = mq->cache.qs + level;
                        list_for_each_entry(e, head, list) {
                                nr++;
                                total += e->hit_count;

                                if (++count >= MAX_TO_AVERAGE)
                                        break;
                        }
                }

                /* Use the (rounded up) average hit count sampled above as the new promotion threshold. */
                mq->promote_threshold = nr ? total / nr : 1;
                if (mq->promote_threshold * nr < total)
                        mq->promote_threshold++;
        }
}
/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
        struct entry *e = ep->entries + from_cblock(cblock);

        return !hlist_unhashed(&e->hlist) ? e : NULL;
}
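/*
 * Since epool_find() returns NULL for a free slot, it gives callers a cheap
 * way to ask whether a particular cblock is currently mapped.  The helper
 * below is hypothetical, shown only to illustrate the calling convention.
 */
static bool cblock_in_use(struct entry_pool *ep, dm_cblock_t cblock)
{
        return epool_find(ep, cblock) != NULL;
}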
static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
                                          sector_t origin_size,
                                          sector_t cache_block_size)
{
        unsigned i;
        unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
        unsigned total_sentinels = 2u * nr_sentinels_per_queue;
        struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

        if (!mq)
                return NULL;

        init_policy_functions(mq);
        mq->cache_size = cache_size;
        mq->cache_block_size = cache_block_size;

        calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
                            &mq->hotspot_block_size, &mq->nr_hotspot_blocks);

        mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
        mq->hotspot_level_jump = 1u;
        if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
                DMERR("couldn't initialize entry space");
                goto bad_pool_init;
        }

        init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
        for (i = 0; i < nr_sentinels_per_queue; i++)
                get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;

        init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
        for (i = 0; i < nr_sentinels_per_queue; i++)
                get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;

        init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
                       total_sentinels + mq->nr_hotspot_blocks);

        init_allocator(&mq->cache_alloc, &mq->es,
                       total_sentinels + mq->nr_hotspot_blocks,
                       total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));

        mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
        if (!mq->hotspot_hit_bits) {
                DMERR("couldn't allocate hotspot hit bitset");
                goto bad_hotspot_hit_bits;
        }
        clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);

        if (from_cblock(cache_size)) {
                mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
                if (!mq->cache_hit_bits) {
                        DMERR("couldn't allocate cache hit bitset");
                        goto bad_cache_hit_bits;
                }
                clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
        } else
                mq->cache_hit_bits = NULL;

        mq->tick = 0;
        spin_lock_init(&mq->lock);

        q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
        mq->hotspot.nr_top_levels = 8;
        mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
                                           from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);

        q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
        q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);

        stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
        stats_init(&mq->cache_stats, NR_CACHE_LEVELS);

        if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
                goto bad_alloc_table;

        if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
                goto bad_alloc_hotspot_table;

        sentinels_init(mq);
        mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;

        mq->next_hotspot_period = jiffies;
        mq->next_cache_period = jiffies;

        return &mq->policy;

bad_alloc_hotspot_table:
        h_exit(&mq->table);
bad_alloc_table:
        free_bitset(mq->cache_hit_bits);
bad_cache_hit_bits:
        free_bitset(mq->hotspot_hit_bits);
bad_hotspot_hit_bits:
        space_exit(&mq->es);
bad_pool_init:
        kfree(mq);

        return NULL;
}
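/*
 * Sketch of how a create function such as smq_create() is typically exposed
 * as a cache policy type and registered at module init.  The version numbers
 * and hint size below are placeholders for illustration, not necessarily the
 * real module's values.
 */
static struct dm_cache_policy_type smq_policy_type = {
        .name = "smq",
        .version = {1, 0, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = smq_create
};

static int __init smq_init(void)
{
        return dm_cache_policy_register(&smq_policy_type);
}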
static bool any_free_cblocks(struct mq_policy *mq)
{
        return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
}