/*
 * Drop the mapping for @oblock entirely: unlink the entry from the
 * policy's queues and hand it back to the cache allocator.
 * The entry must exist; a missing mapping is a programming error.
 */
static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e = h_lookup(&mq->table, oblock);

	BUG_ON(!e);

	del(mq, e);
	free_entry(&mq->cache_alloc, e);
}
/*
 * Set or clear the dirty flag on the entry mapping @oblock.  The entry
 * is pulled off its queue, retagged, and pushed back so it lands on the
 * queue appropriate to its new dirty state.  The mapping must exist.
 */
static void __smq_set_clear_dirty(struct smq_policy *mq, dm_oblock_t oblock, bool set)
{
	struct entry *e = h_lookup(&mq->table, oblock);

	BUG_ON(!e);

	del(mq, e);
	e->dirty = set;
	push(mq, e);
}
/*
 * Remap the entry currently at @current_oblock to @new_oblock, marking
 * it dirty.  Quietly does nothing when no such mapping exists (unlike
 * __remove_mapping, a miss here is tolerated).
 */
static void __force_mapping(struct smq_policy *mq, dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = h_lookup(&mq->table, current_oblock);

	if (!e)
		return;

	del(mq, e);
	e->oblock = new_oblock;
	e->dirty = true;
	push(mq, e);
}
/*
 * smq_lookup: translate an origin block to its cache block.
 *
 * Returns 0 and fills *cblock on a hit, -ENOENT when @oblock is not
 * cached.  The hash-table lookup is done under mq->lock, taken
 * irq-safe (spin_lock_irqsave).
 *
 * NOTE(review): this file contains a SECOND definition of smq_lookup
 * below, built on mutex_trylock() instead of a spinlock.  Two
 * definitions of the same symbol cannot coexist in one translation
 * unit; one of the two must be removed.  Which one depends on the
 * actual type of mq->lock (spinlock_t vs struct mutex) — confirm
 * against the struct smq_policy declaration before deleting.
 */
static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e;

	spin_lock_irqsave(&mq->lock, flags);
	e = h_lookup(&mq->table, oblock);
	if (e) {
		*cblock = infer_cblock(mq, e);
		r = 0;
	} else
		r = -ENOENT;
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
/*
 * smq_lookup (mutex variant): translate an origin block to its cache
 * block without blocking.
 *
 * Returns -EWOULDBLOCK if mq->lock is contended (mutex_trylock fails),
 * otherwise 0 with *cblock filled on a hit or -ENOENT on a miss.
 *
 * NOTE(review): this duplicates the spinlock-based smq_lookup defined
 * above — the same symbol is defined twice in this translation unit,
 * which cannot compile.  The two variants also assume different types
 * for mq->lock (struct mutex here, spinlock_t above).  One of them
 * must be removed; verify which locking scheme struct smq_policy
 * actually declares.
 */
static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = h_lookup(&mq->table, oblock);
	if (e) {
		*cblock = infer_cblock(mq, e);
		r = 0;
	} else
		r = -ENOENT;
	mutex_unlock(&mq->lock);

	return r;
}
/*
 * Record an access to origin block @b in the hotspot queue.
 *
 * On a hit the entry is requeued: a first hit since its hit-bit was
 * cleared jumps it up by hotspot_level_jump levels, repeat hits requeue
 * at the same level (test_and_set_bit supplies both the check and the
 * state change).  On a miss we allocate a fresh hotspot entry, evicting
 * the coldest one (q_pop) if the allocator is exhausted.
 *
 * Returns the hotspot entry for @b's hotspot block, or NULL when no
 * entry could be obtained.
 */
static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b, struct bio *bio)
{
	unsigned bit;
	dm_oblock_t hb = to_hblock(mq, b);
	struct entry *e = h_lookup(&mq->hotspot_table, hb);

	if (e) {
		/* Hit: bump or requeue depending on the hit-bit. */
		stats_level_accessed(&mq->hotspot_stats, e->level);

		bit = get_index(&mq->hotspot_alloc, e);
		q_requeue(&mq->hotspot, e,
			  test_and_set_bit(bit, mq->hotspot_hit_bits) ?
			  0u : mq->hotspot_level_jump);
		return e;
	}

	/* Miss: try to get an entry, recycling the coldest if needed. */
	stats_miss(&mq->hotspot_stats);

	e = alloc_entry(&mq->hotspot_alloc);
	if (!e) {
		e = q_pop(&mq->hotspot);
		if (e) {
			h_remove(&mq->hotspot_table, e);
			bit = get_index(&mq->hotspot_alloc, e);
			clear_bit(bit, mq->hotspot_hit_bits);
		}
	}

	if (e) {
		e->oblock = hb;
		q_push(&mq->hotspot, e);
		h_insert(&mq->hotspot_table, e);
	}

	return e;
}
/*
 * Core mapping decision: look @oblock up in the cache table and fill
 * @result with the policy's verdict.
 *
 * Hit: requeue the entry and report POLICY_HIT with its cblock.
 * Miss: consult should_promote(); either report POLICY_MISS, or (if
 * migration is allowed) start a promotion via insert_in_cache().
 *
 * Returns 0 normally; -EWOULDBLOCK (with result->op = POLICY_MISS)
 * when a promotion is wanted but @can_migrate is false.  The hotspot
 * queue is always updated first so miss statistics feed promotion
 * decisions.
 */
static int map(struct smq_policy *mq, struct bio *bio, dm_oblock_t oblock,
	       bool can_migrate, bool fast_promote,
	       struct policy_locker *locker, struct policy_result *result)
{
	enum promote_result pr;
	struct entry *hs_e = update_hotspot_queue(mq, oblock, bio);
	struct entry *e = h_lookup(&mq->table, oblock);

	if (e) {
		/* Cache hit. */
		stats_level_accessed(&mq->cache_stats, e->level);
		requeue(mq, e);
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(mq, e);
		return 0;
	}

	/* Cache miss. */
	stats_miss(&mq->cache_stats);

	pr = should_promote(mq, hs_e, bio, fast_promote);
	if (pr == PROMOTE_NOT) {
		result->op = POLICY_MISS;
		return 0;
	}

	if (!can_migrate) {
		/* Promotion wanted but caller cannot migrate right now. */
		result->op = POLICY_MISS;
		return -EWOULDBLOCK;
	}

	insert_in_cache(mq, oblock, locker, result, pr);
	return 0;
}