static int __smq_writeback_work(struct smq_policy *mq, dm_oblock_t *oblock,
				dm_cblock_t *cblock, bool critical_only)
{
	struct entry *e = NULL;
	bool target_met = clean_target_met(mq, critical_only);

	if (critical_only)
		/*
		 * Always try and keep the bottom level clean.
		 */
		e = pop_old(mq, &mq->dirty, target_met ? 1u : mq->dirty.nr_levels);
	else
		e = pop_old(mq, &mq->dirty, mq->dirty.nr_levels);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(mq, e);
	e->dirty = false;
	push_new(mq, e);

	return 0;
}
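/*
 * A minimal sketch of the locked wrapper that the "__" prefix implies: the
 * policy entry point takes mq->lock and delegates to __smq_writeback_work().
 * The exact callback signature is an assumption inferred from the
 * surrounding code.
 */
static int smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			      dm_cblock_t *cblock, bool critical_only)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = __smq_writeback_work(mq, oblock, cblock, critical_only);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}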
static void insert_in_cache(struct smq_policy *mq, dm_oblock_t oblock,
			    struct policy_locker *locker,
			    struct policy_result *result, enum promote_result pr)
{
	int r;
	struct entry *e;

	if (allocator_empty(&mq->cache_alloc)) {
		/*
		 * The cache is full, so demote an entry to make room.  If
		 * the demotion fails we have to report a miss.
		 */
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, locker, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return;
		}
	} else
		result->op = POLICY_NEW;

	e = alloc_entry(&mq->cache_alloc);
	BUG_ON(!e);
	e->oblock = oblock;

	if (pr == PROMOTE_TEMPORARY)
		push(mq, e);
	else
		push_new(mq, e);

	result->cblock = infer_cblock(mq, e);
}
static void requeue(struct smq_policy *mq, struct entry *e)
{
	struct entry *sentinel;

	/*
	 * Only requeue an entry on its first hit in the current period;
	 * the hit bits limit how often an entry can be promoted up the
	 * levels.
	 */
	if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
		if (e->dirty) {
			sentinel = writeback_sentinel(mq, e->level);
			q_requeue_before(&mq->dirty, sentinel, e, 1u);
		} else {
			sentinel = demote_sentinel(mq, e->level);
			q_requeue_before(&mq->clean, sentinel, e, 1u);
		}
	}
}
static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e;

	spin_lock_irqsave(&mq->lock, flags);
	e = h_lookup(&mq->table, oblock);
	if (e) {
		*cblock = infer_cblock(mq, e);
		r = 0;
	} else
		r = -ENOENT;
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
static int smq_save_hints(struct smq_policy *mq, struct queue *q,
			  policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct entry *e;

	for (level = 0; level < q->nr_levels; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
			if (!e->sentinel) {
				r = fn(context, infer_cblock(mq, e),
				       e->oblock, e->level);
				if (r)
					return r;
			}
		}

	return 0;
}
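/*
 * A sketch of how smq_save_hints() might be driven over both queues; this
 * caller is an assumption based on the policy_walk_fn interface above.  No
 * lock is taken, on the assumption that hints are only saved once I/O has
 * been quiesced.
 */
static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			     void *context)
{
	struct smq_policy *mq = to_smq_policy(p);
	int r;

	r = smq_save_hints(mq, &mq->clean, fn, context);
	if (!r)
		r = smq_save_hints(mq, &mq->dirty, fn, context);

	return r;
}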
/*
 * Looks the oblock up in the hash table; a hit is requeued, a miss may be
 * promoted into the cache depending on the hotspot queue.
 */
static int map(struct smq_policy *mq, struct bio *bio, dm_oblock_t oblock,
	       bool can_migrate, bool fast_promote,
	       struct policy_locker *locker, struct policy_result *result)
{
	struct entry *e, *hs_e;
	enum promote_result pr;

	hs_e = update_hotspot_queue(mq, oblock, bio);

	e = h_lookup(&mq->table, oblock);
	if (e) {
		stats_level_accessed(&mq->cache_stats, e->level);

		requeue(mq, e);
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(mq, e);

	} else {
		stats_miss(&mq->cache_stats);

		pr = should_promote(mq, hs_e, bio, fast_promote);
		if (pr == PROMOTE_NOT)
			result->op = POLICY_MISS;

		else {
			if (!can_migrate) {
				result->op = POLICY_MISS;
				return -EWOULDBLOCK;
			}

			insert_in_cache(mq, oblock, locker, result, pr);
		}
	}

	return 0;
}
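/*
 * A minimal sketch of the locking wrapper around map(); the can_block
 * parameter and the POLICY_MISS default are assumptions based on the
 * dm-cache policy interface used above.
 */
static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		   bool can_block, bool can_migrate, bool fast_promote,
		   struct bio *bio, struct policy_locker *locker,
		   struct policy_result *result)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	result->op = POLICY_MISS;

	spin_lock_irqsave(&mq->lock, flags);
	r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}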