/*
 * Remove the cache mapping for @oblock, serialised against other policy
 * operations by taking mq->lock as a mutex.
 *
 * NOTE(review): this file also contains a spin_lock_irqsave() variant of
 * smq_remove_mapping().  Two definitions of the same static function in
 * one translation unit will not compile — this looks like a merge
 * artifact; one of the two copies must be removed.
 */
static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct smq_policy *mq = to_smq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}
/*
 * Mark @oblock clean (dirty = false), under mq->lock taken as a mutex.
 *
 * NOTE(review): a spin_lock_irqsave() variant of smq_clear_dirty() also
 * exists in this file.  Duplicate definitions of the same static function
 * will not compile — merge artifact; one copy should be deleted.
 */
static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct smq_policy *mq = to_smq_policy(p);

	mutex_lock(&mq->lock);
	__smq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}
/*
 * Mark @oblock clean.  The policy state is protected by mq->lock, taken
 * with interrupts disabled (irqsave variant).
 */
static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__smq_set_clear_dirty(mq, oblock, false);
	spin_unlock_irqrestore(&mq->lock, flags);
}
/*
 * Remap an existing entry from @current_oblock to @new_oblock, under
 * mq->lock taken as a mutex.
 *
 * NOTE(review): a spin_lock_irqsave() variant of smq_force_mapping() also
 * exists in this file.  Duplicate static definitions will not compile —
 * merge artifact; one copy should be deleted.
 */
static void smq_force_mapping(struct dm_cache_policy *p,
			      dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct smq_policy *mq = to_smq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}
/*
 * Drop the cache mapping for @oblock.  All policy state is serialised by
 * mq->lock, held with interrupts disabled.
 */
static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__remove_mapping(mq, oblock);
	spin_unlock_irqrestore(&mq->lock, flags);
}
/*
 * Remap an existing entry from @current_oblock to @new_oblock, holding
 * mq->lock with interrupts disabled for the duration.
 */
static void smq_force_mapping(struct dm_cache_policy *p,
			      dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__force_mapping(mq, current_oblock, new_oblock);
	spin_unlock_irqrestore(&mq->lock, flags);
}
/*
 * Tear down a policy instance: release the hash tables, hit bitsets and
 * entry space, then free the policy struct itself.  No locking — by the
 * time destroy is called there can be no concurrent users.
 */
static void smq_destroy(struct dm_cache_policy *p)
{
	struct smq_policy *mq = to_smq_policy(p);

	h_exit(&mq->hotspot_table);
	h_exit(&mq->table);
	free_bitset(mq->hotspot_hit_bits);
	free_bitset(mq->cache_hit_bits);
	space_exit(&mq->es);
	kfree(mq);	/* must be last: everything above lives inside *mq */
}
/*
 * Return the number of cache blocks currently allocated, read under
 * mq->lock taken as a mutex.
 *
 * NOTE(review): a spin_lock_irqsave() variant of smq_residency() also
 * exists in this file.  Duplicate static definitions will not compile —
 * merge artifact; one copy should be deleted.
 */
static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct smq_policy *mq = to_smq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_alloc.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}
/*
 * Remove the entry occupying @cblock; returns the result of
 * __remove_cblock() (0 on success).  Serialised with mq->lock as a mutex.
 *
 * NOTE(review): a spin_lock_irqsave() variant of smq_remove_cblock() also
 * exists in this file.  Duplicate static definitions will not compile —
 * merge artifact; one copy should be deleted.
 */
static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	int r;
	struct smq_policy *mq = to_smq_policy(p);

	mutex_lock(&mq->lock);
	r = __remove_cblock(mq, cblock);
	mutex_unlock(&mq->lock);

	return r;
}
/*
 * Report how many cache blocks are currently allocated.  The counter is
 * sampled under mq->lock with interrupts disabled so it is consistent
 * with concurrent allocation/free.
 */
static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;
	dm_cblock_t r;

	spin_lock_irqsave(&mq->lock, flags);
	r = to_cblock(mq->cache_alloc.nr_allocated);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
/*
 * Ask the policy for a dirty block to write back; on success *oblock and
 * *cblock are filled in.  Serialised with mq->lock as a mutex.
 *
 * NOTE(review): a spin_lock_irqsave() variant of smq_writeback_work()
 * also exists in this file.  Duplicate static definitions will not
 * compile — merge artifact; one copy should be deleted.
 */
static int smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			      dm_cblock_t *cblock, bool critical_only)
{
	int r;
	struct smq_policy *mq = to_smq_policy(p);

	mutex_lock(&mq->lock);
	r = __smq_writeback_work(mq, oblock, cblock, critical_only);
	mutex_unlock(&mq->lock);

	return r;
}
/*
 * Evict whatever entry occupies @cblock.  Delegates to __remove_cblock()
 * under mq->lock (irqsave) and propagates its return code.
 */
static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&mq->lock, flags);
	r = __remove_cblock(mq, cblock);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
/*
 * Advance the policy clock: bump mq->tick and close out the current
 * sentinel/hotspot/cache periods, all in one critical section under
 * mq->lock with interrupts disabled.
 *
 * @can_block is unused here — the irqsave lock never sleeps, so the
 * parameter exists only to satisfy the policy interface.
 */
static void smq_tick(struct dm_cache_policy *p, bool can_block)
{
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	mq->tick++;
	update_sentinels(mq);
	end_hotspot_period(mq);
	end_cache_period(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}
/*
 * Pick a dirty block for writeback.  On success __smq_writeback_work()
 * fills in *oblock/*cblock; its return code is passed straight through.
 * @critical_only restricts the selection (see __smq_writeback_work).
 */
static int smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			      dm_cblock_t *cblock, bool critical_only)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&mq->lock, flags);
	r = __smq_writeback_work(mq, oblock, cblock, critical_only);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
/*
 * Record a clock tick.  The increment goes to tick_protected under the
 * dedicated tick_lock spinlock; only when @can_block is set do we take
 * the (sleeping) mutex and fold it into the main state via copy_tick().
 *
 * NOTE(review): a second smq_tick() using spin_lock_irqsave() on
 * mq->lock also exists in this file.  Duplicate static definitions will
 * not compile — merge artifact; one copy should be deleted.
 */
static void smq_tick(struct dm_cache_policy *p, bool can_block)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);

	if (can_block) {
		mutex_lock(&mq->lock);
		copy_tick(mq);
		mutex_unlock(&mq->lock);
	}
}
/*
 * Re-create a mapping during cache metadata load: allocate the entry at
 * exactly @cblock, point it at @oblock and push it into the level given
 * by @hint (clamped to the top level) or level 1 when no valid hint was
 * saved.  Always returns 0.
 *
 * NOTE(review): min(hint, NR_CACHE_LEVELS - 1) mixes uint32_t with what
 * is presumably an int expression — confirm min_t() isn't needed here.
 */
static int smq_load_mapping(struct dm_cache_policy *p,
			    dm_oblock_t oblock, dm_cblock_t cblock,
			    uint32_t hint, bool hint_valid)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e = alloc_particular_entry(&mq->cache_alloc,
						 from_cblock(cblock));

	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : 1;
	push(mq, e);

	return 0;
}
/*
 * Walk every cached mapping (clean queue first, then dirty) handing each
 * to @fn via smq_save_hints(); stops on the first non-zero return.
 * Serialised with mq->lock as a mutex.
 *
 * NOTE(review): a lock-free variant of smq_walk_mappings() (relying on
 * IO being stopped) also exists in this file.  Duplicate static
 * definitions will not compile — merge artifact; one copy should be
 * deleted.
 */
static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			     void *context)
{
	struct smq_policy *mq = to_smq_policy(p);
	int r = 0;

	mutex_lock(&mq->lock);
	r = smq_save_hints(mq, &mq->clean, fn, context);
	if (!r)
		r = smq_save_hints(mq, &mq->dirty, fn, context);
	mutex_unlock(&mq->lock);

	return r;
}
/*
 * Iterate all mappings, clean queue then dirty, passing each to @fn
 * through smq_save_hints().  The walk aborts on the first non-zero
 * return and that value is propagated to the caller.
 *
 * No locking: this is only called once IO has stopped, so nothing can
 * mutate the queues underneath us.
 */
static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			     void *context)
{
	struct smq_policy *mq = to_smq_policy(p);
	int r;

	r = smq_save_hints(mq, &mq->clean, fn, context);
	if (!r)
		r = smq_save_hints(mq, &mq->dirty, fn, context);

	return r;
}
/*
 * Policy entry point for mapping a bio's origin block.  result->op is
 * preset to POLICY_MISS, then map() decides under mq->lock (irqsave)
 * whether to hit, promote or miss; its return code is passed through.
 */
static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		   bool can_block, bool can_migrate, bool fast_promote,
		   struct bio *bio, struct policy_locker *locker,
		   struct policy_result *result)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;
	int r;

	result->op = POLICY_MISS;

	spin_lock_irqsave(&mq->lock, flags);
	r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
/*
 * Look up @oblock in the mapping table.  On a hit, *cblock is set from
 * the entry's position and 0 is returned; otherwise -ENOENT.  The table
 * is consulted under mq->lock with interrupts disabled.
 */
static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock,
		      dm_cblock_t *cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;
	struct entry *e;
	int r = -ENOENT;

	spin_lock_irqsave(&mq->lock, flags);
	e = h_lookup(&mq->table, oblock);
	if (e) {
		*cblock = infer_cblock(mq, e);
		r = 0;
	}
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}
/*
 * Map a bio's origin block.  result->op defaults to POLICY_MISS; the
 * mutex is taken via maybe_lock() so a !can_block caller gets
 * -EWOULDBLOCK instead of sleeping.  copy_tick() folds the pending tick
 * count into the main state before map() runs.
 *
 * NOTE(review): a spin_lock_irqsave() variant of smq_map() also exists
 * in this file.  Duplicate static definitions will not compile — merge
 * artifact; one copy should be deleted.
 */
static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		   bool can_block, bool can_migrate, bool fast_promote,
		   struct bio *bio, struct policy_locker *locker,
		   struct policy_result *result)
{
	int r;
	struct smq_policy *mq = to_smq_policy(p);

	result->op = POLICY_MISS;

	if (!maybe_lock(mq, can_block))
		return -EWOULDBLOCK;

	copy_tick(mq);
	r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
	mutex_unlock(&mq->lock);

	return r;
}
/*
 * Look up @oblock; fills *cblock and returns 0 on a hit, -ENOENT on a
 * miss.  Uses mutex_trylock() so callers never sleep here — contention
 * yields -EWOULDBLOCK.
 *
 * NOTE(review): a spin_lock_irqsave() variant of smq_lookup() also
 * exists in this file.  Duplicate static definitions will not compile —
 * merge artifact; one copy should be deleted.
 */
static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock,
		      dm_cblock_t *cblock)
{
	int r;
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = h_lookup(&mq->table, oblock);
	if (e) {
		*cblock = infer_cblock(mq, e);
		r = 0;
	} else
		r = -ENOENT;
	mutex_unlock(&mq->lock);

	return r;
}