static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);

	/* Track in-flight I/O per direction. */
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	/*
	 * Record the position, direction and start time of the last
	 * dispatched request, used to decide whether to briefly delay
	 * dispatch so sequential requests can merge.
	 */
	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	/* Account the start of this I/O in dm-stats. */
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors,
				    false, 0, &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}
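/*
 * For context: the reference taken in dm_start_request() is dropped on
 * the completion path. The sketch below is modeled on rq_completed()
 * from the same era of drivers/md/dm.c and is illustrative only --
 * helpers such as md_in_flight() and the exact queue-kick calls vary
 * by kernel version.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* Nudge anyone waiting on the suspend queue. */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	if (run_queue) {
		if (md->queue->mq_ops)
			blk_mq_run_hw_queues(md->queue, true);
		else
			blk_run_queue_async(md->queue);
	}

	/*
	 * dm_put() must be the last thing done here: it may drop the
	 * final reference and free the mapped_device.
	 */
	dm_put(md);
}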
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;

	/* Simple open: take a reference on the mapped_device for the opener. */
	dm_get(md);

	return 0;
}
/*
 * Look up a mapped_device by dev_t and return it with a reference held;
 * the caller must drop it with dm_put().
 */
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
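/*
 * Hedged usage sketch -- example_probe_md() is a hypothetical caller,
 * not a function from dm.c. It shows the contract: a non-NULL return
 * from dm_get_md() carries a reference that must be balanced with
 * dm_put().
 */
static int example_probe_md(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return -ENXIO;

	/* ... use md while the reference pins it ... */

	dm_put(md);	/* balance the dm_get() taken inside dm_get_md() */
	return 0;
}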
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	/* _minor_lock serializes open against device removal. */
	spin_lock(&_minor_lock);

	md = inode->i_bdev->bd_disk->private_data;
	if (!md)
		goto out;

	/* Refuse to open a device that is being freed or deleted. */
	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
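/*
 * The opener's reference and open_count are balanced in the release
 * hook. A minimal sketch of that counterpart, modeled on dm_blk_close()
 * from the same kernel era (illustrative, not verbatim):
 */
static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;

	atomic_dec(&md->open_count);	/* undo the open-side increment */
	dm_put(md);			/* drop the opener's reference */

	return 0;
}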
/*
 * Return the table's owning mapped_device with an extra reference;
 * the caller is responsible for dm_put().
 */
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}
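/*
 * Hedged example -- example_log_owner() is hypothetical, not from dm.c.
 * Because dm_table_get_md() returns the owning device with an extra
 * reference, the caller pairs it with dm_put() when done.
 */
static void example_log_owner(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);

	DMINFO("target belongs to %s", dm_device_name(md));

	dm_put(md);	/* release the reference taken by dm_table_get_md() */
}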