static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;

        io->start_time = jiffies;

        preempt_disable();
        disk_round_stats(dm_disk(md));
        preempt_enable();
        dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
        int r;
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;

        /*
         * There is no use in forwarding any barrier request since we can't
         * guarantee it is (or can be) handled by the targets correctly.
         */
        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
                return 0;
        }

        down_read(&md->io_lock);

        disk_stat_inc(dm_disk(md), ios[rw]);
        disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

        /*
         * If we're suspended we have to queue
         * this io for later.
         */
        while (test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_read(&md->io_lock);

                if (bio_rw(bio) == READA) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;
                }

                r = queue_io(md, bio);
                if (r < 0) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;
                } else if (r == 0)
                        return 0;       /* deferred successfully */

                /*
                 * We're in a while loop, because someone could suspend
                 * before we get to the following read lock.
                 */
                down_read(&md->io_lock);
        }

        __split_bio(md, bio);
        up_read(&md->io_lock);
        return 0;
}
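/*
 * For reference, a minimal sketch of the queue_io() helper that the
 * deferral loop above relies on.  This is a reconstruction based on the
 * dm.c of the same era, not verbatim from this tree.  It re-checks
 * DMF_BLOCK_IO under the write lock so a racing resume is not missed:
 * returning 1 tells dm_request() to retry the fast path, 0 means the
 * bio was added to the deferred list.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
        down_write(&md->io_lock);

        if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_write(&md->io_lock);
                return 1;               /* no longer blocked, caller retries */
        }

        bio_list_add(&md->deferred, bio);

        up_write(&md->io_lock);
        return 0;                       /* deferred successfully */
}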
static int end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending;
        int rw = bio_data_dir(bio);

        preempt_disable();
        disk_round_stats(dm_disk(md));
        preempt_enable();
        dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

        disk_stat_add(dm_disk(md), ticks[rw], duration);

        return !pending;
}
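/*
 * Illustration only: how the two accounting helpers above are expected to
 * pair up over a bio's lifetime.  The wake_up() on md->wait mirrors what
 * the completion path (dec_pending()) does so that dm_suspend() can wait
 * for in-flight I/O to drain; io_accounting_example() itself is a
 * hypothetical wrapper, not a function in dm.c.
 */
static void io_accounting_example(struct dm_io *io)
{
        start_io_acct(io);              /* charge in_flight before the clones go out */

        /* ... clone and submit the bio to the targets ... */

        if (end_io_acct(io))            /* last pending io for this device? */
                wake_up(&io->md->wait); /* lets a suspend in progress continue */
}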
/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
        if (!d->dm_dev.bdev)
                return;

        bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
        blkdev_put(d->dm_dev.bdev);
        d->dm_dev.bdev = NULL;
}
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request *clone = NULL;
        blk_status_t ret;

        r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
check_again:
        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }

                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                ret = dm_dispatch_clone_request(clone, rq);
                if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
                        blk_rq_unprep_clone(clone);
                        tio->ti->type->release_clone_rq(clone);
                        tio->clone = NULL;
                        if (!rq->q->mq_ops)
                                r = DM_MAPIO_DELAY_REQUEUE;
                        else
                                r = DM_MAPIO_REQUEUE;
                        goto check_again;
                }
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                break;
        case DM_MAPIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        case DM_MAPIO_KILL:
                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, BLK_STS_IOERR);
                break;
        default:
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }

        return r;
}
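/*
 * Sketch of the target-side contract that map_request() above drives.
 * A request-based target's ->clone_and_map_rq hook picks the underlying
 * device, hands back a clone and signals how dm should proceed via the
 * DM_MAPIO_* codes.  example_clone_and_map() and example_alloc_clone()
 * are hypothetical stand-ins (a real target such as dm-mpath allocates
 * the clone with blk_get_request() on the chosen path's queue).
 */
static int example_clone_and_map(struct dm_target *ti, struct request *rq,
                                 union map_info *map_context,
                                 struct request **__clone)
{
        struct request *clone;

        /* Placeholder for path selection + clone allocation. */
        clone = example_alloc_clone(ti, rq);
        if (!clone)
                return DM_MAPIO_DELAY_REQUEUE;  /* retry the original later */

        *__clone = clone;
        return DM_MAPIO_REMAPPED;               /* dm dispatches the clone */
}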
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
        int r;
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;

        down_read(&md->io_lock);

        disk_stat_inc(dm_disk(md), ios[rw]);
        disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

        /*
         * If we're suspended we have to queue
         * this io for later.
         */
        while (test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_read(&md->io_lock);

                if (bio_rw(bio) == READA) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;
                }

                r = queue_io(md, bio);
                if (r < 0) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;
                } else if (r == 0)
                        return 0;       /* deferred successfully */

                /*
                 * We're in a while loop, because someone could suspend
                 * before we get to the following read lock.
                 */
                down_read(&md->io_lock);
        }

        __split_bio(md, bio);
        up_read(&md->io_lock);
        return 0;
}
/*
 * Returns:
 * 0                : the request has been processed
 * DM_MAPIO_REQUEUE : the original request needs to be requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio, struct request *rq,
                       struct mapped_device *md)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct request *clone = NULL;

        if (tio->clone) {
                clone = tio->clone;
                r = ti->type->map_rq(ti, clone, &tio->info);
        } else {
                r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
                if (r < 0) {
                        /* The target wants to complete the I/O */
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
                if (r != DM_MAPIO_REMAPPED)
                        return r;
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }
        }

        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                dm_dispatch_clone_request(clone, rq);
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(md, tio->orig);
                break;
        default:
                if (r > 0) {
                        DMWARN("unimplemented target map return value: %d", r);
                        BUG();
                }

                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, r);
                return r;
        }

        return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
        struct mapped_device *md;
        unsigned minor = MINOR(dev);

        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
                return NULL;

        down(&_minor_lock);

        md = idr_find(&_minor_idr, minor);
        if (!md || (dm_disk(md)->first_minor != minor))
                md = NULL;

        up(&_minor_lock);

        return md;
}
/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
        static char *_claim_ptr = "I belong to device-mapper";
        struct block_device *bdev;
        int r;

        BUG_ON(d->bdev);

        bdev = open_by_devnum(dev, d->mode);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
        if (r)
                blkdev_put(bdev);
        else
                d->bdev = bdev;

        return r;
}
void dm_put(struct mapped_device *md)
{
        struct dm_table *map;

        BUG_ON(test_bit(DMF_FREEING, &md->flags));

        if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
                map = dm_get_table(md);
                idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
                set_bit(DMF_FREEING, &md->flags);
                spin_unlock(&_minor_lock);
                if (!dm_suspended(md)) {
                        dm_table_presuspend_targets(map);
                        dm_table_postsuspend_targets(map);
                }
                __unbind(md);
                dm_table_put(map);
                free_dev(md);
        }
}
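/*
 * For context, the reference-taking counterpart of dm_put() in the same
 * era is simply an atomic increment of md->holders, shown here as a
 * reminder of what the atomic_dec_and_lock() above is balancing:
 */
void dm_get(struct mapped_device *md)
{
        atomic_inc(&md->holders);
}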
void *dm_get_mdptr(dev_t dev)
{
        struct mapped_device *md;
        void *mdptr = NULL;
        unsigned minor = MINOR(dev);

        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
                return NULL;

        down(&_minor_lock);

        md = idr_find(&_minor_idr, minor);
        if (md && (dm_disk(md)->first_minor == minor))
                mdptr = md->interface_ptr;

        up(&_minor_lock);

        return mdptr;
}
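/*
 * The setter paired with dm_get_mdptr(); dm-ioctl uses it to stash its
 * hash_cell in md->interface_ptr.  Shown for context, matching the same
 * era of dm.c:
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
        md->interface_ptr = ptr;
}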
static struct mapped_device *dm_find_md(dev_t dev)
{
        struct mapped_device *md;
        unsigned minor = MINOR(dev);

        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
                return NULL;

        spin_lock(&_minor_lock);

        md = idr_find(&_minor_idr, minor);
        if (md && (md == MINOR_ALLOCED ||
                   (dm_disk(md)->first_minor != minor) ||
                   test_bit(DMF_FREEING, &md->flags))) {
                md = NULL;
                goto out;
        }

out:
        spin_unlock(&_minor_lock);

        return md;
}
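/*
 * Callers normally go through the exported wrapper below, which looks up
 * the device and takes a holder reference; shown here roughly as it
 * appeared alongside dm_find_md() in this era:
 */
struct mapped_device *dm_get_md(dev_t dev)
{
        struct mapped_device *md = dm_find_md(dev);

        if (md)
                dm_get(md);

        return md;
}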
static void __init dm_setup_drive(void)
{
        struct mapped_device *md = NULL;
        struct dm_table *table = NULL;
        struct dm_setup_target *target;
        char *uuid = dm_setup_args.uuid;
        fmode_t fmode = FMODE_READ;

        /* Finish parsing the targets. */
        if (dm_setup_parse_targets(dm_setup_args.targets))
                goto parse_fail;

        if (dm_create(dm_setup_args.minor, &md)) {
                DMDEBUG("failed to create the device");
                goto dm_create_fail;
        }
        DMDEBUG("created device '%s'", dm_device_name(md));

        /* In addition to flagging the table below, the disk must be
         * set explicitly ro/rw. */
        set_disk_ro(dm_disk(md), dm_setup_args.ro);

        if (!dm_setup_args.ro)
                fmode |= FMODE_WRITE;
        if (dm_table_create(&table, fmode, dm_setup_args.target_count, md)) {
                DMDEBUG("failed to create the table");
                goto dm_table_create_fail;
        }

        target = dm_setup_args.target;
        while (target) {
                DMINFO("adding target '%llu %llu %s %s'",
                       (unsigned long long) target->begin,
                       (unsigned long long) target->length,
                       target->type, target->params);
                if (dm_table_add_target(table, target->type, target->begin,
                                        target->length, target->params)) {
                        DMDEBUG("failed to add the target to the table");
                        goto add_target_fail;
                }
                target = target->next;
        }

        if (dm_table_complete(table)) {
                DMDEBUG("failed to complete the table");
                goto table_complete_fail;
        }

        /* Suspend the device so that we can bind it to the table. */
        if (dm_suspend(md, 0)) {
                DMDEBUG("failed to suspend the device pre-bind");
                goto suspend_fail;
        }

        /* Bind the table to the device. This is the only way to associate
         * md->map with the table and set the disk capacity directly. */
        if (dm_swap_table(md, table)) {  /* should return NULL. */
                DMDEBUG("failed to bind the device to the table");
                goto table_bind_fail;
        }

        /* Finally, resume and the device should be ready. */
        if (dm_resume(md)) {
                DMDEBUG("failed to resume the device");
                goto resume_fail;
        }

        /* Export the dm device via the ioctl interface */
        if (!strcmp(DM_NO_UUID, dm_setup_args.uuid))
                uuid = NULL;
        if (dm_ioctl_export(md, dm_setup_args.name, uuid)) {
                DMDEBUG("failed to export device with given name and uuid");
                goto export_fail;
        }
        printk(KERN_INFO "dm: dm-%d is ready\n", dm_setup_args.minor);

        dm_setup_cleanup();
        return;

export_fail:
resume_fail:
table_bind_fail:
suspend_fail:
table_complete_fail:
add_target_fail:
        dm_table_put(table);
dm_table_create_fail:
        dm_put(md);
dm_create_fail:
        dm_setup_cleanup();
parse_fail:
        printk(KERN_WARNING "dm: starting dm-%d (%s) failed\n",
               dm_setup_args.minor, dm_setup_args.name);
}
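/*
 * For reference, each dm_setup_target consumed above carries one table
 * line in the usual device-mapper "<start_sector> <num_sectors> <type>
 * <args>" form; for example, a single linear target covering 128 MiB of
 * device 8:16 (/dev/sdb) would be "0 262144 linear 8:16 0".  The exact
 * boot-line syntax that fills dm_setup_args is parsed earlier by
 * dm_setup_parse_targets() and is not shown here.
 */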