/*
 * Resume a previously suspended device: bring the targets back up,
 * then reissue the bios that were deferred while I/O was blocked.
 * Returns 0 on success, -EINVAL if the device is not suspended or
 * has no usable table.
 */
int dm_resume(struct mapped_device *md)
{
	struct bio *def;
	struct dm_table *map = dm_get_table(md);

	down_write(&md->lock);

	/* Resuming only makes sense for a suspended device with a
	 * non-empty table. */
	if (!map ||
	    !test_bit(DMF_SUSPENDED, &md->flags) ||
	    !dm_table_get_size(map)) {
		up_write(&md->lock);
		/* map may be NULL here; presumably dm_table_put()
		 * tolerates that -- TODO confirm */
		dm_table_put(map);
		return -EINVAL;
	}

	/* NOTE(review): any result of dm_table_resume_targets() is
	 * ignored here; the newer revision of this function propagates
	 * an error return -- verify against the dm-table API in use. */
	dm_table_resume_targets(map);

	clear_bit(DMF_SUSPENDED, &md->flags);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	/* Drain and reissue everything queued on md->deferred while
	 * DMF_BLOCK_IO was set. */
	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->lock);

	__unlock_fs(md);
	dm_table_unplug_all(map);
	dm_table_put(map);

	return 0;
}
static int __bind(struct mapped_device *md, struct dm_table *t) { request_queue_t *q = md->queue; sector_t size; size = dm_table_get_size(t); /* * Wipe any geometry if the size of the table changed. */ if (size != get_capacity(md->disk)) memset(&md->geometry, 0, sizeof(md->geometry)); if (md->suspended_bdev) __set_size(md, size); if (size == 0) return 0; dm_table_get(t); dm_table_event_callback(t, event_callback, md); write_lock(&md->map_lock); md->map = t; dm_table_set_restrictions(t, q); write_unlock(&md->map_lock); return 0; }
int dm_resume(struct mapped_device *md) { int r = -EINVAL; struct bio *def; struct dm_table *map = NULL; down(&md->suspend_lock); if (!dm_suspended(md)) goto out; map = dm_get_table(md); if (!map || !dm_table_get_size(map)) goto out; r = dm_table_resume_targets(map); if (r) goto out; down_write(&md->io_lock); clear_bit(DMF_BLOCK_IO, &md->flags); def = bio_list_get(&md->deferred); __flush_deferred_io(md, def); up_write(&md->io_lock); unlock_fs(md); if (md->suspended_bdev) { bdput(md->suspended_bdev); md->suspended_bdev = NULL; } clear_bit(DMF_SUSPENDED, &md->flags); dm_table_unplug_all(map); kobject_uevent(&md->disk->kobj, KOBJ_CHANGE); r = 0; out: dm_table_put(map); up(&md->suspend_lock); return r; }
static int __bind(struct mapped_device *md, struct dm_table *t) { request_queue_t *q = md->queue; sector_t size; size = dm_table_get_size(t); __set_size(md->disk, size); if (size == 0) return 0; write_lock(&md->map_lock); md->map = t; write_unlock(&md->map_lock); dm_table_get(t); dm_table_event_callback(md->map, event_callback, md); dm_table_set_restrictions(t, q); return 0; }
/*
 * Replace the device's table with a new one, destroying the old
 * table in the process.  The device must already be suspended.
 * Returns 0 on success, -EINVAL otherwise.
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int rc = -EINVAL;

	down(&md->suspend_lock);

	/* Only a suspended device may have its table swapped. */
	if (!dm_suspended(md))
		goto out;

	/* Without a held bdev the device size cannot be changed. */
	if (!md->suspended_bdev &&
	    get_capacity(md->disk) != dm_table_get_size(table))
		goto out;

	__unbind(md);
	rc = __bind(md, table);

out:
	up(&md->suspend_lock);
	return rc;
}
static int dm_blk_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { struct mapped_device *md; struct dm_table *map; struct dm_target *tgt; int r = -ENOTTY; /* We don't really need this lock, but we do need 'inode'. */ unlock_kernel(); md = inode->i_bdev->bd_disk->private_data; map = dm_get_table(md); if (!map || !dm_table_get_size(map)) goto out; /* We only support devices that have a single target */ if (dm_table_get_num_targets(map) != 1) goto out; tgt = dm_table_get_target(map, 0); if (dm_suspended(md)) { r = -EAGAIN; goto out; } if (tgt->type->ioctl) r = tgt->type->ioctl(tgt, inode, file, cmd, arg); out: dm_table_put(map); lock_kernel(); return r; }
int dm_create_error_table(struct dm_table **result, struct mapped_device *md) { struct dm_table *t; sector_t dev_size = 1; int r; /* * Find current size of device. * Default to 1 sector if inactive. */ t = dm_get_table(md); if (t) { dev_size = dm_table_get_size(t); dm_table_put(t); } r = dm_table_create(&t, FMODE_READ, 1, md); if (r) return r; r = dm_table_add_target(t, "error", 0, dev_size, NULL); if (r) goto out; r = dm_table_complete(t); if (r) goto out; *result = t; out: if (r) dm_table_put(t); return r; }