void dm_put(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);

	if (atomic_dec_and_test(&md->holders)) {
		if (!test_bit(DMF_SUSPENDED, &md->flags) && map) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		free_dev(md);
	}

	dm_table_put(map);
}
void dm_put(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);

	if (atomic_dec_and_test(&md->holders)) {
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		free_dev(md);
	}

	dm_table_put(map);
}
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct bio *def;
	int r = -EINVAL;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out_unlock;

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto flush_and_out;
		}
	}

	/*
	 * Flush I/O to the device.
	 * noflush supersedes do_lockfs, because lock_fs() needs to flush I/Os.
	 */
	if (do_lockfs && !noflush) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush) {
		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);
	}

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		clear_bit(DMF_BLOCK_IO, &md->flags);
		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

flush_and_out:
	if (r && noflush) {
		/*
		 * Because there may be already I/Os in the pushback list,
		 * flush them before return.
		 */
		down_write(&md->io_lock);

		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);

		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
	}

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	up(&md->suspend_lock);
	return r;
}
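The suspend_flags argument in the variant above is built by the caller from the ioctl flags. For context, here is a minimal sketch of such a call site, loosely modeled on the do_suspend() path in dm-ioctl.c of the same era; find_device() and the dm_ioctl flag names are assumed from that context and the error handling is abbreviated, so treat this as illustrative rather than the exact in-tree function.

/* Sketch: how an ioctl handler might derive suspend_flags for dm_suspend(). */
static int do_suspend(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;	/* freeze the fs by default */
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	if (param->flags & DM_SKIP_LOCKFS_FLAG)
		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;	/* skip lock_fs() */
	if (param->flags & DM_NOFLUSH_FLAG)
		suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;	/* push back I/O instead of flushing */

	if (!dm_suspended(md))
		r = dm_suspend(md, suspend_flags);

	dm_put(md);
	return r;
}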
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md)
{
	struct dm_table *map;
	DECLARE_WAITQUEUE(wait, current);

	/* Flush I/O to the device. */
	down_read(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);
		return -EINVAL;
	}

	map = dm_get_table(md);
	if (map)
		dm_table_presuspend_targets(map);
	__lock_fs(md);

	up_read(&md->lock);

	/*
	 * First we set the BLOCK_IO flag so no more ios will be
	 * mapped.
	 */
	down_write(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		/*
		 * If we get here we know another thread is
		 * trying to suspend as well, so we leave the fs
		 * locked for this thread.
		 */
		up_write(&md->lock);
		return -EINVAL;
	}

	set_bit(DMF_BLOCK_IO, &md->flags);
	add_wait_queue(&md->wait, &wait);
	up_write(&md->lock);

	/* unplug */
	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	if (atomic_read(&md->pending)) {
		__unlock_fs(md);
		clear_bit(DMF_BLOCK_IO, &md->flags);
		up_write(&md->lock);
		return -EINTR;
	}

	set_bit(DMF_SUSPENDED, &md->flags);

	map = dm_get_table(md);
	if (map)
		dm_table_postsuspend_targets(map);
	dm_table_put(map);
	up_write(&md->lock);

	return 0;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, int do_lockfs)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	struct bio *def;
	int r = -EINVAL;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out;

	map = dm_get_table(md);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	md->suspended_bdev = bdget_disk(md->disk, 0);
	if (!md->suspended_bdev) {
		DMWARN("bdget failed in dm_suspend");
		r = -ENOMEM;
		goto out;
	}

	/* Flush I/O to the device. */
	if (do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		clear_bit(DMF_BLOCK_IO, &md->flags);
		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
		unlock_fs(md);
		goto out;
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);
	up(&md->suspend_lock);
	return r;
}