Example #1
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, int64_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             BlockDriverCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        /* Choose the default granularity based on the target file's cluster
         * size, clamped between 4k and 64k.  */
        BlockDriverInfo bdi;
        if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) {
            granularity = MAX(4096, bdi.cluster_size);
            granularity = MIN(65536, granularity);
        } else {
            granularity = 65536;
        }
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, errp);
    if (!s->dirty_bitmap) {
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
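
For context, a caller of this helper has to boil the sync mode down to the is_none_mode/base pair. A minimal sketch of such a wrapper, assuming the MirrorSyncMode enum, the bs->backing_hd field, and a mirror_job_driver definition as found in QEMU trees of this vintage:

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces, int64_t speed,
                  int64_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    /* MIRROR_SYNC_MODE_NONE copies only writes made from now on; TOP
     * limits the copy to sectors allocated above the backing file. */
    bool is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    BlockDriverState *base =
        mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;

    mirror_start_job(bs, target, replaces, speed, granularity, buf_size,
                     on_source_error, on_target_error, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}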
Example #2
/* Create a block job that completes with a given return code after a given
 * number of event loop iterations.  The return code is stored in the given
 * result pointer.
 *
 * The event loop iterations can either be handled automatically with a 0 delay
 * timer, or they can be stepped manually by entering the coroutine.
 */
static BlockJob *test_block_job_start(unsigned int iterations,
                                      bool use_timer,
                                      int rc, int *result, BlockJobTxn *txn)
{
    BlockDriverState *bs;
    TestBlockJob *s;
    TestBlockJobCBData *data;
    static unsigned counter;
    char job_id[24];

    data = g_new0(TestBlockJobCBData, 1);

    bs = bdrv_open("null-co://", NULL, NULL, 0, &error_abort);
    g_assert_nonnull(bs);

    snprintf(job_id, sizeof(job_id), "job%u", counter++);
    s = block_job_create(job_id, &test_block_job_driver, txn, bs,
                         0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT,
                         test_block_job_cb, data, &error_abort);
    s->iterations = iterations;
    s->use_timer = use_timer;
    s->rc = rc;
    s->result = result;
    data->job = s;
    data->result = result;
    return &s->common;
}
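
Because this variant takes a BlockJobTxn, the natural use is pairing two jobs in one transaction and letting the event loop drive both to completion. A minimal sketch of the success case, assuming block_job_txn_new(), block_job_start(), and aio_poll() as in QEMU of the same era:

static void test_pair_jobs_sketch(void)
{
    int result1 = -EINPROGRESS;    /* filled in by test_block_job_cb */
    int result2 = -EINPROGRESS;
    BlockJobTxn *txn = block_job_txn_new();
    BlockJob *job1 = test_block_job_start(1, true, 0, &result1, txn);
    BlockJob *job2 = test_block_job_start(2, true, 0, &result2, txn);

    block_job_start(job1);
    block_job_start(job2);

    /* Drive the event loop until both jobs have finished. */
    while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
        aio_poll(qemu_get_aio_context(), true);
    }

    /* Both jobs succeeded, so the transaction committed as a whole; had
     * one failed, the other would have completed with -ECANCELED. */
    g_assert_cmpint(result1, ==, 0);
    g_assert_cmpint(result2, ==, 0);
    block_job_txn_unref(txn);
}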
Example #3
/* Create a block job that completes with a given return code after a given
 * number of event loop iterations.  The return code is stored in the given
 * result pointer.
 *
 * The event loop iterations can either be handled automatically with a 0 delay
 * timer, or they can be stepped manually by entering the coroutine.
 */
static BlockJob *test_block_job_start(unsigned int iterations,
                                      bool use_timer,
                                      int rc, int *result)
{
    BlockDriverState *bs;
    TestBlockJob *s;
    TestBlockJobCBData *data;
    static unsigned counter;
    char job_id[24];

    data = g_new0(TestBlockJobCBData, 1);
    bs = bdrv_new();
    snprintf(job_id, sizeof(job_id), "job%u", counter++);
    s = block_job_create(job_id, &test_block_job_driver, bs, 0,
                         BLOCK_JOB_DEFAULT, test_block_job_cb,
                         data, &error_abort);
    s->iterations = iterations;
    s->use_timer = use_timer;
    s->rc = rc;
    s->result = result;
    s->common.co = qemu_coroutine_create(test_block_job_run, s);
    data->job = s;
    data->result = result;
    qemu_coroutine_enter(s->common.co);
    return &s->common;
}
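
When use_timer is false the job parks in block_job_yield() instead of arming a timer, so the caller steps it explicitly, as the header comment says. A hypothetical stepping loop, assuming the block_job_enter() helper of this era:

static void test_manual_stepping_sketch(void)
{
    int result = -EINPROGRESS;
    BlockJob *job = test_block_job_start(3, false /* step manually */,
                                         0, &result);

    /* Each enter wakes the job for one more iteration; the last
     * iteration completes the job and stores 0 into result. */
    while (result == -EINPROGRESS) {
        block_job_enter(job);
    }
    g_assert_cmpint(result, ==, 0);
}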
Example #4
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
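
The open-coded assert((granularity & (granularity - 1)) == 0) is the classic single-bit test that later trees spell is_power_of_2(). A minimal sketch of that helper:

/* (v & (v - 1)) clears the lowest set bit, so it is zero exactly for
 * powers of two.  v == 0 must be excluded separately; here the
 * granularity == 0 case was already replaced by a default above. */
static inline bool is_power_of_2(uint64_t value)
{
    return value != 0 && (value & (value - 1)) == 0;
}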
Example #5
static BlockJob *do_test_id(BlockBackend *blk, const char *id,
                            bool should_succeed)
{
    BlockJob *job;
    Error *errp = NULL;

    job = block_job_create(id, &test_block_job_driver, blk_bs(blk),
                           0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT, block_job_cb,
                           NULL, &errp);
    if (should_succeed) {
        g_assert_null(errp);
        g_assert_nonnull(job);
        if (id) {
            g_assert_cmpstr(job->id, ==, id);
        } else {
            g_assert_cmpstr(job->id, ==, blk_name(blk));
        }
    } else {
        /* Failure path (as in the upstream QEMU test): an error must be
         * set and no job created. */
        g_assert_nonnull(errp);
        g_assert_null(job);
        error_free(errp);
    }

    return job;
}
Example #6
/* Create a block job that completes with a given return code after a given
 * number of event loop iterations.  The return code is stored in the given
 * result pointer.
 *
 * The event loop iterations can either be handled automatically with a 0 delay
 * timer, or they can be stepped manually by entering the coroutine.
 */
static BlockJob *test_block_job_start(unsigned int iterations,
                                      bool use_timer,
                                      int rc, int *result)
{
    BlockDriverState *bs;
    TestBlockJob *s;
    TestBlockJobCBData *data;

    data = g_new0(TestBlockJobCBData, 1);
    bs = bdrv_new();
    s = block_job_create(&test_block_job_driver, bs, 0, test_block_job_cb,
                         data, &error_abort);
    s->iterations = iterations;
    s->use_timer = use_timer;
    s->rc = rc;
    s->result = result;
    s->common.co = qemu_coroutine_create(test_block_job_run);
    data->job = s;
    data->result = result;
    qemu_coroutine_enter(s->common.co, s);
    return &s->common;
}
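
All three test variants reference test_block_job_run() without showing it. A plausible minimal body for this oldest variant, assuming the block_job_sleep_ns()/block_job_yield()/block_job_completed() helpers of the same era (the opaque pointer arrives through the second argument of qemu_coroutine_enter()):

static void coroutine_fn test_block_job_run(void *opaque)
{
    TestBlockJob *s = opaque;
    BlockJob *job = &s->common;

    while (s->iterations--) {
        if (s->use_timer) {
            /* 0 ns sleep: yield and resume on the next event loop turn. */
            block_job_sleep_ns(job, QEMU_CLOCK_REALTIME, 0);
        } else {
            /* Park until the test re-enters the coroutine. */
            block_job_yield(job);
        }

        if (block_job_is_cancelled(job)) {
            break;
        }
    }

    block_job_completed(job, s->rc);
}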
Example #7
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB.  When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the graph
         * changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        block_job_early_fail(&s->common);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}
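
For reference, the public entry point of this era reduces the QMP-level sync mode to the is_none_mode/base pair before delegating here. A sketch, where mirror_job_driver and the incremental-mode rejection are assumptions based on trees of this vintage:

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, errp);
}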
Example #8
void stream_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, const char *backing_file_str,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error, Error **errp)
{
    StreamBlockJob *s;
    BlockDriverState *iter;
    bool bs_read_only;

    if (bdrv_freeze_backing_chain(bs, base, errp) < 0) {
        return;
    }

    /* Make sure that the image is opened in read-write mode */
    bs_read_only = bdrv_is_read_only(bs);
    if (bs_read_only) {
        if (bdrv_reopen_set_read_only(bs, false, errp) != 0) {
            bs_read_only = false;
            goto fail;
        }
    }

    /* Prevent concurrent jobs trying to modify the graph structure here, we
     * already have our own plans. Also don't allow resize as the image size is
     * queried only at the job start and then cached. */
    s = block_job_create(job_id, &stream_job_driver, NULL, bs,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_GRAPH_MOD,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE,
                         speed, creation_flags, NULL, NULL, errp);
    if (!s) {
        goto fail;
    }

    /* Block all intermediate nodes between bs and base, because they will
     * disappear from the chain after this operation. The streaming job reads
     * every block only once, assuming that it doesn't change, so block writes
     * and resizes. */
    for (iter = backing_bs(bs); iter && iter != base; iter = backing_bs(iter)) {
        block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED,
                           &error_abort);
    }

    s->base = base;
    s->backing_file_str = g_strdup(backing_file_str);
    s->bs_read_only = bs_read_only;
    s->chain_frozen = true;

    s->on_error = on_error;
    trace_stream_start(bs, base, s);
    job_start(&s->common.job);
    return;

fail:
    if (bs_read_only) {
        bdrv_reopen_set_read_only(bs, true, NULL);
    }
    bdrv_unfreeze_backing_chain(bs, base);
}
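
A hypothetical invocation, streaming the whole backing chain into bs (job id and error handling chosen for illustration):

Error *local_err = NULL;

/* base == NULL streams the entire chain; backing_file_str == NULL keeps
 * the default backing file string in the image afterwards. */
stream_start("stream0", bs, NULL, NULL, JOB_DEFAULT,
             0 /* no speed limit */, BLOCKDEV_ON_ERROR_REPORT, &local_err);
if (local_err) {
    error_report_err(local_err);
}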
Example #9
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error, const char *backing_file_str,
                  const char *filter_node_name, Error **errp)
{
    CommitBlockJob *s;
    BlockDriverState *iter;
    BlockDriverState *commit_top_bs = NULL;
    Error *local_err = NULL;
    int ret;

    assert(top != bs);
    if (top == base) {
        error_setg(errp, "Invalid files for merge: top and base are the same");
        return;
    }

    s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
                         speed, creation_flags, NULL, NULL, errp);
    if (!s) {
        return;
    }

    /* convert base to r/w, if necessary */
    s->base_read_only = bdrv_is_read_only(base);
    if (s->base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) != 0) {
            goto fail;
        }
    }

    /* Insert commit_top block node above top, so we can block consistent read
     * on the backing chain below it */
    commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, filter_node_name, 0,
                                         errp);
    if (commit_top_bs == NULL) {
        goto fail;
    }
    if (!filter_node_name) {
        commit_top_bs->implicit = true;
    }
    commit_top_bs->total_sectors = top->total_sectors;
    bdrv_set_aio_context(commit_top_bs, bdrv_get_aio_context(top));

    bdrv_set_backing_hd(commit_top_bs, top, &local_err);
    if (local_err) {
        bdrv_unref(commit_top_bs);
        commit_top_bs = NULL;
        error_propagate(errp, local_err);
        goto fail;
    }
    bdrv_replace_node(top, commit_top_bs, &local_err);
    if (local_err) {
        bdrv_unref(commit_top_bs);
        commit_top_bs = NULL;
        error_propagate(errp, local_err);
        goto fail;
    }

    s->commit_top_bs = commit_top_bs;
    bdrv_unref(commit_top_bs);

    /* Block all nodes between top and base, because they will
     * disappear from the chain after this operation. */
    assert(bdrv_chain_contains(top, base));
    for (iter = top; iter != base; iter = backing_bs(iter)) {
        /* XXX BLK_PERM_WRITE needs to be allowed so we don't block ourselves
         * at s->base (if writes are blocked for a node, they are also blocked
         * for its backing file). The other options would be a second filter
         * driver above s->base. */
        ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                 BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                 errp);
        if (ret < 0) {
            goto fail;
        }
    }

    if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
        goto fail;
    }
    s->chain_frozen = true;

    ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
    if (ret < 0) {
        goto fail;
    }

    s->base = blk_new(BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_WRITE
                      | BLK_PERM_RESIZE,
                      BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_GRAPH_MOD
                      | BLK_PERM_WRITE_UNCHANGED);
    ret = blk_insert_bs(s->base, base, errp);
    if (ret < 0) {
        goto fail;
    }
    s->base_bs = base;

    /* Required permissions are already taken with block_job_add_bdrv() */
    s->top = blk_new(0, BLK_PERM_ALL);
    ret = blk_insert_bs(s->top, top, errp);
    if (ret < 0) {
        goto fail;
    }

    s->backing_file_str = g_strdup(backing_file_str);
    s->on_error = on_error;

    trace_commit_start(bs, base, top, s);
    job_start(&s->common.job);
    return;

fail:
    if (s->chain_frozen) {
        bdrv_unfreeze_backing_chain(commit_top_bs, base);
    }
    if (s->base) {
        blk_unref(s->base);
    }
    if (s->top) {
        blk_unref(s->top);
    }
    job_early_fail(&s->common.job);
    /* commit_top_bs has to be replaced after deleting the block job,
     * otherwise this would fail because of lack of permissions. */
    if (commit_top_bs) {
        bdrv_replace_node(commit_top_bs, top, &error_abort);
    }
}
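
A hypothetical invocation of this version, committing top down into base with both nodes already resolved by the caller (job id chosen for illustration):

Error *local_err = NULL;

commit_start("commit0", bs, base, top, JOB_DEFAULT,
             0 /* no speed limit */, BLOCKDEV_ON_ERROR_REPORT,
             NULL /* keep the default backing file string */,
             NULL /* let the block layer pick the filter node name */,
             &local_err);
if (local_err) {
    error_report_err(local_err);
}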
Example #10
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                  BlockDriverState *top, int64_t speed,
                  BlockErrorAction on_error, BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    CommitBlockJob *s;
    BlockReopenQueue *reopen_queue = NULL;
    int orig_overlay_flags;
    int orig_base_flags;
    BlockDriverState *overlay_bs;
    Error *local_err = NULL;

    if ((on_error == BLOCK_ERR_STOP_ANY ||
         on_error == BLOCK_ERR_STOP_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER_COMBINATION);
        return;
    }

    /* Once we support top == active layer, remove this check */
    if (top == bs) {
        error_set(errp, QERR_TOP_IS_ACTIVE);
        return;
    }

    if (top == base) {
        error_set(errp, QERR_TOP_AND_BASE_IDENTICAL);
        return;
    }

    overlay_bs = bdrv_find_overlay(bs, top);

    if (overlay_bs == NULL) {
        error_set(errp, QERR_TOP_NOT_FOUND, top->filename);
        return;
    }

    orig_base_flags    = bdrv_get_flags(base);
    orig_overlay_flags = bdrv_get_flags(overlay_bs);

    /* convert base & overlay_bs to r/w, if necessary */
    if (!(orig_base_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, base,
                                         orig_base_flags | BDRV_O_RDWR);
    }
    if (!(orig_overlay_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, overlay_bs,
                                         orig_overlay_flags | BDRV_O_RDWR);
    }
    if (reopen_queue) {
        bdrv_reopen_multiple(reopen_queue, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    s = block_job_create(&commit_job_type, bs, speed, cb, opaque);
    if (!s) {
        error_set(errp, QERR_DEVICE_IN_USE, bs->device_name);
        return;
    }

    s->base   = base;
    s->top    = top;
    s->active = bs;

    s->base_flags          = orig_base_flags;
    s->orig_overlay_flags  = orig_overlay_flags;

    s->on_error = on_error;
    s->common.co = qemu_coroutine_create(commit_run);

    trace_commit_start(bs, base, top, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
Example #11
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                  BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    CommitBlockJob *s;
    BlockReopenQueue *reopen_queue = NULL;
    int orig_overlay_flags;
    int orig_base_flags;
    BlockDriverState *overlay_bs;
    Error *local_err = NULL;

    if ((on_error == BLOCKDEV_ON_ERROR_STOP ||
         on_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER_COMBINATION);
        return;
    }

    /* Once we support top == active layer, remove this check */
    if (top == bs) {
        error_setg(errp,
                   "Top image as the active layer is currently unsupported");
        return;
    }

    if (top == base) {
        error_setg(errp, "Invalid files for merge: top and base are the same");
        return;
    }

    overlay_bs = bdrv_find_overlay(bs, top);

    if (overlay_bs == NULL) {
        error_setg(errp, "Could not find overlay image for %s:", top->filename);
        return;
    }

    orig_base_flags    = bdrv_get_flags(base);
    orig_overlay_flags = bdrv_get_flags(overlay_bs);

    /* convert base & overlay_bs to r/w, if necessary */
    if (!(orig_base_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, base,
                                         orig_base_flags | BDRV_O_RDWR);
    }
    if (!(orig_overlay_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, overlay_bs,
                                         orig_overlay_flags | BDRV_O_RDWR);
    }
    if (reopen_queue) {
        bdrv_reopen_multiple(reopen_queue, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    s = block_job_create(&commit_job_type, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->base   = base;
    s->top    = top;
    s->active = bs;

    s->base_flags          = orig_base_flags;
    s->orig_overlay_flags  = orig_overlay_flags;

    s->on_error = on_error;
    s->common.co = qemu_coroutine_create(commit_run);

    trace_commit_start(bs, base, top, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
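
In this older generation the result comes back through the completion callback instead of a job id. A hypothetical caller, with all names invented for illustration:

static void commit_done(void *opaque, int ret)
{
    if (ret < 0) {
        fprintf(stderr, "commit failed: %s\n", strerror(-ret));
    }
}

static void start_commit_sketch(BlockDriverState *bs, BlockDriverState *base,
                                BlockDriverState *top)
{
    Error *local_err = NULL;

    commit_start(bs, base, top, 0 /* no speed limit */,
                 BLOCKDEV_ON_ERROR_REPORT, commit_done, NULL, &local_err);
    if (local_err) {
        error_report("%s", error_get_pretty(local_err));
        error_free(local_err);
    }
}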