static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;
    int pnum;
    int64_t ret;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        s->waiting_for_io = true;
        qemu_coroutine_yield();
        s->waiting_for_io = false;
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            s->waiting_for_io = true;
            qemu_coroutine_yield();
            s->waiting_for_io = false;
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }
        if (IOV_MAX < nb_chunks + added_chunks) {
            trace_mirror_break_iov_max(s, nb_chunks, added_chunks);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector &&
            bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                      nb_sectors, &pnum);
    if (ret < 0 || pnum < nb_sectors ||
            (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
        bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                       mirror_read_complete, op);
    } else if (ret & BDRV_BLOCK_ZERO) {
        bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    } else {
        assert(!(ret & BDRV_BLOCK_DATA));
        bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
                         mirror_write_complete, op);
    }
    return delay_ns;
}
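
/*
 * Illustrative sketch, not part of the original source: the sector/chunk
 * arithmetic that mirror_iteration() above relies on.  It assumes the
 * standard QEMU constants (BDRV_SECTOR_SIZE == 512, BDRV_SECTOR_BITS == 9)
 * and a power-of-two granularity; the helper names are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

#define EX_BDRV_SECTOR_BITS 9

static int64_t ex_chunk_of_sector(int64_t sector_num, int granularity)
{
    int sectors_per_chunk = granularity >> EX_BDRV_SECTOR_BITS;
    /* Same computation as next_chunk = next_sector / sectors_per_chunk. */
    return sector_num / sectors_per_chunk;
}

/* With the default 64 KiB granularity, one chunk spans 128 sectors:
 * sectors 0..127 map to chunk 0, and sector 128 starts chunk 1. */
static void ex_chunk_demo(void)
{
    assert(ex_chunk_of_sector(127, 64 * 1024) == 0);
    assert(ex_chunk_of_sector(128, 64 * 1024) == 1);
}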
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, offset, io_bytes,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_bytes;
            }
            break;
        default:
            abort();
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
        }
    }
    return delay_ns;
}
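
/*
 * Illustrative sketch, not part of the original source: the io_bytes
 * alignment step from the loop above in isolation.  The block-status
 * result is truncated down to a multiple of the granularity, but never
 * below one full chunk, so the loop always makes forward progress.
 * The helper name is hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static int64_t ex_align_io_bytes(int64_t io_bytes, int64_t granularity)
{
    io_bytes -= io_bytes % granularity;       /* truncate to chunk multiple */
    if (io_bytes < granularity) {
        io_bytes = granularity;               /* but copy at least one chunk */
    }
    return io_bytes;
}

static void ex_align_demo(void)
{
    assert(ex_align_io_bytes(3 * 65536 + 100, 65536) == 3 * 65536);
    assert(ex_align_io_bytes(100, 65536) == 65536);
}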
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->common.len = bdrv_getlength(bs);
    if (s->common.len <= 0) {
        block_job_completed(&s->common, s->common.len);
        return;
    }

    length = (bdrv_getlength(bs) + s->granularity - 1) / s->granularity;
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        bdrv_get_info(s->target, &bdi);
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    s->buf = qemu_blockalign(bs, s->buf_size);
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (s->mode != MIRROR_SYNC_MODE_NONE) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base;
        base = s->mode == MIRROR_SYNC_MODE_FULL ? NULL : bs->backing_hd;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty(bs, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(bs, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(bs);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that qemu_aio_flush() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
                continue;
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) == BDRV_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                s->common.offset = end * BDRV_SECTOR_SIZE;
                if (!s->synced) {
                    block_job_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain_all();
            cnt = bdrv_get_dirty_count(bs);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced);
        if (!s->synced) {
            /* Publish progress */
            s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;

            if (s->common.speed) {
                delay_ns = ratelimit_calculate_delay(&s->limit, sectors_per_chunk);
            } else {
                delay_ns = 0;
            }

            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_set_dirty_tracking(bs, 0);
    bdrv_iostatus_disable(s->target);
    if (s->should_complete && ret == 0) {
        if (bdrv_get_flags(s->target) != bdrv_get_flags(s->common.bs)) {
            bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
        }
        bdrv_swap(s->target, s->common.bs);
    }
    bdrv_close(s->target);
    bdrv_unref(s->target);
    block_job_completed(&s->common, ret);
}
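
/*
 * Illustrative sketch, not part of the original source: the bitwise
 * round-up idiom used by the dirty-bitmap initialization loop in
 * mirror_run() above.  For a power-of-two sectors_per_chunk, OR-ing with
 * (sectors_per_chunk - 1) sets all the low bits, and adding 1 carries
 * into the next chunk boundary.  The helper name is hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static int64_t ex_next_chunk_boundary(int64_t sector_num,
                                      int64_t sectors_per_chunk)
{
    return (sector_num | (sectors_per_chunk - 1)) + 1;
}

static void ex_boundary_demo(void)
{
    assert(ex_next_chunk_boundary(10, 8) == 16);
    /* An already-aligned sector still advances a full chunk, which is what
     * the loop wants: "next" must be strictly greater than sector_num. */
    assert(ex_next_chunk_boundary(16, 8) == 24);
}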
static void coroutine_fn commit_run(void *opaque)
{
    CommitBlockJob *s = opaque;
    BlockDriverState *active = s->active;
    BlockDriverState *top = s->top;
    BlockDriverState *base = s->base;
    BlockDriverState *overlay_bs = NULL;
    int64_t sector_num, end;
    int ret = 0;
    int n = 0;
    void *buf;
    int bytes_written = 0;
    int64_t base_len;

    ret = s->common.len = bdrv_getlength(top);

    if (s->common.len < 0) {
        goto exit_restore_reopen;
    }

    ret = base_len = bdrv_getlength(base);
    if (base_len < 0) {
        goto exit_restore_reopen;
    }

    if (base_len < s->common.len) {
        ret = bdrv_truncate(base, s->common.len);
        if (ret) {
            goto exit_restore_reopen;
        }
    }

    overlay_bs = bdrv_find_overlay(active, top);

    end = s->common.len >> BDRV_SECTOR_BITS;
    buf = qemu_blockalign(top, COMMIT_BUFFER_SIZE);

    for (sector_num = 0; sector_num < end; sector_num += n) {
        uint64_t delay_ms = 0;
        bool copy;

wait:
        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that qemu_aio_flush() returns.
         */
        block_job_sleep(&s->common, rt_clock, delay_ms);
        if (block_job_is_cancelled(&s->common)) {
            break;
        }

        /* Copy if allocated above the base */
        ret = bdrv_co_is_allocated_above(top, base, sector_num,
                                         COMMIT_BUFFER_SIZE / BDRV_SECTOR_SIZE,
                                         &n);
        copy = (ret == 1);
        trace_commit_one_iteration(s, sector_num, n, ret);
        if (copy) {
            if (s->common.speed) {
                delay_ms = ratelimit_calculate_delay(&s->limit, n);
                if (delay_ms > 0) {
                    goto wait;
                }
            }
            ret = commit_populate(top, base, sector_num, n, buf);
            bytes_written += n * BDRV_SECTOR_SIZE;
        }
        if (ret < 0) {
            if (s->on_error == BLOCK_ERR_STOP_ANY ||
                s->on_error == BLOCK_ERR_REPORT ||
                (s->on_error == BLOCK_ERR_STOP_ENOSPC && ret == -ENOSPC)) {
                goto exit_free_buf;
            } else {
                n = 0;
                continue;
            }
        }
        /* Publish progress */
        s->common.offset += n * BDRV_SECTOR_SIZE;
    }

    ret = 0;

    if (!block_job_is_cancelled(&s->common) && sector_num == end) {
        /* success */
        ret = bdrv_drop_intermediate(active, top, base);
    }

exit_free_buf:
    qemu_vfree(buf);

exit_restore_reopen:
    /* restore base open flags here if appropriate (e.g., change the base back
     * to r/o). These reopens do not need to be atomic, since we won't abort
     * even on failure here */
    if (s->base_flags != bdrv_get_flags(base)) {
        bdrv_reopen(base, s->base_flags, NULL);
    }
    if (s->orig_overlay_flags != bdrv_get_flags(overlay_bs)) {
        bdrv_reopen(overlay_bs, s->orig_overlay_flags, NULL);
    }

    block_job_complete(&s->common, ret);
}
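
/*
 * Illustrative sketch, not part of the original source: the throttle-and-
 * retry control flow of the commit_run() main loop above, stripped of I/O.
 * ex_sleep() and ex_rate_delay() are hypothetical stand-ins for
 * block_job_sleep() and ratelimit_calculate_delay(); only the shape of the
 * "goto wait" path is the point.
 */
#include <stdint.h>

static void ex_sleep(uint64_t delay_ms) { (void)delay_ms; }
static uint64_t ex_rate_delay(int sectors) { (void)sectors; return 0; }

static void ex_commit_loop(int64_t end, int n)
{
    for (int64_t sector_num = 0; sector_num < end; sector_num += n) {
        uint64_t delay_ms = 0;
wait:
        /* Sleeping with delay_ms == 0 still yields, so pending I/O can
         * complete even when no rate limit is configured. */
        ex_sleep(delay_ms);

        delay_ms = ex_rate_delay(n);
        if (delay_ms > 0) {
            /* Over budget: sleep and retry the same sectors before copying. */
            goto wait;
        }
        /* ...copy n sectors starting at sector_num... */
    }
}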