static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}
static void test_nesting(void)
{
    Coroutine *root;
    NestData nd = {
        .n_enter  = 0,
        .n_return = 0,
        .max      = 128,
    };

    root = qemu_coroutine_create(nest);
    qemu_coroutine_enter(root, &nd);

    /* Must enter and return from max nesting level */
    g_assert_cmpint(nd.n_enter, ==, nd.max);
    g_assert_cmpint(nd.n_return, ==, nd.max);
}

/*
 * Check that yield/enter transfer control correctly
 */
static void coroutine_fn yield_5_times(void *opaque)
{
    bool *done = opaque;
    int i;

    for (i = 0; i < 5; i++) {
        qemu_coroutine_yield();
    }
    *done = true;
}
ssize_t coroutine_fn qemu_co_sendv_recvv(int sockfd, struct iovec *iov,
                                         unsigned iov_cnt, size_t offset,
                                         size_t bytes, bool do_send)
{
    size_t done = 0;
    ssize_t ret;
    while (done < bytes) {
        ret = iov_send_recv(sockfd, iov, iov_cnt,
                            offset + done, bytes - done, do_send);
        if (ret > 0) {
            done += ret;
        } else if (ret < 0) {
            if (errno == EAGAIN) {
                qemu_coroutine_yield();
            } else if (done == 0) {
                return -1;
            } else {
                break;
            }
        } else if (ret == 0 && !do_send) {
            /* write (send) should never return 0.
             * read (recv) returns 0 for end-of-file (-data).
             * In both cases there's little point retrying,
             * but we do for write anyway, just in case */
            break;
        }
    }
    return done;
}
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
/* A non-blocking call returned EAGAIN, so yield, ensuring the
 * handlers are set up so that we'll be rescheduled when there is an
 * interesting event on the socket. */
static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
{
    int r;
    IOHandler *rd_handler = NULL, *wr_handler = NULL;
    Coroutine *co = qemu_coroutine_self();

    r = libssh2_session_block_directions(s->session);

    if (r & LIBSSH2_SESSION_BLOCK_INBOUND) {
        rd_handler = restart_coroutine;
    }
    if (r & LIBSSH2_SESSION_BLOCK_OUTBOUND) {
        wr_handler = restart_coroutine;
    }

    DPRINTF("s->sock=%d rd_handler=%p wr_handler=%p", s->sock,
            rd_handler, wr_handler);

    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
                       false, rd_handler, wr_handler, NULL, co);
    qemu_coroutine_yield();

    DPRINTF("s->sock=%d - back", s->sock);
    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
                       false, NULL, NULL, NULL, NULL);
}
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
void coroutine_fn qemu_co_queue_wait(CoQueue *queue)
{
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());
}
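/* The function above is only the wait side of the CoQueue pattern: the
 * coroutine appends itself to the queue and yields; some other context
 * later restarts it.  Below is a minimal, hypothetical sketch of how the
 * two halves pair up.  Only qemu_co_queue_init(), qemu_co_queue_wait(),
 * qemu_co_queue_next() and qemu_co_queue_restart_all() are real QEMU APIs;
 * the surrounding names (wait_queue, resource_ready, consumer_co,
 * producer_co) are invented for illustration. */
static CoQueue wait_queue;     /* set up once with qemu_co_queue_init() */
static bool resource_ready;

static void coroutine_fn consumer_co(void *opaque)
{
    while (!resource_ready) {
        /* Park ourselves on the queue and yield; we resume when a
         * producer calls qemu_co_queue_next() on the same queue. */
        qemu_co_queue_wait(&wait_queue);
    }
    /* ... use the resource ... */
}

static void coroutine_fn producer_co(void *opaque)
{
    resource_ready = true;
    /* Wake the first waiter, if any; qemu_co_queue_restart_all()
     * would wake every waiter instead. */
    qemu_co_queue_next(&wait_queue);
}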
static void nbd_co_receive_reply(NBDClientSession *s,
                                 NBDRequest *request,
                                 NBDReply *reply,
                                 QEMUIOVector *qiov)
{
    int ret;

    /* Wait until we're woken up by nbd_read_reply_entry.  */
    qemu_coroutine_yield();
    *reply = s->reply;
    if (reply->handle != request->handle ||
        !s->ioc) {
        reply->error = EIO;
    } else {
        if (qiov && reply->error == 0) {
            ret = nbd_rwv(s->ioc, qiov->iov, qiov->niov, request->len, true,
                          NULL);
            if (ret != request->len) {
                reply->error = EIO;
            }
        }

        /* Tell the read handler to read another header.  */
        s->reply.handle = 0;
    }
}
static void nbd_co_receive_reply(NbdClientSession *s,
    struct nbd_request *request, struct nbd_reply *reply,
    QEMUIOVector *qiov, int offset)
{
    int ret;

    /* Wait until we're woken up by the read handler.  TODO: perhaps
     * peek at the next reply and avoid yielding if it's ours?  */
    qemu_coroutine_yield();
    *reply = s->reply;
    if (reply->handle != request->handle) {
        reply->error = EIO;
    } else {
        if (qiov && reply->error == 0) {
            ret = qemu_co_recvv(s->sock, qiov->iov, qiov->niov,
                                offset, request->len);
            if (ret != request->len) {
                reply->error = EIO;
            }
        }

        /* Tell the read handler to read another header.  */
        s->reply.handle = 0;
    }
}
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        s->waiting_for_io = true;
        qemu_coroutine_yield();
        s->waiting_for_io = false;
    }
}
ssize_t nbd_wr_syncv(QIOChannel *ioc,
                     struct iovec *iov,
                     size_t niov,
                     size_t length,
                     bool do_read)
{
    ssize_t done = 0;
    Error *local_err = NULL;
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;

    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, length);

    while (nlocal_iov > 0) {
        ssize_t len;
        if (do_read) {
            len = qio_channel_readv(ioc, local_iov, nlocal_iov, &local_err);
        } else {
            len = qio_channel_writev(ioc, local_iov, nlocal_iov, &local_err);
        }
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                /* XXX figure out if we can create a variant on
                 * qio_channel_yield() that works with AIO contexts
                 * and consider using that in this branch */
                qemu_coroutine_yield();
            } else if (done) {
                /* XXX this is needed by nbd_reply_ready.  */
                qio_channel_wait(ioc,
                                 do_read ? G_IO_IN : G_IO_OUT);
            } else {
                return -EAGAIN;
            }
            continue;
        }
        if (len < 0) {
            TRACE("I/O error: %s", error_get_pretty(local_err));
            error_free(local_err);
            /* XXX handle Error objects */
            done = -EIO;
            goto cleanup;
        }

        if (do_read && len == 0) {
            break;
        }

        iov_discard_front(&local_iov, &nlocal_iov, len);
        done += len;
    }

 cleanup:
    g_free(local_iov_head);
    return done;
}
void coroutine_fn yield_until_fd_readable(int fd)
{
    FDYieldUntilData data;

    assert(qemu_in_coroutine());
    data.co = qemu_coroutine_self();
    data.fd = fd;
    qemu_set_fd_handler(fd, fd_coroutine_enter, NULL, &data);
    qemu_coroutine_yield();
}
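/* yield_until_fd_readable() registers fd_coroutine_enter() as the read
 * handler but the snippet does not show it.  Presumably it clears the
 * handler and re-enters the parked coroutine, roughly as sketched below
 * (a sketch, not the actual QEMU implementation; older QEMU versions pass
 * an extra opaque argument to qemu_coroutine_enter()). */
static void fd_coroutine_enter(void *opaque)
{
    FDYieldUntilData *data = opaque;

    /* Deregister ourselves so the handler fires only once, then resume
     * the coroutine that yielded in yield_until_fd_readable(). */
    qemu_set_fd_handler(data->fd, NULL, NULL, NULL);
    qemu_coroutine_enter(data->co);
}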
static void coroutine_fn mutex_fn(void *opaque)
{
    CoMutex *m = opaque;
    qemu_co_mutex_lock(m);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_co_mutex_unlock(m);
    done++;
}
static void coroutine_fn lockable_fn(void *opaque)
{
    QemuLockable *x = opaque;
    qemu_lockable_lock(x);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_lockable_unlock(x);
    done++;
}
void async_wait_fd(int fd, int flags)
{
    action_t *action = pthread_getspecific(my_thread_state_key);

    action->fd = fd;
    action->ready = 0;
    action->flags = flags;
    while (!action->ready) {
        qemu_coroutine_yield();
    }
}
static void coroutine_fn verify_entered_step_2(void *opaque)
{
    Coroutine *caller = (Coroutine *)opaque;

    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
    qemu_coroutine_yield();

    /* Once more to check it still works after yielding */
    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
}
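/* verify_entered_step_2() is only half of the test; the caller side is not
 * shown.  A rough sketch of what a matching "step 1" coroutine could look
 * like follows (assuming the newer two-argument qemu_coroutine_create()
 * and one-argument qemu_coroutine_enter() that accompany
 * qemu_coroutine_entered(); this is an illustration, not the exact test). */
static void coroutine_fn verify_entered_step_1(void *opaque)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *coroutine;

    g_assert(qemu_coroutine_entered(self));

    coroutine = qemu_coroutine_create(verify_entered_step_2, self);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
    /* step 2 yielded back to us, so it is no longer "entered" ... */
    g_assert(!qemu_coroutine_entered(coroutine));
    /* ... and entering it again lets it run to completion. */
    qemu_coroutine_enter(coroutine);
}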
void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    qemu_coroutine_yield();
    job->busy = true;
}
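/* block_job_yield() parks the job coroutine with busy == false; something
 * else must re-enter it later.  A hypothetical wake-up helper might look
 * like this (illustrative only: the qemu_coroutine_enter() signature and
 * the exact resume path differ between QEMU versions, e.g. block_job_enter()
 * or block_job_resume() in the real tree). */
static void example_block_job_wake(BlockJob *job)
{
    /* Only re-enter a coroutine that is actually parked in
     * block_job_yield(); entering a busy coroutine would be a bug. */
    if (job->co && !job->busy) {
        qemu_coroutine_enter(job->co, NULL);
    }
}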
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition)
{
    QIOChannelYieldData data;

    assert(qemu_in_coroutine());
    data.ioc = ioc;
    data.co = qemu_coroutine_self();
    qio_channel_add_watch(ioc,
                          condition,
                          qio_channel_yield_enter,
                          &data,
                          NULL);
    qemu_coroutine_yield();
}
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition)
{
    assert(qemu_in_coroutine());

    if (condition == G_IO_IN) {
        assert(!ioc->read_coroutine);
        ioc->read_coroutine = qemu_coroutine_self();
    } else if (condition == G_IO_OUT) {
        assert(!ioc->write_coroutine);
        ioc->write_coroutine = qemu_coroutine_self();
    } else {
        abort();
    }
    qio_channel_set_aio_fd_handlers(ioc);
    qemu_coroutine_yield();
}
static coroutine_fn void nbd_read_reply_entry(void *opaque)
{
    NBDClientSession *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (!s->quit) {
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
        }
        if (ret <= 0) {
            break;
        }

        /* There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving) {
            break;
        }

        /* We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering read_reply_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   read_reply_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }

    s->quit = true;
    nbd_recv_coroutines_wake_all(s);
    s->read_reply_co = NULL;
}
void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (block_job_is_paused(job)) {
        qemu_coroutine_yield();
    } else {
        co_aio_sleep_ns(bdrv_get_aio_context(job->bs), type, ns);
    }
    job->busy = true;
}
static void nbd_co_receive_reply(NBDClientSession *s,
                                 NBDRequest *request,
                                 NBDReply *reply,
                                 QEMUIOVector *qiov)
{
    int i = HANDLE_TO_INDEX(s, request->handle);

    /* Wait until we're woken up by nbd_read_reply_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    *reply = s->reply;
    if (reply->handle != request->handle || !s->ioc || s->quit) {
        reply->error = EIO;
    } else {
        if (qiov && reply->error == 0) {
            assert(request->len == iov_size(qiov->iov, qiov->niov));
            if (qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                      NULL) < 0) {
                reply->error = EIO;
                s->quit = true;
            }
        }

        /* Tell the read handler to read another header.  */
        s->reply.handle = 0;
    }

    s->requests[i].coroutine = NULL;

    /* Kick the read_reply_co to get the next reply.  */
    if (s->read_reply_co) {
        aio_co_wake(s->read_reply_co);
    }

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);
}
static void perf_nesting(void)
{
    unsigned int i, maxcycles, maxnesting;
    double duration;

    maxcycles = 10000;
    maxnesting = 1000;
    Coroutine *root;

    g_test_timer_start();
    for (i = 0; i < maxcycles; i++) {
        NestData nd = {
            .n_enter  = 0,
            .n_return = 0,
            .max      = maxnesting,
        };
        root = qemu_coroutine_create(nest);
        qemu_coroutine_enter(root, &nd);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Nesting %u iterations of %u depth each: %f s\n",
        maxcycles, maxnesting, duration);
}

/*
 * Yield benchmark
 */
static void coroutine_fn yield_loop(void *opaque)
{
    unsigned int *counter = opaque;

    while ((*counter) > 0) {
        (*counter)--;
        qemu_coroutine_yield();
    }
}
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
static void nbd_co_receive_reply(BDRVNBDState *s, struct nbd_request *request,
                                 struct nbd_reply *reply,
                                 struct iovec *iov, int offset)
{
    int ret;

    /* Wait until we're woken up by the read handler.  TODO: perhaps
     * peek at the next reply and avoid yielding if it's ours?  */
    qemu_coroutine_yield();
    *reply = s->reply;
    if (reply->handle != request->handle) {
        reply->error = EIO;
    } else {
        if (iov && reply->error == 0) {
            ret = qemu_co_recvv(s->sock, iov, request->len, offset);
            if (ret != request->len) {
                reply->error = EIO;
            }
        }

        /* Tell the read handler to read another header.  */
        s->reply.handle = 0;
    }
}
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    MirrorOp *op;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(source, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(source));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->common.len >> BDRV_SECTOR_BITS;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done.  */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
    } while (next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, s->granularity);

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty(source, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                   mirror_read_complete, op);
}
/* A non-blocking call returned EAGAIN, so yield, ensuring the
 * handlers are set up so that we'll be rescheduled when there is an
 * interesting event on the socket. */
static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
{
    set_fd_handler(s, bs);
    qemu_coroutine_yield();
    clear_fd_handler(s, bs);
}
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->common.len = bdrv_getlength(bs);
    if (s->common.len <= 0) {
        block_job_completed(&s->common, s->common.len);
        return;
    }

    length = (bdrv_getlength(bs) + s->granularity - 1) / s->granularity;
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        bdrv_get_info(s->target, &bdi);
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    s->buf = qemu_blockalign(bs, s->buf_size);
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (s->mode != MIRROR_SYNC_MODE_NONE) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base;
        base = s->mode == MIRROR_SYNC_MODE_FULL ? NULL : bs->backing_hd;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty(bs, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(bs, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(bs);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that qemu_aio_flush() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
                continue;
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) == BDRV_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                s->common.offset = end * BDRV_SECTOR_SIZE;
                if (!s->synced) {
                    block_job_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain_all();
            cnt = bdrv_get_dirty_count(bs);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced);
        if (!s->synced) {
            /* Publish progress */
            s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;

            if (s->common.speed) {
                delay_ns = ratelimit_calculate_delay(&s->limit, sectors_per_chunk);
            } else {
                delay_ns = 0;
            }

            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_set_dirty_tracking(bs, 0);
    bdrv_iostatus_disable(s->target);
    if (s->should_complete && ret == 0) {
        if (bdrv_get_flags(s->target) != bdrv_get_flags(s->common.bs)) {
            bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
        }
        bdrv_swap(s->target, s->common.bs);
    }
    bdrv_close(s->target);
    bdrv_unref(s->target);
    block_job_completed(&s->common, ret);
}
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;
    int pnum;
    int64_t ret;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done.  */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        s->waiting_for_io = true;
        qemu_coroutine_yield();
        s->waiting_for_io = false;
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            s->waiting_for_io = true;
            qemu_coroutine_yield();
            s->waiting_for_io = false;
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }
        if (IOV_MAX < nb_chunks + added_chunks) {
            trace_mirror_break_iov_max(s, nb_chunks, added_chunks);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                      nb_sectors, &pnum);
    if (ret < 0 || pnum < nb_sectors ||
            (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
        bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                       mirror_read_complete, op);
    } else if (ret & BDRV_BLOCK_ZERO) {
        bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    } else {
        assert(!(ret & BDRV_BLOCK_DATA));
        bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
                         mirror_write_complete, op);
    }
    return delay_ns;
}