/*
 * Migration-state notifier for virtio-blk.
 *
 * The dataplane thread does not update the dirty memory bitmap yet, so it
 * must be torn down while a live migration is in progress and recreated
 * once the migration has finished or failed.
 */
static void virtio_blk_migration_state_changed(Notifier *notifier, void *data)
{
    VirtIOBlock *s = container_of(notifier, VirtIOBlock,
                                  migration_state_notifier);
    MigrationState *state = data;
    Error *local_err = NULL;

    if (migration_in_setup(state)) {
        /* Migration starting: fall back to non-dataplane processing. */
        if (s->dataplane) {
            virtio_blk_data_plane_destroy(s->dataplane);
            s->dataplane = NULL;
        }
        return;
    }

    if (!migration_has_finished(state) && !migration_has_failed(state)) {
        return;
    }
    if (s->dataplane) {
        /* Already recreated. */
        return;
    }

    /* Complete in-flight non-dataplane requests before switching back. */
    bdrv_drain_all();
    virtio_blk_data_plane_create(VIRTIO_DEVICE(s), &s->blk,
                                 &s->dataplane, &local_err);
    if (local_err != NULL) {
        error_report("%s", error_get_pretty(local_err));
        error_free(local_err);
    }
}
/** * Delete an internal snapshot by @snapshot_id and @name. * @bs: block device used in the operation * @snapshot_id: unique snapshot ID, or NULL * @name: snapshot name, or NULL * @errp: location to store error * * If both @snapshot_id and @name are specified, delete the first one with * id @snapshot_id and name @name. * If only @snapshot_id is specified, delete the first one with id * @snapshot_id. * If only @name is specified, delete the first one with name @name. * if none is specified, return -EINVAL. * * Returns: 0 on success, -errno on failure. If @bs is not inserted, return * -ENOMEDIUM. If @snapshot_id and @name are both NULL, return -EINVAL. If @bs * does not support internal snapshot deletion, return -ENOTSUP. If @bs does * not support parameter @snapshot_id or @name, or one of them is not correctly * specified, return -EINVAL. If @bs can't find one matching @id and @name, * return -ENOENT. If @errp != NULL, it will always be filled with error * message on failure. */ int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id, const char *name, Error **errp) { BlockDriver *drv = bs->drv; if (!drv) { error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, bdrv_get_device_name(bs)); return -ENOMEDIUM; } if (!snapshot_id && !name) { error_setg(errp, "snapshot_id and name are both NULL"); return -EINVAL; } /* drain all pending i/o before deleting snapshot */ bdrv_drain_all(); if (drv->bdrv_snapshot_delete) { return drv->bdrv_snapshot_delete(bs, snapshot_id, name, errp); } if (bs->file) { return bdrv_snapshot_delete(bs->file, snapshot_id, name, errp); } error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, drv->format_name, bdrv_get_device_name(bs), "internal snapshot deletion"); return -ENOTSUP; }
/*
 * Instantiate every child node of a simple-bus FDT node by launching one
 * init coroutine per child, then drain the block layer if any coroutines
 * were actually started.  @unused is ignored.  Always returns 0.
 */
int simple_bus_fdt_init(char *bus_node_path, FDTMachineInfo *fdti, void *unused)
{
    int child_count = qemu_devtree_get_num_children(fdti->fdt, bus_node_path,
                                                    1);
    char **child_paths = qemu_devtree_get_children(fdti->fdt, bus_node_path, 1);
    int pending_before = fdti->routinesPending;
    int i;

    DB_PRINT("num child devices: %d\n", child_count);

    for (i = 0; i < child_count; i++) {
        /* Ownership of the args struct passes to the coroutine. */
        struct FDTInitNodeArgs *args = g_malloc0(sizeof(*args));
        args->node_path = child_paths[i];
        args->fdti = fdti;
        fdti->routinesPending++;
        qemu_coroutine_enter(qemu_coroutine_create(fdt_init_node), args);
    }

    if (fdti->routinesPending != pending_before) {
        bdrv_drain_all();
    }
    g_free(child_paths);
    return 0;
}
static void virtio_blk_reset(VirtIODevice *vdev)
{
    /*
     * Drain instead of cancel: cancelling only this device's requests
     * would require per-device request lists, which do not exist yet.
     */
    bdrv_drain_all();
}
/*
 * Stop the VM: freeze the clock and vCPUs, record the new run state,
 * notify listeners, flush block-layer state, and emit the STOP event.
 * A no-op when the VM is not currently running.
 */
static void do_vm_stop(RunState state)
{
    if (!runstate_is_running()) {
        return;
    }

    cpu_disable_ticks();
    pause_all_vcpus();
    runstate_set(state);
    vm_state_notify(0, state);
    /* Quiesce and persist all block-device state before reporting STOP. */
    bdrv_drain_all();
    bdrv_flush_all();
    monitor_protocol_event(QEVENT_STOP, NULL);
}
static void virtio_blk_reset(VirtIODevice *vdev)
{
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    VirtIOBlock *s = to_virtio_blk(vdev);

    /* The dataplane thread must stop before the block layer is drained. */
    if (s->dataplane) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
#endif

    /*
     * Drain instead of cancel: cancelling only this device's requests
     * would require per-device request lists, which do not exist yet.
     */
    bdrv_drain_all();
}
static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    /* The dataplane thread must stop before the block layer is drained. */
    if (s->dataplane) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
#endif

    /*
     * Drain instead of cancel: cancelling only this device's requests
     * would require per-device request lists, which do not exist yet.
     */
    bdrv_drain_all();

    /* Undo any WCE change the guest negotiated during this session. */
    bdrv_set_enable_write_cache(s->bs, s->original_wce);
}
/*
 * Word write to the Xen platform device's fixed I/O ports.
 *
 * Port 0: device-unplug bitmask — bit 0 unplugs IDE disks, bit 1 unplugs
 * network devices, bit 2 (non-primary-master IDE disks) is unsupported.
 * Port 2: the guest reports which PV driver product it has loaded.
 */
static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
{
    PCIXenPlatformState *s = opaque;

    switch (addr) {
    case 0: {
        PCIDevice *pci_dev = PCI_DEVICE(s);

        if (val & UNPLUG_ALL_IDE_DISKS) {
            DPRINTF("unplug disks\n");
            /* Flush pending I/O to the emulated disks before removal. */
            bdrv_drain_all();
            bdrv_flush_all();
            pci_unplug_disks(pci_dev->bus);
        }
        if (val & UNPLUG_ALL_NICS) {
            DPRINTF("unplug nics\n");
            pci_unplug_nics(pci_dev->bus);
        }
        if (val & UNPLUG_AUX_IDE_DISKS) {
            DPRINTF("unplug auxiliary disks not supported\n");
        }
        break;
    }
    case 2:
        if (val == 1) {
            DPRINTF("Citrix Windows PV drivers loaded in guest\n");
        } else if (val == 0) {
            DPRINTF("Guest claimed to be running PV product 0?\n");
        } else {
            DPRINTF("Unknown PV product %d loaded in guest\n", val);
        }
        s->driver_product_version = val;
        break;
    }
}
/*
 * qemu-io entry point: parse command-line options, register the
 * interactive commands, optionally open the image given on the command
 * line, and run the command loop until the user quits.
 */
int main(int argc, char **argv)
{
    int readonly = 0;
    int growable = 0;
    const char *sopt = "hVc:d:rsnmgkt:T:";
    const struct option lopt[] = {
        { "help", 0, NULL, 'h' },
        { "version", 0, NULL, 'V' },
        { "offset", 1, NULL, 'o' },
        { "cmd", 1, NULL, 'c' },
        { "read-only", 0, NULL, 'r' },
        { "snapshot", 0, NULL, 's' },
        { "nocache", 0, NULL, 'n' },
        { "misalign", 0, NULL, 'm' },
        { "growable", 0, NULL, 'g' },
        { "native-aio", 0, NULL, 'k' },
        { "discard", 1, NULL, 'd' },
        { "cache", 1, NULL, 't' },
        { "trace", 1, NULL, 'T' },
        { NULL, 0, NULL, 0 }
    };
    int c;
    int opt_index = 0;
    int flags = BDRV_O_UNMAP;

    progname = basename(argv[0]);

    /* Translate command-line options into BDRV_O_* open flags and
     * module-level settings. */
    while ((c = getopt_long(argc, argv, sopt, lopt, &opt_index)) != -1) {
        switch (c) {
        case 's':
            flags |= BDRV_O_SNAPSHOT;
            break;
        case 'n':
            flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
            break;
        case 'd':
            if (bdrv_parse_discard_flags(optarg, &flags) < 0) {
                error_report("Invalid discard option: %s", optarg);
                exit(1);
            }
            break;
        case 'c':
            /* Queue a command to run non-interactively. */
            add_user_command(optarg);
            break;
        case 'r':
            readonly = 1;
            break;
        case 'm':
            misalign = 1;
            break;
        case 'g':
            growable = 1;
            break;
        case 'k':
            flags |= BDRV_O_NATIVE_AIO;
            break;
        case 't':
            if (bdrv_parse_cache_flags(optarg, &flags) < 0) {
                error_report("Invalid cache option: %s", optarg);
                exit(1);
            }
            break;
        case 'T':
            if (!trace_backend_init(optarg, NULL)) {
                exit(1); /* error message will have been printed */
            }
            break;
        case 'V':
            printf("%s version %s\n", progname, VERSION);
            exit(0);
        case 'h':
            usage(progname);
            exit(0);
        default:
            usage(progname);
            exit(1);
        }
    }

    /* At most one positional argument (the image file) is accepted. */
    if ((argc - optind) > 1) {
        usage(progname);
        exit(1);
    }

    qemu_init_main_loop();
    bdrv_init();

    /* initialize commands */
    quit_init();
    help_init();
    add_command(&open_cmd);
    add_command(&close_cmd);
    add_command(&read_cmd);
    add_command(&readv_cmd);
    add_command(&write_cmd);
    add_command(&writev_cmd);
    add_command(&multiwrite_cmd);
    add_command(&aio_read_cmd);
    add_command(&aio_write_cmd);
    add_command(&aio_flush_cmd);
    add_command(&flush_cmd);
    add_command(&truncate_cmd);
    add_command(&length_cmd);
    add_command(&info_cmd);
    add_command(&discard_cmd);
    add_command(&alloc_cmd);
    add_command(&map_cmd);
    add_command(&break_cmd);
    add_command(&resume_cmd);
    add_command(&wait_break_cmd);
    add_command(&abort_cmd);
    add_args_command(init_args_command);
    add_check_command(init_check_command);

    /* open the device */
    if (!readonly) {
        flags |= BDRV_O_RDWR;
    }

    if ((argc - optind) == 1) {
        openfile(argv[optind], flags, growable);
    }
    command_loop();

    /*
     * Make sure all outstanding requests complete before the program exits.
     */
    bdrv_drain_all();

    if (bs) {
        bdrv_delete(bs);
    }
    return 0;
}
/* aio_flush command: block until every outstanding AIO request completes. */
static int aio_flush_f(int argc, char **argv)
{
    bdrv_drain_all();
    return 0;
}
/*
 * Main coroutine of the drive-mirror block job.
 *
 * Optionally seeds the dirty bitmap from the allocation map (FULL/TOP sync
 * modes), then loops: copy dirty chunks to @s->target, rate-limit, and once
 * the source is clean report readiness.  On completion (or cancellation
 * after sync) it may swap the target into the source's place.  All errors
 * funnel through immediate_exit, which waits for in-flight operations and
 * releases every resource before reporting the job result.
 */
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->common.len = bdrv_getlength(bs);
    if (s->common.len <= 0) {
        /* Empty or unreadable source: nothing to mirror. */
        block_job_completed(&s->common, s->common.len);
        return;
    }

    /* One bitmap bit per granularity-sized chunk. */
    length = (bdrv_getlength(bs) + s->granularity - 1) / s->granularity;
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        bdrv_get_info(s->target, &bdi);
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    s->buf = qemu_blockalign(bs, s->buf_size);
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (s->mode != MIRROR_SYNC_MODE_NONE) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base;
        base = s->mode == MIRROR_SYNC_MODE_FULL ? NULL : bs->backing_hd;
        for (sector_num = 0; sector_num < end; ) {
            /* Round up to the next chunk boundary. */
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty(bs, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(bs, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            /* An asynchronous mirror operation failed. */
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(bs);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that qemu_aio_flush() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                /* No capacity for more work; wait for completions. */
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
                continue;
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            /* Source looks clean; flush the target to make that durable. */
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) == BDRV_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                s->common.offset = end * BDRV_SECTOR_SIZE;
                if (!s->synced) {
                    block_job_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain_all();
            cnt = bdrv_get_dirty_count(bs);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced);
        if (!s->synced) {
            /* Publish progress */
            s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;

            if (s->common.speed) {
                delay_ns = ratelimit_calculate_delay(&s->limit, sectors_per_chunk);
            } else {
                delay_ns = 0;
            }

            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_set_dirty_tracking(bs, 0);
    bdrv_iostatus_disable(s->target);
    if (s->should_complete && ret == 0) {
        /* Pivot: make the target take over from the source. */
        if (bdrv_get_flags(s->target) != bdrv_get_flags(s->common.bs)) {
            bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
        }
        bdrv_swap(s->target, s->common.bs);
    }
    bdrv_close(s->target);
    bdrv_unref(s->target);
    block_job_completed(&s->common, ret);
}
/*
 * qemu-io entry point (earlier variant): parse command-line options,
 * register the interactive commands, optionally open the image named on
 * the command line, and run the command loop until the user quits.
 */
int main(int argc, char **argv)
{
    int readonly = 0;
    int growable = 0;
    const char *sopt = "hVc:Crsnmgk";
    struct option lopt[] = {
        { "help", 0, NULL, 'h' },
        { "version", 0, NULL, 'V' },
        { "offset", 1, NULL, 'o' },
        { "cmd", 1, NULL, 'c' },
        { "create", 0, NULL, 'C' },
        { "read-only", 0, NULL, 'r' },
        { "snapshot", 0, NULL, 's' },
        { "nocache", 0, NULL, 'n' },
        { "misalign", 0, NULL, 'm' },
        { "growable", 0, NULL, 'g' },
        { "native-aio", 0, NULL, 'k' },
        { NULL, 0, NULL, 0 }
    };
    int c;
    int opt_index = 0;
    int flags = 0;

    progname = basename(argv[0]);

    /* Translate command-line options into BDRV_O_* open flags and
     * module-level settings. */
    while ((c = getopt_long(argc, argv, sopt, lopt, &opt_index)) != -1) {
        switch (c) {
        case 's':
            flags |= BDRV_O_SNAPSHOT;
            break;
        case 'n':
            flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
            break;
        case 'c':
            /* Queue a command to run non-interactively. */
            add_user_command(optarg);
            break;
        case 'C':
            flags |= BDRV_O_CREAT;
            break;
        case 'r':
            readonly = 1;
            break;
        case 'm':
            misalign = 1;
            break;
        case 'g':
            growable = 1;
            break;
        case 'k':
            flags |= BDRV_O_NATIVE_AIO;
            break;
        case 'V':
            printf("%s version %s\n", progname, VERSION);
            exit(0);
        case 'h':
            usage(progname);
            exit(0);
        default:
            usage(progname);
            exit(1);
        }
    }

    /* At most one positional argument (the image file) is accepted. */
    if ((argc - optind) > 1) {
        usage(progname);
        exit(1);
    }

    bdrv_init();

    /* initialize commands */
    quit_init();
    help_init();
    add_command(&open_cmd);
    add_command(&close_cmd);
    add_command(&read_cmd);
    add_command(&readv_cmd);
    add_command(&write_cmd);
    add_command(&writev_cmd);
    add_command(&aio_read_cmd);
    add_command(&aio_write_cmd);
    add_command(&aio_flush_cmd);
    add_command(&flush_cmd);
    add_command(&truncate_cmd);
    add_command(&length_cmd);
    add_command(&info_cmd);
    add_command(&alloc_cmd);
    add_args_command(init_args_command);
    add_check_command(init_check_command);

    /* open the device */
    if (!readonly) {
        flags |= BDRV_O_RDWR;
    }

    if ((argc - optind) == 1) {
        openfile(argv[optind], flags, growable);
    }
    command_loop();

    /*
     * Make sure all outstanding requests complete before the program exits.
     */
    bdrv_drain_all();

    if (bs) {
        bdrv_close(bs);
    }
    return 0;
}
/*
 * qemu-io entry point (readline-enabled variant): parse command-line
 * options, register the interactive commands, set up readline when stdin
 * is a TTY, optionally open the image named on the command line, and run
 * the command loop until the user quits.
 */
int main(int argc, char **argv)
{
    int readonly = 0;
    int growable = 0;
    const char *sopt = "hVc:d:rsnmgkt:T:";
    const struct option lopt[] = {
        { "help", 0, NULL, 'h' },
        { "version", 0, NULL, 'V' },
        { "offset", 1, NULL, 'o' },
        { "cmd", 1, NULL, 'c' },
        { "read-only", 0, NULL, 'r' },
        { "snapshot", 0, NULL, 's' },
        { "nocache", 0, NULL, 'n' },
        { "misalign", 0, NULL, 'm' },
        { "growable", 0, NULL, 'g' },
        { "native-aio", 0, NULL, 'k' },
        { "discard", 1, NULL, 'd' },
        { "cache", 1, NULL, 't' },
        { "trace", 1, NULL, 'T' },
        { NULL, 0, NULL, 0 }
    };
    int c;
    int opt_index = 0;
    int flags = BDRV_O_UNMAP;

#ifdef CONFIG_POSIX
    /* Don't die on SIGPIPE when a pipe consumer goes away. */
    signal(SIGPIPE, SIG_IGN);
#endif

    progname = basename(argv[0]);

    /* Translate command-line options into BDRV_O_* open flags and
     * module-level settings. */
    while ((c = getopt_long(argc, argv, sopt, lopt, &opt_index)) != -1) {
        switch (c) {
        case 's':
            flags |= BDRV_O_SNAPSHOT;
            break;
        case 'n':
            flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
            break;
        case 'd':
            if (bdrv_parse_discard_flags(optarg, &flags) < 0) {
                error_report("Invalid discard option: %s", optarg);
                exit(1);
            }
            break;
        case 'c':
            /* Queue a command to run non-interactively. */
            add_user_command(optarg);
            break;
        case 'r':
            readonly = 1;
            break;
        case 'm':
            qemuio_misalign = 1;
            break;
        case 'g':
            growable = 1;
            break;
        case 'k':
            flags |= BDRV_O_NATIVE_AIO;
            break;
        case 't':
            if (bdrv_parse_cache_flags(optarg, &flags) < 0) {
                error_report("Invalid cache option: %s", optarg);
                exit(1);
            }
            break;
        case 'T':
            if (!trace_backend_init(optarg, NULL)) {
                exit(1); /* error message will have been printed */
            }
            break;
        case 'V':
            printf("%s version %s\n", progname, QEMU_VERSION);
            exit(0);
        case 'h':
            usage(progname);
            exit(0);
        default:
            usage(progname);
            exit(1);
        }
    }

    /* At most one positional argument (the image file) is accepted. */
    if ((argc - optind) > 1) {
        usage(progname);
        exit(1);
    }

    qemu_init_main_loop();
    bdrv_init();

    /* initialize commands */
    qemuio_add_command(&quit_cmd);
    qemuio_add_command(&open_cmd);
    qemuio_add_command(&close_cmd);

    if (isatty(STDIN_FILENO)) {
        /* Interactive session: enable line editing and disable raw echo. */
        readline_state = readline_init(readline_printf_func,
                                       readline_flush_func,
                                       NULL,
                                       readline_completion_func);
        qemu_set_tty_echo(STDIN_FILENO, false);
        atexit(reenable_tty_echo);
    }

    /* open the device */
    if (!readonly) {
        flags |= BDRV_O_RDWR;
    }

    if ((argc - optind) == 1) {
        openfile(argv[optind], flags, growable, NULL);
    }
    command_loop();

    /*
     * Make sure all outstanding requests complete before the program exits.
     */
    bdrv_drain_all();

    if (qemuio_bs) {
        bdrv_unref(qemuio_bs);
    }
    g_free(readline_state);
    return 0;
}