/*
 * Release everything held by the VHDX driver state: both cached
 * headers, the block allocation table, the parent-locator entries,
 * and finally the migration blocker registered at open time.
 */
static void vhdx_close(BlockDriverState *bs)
{
    BDRVVHDXState *s = bs->opaque;
    int i;

    /* The two VHDX header copies are allocated separately. */
    for (i = 0; i < 2; i++) {
        qemu_vfree(s->headers[i]);
    }

    qemu_vfree(s->bat);
    qemu_vfree(s->parent_entries);

    /* Unregister the blocker before releasing its Error object. */
    migrate_del_blocker(s->migration_blocker);
    error_free(s->migration_blocker);
}
/*
 * Initialize a WHPX vCPU: install the process-wide migration blocker
 * (once), create the instruction emulator and the hypervisor virtual
 * processor for this CPU.
 *
 * Returns 0 on success, negative errno on failure.
 */
int whpx_init_vcpu(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu;
    Error *local_error = NULL;

    /* Add migration blockers for all unsupported features of the
     * Windows Hypervisor Platform */
    if (whpx_migration_blocker == NULL) {
        error_setg(&whpx_migration_blocker,
                   "State blocked due to non-migratable CPUID feature support,"
                   "dirty memory tracking support, and XSAVE/XRSTOR support");

        (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
        if (local_error) {
            error_report_err(local_error);
            /*
             * migrate_add_blocker() failed, so the blocker was never
             * registered: do NOT call migrate_del_blocker() on it.
             * (The previous code also freed the Error first and then
             * passed the dangling pointer to migrate_del_blocker().)
             * Reset the global so a later retry can recreate it.
             */
            error_free(whpx_migration_blocker);
            whpx_migration_blocker = NULL;
            return -EINVAL;
        }
    }

    /* g_malloc0 aborts on OOM, but keep the defensive check cheap. */
    vcpu = g_malloc0(sizeof(struct whpx_vcpu));
    if (!vcpu) {
        error_report("WHPX: Failed to allocate VCPU context.");
        return -ENOMEM;
    }

    hr = WHvEmulatorCreateEmulator(&whpx_emu_callbacks, &vcpu->emulator);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to setup instruction completion support,"
                     " hr=%08lx", hr);
        g_free(vcpu);
        return -EINVAL;
    }

    hr = WHvCreateVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to create a virtual processor,"
                     " hr=%08lx", hr);
        /* Unwind the emulator created above before bailing out. */
        WHvEmulatorDestroyEmulator(vcpu->emulator);
        g_free(vcpu);
        return -EINVAL;
    }

    vcpu->interruptable = true;

    cpu->vcpu_dirty = true;
    cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;

    return 0;
}
/*
 * Tear down the VPC (VHD) driver state: free the page table, the
 * cached page-entry buffer when caching is compiled in, and drop
 * the migration blocker installed at open time.
 */
static void vpc_close(BlockDriverState *bs)
{
    BDRVVPCState *s = bs->opaque;
    Error *blocker = s->migration_blocker;

    qemu_vfree(s->pagetable);
#ifdef CACHE
    g_free(s->pageentry_u8);
#endif

    /* Remove the blocker from the migration list, then release it. */
    migrate_del_blocker(blocker);
    error_free(blocker);
}
/*
 * PCI exit hook for ivshmem: unwind everything pci_ivshmem_realize set
 * up.  The teardown order matters: unmap the shared memory and close
 * its fd before unregistering the RAM from vmstate and removing the
 * subregion from the BAR.
 */
static void pci_ivshmem_exit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM(dev);
    int i;

    fifo8_destroy(&s->incoming_fifo);

    /* Blocker is only installed in some configurations (e.g. no MSI). */
    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    if (memory_region_is_mapped(&s->ivshmem)) {
        /* Only self-mapped (non-hostmem-backed) regions own the mapping. */
        if (!s->hostmem) {
            void *addr = memory_region_get_ram_ptr(&s->ivshmem);
            int fd;

            if (munmap(addr, s->ivshmem_size) == -1) {
                /* Best effort: report and keep tearing down. */
                error_report("Failed to munmap shared memory %s",
                             strerror(errno));
            }

            fd = qemu_get_ram_fd(memory_region_get_ram_addr(&s->ivshmem));
            if (fd != -1) {
                close(fd);
            }
        }

        vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));
        memory_region_del_subregion(&s->bar, &s->ivshmem);
    }

    /* Free the per-vector chardevs used for interrupt eventfds. */
    if (s->eventfd_chr) {
        for (i = 0; i < s->vectors; i++) {
            if (s->eventfd_chr[i]) {
                qemu_chr_free(s->eventfd_chr[i]);
            }
        }
        g_free(s->eventfd_chr);
    }

    /* Close every peer's eventfds before dropping the peer array. */
    if (s->peers) {
        for (i = 0; i < s->nb_peers; i++) {
            close_peer_eventfds(s, i);
        }
        g_free(s->peers);
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_uninit_exclusive_bar(dev);
    }

    g_free(s->msi_vectors);
}
/*
 * PCI uninit hook for ivshmem: drop the migration blocker (when one
 * was installed), detach the shared-memory region from the BAR, and
 * unregister the device's RAM and savevm state.
 */
static void pci_ivshmem_uninit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM(dev);
    DeviceState *d = DEVICE(dev);

    if (s->migration_blocker != NULL) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    memory_region_del_subregion(&s->bar, &s->ivshmem);
    vmstate_unregister_ram(&s->ivshmem, d);
    unregister_savevm(d, "ivshmem", s);
}
/*
 * Device exit hook for vhost-scsi: remove the migration blocker,
 * stop the vhost backend via a status reset, free the virtqueue
 * array, and finish by tearing down the common virtio-scsi state.
 */
static int vhost_scsi_exit(DeviceState *qdev)
{
    VHostSCSI *s = VHOST_SCSI(qdev);

    migrate_del_blocker(s->migration_blocker);
    error_free(s->migration_blocker);

    /* This will stop vhost backend. */
    vhost_scsi_set_status(VIRTIO_DEVICE(qdev), 0);

    g_free(s->dev.vqs);
    return virtio_scsi_common_exit(VIRTIO_SCSI_COMMON(qdev));
}
static void vhost_scsi_unrealize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostSCSI *s = VHOST_SCSI(dev); migrate_del_blocker(s->migration_blocker); error_free(s->migration_blocker); /* This will stop vhost backend. */ vhost_scsi_set_status(vdev, 0); g_free(s->dev.vqs); virtio_scsi_common_unrealize(dev, errp); }
/*
 * PCI uninit hook for ivshmem (legacy memory-API variant): drop the
 * migration blocker if one was installed, then destroy the memory
 * regions in dependency order — the MMIO region, then the shared
 * memory subregion (removed from the BAR before its RAM is
 * unregistered and the region destroyed), then the BAR itself —
 * and finally unregister the savevm handler.
 */
static void pci_ivshmem_uninit(PCIDevice *dev)
{
    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);

    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    memory_region_destroy(&s->ivshmem_mmio);
    /* Detach the shared-memory region from the BAR before destroying it. */
    memory_region_del_subregion(&s->bar, &s->ivshmem);
    vmstate_unregister_ram(&s->ivshmem, &s->dev.qdev);
    memory_region_destroy(&s->ivshmem);
    memory_region_destroy(&s->bar);
    unregister_savevm(&dev->qdev, "ivshmem", s);
}
/*
 * QOM instance finalizer for the TPM emulator backend: shut the
 * external emulator down, release the data channel and control
 * chardev, free the parsed options, drop the migration blocker
 * if one was registered, and destroy the state mutex last.
 */
static void tpm_emulator_inst_finalize(Object *obj)
{
    TPMEmulator *tpm_emu = TPM_EMULATOR(obj);
    Error *blocker = tpm_emu->migration_blocker;

    tpm_emulator_shutdown(tpm_emu);

    object_unref(OBJECT(tpm_emu->data_ioc));
    qemu_chr_fe_deinit(&tpm_emu->ctrl_chr, false);
    qapi_free_TPMEmulatorOptions(tpm_emu->options);

    if (blocker != NULL) {
        migrate_del_blocker(blocker);
        error_free(blocker);
    }

    qemu_mutex_destroy(&tpm_emu->mutex);
}
/*
 * PCI exit hook for ivshmem: unwind realize.  Drops the migration
 * blocker, unmaps and closes the self-mapped shared memory, tears
 * down peer eventfds, MSI-X state and the vector array.
 */
static void ivshmem_exit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM_COMMON(dev);
    int i;

    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    if (memory_region_is_mapped(s->ivshmem_bar2)) {
        /* Only self-mapped (non-hostmem-backed) regions own the mapping. */
        if (!s->hostmem) {
            void *addr = memory_region_get_ram_ptr(s->ivshmem_bar2);
            int fd;

            /*
             * BUG FIX: the original had the closing parenthesis in the
             * wrong place — munmap(addr, size == -1) — which passed the
             * comparison result (0 or 1) as the length, unmapping at
             * most one byte and never checking munmap's return value.
             */
            if (munmap(addr, memory_region_size(s->ivshmem_bar2)) == -1) {
                error_report("Failed to munmap shared memory %s",
                             strerror(errno));
            }

            /* memory_region_get_fd() may return -1; don't close that. */
            fd = memory_region_get_fd(s->ivshmem_bar2);
            if (fd != -1) {
                close(fd);
            }
        }

        vmstate_unregister_ram(s->ivshmem_bar2, DEVICE(dev));
    }

    /* Close every peer's eventfds before freeing the peer array. */
    if (s->peers) {
        for (i = 0; i < s->nb_peers; i++) {
            close_peer_eventfds(s, i);
        }
        g_free(s->peers);
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_uninit_exclusive_bar(dev);
    }

    g_free(s->msi_vectors);
}
/*
 * Close the QED image.  Order is significant: the need-check timer
 * must be cancelled and freed before the final flush, and the header
 * rewrite (clearing QED_F_NEED_CHECK for a clean shutdown) must land
 * after in-flight writes have reached stable storage.
 */
static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    migrate_del_blocker(s->migration_blocker);
    error_free(s->migration_blocker);

    qed_cancel_need_check_timer(s);
    qemu_free_timer(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}