static void kvmclock_vm_state_change(void *opaque, int running,
                                     RunState state)
{
    KVMClockState *s = opaque;
    CPUState *cpu = first_cpu;
    int cap_clock_ctrl = kvm_check_extension(kvm_state, KVM_CAP_KVMCLOCK_CTRL);
    int ret;

    if (running) {
        struct kvm_clock_data data;

        s->clock_valid = false;

        data.clock = s->clock;
        data.flags = 0;
        ret = kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data);
        if (ret < 0) {
            fprintf(stderr, "KVM_SET_CLOCK failed: %s\n", strerror(-ret));
            abort();
        }

        if (!cap_clock_ctrl) {
            return;
        }
        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
            ret = kvm_vcpu_ioctl(cpu, KVM_KVMCLOCK_CTRL, 0);
            if (ret) {
                if (ret != -EINVAL) {
                    fprintf(stderr, "%s: %s\n", __func__, strerror(-ret));
                }
                return;
            }
        }
    } else {
        struct kvm_clock_data data;

        if (s->clock_valid) {
            return;
        }
        ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
        if (ret < 0) {
            fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(-ret));
            abort();
        }
        s->clock = data.clock;

        /*
         * If the VM is stopped, declare the clock state valid to avoid
         * re-reading it on the next vmsave (which would return a different
         * value). The flag is reset when the VM is continued.
         */
        s->clock_valid = true;
    }
}
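/*
 * Hedged usage sketch, not part of the original excerpt: a handler like the
 * one above is typically registered once at device init via QEMU's
 * qemu_add_vm_change_state_handler(). The *_example name and the KVM_CLOCK()
 * cast macro are illustrative assumptions.
 */
static void kvmclock_realize_example(DeviceState *dev, Error **errp)
{
    KVMClockState *s = KVM_CLOCK(dev); /* assumed cast macro */

    /* invoke kvmclock_vm_state_change() on every VM stop/continue */
    qemu_add_vm_change_state_handler(kvmclock_vm_state_change, s);
}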
static void kvm_pit_get(PITCommonState *pit)
{
    KVMPITState *s = KVM_PIT(pit);
    struct kvm_pit_state2 kpit;
    struct kvm_pit_channel_state *kchan;
    struct PITChannelState *sc;
    int i, ret;

    /* No need to re-read the state if the VM is stopped. */
    if (s->vm_stopped) {
        return;
    }

    if (kvm_has_pit_state2()) {
        ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT2, &kpit);
        if (ret < 0) {
            fprintf(stderr, "KVM_GET_PIT2 failed: %s\n", strerror(-ret));
            abort();
        }
        pit->channels[0].irq_disabled = kpit.flags & KVM_PIT_FLAGS_HPET_LEGACY;
    } else {
        /*
         * kvm_pit_state2 is a superset of the kvm_pit_state struct,
         * so we can use it for KVM_GET_PIT as well.
         */
        ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT, &kpit);
        if (ret < 0) {
            fprintf(stderr, "KVM_GET_PIT failed: %s\n", strerror(-ret));
            abort();
        }
    }
    for (i = 0; i < 3; i++) {
        kchan = &kpit.channels[i];
        sc = &pit->channels[i];
        sc->count = kchan->count;
        sc->latched_count = kchan->latched_count;
        sc->count_latched = kchan->count_latched;
        sc->status_latched = kchan->status_latched;
        sc->status = kchan->status;
        sc->read_state = kchan->read_state;
        sc->write_state = kchan->write_state;
        sc->write_latch = kchan->write_latch;
        sc->rw_mode = kchan->rw_mode;
        sc->mode = kchan->mode;
        sc->bcd = kchan->bcd;
        sc->gate = kchan->gate;
        sc->count_load_time = kchan->count_load_time + s->kernel_clock_offset;
    }

    sc = &pit->channels[0];
    sc->next_transition_time =
        pit_get_next_transition_time(sc, sc->count_load_time);
}
void kvm_create_irqchip(kvm_context_t kvm)
{
    int r;

    kvm->irqchip_in_kernel = 0;
#ifdef KVM_CAP_IRQCHIP
    if (!kvm->no_irqchip_creation) {
        r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
        if (r > 0) { /* kernel irqchip supported */
            r = kvm_vm_ioctl(kvm_state, KVM_CREATE_IRQCHIP);
            if (r >= 0) {
                kvm->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
                r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                              KVM_CAP_IRQ_INJECT_STATUS);
                if (r > 0) {
                    kvm->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
                }
#endif
                kvm->irqchip_in_kernel = 1;
            } else {
                fprintf(stderr, "Create kernel PIC irqchip failed\n");
            }
        }
    }
#endif
    kvm_state->irqchip_in_kernel = kvm->irqchip_in_kernel;
}
static void kvm_openpic_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *d = SYS_BUS_DEVICE(dev);
    KVMOpenPICState *opp = KVM_OPENPIC(dev);
    KVMState *s = kvm_state;
    int kvm_openpic_model;
    struct kvm_create_device cd = {0};
    int ret, i;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        error_setg(errp, "Kernel is lacking Device Control API");
        return;
    }

    switch (opp->model) {
    case OPENPIC_MODEL_FSL_MPIC_20:
        kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_20;
        break;

    case OPENPIC_MODEL_FSL_MPIC_42:
        kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_42;
        break;

    default:
        error_setg(errp, "Unsupported OpenPIC model %" PRIu32, opp->model);
        return;
    }

    cd.type = kvm_openpic_model;
    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &cd);
    if (ret < 0) {
        error_setg(errp, "Can't create device %d: %s",
                   cd.type, strerror(errno));
        return;
    }
    opp->fd = cd.fd;

    sysbus_init_mmio(d, &opp->mem);
    qdev_init_gpio_in(dev, kvm_openpic_set_irq, OPENPIC_MAX_IRQ);

    opp->mem_listener.region_add = kvm_openpic_region_add;
    opp->mem_listener.region_del = kvm_openpic_region_del;
    memory_listener_register(&opp->mem_listener, &address_space_memory);

    /* indicate pic capabilities */
    msi_nonbroken = true;
    kvm_kernel_irqchip = true;
    kvm_async_interrupts_allowed = true;

    /* set up irq routing */
    kvm_init_irq_routing(kvm_state);
    for (i = 0; i < 256; ++i) {
        kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
    }

    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    kvm_irqchip_commit_routes(s);
}
int kvm_set_irq(int irq, int level, int *status)
{
    struct kvm_irq_level event;
    int r;

    if (!kvm_state->irqchip_in_kernel) {
        return 0;
    }
    event.level = level;
    event.irq = irq;
    r = kvm_vm_ioctl(kvm_state, kvm_state->irqchip_inject_ioctl, &event);
    if (r < 0) {
        perror("kvm_set_irq");
    }

    if (status) {
#ifdef KVM_CAP_IRQ_INJECT_STATUS
        *status = (kvm_state->irqchip_inject_ioctl == KVM_IRQ_LINE) ?
            1 : event.status;
#else
        *status = 1;
#endif
    }

    return 1;
}
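/*
 * Hedged usage sketch, not in the original: pulsing a GSI through the
 * in-kernel irqchip. When KVM_IRQ_LINE_STATUS is in use, a status of 0
 * means the interrupt was masked or coalesced. The irq number and the
 * *_example helper name are illustrative.
 */
static void kvm_pulse_irq_example(int irq)
{
    int status;

    kvm_set_irq(irq, 1, &status); /* assert the line */
    kvm_set_irq(irq, 0, NULL);    /* deassert; status not needed */
}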
static void kvm_s390_flic_realize(DeviceState *dev, Error **errp)
{
    KVMS390FLICState *flic_state = KVM_S390_FLIC(dev);
    struct kvm_create_device cd = {0};
    struct kvm_device_attr test_attr = {0};
    int ret;

    flic_state->fd = -1;
    if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
        trace_flic_no_device_api(errno);
        return;
    }

    cd.type = KVM_DEV_TYPE_FLIC;
    ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd);
    if (ret < 0) {
        trace_flic_create_device(errno);
        return;
    }
    flic_state->fd = cd.fd;

    /* Check clear_io_irq support */
    test_attr.group = KVM_DEV_FLIC_CLEAR_IO_IRQ;
    flic_state->clear_io_supported = !ioctl(flic_state->fd,
                                            KVM_HAS_DEVICE_ATTR, test_attr);

    /* Register savevm handler for floating interrupts */
    register_savevm(NULL, "s390-flic", 0, 1, kvm_flic_save,
                    kvm_flic_load, (void *) flic_state);
}
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;
#ifdef CONFIG_SOLARIS
    caddr_t p;
#endif

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;

#ifdef CONFIG_SOLARIS
    /*
     * We need to touch each page, presumably to ensure that mlock()
     * will succeed, so we use volatile to ensure the reads don't get
     * optimised away.
     */
    for (p = (caddr_t)mem.userspace_addr;
         p < (caddr_t)mem.userspace_addr + mem.memory_size;
         p += PAGE_SIZE) {
        (void) *(volatile char *)p;
    }
#endif /* CONFIG_SOLARIS */

    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
int kvm_init_vcpu(CPUArchState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

#ifdef CONFIG_SOLARIS
    ret = kvm_vm_clone(kvm_state);
    if (ret < 0) {
        fprintf(stderr, "kvm_init_vcpu could not clone fd: %m\n");
        goto err;
    }
    env->kvm_fd = ret;
    ret = ioctl(env->kvm_fd, KVM_CREATE_VCPU, env->cpu_index);
#else
    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
#endif
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

#ifndef CONFIG_SOLARIS
    env->kvm_fd = ret;
#endif
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}
int kvm_create_irqchip(KVMState *s)
{
#ifdef KVM_CAP_IRQCHIP
    int r;

    if (!kvm_irqchip || !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    r = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (r < 0) {
        fprintf(stderr, "Create kernel PIC irqchip failed\n");
        return r;
    }

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
#endif
    s->irqchip_in_kernel = 1;

    r = kvm_init_irq_routing(s);
    if (r < 0) {
        return r;
    }
#endif

    return 0;
}
static void kvm_pic_put(PICCommonState *s)
{
    struct kvm_irqchip chip;
    struct kvm_pic_state *kpic;
    int ret;

    chip.chip_id = s->master ? KVM_IRQCHIP_PIC_MASTER : KVM_IRQCHIP_PIC_SLAVE;
    kpic = &chip.chip.pic;

    kpic->last_irr = s->last_irr;
    kpic->irr = s->irr;
    kpic->imr = s->imr;
    kpic->isr = s->isr;
    kpic->priority_add = s->priority_add;
    kpic->irq_base = s->irq_base;
    kpic->read_reg_select = s->read_reg_select;
    kpic->poll = s->poll;
    kpic->special_mask = s->special_mask;
    kpic->init_state = s->init_state;
    kpic->auto_eoi = s->auto_eoi;
    kpic->rotate_on_auto_eoi = s->rotate_on_auto_eoi;
    kpic->special_fully_nested_mode = s->special_fully_nested_mode;
    kpic->init4 = s->init4;
    kpic->elcr = s->elcr;
    kpic->elcr_mask = s->elcr_mask;

    ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, &chip);
    if (ret < 0) {
        fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(-ret));
        abort();
    }
}
static int kvmclock_post_load(void *opaque, int version_id)
{
    KVMClockState *s = opaque;
    struct kvm_clock_data data;

    data.clock = s->clock;
    data.flags = 0;
    return kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data);
}
int kvm_assign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
    if (ret > 0) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, assigned_irq);
    }

    return kvm_old_assign_irq(s, assigned_irq);
}
int kvm_get_dirty_fb_lines(short *rettable, int table_size_in_bytes)
{
    struct kvm_dirty_log d;
    unsigned int i, j;
    unsigned long page_number, addr, c;
    int known_start = 0;

    /* no fb mapped */
    if (fb_slot == -1) {
        return 0;
    }

    rettable[0] = 0;             /* starting y */
    rettable[1] = fb_height - 1; /* ending y */

    memset(fb_bitmap, 0, fb_len);
    d.dirty_bitmap = fb_bitmap;
    d.slot = fb_slot;

    if (kvm_vm_ioctl(KVM_GET_DIRTY_LOG, &d) == -1) {
        /* failed -> expose the whole screen as updated */
        return 1;
    }

    rettable[1] = 0;
    for (i = 0; i < fb_len; i++) {
        if (fb_bitmap[i] != 0) {
            c = bswap_32(fb_bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * 32 + j;
                addr = page_number * TARGET_PAGE_SIZE;
                if (!known_start) {
                    rettable[0] = addr / fb_bytes_per_row;
                    known_start = 1;
                }
                rettable[1] = (addr + TARGET_PAGE_SIZE) / fb_bytes_per_row;
            } while (c != 0);
        }
    }

    /* not dirty */
    if (rettable[0] == rettable[1]) {
        return 0;
    }

    /* cap on fb_height */
    if (rettable[1] > (fb_height - 1)) {
        rettable[1] = fb_height - 1;
    }

    return 1;
}
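/*
 * Hedged usage sketch, not in the original: a display refresh path would
 * call the function above with a two-entry row table and repaint only the
 * returned span. fb_repaint_rows() is a hypothetical repaint helper.
 */
static void fb_refresh_example(void)
{
    short rows[2];

    if (kvm_get_dirty_fb_lines(rows, sizeof(rows))) {
        fb_repaint_rows(rows[0], rows[1]); /* hypothetical helper */
    }
}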
/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set
 * to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    target_phys_addr_t start_addr = section->offset_within_address_space;
    target_phys_addr_t end_addr = start_addr + section->size;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For the dirty bitmap, the kernel allocates an array of size
         * aligned to bits-per-long.  But when the kernel is 64-bit and
         * userspace is 32-bit, userspace can't align to the same
         * bits-per-long, since sizeof(long) differs between kernel and
         * user space.  As a result, userspace may provide a buffer that
         * is 4 bytes smaller than the one the kernel uses, leading to
         * userspace memory corruption (which is not detectable by
         * valgrind either, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here,
         * in the hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}
int kvm_assign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
{
    int ret;

    ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
    if (ret > 0) {
        return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_DEV_IRQ, assigned_irq);
    }

    return kvm_old_assign_irq(kvm, assigned_irq);
}
static int kvm_enable_tpr_access_reporting(CPUState *env)
{
    int r;
    struct kvm_tpr_access_ctl tac = { .enabled = 1 };

    r = kvm_ioctl(env->kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
    if (r <= 0) {
        return -ENOSYS;
    }
    return kvm_vcpu_ioctl(env, KVM_TPR_ACCESS_REPORTING, &tac);
}
#endif

#ifdef KVM_CAP_ADJUST_CLOCK
static struct kvm_clock_data kvmclock_data;

static void kvmclock_pre_save(void *opaque)
{
    struct kvm_clock_data *cl = opaque;

    kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, cl);
}

static int kvmclock_post_load(void *opaque, int version_id)
{
    struct kvm_clock_data *cl = opaque;

    return kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, cl);
}

static const VMStateDescription vmstate_kvmclock = {
    .name = "kvmclock",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = kvmclock_pre_save,
    .post_load = kvmclock_post_load,
    .fields = (VMStateField []) {
        VMSTATE_U64(clock, struct kvm_clock_data),
        VMSTATE_END_OF_LIST()
    }
};
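/*
 * Hedged usage sketch, not in the excerpt: the vmstate description above
 * would be registered once at startup, roughly as below. The NULL device
 * and instance id 0 follow common vmstate_register() usage; the *_example
 * helper name is illustrative.
 */
static void kvmclock_register_savevm_example(void)
{
    vmstate_register(NULL, 0, &vmstate_kvmclock, &kvmclock_data);
}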
static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
{
#ifdef KVM_CAP_SET_BOOT_CPU_ID
    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);
    if (r > 0) {
        return kvm_vm_ioctl(kvm_state, KVM_SET_BOOT_CPU_ID, id);
    }
    return -ENOSYS;
#else
    return -ENOSYS;
#endif
}
static void kvm_get_smmu_info(CPUPPCState *env, struct kvm_ppc_smmu_info *info)
{
    int ret;

    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        ret = kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
        if (ret == 0) {
            return;
        }
    }

    kvm_get_fallback_smmu_info(env, info);
}
int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
{
    int r;

    if (!kvm->irqchip_in_kernel) {
        return 0;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_set_irqchip");
    }
    return r;
}
int kvm_set_irqchip(KVMState *s, struct kvm_irqchip *chip)
{
    int r;

    if (!s->irqchip_in_kernel) {
        return 0;
    }
    r = kvm_vm_ioctl(s, KVM_SET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_set_irqchip");
    }
    return r;
}
int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
    if (r > 0) {
        *nrshadow_pages = kvm_vm_ioctl(kvm_state, KVM_GET_NR_MMU_PAGES);
        return 0;
    }
#endif
    return -1;
}
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
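/*
 * Hedged usage sketch, not from the original source: filling a KVMSlot for
 * a block of guest RAM and pushing it to the kernel with the helper above.
 * Field names follow kvm_set_user_memory_region(); the slot index, the
 * addresses, and the *_example name are illustrative only.
 */
static int kvm_register_ram_example(KVMState *s, void *host_ram,
                                    target_phys_addr_t guest_start,
                                    ram_addr_t size)
{
    KVMSlot slot = {
        .slot        = 0,           /* illustrative slot index */
        .start_addr  = guest_start, /* guest-physical base */
        .memory_size = size,
        .ram         = host_ram,    /* host userspace mapping */
        .flags       = 0,
    };

    return kvm_set_user_memory_region(s, &slot);
}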
static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
        if (ret == 0) {
            return;
        }
    }

    kvm_get_fallback_smmu_info(cpu, info);
}
int kvm_reinject_control(KVMState *s, int pit_reinject)
{
#ifdef KVM_CAP_REINJECT_CONTROL
    int r;
    struct kvm_reinject_control control;

    control.pit_reinject = pit_reinject;

    r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
    if (r > 0) {
        return kvm_vm_ioctl(s, KVM_REINJECT_CONTROL, &control);
    }
#endif
    return -ENOSYS;
}
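/*
 * Hedged usage sketch, not in the original: PIT lost-tick compensation is
 * typically switched off when the guest keeps time by another means. A
 * caller would treat -ENOSYS as "kernel too old, reinjection stays on".
 * The *_example helper name is illustrative.
 */
static void kvm_disable_pit_reinject_example(KVMState *s)
{
    int r = kvm_reinject_control(s, 0);

    if (r < 0 && r != -ENOSYS) {
        fprintf(stderr, "disabling PIT reinjection failed: %s\n",
                strerror(-r));
    }
}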
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}
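/*
 * Hedged counterpart sketch, not part of the excerpt: the registration
 * side mirrors the function above, using KVM_REGISTER_COALESCED_MMIO with
 * the same zone layout.
 */
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}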
static void sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size)
{
    int r;
    struct kvm_enc_region range;

    range.addr = (__u64)(unsigned long)host;
    range.size = size;

    trace_kvm_memcrypt_unregister_region(host, size);
    r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range);
    if (r) {
        error_report("%s: failed to unregister region (%p+%#zx)",
                     __func__, host, size);
    }
}
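/*
 * Hedged counterpart sketch, not in the excerpt: the matching "added"
 * notifier would pin the range for SEV with KVM_MEMORY_ENCRYPT_REG_REGION;
 * error handling mirrors the removal path above.
 */
static void sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
{
    int r;
    struct kvm_enc_region range;

    range.addr = (__u64)(unsigned long)host;
    range.size = size;

    r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
    if (r) {
        error_report("%s: failed to register region (%p+%#zx)",
                     __func__, host, size);
    }
}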
int kvm_init(void)
{
    long mmap_size;
    struct kvm_enable_cap cap;
    int r;

    kvm_fd = open("/dev/kvm", O_RDWR);
    if (kvm_fd < 0) {
        fprintf(stderr, "KVM: Couldn't open /dev/kvm\n");
        return -1;
    }

    vm_fd = kvm_ioctl(KVM_CREATE_VM, 0);
    if (vm_fd < 0) {
        fprintf(stderr, "KVM: Couldn't create VM\n");
        return -1;
    }

    vcpu_fd = kvm_vm_ioctl(KVM_CREATE_VCPU, 0);
    if (vcpu_fd < 0) {
        fprintf(stderr, "kvm_create_vcpu failed\n");
        return -1;
    }

    memset(&cap, 0, sizeof(cap));
    cap.cap = KVM_CAP_PPC_OSI;
    r = kvm_vcpu_ioctl(KVM_ENABLE_CAP, &cap);
    if (r < 0) {
        fprintf(stderr, "kvm_enable_cap failed\n");
        return -1;
    }

    mmap_size = kvm_ioctl(KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        fprintf(stderr, "KVM_GET_VCPU_MMAP_SIZE failed\n");
        return -1;
    }

    kvm_run = (struct kvm_run *)mmap(NULL, mmap_size,
                                     PROT_READ | PROT_WRITE, MAP_SHARED,
                                     vcpu_fd, 0);
    if (kvm_run == MAP_FAILED) {
        fprintf(stderr, "mmap'ing vcpu state failed\n");
        return -1;
    }

    return 0;
}
static void kvm_create_vcpu(CPUState *env, int id)
{
    long mmap_size;
    int r;
    KVMState *s = kvm_state;

    r = kvm_vm_ioctl(kvm_state, KVM_CREATE_VCPU, id);
    if (r < 0) {
        fprintf(stderr, "kvm_create_vcpu: %m\n");
        fprintf(stderr, "Failed to create vCPU. Check the -smp parameter.\n");
        goto err;
    }

    env->kvm_fd = r;
    env->kvm_state = kvm_state;

    mmap_size = kvm_ioctl(kvm_state, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        fprintf(stderr, "get vcpu mmap size: %m\n");
        goto err_fd;
    }
    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        fprintf(stderr, "mmap vcpu area: %m\n");
        goto err_fd;
    }

#ifdef KVM_CAP_COALESCED_MMIO
    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *) env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }
#endif

    r = kvm_arch_init_vcpu(env);
    if (r == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
    }

    return;
err_fd:
    close(env->kvm_fd);
err:
    /* We're no good with semi-broken states. */
    abort();
}
int kvm_irqchip_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(s->irqchip_in_kernel);
    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irqchip_line");
        abort();
    }

    return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
static int kvm_set_identity_map_addr(kvm_context_t kvm, uint64_t addr)
{
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                  KVM_CAP_SET_IDENTITY_MAP_ADDR);
    if (r > 0) {
        r = kvm_vm_ioctl(kvm_state, KVM_SET_IDENTITY_MAP_ADDR, &addr);
        if (r < 0) {
            /* kvm_vm_ioctl returns -errno on failure, not -1 */
            fprintf(stderr, "kvm_set_identity_map_addr: %s\n", strerror(-r));
            return r;
        }
        return 0;
    }
#endif
    return -ENOSYS;
}