void kvm_create_irqchip(kvm_context_t kvm)
{
    int r;

    kvm->irqchip_in_kernel = 0;
#ifdef KVM_CAP_IRQCHIP
    if (!kvm->no_irqchip_creation) {
        r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
        if (r > 0) {    /* kernel irqchip supported */
            r = kvm_vm_ioctl(kvm_state, KVM_CREATE_IRQCHIP);
            if (r >= 0) {
                kvm->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
                r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                              KVM_CAP_IRQ_INJECT_STATUS);
                if (r > 0) {
                    kvm->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
                }
#endif
                kvm->irqchip_in_kernel = 1;
            } else {
                fprintf(stderr, "Create kernel PIC irqchip failed\n");
            }
        }
    }
#endif
    kvm_state->irqchip_in_kernel = kvm->irqchip_in_kernel;
}
int kvm_get_mce_cap_supported(kvm_context_t kvm, uint64_t *mce_cap,
                              int *max_banks)
{
#ifdef KVM_CAP_MCE
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
#endif
    return -ENOSYS;
}
int main(int argc, char *argv[])
{
    int kvm_fd = open("/dev/kvm", O_RDWR);
    if (kvm_fd < 0) {
        perror("kvm_open");
        return -1;
    }

    printf("sending ioctl %X\n", KVM_GET_API_VERSION);
    int version = kvm_ioctl(kvm_fd, KVM_GET_API_VERSION, 0);
    printf("got version %d\n", version);
    if (version != KVM_API_VERSION) {
        perror("ioctl");
    }

    int vm_fd = kvm_ioctl(kvm_fd, KVM_CREATE_VM, 0);
    printf("got vm %d\n", vm_fd);
    return 0;
}
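/*
 * Hedged sketch: the snippets in this file assume a kvm_ioctl() helper that
 * is defined elsewhere (some variants take a raw fd, others a KVMState).
 * The fd-based wrapper below is an illustrative assumption, not the exact
 * helper from any one tree; it simply forwards the ioctl and folds errno
 * into a negative return value, which is the convention the callers here
 * check against.
 */
static int kvm_ioctl_fd(int fd, unsigned long request, void *arg)
{
    int ret = ioctl(fd, request, arg);

    return ret < 0 ? -errno : ret;
}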
int kvm_init_vcpu(CPUArchState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

#ifdef CONFIG_SOLARIS
    ret = kvm_vm_clone(kvm_state);
    if (ret < 0) {
        fprintf(stderr, "kvm_init_vcpu could not clone fd: %m\n");
        goto err;
    }
    env->kvm_fd = ret;
    ret = ioctl(env->kvm_fd, KVM_CREATE_VCPU, env->cpu_index);
#else
    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
#endif
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

#ifndef CONFIG_SOLARIS
    env->kvm_fd = ret;
#endif
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}
int kvm_init(void)
{
    long mmap_size;
    struct kvm_enable_cap cap;
    int r;

    kvm_fd = open("/dev/kvm", O_RDWR);
    if (kvm_fd < 0) {
        fprintf(stderr, "KVM: Couldn't open /dev/kvm\n");
        return -1;
    }

    vm_fd = kvm_ioctl(KVM_CREATE_VM, 0);
    if (vm_fd < 0) {
        fprintf(stderr, "KVM: Couldn't create VM\n");
        return -1;
    }

    vcpu_fd = kvm_vm_ioctl(KVM_CREATE_VCPU, 0);
    if (vcpu_fd < 0) {
        fprintf(stderr, "kvm_create_vcpu failed\n");
        return -1;
    }

    memset(&cap, 0, sizeof(cap));
    cap.cap = KVM_CAP_PPC_OSI;
    r = kvm_vcpu_ioctl(KVM_ENABLE_CAP, &cap);
    if (r < 0) {
        fprintf(stderr, "kvm_enable_cap failed\n");
        return -1;
    }

    mmap_size = kvm_ioctl(KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        fprintf(stderr, "KVM_GET_VCPU_MMAP_SIZE failed\n");
        return -1;
    }

    kvm_run = (struct kvm_run *)mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                     MAP_SHARED, vcpu_fd, 0);
    if (kvm_run == MAP_FAILED) {
        fprintf(stderr, "mmap'ing vcpu state failed\n");
        return -1;
    }

    return 0;
}
int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
{
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
    if (r > 0) {
        return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, mp_state);
    }
    return -ENOSYS;
}
int kvm_assign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
    if (ret > 0) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, assigned_irq);
    }

    return kvm_old_assign_irq(s, assigned_irq);
}
int kvm_assign_irq(kvm_context_t kvm, struct kvm_assigned_irq *assigned_irq)
{
    int ret;

    ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
    if (ret > 0) {
        return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_DEV_IRQ, assigned_irq);
    }

    return kvm_old_assign_irq(kvm, assigned_irq);
}
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}
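/*
 * Usage sketch (hypothetical caller, not taken from this tree): because
 * kvm_check_extension() folds ioctl failures into 0, its result can be used
 * directly as a boolean capability test, or as the capability-specific count
 * for extensions that report one.
 */
static int kvm_has_vcpu_events(KVMState *s)
{
    return kvm_check_extension(s, KVM_CAP_VCPU_EVENTS) > 0;
}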
static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
{
#ifdef KVM_CAP_SET_BOOT_CPU_ID
    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);
    if (r > 0) {
        return kvm_vm_ioctl(kvm_state, KVM_SET_BOOT_CPU_ID, id);
    }
    return -ENOSYS;
#else
    return -ENOSYS;
#endif
}
int kvm_init_coalesced_mmio(kvm_context_t kvm)
{
    int r = 0;

    kvm_state->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
    if (r > 0) {
        kvm_state->coalesced_mmio = r;
        return 0;
    }
#endif
    return r;
}
int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
    if (r > 0) {
        *nrshadow_pages = kvm_vm_ioctl(kvm_state, KVM_GET_NR_MMU_PAGES);
        return 0;
    }
#endif
    return -1;
}
/*
 * Returns the list of MSRs supported by the kernel.  The caller must free
 * the returned list.
 */
static struct kvm_msr_list *kvm_get_msr_list(void)
{
    struct kvm_msr_list sizer, *msrs;
    int r;

    sizer.nmsrs = 0;
    r = kvm_ioctl(kvm_state, KVM_GET_MSR_INDEX_LIST, &sizer);
    if (r < 0 && r != -E2BIG) {
        return NULL;
    }
    /*
     * Old kernel modules had a bug and could write beyond the provided
     * memory.  Allocate at least a safe amount of 1K.
     */
    msrs = qemu_malloc(MAX(1024, sizeof(*msrs) +
                           sizer.nmsrs * sizeof(*msrs->indices)));
    msrs->nmsrs = sizer.nmsrs;
    r = kvm_ioctl(kvm_state, KVM_GET_MSR_INDEX_LIST, msrs);
    if (r < 0) {
        qemu_free(msrs);
        errno = -r;
        return NULL;
    }

    return msrs;
}
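/*
 * Usage sketch (illustrative only, not a function from this tree): walk the
 * list returned by kvm_get_msr_list() and release it with the matching
 * allocator.
 */
static void kvm_dump_supported_msrs(void)
{
    struct kvm_msr_list *msrs = kvm_get_msr_list();
    unsigned int i;

    if (msrs == NULL) {
        return;
    }
    for (i = 0; i < msrs->nmsrs; i++) {
        printf("msr 0x%x\n", msrs->indices[i]);
    }
    qemu_free(msrs);
}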
int kvm_reinject_control(KVMState *s, int pit_reinject)
{
#ifdef KVM_CAP_REINJECT_CONTROL
    int r;
    struct kvm_reinject_control control;

    control.pit_reinject = pit_reinject;
    r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
    if (r > 0) {
        return kvm_vm_ioctl(s, KVM_REINJECT_CONTROL, &control);
    }
#endif
    return -ENOSYS;
}
static int kvm_create_default_phys_mem(kvm_context_t kvm,
                                       unsigned long phys_mem_bytes,
                                       void **vm_mem)
{
#ifdef KVM_CAP_USER_MEMORY
    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
    if (r > 0) {
        return 0;
    }
    fprintf(stderr,
            "Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported\n");
#else
#error Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported
#endif
    return -1;
}
int kvm_create_vm(kvm_context_t kvm)
{
    int fd;

#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes = qemu_mallocz(sizeof(*kvm->irq_routes));
    kvm->nr_allocated_irq_routes = 0;
#endif

    fd = kvm_ioctl(kvm_state, KVM_CREATE_VM, 0);
    if (fd < 0) {
        fprintf(stderr, "kvm_create_vm: %m\n");
        return -1;
    }
    kvm_state->vmfd = fd;
    return 0;
}
static void kvm_create_vcpu(CPUState *env, int id)
{
    long mmap_size;
    int r;
    KVMState *s = kvm_state;

    r = kvm_vm_ioctl(kvm_state, KVM_CREATE_VCPU, id);
    if (r < 0) {
        fprintf(stderr, "kvm_create_vcpu: %m\n");
        fprintf(stderr, "Failed to create vCPU. Check the -smp parameter.\n");
        goto err;
    }

    env->kvm_fd = r;
    env->kvm_state = kvm_state;

    mmap_size = kvm_ioctl(kvm_state, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        fprintf(stderr, "get vcpu mmap size: %m\n");
        goto err_fd;
    }
    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        fprintf(stderr, "mmap vcpu area: %m\n");
        goto err_fd;
    }

#ifdef KVM_CAP_COALESCED_MMIO
    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }
#endif

    r = kvm_arch_init_vcpu(env);
    if (r == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
    }

    return;
err_fd:
    close(env->kvm_fd);
err:
    /* We're no good with semi-broken states. */
    abort();
}
static int kvm_set_identity_map_addr(kvm_context_t kvm, uint64_t addr)
{
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_IDENTITY_MAP_ADDR);
    if (r > 0) {
        r = kvm_vm_ioctl(kvm_state, KVM_SET_IDENTITY_MAP_ADDR, &addr);
        if (r < 0) {
            fprintf(stderr, "kvm_set_identity_map_addr: %m\n");
            return r;
        }
        return 0;
    }
#endif
    return -ENOSYS;
}
int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
{
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
    if (r > 0) {
        r = kvm_vm_ioctl(kvm_state, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
        if (r < 0) {
            fprintf(stderr, "kvm_set_shadow_pages: %m\n");
            return r;
        }
        return 0;
    }
#endif
    return -1;
}
static int kvm_init_tss(kvm_context_t kvm)
{
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (r > 0) {
        /*
         * This address is 3 pages before the BIOS, and the BIOS should
         * present it as unavailable memory.
         */
        r = kvm_set_tss_addr(kvm, 0xfeffd000);
        if (r < 0) {
            fprintf(stderr, "kvm_init_tss: unable to set tss addr\n");
            return r;
        }
    } else {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
    }
    return 0;
}
static int kvm_enable_tpr_access_reporting(CPUState *env)
{
    int r;
    struct kvm_tpr_access_ctl tac = { .enabled = 1 };

    r = kvm_ioctl(env->kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
    if (r <= 0) {
        return -ENOSYS;
    }
    return kvm_vcpu_ioctl(env, KVM_TPR_ACCESS_REPORTING, &tac);
}
#endif

#ifdef KVM_CAP_ADJUST_CLOCK
static struct kvm_clock_data kvmclock_data;

static void kvmclock_pre_save(void *opaque)
{
    struct kvm_clock_data *cl = opaque;

    kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, cl);
}

static int kvmclock_post_load(void *opaque, int version_id)
{
    struct kvm_clock_data *cl = opaque;

    return kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, cl);
}

static const VMStateDescription vmstate_kvmclock = {
    .name = "kvmclock",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = kvmclock_pre_save,
    .post_load = kvmclock_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_U64(clock, struct kvm_clock_data),
        VMSTATE_END_OF_LIST()
    }
};
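/*
 * Registration sketch (placement and the four-argument vmstate_register()
 * form are assumptions, not the exact call site in this tree): the kvmclock
 * descriptor above is typically registered once during machine init so that
 * KVM_GET_CLOCK runs on save and KVM_SET_CLOCK on load.
 */
static void kvmclock_register_savevm(void)
{
    vmstate_register(NULL, 0, &vmstate_kvmclock, &kvmclock_data);
}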
static int kvm_create_pit(kvm_context_t kvm)
{
#ifdef KVM_CAP_PIT
    int r;

    kvm_state->pit_in_kernel = 0;
    if (!kvm->no_pit_creation) {
        r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_PIT);
        if (r > 0) {
            r = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT);
            if (r >= 0) {
                kvm_state->pit_in_kernel = 1;
            } else {
                fprintf(stderr, "Create kernel PIT failed\n");
                return r;
            }
        }
    }
#endif
    return 0;
}
static int kvm_init_identity_map_page(kvm_context_t kvm)
{
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    int r;

    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_IDENTITY_MAP_ADDR);
    if (r > 0) {
        /*
         * This address is 4 pages before the BIOS, and the BIOS should
         * present it as unavailable memory.
         */
        r = kvm_set_identity_map_addr(kvm, 0xfeffc000);
        if (r < 0) {
            fprintf(stderr, "kvm_init_identity_map_page: "
                    "unable to set identity mapping addr\n");
            return r;
        }
    }
#endif
    return 0;
}