Example #1
int kvm_create_irqchip(KVMState *s)
{
#ifdef KVM_CAP_IRQCHIP
    int r;

    if (!kvm_irqchip || !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    r = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (r < 0) {
        fprintf(stderr, "Create kernel PIC irqchip failed\n");
        return r;
    }

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
#endif
    s->irqchip_in_kernel = 1;

    r = kvm_init_irq_routing(s);
    if (r < 0) {
        return r;
    }
#endif

    return 0;
}
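All of these call sites go through the same helper; in QEMU's kvm-all.c it is essentially a thin wrapper over the KVM_CHECK_EXTENSION ioctl that clamps failures to zero, so callers can treat the result either as a boolean or as a count (sketch of the stock implementation):

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* ioctl failed or extension unknown: report it as absent */
        ret = 0;
    }

    return ret;
}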
Example #2
/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}
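The counts gathered above gate later slot allocation. A hedged sketch of the kind of check the ARM breakpoint path performs (the real version also encodes proper BCR/BVR control values; 0x1 below is only a placeholder enable bit):

static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,     /* placeholder: enable bit only */
        .bvr = addr,
    };

    /* refuse once the slots reported by KVM_CAP_GUEST_DEBUG_HW_BPS are used */
    if (hw_breakpoints->len >= max_hw_bps) {
        return -ENOBUFS;
    }

    g_array_append_val(hw_breakpoints, brk);
    return 0;
}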
Example #3
static void kvm_s390_flic_realize(DeviceState *dev, Error **errp)
{
    KVMS390FLICState *flic_state = KVM_S390_FLIC(dev);
    struct kvm_create_device cd = {0};
    struct kvm_device_attr test_attr = {0};
    int ret;

    flic_state->fd = -1;
    if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
        trace_flic_no_device_api(errno);
        return;
    }

    cd.type = KVM_DEV_TYPE_FLIC;
    ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd);
    if (ret < 0) {
        trace_flic_create_device(errno);
        return;
    }
    flic_state->fd = cd.fd;

    /* Check clear_io_irq support */
    test_attr.group = KVM_DEV_FLIC_CLEAR_IO_IRQ;
    flic_state->clear_io_supported = !ioctl(flic_state->fd,
                                            KVM_HAS_DEVICE_ATTR, &test_attr);

    /* Register savevm handler for floating interrupts */
    register_savevm(NULL, "s390-flic", 0, 1, kvm_flic_save,
                    kvm_flic_load, (void *) flic_state);
}
Example #4
static void kvm_piix3_setup_irq_routing(bool pci_enabled)
{
#ifdef CONFIG_KVM
    KVMState *s = kvm_state;
    int i;

    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        for (i = 0; i < 8; ++i) {
            if (i == 2) {
                continue;
            }
            kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_PIC_MASTER, i);
        }
        for (i = 8; i < 16; ++i) {
            kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_PIC_SLAVE, i - 8);
        }
        if (pci_enabled) {
            for (i = 0; i < 24; ++i) {
                if (i == 0) {
                    kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_IOAPIC, 2);
                } else if (i != 2) {
                    kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_IOAPIC, i);
                }
            }
        }
    }
#endif /* CONFIG_KVM */
}
Example #5
File: kvm.c Project: joshsyu/PQEMU
int kvm_arch_get_registers(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t i;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    for (i = 0; i < 32; i++)
        env->gpr[i] = regs.gpr[i];

#ifdef KVM_CAP_PPC_SEGSTATE
    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_SEGSTATE)) {
        env->sdr1 = sregs.u.s.sdr1;

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                          sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }
#endif

    return 0;
}
Example #6
bool s390_has_feat(S390Feat feat)
{
    static S390CPU *cpu;

    if (!cpu) {
        cpu = S390_CPU(qemu_get_cpu(0));
    }

    if (!cpu || !cpu->model) {
#ifdef CONFIG_KVM
        if (kvm_enabled()) {
            if (feat == S390_FEAT_VECTOR) {
                return kvm_check_extension(kvm_state,
                                           KVM_CAP_S390_VECTOR_REGISTERS);
            }
            if (feat == S390_FEAT_RUNTIME_INSTRUMENTATION) {
                return kvm_s390_get_ri();
            }
            if (feat == S390_FEAT_MSA_EXT_3) {
                return true;
            }
        }
#endif
        return false;
    }
    return test_bit(feat, cpu->model->features);
}
Example #7
static int kvm_init_irq_routing(KVMState *s)
{
#ifdef KVM_CAP_IRQ_ROUTING
    int r, gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    r = kvm_arch_init_irq_routing();
    if (r < 0) {
        return r;
    }
#endif

    return 0;
}
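The "search ints using ffs" comment above refers to the GSI allocator built on top of used_gsi_bitmap; a paraphrased sketch (the real version also retries after flushing stale routes):

static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = s->max_gsi / 32;    /* max_gsi was rounded up to 32 */
    int i, bit;

    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        bit = ffs(~word[i]);            /* first zero bit, 1-based */
        if (!bit) {
            continue;                   /* word fully allocated */
        }
        return bit - 1 + i * 32;
    }
    return -ENOSPC;
}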
Example #8
int kvm_arch_qemu_create_context(void)
{
    int i, r;
    struct utsname utsname;

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    if (kvm_shadow_memory)
        kvm_set_shadow_pages(kvm_context, kvm_shadow_memory);

    kvm_msr_list = kvm_get_msr_list();
    if (!kvm_msr_list)
        return -1;
    for (i = 0; i < kvm_msr_list->nmsrs; ++i) {
        if (kvm_msr_list->indices[i] == MSR_STAR)
            kvm_has_msr_star = 1;
        if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA)
            kvm_has_vm_hsave_pa = 1;
    }

#ifdef KVM_CAP_ADJUST_CLOCK
    if (kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK))
        vmstate_register(NULL, 0, &vmstate_kvmclock, &kvmclock_data);
#endif

    r = kvm_set_boot_cpu_id(0);
    if (r < 0 && r != -ENOSYS) {
        return r;
    }

    return 0;
}
Example #9
static void kvm_openpic_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *d = SYS_BUS_DEVICE(dev);
    KVMOpenPICState *opp = KVM_OPENPIC(dev);
    KVMState *s = kvm_state;
    int kvm_openpic_model;
    struct kvm_create_device cd = {0};
    int ret, i;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        error_setg(errp, "Kernel is lacking Device Control API");
        return;
    }

    switch (opp->model) {
    case OPENPIC_MODEL_FSL_MPIC_20:
        kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_20;
        break;

    case OPENPIC_MODEL_FSL_MPIC_42:
        kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_42;
        break;

    default:
        error_setg(errp, "Unsupported OpenPIC model %" PRIu32, opp->model);
        return;
    }

    cd.type = kvm_openpic_model;
    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &cd);
    if (ret < 0) {
        error_setg(errp, "Can't create device %d: %s",
                   cd.type, strerror(errno));
        return;
    }
    opp->fd = cd.fd;

    sysbus_init_mmio(d, &opp->mem);
    qdev_init_gpio_in(dev, kvm_openpic_set_irq, OPENPIC_MAX_IRQ);

    opp->mem_listener.region_add = kvm_openpic_region_add;
    opp->mem_listener.region_del = kvm_openpic_region_del;
    memory_listener_register(&opp->mem_listener, &address_space_memory);

    /* indicate pic capabilities */
    msi_nonbroken = true;
    kvm_kernel_irqchip = true;
    kvm_async_interrupts_allowed = true;

    /* set up irq routing */
    kvm_init_irq_routing(kvm_state);
    for (i = 0; i < 256; ++i) {
        kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
    }

    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    kvm_irqchip_commit_routes(s);
}
Example #10
Object *kvm_s390_stattrib_create(void)
{
    if (kvm_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_S390_CMMA_MIGRATION)) {
        return object_new(TYPE_KVM_S390_STATTRIB);
    }
    return NULL;
}
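A caller is expected to treat the NULL return as "use the non-KVM implementation"; a hypothetical sketch of that fallback, modelled on QEMU's s390 storage-attribute setup (function and type names here are assumptions):

/* Hypothetical caller: prefer the KVM-backed storage-attribute device,
 * fall back to a QEMU-internal one when CMMA migration is unsupported. */
void s390_stattrib_init(void)
{
    Object *obj;

    obj = kvm_s390_stattrib_create();
    if (!obj) {
        obj = object_new(TYPE_QEMU_S390_STATTRIB);
    }
    /* ... attach obj to the machine and drop the local reference ... */
}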
Example #11
int kvm_get_gsi_count(kvm_context_t kvm)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return -EINVAL;
#endif
}
Example #12
int kvm_has_gsi_routing(void)
{
    int r = 0;

#ifdef KVM_CAP_IRQ_ROUTING
    r = kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#endif
    return r;
}
Example #13
int kvm_has_pit_state2(kvm_context_t kvm)
{
    int r = 0;

#ifdef KVM_CAP_PIT_STATE2
    r = kvm_check_extension(kvm_state, KVM_CAP_PIT_STATE2);
#endif
    return r;
}
Example #14
static void kvmclock_vm_state_change(void *opaque, int running,
                                     RunState state)
{
    KVMClockState *s = opaque;
    CPUState *cpu = first_cpu;
    int cap_clock_ctrl = kvm_check_extension(kvm_state, KVM_CAP_KVMCLOCK_CTRL);
    int ret;

    if (running) {
        struct kvm_clock_data data;

        s->clock_valid = false;

        data.clock = s->clock;
        data.flags = 0;
        ret = kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data);
        if (ret < 0) {
            fprintf(stderr, "KVM_SET_CLOCK failed: %s\n", strerror(-ret));
            abort();
        }

        if (!cap_clock_ctrl) {
            return;
        }
        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
            ret = kvm_vcpu_ioctl(cpu, KVM_KVMCLOCK_CTRL, 0);
            if (ret) {
                if (ret != -EINVAL) {
                    fprintf(stderr, "%s: %s\n", __func__, strerror(-ret));
                }
                return;
            }
        }
    } else {
        struct kvm_clock_data data;
        int ret;

        if (s->clock_valid) {
            return;
        }
        ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
        if (ret < 0) {
            fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(-ret));
            abort();
        }
        s->clock = data.clock;

        /*
         * If the VM is stopped, declare the clock state valid to
         * avoid re-reading it on next vmsave (which would return
         * a different value). Will be reset when the VM is continued.
         */
        s->clock_valid = true;
    }
}
Example #15
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
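Callers feed kvm_check_extension_list a table terminated by a NULL name. The table below is paraphrased from kvm-all.c (KVM_CAP_INFO and KVM_CAP_LAST_INFO are its helper macros, and kvm_required_capabilites is spelled as in that file); the wrapper function is a hypothetical condensation of the inline check in kvm_init():

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

/* Hypothetical wrapper: abort initialization naming the first missing cap */
static int kvm_check_required_caps(KVMState *s)
{
    const KVMCapabilityInfo *missing_cap;

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (missing_cap) {
        fprintf(stderr, "kvm does not support %s\n", missing_cap->name);
        return -EINVAL;
    }
    return 0;
}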
Example #16
static void kvm_reset_mpstate(CPUState *env)
{
#ifdef KVM_CAP_MP_STATE
    if (kvm_check_extension(kvm_state, KVM_CAP_MP_STATE)) {
        if (kvm_irqchip_in_kernel()) {
            env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                              KVM_MP_STATE_UNINITIALIZED;
        } else {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }
#endif
}
Example #17
static void kvm_get_smmu_info(CPUPPCState *env, struct kvm_ppc_smmu_info *info)
{
    int ret;

    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        ret = kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
        if (ret == 0) {
            return;
        }
    }

    kvm_get_fallback_smmu_info(env, info);
}
Example #18
static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
        if (ret == 0) {
            return;
        }
    }

    kvm_get_fallback_smmu_info(cpu, info);
}
Example #19
int kvm_arch_init(KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    kvm_ppc_register_host_cpu_type();

    return 0;
}
Example #20
/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    struct kvm_enable_cap encap = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    encap.cap = KVM_CAP_SW_TLB;
    encap.args[0] = (uintptr_t)&cfg;

    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}
Example #21
static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = (gsi_count + 31) & ~31;
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
}
Example #22
void kvm_arch_load_regs(CPUState *env, int level)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[100];
    int rc, n, i;

    assert(kvm_cpu_is_stopped(env) || env->thread_id == kvm_get_thread_id());

    regs.rax = env->regs[R_EAX];
    regs.rbx = env->regs[R_EBX];
    regs.rcx = env->regs[R_ECX];
    regs.rdx = env->regs[R_EDX];
    regs.rsi = env->regs[R_ESI];
    regs.rdi = env->regs[R_EDI];
    regs.rsp = env->regs[R_ESP];
    regs.rbp = env->regs[R_EBP];
#ifdef TARGET_X86_64
    regs.r8 = env->regs[8];
    regs.r9 = env->regs[9];
    regs.r10 = env->regs[10];
    regs.r11 = env->regs[11];
    regs.r12 = env->regs[12];
    regs.r13 = env->regs[13];
    regs.r14 = env->regs[14];
    regs.r15 = env->regs[15];
#endif

    regs.rflags = env->eflags;
    regs.rip = env->eip;

    kvm_set_regs(env, &regs);

#ifdef KVM_CAP_XSAVE
    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
        struct kvm_xsave* xsave;

        uint16_t cwd, swd, twd, fop;

        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
        memset(xsave, 0, sizeof(struct kvm_xsave));
        cwd = swd = twd = fop = 0;
        swd = env->fpus & ~(7 << 11);
        swd |= (env->fpstt & 7) << 11;
        cwd = env->fpuc;
        for (i = 0; i < 8; ++i)
            twd |= (!env->fptags[i]) << i;
        xsave->region[0] = (uint32_t)(swd << 16) + cwd;
        xsave->region[1] = (uint32_t)(fop << 16) + twd;
        memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
                sizeof env->fpregs);
        memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
                sizeof env->xmm_regs);
        xsave->region[XSAVE_MXCSR] = env->mxcsr;
        *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
        memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
                sizeof env->ymmh_regs);
        kvm_set_xsave(env, xsave);
        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
            struct kvm_xcrs xcrs;

            xcrs.nr_xcrs = 1;
            xcrs.flags = 0;
            xcrs.xcrs[0].xcr = 0;
            xcrs.xcrs[0].value = env->xcr0;
            kvm_set_xcrs(env, &xcrs);
        }
        qemu_free(xsave);
    } else {
#endif
        memset(&fpu, 0, sizeof fpu);
        fpu.fsw = env->fpus & ~(7 << 11);
        fpu.fsw |= (env->fpstt & 7) << 11;
        fpu.fcw = env->fpuc;
        for (i = 0; i < 8; ++i)
            fpu.ftwx |= (!env->fptags[i]) << i;
        memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
        memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
        fpu.mxcsr = env->mxcsr;
        kvm_set_fpu(env, &fpu);
#ifdef KVM_CAP_XSAVE
    }
#endif

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    kvm_set_sregs(env, &sregs);

    /* msrs */
    n = 0;
    /* Remember to increase msrs size if you add new registers below */
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS,  env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star)
        kvm_msr_entry_set(&msrs[n++], MSR_STAR,              env->star);
    if (kvm_has_vm_hsave_pa)
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR,             env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE,      env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK,             env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR  ,           env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME, env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
    }
#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        if (level == KVM_PUT_RESET_STATE)
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        else if (level == KVM_PUT_FULL_STATE) {
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
            for (i = 0; i < (env->mcg_cap & 0xff); i++)
                kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }
#endif

    rc = kvm_set_msrs(env, msrs, n);
    if (rc == -1)
        perror("kvm_set_msrs FAILED");

    if (level >= KVM_PUT_RESET_STATE) {
        kvm_arch_load_mpstate(env);
        kvm_load_lapic(env);
    }
    if (level == KVM_PUT_FULL_STATE) {
        if (env->kvm_vcpu_update_vapic)
            kvm_tpr_enable_vapic(env);
    }

    kvm_put_vcpu_events(env, level);
    kvm_put_debugregs(env);

    /* must be last */
    kvm_guest_debug_workarounds(env);
}
Example #23
void kvm_arch_save_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[100];
    uint32_t hflags;
    uint32_t i, n, rc, bit;

    assert(kvm_cpu_is_stopped(env) || env->thread_id == kvm_get_thread_id());

    kvm_get_regs(env, &regs);

    env->regs[R_EAX] = regs.rax;
    env->regs[R_EBX] = regs.rbx;
    env->regs[R_ECX] = regs.rcx;
    env->regs[R_EDX] = regs.rdx;
    env->regs[R_ESI] = regs.rsi;
    env->regs[R_EDI] = regs.rdi;
    env->regs[R_ESP] = regs.rsp;
    env->regs[R_EBP] = regs.rbp;
#ifdef TARGET_X86_64
    env->regs[8] = regs.r8;
    env->regs[9] = regs.r9;
    env->regs[10] = regs.r10;
    env->regs[11] = regs.r11;
    env->regs[12] = regs.r12;
    env->regs[13] = regs.r13;
    env->regs[14] = regs.r14;
    env->regs[15] = regs.r15;
#endif

    env->eflags = regs.rflags;
    env->eip = regs.rip;

#ifdef KVM_CAP_XSAVE
    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
        struct kvm_xsave* xsave;
        uint16_t cwd, swd, twd, fop;
        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
        kvm_get_xsave(env, xsave);
        cwd = (uint16_t)xsave->region[0];
        swd = (uint16_t)(xsave->region[0] >> 16);
        twd = (uint16_t)xsave->region[1];
        fop = (uint16_t)(xsave->region[1] >> 16);
        env->fpstt = (swd >> 11) & 7;
        env->fpus = swd;
        env->fpuc = cwd;
        for (i = 0; i < 8; ++i)
            env->fptags[i] = !((twd >> i) & 1);
        env->mxcsr = xsave->region[XSAVE_MXCSR];
        memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
                sizeof env->fpregs);
        memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
                sizeof env->xmm_regs);
        env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
        memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
                sizeof env->ymmh_regs);
        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
            struct kvm_xcrs xcrs;

            kvm_get_xcrs(env, &xcrs);
            if (xcrs.xcrs[0].xcr == 0)
                env->xcr0 = xcrs.xcrs[0].value;
        }
        qemu_free(xsave);
    } else {
Example #24
static int kvm_s390_register_io_adapter(S390FLICState *fs, uint32_t id,
                                        uint8_t isc, bool swap,
                                        bool is_maskable)
{
    struct kvm_s390_io_adapter adapter = {
        .id = id,
        .isc = isc,
        .maskable = is_maskable,
        .swap = swap,
    };
    KVMS390FLICState *flic = KVM_S390_FLIC(fs);
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
        .addr = (uint64_t)&adapter,
    };

    if (!kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING)) {
        /* nothing to do */
        return 0;
    }

    r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);

    return r ? -errno : 0;
}

static int kvm_s390_io_adapter_map(S390FLICState *fs, uint32_t id,
                                   uint64_t map_addr, bool do_map)
{
    struct kvm_s390_io_adapter_req req = {
        .id = id,
        .type = do_map ? KVM_S390_IO_ADAPTER_MAP : KVM_S390_IO_ADAPTER_UNMAP,
        .addr = map_addr,
    };
    struct kvm_device_attr attr = {
        .group = KVM_DEV_FLIC_ADAPTER_MODIFY,
        .addr = (uint64_t)&req,
    };
    KVMS390FLICState *flic = KVM_S390_FLIC(fs);
    int r;

    if (!kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING)) {
        /* nothing to do */
        return 0;
    }

    r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
    return r ? -errno : 0;
}

static int kvm_s390_add_adapter_routes(S390FLICState *fs,
                                       AdapterRoutes *routes)
{
    int ret, i;
    uint64_t ind_offset = routes->adapter.ind_offset;

    for (i = 0; i < routes->num_routes; i++) {
        ret = kvm_irqchip_add_adapter_route(kvm_state, &routes->adapter);
        if (ret < 0) {
            goto out_undo;
        }
        routes->gsi[i] = ret;
        routes->adapter.ind_offset++;
    }
    kvm_irqchip_commit_routes(kvm_state);

    /* Restore passed-in structure to original state. */
    routes->adapter.ind_offset = ind_offset;
    return 0;
out_undo:
    while (--i >= 0) {
        kvm_irqchip_release_virq(kvm_state, routes->gsi[i]);
        routes->gsi[i] = -1;
    }
    routes->adapter.ind_offset = ind_offset;
    return ret;
}

static void kvm_s390_release_adapter_routes(S390FLICState *fs,
                                            AdapterRoutes *routes)
{
    int i;

    for (i = 0; i < routes->num_routes; i++) {
        if (routes->gsi[i] >= 0) {
            kvm_irqchip_release_virq(kvm_state, routes->gsi[i]);
            routes->gsi[i] = -1;
        }
    }
}

/**
 * kvm_flic_save - Save pending floating interrupts
 * @f: QEMUFile containing migration state
 * @opaque: pointer to flic device state
 *
 * Note: Pass buf and len to the kernel. Start with one page and
 * increase the buffer until it is sufficient or the maximum size is
 * reached.
 */
static void kvm_flic_save(QEMUFile *f, void *opaque)
{
    KVMS390FLICState *flic = opaque;
    int len = FLIC_SAVE_INITIAL_SIZE;
    void *buf;
    int count;

    flic_disable_wait_pfault((struct KVMS390FLICState *) opaque);

    buf = g_try_malloc0(len);
    if (!buf) {
        /* Storing FLIC_FAILED into the count field here will cause the
         * target system to fail when attempting to load irqs from the
         * migration state */
        error_report("flic: couldn't allocate memory");
        qemu_put_be64(f, FLIC_FAILED);
        return;
    }

    count = __get_all_irqs(flic, &buf, len);
    if (count < 0) {
        error_report("flic: couldn't retrieve irqs from kernel, rc %d",
                     count);
        /* Storing FLIC_FAILED into the count field here will cause the
         * target system to fail when attempting to load irqs from the
         * migration state */
        qemu_put_be64(f, FLIC_FAILED);
    } else {
        qemu_put_be64(f, count);
        qemu_put_buffer(f, (uint8_t *) buf,
                        count * sizeof(struct kvm_s390_irq));
    }
    g_free(buf);
}
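The grow-until-it-fits contract described in kvm_flic_save's comment lives in __get_all_irqs; a paraphrased sketch (the retry condition and return convention are assumptions based on the FLIC device API, not the verbatim helper, and a real implementation would also cap len at a maximum size):

static int __get_all_irqs(KVMS390FLICState *flic, void **buf, int len)
{
    struct kvm_device_attr attr = {
        .group = KVM_DEV_FLIC_GET_ALL_IRQS,
    };
    void *tmp;
    int rc;

    for (;;) {
        attr.attr = len;
        attr.addr = (uint64_t)(unsigned long)*buf;
        rc = ioctl(flic->fd, KVM_GET_DEVICE_ATTR, &attr);
        if (rc >= 0) {
            return rc;              /* number of irqs retrieved */
        }
        if (errno != ENOMEM) {
            return -errno;          /* hard failure */
        }
        len *= 2;                   /* buffer too small: grow and retry */
        tmp = g_try_realloc(*buf, len);
        if (!tmp) {
            return -ENOMEM;
        }
        *buf = tmp;
    }
}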
Example #25
static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
                                       struct kvm_ppc_smmu_info *info)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    memset(info, 0, sizeof(*info));

    /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so we
     * need to "guess" what the supported page sizes are.
     *
     * For that to work we make a few assumptions:
     *
     * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR"
     *   KVM which only supports 4K and 16M pages, but supports them
     *   regardless of the backing store characteristics. We also don't
     *   support 1T segments.
     *
     *   This is safe as if HV KVM ever supports that capability or PR
     *   KVM grows support for more page/segment sizes, those versions
     *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
     *   will not hit this fallback
     *
     * - Else we are running HV KVM. This means we only support page
     *   sizes that fit in the backing store. Additionally we only
     *   advertise 64K pages if the processor is ARCH 2.06 and we assume
     *   P7 encodings for the SLB and hash table. Here too, we assume
     *   support for any newer processor will mean a kernel that
     *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
     *   this fallback.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
        /* No flags */
        info->flags = 0;
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /* Standard 16M large page size segment */
        info->sps[1].page_shift = 24;
        info->sps[1].slb_enc = SLB_VSID_L;
        info->sps[1].enc[0].page_shift = 24;
        info->sps[1].enc[0].pte_enc = 0;
    } else {
        int i = 0;

        /* HV KVM has backing store size restrictions */
        info->flags = KVM_PPC_PAGE_SIZES_REAL;

        if (env->mmu_model & POWERPC_MMU_1TSEG) {
            info->flags |= KVM_PPC_1T_SEGMENTS;
        }

        if (env->mmu_model == POWERPC_MMU_2_06) {
            info->slb_size = 32;
        } else {
            info->slb_size = 64;
        }

        /* Standard 4k base page size segment */
        info->sps[i].page_shift = 12;
        info->sps[i].slb_enc = 0;
        info->sps[i].enc[0].page_shift = 12;
        info->sps[i].enc[0].pte_enc = 0;
        i++;

        /* 64K on MMU 2.06 */
        if (env->mmu_model == POWERPC_MMU_2_06) {
            info->sps[i].page_shift = 16;
            info->sps[i].slb_enc = 0x110;
            info->sps[i].enc[0].page_shift = 16;
            info->sps[i].enc[0].pte_enc = 1;
            i++;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;
    }
}
Example #26
static void kvm_pit_realizefn(DeviceState *dev, Error **errp)
{
    PITCommonState *pit = PIT_COMMON(dev);
    KVMPITClass *kpc = KVM_PIT_GET_CLASS(dev);
    KVMPITState *s = KVM_PIT(pit);
    struct kvm_pit_config config = {
        .flags = 0,
    };
    int ret;

    if (kvm_check_extension(kvm_state, KVM_CAP_PIT2)) {
        ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT2, &config);
    } else {
        ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT);
    }
    if (ret < 0) {
        error_setg(errp, "Create kernel PIT failed: %s",
                   strerror(-ret));
        return;
    }
    switch (s->lost_tick_policy) {
    case LOST_TICK_DELAY:
        break; /* enabled by default */
    case LOST_TICK_DISCARD:
        if (kvm_check_extension(kvm_state, KVM_CAP_REINJECT_CONTROL)) {
            struct kvm_reinject_control control = { .pit_reinject = 0 };

            ret = kvm_vm_ioctl(kvm_state, KVM_REINJECT_CONTROL, &control);
            if (ret < 0) {
                error_setg(errp,
                           "Can't disable in-kernel PIT reinjection: %s",
                           strerror(ret));
                return;
            }
        }
        break;
    default:
        error_setg(errp, "Lost tick policy not supported.");
        return;
    }

    memory_region_init_reservation(&pit->ioports, NULL, "kvm-pit", 4);

    qdev_init_gpio_in(dev, kvm_pit_irq_control, 1);

    qemu_add_vm_change_state_handler(kvm_pit_vm_state_change, s);

    kpc->parent_realize(dev, errp);
}

static Property kvm_pit_properties[] = {
    DEFINE_PROP_HEX32("iobase", PITCommonState, iobase,  -1),
    DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", KVMPITState,
    lost_tick_policy, LOST_TICK_DELAY),
    DEFINE_PROP_END_OF_LIST(),
};

static void kvm_pit_class_init(ObjectClass *klass, void *data)
{
    KVMPITClass *kpc = KVM_PIT_CLASS(klass);
    PITCommonClass *k = PIT_COMMON_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    kpc->parent_realize = dc->realize;
    dc->realize = kvm_pit_realizefn;
    k->set_channel_gate = kvm_pit_set_gate;
    k->get_channel_info = kvm_pit_get_channel_info;
    k->pre_save = kvm_pit_get;
    k->post_load = kvm_pit_put;
    dc->reset = kvm_pit_reset;
    dc->props = kvm_pit_properties;
}

static const TypeInfo kvm_pit_info = {
    .name          = TYPE_KVM_I8254,
    .parent        = TYPE_PIT_COMMON,
    .instance_size = sizeof(KVMPITState),
    .class_init = kvm_pit_class_init,
    .class_size = sizeof(KVMPITClass),
};

static void kvm_pit_register(void)
{
    type_register_static(&kvm_pit_info);
}

type_init(kvm_pit_register)
Example #27
int kvm_arch_init(KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /* Note: we don't set cap_papr here, because this capability is
     * only activated after this by kvmppc_set_papr() */

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                "VM to stall at times!\n");
    }

    kvm_ppc_register_host_cpu_type();

    return 0;
}
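The cap_papr note above points at kvmppc_set_papr, which enables the capability per vCPU and only then records it; a sketch matching the era of these examples (paraphrased from QEMU's target-ppc KVM code, details may differ between versions):

void kvmppc_set_papr(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_enable_cap cap = {};
    int ret;

    cap.cap = KVM_CAP_PPC_PAPR;
    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);
    if (ret) {
        cpu_abort(cs, "This KVM version does not support PAPR\n");
    }

    /* only now is the capability considered active */
    cap_papr = 1;
}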