Example #1
int svm_create_vmcb(struct vcpu *v)
{
    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
    struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
    int rc;

    if ( (nv->nv_n1vmcx == NULL) &&
         (nv->nv_n1vmcx = alloc_vmcb()) == NULL )
    {
        printk("Failed to create a new VMCB\n");
        return -ENOMEM;
    }

    arch_svm->vmcb = nv->nv_n1vmcx;
    rc = construct_vmcb(v);
    if ( rc != 0 )
    {
        free_vmcb(nv->nv_n1vmcx);
        nv->nv_n1vmcx = NULL;
        arch_svm->vmcb = NULL;
        return rc;
    }

    arch_svm->vmcb_pa = nv->nv_n1vmcx_pa = virt_to_maddr(arch_svm->vmcb);
    return 0;
}
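All of these examples reach the per-vCPU nested-virtualization state through vcpu_nestedhvm(). As a minimal sketch, and purely as an assumption about how such an accessor could be defined, it might simply expand to a member embedded in struct vcpu:

/* Sketch only: the field path arch.hvm_vcpu.nvcpu is an assumption; the
 * examples only show that vcpu_nestedhvm(v) yields a struct nestedvcpu
 * lvalue whose address can be taken. */
#define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)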
Example #2
void
nestedhvm_vcpu_reset(struct vcpu *v)
{
    struct nestedvcpu *nv = &vcpu_nestedhvm(v);

    nv->nv_vmentry_pending = 0;
    nv->nv_vmexit_pending = 0;
    nv->nv_vmswitch_in_progress = 0;
    nv->nv_ioport80 = 0;
    nv->nv_ioportED = 0;

    if (nv->nv_vvmcx)
        hvm_unmap_guest_frame(nv->nv_vvmcx);
    nv->nv_vvmcx = NULL;
    nv->nv_vvmcxaddr = VMCX_EADDR;
    nv->nv_flushp2m = 0;
    nv->nv_p2m = NULL;

    hvm_asid_flush_vcpu_asid(&nv->nv_n2asid);

    if ( hvm_funcs.nhvm_vcpu_reset )
        hvm_funcs.nhvm_vcpu_reset(v);

    /* vcpu is in host mode */
    nestedhvm_vcpu_exit_guestmode(v);
}
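Example #2 touches most of the per-vCPU nested state in one place. Below is a hedged sketch of a struct nestedvcpu that would satisfy these examples; the field names come from the code above, but the types and any further members are assumptions.

/* Hedged sketch: types are guessed from how the fields are used in the
 * examples; the real Xen layout may differ and contain more members. */
struct nestedvcpu {
    bool_t   nv_guestmode;              /* vCPU currently running as L2 */
    void    *nv_vvmcx;                  /* mapped L1-provided VMCB/VMCS */
    uint64_t nv_vvmcxaddr;              /* its guest-physical address */
    void    *nv_n1vmcx;                 /* VMCB/VMCS used while running L1 */
    paddr_t  nv_n1vmcx_pa;
    bool_t   nv_vmentry_pending;
    bool_t   nv_vmexit_pending;
    bool_t   nv_vmswitch_in_progress;
    unsigned int nv_ioport80, nv_ioportED;  /* debug-port access state (guess) */
    bool_t   nv_flushp2m;
    struct p2m_domain   *nv_p2m;        /* nested p2m in use, if any */
    struct hvm_vcpu_asid nv_n2asid;     /* ASID used for the L2 guest */
};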
Example #3
static int nvmx_intr_intercept(struct vcpu *v, struct hvm_intack intack)
{
    u32 ctrl;

    /* If blocked by L1's tpr, then nothing to do. */
    if ( nestedhvm_vcpu_in_guestmode(v) &&
         hvm_interrupt_blocked(v, intack) == hvm_intblk_tpr )
        return 1;

    if ( nvmx_intr_blocked(v) != hvm_intblk_none )
    {
        enable_intr_window(v, intack);
        return 1;
    }

    if ( nestedhvm_vcpu_in_guestmode(v) )
    {
        if ( intack.source == hvm_intsrc_pic ||
                 intack.source == hvm_intsrc_lapic )
        {
            ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, PIN_BASED_VM_EXEC_CONTROL);
            if ( !(ctrl & PIN_BASED_EXT_INTR_MASK) )
                return 0;

            vmx_inject_extint(intack.vector, intack.source);

            ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, VM_EXIT_CONTROLS);
            if ( ctrl & VM_EXIT_ACK_INTR_ON_EXIT )
            {
                /* for now, duplicate the ack path in vmx_intr_assist */
                hvm_vcpu_ack_pending_irq(v, intack);
                pt_intr_post(v, intack);

                intack = hvm_vcpu_has_pending_irq(v);
                if ( unlikely(intack.source != hvm_intsrc_none) )
                    enable_intr_window(v, intack);
            }
            else
                enable_intr_window(v, intack);

            return 1;
        }
    }

    return 0;
}
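The return-value convention of nvmx_intr_intercept() (non-zero means the interrupt was either injected into L2 or deliberately deferred via an interrupt window) only makes sense together with its caller. The following is a hypothetical caller sketch; its name and surrounding flow are assumptions, not code from the examples.

static void vmx_intr_assist_sketch(struct vcpu *v)
{
    /* Hypothetical caller: names and flow are assumptions. */
    struct hvm_intack intack = hvm_vcpu_has_pending_irq(v);

    if ( intack.source == hvm_intsrc_none )
        return;

    /* Non-zero: the nested code handled (or windowed) the interrupt,
     * so the normal injection path is skipped for this vmentry. */
    if ( nvmx_intr_intercept(v, intack) )
        return;

    /* Otherwise the regular (non-nested) injection path would run here. */
}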
Example #4
static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
    vintr_t intr;

    ASSERT(intack.source != hvm_intsrc_none);

    if ( nestedhvm_enabled(v->domain) ) {
        struct nestedvcpu *nv = &vcpu_nestedhvm(v);
        if ( nv->nv_vmentry_pending ) {
            struct vmcb_struct *gvmcb = nv->nv_vvmcx;

            /* Check if the l1 guest injects an interrupt into the l2 guest
             * via vintr. Return here, otherwise the l2 guest loses
             * interrupts.
             */
            ASSERT(gvmcb != NULL);
            intr = vmcb_get_vintr(gvmcb);
            if ( intr.fields.irq )
                return;
        }
    }

    HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1);

    /*
     * Create a dummy virtual interrupt to intercept as soon as the
     * guest can accept the real interrupt.
     *
     * TODO: Better NMI handling. We need a way to skip a MOV SS interrupt
     * shadow. This is hard to do without hardware support. Also we should
     * not be waiting for EFLAGS.IF to become 1.
     */

    /*
     * NMI-blocking window is handled by IRET interception. We should not
     * inject a VINTR in this case as VINTR is unaware of NMI-blocking and
     * hence we can enter an endless loop (VINTR intercept fires, yet
     * hvm_interrupt_blocked() still indicates NMI-blocking is active, so
     * we inject a VINTR, ...).
     */
    if ( (intack.source == hvm_intsrc_nmi) &&
         (general1_intercepts & GENERAL1_INTERCEPT_IRET) )
        return;

    intr = vmcb_get_vintr(vmcb);
    intr.fields.irq     = 1;
    intr.fields.vector  = 0;
    intr.fields.prio    = intack.vector >> 4;
    intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic);
    vmcb_set_vintr(vmcb, intr);
    vmcb_set_general1_intercepts(
        vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR);
}
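A VINTR intercept armed this way normally has a counterpart that disarms it once the intercept fires. The sketch below shows what such a teardown could look like using only the helpers from the example; the handler itself and its name are assumptions.

static void svm_vintr_teardown_sketch(struct vcpu *v)
{
    /* Hypothetical VMEXIT_VINTR handler: drop the dummy virtual interrupt
     * and stop intercepting VINTR so the real interrupt can be injected. */
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
    vintr_t intr = vmcb_get_vintr(vmcb);

    intr.fields.irq = 0;
    vmcb_set_vintr(vmcb, intr);
    vmcb_set_general1_intercepts(
        vmcb, general1_intercepts & ~GENERAL1_INTERCEPT_VINTR);
}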
Example #5
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v)
{
    int r = hvm_intblk_none;
    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);

    if ( nestedhvm_vcpu_in_guestmode(v) )
    {
        if ( nvcpu->nv_vmexit_pending ||
             nvcpu->nv_vmswitch_in_progress ||
             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
            r = hvm_intblk_rflags_ie;
    }
    else if ( nvcpu->nv_vmentry_pending )
        r = hvm_intblk_rflags_ie;

    return r;
}
Example #6
void svm_destroy_vmcb(struct vcpu *v)
{
    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
    struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;

    if ( nv->nv_n1vmcx != NULL )
        free_vmcb(nv->nv_n1vmcx);

    if ( arch_svm->msrpm != NULL )
    {
        free_xenheap_pages(
            arch_svm->msrpm, get_order_from_bytes(MSRPM_SIZE));
        arch_svm->msrpm = NULL;
    }

    nv->nv_n1vmcx = NULL;
    nv->nv_n1vmcx_pa = VMCX_EADDR;
    arch_svm->vmcb = NULL;
}
Example #7
static void
nestedhvm_flushtlb_ipi(void *info)
{
    struct vcpu *v = current;
    struct domain *d = info;

    ASSERT(d != NULL);
    if (v->domain != d) {
        /* This cpu doesn't belong to the domain */
        return;
    }

    /* Just flush the ASID (or request a new one).
     * This is cheaper than flush_tlb_local() and has
     * the same desired effect.
     */
    hvm_asid_flush_core();
    vcpu_nestedhvm(v).nv_p2m = NULL;
}
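An IPI callback like nestedhvm_flushtlb_ipi() is only useful together with the code that sends the IPI. A hedged sketch of a sender, assuming the standard on_selected_cpus() interface and a caller-supplied cpumask, follows:

static void nestedhvm_flushtlb_sketch(struct domain *d, const cpumask_t *mask)
{
    /* Hypothetical sender: run the flush callback on the selected CPUs,
     * passing the domain as the opaque info pointer, and wait for it. */
    on_selected_cpus(mask, nestedhvm_flushtlb_ipi, d, 1);
}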
Example #8
/* Nested VCPU */
bool_t
nestedhvm_vcpu_in_guestmode(struct vcpu *v)
{
    return vcpu_nestedhvm(v).nv_guestmode;
}
Example #9
void hvm_asid_flush_vcpu(struct vcpu *v)
{
    hvm_asid_flush_vcpu_asid(&v->arch.hvm_vcpu.n1asid);
    hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
}