Example #1
File: realmode.c Project: CPFL/xen
static void realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct vcpu *curr = current;
    int rc;

    perfc_incr(realmode_emulations);

    rc = hvm_emulate_one(hvmemul_ctxt);

    if ( rc == X86EMUL_UNHANDLEABLE )
    {
        gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
        goto fail;
    }

    if ( rc == X86EMUL_EXCEPTION )
    {
        if ( !hvmemul_ctxt->exn_pending )
        {
            unsigned long intr_info;

            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
            __vmwrite(VM_ENTRY_INTR_INFO, 0);
            if ( !(intr_info & INTR_INFO_VALID_MASK) )
            {
                gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
                goto fail;
            }
            hvmemul_ctxt->trap.vector = (uint8_t)intr_info;
            hvmemul_ctxt->trap.insn_len = 0;
        }

        if ( unlikely(curr->domain->debugger_attached) &&
             ((hvmemul_ctxt->trap.vector == TRAP_debug) ||
              (hvmemul_ctxt->trap.vector == TRAP_int3)) )
        {
            domain_pause_for_debugger();
        }
        else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        {
            gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
                     hvmemul_ctxt->trap.vector);
            goto fail;
        }
        else
        {
            realmode_deliver_exception(
                hvmemul_ctxt->trap.vector,
                hvmemul_ctxt->trap.insn_len,
                hvmemul_ctxt);
        }
    }

    return;

 fail:
    hvm_dump_emulation_state(XENLOG_G_ERR "Real-mode", hvmemul_ctxt);
    domain_crash(curr->domain);
}
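
The (uint8_t)intr_info cast above relies on the layout of the VM-entry
interruption-information field. A minimal reference sketch of that layout
(Intel SDM Vol. 3, 24.8.3); the macro names are illustrative, not taken from
the Xen headers:

#include <stdint.h>

/* VM-entry interruption-information layout: bits 7:0 hold the vector,
 * bits 10:8 the event type, bit 11 the error-code flag, and bit 31 the
 * "valid" bit that INTR_INFO_VALID_MASK tests above. */
#define INTR_INFO_VECTOR(info)      ((uint8_t)((info) & 0xff))
#define INTR_INFO_TYPE(info)        (((info) >> 8) & 0x7)
#define INTR_INFO_DELIVER_EC(info)  (((info) >> 11) & 0x1)
#define INTR_INFO_IS_VALID(info)    (((info) >> 31) & 0x1)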
Example #2
File: intr.c Project: doniexun/xen
static void vmx_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;

    ASSERT(intack.source != hvm_intsrc_none);

    if ( unlikely(tb_init_done) )
    {
        unsigned long intr;

        __vmread(VM_ENTRY_INTR_INFO, &intr);
        HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                    (intr & INTR_INFO_VALID_MASK) ? intr & 0xff : -1);
    }

    if ( (intack.source == hvm_intsrc_nmi) && cpu_has_vmx_vnmi )
    {
        /*
         * We set MOV-SS blocking in lieu of STI blocking when delivering an
         * NMI. This is because it is processor-specific whether STI-blocking
         * blocks NMIs. Hence we *must* check for STI-blocking on NMI delivery
         * (otherwise vmentry will fail on processors that check for STI-
         * blocking) but if the processor does not check for STI-blocking then
         * we may immediately vmexit and hence make no progress!
         * (see SDM 3B 21.3, "Other Causes of VM Exits").
         */
        unsigned long intr_shadow;

        __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
        if ( intr_shadow & VMX_INTR_SHADOW_STI )
        {
            /* Having both STI-blocking and MOV-SS-blocking fails vmentry. */
            intr_shadow &= ~VMX_INTR_SHADOW_STI;
            intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
            __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
        }
        ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
    }

    if ( !(v->arch.hvm_vmx.exec_control & ctl) )
    {
        v->arch.hvm_vmx.exec_control |= ctl;
        vmx_update_cpu_exec_control(v);
    }
}
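
For reference, the interruptibility-state bits this function swaps follow the
architectural layout (Intel SDM Vol. 3, 24.4.2); a sketch assuming Xen's
usual constant names:

/* Guest interruptibility-state bits. The function above clears STI
 * blocking and substitutes MOV-SS blocking so that vmentry's consistency
 * checks pass regardless of whether the processor treats STI blocking as
 * also blocking NMIs. */
#define VMX_INTR_SHADOW_STI     0x00000001  /* blocking by STI     */
#define VMX_INTR_SHADOW_MOV_SS  0x00000002  /* blocking by MOV SS  */
#define VMX_INTR_SHADOW_SMI     0x00000004  /* blocking by SMI     */
#define VMX_INTR_SHADOW_NMI     0x00000008  /* blocking by NMI     */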
Example #3
void vmx_do_resume(struct vcpu *v)
{
    bool_t debug_state;

    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
            vmx_load_vmcs(v);
    }
    else
    {
        /*
         * For pass-through domain, guest PCI-E device driver may leverage the
         * "Non-Snoop" I/O, and explicitly WBINVD or CLFLUSH to a RAM space.
         * Since migration may occur before WBINVD or CLFLUSH, we need to
         * maintain data consistency either by:
         *  1: flushing cache (wbinvd) when the guest is scheduled out if
         *     there is no wbinvd exit, or
         *  2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
         */
        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
             !cpu_has_wbinvd_exiting )
        {
            int cpu = v->arch.hvm_vmx.active_cpu;
            if ( cpu != -1 )
                on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
        }

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
    }

    debug_state = v->domain->debugger_attached;
    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        unsigned long intercepts = __vmread(EXCEPTION_BITMAP);
        unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3);
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            intercepts |= mask;
        else
            intercepts &= ~mask;
        __vmwrite(EXCEPTION_BITMAP, intercepts);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
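
The wbinvd_ipi callback used for option 1 is not shown in this snippet; a
minimal sketch of what it plausibly does (each selected pCPU simply flushes
its own caches):

/* IPI handler: write back and invalidate the local CPU's caches so that
 * non-snooped DMA from a passed-through device observes consistent RAM. */
static void wbinvd_ipi(void *info)
{
    wbinvd();
}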
Example #4
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v)
{
    int r = hvm_intblk_none;
    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);

    if ( nestedhvm_vcpu_in_guestmode(v) )
    {
        if ( nvcpu->nv_vmexit_pending ||
             nvcpu->nv_vmswitch_in_progress ||
             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
            r = hvm_intblk_rflags_ie;
    }
    else if ( nvcpu->nv_vmentry_pending )
        r = hvm_intblk_rflags_ie;

    return r;
}
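
A hedged usage sketch; can_inject_now is a hypothetical helper, not part of
the Xen source:

/* Fold the nested-virt check in before injecting anything into the guest.
 * hvm_intblk_rflags_ie above means "hold the event": a nested vmexit or
 * vmswitch is in flight, or an event is already staged in
 * VM_ENTRY_INTR_INFO and must be delivered first. */
static int can_inject_now(struct vcpu *v)
{
    return nvmx_intr_blocked(v) == hvm_intblk_none;
}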
Example #5
void vmx_intr_assist(void)
{
    struct hvm_intack intack;
    struct vcpu *v = current;
    unsigned int tpr_threshold = 0;
    enum hvm_intblk intblk;

    /* Block event injection when single step with MTF. */
    if ( unlikely(v->arch.hvm_vcpu.single_step) )
    {
        v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
        vmx_update_cpu_exec_control(v);
        return;
    }

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            goto out;

        if ( unlikely(nvmx_intr_intercept(v, intack)) )
            goto out;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( intblk == hvm_intblk_tpr )
        {
            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
            ASSERT(intack.source == hvm_intsrc_lapic);
            tpr_threshold = intack.vector >> 4;
            goto out;
        }

        if ( (intblk != hvm_intblk_none) ||
             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
        {
            enable_intr_window(v, intack);
            goto out;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        vmx_inject_nmi();
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        vmx_inject_extint(intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        enable_intr_window(v, intack);

 out:
    if ( cpu_has_vmx_tpr_shadow )
        __vmwrite(TPR_THRESHOLD, tpr_threshold);
}
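
The intack.vector >> 4 in the TPR branch extracts the vector's priority
class. A small sketch of the comparison the hardware effectively performs;
the helper names are illustrative:

#include <stdint.h>

/* The VMCS TPR threshold is compared against VTPR[7:4]. Setting it to the
 * blocked vector's priority class forces a VM exit once the guest lowers
 * its TPR enough for the interrupt to become deliverable. */
static inline uint8_t priority_class(uint8_t vector)
{
    return vector >> 4;               /* e.g. vector 0x51 -> class 5 */
}

static inline int tpr_blocks(uint8_t vtpr, uint8_t vector)
{
    return priority_class(vector) <= (vtpr >> 4);
}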
Example #6
void vm_resume_fail(unsigned long eflags)
{
    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    printk("<vm_resume_fail> error code %lx\n", error);
    domain_crash_synchronous();
}
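
For context, VM_INSTRUCTION_ERROR holds a number from the SDM's
VM-instruction error table (Intel SDM Vol. 3, 30.4); a few values worth
recognising when this path fires, with illustrative macro names:

/* Selected VM-instruction error numbers. */
#define VMERR_VMRESUME_NON_LAUNCHED_VMCS  5  /* VMRESUME with non-launched VMCS */
#define VMERR_INVALID_CONTROL_FIELDS      7  /* VM entry with invalid control field(s) */
#define VMERR_INVALID_HOST_STATE          8  /* VM entry with invalid host-state field(s) */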
Example #7
File: intr.c Project: doniexun/xen
void vmx_intr_assist(void)
{
    struct hvm_intack intack;
    struct vcpu *v = current;
    unsigned int tpr_threshold = 0;
    enum hvm_intblk intblk;
    int pt_vector = -1;

    /* Block event injection when single step with MTF. */
    if ( unlikely(v->arch.hvm_vcpu.single_step) )
    {
        v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
        vmx_update_cpu_exec_control(v);
        return;
    }

    /* Crank the handle on interrupt state. */
    if ( is_hvm_vcpu(v) )
        pt_vector = pt_update_irq(v);

    do {
        unsigned long intr_info;

        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            goto out;

        if ( unlikely(nvmx_intr_intercept(v, intack)) )
            goto out;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( cpu_has_vmx_virtual_intr_delivery )
        {
            /* Set "Interrupt-window exiting" for ExtINT and NMI. */
            if ( (intblk != hvm_intblk_none) &&
                 (intack.source == hvm_intsrc_pic ||
                  intack.source == hvm_intsrc_vector ||
                  intack.source == hvm_intsrc_nmi) )
            {
                vmx_enable_intr_window(v, intack);
                goto out;
            }

            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
            if ( intr_info & INTR_INFO_VALID_MASK )
            {
                if ( (intack.source == hvm_intsrc_pic) ||
                     (intack.source == hvm_intsrc_nmi) ||
                     (intack.source == hvm_intsrc_mce) )
                    vmx_enable_intr_window(v, intack);

                goto out;
            }
        }
        else if ( intblk == hvm_intblk_tpr )
        {
            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
            ASSERT(intack.source == hvm_intsrc_lapic);
            tpr_threshold = intack.vector >> 4;
            goto out;
        }
        else if ( intblk != hvm_intblk_none )
        {
            vmx_enable_intr_window(v, intack);
            goto out;
        }
        else
        {
            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
            if ( intr_info & INTR_INFO_VALID_MASK )
            {
                vmx_enable_intr_window(v, intack);
                goto out;
            }
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        vmx_inject_nmi();
    }
    else if ( cpu_has_vmx_virtual_intr_delivery &&
              (intack.source != hvm_intsrc_pic) &&
              (intack.source != hvm_intsrc_vector) )
    {
        unsigned long status;

        /*
         * Virtual interrupt delivery: stage the vector in the guest
         * interrupt status (RVI) and let the processor perform the
         * injection; keep an EOI exit armed for the periodic timer
         * vector so its EOI can still be virtualised.
         */
        if ( pt_vector != -1 )
            vmx_set_eoi_exit_bitmap(v, pt_vector);

        __vmread(GUEST_INTR_STATUS, &status);
        status &= ~(unsigned long)VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
        status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK & intack.vector;
        __vmwrite(GUEST_INTR_STATUS, status);

        pt_intr_post(v, intack);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        vmx_inject_extint(intack.vector, intack.source);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) &&
         (!cpu_has_vmx_virtual_intr_delivery ||
          intack.source == hvm_intsrc_pic ||
          intack.source == hvm_intsrc_vector) )
        vmx_enable_intr_window(v, intack);

 out:
    if ( !nestedhvm_vcpu_in_guestmode(v) &&
         !cpu_has_vmx_virtual_intr_delivery &&
         cpu_has_vmx_tpr_shadow )
        __vmwrite(TPR_THRESHOLD, tpr_threshold);
}
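
With virtual interrupt delivery the state that matters is the 16-bit guest
interrupt status field; a sketch of its layout (Intel SDM Vol. 3, 24.4.2)
with illustrative helper names:

#include <stdint.h>

/* Guest interrupt status: RVI (requested virtual interrupt) in the low
 * byte, SVI (servicing virtual interrupt) in the high byte. Staging the
 * pending vector in RVI lets the processor, not the VMM, perform the
 * injection at the next open window. */
static inline uint8_t  get_rvi(uint16_t status)              { return status & 0xff; }
static inline uint8_t  get_svi(uint16_t status)              { return status >> 8;   }
static inline uint16_t set_rvi(uint16_t status, uint8_t vec) { return (status & 0xff00) | vec; }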
Example #8
void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct hvm_emulate_ctxt hvmemul_ctxt;
    struct segment_register *sreg;
    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
    unsigned long intr_info;
    unsigned int emulations = 0;

    /* Get-and-clear VM_ENTRY_INTR_INFO. */
    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, 0);

    hvm_emulate_prepare(&hvmemul_ctxt, regs);

    if ( vio->io_state == HVMIO_completed )
        realmode_emulate_one(&hvmemul_ctxt);

    /* Only deliver interrupts into emulated real mode. */
    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
         (intr_info & INTR_INFO_VALID_MASK) )
    {
        realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
        intr_info = 0;
    }

    curr->arch.hvm_vmx.vmx_emulate = 1;
    while ( curr->arch.hvm_vmx.vmx_emulate &&
            !softirq_pending(smp_processor_id()) &&
            (vio->io_state == HVMIO_none) )
    {
        /*
         * Check for pending interrupts only every 16 instructions, because
         * hvm_local_events_need_delivery() is moderately expensive, and only
         * in real mode, because we don't emulate protected-mode IDT vectoring.
         */
        if ( unlikely(!(++emulations & 15)) &&
             curr->arch.hvm_vmx.vmx_realmode && 
             hvm_local_events_need_delivery(curr) )
            break;

        realmode_emulate_one(&hvmemul_ctxt);

        /* Keep emulating only while our segment state is unsafe to run natively. */
        if ( curr->arch.hvm_vmx.vmx_realmode )
            curr->arch.hvm_vmx.vmx_emulate = 
                (curr->arch.hvm_vmx.vm86_segment_mask != 0);
        else
            curr->arch.hvm_vmx.vmx_emulate = 
                 ((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3)
                  || (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3));
    }

    /* Need to emulate next time if we've started an IO operation */
    if ( vio->io_state != HVMIO_none )
        curr->arch.hvm_vmx.vmx_emulate = 1;

    if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
    {
        /*
         * Cannot enter protected mode with bogus selector RPLs and DPLs.
         * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
         * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
         */
        sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        hvmemul_ctxt.seg_reg_dirty |=
            (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
            (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
    }

    hvm_emulate_writeback(&hvmemul_ctxt);

    /* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, intr_info);
}
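
The DPL fix-up above rewrites bits 6:5 of each segment's access-rights field.
A self-contained sketch of the operation; the helper name is illustrative:

#include <stdint.h>

/* In the VMX access-rights encoding (as in segment descriptors) DPL is
 * bits 6:5, and a selector's RPL is its low two bits. Setting DPL = RPL
 * keeps the data-segment privilege check (RPL <= DPL) satisfied once the
 * guest re-enters protected mode with these real-mode selectors. */
static inline uint32_t dpl_from_rpl(uint32_t access_rights, uint16_t sel)
{
    return (access_rights & ~(3u << 5)) | ((uint32_t)(sel & 3) << 5);
}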
Example #9
static void realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct vcpu *curr = current;
    uint32_t intr_info;
    int rc;

    perfc_incr(realmode_emulations);

    rc = hvm_emulate_one(hvmemul_ctxt);

    if ( rc == X86EMUL_UNHANDLEABLE )
    {
        gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
        goto fail;
    }

    if ( rc == X86EMUL_EXCEPTION )
    {
        if ( !hvmemul_ctxt->exn_pending )
        {
            intr_info = __vmread(VM_ENTRY_INTR_INFO);
            __vmwrite(VM_ENTRY_INTR_INFO, 0);
            if ( !(intr_info & INTR_INFO_VALID_MASK) )
            {
                gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
                goto fail;
            }
            hvmemul_ctxt->exn_vector = (uint8_t)intr_info;
            hvmemul_ctxt->exn_insn_len = 0;
        }

        if ( unlikely(curr->domain->debugger_attached) &&
             ((hvmemul_ctxt->exn_vector == TRAP_debug) ||
              (hvmemul_ctxt->exn_vector == TRAP_int3)) )
        {
            domain_pause_for_debugger();
        }
        else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        {
            gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
                     hvmemul_ctxt->exn_vector);
            goto fail;
        }
        else
        {
            realmode_deliver_exception(
                hvmemul_ctxt->exn_vector,
                hvmemul_ctxt->exn_insn_len,
                hvmemul_ctxt);
        }
    }

    return;

 fail:
    gdprintk(XENLOG_ERR,
             "Real-mode emulation failed @ %04x:%08lx: "
             "%02x %02x %02x %02x %02x %02x\n",
             hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel,
             hvmemul_ctxt->insn_buf_eip,
             hvmemul_ctxt->insn_buf[0], hvmemul_ctxt->insn_buf[1],
             hvmemul_ctxt->insn_buf[2], hvmemul_ctxt->insn_buf[3],
             hvmemul_ctxt->insn_buf[4], hvmemul_ctxt->insn_buf[5]);
    domain_crash(curr->domain);
}