Example #1
/* Modify the p2m type of a range of gfns from ot to nt,
 * resetting access permissions to the domain default. */
void p2m_change_type_range(struct domain *d,
                           unsigned long start, unsigned long end,
                           p2m_type_t ot, p2m_type_t nt)
{
    p2m_access_t a;
    p2m_type_t pt;
    unsigned long gfn;
    mfn_t mfn;
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

    BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));

    p2m_lock(p2m);
    p2m->defer_nested_flush = 1;

    for ( gfn = start; gfn < end; gfn++ )
    {
        mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
        if ( pt == ot )
            set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
    }

    p2m->defer_nested_flush = 0;
    if ( nestedhvm_enabled(d) )
        p2m_flush_nestedp2m(d);
    p2m_unlock(p2m);
}
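
For context, a typical caller flips a whole GFN range between RAM types; HAP dirty-VRAM tracking in Xen, for instance, moves frame-buffer pages between p2m_ram_rw and p2m_ram_logdirty. A minimal sketch of such a caller (the function name and range values are illustrative, not Xen's actual dirty-VRAM code):

/* Sketch: put a frame-buffer range into log-dirty mode, in the
 * style of HAP dirty-VRAM tracking. begin_pfn/nr are illustrative. */
static void track_vram_sketch(struct domain *d,
                              unsigned long begin_pfn, unsigned long nr)
{
    /* Writes to these pages now fault, so Xen can record them dirty. */
    p2m_change_type_range(d, begin_pfn, begin_pfn + nr,
                          p2m_ram_rw, p2m_ram_logdirty);
    /* Real callers also flush TLBs so stale rw mappings disappear. */
}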
Example #2
File: intr.c Project: 0day-ci/xen
static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
    vintr_t intr;

    ASSERT(intack.source != hvm_intsrc_none);

    if ( nestedhvm_enabled(v->domain) ) {
        struct nestedvcpu *nv = &vcpu_nestedhvm(v);
        if ( nv->nv_vmentry_pending ) {
            struct vmcb_struct *gvmcb = nv->nv_vvmcx;

            /* Check whether the l1 guest injects an interrupt into
             * the l2 guest via VINTR; if so, return here, otherwise
             * the l2 guest loses interrupts.
             */
            ASSERT(gvmcb != NULL);
            intr = vmcb_get_vintr(gvmcb);
            if ( intr.fields.irq )
                return;
        }
    }

    HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                vmcb->eventinj.fields.v ? vmcb->eventinj.fields.vector : -1);

    /*
     * Create a dummy virtual interrupt to intercept as soon as the
     * guest can accept the real interrupt.
     *
     * TODO: Better NMI handling. We need a way to skip a MOV SS interrupt
     * shadow. This is hard to do without hardware support. Also we should
     * not be waiting for EFLAGS.IF to become 1.
     */

    /*
     * NMI-blocking window is handled by IRET interception. We should not
     * inject a VINTR in this case as VINTR is unaware of NMI-blocking and
     * hence we can enter an endless loop (VINTR intercept fires, yet
     * hvm_interrupt_blocked() still indicates NMI-blocking is active, so
     * we inject a VINTR, ...).
     */
    if ( (intack.source == hvm_intsrc_nmi) &&
         (general1_intercepts & GENERAL1_INTERCEPT_IRET) )
        return;

    intr = vmcb_get_vintr(vmcb);
    intr.fields.irq     = 1;
    intr.fields.vector  = 0;
    intr.fields.prio    = intack.vector >> 4;
    intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic);
    vmcb_set_vintr(vmcb, intr);
    vmcb_set_general1_intercepts(
        vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR);
}
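
The dummy VINTR set up here is torn down on the other side: once the guest can accept the interrupt, the VINTR intercept fires and the exit handler clears both the fake virtual interrupt and the intercept, letting the next pass through svm_intr_assist() inject the real one. A sketch of that step, modelled on the VMEXIT_VINTR case in Xen's svm.c (the helper name is ours):

/* Sketch: undo svm_enable_intr_window() once the window opens. */
static void svm_intr_window_open(struct vmcb_struct *vmcb)
{
    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
    vintr_t intr = vmcb_get_vintr(vmcb);

    intr.fields.irq = 0;                              /* drop dummy VINTR */
    general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR; /* stop intercepting */

    vmcb_set_vintr(vmcb, intr);
    vmcb_set_general1_intercepts(vmcb, general1_intercepts);
}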
Example #3
/* Return 1 iff the vcpu is currently executing its l2 guest:
 * nested HVM enabled, HAP paging mode, and no vmentry/vmexit
 * emulation in progress. */
bool_t
nestedhvm_is_n2(struct vcpu *v)
{
    if ( !nestedhvm_enabled(v->domain) ||
         nestedhvm_vmswitch_in_progress(v) ||
         !nestedhvm_paging_mode_hap(v) )
        return 0;

    if ( nestedhvm_vcpu_in_guestmode(v) )
        return 1;

    return 0;
}
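
The predicate's main consumer is p2m selection: while the vcpu runs its l2 guest, translations must go through the nested p2m rather than the host p2m. A sketch in the shape of Xen's p2m_get_p2m() (the nested-p2m lookup helpers vary across Xen versions):

/* Sketch: pick the p2m a translation should use for this vcpu. */
struct p2m_domain *
p2m_get_p2m(struct vcpu *v)
{
    if ( !nestedhvm_is_n2(v) )
        return p2m_get_hostp2m(v->domain);

    /* Running the l2 guest: use the nested p2m keyed on the
     * l1 guest's p2m base (helper name varies across versions). */
    return p2m_get_nestedp2m(v, nhvm_vcpu_p2m_base(v));
}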
Example #4
void
nestedhvm_vcpu_destroy(struct vcpu *v)
{
    if ( nestedhvm_enabled(v->domain) && hvm_funcs.nhvm_vcpu_destroy )
        hvm_funcs.nhvm_vcpu_destroy(v);
}
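
This thin wrapper dispatches through hvm_funcs, which each vendor fills in at boot; on SVM the hook points at the nested-SVM teardown routine. A sketch of the wiring, abridged from the shape of svm_function_table in Xen's svm.c (only the relevant hooks shown):

/* Sketch: vendor wiring for the nested-virt vcpu hooks (SVM side). */
static struct hvm_function_table __read_mostly svm_function_table = {
    .name                 = "SVM",
    .nhvm_vcpu_initialise = nsvm_vcpu_initialise,
    .nhvm_vcpu_destroy    = nsvm_vcpu_destroy,
    /* ... remaining hooks elided ... */
};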
Example #5
File: intr.c Project: 0day-ci/xen
void svm_intr_assist(void)
{
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    struct hvm_intack intack;
    enum hvm_intblk intblk;

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            return;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( intblk == hvm_intblk_svm_gif ) {
            ASSERT(nestedhvm_enabled(v->domain));
            return;
        }

        /* Interrupts for the nested guest are already
         * in the vmcb.
         */
        if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
        {
            int rc;

            /* The l2 guest was running when an interrupt for
             * the l1 guest occurred.
             */
            rc = nestedsvm_vcpu_interrupt(v, intack);
            switch (rc) {
            case NSVM_INTR_NOTINTERCEPTED:
                /* Inject the interrupt into the l2 guest directly. */
                break;
            case NSVM_INTR_NOTHANDLED:
            case NSVM_INTR_FORCEVMEXIT:
                return;
            case NSVM_INTR_MASKED:
                /* Guest already enabled an interrupt window. */
                return;
            default:
                panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x",
                    __func__, rc);
            }
        }

        /*
         * Pending IRQs must be delayed if:
         * 1. An event is already pending. This is despite the fact that SVM
         *    provides a VINTR delivery method quite separate from the EVENTINJ
         *    mechanism. The event delivery can arbitrarily delay the injection
         *    of the vintr (for example, if the exception is handled via an
         *    interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
         *    - the vTPR could be modified upwards, so we need to wait until
         *      the exception is delivered before we can safely decide that an
         *      interrupt is deliverable; and
         *    - the guest might look at the APIC/PIC state, so we ought not to
         *      have cleared the interrupt out of the IRR.
         * 2. The IRQ is masked.
         */
        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
        {
            svm_enable_intr_window(v, intack);
            return;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        svm_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        svm_inject_extint(v, intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        svm_enable_intr_window(v, intack);
}
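
The injection at the bottom of the loop is a plain EVENTINJ write; for completeness, a sketch of svm_inject_extint() in the shape it has in this same intr.c:

/* Sketch: inject an external interrupt via the VMCB's EVENTINJ field. */
static void svm_inject_extint(struct vcpu *v, int vector)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    eventinj_t event;

    event.bytes = 0;
    event.fields.v = 1;                         /* event valid */
    event.fields.type = X86_EVENTTYPE_EXT_INTR; /* external interrupt */
    event.fields.vector = vector;

    vmcb->eventinj = event;
}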