Example #1
int evtchn_unmask(unsigned int port)
{
    struct domain *d = current->domain;
    struct vcpu   *v;

    ASSERT(spin_is_locked(&d->event_lock));

    if ( unlikely(!port_is_valid(d, port)) )
        return -EINVAL;

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending(). 
     */
    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
         test_bit          (port, &shared_info(d, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    return 0;
}
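For context, the evtchn_pending, evtchn_mask and evtchn_pending_sel fields manipulated above are part of the guest-visible ABI shared between Xen and the domain. A simplified sketch of those declarations, loosely following xen/include/public/xen.h (padding, compat handling and unrelated fields omitted):

#include <stdint.h>

/* Simplified sketch of the shared ABI (see xen/include/public/xen.h).
 * xen_ulong_t is unsigned long on x86-64; XEN_LEGACY_MAX_VCPUS is 32. */
typedef unsigned long xen_ulong_t;
#define XEN_LEGACY_MAX_VCPUS 32

struct vcpu_info {
    uint8_t     evtchn_upcall_pending;  /* event delivery pending for this vCPU */
    uint8_t     evtchn_upcall_mask;     /* PV only: block event delivery while set */
    xen_ulong_t evtchn_pending_sel;     /* which evtchn_pending[] words need scanning */
    /* ... arch and time info omitted ... */
};

struct shared_info {
    struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
    xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];  /* one bit per event port */
    xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];     /* set bit = port masked */
    /* ... wallclock and arch fields omitted ... */
};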
Example #2
File: event_2l.c Project: fdario/xen
static void evtchn_2l_print_state(struct domain *d,
                                  const struct evtchn *evtchn)
{
    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];

    printk("%d", !!test_bit(evtchn->port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)));
}
Example #3
File: irq.c Project: jonludlam/xen-arm
void hvm_maybe_deassert_evtchn_irq(void)
{
    struct domain *d = current->domain;
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Deassert the callback IRQ once no event upcall is left pending on vCPU0. */
    if ( hvm_irq->callback_via_asserted &&
         !vcpu_info(d->vcpu[0], evtchn_upcall_pending) )
        hvm_set_callback_irq_level(d->vcpu[0]);
}
Example #4
File: iret.c Project: fdario/xen
unsigned long do_iret(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct iret_context iret_saved;
    struct vcpu *v = current;

    if ( unlikely(copy_from_user(&iret_saved, (void *)regs->rsp,
                                 sizeof(iret_saved))) )
    {
        gprintk(XENLOG_ERR,
                "Fault while reading IRET context from guest stack\n");
        goto exit_and_crash;
    }

    /* Returning to user mode? */
    if ( (iret_saved.cs & 3) == 3 )
    {
        if ( unlikely(pagetable_is_null(v->arch.guest_table_user)) )
        {
            gprintk(XENLOG_ERR,
                    "Guest switching to user mode with no user page tables\n");
            goto exit_and_crash;
        }
        toggle_guest_mode(v);
    }

    if ( VM_ASSIST(v->domain, architectural_iopl) )
        v->arch.pv_vcpu.iopl = iret_saved.rflags & X86_EFLAGS_IOPL;

    regs->rip    = iret_saved.rip;
    regs->cs     = iret_saved.cs | 3; /* force guest privilege */
    regs->rflags = ((iret_saved.rflags & ~(X86_EFLAGS_IOPL|X86_EFLAGS_VM))
                    | X86_EFLAGS_IF);
    regs->rsp    = iret_saved.rsp;
    regs->ss     = iret_saved.ss | 3; /* force guest privilege */

    if ( !(iret_saved.flags & VGCF_in_syscall) )
    {
        regs->entry_vector &= ~TRAP_syscall;
        regs->r11 = iret_saved.r11;
        regs->rcx = iret_saved.rcx;
    }

    /* Restore upcall mask from supplied EFLAGS.IF. */
    vcpu_info(v, evtchn_upcall_mask) = !(iret_saved.rflags & X86_EFLAGS_IF);

    async_exception_cleanup(v);

    /* Saved %rax gets written back to regs->rax in entry.S. */
    return iret_saved.rax;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}
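The iret_saved structure copied from the guest stack above is the frame a 64-bit PV guest leaves at %rsp before issuing the HYPERVISOR_iret hypercall. A sketch of that layout, matching the field accesses in do_iret() (the public ABI headers declare essentially the same struct iret_context):

#include <stdint.h>

/* Sketch of the 64-bit PV iret frame consumed by do_iret() above. */
struct iret_context {
    /* Top of stack (%rsp at point of hypercall). */
    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
    /* Bottom of iret stack frame; 'flags' carries VGCF_in_syscall. */
};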
Example #5
File: irq.c Project: jonludlam/xen-arm
static void hvm_set_callback_irq_level(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, pdev, pintx, asserted;

    ASSERT(v->vcpu_id == 0);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
    asserted = !!vcpu_info(v, evtchn_upcall_pending);
    if ( hvm_irq->callback_via_asserted == asserted )
        goto out;
    hvm_irq->callback_via_asserted = asserted;

    /* Callback status has changed. Update the callback via. */
    switch ( hvm_irq->callback_via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi;
        if ( asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        else if ( !asserted && (--hvm_irq->gsi_assert_count[gsi] == 0) )
        {
            if ( gsi <= 15 )
                vpic_irq_negative_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev;
        pintx = hvm_irq->callback_via.pci.intx;
        if ( asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        else
            __hvm_pci_intx_deassert(d, pdev, pintx);
        break;
    default:
        break;
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
Example #6
File: event_2l.c Project: fdario/xen
static void evtchn_2l_unmask(struct domain *d, struct evtchn *evtchn)
{
    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];
    unsigned int port = evtchn->port;

    /*
     * These operations must happen in strict order. Based on
     * evtchn_2l_set_pending() above.
     */
    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
         test_bit          (port, &shared_info(d, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }
}
Example #7
static int evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    int vcpuid;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return 1;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }
    
    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return 0;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }

    return 0;
}
Example #8
File: event_2l.c Project: fdario/xen
static void evtchn_2l_set_pending(struct vcpu *v, struct evtchn *evtchn)
{
    struct domain *d = v->domain;
    unsigned int port = evtchn->port;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    evtchn_check_pollers(d, port);
}
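For comparison, the consuming side: a PV guest's event upcall walks the same bitmaps in the opposite direction. The sketch below is hypothetical (it is not taken from the listed files; handle_event() and the structure layout sketched under example #1 are assumptions), but it shows which bits evtchn_2l_set_pending() and evtchn_2l_unmask() expect the guest to consume:

/* Hypothetical guest-side dispatch loop; assumes the struct shared_info /
 * struct vcpu_info sketch shown after example #1. */
extern void handle_event(unsigned int port);   /* assumed per-port handler */

static void example_evtchn_dispatch(struct shared_info *s, struct vcpu_info *vi)
{
    unsigned long sel, pending;
    unsigned int word, bit;

    vi->evtchn_upcall_pending = 0;
    /* Atomically claim the selector so a later event re-raises the upcall. */
    sel = __atomic_exchange_n(&vi->evtchn_pending_sel, 0, __ATOMIC_ACQ_REL);

    while ( sel != 0 )
    {
        word = __builtin_ctzl(sel);
        sel &= sel - 1;

        /* Deliver only ports that are pending and not masked. */
        pending = s->evtchn_pending[word] & ~s->evtchn_mask[word];
        while ( pending != 0 )
        {
            bit = __builtin_ctzl(pending);
            pending &= pending - 1;
            __atomic_fetch_and(&s->evtchn_pending[word], ~(1UL << bit),
                               __ATOMIC_ACQ_REL);
            handle_event(word * sizeof(xen_ulong_t) * 8 + bit);
        }
    }
}

Real guests (Linux's 2-level event channel code, for instance) layer fairness and re-entrancy handling on top of this basic shape.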
Example #9
File: iret.c Project: fdario/xen
unsigned int compat_iret(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct vcpu *v = current;
    u32 eflags;

    /* Trim stack pointer to 32 bits. */
    regs->rsp = (u32)regs->rsp;

    /* Restore EAX (clobbered by hypercall). */
    if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
    {
        domain_crash(v->domain);
        return 0;
    }

    /* Restore CS and EIP. */
    if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
    {
        domain_crash(v->domain);
        return 0;
    }

    /*
     * Fix up and restore EFLAGS. We fix up in a local staging area
     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
     */
    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
    {
        domain_crash(v->domain);
        return 0;
    }

    if ( VM_ASSIST(v->domain, architectural_iopl) )
        v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;

    regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;

    if ( unlikely(eflags & X86_EFLAGS_VM) )
    {
        /*
         * Cannot return to VM86 mode: inject a GP fault instead. Note that
         * the GP fault is reported on the first VM86 mode instruction, not on
         * the IRET (which is why we can simply leave the stack frame as-is
         * (except for perhaps having to copy it), which in turn seems better
         * than teaching create_bounce_frame() to needlessly deal with vm86
         * mode frames).
         */
        const struct trap_info *ti;
        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
        unsigned int i;
        int rc = 0;

        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
                 regs->esp, ksp);
        if ( ksp < regs->esp )
        {
            for ( i = 1; i < 10; ++i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        else if ( ksp > regs->esp )
        {
            for ( i = 9; i > 0; --i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        if ( rc )
        {
            domain_crash(v->domain);
            return 0;
        }
        regs->esp = ksp;
        regs->ss = v->arch.pv_vcpu.kernel_ss;

        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
        if ( TI_GET_IF(ti) )
            eflags &= ~X86_EFLAGS_IF;
        regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
                          X86_EFLAGS_NT|X86_EFLAGS_TF);
        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
        {
            domain_crash(v->domain);
            return 0;
        }
        regs->eip = ti->address;
        regs->cs = ti->cs;
    }
    else if ( unlikely(ring_0(regs)) )
    {
        domain_crash(v->domain);
        return 0;
    }
    else if ( ring_1(regs) )
        regs->esp += 16;
    /* Return to ring 2/3: restore ESP and SS. */
    else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
              __get_user(regs->esp, (u32 *)regs->rsp + 4) )
    {
        domain_crash(v->domain);
        return 0;
    }

    /* Restore upcall mask from supplied EFLAGS.IF. */
    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);

    async_exception_cleanup(v);

    /*
     * The hypercall exit path will overwrite EAX with this return
     * value.
     */
    return regs->eax;
}
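compat_iret() pulls the 32-bit guest's frame off the stack one slot at a time with __get_user(). Laid out as a structure, the slots accessed above look like this (illustrative only, not a declaration taken from the Xen headers; slots 6-9, copied wholesale on the VM86 path, are omitted):

#include <stdint.h>

/* Illustrative 32-bit compat iret frame: slot N corresponds to
 * (u32 *)regs->rsp + N in compat_iret() above. */
struct compat_iret_frame {
    uint32_t eax;     /* slot 0: EAX clobbered by the hypercall, restored first */
    uint32_t eip;     /* slot 1: return address */
    uint32_t cs;      /* slot 2: return code segment */
    uint32_t eflags;  /* slot 3: sanitised (IOPL stripped, IF forced on) */
    uint32_t esp;     /* slot 4: read only when returning to ring 2/3 */
    uint32_t ss;      /* slot 5: read only when returning to ring 2/3 */
};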