Example #1
static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}
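
The barrier() right before the evtchn_upcall_pending test in Example #1 (the same line recurs in the later versions) is a compiler-only barrier: mask and pending are negotiated between this vCPU and the hypervisor that runs it, which is why a compiler barrier rather than an SMP fence appears here, but the compiler must still be kept from sinking the unmask store below the pending load, or an event that arrived while masked could be missed. A minimal userspace sketch of the same unmask-then-check pattern follows; every name in it is an illustrative stand-in, not Xen code.

#include <stdio.h>

/* Compiler-only barrier, same idea as the kernel's barrier(). */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

/* Stand-ins for the two vcpu_info fields used above. */
struct demo_vcpu_info {
	unsigned char evtchn_upcall_mask;
	unsigned char evtchn_upcall_pending;
};

static void demo_force_callback(void)
{
	puts("pending event noticed after unmask; forcing the upcall");
}

static void demo_unmask_then_check(struct demo_vcpu_info *vcpu)
{
	vcpu->evtchn_upcall_mask = 0;	 /* unmask first ... */
	compiler_barrier();		 /* ... keep the store above ... */
	if (vcpu->evtchn_upcall_pending) /* ... the pending check */
		demo_force_callback();
}

int main(void)
{
	struct demo_vcpu_info v = {
		.evtchn_upcall_mask = 1,
		.evtchn_upcall_pending = 1,
	};

	demo_unmask_then_check(&v);
	return 0;
}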
Example #2
static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */

	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}
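
Example #2 assumes it cannot be preempted (see its opening comment); Example #4 below drops that assumption and brackets the whole sequence with preempt_disable()/preempt_enable(), because the pending check has to run on the VCPU whose mask was just cleared. An illustrative interleaving of the window, inferred from Example #4's comment rather than taken from the listing:

	/* on CPU0: vcpu = this_cpu_read(xen_vcpu);  (CPU0's vcpu_info) */
	/* on CPU0: vcpu->evtchn_upcall_mask = 0;                       */
	/*          ... preempted here, task migrates to CPU1 ...       */
	/* on CPU1: the evtchn_upcall_pending test reads CPU0's flag,   */
	/*          i.e. a VCPU this task is no longer running on       */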
Example #3
__visible void xen_restore_fl(unsigned long flags)
{
    struct vcpu_info *vcpu;

    /* convert from IF type flag */
    flags = !(flags & X86_EFLAGS_IF);

    /* See xen_irq_enable() for why preemption must be disabled. */
    preempt_disable();
    vcpu = this_cpu_read(xen_vcpu);
    vcpu->evtchn_upcall_mask = flags;

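    /*
     * Only the unmask path (flags == 0) re-checks for a reschedule via
     * preempt_enable(); with events still masked, the no_resched
     * variant skips that check rather than schedule with event
     * delivery disabled.
     */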
    if (flags == 0) {
        barrier(); /* unmask then check (avoid races) */
        if (unlikely(vcpu->evtchn_upcall_pending))
            xen_force_evtchn_callback();
        preempt_enable();
    } else
        preempt_enable_no_resched();
}
Example #4
asmlinkage __visible void xen_irq_enable(void)
{
    struct vcpu_info *vcpu;

    /*
     * We may be preempted as soon as vcpu->evtchn_upcall_mask is
     * cleared, so disable preemption to ensure we check for
     * events on the VCPU we are still running on.
     */
    preempt_disable();

    vcpu = this_cpu_read(xen_vcpu);
    vcpu->evtchn_upcall_mask = 0;

    /* Doesn't matter if we get preempted here, because any
       pending event will get dealt with anyway. */

    barrier(); /* unmask then check (avoid races) */
    if (unlikely(vcpu->evtchn_upcall_pending))
        xen_force_evtchn_callback();

    preempt_enable();
}
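
For context, and as an assumption about plumbing the listing does not show: on a Xen PV guest these routines are installed as paravirt irq ops, so the generic local_irq_save()/local_irq_restore() macros dispatch to them (the exact wiring varies by kernel version). A sketch of a typical caller:

	unsigned long flags;

	local_irq_save(flags);	  /* pv path: xen_save_fl() + xen_irq_disable() */
	/* ... critical section, Xen event delivery masked ... */
	local_irq_restore(flags); /* pv path: xen_restore_fl(flags) */

	local_irq_disable();	  /* pv path: xen_irq_disable() */
	local_irq_enable();	  /* pv path: xen_irq_enable() */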