Example #1
void do_hypervisor_callback(struct pt_regs *regs) {
	unsigned long l1, l2, l1i, l2i;
	unsigned int port;
	int cpu = 0;
	shared_info_t *s = g_sharedInfoArea;
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	in_callback = 1;

	vcpu_info->evtchn_upcall_pending = 0;
	/* NB. On x86 no barrier is needed -- the XCHG below is itself a barrier. */
#if !defined(__i386__) && !defined(__x86_64__)
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
	/* Atomically fetch and clear the per-vCPU selector word. */
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (l1 != 0) {
		l1i = __ffs(l1);	/* lowest pending group */
		l1 &= ~(1UL << l1i);

		/* Drain every active channel in this group. */
		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);
			l2 &= ~(1UL << l2i);

			/* Recover the global port number from (group, bit). */
			port = (l1i * (sizeof(unsigned long) * 8)) + l2i;
			do_event(port, regs);
		}
	}

	in_callback = 0;
}
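
Every example in this section scans the same two-level bitmap that Xen publishes in the shared info page: xchg() atomically grabs the selector word, each set selector bit names one word of evtchn_pending, and active_evtchns() typically filters that word against evtchn_mask. Below is a minimal self-contained sketch of that lookup; the field names follow the public Xen headers, but mock_shared_info and main() are illustrative scaffolding, and active_evtchns() only loosely mirrors the Mini-OS macro.

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (sizeof(unsigned long) * 8)

/* Mock of the two relevant shared-info arrays (illustrative). */
struct mock_shared_info {
	unsigned long evtchn_pending[sizeof(unsigned long) * 8];
	unsigned long evtchn_mask[sizeof(unsigned long) * 8];
};

/* A channel is active when it is pending and not masked. */
static unsigned long active_evtchns(struct mock_shared_info *s,
				    unsigned int idx)
{
	return s->evtchn_pending[idx] & ~s->evtchn_mask[idx];
}

int main(void)
{
	struct mock_shared_info s;
	unsigned int port = 70;				/* arbitrary example port */
	unsigned int l1i = port / BITS_PER_LONG;	/* selector bit */
	unsigned int l2i = port % BITS_PER_LONG;	/* bit within the group */

	memset(&s, 0, sizeof(s));
	s.evtchn_pending[l1i] |= 1UL << l2i;	/* as Xen would mark it */

	if (active_evtchns(&s, l1i) & (1UL << l2i))
		printf("port %u = (%u * %u) + %u is active\n",
		       port, l1i, (unsigned int)BITS_PER_LONG, l2i);
	return 0;
}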
Example #2
/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long  l1, l2;
	unsigned int   l1i, l2i, port;
	int            irq, cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];

	vcpu_info->evtchn_upcall_pending = 0;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master pending flag /before/ clearing selector flag. */
	wmb();
#endif
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		/* Re-read until the group drains; the per-port ack in
		 * each handler path clears the pending bit. */
		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);

			port = (l1i * BITS_PER_LONG) + l2i;
			if ((irq = evtchn_to_irq[port]) != -1)
				do_IRQ(irq, regs);	/* bound to a kernel IRQ */
			else {
				exit_idle();
				evtchn_device_upcall(port);	/* user-space device */
			}
		}
	}
}
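
The evtchn_to_irq[] lookup above is a flat port-to-IRQ table: -1 marks ports not bound into the kernel IRQ subsystem, which instead fall through to the user-space event-channel device. A hypothetical miniature of that dispatch follows; the stub handlers and table setup are stand-ins, and only the -1 convention comes from the code above.

#include <stdio.h>

#define NR_EVENT_CHANNELS 4096	/* 2-level ABI limit on a 64-bit guest */

static int evtchn_to_irq[NR_EVENT_CHANNELS];

static void do_IRQ_stub(int irq)
{
	printf("kernel IRQ %d\n", irq);	/* stands in for do_IRQ() */
}

static void device_upcall_stub(unsigned int port)
{
	printf("user port %u\n", port);	/* stands in for evtchn_device_upcall() */
}

static void dispatch(unsigned int port)
{
	int irq = evtchn_to_irq[port];

	if (irq != -1)
		do_IRQ_stub(irq);		/* bound: normal IRQ path */
	else
		device_upcall_stub(port);	/* unbound: /dev/xen/evtchn path */
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;	/* nothing bound initially */

	evtchn_to_irq[3] = 42;		/* pretend port 3 is bound to IRQ 42 */
	dispatch(3);
	dispatch(5);
	return 0;
}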
Example #3
void 
evtchn_do_upcall(struct trapframe *frame) 
{
	unsigned long  l1, l2;
	unsigned int   l1i, l2i, port;
	int            irq, cpu;
	shared_info_t *s;
	vcpu_info_t   *vcpu_info;
	
	cpu = PCPU_GET(cpuid);
	s = HYPERVISOR_shared_info;
	vcpu_info = &s->vcpu_info[cpu];

	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);

	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);
		
		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);

			port = (l1i * LONG_BIT) + l2i;
			if ((irq = evtchn_to_irq[port]) != -1) {
				struct intsrc *isrc = intr_lookup_source(irq);
				/*
				 * Ack: mask and clear the channel before
				 * running its handlers.
				 */
				mask_evtchn(port);
				clear_evtchn(port);

				intr_execute_handlers(isrc, frame);
			} else {
				evtchn_device_upcall(port);
			}
		}
	}
}
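
Unlike Example #2, which lets the IRQ layer acknowledge each channel, this fragment acks inline: it masks the port and clears its pending bit before running the handlers, so an event raised mid-handler is latched rather than lost. Here is a hedged sketch of what those two helpers amount to, written as plain bit operations on a mock structure; real implementations use atomic bit ops (e.g. the synch_*_bit family) on the live shared page.

#include <limits.h>

#ifndef LONG_BIT	/* not guaranteed outside XSI systems */
#define LONG_BIT ((int)(sizeof(long) * CHAR_BIT))
#endif

/* Illustrative stand-in for the shared-info bitmap pair. */
struct mock_shared_info {
	unsigned long evtchn_pending[LONG_BIT];
	unsigned long evtchn_mask[LONG_BIT];
};

static void mask_evtchn(struct mock_shared_info *s, unsigned int port)
{
	/* Masked channels stay pending but are skipped by active_evtchns(). */
	s->evtchn_mask[port / LONG_BIT] |= 1UL << (port % LONG_BIT);
}

static void clear_evtchn(struct mock_shared_info *s, unsigned int port)
{
	/* Drop the pending bit so the next event re-raises the channel. */
	s->evtchn_pending[port / LONG_BIT] &= ~(1UL << (port % LONG_BIT));
}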
Example #4
/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long       l1, l2;
	unsigned long       masked_l1, masked_l2;
	unsigned int        l1i, l2i, start_l1i, start_l2i, port, count, i;
	int                 irq;
	unsigned int        cpu = smp_processor_id();
	shared_info_t      *s = HYPERVISOR_shared_info;
	vcpu_info_t        *vcpu_info = &s->vcpu_info[cpu];

	exit_idle();
	irq_enter();

	do {
		/* Avoid a callback storm when we reenable delivery. */
		vcpu_info->evtchn_upcall_pending = 0;

		/* Nested invocations bail immediately. */
		if (unlikely(per_cpu(upcall_count, cpu)++))
			break;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif

		/*
		 * Handle timer interrupts before all others, so that all
		 * hardirq handlers see an up-to-date system time even if we
		 * have just woken from a long idle period.
		 */
		if ((irq = __get_cpu_var(virq_to_irq)[VIRQ_TIMER]) != -1) {
			port = evtchn_from_irq(irq);
			l1i = port / BITS_PER_LONG;
			l2i = port % BITS_PER_LONG;
			if (active_evtchns(cpu, s, l1i) & (1ul<<l2i))
				do_IRQ(irq, regs);
		}

		l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);

		start_l1i = l1i = per_cpu(current_l1i, cpu);
		start_l2i = per_cpu(current_l2i, cpu);

		for (i = 0; l1 != 0; i++) {
			masked_l1 = l1 & ((~0UL) << l1i);
			/* If we masked out all events, wrap to beginning. */
			if (masked_l1 == 0) {
				l1i = l2i = 0;
				continue;
			}
			l1i = __ffs(masked_l1);

			l2 = active_evtchns(cpu, s, l1i);
			l2i = 0; /* usually scan entire word from start */
			if (l1i == start_l1i) {
				/* We scan the starting word in two parts. */
				if (i == 0)
					/* 1st time: start in the middle */
					l2i = start_l2i;
				else
					/* 2nd time: mask bits done already */
					l2 &= (1ul << start_l2i) - 1;
			}

			do {
				masked_l2 = l2 & ((~0UL) << l2i);
				if (masked_l2 == 0)
					break;
				l2i = __ffs(masked_l2);

				/* process port */
				port = (l1i * BITS_PER_LONG) + l2i;
				if ((irq = evtchn_to_irq[port]) != -1)
					do_IRQ(irq, regs);
				else
					evtchn_device_upcall(port);

				l2i = (l2i + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				per_cpu(current_l1i, cpu) =
					l2i ? l1i : (l1i + 1) % BITS_PER_LONG;
				per_cpu(current_l2i, cpu) = l2i;

			} while (l2i != 0);

			/* Scan start_l1i twice; all others once. */
			if ((l1i != start_l1i) || (i != 0))
				l1 &= ~(1UL << l1i);

			l1i = (l1i + 1) % BITS_PER_LONG;
		}

		/* If there were nested callbacks then we have more to do. */
		count = per_cpu(upcall_count, cpu);
		per_cpu(upcall_count, cpu) = 0;
	} while (unlikely(count != 1));

	irq_exit();
}
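
What sets this version apart from Examples #1 through #3 is scan order: the earlier loops always restart at bit 0, so under sustained load low-numbered ports can starve high-numbered ones, whereas this one resumes just past the last serviced port, wraps around, and gives the starting word a second partial pass so no bit is skipped. The wrap-around scan can be reduced to a standalone function; the following is a hypothetical simplification of the idea, not the kernel code (it tracks unvisited bits in the word itself instead of per-CPU cursors).

#include <stdio.h>

#define BITS_PER_LONG ((unsigned int)(sizeof(unsigned long) * 8))

/* Index of the least significant set bit; x must be non-zero.
 * Mirrors the semantics of the kernel's __ffs(). */
static unsigned int ffs_ul(unsigned long x)
{
	unsigned int i = 0;

	while (!(x & 1UL)) {
		x >>= 1;
		i++;
	}
	return i;
}

/* Visit every set bit of 'word', starting at 'start' and wrapping,
 * so no bit position is favored across successive calls. */
static void scan_from(unsigned long word, unsigned int start)
{
	unsigned int i = start;

	while (word != 0) {
		unsigned long masked = word & (~0UL << i);

		if (masked == 0) {	/* nothing left at or above i: wrap */
			i = 0;
			continue;
		}
		i = ffs_ul(masked);
		printf("bit %u\n", i);
		word &= ~(1UL << i);	/* mark this bit as visited */
		i = (i + 1) % BITS_PER_LONG;
	}
}

int main(void)
{
	/* Bits 1, 5 and 9 set; starting at 6 visits 9 first,
	 * then wraps around to 1 and 5. */
	scan_from((1UL << 1) | (1UL << 5) | (1UL << 9), 6);
	return 0;
}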