Example #1
static void
unbind_from_irq(int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);
	int cpu;

	mtx_lock_spin(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			cpu = cpu_from_evtchn(evtchn);
			pcpu_find(cpu)->pc_virq_to_irq[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			cpu = cpu_from_evtchn(evtchn);
			pcpu_find(cpu)->pc_ipi_to_irq[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	mtx_unlock_spin(&irq_mapping_update_lock);
}
Example #2
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	spin_unlock(&irq_mapping_update_lock);
}
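These examples all lean on a packed irq_info word: each IRQ slot records its type, per-type index, and bound event channel in a single 32-bit value. A minimal sketch of that encoding and its accessors, assuming the classic xenlinux layout (type in the top byte, index in the next byte, event channel in the low 16 bits); the exact bit layout is an assumption here:

/* Hypothetical sketch of the packed encoding behind these helpers. */
#define mk_irq_info(type, index, evtchn) \
	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)

/* Port 0 is never a usable event channel, so it marks "unbound". */
#define VALID_EVTCHN(evtchn)	((evtchn) != 0)

static inline unsigned int evtchn_from_irq(int irq)
{
	return (u16)irq_info[irq];
}

static inline unsigned int index_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 16);
}

static inline unsigned int type_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 24);
}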
Example #3
static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		rebind_evtchn_to_cpu(evtchn, tcpu);
}
Example #4
static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}
Example #5
static void end_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
		unmask_evtchn(evtchn);
}
Example #6
static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq  = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq_to_pirq(irq));

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));

	return 0;
}
Example #7
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
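The wrapper above only resolves the port; the actual kick is a single EVTCHNOP_send hypercall. A minimal sketch of the underlying helper as commonly defined in the xenlinux headers (discarding the return value is an assumption):

static inline void notify_remote_via_evtchn(int port)
{
	struct evtchn_send send = { .port = port };

	/* Raise the remote end's pending bit; failure is not expected
	 * for a valid, bound port, so the result is ignored here. */
	(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}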
Example #8
static unsigned int startup_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
	return 0;
}
Example #9
void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{
	int evtchn = evtchn_from_irq(i);
	shared_info_t *s = HYPERVISOR_shared_info;
	if (!VALID_EVTCHN(evtchn))
		return;
	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
	synch_set_bit(evtchn, &s->evtchn_pending[0]);
}
Example #10
static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}
Example #11
static void enable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}
Example #12
static int retrigger(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	shared_info_t *s = HYPERVISOR_shared_info;
	if (!VALID_EVTCHN(evtchn))
		return 1;
	synch_set_bit(evtchn, &s->evtchn_pending[0]);
	return 1;
}
Example #13
static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}
Example #14
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	shared_info_t *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = test_and_set_evtchn_mask(evtchn);
	synch_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
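The mask-before-set ordering here is deliberate: masking first prevents a concurrent upcall from consuming and clearing the pending bit mid-resend, and the final unmask of a pending channel is what actually re-raises the upcall. A sketch of the helper it relies on, assuming the usual synch-bitops definitions:

static inline int test_and_set_evtchn_mask(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;

	/* Returns nonzero if the port was already masked. */
	return synch_test_and_set_bit(port, s->evtchn_mask);
}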
Example #15
static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	irq_info[irq] = IRQ_UNBOUND;
}
Example #16
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to the target vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with
	 * a virq or IPI channel, which doesn't actually need to be rebound.
	 * Ignore the failure, but skip the xenlinux-level rebind in that
	 * case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}
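In the xenlinux tree this helper is typically reached from the irq_chip set_affinity hook; a minimal sketch of that wiring, assuming the old cpumask_t API (first_cpu picks the lowest set CPU):

static void set_affinity_irq(unsigned int irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);

	/* Only the first CPU in the mask matters: an event channel is
	 * delivered to exactly one VCPU. */
	rebind_irq_to_cpu(irq, tcpu);
}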
Example #17
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	unsigned int cpu;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
		    HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;

		/* Zap stats across IRQ changes of use. */
		for_each_possible_cpu(cpu)
			kstat_cpu(cpu).irqs[irq] = 0;
	}

	spin_unlock(&irq_mapping_update_lock);
}
Example #18
/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long       l1, l2;
	unsigned long       masked_l1, masked_l2;
	unsigned int        l1i, l2i, start_l1i, start_l2i, port, count, i;
	int                 irq;
	unsigned int        cpu = smp_processor_id();
	shared_info_t      *s = HYPERVISOR_shared_info;
	vcpu_info_t        *vcpu_info = &s->vcpu_info[cpu];

	exit_idle();
	irq_enter();

	do {
		/* Avoid a callback storm when we reenable delivery. */
		vcpu_info->evtchn_upcall_pending = 0;

		/* Nested invocations bail immediately. */
		if (unlikely(per_cpu(upcall_count, cpu)++))
			break;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif

		/*
		 * Handle timer interrupts before all others, so that all
		 * hardirq handlers see an up-to-date system time even if we
		 * have just woken from a long idle period.
		 */
		if ((irq = __get_cpu_var(virq_to_irq)[VIRQ_TIMER]) != -1) {
			port = evtchn_from_irq(irq);
			l1i = port / BITS_PER_LONG;
			l2i = port % BITS_PER_LONG;
			if (active_evtchns(cpu, s, l1i) & (1ul<<l2i))
				do_IRQ(irq, regs);
		}

		l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);

		start_l1i = l1i = per_cpu(current_l1i, cpu);
		start_l2i = per_cpu(current_l2i, cpu);

		for (i = 0; l1 != 0; i++) {
			masked_l1 = l1 & ((~0UL) << l1i);
			/* If we masked out all events, wrap to beginning. */
			if (masked_l1 == 0) {
				l1i = l2i = 0;
				continue;
			}
			l1i = __ffs(masked_l1);

			l2 = active_evtchns(cpu, s, l1i);
			l2i = 0; /* usually scan entire word from start */
			if (l1i == start_l1i) {
				/* We scan the starting word in two parts. */
				if (i == 0)
					/* 1st time: start in the middle */
					l2i = start_l2i;
				else
					/* 2nd time: mask bits done already */
					l2 &= (1ul << start_l2i) - 1;
			}

			do {
				masked_l2 = l2 & ((~0UL) << l2i);
				if (masked_l2 == 0)
					break;
				l2i = __ffs(masked_l2);

				/* process port */
				port = (l1i * BITS_PER_LONG) + l2i;
				if ((irq = evtchn_to_irq[port]) != -1)
					do_IRQ(irq, regs);
				else
					evtchn_device_upcall(port);

				l2i = (l2i + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				per_cpu(current_l1i, cpu) =
					l2i ? l1i : (l1i + 1) % BITS_PER_LONG;
				per_cpu(current_l2i, cpu) = l2i;

			} while (l2i != 0);

			/* Scan start_l1i twice; all others once. */
			if ((l1i != start_l1i) || (i != 0))
				l1 &= ~(1UL << l1i);

			l1i = (l1i + 1) % BITS_PER_LONG;
		}

		/* If there were nested callbacks then we have more to do. */
		count = per_cpu(upcall_count, cpu);
		per_cpu(upcall_count, cpu) = 0;
	} while (unlikely(count != 1));

	irq_exit();
}
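The scan above only considers events that are pending, not masked, and routed to this CPU. A sketch of the active_evtchns() selector it uses, assuming a per-CPU binding bitmap named cpu_evtchn_mask maintained by bind_evtchn_to_cpu (the name follows the classic evtchn driver and is an assumption here):

#define active_evtchns(cpu, sh, idx)		\
	((sh)->evtchn_pending[idx] &		\
	 cpu_evtchn_mask[cpu][idx] &		\
	 ~(sh)->evtchn_mask[idx])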
Example #19
{
	intr_remove_handler(xp->xp_pins[irq].xp_cookie);
	unbind_from_irq(irq);
}

#if 0
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void
rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
	int evtchn;

	mtx_lock_spin(&irq_mapping_update_lock);

	evtchn = evtchn_from_irq(irq);
	if (!VALID_EVTCHN(evtchn)) {
		mtx_unlock_spin(&irq_mapping_update_lock);
		return;
	}

	/* Send future instances of this interrupt to the target vcpu. */
	op.u.bind_vcpu.port = evtchn;
	op.u.bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with
	 * a virq or IPI channel, which doesn't actually need to be rebound.
	 * Ignore the failure, but skip the xenlinux-level rebind in that
	 * case.
	 */
	if (HYPERVISOR_event_channel_op(&op) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	mtx_unlock_spin(&irq_mapping_update_lock);
}
#endif