Code Example #1
static void
unbind_from_irq(int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);
	int cpu;

	mtx_lock_spin(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			cpu = cpu_from_evtchn(evtchn);
			pcpu_find(cpu)->pc_virq_to_irq[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			cpu = cpu_from_evtchn(evtchn);
			pcpu_find(cpu)->pc_ipi_to_irq[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	mtx_unlock_spin(&irq_mapping_update_lock);
}
Code Example #2
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	spin_unlock(&irq_mapping_update_lock);
}
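
Both unbind variants above (the FreeBSD one in Code Example #1 and the Linux one in Code Example #2) maintain the same bookkeeping: a per-IRQ reference count, an evtchn-to-irq table, and a reverse irq-to-evtchn record, all torn down only when the last user of the IRQ goes away. The following is a minimal standalone model of that bookkeeping, with made-up table sizes and names (model_unbind, NR_IRQS, NR_EVTCHNS are invented for illustration) and with the close hypercall, locking, and per-CPU virq/ipi maps omitted:

#include <stdio.h>

#define NR_IRQS		16	/* assumption: toy table sizes */
#define NR_EVTCHNS	16

static int irq_bindcount[NR_IRQS];	/* users holding each IRQ            */
static int evtchn_to_irq[NR_EVTCHNS];	/* event channel -> IRQ, -1 if none  */
static int irq_to_evtchn[NR_IRQS];	/* IRQ -> event channel, -1 if none  */

/*
 * Model of the unbind path: drop one reference and, on the last one,
 * sever the evtchn<->irq mapping, as the real unbind_from_irq() does
 * after closing the port with EVTCHNOP_close.
 */
static void model_unbind(int irq)
{
	int evtchn = irq_to_evtchn[irq];

	if (--irq_bindcount[irq] == 0 && evtchn >= 0) {
		evtchn_to_irq[evtchn] = -1;
		irq_to_evtchn[irq] = -1;
	}
}

int main(void)
{
	for (int i = 0; i < NR_EVTCHNS; i++)
		evtchn_to_irq[i] = -1;
	for (int i = 0; i < NR_IRQS; i++)
		irq_to_evtchn[i] = -1;

	/* Bind event channel 3 to IRQ 5 with two users. */
	evtchn_to_irq[3] = 5;
	irq_to_evtchn[5] = 3;
	irq_bindcount[5] = 2;

	model_unbind(5);	/* first user gone: mapping survives    */
	printf("after 1st unbind: evtchn_to_irq[3] = %d\n", evtchn_to_irq[3]);
	model_unbind(5);	/* last user gone: mapping is torn down */
	printf("after 2nd unbind: evtchn_to_irq[3] = %d\n", evtchn_to_irq[3]);
	return 0;
}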
Code Example #3
File: evtchn.c  Project: zhoupeng/spice4xen
static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
{
	struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };

	if (pirq_eoi_does_unmask) {
		if (test_bit(eoi.irq, pirq_needs_eoi))
			VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
		else
			unmask_evtchn(evtchn);
	} else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
		if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
			struct evtchn_unmask unmask = { .port = evtchn };
			struct multicall_entry mcl[2];

			mcl[0].op = __HYPERVISOR_event_channel_op;
			mcl[0].args[0] = EVTCHNOP_unmask;
			mcl[0].args[1] = (unsigned long)&unmask;
			mcl[1].op = __HYPERVISOR_physdev_op;
			mcl[1].args[0] = PHYSDEVOP_eoi;
			mcl[1].args[1] = (unsigned long)&eoi;

			if (HYPERVISOR_multicall(mcl, 2))
				BUG();
		} else {
			unmask_evtchn(evtchn);
			VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
		}
	} else
		unmask_evtchn(evtchn);
}
Code Example #4
void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
		return;
	}

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
		if (!vcpu_info->evtchn_upcall_mask)
			force_evtchn_callback();
	}
}
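
The fast path in unmask_evtchn() above works on three pieces of shared state: the per-port mask bit, the per-port pending bit, and a per-VCPU selector word that groups ports into longword-sized chunks. Below is a standalone sketch of that "resend the lost edge" logic, using plain (non-atomic) bit helpers and toy variables in place of shared_info/vcpu_info and the synch_* primitives; model_unmask and the *_bit_ul helpers are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Toy stand-ins for the relevant fields of shared_info / vcpu_info. */
static unsigned long evtchn_mask[1];		/* 1 = port masked           */
static unsigned long evtchn_pending[1];		/* 1 = event waiting on port */
static unsigned long evtchn_pending_sel;	/* per-VCPU group selector   */
static bool evtchn_upcall_pending;		/* "deliver an upcall" flag  */

static void set_bit_ul(int bit, unsigned long *word)	{ *word |=  1UL << bit; }
static void clear_bit_ul(int bit, unsigned long *word)	{ *word &= ~(1UL << bit); }
static bool test_bit_ul(int bit, unsigned long *word)	{ return (*word >> bit) & 1; }

/*
 * Model of the local-CPU fast path of unmask_evtchn(): clear the mask bit
 * and, if an event arrived while the port was masked, re-raise it so the
 * "lost edge" is delivered (the real code may then force a callback).
 */
static void model_unmask(int port)
{
	clear_bit_ul(port, &evtchn_mask[0]);

	if (test_bit_ul(port, &evtchn_pending[0])) {
		set_bit_ul(port / BITS_PER_LONG, &evtchn_pending_sel);
		evtchn_upcall_pending = true;
	}
}

int main(void)
{
	int port = 7;

	set_bit_ul(port, &evtchn_mask[0]);	/* port is masked...       */
	set_bit_ul(port, &evtchn_pending[0]);	/* ...and an event arrives */

	model_unmask(port);
	printf("upcall pending after unmask: %d\n", evtchn_upcall_pending);
	return 0;
}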
Code Example #5
File: evtchn.c  Project: zhoupeng/spice4xen
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	unsigned int cpu;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
		    HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;

		/* Zap stats across IRQ changes of use. */
		for_each_possible_cpu(cpu)
			kstat_cpu(cpu).irqs[irq] = 0;
	}

	spin_unlock(&irq_mapping_update_lock);
}
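
Compared with Code Example #2, this variant skips the EVTCHNOP_close hypercall for IRQT_CALLER_PORT bindings (the short-circuited type check before the hypercall) and additionally zeroes the per-CPU kstat interrupt counters for the IRQ, since the IRQ number may later be reused for a different purpose.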