Example #1
/*
 * Mask all redirection entries of the pins assigned to the cell. When
 * handing over with pins masked, record the mask in the shadow table;
 * otherwise replay pending edge-triggered interrupts so that no event
 * is lost while a pin is masked.
 */
static void ioapic_mask_cell_pins(struct cell_ioapic *ioapic,
				  enum ioapic_handover handover)
{
	struct phys_ioapic *phys_ioapic = ioapic->phys_ioapic;
	struct apic_irq_message irq_msg;
	union ioapic_redir_entry entry;
	unsigned int pin, reg;

	for (pin = 0; pin < IOAPIC_NUM_PINS; pin++) {
		if (!(ioapic->pin_bitmap & (1UL << pin)))
			continue;

		reg = IOAPIC_REDIR_TBL_START + pin * 2;

		entry.raw[0] = ioapic_reg_read(phys_ioapic, reg);
		if (entry.remap.mask)
			continue;

		ioapic_reg_write(phys_ioapic, reg, IOAPIC_REDIR_MASK);

		if (handover == PINS_MASKED) {
			phys_ioapic->shadow_redir_table[pin].native.mask = 1;
		} else if (!entry.native.level_triggered) {
			/*
			 * Inject edge-triggered interrupts to avoid losing
			 * events while masked. Linux can handle rare spurious
			 * interrupts.
			 */
			entry = phys_ioapic->shadow_redir_table[pin];
			irq_msg = ioapic_translate_redir_entry(ioapic, pin,
							       entry);
			if (irq_msg.valid)
				apic_send_irq(irq_msg);
		}
	}
}
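
The mask and trigger checks above rely on the standard I/O APIC redirection-entry
layout: 64 bits per pin, split across two 32-bit registers, with the mask bit at
bit 16 and the trigger mode at bit 15 of the low word, so reading raw[0] alone is
sufficient. Below is a minimal sketch of that layout, assuming a little-endian
GCC target; the union is illustrative and is not Jailhouse's actual
union ioapic_redir_entry:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the I/O APIC redirection-entry format. */
union redir_entry_sketch {
	struct {
		uint64_t vector:8;          /* bits 0-7: interrupt vector */
		uint64_t delivery_mode:3;   /* bits 8-10 */
		uint64_t dest_logical:1;    /* bit 11: destination mode */
		uint64_t delivery_status:1; /* bit 12 (read-only) */
		uint64_t pin_polarity:1;    /* bit 13 */
		uint64_t remote_irr:1;      /* bit 14 (read-only) */
		uint64_t level_triggered:1; /* bit 15: trigger mode */
		uint64_t mask:1;            /* bit 16: 1 = pin masked */
		uint64_t reserved:39;
		uint64_t destination:8;     /* bits 56-63: APIC ID */
	} native;
	uint32_t raw[2];
};

int main(void)
{
	union redir_entry_sketch entry = { .raw = { 0x0001a030, 0 } };

	/* Mirrors the checks in ioapic_mask_cell_pins: an already masked
	 * pin needs no action; an unmasked edge-triggered pin may need a
	 * replay after being masked. */
	printf("masked: %u, level-triggered: %u\n",
	       (unsigned)entry.native.mask,
	       (unsigned)entry.native.level_triggered);
	return 0;
}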
Example #2
/*
 * Translate a device MSI message into an APIC IRQ message. If the cell
 * is presented with an emulated interrupt remapping unit, decode the
 * remapped-format message and resolve it via the IOMMU; otherwise build
 * the message from the native fields, selecting the requested
 * sub-vector for multi-message MSI.
 */
static struct apic_irq_message
pci_translate_msi_vector(struct pci_device *device, unsigned int vector,
			 unsigned int legacy_vectors, union x86_msi_vector msi)
{
	struct apic_irq_message irq_msg = { .valid = 0 };
	unsigned int idx;

	if (iommu_cell_emulates_ir(device->cell)) {
		if (!msi.remap.remapped)
			return irq_msg;

		idx = msi.remap.int_index | (msi.remap.int_index15 << 15);
		if (msi.remap.shv)
			idx += msi.remap.subhandle;
		return iommu_get_remapped_root_int(device->info->iommu,
						   device->info->bdf,
						   vector, idx);
	}

	irq_msg.vector = msi.native.vector;
	if (legacy_vectors > 1) {
		irq_msg.vector &= ~(legacy_vectors - 1);
		irq_msg.vector |= vector;
	}
	irq_msg.delivery_mode = msi.native.delivery_mode;
	irq_msg.level_triggered = 0;
	irq_msg.dest_logical = msi.native.dest_logical;
	irq_msg.redir_hint = msi.native.redir_hint;
	irq_msg.valid = 1;
	irq_msg.destination = msi.native.destination;

	return irq_msg;
}
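
The legacy_vectors handling above implements multi-message MSI: a device granted
a power-of-two number of vectors signals sub-vector n by replacing the low bits
of its base vector, which must therefore be aligned accordingly. A standalone
sketch of the arithmetic; the helper name is illustrative:

#include <assert.h>
#include <stdio.h>

/* Select sub-vector n out of legacy_vectors (a power of two) by
 * substituting the low bits of the aligned base vector, as done in
 * pci_translate_msi_vector. */
static unsigned int msi_subvector(unsigned int base_vector,
				  unsigned int legacy_vectors,
				  unsigned int n)
{
	assert(n < legacy_vectors);
	return (base_vector & ~(legacy_vectors - 1)) | n;
}

int main(void)
{
	/* Base vector 0x40 with 4 enabled vectors yields 0x40..0x43. */
	for (unsigned int n = 0; n < 4; n++)
		printf("vector %u -> 0x%x\n", n, msi_subvector(0x40, 4, n));
	return 0;
}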

void arch_pci_suppress_msi(struct pci_device *device,
			   const struct jailhouse_pci_capability *cap)
{
	unsigned int n, vectors = pci_enabled_msi_vectors(device);
	const struct jailhouse_pci_device *info = device->info;
	struct apic_irq_message irq_msg;
	union x86_msi_vector msi = {
		.native.dest_logical = 1,
		.native.redir_hint = 1,
		.native.address = MSI_ADDRESS_VALUE,
	};

	if (!(pci_read_config(info->bdf, PCI_CFG_COMMAND, 2) & PCI_CMD_MASTER))
		return;

	/*
	 * Disable delivery by programming an empty destination CPU
	 * bitmap in logical addressing mode.
	 */
	if (info->msi_64bits)
		pci_write_config(info->bdf, cap->start + 8, 0, 4);
	pci_write_config(info->bdf, cap->start + 4, (u32)msi.raw.address, 4);

	/*
	 * Inject MSI vectors to avoid losing events while suppressed.
	 * Linux can handle rare spurious interrupts.
	 */
	msi = pci_get_x86_msi_vector(device);
	for (n = 0; n < vectors; n++) {
		irq_msg = pci_translate_msi_vector(device, n, vectors, msi);
		apic_send_irq(irq_msg);
	}
}
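
The suppression works because an x86 MSI address in logical destination mode
with an all-zero destination bitmap matches no CPU, so messages are simply not
delivered while the MSI enable bit stays set. A sketch of the address encoding
this relies on, per the Intel SDM; the helper function is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Compose an x86 MSI address: 0xFEExxxxx with the destination ID in
 * bits 12-19, the redirection hint in bit 3 and the destination mode
 * (1 = logical) in bit 2. */
static uint32_t msi_address(uint8_t dest, int dest_logical, int redir_hint)
{
	return 0xfee00000u | ((uint32_t)dest << 12) |
	       ((uint32_t)redir_hint << 3) | ((uint32_t)dest_logical << 2);
}

int main(void)
{
	/* Logical mode, destination bitmap 0: no CPU matches, which is
	 * exactly what arch_pci_suppress_msi programs. */
	printf("suppressed MSI address: 0x%08x\n", msi_address(0, 1, 1));
	return 0;
}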

static u32 pci_get_x86_msi_remap_address(unsigned int index)
{
	union x86_msi_vector msi = {
		.remap.int_index15 = index >> 15,
		.remap.shv = 1,
		.remap.remapped = 1,
		.remap.int_index = index,
		.remap.address = MSI_ADDRESS_VALUE,
	};

	return (u32)msi.raw.address;
}
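
In the remappable format defined by VT-d, the address no longer encodes a
destination: it carries an index into the interrupt remapping table, split
across bits 5-19 (int_index[14:0]) and bit 2 (int_index[15]), with bit 4
flagging the remapped format and bit 3 (SHV) declaring the subhandle in the
data register valid. A standalone mirror of the encoding above; the function
name is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Build a remappable-format MSI address for a remapping-table index,
 * matching pci_get_x86_msi_remap_address. */
static uint32_t remap_msi_address(unsigned int index)
{
	return 0xfee00000u |
	       ((index & 0x7fffu) << 5) |   /* int_index[14:0] */
	       (1u << 4) |                  /* remapped format */
	       (1u << 3) |                  /* SHV: subhandle valid */
	       (((index >> 15) & 1u) << 2); /* int_index[15] */
}

int main(void)
{
	printf("remap address for index 42: 0x%08x\n",
	       remap_msi_address(42));
	return 0;
}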

/*
 * Map all enabled MSI vectors of the device through the IOMMU and
 * redirect the device to the resulting remapped address.
 */
int arch_pci_update_msi(struct pci_device *device,
			const struct jailhouse_pci_capability *cap)
{
	unsigned int n, vectors = pci_enabled_msi_vectors(device);
	union x86_msi_vector msi = pci_get_x86_msi_vector(device);
	const struct jailhouse_pci_device *info = device->info;
	struct apic_irq_message irq_msg;
	u16 bdf = info->bdf;
	int result = 0;

	if (vectors == 0)
		return 0;

	for (n = 0; n < vectors; n++) {
		irq_msg = pci_translate_msi_vector(device, n, vectors, msi);
		result = iommu_map_interrupt(device->cell, bdf, n, irq_msg);
		/*
		 * HACK for QEMU: if the IOMMU lacks interrupt remapping
		 * support, iommu_map_interrupt() returns -ENOSYS; pass
		 * the shadowed MSI registers through unchanged.
		 */
		if (result == -ENOSYS) {
			for (n = 1; n < (info->msi_64bits ? 4 : 3); n++)
				pci_write_config(bdf, cap->start + n * 4,
					device->msi_registers.raw[n], 4);
			return 0;
		}
		if (result < 0)
			return result;
	}

	/* set result to the base index again */
	result -= vectors - 1;

	pci_write_config(bdf, cap->start + (info->msi_64bits ? 12 : 8), 0, 2);

	if (info->msi_64bits)
		pci_write_config(bdf, cap->start + 8, 0, 4);
	pci_write_config(bdf, cap->start + 4,
			 pci_get_x86_msi_remap_address(result), 4);

	return 0;
}

/* Remap and update a single MSI-X vector of the device. */
int arch_pci_update_msix_vector(struct pci_device *device, unsigned int index)
{
	union x86_msi_vector msi = {
		.raw.address = device->msix_vectors[index].field.address,
		.raw.data = device->msix_vectors[index].field.data,
	};
	struct apic_irq_message irq_msg;
	int result;

	if (!device->msix_registers.field.enable)
		return 0;

	irq_msg = pci_translate_msi_vector(device, index, 0, msi);
	result = iommu_map_interrupt(device->cell, device->info->bdf, index,
				     irq_msg);
	/*
	 * HACK for QEMU: if the IOMMU lacks interrupt remapping support,
	 * write the vector directly into the MSI-X table.
	 */
	if (result == -ENOSYS) {
		mmio_write64(&device->msix_table[index].field.address,
			     device->msix_vectors[index].field.address);
		mmio_write32(&device->msix_table[index].field.data,
			     device->msix_vectors[index].field.data);
		return 0;
	}
	if (result < 0)
		return result;

	mmio_write64(&device->msix_table[index].field.address,
		     pci_get_x86_msi_remap_address(result));
	mmio_write32(&device->msix_table[index].field.data, 0);

	return 0;
}
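
For reference, each MSI-X table entry is 16 bytes per the PCI spec: a 64-bit
message address, 32-bit message data and 32-bit vector control. With remapping
active, the data word is written as 0 above because vector and delivery mode
come from the remapping table entry rather than from the message. A layout
sketch, illustrative rather than Jailhouse's actual definition:

#include <stdint.h>

/* Hypothetical MSI-X table entry; the code above writes the address
 * before the data, matching this field order. */
struct msix_table_entry_sketch {
	uint64_t address;     /* message address (low + high dwords) */
	uint32_t data;        /* message data; 0 in remapped format */
	uint32_t vector_ctrl; /* bit 0: per-vector mask */
};

_Static_assert(sizeof(struct msix_table_entry_sketch) == 16,
	       "MSI-X table entries are 16 bytes");

int main(void)
{
	return 0;
}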