Example #1
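/*
 * Invalidate the context cache and the IOTLB of one DMAR unit with the
 * given invalidation scopes. Each invalidation is requested through its
 * command register and polled until the hardware clears the invalidate
 * bit, i.e. both flushes are synchronous.
 */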
static void vtd_flush_dmar_caches(void *reg_base, u64 ctx_scope,
				  u64 iotlb_scope)
{
	void *iotlb_reg_base;

	mmio_write64(reg_base + VTD_CCMD_REG, ctx_scope | VTD_CCMD_ICC);
	while (mmio_read64(reg_base + VTD_CCMD_REG) & VTD_CCMD_ICC)
		cpu_relax();

	iotlb_reg_base = vtd_iotlb_reg_base(reg_base);
	mmio_write64(iotlb_reg_base + VTD_IOTLB_REG,
		     iotlb_scope | VTD_IOTLB_DW | VTD_IOTLB_DR |
		     VTD_IOTLB_IVT);
	while (mmio_read64(iotlb_reg_base + VTD_IOTLB_REG) & VTD_IOTLB_IVT)
		cpu_relax();
}
Example #2
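/*
 * Set up DMA remapping for a new cell: allocate its root page table,
 * map all of the cell's memory regions, attach its PCI devices and,
 * unless translation is already enabled, program the root table address
 * and turn on translation on every DMAR unit.
 */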
int vtd_cell_init(struct cell *cell)
{
	struct jailhouse_cell_desc *config = cell->config;
	const struct jailhouse_memory *mem =
		jailhouse_cell_mem_regions(config);
	const struct jailhouse_pci_device *dev =
		jailhouse_cell_pci_devices(cell->config);
	void *reg_base = dmar_reg_base;
	int n, err;

	/* HACK for QEMU */
	if (dmar_units == 0)
		return 0;

	if (cell->id >= dmar_num_did)
		return -ERANGE;

	cell->vtd.pg_structs.root_paging = vtd_paging;
	cell->vtd.pg_structs.root_table = page_alloc(&mem_pool, 1);
	if (!cell->vtd.pg_structs.root_table)
		return -ENOMEM;

	for (n = 0; n < config->num_memory_regions; n++, mem++) {
		err = vtd_map_memory_region(cell, mem);
		if (err)
			/* FIXME: release vtd.pg_structs.root_table */
			return err;
	}

	for (n = 0; n < config->num_pci_devices; n++)
		if (!vtd_add_device_to_cell(cell, &dev[n]))
			/* FIXME: release vtd.pg_structs.root_table,
			 * revert device additions */
			return -ENOMEM;

	if (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES))
		for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
			mmio_write64(reg_base + VTD_RTADDR_REG,
				     page_map_hvirt2phys(root_entry_table));
			mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_SRTP);
			while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
				 VTD_GSTS_SRTP))
				cpu_relax();

			vtd_flush_dmar_caches(reg_base, VTD_CCMD_CIRG_GLOBAL,
					      VTD_IOTLB_IIRG_GLOBAL);

			mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_TE);
			while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
				 VTD_GSTS_TES))
				cpu_relax();
		}

	return 0;
}
Example #3
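/*
 * Perform a decoded MMIO access: issue the 1, 2, 4 or (on 64-bit
 * builds) 8-byte read or write described by mmio at the given offset
 * from base, storing the result of a read in mmio->value.
 */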
void mmio_perform_access(void *base, struct mmio_access *mmio)
{
	void *addr = base + mmio->address;

	if (mmio->is_write)
		switch (mmio->size) {
		case 1:
			mmio_write8(addr, mmio->value);
			break;
		case 2:
			mmio_write16(addr, mmio->value);
			break;
		case 4:
			mmio_write32(addr, mmio->value);
			break;
#if BITS_PER_LONG == 64
		case 8:
			mmio_write64(addr, mmio->value);
			break;
#endif
		}
	else
		switch (mmio->size) {
		case 1:
			mmio->value = mmio_read8(addr);
			break;
		case 2:
			mmio->value = mmio_read16(addr);
			break;
		case 4:
			mmio->value = mmio_read32(addr);
			break;
#if BITS_PER_LONG == 64
		case 8:
			mmio->value = mmio_read64(addr);
			break;
#endif
		}
}
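A minimal usage sketch (hypothetical, not from the source): the helper below round-trips a value through mmio_perform_access, using a local scratch variable as the "region" base. The scratch variable, the function name and the u32 type are assumptions; only the struct mmio_access fields visible above are relied upon.

static u32 scratch;

static void mmio_roundtrip_demo(void)
{
	/* write 0xdeadbeef to offset 0 of the "region" at &scratch */
	struct mmio_access mmio = {
		.address = 0,
		.size = 4,
		.is_write = 1,
		.value = 0xdeadbeef,
	};

	mmio_perform_access(&scratch, &mmio);

	/* read the value back through the same dispatcher */
	mmio.is_write = 0;
	mmio_perform_access(&scratch, &mmio);
	/* mmio.value now holds 0xdeadbeef */
}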
Example #4
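/*
 * Translate one MSI vector of a device into an apic_irq_message. For
 * cells that emulate interrupt remapping, look up the remapped root
 * entry addressed by the handle (plus subhandle if SHV is set);
 * otherwise decode the native MSI fields, folding the vector number
 * into the low bits for multi-vector legacy MSI.
 */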
static struct apic_irq_message
pci_translate_msi_vector(struct pci_device *device, unsigned int vector,
			 unsigned int legacy_vectors, union x86_msi_vector msi)
{
	struct apic_irq_message irq_msg = { .valid = 0 };
	unsigned int idx;

	if (iommu_cell_emulates_ir(device->cell)) {
		if (!msi.remap.remapped)
			return irq_msg;

		idx = msi.remap.int_index | (msi.remap.int_index15 << 15);
		if (msi.remap.shv)
			idx += msi.remap.subhandle;
		return iommu_get_remapped_root_int(device->info->iommu,
						   device->info->bdf,
						   vector, idx);
	}

	irq_msg.vector = msi.native.vector;
	if (legacy_vectors > 1) {
		irq_msg.vector &= ~(legacy_vectors - 1);
		irq_msg.vector |= vector;
	}
	irq_msg.delivery_mode = msi.native.delivery_mode;
	irq_msg.level_triggered = 0;
	irq_msg.dest_logical = msi.native.dest_logical;
	irq_msg.redir_hint = msi.native.redir_hint;
	irq_msg.valid = 1;
	irq_msg.destination = msi.native.destination;

	return irq_msg;
}

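/*
 * Suppress MSI delivery of a bus-mastering device by redirecting its
 * messages to an empty logical destination, then inject all currently
 * enabled vectors so that no event is lost while delivery is disabled.
 */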
void arch_pci_suppress_msi(struct pci_device *device,
			   const struct jailhouse_pci_capability *cap)
{
	unsigned int n, vectors = pci_enabled_msi_vectors(device);
	const struct jailhouse_pci_device *info = device->info;
	struct apic_irq_message irq_msg;
	union x86_msi_vector msi = {
		.native.dest_logical = 1,
		.native.redir_hint = 1,
		.native.address = MSI_ADDRESS_VALUE,
	};

	if (!(pci_read_config(info->bdf, PCI_CFG_COMMAND, 2) & PCI_CMD_MASTER))
		return;

	/*
	 * Disable delivery by clearing all destination CPU bits in logical
	 * addressing mode.
	 */
	if (info->msi_64bits)
		pci_write_config(info->bdf, cap->start + 8, 0, 4);
	pci_write_config(info->bdf, cap->start + 4, (u32)msi.raw.address, 4);

	/*
	 * Inject MSI vectors to avoid losing events while suppressed.
	 * Linux can handle rare spurious interrupts.
	 */
	msi = pci_get_x86_msi_vector(device);
	for (n = 0; n < vectors; n++) {
		irq_msg = pci_translate_msi_vector(device, n, vectors, msi);
		apic_send_irq(irq_msg);
	}
}

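/*
 * Return the lower 32 bits of a remappable-format MSI address that
 * refers to interrupt remapping table entry index, with the subhandle
 * valid (SHV) bit set.
 */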
static u32 pci_get_x86_msi_remap_address(unsigned int index)
{
	union x86_msi_vector msi = {
		.remap.int_index15 = index >> 15,
		.remap.shv = 1,
		.remap.remapped = 1,
		.remap.int_index = index,
		.remap.address = MSI_ADDRESS_VALUE,
	};

	return (u32)msi.raw.address;
}

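/*
 * Remap all enabled MSI vectors of a device through the IOMMU and
 * rewrite the device's MSI address and data registers to reference the
 * remapped entries. On -ENOSYS (QEMU without interrupt remapping), the
 * shadowed registers are written back verbatim instead.
 */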
int arch_pci_update_msi(struct pci_device *device,
			const struct jailhouse_pci_capability *cap)
{
	unsigned int n, vectors = pci_enabled_msi_vectors(device);
	union x86_msi_vector msi = pci_get_x86_msi_vector(device);
	const struct jailhouse_pci_device *info = device->info;
	struct apic_irq_message irq_msg;
	u16 bdf = info->bdf;
	int result = 0;

	if (vectors == 0)
		return 0;

	for (n = 0; n < vectors; n++) {
		irq_msg = pci_translate_msi_vector(device, n, vectors, msi);
		result = iommu_map_interrupt(device->cell, bdf, n, irq_msg);
		/* HACK for QEMU */
		if (result == -ENOSYS) {
			for (n = 1; n < (info->msi_64bits ? 4 : 3); n++)
				pci_write_config(bdf, cap->start + n * 4,
						 device->msi_registers.raw[n], 4);
			return 0;
		}
		if (result < 0)
			return result;
	}

	/* rewind result from the last mapped entry back to the base index */
	result -= vectors - 1;

	pci_write_config(bdf, cap->start + (info->msi_64bits ? 12 : 8), 0, 2);

	if (info->msi_64bits)
		pci_write_config(bdf, cap->start + 8, 0, 4);
	pci_write_config(bdf, cap->start + 4,
			 pci_get_x86_msi_remap_address(result), 4);

	return 0;
}

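/*
 * Remap a single enabled MSI-X vector through the IOMMU and program
 * the device's MSI-X table entry accordingly, with the same QEMU
 * fallback as for plain MSI.
 */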
int arch_pci_update_msix_vector(struct pci_device *device, unsigned int index)
{
	union x86_msi_vector msi = {
		.raw.address = device->msix_vectors[index].field.address,
		.raw.data = device->msix_vectors[index].field.data,
	};
	struct apic_irq_message irq_msg;
	int result;

	if (!device->msix_registers.field.enable)
		return 0;

	irq_msg = pci_translate_msi_vector(device, index, 0, msi);
	result = iommu_map_interrupt(device->cell, device->info->bdf, index,
				     irq_msg);
	/* HACK for QEMU */
	if (result == -ENOSYS) {
		mmio_write64(&device->msix_table[index].field.address,
			     device->msix_vectors[index].field.address);
		mmio_write32(&device->msix_table[index].field.data,
			     device->msix_vectors[index].field.data);
		return 0;
	}
	if (result < 0)
		return result;

	mmio_write64(&device->msix_table[index].field.address,
		     pci_get_x86_msi_remap_address(result));
	mmio_write32(&device->msix_table[index].field.data, 0);

	return 0;
}