int vtd_cell_init(struct cell *cell)
{
	struct jailhouse_cell_desc *config = cell->config;
	const struct jailhouse_memory *mem =
		jailhouse_cell_mem_regions(config);
	const struct jailhouse_pci_device *dev =
		jailhouse_cell_pci_devices(config);
	void *reg_base = dmar_reg_base;
	int n, err;

	/* HACK for QEMU: no DMAR units found, skip VT-d setup */
	if (dmar_units == 0)
		return 0;

	/* the cell ID serves as the domain ID, so it must fit the DID range */
	if (cell->id >= dmar_num_did)
		return -ERANGE;

	cell->vtd.pg_structs.root_paging = vtd_paging;
	cell->vtd.pg_structs.root_table = page_alloc(&mem_pool, 1);
	if (!cell->vtd.pg_structs.root_table)
		return -ENOMEM;

	/* populate the cell's DMA page tables from its memory regions */
	for (n = 0; n < config->num_memory_regions; n++, mem++) {
		err = vtd_map_memory_region(cell, mem);
		if (err)
			/* FIXME: release vtd.pg_structs.root_table */
			return err;
	}

	/* attach the cell's PCI devices to its translation domain */
	for (n = 0; n < config->num_pci_devices; n++)
		if (!vtd_add_device_to_cell(cell, &dev[n]))
			/* FIXME: release vtd.pg_structs.root_table,
			 * revert device additions */
			return -ENOMEM;

	/*
	 * If translation is not yet enabled, program the root entry table
	 * into each DMAR unit, flush its caches and enable translation.
	 */
	if (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES))
		for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
			mmio_write64(reg_base + VTD_RTADDR_REG,
				     page_map_hvirt2phys(root_entry_table));
			mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_SRTP);
			while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
				 VTD_GSTS_SRTP))
				cpu_relax();

			vtd_flush_dmar_caches(reg_base, VTD_CCMD_CIRG_GLOBAL,
					      VTD_IOTLB_IIRG_GLOBAL);

			mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_TE);
			while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
				 VTD_GSTS_TES))
				cpu_relax();
		}

	return 0;
}
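/*
 * Hand a device back to the Linux cell: if the device is listed in the
 * Linux cell's configuration, re-add it to that cell's translation
 * domain. A device unknown to the Linux cell needs no restoration, so
 * report success for it.
 */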
static bool vtd_return_device_to_linux(const struct jailhouse_pci_device *dev)
{
	const struct jailhouse_pci_device *linux_dev =
		jailhouse_cell_pci_devices(linux_cell.config);
	unsigned int n;

	for (n = 0; n < linux_cell.config->num_pci_devices; n++)
		if (linux_dev[n].domain == dev->domain &&
		    linux_dev[n].bus == dev->bus &&
		    linux_dev[n].devfn == dev->devfn)
			return vtd_add_device_to_cell(&linux_cell,
						      &linux_dev[n]);

	return true;
}