int vtd_cell_init(struct cell *cell)
{
        struct jailhouse_cell_desc *config = cell->config;
        const struct jailhouse_memory *mem =
                jailhouse_cell_mem_regions(config);
        const struct jailhouse_pci_device *dev =
                jailhouse_cell_pci_devices(cell->config);
        void *reg_base = dmar_reg_base;
        int n, err;

        // HACK for QEMU
        if (dmar_units == 0)
                return 0;

        if (cell->id >= dmar_num_did)
                return -ERANGE;

        cell->vtd.pg_structs.root_paging = vtd_paging;
        cell->vtd.pg_structs.root_table = page_alloc(&mem_pool, 1);
        if (!cell->vtd.pg_structs.root_table)
                return -ENOMEM;

        for (n = 0; n < config->num_memory_regions; n++, mem++) {
                err = vtd_map_memory_region(cell, mem);
                if (err)
                        /* FIXME: release vtd.pg_structs.root_table */
                        return err;
        }

        for (n = 0; n < config->num_pci_devices; n++)
                if (!vtd_add_device_to_cell(cell, &dev[n]))
                        /* FIXME: release vtd.pg_structs.root_table,
                         * revert device additions */
                        return -ENOMEM;

        /*
         * If translation is not yet enabled, program the root table pointer
         * and turn on DMA translation on every DMAR unit.
         */
        if (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES))
                for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
                        mmio_write64(reg_base + VTD_RTADDR_REG,
                                     page_map_hvirt2phys(root_entry_table));
                        mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_SRTP);
                        while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
                                 VTD_GSTS_SRTP))
                                cpu_relax();

                        vtd_flush_dmar_caches(reg_base, VTD_CCMD_CIRG_GLOBAL,
                                              VTD_IOTLB_IIRG_GLOBAL);

                        mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_TE);
                        while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
                                 VTD_GSTS_TES))
                                cpu_relax();
                }

        return 0;
}
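/*
 * Illustrative sketch, not part of the original sources: a hypothetical
 * cell-creation path would call vtd_cell_init() once the cell's memory map
 * is known and abort the creation on error. The wrapper name
 * create_cell_dma_setup() is an assumption made for this example only.
 */
static int create_cell_dma_setup(struct cell *cell)
{
        int err;

        err = vtd_cell_init(cell);     /* builds DMAR root and page tables */
        if (err) {
                /* creation fails; earlier setup steps must be unwound here */
                printk("FATAL: VT-d cell setup failed (%d)\n", err);
                return err;
        }
        return 0;
}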
void vtd_root_cell_shrink(struct jailhouse_cell_desc *config)
{
        const struct jailhouse_pci_device *dev =
                jailhouse_cell_pci_devices(config);
        unsigned int n;

        vtd_init_fault_nmi();

        for (n = 0; n < config->num_pci_devices; n++)
                vtd_remove_device_from_cell(&root_cell, &dev[n]);

        vtd_flush_domain_caches(root_cell.id);
}
static bool vtd_return_device_to_linux(const struct jailhouse_pci_device *dev)
{
        const struct jailhouse_pci_device *linux_dev =
                jailhouse_cell_pci_devices(linux_cell.config);
        unsigned int n;

        for (n = 0; n < linux_cell.config->num_pci_devices; n++)
                if (linux_dev[n].domain == dev->domain &&
                    linux_dev[n].bus == dev->bus &&
                    linux_dev[n].devfn == dev->devfn)
                        return vtd_add_device_to_cell(&linux_cell,
                                                      &linux_dev[n]);

        /* The device is not listed in the Linux cell's config, so there is
         * nothing to hand back - report success. */
        return true;
}
/**
 * Look up a device owned by a cell.
 * @param[in] cell      Owning cell.
 * @param[in] bdf       16-bit bus/device/function ID.
 *
 * @return Pointer to owned PCI device or NULL.
 */
struct pci_device *pci_get_assigned_device(const struct cell *cell, u16 bdf)
{
        const struct jailhouse_pci_device *dev_info =
                jailhouse_cell_pci_devices(cell->config);
        u32 n;

        /* We iterate over the static device information to increase cache
         * locality. */
        for (n = 0; n < cell->config->num_pci_devices; n++)
                if (dev_info[n].bdf == bdf)
                        return cell->pci_devices[n].cell ?
                                &cell->pci_devices[n] : NULL;

        return NULL;
}
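/*
 * Illustrative sketch, an assumption rather than code from the original
 * sources: a config-space access handler could use pci_get_assigned_device()
 * to decide whether the issuing cell may touch the addressed device. The
 * helper name cell_may_access_device() is invented for this example.
 */
static bool cell_may_access_device(const struct cell *cell, u16 bdf)
{
        /* NULL means the BDF is unknown or not owned by this cell */
        return pci_get_assigned_device(cell, bdf) != NULL;
}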
void vtd_cell_exit(struct cell *cell)
{
        const struct jailhouse_pci_device *dev =
                jailhouse_cell_pci_devices(cell->config);
        unsigned int n;

        for (n = 0; n < cell->config->num_pci_devices; n++) {
                vtd_remove_device_from_cell(cell, &dev[n]);
                if (!vtd_return_device_to_root_cell(&dev[n]))
                        printk("WARNING: Failed to re-assign PCI device to "
                               "root cell\n");
        }

        vtd_flush_domain_caches(cell->id);
        vtd_flush_domain_caches(root_cell.id);

        page_free(&mem_pool, cell->vtd.pg_structs.root_table, 1);
}
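/*
 * Illustrative sketch (assumption): a cell-destruction path would call
 * vtd_cell_exit() to hand the cell's PCI devices back and drop its DMAR
 * structures before the remaining per-cell state is released. The wrapper
 * name destroy_cell_dma_teardown() is hypothetical.
 */
static void destroy_cell_dma_teardown(struct cell *cell)
{
        vtd_cell_exit(cell);    /* devices return to the root cell */
        /* ...release the remaining per-cell resources here... */
}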
void vtd_linux_cell_shrink(struct jailhouse_cell_desc *config)
{
        const struct jailhouse_memory *mem =
                jailhouse_cell_mem_regions(config);
        const struct jailhouse_pci_device *dev =
                jailhouse_cell_pci_devices(config);
        unsigned int n;

        /* Unmap DMA-capable regions named in the config from the Linux
         * cell's DMAR page table. */
        for (n = 0; n < config->num_memory_regions; n++, mem++)
                if (mem->access_flags & JAILHOUSE_MEM_DMA)
                        page_map_destroy(linux_cell.vtd.page_table,
                                         mem->phys_start, mem->size,
                                         dmar_pt_levels, PAGE_MAP_COHERENT);

        for (n = 0; n < config->num_pci_devices; n++)
                vtd_remove_device_from_cell(&linux_cell, &dev[n]);

        vtd_flush_domain_caches(linux_cell.id);
}
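/*
 * Illustrative sketch (assumption): when a new cell is created, the
 * hypervisor would first shrink the Linux cell by the resources named in the
 * new cell's config and only then set up DMA remapping for the new cell.
 * The wrapper name dma_handover_to_new_cell() is invented for this example.
 */
static int dma_handover_to_new_cell(struct cell *new_cell,
                                    struct jailhouse_cell_desc *config)
{
        vtd_linux_cell_shrink(config);  /* revoke DMA mappings and devices */
        return vtd_cell_init(new_cell); /* build the new cell's DMAR state */
}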