static bool vtd_add_device_to_cell(struct cell *cell,
                                   const struct jailhouse_pci_device *device)
{
        u64 root_entry_lo = root_entry_table[device->bus].lo_word;
        struct vtd_entry *context_entry_table, *context_entry;

        printk("Adding PCI device %02x:%02x.%x to cell \"%s\"\n",
               device->bus, device->devfn >> 3, device->devfn & 7,
               cell->config->name);

        /* Reuse the bus's context table if one is already installed,
         * otherwise allocate one and hook it into the root entry. */
        if (root_entry_lo & VTD_ROOT_PRESENT) {
                context_entry_table =
                        page_map_phys2hvirt(root_entry_lo & PAGE_MASK);
        } else {
                context_entry_table = page_alloc(&mem_pool, 1);
                if (!context_entry_table)
                        return false;
                root_entry_table[device->bus].lo_word = VTD_ROOT_PRESENT |
                        page_map_hvirt2phys(context_entry_table);
                flush_cache(&root_entry_table[device->bus].lo_word,
                            sizeof(u64));
        }

        /* Point the device's context entry at the cell's page tables and
         * tag it with the cell ID, which serves as VT-d domain ID. */
        context_entry = &context_entry_table[device->devfn];
        context_entry->lo_word = VTD_CTX_PRESENT | VTD_CTX_TTYPE_MLP_UNTRANS |
                page_map_hvirt2phys(cell->vtd.pg_structs.root_table);
        context_entry->hi_word =
                (dmar_pt_levels == 3 ? VTD_CTX_AGAW_39 : VTD_CTX_AGAW_48) |
                (cell->id << VTD_CTX_DID_SHIFT);
        flush_cache(context_entry, sizeof(*context_entry));

        return true;
}
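/*
 * Illustration only, not part of the Jailhouse sources: a minimal
 * stand-alone sketch of the (bus, devfn) lookup performed above. Per the
 * VT-d spec, the root table holds 256 entries indexed by bus number, each
 * pointing to a context table of 256 entries indexed by devfn, so the
 * lookup key is simply the device's BDF. The sample BDF below is an
 * assumption for the demo.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int bus = 0x00, devfn = (0x1f << 3) | 2; /* assumed 00:1f.2 */

        /* Same decoding as the printk above: bits 7-3 of devfn are the
         * device number, bits 2-0 the function number. */
        printf("%02x:%02x.%x -> root index %u, context index %u\n",
               bus, devfn >> 3, devfn & 7, bus, devfn);
        return 0;
}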
static void init_early(unsigned int cpu_id)
{
        struct jailhouse_memory hv_page;
        unsigned long core_percpu_size;

        master_cpu_id = cpu_id;

        arch_dbg_write_init();

        printk("\nInitializing Jailhouse hypervisor %s on CPU %d\n",
               JAILHOUSE_VERSION, cpu_id);
        printk("Code location: %p\n", __text_start);

        error = paging_init();
        if (error)
                return;

        root_cell.config = &system_config->root_cell;

        error = check_mem_regions(&system_config->root_cell);
        if (error)
                return;

        root_cell.id = -1;
        error = cell_init(&root_cell);
        if (error)
                return;

        error = arch_init_early();
        if (error)
                return;

        /*
         * Back the region of the hypervisor core and per-CPU data with the
         * empty page for Linux. This allows the hypervisor region to be
         * faulted into Linux' page table before shutdown without
         * triggering access violations.
         */
        hv_page.phys_start = page_map_hvirt2phys(empty_page);
        hv_page.virt_start = page_map_hvirt2phys(&hypervisor_header);
        hv_page.size = PAGE_SIZE;
        hv_page.flags = JAILHOUSE_MEM_READ;
        core_percpu_size = PAGE_ALIGN(hypervisor_header.core_size) +
                hypervisor_header.possible_cpus * sizeof(struct per_cpu);
        while (core_percpu_size > 0) {
                error = arch_map_memory_region(&root_cell, &hv_page);
                if (error)
                        return;
                core_percpu_size -= PAGE_SIZE;
                hv_page.virt_start += PAGE_SIZE;
        }

        page_map_dump_stats("after early setup");

        printk("Initializing processors:\n");
}
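/*
 * Illustration only: a stand-alone sketch of the backing-page arithmetic
 * in the loop above. All numbers are assumptions for the demo; the real
 * values come from hypervisor_header. Note the countdown relies on
 * sizeof(struct per_cpu) being a multiple of PAGE_SIZE; otherwise the
 * unsigned subtraction would wrap past zero.
 */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(s)   (((s) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long core_size = 150 * 1024;      /* assumed core_size */
        unsigned long percpu_size = 2 * PAGE_SIZE; /* assumed per-CPU size */
        unsigned int possible_cpus = 4;            /* assumed CPU count */
        unsigned long total = PAGE_ALIGN(core_size) +
                possible_cpus * percpu_size;

        /* One read-only mapping of the empty page per 4K page. */
        printf("%lu backing pages to map\n", total / PAGE_SIZE);
        return 0;
}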
int vtd_cell_init(struct cell *cell)
{
        struct jailhouse_cell_desc *config = cell->config;
        const struct jailhouse_memory *mem =
                jailhouse_cell_mem_regions(config);
        const struct jailhouse_pci_device *dev =
                jailhouse_cell_pci_devices(config);
        void *reg_base = dmar_reg_base;
        int n, err;

        /* HACK for QEMU: no DMAR units means nothing to program */
        if (dmar_units == 0)
                return 0;

        /* The cell ID doubles as VT-d domain ID, so it must fit into the
         * number of domain IDs the hardware supports. */
        if (cell->id >= dmar_num_did)
                return -ERANGE;

        cell->vtd.pg_structs.root_paging = vtd_paging;
        cell->vtd.pg_structs.root_table = page_alloc(&mem_pool, 1);
        if (!cell->vtd.pg_structs.root_table)
                return -ENOMEM;

        for (n = 0; n < config->num_memory_regions; n++, mem++) {
                err = vtd_map_memory_region(cell, mem);
                if (err)
                        /* FIXME: release vtd.pg_structs.root_table */
                        return err;
        }

        for (n = 0; n < config->num_pci_devices; n++)
                if (!vtd_add_device_to_cell(cell, &dev[n]))
                        /* FIXME: release vtd.pg_structs.root_table,
                         * revert device additions */
                        return -ENOMEM;

        /* If translation is not yet enabled, install the root table in
         * every DMAR unit, flush its caches and turn translation on. */
        if (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES))
                for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
                        mmio_write64(reg_base + VTD_RTADDR_REG,
                                     page_map_hvirt2phys(root_entry_table));
                        mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_SRTP);
                        while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
                                 VTD_GSTS_SRTP))
                                cpu_relax();

                        vtd_flush_dmar_caches(reg_base, VTD_CCMD_CIRG_GLOBAL,
                                              VTD_IOTLB_IIRG_GLOBAL);

                        mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_TE);
                        while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
                                 VTD_GSTS_TES))
                                cpu_relax();
                }

        return 0;
}
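/*
 * Illustration only, not Jailhouse API: a minimal sketch of the
 * command/status handshake used twice in the enable path above. Per the
 * VT-d spec, software writes a one-shot command bit to the Global Command
 * Register and then polls the Global Status Register until the hardware
 * latches the matching status bit. The register offsets (0x18 and 0x1c)
 * follow the spec; the volatile accessors stand in for Jailhouse's
 * mmio_read32/mmio_write32, and a plain busy loop replaces cpu_relax().
 */
#include <stdint.h>

#define VTD_GCMD_REG    0x18
#define VTD_GSTS_REG    0x1c

static inline uint32_t mmio_read32(volatile void *addr)
{
        return *(volatile uint32_t *)addr;
}

static inline void mmio_write32(volatile void *addr, uint32_t val)
{
        *(volatile uint32_t *)addr = val;
}

/* Issue one command bit and spin until its status bit is set, e.g.
 * vtd_command(reg_base, VTD_GCMD_TE, VTD_GSTS_TES) to enable translation. */
static void vtd_command(volatile uint8_t *reg_base, uint32_t cmd_bit,
                        uint32_t status_bit)
{
        mmio_write32(reg_base + VTD_GCMD_REG, cmd_bit);
        while (!(mmio_read32(reg_base + VTD_GSTS_REG) & status_bit))
                ;       /* busy-wait for hardware acknowledge */
}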