/*
 * Tear down all architecture-specific state of a cell.
 *
 * Subsystems are released in reverse order of their initialization in
 * arch_cell_create() (ioapic -> pci -> iommu -> vcpu).
 *
 * NOTE(review): arch_cell_create() also calls cat_cell_init(), but no
 * matching cat_cell_exit() is invoked here — confirm whether CAT state
 * needs explicit teardown or is reclaimed elsewhere.
 */
void arch_cell_destroy(struct cell *cell) { ioapic_cell_exit(cell); pci_cell_exit(cell); iommu_cell_exit(cell); vcpu_cell_exit(cell); }
/*
 * Tear down all architecture-specific state of a cell (legacy variant
 * using the pre-rename subsystem entry points vtd_/vmx_ instead of the
 * generic iommu_/vcpu_ wrappers). Subsystems are released in reverse
 * order of initialization.
 *
 * NOTE(review): cpu_data is not referenced in this body — presumably
 * kept for signature symmetry with other arch hooks; verify callers.
 * NOTE(review): this file also contains an arch_cell_destroy() with a
 * different signature — likely two versions concatenated; confirm only
 * one definition is actually compiled in.
 */
void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell) { ioapic_cell_exit(cell); pci_cell_exit(cell); vtd_cell_exit(cell); vmx_cell_exit(cell); }
int ioapic_cell_init(struct cell *cell) { const struct jailhouse_irqchip *irqchip = jailhouse_cell_irqchips(cell->config); struct cell_ioapic *ioapic, *root_ioapic; struct phys_ioapic *phys_ioapic; unsigned int n; if (cell->config->num_irqchips == 0) return 0; if (cell->config->num_irqchips > IOAPIC_MAX_CHIPS) return trace_error(-ERANGE); cell->arch.ioapics = page_alloc(&mem_pool, 1); if (!cell->arch.ioapics) return -ENOMEM; for (n = 0; n < cell->config->num_irqchips; n++, irqchip++) { phys_ioapic = ioapic_get_or_add_phys(irqchip); if (!phys_ioapic) { ioapic_cell_exit(cell); return -ENOMEM; } ioapic = &cell->arch.ioapics[n]; ioapic->info = irqchip; ioapic->cell = cell; ioapic->phys_ioapic = phys_ioapic; ioapic->pin_bitmap = (u32)irqchip->pin_bitmap; cell->arch.num_ioapics++; mmio_region_register(cell, irqchip->address, PAGE_SIZE, ioapic_access_handler, ioapic); if (cell != &root_cell) { root_ioapic = ioapic_find_by_address(&root_cell, irqchip->address); if (root_ioapic) { root_ioapic->pin_bitmap &= ~ioapic->pin_bitmap; ioapic_mask_cell_pins(ioapic, PINS_MASKED); } } } return 0; }
int arch_cell_create(struct cell *cell) { unsigned int cpu; int err; err = vcpu_cell_init(cell); if (err) return err; err = iommu_cell_init(cell); if (err) goto error_vm_exit; err = pci_cell_init(cell); if (err) goto error_iommu_exit; err = ioapic_cell_init(cell); if (err) goto error_pci_exit; err = cat_cell_init(cell); if (err) goto error_ioapic_exit; cell->comm_page.comm_region.pm_timer_address = system_config->platform_info.x86.pm_timer_address; cell->comm_page.comm_region.num_cpus = 0; for_each_cpu(cpu, cell->cpu_set) cell->comm_page.comm_region.num_cpus++; return 0; error_ioapic_exit: ioapic_cell_exit(cell); error_pci_exit: pci_cell_exit(cell); error_iommu_exit: iommu_cell_exit(cell); error_vm_exit: vcpu_cell_exit(cell); return err; }