void ics_pic_print_info(ICSState *ics, Monitor *mon)
{
    uint32_t i;

    monitor_printf(mon, "ICS %4x..%4x %p\n",
                   ics->offset, ics->offset + ics->nr_irqs - 1, ics);

    if (!ics->irqs) {
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        ics_synchronize_state(ics);
    }

    for (i = 0; i < ics->nr_irqs; i++) {
        ICSIRQState *irq = ics->irqs + i;

        if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) {
            continue;
        }
        monitor_printf(mon, " %4x %s %02x %02x\n",
                       ics->offset + i,
                       (irq->flags & XICS_FLAGS_IRQ_LSI) ? "LSI" : "MSI",
                       irq->priority, irq->status);
    }
}
static void pic_reset(void *opaque)
{
    PicState *s = opaque;

    s->last_irr = 0;
    s->irr = 0;
    s->imr = 0;
    s->isr = 0;
    s->priority_add = 0;
    s->irq_base = 0;
    s->read_reg_select = 0;
    s->poll = 0;
    s->special_mask = 0;
    s->init_state = 0;
    s->auto_eoi = 0;
    s->rotate_on_auto_eoi = 0;
    s->special_fully_nested_mode = 0;
    s->init4 = 0;
    s->single_mode = 0;
    /* Note: ELCR is not reset */

    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_kernel_pic_load_from_user(s);
    }
}
void kvm_arch_load_mpstate(CPUState *env)
{
#ifdef KVM_CAP_MP_STATE
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    /*
     * -1 indicates that the host did not support GET_MP_STATE ioctl,
     * so don't touch it.
     */
    if (env->mp_state != -1)
        kvm_set_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
#endif
}

void kvm_arch_cpu_reset(CPUState *env)
{
    if (kvm_irqchip_in_kernel(kvm_context)) {
#ifdef KVM_CAP_MP_STATE
        kvm_reset_mpstate(env->kvm_cpu_state.vcpu_ctx);
#endif
    } else {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        env->halted = 1;
    }
}

void kvm_arch_do_ioperm(void *_data)
{
    struct ioperm_data *data = _data;

    ioperm(data->start_port, data->num, data->turn_on);
}

void kvm_arch_process_irqchip_events(CPUState *env)
{
}
/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table_page + vector * MSIX_ENTRY_SIZE;
    uint64_t address;
    uint32_t data;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector])
        return;
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

#ifdef KVM_CAP_IRQCHIP
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_set_irq(dev->msix_irq_entries[vector].gsi, 1, NULL);
        return;
    }
#endif

    address = pci_get_long(table_entry + MSIX_MSG_UPPER_ADDR);
    address = (address << 32) | pci_get_long(table_entry + MSIX_MSG_ADDR);
    data = pci_get_long(table_entry + MSIX_MSG_DATA);
    stl_phys(address, data);
}
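/*
 * Hedged usage sketch (not part of the source above): how a hypothetical
 * device model might raise an MSI-X interrupt with this API. The vector must
 * have been claimed with msix_vector_use() beforehand; "my_dev_raise_event"
 * and "MY_EVENT_VECTOR" are illustrative names, not identifiers from the code.
 */
static void my_dev_raise_event(PCIDevice *my_dev)
{
    const unsigned MY_EVENT_VECTOR = 0;

    /* Claim the vector (typically done once at init/reset time). */
    if (msix_vector_use(my_dev, MY_EVENT_VECTOR) < 0) {
        return;
    }
    /* Deliver the interrupt; masked vectors are latched as pending. */
    msix_notify(my_dev, MY_EVENT_VECTOR);
}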
/* Initialize the MSI-X structures. Note: if MSI-X is supported, the BAR size
 * is modified, so it should be retrieved with msix_bar_size. */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *bar,
              unsigned bar_nr, unsigned bar_size)
{
    int ret;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msix_supported ||
        (kvm_enabled() && kvm_irqchip_in_kernel() &&
         !kvm_has_gsi_routing())) {
        return -ENOTSUP;
    }

    if (nentries > MSIX_MAX_ENTRIES)
        return -EINVAL;

    dev->msix_mask_notifier = NULL;
    dev->msix_entry_used = g_malloc0(MSIX_MAX_ENTRIES *
                                     sizeof *dev->msix_entry_used);

    dev->msix_table_page = g_malloc0(MSIX_PAGE_SIZE);
    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_mmio, &msix_mmio_ops, dev,
                          "msix", MSIX_PAGE_SIZE);

    dev->msix_entries_nr = nentries;
    ret = msix_add_config(dev, nentries, bar_nr, bar_size);
    if (ret)
        goto err_config;

    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        dev->msix_irq_entries = g_malloc(nentries *
                                         sizeof *dev->msix_irq_entries);
    }

    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    msix_mmio_setup(dev, bar);
    return 0;

err_config:
    dev->msix_entries_nr = 0;
    memory_region_destroy(&dev->msix_mmio);
    g_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    return ret;
}
static void icp_realize(DeviceState *dev, Error **errp)
{
    ICPState *icp = ICP(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Object *obj;
    Error *err = NULL;

    obj = object_property_get_link(OBJECT(dev), ICP_PROP_XICS, &err);
    if (!obj) {
        error_propagate_prepend(errp, err,
                                "required link '" ICP_PROP_XICS
                                "' not found: ");
        return;
    }

    icp->xics = XICS_FABRIC(obj);

    obj = object_property_get_link(OBJECT(dev), ICP_PROP_CPU, &err);
    if (!obj) {
        error_propagate_prepend(errp, err,
                                "required link '" ICP_PROP_CPU
                                "' not found: ");
        return;
    }

    cpu = POWERPC_CPU(obj);
    icp->cs = CPU(obj);

    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        icp->output = env->irq_inputs[POWER7_INPUT_INT];
        break;
    case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */
        icp->output = env->irq_inputs[POWER9_INPUT_INT];
        break;
    case PPC_FLAGS_INPUT_970:
        icp->output = env->irq_inputs[PPC970_INPUT_INT];
        break;
    default:
        error_setg(errp,
                   "XICS interrupt controller does not support this CPU bus model");
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        icp_kvm_realize(dev, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }

    qemu_register_reset(icp_reset_handler, dev);
    vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
}
static int a15mp_priv_init(SysBusDevice *dev)
{
    A15MPPrivState *s = A15MPCORE_PRIV(dev);
    SysBusDevice *busdev;
    const char *gictype = "arm_gic";
    int i;

    if (kvm_irqchip_in_kernel()) {
        gictype = "kvm-arm-gic";
    }

    s->gic = qdev_create(NULL, gictype);
    qdev_prop_set_uint32(s->gic, "num-cpu", s->num_cpu);
    qdev_prop_set_uint32(s->gic, "num-irq", s->num_irq);
    qdev_prop_set_uint32(s->gic, "revision", 2);
    qdev_init_nofail(s->gic);
    busdev = SYS_BUS_DEVICE(s->gic);

    /* Pass through outbound IRQ lines from the GIC */
    sysbus_pass_irq(dev, busdev);

    /* Pass through inbound GPIO lines to the GIC */
    qdev_init_gpio_in(DEVICE(dev), a15mp_priv_set_irq, s->num_irq - 32);

    /* Wire the outputs from each CPU's generic timer to the
     * appropriate GIC PPI inputs
     */
    for (i = 0; i < s->num_cpu; i++) {
        DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
        int ppibase = s->num_irq - 32 + i * 32;
        /* physical timer; we wire it up to the non-secure timer's ID,
         * since a real A15 always has TrustZone but QEMU doesn't.
         */
        qdev_connect_gpio_out(cpudev, 0,
                              qdev_get_gpio_in(s->gic, ppibase + 30));
        /* virtual timer */
        qdev_connect_gpio_out(cpudev, 1,
                              qdev_get_gpio_in(s->gic, ppibase + 27));
    }

    /* Memory map (addresses are offsets from PERIPHBASE):
     *  0x0000-0x0fff -- reserved
     *  0x1000-0x1fff -- GIC Distributor
     *  0x2000-0x2fff -- GIC CPU interface
     *  0x4000-0x4fff -- GIC virtual interface control (not modelled)
     *  0x5000-0x5fff -- GIC virtual interface control (not modelled)
     *  0x6000-0x7fff -- GIC virtual CPU interface (not modelled)
     */
    memory_region_init(&s->container, OBJECT(s),
                       "a15mp-priv-container", 0x8000);
    memory_region_add_subregion(&s->container, 0x1000,
                                sysbus_mmio_get_region(busdev, 0));
    memory_region_add_subregion(&s->container, 0x2000,
                                sysbus_mmio_get_region(busdev, 1));

    sysbus_init_mmio(dev, &s->container);
    return 0;
}
/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector < dev->msix_entries_nr && dev->msix_entry_used[vector]) {
        --dev->msix_entry_used[vector];
        if (kvm_enabled() && kvm_irqchip_in_kernel()) {
            kvm_msix_del(dev, vector);
        }
    }
}
static int ics_base_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        return ics_set_kvm_state(ics);
    }

    return 0;
}
static int ics_base_pre_save(void *opaque)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_get_kvm_state(ics);
    }

    return 0;
}
static void ics_simple_reset(DeviceState *dev)
{
    ICSStateClass *icsc = ICS_BASE_GET_CLASS(dev);

    icsc->parent_reset(dev);

    if (kvm_irqchip_in_kernel()) {
        ics_set_kvm_state(ICS_BASE(dev));
    }
}
static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_msix_free(dev);
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector)
        dev->msix_entry_used[vector] = 0;
}
static int icp_post_load(void *opaque, int version_id)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        return icp_set_kvm_state(icp);
    }

    return 0;
}
static int icp_pre_save(void *opaque)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        icp_get_kvm_state(icp);
    }

    return 0;
}
void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;

    if (kvm_irqchip_in_kernel()) {
        ics_set_kvm_state_one(ics, srcno);
    }
}
int kvm_get_lapic(CPUState *env, struct kvm_lapic_state *s)
{
    int r = 0;

    if (!kvm_irqchip_in_kernel())
        return r;

    r = kvm_vcpu_ioctl(env, KVM_GET_LAPIC, s);
    if (r < 0)
        fprintf(stderr, "KVM_GET_LAPIC failed\n");

    return r;
}
static bool cpu_thread_is_idle(CPUArchState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !runstate_is_running()) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) || kvm_irqchip_in_kernel()) {
        return false;
    }
    return true;
}
static void kvm_reset_mpstate(CPUState *env)
{
#ifdef KVM_CAP_MP_STATE
    if (kvm_check_extension(kvm_state, KVM_CAP_MP_STATE)) {
        if (kvm_irqchip_in_kernel()) {
            env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                              KVM_MP_STATE_UNINITIALIZED;
        } else {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }
#endif
}
/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_msix_vector_del(dev, vector);
    }
    msix_clr_pending(dev, vector);
}
static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !vm_running) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) ||
        (kvm_enabled() && kvm_irqchip_in_kernel())) {
        return false;
    }
    return true;
}
/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    int ret;

    if (vector >= dev->msix_entries_nr)
        return -EINVAL;

    if (kvm_enabled() && kvm_irqchip_in_kernel() &&
        !dev->msix_entry_used[vector]) {
        ret = kvm_msix_vector_add(dev, vector);
        if (ret) {
            return ret;
        }
    }
    ++dev->msix_entry_used[vector];
    return 0;
}
static void icp_reset_handler(void *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);

    if (kvm_irqchip_in_kernel()) {
        icp_set_kvm_state(ICP(dev));
    }
}
void ics_simple_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_kvm_set_irq(ics, srcno, val);
        return;
    }

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        ics_simple_set_irq_lsi(ics, srcno, val);
    } else {
        ics_simple_set_irq_msi(ics, srcno, val);
    }
}
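/*
 * Hedged wiring sketch (assumption, not from the source above): because
 * ics_simple_set_irq() has the qemu_irq_handler signature (opaque, line,
 * level), the interrupt sources could be exposed as qemu_irq lines roughly
 * like this; "xics_claim_irqs" is an illustrative helper name.
 */
static qemu_irq *xics_claim_irqs(ICSState *ics)
{
    /* One qemu_irq per source; raising or lowering a line ends up in
     * ics_simple_set_irq(ics, srcno, val). */
    return qemu_allocate_irqs(ics_simple_set_irq, ics, ics->nr_irqs);
}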
const char *gicv3_class_name(void)
{
    if (kvm_irqchip_in_kernel()) {
#ifdef TARGET_AARCH64
        return "kvm-arm-gicv3";
#else
        error_report("KVM GICv3 acceleration is not supported on this "
                     "platform");
#endif
    } else {
        return "arm-gicv3";
    }

    exit(1);
}
const char *gicv3_class_name(void)
{
    if (kvm_irqchip_in_kernel()) {
#ifdef TARGET_AARCH64
        return "kvm-arm-gicv3";
#else
        error_report("KVM GICv3 acceleration is not supported on this "
                     "platform");
#endif
    } else {
        /* TODO: Software emulation is not implemented yet */
        error_report("KVM is currently required for GICv3 emulation");
    }

    exit(1);
}
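/*
 * Hedged usage sketch (assumption, not from the source above): a machine
 * model would typically pick the GICv3 implementation by class name and
 * create the device, here using the qdev_create() API seen in the A15 code
 * above; newer trees use qdev_new() instead. "create_gicv3_device" is an
 * illustrative helper name.
 */
static DeviceState *create_gicv3_device(void)
{
    /* Selects "kvm-arm-gicv3" or "arm-gicv3" depending on the irqchip mode. */
    return qdev_create(NULL, gicv3_class_name());
}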
static void msix_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1);
    int vector = offset / MSIX_ENTRY_SIZE;
    int was_masked = msix_is_masked(dev, vector);

    memcpy(dev->msix_table_page + offset, &val, 4);
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_msix_update(dev, vector, was_masked, msix_is_masked(dev, vector));
    }
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
void icp_pic_print_info(ICPState *icp, Monitor *mon)
{
    int cpu_index = icp->cs ? icp->cs->cpu_index : -1;

    if (!icp->output) {
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        icp_synchronize_state(icp);
    }

    monitor_printf(mon, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n",
                   cpu_index, icp->xirr, icp->xirr_owner,
                   icp->pending_priority, icp->mfrr);
}
/* Initialize the MSI-X structures. Note: if MSI-X is supported, the BAR size
 * is modified, so it should be retrieved with msix_bar_size. */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              unsigned bar_nr, unsigned bar_size)
{
    int ret;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msix_supported)
        return -ENOTSUP;

    if (nentries > MSIX_MAX_ENTRIES)
        return -EINVAL;

#ifdef KVM_CAP_IRQCHIP
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        dev->msix_irq_entries = qemu_malloc(nentries *
                                            sizeof *dev->msix_irq_entries);
    }
#endif
    dev->msix_entry_used = qemu_mallocz(MSIX_MAX_ENTRIES *
                                        sizeof *dev->msix_entry_used);

    dev->msix_table_page = qemu_mallocz(MSIX_PAGE_SIZE);

    dev->msix_mmio_index = cpu_register_io_memory(msix_mmio_read,
                                                  msix_mmio_write, dev);
    if (dev->msix_mmio_index == -1) {
        ret = -EBUSY;
        goto err_index;
    }

    dev->msix_entries_nr = nentries;
    ret = msix_add_config(dev, nentries, bar_nr, bar_size);
    if (ret)
        goto err_config;

    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    return 0;

err_config:
    dev->msix_entries_nr = 0;
    cpu_unregister_io_memory(dev->msix_mmio_index);
err_index:
    qemu_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    qemu_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    return ret;
}
int kvm_irqchip_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_irqchip_in_kernel());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irqchip_line");
        abort();
    }

    return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
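/*
 * Hedged usage sketch (illustrative, not from the source above): pulsing an
 * edge-triggered GSI on the in-kernel irqchip by asserting and then
 * deasserting the line. "pulse_gsi" is a placeholder name; the KVMState and
 * GSI number are supplied by the caller.
 */
static void pulse_gsi(KVMState *s, int gsi)
{
    kvm_irqchip_set_irq(s, gsi, 1);   /* assert the line */
    kvm_irqchip_set_irq(s, gsi, 0);   /* deassert the line */
}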
static void msix_mmio_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    int vector = offset / PCI_MSIX_ENTRY_SIZE;
    int was_masked = msix_is_masked(dev, vector);

    pci_set_long(dev->msix_table_page + offset, val);
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_msix_update(dev, vector, was_masked, msix_is_masked(dev, vector));
    }
    if (was_masked != msix_is_masked(dev, vector) &&
        dev->msix_mask_notifier) {
        int r = dev->msix_mask_notifier(dev, vector,
                                        msix_is_masked(dev, vector));
        assert(r >= 0);
    }
    msix_handle_mask_update(dev, vector);
}