static void hotplug_event_clear(PCIDevice *dev)
{
    hotplug_event_update_event_status(dev);
    if (!msix_enabled(dev) && !msi_enabled(dev) && !dev->exp.hpev_notified) {
        pci_irq_deassert(dev);
    }
}
static void hotplug_event_clear(PCIDevice *dev)
{
    hotplug_event_update_event_status(dev);
    if (!msix_enabled(dev) && !msi_enabled(dev) && !dev->exp.hpev_notified) {
        qemu_set_irq(dev->irq[dev->exp.hpev_intx], 0);
    }
}
static void setup_interrupt(IVShmemState *s, int vector)
{
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
                      ivshmem_has_feature(s, IVSHMEM_MSI);
    PCIDevice *pdev = PCI_DEVICE(s);

    IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);

    if (!with_irqfd) {
        IVSHMEM_DPRINTF("with eventfd");
        s->eventfd_chr[vector] = create_eventfd_chr_device(s, n, vector);
    } else if (msix_enabled(pdev)) {
        IVSHMEM_DPRINTF("with irqfd");
        if (ivshmem_add_kvm_msi_virq(s, vector) < 0) {
            return;
        }

        if (!msix_is_masked(pdev, vector)) {
            kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
                                               s->msi_vectors[vector].virq);
        }
    } else {
        /* it will be delayed until msix is enabled, in write_config */
        IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled");
    }
}
/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}
static void setup_interrupt(IVShmemState *s, int vector, Error **errp)
{
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
                      ivshmem_has_feature(s, IVSHMEM_MSI);
    PCIDevice *pdev = PCI_DEVICE(s);
    Error *err = NULL;

    IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);

    if (!with_irqfd) {
        IVSHMEM_DPRINTF("with eventfd\n");
        watch_vector_notifier(s, n, vector);
    } else if (msix_enabled(pdev)) {
        IVSHMEM_DPRINTF("with irqfd\n");
        ivshmem_add_kvm_msi_virq(s, vector, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }

        if (!msix_is_masked(pdev, vector)) {
            kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
                                               s->msi_vectors[vector].virq);
            /* TODO handle error */
        }
    } else {
        /* it will be delayed until msix is enabled, in write_config */
        IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n");
    }
}
static void ivshmem_write_config(PCIDevice *pdev, uint32_t address,
                                 uint32_t val, int len)
{
    IVShmemState *s = IVSHMEM_COMMON(pdev);
    int is_enabled, was_enabled = msix_enabled(pdev);

    pci_default_write_config(pdev, address, val, len);
    is_enabled = msix_enabled(pdev);

    if (kvm_msi_via_irqfd_enabled()) {
        if (!was_enabled && is_enabled) {
            ivshmem_enable_irqfd(s);
        } else if (was_enabled && !is_enabled) {
            ivshmem_disable_irqfd(s);
        }
    }
}
static void e1000e_cleanup_msix(E1000EState *s)
{
    if (msix_enabled(PCI_DEVICE(s))) {
        e1000e_unuse_msix_vectors(s, E1000E_MSIX_VEC_NUM);
        msix_uninit(PCI_DEVICE(s), &s->msix, &s->msix);
    }
}
/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_ENABLE_OFFSET;

    if (addr + len <= enable_pos || addr > enable_pos)
        return;

    if (msix_enabled(dev))
        qemu_set_irq(dev->irq[0], 0);
}
void isr_notify(NVMEState *n, NVMEIOCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->dev))) {
            msix_notify(&(n->dev), cq->vector);
        } else {
            qemu_irq_pulse(n->dev.irq[0]);
        }
    }
}
static void nvme_isr_notify(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            pci_irq_pulse(&n->parent_obj);
        }
    }
}
static void pcie_aer_root_notify(PCIDevice *dev)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
    } else {
        pci_irq_assert(dev);
    }
}
static void pcie_aer_root_notify(PCIDevice *dev)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
    } else {
        qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
    }
}
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}
static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        } else {
            assert(cq->cqid < 64);
            n->irq_status &= ~(1 << cq->cqid);
            nvme_irq_check(n);
        }
    }
}
static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_nvme_irq_pin();
            assert(cq->cqid < 64);
            n->irq_status |= 1 << cq->cqid;
            nvme_irq_check(n);
        }
    } else {
        trace_nvme_irq_masked();
    }
}
static void ivshmem_vector_notify(void *opaque)
{
    MSIVector *entry = opaque;
    PCIDevice *pdev = entry->pdev;
    IVShmemState *s = IVSHMEM_COMMON(pdev);
    int vector = entry - s->msi_vectors;
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];

    if (!event_notifier_test_and_clear(n)) {
        return;
    }

    IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector);
    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        if (msix_enabled(pdev)) {
            msix_notify(pdev, vector);
        }
    } else {
        ivshmem_IntrStatus_write(s, 1);
    }
}
static void hotplug_event_notify(PCIDevice *dev)
{
    bool prev = dev->exp.hpev_notified;

    hotplug_event_update_event_status(dev);

    if (prev == dev->exp.hpev_notified) {
        return;
    }

    /* Note: the logic above does not take into account whether interrupts
     * are masked. The result is that interrupt will be sent when it is
     * subsequently unmasked. This appears to be legal: Section 6.7.3.4:
     * The Port may optionally send an MSI when there are hot-plug events that
     * occur while interrupt generation is disabled, and interrupt generation
     * is subsequently enabled. */
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_cap_flags_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_cap_flags_get_vector(dev));
    } else {
        pci_set_irq(dev, dev->exp.hpev_notified);
    }
}
static void spapr_phb_vfio_finish_realize(sPAPRPHBState *sphb, Error **errp)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
    int ret;
    sPAPRTCETable *tcet;
    uint32_t liobn = svphb->phb.dma_liobn;

    if (svphb->iommugroupid == -1) {
        error_setg(errp, "Wrong IOMMU group ID %d", svphb->iommugroupid);
        return;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_CHECK_EXTENSION,
                               (void *) VFIO_SPAPR_TCE_IOMMU);
    if (ret != 1) {
        error_setg_errno(errp, -ret,
                         "spapr-vfio: SPAPR extension is not supported");
        return;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "spapr-vfio: get info from container failed");
        return;
    }

    tcet = spapr_tce_new_table(DEVICE(sphb), liobn, info.dma32_window_start,
                               SPAPR_TCE_PAGE_SHIFT,
                               info.dma32_window_size >> SPAPR_TCE_PAGE_SHIFT,
                               true);
    if (!tcet) {
        error_setg(errp, "spapr-vfio: failed to create VFIO TCE table");
        return;
    }

    /* Register default 32bit DMA window */
    memory_region_add_subregion(&sphb->iommu_root, tcet->bus_offset,
                                spapr_tce_get_iommu(tcet));
}

static void spapr_phb_vfio_eeh_reenable(sPAPRPHBVFIOState *svphb)
{
    struct vfio_eeh_pe_op op = {
        .argsz = sizeof(op),
        .op = VFIO_EEH_PE_ENABLE
    };

    vfio_container_ioctl(&svphb->phb.iommu_as,
                         svphb->iommugroupid, VFIO_EEH_PE_OP, &op);
}

static void spapr_phb_vfio_reset(DeviceState *qdev)
{
    /*
     * The PE might be in frozen state. To reenable the EEH
     * functionality on it will clean the frozen state, which
     * ensures that the contained PCI devices will work properly
     * after reboot.
     */
    spapr_phb_vfio_eeh_reenable(SPAPR_PCI_VFIO_HOST_BRIDGE(qdev));
}

static int spapr_phb_vfio_eeh_set_option(sPAPRPHBState *sphb,
                                         unsigned int addr, int option)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    switch (option) {
    case RTAS_EEH_DISABLE:
        op.op = VFIO_EEH_PE_DISABLE;
        break;
    case RTAS_EEH_ENABLE: {
        PCIHostState *phb;
        PCIDevice *pdev;

        /*
         * The EEH functionality is enabled on the basis of the PCI device,
         * instead of the PE. We need to check the validity of the PCI
         * device address.
         */
        phb = PCI_HOST_BRIDGE(sphb);
        pdev = pci_find_device(phb->bus,
                               (addr >> 16) & 0xFF, (addr >> 8) & 0xFF);
        if (!pdev) {
            return RTAS_OUT_PARAM_ERROR;
        }

        op.op = VFIO_EEH_PE_ENABLE;
        break;
    }
    case RTAS_EEH_THAW_IO:
        op.op = VFIO_EEH_PE_UNFREEZE_IO;
        break;
    case RTAS_EEH_THAW_DMA:
        op.op = VFIO_EEH_PE_UNFREEZE_DMA;
        break;
    default:
        return RTAS_OUT_PARAM_ERROR;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_HW_ERROR;
    }

    return RTAS_OUT_SUCCESS;
}

static int spapr_phb_vfio_eeh_get_state(sPAPRPHBState *sphb, int *state)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    op.op = VFIO_EEH_PE_GET_STATE;
    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_PARAM_ERROR;
    }

    *state = ret;
    return RTAS_OUT_SUCCESS;
}

static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus,
                                              PCIDevice *pdev,
                                              void *opaque)
{
    /* Check if the device is VFIO PCI device */
    if (!object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
        return;
    }

    /*
     * The MSIx table will be cleaned out by reset. We need to
     * disable it so that it can be reenabled properly. Also,
     * the cached MSIx table should be cleared as it's not
     * reflecting the contents in hardware.
     */
    if (msix_enabled(pdev)) {
        uint16_t flags;

        flags = pci_host_config_read_common(pdev,
                                            pdev->msix_cap + PCI_MSIX_FLAGS,
                                            pci_config_size(pdev), 2);
        flags &= ~PCI_MSIX_FLAGS_ENABLE;
        pci_host_config_write_common(pdev,
                                     pdev->msix_cap + PCI_MSIX_FLAGS,
                                     pci_config_size(pdev), flags, 2);
    }

    msix_reset(pdev);
}

static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque)
{
    pci_for_each_device(bus, pci_bus_num(bus),
                        spapr_phb_vfio_eeh_clear_dev_msix, NULL);
}
static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) ||
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK);
}