/*
 * Recompute the SHPC controller's interrupt state and propagate it.
 *
 * Rebuilds the Interrupt Locator register from each slot's event-latch
 * byte (events masked by the per-slot SERR/INT disable byte do not
 * contribute), folds in a pending command-completion event unless
 * command interrupts are disabled, then derives the overall level and
 * delivers it: an MSI is sent only when the level changed (edge-style),
 * otherwise the INTx line is driven to the computed level.
 */
static void shpc_interrupt_update(PCIDevice *d)
{
    SHPCDevice *shpc = d->shpc;
    int slot;
    int level = 0;
    uint32_t serr_int;
    uint32_t int_locator = 0;

    /* Update interrupt locator register */
    for (slot = 0; slot < shpc->nslots; ++slot) {
        uint8_t event = shpc->config[SHPC_SLOT_EVENT_LATCH(slot)];
        uint8_t disable = shpc->config[SHPC_SLOT_EVENT_SERR_INT_DIS(d, slot)];
        uint32_t mask = 1U << SHPC_IDX_TO_LOGICAL(slot);
        if (event & ~disable) {
            int_locator |= mask;
        }
    }

    serr_int = pci_get_long(shpc->config + SHPC_SERR_INT);
    if ((serr_int & SHPC_CMD_DETECTED) && !(serr_int & SHPC_CMD_INT_DIS)) {
        int_locator |= SHPC_INT_COMMAND;
    }

    pci_set_long(shpc->config + SHPC_INT_LOCATOR, int_locator);
    level = (!(serr_int & SHPC_INT_DIS) && int_locator) ? 1 : 0;

    /* Braces added per QEMU coding style; logic unchanged. */
    if (msi_enabled(d) && shpc->msi_requested != level) {
        msi_notify(d, 0);
    } else {
        /*
         * NOTE(review): this branch is also taken when MSI is enabled but
         * the level did not change — presumably harmless if pci_set_irq()
         * is a no-op for an unchanged INTx level; confirm against PCI core.
         */
        pci_set_irq(d, level);
    }
    shpc->msi_requested = level;
}
/*
 * Signal a pending AER root-port event to the guest, preferring MSI-X,
 * then MSI, and finally legacy INTx assertion.
 */
static void pcie_aer_root_notify(PCIDevice *dev)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
        return;
    }
    if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
        return;
    }
    pci_irq_assert(dev);
}
/*
 * Signal a pending AER root-port event to the guest: MSI-X first,
 * MSI second, and as a last resort raise the INTx line selected by
 * the device's AER interrupt pin.
 */
static void pcie_aer_root_notify(PCIDevice *dev)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
        return;
    }
    if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
        return;
    }
    qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
}
/*
 * Latch the given status bits into irq_status and, if any cause is now
 * pending, deliver the interrupt — via MSI when the device has it
 * enabled, otherwise by asserting the INTx pin.
 */
static void edu_raise_irq(EduState *edu, uint32_t val)
{
    edu->irq_status |= val;
    if (!edu->irq_status) {
        /* No cause pending (val was 0 and nothing was latched). */
        return;
    }

    if (edu_msi_enabled(edu)) {
        msi_notify(&edu->pdev, 0);
    } else {
        pci_set_irq(&edu->pdev, 1);
    }
}
/*
 * Raise the AHCI controller interrupt: deliver an MSI when the owning
 * PCI function has MSI enabled, otherwise raise the legacy IRQ line.
 */
static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
    struct AHCIPCIState *pci_state = container_of(s, AHCIPCIState, ahci);

    DPRINTF(0, "raise irq\n");

    if (!msi_enabled(&pci_state->card)) {
        qemu_irq_raise(s->irq);
        return;
    }
    msi_notify(&pci_state->card, 0);
}
/*
 * Raise the AHCI controller interrupt: MSI when enabled on the
 * enclosing PCI device, legacy IRQ line otherwise.
 */
static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
    AHCIPCIState *ahci_pci = container_of(s, AHCIPCIState, ahci);
    PCIDevice *pdev = PCI_DEVICE(ahci_pci);

    DPRINTF(0, "raise irq\n");

    if (!msi_enabled(pdev)) {
        qemu_irq_raise(s->irq);
        return;
    }
    msi_notify(pdev, 0);
}
/*
 * Raise the AHCI controller interrupt. The container may or may not be
 * a PCI device (e.g. sysbus AHCI): MSI is used only when the dynamic
 * cast succeeds and MSI is enabled; in every other case the legacy IRQ
 * line is raised.
 */
static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
    PCIDevice *pdev;

    pdev = (PCIDevice *)object_dynamic_cast(OBJECT(s->container),
                                            TYPE_PCI_DEVICE);

    DPRINTF(0, "raise irq\n");

    if (pdev != NULL && msi_enabled(pdev)) {
        msi_notify(pdev, 0);
    } else {
        qemu_irq_raise(s->irq);
    }
}
/*
 * Deliver a PCIe hot-plug event interrupt when — and only when — the
 * notification status actually toggled after re-evaluating it.
 * Delivery preference is MSI-X, then MSI, then INTx driven to the new
 * notification state.
 */
static void hotplug_event_notify(PCIDevice *dev)
{
    bool prev = dev->exp.hpev_notified;

    hotplug_event_update_event_status(dev);

    if (prev == dev->exp.hpev_notified) {
        /* Status unchanged: nothing to signal. */
        return;
    }

    /*
     * Interrupt masking is deliberately not consulted here; a masked
     * event is simply signalled once interrupts are unmasked again.
     * This appears to be legal per Section 6.7.3.4: the Port may
     * optionally send an MSI for hot-plug events that occurred while
     * interrupt generation was disabled, once generation is
     * subsequently enabled.
     */
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_cap_flags_get_vector(dev));
        return;
    }
    if (msi_enabled(dev)) {
        msi_notify(dev, pcie_cap_flags_get_vector(dev));
        return;
    }
    pci_set_irq(dev, dev->exp.hpev_notified);
}