static void hotplug_event_clear(PCIDevice *dev)
{
    hotplug_event_update_event_status(dev);
    if (!msix_enabled(dev) && !msi_enabled(dev) && !dev->exp.hpev_notified) {
        pci_irq_deassert(dev);
    }
}

static void shpc_interrupt_update(PCIDevice *d)
{
    SHPCDevice *shpc = d->shpc;
    int slot;
    int level = 0;
    uint32_t serr_int;
    uint32_t int_locator = 0;

    /* Update interrupt locator register */
    for (slot = 0; slot < shpc->nslots; ++slot) {
        uint8_t event = shpc->config[SHPC_SLOT_EVENT_LATCH(slot)];
        uint8_t disable = shpc->config[SHPC_SLOT_EVENT_SERR_INT_DIS(d, slot)];
        uint32_t mask = 1U << SHPC_IDX_TO_LOGICAL(slot);
        if (event & ~disable) {
            int_locator |= mask;
        }
    }

    serr_int = pci_get_long(shpc->config + SHPC_SERR_INT);
    if ((serr_int & SHPC_CMD_DETECTED) && !(serr_int & SHPC_CMD_INT_DIS)) {
        int_locator |= SHPC_INT_COMMAND;
    }

    pci_set_long(shpc->config + SHPC_INT_LOCATOR, int_locator);
    level = (!(serr_int & SHPC_INT_DIS) && int_locator) ? 1 : 0;

    /* MSI is edge-triggered, so send a message only when the computed
     * level changes; the legacy INTx pin simply tracks the level. */
    if (msi_enabled(d) && shpc->msi_requested != level) {
        msi_notify(d, 0);
    } else {
        pci_set_irq(d, level);
    }
    shpc->msi_requested = level;
}

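/*
 * A hedged caller sketch for the function above: latch a slot event and let
 * shpc_interrupt_update() recompute the interrupt locator and signal the
 * guest. The helper name and the event parameter are illustrative
 * assumptions, not part of the snippets in this collection;
 * SHPC_SLOT_EVENT_LATCH() and shpc_interrupt_update() are used exactly as
 * the snippet above reads them.
 */
static void shpc_latch_event_sketch(PCIDevice *d, int slot, uint8_t event)
{
    SHPCDevice *shpc = d->shpc;

    shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |= event;
    shpc_interrupt_update(d);
}
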
static void hotplug_event_clear(PCIDevice *dev)
{
    hotplug_event_update_event_status(dev);
    if (!msix_enabled(dev) && !msi_enabled(dev) && !dev->exp.hpev_notified) {
        qemu_set_irq(dev->irq[dev->exp.hpev_intx], 0);
    }
}

static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)
{
    AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);

    DPRINTF(0, "lower irq\n");

    if (!msi_enabled(PCI_DEVICE(d))) {
        qemu_irq_lower(s->irq);
    }
}

static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)
{
    struct AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);

    DPRINTF(0, "lower irq\n");

    if (!msi_enabled(&d->card)) {
        qemu_irq_lower(s->irq);
    }
}

static void pcie_aer_root_notify(PCIDevice *dev)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
    } else {
        pci_irq_assert(dev);
    }
}

static void pcie_aer_root_notify(PCIDevice *dev)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
    } else {
        qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
    }
}

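/*
 * For context on both variants above: a sketch of how the shared vector
 * lookup can read the Advanced Error Interrupt Message Number out of the
 * Root Error Status register. It assumes the usual PCI_ERR_ROOT_STATUS and
 * PCI_ERR_ROOT_IRQ register definitions; the shift constant is an
 * assumption here, not taken from the snippets above.
 */
static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);

    return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
}
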
static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)
{
    DeviceState *dev_state = s->container;
    PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
                                                           TYPE_PCI_DEVICE);

    DPRINTF(0, "lower irq\n");

    if (!pci_dev || !msi_enabled(pci_dev)) {
        qemu_irq_lower(s->irq);
    }
}

static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
    struct AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);

    DPRINTF(0, "raise irq\n");

    if (msi_enabled(&d->card)) {
        msi_notify(&d->card, 0);
    } else {
        qemu_irq_raise(s->irq);
    }
}

static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
    AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
    PCIDevice *pci_dev = PCI_DEVICE(d);

    DPRINTF(0, "raise irq\n");

    if (msi_enabled(pci_dev)) {
        msi_notify(pci_dev, 0);
    } else {
        qemu_irq_raise(s->irq);
    }
}

static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
    MSIMessage msg;
    /* initialize all attribute fields, not just the requester ID */
    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(&s->pci.dev)
    };

    if (msi_enabled(&s->pci.dev)) {
        msg = msi_get_message(&s->pci.dev, 0);
        address_space_stl_le(&address_space_memory, msg.address, msg.data,
                             attrs, NULL);
    }
}

static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
    DeviceState *dev_state = s->container;
    PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
                                                           TYPE_PCI_DEVICE);

    DPRINTF(0, "raise irq\n");

    if (pci_dev && msi_enabled(pci_dev)) {
        msi_notify(pci_dev, 0);
    } else {
        qemu_irq_raise(s->irq);
    }
}

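/*
 * All of the raise/lower variants above follow one pattern: fire an
 * edge-style MSI message when the guest has enabled MSI, otherwise drive
 * the level-triggered legacy pin. A minimal generic sketch of that pattern;
 * the helper name is illustrative, while msi_enabled(), msi_notify() and
 * pci_set_irq() are the calls used by the snippets themselves.
 */
static void pci_notify_sketch(PCIDevice *dev, int level)
{
    if (msi_enabled(dev)) {
        if (level) {
            msi_notify(dev, 0);   /* MSI: send a message only on assert */
        }
    } else {
        pci_set_irq(dev, level);  /* INTx: track the level directly */
    }
}
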
static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
    MSIMessage msg = {};
    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(&s->pci.dev)
    };

    if (msi_enabled(&s->pci.dev)) {
        msg = msi_get_message(&s->pci.dev, 0);
        address_space_stl_le(&address_space_memory, msg.address, msg.data,
                             attrs, NULL);
    }
}

static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
{
    /* event logging not enabled */
    if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
        AMDVI_MMIO_STATUS_EVT_OVF)) {
        return;
    }

    /* event log buffer full */
    if (s->evtlog_tail >= s->evtlog_len) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
        return;
    }

    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
                         evt, AMDVI_EVENT_LEN)) {
        trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
    }

    s->evtlog_tail += AMDVI_EVENT_LEN;
    amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
    amdvi_generate_msi_interrupt(s);
}

static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
                                int length)
{
    int index = start / 64, bitpos = start % 64;
    /* the mask is relative to the 64-bit word selected by index */
    uint64_t mask = MAKE_64BIT_MASK(bitpos, length);

    buffer[index] &= ~mask;
    buffer[index] |= (value << bitpos) & mask;
}

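/*
 * A hypothetical producer for the event log above: pack fields into one
 * 16-byte event entry with amdvi_setevent_bits() and queue it through
 * amdvi_log_event(). The helper name and the field offsets are illustrative
 * assumptions, not taken from the AMD IOMMU specification or the snippets
 * above; AMDVI_EVENT_LEN is assumed to be the 16-byte entry size used by
 * amdvi_log_event().
 */
static void amdvi_log_fault_sketch(AMDVIState *s, uint16_t devid,
                                   uint64_t addr)
{
    uint64_t evt[2] = { 0, 0 };                   /* one 16-byte event entry */

    amdvi_setevent_bits(evt, devid, 0, 16);       /* device ID, bits 0..15 */
    amdvi_setevent_bits(evt, addr >> 12, 16, 48); /* page frame, bits 16..63 */
    amdvi_log_event(s, evt);
}
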
static void hotplug_event_notify(PCIDevice *dev)
{
    bool prev = dev->exp.hpev_notified;

    hotplug_event_update_event_status(dev);

    if (prev == dev->exp.hpev_notified) {
        return;
    }

    /*
     * Note: the logic above does not take into account whether interrupts
     * are masked. The result is that an interrupt will be sent when it is
     * subsequently unmasked. This appears to be legal: Section 6.7.3.4:
     * The Port may optionally send an MSI when there are hot-plug events
     * that occur while interrupt generation is disabled, and interrupt
     * generation is subsequently enabled.
     */
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_cap_flags_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_cap_flags_get_vector(dev));
    } else {
        pci_set_irq(dev, dev->exp.hpev_notified);
    }
}

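/*
 * A sketch of the edge that feeds hotplug_event_notify(): latch the new
 * event bits in the Slot Status register and notify only if at least one
 * of them was not already set. pci_word_test_and_set_mask() is assumed to
 * return the previously-set bits under the mask; the wrapper name is
 * illustrative.
 */
static void pcie_cap_slot_event_sketch(PCIDevice *dev, uint16_t event)
{
    if (pci_word_test_and_set_mask(dev->config + dev->exp.exp_cap +
                                   PCI_EXP_SLTSTA, event) == event) {
        return; /* every requested bit was already latched: nothing changed */
    }
    hotplug_event_notify(dev);
}
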
static bool edu_msi_enabled(EduState *edu)
{
    return msi_enabled(&edu->pdev);
}

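/*
 * Usage sketch for the wrapper above, modeled on a typical PCI device
 * IRQ-raise path: accumulate status bits, then deliver via MSI when the
 * guest enabled it, falling back to the INTx pin otherwise. The helper
 * name and the irq_status field are illustrative assumptions; edu->pdev,
 * msi_notify() and pci_set_irq() are used as in the snippets above.
 */
static void edu_raise_irq_sketch(EduState *edu, uint32_t val)
{
    edu->irq_status |= val;
    if (edu->irq_status) {
        if (edu_msi_enabled(edu)) {
            msi_notify(&edu->pdev, 0);
        } else {
            pci_set_irq(&edu->pdev, 1);
        }
    }
}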