Example #1
File: msix.c Project: Icenowy/qemu
static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    return pci_get_long(dev->msix_table + addr);
}
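
Every example on this page reads a 32-bit value out of a byte buffer: PCI config space, an MSI-X table, or a pending-bit array. For context, QEMU implements the accessor as an inline little-endian load; a minimal sketch of its shape, assuming the ldl_le_p helper from qemu/bswap.h:

static inline uint32_t pci_get_long(const uint8_t *config)
{
    /* PCI config space is little-endian by definition, so this is an
     * unaligned little-endian 32-bit load from the byte buffer. */
    return ldl_le_p(config);
}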
Example #2
static void pcie_aer_clear_error(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    PCIEAERLog *aer_log = &dev->exp.aer_log;
    PCIEAERErr err;

    if (!(errcap & PCI_ERR_CAP_MHRE) || !aer_log->log_num) {
        pcie_aer_clear_log(dev);
        return;
    }

    /*
     * If more errors are queued, set corresponding bits in uncorrectable
     * error status.
     * We emulate uncorrectable error status register as W1CS.
     * So set bit in uncorrectable error status here again for multiple
     * error recording support.
     *
 * 6.2.4.2 Multiple Error Handling (Advanced Error Reporting Capability)
     */
    pcie_aer_update_uncor_status(dev);

    aer_log_del_err(aer_log, &err);
    pcie_aer_update_log(dev, &err);
}
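
The pcie_aer_update_uncor_status() helper called above walks the queued error log and re-asserts the matching W1CS status bits. A sketch of its shape in QEMU of this vintage:

static void pcie_aer_update_uncor_status(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    PCIEAERLog *aer_log = &dev->exp.aer_log;
    uint16_t i;

    /* Re-set an uncorrectable status bit for every error still queued,
     * so the register keeps reflecting the pending log entries. */
    for (i = 0; i < aer_log->log_num; i++) {
        pci_long_test_and_set_mask(aer_cap + PCI_ERR_UNCOR_STATUS,
                                   aer_log->log[i].status);
    }
}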
Example #3
/*
 * caller must supply valid (offset, size) such that the range shouldn't
 * overlap with other capability or other registers.
 * This function doesn't check it.
 */
void pcie_add_capability(PCIDevice *dev,
                         uint16_t cap_id, uint8_t cap_ver,
                         uint16_t offset, uint16_t size)
{
    uint32_t header;
    uint16_t next;

    assert(offset >= PCI_CONFIG_SPACE_SIZE);
    assert(offset < offset + size);
    assert(offset + size < PCIE_CONFIG_SPACE_SIZE);
    assert(size >= 8);
    assert(pci_is_express(dev));

    if (offset == PCI_CONFIG_SPACE_SIZE) {
        header = pci_get_long(dev->config + offset);
        next = PCI_EXT_CAP_NEXT(header);
    } else {
        uint16_t prev;

        /* 0 is the reserved cap id, used internally to find the last
           capability in the linked list */
        next = pcie_find_capability_list(dev, 0, &prev);

        assert(prev >= PCI_CONFIG_SPACE_SIZE);
        assert(next == 0);
        pcie_ext_cap_set_next(dev, prev, offset);
    }
    pci_set_long(dev->config + offset, PCI_EXT_CAP(cap_id, cap_ver, next));

    /* Make capability read-only by default */
    memset(dev->wmask + offset, 0, size);
    memset(dev->w1cmask + offset, 0, size);
    /* Check capability by default */
    memset(dev->cmask + offset, 0xFF, size);
}
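
A typical call site passes a spec-defined capability ID, version, and size, plus an offset chosen by the device model. Roughly how AER registration uses it (a sketch; PCI_EXT_CAP_ID_ERR, PCI_ERR_VER, and PCI_ERR_SIZEOF come from the PCIe register headers):

/* Register the AER extended capability at a device-chosen offset and
 * remember where it lives for later config-space accesses. */
pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, PCI_ERR_VER,
                    offset, PCI_ERR_SIZEOF);
dev->exp.aer_cap = offset;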
Example #4
static uint64_t msix_pba_mmio_read(void *opaque, target_phys_addr_t addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;

    return pci_get_long(dev->msix_pba + addr);
}
Example #5
File: tco.c Project: 8tab/qemu
static void tco_timer_expired(void *opaque)
{
    TCOIORegs *tr = opaque;
    ICH9LPCPMRegs *pm = container_of(tr, ICH9LPCPMRegs, tco_regs);
    ICH9LPCState *lpc = container_of(pm, ICH9LPCState, pm);
    uint32_t gcs = pci_get_long(lpc->chip_config + ICH9_CC_GCS);

    tr->tco.rld = 0;
    tr->tco.sts1 |= TCO_TIMEOUT;
    if (++tr->timeouts_no == 2) {
        tr->tco.sts2 |= TCO_SECOND_TO_STS;
        tr->tco.sts2 |= TCO_BOOT_STS;
        tr->timeouts_no = 0;

        if (!lpc->pin_strap.spkr_hi && !(gcs & ICH9_CC_GCS_NO_REBOOT)) {
            watchdog_perform_action();
            tco_timer_stop(tr);
            return;
        }
    }

    if (pm->smi_en & ICH9_PMIO_SMI_EN_TCO_EN) {
        ich9_generate_smi();
    }
    tr->tco.rld = tr->tco.tmr;
    tco_timer_reload(tr);
}
Example #6
static uint32_t msix_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    void *page = dev->msix_table_page;

    return pci_get_long(page + offset);
}
Example #7
File: pcie.c Project: THanwa/qemu
static void pcie_ext_cap_set_next(PCIDevice *dev, uint16_t pos, uint16_t next)
{
    uint32_t header = pci_get_long(dev->config + pos);
    assert(!(next & (PCI_EXT_CAP_ALIGN - 1)));
    header = (header & ~PCI_EXT_CAP_NEXT_MASK) |
        ((next << PCI_EXT_CAP_NEXT_SHIFT) & PCI_EXT_CAP_NEXT_MASK);
    pci_set_long(dev->config + pos, header);
}
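
This relies on the PCIe extended capability header layout: capability ID in bits 15:0, version in bits 19:16, next-capability offset in bits 31:20 (dword-aligned, hence the 0xffc mask). The standard register-header definitions look like this:

/* Extended capability header fields (PCIe spec 7.9.3). */
#define PCI_EXT_CAP_ID(header)   ((header) & 0xffff)
#define PCI_EXT_CAP_VER(header)  (((header) >> 16) & 0xf)
#define PCI_EXT_CAP_NEXT(header) (((header) >> 20) & 0xffc)
#define PCI_EXT_CAP(id, ver, next) ((id) | ((ver) << 16) | ((next) << 20))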
Example #8
/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table_page + vector * MSIX_ENTRY_SIZE;
    uint64_t address;
    uint32_t data;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector])
        return;
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    address = pci_get_long(table_entry + MSIX_MSG_UPPER_ADDR);
    address = (address << 32) | pci_get_long(table_entry + MSIX_MSG_ADDR);
    data = pci_get_long(table_entry + MSIX_MSG_DATA);
    stl_phys(address, data);
}
Example #9
MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}
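
Contrast with Example 8: pci_get_quad does one little-endian 64-bit load, equivalent to assembling the address from two 32-bit halves by hand. A hypothetical equivalence check, assuming the PCI_MSIX_ENTRY_UPPER_ADDR offset from QEMU's MSI-X definitions:

uint64_t quad = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
uint64_t pair =
    ((uint64_t)pci_get_long(table_entry + PCI_MSIX_ENTRY_UPPER_ADDR) << 32) |
    pci_get_long(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
assert(quad == pair);   /* both decode the same 64-bit message address */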
Example #10
static void msix_mmio_setup(PCIDevice *d, MemoryRegion *bar)
{
    uint8_t *config = d->config + d->msix_cap;
    uint32_t table = pci_get_long(config + PCI_MSIX_TABLE);
    uint32_t offset = table & ~(MSIX_PAGE_SIZE - 1);
    /* TODO: for assigned devices, we'll want to make it possible to map
     * pending bits separately in case they are in a separate bar. */

    memory_region_add_subregion(bar, offset, &d->msix_mmio);
}
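
The PCI_MSIX_TABLE register packs two fields: the low three bits (the BIR) select which BAR holds the table, and the remaining bits give the table's offset within that BAR. A sketch of decoding both, assuming the standard PCI_MSIX_FLAGS_BIRMASK definition; Example 22 below extracts the BIR the same way, while the code above instead rounds down to a MSIX_PAGE_SIZE boundary because it maps the whole page:

uint32_t table  = pci_get_long(config + PCI_MSIX_TABLE);
unsigned bir    = table & PCI_MSIX_FLAGS_BIRMASK;   /* which BAR */
uint32_t offset = table & ~PCI_MSIX_FLAGS_BIRMASK;  /* offset inside it */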
Example #11
static bool pcie_aer_inject_cor_error(PCIEAERInject *inj,
                                      uint32_t uncor_status,
                                      bool is_advisory_nonfatal)
{
    PCIDevice *dev = inj->dev;

    inj->devsta |= PCI_EXP_DEVSTA_CED;
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask;
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_COR_STATUS,
                                   inj->error_status);
        mask = pci_get_long(inj->aer_cap + PCI_ERR_COR_MASK);
        if (mask & inj->error_status) {
            return false;
        }
        if (is_advisory_nonfatal) {
            uint32_t uncor_mask =
                pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
            if (!(uncor_mask & uncor_status)) {
                inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
            }
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       uncor_status);
        }
    }

    if (inj->unsupported_request && !(inj->devctl & PCI_EXP_DEVCTL_URRE)) {
        return false;
    }
    if (!(inj->devctl & PCI_EXP_DEVCTL_CERE)) {
        return false;
    }

    inj->msg.severity = PCI_ERR_ROOT_CMD_COR_EN;
    return true;
}
Example #12
static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint8_t first_bit = ctz32(err->status);
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int i;

    assert(err->status);
    assert(!(err->status & (err->status - 1)));

    errcap &= ~(PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);
    errcap |= PCI_ERR_CAP_FEP(first_bit);

    if (err->flags & PCIE_AER_ERR_HEADER_VALID) {
        for (i = 0; i < ARRAY_SIZE(err->header); ++i) {
            /* 7.10.8 Header Log Register */
            uint8_t *header_log =
                aer_cap + PCI_ERR_HEADER_LOG + i * sizeof err->header[0];
            stl_be_p(header_log, err->header[i]);
        }
    } else {
        assert(!(err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT));
        memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    }

    if ((err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT) &&
        (pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCAP2) &
         PCI_EXP_DEVCAP2_EETLPP)) {
        for (i = 0; i < ARRAY_SIZE(err->prefix); ++i) {
            /* 7.10.12 tlp prefix log register */
            uint8_t *prefix_log =
                aer_cap + PCI_ERR_TLP_PREFIX_LOG + i * sizeof err->prefix[0];
            stl_be_p(prefix_log, err->prefix[i]);
        }
        errcap |= PCI_ERR_CAP_TLP;
    } else {
        memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0,
               PCI_ERR_TLP_PREFIX_LOG_SIZE);
    }
    pci_set_long(aer_cap + PCI_ERR_CAP, errcap);
}
Example #13
static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;
    if (dev->msix_vector_poll_notifier) {
        unsigned vector_start = addr * 8;
        unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}
Example #14
File: msix.c Project: Icenowy/qemu
static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}
Example #15
static void ioh3420_write_config(PCIDevice *d,
                                   uint32_t address, uint32_t val, int len)
{
    uint32_t root_cmd =
        pci_get_long(d->config + d->exp.aer_cap + PCI_ERR_ROOT_COMMAND);

    pci_bridge_write_config(d, address, val, len);
    ioh3420_aer_vector_update(d);
    pcie_cap_slot_write_config(d, address, val, len);
    pcie_aer_write_config(d, address, val, len);
    pcie_aer_root_write_config(d, address, val, len, root_cmd);
}
Example #16
File: pcie.c Project: THanwa/qemu
bool pcie_cap_is_arifwd_enabled(const PCIDevice *dev)
{
    if (!pci_is_express(dev)) {
        return false;
    }
    if (!dev->exp.exp_cap) {
        return false;
    }

    return pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) &
        PCI_EXP_DEVCTL2_ARI;
}
Example #17
/* PBUS - bus control */
uint64_t pbus_read(void *opaque, hwaddr addr, unsigned int size)
{
    NV2AState *d = opaque;

    uint64_t r = 0;
    switch (addr) {
    case NV_PBUS_PCI_NV_0:
        r = pci_get_long(d->dev.config + PCI_VENDOR_ID);
        break;
    case NV_PBUS_PCI_NV_1:
        r = pci_get_long(d->dev.config + PCI_COMMAND);
        break;
    case NV_PBUS_PCI_NV_2:
        r = pci_get_long(d->dev.config + PCI_CLASS_REVISION);
        break;
    default:
        break;
    }

    reg_log_read(NV_PBUS, addr, r);
    return r;
}
Example #18
static pcibus_t pci_config_get_pref_base(const PCIDevice *d,
                                         uint32_t base, uint32_t upper)
{
    pcibus_t tmp;
    pcibus_t val;

    tmp = (pcibus_t)pci_get_word(d->config + base);
    val = (tmp & PCI_PREF_RANGE_MASK) << 16;
    if (tmp & PCI_PREF_RANGE_TYPE_64) {
        val |= (pcibus_t)pci_get_long(d->config + upper) << 32;
    }
    return val;
}
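
A worked decode with hypothetical register values: if the 16-bit base register reads 0xFFF1 (the low type bits indicate 64-bit decoding, address bits 15:4 give 0xFFF0) and the upper-32 register reads 0x1, the routine yields 0x1FFF00000. A standalone illustration with simplified masks:

#include <assert.h>
#include <stdint.h>

#define PREF_RANGE_MASK    0xfff0  /* address bits 15:4 */
#define PREF_RANGE_TYPE_64 0x01    /* type bits: 64-bit decoding */

int main(void)
{
    uint16_t base  = 0xFFF1;       /* hypothetical PCI_PREF_MEMORY_BASE */
    uint32_t upper = 0x00000001;   /* hypothetical PCI_PREF_BASE_UPPER32 */
    uint64_t val   = (uint64_t)(base & PREF_RANGE_MASK) << 16;

    if (base & PREF_RANGE_TYPE_64) {
        val |= (uint64_t)upper << 32;
    }
    assert(val == 0x1FFF00000ULL);
    return 0;
}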
Example #19
File: shpc.c Project: CTU-IIG/qemu
void shpc_cap_write_config(PCIDevice *d, uint32_t addr, uint32_t val, int l)
{
    if (!ranges_overlap(addr, l, d->shpc->cap, SHPC_CAP_LENGTH)) {
        return;
    }
    if (ranges_overlap(addr, l, d->shpc->cap + SHPC_CAP_DWORD_DATA, 4)) {
        unsigned dword_data;
        dword_data = pci_get_long(d->shpc->config + d->shpc->cap
                                  + SHPC_CAP_DWORD_DATA);
        shpc_write(d, shpc_cap_dword(d) * 4, dword_data, 4);
    }
    /* Update cap dword data in case guest is going to read it. */
    shpc_cap_update_dword(d);
}
Example #20
static int pcie_aer_record_error(PCIDevice *dev,
                                 const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int fep = PCI_ERR_CAP_FEP(errcap);

    assert(err->status);
    assert(!(err->status & (err->status - 1)));

    if (errcap & PCI_ERR_CAP_MHRE &&
        (pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS) & (1U << fep))) {
        /* Not the first error; queue it */
        if (aer_log_add_err(&dev->exp.aer_log, err) < 0) {
            /* overflow */
            return -1;
        }
        return 0;
    }

    pcie_aer_update_log(dev, err);
    return 0;
}
Example #21
static bool pcie_aer_inject_uncor_error(PCIEAERInject *inj, bool is_fatal)
{
    PCIDevice *dev = inj->dev;
    uint16_t cmd;

    if (is_fatal) {
        inj->devsta |= PCI_EXP_DEVSTA_FED;
    } else {
        inj->devsta |= PCI_EXP_DEVSTA_NFED;
    }
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask = pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
        if (mask & inj->error_status) {
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       inj->error_status);
            return false;
        }

        inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                   inj->error_status);
    }

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    if (inj->unsupported_request &&
        !(inj->devctl & PCI_EXP_DEVCTL_URRE) && !(cmd & PCI_COMMAND_SERR)) {
        return false;
    }
    if (is_fatal) {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_FERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_FATAL_EN;
    } else {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_NFERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_NONFATAL_EN;
    }
    return true;
}
Example #22
/* Should be called from device's map method. */
void msix_mmio_map(PCIDevice *d, int region_num,
                   uint32_t addr, uint32_t size, int type)
{
    uint8_t *config = d->config + d->msix_cap;
    uint32_t table = pci_get_long(config + MSIX_TABLE_OFFSET);
    uint32_t offset = table & ~(MSIX_PAGE_SIZE - 1);
    /* TODO: for assigned devices, we'll want to make it possible to map
     * pending bits separately in case they are in a separate bar. */
    int table_bir = table & PCI_MSIX_FLAGS_BIRMASK;

    if (table_bir != region_num)
        return;
    if (size <= offset)
        return;
    cpu_register_physical_memory(addr + offset, size - offset,
                                 d->msix_mmio_index);
}
Example #23
/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table_page + vector * PCI_MSIX_ENTRY_SIZE;
    uint64_t address;
    uint32_t data;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector])
        return;
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_set_irq(dev->msix_irq_entries[vector].gsi, 1, NULL);
        return;
    }

    address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    stl_le_phys(address, data);
}
Example #24
/*
 * A non-function-specific error must be recorded in all functions.
 * Doing so is the responsibility of this function's caller, as is
 * determining which function should report the error.
 *
 * 6.2.4 Error Logging
 * 6.2.5 Sequence of Device Error Signaling and Logging Operations
 * Figure 6-2: Flowchart Showing Sequence of Device Error Signaling and Logging
 *             Operations
 */
int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = NULL;
    uint16_t devctl = 0;
    uint16_t devsta = 0;
    uint32_t error_status = err->status;
    PCIEAERInject inj;

    if (!pci_is_express(dev)) {
        return -ENOSYS;
    }

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        error_status &= PCI_ERR_COR_SUPPORTED;
    } else {
        error_status &= PCI_ERR_UNC_SUPPORTED;
    }

    /* invalid status bit. one and only one bit must be set */
    if (!error_status || (error_status & (error_status - 1))) {
        return -EINVAL;
    }

    if (dev->exp.aer_cap) {
        uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
        aer_cap = dev->config + dev->exp.aer_cap;
        devctl = pci_get_long(exp_cap + PCI_EXP_DEVCTL);
        devsta = pci_get_long(exp_cap + PCI_EXP_DEVSTA);
    }

    inj.dev = dev;
    inj.aer_cap = aer_cap;
    inj.err = err;
    inj.devctl = devctl;
    inj.devsta = devsta;
    inj.error_status = error_status;
    inj.unsupported_request = !(err->flags & PCIE_AER_ERR_IS_CORRECTABLE) &&
        err->status == PCI_ERR_UNC_UNSUP;
    inj.log_overflow = false;

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        if (!pcie_aer_inject_cor_error(&inj, 0, false)) {
            return 0;
        }
    } else {
        bool is_fatal =
            pcie_aer_uncor_default_severity(error_status) ==
            PCI_ERR_ROOT_CMD_FATAL_EN;
        if (aer_cap) {
            is_fatal =
                error_status & pci_get_long(aer_cap + PCI_ERR_UNCOR_SEVER);
        }
        if (!is_fatal && (err->flags & PCIE_AER_ERR_MAYBE_ADVISORY)) {
            inj.error_status = PCI_ERR_COR_ADV_NONFATAL;
            if (!pcie_aer_inject_cor_error(&inj, error_status, true)) {
                return 0;
            }
        } else {
            if (!pcie_aer_inject_uncor_error(&inj, is_fatal)) {
                return 0;
            }
        }
    }

    /* send up error message */
    inj.msg.source_id = err->source_id;
    pcie_aer_msg(dev, &inj.msg);

    if (inj.log_overflow) {
        PCIEAERErr header_log_overflow = {
            .status = PCI_ERR_COR_HL_OVERFLOW,
            .flags = PCIE_AER_ERR_IS_CORRECTABLE,
        };
        int ret = pcie_aer_inject_error(dev, &header_log_overflow);
        assert(!ret);
    }

    return 0;
}
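
A hedged usage sketch: injecting a correctable Bad TLP error into a device. The fields follow the PCIEAERErr usage visible above; the source_id computation is an assumption modeled on how requester IDs are built from bus number and devfn.

PCIEAERErr err = {
    .status    = PCI_ERR_COR_BAD_TLP,
    .source_id = (pci_bus_num(dev->bus) << 8) | dev->devfn, /* assumed */
    .flags     = PCIE_AER_ERR_IS_CORRECTABLE,
};
pcie_aer_inject_error(dev, &err);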
Example #25
static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
}
Example #26
/*
 * 6.2.6 Error Message Control
 * Figure 6-3
 * root port part
 */
static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint16_t cmd;
    uint8_t *aer_cap;
    uint32_t root_cmd;
    uint32_t root_status, prev_status;

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    aer_cap = dev->config + dev->exp.aer_cap;
    root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);

    if (cmd & PCI_COMMAND_SERR) {
        /* System Error.
         *
         * The way to report a System Error is platform specific and
         * isn't implemented in QEMU right now, so just discard the
         * error for now. An OS that cares about AER would receive
         * errors via the native AER mechanism, so this wouldn't matter.
         */
    }

    /* Error Message Received: Root Error Status register */
    switch (msg->severity) {
    case PCI_ERR_ROOT_CMD_COR_EN:
        if (root_status & PCI_ERR_ROOT_COR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
        } else {
            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC + PCI_ERR_SRC_COR_OFFS,
                         msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_COR_RCV;
        break;
    case PCI_ERR_ROOT_CMD_NONFATAL_EN:
        root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
        break;
    case PCI_ERR_ROOT_CMD_FATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
            root_status |= PCI_ERR_ROOT_FIRST_FATAL;
        }
        root_status |= PCI_ERR_ROOT_FATAL_RCV;
        break;
    default:
        abort();
        break;
    }
    if (pcie_aer_msg_is_uncor(msg)) {
        if (root_status & PCI_ERR_ROOT_UNCOR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
        } else {
            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC +
                         PCI_ERR_SRC_UNCOR_OFFS, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_UNCOR_RCV;
    }
    pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);

    /* 6.2.4.1.2 Interrupt Generation */
    /* All the above did was set some bits in the status register,
     * specifically those that match the message severity.
     * The code below relies on this fact. */
    if (!(root_cmd & msg->severity) ||
        (pcie_aer_status_to_cmd(prev_status) & root_cmd)) {
        /* Condition is not being set or was already true, so nothing to do. */
        return;
    }

    pcie_aer_root_notify(dev);
}
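
The gating at the end depends on pcie_aer_status_to_cmd() translating already-latched status bits into the command-register enables they correspond to. Its shape in QEMU is roughly:

static uint32_t pcie_aer_status_to_cmd(uint32_t status)
{
    uint32_t cmd = 0;

    /* Map each latched Root Error Status bit to the Root Error Command
     * enable that would have triggered an interrupt for it, so an
     * already-reported condition doesn't retrigger one. */
    if (status & PCI_ERR_ROOT_COR_RCV) {
        cmd |= PCI_ERR_ROOT_CMD_COR_EN;
    }
    if (status & PCI_ERR_ROOT_NONFATAL_RCV) {
        cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN;
    }
    if (status & PCI_ERR_ROOT_FATAL_RCV) {
        cmd |= PCI_ERR_ROOT_CMD_FATAL_EN;
    }
    return cmd;
}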