/*
 * Enable PCIe error reporting: set the correctable, non-fatal, fatal and
 * unsupported-request reporting bits in the Device Control register.
 * Refuses with -EIO when platform firmware owns AER (firmware-first mode).
 */
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	u16 reg16 = 0;
	int pos;

	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -EIO;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -EIO;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
	reg16 |= (PCI_EXP_DEVCTL_CERE |
		  PCI_EXP_DEVCTL_NFERE |
		  PCI_EXP_DEVCTL_FERE |
		  PCI_EXP_DEVCTL_URRE);
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);

	return 0;
}
/* Clear the same four reporting bits, via the pcie_capability helper. */
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
					  PCI_EXP_AER_FLAGS);
}
/* Helper-based enable path: verify the AER capability exists, then set
 * the reporting bits through pcie_capability_set_word(). */
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
		return -EIO;

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
					PCI_EXP_AER_FLAGS);
}
/* Open-coded disable path: clear the four reporting bits by hand. */
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	u16 reg16 = 0;
	int pos;

	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -EIO;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
	reg16 &= ~(PCI_EXP_DEVCTL_CERE |
		   PCI_EXP_DEVCTL_NFERE |
		   PCI_EXP_DEVCTL_FERE |
		   PCI_EXP_DEVCTL_URRE);
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);

	return 0;
}
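A typical consumer calls the enable helper from its probe path and the disable helper on teardown. A minimal sketch, with hypothetical driver names (my_probe/my_remove are illustrative, not kernel code):

/* Hypothetical driver glue showing typical use of the helpers above. */
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;

	/* Best effort: -EIO here just means firmware owns AER. */
	if (pci_enable_pcie_error_reporting(pdev))
		dev_info(&pdev->dev, "AER left to platform firmware\n");

	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}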
/**
 * aer_init - provide AER initialization
 * @dev: pointer to AER pcie device
 *
 * Invoked when AER service driver is loaded.
 */
int aer_init(struct pcie_device *dev)
{
	if (pcie_aer_get_firmware_first(dev->port)) {
		dev_printk(KERN_DEBUG, &dev->device,
			   "PCIe errors handled by platform firmware.\n");
		goto out;
	}

	if (aer_osc_setup(dev))
		goto out;

	return 0;
out:
	if (forceload) {
		dev_printk(KERN_DEBUG, &dev->device,
			   "aerdrv forceload requested.\n");
		pcie_aer_force_firmware_first(dev->port, 0);
		return 0;
	}
	return -ENXIO;
}
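aer_init() consults a module-wide forceload flag to override firmware-first ownership. A sketch of the declaration it relies on, as found in AER drivers of this vintage (exact placement may differ by kernel version):

bool forceload;
module_param(forceload, bool, 0);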
static int dpc_probe(struct pcie_device *dev)
{
	struct dpc_dev *dpc;
	struct pci_dev *pdev = dev->port;
	struct device *device = &dev->device;
	int status;
	u16 ctl, cap;

	if (pcie_aer_get_firmware_first(pdev))
		return -ENOTSUPP;

	dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL);
	if (!dpc)
		return -ENOMEM;

	dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
	dpc->dev = dev;
	set_service_data(dev, dpc);

	status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
					   dpc_handler, IRQF_SHARED,
					   "pcie-dpc", dpc);
	if (status) {
		dev_warn(device, "request IRQ%d failed: %d\n", dev->irq,
			 status);
		return status;
	}

	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);

	dpc->rp_extensions = (cap & PCI_EXP_DPC_CAP_RP_EXT);
	if (dpc->rp_extensions) {
		/* RP PIO log size lives in bits 11:8 of the DPC capability. */
		dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
		if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) {
			dev_err(device, "RP PIO log size %u is invalid\n",
				dpc->rp_log_size);
			dpc->rp_log_size = 0;
		}
	}
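dpc_probe() requests a threaded interrupt: the hard-IRQ half (dpc_irq) only checks whether a DPC event is pending, and the threaded half (dpc_handler) does the containment recovery in sleepable context. A minimal skeleton of that split; the bodies are illustrative stand-ins, not the kernel's actual handlers:

static irqreturn_t dpc_irq(int irq, void *context)
{
	struct dpc_dev *dpc = context;
	struct pci_dev *pdev = dpc->dev->port;
	u16 status;

	/* Hard-IRQ half: cheap status check only, then wake the thread. */
	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS,
			     &status);
	if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dpc_handler(int irq, void *context)
{
	/* Threaded half: recovery work runs here, where it may sleep. */
	return IRQ_HANDLED;
}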
void pci_vtd_quirk(const struct pci_dev *pdev)
{
    int seg = pdev->seg;
    int bus = pdev->bus;
    int dev = PCI_SLOT(pdev->devfn);
    int func = PCI_FUNC(pdev->devfn);
    int pos;
    bool_t ff;
    u32 val, val2;
    u64 bar;
    paddr_t pa;
    const char *action;

    if ( pci_conf_read16(seg, bus, dev, func, PCI_VENDOR_ID) !=
         PCI_VENDOR_ID_INTEL )
        return;

    switch ( pci_conf_read16(seg, bus, dev, func, PCI_DEVICE_ID) )
    {
    /*
     * Mask reporting Intel VT-d faults to IOH core logic:
     * - Some platforms escalate VT-d faults to platform errors.
     * - This can cause system failure upon non-fatal VT-d faults.
     * - Potential security issue if a malicious guest triggers VT-d faults.
     */
    case 0x0e28: /* Xeon-E5v2 (IvyBridge) */
    case 0x342e: /* Tylersburg chipset (Nehalem / Westmere systems) */
    case 0x3728: /* Xeon C5500/C3500 (JasperForest) */
    case 0x3c28: /* Sandybridge */
        val = pci_conf_read32(seg, bus, dev, func, 0x1AC);
        pci_conf_write32(seg, bus, dev, func, 0x1AC, val | (1 << 31));
        printk(XENLOG_INFO "Masked VT-d error signaling on %04x:%02x:%02x.%u\n",
               seg, bus, dev, func);
        break;

    /* Tylersburg (EP)/Boxboro (MP) chipsets (NHM-EP/EX, WSM-EP/EX) */
    case 0x3400 ... 0x3407: /* host bridges */
    case 0x3408 ... 0x3411: case 0x3420 ... 0x3421: /* root ports */
    /* JasperForest (Intel Xeon Processor C5500/C3500) */
    case 0x3700 ... 0x370f: /* host bridges */
    case 0x3720 ... 0x3724: /* root ports */
    /* Sandybridge-EP (Romley) */
    case 0x3c00: /* host bridge */
    case 0x3c01 ... 0x3c0b: /* root ports */
        pos = pci_find_ext_capability(seg, bus, pdev->devfn,
                                      PCI_EXT_CAP_ID_ERR);
        if ( !pos )
        {
            pos = pci_find_ext_capability(seg, bus, pdev->devfn,
                                          PCI_EXT_CAP_ID_VNDR);
            while ( pos )
            {
                val = pci_conf_read32(seg, bus, dev, func,
                                      pos + PCI_VNDR_HEADER);
                if ( PCI_VNDR_HEADER_ID(val) == 4 &&
                     PCI_VNDR_HEADER_REV(val) == 1 )
                {
                    pos += PCI_VNDR_HEADER;
                    break;
                }
                pos = pci_find_next_ext_capability(seg, bus, pdev->devfn, pos,
                                                   PCI_EXT_CAP_ID_VNDR);
            }
            ff = 0;
        }
        else
            ff = pcie_aer_get_firmware_first(pdev);
        if ( !pos )
        {
            printk(XENLOG_WARNING "%04x:%02x:%02x.%u without AER capability?\n",
                   seg, bus, dev, func);
            break;
        }

        val = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK);
        val2 = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK);
        if ( (val & PCI_ERR_UNC_UNSUP) && (val2 & PCI_ERR_COR_ADV_NFAT) )
            action = "Found masked";
        else if ( !ff )
        {
            pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK,
                             val | PCI_ERR_UNC_UNSUP);
            pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK,
                             val2 | PCI_ERR_COR_ADV_NFAT);
            action = "Masked";
        }
        else
            action = "Must not mask";

        /* XPUNCERRMSK Send Completion with Unsupported Request */
        val = pci_conf_read32(seg, bus, dev, func, 0x20c);
        pci_conf_write32(seg, bus, dev, func, 0x20c, val | (1 << 4));

        printk(XENLOG_INFO "%s UR signaling on %04x:%02x:%02x.%u\n",
               action, seg, bus, dev, func);
        break;

    case 0x0040: case 0x0044: case 0x0048: /* Nehalem/Westmere */
    case 0x0100: case 0x0104: case 0x0108: /* Sandybridge */
    case 0x0150: case 0x0154: case 0x0158: /* Ivybridge */
    case 0x0a00: case 0x0a04: case 0x0a08: case 0x0a0f: /* Haswell ULT */
    case 0x0c00: case 0x0c04: case 0x0c08: case 0x0c0f: /* Haswell */
    case 0x0d00: case 0x0d04: case 0x0d08: case 0x0d0f: /* Haswell */
    case 0x1600: case 0x1604: case 0x1608: case 0x160f: /* Broadwell */
    case 0x1610: case 0x1614: case 0x1618: /* Broadwell */
    case 0x1900: case 0x1904: case 0x1908: case 0x190c: case 0x190f: /* Skylake */
    case 0x1910: case 0x1918: case 0x191f: /* Skylake */
        bar = pci_conf_read32(seg, bus, dev, func, 0x6c);
        bar = (bar << 32) | pci_conf_read32(seg, bus, dev, func, 0x68);
        pa = bar & 0x7ffffff000UL; /* bits 12...38 */
        if ( (bar & 1) && pa &&
             page_is_ram_type(paddr_to_pfn(pa), RAM_TYPE_RESERVED) )
        {
            u32 __iomem *va = ioremap(pa, PAGE_SIZE);

            if ( va )
            {
                __set_bit(0x1c8 * 8 + 20, va);
                iounmap(va);
                printk(XENLOG_INFO "Masked UR signaling on %04x:%02x:%02x.%u\n",
                       seg, bus, dev, func);
            }
            else
                printk(XENLOG_ERR "Could not map %"PRIpaddr" for %04x:%02x:%02x.%u\n",
                       pa, seg, bus, dev, func);
        }
        else
            printk(XENLOG_WARNING "Bogus DMIBAR %#"PRIx64" on %04x:%02x:%02x.%u\n",
                   bar, seg, bus, dev, func);
        break;
    }
}
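The __set_bit() call above addresses bits linearly from the mapped base, so 0x1c8 * 8 + 20 means bit 20 of the 32-bit register at byte offset 0x1c8. A sketch of the equivalent explicit read-modify-write, assuming readl()/writel() MMIO accessors (the quirk itself uses the atomic bitop):

/* Non-atomic equivalent of __set_bit(0x1c8 * 8 + 20, va);
 * va is the u32 __iomem mapping from the quirk above. */
u32 __iomem *reg = va + 0x1c8 / 4;

writel(readl(reg) | (1u << 20), reg);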