/*
 * Sanitize a PCI device's config space at boot:
 *  - clear any PCI_COMMAND bits selected by the (file-scope) command_mask,
 *  - acknowledge latched error bits in the status register,
 *  - for bridges, do the same for bridge control (bridge_ctl_mask) and
 *    the secondary status register.
 */
static void check_pdev(const struct pci_dev *pdev)
{
/* Latched error conditions to clear from the (secondary) status register. */
#define PCI_STATUS_CHECK \
    (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | \
     PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | \
     PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY)
    u16 seg = pdev->seg;
    u8 bus = pdev->bus;
    u8 dev = PCI_SLOT(pdev->devfn);
    u8 func = PCI_FUNC(pdev->devfn);
    u16 val;

    if ( command_mask )
    {
        /* Turn off the command register bits requested via command_mask. */
        val = pci_conf_read16(seg, bus, dev, func, PCI_COMMAND);
        if ( val & command_mask )
            pci_conf_write16(seg, bus, dev, func, PCI_COMMAND,
                             val & ~command_mask);

        val = pci_conf_read16(seg, bus, dev, func, PCI_STATUS);
        if ( val & PCI_STATUS_CHECK )
        {
            printk(XENLOG_INFO "%04x:%02x:%02x.%u status %04x -> %04x\n",
                   seg, bus, dev, func, val, val & ~PCI_STATUS_CHECK);
            /*
             * PCI status error bits are write-1-to-clear: writing back
             * exactly the set bits clears them (matching the "->" value
             * logged above).
             */
            pci_conf_write16(seg, bus, dev, func, PCI_STATUS,
                             val & PCI_STATUS_CHECK);
        }
    }

    /* Mask off the multi-function bit (bit 7) of the header type. */
    switch ( pci_conf_read8(seg, bus, dev, func, PCI_HEADER_TYPE) & 0x7f )
    {
    case PCI_HEADER_TYPE_BRIDGE:
        if ( !bridge_ctl_mask )
            break;
        /* Same treatment for bridge control / secondary status. */
        val = pci_conf_read16(seg, bus, dev, func, PCI_BRIDGE_CONTROL);
        if ( val & bridge_ctl_mask )
            pci_conf_write16(seg, bus, dev, func, PCI_BRIDGE_CONTROL,
                             val & ~bridge_ctl_mask);
        val = pci_conf_read16(seg, bus, dev, func, PCI_SEC_STATUS);
        if ( val & PCI_STATUS_CHECK )
        {
            printk(XENLOG_INFO
                   "%04x:%02x:%02x.%u secondary status %04x -> %04x\n",
                   seg, bus, dev, func, val, val & ~PCI_STATUS_CHECK);
            /* Secondary status is write-1-to-clear as well. */
            pci_conf_write16(seg, bus, dev, func, PCI_SEC_STATUS,
                             val & PCI_STATUS_CHECK);
        }
        break;

    case PCI_HEADER_TYPE_CARDBUS:
        /* TODO */
        break;
    }
#undef PCI_STATUS_CHECK
}
/*
 * QUIRK to work around a Xen boot issue on Calpella/Ironlake OEM BIOSes
 * not enabling VT-d properly in the IGD.  The workaround is to not enable
 * IGD VT-d translation if VT is not enabled in the IGD.
 *
 * Returns 1 when IGD VT-d may be used (non-Ironlake IOH, or VT enabled
 * in the IGD), 0 otherwise.
 */
int is_igd_vt_enabled_quirk(void)
{
    u16 ggc;

    if ( !IS_ILK(ioh_id) )
        return 1;

    /*
     * Integrated graphics on Intel platforms is located at 0:2.0, i.e.
     * segment 0, bus 0, device IGD_DEV, function 0.  (The original code
     * dropped the bus argument, mismatching the 5-argument
     * pci_conf_read16(seg, bus, dev, func, reg) used everywhere else in
     * this file.)
     */
    ggc = pci_conf_read16(0, 0, IGD_DEV, 0, GGC);
    return ( ggc & GGC_MEMORY_VT_ENABLED ? 1 : 0 );
}
/*
 * QUIRK to work around the Cantiga VT-d buffer flush issue.
 * The workaround is to force a write buffer flush even if the
 * VT-d capability indicates it is not required.
 *
 * Sets the file-scope is_cantiga_b3 flag when the IGD identifies as a
 * Cantiga B3 part.
 */
static void cantiga_b3_errata_init(void)
{
    u16 vid;
    u8 did_hi, rid;

    /*
     * Probe the IGD at 0:2.0 (segment 0, bus 0, device IGD_DEV,
     * function 0).  The original code passed only four arguments to the
     * 5-argument pci_conf_read16/8(seg, bus, dev, func, reg) helpers,
     * dropping the bus; restored here for consistency with the rest of
     * this file.
     */
    vid = pci_conf_read16(0, 0, IGD_DEV, 0, 0);
    if ( vid != 0x8086 )          /* Not an Intel device - nothing to do. */
        return;

    did_hi = pci_conf_read8(0, 0, IGD_DEV, 0, 3); /* device ID, high byte */
    rid = pci_conf_read8(0, 0, IGD_DEV, 0, 8);    /* revision ID */

    /* Cantiga B3: device ID 0x2Axx with revision 7. */
    if ( (did_hi == 0x2A) && (rid == 0x7) )
        is_cantiga_b3 = 1;
}
static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn) { struct pci_dev *pdev; list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list ) if ( pdev->bus == bus && pdev->devfn == devfn ) return pdev; pdev = xzalloc(struct pci_dev); if ( !pdev ) return NULL; *(u16*) &pdev->seg = pseg->nr; *((u8*) &pdev->bus) = bus; *((u8*) &pdev->devfn) = devfn; pdev->domain = NULL; INIT_LIST_HEAD(&pdev->msi_list); if ( pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), PCI_CAP_ID_MSIX) ) { struct arch_msix *msix = xzalloc(struct arch_msix); if ( !msix ) { xfree(pdev); return NULL; } spin_lock_init(&msix->table_lock); pdev->msix = msix; } list_add(&pdev->alldevs_list, &pseg->alldevs_list); /* update bus2bridge */ switch ( pdev->type = pdev_type(pseg->nr, bus, devfn) ) { int pos; u16 cap; u8 sec_bus, sub_bus; case DEV_TYPE_PCIe2PCI_BRIDGE: case DEV_TYPE_LEGACY_PCI_BRIDGE: sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), PCI_SECONDARY_BUS); sub_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), PCI_SUBORDINATE_BUS); spin_lock(&pseg->bus2bridge_lock); for ( ; sec_bus <= sub_bus; sec_bus++ ) { pseg->bus2bridge[sec_bus].map = 1; pseg->bus2bridge[sec_bus].bus = bus; pseg->bus2bridge[sec_bus].devfn = devfn; } spin_unlock(&pseg->bus2bridge_lock); break; case DEV_TYPE_PCIe_ENDPOINT: pos = pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), PCI_CAP_ID_EXP); BUG_ON(!pos); cap = pci_conf_read16(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos + PCI_EXP_DEVCAP); if ( cap & PCI_EXP_DEVCAP_PHANTOM ) { pdev->phantom_stride = 8 >> MASK_EXTR(cap, PCI_EXP_DEVCAP_PHANTOM); if ( PCI_FUNC(devfn) >= pdev->phantom_stride ) pdev->phantom_stride = 0; } else {
/*
 * Finalize VGA console handling at the end of boot: either silence
 * Xen's VGA output (relinquishing the console), or keep it - in which
 * case the boot video device is located on segment 0 and hidden from
 * further use, provided no bridge above it forwards VGA cycles.
 */
void __init vga_endboot(void)
{
    /* VGA never initialised - nothing to do. */
    if ( vga_puts == vga_noop_puts )
        return;

    printk("Xen is %s VGA console.\n",
           vgacon_keep ? "keeping" : "relinquishing");

    if ( !vgacon_keep )
        vga_puts = vga_noop_puts;
    else
    {
        int bus, devfn;

        /*
         * Scan all of segment 0 for enabled VGA-class (0x0300) devices,
         * i.e. those with I/O and/or memory decoding turned on.
         */
        for ( bus = 0; bus < 256; ++bus )
            for ( devfn = 0; devfn < 256; ++devfn )
            {
                const struct pci_dev *pdev;
                u8 b = bus, df = devfn, sb;

                spin_lock(&pcidevs_lock);
                pdev = pci_get_pdev(0, bus, devfn);
                spin_unlock(&pcidevs_lock);

                if ( !pdev ||
                     pci_conf_read16(0, bus, PCI_SLOT(devfn),
                                     PCI_FUNC(devfn),
                                     PCI_CLASS_DEVICE) != 0x0300 ||
                     !(pci_conf_read16(0, bus, PCI_SLOT(devfn),
                                       PCI_FUNC(devfn), PCI_COMMAND) &
                       (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) )
                    continue;

                /*
                 * Walk up towards the root via upstream bridges.  The
                 * walk ends with b == 0 when the root was reached
                 * (case 0), while the "continue" below restarts the
                 * while loop to keep climbing past a bridge that has
                 * VGA forwarding enabled; any other outcome leaves b
                 * non-zero via the trailing "break".
                 */
                while ( b )
                {
                    switch ( find_upstream_bridge(0, &b, &df, &sb) )
                    {
                    case 0:
                        b = 0;
                        break;
                    case 1:
                        switch ( pci_conf_read8(0, b, PCI_SLOT(df),
                                                PCI_FUNC(df),
                                                PCI_HEADER_TYPE) )
                        {
                        case PCI_HEADER_TYPE_BRIDGE:
                        case PCI_HEADER_TYPE_CARDBUS:
                            if ( pci_conf_read16(0, b, PCI_SLOT(df),
                                                 PCI_FUNC(df),
                                                 PCI_BRIDGE_CONTROL) &
                                 PCI_BRIDGE_CTL_VGA )
                                continue;
                            break;
                        }
                        break;
                    }
                    break;
                }

                /* Reached the root: this is the boot video device. */
                if ( !b )
                {
                    printk(XENLOG_INFO "Boot video device %02x:%02x.%u\n",
                           bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
                    pci_hide_device(bus, devfn);
                }
            }
    }

    switch ( vga_console_info.video_type )
    {
    case XEN_VGATYPE_TEXT_MODE_3:
        /* Clear the text screen when handing the console over. */
        if ( !vgacon_keep )
            memset(video, 0, columns * lines * 2);
        break;

    case XEN_VGATYPE_VESA_LFB:
    case XEN_VGATYPE_EFI_LFB:
        vesa_endboot(vgacon_keep);
        break;

    default:
        BUG();
    }
}
void pci_vtd_quirk(const struct pci_dev *pdev) { int seg = pdev->seg; int bus = pdev->bus; int dev = PCI_SLOT(pdev->devfn); int func = PCI_FUNC(pdev->devfn); int pos; bool_t ff; u32 val, val2; u64 bar; paddr_t pa; const char *action; if ( pci_conf_read16(seg, bus, dev, func, PCI_VENDOR_ID) != PCI_VENDOR_ID_INTEL ) return; switch ( pci_conf_read16(seg, bus, dev, func, PCI_DEVICE_ID) ) { /* * Mask reporting Intel VT-d faults to IOH core logic: * - Some platform escalates VT-d faults to platform errors. * - This can cause system failure upon non-fatal VT-d faults. * - Potential security issue if malicious guest trigger VT-d faults. */ case 0x0e28: /* Xeon-E5v2 (IvyBridge) */ case 0x342e: /* Tylersburg chipset (Nehalem / Westmere systems) */ case 0x3728: /* Xeon C5500/C3500 (JasperForest) */ case 0x3c28: /* Sandybridge */ val = pci_conf_read32(seg, bus, dev, func, 0x1AC); pci_conf_write32(seg, bus, dev, func, 0x1AC, val | (1 << 31)); printk(XENLOG_INFO "Masked VT-d error signaling on %04x:%02x:%02x.%u\n", seg, bus, dev, func); break; /* Tylersburg (EP)/Boxboro (MP) chipsets (NHM-EP/EX, WSM-EP/EX) */ case 0x3400 ... 0x3407: /* host bridges */ case 0x3408 ... 0x3411: case 0x3420 ... 0x3421: /* root ports */ /* JasperForest (Intel Xeon Processor C5500/C3500 */ case 0x3700 ... 0x370f: /* host bridges */ case 0x3720 ... 0x3724: /* root ports */ /* Sandybridge-EP (Romley) */ case 0x3c00: /* host bridge */ case 0x3c01 ... 
0x3c0b: /* root ports */ pos = pci_find_ext_capability(seg, bus, pdev->devfn, PCI_EXT_CAP_ID_ERR); if ( !pos ) { pos = pci_find_ext_capability(seg, bus, pdev->devfn, PCI_EXT_CAP_ID_VNDR); while ( pos ) { val = pci_conf_read32(seg, bus, dev, func, pos + PCI_VNDR_HEADER); if ( PCI_VNDR_HEADER_ID(val) == 4 && PCI_VNDR_HEADER_REV(val) == 1 ) { pos += PCI_VNDR_HEADER; break; } pos = pci_find_next_ext_capability(seg, bus, pdev->devfn, pos, PCI_EXT_CAP_ID_VNDR); } ff = 0; } else ff = pcie_aer_get_firmware_first(pdev); if ( !pos ) { printk(XENLOG_WARNING "%04x:%02x:%02x.%u without AER capability?\n", seg, bus, dev, func); break; } val = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK); val2 = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK); if ( (val & PCI_ERR_UNC_UNSUP) && (val2 & PCI_ERR_COR_ADV_NFAT) ) action = "Found masked"; else if ( !ff ) { pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK, val | PCI_ERR_UNC_UNSUP); pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK, val2 | PCI_ERR_COR_ADV_NFAT); action = "Masked"; } else action = "Must not mask"; /* XPUNCERRMSK Send Completion with Unsupported Request */ val = pci_conf_read32(seg, bus, dev, func, 0x20c); pci_conf_write32(seg, bus, dev, func, 0x20c, val | (1 << 4)); printk(XENLOG_INFO "%s UR signaling on %04x:%02x:%02x.%u\n", action, seg, bus, dev, func); break; case 0x0040: case 0x0044: case 0x0048: /* Nehalem/Westmere */ case 0x0100: case 0x0104: case 0x0108: /* Sandybridge */ case 0x0150: case 0x0154: case 0x0158: /* Ivybridge */ case 0x0a00: case 0x0a04: case 0x0a08: case 0x0a0f: /* Haswell ULT */ case 0x0c00: case 0x0c04: case 0x0c08: case 0x0c0f: /* Haswell */ case 0x0d00: case 0x0d04: case 0x0d08: case 0x0d0f: /* Haswell */ case 0x1600: case 0x1604: case 0x1608: case 0x160f: /* Broadwell */ case 0x1610: case 0x1614: case 0x1618: /* Broadwell */ case 0x1900: case 0x1904: case 0x1908: case 0x190c: case 0x190f: /* Skylake */ case 0x1910: case 0x1918: case 0x191f: 
/* Skylake */ bar = pci_conf_read32(seg, bus, dev, func, 0x6c); bar = (bar << 32) | pci_conf_read32(seg, bus, dev, func, 0x68); pa = bar & 0x7ffffff000UL; /* bits 12...38 */ if ( (bar & 1) && pa && page_is_ram_type(paddr_to_pfn(pa), RAM_TYPE_RESERVED) ) { u32 __iomem *va = ioremap(pa, PAGE_SIZE); if ( va ) { __set_bit(0x1c8 * 8 + 20, va); iounmap(va); printk(XENLOG_INFO "Masked UR signaling on %04x:%02x:%02x.%u\n", seg, bus, dev, func); } else printk(XENLOG_ERR "Could not map %"PRIpaddr" for %04x:%02x:%02x.%u\n", pa, seg, bus, dev, func); } else printk(XENLOG_WARNING "Bogus DMIBAR %#"PRIx64" on %04x:%02x:%02x.%u\n", bar, seg, bus, dev, func); break; } }