/*
 * Check whether [address, address + size) may NOT be handed out as an
 * ACPI region: every page in the span must be typed RESERVED or ACPI
 * in the memory map, otherwise the range is considered banned.
 */
static bool __init acpi_memory_banned(unsigned long address,
                                      unsigned long size)
{
    unsigned long first_mfn = PFN_DOWN(address);
    unsigned long nr = PFN_UP((address & ~PAGE_MASK) + size);
    unsigned long idx;

    for ( idx = 0; idx < nr; idx++ )
    {
        /* A page that is neither RESERVED nor ACPI taints the whole span. */
        if ( !page_is_ram_type(first_mfn + idx, RAM_TYPE_RESERVED) &&
             !page_is_ram_type(first_mfn + idx, RAM_TYPE_ACPI) )
            return true;
    }

    return false;
}
/*
 * Early VGA console setup.  Parses the "vga=" option string for the
 * "keep" token, then initialises the console according to the video
 * type recorded in vga_console_info.
 */
void __init vga_init(void)
{
    char *opt;

    /* A "keep" token anywhere in the comma-separated option list sticks. */
    opt = opt_vga;
    while ( opt != NULL )
    {
        if ( *opt == ',' )
            opt++;
        if ( strncmp(opt, "keep", 4) == 0 )
            vgacon_keep = 1;
        opt = strchr(opt, ',');
    }

    switch ( vga_console_info.video_type )
    {
    case XEN_VGATYPE_TEXT_MODE_3:
        /*
         * Give up if the legacy VGA window is plain conventional RAM
         * (i.e. no text-mode hardware behind it) or cannot be mapped.
         */
        if ( page_is_ram_type(paddr_to_pfn(0xB8000), RAM_TYPE_CONVENTIONAL) ||
             ((video = ioremap(0xB8000, 0x8000)) == NULL) )
            return;
        outw(0x200a, 0x3d4); /* disable cursor */
        columns = vga_console_info.u.text_mode_3.columns;
        lines = vga_console_info.u.text_mode_3.rows;
        /* Clear the whole text buffer: 2 bytes (char + attribute) per cell. */
        memset(video, 0, columns * lines * 2);
        vga_puts = vga_text_puts;
        break;

    case XEN_VGATYPE_VESA_LFB:
    case XEN_VGATYPE_EFI_LFB:
        vesa_early_init();
        break;

    default:
        /* Unknown mode: wipe the info block so nothing else trusts it. */
        memset(&vga_console_info, 0, sizeof(vga_console_info));
        break;
    }
}
/*
 * Sanitise the ACPI BGRT (boot graphics) table.  A version-1 image whose
 * address lies outside conventional RAM is left alone; anything else has
 * its image pointer zeroed and its "displayed" status bit cleared so the
 * OS does not try to use it.  Returns -1 for a truncated table, 0 otherwise.
 */
static int __init acpi_invalidate_bgrt(struct acpi_table_header *table)
{
    struct acpi_table_bgrt *bgrt =
        container_of(table, struct acpi_table_bgrt, header);
    bool keep;

    /* Reject tables too short to contain the full BGRT structure. */
    if ( table->length < sizeof(*bgrt) )
        return -1;

    /* Only a v1 image located outside conventional RAM may be preserved. */
    keep = bgrt->version == 1 && bgrt->image_address &&
           !page_is_ram_type(PFN_DOWN(bgrt->image_address),
                             RAM_TYPE_CONVENTIONAL);
    if ( keep )
        return 0;

    printk(KERN_INFO PREFIX "BGRT: invalidating v%d image at %#"PRIx64"\n",
           bgrt->version, bgrt->image_address);
    bgrt->image_address = 0;
    bgrt->status &= ~1;

    return 0;
}
/*
 * Apply Intel-specific VT-d related quirks to @pdev.
 *
 * Only Intel devices are touched; the exact workaround is keyed off the
 * PCI device ID.  Three families of fixups are applied:
 *  - mask VT-d error signaling to the IOH on certain chipsets,
 *  - mask Unsupported Request reporting via AER on certain host
 *    bridges / root ports,
 *  - mask UR signaling through a bit behind DMIBAR on client CPUs.
 * All register offsets/bit positions below come from Intel chipset
 * datasheets and are intentionally magic numbers here.
 */
void pci_vtd_quirk(const struct pci_dev *pdev)
{
    int seg = pdev->seg;
    int bus = pdev->bus;
    int dev = PCI_SLOT(pdev->devfn);
    int func = PCI_FUNC(pdev->devfn);
    int pos;
    bool_t ff;
    u32 val, val2;
    u64 bar;
    paddr_t pa;
    const char *action;

    /* Quirks below are Intel-only. */
    if ( pci_conf_read16(seg, bus, dev, func, PCI_VENDOR_ID) !=
         PCI_VENDOR_ID_INTEL )
        return;

    switch ( pci_conf_read16(seg, bus, dev, func, PCI_DEVICE_ID) )
    {
    /*
     * Mask reporting Intel VT-d faults to IOH core logic:
     * - Some platform escalates VT-d faults to platform errors.
     * - This can cause system failure upon non-fatal VT-d faults.
     * - Potential security issue if malicious guest trigger VT-d faults.
     */
    case 0x0e28: /* Xeon-E5v2 (IvyBridge) */
    case 0x342e: /* Tylersburg chipset (Nehalem / Westmere systems) */
    case 0x3728: /* Xeon C5500/C3500 (JasperForest) */
    case 0x3c28: /* Sandybridge */
        /* 0x1AC: chipset-specific error-mask register; bit 31 masks. */
        val = pci_conf_read32(seg, bus, dev, func, 0x1AC);
        pci_conf_write32(seg, bus, dev, func, 0x1AC, val | (1 << 31));
        printk(XENLOG_INFO "Masked VT-d error signaling on %04x:%02x:%02x.%u\n",
               seg, bus, dev, func);
        break;

    /* Tylersburg (EP)/Boxboro (MP) chipsets (NHM-EP/EX, WSM-EP/EX) */
    case 0x3400 ... 0x3407: /* host bridges */
    case 0x3408 ... 0x3411: case 0x3420 ... 0x3421: /* root ports */
    /* JasperForest (Intel Xeon Processor C5500/C3500 */
    case 0x3700 ... 0x370f: /* host bridges */
    case 0x3720 ... 0x3724: /* root ports */
    /* Sandybridge-EP (Romley) */
    case 0x3c00: /* host bridge */
    case 0x3c01 ... 0x3c0b: /* root ports */
        /* Locate the AER extended capability... */
        pos = pci_find_ext_capability(seg, bus, pdev->devfn,
                                      PCI_EXT_CAP_ID_ERR);
        if ( !pos )
        {
            /*
             * ...or, failing that, walk the vendor-specific capabilities
             * looking for ID 4 rev 1, behind which these chipsets expose
             * AER-layout registers (hence the PCI_VNDR_HEADER skip).
             */
            pos = pci_find_ext_capability(seg, bus, pdev->devfn,
                                          PCI_EXT_CAP_ID_VNDR);
            while ( pos )
            {
                val = pci_conf_read32(seg, bus, dev, func,
                                      pos + PCI_VNDR_HEADER);
                if ( PCI_VNDR_HEADER_ID(val) == 4 &&
                     PCI_VNDR_HEADER_REV(val) == 1 )
                {
                    pos += PCI_VNDR_HEADER;
                    break;
                }
                pos = pci_find_next_ext_capability(seg, bus, pdev->devfn, pos,
                                                   PCI_EXT_CAP_ID_VNDR);
            }
            /* No standard AER cap, so firmware-first AER cannot apply. */
            ff = 0;
        }
        else
            ff = pcie_aer_get_firmware_first(pdev);
        if ( !pos )
        {
            printk(XENLOG_WARNING "%04x:%02x:%02x.%u without AER capability?\n",
                   seg, bus, dev, func);
            break;
        }

        val = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK);
        val2 = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK);
        if ( (val & PCI_ERR_UNC_UNSUP) && (val2 & PCI_ERR_COR_ADV_NFAT) )
            action = "Found masked"; /* already masked (e.g. by firmware) */
        else if ( !ff )
        {
            /* Mask Unsupported Request errors in both AER mask registers. */
            pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK,
                             val | PCI_ERR_UNC_UNSUP);
            pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK,
                             val2 | PCI_ERR_COR_ADV_NFAT);
            action = "Masked";
        }
        else
            action = "Must not mask"; /* firmware owns AER; hands off */

        /* XPUNCERRMSK Send Completion with Unsupported Request */
        val = pci_conf_read32(seg, bus, dev, func, 0x20c);
        pci_conf_write32(seg, bus, dev, func, 0x20c, val | (1 << 4));

        printk(XENLOG_INFO "%s UR signaling on %04x:%02x:%02x.%u\n",
               action, seg, bus, dev, func);
        break;

    case 0x0040: case 0x0044: case 0x0048: /* Nehalem/Westmere */
    case 0x0100: case 0x0104: case 0x0108: /* Sandybridge */
    case 0x0150: case 0x0154: case 0x0158: /* Ivybridge */
    case 0x0a00: case 0x0a04: case 0x0a08: case 0x0a0f: /* Haswell ULT */
    case 0x0c00: case 0x0c04: case 0x0c08: case 0x0c0f: /* Haswell */
    case 0x0d00: case 0x0d04: case 0x0d08: case 0x0d0f: /* Haswell */
    case 0x1600: case 0x1604: case 0x1608: case 0x160f: /* Broadwell */
    case 0x1610: case 0x1614: case 0x1618: /* Broadwell */
    case 0x1900: case 0x1904: case 0x1908: case 0x190c: case 0x190f: /* Skylake */
    case 0x1910: case 0x1918: case 0x191f: /* Skylake */
        /* Read the 64-bit DMIBAR from config offsets 0x68 (lo) / 0x6c (hi). */
        bar = pci_conf_read32(seg, bus, dev, func, 0x6c);
        bar = (bar << 32) | pci_conf_read32(seg, bus, dev, func, 0x68);
        pa = bar & 0x7ffffff000UL; /* bits 12...38 */
        /* bar & 1 is the BAR enable bit; the range must be RESERVED RAM. */
        if ( (bar & 1) && pa &&
             page_is_ram_type(paddr_to_pfn(pa), RAM_TYPE_RESERVED) )
        {
            u32 __iomem *va = ioremap(pa, PAGE_SIZE);

            if ( va )
            {
                /*
                 * Set bit 20 of the 32-bit register at DMIBAR+0x1c8
                 * (presumably the UR-mask bit per Intel datasheets —
                 * not derivable from this file alone).
                 */
                __set_bit(0x1c8 * 8 + 20, va);
                iounmap(va);
                printk(XENLOG_INFO "Masked UR signaling on %04x:%02x:%02x.%u\n",
                       seg, bus, dev, func);
            }
            else
                printk(XENLOG_ERR "Could not map %"PRIpaddr" for %04x:%02x:%02x.%u\n",
                       pa, seg, bus, dev, func);
        }
        else
            printk(XENLOG_WARNING "Bogus DMIBAR %#"PRIx64" on %04x:%02x:%02x.%u\n",
                   bar, seg, bus, dev, func);
        break;
    }
}