// Debug helper: prints the PCI address and the vendor/device id pair of the
// function at bus:slot.func as a banner preceding a config-space dump.
//
// FIX: removed the stray semicolon after the closing brace — an empty
// top-level declaration is not valid ISO C before C23 and draws warnings.
void dump_interesting_fields(u8 bus, u8 slot, u8 func) {
  u16 vendor_id = read_pci_config_16(bus, slot, func, PCI_VENDOR_ID);
  u16 device_id = read_pci_config_16(bus, slot, func, PCI_DEVICE_ID);

  WinDbgPrint("pci 0000:%02x:%02x.%d ", bus, slot, func);
  WinDbgPrint("Id %04x:%04x config space:\n", vendor_id, device_id);
}
/*
 * Detect a ScaleMP vSMP box via its control device at PCI 00:1f.0 and, if
 * both the hardware capability and control bits allow it, enable the vSMP
 * IRQ fastpath by clearing bit 4 of the control register mapped through
 * BAR0.
 *
 * Always returns 0: absence of the hardware is not an initcall error.
 *
 * FIX: ioremap() can fail; the original code passed the unchecked result
 * straight to readl(), which would oops on a NULL mapping.
 */
static int __init vsmp_init(void)
{
	void *address;
	unsigned int cap, ctl;

	if (!early_pci_allowed())
		return 0;

	/* Check if we are running on a ScaleMP vSMP box */
	if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) !=
	     PCI_VENDOR_ID_SCALEMP) ||
	    (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) !=
	     PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
		return 0;

	/* set vSMP magic bits to indicate vSMP capable kernel */
	address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8);
	if (!address)
		return 0;

	cap = readl(address);
	ctl = readl(address + 4);
	printk("vSMP CTL: capabilities:0x%08x control:0x%08x\n", cap, ctl);
	if (cap & ctl & (1 << 4)) {
		/* Turn on vSMP IRQ fastpath handling (see system.h) */
		ctl &= ~(1 << 4);
		writel(ctl, address + 4);
		ctl = readl(address + 4);
		printk("vSMP CTL: control set to:0x%08x\n", ctl);
	}

	iounmap(address);
	return 0;
}
// Decodes the forwarding windows of a PCI-PCI bridge (type 1 header) and
// records each as a memory hole (device-backed, non-RAM range) in *info.
//
// Covers: the bridge's own two BARs, the 32-bit memory window, and the
// prefetchable window (which may be 64-bit).  The I/O window is skipped on
// purpose — it never maps physical RAM.
static NTSTATUS DumpPCIBridge(u8 bus, u8 slot, u8 func,
                              struct PmemMemoryInfo *info, int len) {
  u8 offset;
  u64 base;
  u64 limit;

  // Support direct Bridge BARs.
  // NOTE(review): the loop has no increment — DumpBaseAddressRegister() is
  // expected to advance *offset past each BAR it consumes (by 4 or 8 bytes
  // depending on 32/64-bit type); confirm against its definition.
  for(offset = PCI_BASE_ADDRESS_0; offset <= PCI_BASE_ADDRESS_1;) {
    DumpBaseAddressRegister(bus, slot, func, &offset, info, len);
  };

  // I/O base and limit registers are not interesting for memory acquisition.

  // Memory base and limit registers.
  base = read_pci_config_16(bus, slot, func, PCI_MEMORY_BASE);
  if (base != 0) {
    // Bits 15:4 of the register hold address bits 31:20 of the window start.
    base = (base & 0xFFF0) << 20;

    limit = read_pci_config_16(bus, slot, func, PCI_MEMORY_LIMIT);
    // The limit is inclusive of the whole final 1MB granule, hence the
    // 0xFFFFFF low-bit fill.
    limit = (limit & 0xFFF0) << 20 | 0xFFFFFF;
    if (limit > base) {
      InsertMemoryHole(info, len, base, limit);
    };
  };

  // Prefetcheable Memory base and limit registers.
  base = read_pci_config_16(bus, slot, func, PCI_PREF_MEMORY_BASE);
  if (base != 0) {
    // Determine if the base register is 32 or 64 bit.
    // (Low nibble non-zero means 64-bit addressing; the upper half lives
    // in PCI_PREF_BASE_UPPER32.)
    if ((base & 0xF) == 0) {
      base = (base & 0xFFF0) << 20;
    } else {
      base = ((base & 0xFFF0) << 20) |
        ((u64)read_pci_config(bus, slot, func, PCI_PREF_BASE_UPPER32) << 32);
    };

    limit = read_pci_config_16(bus, slot, func, PCI_PREF_MEMORY_LIMIT);

    // Determine if the limit register is 32 or 64 bit.
    if ((limit & 0xF) == 0) {
      limit = (limit & 0xFFF0) << 20 | 0xFFFFFF;
    } else {
      limit = ((limit & 0xFFF0) << 20) | 0xFFFFFF |
        ((u64)read_pci_config(bus, slot, func, PCI_PREF_LIMIT_UPPER32) << 32);
    };

    if (limit > base) {
      InsertMemoryHole(info, len, base, limit);
    };
  };

  return STATUS_SUCCESS;
};
/*
 * Scan every present function of the PCI device at bus:dev for an IOMMU
 * capability via scan_caps_for_iommu().
 *
 * Function 0 is probed first; functions 1..PCI_MAX_FUNC_COUNT-1 are only
 * visited when function 0's header type marks the device multi-function.
 * Returns 0 on success or the first non-zero error from the capability scan.
 */
static int __init scan_functions_for_iommu(int bus, int dev,
        iommu_detect_callback_ptr_t iommu_detect_callback)
{
    int rc = 0;
    int limit = 1;      /* assume single-function until the header says more */
    int func;

    for ( func = 0; func < limit && rc == 0; ++func )
    {
        int hdr;

        if ( !VALID_PCI_VENDOR_ID(
                 read_pci_config_16(bus, dev, func, PCI_VENDOR_ID)) )
            break;

        hdr = read_pci_config_byte(bus, dev, func, PCI_HEADER_TYPE);
        if ( func == 0 && IS_PCI_MULTI_FUNCTION(hdr) )
            limit = PCI_MAX_FUNC_COUNT;

        if ( IS_PCI_TYPE0_HEADER(hdr) || IS_PCI_TYPE1_HEADER(hdr) )
            rc = scan_caps_for_iommu(bus, dev, func, iommu_detect_callback);
    }

    return rc;
}
// Sizes a 64-bit memory BAR (two consecutive 32-bit config words) and
// records its span as a memory hole in *info.
//
// Classic BAR-sizing sequence: disable decode through the command register,
// write all ones to each half, read back the writable-bit mask, restore the
// original values, then re-enable decode.  Statement order here is
// load-bearing — do not reorder.
static NTSTATUS DumpBaseAddressRegister64(u8 bus, u8 slot, u8 func,
                                          u8 offset,
                                          struct PmemMemoryInfo *info,
                                          int len) {
  u64 base = read_pci_config(bus, slot, func, offset);
  u32 base_high = read_pci_config(bus, slot, func, offset + sizeof(u32));
  u32 mask = 0;
  u32 mask_high = 0;
  u64 end = 0;
  u16 command = read_pci_config_16(bus, slot, func, PCI_COMMAND);

  // Disable IO and memory bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, 0);

  // Check the lower word first.
  write_pci_config(bus, slot, func, offset, 0xFFFFFFFF);
  // Low 4 bits are type/prefetch flags, not address bits.
  mask = read_pci_config(bus, slot, func, offset) & 0xFFFFFFF0;
  write_pci_config(bus, slot, func, offset, (u32)base);

  // Check the upper 32 bit word.
  write_pci_config(bus, slot, func, offset + sizeof(u32), 0xFFFFFFFF);
  mask_high = read_pci_config(bus, slot, func, offset + sizeof(u32));
  write_pci_config(bus, slot, func, offset + sizeof(u32), (u32)base_high);

  // Reenable bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, command);

  base = ((base & 0xFFFFFFF0) | ((u64)base_high) << 32);
  // ~combined_mask == size - 1, so end is the inclusive last address.
  end = ~(mask | ((u64)mask_high) << 32) + base;

  return InsertMemoryHole(info, len, base, end);
};
/*
 * Decode the graphics stolen-memory size field of the i830 GMCH control
 * register (always read from device 00:00.0).  Returns the stolen size in
 * bytes, or 0 for unknown encodings and for local memory, which is not part
 * of the normal address space.
 */
static size_t __init i830_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);

	switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
	case I830_GMCH_GMS_STOLEN_512:
		return KB(512);
	case I830_GMCH_GMS_STOLEN_1024:
		return MB(1);
	case I830_GMCH_GMS_STOLEN_8192:
		return MB(8);
	case I830_GMCH_GMS_LOCAL:
		/* local memory isn't part of the normal address space */
		return 0;
	default:
		return 0;
	}
}
/*
 * i865: stolen graphics memory sits at TOUD (Top Of Usable DRAM) plus the
 * TSEG region.  TOUD is a 16-bit register holding address bits 31:16.
 *
 * FIX: `toud << 16` promoted the u16 to a *signed* int, so TOUD values
 * >= 0x8000 shifted a one into the sign bit — undefined behaviour, and a
 * potential sign-extension through the phys_addr_t cast.  Widen before
 * shifting.
 */
static u32 __init i865_stolen_base(int num, int slot, int func,
				   size_t stolen_size)
{
	u16 toud = 0;

	toud = read_pci_config_16(0, 0, 0, I865_TOUD);

	return ((phys_addr_t)toud << 16) + i845_tseg_size();
}
/*
 * i865: the stolen graphics region is assumed to start at TOUD (Top Of
 * Usable DRAM); TOUD holds address bits 31:16 in a 16-bit register.
 *
 * FIXME is the graphics stolen memory region
 * always at TOUD? Ie. is it always the last
 * one to be allocated by the BIOS?
 *
 * FIX: the u16 read was promoted to a *signed* int before the shift, so
 * TOUD >= 0x8000 shifted into the sign bit (undefined behaviour).  Widen
 * to u32 first.
 */
static u32 __init i865_stolen_base(int num, int slot, int func,
				   size_t stolen_size)
{
	return (u32)read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
}
/*
 * Broadwell+: the GMS field of the GMCH control register counts stolen
 * graphics memory in 32MB units.
 *
 * FIX: `gmch_ctrl << 25` shifted a (promoted, 32-bit signed) int, so GMS
 * values >= 0x40 overflowed int — undefined behaviour and a wrong size on
 * 64-bit size_t.  Widen to size_t before shifting (same change as the
 * upstream sign-extension fix for these stolen-memory quirks).
 */
static size_t __init gen8_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
	gmch_ctrl &= BDW_GMCH_GMS_MASK;
	return (size_t)gmch_ctrl << 25; /* 32 MB units */
}
/*
 * Decode the stolen graphics memory size for gen3-era GMCHs from the GMS
 * field of the GMCH control register (read from device 00:00.0).  Returns
 * the size in bytes, or 0 for encodings this table does not know.
 */
static size_t __init gen3_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);

	switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
	case I855_GMCH_GMS_STOLEN_1M:		return MB(1);
	case I855_GMCH_GMS_STOLEN_4M:		return MB(4);
	case I855_GMCH_GMS_STOLEN_8M:		return MB(8);
	case I855_GMCH_GMS_STOLEN_16M:		return MB(16);
	case I855_GMCH_GMS_STOLEN_32M:		return MB(32);
	case I915_GMCH_GMS_STOLEN_48M:		return MB(48);
	case I915_GMCH_GMS_STOLEN_64M:		return MB(64);
	case G33_GMCH_GMS_STOLEN_128M:		return MB(128);
	case G33_GMCH_GMS_STOLEN_256M:		return MB(256);
	case INTEL_GMCH_GMS_STOLEN_96M:		return MB(96);
	case INTEL_GMCH_GMS_STOLEN_160M:	return MB(160);
	case INTEL_GMCH_GMS_STOLEN_224M:	return MB(224);
	case INTEL_GMCH_GMS_STOLEN_352M:	return MB(352);
	default:				return 0;
	}
}
/*
 * Apply Intel chipset quirks keyed off the host bridge device id at
 * PCI 00:00.0.  Currently only the E7320/E7520/E7525 hardware IRQ
 * balancing quirk, and only on SMP builds.
 */
static void intel_bugs(void)
{
	u16 device = read_pci_config_16(0, 0, 0, PCI_DEVICE_ID);

#ifdef CONFIG_SMP
	switch (device) {
	case PCI_DEVICE_ID_INTEL_E7320_MCH:
	case PCI_DEVICE_ID_INTEL_E7520_MCH:
	case PCI_DEVICE_ID_INTEL_E7525_MCH:
		quirk_intel_irqbalance();
		break;
	default:
		break;
	}
#endif
}
// Debug helper: prints the six raw BAR values of a standard (type 0) PCI
// function, then the probed size value of each BAR.
//
// FIXES:
//  - dropped the unused vendor_id/device_id locals (read but never printed);
//  - the six get_base_register_size() calls were arguments to a single
//    WinDbgPrint(), i.e. evaluated in unspecified order.  Each probe
//    temporarily rewrites the BAR and the command register, so they are now
//    sequenced explicitly, in ascending BAR order.
void dump_bar(u8 bus, u8 slot, u8 func) {
  static const u8 offsets[6] = {
    PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_1, PCI_BASE_ADDRESS_2,
    PCI_BASE_ADDRESS_3, PCI_BASE_ADDRESS_4, PCI_BASE_ADDRESS_5,
  };
  u32 base[6];
  u32 size[6];
  int i;

  for (i = 0; i < 6; i++) {
    base[i] = read_pci_config(bus, slot, func, offsets[i]);
  }

  WinDbgPrint("Base Addresses: %08X %08X %08X %08X %08X %08X\n",
              base[0], base[1], base[2], base[3], base[4], base[5]);

  // Probe sizes after printing the bases so the probe's temporary BAR
  // rewrites cannot interleave with the raw reads above.
  for (i = 0; i < 6; i++) {
    size[i] = get_base_register_size(bus, slot, func, offsets[i]);
  }

  WinDbgPrint("Masks: %08X %08X %08X %08X %08X %08X\n",
              size[0], size[1], size[2], size[3], size[4], size[5]);
}
/*
 * On Intel E7520/E7320/E7525 chipsets (revision <= 0x9) the BIOS may have
 * enabled hardware IRQ balancing; when the xTPR-disable bit is not set,
 * turn off all software irq balancing/affinity so the two mechanisms do
 * not fight.  Config-space access to the xTPR register is gated by bit 1
 * of register 0xf4, which is enabled for the read and restored afterwards.
 */
void __init quirk_intel_irqbalance(void)
{
	u8 config, rev;
	u32 word;

	/* BIOS may enable hardware IRQ balancing for
	 * E7520/E7320/E7525(revision ID 0x9 and below)
	 * based platforms.
	 * Disable SW irqbalance/affinity on those platforms.
	 */
	rev = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION);
	if (rev > 0x9)
		return;

	printk(KERN_INFO "Intel E7520/7320/7525 detected.");

	/* enable access to config space */
	config = read_pci_config_byte(0, 0, 0, 0xf4);
	write_pci_config_byte(0, 0, 0, 0xf4, config|0x2);

	/* read xTPR register */
	word = read_pci_config_16(0, 0, 0x40, 0x4c);

	/* Bit 13 set means hardware balancing is already disabled. */
	if (!(word & (1 << 13))) {
		printk(KERN_INFO "Disabling irq balancing and affinity\n");
#ifdef CONFIG_IRQBALANCE
		irqbalance_disable("");
#endif
		noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
		no_irq_affinity = 1;
#endif
#ifdef CONFIG_HOTPLUG_CPU
		printk(KERN_INFO "Disabling cpu hotplug control\n");
		enable_cpu_hotplug = 0;
#endif
#ifdef CONFIG_X86_64
		/* force the genapic selection to flat mode so that
		 * interrupts can be redirected to more than one CPU.
		 */
		genapic_force = &apic_flat;
#endif
	}

	/* put back the original value for config space */
	if (!(config & 0x2))
		write_pci_config_byte(0, 0, 0, 0xf4, config);
}
/*
 * Determine the highest bus number downstream of this IOMMU and mark every
 * reachable bus in iommu->downstream_bus_present[].
 *
 * Walks devfns first_devfn..last_devfn on the IOMMU's root bus; for each
 * present PCI-PCI bridge (type 1 header) the secondary..subordinate bus
 * range is recorded.  Returns 0 on success, or -ENODEV when a bridge
 * carries an invalid bus configuration.
 */
int __init get_iommu_last_downstream_bus(struct amd_iommu *iommu)
{
    int bus, dev, func;
    int devfn, hdr_type;
    int sec_bus, sub_bus;
    int multi_func;

    bus = iommu->last_downstream_bus = iommu->root_bus;
    iommu->downstream_bus_present[bus] = 1;
    dev = PCI_SLOT(iommu->first_devfn);
    /* Starting mid-device implies the device is multi-function. */
    multi_func = PCI_FUNC(iommu->first_devfn) > 0;
    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; ++devfn ) {
        /* skipping to next device#? */
        if ( dev != PCI_SLOT(devfn) ) {
            dev = PCI_SLOT(devfn);
            multi_func = 0;
        }
        func = PCI_FUNC(devfn);

        if ( !VALID_PCI_VENDOR_ID(
            read_pci_config_16(bus, dev, func, PCI_VENDOR_ID)) )
            continue;

        hdr_type = read_pci_config_byte(bus, dev, func, PCI_HEADER_TYPE);
        if ( func == 0 )
            multi_func = IS_PCI_MULTI_FUNCTION(hdr_type);

        /* Functions > 0 only exist on multi-function devices. */
        if ( (func == 0 || multi_func) &&
             IS_PCI_TYPE1_HEADER(hdr_type) ) {
            if (!valid_bridge_bus_config(bus, dev, func,
                                         &sec_bus, &sub_bus))
                return -ENODEV;

            if ( sub_bus > iommu->last_downstream_bus )
                iommu->last_downstream_bus = sub_bus;

            /* Mark every bus in [sec_bus, sub_bus] as present. */
            do {
                iommu->downstream_bus_present[sec_bus] = 1;
            } while ( sec_bus++ < sub_bus );
        }
    }

    return 0;
}
// Sizes a 32-bit memory BAR and records its span as a memory hole in *info.
//
// Uses the standard sizing sequence: disable decode through the command
// register, write all ones to the BAR, read back the writable-bit mask,
// restore the original value, re-enable decode.  Statement order is
// load-bearing — do not reorder.
static NTSTATUS DumpBaseAddressRegister32(u8 bus, u8 slot, u8 func,
                                          u8 offset,
                                          struct PmemMemoryInfo *info,
                                          int len) {
  u32 mask = 0;
  u32 base = read_pci_config(bus, slot, func, offset);
  u16 command = read_pci_config_16(bus, slot, func, PCI_COMMAND);

  // Disable IO and memory bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, 0);

  write_pci_config(bus, slot, func, offset, 0xFFFFFFFF);
  // Low 4 bits are type/prefetch flags, not address bits.
  mask = read_pci_config(bus, slot, func, offset) & 0xFFFFFFF0;
  write_pci_config(bus, slot, func, offset, base);

  // Reenable bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, command);

  base = base & 0xFFFFFFF0;

  // ~mask == size - 1, so base + ~mask is the inclusive end address.
  return InsertMemoryHole(info, len, base, ~mask + base);
};
/*
 * Flag interrupt remapping as broken on Intel chipsets that cannot drain
 * in-flight interrupts with remapping enabled: every device matched by
 * this quirk at revision <= 0x13, plus revision 0x22 of device 0x3405.
 */
static void __init intel_remapping_check(int num, int slot, int func)
{
	u8 revision;
	u16 device;

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
	revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);

	if (revision <= 0x13 || (device == 0x3405 && revision == 0x22))
		set_irq_remapping_broken();
}
/*
 * Cherryview: decode the stolen graphics memory size from the GMS field of
 * the GMCH control register.  The encoding is piecewise:
 *
 *   0x00..0x10: 32MB increments starting at 0MB
 *   0x11..0x16: 4MB increments starting at 8MB
 *   0x17..0x1d: 4MB increments starting at 36MB
 */
static size_t __init chv_stolen_size(int num, int slot, int func)
{
	u16 gms = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);

	gms >>= SNB_GMCH_GMS_SHIFT;
	gms &= SNB_GMCH_GMS_MASK;

	if (gms < 0x11)
		return gms << 25;
	if (gms < 0x17)
		return (gms - 0x11 + 2) << 22;
	return (gms - 0x17 + 9) << 22;
}
// Probes a 32-bit BAR by writing all ones and reading back the writable-bit
// mask, then restores the original value.  Returns ~mask + base (the
// inclusive end of the region implied by the raw base), or 0 when the BAR
// reads as all zeroes (unimplemented).
//
// FIXES:
//  - the raw register value was held in MSVC's *signed* `__int32`; BAR
//    values with bit 31 set became negative, which is non-portable and
//    error-prone when mixed into the unsigned arithmetic below.  Use the
//    driver's unsigned u32, matching DumpBaseAddressRegister32().  The bit
//    pattern — and hence behavior — is unchanged.
//  - removed the stray semicolon after the closing brace (invalid ISO C
//    before C23).
u32 get_base_register_size(u8 bus, u8 slot, u8 func, u8 offset) {
  u32 base = read_pci_config(bus, slot, func, offset);
  u32 mask = 0;
  u16 command = 0;

  // An all-zero BAR is not implemented; nothing to size.
  if (base == 0)
    return 0;

  command = read_pci_config_16(bus, slot, func, PCI_COMMAND);

  // Disable IO and memory bus access while the BAR briefly holds a bogus
  // all-ones value.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, 0);

  // Write to config space all 0xFFFFFFFF; the read-back zeroes mark the
  // size bits.  Low 4 bits are type flags, not address bits.
  write_pci_config(bus, slot, func, offset, 0xFFFFFFFF);
  mask = read_pci_config(bus, slot, func, offset) & 0xFFFFFFF0;
  write_pci_config(bus, slot, func, offset, base);

  // Reenable bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, command);

  return ~mask + base;
}
/* Uses direct PCI probing to add accessible physical memory ranges. */ NTSTATUS PCI_AddMemoryRanges(struct PmemMemoryInfo *info, int len) { int required_length = (sizeof(struct PmemMemoryInfo) + sizeof(PHYSICAL_MEMORY_RANGE)); unsigned int bus, slot, func; if (len < required_length) { return STATUS_INFO_LENGTH_MISMATCH; }; // Initialize the physical memory range. info->NumberOfRuns.QuadPart = 1; info->Run[0].BaseAddress.QuadPart = 0; info->Run[0].NumberOfBytes.QuadPart = -1; for (bus = 0; bus < 256; bus++) { for (slot = 0; slot < 32; slot++) { for (func = 0; func < 8; func++) { u8 type; u16 vendor_id = read_pci_config_16((u8)bus, (u8)slot, (u8)func, PCI_VENDOR_ID); // Device not present. if (vendor_id == 0xffff) continue; type = read_pci_config_byte((u8)bus, (u8)slot, (u8)func, PCI_HEADER_TYPE); // Standard header. if ((type & 0x1f) == 0) { #if WINPMEM_PCI_DEBUG WinDbgPrint("PCI Type %X\n", type); dump_interesting_fields((u8)bus, (u8)slot, (u8)func); dump_bar((u8)bus, (u8)slot, (u8)func); DumpConfigSpace((u8)bus, (u8)slot, (u8)func); #endif DumpStandardHeader((u8)bus, (u8)slot, (u8)func, info, len); // PCI-PCI bridge. } else if ((type & 0x1f) == 1) { #if WINPMEM_PCI_DEBUG WinDbgPrint("PCI Type %X\n", type); dump_interesting_fields((u8)bus, (u8)slot, (u8)func); DumpConfigSpace((u8)bus, (u8)slot, (u8)func); #endif DumpPCIBridge((u8)bus, (u8)slot, (u8)func, info, len); } else { WinDbgPrint("Unknown header PCI at 0000:%02x:%02x.%d type %d\n", bus, slot, func, type); }; // This is not a multi function device. if (func == 0 && (type & 0x80) == 0) { break; }; } } } return STATUS_SUCCESS; };