static NTSTATUS DumpBaseAddressRegister64(u8 bus, u8 slot, u8 func, u8 offset,
                                          struct PmemMemoryInfo *info,
                                          int len) {
  u64 base = read_pci_config(bus, slot, func, offset);
  u32 base_high = read_pci_config(bus, slot, func, offset + sizeof(u32));
  u32 mask = 0;
  u32 mask_high = 0;
  u64 end = 0;
  u16 command = read_pci_config_16(bus, slot, func, PCI_COMMAND);

  // Disable IO and memory bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, 0);

  // Check the lower word first.
  write_pci_config(bus, slot, func, offset, 0xFFFFFFFF);
  mask = read_pci_config(bus, slot, func, offset) & 0xFFFFFFF0;
  write_pci_config(bus, slot, func, offset, (u32)base);

  // Check the upper 32 bit word.
  write_pci_config(bus, slot, func, offset + sizeof(u32), 0xFFFFFFFF);
  mask_high = read_pci_config(bus, slot, func, offset + sizeof(u32));
  write_pci_config(bus, slot, func, offset + sizeof(u32), (u32)base_high);

  // Reenable bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, command);

  base = ((base & 0xFFFFFFF0) | ((u64)base_high) << 32);
  end = ~(mask | ((u64)mask_high) << 32) + base;

  return InsertMemoryHole(info, len, base, end);
};
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64 = PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64 |= (u64)val << 32;
		sz64 |= (u64)sz << 32;
		mask64 |= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

	/* Check if the mem space is enabled: */
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}
static NTSTATUS DumpPCIBridge(u8 bus, u8 slot, u8 func,
                              struct PmemMemoryInfo *info, int len) {
  u8 offset;
  u64 base;
  u64 limit;

  // Support direct Bridge BARs.
  for (offset = PCI_BASE_ADDRESS_0; offset <= PCI_BASE_ADDRESS_1;) {
    DumpBaseAddressRegister(bus, slot, func, &offset, info, len);
  };

  // I/O base and limit registers are not interesting for memory acquisition.

  // Memory base and limit registers.
  base = read_pci_config_16(bus, slot, func, PCI_MEMORY_BASE);
  if (base != 0) {
    base = (base & 0xFFF0) << 20;

    limit = read_pci_config_16(bus, slot, func, PCI_MEMORY_LIMIT);
    limit = (limit & 0xFFF0) << 20 | 0xFFFFFF;

    if (limit > base) {
      InsertMemoryHole(info, len, base, limit);
    };
  };

  // Prefetchable Memory base and limit registers.
  base = read_pci_config_16(bus, slot, func, PCI_PREF_MEMORY_BASE);
  if (base != 0) {
    // Determine if the base register is 32 or 64 bit.
    if ((base & 0xF) == 0) {
      base = (base & 0xFFF0) << 20;
    } else {
      base = ((base & 0xFFF0) << 20) |
             ((u64)read_pci_config(bus, slot, func, PCI_PREF_BASE_UPPER32) << 32);
    };

    limit = read_pci_config_16(bus, slot, func, PCI_PREF_MEMORY_LIMIT);
    // Determine if the limit register is 32 or 64 bit.
    if ((limit & 0xF) == 0) {
      limit = (limit & 0xFFF0) << 20 | 0xFFFFFF;
    } else {
      limit = ((limit & 0xFFF0) << 20) | 0xFFFFFF |
              ((u64)read_pci_config(bus, slot, func, PCI_PREF_LIMIT_UPPER32) << 32);
    };

    if (limit > base) {
      InsertMemoryHole(info, len, base, limit);
    };
  };

  return STATUS_SUCCESS;
};
int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
                                  struct amd_iommu *iommu)
{
    u32 cap_header, cap_range;
    u64 mmio_bar;

    /* remove it when BIOS available */
    write_pci_config(bus, dev, func,
                     cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
    write_pci_config(bus, dev, func,
                     cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
    /* remove it when BIOS available */

    mmio_bar = (u64)read_pci_config(bus, dev, func,
                                    cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
    mmio_bar |= read_pci_config(bus, dev, func,
                                cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET) &
                PCI_CAP_MMIO_BAR_LOW_MASK;
    iommu->mmio_base_phys = (unsigned long)mmio_bar;

    if ( (mmio_bar == 0) || ((mmio_bar & 0x3FFF) != 0) )
    {
        dprintk(XENLOG_ERR,
                "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
        return -ENODEV;
    }

    cap_header = read_pci_config(bus, dev, func, cap_ptr);
    iommu->revision = get_field_from_reg_u32(cap_header,
                                             PCI_CAP_REV_MASK,
                                             PCI_CAP_REV_SHIFT);
    iommu->iotlb_support = get_field_from_reg_u32(cap_header,
                                                  PCI_CAP_IOTLB_MASK,
                                                  PCI_CAP_IOTLB_SHIFT);
    iommu->ht_tunnel_support = get_field_from_reg_u32(cap_header,
                                                      PCI_CAP_HT_TUNNEL_MASK,
                                                      PCI_CAP_HT_TUNNEL_SHIFT);
    iommu->not_present_cached = get_field_from_reg_u32(cap_header,
                                                       PCI_CAP_NP_CACHE_MASK,
                                                       PCI_CAP_NP_CACHE_SHIFT);

    cap_range = read_pci_config(bus, dev, func, cap_ptr + PCI_CAP_RANGE_OFFSET);
    iommu->root_bus = get_field_from_reg_u32(cap_range,
                                             PCI_CAP_BUS_NUMBER_MASK,
                                             PCI_CAP_BUS_NUMBER_SHIFT);
    iommu->first_devfn = get_field_from_reg_u32(cap_range,
                                                PCI_CAP_FIRST_DEVICE_MASK,
                                                PCI_CAP_FIRST_DEVICE_SHIFT);
    iommu->last_devfn = get_field_from_reg_u32(cap_range,
                                               PCI_CAP_LAST_DEVICE_MASK,
                                               PCI_CAP_LAST_DEVICE_SHIFT);

    return 0;
}
static void __init fix_hypertransport_config(int num, int slot, int func)
{
	u32 htcfg;
	/*
	 * we found a hypertransport bus
	 * make sure that we are broadcasting
	 * interrupts to all cpus on the ht bus
	 * if we're using extended apic ids
	 */
	htcfg = read_pci_config(num, slot, func, 0x68);
	if (htcfg & (1 << 18)) {
		printk(KERN_INFO "Detected use of extended apic ids "
				 "on hypertransport bus\n");
		if ((htcfg & (1 << 17)) == 0) {
			printk(KERN_INFO "Enabling hypertransport extended "
					 "apic interrupt broadcast\n");
			printk(KERN_INFO "Note this is a bios bug, "
					 "please contact your hw vendor\n");
			htcfg |= (1 << 17);
			write_pci_config(num, slot, func, 0x68, htcfg);
		}
	}
}
static int __init scan_caps_for_iommu(
    int bus, int dev, int func,
    iommu_detect_callback_ptr_t iommu_detect_callback)
{
    int cap_ptr, cap_id, cap_type;
    u32 cap_header;
    int count, error = 0;

    count = 0;
    cap_ptr = read_pci_config_byte(bus, dev, func, PCI_CAPABILITY_LIST);

    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
            count < PCI_MAX_CAP_BLOCKS && !error )
    {
        cap_ptr &= PCI_CAP_PTR_MASK;
        cap_header = read_pci_config(bus, dev, func, cap_ptr);
        cap_id = get_field_from_reg_u32(cap_header,
                                        PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);

        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE )
        {
            cap_type = get_field_from_reg_u32(cap_header,
                                              PCI_CAP_TYPE_MASK,
                                              PCI_CAP_TYPE_SHIFT);
            if ( cap_type == PCI_CAP_TYPE_IOMMU )
                error = iommu_detect_callback(bus, dev, func, cap_ptr);
        }

        cap_ptr = get_field_from_reg_u32(cap_header,
                                         PCI_CAP_NEXT_PTR_MASK,
                                         PCI_CAP_NEXT_PTR_SHIFT);
        ++count;
    }

    return error;
}
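/*
 * A minimal sketch (not from the Xen tree) of a callback that could be passed
 * to scan_caps_for_iommu() above: it only records where the IOMMU capability
 * block was found. The function name and the use of printk here are
 * illustrative assumptions; a real detection callback would go on to parse
 * the capability, e.g. via get_iommu_capabilities().
 */
static int __init example_iommu_detect(int bus, int dev, int func, int cap_ptr)
{
    /* cap_ptr is the config-space offset of the secure-device capability. */
    printk("IOMMU capability found at %02x:%02x.%x, offset 0x%02x\n",
           bus, dev, func, cap_ptr);
    return 0;
}

/* Possible use: scan_caps_for_iommu(bus, dev, func, example_iommu_detect); */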
// Advances the offset depending on the size of the base address register.
static NTSTATUS DumpBaseAddressRegister(u8 bus, u8 slot, u8 func, u8 *offset,
                                        struct PmemMemoryInfo *info, int len) {
  u64 base = read_pci_config(bus, slot, func, *offset) & 0xFFFFFFFF;

  if (base == 0) {
    *offset += sizeof(u32);
    return STATUS_SUCCESS;
  };

  // We skip IO space registers since they are not interesting for memory
  // acquisition.
  if ((base & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
    *offset += sizeof(u32);

  // 64 bit base address register.
  } else if ((base & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
             PCI_BASE_ADDRESS_MEM_TYPE_64) {
    DumpBaseAddressRegister64(bus, slot, func, *offset, info, len);
    *offset += sizeof(u64);

  // 32 bit memspace or io space base address register.
  } else if ((base & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
    DumpBaseAddressRegister32(bus, slot, func, *offset, info, len);
    *offset += sizeof(u32);

  } else {
    // Something else - Just advance the offset one word forward.
    *offset += sizeof(u32);
  };

  return STATUS_SUCCESS;
};
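/*
 * Hypothetical sketch showing how the offset-advancing convention of
 * DumpBaseAddressRegister() could be used to walk all six BARs of a type-0
 * header. The wrapper name is an assumption for illustration, not part of
 * the driver.
 */
static NTSTATUS DumpAllDeviceBARs(u8 bus, u8 slot, u8 func,
                                  struct PmemMemoryInfo *info, int len) {
  u8 offset = PCI_BASE_ADDRESS_0;

  // DumpBaseAddressRegister() advances offset by 4 or 8 bytes depending on
  // whether the BAR it decoded was 32 or 64 bit wide.
  while (offset <= PCI_BASE_ADDRESS_5) {
    DumpBaseAddressRegister(bus, slot, func, &offset, info, len);
  }

  return STATUS_SUCCESS;
};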
static void __init ati_bugs_contd(int num, int slot, int func)
{
	u32 d, rev;

	rev = ati_sbx00_rev(num, slot, func);
	if (rev >= 0x40)
		acpi_fix_pin2_polarity = 1;

	/*
	 * SB600: revisions 0x11, 0x12, 0x13, 0x14, ...
	 * SB700: revisions 0x39, 0x3a, ...
	 * SB800: revisions 0x40, 0x41, ...
	 */
	if (rev >= 0x39)
		return;

	if (acpi_use_timer_override)
		return;

	/* check for IRQ0 interrupt swap */
	d = read_pci_config(num, slot, func, 0x64);
	if (!(d & (1<<14)))
		acpi_skip_timer_override = 1;

	if (acpi_skip_timer_override) {
		printk(KERN_INFO "SB600 revision 0x%x\n", rev);
		printk(KERN_INFO "Ignoring ACPI timer override.\n");
		printk(KERN_INFO "If you got timer trouble "
		       "try acpi_use_timer_override\n");
	}
}
static int __init vsmp_init(void)
{
	void *address;
	unsigned int cap, ctl;

	if (!is_vsmp_box())
		return 0;

	/* set vSMP magic bits to indicate vSMP capable kernel */
	address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8);
	cap = readl(address);
	ctl = readl(address + 4);
	printk("vSMP CTL: capabilities:0x%08x control:0x%08x\n", cap, ctl);
	if (cap & ctl & (1 << 4)) {
		/* Turn on vSMP IRQ fastpath handling (see system.h) */
		ctl &= ~(1 << 4);
		writel(ctl, address + 4);
		ctl = readl(address + 4);
		printk("vSMP CTL: control set to:0x%08x\n", ctl);
	}

	iounmap(address);
	return 0;
}
static void __init set_vsmp_pv_ops(void)
{
	void __iomem *address;
	unsigned int cap, ctl, cfg;

	/* set vSMP magic bits to indicate vSMP capable kernel */
	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
	address = early_ioremap(cfg, 8);
	cap = readl(address);
	ctl = readl(address + 4);
	printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
	       cap, ctl);
	if (cap & ctl & (1 << 4)) {
		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
		pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
		pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
		pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
		pv_init_ops.patch = vsmp_patch;
		ctl &= ~(1 << 4);
		writel(ctl, address + 4);
		ctl = readl(address + 4);
		printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
	}

	early_iounmap(address, 8);
}
static int __init vsmp_init(void)
{
	void *address;
	unsigned int cap, ctl;

	if (!early_pci_allowed())
		return 0;

	/* Check if we are running on a ScaleMP vSMP box */
	if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) !=
	     PCI_VENDOR_ID_SCALEMP) ||
	    (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) !=
	     PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
		return 0;

	/* set vSMP magic bits to indicate vSMP capable kernel */
	address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8);
	cap = readl(address);
	ctl = readl(address + 4);
	printk("vSMP CTL: capabilities:0x%08x control:0x%08x\n", cap, ctl);
	if (cap & ctl & (1 << 4)) {
		/* Turn on vSMP IRQ fastpath handling (see system.h) */
		ctl &= ~(1 << 4);
		writel(ctl, address + 4);
		ctl = readl(address + 4);
		printk("vSMP CTL: control set to:0x%08x\n", ctl);
	}

	iounmap(address);
	return 0;
}
static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
{
	u32 d;
	u8 b;

	b = read_pci_config_byte(num, slot, func, 0xac);
	b &= ~(1<<5);
	write_pci_config_byte(num, slot, func, 0xac, b);

	d = read_pci_config(num, slot, func, 0x70);
	d |= 1<<8;
	write_pci_config(num, slot, func, 0x70, d);

	d = read_pci_config(num, slot, func, 0x8);
	d &= 0xff;
	return d;
}
static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
	u32 d;

	d = read_pci_config(num, slot, func, 0x8);
	d &= 0xff;

	return d;
}
static __init int find_northbridge(void)
{
	int num;

	for (num = 0; num < 32; num++) {
		u32 header;

		header = read_pci_config(0, num, 0, 0x00);
		if (header != (PCI_VENDOR_ID_AMD | (0x1100<<16)))
			continue;

		header = read_pci_config(0, num, 1, 0x00);
		if (header != (PCI_VENDOR_ID_AMD | (0x1101<<16)))
			continue;
		return num;
	}

	return -1;
}
static NTSTATUS DumpBaseAddressRegister32(u8 bus, u8 slot, u8 func, u8 offset,
                                          struct PmemMemoryInfo *info,
                                          int len) {
  u32 mask = 0;
  u32 base = read_pci_config(bus, slot, func, offset);
  u16 command = read_pci_config_16(bus, slot, func, PCI_COMMAND);

  // Disable IO and memory bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, 0);

  write_pci_config(bus, slot, func, offset, 0xFFFFFFFF);
  mask = read_pci_config(bus, slot, func, offset) & 0xFFFFFFF0;
  write_pci_config(bus, slot, func, offset, base);

  // Reenable bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, command);

  base = base & 0xFFFFFFF0;

  return InsertMemoryHole(info, len, base, ~mask + base);
};
static void __init detect_vsmp_box(void)
{
	is_vsmp = 0;

	if (!early_pci_allowed())
		return;

	/* Check if we are running on a ScaleMP vSMPowered box */
	if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
	    (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
		is_vsmp = 1;
}
static void __init detect_vsmp_box(void)
{
	is_vsmp = 0;

	if (!early_pci_allowed())
		return;

	if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
	    (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
		is_vsmp = 1;
}
ACPI_STATUS
AcpiOsReadPciConfiguration (
    ACPI_PCI_ID             *PciId,
    UINT32                  PciRegister,
    UINT64                  *Value,
    UINT32                  Width)
{
    *Value = read_pci_config(PciId->Segment, PciId->Bus, PciId->Device,
                             PciId->Function, PciRegister);

    return (AE_OK);
}
u32 get_base_register_size(u8 bus, u8 slot, u8 func, u8 offset) {
  __int32 base = read_pci_config(bus, slot, func, offset);
  u32 mask = 0;
  u16 command = 0;

  if (base == 0)
    return 0;

  command = read_pci_config_16(bus, slot, func, PCI_COMMAND);

  // Disable IO and memory bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, 0);

  // Write to config space all 0xFFFFFFFF
  write_pci_config(bus, slot, func, offset, 0xFFFFFFFF);
  mask = read_pci_config(bus, slot, func, offset) & 0xFFFFFFF0;
  write_pci_config(bus, slot, func, offset, base);

  // Reenable bus access.
  write_pci_config_16(bus, slot, func, PCI_COMMAND, command);

  return ~mask + base;
};
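/*
 * Minimal illustration (not part of any of the sources above) of the standard
 * BAR-probe arithmetic these functions rely on: after writing all ones to a
 * BAR, the address bits that read back as zero encode the region size. The
 * helper name is hypothetical.
 */
static u32 bar_size_from_readback(u32 readback) {
  u32 mask = readback & 0xFFFFFFF0;  // Strip the memory type/prefetch bits.

  if (mask == 0)
    return 0;  // BAR not implemented.

  // Example: a readback of 0xFFFF000C gives mask 0xFFFF0000, so the region
  // size is ~0xFFFF0000 + 1 = 0x00010000 (64 KiB).
  return ~mask + 1;
};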
int is_vsmp_box(void)
{
	if (vsmp != -1)
		return vsmp;

	vsmp = 0;

	/* Check if we are running on a ScaleMP vSMP box */
	if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
	    (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
		vsmp = 1;

	return vsmp;
}
/*
 * Systems with Intel graphics controllers set aside memory exclusively
 * for gfx driver use. This memory is not marked in the E820 as reserved
 * or as RAM, and so is subject to overlap from E820 manipulation later
 * in the boot process. On some systems, MMIO space is allocated on top,
 * despite the efforts of the "RAM buffer" approach, which simply rounds
 * memory boundaries up to 64M to try to catch space that may decode
 * as RAM and so is not suitable for MMIO.
 *
 * And yes, so far on current devices the base addr is always under 4G.
 */
static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	u32 base;

	/*
	 * For the PCI IDs in this quirk, the stolen base is always
	 * in 0x5c, aka the BDSM register (yes that's really what
	 * it's called).
	 */
	base = read_pci_config(num, slot, func, 0x5c);
	base &= ~((1<<20) - 1);

	return base;
}
/* config_addr size -- status value */
static int rc_read_pcicfg( ulong args[], ulong ret[] )
{
	pci_addr_t addr;
	ulong v;

	/* XXX: don't know how to handle different PCI domains... */
	/* 0,bus,devfn,reg */
	addr = PCIADDR_FROM_BUS_DEVFN( 0, (args[0]>>16)&0xff, (args[0]>>8)&0xff );
	v = read_pci_config( addr, args[0]&0xff, args[1] );

	if( args[1] == 2 )
		v = ld_le32(&v) >> 16;
	if( args[1] == 4 )
		v = ld_le32(&v);
	ret[1] = v;

	/* printm("RTAS: read_pci_config (%ld:%02lX) + 0x%02lx [%ld] : %08lX\n",
	       (args[0]>>16)&0xff, (args[0]>>8)&0xff, args[0]&0xff, args[1], ret[1] ); */

	ret[0] = 0;
	return 0;
}
void dump_bar(u8 bus, u8 slot, u8 func) {
  u16 vendor_id = read_pci_config_16(bus, slot, func, PCI_VENDOR_ID);
  u16 device_id = read_pci_config_16(bus, slot, func, PCI_DEVICE_ID);

  u32 base0 = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
  u32 base1 = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1);
  u32 base2 = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_2);
  u32 base3 = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_3);
  u32 base4 = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_4);
  u32 base5 = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_5);

  WinDbgPrint("Base Addresses: %08X %08X %08X %08X %08X %08X\n",
              base0, base1, base2, base3, base4, base5);

  WinDbgPrint("Masks: %08X %08X %08X %08X %08X %08X\n",
              get_base_register_size(bus, slot, func, PCI_BASE_ADDRESS_0),
              get_base_register_size(bus, slot, func, PCI_BASE_ADDRESS_1),
              get_base_register_size(bus, slot, func, PCI_BASE_ADDRESS_2),
              get_base_register_size(bus, slot, func, PCI_BASE_ADDRESS_3),
              get_base_register_size(bus, slot, func, PCI_BASE_ADDRESS_4),
              get_base_register_size(bus, slot, func, PCI_BASE_ADDRESS_5));
}
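/*
 * Hedged usage sketch for dump_bar(): brute-force every bus/slot/function and
 * dump the BARs of devices that respond. The enclosing loop and its name are
 * illustrative assumptions only; a vendor ID of 0xFFFF is the usual
 * "no device present" indication.
 */
void dump_all_bars(void) {
  u32 bus;
  u8 slot, func;

  for (bus = 0; bus < 256; bus++) {
    for (slot = 0; slot < 32; slot++) {
      for (func = 0; func < 8; func++) {
        if (read_pci_config_16((u8)bus, slot, func, PCI_VENDOR_ID) == 0xFFFF)
          continue;

        dump_bar((u8)bus, slot, func);
      }
    }
  }
};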