static int __init valid_bridge_bus_config(int bus, int dev, int func,
            int *sec_bus, int *sub_bus)
{
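    /*
     * Check that the bridge's bus number registers are sane: the primary
     * bus must be the bus the bridge sits on, the secondary bus strictly
     * higher, and the subordinate bus at or above the secondary bus.
     */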
    int pri_bus;

    pri_bus = read_pci_config_byte(bus, dev, func, PCI_PRIMARY_BUS);
    *sec_bus = read_pci_config_byte(bus, dev, func, PCI_SECONDARY_BUS);
    *sub_bus = read_pci_config_byte(bus, dev, func, PCI_SUBORDINATE_BUS);

    return ( pri_bus == bus && *sec_bus > bus && *sub_bus >= *sec_bus );
}
Example #2
void __init quirk_intel_irqbalance(void)
{
	u8 config, rev;
	u32 word;

	/* BIOS may enable hardware IRQ balancing for
	 * E7520/E7320/E7525(revision ID 0x9 and below)
	 * based platforms.
	 * Disable SW irqbalance/affinity on those platforms.
	 */
	rev = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION);
	if (rev > 0x9)
		return;

	printk(KERN_INFO "Intel E7520/7320/7525 detected.\n");

	/* enable access to config space */
	config = read_pci_config_byte(0, 0, 0, 0xf4);
	write_pci_config_byte(0, 0, 0, 0xf4, config|0x2);

	/* read xTPR register */
	word = read_pci_config_16(0, 0, 0x40, 0x4c);

	if (!(word & (1 << 13))) {
		printk(KERN_INFO "Disabling irq balancing and affinity\n");
#ifdef CONFIG_IRQBALANCE
		irqbalance_disable("");
#endif
		noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
		no_irq_affinity = 1;
#endif
#ifdef CONFIG_HOTPLUG_CPU
		printk(KERN_INFO "Disabling cpu hotplug control\n");
		enable_cpu_hotplug = 0;
#endif
#ifdef CONFIG_X86_64
		/* force the genapic selection to flat mode so that
		 * interrupts can be redirected to more than one CPU.
		 */
		genapic_force = &apic_flat;
#endif
	}

	/* put back the original value for config space */
	if (!(config & 0x2))
		write_pci_config_byte(0, 0, 0, 0xf4, config);
}
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the
		 * PCI subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus   = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
	ir_ioapic_num++;
}
static int __init scan_functions_for_iommu(int bus, int dev,
            iommu_detect_callback_ptr_t iommu_detect_callback)
{
    int func, hdr_type;
    int count, error = 0;

    func = 0;
    count = 1;
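    /*
     * Probe function 0 first; widen the scan to all functions only if the
     * header reports a multi-function device.
     */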
    while ( VALID_PCI_VENDOR_ID(read_pci_config_16(bus, dev, func,
            PCI_VENDOR_ID)) && !error && func < count ) {
        hdr_type = read_pci_config_byte(bus, dev, func,
                PCI_HEADER_TYPE);

        if ( func == 0 && IS_PCI_MULTI_FUNCTION(hdr_type) )
            count = PCI_MAX_FUNC_COUNT;

        if ( IS_PCI_TYPE0_HEADER(hdr_type) ||
            IS_PCI_TYPE1_HEADER(hdr_type) ) {
            error = scan_caps_for_iommu(bus, dev, func,
                    iommu_detect_callback);
        }
        ++func;
    }

    return error;
}
static int __init scan_caps_for_iommu(int bus, int dev, int func,
            iommu_detect_callback_ptr_t iommu_detect_callback)
{
    int cap_ptr, cap_id, cap_type;
    u32 cap_header;
    int count, error = 0;

    count = 0;
    cap_ptr = read_pci_config_byte(bus, dev, func,
            PCI_CAPABILITY_LIST);
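    /*
     * Walk the standard PCI capability list; the block count limit guards
     * against malformed or circular capability chains.
     */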
    while ( cap_ptr >= PCI_MIN_CAP_OFFSET &&
        count < PCI_MAX_CAP_BLOCKS && !error ) {
        cap_ptr &= PCI_CAP_PTR_MASK;
        cap_header = read_pci_config(bus, dev, func, cap_ptr);
        cap_id = get_field_from_reg_u32(cap_header,
                PCI_CAP_ID_MASK, PCI_CAP_ID_SHIFT);

        if ( cap_id == PCI_CAP_ID_SECURE_DEVICE ) {
            cap_type = get_field_from_reg_u32(cap_header,
                    PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);
            if ( cap_type == PCI_CAP_TYPE_IOMMU ) {
                error = iommu_detect_callback(
                        bus, dev, func, cap_ptr);
            }
        }

        cap_ptr = get_field_from_reg_u32(cap_header,
                PCI_CAP_NEXT_PTR_MASK, PCI_CAP_NEXT_PTR_SHIFT);
        ++count;
    }

    return error;
}
Example #6
static size_t __init i85x_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
		return 0;

	return MB(1);
}
Example #7
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
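	/*
	 * Size BAR0 by writing all 1s and reading back the size mask
	 * (handling a 64-bit BAR), ensure memory decode is enabled, then map
	 * the register window with early_ioremap().
	 */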
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64	= val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64	= sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64	= PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64	|= (u64)val << 32;
		sz64	|= (u64)sz << 32;
		mask64	|= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

	/* Check if the mem space is enabled: */
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}
Example #8
static size_t __init i830_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
		return 0;

	if (tmp & I830_TSEG_SIZE_1M)
		return MB(1);
	else
		return KB(512);
}
Example #9
void DumpConfigSpace(u8 bus, u8 slot, u8 func) {
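  // Hex-dump the first 0x80 bytes of PCI config space, 16 bytes per row,
  // each row prefixed with its starting offset.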

  u8 i, j;

  for(i=0; i<0x80; i+=0x10) {
    WinDbgPrint("0x%02X ", i);
    for(j=i; j<i+0x10; j++) {
      u8 b = read_pci_config_byte(bus, slot, func, j);
      WinDbgPrint("%02X ", b);
    };
    WinDbgPrint("\n");
  }
};
Example #10
static size_t __init i845_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
		return 0;

	switch (tmp & I845_TSEG_SIZE_MASK) {
	case I845_TSEG_SIZE_512K:
		return KB(512);
	case I845_TSEG_SIZE_1M:
		return MB(1);
	default:
		WARN_ON(1);
		return 0;
	}
}
Example #11
static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
{
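	/*
	 * Poke chipset registers 0xac and 0x70, then return the revision ID
	 * (the low byte of the class/revision register at offset 0x08).
	 */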
	u32 d;
	u8  b;

	b = read_pci_config_byte(num, slot, func, 0xac);
	b &= ~(1<<5);
	write_pci_config_byte(num, slot, func, 0xac, b);

	d = read_pci_config(num, slot, func, 0x70);
	d |= 1<<8;
	write_pci_config(num, slot, func, 0x70, d);

	d = read_pci_config(num, slot, func, 0x8);
	d &= 0xff;
	return d;
}
int __init get_iommu_last_downstream_bus(struct amd_iommu *iommu)
{
    int bus, dev, func;
    int devfn, hdr_type;
    int sec_bus, sub_bus;
    int multi_func;

    bus = iommu->last_downstream_bus = iommu->root_bus;
    iommu->downstream_bus_present[bus] = 1;
    dev = PCI_SLOT(iommu->first_devfn);
    multi_func = PCI_FUNC(iommu->first_devfn) > 0;
    for ( devfn = iommu->first_devfn; devfn <= iommu->last_devfn; ++devfn ) {
        /* skipping to next device#? */
        if ( dev != PCI_SLOT(devfn) ) {
            dev = PCI_SLOT(devfn);
            multi_func = 0;
        }
        func = PCI_FUNC(devfn);
 
        if ( !VALID_PCI_VENDOR_ID(
            read_pci_config_16(bus, dev, func, PCI_VENDOR_ID)) )
            continue;

        hdr_type = read_pci_config_byte(bus, dev, func,
                PCI_HEADER_TYPE);
        if ( func == 0 )
            multi_func = IS_PCI_MULTI_FUNCTION(hdr_type);

        if ( (func == 0 || multi_func) &&
            IS_PCI_TYPE1_HEADER(hdr_type) ) {
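            /*
             * PCI-to-PCI bridge: mark every bus from its secondary bus up
             * to its subordinate bus as present downstream of this IOMMU.
             */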
            if (!valid_bridge_bus_config(bus, dev, func,
                &sec_bus, &sub_bus))
                return -ENODEV;

            if ( sub_bus > iommu->last_downstream_bus )
                iommu->last_downstream_bus = sub_bus;
            do {
                iommu->downstream_bus_present[sec_bus] = 1;
            } while ( sec_bus++ < sub_bus );
        }
    }

    return 0;
}
Example #13
static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the
		 * PCI subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus   = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id    = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base  0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}
Example #14
static void __init intel_remapping_check(int num, int slot, int func)
{
	u8 revision;
	u16 device;

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
	revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);

	/*
	 * All device IDs that trigger this quirk have a problem draining
	 * interrupts when IRQ remapping is enabled at revision <= 0x13,
	 * and should be flagged as broken. Additionally, revision 0x22
	 * of device ID 0x3405 has this problem.
	 */
	if (revision <= 0x13)
		set_irq_remapping_broken();
	else if (device == 0x3405 && revision == 0x22)
		set_irq_remapping_broken();
}
Example #15
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
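		/*
		 * The PCI subsystem isn't initialized yet, so read the
		 * bridge's secondary bus number straight from config space.
		 */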
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus   = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
	ir_hpet_num++;
}
Example #16
static size_t __init i85x_mem_size(void)
{
	return read_pci_config_byte(0, 0, 1, I85X_DRB3) * MB(32);
}
Example #17
/*
  Uses direct PCI probing to add accessible physical memory ranges.
*/
NTSTATUS PCI_AddMemoryRanges(struct PmemMemoryInfo *info, int len) {
  int required_length = (sizeof(struct PmemMemoryInfo) +
			 sizeof(PHYSICAL_MEMORY_RANGE));
  unsigned int bus, slot, func;

  if (len < required_length) {
    return STATUS_INFO_LENGTH_MISMATCH;
  };

  // Initialize the physical memory range.
  info->NumberOfRuns.QuadPart = 1;
  info->Run[0].BaseAddress.QuadPart = 0;
  info->Run[0].NumberOfBytes.QuadPart = -1;

  for (bus = 0; bus < 256; bus++) {
    for (slot = 0; slot < 32; slot++) {
      for (func = 0; func < 8; func++) {
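	// Probe the function with an early config space read; a vendor ID of
	// 0xFFFF means nothing responds at this address.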
	u8 type;

	u16 vendor_id = read_pci_config_16((u8)bus, (u8)slot, (u8)func,
					   PCI_VENDOR_ID);

	// Device not present.
	if (vendor_id == 0xffff)
	  continue;

	type = read_pci_config_byte((u8)bus, (u8)slot, (u8)func,
				    PCI_HEADER_TYPE);

        // Standard header.
	if ((type & 0x1f) == 0) {

#if WINPMEM_PCI_DEBUG
          WinDbgPrint("PCI Type %X\n", type);
          dump_interesting_fields((u8)bus, (u8)slot, (u8)func);
          dump_bar((u8)bus, (u8)slot, (u8)func);
          DumpConfigSpace((u8)bus, (u8)slot, (u8)func);
#endif
	  DumpStandardHeader((u8)bus, (u8)slot, (u8)func, info, len);

	  // PCI-PCI bridge.
	} else if ((type & 0x1f) == 1) {

#if WINPMEM_PCI_DEBUG
          WinDbgPrint("PCI Type %X\n", type);
          dump_interesting_fields((u8)bus, (u8)slot, (u8)func);
          DumpConfigSpace((u8)bus, (u8)slot, (u8)func);
#endif

	  DumpPCIBridge((u8)bus, (u8)slot, (u8)func, info, len);

	} else {
	  WinDbgPrint("Unknown PCI header at 0000:%02x:%02x.%d type %d\n",
		      bus, slot, func, type);
	};

	// This is not a multi function device.
	if (func == 0 && (type & 0x80) == 0) {
	  break;
	};
      }
    }
  }

  return STATUS_SUCCESS;
};