Example #1
/*
 * map IGD MMIO+0x2000 page to allow Xen access to IGD 3D register.
 */
static void *map_igd_reg(void)
{
    u64 igd_mmio, igd_reg;

    if ( !is_cantiga_b3 && !is_snb_gfx )
        return NULL;

    if ( igd_reg_va )
        return igd_reg_va;

    /* get IGD mmio address in PCI BAR */
    igd_mmio = ((u64)pci_conf_read32(0, IGD_DEV, 0, 0x14) << 32) +
                     pci_conf_read32(0, IGD_DEV, 0, 0x10);

    /* offset of the IGD register we want to access is in the 0x2000 range */
    igd_reg = (igd_mmio & IGD_BAR_MASK) + 0x2000;

    /* ioremap this physical page */
#if defined(CONFIG_X86)
    set_fixmap_nocache(FIX_IGD_MMIO, igd_reg);
    igd_reg_va = (u8 *)fix_to_virt(FIX_IGD_MMIO);
#else
    igd_reg_va = ioremap_nocache(igd_reg, 0x100);
#endif
    return igd_reg_va;
}
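Once map_igd_reg() has set igd_reg_va, registers in that page are read through ordinary volatile MMIO accesses. A minimal, self-contained sketch of such an accessor (a hypothetical helper, not the accessor Xen itself uses):

#include <stdint.h>
#include <stddef.h>

/* Read a 32-bit register at 'offset' within an already-mapped MMIO page. */
static inline uint32_t mmio_read32(volatile void *base, size_t offset)
{
    return *(volatile uint32_t *)((volatile uint8_t *)base + offset);
}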
Example #2
void me_wifi_quirk(struct domain *domain, u8 bus, u8 devfn, int map)
{
    u32 id;

    id = pci_conf_read32(0, 0, 0, 0);
    if ( IS_CTG(id) )
    {
        /* quit if ME does not exist */
        if ( pci_conf_read32(0, 3, 0, 0) == 0xffffffff )
            return;

        /* if device is WLAN device, map ME phantom device 0:3.7 */
        id = pci_conf_read32(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 0);
        switch (id)
        {
            case 0x42328086:
            case 0x42358086:
            case 0x42368086:
            case 0x42378086:
            case 0x423a8086:
            case 0x423b8086:
            case 0x423c8086:
            case 0x423d8086:
                map_me_phantom_function(domain, 3, map);
                break;
            default:
                break;
        }
    }
    else if ( IS_ILK(id) || IS_CPT(id) )
    {
        /* quit if ME does not exist */
        if ( pci_conf_read32(0, 22, 0, 0) == 0xffffffff )
            return;

        /* if device is WLAN device, map ME phantom device 0:22.7 */
        id = pci_conf_read32(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 0);
        switch (id)
        {
            case 0x00878086:        /* Kilmer Peak */
            case 0x00898086:
            case 0x00828086:        /* Taylor Peak */
            case 0x00858086:
            case 0x008F8086:        /* Rainbow Peak */
            case 0x00908086:
            case 0x00918086:
            case 0x42388086:        /* Puma Peak */
            case 0x422b8086:
            case 0x422c8086:
                map_me_phantom_function(domain, 22, map);
                break;
            default:
                break;
        }
    }
}
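The 32-bit dword read at config offset 0 packs the device ID in the upper 16 bits and the vendor ID in the lower 16 bits, which is why every WLAN case above ends in 8086 (Intel). A small standalone sketch of that decomposition:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t id = 0x42328086;          /* one of the WLAN IDs listed above */
    uint16_t vendor = id & 0xffff;     /* 0x8086: Intel */
    uint16_t device = id >> 16;        /* 0x4232 */

    printf("vendor %04x device %04x\n", vendor, device);
    return 0;
}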
Example #3
/*
 * Mask reporting Intel VT-d faults to IOH core logic:
 *   - Some platforms escalate VT-d faults to platform errors.
 *   - This can cause system failure upon non-fatal VT-d faults.
 *   - Potential security issue if a malicious guest triggers VT-d faults.
 */
void __hwdom_init pci_vtd_quirk(struct pci_dev *pdev)
{
    int seg = pdev->seg;
    int bus = pdev->bus;
    int dev = PCI_SLOT(pdev->devfn);
    int func = PCI_FUNC(pdev->devfn);
    int id, val;

    id = pci_conf_read32(seg, bus, dev, func, 0);
    if ( id == 0x342e8086 || id == 0x3c288086 )
    {
        val = pci_conf_read32(seg, bus, dev, func, 0x1AC);
        pci_conf_write32(seg, bus, dev, func, 0x1AC, val | (1 << 31));
    }
}
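The quirk itself is a plain read-modify-write that sets bit 31 of the device-specific register at offset 0x1AC. A minimal standalone sketch of that step, using an unsigned literal so the shift never lands in the sign bit:

#include <stdint.h>

/* Set a single bit in a 32-bit register value (read-modify-write step). */
static inline uint32_t set_bit32(uint32_t val, unsigned int bit)
{
    return val | (UINT32_C(1) << bit);
}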
Example #4
/*
 * map IGD MMIO+0x2000 page to allow Xen access to IGD 3D register.
 */
static void __init map_igd_reg(void)
{
    u64 igd_mmio;

    if ( !is_cantiga_b3 && !is_snb_gfx )
        return;

    if ( igd_reg_va )
        return;

    igd_mmio   = pci_conf_read32(0, 0, IGD_DEV, 0, PCI_BASE_ADDRESS_1);
    igd_mmio <<= 32;
    igd_mmio  += pci_conf_read32(0, 0, IGD_DEV, 0, PCI_BASE_ADDRESS_0);
    igd_reg_va = ioremap(igd_mmio & IGD_BAR_MASK, 0x3000);
}
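PCI_BASE_ADDRESS_0/1 form one 64-bit memory BAR here: the high dword is shifted up by 32 and the low dword's flag bits are masked off; IGD_BAR_MASK serves that purpose in the example (its exact definition isn't shown). A standalone sketch of the same assembly, assuming a 64-bit memory BAR and the generic low-nibble flag mask:

#include <stdint.h>

/* Combine the two 32-bit halves of a 64-bit memory BAR and strip the low
 * flag bits (bit 0: I/O space, bits 2:1: type, bit 3: prefetchable). */
static inline uint64_t bar64_address(uint32_t bar_lo, uint32_t bar_hi)
{
    return (((uint64_t)bar_hi << 32) | bar_lo) & ~(uint64_t)0xf;
}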
Example #5
/*
 * Mask reporting Intel VT-d faults to IOH core logic:
 *   - Some platforms escalate VT-d faults to platform errors.
 *   - This can cause system failure upon non-fatal VT-d faults.
 *   - Potential security issue if a malicious guest triggers VT-d faults.
 */
void pci_vtd_quirk(struct pci_dev *pdev)
{
#ifdef CONFIG_X86_64
    int bus = pdev->bus;
    int dev = PCI_SLOT(pdev->devfn);
    int func = PCI_FUNC(pdev->devfn);
    int id, val;

    id = pci_conf_read32(bus, dev, func, 0);
    if ( id == 0x342e8086 || id == 0x3c288086 )
    {
        val = pci_conf_read32(bus, dev, func, 0x1AC);
        pci_conf_write32(bus, dev, func, 0x1AC, val | (1 << 31));
    }
#endif
}
Example #6
static int __init get_iommu_capabilities(
    u16 seg, u8 bus, u8 dev, u8 func, u16 cap_ptr, struct amd_iommu *iommu)
{
    u8 type;

    iommu->cap.header = pci_conf_read32(seg, bus, dev, func, cap_ptr);
    type = get_field_from_reg_u32(iommu->cap.header, PCI_CAP_TYPE_MASK,
                                  PCI_CAP_TYPE_SHIFT);

    if ( type != PCI_CAP_TYPE_IOMMU )
        return -ENODEV;

    return 0;
}
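get_field_from_reg_u32() is the usual mask-and-shift field extraction; a self-contained sketch of that pattern (the real Xen helper and the PCI_CAP_TYPE_* constants are defined elsewhere):

#include <stdint.h>

/* Extract a bit field from a 32-bit register value. */
static inline uint32_t get_field_u32(uint32_t reg, uint32_t mask,
                                     uint32_t shift)
{
    return (reg & mask) >> shift;
}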
Example #7
/* initialize platform identification flags */
void __init platform_quirks_init(void)
{
    ioh_id = pci_conf_read32(0, IOH_DEV, 0, 0);
    igd_id = pci_conf_read32(0, IGD_DEV, 0, 0);

    /* Mobile 4 Series Chipset neglects to set RWBF capability. */
    if ( ioh_id == 0x2a408086 )
    {
        dprintk(XENLOG_INFO VTDPREFIX, "DMAR: Forcing write-buffer flush\n");
        rwbf_quirk = 1;
    }

    /* initialize cantiga B3 identification */
    cantiga_b3_errata_init();

    snb_errata_init();

    /* ioremap IGD MMIO+0x2000 page */
    map_igd_reg();

    /* Tylersburg interrupt remap quirk */
    if ( iommu_intremap )
        tylersburg_intremap_quirk();
}
Example #8
/* 5500/5520/X58 Chipset Interrupt remapping errata, for stepping B-3.
 * Fixed in stepping C-2. */
static void __init tylersburg_intremap_quirk(void)
{
    uint32_t bus, device;
    uint8_t rev;

    for ( bus = 0; bus < 0x100; bus++ )
    {
        /* Match on System Management Registers on Device 20 Function 0 */
        device = pci_conf_read32(bus, 20, 0, PCI_VENDOR_ID);
        rev = pci_conf_read8(bus, 20, 0, PCI_REVISION_ID);

        if ( rev == 0x13 && device == 0x342e8086 )
        {
            printk(XENLOG_WARNING VTDPREFIX
                   "Disabling IOMMU due to Intel 5500/5520/X58 Chipset errata #47, #53\n");
            iommu_enabled = 0;
            break;
        }
    }
}
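As in Example #2, device presence is detected by reading the vendor/device dword and treating an all-ones value as "no device responded". A tiny standalone predicate for that check (hypothetical helper name):

#include <stdint.h>
#include <stdbool.h>

/* A config read of 0xFFFFFFFF at offset 0 means no device answered. */
static inline bool pci_id_valid(uint32_t id)
{
    return id != 0xFFFFFFFFu;
}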
Example #9
static void pcie_scanbus(struct pci_tegra_device *dev_parent)
{
	u8 subordinate_bus;
	u8 hdr_type;
	u8 next_bus_number;
	u32 device = 0;
	u32 id;
	struct pci_tegra_device *dev;
	u32 retry_count;

	next_bus_number = dev_parent->sec_bus;

next_device:
	retry_count = 6;
	if (device == 0x20) {
		/* Termination condition: maximum number of devices reached.
		 * A PCIe bus segment can only have 32 devices.
		 */
		dev_parent->sub_bus = next_bus_number;
		if (!dev_parent->root_port) {
			/* Change the subordinate bus number to the actual
			 * value for all buses in the hierarchy.
			 *
			 * Do this for everything except the root port.
			 */
			pci_conf_write8(dev_parent->bus, dev_parent->devfn,
				PCI_SUBORDINATE_BUS, next_bus_number);
		}
		return;
	}

	if (dev_parent->root_port && device != 0) {
		/* Special exit condition for the root port:
		 * the root port only connects to one bridge or device.
		 */
		dev_parent->sub_bus = dev_parent->sec_bus;
		return;
	}

	while (--retry_count) {
		id = pci_conf_read32(dev_parent->sec_bus,
			PCI_DEVFN(device, 0), 0);
		if (id != 0xFFFFFFFF) {
			/* Found a valid device, break. Otherwise, retry a
			 * couple of times. It is possible that the bridges
			 * take some time to settle and it can take a couple
			 * of transactions to find the devices behind the
			 * bridge.
			 */
			/* FIXME: What should be the delay? */
			msleep(100);
			break;
		}
	}
	if (id == 0xFFFFFFFF) {
		/* Invalid device. Skip that one and look for next device */
		device++;
		goto next_device;
	}

	dev = alloc_pci_tegra_device();

	/* Fill the device information */
	dev->parent = dev_parent;
	dev->id = id;
	dev->bus = dev_parent->sec_bus;
	dev->devfn = PCI_DEVFN(device, 0);
	if (dev_parent->child == NULL) {
		dev_parent->child = dev;
		dev->prev = NULL;
	} else {
		/* Add dev to the list of devices on the same bus */
		struct pci_tegra_device *temp;

		temp = dev_parent->child;
		BUG_ON(temp == NULL);
		while (temp->next != NULL)
			temp = temp->next;
		temp->next = dev;
		dev->prev = temp;
	}

	hdr_type = pci_conf_read8(dev->bus, dev->devfn, PCI_HEADER_TYPE);
	if ((hdr_type & 0x7f) == 0x1) {
		/* Bridge device */

		/* Temporarily assign 0xff as the subordinate bus number, as
		 * we don't know how many devices are present behind this
		 * bridge.
		 */
		subordinate_bus = 0xff;
		dev->sec_bus = next_bus_number + 1;

		pci_conf_write8(dev->bus, dev->devfn, PCI_PRIMARY_BUS,
			dev_parent->sec_bus);
		pci_conf_write8(dev->bus, dev->devfn, PCI_SECONDARY_BUS,
			dev->sec_bus);
		pci_conf_write8(dev->bus, dev->devfn, PCI_SUBORDINATE_BUS,
			subordinate_bus);

		/* Scan all the buses behind this bridge */
		pcie_scanbus(dev);

		next_bus_number = dev->sub_bus;
	} else if ((hdr_type & 0x7f) == 0x0) {

		/* PCI endpoint - can be single-function or multi-function */
		pr_info("PCI endpoint (0x%x) is on bus = %d, device = %d\n",
			id, dev_parent->sec_bus, device);

	} else if ((hdr_type & 0x7f) == 0x2) {
		/* PC card device - Not handled */
		BUG();
	} else {
		BUG();
	}
	device++;
	goto next_device;
}
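The devfn values used throughout pack the 5-bit slot and 3-bit function numbers into one byte; the standard Linux encoding from <linux/pci.h> is shown below for reference:

/* Standard Linux PCI devfn encoding: devfn = slot[4:0] << 3 | func[2:0]. */
#define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)        (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)        ((devfn) & 0x07)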
Example #10
void pci_vtd_quirk(const struct pci_dev *pdev)
{
    int seg = pdev->seg;
    int bus = pdev->bus;
    int dev = PCI_SLOT(pdev->devfn);
    int func = PCI_FUNC(pdev->devfn);
    int pos;
    bool_t ff;
    u32 val, val2;
    u64 bar;
    paddr_t pa;
    const char *action;

    if ( pci_conf_read16(seg, bus, dev, func, PCI_VENDOR_ID) !=
         PCI_VENDOR_ID_INTEL )
        return;

    switch ( pci_conf_read16(seg, bus, dev, func, PCI_DEVICE_ID) )
    {
    /*
     * Mask reporting Intel VT-d faults to IOH core logic:
     *   - Some platforms escalate VT-d faults to platform errors.
     *   - This can cause system failure upon non-fatal VT-d faults.
     *   - Potential security issue if a malicious guest triggers VT-d faults.
     */
    case 0x0e28: /* Xeon-E5v2 (IvyBridge) */
    case 0x342e: /* Tylersburg chipset (Nehalem / Westmere systems) */
    case 0x3728: /* Xeon C5500/C3500 (JasperForest) */
    case 0x3c28: /* Sandybridge */
        val = pci_conf_read32(seg, bus, dev, func, 0x1AC);
        pci_conf_write32(seg, bus, dev, func, 0x1AC, val | (1 << 31));
        printk(XENLOG_INFO "Masked VT-d error signaling on %04x:%02x:%02x.%u\n",
               seg, bus, dev, func);
        break;

    /* Tylersburg (EP)/Boxboro (MP) chipsets (NHM-EP/EX, WSM-EP/EX) */
    case 0x3400 ... 0x3407: /* host bridges */
    case 0x3408 ... 0x3411: case 0x3420 ... 0x3421: /* root ports */
    /* JasperForest (Intel Xeon Processor C5500/C3500) */
    case 0x3700 ... 0x370f: /* host bridges */
    case 0x3720 ... 0x3724: /* root ports */
    /* Sandybridge-EP (Romley) */
    case 0x3c00: /* host bridge */
    case 0x3c01 ... 0x3c0b: /* root ports */
        pos = pci_find_ext_capability(seg, bus, pdev->devfn,
                                      PCI_EXT_CAP_ID_ERR);
        if ( !pos )
        {
            pos = pci_find_ext_capability(seg, bus, pdev->devfn,
                                          PCI_EXT_CAP_ID_VNDR);
            while ( pos )
            {
                val = pci_conf_read32(seg, bus, dev, func, pos + PCI_VNDR_HEADER);
                if ( PCI_VNDR_HEADER_ID(val) == 4 && PCI_VNDR_HEADER_REV(val) == 1 )
                {
                    pos += PCI_VNDR_HEADER;
                    break;
                }
                pos = pci_find_next_ext_capability(seg, bus, pdev->devfn, pos,
                                                   PCI_EXT_CAP_ID_VNDR);
            }
            ff = 0;
        }
        else
            ff = pcie_aer_get_firmware_first(pdev);
        if ( !pos )
        {
            printk(XENLOG_WARNING "%04x:%02x:%02x.%u without AER capability?\n",
                   seg, bus, dev, func);
            break;
        }

        val = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK);
        val2 = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK);
        if ( (val & PCI_ERR_UNC_UNSUP) && (val2 & PCI_ERR_COR_ADV_NFAT) )
            action = "Found masked";
        else if ( !ff )
        {
            pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK,
                             val | PCI_ERR_UNC_UNSUP);
            pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK,
                             val2 | PCI_ERR_COR_ADV_NFAT);
            action = "Masked";
        }
        else
            action = "Must not mask";

        /* XPUNCERRMSK Send Completion with Unsupported Request */
        val = pci_conf_read32(seg, bus, dev, func, 0x20c);
        pci_conf_write32(seg, bus, dev, func, 0x20c, val | (1 << 4));

        printk(XENLOG_INFO "%s UR signaling on %04x:%02x:%02x.%u\n",
               action, seg, bus, dev, func);
        break;

    case 0x0040: case 0x0044: case 0x0048: /* Nehalem/Westmere */
    case 0x0100: case 0x0104: case 0x0108: /* Sandybridge */
    case 0x0150: case 0x0154: case 0x0158: /* Ivybridge */
    case 0x0a00: case 0x0a04: case 0x0a08: case 0x0a0f: /* Haswell ULT */
    case 0x0c00: case 0x0c04: case 0x0c08: case 0x0c0f: /* Haswell */
    case 0x0d00: case 0x0d04: case 0x0d08: case 0x0d0f: /* Haswell */
    case 0x1600: case 0x1604: case 0x1608: case 0x160f: /* Broadwell */
    case 0x1610: case 0x1614: case 0x1618: /* Broadwell */
    case 0x1900: case 0x1904: case 0x1908: case 0x190c: case 0x190f: /* Skylake */
    case 0x1910: case 0x1918: case 0x191f: /* Skylake */
        bar = pci_conf_read32(seg, bus, dev, func, 0x6c);
        bar = (bar << 32) | pci_conf_read32(seg, bus, dev, func, 0x68);
        pa = bar & 0x7ffffff000UL; /* bits 12...38 */
        if ( (bar & 1) && pa &&
             page_is_ram_type(paddr_to_pfn(pa), RAM_TYPE_RESERVED) )
        {
            u32 __iomem *va = ioremap(pa, PAGE_SIZE);

            if ( va )
            {
                __set_bit(0x1c8 * 8 + 20, va);
                iounmap(va);
                printk(XENLOG_INFO "Masked UR signaling on %04x:%02x:%02x.%u\n",
                       seg, bus, dev, func);
            }
            else
                printk(XENLOG_ERR "Could not map %"PRIpaddr" for %04x:%02x:%02x.%u\n",
                       pa, seg, bus, dev, func);
        }
        else
            printk(XENLOG_WARNING "Bogus DMIBAR %#"PRIx64" on %04x:%02x:%02x.%u\n",
                   bar, seg, bus, dev, func);
        break;
    }
}
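In the DMIBAR branch above, __set_bit(0x1c8 * 8 + 20, va) addresses the mapped page as a flat bit array: byte offset 0x1c8 times 8 bits, plus bit 20 within that 32-bit register. A standalone sketch of the same index arithmetic, assuming a plain (non-atomic) bit set on a little-endian mapped page:

#include <stdint.h>
#include <stddef.h>

/* Set bit 'bit' of the 32-bit register at byte 'offset' in a mapped page. */
static inline void mmio_set_bit32(volatile uint32_t *page,
                                  size_t offset, unsigned int bit)
{
    page[offset / 4] |= UINT32_C(1) << bit;   /* e.g. offset 0x1c8, bit 20 */
}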