static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;

	nvidia_private.dev_1 =
		pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1));
	nvidia_private.dev_2 =
		pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2));
	nvidia_private.dev_3 =
		pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0));

	if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
		printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
			"chipset, but could not find the secondary devices.\n");
		return -ENODEV;
	}

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	switch (pdev->device) {
	case PCI_DEVICE_ID_NVIDIA_NFORCE:
		printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n");
		nvidia_private.wbc_mask = 0x00010000;
		break;
	case PCI_DEVICE_ID_NVIDIA_NFORCE2:
		printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n");
		nvidia_private.wbc_mask = 0x80000000;
		break;
	default:
		printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n",
			    pdev->device);
		return -ENODEV;
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &nvidia_driver;
	bridge->dev_private_data = &nvidia_private;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/* Fill in the mode register */
	pci_read_config_dword(pdev,
			bridge->capndx+PCI_AGP_STATUS,
			&bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
/*************************************************************************
 * IXDP2x00-common PCI init
 *
 * The IXDP2[48]00 has a horrid PCI bus layout. Basically the board
 * contains two NPUs (ingress and egress) connected over PCI, both running
 * instances of the kernel. So far so good. Having peers on the PCI bus
 * running Linux is a common design in telecom systems. The problem is
 * that instead of all the devices being controlled by a single host,
 * different devices are controlled by different NPUs on the same bus,
 * leading to multiple hosts on the bus. The exact bus layout looks like:
 *
 *                   Bus 0
 *    Master NPU <-------------------+-------------------> Slave NPU
 *                                   |
 *                                   |
 *                                  P2P 
 *                                   |
 *
 *                  Bus 1            |
 *               <--+------+---------+---------+------+-->
 *                  |      |         |         |      |
 *                  |      |         |         |      |
 *             ... Dev    PMC       Media     Eth0   Eth1 ...
 *
 * The master controls all but Eth1, which is controlled by the
 * slave. What this means is that both the master and the slave
 * have to scan the bus, but only one of them can enumerate the bus.
 * In addition, after the bus is scanned, each kernel must remove
 * the device(s) it does not control from the PCI dev list otherwise
 * a driver on each NPU will try to manage it and we will have horrible
 * conflicts. Oh..and the slave NPU needs to see the master NPU
 * for Intel's drivers to work properly. Closed source drivers...
 *
 * The way we deal with this is fairly simple but ugly:
 *
 * 1) Let master scan and enumerate the bus completely.
 * 2) Master deletes Eth1 from device list.
 * 3) Slave scans bus and then deletes all but Eth1 (Eth0 on slave)
 *    from device list.
 * 4) Find HW designers and LART them.
 *
 * The boards also do not do normal PCI IRQ routing, or any sort of
 * sensical swizzling, so we just need to check where on the bus a
 * device sits and figure out to which CPLD pin the interrupt is routed.
 * See ixdp2[48]00.c files.
 *
 *************************************************************************/
void ixdp2x00_slave_pci_postinit(void)
{
	struct pci_dev *dev;

	/*
	 * Remove PMC device if there is one
	 */
	if((dev = pci_get_bus_and_slot(1, IXDP2X00_PMC_DEVFN))) {
		pci_stop_and_remove_bus_device(dev);
		pci_dev_put(dev);
	}

	dev = pci_get_bus_and_slot(0, IXDP2X00_21555_DEVFN);
	pci_stop_and_remove_bus_device(dev);
	pci_dev_put(dev);
}
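The banner comment above ends by noting that interrupt routing on these boards is purely positional: no swizzling, just "which slot on bus 1 is this device in, and which CPLD pin is that slot wired to". Below is a minimal sketch of such a lookup, assuming hypothetical CPLD IRQ numbers and reusing the devfn constants that appear in these snippets for illustration; the real tables live in the ixdp2[48]00.c files.

#include <linux/pci.h>

/* Placeholder CPLD interrupt numbers -- illustrative values only. */
#define IRQ_IXDP2X00_PMC_SKETCH		2
#define IRQ_IXDP2X00_ENET_SKETCH	3

/*
 * Hypothetical per-slot IRQ lookup: no swizzling, the answer depends
 * only on where the device sits on bus 1 behind the P2P bridge.
 */
static int ixdp2x00_map_irq_sketch(struct pci_dev *dev, u8 slot, u8 pin)
{
	if (dev->bus->number != 1)
		return -1;			/* not behind the bridge */

	switch (PCI_DEVFN(slot, 0)) {
	case IXDP2X00_PMC_DEVFN:		/* PMC expansion site */
		return IRQ_IXDP2X00_PMC_SKETCH;
	case IXDP2400_MASTER_ENET_DEVFN:	/* on-board Ethernet ports */
	case IXDP2400_SLAVE_ENET_DEVFN:
		return IRQ_IXDP2X00_ENET_SKETCH;
	default:
		return -1;			/* unrouted slot */
	}
}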
Example #3
static void
intel_teardown_mchbar(struct drm_device *dev, bool disable)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct pci_dev *bridge_dev;
    int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
    u32 temp;

    bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
    if (!bridge_dev) {
        DRM_DEBUG("no bridge dev?!\n");
        return;
    }

    if (disable) {
        if (IS_I915G(dev) || IS_I915GM(dev)) {
            pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
            temp &= ~DEVEN_MCHBAR_EN;
            pci_write_config_dword(bridge_dev, DEVEN_REG, temp);
        } else {
            pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
            temp &= ~1;
            pci_write_config_dword(bridge_dev, mchbar_reg, temp);
        }
    }

    if (dev_priv->mch_res.start)
        release_resource(&dev_priv->mch_res);
}
Example #4
static void ixdp2400_pci_postinit(void)
{
	struct pci_dev *dev;

	if (ixdp2x00_master_npu()) {
		dev = pci_get_bus_and_slot(1, IXDP2400_SLAVE_ENET_DEVFN);
		pci_remove_bus_device(dev);
		pci_dev_put(dev);
	} else {
		dev = pci_get_bus_and_slot(1, IXDP2400_MASTER_ENET_DEVFN);
		pci_remove_bus_device(dev);
		pci_dev_put(dev);

		ixdp2x00_slave_pci_postinit();
	}
}
Example #5
/* FIXME ? - shared with Poulsbo */
static void cdv_get_core_freq(struct drm_device *dev)
{
	uint32_t clock;
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	struct drm_psb_private *dev_priv = dev->dev_private;

	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
	pci_read_config_dword(pci_root, 0xD4, &clock);
	pci_dev_put(pci_root);

	switch (clock & 0x07) {
	case 0:
		dev_priv->core_freq = 100;
		break;
	case 1:
		dev_priv->core_freq = 133;
		break;
	case 2:
		dev_priv->core_freq = 150;
		break;
	case 3:
		dev_priv->core_freq = 178;
		break;
	case 4:
		dev_priv->core_freq = 200;
		break;
	case 5:
	case 6:
	case 7:
		dev_priv->core_freq = 266;
		break;
	default:
		dev_priv->core_freq = 0;
	}
}
Example #6
static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
{
	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	pci_write_config_dword(pci_root, 0xD4, value);
	pci_write_config_dword(pci_root, 0xD0, mcr);
	pci_dev_put(pci_root);
}
static int intel_mid_msgbus_init(void)
{
	pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!pci_root) {
		pr_err("%s: Error: msgbus PCI handle NULL\n", __func__);
		return -ENODEV;
	}
	return 0;
}
Example #8
static inline u32 CDV_MSG_READ32(uint port, uint offset)
{
	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
	uint32_t ret_val = 0;
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	pci_write_config_dword(pci_root, 0xD0, mcr);
	pci_read_config_dword(pci_root, 0xD4, &ret_val);
	pci_dev_put(pci_root);
	return ret_val;
}
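CDV_MSG_WRITE32() (Example #6) and CDV_MSG_READ32() above form a write/read pair over the host bridge's message bus: a command word goes to config offset 0xD0 and the data moves through 0xD4. A small read-modify-write sketch using the two helpers follows; the port (0x02), offset (0x30) and bit chosen here are made-up example values, not documented registers.

/*
 * Hypothetical read-modify-write over the message bus using the two
 * helpers above; port/offset/bit values are illustrative only.
 */
static void cdv_msgbus_set_bit_sketch(void)
{
	u32 val = CDV_MSG_READ32(0x02, 0x30);	/* fetch current register */

	val |= BIT(0);				/* set an example bit */
	CDV_MSG_WRITE32(0x02, 0x30, val);	/* write it back */
}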
Example #9
/*
 *	Get the revision ID, B0:D2:F0;0x08
 */
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
	uint32_t platform_rev_id = 0;
	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));

	pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
	dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
	pci_dev_put(pci_gfx_root);
	dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
					dev_priv->platform_rev_id);
}
Example #10
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
Example #11
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
    struct pci_dev *bridge_dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
    u32 temp_lo, temp_hi = 0;
    u64 mchbar_addr;
    int ret = 0;

    bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
    if (!bridge_dev) {
        DRM_DEBUG("no bridge dev?!\n");
        ret = -ENODEV;
        goto out;
    }

    if (IS_I965G(dev))
        pci_read_config_dword(bridge_dev, reg + 4, &temp_hi);
    pci_read_config_dword(bridge_dev, reg, &temp_lo);
    mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

    /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
    if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
        ret = 0;
        goto out_put;
    }
#endif

    /* Get some space for it */
    ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
                                 MCHBAR_SIZE, MCHBAR_SIZE,
                                 PCIBIOS_MIN_MEM,
                                 0,   pcibios_align_resource,
                                 bridge_dev);
    if (ret) {
        DRM_DEBUG("failed bus alloc: %d\n", ret);
        dev_priv->mch_res.start = 0;
        goto out_put;
    }

    if (IS_I965G(dev))
        pci_write_config_dword(bridge_dev, reg + 4,
                               upper_32_bits(dev_priv->mch_res.start));

    pci_write_config_dword(bridge_dev, reg,
                           lower_32_bits(dev_priv->mch_res.start));
out_put:
    pci_dev_put(bridge_dev);
out:
    return ret;
}
Example #12
int __init ixdp2800_pci_init(void)
{
	if (machine_is_ixdp2800()) {
		struct pci_dev *dev;

		pci_common_init(&ixdp2800_pci);
		if (ixdp2x00_master_npu()) {
			dev = pci_get_bus_and_slot(1, IXDP2800_SLAVE_ENET_DEVFN);
			pci_remove_bus_device(dev);
			pci_dev_put(dev);

			ixdp2800_master_enable_slave();
			ixdp2800_master_wait_for_slave_bus_scan();
		} else {
			dev = pci_get_bus_and_slot(1, IXDP2800_MASTER_ENET_DEVFN);
			pci_remove_bus_device(dev);
			pci_dev_put(dev);
		}
	}

	return 0;
}
Example #13
SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
		unsigned long, off, unsigned long, len, void __user *, buf)
{
	struct pci_dev *dev;
	u8 byte;
	u16 word;
	u32 dword;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN) || (get_securelevel() > 0))
		return -EPERM;

	dev = pci_get_bus_and_slot(bus, dfn);
	if (!dev)
		return -ENODEV;

	switch (len) {
	case 1:
		err = get_user(byte, (u8 __user *)buf);
		if (err)
			break;
		err = pci_user_write_config_byte(dev, off, byte);
		if (err != PCIBIOS_SUCCESSFUL)
			err = -EIO;
		break;

	case 2:
		err = get_user(word, (u16 __user *)buf);
		if (err)
			break;
		err = pci_user_write_config_word(dev, off, word);
		if (err != PCIBIOS_SUCCESSFUL)
			err = -EIO;
		break;

	case 4:
		err = get_user(dword, (u32 __user *)buf);
		if (err)
			break;
		err = pci_user_write_config_dword(dev, off, dword);
		if (err != PCIBIOS_SUCCESSFUL)
			err = -EIO;
		break;

	default:
		err = -EINVAL;
		break;
	}
	pci_dev_put(dev);
	return err;
}
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	u32 addr;
	u8 __iomem *vbt_virtual;
	struct vbt_header vbt_header;
	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
	int ret = -1;

	/* Get the address of the platform config vbt */
	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
	pci_dev_put(pci_gfx_root);

	dev_dbg(dev->dev, "drm platform config address is %x\n", addr);

	if (!addr)
		goto out;

	/* get the virtual address of the vbt */
	vbt_virtual = ioremap(addr, sizeof(vbt_header));
	if (!vbt_virtual)
		goto out;

	memcpy_fromio(&vbt_header, vbt_virtual, sizeof(vbt_header));
	iounmap(vbt_virtual);

	if (memcmp(&vbt_header.signature, "$GCT", 4))
		goto out;

	dev_dbg(dev->dev, "GCT revision is %02x\n", vbt_header.revision);

	switch (vbt_header.revision) {
	case 0x00:
		ret = mid_get_vbt_data_r0(dev_priv, addr);
		break;
	case 0x01:
		ret = mid_get_vbt_data_r1(dev_priv, addr);
		break;
	case 0x10:
		ret = mid_get_vbt_data_r10(dev_priv, addr);
		break;
	default:
		dev_err(dev->dev, "Unknown revision of GCT!\n");
	}

out:
	if (ret)
		dev_err(dev->dev, "Unable to read GCT!");
	else
		dev_priv->has_gct = true;
}
Example #15
void __init
nautilus_init_pci(void)
{
	struct pci_controller *hose = hose_head;
	struct pci_bus *bus;
	struct pci_dev *irongate;
	unsigned long bus_align, bus_size, pci_mem;
	unsigned long memtop = max_low_pfn << PAGE_SHIFT;

	/* Scan our single hose.  */
	bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
	if (!bus)
		return;

	hose->bus = bus;
	pcibios_claim_one_bus(bus);

	irongate = pci_get_bus_and_slot(0, 0);
	bus->self = irongate;
	bus->resource[0] = &irongate_io;
	bus->resource[1] = &irongate_mem;

	pci_bus_size_bridges(bus);

	/* IO port range. */
	bus->resource[0]->start = 0;
	bus->resource[0]->end = 0xffff;

	/* Set up PCI memory range - limit is hardwired to 0xffffffff,
	   base must be aligned to 16MB. */
	bus_align = bus->resource[1]->start;
	bus_size = bus->resource[1]->end + 1 - bus_align;
	if (bus_align < 0x1000000UL)
		bus_align = 0x1000000UL;

	pci_mem = (0x100000000UL - bus_size) & -bus_align;

	bus->resource[1]->start = pci_mem;
	bus->resource[1]->end = 0xffffffffUL;
	if (request_resource(&iomem_resource, bus->resource[1]) < 0)
		printk(KERN_ERR "Failed to request MEM on hose 0\n");

	if (pci_mem < memtop)
		memtop = pci_mem;
	if (memtop > alpha_mv.min_mem_address) {
		free_reserved_area(__va(alpha_mv.min_mem_address),
				   __va(memtop), -1, NULL);
		printk("nautilus_init_pci: %ldk freed\n",
			(memtop - alpha_mv.min_mem_address) >> 10);
	}
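The pci_mem line above is an align-down: because bus_align is a power of two, -bus_align is an all-ones mask with the low alignment bits cleared, so the AND rounds the window base down to a bus_align boundary just below 4 GB. A worked example with illustrative numbers:

/*
 * Worked example of the align-down above (illustrative values):
 *   bus_size  = 0x06800000   (104 MB of bridge MEM space)
 *   bus_align = 0x01000000   (16 MB minimum alignment)
 *   0x100000000UL - 0x06800000 = 0xF9800000
 *   0xF9800000 & -0x01000000UL = 0xF9000000
 * so the MEM window becomes 0xF9000000-0xffffffff, i.e. just below
 * 4 GB and rounded down to a 16 MB boundary.
 */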
Example #16
int rtsx_read_pci_cfg_byte(u8 bus, u8 dev, u8 func, u8 offset, u8 *val)
{
	struct pci_dev *pdev;
	u8 data;
	u8 devfn = (dev << 3) | func;

	pdev = pci_get_bus_and_slot(bus, devfn);
	if (!pdev)
		return -1;

	pci_read_config_byte(pdev, offset, &data);
	if (val)
		*val = data;

	pci_dev_put(pdev);
	return 0;
}
void __init
nautilus_init_pci(void)
{
	struct pci_controller *hose = hose_head;
	struct pci_bus *bus;
	struct pci_dev *irongate;
	unsigned long bus_align, bus_size, pci_mem;
	unsigned long memtop = max_low_pfn << PAGE_SHIFT;

	/* Scan our single hose.  */
	bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
	hose->bus = bus;
	pcibios_claim_one_bus(bus);

	irongate = pci_get_bus_and_slot(0, 0);
	bus->self = irongate;
	bus->resource[1] = &irongate_mem;

	pci_bus_size_bridges(bus);

	/* IO port range. */
	bus->resource[0]->start = 0;
	bus->resource[0]->end = 0xffff;

	/* Set up PCI memory range - limit is hardwired to 0xffffffff,
	   base must be aligned to 16MB. */
	bus_align = bus->resource[1]->start;
	bus_size = bus->resource[1]->end + 1 - bus_align;
	if (bus_align < 0x1000000UL)
		bus_align = 0x1000000UL;

	pci_mem = (0x100000000UL - bus_size) & -bus_align;

	bus->resource[1]->start = pci_mem;
	bus->resource[1]->end = 0xffffffffUL;
	if (request_resource(&iomem_resource, bus->resource[1]) < 0)
		printk(KERN_ERR "Failed to request MEM on hose 0\n");

	if (pci_mem < memtop)
		memtop = pci_mem;
	if (memtop > alpha_mv.min_mem_address) {
		free_reserved_mem(__va(alpha_mv.min_mem_address),
				  __va(memtop));
		printk("nautilus_init_pci: %ldk freed\n",
			(memtop - alpha_mv.min_mem_address) >> 10);
	}
Example #18
/* Setup MCHBAR if possible, return true if we should disable it again */
static bool
intel_setup_mchbar(struct drm_device *dev)
{
    struct pci_dev *bridge_dev;
    int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
    u32 temp;
    bool need_disable = false, enabled;

    bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
    if (!bridge_dev) {
        DRM_DEBUG("no bridge dev?!\n");
        goto out;
    }

    if (IS_I915G(dev) || IS_I915GM(dev)) {
        pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
        enabled = !!(temp & DEVEN_MCHBAR_EN);
    } else {
        pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
        enabled = temp & 1;
    }

    /* If it's already enabled, don't have to do anything */
    if (enabled)
        goto out_put;

    if (intel_alloc_mchbar_resource(dev))
        goto out_put;

    need_disable = true;

    /* Space is allocated or reserved, so enable it. */
    if (IS_I915G(dev) || IS_I915GM(dev)) {
        pci_write_config_dword(bridge_dev, DEVEN_REG,
                               temp | DEVEN_MCHBAR_EN);
    } else {
        pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
        pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1);
    }
out_put:
    pci_dev_put(bridge_dev);
out:
    return need_disable;
}
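Examples #3, #11 and #18 are meant to be used together: intel_setup_mchbar() reports whether MCHBAR had to be force-enabled, and that flag is exactly what intel_teardown_mchbar() takes as its disable argument. A minimal pairing sketch follows, assuming a driver load/unload path; mchbar_need_disable and the two wrapper names are illustrative, not part of the driver shown above.

/*
 * Hypothetical pairing of the MCHBAR helpers above during driver
 * load and unload; "mchbar_need_disable" is an illustrative name.
 */
static bool mchbar_need_disable;

static void i915_mchbar_init_sketch(struct drm_device *dev)
{
	/* Enable MCHBAR if the firmware left it off; remember if we did. */
	mchbar_need_disable = intel_setup_mchbar(dev);
}

static void i915_mchbar_fini_sketch(struct drm_device *dev)
{
	/* Undo only what we enabled and release the MCH resource. */
	intel_teardown_mchbar(dev, mchbar_need_disable);
}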
static ssize_t mv64x60_hs_reg_read(struct kobject *kobj, char *buf, loff_t off,
				   size_t count)
{
	struct pci_dev *phb;
	u32 v;

	if (off > 0)
		return 0;
	if (count < MV64X60_VAL_LEN_MAX)
		return -EINVAL;

	phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!phb)
		return -ENODEV;
	pci_read_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, &v);
	pci_dev_put(phb);

	return sprintf(buf, "0x%08x\n", v);
}
Example #20
/*
 * probe_pci_dev
 *	find a pci device that can be used for other test
 *	calls in this kernel module.
 */
static int probe_pci_dev(unsigned int bus, unsigned int slot)
{
	struct pci_dev *dev;

	if (ltp_pci.dev) {
		pci_dev_put(ltp_pci.dev);
		ltp_pci.dev = NULL;
	}

	dev = pci_get_bus_and_slot(bus, slot);
	if (!dev)
		return -ENODEV;
	if (!dev->driver) {
		pci_dev_put(dev);
		return -ENODEV;
	}

	prk_info("found pci_dev '%s', bus %u, devfn %u",
		pci_name(dev), bus, slot);

	ltp_pci.dev = dev;
	ltp_pci.bus = dev->bus;
	prk_info("Bus number: %d", dev->bus->number);
	return 0;
}
static ssize_t mv64x60_hs_reg_write(struct kobject *kobj, char *buf, loff_t off,
				    size_t count)
{
	struct pci_dev *phb;
	u32 v;

	if (off > 0)
		return 0;
	if (count <= 0)
		return -EINVAL;

	if (sscanf(buf, "%i", &v) != 1)
		return -EINVAL;

	phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!phb)
		return -ENODEV;
	pci_write_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, v);
	pci_dev_put(phb);

	return count;
}
Example #22
static int oprompci2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
{
	int err = -EINVAL;

	if (bufsize >= 2*sizeof(int)) {
#ifdef CONFIG_PCI
		struct pci_dev *pdev;
		struct device_node *dp;

		pdev = pci_get_bus_and_slot (((int *) op->oprom_array)[0],
				      ((int *) op->oprom_array)[1]);

		dp = pci_device_to_OF_node(pdev);
		data->current_node = dp;
		*((int *)op->oprom_array) = dp->phandle;
		op->oprom_size = sizeof(int);
		err = copyout(argp, op, bufsize + sizeof(int));

		pci_dev_put(pdev);
#endif
	}

	return err;
}
Example #23
static void mid_get_fuse_settings(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	uint32_t fuse_value = 0;
	uint32_t fuse_value_tmp = 0;

#define FB_REG06 0xD0810600
#define FB_MIPI_DISABLE  (1 << 11)
#define FB_REG09 0xD0810900
#define FB_SKU_MASK  0x7000
#define FB_SKU_SHIFT 12
#define FB_SKU_100 0
#define FB_SKU_100L 1
#define FB_SKU_83 2
	if (pci_root == NULL) {
		WARN_ON(1);
		return;
	}


	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	/* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
	if (IS_MRST(dev))
		dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;

	DRM_INFO("internal display is %s\n",
		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");

	/* Prevent runtime suspend at start */
	if (dev_priv->iLVDS_enable) {
		dev_priv->is_lvds_on = true;
		dev_priv->is_mipi_on = false;
	} else {
		dev_priv->is_mipi_on = true;
		dev_priv->is_lvds_on = false;
	}

	dev_priv->video_device_fuse = fuse_value;

	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;

	dev_priv->fuse_reg_value = fuse_value;

	switch (fuse_value_tmp) {
	case FB_SKU_100:
		dev_priv->core_freq = 200;
		break;
	case FB_SKU_100L:
		dev_priv->core_freq = 100;
		break;
	case FB_SKU_83:
		dev_priv->core_freq = 166;
		break;
	default:
		dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
								fuse_value_tmp);
		dev_priv->core_freq = 0;
	}
	dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
	pci_dev_put(pci_root);
}
Example #24
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
	u32 addr;
	u16 new_size;
	u8 *vbt_virtual;
	u8 bpi;
	u8 number_desc = 0;
	struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
	struct gct_r10_timing_info ti;
	void *pGCT;
	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));

	/* Get the address of the platform config vbt, B0:D2:F0;0xFC */
	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
	pci_dev_put(pci_gfx_root);

	dev_dbg(dev->dev, "drm platform config address is %x\n", addr);

	/* check for platform config address == 0. */
	/* this means fw doesn't support vbt */

	if (addr == 0) {
		vbt->size = 0;
		return;
	}

	/* get the virtual address of the vbt */
	vbt_virtual = ioremap(addr, sizeof(*vbt));
	if (vbt_virtual == NULL) {
		vbt->size = 0;
		return;
	}

	memcpy(vbt, vbt_virtual, sizeof(*vbt));
	iounmap(vbt_virtual); /* Free virtual address space */

	/* No matching signature don't process the data */
	if (memcmp(vbt->signature, "$GCT", 4)) {
		vbt->size = 0;
		return;
	}

	dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);

	switch (vbt->revision) {
	case 0:
		vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
					vbt->size - sizeof(*vbt) + 4);
		pGCT = vbt->oaktrail_gct;
		bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
		dev_priv->gct_data.bpi = bpi;
		dev_priv->gct_data.pt =
			((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
		memcpy(&dev_priv->gct_data.DTD,
			&((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
				sizeof(struct oaktrail_timing_info));
		dev_priv->gct_data.Panel_Port_Control =
		  ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
			((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
		break;
	case 1:
		vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
					vbt->size - sizeof(*vbt) + 4);
		pGCT = vbt->oaktrail_gct;
		bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
		dev_priv->gct_data.bpi = bpi;
		dev_priv->gct_data.pt =
			((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
		memcpy(&dev_priv->gct_data.DTD,
			&((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
				sizeof(struct oaktrail_timing_info));
		dev_priv->gct_data.Panel_Port_Control =
		  ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
			((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
		break;
	case 0x10:
		/*header definition changed from rev 01 (v2) to rev 10h. */
		/*so, some values have changed location*/
		new_size = vbt->checksum; /*checksum contains lo size byte*/
		/*LSB of oaktrail_gct contains hi size byte*/
		new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;

		vbt->checksum = vbt->size; /*size contains the checksum*/
		if (new_size > 0xff)
			vbt->size = 0xff; /*restrict size to 255*/
		else
			vbt->size = new_size;

		/* number of descriptors defined in the GCT */
		number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
		bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
		vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
				GCT_R10_DISPLAY_DESC_SIZE * number_desc);
		pGCT = vbt->oaktrail_gct;
		pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
		dev_priv->gct_data.bpi = bpi; /*save boot panel id*/

		/*copy the GCT display timings into a temp structure*/
		memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));

		/*now copy the temp struct into the dev_priv->gct_data*/
		dp_ti->pixel_clock = ti.pixel_clock;
		dp_ti->hactive_hi = ti.hactive_hi;
		dp_ti->hactive_lo = ti.hactive_lo;
		dp_ti->hblank_hi = ti.hblank_hi;
		dp_ti->hblank_lo = ti.hblank_lo;
		dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
		dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
		dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
		dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
		dp_ti->vactive_hi = ti.vactive_hi;
		dp_ti->vactive_lo = ti.vactive_lo;
		dp_ti->vblank_hi = ti.vblank_hi;
		dp_ti->vblank_lo = ti.vblank_lo;
		dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
		dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
		dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
		dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;

		/* Move the MIPI_Display_Descriptor data from GCT to dev priv */
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
							*((u8 *)pGCT + 0x0d);
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
						(*((u8 *)pGCT + 0x0e)) << 8;
		break;
	default:
		dev_err(dev->dev, "Unknown revision of GCT!\n");
		vbt->size = 0;
	}
}
static int __init asr_get_base_address(void)
{
	unsigned char low, high;
	const char *type = "";

	asr_length = 1;

	switch (asr_type) {
	case ASMTYPE_TOPAZ:
		/* SELECT SuperIO CHIP FOR QUERYING
		   (WRITE 0x07 TO BOTH 0x2E and 0x2F) */
		outb(0x07, 0x2e);
		outb(0x07, 0x2f);

		/* SELECT AND READ THE HIGH-NIBBLE OF THE GPIO BASE ADDRESS */
		outb(0x60, 0x2e);
		high = inb(0x2f);

		/* SELECT AND READ THE LOW-NIBBLE OF THE GPIO BASE ADDRESS */
		outb(0x61, 0x2e);
		low = inb(0x2f);

		asr_base = (high << 16) | low;
		asr_read_addr = asr_write_addr =
			asr_base + TOPAZ_ASR_REG_OFFSET;
		asr_length = 5;

		break;

	case ASMTYPE_JASPER:
		type = "Jaspers ";
#if 0
		u32 r;
		/* Suggested fix */
		pdev = pci_get_bus_and_slot(0, DEVFN(0x1f, 0));
		if (pdev == NULL)
			return -ENODEV;
		pci_read_config_dword(pdev, 0x58, &r);
		asr_base = r & 0xFFFE;
		pci_dev_put(pdev);
#else
		/* FIXME: need to use pci_config_lock here,
		   but it's not exported */

/*		spin_lock_irqsave(&pci_config_lock, flags);*/

		/* Select the SuperIO chip in the PCI I/O port register */
		outl(0x8000f858, 0xcf8);

		/* BUS 0, Slot 1F, fnc 0, offset 58 */

		/*
		 * Read the base address for the SuperIO chip.
		 * Only the lower 16 bits are valid, but the address is word
		 * aligned so the last bit must be masked off.
		 */
		asr_base = inl(0xcfc) & 0xfffe;

/*		spin_unlock_irqrestore(&pci_config_lock, flags);*/
#endif
		asr_read_addr = asr_write_addr =
			asr_base + JASPER_ASR_REG_OFFSET;
		asr_toggle_mask = JASPER_ASR_TOGGLE_MASK;
		asr_disable_mask = JASPER_ASR_DISABLE_MASK;
		asr_length = JASPER_ASR_REG_OFFSET + 1;

		break;

	case ASMTYPE_PEARL:
		type = "Pearls ";
		asr_base = PEARL_BASE;
		asr_read_addr = PEARL_READ;
		asr_write_addr = PEARL_WRITE;
		asr_toggle_mask = PEARL_ASR_TOGGLE_MASK;
		asr_disable_mask = PEARL_ASR_DISABLE_MASK;
		asr_length = 4;
		break;

	case ASMTYPE_JUNIPER:
		type = "Junipers ";
		asr_base = JUNIPER_BASE_ADDRESS;
		asr_read_addr = asr_write_addr = asr_base;
		asr_toggle_mask = JUNIPER_ASR_TOGGLE_MASK;
		asr_disable_mask = JUNIPER_ASR_DISABLE_MASK;
		break;

	case ASMTYPE_SPRUCE:
		type = "Spruce's ";
		asr_base = SPRUCE_BASE_ADDRESS;
		asr_read_addr = asr_write_addr = asr_base;
		asr_toggle_mask = SPRUCE_ASR_TOGGLE_MASK;
		asr_disable_mask = SPRUCE_ASR_DISABLE_MASK;
		break;
	}

	if (!request_region(asr_base, asr_length, "ibmasr")) {
		pr_err("address %#x already in use\n", asr_base);
		return -EBUSY;
	}

	pr_info("found %sASR @ addr %#x\n", type, asr_base);

	return 0;
}
static int atomisp_pci_probe(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	const struct atomisp_platform_data *pdata;
	struct atomisp_device *isp;
	unsigned int start;
	void __iomem *base;
	int err;

	if (!dev) {
		dev_err(&dev->dev, "atomisp: error device ptr\n");
		return -EINVAL;
	}

	atomisp_pci_vendor = id->vendor;
	atomisp_pci_device = id->device;

	pdata = atomisp_get_platform_data();
	if (pdata == NULL) {
		dev_err(&dev->dev, "no platform data available\n");
		return -ENODEV;
	}

	err = pcim_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n",
			err);
		return err;
	}

	start = pci_resource_start(dev, ATOM_ISP_PCI_BAR);
	v4l2_dbg(1, dbg_level, &atomisp_dev, "start: 0x%x\n", start);

	err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev));
	if (err) {
		dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n",
			err);
		return err;
	}

	base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR];
	v4l2_dbg(1, dbg_level, &atomisp_dev, "base: %p\n", base);

	atomisp_io_base = base;

	v4l2_dbg(1, dbg_level, &atomisp_dev, "atomisp_io_base: %p\n",
			atomisp_io_base);

	isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL);
	if (!isp) {
		dev_err(&dev->dev, "Failed to alloc CI ISP structure\n");
		return -ENOMEM;
	}
	isp->pdev = dev;
	isp->dev = &dev->dev;
	isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
	isp->pci_root = pci_get_bus_and_slot(0, 0);
	if (!isp->pci_root) {
		dev_err(&dev->dev, "Unable to find PCI host\n");
		return -ENODEV;
	}
	isp->saved_regs.ispmmadr = start;

	mutex_init(&isp->mutex);
	mutex_init(&isp->streamoff_mutex);
	spin_lock_init(&isp->lock);
	init_completion(&isp->init_done);

	isp->media_dev.driver_version = ATOMISP_CSS_VERSION_20;

	switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
	case ATOMISP_PCI_DEVICE_SOC_MRFLD:
	case ATOMISP_PCI_DEVICE_SOC_BYT:
		isp->media_dev.hw_revision =
			(ATOMISP_HW_REVISION_ISP2400
			 << ATOMISP_HW_REVISION_SHIFT) |
#ifdef CONFIG_ISP2400
			ATOMISP_HW_STEPPING_A0;
#else
			ATOMISP_HW_STEPPING_B0;
#endif
		break;
	default:
		/* Medfield and Clovertrail. */
		isp->media_dev.hw_revision =
			(ATOMISP_HW_REVISION_ISP2300
			 << ATOMISP_HW_REVISION_SHIFT) |
			(dev->revision < 0x09 ?
			 ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0);
	}

	isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;
	if ((pdata->spid->platform_family_id == INTEL_CLVTP_PHONE ||
	     pdata->spid->platform_family_id == INTEL_CLVT_TABLET) &&
	    isp->pdev->revision < 0x09) {
		/* Workaround for Cloverview(+) older than stepping B0 */
		isp->max_isr_latency = CSTATE_EXIT_LATENCY_C1;
	}

	/* Load isp firmware from user space */
	isp->firmware = load_firmware(&dev->dev);
	if (!isp->firmware) {
		err = -ENOENT;
		dev_err(&dev->dev, "Load firmwares failed\n");
		goto load_fw_fail;
	}

	isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
	if (isp->wdt_work_queue == NULL) {
		dev_err(&dev->dev, "Failed to initialize wdt work queue\n");
		err = -ENOMEM;
		goto wdt_work_queue_fail;
	}
	INIT_WORK(&isp->wdt_work, atomisp_wdt_work);

	isp->delayed_init_workq =
		alloc_workqueue(isp->v4l2_dev.name, WQ_CPU_INTENSIVE, 1);
	if (isp->delayed_init_workq == NULL) {
		dev_err(&dev->dev, "Failed to initialize delayed init workq\n");
		err = -ENOMEM;
		goto delayed_init_work_queue_fail;
	}
	INIT_WORK(&isp->delayed_init_work, atomisp_delayed_init_work);

	pci_set_master(dev);
	pci_set_drvdata(dev, isp);

	err = pci_enable_msi(dev);
	if (err) {
		dev_err(&dev->dev, "Failed to enable msi (%d)\n", err);
		goto enable_msi_fail;
	}

	err = devm_request_threaded_irq(&dev->dev, dev->irq,
					atomisp_isr, atomisp_isr_thread,
					IRQF_SHARED, "isp_irq", isp);
	if (err) {
		dev_err(&dev->dev, "Failed to request irq (%d)\n", err);
		goto enable_msi_fail;
	}

	setup_timer(&isp->wdt, atomisp_wdt, (unsigned long)isp);

	atomisp_msi_irq_init(isp, dev);

	pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	if (IS_ISP2400) {
		u32 reg32;
		/*
		 * for MRFLD, Software/firmware needs to write a 1 to bit 0 of
		 * the register at CSI_RECEIVER_SELECTION_REG to enable SH CSI
		 * backend write 0 will enable Arasan CSI backend, which has
		 * bugs(like sighting:4567697 and 4567699) and will be removed
		 * in B0
		 */
		atomisp_css2_hw_store_32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
		pci_read_config_dword(dev, PCI_I_CONTROL, &reg32);
		reg32 |= MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING
			| MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
		pci_write_config_dword(dev, PCI_I_CONTROL, reg32);
	}

	err = atomisp_initialize_modules(isp);
	if (err < 0) {
		dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err);
		goto enable_msi_fail;
	}

	err = atomisp_register_entities(isp);
	if (err < 0) {
		dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n",
			err);
		goto enable_msi_fail;
	}
	atomisp_acc_init(isp);

	/* save the iunit context only once after all the values are init'ed. */
	atomisp_save_iunit_reg(isp);

	pm_runtime_put_noidle(&dev->dev);
	pm_runtime_allow(&dev->dev);

	err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
	if (err)
		dev_err(&dev->dev, "Failed to register reserved memory pool.\n");

	return 0;

enable_msi_fail:
	destroy_workqueue(isp->delayed_init_workq);
delayed_init_work_queue_fail:
	destroy_workqueue(isp->wdt_work_queue);
wdt_work_queue_fail:
	release_firmware(isp->firmware);
load_fw_fail:
	pci_dev_put(isp->pci_root);
	return err;
}
Example #27
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			dev_err(&pdev->dev, "unsupported Serverworks chipset "
				"[%04x/%04x]\n", pdev->vendor, pdev->device);
		return -ENODEV;
	}

	/* Everything is on func 1 here so we are hardcoding function one */
	bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
			PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		dev_info(&pdev->dev, "can't find secondary device\n");
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			dev_info(&pdev->dev, "64 bit aperture address, "
				 "but top bits are not zero; disabling AGP\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			dev_info(&pdev->dev, "64 bit MMIO address, but top "
				 "bits are not zero; disabling AGP\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private;
	bridge->dev = pci_dev_get(pdev);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
Example #28
SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
		unsigned long, off, unsigned long, len, void __user *, buf)
{
	struct pci_dev *dev;
	u8 byte;
	u16 word;
	u32 dword;
	long err;
	long cfg_ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = -ENODEV;
	dev = pci_get_bus_and_slot(bus, dfn);
	if (!dev)
		goto error;

	switch (len) {
	case 1:
		cfg_ret = pci_user_read_config_byte(dev, off, &byte);
		break;
	case 2:
		cfg_ret = pci_user_read_config_word(dev, off, &word);
		break;
	case 4:
		cfg_ret = pci_user_read_config_dword(dev, off, &dword);
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	err = -EIO;
	if (cfg_ret != PCIBIOS_SUCCESSFUL)
		goto error;

	switch (len) {
	case 1:
		err = put_user(byte, (unsigned char __user *)buf);
		break;
	case 2:
		err = put_user(word, (unsigned short __user *)buf);
		break;
	case 4:
		err = put_user(dword, (unsigned int __user *)buf);
		break;
	}
	pci_dev_put(dev);
	return err;

error:
	/* ??? XFree86 doesn't even check the return value.  They
	   just look for 0xffffffff in the output, since that's what
	   they get instead of a machine check on x86.  */
	switch (len) {
	case 1:
		put_user(-1, (unsigned char __user *)buf);
		break;
	case 2:
		put_user(-1, (unsigned short __user *)buf);
		break;
	case 4:
		put_user(-1, (unsigned int __user *)buf);
		break;
	}
	pci_dev_put(dev);
	return err;
}
Example #29
static int __init asr_get_base_address(void)
{
	unsigned char low, high;
	const char *type = "";

	asr_length = 1;

	switch (asr_type) {
	case ASMTYPE_TOPAZ:
		/* SELECT SuperIO CHIP FOR QUERYING
		   (WRITE 0x07 TO BOTH 0x2E and 0x2F) */
		outb(0x07, 0x2e);
		outb(0x07, 0x2f);

		/* SELECT AND READ THE HIGH-NIBBLE OF THE GPIO BASE ADDRESS */
		outb(0x60, 0x2e);
		high = inb(0x2f);

		/* SELECT AND READ THE LOW-NIBBLE OF THE GPIO BASE ADDRESS */
		outb(0x61, 0x2e);
		low = inb(0x2f);

		asr_base = (high << 16) | low;
		asr_read_addr = asr_write_addr =
			asr_base + TOPAZ_ASR_REG_OFFSET;
		asr_length = 5;

		break;

	case ASMTYPE_JASPER:
		type = "Jaspers ";
#if 0
		u32 r;
		/* Suggested fix */
		pdev = pci_get_bus_and_slot(0, DEVFN(0x1f, 0));
		if (pdev == NULL)
			return -ENODEV;
		pci_read_config_dword(pdev, 0x58, &r);
		asr_base = r & 0xFFFE;
		pci_dev_put(pdev);
#else
		/* FIXME: need to use pci_config_lock here,
		   but it's not exported */

/*		spin_lock_irqsave(&pci_config_lock, flags);*/

		/* Select the SuperIO chip in the PCI I/O port register */
		outl(0x8000f858, 0xcf8);

		/* BUS 0, Slot 1F, fnc 0, offset 58 */

		/*
		 * Read the base address for the SuperIO chip.
		 * Only the lower 16 bits are valid, but the address is word
		 * aligned so the last bit must be masked off.
		 */
		asr_base = inl(0xcfc) & 0xfffe;

/*		spin_unlock_irqrestore(&pci_config_lock, flags);*/
#endif
		asr_read_addr = asr_write_addr =
			asr_base + JASPER_ASR_REG_OFFSET;
		asr_toggle_mask = JASPER_ASR_TOGGLE_MASK;
		asr_disable_mask = JASPER_ASR_DISABLE_MASK;
		asr_length = JASPER_ASR_REG_OFFSET + 1;

		break;

	case ASMTYPE_PEARL:
		type = "Pearls ";
		asr_base = PEARL_BASE;
		asr_read_addr = PEARL_READ;
		asr_write_addr = PEARL_WRITE;
		asr_toggle_mask = PEARL_ASR_TOGGLE_MASK;
		asr_disable_mask = PEARL_ASR_DISABLE_MASK;
		asr_length = 4;
		break;

	case ASMTYPE_JUNIPER:
		type = "Junipers ";
		asr_base = JUNIPER_BASE_ADDRESS;
		asr_read_addr = asr_write_addr = asr_base;
		asr_toggle_mask = JUNIPER_ASR_TOGGLE_MASK;
		asr_disable_mask = JUNIPER_ASR_DISABLE_MASK;
		break;

	case ASMTYPE_SPRUCE:
		type = "Spruce's ";
		asr_base = SPRUCE_BASE_ADDRESS;
		asr_read_addr = asr_write_addr = asr_base;
		asr_toggle_mask = SPRUCE_ASR_TOGGLE_MASK;
		asr_disable_mask = SPRUCE_ASR_DISABLE_MASK;
		break;
	}

	if (!request_region(asr_base, asr_length, "ibmasr")) {
		pr_err("address %#x already in use\n", asr_base);
		return -EBUSY;
	}

	pr_info("found %sASR @ addr %#x\n", type, asr_base);

	return 0;
}
static int __devinit atomisp_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *id)
{
	struct atomisp_device *isp = NULL;
	unsigned int start, len;
	void __iomem *base = NULL;
	int err = 0;

	if (!dev) {
		v4l2_err(&atomisp_dev, "atomisp: erorr device ptr\n");
		return -EINVAL;
	}

	atomisp_pci_vendor = id->vendor;
	atomisp_pci_device = id->device;

	err = pci_enable_device(dev);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to enable CI ISP device\n");
		return err;
	}

	start = pci_resource_start(dev, 0);
	len = pci_resource_len(dev, 0);

	err = pci_request_region(dev, 0, atomisp_pci_driver.name);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to request region 0x%1x-0x%Lx\n",
			    start, (unsigned long long)pci_resource_end(dev,
				0));
		goto request_region_fail;
	}

	base = ioremap_nocache(start, len);
	if (!base) {
		v4l2_err(&atomisp_dev,
			    "Failed to I/O memory remapping\n");
		err = -ENOMEM;
		goto ioremap_fail;
	}

	isp = kzalloc(sizeof(struct atomisp_device), GFP_KERNEL);
	if (!isp) {
		v4l2_err(&atomisp_dev, "Failed to alloc CI ISP structure\n");
		err = -ENOMEM;
		goto kzalloc_fail;
	}
	isp->sw_contex.probed = false;
	isp->sw_contex.init = false;
	isp->pdev = dev;
	isp->dev = &dev->dev;
	isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
	isp->hw_contex.pci_root = pci_get_bus_and_slot(0, 0);

	/* Load isp firmware from user space */
	/*
	 * FIXME:
	 * MRFLD VP does not use firmware loading
	 * from file system
	 */
	if (!IS_MRFLD) {
		isp->firmware = load_firmware(&dev->dev);
		if (!isp->firmware) {
			v4l2_err(&atomisp_dev, "Load firmwares failed\n");
			goto load_fw_fail;
		}
	}

	err = atomisp_initialize_modules(isp);
	if (err < 0) {
		v4l2_err(&atomisp_dev, "atomisp_initialize_modules\n");
		goto init_mod_fail;
	}

	err = atomisp_register_entities(isp);
	if (err < 0) {
		v4l2_err(&atomisp_dev, "atomisp_register_entities failed\n");
		goto init_mod_fail;
	}

	init_completion(&isp->wq_frame_complete);
	init_completion(&isp->dis_state_complete);
	spin_lock_init(&isp->irq_lock);

	isp->work_queue = create_singlethread_workqueue(isp->v4l2_dev.name);
	if (isp->work_queue == NULL) {
		v4l2_err(&atomisp_dev, "Failed to initialize work queue\n");
		err = -ENOMEM;
		goto work_queue_fail;
	}
	INIT_WORK(&isp->work, atomisp_work);

	isp->hw_contex.ispmmadr = start;

	pci_set_master(dev);
	atomisp_io_base = base;

	isp->tvnorm = tvnorms;
	mutex_init(&isp->input_lock);
	/* isp_lock is to protect race access of css functions */
	mutex_init(&isp->isp_lock);
	isp->sw_contex.updating_uptr = false;
	isp->isp3a_stat_ready = false;

	pci_set_drvdata(dev, isp);

	err = pci_enable_msi(dev);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to enable msi\n");
		goto enable_msi_fail;
	}
	err = request_irq(dev->irq, atomisp_isr,
			  IRQF_SHARED, "isp_irq", isp);
	if (err) {
		v4l2_err(&atomisp_dev,
			    "Failed to request irq\n");
		goto request_irq_fail;
	}

	setup_timer(&isp->wdt, atomisp_wdt_wakeup_dog, (unsigned long)isp);

	atomisp_msi_irq_init(isp, dev);

	pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	/*
	 * FIXME:
	 * MRFLD VP does not implement
	 * PM Core
	 */
#ifdef CONFIG_PM
	if (!IS_MRFLD) {
		pm_runtime_put_noidle(&dev->dev);
		pm_runtime_allow(&dev->dev);
	}
#endif
	isp->sw_contex.probed = true;

	err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
	if (err)
		v4l2_err(&atomisp_dev,
			    "Failed to register reserved memory pool.\n");

	return 0;

request_irq_fail:
	pci_disable_msi(dev);
enable_msi_fail:
	pci_set_drvdata(dev, NULL);
	destroy_workqueue(isp->work_queue);
work_queue_fail:
	atomisp_unregister_entities(isp);
init_mod_fail:
	release_firmware(isp->firmware);
load_fw_fail:
	kfree(isp);
kzalloc_fail:
	iounmap(base);
ioremap_fail:
	pci_release_region(dev, 0);
request_region_fail:
	pci_disable_device(dev);
	return err;
}