Example #1
static int dw_mci_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *entries)
{
	struct dw_mci *host;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->irq = pdev->irq;
	host->irq_flags = IRQF_SHARED;
	host->dev = &pdev->dev;
	host->pdata = &pci_board_data;

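	/* Request and ioremap the BAR in one managed call; both the region
	 * and the mapping are released automatically on driver detach. */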
	ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev));
	if (ret)
		return ret;

	host->regs = pcim_iomap_table(pdev)[PCI_BAR_NO];

	pci_set_master(pdev);

	ret = dw_mci_probe(host);
	if (ret)
		return ret;

	pci_set_drvdata(pdev, host);

	return 0;
}
Example #2
static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

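	/* 0x3f selects BARs 0-5: request and map all six at once */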
	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
			   "PCI BARs (errno=%d)\n", rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}
Example #3
static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

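	/* Prefer 64-bit DMA and fall back to a 32-bit mask if the platform
	 * cannot support it */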
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pci_set_master(pdev);

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version == IOAT_VER_1_2)
		err = ioat1_dma_probe(device, ioat_dca_enabled);
	else if (device->version == IOAT_VER_2_0)
		err = ioat2_dma_probe(device, ioat_dca_enabled);
	else if (device->version >= IOAT_VER_3_0)
		err = ioat3_dma_probe(device, ioat_dca_enabled);
	else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		return -ENODEV;
	}

	return 0;
}
Example #4
/**
 * stmmac_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to table of device id/id's.
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device or a negative error code otherwise.
 */
static int stmmac_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int i;
	int ret;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	/* Get the base address of device */
	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
		if (ret)
			return ret;
		break;
	}

	pci_set_master(pdev);

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	pci_enable_msi(pdev);

	memset(&res, 0, sizeof(res));
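	/* 'i' still indexes the first non-empty BAR mapped in the loop above */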
	res.addr = pcim_iomap_table(pdev)[i];
	res.wol_irq = pdev->irq;
	res.irq = pdev->irq;

	return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
Example #5
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mt76x2_dev *dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	dev = mt76x2_alloc_device(&pdev->dev);
	if (!dev)
		return -ENOMEM;

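	/* Hand the managed BAR0 mapping over to the mt76 register layer */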
	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);

	dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
	dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);

	ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto error;

	ret = mt76x2_register_device(dev);
	if (ret)
		goto error;

	/* Fix up ASPM configuration */

	/* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
	mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);

	/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
	mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);

	/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
	mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);

	return 0;

error:
	ieee80211_free_hw(mt76_hw(dev));
	return ret;
}
Example #6
static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hsu_dma_chip *chip;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->regs = pcim_iomap_table(pdev)[0];
	chip->length = pci_resource_len(pdev, 0);
	chip->offset = HSU_PCI_CHAN_OFFSET;
	chip->irq = pdev->irq;

	pci_enable_msi(pdev);

	ret = hsu_dma_probe(chip);
	if (ret)
		return ret;

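	/* Only request the IRQ once the DMA controller is fully initialized */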
	ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
	if (ret)
		goto err_register_irq;

	pci_set_drvdata(pdev, chip);

	return 0;

err_register_irq:
	hsu_dma_remove(chip);
	return ret;
}
Example #7
static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ata_host *host;
	struct ata_port *ap;
	void __iomem *base;
	int rc;

	host = ata_host_alloc(&dev->dev, 1);
	if (!host)
		return -ENOMEM;
	ap = host->ports[0];

	/* Set up the PCI device */
	rc = pcim_enable_device(dev);
	if (rc)
		return rc;
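	/* On -EBUSY the BAR is held by another driver; pin the device so it
	 * stays enabled even though this probe fails. */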
	rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(dev);
	if (rc)
		return rc;

	host->iomap = pcim_iomap_table(dev);
	rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(dev);

	/* Set up the register mappings. We use the I/O mapping as only the
	   older chips also have MMIO on BAR 1 */
	base = host->iomap[0];
	if (!base)
		return -ENOMEM;
	ap->ops = &ninja32_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->flags |= ATA_FLAG_SLAVE_POSS;

	ap->ioaddr.cmd_addr = base + 0x10;
	ap->ioaddr.ctl_addr = base + 0x1E;
	ap->ioaddr.altstatus_addr = base + 0x1E;
	ap->ioaddr.bmdma_addr = base;
	ata_sff_std_ports(&ap->ioaddr);
	ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;

	ninja32_program(base);
	/* FIXME: Should we disable them at remove ? */
	return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &ninja32_sht);
}
Example #8
static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ata_host *host;
	struct ata_port *ap;
	void __iomem *base;
	int rc;

	host = ata_host_alloc(&dev->dev, 1);
	if (!host)
		return -ENOMEM;
	ap = host->ports[0];

	rc = pcim_enable_device(dev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(dev);
	if (rc)
		return rc;

	host->iomap = pcim_iomap_table(dev);
	rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(dev);

	base = host->iomap[0];
	if (!base)
		return -ENOMEM;
	ap->ops = &ninja32_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->flags |= ATA_FLAG_SLAVE_POSS;

	ap->ioaddr.cmd_addr = base + 0x10;
	ap->ioaddr.ctl_addr = base + 0x1E;
	ap->ioaddr.altstatus_addr = base + 0x1E;
	ap->ioaddr.bmdma_addr = base;
	ata_sff_std_ports(&ap->ioaddr);
	ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;

	ninja32_program(base);

	return ata_host_activate(host, dev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &ninja32_sht);
}
Example #9
static int scc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	struct device *dev = &pdev->dev;
	struct ata_probe_ent *probe_ent;
	int rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->dev = dev;
	INIT_LIST_HEAD(&probe_ent->node);

	probe_ent->sht 		= scc_port_info[board_idx].sht;
	probe_ent->port_flags 	= scc_port_info[board_idx].flags;
	probe_ent->pio_mask 	= scc_port_info[board_idx].pio_mask;
	probe_ent->udma_mask 	= scc_port_info[board_idx].udma_mask;
	probe_ent->port_ops 	= scc_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->iomap = pcim_iomap_table(pdev);

	rc = scc_host_init(probe_ent);
	if (rc)
		return rc;

	if (!ata_device_add(probe_ent))
		return -ENODEV;

	devm_kfree(dev, probe_ent);
	return 0;
}
Example #10
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mt7603_dev *dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	dev = mt7603_alloc_device(&pdev->dev);
	if (!dev)
		return -ENOMEM;

	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);

	pci_set_drvdata(pdev, dev);

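	/* Chip ID in the upper 16 bits, hardware revision in the low byte */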
	dev->mt76.rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
			(mt76_rr(dev, MT_HW_REV) & 0xff);
	dev_printk(KERN_INFO, dev->mt76.dev, "ASIC revision: %04x\n", dev->mt76.rev);

	ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt7603_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto error;

	ret = mt7603_register_device(dev);
	if (ret)
		goto error;

	return 0;
error:
	ieee80211_free_hw(mt76_hw(dev));
	return ret;
}
Example #11
static int ce4100_spi_probe(struct pci_dev *dev,
		const struct pci_device_id *ent)
{
	struct platform_device_info pi;
	int ret;
	struct platform_device *pdev;
	struct pxa2xx_spi_master spi_pdata;
	struct ssp_device *ssp;

	ret = pcim_enable_device(dev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
	if (ret)
		return ret;

	memset(&spi_pdata, 0, sizeof(spi_pdata));
	spi_pdata.num_chipselect = dev->devfn;

	ssp = &spi_pdata.ssp;
	ssp->phys_base = pci_resource_start(dev, 0);
	ssp->mmio_base = pcim_iomap_table(dev)[0];
	if (!ssp->mmio_base) {
		dev_err(&dev->dev, "failed to ioremap() registers\n");
		return -EIO;
	}
	ssp->irq = dev->irq;
	ssp->port_id = dev->devfn;
	ssp->type = PXA25x_SSP;

	memset(&pi, 0, sizeof(pi));
	pi.parent = &dev->dev;
	pi.name = "pxa2xx-spi";
	pi.id = ssp->port_id;
	pi.data = &spi_pdata;
	pi.size_data = sizeof(spi_pdata);

	pdev = platform_device_register_full(&pi);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	pci_set_drvdata(dev, pdev);

	return 0;
}
Example #12
/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pcim_enable_device failed\n");
		return err;
	}

	pci_set_master(pdev);

	err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
	if (err < 0) {
		dev_err(&pdev->dev, "request and iomap failed\n");
		return err;
	}

	mmio_base = pcim_iomap_table(pdev)[0];

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		return err;
	}

	INIT_LIST_HEAD(&hba->clk_list_head);

	err = ufshcd_init(hba, mmio_base, pdev->irq);
	if (err) {
		dev_err(&pdev->dev, "Initialization failed\n");
		ufshcd_dealloc_host(hba);
		return err;
	}

	pci_set_drvdata(pdev, hba);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}
Example #13
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
	struct dw_dma_chip *chip;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->regs = pcim_iomap_table(pdev)[0];
	chip->irq = pdev->irq;
	chip->pdata = pdata;

	ret = dw_dma_probe(chip);
	if (ret)
		return ret;

	pci_set_drvdata(pdev, chip);

	return 0;
}
Example #14
static int isci_pci_init(struct pci_dev *pdev)
{
	int err, bar_num, bar_mask = 0;
	void __iomem * const *iomap;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"failed enable PCI device %s!\n",
			pci_name(pdev));
		return err;
	}

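	/* The controller's BARs are 64-bit, so each one occupies two BAR
	 * slots; request only the even-numbered entries. */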
	for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
		bar_mask |= 1 << (bar_num * 2);

	err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
	if (err)
		return err;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	return 0;
}
Example #15
static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}
Example #16
static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
{
	void __iomem *vaddr;
	dma_addr_t busaddr;
	size_t len;
	int ret;

	ret = pcim_iomap_regions(priv->pdev, 1 << index, DRV_NAME);
	if (ret)
		return IOMEM_ERR_PTR(ret);

	busaddr = pci_resource_start(priv->pdev, index);
	len = pci_resource_len(priv->pdev, index);
	vaddr = pcim_iomap_table(priv->pdev)[index];
	if (!vaddr)
		return IOMEM_ERR_PTR(-ENOMEM);

	pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n",
		 index, vaddr, &busaddr, (int)len);

	return vaddr;
}
Example #17
static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* 8251 hosts four sata ports as M/S of the two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}
Example #18
/*
 * Most of the initialization at module load time is done here.
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase;
	unsigned int capabilities;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}

	error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (error) {
		dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
		return error;
	}

	iobase = pcim_iomap_table(pdev)[0];

	dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
		 (unsigned long)iobase, pdev->irq);

	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		return -ENOMEM;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;

	tasklet_init(&vmci_dev->datagram_tasklet,
		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
	tasklet_init(&vmci_dev->bm_tasklet,
		     vmci_process_bitmap, (unsigned long)vmci_dev);

	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		return -ENOMEM;
	}

	pci_set_master(pdev);	/* To enable queue_pair functionality. */

	/*
	 * Verify that the VMCI Device supports the capabilities that
	 * we need. If the device is missing capabilities that we would
	 * like to use, check for fallback capabilities and use those
	 * instead (so we can run a new VM on old hosts). Fail the load if
	 * a required capability is missing and there is no fallback.
	 *
	 * Right now, we need datagrams. There are no fallbacks.
	 */
	capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffer;
	}

	/*
	 * If the hardware supports notifications, we will use that as
	 * well.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = dma_alloc_coherent(
			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
			GFP_KERNEL);
		if (!vmci_dev->notification_bitmap) {
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
		} else {
			memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
			capabilities |= VMCI_CAPS_NOTIFICATIONS;
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);

	/* Let the host know which capabilities we intend to use. */
	iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);

	/* Set up global device so that we can start sending datagrams */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	vmci_pdev = pdev;
	spin_unlock_irq(&vmci_dev_spinlock);

	/*
	 * Register notification bitmap with device if that capability is
	 * used.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
				 (u32) bitmap_ppn);
			error = -ENXIO;
			goto err_remove_vmci_dev_g;
		}
	}
Example #19
static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	switch (sil680_init_chip(pdev, &try_mmio)) {
		case 0:
			ppi[0] = &info_slow;
			break;
		case 0x30:
			return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fall back to PIO if
	 * that fails
	 */
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_sff_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_sff_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &sil680_sht);

use_ioports:
	return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
Example #20
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ata_port_info pi = sis_port_info;
	const struct ata_port_info *ppi[] = { &pi, &pi };
	struct ata_host *host;
	u32 genctl, val;
	u8 pmr;
	u8 port2_start = 0x20;
	int i, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* check and see if the SCRs are in IO space or PCI cfg space */
	pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
	if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
		pi.flags |= SIS_FLAG_CFGSCR;

	/* if hardware thinks SCRs are in IO space, but there are
	 * no IO resources assigned, change to PCI cfg space.
	 */
	if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
	    ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
	     (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
		genctl &= ~GENCTL_IOMAPPED_SCR;
		pci_write_config_dword(pdev, SIS_GENCTL, genctl);
		pi.flags |= SIS_FLAG_CFGSCR;
	}

	pci_read_config_byte(pdev, SIS_PMR, &pmr);
	switch (ent->device) {
	case 0x0180:
	case 0x0181:

		/* The PATA-handling is provided by pata_sis */
		switch (pmr & 0x30) {
		case 0x10:
			ppi[1] = &sis_info133_for_sata;
			break;

		case 0x30:
			ppi[0] = &sis_info133_for_sata;
			break;
		}
		if ((pmr & SIS_PMR_COMBINED) == 0) {
			dev_info(&pdev->dev,
				 "Detected SiS 180/181/964 chipset in SATA mode\n");
			port2_start = 64;
		} else {
			dev_info(&pdev->dev,
				 "Detected SiS 180/181 chipset in combined mode\n");
			port2_start = 0;
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		}
		break;

	case 0x0182:
	case 0x0183:
		pci_read_config_dword(pdev, 0x6C, &val);
		if (val & (1L << 31)) {
			dev_info(&pdev->dev, "Detected SiS 182/965 chipset\n");
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		} else {
			dev_info(&pdev->dev, "Detected SiS 182/965L chipset\n");
		}
		break;

	case 0x1182:
		dev_info(&pdev->dev,
			 "Detected SiS 1182/966/680 SATA controller\n");
		pi.flags |= ATA_FLAG_SLAVE_POSS;
		break;

	case 0x1183:
		dev_info(&pdev->dev,
			 "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
		ppi[0] = &sis_info133_for_sata;
		ppi[1] = &sis_info133_for_sata;
		break;
	}

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->flags & ATA_FLAG_SATA &&
		    ap->flags & ATA_FLAG_SLAVE_POSS) {
			rc = ata_slave_link_init(ap);
			if (rc)
				return rc;
		}
	}

	if (!(pi.flags & SIS_FLAG_CFGSCR)) {
		void __iomem *mmio;

		rc = pcim_iomap_regions(pdev, 1 << SIS_SCR_PCI_BAR, DRV_NAME);
		if (rc)
			return rc;
		mmio = host->iomap[SIS_SCR_PCI_BAR];

		host->ports[0]->ioaddr.scr_addr = mmio;
		host->ports[1]->ioaddr.scr_addr = mmio + port2_start;
	}

	pci_set_master(pdev);
	pci_intx(pdev, 1);
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &sis_sht);
}
Example #21
static int atomisp_pci_probe(struct pci_dev *dev,
			     const struct pci_device_id *id)
{
	const struct atomisp_platform_data *pdata;
	struct atomisp_device *isp;
	unsigned int start;
	void __iomem *base;
	int err;

	if (!dev) {
		pr_err("atomisp: NULL device pointer\n");
		return -EINVAL;
	}

	atomisp_pci_vendor = id->vendor;
	atomisp_pci_device = id->device;

	pdata = atomisp_get_platform_data();
	if (pdata == NULL) {
		dev_err(&dev->dev, "no platform data available\n");
		return -ENODEV;
	}

	err = pcim_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n",
			err);
		return err;
	}

	start = pci_resource_start(dev, ATOM_ISP_PCI_BAR);
	v4l2_dbg(1, dbg_level, &atomisp_dev, "start: 0x%x\n", start);

	err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev));
	if (err) {
		dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n",
			err);
		return err;
	}

	base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR];
	v4l2_dbg(1, dbg_level, &atomisp_dev, "base: %p\n", base);

	atomisp_io_base = base;

	v4l2_dbg(1, dbg_level, &atomisp_dev, "atomisp_io_base: %p\n",
			atomisp_io_base);

	isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL);
	if (!isp) {
		dev_err(&dev->dev, "Failed to alloc CI ISP structure\n");
		return -ENOMEM;
	}
	isp->pdev = dev;
	isp->dev = &dev->dev;
	isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
	isp->pci_root = pci_get_bus_and_slot(0, 0);
	if (!isp->pci_root) {
		dev_err(&dev->dev, "Unable to find PCI host\n");
		return -ENODEV;
	}
	isp->saved_regs.ispmmadr = start;

	mutex_init(&isp->mutex);
	mutex_init(&isp->streamoff_mutex);
	spin_lock_init(&isp->lock);
	init_completion(&isp->init_done);

	isp->media_dev.driver_version = ATOMISP_CSS_VERSION_20;

	switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
	case ATOMISP_PCI_DEVICE_SOC_MRFLD:
	case ATOMISP_PCI_DEVICE_SOC_BYT:
		isp->media_dev.hw_revision =
			(ATOMISP_HW_REVISION_ISP2400
			 << ATOMISP_HW_REVISION_SHIFT) |
#ifdef CONFIG_ISP2400
			ATOMISP_HW_STEPPING_A0;
#else
			ATOMISP_HW_STEPPING_B0;
#endif
		break;
	default:
		/* Medfield and Clovertrail. */
		isp->media_dev.hw_revision =
			(ATOMISP_HW_REVISION_ISP2300
			 << ATOMISP_HW_REVISION_SHIFT) |
			(dev->revision < 0x09 ?
			 ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0);
	}

	isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;
	if ((pdata->spid->platform_family_id == INTEL_CLVTP_PHONE ||
	     pdata->spid->platform_family_id == INTEL_CLVT_TABLET) &&
	    isp->pdev->revision < 0x09) {
		/* Workaround for Cloverview(+) older than stepping B0 */
		isp->max_isr_latency = CSTATE_EXIT_LATENCY_C1;
	}

	/* Load isp firmware from user space */
	isp->firmware = load_firmware(&dev->dev);
	if (!isp->firmware) {
		err = -ENOENT;
		dev_err(&dev->dev, "Failed to load firmware\n");
		goto load_fw_fail;
	}

	isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
	if (isp->wdt_work_queue == NULL) {
		dev_err(&dev->dev, "Failed to initialize wdt work queue\n");
		err = -ENOMEM;
		goto wdt_work_queue_fail;
	}
	INIT_WORK(&isp->wdt_work, atomisp_wdt_work);

	isp->delayed_init_workq =
		alloc_workqueue(isp->v4l2_dev.name, WQ_CPU_INTENSIVE, 1);
	if (isp->delayed_init_workq == NULL) {
		dev_err(&dev->dev, "Failed to initialize delayed init workq\n");
		err = -ENOMEM;
		goto delayed_init_work_queue_fail;
	}
	INIT_WORK(&isp->delayed_init_work, atomisp_delayed_init_work);

	pci_set_master(dev);
	pci_set_drvdata(dev, isp);

	err = pci_enable_msi(dev);
	if (err) {
		dev_err(&dev->dev, "Failed to enable msi (%d)\n", err);
		goto enable_msi_fail;
	}

	err = devm_request_threaded_irq(&dev->dev, dev->irq,
					atomisp_isr, atomisp_isr_thread,
					IRQF_SHARED, "isp_irq", isp);
	if (err) {
		dev_err(&dev->dev, "Failed to request irq (%d)\n", err);
		goto enable_msi_fail;
	}

	setup_timer(&isp->wdt, atomisp_wdt, (unsigned long)isp);

	atomisp_msi_irq_init(isp, dev);

	pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	if (IS_ISP2400) {
		u32 reg32;
		/*
		 * For MRFLD, software/firmware needs to write a 1 to bit 0 of
		 * the register at CSI_RECEIVER_SELECTION_REG to enable the SH
		 * CSI backend. Writing 0 would enable the Arasan CSI backend,
		 * which has bugs (sightings 4567697 and 4567699) and will be
		 * removed in B0.
		 */
		atomisp_css2_hw_store_32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
		pci_read_config_dword(dev, PCI_I_CONTROL, &reg32);
		reg32 |= MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING
			| MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
		pci_write_config_dword(dev, PCI_I_CONTROL, reg32);
	}

	err = atomisp_initialize_modules(isp);
	if (err < 0) {
		dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err);
		goto enable_msi_fail;
	}

	err = atomisp_register_entities(isp);
	if (err < 0) {
		dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n",
			err);
		goto enable_msi_fail;
	}
	atomisp_acc_init(isp);

	/* save the iunit context only once after all the values are init'ed. */
	atomisp_save_iunit_reg(isp);

	pm_runtime_put_noidle(&dev->dev);
	pm_runtime_allow(&dev->dev);

	err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
	if (err)
		dev_err(&dev->dev, "Failed to register reserved memory pool.\n");

	return 0;

enable_msi_fail:
	destroy_workqueue(isp->delayed_init_workq);
delayed_init_work_queue_fail:
	destroy_workqueue(isp->wdt_work_queue);
wdt_work_queue_fail:
	release_firmware(isp->firmware);
load_fw_fail:
	pci_dev_put(isp->pci_root);
	return err;
}
Example #22
static int css2600_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct css2600_device *isp;
	phys_addr_t phys;
	void __iomem *mmu_base[CSS2600_MMU_MAX_DEVICES];
	struct css2600_bus_iommu *isys_iommu, *psys_iommu;
	void __iomem * const *iomap;
	void __iomem *isys_base = NULL;
	void __iomem *psys_base = NULL;
	char *fw_binary;
	unsigned int iommus = 0;
	unsigned int dma_mask = 39;
	int rval;

	isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
	if (!isp) {
		dev_err(&pdev->dev, "Failed to alloc CI ISP structure\n");
		return -ENOMEM;
	}
	isp->pdev = pdev;
	INIT_LIST_HEAD(&isp->devices);

	isys_iommu = devm_kzalloc(&pdev->dev, sizeof(*isys_iommu), GFP_KERNEL);
	psys_iommu = devm_kzalloc(&pdev->dev, sizeof(*psys_iommu), GFP_KERNEL);
	if (!isys_iommu || !psys_iommu) {
		dev_err(&pdev->dev, "Can't allocate memory for iommu\n");
		return -ENOMEM;
	}

	/* Share IOMMU mapping between isys and psys */
	isys_iommu->m = psys_iommu->m = devm_kzalloc(
		&pdev->dev, sizeof(*isys_iommu->m), GFP_KERNEL);
	if (!isys_iommu->m) {
		dev_err(&pdev->dev,
			"Can't allocate memory for iommu mapping\n");
		return -ENOMEM;
	}

	rval = pcim_enable_device(pdev);
	if (rval) {
		dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n",
			rval);
		return rval;
	}

	phys = pci_resource_start(pdev, CSS2600_PCI_BAR);

	rval = pcim_iomap_regions(pdev, 1 << CSS2600_PCI_BAR, pci_name(pdev));
	if (rval) {
		dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n",
			rval);
		return rval;
	}
	dev_info(&pdev->dev, "physical base address 0x%llx\n", phys);

	iomap = pcim_iomap_table(pdev);
	if (!iomap) {
		dev_err(&pdev->dev, "Failed to get iomap table\n");
		return -ENODEV;
	}

	isp->base = iomap[CSS2600_PCI_BAR];
	dev_info(&pdev->dev, "mapped as: 0x%p\n", isp->base);

	pci_set_drvdata(pdev, isp);

	pci_set_master(pdev);

	switch (isp->pdev->device) {
	case CSS2600_HW_MRFLD_2401:
		psys_base = isp->base;
		isys_base = isp->base;
		fw_binary = CSS2401_FIRMWARE;
		break;
	case CSS2600_HW_BXT_FPGA:
		dma_mask = 32;
		/* fall through */
	case CSS2600_HW_BXT:
		psys_base = isp->base + CSS2600_BXT_A0_PSYS_OFFSET;
		isys_base = isp->base + CSS2600_BXT_A0_ISYS_OFFSET;
		fw_binary = NULL;
		break;
	default:
		dev_err(&pdev->dev, "Not supported device\n");
		return -EINVAL;
	}

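	/* 39-bit DMA by default; the FPGA variant is limited to 32 bits */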
	rval = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
	if (!rval)
		rval = pci_set_consistent_dma_mask(pdev,
						   DMA_BIT_MASK(dma_mask));
	if (rval) {
		dev_err(&pdev->dev, "Failed to set DMA mask (%d)\n", rval);
		return rval;
	}

	if (fw_binary)
		rval = request_firmware(&isp->fw, fw_binary, &pdev->dev);
	if (rval) {
		dev_err(&pdev->dev, "Requesting firmware failed\n");
		return rval;
	}
	css2600_wrapper_init(psys_base, isys_base, isp->fw);

	if (pdev->device == CSS2600_HW_BXT) {
		isp->buttress = css2600_buttress_init(pdev, &pdev->dev, isp->base, 0);
		rval = PTR_ERR(isp->buttress);
		if (IS_ERR(isp->buttress))
			goto out_css2600_bus_del_devices;
	}
	if (pdev->device == CSS2600_HW_BXT
	    || (pdev->device == CSS2600_HW_BXT_FPGA
		&& IS_ENABLED(CONFIG_VIDEO_CSS2600_ISYS))) {
		mmu_base[0] = isys_base + CSS2600_BXT_A0_ISYS_IOMMU0_OFFSET;
		mmu_base[1] = isys_base + CSS2600_BXT_A0_ISYS_IOMMU1_OFFSET;
		isp->isys_iommu = css2600_mmu_init(pdev, &isp->buttress->dev,
			mmu_base, 2, 0, CSS2600_MMU_TYPE_CSS2600);
		rval = PTR_ERR(isp->isys_iommu);
		if (IS_ERR(isp->isys_iommu)) {
			dev_err(&pdev->dev, "can't create isys iommu device\n");
			return -ENOMEM;
		}

		rval = css2600_isys_iomem_filters_add(&pdev->dev, mmu_base, 2, 0xc);
		if (rval)
			goto out_css2600_bus_del_devices;
		iommus++;

		isys_iommu->dev = &isp->isys_iommu->dev;

		isp->isys = css2600_isys_init(
			pdev, &isp->buttress->dev, isys_iommu, isys_base,
			&isys_ipdata_2600, pdev->dev.platform_data, 0,
			pdev->device == CSS2600_HW_BXT ?
			CSS2600_ISYS_TYPE_CSS2600 :
			CSS2600_ISYS_TYPE_CSS2600_FPGA);
		rval = PTR_ERR(isp->isys);
		if (IS_ERR(isp->isys))
			goto out_css2600_bus_del_devices;
	}

	if (pdev->device == CSS2600_HW_BXT
	    || (pdev->device == CSS2600_HW_BXT_FPGA
		&& IS_ENABLED(CONFIG_VIDEO_CSS2600_PSYS))) {
		mmu_base[0] = psys_base + CSS2600_BXT_A0_PSYS_IOMMU0_OFFSET;
		mmu_base[1] = psys_base + CSS2600_BXT_A0_PSYS_IOMMU1_OFFSET;
		isp->psys_iommu = css2600_mmu_init(pdev, &isp->isys->dev,
			mmu_base, 2, 1, CSS2600_MMU_TYPE_CSS2600);
		rval = PTR_ERR(isp->psys_iommu);
		if (IS_ERR(isp->psys_iommu)) {
			dev_err(&pdev->dev, "can't create psys iommu device\n");
			goto out_css2600_bus_del_devices;
		}

		rval = css2600_isys_iomem_filters_add(&pdev->dev, mmu_base, 2, 0xc);
		if (rval)
			goto out_css2600_bus_del_devices;
		iommus++;

		psys_iommu->dev = &isp->psys_iommu->dev;
		isp->psys = css2600_psys_init(pdev, &isp->buttress->dev, psys_iommu, psys_base, 0);
		rval = PTR_ERR(isp->psys);
		if (IS_ERR(isp->psys))
			goto out_css2600_bus_del_devices;
	}

	if (pdev->device == CSS2600_HW_MRFLD_2401) {
		u32 val;

		mmu_base[0] = isp->base + CSS2600_MRFLD_DATA_IOMMU_OFFSET;
		mmu_base[1] = isp->base + CSS2600_MRFLD_ICACHE_IOMMU_OFFSET;
		isp->isys_iommu = css2600_mmu_init(pdev, &pdev->dev, mmu_base,
			2, 0, CSS2600_MMU_TYPE_CSS2401);
		rval = PTR_ERR(isp->isys_iommu);
		if (IS_ERR(isp->isys_iommu)) {
			dev_err(&pdev->dev, "can't create iommu device\n");
			goto out_css2600_bus_del_devices;
		}

		rval = css2600_isys_iomem_filters_add(&pdev->dev, mmu_base, 2, 0xc);
		if (rval)
			goto out_css2600_bus_del_devices;
		iommus++;

		isys_iommu->dev = &isp->isys_iommu->dev;
		isp->isys = css2600_isys_init(
			pdev, &pdev->dev, isys_iommu, isp->base,
			&isys_ipdata_2401, pdev->dev.platform_data, 0,
			CSS2600_ISYS_TYPE_CSS2401);
		rval = PTR_ERR(isp->isys);
		if (IS_ERR(isp->isys))
			goto out_css2600_bus_del_devices;
		isp->psys = css2600_psys_init(pdev, &isp->isys->dev, isys_iommu,
					      isp->base, 0);
		rval = PTR_ERR(isp->psys);
		if (IS_ERR(isp->psys))
			goto out_css2600_bus_del_devices;

		writel(CSS2401_CSI_RECEIVER_SELECTION_INTEL,
		       isp->base + CSS2401_REG_CSI_RECEIVER_SELECTION);

		/* Ensure read/write combining is enabled. */
		pci_read_config_dword(pdev, CSS2401_REG_PCI_I_CONTROL, &val);
		val |= CSS2401_PCI_I_CONTROL_ENABLE_READ_COMBINING |
			CSS2401_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
		pci_write_config_dword(pdev, CSS2401_REG_PCI_I_CONTROL, val);

		/*
		 * Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE.
		 * ANN/CHV: RCOMP updates do not happen when using CSI2+ path
		 * and sensor sending "continuous clock".
		 * TNG/ANN/CHV: MIPI packets are lost if the HS entry sequence
		 * is missed, and IUNIT can hang.
		 * For both issues, setting this bit is a workaround.
		 */
		pci_read_config_dword(pdev, CSS2401_REG_PCI_CSI_RCOMP_CONTROL,
				      &val);
		val |= CSS2401_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE;
		pci_write_config_dword(pdev, CSS2401_REG_PCI_CSI_RCOMP_CONTROL,
				       val);

		pci_read_config_dword(pdev, CSS2401_REG_PCI_CSI_CONTROL, &val);
		val |= CSS2401_PCI_CSI_CONTROL_PARPATHEN;
		pci_write_config_dword(pdev, CSS2401_REG_PCI_CSI_CONTROL, val);
	}

	rval = css2600_pci_config_setup(pdev,
					pdev->device != CSS2600_HW_BXT_FPGA);
	if (rval)
		goto out_css2600_bus_del_devices;

	rval = css2600_wrapper_set_iommus(iommus);
	if (rval)
		goto out_css2600_bus_del_devices;

	rval = css2600_wrapper_set_device(&isp->isys->dev);
	if (rval)
		goto out_css2600_bus_del_devices;

	rval = devm_request_threaded_irq(&pdev->dev, pdev->irq, css2600_isr,
					 css2600_isr_thread, IRQF_SHARED,
					 CSS2600_NAME, isp);
	if (rval) {
		dev_err(&pdev->dev, "Requesting threaded irq failed(%d)\n",
			rval);
		goto out_css2600_bus_del_devices;
	}

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;

out_css2600_bus_del_devices:
	css2600_isys_iomem_filter_remove(&pdev->dev);
	css2600_bus_del_devices(pdev);
	release_firmware(isp->fw);

	return rval;
}
Example #23
static int __devinit sil680_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.sht = &sil680_sht,
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.sht = &sil680_sht,
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	static int printed_version;
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	switch (sil680_init_chip(pdev, &try_mmio)) {
		case 0:
			ppi[0] = &info_slow;
			break;
		case 0x30:
			return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fall back to PIO if
	 * that fails
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
				 &sil680_sht);

use_ioports:
	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
static int sil680_reinit_one(struct pci_dev *pdev)
{
	int try_mmio;

	sil680_init_chip(pdev, &try_mmio);
	return ata_pci_device_resume(pdev);
}
#endif

static const struct pci_device_id sil680[] = {
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },

	{ },
};

static struct pci_driver sil680_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= sil680,
	.probe 		= sil680_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= sil680_reinit_one,
#endif
};

static int __init sil680_init(void)
{
	return pci_register_driver(&sil680_pci_driver);
}

static void __exit sil680_exit(void)
{
	pci_unregister_driver(&sil680_pci_driver);
}
Example #24
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
	int err;

	/* enable pci dev */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions and mapping IO device memory for mei driver */
	err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto end;
		}
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_txe_hw(dev);
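	/* Keep the whole iomap table; both SEC and BRIDGE BARs are used */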
	hw->mem_addr = pcim_iomap_table(pdev);

	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt  */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * For non-wakeable HW the runtime PM framework
	 * can't be used at the PCI device level.
	 * Use domain runtime PM callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
Example #25
static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static int printed_version;
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
#if defined(CONFIG_PATA_HPT3X3_DMA)
		/* Further debug needed */
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA2,
#endif
		.port_ops = &hpt3x3_port_ops
	};
	/* Register offsets of taskfiles in BAR4 area */
	static const u8 offset_cmd[2] = { 0x20, 0x28 };
	static const u8 offset_ctl[2] = { 0x36, 0x3E };
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;
	int i, rc;
	void __iomem *base;

	hpt3x3_init_chipset(pdev);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Everything is relative to BAR4 if we set up this way */
	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	base = host->iomap[4];	/* Bus mastering base */

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = base + offset_cmd[i];
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = base + offset_ctl[i];
		ioaddr->scr_addr = NULL;
		ata_sff_std_ports(ioaddr);
		ioaddr->bmdma_addr = base + 8 * i;

		ata_port_pbar_desc(ap, 4, -1, "ioport");
		ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
	}
	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &hpt3x3_sht);
}
Example #26
static int __devinit sil680_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	static int printed_version;
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;
	struct pci_dev *drac;

	/* DRAC only filter */

	drac = pci_get_device(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RAC4,
			      NULL);

	if (drac == NULL) {
		/* There are two common devices on DRACs. See if we can
		 * find the second one if we couldn't find the first.
		 */
		printk(KERN_INFO "sil680: Trying SMIC device.\n");
		drac = pci_get_device(PCI_VENDOR_ID_DELL, 0x0014, NULL);
	}

	if (drac == NULL)
		return -ENODEV;

	if (drac->bus != pdev->bus) {	/* Not the right SIL680 */
		pci_dev_put(drac);
		return -ENODEV;
	}
	pci_dev_put(drac);

	/* Back to original code */

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	switch (sil680_init_chip(pdev, &try_mmio)) {
		case 0:
			ppi[0] = &info_slow;
			break;
		case 0x30:
			return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fall back to PIO if
	 * that fails; the device was already enabled above.
	 */
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_sff_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_sff_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &sil680_sht);

use_ioports:
	return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL);
}

#ifdef CONFIG_PM
static int sil680_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int try_mmio, rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;
	sil680_init_chip(pdev, &try_mmio);
	ata_host_resume(host);
	return 0;
}
#endif

static const struct pci_device_id sil680[] = {
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },

	{ },
};

static struct pci_driver sil680_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= sil680,
	.probe 		= sil680_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= sil680_reinit_one,
#endif
};

static int __init sil680_init(void)
{
	return pci_register_driver(&sil680_pci_driver);
}

static void __exit sil680_exit(void)
{
	pci_unregister_driver(&sil680_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for SI680 PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil680);
MODULE_VERSION(DRV_VERSION);

module_init(sil680_init);
module_exit(sil680_exit);
Example #27
static int intel_gpio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *id)
{
	void __iomem *base;
	struct intel_mid_gpio *priv;
	u32 gpio_base;
	u32 irq_base;
	int retval;
	struct intel_mid_gpio_ddata *ddata =
				(struct intel_mid_gpio_ddata *)id->driver_data;

	retval = pcim_enable_device(pdev);
	if (retval)
		return retval;

	retval = pcim_iomap_regions(pdev, 1 << 0 | 1 << 1, pci_name(pdev));
	if (retval) {
		dev_err(&pdev->dev, "I/O memory mapping error\n");
		return retval;
	}

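	/* BAR1 only publishes the IRQ and GPIO base numbers; read them out */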
	base = pcim_iomap_table(pdev)[1];

	irq_base = readl(base);
	gpio_base = readl(sizeof(u32) + base);

	/* release the IO mapping, since we already get the info from bar1 */
	pcim_iounmap_regions(pdev, 1 << 1);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "can't allocate chip data\n");
		return -ENOMEM;
	}

	priv->reg_base = pcim_iomap_table(pdev)[0];
	priv->chip.label = dev_name(&pdev->dev);
	priv->chip.parent = &pdev->dev;
	priv->chip.request = intel_gpio_request;
	priv->chip.direction_input = intel_gpio_direction_input;
	priv->chip.direction_output = intel_gpio_direction_output;
	priv->chip.get = intel_gpio_get;
	priv->chip.set = intel_gpio_set;
	priv->chip.base = gpio_base;
	priv->chip.ngpio = ddata->ngpio;
	priv->chip.can_sleep = false;
	priv->pdev = pdev;

	spin_lock_init(&priv->lock);

	pci_set_drvdata(pdev, priv);
	retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
	if (retval) {
		dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
		return retval;
	}

	retval = gpiochip_irqchip_add(&priv->chip,
				      &intel_mid_irqchip,
				      irq_base,
				      handle_simple_irq,
				      IRQ_TYPE_NONE);
	if (retval) {
		dev_err(&pdev->dev,
			"could not connect irqchip to gpiochip\n");
		return retval;
	}

	intel_mid_irq_init_hw(priv);

	gpiochip_set_chained_irqchip(&priv->chip,
				     &intel_mid_irqchip,
				     pdev->irq,
				     intel_mid_irq_handler);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}
Example #28
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct xgbe_prv_data *pdata;
	struct device *dev = &pdev->dev;
	void __iomem * const *iomap_table;
	struct pci_dev *rdev;
	unsigned int ma_lo, ma_hi;
	unsigned int reg;
	int bar_mask;
	int ret;

	pdata = xgbe_alloc_pdata(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto err_alloc;
	}

	pdata->pcidev = pdev;
	pci_set_drvdata(pdev, pdata);

	/* Get the version data */
	pdata->vdata = (struct xgbe_version_data *)id->driver_data;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pcim_enable_device failed\n");
		goto err_pci_enable;
	}

	/* Obtain the mmio areas for the device */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, XGBE_DRV_NAME);
	if (ret) {
		dev_err(dev, "pcim_iomap_regions failed\n");
		goto err_pci_enable;
	}

	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table) {
		dev_err(dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}

	pdata->xgmac_regs = iomap_table[XGBE_XGMAC_BAR];
	if (!pdata->xgmac_regs) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	pdata->xprop_regs = pdata->xgmac_regs + XGBE_MAC_PROP_OFFSET;
	pdata->xi2c_regs = pdata->xgmac_regs + XGBE_I2C_CTRL_OFFSET;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
		dev_dbg(dev, "xprop_regs = %p\n", pdata->xprop_regs);
		dev_dbg(dev, "xi2c_regs  = %p\n", pdata->xi2c_regs);
	}

	pdata->xpcs_regs = iomap_table[XGBE_XPCS_BAR];
	if (!pdata->xpcs_regs) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);

	/* Set the PCS indirect addressing definition registers */
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (rdev &&
	    (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}
	pci_dev_put(rdev);

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xpcs window def  = %#010x\n",
			pdata->xpcs_window_def_reg);
		dev_dbg(dev, "xpcs window sel  = %#010x\n",
			pdata->xpcs_window_sel_reg);
		dev_dbg(dev, "xpcs window      = %#010x\n",
			pdata->xpcs_window);
		dev_dbg(dev, "xpcs window size = %#010x\n",
			pdata->xpcs_window_size);
		dev_dbg(dev, "xpcs window mask = %#010x\n",
			pdata->xpcs_window_mask);
	}

	pci_set_master(pdev);

	/* Enable all interrupts in the hardware */
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr[0] = ma_lo & 0xff;
	pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
	pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
	pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
	pdata->mac_addr[4] = ma_hi & 0xff;
	pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
	if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID) ||
	    !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid mac address\n");
		ret = -EINVAL;
		goto err_pci_enable;
	}

	/* Clock settings */
	pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->arcr = XGBE_DMA_PCI_ARCR;
	pdata->awcr = XGBE_DMA_PCI_AWCR;
	pdata->awarcr = XGBE_DMA_PCI_AWARCR;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
			pdata->tx_max_channel_count,
			pdata->rx_max_channel_count);
		dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
			pdata->tx_max_q_count, pdata->rx_max_q_count);
	}

	/* Set the hardware channel and queue counts */
	xgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
				      pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
				      pdata->vdata->rx_max_fifo_size);
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "max tx/rx max fifo size = %u/%u\n",
			pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);

	/* Configure interrupt support */
	ret = xgbe_config_irqs(pdata);
	if (ret)
		goto err_pci_enable;

	/* Configure the netdev resource */
	ret = xgbe_config_netdev(pdata);
	if (ret)
		goto err_irq_vectors;

	netdev_notice(pdata->netdev, "net device enabled\n");

	return 0;

err_irq_vectors:
	pci_free_irq_vectors(pdata->pcidev);

err_pci_enable:
	xgbe_free_pdata(pdata);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
Example #29
static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/*
	 * do not map rombase
	 */
	rc = pcim_iomap_regions(pdev, 1 << ATP867X_BAR_IOBASE, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

#ifdef	ATP867X_DEBUG
	atp867x_check_res(pdev);

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
			(unsigned long long)(host->iomap[i]));
#endif

	/*
	 * request, iomap BARs and init port addresses accordingly
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = ATP867X_IO_PORTBASE(ap, i);
		ioaddr->ctl_addr = ioaddr->altstatus_addr
				 = ATP867X_IO_ALTSTATUS(ap, i);
		ioaddr->bmdma_addr = ATP867X_IO_DMABASE(ap, i);

		ata_sff_std_ports(ioaddr);
		rc = atp867x_set_priv(ap);
		if (rc)
			return rc;

#ifdef	ATP867X_DEBUG
		atp867x_check_ports(ap, i);
#endif
		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
			(unsigned long)ioaddr->cmd_addr,
			(unsigned long)ioaddr->ctl_addr);
		ata_port_desc(ap, "bmdma 0x%lx",
			(unsigned long)ioaddr->bmdma_addr);

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	atp867x_fixup(host);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	return rc;
}
Example #30
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ath_softc *sc;
	struct ieee80211_hw *hw;
	u8 csz;
	u32 val;
	int ret = 0;
	char hw_name[64];

	if (pcim_enable_device(pdev))
		return -EIO;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		pr_err("32-bit DMA not available\n");
		return ret;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		pr_err("32-bit DMA consistent DMA enable failed\n");
		return ret;
	}

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * Linux 2.4.18 (at least) writes the cache line size
		 * register as a 16-bit wide register which is wrong.
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = L1_CACHE_BYTES / sizeof(u32);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	pci_set_master(pdev);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	ret = pcim_iomap_regions(pdev, BIT(0), "ath9k");
	if (ret) {
		dev_err(&pdev->dev, "PCI memory region reserve error\n");
		return -ENODEV;
	}

	hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
	if (!hw) {
		dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
		return -ENOMEM;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	sc = hw->priv;
	sc->hw = hw;
	sc->dev = &pdev->dev;
	sc->mem = pcim_iomap_table(pdev)[0];
	sc->driver_data = id->driver_data;

	/* Will be cleared in ath9k_start() */
	set_bit(SC_OP_INVALID, &sc->sc_flags);

	ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_irq;
	}

	sc->irq = pdev->irq;

	ret = ath9k_init_device(id->device, sc, &ath_pci_bus_ops);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize device\n");
		goto err_init;
	}

	ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
		   hw_name, (unsigned long)sc->mem, pdev->irq);

	return 0;

err_init:
	free_irq(sc->irq, sc);
err_irq:
	ieee80211_free_hw(hw);
	return ret;
}