Example #1
static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
			   "PCI BARs (errno=%d)\n", rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}
Example #2
static int atp867x_init_one(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	static int printed_version;
	static const struct ata_port_info info_867x = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.udma_mask 	= ATA_UDMA6,
		.port_ops	= &atp867x_ops,
	};

	struct ata_host *host;
	const struct ata_port_info *ppi[] = { &info_867x, NULL };
	int rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	printk(KERN_INFO "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)",
		pdev->device);

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS);
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev,
			"failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = atp867x_ata_pci_sff_init_host(host);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to init host\n");
		goto err_out;
	}

	pci_set_master(pdev);

	rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				IRQF_SHARED, &atp867x_sht);
	if (rc)
		dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n");

err_out:
	return rc;
}
Example #3
static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	switch (sil680_init_chip(pdev, &try_mmio)) {
		case 0:
			ppi[0] = &info_slow;
			break;
		case 0x30:
			return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fallback to PIO if
	 * that fails
	 */
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_sff_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_sff_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &sil680_sht);

use_ioports:
	return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
Example #4
static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
	static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
	struct ata_port_info pi = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.port_ops	= &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_warn(&pdev->dev, "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_io(pdev)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
	iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
	iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
	iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[0],
		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
	ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[1],
		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
	ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_bmdma_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		ata_port_desc(ap, "irq %d", irq[i]);
	}

	return ata_host_register(host, &cs5520_sht);
}
Example #5
static int ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_platform_data *pdata = dev_get_platdata(dev);
	const struct of_device_id *of_id =
			of_match_device(ahci_of_match, &pdev->dev);
	const struct platform_device_id	*id_entry = of_id->data;
	const struct platform_device_id *id = platform_get_device_id(pdev);
	struct ata_port_info pi = ahci_port_info[id ? id->driver_data :
						 id_entry->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int n_ports;
	int i;
	int rc;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	if (pdata && pdata->ata_port_info)
		pi = *pdata->ata_port_info;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	hpriv->clk = clk_get(dev, NULL);
	if (IS_ERR(hpriv->clk)) {
		dev_err(dev, "can't get clock\n");
	} else {
		rc = clk_prepare_enable(hpriv->clk);
		if (rc) {
			dev_err(dev, "clock prepare enable failed");
			goto free_clk;
		}
	}

	/*
	 * Some platforms might need to prepare for mmio region access,
	 * which could be done in the following init call. So, the mmio
	 * region shouldn't be accessed before init (if provided) has
	 * returned successfully.
	 */
	if (pdata && pdata->init) {
		rc = pdata->init(dev, hpriv->mmio);
		if (rc)
			goto disable_unprepare_clk;
	}

	ahci_save_initial_config(dev, hpriv,
		pdata ? pdata->force_port_map : 0,
		pdata ? pdata->mask_port_map  : 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto pdata_exit;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto pdata_exit;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
			       &ahci_platform_sht);
	if (rc)
		goto pdata_exit;

	return 0;
pdata_exit:
	if (pdata && pdata->exit)
		pdata->exit(dev);
disable_unprepare_clk:
	if (!IS_ERR(hpriv->clk))
		clk_disable_unprepare(hpriv->clk);
free_clk:
	if (!IS_ERR(hpriv->clk))
		clk_put(hpriv->clk);
	return rc;
}
Example #6
static int __init ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_platform_data *pdata = dev->platform_data;
	struct ata_port_info pi = {
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	};
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int n_ports;
	int i;
	int rc;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	if (pdata && pdata->init) {
		rc = pdata->init(dev);
		if (rc)
			return rc;
	}

	if (pdata && pdata->ata_port_info)
		pi = *pdata->ata_port_info;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err0;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		rc = -ENOMEM;
		goto err0;
	}

	ahci_save_initial_config(dev, hpriv,
		pdata ? pdata->force_port_map : 0,
		pdata ? pdata->mask_port_map  : 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
			       &ahci_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	if (pdata && pdata->exit)
		pdata->exit(dev);
	return rc;
}
Example #7
static int __devinit sil680_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.sht = &sil680_sht,
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.sht = &sil680_sht,
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	static int printed_version;
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	switch (sil680_init_chip(pdev, &try_mmio)) {
		case 0:
			ppi[0] = &info_slow;
			break;
		case 0x30:
			return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fallback to PIO if
	 * that fails
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
				 &sil680_sht);

use_ioports:
	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
static int sil680_reinit_one(struct pci_dev *pdev)
{
	int try_mmio;

	sil680_init_chip(pdev, &try_mmio);
	return ata_pci_device_resume(pdev);
}
#endif

static const struct pci_device_id sil680[] = {
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },

	{ },
};

static struct pci_driver sil680_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= sil680,
	.probe 		= sil680_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= sil680_reinit_one,
#endif
};

static int __init sil680_init(void)
{
	return pci_register_driver(&sil680_pci_driver);
}

static void __exit sil680_exit(void)
{
	pci_unregister_driver(&sil680_pci_driver);
}
Example #8
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
	static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
	struct ata_port_info pi = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,
		.port_ops	= &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_printk(KERN_WARNING, &pdev->dev,
			   "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_bars(pdev, 1<<2)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
	iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
	iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
	iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[0],
		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
	ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[1],
		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
	ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_sff_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		ata_port_desc(ap, "irq %d", irq[i]);
	}

	return ata_host_register(host, &cs5520_sht);
}

#ifdef CONFIG_PM
/**
 *	cs5520_reinit_one	-	device resume
 *	@pdev: PCI device
 *
 *	Do any reconfiguration work needed by a resume from RAM. We need
 *	to restore DMA mode support on BIOSen which disabled it
 */

static int cs5520_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	u8 pcicfg;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	pci_read_config_byte(pdev, 0x60, &pcicfg);
	if ((pcicfg & 0x40) == 0)
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);

	ata_host_resume(host);
	return 0;
}

/**
 *	cs5520_pci_device_suspend	-	device suspend
 *	@pdev: PCI device
 *
 *	We have to cut and waste bits from the standard method because
 *	the 5520 is a bit odd and not just a pure ATA device. As a result
 *	we must not disable it. The needed code is short and this avoids
 *	chip specific mess in the core code.
 */

static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	pci_save_state(pdev);
	return 0;
}
#endif /* CONFIG_PM */

/* For now keep DMA off. We can set it for all but A rev CS5510 once the
   core ATA code can handle it */

static const struct pci_device_id pata_cs5520[] = {
	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },

	{ },
};

static struct pci_driver cs5520_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= pata_cs5520,
	.probe 		= cs5520_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= cs5520_pci_device_suspend,
	.resume		= cs5520_reinit_one,
#endif
};

static int __init cs5520_init(void)
{
	return pci_register_driver(&cs5520_pci_driver);
}

static void __exit cs5520_exit(void)
{
	pci_unregister_driver(&cs5520_pci_driver);
}
Example #9
static int pata_ftide010_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct ata_port_info pi = ftide010_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ftide010 *ftide;
	struct resource *res;
	int irq;
	int ret;
	int i;

	ftide = devm_kzalloc(dev, sizeof(*ftide), GFP_KERNEL);
	if (!ftide)
		return -ENOMEM;
	ftide->dev = dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	ftide->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ftide->base))
		return PTR_ERR(ftide->base);

	ftide->pclk = devm_clk_get(dev, "PCLK");
	if (!IS_ERR(ftide->pclk)) {
		ret = clk_prepare_enable(ftide->pclk);
		if (ret) {
			dev_err(dev, "failed to enable PCLK\n");
			return ret;
		}
	}

	/* Some special Cortina Gemini init, if needed */
	if (of_device_is_compatible(np, "cortina,gemini-pata")) {
		/*
		 * We need to know which instance is probing (the
		 * Gemini has two instances of FTIDE010) and we do
		 * this simply by looking at the physical base
		 * address, which is 0x63400000 for ATA1, else we
		 * are ATA0. This will also set up the cable types.
		 */
		ret = pata_ftide010_gemini_init(ftide,
				&pi,
				(res->start == 0x63400000));
		if (ret)
			goto err_dis_clk;
	} else {
		/* Else assume we are connected using PATA40 */
		ftide->master_cbl = ATA_CBL_PATA40;
		ftide->slave_cbl = ATA_CBL_PATA40;
	}

	ftide->host = ata_host_alloc_pinfo(dev, ppi, 1);
	if (!ftide->host) {
		ret = -ENOMEM;
		goto err_dis_clk;
	}
	ftide->host->private_data = ftide;

	for (i = 0; i < ftide->host->n_ports; i++) {
		struct ata_port *ap = ftide->host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->bmdma_addr = ftide->base + FTIDE010_DMA_REG;
		ioaddr->cmd_addr = ftide->base + FTIDE010_CMD_DATA;
		ioaddr->ctl_addr = ftide->base + FTIDE010_ALTSTAT_CTRL;
		ioaddr->altstatus_addr = ftide->base + FTIDE010_ALTSTAT_CTRL;
		ata_sff_std_ports(ioaddr);
	}

	dev_info(dev, "device ID %08x, irq %d, reg %pR\n",
		 readl(ftide->base + FTIDE010_IDE_DEVICE_ID), irq, res);

	ret = ata_host_activate(ftide->host, irq, ata_bmdma_interrupt,
				0, &pata_ftide010_sht);
	if (ret)
		goto err_dis_clk;

	return 0;

err_dis_clk:
	if (!IS_ERR(ftide->pclk))
		clk_disable_unprepare(ftide->pclk);
	return ret;
}
Example #10
static int __devinit sil680_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	static int printed_version;
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;
	struct pci_dev *drac;

	/* DRAC only filter */
	drac = pci_get_device(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RAC4,
			      NULL);
	if (drac == NULL) {
		/*
		 * There are two common devices on DRACs. See if we can
		 * find the second one if we couldn't find the first.
		 */
		printk(KERN_INFO "sil680: Trying SMIC device.\n");
		drac = pci_get_device(PCI_VENDOR_ID_DELL, 0x0014, NULL);
	}

	if (drac == NULL)
		return -ENODEV;

	if (drac->bus != pdev->bus) {	/* Not the right SIL680 */
		pci_dev_put(drac);
		return -ENODEV;
	}
	pci_dev_put(drac);

	/* Back to the original code */

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	switch (sil680_init_chip(pdev, &try_mmio)) {
		case 0:
			ppi[0] = &info_slow;
			break;
		case 0x30:
			return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fallback to PIO if
	 * that fails
	 */
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_sff_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_sff_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &sil680_sht);

use_ioports:
	return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL);
}

#ifdef CONFIG_PM
static int sil680_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int try_mmio, rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;
	sil680_init_chip(pdev, &try_mmio);
	ata_host_resume(host);
	return 0;
}
#endif

static const struct pci_device_id sil680[] = {
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },

	{ },
};

static struct pci_driver sil680_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= sil680,
	.probe 		= sil680_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= sil680_reinit_one,
#endif
};

static int __init sil680_init(void)
{
	return pci_register_driver(&sil680_pci_driver);
}

static void __exit sil680_exit(void)
{
	pci_unregister_driver(&sil680_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for SI680 PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil680);
MODULE_VERSION(DRV_VERSION);

module_init(sil680_init);
module_exit(sil680_exit);
Example #11
static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static int printed_version;
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
#if defined(CONFIG_PATA_HPT3X3_DMA)
		/* Further debug needed */
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA2,
#endif
		.port_ops = &hpt3x3_port_ops
	};
	/* Register offsets of taskfiles in BAR4 area */
	static const u8 offset_cmd[2] = { 0x20, 0x28 };
	static const u8 offset_ctl[2] = { 0x36, 0x3E };
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;
	int i, rc;
	void __iomem *base;

	hpt3x3_init_chipset(pdev);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Everything is relative to BAR4 if we set up this way */
	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	base = host->iomap[4];	/* Bus mastering base */

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = base + offset_cmd[i];
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = base + offset_ctl[i];
		ioaddr->scr_addr = NULL;
		ata_sff_std_ports(ioaddr);
		ioaddr->bmdma_addr = base + 8 * i;

		ata_port_pbar_desc(ap, 4, -1, "ioport");
		ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
	}
	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &hpt3x3_sht);
}
Example #12
static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	u8 *base = NULL;
	int err = 0;
	int irq, rc;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };

	hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
	if (hsdev == NULL) {
		dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
		err = -ENOMEM;
		goto error;
	}

	base = of_iomap(ofdev->dev.of_node, 0);
	if (!base) {
		dev_err(&ofdev->dev, "ioremap failed for SATA register"
			" address\n");
		err = -ENODEV;
		goto error_kmalloc;
	}
	hsdev->reg_base = base;
	dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");

	hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);

	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
	if (!host) {
		dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
		err = -ENOMEM;
		goto error_iomap;
	}

	host->private_data = hsdev;

	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);

	idr = in_le32(&hsdev->sata_dwc_regs->idr);
	versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
		   idr, ver[0], ver[1], ver[2]);

	irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	if (irq == NO_IRQ) {
		dev_err(&ofdev->dev, "no SATA DMA irq\n");
		err = -ENODEV;
		goto error_out;
	}

	host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1);
	if (!(host_pvt.sata_dma_regs)) {
		dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
			" address\n");
		err = -ENODEV;
		goto error_out;
	}

	host_pvt.dwc_dev = &ofdev->dev;

	dma_dwc_init(hsdev, irq);

	sata_dwc_enable_interrupts(hsdev);

	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (irq == NO_IRQ) {
		dev_err(&ofdev->dev, "no SATA DMA irq\n");
		err = -ENODEV;
		goto error_out;
	}

	rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
	if (rc != 0)
		dev_err(&ofdev->dev, "failed to activate host");

	dev_set_drvdata(&ofdev->dev, host);
	return 0;

error_out:
	dma_dwc_exit(hsdev);
error_iomap:
	iounmap(base);
error_kmalloc:
	kfree(hsdev);
error:
	return err;
}
Example #13
static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	u8 *base = NULL;
	int err = 0;
	int irq, rc;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };

	/* Allocate DWC SATA device */
	hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
	if (hsdev == NULL) {
		dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
		err = -ENOMEM;
		goto error;
	}

	/* Ioremap SATA registers */
	base = of_iomap(ofdev->dev.of_node, 0);
	if (!base) {
		dev_err(&ofdev->dev, "ioremap failed for SATA register"
			" address\n");
		err = -ENODEV;
		goto error_kmalloc;
	}
	hsdev->reg_base = base;
	dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");

	/* Synopsys DWC SATA specific Registers */
	hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);

	/* Allocate and fill host */
	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
	if (!host) {
		dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
		err = -ENOMEM;
		goto error_iomap;
	}

	host->private_data = hsdev;

	/* Setup port */
	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);

	/* Read the ID and Version Registers */
	idr = in_le32(&hsdev->sata_dwc_regs->idr);
	versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
		   idr, ver[0], ver[1], ver[2]);

	/* Get SATA DMA interrupt number */
	irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	if (irq == NO_IRQ) {
		dev_err(&ofdev->dev, "no SATA DMA irq\n");
		err = -ENODEV;
		goto error_out;
	}

	/* Get physical
Example #14
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ata_port_info pi = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,
		.port_ops	= &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_printk(KERN_WARNING, &pdev->dev,
			   "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_bars(pdev, 1<<2)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, 0x1F0, 8);
	iomap[1] = devm_ioport_map(&pdev->dev, 0x3F6, 1);
	iomap[2] = devm_ioport_map(&pdev->dev, 0x170, 8);
	iomap[3] = devm_ioport_map(&pdev->dev, 0x376, 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_std_ports(ioaddr);

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_std_ports(ioaddr);

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		if (i == 0)
			host->irq = irq[0];
		else
			host->irq2 = irq[1];
	}

	return ata_host_register(host, &cs5520_sht);
}

/**
 *	cs5520_remove_one	-	device unload
 *	@pdev: PCI device being removed
 *
 *	Handle an unplug/unload event for a PCI device. Unload the
 *	PCI driver but do not use the default handler as we manage
 *	resources ourself and *MUST NOT* disable the device as it has
 *	other functions.
 */

static void __devexit cs5520_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

#ifdef CONFIG_PM
/**
 *	cs5520_reinit_one	-	device resume
 *	@pdev: PCI device
 *
 *	Do any reconfiguration work needed by a resume from RAM. We need
 *	to restore DMA mode support on BIOSen which disabled it
 */

static int cs5520_reinit_one(struct pci_dev *pdev)
{
	u8 pcicfg;
	pci_read_config_byte(pdev, 0x60, &pcicfg);
	if ((pcicfg & 0x40) == 0)
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	return ata_pci_device_resume(pdev);
}
Example #15
/**
 * ahci_platform_init_host - Bring up an ahci-platform host
 * @pdev: platform device pointer for the host
 * @hpriv: ahci-host private data for the host
 * @pi_template: template for the ata_port_info to use
 * @host_flags: ahci host flags used in ahci_host_priv
 * @force_port_map: param passed to ahci_save_initial_config
 * @mask_port_map: param passed to ahci_save_initial_config
 *
 * This function does all the usual steps needed to bring up an
 * ahci-platform host, note any necessary resources (ie clks, phy, etc.)
 * must be initialized / enabled before calling this.
 *
 * RETURNS:
 * 0 on success otherwise a negative error code
 */
int ahci_platform_init_host(struct platform_device *pdev,
			    struct ahci_host_priv *hpriv,
			    const struct ata_port_info *pi_template,
			    unsigned long host_flags,
			    unsigned int force_port_map,
			    unsigned int mask_port_map)
{
	struct device *dev = &pdev->dev;
	struct ata_port_info pi = *pi_template;
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ata_host *host;
	int i, irq, n_ports, rc;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	/* prepare host */
	pi.private_data = (void *)host_flags;
	hpriv->flags |= host_flags;

	ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map);

	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
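	/*
	 * Worked example (hypothetical values, not from a real chip): with
	 * CAP.NP = 3 the controller advertises 4 ports (ahci_nr_ports() is
	 * NP + 1), while port_map = 0x5 marks only ports 0 and 2 as
	 * implemented, so fls(0x5) = 3 and n_ports = max(4, 3) = 4; the
	 * ports missing from port_map get ata_dummy_port_ops below.
	 */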

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		dev_info(dev, "SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR",
			      platform_get_resource(pdev, IORESOURCE_MEM, 0));
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	return ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
				 &ahci_platform_sht);
}
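A minimal sketch of a caller follows, based only on the ahci_platform_init_host() contract documented above; the probe function my_ahci_probe() and the port-info table my_port_info are illustrative names, not part of the sources in this collection, and resource setup is reduced to the bare minimum seen in the earlier ahci_probe() examples.

/* Sketch only: my_ahci_probe() and my_port_info are assumed names. */
static const struct ata_port_info my_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_ops,
};

static int my_ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct resource *mem;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	/* Map the AHCI register block; any clocks or PHYs would also be
	 * enabled here, before the helper touches the MMIO region. */
	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio)
		return -ENOMEM;

	/* No extra host flags, no forced or masked port map. */
	return ahci_platform_init_host(pdev, hpriv, &my_port_info, 0, 0, 0);
}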
Example #16
static int __init exynos_sata_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_port_info pi = ahci_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct exynos_sata *sata;
	struct ata_host *host;
	struct resource *mem;
	int n_ports, i, ret;

	sata = devm_kzalloc(dev, sizeof(*sata), GFP_KERNEL);
	if (!sata) {
		dev_err(dev, "can't alloc sata\n");
		return -EINVAL;
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		ret = -ENOMEM;
		goto err1;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		ret = -EINVAL;
		goto err2;
	}

	sata->irq = platform_get_irq(pdev, 0);
	if (sata->irq <= 0) {
		dev_err(dev, "no irq\n");
		ret = -EINVAL;
		goto err2;
	}

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		ret = -ENOMEM;
		goto err2;
	}

	exynos_sata_parse_dt(dev->of_node, sata);
	if (!sata->freq) {
		dev_err(dev, "can't determine sata frequency \n");
		ret = -ENOMEM;
		goto err2;
	}

	sata->sclk = devm_clk_get(dev, "sclk_sata");
	if (IS_ERR(sata->sclk)) {
		dev_err(dev, "failed to get sclk_sata\n");
		ret = PTR_ERR(sata->sclk);
		goto err3;
	}
	clk_enable(sata->sclk);

	clk_set_rate(sata->sclk, sata->freq * MHZ);

	sata->clk = devm_clk_get(dev, "sata");
	if (IS_ERR(sata->clk)) {
		dev_err(dev, "failed to get sata clock\n");
		ret = PTR_ERR(sata->clk);
		goto err4;
	}
	clk_enable(sata->clk);

	/*  Get a gen 3 PHY controller */

	sata->phy = sata_get_phy(SATA_PHY_GENERATION3);
	if (!sata->phy) {
		dev_err(dev, "failed to get sata phy\n");
		ret = -EPROBE_DEFER;
		goto err5;
	}

	/* Initialize the controller */

	ret = sata_init_phy(sata->phy);
	if (ret < 0) {
		dev_err(dev, "failed to initialize sata phy\n");
		goto err6;
	}

	ahci_save_initial_config(dev, hpriv, 0, 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		ret = -ENOMEM;
		goto err7;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		pr_info("ahci: SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	ret = ahci_reset_controller(host);
	if (ret)
		goto err7;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	ret = ata_host_activate(host, sata->irq, ahci_interrupt, IRQF_SHARED,
				&ahci_platform_sht);
	if (ret)
		goto err7;

	platform_set_drvdata(pdev, sata);

	return 0;

 err7:
	sata_shutdown_phy(sata->phy);

 err6:
	sata_put_phy(sata->phy);

 err5:
	clk_disable(sata->clk);
	devm_clk_put(dev, sata->clk);

 err4:
	clk_disable(sata->sclk);
	devm_clk_put(dev, sata->sclk);

 err3:
	devm_iounmap(dev, hpriv->mmio);

 err2:
	devm_kfree(dev, hpriv);

 err1:
	devm_kfree(dev, sata);

	return ret;
}