Example #1
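mgag200_device_init() from the Matrox G200 (mgag200) DRM driver: reads the MGA OPTION configuration word, maps the register BAR (BAR 1) with devm_request_mem_region() and pcim_iomap(), and initializes VRAM.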
static int mgag200_device_init(struct drm_device *dev,
			       uint32_t flags)
{
	struct mga_device *mdev = dev->dev_private;
	int ret;
	u32 option;

	mdev->type = flags;

	/* Hardcode the number of CRTCs to 1 */
	mdev->num_crtc = 1;

	pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
	mdev->has_sdram = !(option & (1 << 14));

	/* BAR 0 is the framebuffer, BAR 1 contains registers */
	mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
	mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);

	if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
				"mgadrmfb_mmio")) {
		DRM_ERROR("can't reserve mmio registers\n");
		return -ENOMEM;
	}

	mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
	if (mdev->rmmio == NULL)
		return -ENOMEM;

	/* stash G200 SE model number for later use */
	if (IS_G200_SE(mdev))
		mdev->unique_rev_id = RREG32(0x1e24);

	ret = mga_vram_init(mdev);
	if (ret)
		return ret;

	mdev->bpp_shifts[0] = 0;
	mdev->bpp_shifts[1] = 1;
	mdev->bpp_shifts[2] = 0;
	mdev->bpp_shifts[3] = 2;
	return 0;
}
Example #2
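dwc3_byt_enable_ulpi_refclock() from the DesignWare USB3 (dwc3) PCI glue: maps the Bay Trail general-purpose register BAR with pcim_iomap(), enables the ULPI reference clock if it is disabled, and unmaps the BAR again with pcim_iounmap().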
static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
{
	void __iomem	*reg;
	u32		value;

	reg = pcim_iomap(pci, GP_RWBAR, 0);
	if (!reg)
		return -ENOMEM;

	value = readl(reg + GP_RWREG1);
	if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
		goto unmap; /* ULPI refclk already enabled */

	value &= ~GP_RWREG1_ULPI_REFCLK_DISABLE;
	writel(value, reg + GP_RWREG1);
	/* This comes from the Intel Android x86 tree w/o any explanation */
	msleep(100);
unmap:
	pcim_iounmap(pci, reg);
	return 0;
}
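Example #3
vt6420_init_probe_ent() from the VIA VT6420 SATA driver: builds an ata_probe_ent and maps BAR 5, which holds the SATA status/control (SCR) registers.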
static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
{
	struct ata_probe_ent *probe_ent;
	struct ata_port_info *ppi[2];
	void __iomem *bar5;

	ppi[0] = ppi[1] = &vt6420_port_info;
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return NULL;

	bar5 = pcim_iomap(pdev, 5, 0);
	if (!bar5) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
		return NULL;
	}

	probe_ent->port[0].scr_addr = svia_scr_addr(bar5, 0);
	probe_ent->port[1].scr_addr = svia_scr_addr(bar5, 1);

	return probe_ent;
}
Example #4
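hififo_probe() from the Harmon Instruments PCI Express FIFO driver: maps BAR 0 for the PIO registers and creates one character device per FIFO reported by the board.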
static int hififo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    int i;
    int rc;
    dev_t dev = 0;
    char tmpstr[16];
    struct hififo_dev *drvdata;
    struct hififo_fifo * fifo;

    drvdata = devm_kzalloc(&pdev->dev,
                           sizeof(struct hififo_dev),
                           GFP_KERNEL);
    if (!drvdata) {
        printk(KERN_ERR DEVICE_NAME ": failed to alloc drvdata\n");
        return -ENOMEM;
    }

    rc = pcim_enable_device(pdev);
    if(rc) {
        printk(KERN_ERR DEVICE_NAME ": pcim_enable_device() failed\n");
        return rc;
    }

    rc = pci_request_regions(pdev, DEVICE_NAME);
    if(rc < 0) {
        printk(KERN_ERR DEVICE_NAME ": pci_request_regions() failed\n");
        return rc;
    }

    printk(KERN_INFO DEVICE_NAME\
           ": Found Harmon Instruments PCI Express interface board\n");
    pci_set_drvdata(pdev, drvdata);

    pci_set_master(pdev); /* returns void */

    pci_set_dma_mask(pdev, 0xFFFFFFFFFFFFFFFF);

    pci_set_consistent_dma_mask(pdev, 0xFFFFFFFFFFFFFFFF);

    rc = pci_enable_msi(pdev);
    if(rc < 0) {
        printk(KERN_ERR DEVICE_NAME ": pci_enable_msi() failed\n");
        return rc;
    }

    rc = devm_request_irq(&pdev->dev,
                          pdev->irq,
                          (irq_handler_t) hififo_interrupt,
                          0, /* flags */
                          DEVICE_NAME,
                          drvdata);
    if(rc) {
        printk(KERN_ERR DEVICE_NAME ": request_irq() failed\n");
        return rc;
    }

    drvdata->pio_reg_base = (u64 *) pcim_iomap(pdev, 0, 0);
    if (!drvdata->pio_reg_base) {
        printk(KERN_ERR DEVICE_NAME ": pcim_iomap() failed\n");
        return -ENOMEM;
    }
    printk(KERN_INFO DEVICE_NAME					\
           ": pci_resource_start(dev, 0) = 0x%.8llx, virt = 0x%.16llx\n",
           (u64) pci_resource_start(pdev, 0),
           (u64) drvdata->pio_reg_base);

    for(i=0; i<8; i++)
        printk("bar0[%d] = %.8x\n", i, (u32) readreg(drvdata, i));
    drvdata->idreg = readreg(drvdata, REG_ID);
    drvdata->build = readreg(drvdata, REG_BUILD);
    printk(KERN_INFO DEVICE_NAME " FPGA build = 0x%.8X\n", drvdata->build);
    drvdata->nfifos = 0;
    for(i=0; i<MAX_FIFOS; i++) {
        if(drvdata->idreg & (1 << i))
            drvdata->nfifos ++;
    }

    if(drvdata->nfifos == 0) {
        printk(KERN_INFO DEVICE_NAME ": no fifos reported on card\n");
        return -ENODEV;
    }

    /* reset it */
    writereg(drvdata, 0xFFFF, REG_RESET_SET);
    udelay(10); /* wait for completion of anything that was running */
    writereg(drvdata, 0xFFFF, REG_RESET_CLEAR);

    rc = alloc_chrdev_region(&dev, 0, drvdata->nfifos, DEVICE_NAME);
    if (rc) {
        printk(KERN_ERR DEVICE_NAME ": alloc_chrdev_region() failed\n");
        return rc;
    }

    drvdata->major = MAJOR(dev);

    for(i=0; i<MAX_FIFOS; i++) {
        if((drvdata->idreg & (1 << i)) == 0)
            continue; /* fifo not present */
        fifo = devm_kzalloc(&pdev->dev,
                            sizeof(struct hififo_fifo),
                            GFP_KERNEL);
        if (!fifo) {
            printk(KERN_ERR DEVICE_NAME
                   ": failed to alloc hififo_fifo\n");
            return -ENOMEM;
        }
        drvdata->fifo[i] = fifo;
        if(i<MAX_FIFOS/2) {
            cdev_init(&fifo->cdev, &fops_fpc); /* returns void */
            fifo->cdev.ops = &fops_fpc;
        }
        else {
            cdev_init(&fifo->cdev, &fops_tpc); /* returns void */
            fifo->cdev.ops = &fops_tpc;
        }

        fifo->cdev.owner = THIS_MODULE;

        rc = cdev_add (&fifo->cdev, MKDEV(MAJOR(dev), i), 1);
        if (rc) {
            printk(KERN_NOTICE DEVICE_NAME\
                   ": Error %d adding cdev\n", rc);
            return rc;
        }
        sprintf(tmpstr, "hififo_%d_%d", hififo_count, i);
        device_create(hififo_class,
                      NULL,
                      MKDEV(MAJOR(dev), i),
                      NULL,
                      tmpstr);
        fifo->n = i;
        spin_lock_init(&fifo->lock_open);
        fifo->local_base = drvdata->pio_reg_base+8+i;
        init_waitqueue_head(&fifo->queue);
        fifo->build = drvdata->build;
        mutex_init(&fifo->sem);
        fifo->ring = pci_alloc_consistent(pdev,
                                          BUFFER_SIZE,
                                          &fifo->ring_dma_addr);
    }
    hififo_count++;
    /* enable interrupts */
    writereg(drvdata, 0xFFFF, REG_INTERRUPT);
    return 0;
}
Example #5
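sst_platform_get_resources() from the Intel SST audio DSP driver: requests the PCI regions and maps the DDR, SHIM, shared SRAM mailbox, IRAM and DRAM BARs with pcim_iomap().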
static int sst_platform_get_resources(struct intel_sst_drv *ctx)
{
	int ddr_base, ret = 0;
	struct pci_dev *pci = ctx->pci;

	ret = pci_request_regions(pci, SST_DRV_NAME);
	if (ret)
		return ret;

	/* map registers */
	/* DDR base */
	if (ctx->dev_id == SST_MRFLD_PCI_ID) {
		ctx->ddr_base = pci_resource_start(pci, 0);
		/* check that the relocated IMR base matches with FW Binary */
		ddr_base = relocate_imr_addr_mrfld(ctx->ddr_base);
		if (!ctx->pdata->lib_info) {
			dev_err(ctx->dev, "lib_info pointer NULL\n");
			ret = -EINVAL;
			goto do_release_regions;
		}
		if (ddr_base != ctx->pdata->lib_info->mod_base) {
			dev_err(ctx->dev,
					"FW LSP DDR BASE does not match with IFWI\n");
			ret = -EINVAL;
			goto do_release_regions;
		}
		ctx->ddr_end = pci_resource_end(pci, 0);

		ctx->ddr = pcim_iomap(pci, 0,
					pci_resource_len(pci, 0));
		if (!ctx->ddr) {
			ret = -EINVAL;
			goto do_release_regions;
		}
		dev_dbg(ctx->dev, "sst: DDR Ptr %p\n", ctx->ddr);
	} else {
		ctx->ddr = NULL;
	}
	/* SHIM */
	ctx->shim_phy_add = pci_resource_start(pci, 1);
	ctx->shim = pcim_iomap(pci, 1, pci_resource_len(pci, 1));
	if (!ctx->shim) {
		ret = -EINVAL;
		goto do_release_regions;
	}
	dev_dbg(ctx->dev, "SST Shim Ptr %p\n", ctx->shim);

	/* Shared SRAM */
	ctx->mailbox_add = pci_resource_start(pci, 2);
	ctx->mailbox = pcim_iomap(pci, 2, pci_resource_len(pci, 2));
	if (!ctx->mailbox) {
		ret = -EINVAL;
		goto do_release_regions;
	}
	dev_dbg(ctx->dev, "SRAM Ptr %p\n", ctx->mailbox);

	/* IRAM */
	ctx->iram_end = pci_resource_end(pci, 3);
	ctx->iram_base = pci_resource_start(pci, 3);
	ctx->iram = pcim_iomap(pci, 3, pci_resource_len(pci, 3));
	if (!ctx->iram) {
		ret = -EINVAL;
		goto do_release_regions;
	}
	dev_dbg(ctx->dev, "IRAM Ptr %p\n", ctx->iram);

	/* DRAM */
	ctx->dram_end = pci_resource_end(pci, 4);
	ctx->dram_base = pci_resource_start(pci, 4);
	ctx->dram = pcim_iomap(pci, 4, pci_resource_len(pci, 4));
	if (!ctx->dram) {
		ret = -EINVAL;
		goto do_release_regions;
	}
	dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram);
do_release_regions:
	pci_release_regions(pci);
	return ret;
}
Example #6
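cs5520_init_one() from the pata_cs5520 driver: maps the legacy command/control I/O ports with devm_ioport_map() and the bus-master DMA registers in BAR 2 with pcim_iomap(), then sets up both ATA ports.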
static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
	static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
	struct ata_port_info pi = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.port_ops	= &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_warn(&pdev->dev, "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_io(pdev)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
	iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
	iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
	iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[0],
		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
	ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[1],
		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
	ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_bmdma_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		ata_port_desc(ap, "irq %d", irq[i]);
	}

	return ata_host_register(host, &cs5520_sht);
}
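Example #7
An older variant of the pata_cs5520 probe routine built around ata_probe_ent; the legacy I/O ports and the BMDMA registers in BAR 2 are mapped the same way as above.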
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	u8 pcicfg;
	void __iomem *iomap[5];
	static struct ata_probe_ent probe[2];
	int ports = 0;

	/* IDE port enable bits */
	pci_read_config_byte(dev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	if ((pcicfg & 0x40) == 0) {
		printk(KERN_WARNING DRV_NAME ": DMA mode disabled. Enabling.\n");
		pci_write_config_byte(dev, 0x60, pcicfg | 0x40);
	}

	/* Perform set up for DMA */
	if (pci_enable_device_bars(dev, 1<<2)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}
	pci_set_master(dev);
	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports */
	iomap[0] = devm_ioport_map(&dev->dev, 0x1F0, 8);
	iomap[1] = devm_ioport_map(&dev->dev, 0x3F6, 1);
	iomap[2] = devm_ioport_map(&dev->dev, 0x170, 8);
	iomap[3] = devm_ioport_map(&dev->dev, 0x376, 1);
	iomap[4] = pcim_iomap(dev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	/* We have to do our own plumbing as the PCI setup for this
	   chipset is non-standard so we can't punt to the libata code */

	INIT_LIST_HEAD(&probe[0].node);
	probe[0].dev = pci_dev_to_dev(dev);
	probe[0].port_ops = &cs5520_port_ops;
	probe[0].sht = &cs5520_sht;
	probe[0].pio_mask = 0x1F;
	probe[0].mwdma_mask = id->driver_data;
	probe[0].irq = 14;
	probe[0].irq_flags = 0;
	probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
	probe[0].n_ports = 1;
	probe[0].port[0].cmd_addr = iomap[0];
	probe[0].port[0].ctl_addr = iomap[1];
	probe[0].port[0].altstatus_addr = iomap[1];
	probe[0].port[0].bmdma_addr = iomap[4];

	/* The secondary lurks at different addresses but is otherwise
	   the same beastie */

	probe[1] = probe[0];
	INIT_LIST_HEAD(&probe[1].node);
	probe[1].irq = 15;
	probe[1].port[0].cmd_addr = iomap[2];
	probe[1].port[0].ctl_addr = iomap[3];
	probe[1].port[0].altstatus_addr = iomap[3];
	probe[1].port[0].bmdma_addr = iomap[4] + 8;

	/* Let libata fill in the port details */
	ata_std_ports(&probe[0].port[0]);
	ata_std_ports(&probe[1].port[0]);

	/* Now add the ports that are active */
	if (pcicfg & 1)
		ports += ata_device_add(&probe[0]);
	if (pcicfg & 2)
		ports += ata_device_add(&probe[1]);
	if (ports)
		return 0;
	return -ENODEV;
}
Example #8
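Another revision of the pata_cs5520 probe routine, shown together with its suspend/resume handlers, PCI ID table, and driver registration.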
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
	static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
	struct ata_port_info pi = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,
		.port_ops	= &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_printk(KERN_WARNING, &pdev->dev,
			   "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_bars(pdev, 1<<2)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
	iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
	iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
	iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[0],
		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
	ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[1],
		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
	ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_sff_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		ata_port_desc(ap, "irq %d", irq[i]);
	}

	return ata_host_register(host, &cs5520_sht);
}

#ifdef CONFIG_PM
/**
 *	cs5520_reinit_one	-	device resume
 *	@pdev: PCI device
 *
 *	Do any reconfiguration work needed by a resume from RAM. We need
 *	to restore DMA mode support on BIOSen which disabled it
 */

static int cs5520_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	u8 pcicfg;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	pci_read_config_byte(pdev, 0x60, &pcicfg);
	if ((pcicfg & 0x40) == 0)
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);

	ata_host_resume(host);
	return 0;
}

/**
 *	cs5520_pci_device_suspend	-	device suspend
 *	@pdev: PCI device
 *
 *	We have to cut and waste bits from the standard method because
 *	the 5520 is a bit odd and not just a pure ATA device. As a result
 *	we must not disable it. The needed code is short and this avoids
 *	chip specific mess in the core code.
 */

static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	pci_save_state(pdev);
	return 0;
}
#endif /* CONFIG_PM */

/* For now keep DMA off. We can set it for all but A rev CS5510 once the
   core ATA code can handle it */

static const struct pci_device_id pata_cs5520[] = {
	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },

	{ },
};

static struct pci_driver cs5520_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= pata_cs5520,
	.probe 		= cs5520_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= cs5520_pci_device_suspend,
	.resume		= cs5520_reinit_one,
#endif
};

static int __init cs5520_init(void)
{
	return pci_register_driver(&cs5520_pci_driver);
}

static void __exit cs5520_exit(void)
{
	pci_unregister_driver(&cs5520_pci_driver);
}
Example #9
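thunder_mmc_probe() from the Cavium ThunderX MMC driver: maps BAR 0, which holds the eMMC controller registers, and creates one platform device per "mmc-slot" child node before registering each slot.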
static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base)
		return -EINVAL;

	/* On ThunderX these are identical */
	host->dma_base = host->base;

	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk))
		return PTR_ERR(host->clk);

	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader. Writing 1 to the bits clears them.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and set the node pointer to
		 * the slot. The easiest way to get this is using
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret)
				goto error;
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i])
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
	}
	clk_disable_unprepare(host->clk);
	return ret;
}
Example #10
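An early libata version of cs5520_init_one() that still sets host->irq and host->irq2 directly, shown together with its remove handler and resume hook.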
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ata_port_info pi = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,
		.port_ops	= &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_printk(KERN_WARNING, &pdev->dev,
			   "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_bars(pdev, 1<<2)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, 0x1F0, 8);
	iomap[1] = devm_ioport_map(&pdev->dev, 0x3F6, 1);
	iomap[2] = devm_ioport_map(&pdev->dev, 0x170, 8);
	iomap[3] = devm_ioport_map(&pdev->dev, 0x376, 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_std_ports(ioaddr);

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_std_ports(ioaddr);

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		if (i == 0)
			host->irq = irq[0];
		else
			host->irq2 = irq[1];
	}

	return ata_host_register(host, &cs5520_sht);
}

/**
 *	cs5520_remove_one	-	device unload
 *	@pdev: PCI device being removed
 *
 *	Handle an unplug/unload event for a PCI device. Unload the
 *	PCI driver but do not use the default handler as we manage
 *	resources ourself and *MUST NOT* disable the device as it has
 *	other functions.
 */

static void __devexit cs5520_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

#ifdef CONFIG_PM
/**
 *	cs5520_reinit_one	-	device resume
 *	@pdev: PCI device
 *
 *	Do any reconfiguration work needed by a resume from RAM. We need
 *	to restore DMA mode support on BIOSen which disabled it
 */

static int cs5520_reinit_one(struct pci_dev *pdev)
{
	u8 pcicfg;
	pci_read_config_byte(pdev, 0x60, &pcicfg);
	if ((pcicfg & 0x40) == 0)
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	return ata_pci_device_resume(pdev);
}