static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id) { struct ata_host *host; struct ata_port *ap; void __iomem *cmd_addr, *ctl_addr; int irq = 0; irq_handler_t handler = NULL; if (pnp_port_valid(idev, 0) == 0) return -ENODEV; if (pnp_irq_valid(idev, 0)) { irq = pnp_irq(idev, 0); handler = ata_sff_interrupt; } /* allocate host */ host = ata_host_alloc(&idev->dev, 1); if (!host) return -ENOMEM; /* acquire resources and fill host */ cmd_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8); if (!cmd_addr) return -ENOMEM; ap = host->ports[0]; ap->ops = &isapnp_noalt_port_ops; ap->pio_mask = ATA_PIO0; ap->flags |= ATA_FLAG_SLAVE_POSS; ap->ioaddr.cmd_addr = cmd_addr; if (pnp_port_valid(idev, 1)) { ctl_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 1), 1); ap->ioaddr.altstatus_addr = ctl_addr; ap->ioaddr.ctl_addr = ctl_addr; ap->ops = &isapnp_port_ops; } ata_sff_std_ports(&ap->ioaddr); ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", (unsigned long long)pnp_port_start(idev, 0), (unsigned long long)pnp_port_start(idev, 1)); /* activate */ return ata_host_activate(host, irq, handler, 0, &isapnp_sht); }
static int kempld_probe(struct platform_device *pdev) { struct kempld_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device *dev = &pdev->dev; struct kempld_device_data *pld; struct resource *ioport; int ret; pld = devm_kzalloc(dev, sizeof(*pld), GFP_KERNEL); if (!pld) return -ENOMEM; ioport = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!ioport) return -EINVAL; pld->io_base = devm_ioport_map(dev, ioport->start, ioport->end - ioport->start); if (!pld->io_base) return -ENOMEM; pld->io_index = pld->io_base; pld->io_data = pld->io_base + 1; pld->pld_clock = pdata->pld_clock; pld->dev = dev; mutex_init(&pld->lock); platform_set_drvdata(pdev, pld); ret = kempld_detect_device(pld); if (ret) return ret; return 0; }
/*
 * mod_init - locate an AMD 768/8111 chipset and register its RNG.
 *
 * Scans all PCI devices for an entry in pci_tbl, reads the power
 * management base address from config space and maps the RNG register
 * window at pmbase + PMBASE_OFFSET before registering with the hwrng
 * core.  Returns 0 on success or a negative errno.
 */
static int __init mod_init(void)
{
	int err = -ENODEV;
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *ent;
	u32 pmbase;
	struct amd768_priv *priv;

	/*
	 * NOTE(review): for_each_pci_dev() takes a reference on each
	 * device it yields; neither the "found" path nor the not-found
	 * path drops it explicitly here -- confirm whether a
	 * pci_dev_put() is required on exit paths.
	 */
	for_each_pci_dev(pdev) {
		ent = pci_match_id(pci_tbl, pdev);
		if (ent)
			goto found;
	}
	/* Device not found. */
	return -ENODEV;

found:
	/*
	 * NOTE(review): pci_read_config_dword() returns a PCIBIOS_*
	 * code rather than a negative errno -- verify callers of this
	 * init treat the propagated value correctly.
	 */
	err = pci_read_config_dword(pdev, 0x58, &pmbase);
	if (err)
		return err;

	/* Only the base address bits are meaningful */
	pmbase &= 0x0000FF00;
	if (pmbase == 0)
		return -EIO;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Reserve the RNG register window before mapping it */
	if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", pmbase + 0xF0);
		return -EBUSY;
	}

	priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, PMBASE_SIZE);
	if (!priv->iobase) {
		pr_err(DRV_NAME "Cannot map ioport\n");
		return -ENOMEM;
	}

	/* Hand the private context to the hwrng callbacks */
	amd_rng.priv = (unsigned long)priv;
	priv->pcidev = pdev;

	pr_info(DRV_NAME " detected\n");
	return devm_hwrng_register(&pdev->dev, &amd_rng);
}
/*
 * plat_ide_probe - probe a platform-bus IDE interface (hwif based)
 * @pdev: platform device with base, alt-status and IRQ resources
 *
 * Resources 0 and 1 may be either IORESOURCE_IO (port I/O) or
 * IORESOURCE_MEM (memory-mapped); both must be of the same kind.
 * Maps the register blocks, finds a free hwif and registers it.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __devinit plat_ide_probe(struct platform_device *pdev)
{
	struct resource *res_base, *res_alt, *res_irq;
	void __iomem *base, *alt_base;
	ide_hwif_t *hwif;
	struct pata_platform_info *pdata;
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
	int ret = 0;
	int mmio = 0;
	hw_regs_t hw;

	pdata = pdev->dev.platform_data;

	/* get a pointer to the register memory */
	res_base = platform_get_resource(pdev, IORESOURCE_IO, 0);
	res_alt = platform_get_resource(pdev, IORESOURCE_IO, 1);

	if (!res_base || !res_alt) {
		/* Fall back to memory-mapped registers */
		res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		res_alt = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res_base || !res_alt) {
			ret = -ENOMEM;
			goto out;
		}
		mmio = 1;
	}

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res_irq) {
		ret = -EINVAL;
		goto out;
	}

	if (mmio) {
		base = devm_ioremap(&pdev->dev, res_base->start,
				    res_base->end - res_base->start + 1);
		alt_base = devm_ioremap(&pdev->dev, res_alt->start,
					res_alt->end - res_alt->start + 1);
	} else {
		base = devm_ioport_map(&pdev->dev, res_base->start,
				       res_base->end - res_base->start + 1);
		alt_base = devm_ioport_map(&pdev->dev, res_alt->start,
					   res_alt->end - res_alt->start + 1);
	}

	/*
	 * BUG FIX: the original never checked the mapping results, so a
	 * failed devm_ioremap()/devm_ioport_map() would be programmed
	 * into the hwif as a NULL register base.
	 */
	if (!base || !alt_base) {
		ret = -ENOMEM;
		goto out;
	}

	hwif = ide_find_port();
	if (!hwif) {
		ret = -ENODEV;
		goto out;
	}

	memset(&hw, 0, sizeof(hw));
	plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
	hw.dev = &pdev->dev;

	ide_init_port_hw(hwif, &hw);

	if (mmio) {
		hwif->host_flags = IDE_HFLAG_MMIO;
		default_hwif_mmiops(hwif);
	}

	idx[0] = hwif->index;

	ide_device_add(idx, NULL);

	platform_set_drvdata(pdev, hwif);

	return 0;

out:
	return ret;
}
static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { static const unsigned int cmd_port[] = { 0x1F0, 0x170 }; static const unsigned int ctl_port[] = { 0x3F6, 0x376 }; struct ata_port_info pi = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .port_ops = &cs5520_port_ops, }; const struct ata_port_info *ppi[2]; u8 pcicfg; void __iomem *iomap[5]; struct ata_host *host; struct ata_ioports *ioaddr; int i, rc; rc = pcim_enable_device(pdev); if (rc) return rc; /* IDE port enable bits */ pci_read_config_byte(pdev, 0x60, &pcicfg); /* Check if the ATA ports are enabled */ if ((pcicfg & 3) == 0) return -ENODEV; ppi[0] = ppi[1] = &ata_dummy_port_info; if (pcicfg & 1) ppi[0] = π if (pcicfg & 2) ppi[1] = π if ((pcicfg & 0x40) == 0) { dev_warn(&pdev->dev, "DMA mode disabled. Enabling.\n"); pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); } pi.mwdma_mask = id->driver_data; host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); if (!host) return -ENOMEM; /* Perform set up for DMA */ if (pci_enable_device_io(pdev)) { printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n"); return -ENODEV; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n"); return -ENODEV; } if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n"); return -ENODEV; } /* Map IO ports and initialize host accordingly */ iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8); iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1); iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8); iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1); iomap[4] = pcim_iomap(pdev, 2, 0); if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4]) return -ENOMEM; ioaddr = &host->ports[0]->ioaddr; ioaddr->cmd_addr = iomap[0]; ioaddr->ctl_addr = iomap[1]; ioaddr->altstatus_addr = iomap[1]; ioaddr->bmdma_addr = iomap[4]; ata_sff_std_ports(ioaddr); 
ata_port_desc(host->ports[0], "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]); ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma"); ioaddr = &host->ports[1]->ioaddr; ioaddr->cmd_addr = iomap[2]; ioaddr->ctl_addr = iomap[3]; ioaddr->altstatus_addr = iomap[3]; ioaddr->bmdma_addr = iomap[4] + 8; ata_sff_std_ports(ioaddr); ata_port_desc(host->ports[1], "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]); ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma"); /* activate the host */ pci_set_master(pdev); rc = ata_host_start(host); if (rc) return rc; for (i = 0; i < 2; i++) { static const int irq[] = { 14, 15 }; struct ata_port *ap = host->ports[i]; if (ata_port_is_dummy(ap)) continue; rc = devm_request_irq(&pdev->dev, irq[ap->port_no], ata_bmdma_interrupt, 0, DRV_NAME, host); if (rc) return rc; ata_port_desc(ap, "irq %d", irq[i]); } return ata_host_register(host, &cs5520_sht); }
/*
 * cs5520_init_one - probe a CS5510/CS5520 controller (legacy
 * ata_probe_ent based registration).
 *
 * The chipset sits on the legacy ISA ports (0x1F0/0x3F6 and
 * 0x170/0x376) rather than behind PCI BARs, so the port plumbing is
 * done by hand instead of via the generic libata PCI helpers; only
 * the BMDMA register block comes from BAR2.
 *
 * Returns 0 if at least one port was registered, -ENODEV otherwise.
 */
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	u8 pcicfg;
	void __iomem *iomap[5];
	/*
	 * NOTE(review): static probe data -- assumes this probe is never
	 * run concurrently for two devices; confirm against the PCI core.
	 */
	static struct ata_probe_ent probe[2];
	int ports = 0;

	/* IDE port enable bits */
	pci_read_config_byte(dev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	if ((pcicfg & 0x40) == 0) {
		printk(KERN_WARNING DRV_NAME ": DMA mode disabled. Enabling.\n");
		pci_write_config_byte(dev, 0x60, pcicfg | 0x40);
	}

	/* Perform set up for DMA */
	if (pci_enable_device_bars(dev, 1<<2)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}
	pci_set_master(dev);
	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports */
	iomap[0] = devm_ioport_map(&dev->dev, 0x1F0, 8);
	iomap[1] = devm_ioport_map(&dev->dev, 0x3F6, 1);
	iomap[2] = devm_ioport_map(&dev->dev, 0x170, 8);
	iomap[3] = devm_ioport_map(&dev->dev, 0x376, 1);
	iomap[4] = pcim_iomap(dev, 2, 0);	/* BMDMA registers via BAR2 */

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	/* We have to do our own plumbing as the PCI setup for this
	   chipset is non-standard so we can't punt to the libata code */

	INIT_LIST_HEAD(&probe[0].node);
	probe[0].dev = pci_dev_to_dev(dev);
	probe[0].port_ops = &cs5520_port_ops;
	probe[0].sht = &cs5520_sht;
	probe[0].pio_mask = 0x1F;
	probe[0].mwdma_mask = id->driver_data;
	probe[0].irq = 14;	/* legacy primary channel IRQ */
	probe[0].irq_flags = 0;
	probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
	probe[0].n_ports = 1;
	probe[0].port[0].cmd_addr = iomap[0];
	probe[0].port[0].ctl_addr = iomap[1];
	probe[0].port[0].altstatus_addr = iomap[1];
	probe[0].port[0].bmdma_addr = iomap[4];

	/* The secondary lurks at different addresses but is otherwise the
	   same beastie */

	probe[1] = probe[0];
	INIT_LIST_HEAD(&probe[1].node);
	probe[1].irq = 15;	/* legacy secondary channel IRQ */
	probe[1].port[0].cmd_addr = iomap[2];
	probe[1].port[0].ctl_addr = iomap[3];
	probe[1].port[0].altstatus_addr = iomap[3];
	probe[1].port[0].bmdma_addr = iomap[4] + 8;

	/* Let libata fill in the port details */
	ata_std_ports(&probe[0].port[0]);
	ata_std_ports(&probe[1].port[0]);

	/* Now add the ports that are active */
	if (pcicfg & 1)
		ports += ata_device_add(&probe[0]);
	if (pcicfg & 2)
		ports += ata_device_add(&probe[1]);
	if (ports)
		return 0;
	return -ENODEV;
}
/**
 * __pata_platform_probe - attach a platform interface
 * @dev: device
 * @io_res: Resource representing I/O base
 * @ctl_res: Resource representing CTL base
 * @irq_res: Resource representing IRQ and its flags
 * @ioport_shift: I/O port shift
 * @__pio_mask: PIO mask
 *
 * Register a platform bus IDE interface. Such interfaces are PIO and we
 * assume do not support IRQ sharing.
 *
 * Platform devices are expected to contain at least 2 resources per port:
 *
 *	- I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
 *	- CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
 *
 * and optionally:
 *
 *	- IRQ	(IORESOURCE_IRQ)
 *
 * If the base resources are both mem types, the ioremap() is handled
 * here. For IORESOURCE_IO, it's assumed that there's no remapping
 * necessary.
 *
 * If no IRQ resource is present, PIO polling mode is used instead.
 */
int __devinit __pata_platform_probe(struct device *dev,
				    struct resource *io_res,
				    struct resource *ctl_res,
				    struct resource *irq_res,
				    unsigned int ioport_shift,
				    int __pio_mask)
{
	struct ata_host *host;
	struct ata_port *ap;
	unsigned int mmio;
	int irq = 0;
	int irq_flags = 0;

	/*
	 * Check for MMIO
	 *
	 * NOTE(review): this compares flags with '==' rather than testing
	 * the IORESOURCE_MEM bit, so a resource carrying additional flag
	 * bits is treated as port I/O -- confirm this matches the callers.
	 */
	mmio = (( io_res->flags == IORESOURCE_MEM) &&
		(ctl_res->flags == IORESOURCE_MEM));

	/*
	 * And the IRQ
	 */
	if (irq_res && irq_res->start > 0) {
		irq = irq_res->start;
		irq_flags = irq_res->flags;
	}

	/*
	 * Now that that's out of the way, wire up the port..
	 */
	host = ata_host_alloc(dev, 1);
	if (!host)
		return -ENOMEM;
	ap = host->ports[0];

	ap->ops = &pata_platform_port_ops;
	ap->pio_mask = __pio_mask;
	ap->flags |= ATA_FLAG_SLAVE_POSS;

	/*
	 * Use polling mode if there's no IRQ
	 */
	if (!irq) {
		ap->flags |= ATA_FLAG_PIO_POLLING;
		ata_port_desc(ap, "no IRQ, using PIO polling");
	}

	/*
	 * Handle the MMIO case
	 */
	if (mmio) {
		ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start,
				io_res->end - io_res->start + 1);
		ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start,
				ctl_res->end - ctl_res->start + 1);
	} else {
		ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start,
				io_res->end - io_res->start + 1);
		ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start,
				ctl_res->end - ctl_res->start + 1);
	}
	if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
		dev_err(dev, "failed to map IO/CTL base\n");
		return -ENOMEM;
	}

	/* Alt-status shares the control block address */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;

	pata_platform_setup_port(&ap->ioaddr, ioport_shift);

	ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport",
		      (unsigned long long)io_res->start,
		      (unsigned long long)ctl_res->start);

	/* activate */
	return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
				 irq_flags, &pata_platform_sht);
}
/**
 * pata_platform_probe - attach a platform interface
 * @pdev: platform device
 *
 * Register a platform bus IDE interface. Such interfaces are PIO and we
 * assume do not support IRQ sharing.
 *
 * Platform devices are expected to contain at least 2 resources per port:
 *
 *	- I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
 *	- CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
 *
 * and optionally:
 *
 *	- IRQ	(IORESOURCE_IRQ)
 *
 * If the base resources are both mem types, the ioremap() is handled
 * here. For IORESOURCE_IO, it's assumed that there's no remapping
 * necessary.
 *
 * If no IRQ resource is present, PIO polling mode is used instead.
 */
static int __devinit pata_platform_probe(struct platform_device *pdev)
{
	struct resource *io_res, *ctl_res;
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_platform_info *pp_info;
	unsigned int mmio;
	int irq;

	/*
	 * Simple resource validation ..
	 */
	if ((pdev->num_resources != 3) && (pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the I/O base first
	 */
	io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (io_res == NULL) {
		io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (unlikely(io_res == NULL))
			return -EINVAL;
	}

	/*
	 * Then the CTL base
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
	if (ctl_res == NULL) {
		ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (unlikely(ctl_res == NULL))
			return -EINVAL;
	}

	/*
	 * Check for MMIO
	 *
	 * NOTE(review): '==' comparison rather than an IORESOURCE_MEM bit
	 * test -- resources with extra flag bits fall through to port I/O.
	 */
	mmio = (( io_res->flags == IORESOURCE_MEM) &&
		(ctl_res->flags == IORESOURCE_MEM));

	/*
	 * And the IRQ
	 */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		irq = 0;	/* no irq */

	/*
	 * Now that that's out of the way, wire up the port..
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;
	ap = host->ports[0];

	ap->ops = &pata_platform_port_ops;
	ap->pio_mask = pio_mask;
	ap->flags |= ATA_FLAG_SLAVE_POSS;

	/*
	 * Use polling mode if there's no IRQ
	 */
	if (!irq) {
		ap->flags |= ATA_FLAG_PIO_POLLING;
		ata_port_desc(ap, "no IRQ, using PIO polling");
	}

	/*
	 * Handle the MMIO case
	 */
	if (mmio) {
		ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, io_res->start,
				io_res->end - io_res->start + 1);
		ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
				ctl_res->end - ctl_res->start + 1);
	} else {
		ap->ioaddr.cmd_addr = devm_ioport_map(&pdev->dev, io_res->start,
				io_res->end - io_res->start + 1);
		ap->ioaddr.ctl_addr = devm_ioport_map(&pdev->dev, ctl_res->start,
				ctl_res->end - ctl_res->start + 1);
	}
	if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
		dev_err(&pdev->dev, "failed to map IO/CTL base\n");
		return -ENOMEM;
	}

	/* Alt-status shares the control block address */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;

	pp_info = pdev->dev.platform_data;
	pata_platform_setup_port(&ap->ioaddr, pp_info);

	ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport",
		      (unsigned long long)io_res->start,
		      (unsigned long long)ctl_res->start);

	/* activate */
	return ata_host_activate(host, irq, irq ? ata_interrupt : NULL,
				 pp_info ? pp_info->irq_flags : 0,
				 &pata_platform_sht);
}
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { static const unsigned int cmd_port[] = { 0x1F0, 0x170 }; static const unsigned int ctl_port[] = { 0x3F6, 0x376 }; struct ata_port_info pi = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .port_ops = &cs5520_port_ops, }; const struct ata_port_info *ppi[2]; u8 pcicfg; void __iomem *iomap[5]; struct ata_host *host; struct ata_ioports *ioaddr; int i, rc; rc = pcim_enable_device(pdev); if (rc) return rc; /* IDE port enable bits */ pci_read_config_byte(pdev, 0x60, &pcicfg); /* Check if the ATA ports are enabled */ if ((pcicfg & 3) == 0) return -ENODEV; ppi[0] = ppi[1] = &ata_dummy_port_info; if (pcicfg & 1) ppi[0] = π if (pcicfg & 2) ppi[1] = π if ((pcicfg & 0x40) == 0) { dev_printk(KERN_WARNING, &pdev->dev, "DMA mode disabled. Enabling.\n"); pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); } pi.mwdma_mask = id->driver_data; host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); if (!host) return -ENOMEM; /* Perform set up for DMA */ if (pci_enable_device_bars(pdev, 1<<2)) { printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n"); return -ENODEV; } if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n"); return -ENODEV; } if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n"); return -ENODEV; } /* Map IO ports and initialize host accordingly */ iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8); iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1); iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8); iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1); iomap[4] = pcim_iomap(pdev, 2, 0); if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4]) return -ENOMEM; ioaddr = &host->ports[0]->ioaddr; ioaddr->cmd_addr = iomap[0]; ioaddr->ctl_addr = iomap[1]; ioaddr->altstatus_addr = iomap[1]; ioaddr->bmdma_addr = iomap[4]; 
ata_sff_std_ports(ioaddr); ata_port_desc(host->ports[0], "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]); ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma"); ioaddr = &host->ports[1]->ioaddr; ioaddr->cmd_addr = iomap[2]; ioaddr->ctl_addr = iomap[3]; ioaddr->altstatus_addr = iomap[3]; ioaddr->bmdma_addr = iomap[4] + 8; ata_sff_std_ports(ioaddr); ata_port_desc(host->ports[1], "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]); ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma"); /* activate the host */ pci_set_master(pdev); rc = ata_host_start(host); if (rc) return rc; for (i = 0; i < 2; i++) { static const int irq[] = { 14, 15 }; struct ata_port *ap = host->ports[i]; if (ata_port_is_dummy(ap)) continue; rc = devm_request_irq(&pdev->dev, irq[ap->port_no], ata_sff_interrupt, 0, DRV_NAME, host); if (rc) return rc; ata_port_desc(ap, "irq %d", irq[i]); } return ata_host_register(host, &cs5520_sht); } #ifdef CONFIG_PM /** * cs5520_reinit_one - device resume * @pdev: PCI device * * Do any reconfiguration work needed by a resume from RAM. We need * to restore DMA mode support on BIOSen which disabled it */ static int cs5520_reinit_one(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); u8 pcicfg; int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; pci_read_config_byte(pdev, 0x60, &pcicfg); if ((pcicfg & 0x40) == 0) pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); ata_host_resume(host); return 0; } /** * cs5520_pci_device_suspend - device suspend * @pdev: PCI device * * We have to cut and waste bits from the standard method because * the 5520 is a bit odd and not just a pure ATA device. As a result * we must not disable it. The needed code is short and this avoids * chip specific mess in the core code. 
*/ static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc = 0; rc = ata_host_suspend(host, mesg); if (rc) return rc; pci_save_state(pdev); return 0; } #endif /* CONFIG_PM */ /* For now keep DMA off. We can set it for all but A rev CS5510 once the core ATA code can handle it */ static const struct pci_device_id pata_cs5520[] = { { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), }, { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), }, { }, }; static struct pci_driver cs5520_pci_driver = { .name = DRV_NAME, .id_table = pata_cs5520, .probe = cs5520_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = cs5520_pci_device_suspend, .resume = cs5520_reinit_one, #endif }; static int __init cs5520_init(void) { return pci_register_driver(&cs5520_pci_driver); } static void __exit cs5520_exit(void) { pci_unregister_driver(&cs5520_pci_driver); }
/*
 * snd_sc6000_probe - probe and register a Gallant SC-6000 sound card
 * @devptr: underlying device (used for devm mappings and drvdata)
 * @dev: card index into the module parameter arrays
 *
 * Resolves auto IRQ/DMA selections, reserves and maps the SC-6000 and
 * MSS port ranges, initializes the board, then creates the AD1848 PCM,
 * mixers, optional OPL3 and optional MPU-401 components before
 * registering the card.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __devinit snd_sc6000_probe(struct device *devptr, unsigned int dev)
{
	static int possible_irqs[] = { 5, 7, 9, 10, 11, -1 };
	static int possible_dmas[] = { 1, 3, 0, -1 };
	int err;
	int xirq = irq[dev];
	int xdma = dma[dev];
	struct snd_card *card;
	struct snd_ad1848 *chip;
	struct snd_opl3 *opl3;
	char __iomem *vport;
	char __iomem *vmss_port;

	card = snd_card_new(index[dev], id[dev], THIS_MODULE, 0);
	if (!card)
		return -ENOMEM;

	if (xirq == SNDRV_AUTO_IRQ) {
		xirq = snd_legacy_find_free_irq(possible_irqs);
		if (xirq < 0) {
			snd_printk(KERN_ERR PFX "unable to find a free IRQ\n");
			err = -EBUSY;
			goto err_exit;
		}
	}
	if (xdma == SNDRV_AUTO_DMA) {
		xdma = snd_legacy_find_free_dma(possible_dmas);
		if (xdma < 0) {
			snd_printk(KERN_ERR PFX "unable to find a free DMA\n");
			err = -EBUSY;
			goto err_exit;
		}
	}

	if (!request_region(port[dev], 0x10, DRV_NAME)) {
		snd_printk(KERN_ERR PFX
			   "I/O port region is already in use.\n");
		err = -EBUSY;
		goto err_exit;
	}
	vport = devm_ioport_map(devptr, port[dev], 0x10);
	if (!vport) {
		snd_printk(KERN_ERR PFX
			   "I/O port cannot be iomaped.\n");
		err = -EBUSY;
		goto err_unmap1;
	}

	/* to make it marked as used */
	if (!request_region(mss_port[dev], 4, DRV_NAME)) {
		snd_printk(KERN_ERR PFX
			   "SC-6000 port I/O port region is already in use.\n");
		err = -EBUSY;
		goto err_unmap1;
	}
	vmss_port = devm_ioport_map(devptr, mss_port[dev], 4);
	/*
	 * BUG FIX: the original re-tested vport here (copy-paste), so a
	 * failed MSS mapping went undetected and a NULL vmss_port was
	 * passed to sc6000_init_board().
	 */
	if (!vmss_port) {
		snd_printk(KERN_ERR PFX
			   "MSS port I/O cannot be iomaped.\n");
		err = -EBUSY;
		goto err_unmap2;
	}

	snd_printd("Initializing BASE[0x%lx] IRQ[%d] DMA[%d] MIRQ[%d]\n",
		   port[dev], xirq, xdma,
		   mpu_irq[dev] == SNDRV_AUTO_IRQ ? 0 : mpu_irq[dev]);

	err = sc6000_init_board(vport, xirq, xdma, vmss_port, mpu_irq[dev]);
	if (err < 0)
		goto err_unmap2;

	err = snd_ad1848_create(card, mss_port[dev] + 4, xirq, xdma,
				AD1848_HW_DETECT, &chip);
	if (err < 0)
		goto err_unmap2;
	card->private_data = chip;

	err = snd_ad1848_pcm(chip, 0, NULL);
	if (err < 0) {
		snd_printk(KERN_ERR PFX
			   "error creating new ad1848 PCM device\n");
		goto err_unmap2;
	}
	err = snd_ad1848_mixer(chip);
	if (err < 0) {
		snd_printk(KERN_ERR PFX "error creating new ad1848 mixer\n");
		goto err_unmap2;
	}
	err = snd_sc6000_mixer(chip);
	if (err < 0) {
		snd_printk(KERN_ERR PFX "the mixer rewrite failed\n");
		goto err_unmap2;
	}

	/* OPL3 is optional; failure to find it is not fatal */
	if (snd_opl3_create(card, 0x388, 0x388 + 2, OPL3_HW_AUTO, 0, &opl3) < 0) {
		snd_printk(KERN_ERR PFX "no OPL device at 0x%x-0x%x ?\n",
			   0x388, 0x388 + 2);
	} else {
		err = snd_opl3_timer_new(opl3, 0, 1);
		if (err < 0)
			goto err_unmap2;
		err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
		if (err < 0)
			goto err_unmap2;
	}

	if (mpu_port[dev] != SNDRV_AUTO_PORT) {
		if (mpu_irq[dev] == SNDRV_AUTO_IRQ)
			mpu_irq[dev] = -1;
		/* MPU-401 is optional too; warn but continue on failure */
		if (snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401,
					mpu_port[dev], 0, mpu_irq[dev],
					IRQF_DISABLED, NULL) < 0)
			snd_printk(KERN_ERR "no MPU-401 device at 0x%lx ?\n",
				   mpu_port[dev]);
	}

	strcpy(card->driver, DRV_NAME);
	strcpy(card->shortname, "SC-6000");
	sprintf(card->longname, "Gallant SC-6000 at 0x%lx, irq %d, dma %d",
		mss_port[dev], xirq, xdma);

	snd_card_set_dev(card, devptr);

	err = snd_card_register(card);
	if (err < 0)
		goto err_unmap2;

	dev_set_drvdata(devptr, card);
	return 0;

err_unmap2:
	release_region(mss_port[dev], 4);
err_unmap1:
	release_region(port[dev], 0x10);
err_exit:
	snd_card_free(card);
	return err;
}
static int plat_ide_probe(struct platform_device *pdev) { struct resource *res_base, *res_alt, *res_irq; void __iomem *base, *alt_base; struct pata_platform_info *pdata; struct ide_host *host; int ret = 0, mmio = 0; struct ide_hw hw, *hws[] = { &hw }; struct ide_port_info d = platform_ide_port_info; pdata = pdev->dev.platform_data; /* get a pointer to the register memory */ res_base = platform_get_resource(pdev, IORESOURCE_IO, 0); res_alt = platform_get_resource(pdev, IORESOURCE_IO, 1); if (!res_base || !res_alt) { res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); res_alt = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res_base || !res_alt) { ret = -ENOMEM; goto out; } mmio = 1; } res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_irq) { ret = -EINVAL; goto out; } if (mmio) { printk(KERN_EMERG "base addr %p\n",res_base->start); base = devm_ioremap(&pdev->dev, res_base->start, resource_size(res_base)); printk(KERN_EMERG "ctrl addr %p\n",res_alt->start); alt_base = devm_ioremap(&pdev->dev, res_alt->start, resource_size(res_alt)); printk(KERN_EMERG "map base %p\n",base); printk(KERN_EMERG "map ctrl %p\n",alt_base); } else { base = devm_ioport_map(&pdev->dev, res_base->start, resource_size(res_base)); alt_base = devm_ioport_map(&pdev->dev, res_alt->start, resource_size(res_alt)); printk(KERN_EMERG "base addr %p\n",res_base->start); printk(KERN_EMERG "ctrl addr %p\n",res_alt->start); printk(KERN_EMERG "map base %p\n",base); printk(KERN_EMERG "map ctrl %p\n",alt_base); } memset(&hw, 0, sizeof(hw)); plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); hw.dev = &pdev->dev; d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; #ifdef CONFIG_MACH_KS8695_VSOPENRISC d.irq_flags |= IRQF_SHARED; #else if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE) d.irq_flags |= IRQF_SHARED; #endif if (mmio) d.host_flags |= IDE_HFLAG_MMIO; ret = ide_host_add(&d, hws, 1, &host); if (ret) goto out; platform_set_drvdata(pdev, host); return 0; out: return 
ret; }
/*
 * tqmx86_probe - probe the TQMx86 PLD and register its MFD cells.
 *
 * Maps the fixed PLD I/O window, identifies the board, optionally
 * programs the GPIO interrupt routing (verified by read-back) and
 * registers the child MFD devices.  Returns 0 on success or a
 * negative errno.
 */
static int tqmx86_probe(struct platform_device *pdev)
{
	u8 board_id, rev, i2c_det, i2c_ien, io_ext_int_val;
	struct device *dev = &pdev->dev;
	u8 gpio_irq_cfg, readback;
	const char *board_name;
	void __iomem *io_base;
	int err;

	/* Translate the gpio_irq module parameter into the PLD routing
	 * field; only 0 (none), 7, 9 and 12 are valid choices. */
	switch (gpio_irq) {
	case 0:
		gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_NONE;
		break;
	case 7:
		gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_7;
		break;
	case 9:
		gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_9;
		break;
	case 12:
		gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_12;
		break;
	default:
		pr_err("tqmx86: Invalid GPIO IRQ (%d)\n", gpio_irq);
		return -EINVAL;
	}

	io_base = devm_ioport_map(dev, TQMX86_IOBASE, TQMX86_IOSIZE);
	if (!io_base)
		return -ENOMEM;

	board_id = ioread8(io_base + TQMX86_REG_BOARD_ID);
	board_name = tqmx86_board_id_to_name(board_id);
	rev = ioread8(io_base + TQMX86_REG_BOARD_REV);

	dev_info(dev,
		 "Found %s - Board ID %d, PCB Revision %d, PLD Revision %d\n",
		 board_name, board_id, rev >> 4, rev & 0xf);

	i2c_det = ioread8(io_base + TQMX86_REG_I2C_DETECT);
	/* NOTE(review): i2c_ien is read but never used below -- confirm
	 * whether the register read has a side effect or is dead code. */
	i2c_ien = ioread8(io_base + TQMX86_REG_I2C_INT_EN);

	if (gpio_irq_cfg) {
		io_ext_int_val =
			gpio_irq_cfg << TQMX86_REG_IO_EXT_INT_GPIO_SHIFT;
		iowrite8(io_ext_int_val, io_base + TQMX86_REG_IO_EXT_INT);
		/* Read back to verify the PLD accepted the routing */
		readback = ioread8(io_base + TQMX86_REG_IO_EXT_INT);
		if (readback != io_ext_int_val) {
			dev_warn(dev, "GPIO interrupts not supported.\n");
			return -EINVAL;
		}

		/* Assumes the IRQ resource is first. */
		tqmx_gpio_resources[0].start = gpio_irq;
	}

	/* Board ID determines the I2C controller's input clock */
	ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id);

	if (i2c_det == TQMX86_REG_I2C_DETECT_SOFT) {
		err = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
					   tqmx86_i2c_soft_dev,
					   ARRAY_SIZE(tqmx86_i2c_soft_dev),
					   NULL, 0, NULL);
		if (err)
			return err;
	}

	return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
				    tqmx86_devs,
				    ARRAY_SIZE(tqmx86_devs),
				    NULL, 0, NULL);
}
/**
 * pata_platform_probe - attach a platform interface
 * @pdev: platform device
 *
 * Register a platform bus IDE interface. Such interfaces are PIO and we
 * assume do not support IRQ sharing.
 *
 * Platform devices are expected to contain 3 resources per port:
 *
 *	- I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
 *	- CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
 *	- IRQ	(IORESOURCE_IRQ)
 *
 * If the base resources are both mem types, the ioremap() is handled
 * here. For IORESOURCE_IO, it's assumed that there's no remapping
 * necessary.
 */
static int __devinit pata_platform_probe(struct platform_device *pdev)
{
	struct resource *io_res, *ctl_res;
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_platform_info *pp_info;
	unsigned int mmio;

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 3)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the I/O base first
	 */
	io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (io_res == NULL) {
		io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (unlikely(io_res == NULL))
			return -EINVAL;
	}

	/*
	 * Then the CTL base
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
	if (ctl_res == NULL) {
		ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (unlikely(ctl_res == NULL))
			return -EINVAL;
	}

	/*
	 * Check for MMIO
	 *
	 * NOTE(review): '==' comparison rather than an IORESOURCE_MEM bit
	 * test -- resources with extra flag bits fall through to port I/O.
	 */
	mmio = (( io_res->flags == IORESOURCE_MEM) &&
		(ctl_res->flags == IORESOURCE_MEM));

	/*
	 * Now that that's out of the way, wire up the port..
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;
	ap = host->ports[0];

	ap->ops = &pata_platform_port_ops;
	ap->pio_mask = pio_mask;
	ap->flags |= ATA_FLAG_SLAVE_POSS;

	/*
	 * Handle the MMIO case
	 */
	if (mmio) {
		ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, io_res->start,
				io_res->end - io_res->start + 1);
		ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
				ctl_res->end - ctl_res->start + 1);
	} else {
		ap->ioaddr.cmd_addr = devm_ioport_map(&pdev->dev, io_res->start,
				io_res->end - io_res->start + 1);
		ap->ioaddr.ctl_addr = devm_ioport_map(&pdev->dev, ctl_res->start,
				ctl_res->end - ctl_res->start + 1);
	}
	if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
		dev_err(&pdev->dev, "failed to map IO/CTL base\n");
		return -ENOMEM;
	}

	/* Alt-status shares the control block address */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;

	pp_info = (struct pata_platform_info *)(pdev->dev.platform_data);
	pata_platform_setup_port(&ap->ioaddr, pp_info);

	/* activate
	 *
	 * NOTE(review): platform_get_irq() can return a negative value on
	 * failure; it is passed to ata_host_activate() unchecked here and
	 * ata_interrupt is installed unconditionally -- confirm callers
	 * always provide a valid IRQ resource (num_resources == 3 implies
	 * one is present, but not that it is valid).
	 */
	return ata_host_activate(host, platform_get_irq(pdev, 0),
				 ata_interrupt,
				 pp_info ? pp_info->irq_flags : 0,
				 &pata_platform_sht);
}
static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct ata_port_info pi = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .port_ops = &cs5520_port_ops, }; const struct ata_port_info *ppi[2]; u8 pcicfg; void *iomap[5]; struct ata_host *host; struct ata_ioports *ioaddr; int i, rc; /* IDE port enable bits */ pci_read_config_byte(pdev, 0x60, &pcicfg); /* Check if the ATA ports are enabled */ if ((pcicfg & 3) == 0) return -ENODEV; ppi[0] = ppi[1] = &ata_dummy_port_info; if (pcicfg & 1) ppi[0] = π if (pcicfg & 2) ppi[1] = π if ((pcicfg & 0x40) == 0) { dev_printk(KERN_WARNING, &pdev->dev, "DMA mode disabled. Enabling.\n"); pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); } pi.mwdma_mask = id->driver_data; host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); if (!host) return -ENOMEM; /* Perform set up for DMA */ if (pci_enable_device_bars(pdev, 1<<2)) { printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n"); return -ENODEV; } if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n"); return -ENODEV; } if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n"); return -ENODEV; } /* Map IO ports and initialize host accordingly */ iomap[0] = devm_ioport_map(&pdev->dev, 0x1F0, 8); iomap[1] = devm_ioport_map(&pdev->dev, 0x3F6, 1); iomap[2] = devm_ioport_map(&pdev->dev, 0x170, 8); iomap[3] = devm_ioport_map(&pdev->dev, 0x376, 1); iomap[4] = pcim_iomap(pdev, 2, 0); if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4]) return -ENOMEM; ioaddr = &host->ports[0]->ioaddr; ioaddr->cmd_addr = iomap[0]; ioaddr->ctl_addr = iomap[1]; ioaddr->altstatus_addr = iomap[1]; ioaddr->bmdma_addr = iomap[4]; ata_std_ports(ioaddr); ioaddr = &host->ports[1]->ioaddr; ioaddr->cmd_addr = iomap[2]; ioaddr->ctl_addr = iomap[3]; ioaddr->altstatus_addr = iomap[3]; ioaddr->bmdma_addr = iomap[4] + 8; ata_std_ports(ioaddr); /* 
activate the host */ pci_set_master(pdev); rc = ata_host_start(host); if (rc) return rc; for (i = 0; i < 2; i++) { static const int irq[] = { 14, 15 }; struct ata_port *ap = host->ports[i]; if (ata_port_is_dummy(ap)) continue; rc = devm_request_irq(&pdev->dev, irq[ap->port_no], ata_interrupt, 0, DRV_NAME, host); if (rc) return rc; if (i == 0) host->irq = irq[0]; else host->irq2 = irq[1]; } return ata_host_register(host, &cs5520_sht); } /** * cs5520_remove_one - device unload * @pdev: PCI device being removed * * Handle an unplug/unload event for a PCI device. Unload the * PCI driver but do not use the default handler as we manage * resources ourself and *MUST NOT* disable the device as it has * other functions. */ static void __devexit cs5520_remove_one(struct pci_dev *pdev) { struct device *dev = pci_dev_to_dev(pdev); struct ata_host *host = dev_get_drvdata(dev); ata_host_detach(host); } #ifdef CONFIG_PM /** * cs5520_reinit_one - device resume * @pdev: PCI device * * Do any reconfiguration work needed by a resume from RAM. We need * to restore DMA mode support on BIOSen which disabled it */ static int cs5520_reinit_one(struct pci_dev *pdev) { u8 pcicfg; pci_read_config_byte(pdev, 0x60, &pcicfg); if ((pcicfg & 0x40) == 0) pci_write_config_byte(pdev, 0x60, pcicfg | 0x40); return ata_pci_device_resume(pdev); }
/*
 * plat_ide_probe - probe a platform-bus IDE interface (ide_host based).
 *
 * Resources 0 and 1 may be either IORESOURCE_IO (port I/O) or
 * IORESOURCE_MEM (memory-mapped); both must be of the same kind.
 * Maps the register blocks and registers an ide_host.  Returns 0 on
 * success or a negative errno.
 */
static int __devinit plat_ide_probe(struct platform_device *pdev)
{
	struct resource *res_base, *res_alt, *res_irq;
	void __iomem *base, *alt_base;
	struct pata_platform_info *pdata;
	struct ide_host *host;
	int ret = 0, mmio = 0;
	struct ide_hw hw, *hws[] = { &hw };
	struct ide_port_info d = platform_ide_port_info;

	pdata = pdev->dev.platform_data;

	/* get a pointer to the register memory */
	res_base = platform_get_resource(pdev, IORESOURCE_IO, 0);
	res_alt = platform_get_resource(pdev, IORESOURCE_IO, 1);

	if (!res_base || !res_alt) {
		/* Fall back to memory-mapped registers */
		res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		res_alt = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res_base || !res_alt) {
			ret = -ENOMEM;
			goto out;
		}
		mmio = 1;
	}

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res_irq) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * NOTE(review): mapping results are not checked for NULL before
	 * being handed to plat_ide_setup_ports() -- confirm whether a
	 * failure here is possible on the supported platforms.
	 */
	if (mmio) {
		base = devm_ioremap(&pdev->dev,
			res_base->start, resource_size(res_base));
		alt_base = devm_ioremap(&pdev->dev,
			res_alt->start, resource_size(res_alt));
	} else {
		base = devm_ioport_map(&pdev->dev,
			res_base->start, resource_size(res_base));
		alt_base = devm_ioport_map(&pdev->dev,
			res_alt->start, resource_size(res_alt));
	}

	memset(&hw, 0, sizeof(hw));
	plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
	hw.dev = &pdev->dev;

	d.irq_flags = 0;
	if (mmio)
		d.host_flags |= IDE_HFLAG_MMIO;

	ret = ide_host_add(&d, hws, 1, &host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, host);

	return 0;

out:
	return ret;
}