static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	static const struct ata_port_info info_82c700 = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.port_ops = &optidma_port_ops
	};
	static const struct ata_port_info info_82c700_udma = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA2,
		.port_ops = &optiplus_port_ops
	};
	const struct ata_port_info *ppi[] = { &info_82c700, NULL };
	static int printed_version;
	int rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(dev);
	if (rc)
		return rc;

	/* Fixed location chipset magic */
	inw(0x1F1);
	inw(0x1F1);
	pci_clock = inb(0x1F5) & 1;	/* 0 = 33MHz, 1 = 25MHz */

	if (optiplus_with_udma(dev))
		ppi[0] = &info_82c700_udma;

	return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL);
}
/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @e_info: including detailed error information such as the ID
 *
 * Return true if found.
 *
 * Invoked by DPC when error is detected at the Root Port.
 * Caller of this function must set id, severity, and multi_error_valid of
 * struct aer_err_info pointed by @e_info properly.  This function must fill
 * e_info->error_dev_num and e_info->dev[], based on the given information.
 */
static bool find_source_device(struct pci_dev *parent,
			       struct aer_err_info *e_info)
{
	struct pci_dev *dev = parent;
	int result;

	/* Must reset in this function */
	e_info->error_dev_num = 0;

	/* Is Root Port an agent that sends error message? */
	result = find_device_iter(dev, e_info);
	if (result)
		return true;

	pci_walk_bus(parent->subordinate, find_device_iter, e_info);

	if (!e_info->error_dev_num) {
		dev_printk(KERN_DEBUG, &parent->dev,
			   "can't find device of ID%04x\n", e_info->id);
		return false;
	}
	return true;
}
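/*
 * Illustrative sketch (not from the driver above): pci_walk_bus() invokes a
 * callback for every device below the given bus and stops early when the
 * callback returns a non-zero value.  The helper below is a hypothetical
 * example of such a callback that counts devices of one vendor; the names
 * count_vendor_matches/match_ctx are assumptions for illustration only.
 */
struct match_ctx {
	u16 vendor;
	int count;
};

static int count_vendor_matches(struct pci_dev *dev, void *data)
{
	struct match_ctx *ctx = data;

	if (dev->vendor == ctx->vendor)
		ctx->count++;
	return 0;	/* keep walking; non-zero would stop the walk */
}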
void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
			      const char *dbgname)
{
	struct resource *res;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ptr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ptr)) {
		dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
			PTR_ERR(ptr));
		return ptr;
	}

	if (reglog)
		dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
			   dbgname, ptr, (size_t)resource_size(res));

	return ptr;
}
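/*
 * Hypothetical usage sketch for the helper above: a caller in a probe path
 * maps a named MEM resource and propagates the error pointer on failure.
 * The function name example_probe and the resource name "core" are
 * assumptions for illustration, not part of the driver.
 */
static int example_probe(struct platform_device *pdev)
{
	void __iomem *mmio = etnaviv_ioremap(pdev, "core", "core");

	if (IS_ERR(mmio))
		return PTR_ERR(mmio);
	/* ... program registers through mmio ... */
	return 0;
}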
static int ns87415_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.port_ops = &ns87415_pata_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	int rc;
#if defined(CONFIG_SUPERIO)
	static const struct ata_port_info info87560 = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.port_ops = &ns87560_pata_ops,
	};

	if (PCI_SLOT(pdev->devfn) == 0x0E)
		ppi[0] = &info87560;
#endif
	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Select 512 byte sectors */
	pci_write_config_byte(pdev, 0x55, 0xEE);
	/* Select PIO0 8bit clocking */
	pci_write_config_byte(pdev, 0x54, 0xB7);

	return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL);
}
static void svia_configure(struct pci_dev *pdev, int board_id)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
		   (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channels (0x%x)\n", (int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel are sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel interrupts (0x%x)\n",
			   (int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel native mode (0x%x)\n",
			   (int)tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	/*
	 * vt6420/1 has problems talking to some drives.  The following
	 * is the fix from Joseph Chan <*****@*****.**>.
	 *
	 * When host issues HOLD, device may send up to 20DW of data
	 * before acknowledging it with HOLDA and the host should be
	 * able to buffer them in FIFO.  Unfortunately, some WD drives
	 * send up to 40DW before acknowledging HOLD and, in the
	 * default configuration, this ends up overflowing vt6421's
	 * FIFO, making the controller abort the transaction with
	 * R_ERR.
	 *
	 * Rx52[2] is the internal 128DW FIFO Flow control watermark
	 * adjusting mechanism enable bit and the default value 0
	 * means host will issue HOLD to device when the left FIFO
	 * size goes below 32DW.  Setting it to 1 makes the watermark
	 * 64DW.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
	 * http://article.gmane.org/gmane.linux.ide/46352
	 * http://thread.gmane.org/gmane.linux.kernel/1062139
	 */
	if (board_id == vt6420 || board_id == vt6421) {
		pci_read_config_byte(pdev, 0x52, &tmp8);
		tmp8 |= 1 << 2;
		pci_write_config_byte(pdev, 0x52, tmp8);
	}
}
static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
				 const struct pci_device_id *id)
{
	unsigned char temp;

	if ((PIIX4_dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
	    (PIIX4_dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5))
		srvrworks_csb5_delay = 1;

	/* On some motherboards, it was reported that accessing the SMBus
	   caused severe hardware problems */
	if (dmi_check_system(piix4_dmi_blacklist)) {
		dev_err(&PIIX4_dev->dev,
			"Accessing the SMBus on this system is unsafe!\n");
		return -EPERM;
	}

	/* Don't access SMBus on IBM systems which get corrupted eeproms */
	if (dmi_check_system(piix4_dmi_ibm) &&
	    PIIX4_dev->vendor == PCI_VENDOR_ID_INTEL) {
		dev_err(&PIIX4_dev->dev, "IBM system detected; this module "
			"may corrupt your serial eeprom! Refusing to load "
			"module!\n");
		return -EPERM;
	}

	/* Determine the address of the SMBus areas */
	if (force_addr) {
		piix4_smba = force_addr & 0xfff0;
		force = 0;
	} else {
		pci_read_config_word(PIIX4_dev, SMBBA, &piix4_smba);
		piix4_smba &= 0xfff0;
		if (piix4_smba == 0) {
			dev_err(&PIIX4_dev->dev, "SMBus base address "
				"uninitialized - upgrade BIOS or use "
				"force_addr=0xaddr\n");
			return -ENODEV;
		}
	}

	if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
		return -ENODEV;

	if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
		dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
			piix4_smba);
		return -EBUSY;
	}

	pci_read_config_byte(PIIX4_dev, SMBHSTCFG, &temp);

	/* If force_addr is set, we program the new address here.  Just to make
	   sure, we disable the PIIX4 first. */
	if (force_addr) {
		pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp & 0xfe);
		pci_write_config_word(PIIX4_dev, SMBBA, piix4_smba);
		pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 0x01);
		dev_info(&PIIX4_dev->dev, "WARNING: SMBus interface set to "
			 "new address %04x!\n", piix4_smba);
	} else if ((temp & 1) == 0) {
		if (force) {
			/* This should never need to be done, but has been
			 * noted that many Dell machines have the SMBus
			 * interface on the PIIX4 disabled!? NOTE: This assumes
			 * I/O space and other allocations WERE done by the
			 * Bios!  Don't complain if your hardware does weird
			 * things after enabling this. :') Check for Bios
			 * updates before resorting to this.
			 */
			pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 1);
			dev_printk(KERN_NOTICE, &PIIX4_dev->dev,
				   "WARNING: SMBus interface has been "
				   "FORCEFULLY ENABLED!\n");
		} else {
			dev_err(&PIIX4_dev->dev,
				"Host SMBus controller not enabled!\n");
			release_region(piix4_smba, SMBIOSIZE);
			piix4_smba = 0;
			return -ENODEV;
		}
	}

	if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2))
		dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n");
	else if ((temp & 0x0E) == 0)
		dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n");
	else
		dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration "
			"(or code out of date)!\n");

	pci_read_config_byte(PIIX4_dev, SMBREV, &temp);
	dev_info(&PIIX4_dev->dev,
		 "SMBus Host Controller at 0x%x, revision %d\n",
		 piix4_smba, temp);

	return 0;
}
static int pm8001_task_exec(struct sas_task *task, const int num,
			    gfp_t gfp_flags, int is_tmf,
			    struct pm8001_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	struct pm8001_port *port = NULL;
	struct sas_task *t = task;
	struct pm8001_ccb_info *ccb;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	u32 n = num;
	unsigned long flags = 0, flags_libsas = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &t->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SATA_DEV)
			t->task_done(t);
		return 0;
	}

	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
	PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device\n"));
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	do {
		dev = t->dev;
		pm8001_dev = dev->lldd_dev;
		if (DEV_IS_GONE(pm8001_dev)) {
			if (pm8001_dev) {
				PM8001_IO_DBG(pm8001_ha,
					pm8001_printk("device %d not ready.\n",
						pm8001_dev->device_id));
			} else {
				PM8001_IO_DBG(pm8001_ha,
					pm8001_printk("device %016llx not "
						"ready.\n",
						SAS_ADDR(dev->sas_addr)));
			}
			rc = SAS_PHY_DOWN;
			goto out_done;
		}
		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
		if (!port->port_attached) {
			if (sas_protocol_ata(t->task_proto)) {
				struct task_status_struct *ts = &t->task_status;

				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;

				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				spin_unlock_irqrestore(dev->sata_dev.ap->lock,
						flags_libsas);
				t->task_done(t);
				spin_lock_irqsave(dev->sata_dev.ap->lock,
						flags_libsas);
				spin_lock_irqsave(&pm8001_ha->lock, flags);
				if (n > 1)
					t = list_entry(t->list.next,
							struct sas_task, list);
				continue;
			} else {
				struct task_status_struct *ts = &t->task_status;

				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				if (n > 1)
					t = list_entry(t->list.next,
							struct sas_task, list);
				continue;
			}
		}
		rc = pm8001_tag_alloc(pm8001_ha, &tag);
		if (rc)
			goto err_out;
		ccb = &pm8001_ha->ccb_info[tag];

		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = dma_map_sg(pm8001_ha->dev,
						t->scatter, t->num_scatter,
						t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out_tag;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		t->lldd_task = ccb;
		ccb->n_elem = n_elem;
		ccb->ccb_tag = tag;
		ccb->task = t;
		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SSP:
			if (is_tmf)
				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
					ccb, tmf);
			else
				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
			break;
		default:
			dev_printk(KERN_ERR, pm8001_ha->dev,
				"unknown sas_task proto: 0x%x\n",
				t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			PM8001_IO_DBG(pm8001_ha,
				pm8001_printk("rc is %x\n", rc));
			goto err_out_tag;
		}
		/* TODO: select normal or high priority */
		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);
		pm8001_dev->running_req++;
		if (n > 1)
			t = list_entry(t->list.next, struct sas_task, list);
	} while (--n);
	rc = 0;
	goto out_done;

err_out_tag:
	pm8001_tag_free(pm8001_ha, tag);
err_out:
	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
				t->data_dir);
out_done:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return rc;
}
static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	dev_printk(KERN_INFO, &dev->dev, "claimed by stub\n");
	return 0;
}
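/*
 * Illustrative sketch (assumption, not the stub driver's actual
 * registration): a probe like the one above is hooked up through a
 * struct pci_driver.  The 0x1234/0x5678 IDs and the example_ names are
 * placeholders for whatever device should be claimed and kept away from
 * other drivers.
 */
static const struct pci_device_id example_stub_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ }	/* terminate list */
};

static struct pci_driver example_stub_driver = {
	.name		= "example-stub",
	.id_table	= example_stub_ids,
	.probe		= pci_stub_probe,
};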
int asihpi_adapter_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *pci_id)
{
	int idx, nm;
	int adapter_index;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));
	memset(&pci, 0, sizeof(pci));	/* error path inspects all BAR slots */

	dev_printk(KERN_DEBUG, &pci_dev->dev,
		   "probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
		   pci_dev->device, pci_dev->subsystem_vendor,
		   pci_dev->subsystem_device, pci_dev->devfn);

	if (pci_enable_device(pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"pci_enable_device failed, disabling device\n");
		return -EIO;
	}
	pci_set_master(pci_dev);	/* also sets latency timer if < 16 */

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
				  HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
			  HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
			      &pci_dev->resource[idx]);

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			pci.ap_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
					memlen);
			if (!pci.ap_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					      "ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}
	}

	pci.pci_dev = pci_dev;
	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	adapter_index = hr.u.s.adapter_index;
	adapter.adapter = hpi_find_adapter(adapter_index);

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				      "HPI could not allocate "
				      "kernel buffer size %d\n",
				      prealloc_stream_buf);
			goto err;
		}
	}

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	/* WARNING can't init mutex in 'adapter'
	 * and then copy it to adapters[] ?!?!
	 */
	adapters[adapter_index] = adapter;
	mutex_init(&adapters[adapter_index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter_index]);

	dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
		 adapter.adapter->type, adapter_index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (pci.ap_mem_base[idx]) {
			iounmap(pci.ap_mem_base[idx]);
			pci.ap_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}
static int __devinit mpcore_wdt_probe(struct platform_device *dev)
{
	struct mpcore_wdt *wdt;
	struct resource *res;
	int ret;

	/* We only accept one device, and it must have an id of -1 */
	if (dev->id != -1)
		return -ENODEV;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto err_out;
	}

	wdt = kzalloc(sizeof(struct mpcore_wdt), GFP_KERNEL);
	if (!wdt) {
		ret = -ENOMEM;
		goto err_out;
	}

	wdt->dev = &dev->dev;
	wdt->irq = platform_get_irq(dev, 0);
	if (wdt->irq < 0) {
		ret = -ENXIO;
		goto err_free;
	}
	wdt->base = ioremap(res->start, res->end - res->start + 1);
	if (!wdt->base) {
		ret = -ENOMEM;
		goto err_free;
	}

	mpcore_wdt_miscdev.dev = &dev->dev;
	ret = misc_register(&mpcore_wdt_miscdev);
	if (ret) {
		dev_printk(KERN_ERR, wdt->dev,
			   "cannot register miscdev on minor=%d (err=%d)\n",
			   WATCHDOG_MINOR, ret);
		goto err_misc;
	}

	ret = request_irq(wdt->irq, mpcore_wdt_fire, SA_INTERRUPT,
			  "mpcore_wdt", wdt);
	if (ret) {
		dev_printk(KERN_ERR, wdt->dev,
			   "cannot register IRQ%d for watchdog\n", wdt->irq);
		goto err_irq;
	}

	mpcore_wdt_stop(wdt);
	platform_set_drvdata(dev, wdt);
	mpcore_wdt_dev = dev;

	return 0;

err_irq:
	misc_deregister(&mpcore_wdt_miscdev);
err_misc:
	iounmap(wdt->base);
err_free:
	kfree(wdt);
err_out:
	return ret;
}
static int __devinit cs5520_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
	static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
	struct ata_port_info pi = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.port_ops = &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_printk(KERN_WARNING, &pdev->dev,
			   "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_bars(pdev, 1 << 2)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR DRV_NAME
		       ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
	iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
	iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
	iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[0],
		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
	ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[1],
		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
	ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_sff_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		ata_port_desc(ap, "irq %d", irq[i]);
	}

	return ata_host_register(host, &cs5520_sht);
}

#ifdef CONFIG_PM
/**
 *	cs5520_reinit_one	-	device resume
 *	@pdev: PCI device
 *
 *	Do any reconfiguration work needed by a resume from RAM. We need
 *	to restore DMA mode support on BIOSen which disabled it
 */
static int cs5520_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	u8 pcicfg;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	pci_read_config_byte(pdev, 0x60, &pcicfg);
	if ((pcicfg & 0x40) == 0)
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);

	ata_host_resume(host);
	return 0;
}

/**
 *	cs5520_pci_device_suspend	-	device suspend
 *	@pdev: PCI device
 *
 *	We have to cut and waste bits from the standard method because
 *	the 5520 is a bit odd and not just a pure ATA device. As a result
 *	we must not disable it. The needed code is short and this avoids
 *	chip specific mess in the core code.
 */
static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	pci_save_state(pdev);
	return 0;
}
#endif /* CONFIG_PM */

/* For now keep DMA off. We can set it for all but A rev CS5510 once the
   core ATA code can handle it */
static const struct pci_device_id pata_cs5520[] = {
	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
	{ },
};

static struct pci_driver cs5520_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= pata_cs5520,
	.probe		= cs5520_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= cs5520_pci_device_suspend,
	.resume		= cs5520_reinit_one,
#endif
};

static int __init cs5520_init(void)
{
	return pci_register_driver(&cs5520_pci_driver);
}

static void __exit cs5520_exit(void)
{
	pci_unregister_driver(&cs5520_pci_driver);
}
static acpi_status setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	u64 start, orig_end, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
	} else
		return AE_OK;

	start = addr.minimum + addr.translation_offset;
	orig_end = end = addr.maximum + addr.translation_offset;

	/* Exclude non-addressable range or non-addressable portion of range */
	end = min(end, (u64)iomem_resource.end);
	if (end <= start) {
		dev_info(&info->bridge->dev,
			 "host bridge window [%#llx-%#llx] "
			 "(ignored, not CPU addressable)\n", start, orig_end);
		return AE_OK;
	} else if (orig_end != end) {
		dev_info(&info->bridge->dev,
			 "host bridge window [%#llx-%#llx] "
			 "([%#llx-%#llx] ignored, not CPU addressable)\n",
			 start, orig_end, end + 1, orig_end);
	}

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	info->res_num++;
	if (addr.translation_offset)
		dev_info(&info->bridge->dev,
			 "host bridge window %pR (PCI address [%#llx-%#llx])\n",
			 res, res->start - addr.translation_offset,
			 res->end - addr.translation_offset);
	else
		dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

	return AE_OK;
}
static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static int printed_version;
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
#if defined(CONFIG_PATA_HPT3X3_DMA)
		/* Further debug needed */
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA2,
#endif
		.port_ops = &hpt3x3_port_ops
	};
	/* Register offsets of taskfiles in BAR4 area */
	static const u8 offset_cmd[2] = { 0x20, 0x28 };
	static const u8 offset_ctl[2] = { 0x36, 0x3E };
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;
	int i, rc;
	void __iomem *base;

	hpt3x3_init_chipset(pdev);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Everything is relative to BAR4 if we set up this way */
	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	base = host->iomap[4];	/* Bus mastering base */

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = base + offset_cmd[i];
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = base + offset_ctl[i];
		ioaddr->scr_addr = NULL;
		ata_sff_std_ports(ioaddr);
		ioaddr->bmdma_addr = base + 8 * i;

		ata_port_pbar_desc(ap, 4, -1, "ioport");
		ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
	}
	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &hpt3x3_sht);
}
int asihpi_adapter_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *pci_id)
{
	int idx, nm, low_latency_mode = 0, irq_supported = 0;
	int adapter_index;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));
	memset(&pci, 0, sizeof(pci));	/* error path inspects all BAR slots */

	dev_printk(KERN_DEBUG, &pci_dev->dev,
		   "probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
		   pci_dev->device, pci_dev->subsystem_vendor,
		   pci_dev->subsystem_device, pci_dev->devfn);

	if (pci_enable_device(pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"pci_enable_device failed, disabling device\n");
		return -EIO;
	}
	pci_set_master(pci_dev);	/* also sets latency timer if < 16 */

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
				  HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
			  HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
			      &pci_dev->resource[idx]);

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			pci.ap_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
					memlen);
			if (!pci.ap_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					      "ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}
	}

	pci.pci_dev = pci_dev;
	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	adapter_index = hr.u.s.adapter_index;
	adapter.adapter = hpi_find_adapter(adapter_index);

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				      "HPI could not allocate "
				      "kernel buffer size %d\n",
				      prealloc_stream_buf);
			goto err;
		}
	}

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error) {
		HPI_DEBUG_LOG(ERROR, "HPI_ADAPTER_OPEN failed, aborting\n");
		goto err;
	}

	/* Check if current mode == Low Latency mode */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				  HPI_ADAPTER_GET_MODE);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	if (!hr.error &&
	    hr.u.ax.mode.adapter_mode == HPI_ADAPTER_MODE_LOW_LATENCY)
		low_latency_mode = 1;
	else
		dev_info(&pci_dev->dev,
			 "Adapter at index %d is not in low latency mode\n",
			 adapter.adapter->index);

	/* Check if IRQs are supported */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				  HPI_ADAPTER_GET_PROPERTY);
	hm.adapter_index = adapter.adapter->index;
	hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_SUPPORTS_IRQ;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error || !hr.u.ax.property_get.parameter1) {
		dev_info(&pci_dev->dev,
			 "IRQs not supported by adapter at index %d\n",
			 adapter.adapter->index);
	} else {
		irq_supported = 1;
	}

	/* WARNING can't init mutex in 'adapter'
	 * and then copy it to adapters[] ?!?!
	 */
	adapters[adapter_index] = adapter;
	mutex_init(&adapters[adapter_index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter_index]);

	if (low_latency_mode && irq_supported) {
		if (!adapter.adapter->irq_query_and_clear) {
			dev_err(&pci_dev->dev,
				"no IRQ handler for adapter %d, aborting\n",
				adapter.adapter->index);
			goto err;
		}

		/* Disable IRQ generation on DSP side by setting the rate to 0 */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
					  HPI_ADAPTER_SET_PROPERTY);
		hm.adapter_index = adapter.adapter->index;
		hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_IRQ_RATE;
		hm.u.ax.property_set.parameter1 = 0;
		hm.u.ax.property_set.parameter2 = 0;
		hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
		if (hr.error) {
			HPI_DEBUG_LOG(ERROR,
				      "HPI_ADAPTER_SET_PROPERTY failed, aborting\n");
			goto err;
		}

		/* Note: request_irq calls asihpi_isr here */
		if (request_irq(pci_dev->irq, asihpi_isr, IRQF_SHARED,
				"asihpi", &adapters[adapter_index])) {
			dev_err(&pci_dev->dev, "request_irq(%d) failed\n",
				pci_dev->irq);
			goto err;
		}
		adapters[adapter_index].interrupt_mode = 1;

		dev_info(&pci_dev->dev, "using irq %d\n", pci_dev->irq);
		adapters[adapter_index].irq = pci_dev->irq;
	} else {
		dev_info(&pci_dev->dev, "using polled mode\n");
	}

	dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
		 adapter.adapter->type, adapter_index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (pci.ap_mem_base[idx]) {
			iounmap(pci.ap_mem_base[idx]);
			pci.ap_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}
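/*
 * Illustrative sketch (assumption, not the driver's actual ISR): a handler
 * registered with IRQF_SHARED, as in the request_irq() call above, must
 * first ask the hardware whether this device raised the line and return
 * IRQ_NONE when it did not, so other devices sharing the IRQ get a chance
 * to claim it.  The body and the irq_query_and_clear calling convention
 * here are hypothetical.
 */
static irqreturn_t example_shared_isr(int irq, void *dev_id)
{
	struct hpi_adapter *a = dev_id;

	if (!a->adapter->irq_query_and_clear(a->adapter))
		return IRQ_NONE;	/* not ours; let other handlers run */

	/* acknowledge the hardware and queue the real work here */
	return IRQ_HANDLED;
}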
/** pcmcia_modify_configuration
 *
 * Modify a locked socket configuration
 */
int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
				modconf_t *mod)
{
	struct pcmcia_socket *s;
	config_t *c;
	int ret;

	s = p_dev->socket;

	mutex_lock(&s->ops_mutex);
	c = p_dev->function_config;

	if (!(s->state & SOCKET_PRESENT)) {
		dev_dbg(&s->dev, "No card present\n");
		ret = -ENODEV;
		goto unlock;
	}
	if (!(c->state & CONFIG_LOCKED)) {
		dev_dbg(&s->dev, "Configuration isn't locked\n");
		ret = -EACCES;
		goto unlock;
	}

	if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
		dev_dbg(&s->dev,
			"changing Vcc or IRQ is not allowed at this time\n");
		ret = -EINVAL;
		goto unlock;
	}

	/* We only allow changing Vpp1 and Vpp2 to the same value */
	if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
	    (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
		if (mod->Vpp1 != mod->Vpp2) {
			dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
			ret = -EINVAL;
			goto unlock;
		}
		s->socket.Vpp = mod->Vpp1;
		if (s->ops->set_socket(s, &s->socket)) {
			dev_printk(KERN_WARNING, &s->dev,
				   "Unable to set VPP\n");
			ret = -EIO;
			goto unlock;
		}
	} else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
		   (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
		dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (mod->Attributes & CONF_IO_CHANGE_WIDTH) {
		pccard_io_map io_off = { 0, 0, 0, 0, 1 };
		pccard_io_map io_on;
		int i;

		io_on.speed = io_speed;
		for (i = 0; i < MAX_IO_WIN; i++) {
			if (!s->io[i].res)
				continue;
			io_off.map = i;
			io_on.map = i;

			io_on.flags = MAP_ACTIVE | IO_DATA_PATH_WIDTH_8;
			io_on.start = s->io[i].res->start;
			io_on.stop = s->io[i].res->end;

			s->ops->set_io_map(s, &io_off);
			mdelay(40);
			s->ops->set_io_map(s, &io_on);
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&s->ops_mutex);

	return ret;
} /* modify_configuration */
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_port_info pi = sis_port_info;
	const struct ata_port_info *ppi[] = { &pi, &pi };
	struct ata_host *host;
	u32 genctl, val;
	u8 pmr;
	u8 port2_start = 0x20;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* check and see if the SCRs are in IO space or PCI cfg space */
	pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
	if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
		pi.flags |= SIS_FLAG_CFGSCR;

	/* if hardware thinks SCRs are in IO space, but there are
	 * no IO resources assigned, change to PCI cfg space.
	 */
	if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
	    ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
	     (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
		genctl &= ~GENCTL_IOMAPPED_SCR;
		pci_write_config_dword(pdev, SIS_GENCTL, genctl);
		pi.flags |= SIS_FLAG_CFGSCR;
	}

	pci_read_config_byte(pdev, SIS_PMR, &pmr);
	switch (ent->device) {
	case 0x0180:
	case 0x0181:

		/* The PATA-handling is provided by pata_sis */
		switch (pmr & 0x30) {
		case 0x10:
			ppi[1] = &sis_info133_for_sata;
			break;

		case 0x30:
			ppi[0] = &sis_info133_for_sata;
			break;
		}
		if ((pmr & SIS_PMR_COMBINED) == 0) {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 180/181/964 chipset in SATA mode\n");
			port2_start = 64;
		} else {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 180/181 chipset in combined mode\n");
			port2_start = 0;
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		}
		break;

	case 0x0182:
	case 0x0183:
		pci_read_config_dword(pdev, 0x6C, &val);
		if (val & (1L << 31)) {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 182/965 chipset\n");
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		} else {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 182/965L chipset\n");
		}
		break;

	case 0x1182:
		dev_printk(KERN_INFO, &pdev->dev,
			   "Detected SiS 1182/966/680 SATA controller\n");
		pi.flags |= ATA_FLAG_SLAVE_POSS;
		break;

	case 0x1183:
		dev_printk(KERN_INFO, &pdev->dev,
			   "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
		ppi[0] = &sis_info133_for_sata;
		ppi[1] = &sis_info133_for_sata;
		break;
	}

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->flags & ATA_FLAG_SATA &&
		    ap->flags & ATA_FLAG_SLAVE_POSS) {
			rc = ata_slave_link_init(ap);
			if (rc)
				return rc;
		}
	}

	if (!(pi.flags & SIS_FLAG_CFGSCR)) {
		void __iomem *mmio;

		rc = pcim_iomap_regions(pdev, 1 << SIS_SCR_PCI_BAR, DRV_NAME);
		if (rc)
			return rc;
		mmio = host->iomap[SIS_SCR_PCI_BAR];

		host->ports[0]->ioaddr.scr_addr = mmio;
		host->ports[1]->ioaddr.scr_addr = mmio + port2_start;
	}

	pci_set_master(pdev);
	pci_intx(pdev, 1);
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &sis_sht);
}
static int __devinit sil680_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	static int printed_version;
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	switch (sil680_init_chip(pdev, &try_mmio)) {
	case 0:
		ppi[0] = &info_slow;
		break;
	case 0x30:
		return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fallback to PIO if
	 * that fails
	 */
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_sff_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_sff_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &sil680_sht);

use_ioports:
	return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
			unsigned int num)
{
	struct resource *res;
	struct socket_data *s_data = s->resource_data;
	unsigned int i, j, bad;
	int any;
	u_char *b, hole, most;

	dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:",
		   base, base + num - 1);

	/* First, what does a floating port look like? */
	b = kzalloc(256, GFP_KERNEL);
	if (!b) {
		dev_printk(KERN_ERR, &s->dev,
			   "do_io_probe: unable to allocate 256 bytes");
		return;
	}
	for (i = base, most = 0; i < base + num; i += 8) {
		res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA IO probe");
		if (!res)
			continue;
		hole = inb(i);
		for (j = 1; j < 8; j++)
			if (inb(i + j) != hole)
				break;
		free_region(res);
		if ((j == 8) && (++b[hole] > b[most]))
			most = hole;
		if (b[most] == 127)
			break;
	}
	kfree(b);

	bad = any = 0;
	for (i = base; i < base + num; i += 8) {
		res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA IO probe");
		if (!res)
			continue;
		for (j = 0; j < 8; j++)
			if (inb(i + j) != most)
				break;
		free_region(res);
		if (j < 8) {
			if (!any)
				printk(" excluding");
			if (!bad)
				bad = any = i;
		} else {
			if (bad) {
				sub_interval(&s_data->io_db, bad, i - bad);
				printk(" %#x-%#x", bad, i - 1);
				bad = 0;
			}
		}
	}
	if (bad) {
		if ((num > 16) && (bad == base) && (i == base + num)) {
			printk(" nothing: probe failed.\n");
			return;
		} else {
			sub_interval(&s_data->io_db, bad, i - bad);
			printk(" %#x-%#x", bad, i - 1);
		}
	}

	printk(any ? "\n" : " clean.\n");
}
static int __devinit sil680_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.sht = &sil680_sht,
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sil680_port_ops
	};
	static const struct ata_port_info info_slow = {
		.sht = &sil680_sht,
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x07,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil680_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	static int printed_version;
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, try_mmio;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	switch (sil680_init_chip(pdev, &try_mmio)) {
	case 0:
		ppi[0] = &info_slow;
		break;
	case 0x30:
		return -ENODEV;
	}

	if (!try_mmio)
		goto use_ioports;

	/* Try to acquire MMIO resources and fallback to PIO if
	 * that fails
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
	if (rc)
		goto use_ioports;

	/* Allocate host and set it up */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);

	/* Setup DMA masks */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	pci_set_master(pdev);

	/* Get MMIO base and initialize port addresses */
	mmio_base = host->iomap[SIL680_MMIO_BAR];
	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
	ata_std_ports(&host->ports[0]->ioaddr);
	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
	ata_std_ports(&host->ports[1]->ioaddr);

	/* Register & activate */
	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
				 &sil680_sht);

use_ioports:
	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
static int sil680_reinit_one(struct pci_dev *pdev)
{
	int try_mmio;

	sil680_init_chip(pdev, &try_mmio);
	return ata_pci_device_resume(pdev);
}
#endif

static const struct pci_device_id sil680[] = {
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
	{ },
};

static struct pci_driver sil680_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sil680,
	.probe		= sil680_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= sil680_reinit_one,
#endif
};

static int __init sil680_init(void)
{
	return pci_register_driver(&sil680_pci_driver);
}

static void __exit sil680_exit(void)
{
	pci_unregister_driver(&sil680_pci_driver);
}
void cdv_intel_crt_init(struct drm_device *dev,
			struct psb_intel_mode_device *mode_dev)
{
	struct gma_connector *gma_connector;
	struct gma_encoder *gma_encoder;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	u32 i2c_reg;

	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
	if (!gma_encoder)
		return;

	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
	if (!gma_connector)
		goto failed_connector;

	connector = &gma_connector->base;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	drm_connector_init(dev, connector,
			   &cdv_intel_crt_connector_funcs,
			   DRM_MODE_CONNECTOR_VGA);

	encoder = &gma_encoder->base;
	drm_encoder_init(dev, encoder,
			 &cdv_intel_crt_enc_funcs,
			 DRM_MODE_ENCODER_DAC, NULL);

	gma_connector_attach_encoder(gma_connector, gma_encoder);

	/* Set up the DDC bus. */
	i2c_reg = GPIOA;
	/* Remove the following code for CDV */
	/*
	if (dev_priv->crt_ddc_bus != 0)
		i2c_reg = dev_priv->crt_ddc_bus;
	*/
	gma_encoder->ddc_bus = psb_intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
	if (!gma_encoder->ddc_bus) {
		dev_printk(KERN_ERR, &dev->pdev->dev,
			   "DDC bus registration failed.\n");
		goto failed_ddc;
	}

	gma_encoder->type = INTEL_OUTPUT_ANALOG;
	/*
	psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
	psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
	*/
	connector->interlace_allowed = 0;
	connector->doublescan_allowed = 0;

	drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
	drm_connector_helper_add(connector,
				 &cdv_intel_crt_connector_helper_funcs);

	drm_connector_register(connector);
	return;
failed_ddc:
	drm_encoder_cleanup(&gma_encoder->base);
	drm_connector_cleanup(&gma_connector->base);
	kfree(gma_connector);
failed_connector:
	kfree(gma_encoder);
	return;
}
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	struct iwl_bus *bus;
	struct iwl_pci_bus *pci_bus;
	u16 pci_cmd;
	int err;

	bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
	if (!bus) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "Couldn't allocate iwl_pci_bus");
		err = -ENOMEM;
		goto out_no_pci;
	}

	pci_bus = IWL_BUS_GET_PCI_BUS(bus);
	pci_bus->pci_dev = pdev;

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
		goto out_pci_disable_device;
	}

	pci_bus->hw_base = pci_iomap(pdev, 0, 0);
	if (!pci_bus->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_iomap failed");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", pci_bus->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed");
		goto out_iounmap;
	}

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	bus->dev = &pdev->dev;
	bus->irq = pdev->irq;
	bus->ops = &pci_ops;

	err = iwl_probe(bus, cfg);
	if (err)
		goto out_disable_msi;
	return 0;

out_disable_msi:
	pci_disable_msi(pdev);
out_iounmap:
	pci_iounmap(pdev, pci_bus->hw_base);
out_pci_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(bus);
	return err;
}
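/*
 * Side note, as a sketch: on kernels that provide
 * dma_set_mask_and_coherent(), the 36-bit-then-32-bit DMA mask fallback in
 * the probe above can be written more compactly.  This illustrates the same
 * pattern and is not code from this driver; the function name is a
 * placeholder.
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	int err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));

	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;	/* non-zero: no usable DMA addressing */
}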
static int artop_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static int printed_version;
	static struct ata_port_info info_6210 = {
		.sht		= &artop_sht,
		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.mwdma_mask	= 0x07,	/* mwdma0-2 */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &artop6210_ops,
	};
	static struct ata_port_info info_626x = {
		.sht		= &artop_sht,
		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.mwdma_mask	= 0x07,	/* mwdma0-2 */
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &artop6260_ops,
	};
	static struct ata_port_info info_626x_fast = {
		.sht		= &artop_sht,
		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.mwdma_mask	= 0x07,	/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &artop6260_ops,
	};
	struct ata_port_info *port_info[2];
	struct ata_port_info *info = NULL;
	int ports = 2;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	if (id->driver_data == 0) {	/* 6210 variant */
		info = &info_6210;
		/* BIOS may have left us in UDMA, clear it before libata probe */
		pci_write_config_byte(pdev, 0x54, 0);
		/* For the moment (also lacks dsc) */
		printk(KERN_WARNING "ARTOP 6210 requires serialize functionality not yet supported by libata.\n");
		printk(KERN_WARNING "Secondary ATA ports will not be activated.\n");
		ports = 1;
	} else if (id->driver_data == 1)	/* 6260 */
		info = &info_626x;
	else if (id->driver_data == 2) {	/* 6260 or 6260 + fast */
		unsigned long io = pci_resource_start(pdev, 4);
		u8 reg;

		info = &info_626x;
		if (inb(io) & 0x10)
			info = &info_626x_fast;
		/* Mac systems come up with some registers not set as we
		   will need them */

		/* Clear reset & test bits */
		pci_read_config_byte(pdev, 0x49, &reg);
		pci_write_config_byte(pdev, 0x49, reg & ~0x30);

		/* PCI latency must be > 0x80 for burst mode, tweak it
		 * if required.
		 */
		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
		if (reg <= 0x80)
			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);

		/* Enable IRQ output and burst mode */
		pci_read_config_byte(pdev, 0x4a, &reg);
		pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
	}

	BUG_ON(info == NULL);

	port_info[0] = port_info[1] = info;
	return ata_pci_init_one(pdev, port_info, ports);
}

static const struct pci_device_id artop_pci_tbl[] = {
	{ PCI_VDEVICE(ARTOP, 0x0005), 0 },
	{ PCI_VDEVICE(ARTOP, 0x0006), 1 },
	{ PCI_VDEVICE(ARTOP, 0x0007), 1 },
	{ PCI_VDEVICE(ARTOP, 0x0008), 2 },
	{ PCI_VDEVICE(ARTOP, 0x0009), 2 },
	{ }	/* terminate list */
};

static struct pci_driver artop_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= artop_pci_tbl,
	.probe		= artop_init_one,
	.remove		= ata_pci_remove_one,
};

static int __init artop_init(void)
{
	return pci_register_driver(&artop_pci_driver);
}

static void __exit artop_exit(void)
{
	pci_unregister_driver(&artop_pci_driver);
}

module_init(artop_init);
module_exit(artop_exit);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
MODULE_VERSION(DRV_VERSION);
/**
 * default_reset_link - default reset function
 * @dev: pointer to pci_dev data structure
 *
 * Invoked when performing link reset on a Downstream Port or a
 * Root Port with no aer driver.
 */
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
	pci_reset_bridge_secondary_bus(dev);
	dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
	return PCI_ERS_RESULT_RECOVERED;
}
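/*
 * For context, a sketch (not from this file): an endpoint driver opts into
 * AER recovery by filling in struct pci_error_handlers; after a link reset
 * like the one above, the core calls back into hooks of this shape.  The
 * example_ names and the minimal bodies are assumptions for illustration.
 */
static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
	/* re-enable the device and report whether recovery can proceed */
	return pci_enable_device(pdev) ? PCI_ERS_RESULT_DISCONNECT :
					 PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers example_err_handlers = {
	.error_detected	= example_error_detected,
	.slot_reset	= example_slot_reset,
};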
static int __devinit cs5520_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
	static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
	struct ata_port_info pi = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.port_ops = &cs5520_port_ops,
	};
	const struct ata_port_info *ppi[2];
	u8 pcicfg;
	void __iomem *iomap[5];
	struct ata_host *host;
	struct ata_ioports *ioaddr;
	int i, rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* IDE port enable bits */
	pci_read_config_byte(pdev, 0x60, &pcicfg);

	/* Check if the ATA ports are enabled */
	if ((pcicfg & 3) == 0)
		return -ENODEV;

	ppi[0] = ppi[1] = &ata_dummy_port_info;
	if (pcicfg & 1)
		ppi[0] = &pi;
	if (pcicfg & 2)
		ppi[1] = &pi;

	if ((pcicfg & 0x40) == 0) {
		dev_printk(KERN_WARNING, &pdev->dev,
			   "DMA mode disabled. Enabling.\n");
		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
	}

	pi.mwdma_mask = id->driver_data;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* Perform set up for DMA */
	if (pci_enable_device_io(pdev)) {
		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
		return -ENODEV;
	}
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR DRV_NAME
		       ": unable to configure consistent DMA mask.\n");
		return -ENODEV;
	}

	/* Map IO ports and initialize host accordingly */
	iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
	iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
	iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
	iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
	iomap[4] = pcim_iomap(pdev, 2, 0);

	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
		return -ENOMEM;

	ioaddr = &host->ports[0]->ioaddr;
	ioaddr->cmd_addr = iomap[0];
	ioaddr->ctl_addr = iomap[1];
	ioaddr->altstatus_addr = iomap[1];
	ioaddr->bmdma_addr = iomap[4];
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[0],
		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
	ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");

	ioaddr = &host->ports[1]->ioaddr;
	ioaddr->cmd_addr = iomap[2];
	ioaddr->ctl_addr = iomap[3];
	ioaddr->altstatus_addr = iomap[3];
	ioaddr->bmdma_addr = iomap[4] + 8;
	ata_sff_std_ports(ioaddr);

	ata_port_desc(host->ports[1],
		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
	ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");

	/* activate the host */
	pci_set_master(pdev);
	rc = ata_host_start(host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		static const int irq[] = { 14, 15 };
		struct ata_port *ap = host->ports[i];

		if (ata_port_is_dummy(ap))
			continue;

		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
				      ata_sff_interrupt, 0, DRV_NAME, host);
		if (rc)
			return rc;

		ata_port_desc(ap, "irq %d", irq[i]);
	}

	return ata_host_register(host, &cs5520_sht);
}
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	struct pci_root_info *info = NULL;
	int domain = root->segment;
	int busnum = root->secondary.start;
	LIST_HEAD(resources);
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (pci_ignore_seg)
		domain = 0;

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd = &info->sd;
	sd->domain = domain;
	sd->node = node;
	sd->companion = device;

	/*
	 * Maybe the desired pci bus has been already scanned. In such case
	 * it is unnecessary to scan the pci bus with the given domain,busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exists, the content of bus->sysdata will
		 * be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(info);
	} else {
		probe_pci_root_info(info, device, busnum, domain);

		/* insert busn res at first */
		pci_add_resource(&resources, &root->secondary);
		/*
		 * _CRS with no apertures is normal, so only fall back to
		 * defaults or native bridge info if we're ignoring _CRS.
		 */
		if (pci_use_crs)
			add_resources(info, &resources);
		else {
			free_pci_root_info_res(info);
			x86_pci_root_bus_resources(busnum, &resources);
		}

		if (!setup_mcfg_map(info, domain, (u8)root->secondary.start,
				    (u8)root->secondary.end,
				    root->mcfg_addr))
			bus = pci_create_root_bus(NULL, busnum, &pci_root_ops,
						  sd, &resources);

		if (bus) {
			pci_scan_child_bus(bus);
			pci_set_host_bridge_release(
				to_pci_host_bridge(bus->bridge),
				release_pci_root_info, info);
		} else {
			pci_free_resource_list(&resources);
			__release_pci_root_info(info);
		}
	}

	/* After the PCI-E bus has been walked and all devices discovered,
	 * configure any settings of the fabric that might be necessary.
	 */
	if (bus) {
		struct pci_bus *child;

		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	int rc;
	u32 genctl, val;
	struct ata_port_info pi = sis_port_info, *ppi[2] = { &pi, &pi };
	int pci_dev_busy = 0;
	u8 pmr;
	u8 port2_start;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	/* check and see if the SCRs are in IO space or PCI cfg space */
	pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
	if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
		pi.flags |= SIS_FLAG_CFGSCR;

	/* if hardware thinks SCRs are in IO space, but there are
	 * no IO resources assigned, change to PCI cfg space.
	 */
	if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
	    ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
	     (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
		genctl &= ~GENCTL_IOMAPPED_SCR;
		pci_write_config_dword(pdev, SIS_GENCTL, genctl);
		pi.flags |= SIS_FLAG_CFGSCR;
	}

	pci_read_config_byte(pdev, SIS_PMR, &pmr);
	if (ent->device != 0x182) {
		if ((pmr & SIS_PMR_COMBINED) == 0) {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 180/181/964 chipset in SATA mode\n");
			port2_start = 64;
		} else {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 180/181 chipset in combined mode\n");
			port2_start = 0;
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		}
	} else {
		pci_read_config_dword(pdev, 0x6C, &val);
		if (val & (1L << 31)) {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 182/965 chipset\n");
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		} else
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 182/965L chipset\n");
		port2_start = 0x20;
	}

	probe_ent = ata_pci_init_native_mode(pdev, ppi,
				ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) {
		probe_ent->port[0].scr_addr =
			pci_resource_start(pdev, SIS_SCR_PCI_BAR);
		probe_ent->port[1].scr_addr =
			pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start;
	}

	pci_set_master(pdev);
	pci_intx(pdev, 1);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_regions:
	pci_release_regions(pdev);

err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}
static int artop_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static int printed_version;
	static const struct ata_port_info info_6210 = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &artop6210_ops,
	};
	static const struct ata_port_info info_626x = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &artop6260_ops,
	};
	static const struct ata_port_info info_628x = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &artop6260_ops,
	};
	static const struct ata_port_info info_628x_fast = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &artop6260_ops,
	};
	const struct ata_port_info *ppi[] = { NULL, NULL };
	int rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (id->driver_data == 0) {	/* 6210 variant */
		ppi[0] = &info_6210;
		/* BIOS may have left us in UDMA, clear it before libata probe */
		pci_write_config_byte(pdev, 0x54, 0);
	} else if (id->driver_data == 1)	/* 6260 */
		ppi[0] = &info_626x;
	else if (id->driver_data == 2) {	/* 6280 or 6280 + fast */
		unsigned long io = pci_resource_start(pdev, 4);
		u8 reg;

		ppi[0] = &info_628x;
		if (inb(io) & 0x10)
			ppi[0] = &info_628x_fast;
		/* Mac systems come up with some registers not set as we
		   will need them */

		/* Clear reset & test bits */
		pci_read_config_byte(pdev, 0x49, &reg);
		pci_write_config_byte(pdev, 0x49, reg & ~0x30);

		/* PCI latency must be > 0x80 for burst mode, tweak it
		 * if required.
		 */
		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
		if (reg <= 0x80)
			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);

		/* Enable IRQ output and burst mode */
		pci_read_config_byte(pdev, 0x4a, &reg);
		pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
	}

	BUG_ON(ppi[0] == NULL);

	return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
}
static acpi_status setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	struct resource *root, *conflict;
	u64 start, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		root = &iomem_resource;
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		root = &ioport_resource;
		flags = IORESOURCE_IO;
	} else
		return AE_OK;

	start = addr.minimum + addr.translation_offset;
	end = addr.maximum + addr.translation_offset;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	conflict = insert_resource_conflict(root, res);
	if (conflict) {
		dev_err(&info->bridge->dev,
			"address space collision: host bridge window %pR "
			"conflicts with %s %pR\n",
			res, conflict->name, conflict);
	} else {
		pci_bus_add_resource(info->bus, res, 0);
		info->res_num++;
		if (addr.translation_offset)
			dev_info(&info->bridge->dev,
				 "host bridge window %pR "
				 "(PCI address [%#llx-%#llx])\n",
				 res, res->start - addr.translation_offset,
				 res->end - addr.translation_offset);
		else
			dev_info(&info->bridge->dev,
				 "host bridge window %pR\n", res);
	}

	return AE_OK;
}
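/*
 * Sketch for context: insert_resource_conflict(), used above, either inserts
 * the new resource into the tree under root and returns NULL, or returns the
 * existing resource it collides with.  A hypothetical standalone usage,
 * with placeholder names:
 */
static int example_claim(struct resource *root, struct resource *new)
{
	struct resource *conflict = insert_resource_conflict(root, new);

	if (conflict) {
		pr_warn("%pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		return -EBUSY;
	}
	return 0;
}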
static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/*
	 * do not map rombase
	 */
	rc = pcim_iomap_regions(pdev, 1 << ATP867X_BAR_IOBASE, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

#ifdef	ATP867X_DEBUG
	atp867x_check_res(pdev);

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
		       (unsigned long long)(host->iomap[i]));
#endif

	/*
	 * request, iomap BARs and init port addresses accordingly
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = ATP867X_IO_PORTBASE(ap, i);
		ioaddr->ctl_addr = ioaddr->altstatus_addr =
			ATP867X_IO_ALTSTATUS(ap, i);
		ioaddr->bmdma_addr = ATP867X_IO_DMABASE(ap, i);

		ata_sff_std_ports(ioaddr);
		rc = atp867x_set_priv(ap);
		if (rc)
			return rc;

#ifdef	ATP867X_DEBUG
		atp867x_check_ports(ap, i);
#endif
		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
			      (unsigned long)ioaddr->cmd_addr,
			      (unsigned long)ioaddr->ctl_addr);
		ata_port_desc(ap, "bmdma 0x%lx",
			      (unsigned long)ioaddr->bmdma_addr);

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	atp867x_fixup(host);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	return rc;
}
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * Maybe the desired pci bus has been already scanned. In such case
	 * it is unnecessary to scan the pci bus with the given domain,busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exists, the content of bus->sysdata will
		 * be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}

	if (!bus)
		kfree(sd);

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}