static int ati_create_gatt_table(struct agp_bridge_data *bridge) { struct aper_size_info_lvl2 *value; struct ati_page_map page_dir; unsigned long __iomem *cur_gatt; unsigned long addr; int retval; u32 temp; int i; struct aper_size_info_lvl2 *current_size; value = A_SIZE_LVL2(agp_bridge->current_size); retval = ati_create_page_map(&page_dir); if (retval != 0) return retval; retval = ati_create_gatt_pages(value->num_entries / 1024); if (retval != 0) { ati_free_page_map(&page_dir); return retval; } agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped; agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); /* Write out the size register */ current_size = A_SIZE_LVL2(agp_bridge->current_size); if (is_r200()) { pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp); pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp); } else { pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp); pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp); } /* * Get the address for the gart region. * This is a bus address even on the alpha, b/c its * used to program the agp master not the cpu */ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); agp_bridge->gart_bus_addr = addr; /* Calculate the agp offset */ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1, page_dir.remapped+GET_PAGE_DIR_OFF(addr)); readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ } for (i = 0; i < value->num_entries; i++) { addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); } return 0; }
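/*
 * Not part of the driver above: a minimal sketch of the two-level lookup the
 * ATI GATT code relies on, assuming 4 KB aperture pages and 1024 u32 entries
 * per GATT page (hence the 4 MB, i.e. 0x00400000, stride per page-directory
 * slot in the loop above). These helpers only mirror GET_PAGE_DIR_OFF and
 * GET_GATT_OFF in spirit; the real macros are defined elsewhere in ati-agp.
 */
#define SKETCH_PAGE_SHIFT		12	/* 4 KB aperture pages */
#define SKETCH_ENTRIES_PER_PAGE		1024	/* u32 entries per GATT page */

static inline unsigned int sketch_page_dir_off(unsigned long aper_off)
{
	/* which GATT page: one directory slot covers 1024 * 4 KB = 4 MB */
	return aper_off >> (SKETCH_PAGE_SHIFT + 10);
}

static inline unsigned int sketch_gatt_off(unsigned long aper_off)
{
	/* index of the 4 KB page within its GATT page */
	return (aper_off >> SKETCH_PAGE_SHIFT) & (SKETCH_ENTRIES_PER_PAGE - 1);
}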
int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, struct ssb_device *dev) { struct ssb_device *pdev = pc->dev; struct ssb_bus *bus; int err = 0; u32 tmp; if (dev->bus->bustype != SSB_BUSTYPE_PCI) { goto out; } if (!pdev) goto out; bus = pdev->bus; might_sleep_if(pdev->id.coreid != SSB_DEV_PCI); if (bus->host_pci && ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) { u32 coremask; coremask = (1 << dev->core_index); err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp); if (err) goto out; tmp |= coremask << 8; err = pci_write_config_dword(bus->host_pci, SSB_PCI_IRQMASK, tmp); if (err) goto out; } else { u32 intvec; intvec = ssb_read32(pdev, SSB_INTVEC); tmp = ssb_read32(dev, SSB_TPSFLAG); tmp &= SSB_TPSFLAG_BPFLAG; intvec |= (1 << tmp); ssb_write32(pdev, SSB_INTVEC, intvec); } if (pc->setup_done) goto out; if (pdev->id.coreid == SSB_DEV_PCI) { tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2); tmp |= SSB_PCICORE_SBTOPCI_PREF; tmp |= SSB_PCICORE_SBTOPCI_BURST; pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp); if (pdev->id.revision < 5) { tmp = ssb_read32(pdev, SSB_IMCFGLO); tmp &= ~SSB_IMCFGLO_SERTO; tmp |= 2; tmp &= ~SSB_IMCFGLO_REQTO; tmp |= 3 << SSB_IMCFGLO_REQTO_SHIFT; ssb_write32(pdev, SSB_IMCFGLO, tmp); ssb_commit_settings(bus); } else if (pdev->id.revision >= 11) { tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2); tmp |= SSB_PCICORE_SBTOPCI_MRM; pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp); } } else { WARN_ON(pdev->id.coreid != SSB_DEV_PCIE); if ((pdev->id.revision == 0) || (pdev->id.revision == 1)) { tmp = ssb_pcie_read(pc, 0x4); tmp |= 0x8; ssb_pcie_write(pc, 0x4, tmp); } if (pdev->id.revision == 0) { const u8 serdes_rx_device = 0x1F; ssb_pcie_mdio_write(pc, serdes_rx_device, 2 , 0x8128); ssb_pcie_mdio_write(pc, serdes_rx_device, 6 , 0x0100); ssb_pcie_mdio_write(pc, serdes_rx_device, 7 , 0x1466); } else if (pdev->id.revision == 1) { tmp = ssb_pcie_read(pc, 0x100); tmp |= 0x40; ssb_pcie_write(pc, 0x100, tmp); } } pc->setup_done = 1; out: return err; }
static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { struct ata_probe_ent *probe_ent = NULL; int rc; u32 genctl; struct ata_port_info *ppi; int pci_dev_busy = 0; rc = pci_enable_device(pdev); if (rc) return rc; rc = pci_request_regions(pdev, DRV_NAME); if (rc) { pci_dev_busy = 1; goto err_out; } rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) goto err_out_regions; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) goto err_out_regions; ppi = &sis_port_info; probe_ent = ata_pci_init_native_mode(pdev, &ppi); if (!probe_ent) { rc = -ENOMEM; goto err_out_regions; } /* check and see if the SCRs are in IO space or PCI cfg space */ pci_read_config_dword(pdev, SIS_GENCTL, &genctl); if ((genctl & GENCTL_IOMAPPED_SCR) == 0) probe_ent->host_flags |= SIS_FLAG_CFGSCR; /* if hardware thinks SCRs are in IO space, but there are * no IO resources assigned, change to PCI cfg space. */ if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) && ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) || (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) { genctl &= ~GENCTL_IOMAPPED_SCR; pci_write_config_dword(pdev, SIS_GENCTL, genctl); probe_ent->host_flags |= SIS_FLAG_CFGSCR; } if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) { probe_ent->port[0].scr_addr = pci_resource_start(pdev, SIS_SCR_PCI_BAR); probe_ent->port[1].scr_addr = pci_resource_start(pdev, SIS_SCR_PCI_BAR) + 64; } pci_set_master(pdev); pci_enable_intx(pdev); /* FIXME: check ata_device_add return value */ ata_device_add(probe_ent); kfree(probe_ent); return 0; err_out_regions: pci_release_regions(pdev); err_out: if (!pci_dev_busy) pci_disable_device(pdev); return rc; }
static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { struct ata_probe_ent *probe_ent = NULL; int rc; u32 genctl; rc = pci_enable_device(pdev); if (rc) return rc; rc = pci_request_regions(pdev, DRV_NAME); if (rc) goto err_out; rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) goto err_out_regions; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) goto err_out_regions; probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); if (!probe_ent) { rc = -ENOMEM; goto err_out_regions; } memset(probe_ent, 0, sizeof(*probe_ent)); probe_ent->pdev = pdev; INIT_LIST_HEAD(&probe_ent->node); probe_ent->sht = &sis_sht; probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET | ATA_FLAG_NO_LEGACY; /* check and see if the SCRs are in IO space or PCI cfg space */ pci_read_config_dword(pdev, SIS_GENCTL, &genctl); if ((genctl & GENCTL_IOMAPPED_SCR) == 0) probe_ent->host_flags |= SIS_FLAG_CFGSCR; /* if hardware thinks SCRs are in IO space, but there are * no IO resources assigned, change to PCI cfg space. */ if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) && ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) || (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) { genctl &= ~GENCTL_IOMAPPED_SCR; pci_write_config_dword(pdev, SIS_GENCTL, genctl); probe_ent->host_flags |= SIS_FLAG_CFGSCR; } probe_ent->pio_mask = 0x03; probe_ent->udma_mask = 0x7f; probe_ent->port_ops = &sis_ops; probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); ata_std_ports(&probe_ent->port[0]); probe_ent->port[0].ctl_addr = pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) probe_ent->port[0].scr_addr = pci_resource_start(pdev, SIS_SCR_PCI_BAR); probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); ata_std_ports(&probe_ent->port[1]); probe_ent->port[1].ctl_addr = pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8; if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) probe_ent->port[1].scr_addr = pci_resource_start(pdev, SIS_SCR_PCI_BAR) + 64; probe_ent->n_ports = 2; probe_ent->irq = pdev->irq; probe_ent->irq_flags = SA_SHIRQ; pci_set_master(pdev); pci_enable_intx(pdev); /* FIXME: check ata_device_add return value */ ata_device_add(probe_ent); kfree(probe_ent); return 0; err_out_regions: pci_release_regions(pdev); err_out: pci_disable_device(pdev); return rc; }
static int __devinit i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int rc; struct mem_ctl_info *mci; struct i5100_priv *priv; struct pci_dev *ch0mm, *ch1mm; int ret = 0; u32 dw; int ranksperch; if (PCI_FUNC(pdev->devfn) != 1) return -ENODEV; rc = pci_enable_device(pdev); if (rc < 0) { ret = rc; goto bail; } /* ECC enabled? */ pci_read_config_dword(pdev, I5100_MC, &dw); if (!i5100_mc_errdeten(dw)) { printk(KERN_INFO "i5100_edac: ECC not enabled.\n"); ret = -ENODEV; goto bail_pdev; } /* figure out how many ranks, from strapped state of 48GB_Mode input */ pci_read_config_dword(pdev, I5100_MS, &dw); ranksperch = !!(dw & (1 << 8)) * 2 + 4; /* enable error reporting... */ pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw); dw &= ~I5100_FERR_NF_MEM_ANY_MASK; pci_write_config_dword(pdev, I5100_EMASK_MEM, dw); /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */ ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_21, 0); if (!ch0mm) { ret = -ENODEV; goto bail_pdev; } rc = pci_enable_device(ch0mm); if (rc < 0) { ret = rc; goto bail_ch0; } /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */ ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_22, 0); if (!ch1mm) { ret = -ENODEV; goto bail_disable_ch0; } rc = pci_enable_device(ch1mm); if (rc < 0) { ret = rc; goto bail_ch1; } mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0); if (!mci) { ret = -ENOMEM; goto bail_disable_ch1; } mci->dev = &pdev->dev; priv = mci->pvt_info; priv->ranksperchan = ranksperch; priv->mc = pdev; priv->ch0mm = ch0mm; priv->ch1mm = ch1mm; INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing); /* If scrubbing was already enabled by the bios, start maintaining it */ pci_read_config_dword(pdev, I5100_MC, &dw); if (i5100_mc_scrben(dw)) { priv->scrub_enable = 1; schedule_delayed_work(&(priv->i5100_scrubbing), I5100_SCRUB_REFRESH_RATE); } i5100_init_dimm_layout(pdev, mci); i5100_init_interleaving(pdev, mci); mci->mtype_cap = MEM_FLAG_FB_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = "i5100_edac.c"; mci->mod_ver = "not versioned"; mci->ctl_name = "i5100"; mci->dev_name = pci_name(pdev); mci->ctl_page_to_phys = NULL; mci->edac_check = i5100_check_error; mci->set_sdram_scrub_rate = i5100_set_scrub_rate; mci->get_sdram_scrub_rate = i5100_get_scrub_rate; i5100_init_csrows(mci); /* this strange construction seems to be in every driver, dunno why */ switch (edac_op_state) { case EDAC_OPSTATE_POLL: case EDAC_OPSTATE_NMI: break; default: edac_op_state = EDAC_OPSTATE_POLL; break; } if (edac_mc_add_mc(mci)) { ret = -ENODEV; goto bail_scrub; } return ret; bail_scrub: priv->scrub_enable = 0; cancel_delayed_work_sync(&(priv->i5100_scrubbing)); edac_mc_free(mci); bail_disable_ch1: pci_disable_device(ch1mm); bail_ch1: pci_dev_put(ch1mm); bail_disable_ch0: pci_disable_device(ch0mm); bail_ch0: pci_dev_put(ch0mm); bail_pdev: pci_disable_device(pdev); bail: return ret; }
static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; struct ata_probe_ent *probe_ent = NULL; int rc; u32 genctl, val; struct ata_port_info pi = sis_port_info, *ppi[2] = { &pi, &pi }; int pci_dev_busy = 0; u8 pmr; u8 port2_start; if (!printed_version++) dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); rc = pci_enable_device(pdev); if (rc) return rc; rc = pci_request_regions(pdev, DRV_NAME); if (rc) { pci_dev_busy = 1; goto err_out; } rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) goto err_out_regions; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) goto err_out_regions; /* check and see if the SCRs are in IO space or PCI cfg space */ pci_read_config_dword(pdev, SIS_GENCTL, &genctl); if ((genctl & GENCTL_IOMAPPED_SCR) == 0) pi.flags |= SIS_FLAG_CFGSCR; /* if hardware thinks SCRs are in IO space, but there are * no IO resources assigned, change to PCI cfg space. */ if ((!(pi.flags & SIS_FLAG_CFGSCR)) && ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) || (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) { genctl &= ~GENCTL_IOMAPPED_SCR; pci_write_config_dword(pdev, SIS_GENCTL, genctl); pi.flags |= SIS_FLAG_CFGSCR; } pci_read_config_byte(pdev, SIS_PMR, &pmr); if (ent->device != 0x182) { if ((pmr & SIS_PMR_COMBINED) == 0) { dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 180/181/964 chipset in SATA mode\n"); port2_start = 64; } else { dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 180/181 chipset in combined mode\n"); port2_start=0; pi.flags |= ATA_FLAG_SLAVE_POSS; } } else { pci_read_config_dword ( pdev, 0x6C, &val); if (val & (1L << 31)) { dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n"); pi.flags |= ATA_FLAG_SLAVE_POSS; } else dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n"); port2_start = 0x20; } probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); if (!probe_ent) { rc = -ENOMEM; goto err_out_regions; } if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) { probe_ent->port[0].scr_addr = pci_resource_start(pdev, SIS_SCR_PCI_BAR); probe_ent->port[1].scr_addr = pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start; } pci_set_master(pdev); pci_intx(pdev, 1); /* FIXME: check ata_device_add return value */ ata_device_add(probe_ent); kfree(probe_ent); return 0; err_out_regions: pci_release_regions(pdev); err_out: if (!pci_dev_busy) pci_disable_device(pdev); return rc; }
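/*
 * For context: when SIS_FLAG_CFGSCR is set above, the SATA status/error/
 * control registers are reached through PCI configuration space instead of
 * an IO BAR. A minimal sketch of that access pattern follows; the
 * SKETCH_SCR_CFG_BASE offset and the 4-bytes-per-register spacing are
 * illustrative assumptions, not the driver's actual constants.
 */
#define SKETCH_SCR_CFG_BASE	0xc0	/* hypothetical config-space base */

static u32 sketch_scr_cfg_read(struct pci_dev *pdev, unsigned int sc_reg)
{
	u32 val;

	pci_read_config_dword(pdev, SKETCH_SCR_CFG_BASE + sc_reg * 4, &val);
	return val;
}

static void sketch_scr_cfg_write(struct pci_dev *pdev, unsigned int sc_reg,
				 u32 val)
{
	pci_write_config_dword(pdev, SKETCH_SCR_CFG_BASE + sc_reg * 4, val);
}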
static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio) { u8 tmpbyte = 0; /* FIXME: double check */ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, pdev->revision ? 1 : 255); pci_write_config_byte(pdev, 0x80, 0x00); pci_write_config_byte(pdev, 0x84, 0x00); pci_read_config_byte(pdev, 0x8A, &tmpbyte); dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n", tmpbyte & 1, tmpbyte & 0x30); *try_mmio = 0; #ifdef CONFIG_PPC if (machine_is(cell)) *try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5); #endif switch (tmpbyte & 0x30) { case 0x00: /* 133 clock attempt to force it on */ pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10); break; case 0x30: /* if clocking is disabled */ /* 133 clock attempt to force it on */ pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20); break; case 0x10: /* 133 already */ break; case 0x20: /* BIOS set PCI x2 clocking */ break; } pci_read_config_byte(pdev, 0x8A, &tmpbyte); dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n", tmpbyte & 1, tmpbyte & 0x30); pci_write_config_byte(pdev, 0xA1, 0x72); pci_write_config_word(pdev, 0xA2, 0x328A); pci_write_config_dword(pdev, 0xA4, 0x62DD62DD); pci_write_config_dword(pdev, 0xA8, 0x43924392); pci_write_config_dword(pdev, 0xAC, 0x40094009); pci_write_config_byte(pdev, 0xB1, 0x72); pci_write_config_word(pdev, 0xB2, 0x328A); pci_write_config_dword(pdev, 0xB4, 0x62DD62DD); pci_write_config_dword(pdev, 0xB8, 0x43924392); pci_write_config_dword(pdev, 0xBC, 0x40094009); switch (tmpbyte & 0x30) { case 0x00: printk(KERN_INFO "sil680: 100MHz clock.\n"); break; case 0x10: printk(KERN_INFO "sil680: 133MHz clock.\n"); break; case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n"); break; /* This last case is _NOT_ ok */ case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n"); } return tmpbyte & 0x30; }
static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
{
	pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val);
}
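/*
 * Usage sketch for the helper above: the CvP data path simply streams 32-bit
 * words of the bitstream into the VSE_CVP_DATA register. This loop is an
 * assumption about how a caller feeds it; the driver's real write path also
 * honours conf->numclks pacing and checks CVP_STATUS error/credit bits
 * between chunks.
 */
static void sketch_cvp_send_words(struct altera_cvp_conf *conf,
				  const u32 *data, size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		altera_cvp_write_data_config(conf, data[i]);
}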
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_port_info pi = sis_port_info;
	const struct ata_port_info *ppi[] = { &pi, &pi };
	struct ata_host *host;
	u32 genctl, val;
	u8 pmr;
	u8 port2_start = 0x20;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* check and see if the SCRs are in IO space or PCI cfg space */
	pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
	if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
		pi.flags |= SIS_FLAG_CFGSCR;

	/* if hardware thinks SCRs are in IO space, but there are
	 * no IO resources assigned, change to PCI cfg space.
	 */
	if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
	    ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
	     (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
		genctl &= ~GENCTL_IOMAPPED_SCR;
		pci_write_config_dword(pdev, SIS_GENCTL, genctl);
		pi.flags |= SIS_FLAG_CFGSCR;
	}

	pci_read_config_byte(pdev, SIS_PMR, &pmr);
	switch (ent->device) {
	case 0x0180:
	case 0x0181:
		/* The PATA-handling is provided by pata_sis */
		switch (pmr & 0x30) {
		case 0x10:
			ppi[1] = &sis_info133_for_sata;
			break;
		case 0x30:
			ppi[0] = &sis_info133_for_sata;
			break;
		}

		if ((pmr & SIS_PMR_COMBINED) == 0) {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 180/181/964 chipset in SATA mode\n");
			port2_start = 64;
		} else {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 180/181 chipset in combined mode\n");
			port2_start = 0;
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		}
		break;

	case 0x0182:
	case 0x0183:
		pci_read_config_dword(pdev, 0x6C, &val);
		if (val & (1L << 31)) {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 182/965 chipset\n");
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		} else {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 182/965L chipset\n");
		}
		break;

	case 0x1182:
		/* dev_printk(KERN_INFO, &pdev->dev, ...); */
		pi.flags |= ATA_FLAG_SLAVE_POSS;
		break;

	case 0x1183:
		/* dev_printk(KERN_INFO, &pdev->dev, ...); */
		ppi[0] = &sis_info133_for_sata;
		ppi[1] = &sis_info133_for_sata;
		break;
	}

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->flags & ATA_FLAG_SATA &&
		    ap->flags & ATA_FLAG_SLAVE_POSS) {
			rc = ata_slave_link_init(ap);
			if (rc)
				return rc;
		}
	}

	if (!(pi.flags & SIS_FLAG_CFGSCR)) {
		void __iomem *mmio;

		rc = pcim_iomap_regions(pdev, 1 << SIS_SCR_PCI_BAR, DRV_NAME);
		if (rc)
			return rc;
		mmio = host->iomap[SIS_SCR_PCI_BAR];

		host->ports[0]->ioaddr.scr_addr = mmio;
		host->ports[1]->ioaddr.scr_addr = mmio + port2_start;
	}

	pci_set_master(pdev);
	pci_intx(pdev, 1);
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &sis_sht);
}
int rasta_register(void)
{
	uint32_t bar0, bar1, data;
	unsigned int *page0 = NULL;
	unsigned int *apb_base = NULL;
	int found = 0;

	DBG("Searching for RASTA board ...");

	/* Search PCI vendor/device id. */
	if (BSP_pciFindDevice(0x1AC8, 0x0010, 0, &bus, &dev, &fun) == 0) {
		found = 1;
	}

	/* Search old PCI vendor/device id. */
	if ( (!found) && (BSP_pciFindDevice(0x16E3, 0x0210, 0, &bus, &dev, &fun) == 0) ) {
		found = 1;
	}

	/* Did we find a RASTA board? */
	if ( !found )
		return -1;

	DBG(" found it (dev/fun: %d/%d).\n", dev, fun);

	pci_read_config_dword(bus, dev, fun, 0x10, &bar0);
	pci_read_config_dword(bus, dev, fun, 0x14, &bar1);

	page0 = (unsigned int *)(bar0 + 0x400000);
	*page0 = 0x80000000;			/* Point PAGE0 to start of APB */

	apb_base = (unsigned int *)(bar0 + APB2_OFFSET);

	/* apb_base[0] = 0x000002ff;
	   apb_base[1] = 0x8a205260;
	   apb_base[2] = 0x00184000; */

	/* Configure memory controller */
#ifdef RASTA_SRAM
	apb_base[0] = 0x000002ff;
	apb_base[1] = 0x00001260;
	apb_base[2] = 0x000e8000;
#else
	apb_base[0] = 0x000002ff;
	apb_base[1] = 0x82206000;
	apb_base[2] = 0x000e8000;
#endif

	/* Set up rasta irq controller */
	irq = (struct irqmp_regs *) (bar0 + IRQ_OFFSET);
	irq->iclear = 0xffff;
	irq->ilevel = 0;
	irq->mask[0] = 0xffff & ~(UART0_IRQ|UART1_IRQ|SPW0_IRQ|SPW1_IRQ|SPW2_IRQ|GRCAN_IRQ|BRM_IRQ);

	/* Configure AT697 ioport bit 7 to input pci irq */
	regs->PIO_Direction &= ~(1<<7);
	regs->PIO_Interrupt |= (0x87<<8);	/* level sensitive */

	apb_base[0x100] |= 0x40000000;		/* Set GRPCI mmap 0x4 */
	apb_base[0x104] = 0x40000000;		/* 0xA0000000; Point PAGE1 to RAM */

	/* set parity error response */
	pci_read_config_dword(bus, dev, fun, 0x4, &data);
	pci_write_config_dword(bus, dev, fun, 0x4, data|0x40);

	pci_master_enable(bus, dev, fun);

	/* install PCI interrupt vector */
	/* set_vector(pci_interrupt_handler,14+0x10, 1); */

	/* install interrupt vector */
	set_vector(rasta_interrupt_handler, RASTA_IRQ+0x10, 1);

	/* Scan AMBA Plug&Play */

	/* AMBA MAP bar0 (in CPU) ==> 0x80000000 (remote amba address) */
	amba_maps[0].size = 0x10000000;
	amba_maps[0].local_adr = bar0;
	amba_maps[0].remote_adr = 0x80000000;

	/* AMBA MAP bar1 (in CPU) ==> 0x40000000 (remote amba address) */
	amba_maps[1].size = 0x10000000;
	amba_maps[1].local_adr = bar1;
	amba_maps[1].remote_adr = 0x40000000;

	/* Mark end of table */
	amba_maps[2].size = 0;
	amba_maps[2].local_adr = 0;
	amba_maps[2].remote_adr = 0;

	memset(&abus, 0, sizeof(abus));

	/* Start AMBA PnP scan at first AHB bus */
	ambapp_scan(&abus, bar0 + (AHB1_IOAREA_BASE_ADDR & ~0xf0000000),
	            NULL, &amba_maps[0]);

	printk("Registering RASTA GRCAN driver\n\r");

	/* grhcan_register(bar0 + GRHCAN_OFFSET, bar1); */
	grcan_rasta_int_reg = rasta_interrrupt_register;
	if ( grcan_rasta_ram_register(&abus, bar1 + 0x20000) ) {
		printk("Failed to register RASTA GRCAN driver\n\r");
		return -1;
	}

	printk("Registering RASTA BRM driver\n\r");

	/* brm_register(bar0 + BRM_OFFSET, bar1); */
	/* register the BRM RASTA driver, use 128k on RASTA SRAM... */
	b1553brm_rasta_int_reg = rasta_interrrupt_register;
	if ( b1553brm_rasta_register(&abus, 2, 0, 3, bar1, 0x40000000) ) {
		printk("Failed to register BRM RASTA driver\n");
		return -1;
	}

	/* provide the spacewire driver with AMBA Plug&Play
	 * info so that it can find the GRSPW cores.
	 */
	grspw_rasta_int_reg = rasta_interrrupt_register;
	if ( grspw_rasta_register(&abus, bar1) ) {
		printk("Failed to register RASTA GRSPW driver\n\r");
		return -1;
	}

	/* provide the APBUART driver with AMBA Plug&Play
	 * info so that it can find the APBUART cores.
	 */
	apbuart_rasta_int_reg = rasta_interrrupt_register;
	if ( apbuart_rasta_register(&abus) ) {
		printk("Failed to register RASTA APBUART driver\n\r");
		return -1;
	}

	/* Find GPIO0 address */
	if ( rasta_get_gpio(&abus, 0, &gpio0, NULL) ) {
		printk("Failed to get address for RASTA GPIO0\n\r");
		return -1;
	}

	/* Find GPIO1 address */
	if ( rasta_get_gpio(&abus, 1, &gpio1, NULL) ) {
		printk("Failed to get address for RASTA GPIO1\n\r");
		return -1;
	}

	/* Successfully registered the RASTA board */
	return 0;
}
static int altera_cvp_write_init(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count) { struct altera_cvp_conf *conf = mgr->priv; struct pci_dev *pdev = conf->pci_dev; u32 iflags, val; int ret; iflags = info ? info->flags : 0; if (iflags & FPGA_MGR_PARTIAL_RECONFIG) { dev_err(&mgr->dev, "Partial reconfiguration not supported.\n"); return -EINVAL; } /* Determine allowed clock to data ratio */ if (iflags & FPGA_MGR_COMPRESSED_BITSTREAM) conf->numclks = 8; /* ratio for all compressed images */ else if (iflags & FPGA_MGR_ENCRYPTED_BITSTREAM) conf->numclks = 4; /* for uncompressed and encrypted images */ else conf->numclks = 1; /* for uncompressed and unencrypted images */ /* STEP 1 - read CVP status and check CVP_EN flag */ pci_read_config_dword(pdev, VSE_CVP_STATUS, &val); if (!(val & VSE_CVP_STATUS_CVP_EN)) { dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val); return -ENODEV; } if (val & VSE_CVP_STATUS_CFG_RDY) { dev_warn(&mgr->dev, "CvP already started, teardown first\n"); ret = altera_cvp_teardown(mgr, info); if (ret) return ret; } /* * STEP 2 * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned) */ /* switch from fabric to PMA clock */ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL; pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); /* set CVP mode */ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); val |= VSE_CVP_MODE_CTRL_CVP_MODE; pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); /* * STEP 3 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP */ altera_cvp_dummy_write(conf); /* STEP 4 - set CVP_CONFIG bit */ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val); /* request control block to begin transfer using CVP */ val |= VSE_CVP_PROG_CTRL_CONFIG; pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); /* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, VSE_CVP_STATUS_CFG_RDY, 10); if (ret) { dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n"); return ret; } /* * STEP 6 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP */ altera_cvp_dummy_write(conf); /* STEP 7 - set START_XFER */ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val); val |= VSE_CVP_PROG_CTRL_START_XFER; pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); /* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK; val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF; pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); return 0; }
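/*
 * altera_cvp_wait_status() is called above but not shown. A plausible sketch
 * of that poll-with-timeout pattern, assuming the status bits live in the
 * VSE_CVP_STATUS config dword and the timeout is in microseconds; the real
 * driver may sleep between polls or use a retry budget instead of this
 * busy-wait.
 */
static int sketch_cvp_wait_status(struct altera_cvp_conf *conf, u32 mask,
				  u32 expected, unsigned int timeout_us)
{
	u32 val;

	do {
		pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
		if ((val & mask) == expected)
			return 0;
		udelay(1);
	} while (timeout_us--);

	return -ETIMEDOUT;
}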
void os_pci_writel(void *osext, HPT_U8 offset, HPT_U32 value)
{
	pci_write_config_dword(((PHBA)osext)->pcidev, offset, value);
}
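/*
 * The matching read accessor, shown only to round out the wrapper pair; the
 * name os_pci_readl and this exact shape are assumptions based on the write
 * helper above.
 */
HPT_U32 os_pci_readl(void *osext, HPT_U8 offset)
{
	HPT_U32 value;

	pci_read_config_dword(((PHBA)osext)->pcidev, offset, &value);
	return value;
}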
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask) { struct msi_desc *entry; u32 address_hi, address_lo; unsigned int irq = vector; unsigned int dest_cpu = first_cpu(cpu_mask); unsigned long flags; spin_lock_irqsave(&msi_lock, flags); entry = (struct msi_desc *)msi_desc[vector]; if (!entry || !entry->dev) goto out_unlock; if (entry->msi_attrib.state == 0) goto out_unlock; switch (entry->msi_attrib.type) { case PCI_CAP_ID_MSI: { int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI); if (!pos) goto out_unlock; pci_read_config_dword(entry->dev, msi_upper_address_reg(pos), &address_hi); pci_read_config_dword(entry->dev, msi_lower_address_reg(pos), &address_lo); msi_ops->target(vector, dest_cpu, &address_hi, &address_lo); pci_write_config_dword(entry->dev, msi_upper_address_reg(pos), address_hi); pci_write_config_dword(entry->dev, msi_lower_address_reg(pos), address_lo); set_native_irq_info(irq, cpu_mask); break; } case PCI_CAP_ID_MSIX: { int offset_hi = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET; int offset_lo = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET; address_hi = readl(entry->mask_base + offset_hi); address_lo = readl(entry->mask_base + offset_lo); msi_ops->target(vector, dest_cpu, &address_hi, &address_lo); writel(address_hi, entry->mask_base + offset_hi); writel(address_lo, entry->mask_base + offset_lo); set_native_irq_info(irq, cpu_mask); break; } default: break; } out_unlock: spin_unlock_irqrestore(&msi_lock, flags); }
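/*
 * Illustration only: how the retargeted address is typically composed on x86
 * with the xAPIC layout (base 0xFEE00000, destination APIC ID in bits
 * 19:12). The function above delegates this to msi_ops->target(); the sketch
 * is an assumption about that callback's effect, not its source.
 */
static u32 sketch_msi_address_lo(u8 dest_apic_id)
{
	return 0xfee00000 | ((u32)dest_apic_id << 12);
}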
/*
 * Called to perform platform specific PCI setup
 */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	uint16_t config;
	uint32_t dconfig;
	int pos;

	/*
	 * Force the Cache line setting to 64 bytes. The standard
	 * Linux bus scan doesn't seem to set it. Octeon really has
	 * 128 byte lines, but Intel bridges get really upset if you
	 * try and set values above 64 bytes. Value is specified in
	 * 32bit words.
	 */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
	/* Set latency timers for all devices */
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 48);

	/* Enable reporting System errors and parity errors on all devices */
	/* Enable parity checking and error reporting */
	pci_read_config_word(dev, PCI_COMMAND, &config);
	config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	pci_write_config_word(dev, PCI_COMMAND, config);

	if (dev->subordinate) {
		/* Set latency timers on sub bridges */
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 48);
		/* More bridge error detection */
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
		config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
	}

	/* Enable the PCIe normal error reporting */
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (pos) {
		/* Update Device Control */
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
		config |= PCI_EXP_DEVCTL_CERE;	/* Correctable Error Reporting */
		config |= PCI_EXP_DEVCTL_NFERE;	/* Non-Fatal Error Reporting */
		config |= PCI_EXP_DEVCTL_FERE;	/* Fatal Error Reporting */
		config |= PCI_EXP_DEVCTL_URRE;	/* Unsupported Request */
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
	}

	/* Find the Advanced Error Reporting capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		/* Clear Uncorrectable Error Status */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &dconfig);
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, dconfig);
		/* Enable reporting of all uncorrectable errors */
		/* Uncorrectable Error Mask - turned on bits disable errors */
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
		/*
		 * Leave severity at HW default. This only controls if
		 * errors are reported as uncorrectable or
		 * correctable, not if the error is reported.
		 */
		/* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
		/* Clear Correctable Error Status */
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
		pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
		/* Enable reporting of all correctable errors */
		/* Correctable Error Mask - turned on bits disable errors */
		pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
		/* Advanced Error Capabilities */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
		/* ECRC Generation Enable */
		if (dconfig & PCI_ERR_CAP_ECRC_GENC)
			dconfig |= PCI_ERR_CAP_ECRC_GENE;
		/* ECRC Check Enable */
		if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
			dconfig |= PCI_ERR_CAP_ECRC_CHKE;
		pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
		/* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */

		/* Report all errors to the root complex */
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
				       PCI_ERR_ROOT_CMD_COR_EN |
				       PCI_ERR_ROOT_CMD_NONFATAL_EN |
				       PCI_ERR_ROOT_CMD_FATAL_EN);
		/* Clear the Root status register */
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
	}

	dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;

	return 0;
}
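/*
 * On kernels that provide the pcie_capability_*() accessors, the Device
 * Control read-modify-write above collapses to a single call. Shown as an
 * alternative, not as what this Octeon code does.
 */
static void sketch_enable_pcie_err_reporting(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
				 PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
}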
int l2_cache_enable (int l2control)
{
	if (l2control)	/* BAB750 */
	{
		mtspr(SPRN_L2CR, l2control);
		mtspr(SPRN_L2CR, (l2control | L2CR_I));
		while (mfspr(SPRN_L2CR) & L2CR_IP)
			;
		mtspr(SPRN_L2CR, (l2control | L2CR_E));
		return (0);
	}
	else	/* BAB740 */
	{
		int picr1, picr2, mask;
		int picr2CacheSize, cacheSize;
		int *d;
		int devbusfn;
		u32 reg32;

		devbusfn = pci_find_device(PCI_VENDOR_ID_MOTOROLA,
					   PCI_DEVICE_ID_MOTOROLA_MPC106, 0);
		if (devbusfn == -1)
			return (-1);

		pci_read_config_dword (devbusfn, PCI_PICR2, &reg32);
		reg32 &= ~PICR2_L2_EN;
		pci_write_config_dword (devbusfn, PCI_PICR2, reg32);

		/* cache size */
		if (*(volatile unsigned char *) (CFG_ISA_IO + 0x220) & 0x04) {
			/* cache size is 512 KB */
			picr2CacheSize = PICR2_L2_SIZE_512K;
			cacheSize = 0x80000;
		} else {
			/* cache size is 256 KB */
			picr2CacheSize = PICR2_L2_SIZE_256K;
			cacheSize = 0x40000;
		}

		/* setup PICR1 */
		mask = ~(PICR1_CF_BREAD_WS(1) |
			 PICR1_CF_BREAD_WS(2) |
			 PICR1_CF_CBA(0xff) |
			 PICR1_CF_CACHE_1G |
			 PICR1_CF_DPARK |
			 PICR1_CF_APARK |
			 PICR1_CF_L2_CACHE_MASK);
		picr1 = (PICR1_CF_CBA(0x3f) |
			 PICR1_CF_CACHE_1G |
			 PICR1_CF_APARK |
			 PICR1_CF_DPARK |
			 PICR1_CF_L2_COPY_BACK); /* PICR1_CF_L2_WRITE_THROUGH */

		pci_read_config_dword (devbusfn, PCI_PICR1, &reg32);
		reg32 &= mask;
		reg32 |= picr1;
		pci_write_config_dword (devbusfn, PCI_PICR1, reg32);

		/*
		 * invalidate all L2 cache
		 */
		picr2 = (PICR2_CF_INV_MODE |
			 PICR2_CF_HIT_HIGH |
			 PICR2_CF_MOD_HIGH |
			 PICR2_CF_L2_HIT_DELAY(1) |
			 PICR2_CF_APHASE_WS(1) |
			 picr2CacheSize);
		pci_write_config_dword (devbusfn, PCI_PICR2, picr2);

		/*
		 * dummy transactions
		 */
		for (d = 0; d < (int *)(2 * cacheSize); d++)
			dummy(*d);

		pci_write_config_dword (devbusfn, PCI_PICR2,
					(picr2 | PICR2_CF_FLUSH_L2));

		/* setup PICR2 */
		picr2 = (PICR2_CF_FAST_CASTOUT |
			 PICR2_CF_WDATA |
			 PICR2_CF_ADDR_ONLY_DISABLE |
			 PICR2_CF_HIT_HIGH |
			 PICR2_CF_MOD_HIGH |
			 PICR2_L2_UPDATE_EN |
			 PICR2_L2_EN |
			 PICR2_CF_APHASE_WS(1) |
			 PICR2_CF_DATA_RAM_PBURST |
			 PICR2_CF_L2_HIT_DELAY(1) |
			 PICR2_CF_SNOOP_WS(2) |
			 picr2CacheSize);
		pci_write_config_dword (devbusfn, PCI_PICR2, picr2);
	}
	return (0);
}
static long nv_tco_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_options, retval = -EINVAL; int new_heartbeat; void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = TCO_MODULE_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { tco_timer_stop(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { tco_timer_keepalive(); tco_timer_start(); retval = 0; } return retval; case WDIOC_KEEPALIVE: tco_timer_keepalive(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_heartbeat, p)) return -EFAULT; if (tco_timer_set_heartbeat(new_heartbeat)) return -EINVAL; tco_timer_keepalive(); /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); default: return -ENOTTY; } } /* * Kernel Interfaces */ static const struct file_operations nv_tco_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = nv_tco_write, .unlocked_ioctl = nv_tco_ioctl, .open = nv_tco_open, .release = nv_tco_release, }; static struct miscdevice nv_tco_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &nv_tco_fops, }; /* * Data for PCI driver interface * * This data only exists for exporting the supported * PCI ids via MODULE_DEVICE_TABLE. We do not actually * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. */ static struct pci_device_id tco_pci_tbl[] = { { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS, PCI_ANY_ID, PCI_ANY_ID, }, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, tco_pci_tbl); /* * Init & exit routines */ static unsigned char __init nv_tco_getdevice(void) { struct pci_dev *dev = NULL; u32 val; /* Find the PCI device */ for_each_pci_dev(dev) { if (pci_match_id(tco_pci_tbl, dev) != NULL) { tco_pci = dev; break; } } if (!tco_pci) return 0; /* Find the base io port */ pci_read_config_dword(tco_pci, 0x64, &val); val &= 0xffff; if (val == 0x0001 || val == 0x0000) { /* Something is wrong here, bar isn't setup */ printk(KERN_ERR PFX "failed to get tcobase address\n"); return 0; } val &= 0xff00; tcobase = val + 0x40; if (!request_region(tcobase, 0x10, "NV TCO")) { printk(KERN_ERR PFX "I/O address 0x%04x already in use\n", tcobase); return 0; } /* Set a reasonable heartbeat before we stop the timer */ tco_timer_set_heartbeat(30); /* * Stop the TCO before we change anything so we don't race with * a zeroed timer. 
 */
	tco_timer_keepalive();
	tco_timer_stop();

	/* Disable SMI caused by TCO */
	if (!request_region(MCP51_SMI_EN(tcobase), 4, "NV TCO")) {
		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
		       MCP51_SMI_EN(tcobase));
		goto out;
	}
	val = inl(MCP51_SMI_EN(tcobase));
	val &= ~MCP51_SMI_EN_TCO;
	outl(val, MCP51_SMI_EN(tcobase));
	val = inl(MCP51_SMI_EN(tcobase));
	release_region(MCP51_SMI_EN(tcobase), 4);
	if (val & MCP51_SMI_EN_TCO) {
		printk(KERN_ERR PFX "Could not disable SMI caused by TCO\n");
		goto out;
	}

	/* Check chipset's NO_REBOOT bit */
	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
	val |= MCP51_SMBUS_SETUP_B_TCO_REBOOT;
	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
	if (!(val & MCP51_SMBUS_SETUP_B_TCO_REBOOT)) {
		printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, reboot "
		       "disabled by hardware\n");
		goto out;
	}

	return 1;
out:
	release_region(tcobase, 0x10);
	return 0;
}

static int __devinit nv_tco_init(struct platform_device *dev)
{
	int ret;

	/* Check whether or not the hardware watchdog is there */
	if (!nv_tco_getdevice())
		return -ENODEV;

	/* Check to see if last reboot was due to watchdog timeout */
	printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n",
	       inl(TCO_STS(tcobase)) & TCO_STS_TCO2TO_STS ? "" : "not ");

	/* Clear out the old status */
	outl(TCO_STS_RESET, TCO_STS(tcobase));

	/*
	 * Check that the heartbeat value is within its range.
	 * If not, reset to the default.
	 */
	if (tco_timer_set_heartbeat(heartbeat)) {
		heartbeat = WATCHDOG_HEARTBEAT;
		tco_timer_set_heartbeat(heartbeat);
		printk(KERN_INFO PFX "heartbeat value must be 2<heartbeat<39, "
		       "using %d\n", heartbeat);
	}

	ret = misc_register(&nv_tco_miscdev);
	if (ret != 0) {
		printk(KERN_ERR PFX "cannot register miscdev on minor=%d "
		       "(err=%d)\n", WATCHDOG_MINOR, ret);
		goto unreg_region;
	}

	clear_bit(0, &timer_alive);
	tco_timer_stop();

	printk(KERN_INFO PFX "initialized (0x%04x). heartbeat=%d sec "
	       "(nowayout=%d)\n", tcobase, heartbeat, nowayout);

	return 0;

unreg_region:
	release_region(tcobase, 0x10);
	return ret;
}

static void __devexit nv_tco_cleanup(void)
{
	u32 val;

	/* Stop the timer before we leave */
	if (!nowayout)
		tco_timer_stop();

	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
	val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
	if (val & MCP51_SMBUS_SETUP_B_TCO_REBOOT) {
		printk(KERN_CRIT PFX "Couldn't unset REBOOT bit. Machine may "
		       "soon reset\n");
	}

	/* Deregister */
	misc_deregister(&nv_tco_miscdev);
	release_region(tcobase, 0x10);
}
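/*
 * Userspace view of the ioctl interface implemented above: a minimal client
 * that sets the heartbeat, pings, then performs the magic close (the driver
 * advertises WDIOF_MAGICCLOSE). This is a generic /dev/watchdog consumer
 * sketch, not part of the driver.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int sketch_watchdog_client(void)
{
	int timeout = 30;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return -1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* driver accepts 2..39 s */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* ping */
	write(fd, "V", 1);			/* magic close: allow stop */
	close(fd);
	return 0;
}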
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ath_softc *sc; struct ieee80211_hw *hw; u8 csz; u32 val; int ret = 0; char hw_name[64]; if (pcim_enable_device(pdev)) return -EIO; ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { pr_err("32-bit DMA not available\n"); return ret; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { pr_err("32-bit DMA consistent DMA enable failed\n"); return ret; } /* * Cache line size is used to size and align various * structures used to communicate with the hardware. */ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); if (csz == 0) { /* * Linux 2.4.18 (at least) writes the cache line size * register as a 16-bit wide register which is wrong. * We must have this setup properly for rx buffer * DMA to work so force a reasonable value here if it * comes up zero. */ csz = L1_CACHE_BYTES / sizeof(u32); pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); } /* * The default setting of latency timer yields poor results, * set it to the value used by other systems. It may be worth * tweaking this setting more. */ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); pci_set_master(pdev); /* * Disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state. */ pci_read_config_dword(pdev, 0x40, &val); if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); ret = pcim_iomap_regions(pdev, BIT(0), "ath9k"); if (ret) { dev_err(&pdev->dev, "PCI memory region reserve error\n"); return -ENODEV; } hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops); if (!hw) { dev_err(&pdev->dev, "No memory for ieee80211_hw\n"); return -ENOMEM; } SET_IEEE80211_DEV(hw, &pdev->dev); pci_set_drvdata(pdev, hw); sc = hw->priv; sc->hw = hw; sc->dev = &pdev->dev; sc->mem = pcim_iomap_table(pdev)[0]; sc->driver_data = id->driver_data; /* Will be cleared in ath9k_start() */ set_bit(SC_OP_INVALID, &sc->sc_flags); ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); if (ret) { dev_err(&pdev->dev, "request_irq failed\n"); goto err_irq; } sc->irq = pdev->irq; ret = ath9k_init_device(id->device, sc, &ath_pci_bus_ops); if (ret) { dev_err(&pdev->dev, "Failed to initialize device\n"); goto err_init; } ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", hw_name, (unsigned long)sc->mem, pdev->irq); return 0; err_init: free_irq(sc->irq, sc); err_irq: ieee80211_free_hw(hw); return ret; }
static int __init init_l440gx(void) { struct pci_dev *dev, *pm_dev; struct resource *pm_iobase; __u16 word; dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, NULL); pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); pci_dev_put(dev); if (!dev || !pm_dev) { printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n"); pci_dev_put(pm_dev); return -ENODEV; } l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE); if (!l440gx_map.virt) { printk(KERN_WARNING "Failed to ioremap L440GX flash region\n"); pci_dev_put(pm_dev); return -ENOMEM; } simple_map_init(&l440gx_map); printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt); /* Setup the pm iobase resource * This code should move into some kind of generic bridge * driver but for the moment I'm content with getting the * allocation correct. */ pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE]; if (!(pm_iobase->flags & IORESOURCE_IO)) { pm_iobase->name = "pm iobase"; pm_iobase->start = 0; pm_iobase->end = 63; pm_iobase->flags = IORESOURCE_IO; /* Put the current value in the resource */ pci_read_config_dword(pm_dev, 0x40, &iobase); iobase &= ~1; pm_iobase->start += iobase & ~1; pm_iobase->end += iobase & ~1; pci_dev_put(pm_dev); /* Allocate the resource region */ if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) { pci_dev_put(dev); pci_dev_put(pm_dev); printk(KERN_WARNING "Could not allocate pm iobase resource\n"); iounmap(l440gx_map.virt); return -ENXIO; } } /* Set the iobase */ iobase = pm_iobase->start; pci_write_config_dword(pm_dev, 0x40, iobase | 1); /* Set XBCS# */ pci_read_config_word(dev, 0x4e, &word); word |= 0x4; pci_write_config_word(dev, 0x4e, word); /* Supply write voltage to the chip */ l440gx_set_vpp(&l440gx_map, 1); /* Enable the gate on the WE line */ outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT); printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n"); mymtd = do_map_probe("jedec_probe", &l440gx_map); if (!mymtd) { printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n"); mymtd = do_map_probe("map_rom", &l440gx_map); } if (mymtd) { mymtd->owner = THIS_MODULE; add_mtd_device(mymtd); return 0; } iounmap(l440gx_map.virt); return -ENXIO; }
static int __devinit sis_find_family(struct pci_dev *dev)
{
	struct pci_dev *host;
	int i = 0;

	chipset_family = 0;

	for (i = 0; i < ARRAY_SIZE(SiSHostChipInfo) && !chipset_family; i++) {

		host = pci_get_device(PCI_VENDOR_ID_SI, SiSHostChipInfo[i].host_id, NULL);

		if (!host)
			continue;

		chipset_family = SiSHostChipInfo[i].chipset_family;

		/* Special case for SiS630 : 630S/ET is ATA_100a */
		if (SiSHostChipInfo[i].host_id == PCI_DEVICE_ID_SI_630) {
			if (host->revision >= 0x30)
				chipset_family = ATA_100a;
		}
		pci_dev_put(host);

		printk(KERN_INFO DRV_NAME " %s: %s %s controller\n",
		       pci_name(dev), SiSHostChipInfo[i].name,
		       chipset_capability[chipset_family]);
	}

	if (!chipset_family) { /* Belongs to pci-quirks */

		u32 idemisc;
		u16 trueid;

		/* Disable ID masking and register remapping */
		pci_read_config_dword(dev, 0x54, &idemisc);
		pci_write_config_dword(dev, 0x54, (idemisc & 0x7fffffff));
		pci_read_config_word(dev, PCI_DEVICE_ID, &trueid);
		pci_write_config_dword(dev, 0x54, idemisc);

		if (trueid == 0x5518) {
			printk(KERN_INFO DRV_NAME " %s: SiS 962/963 MuTIOL IDE UDMA133 controller\n",
			       pci_name(dev));
			chipset_family = ATA_133;

			/* Check for 5513 compatibility mapping
			 * We must use this, else the port enabled code will fail,
			 * as it expects the enablebits at 0x4a.
			 */
			if ((idemisc & 0x40000000) == 0) {
				pci_write_config_dword(dev, 0x54, idemisc | 0x40000000);
				printk(KERN_INFO DRV_NAME " %s: Switching to 5513 register mapping\n",
				       pci_name(dev));
			}
		}
	}

	if (!chipset_family) { /* Belongs to pci-quirks */

		struct pci_dev *lpc_bridge;
		u16 trueid;
		u8 prefctl;
		u8 idecfg;

		pci_read_config_byte(dev, 0x4a, &idecfg);
		pci_write_config_byte(dev, 0x4a, idecfg | 0x10);
		pci_read_config_word(dev, PCI_DEVICE_ID, &trueid);
		pci_write_config_byte(dev, 0x4a, idecfg);

		if (trueid == 0x5517) { /* SiS 961/961B */

			lpc_bridge = pci_get_slot(dev->bus, 0x10); /* Bus 0, Dev 2, Fn 0 */
			pci_read_config_byte(dev, 0x49, &prefctl);

			if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) {
				printk(KERN_INFO DRV_NAME " %s: SiS 961B MuTIOL IDE UDMA133 controller\n",
				       pci_name(dev));
				chipset_family = ATA_133a;
			} else {
				printk(KERN_INFO DRV_NAME " %s: SiS 961 MuTIOL IDE UDMA100 controller\n",
				       pci_name(dev));
				chipset_family = ATA_100;
			}
			pci_dev_put(lpc_bridge);
		}
	}

	return chipset_family;
}
static int __devinit bcma_host_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct bcma_bus *bus; int err = -ENOMEM; const char *name; u32 val; /* Alloc */ bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) goto out; /* Basic PCI configuration */ err = pci_enable_device(dev); if (err) goto err_kfree_bus; name = dev_name(&dev->dev); if (dev->driver && dev->driver->name) name = dev->driver->name; err = pci_request_regions(dev, name); if (err) goto err_pci_disable; pci_set_master(dev); /* Disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_read_config_dword(dev, 0x40, &val); if ((val & 0x0000ff00) != 0) pci_write_config_dword(dev, 0x40, val & 0xffff00ff); /* SSB needed additional powering up, do we have any AMBA PCI cards? */ if (!pci_is_pcie(dev)) bcma_err(bus, "PCI card detected, report problems.\n"); /* Map MMIO */ err = -ENOMEM; bus->mmio = pci_iomap(dev, 0, ~0UL); if (!bus->mmio) goto err_pci_release_regions; /* Host specific */ bus->host_pci = dev; bus->hosttype = BCMA_HOSTTYPE_PCI; bus->ops = &bcma_host_pci_ops; bus->boardinfo.vendor = bus->host_pci->subsystem_vendor; bus->boardinfo.type = bus->host_pci->subsystem_device; /* Register */ err = bcma_bus_register(bus); if (err) goto err_pci_unmap_mmio; pci_set_drvdata(dev, bus); out: return err; err_pci_unmap_mmio: pci_iounmap(dev, bus->mmio); err_pci_release_regions: pci_release_regions(dev); err_pci_disable: pci_disable_device(dev); err_kfree_bus: kfree(bus); return err; }
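/*
 * The RETRY_TIMEOUT fixup above also appears verbatim in the ath9k probe
 * earlier in this collection; factored out it would look like the sketch
 * below. The helper name is ours; 0x41 is the byte being cleared inside the
 * dword read at offset 0x40.
 */
static void sketch_disable_retry_timeout(struct pci_dev *pdev)
{
	u32 val;

	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
}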
static long ali_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = "ALi M1535 WatchDog Timer", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { ali_stop(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { ali_start(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: ali_keepalive(); return 0; case WDIOC_SETTIMEOUT: { int new_timeout; if (get_user(new_timeout, p)) return -EFAULT; if (ali_settimer(new_timeout)) return -EINVAL; ali_keepalive(); /* Fall */ } case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: return -ENOTTY; } } /* * ali_open - handle open of ali watchdog * @inode: inode from VFS * @file: file from VFS * * Open the ALi watchdog device. Ensure only one person opens it * at a time. Also start the watchdog running. */ static int ali_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &ali_is_open)) return -EBUSY; /* Activate */ ali_start(); return nonseekable_open(inode, file); } /* * ali_release - close an ALi watchdog * @inode: inode from VFS * @file: file from VFS * * Close the ALi watchdog device. Actual shutdown of the timer * only occurs if the magic sequence has been set. */ static int ali_release(struct inode *inode, struct file *file) { /* * Shut off the timer. */ if (ali_expect_release == 42) ali_stop(); else { printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); ali_keepalive(); } clear_bit(0, &ali_is_open); ali_expect_release = 0; return 0; } /* * ali_notify_sys - System down notifier * * Notifier for system down */ static int ali_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) ali_stop(); /* Turn the WDT off */ return NOTIFY_DONE; } /* * Data for PCI driver interface * * This data only exists for exporting the supported * PCI ids via MODULE_DEVICE_TABLE. We do not actually * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. */ static struct pci_device_id ali_pci_tbl[] = { { PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,}, { PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,}, { 0, }, }; MODULE_DEVICE_TABLE(pci, ali_pci_tbl); /* * ali_find_watchdog - find a 1535 and 7101 * * Scans the PCI hardware for a 1535 series bridge and matching 7101 * watchdog device. 
 * This may be overtight but it is better to be safe
 */
static int __init ali_find_watchdog(void)
{
	struct pci_dev *pdev;
	u32 wdog;

	/* Check for a 1533/1535 series bridge */
	pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x1535, NULL);
	if (pdev == NULL)
		pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x1533, NULL);
	if (pdev == NULL)
		return -ENODEV;
	pci_dev_put(pdev);

	/* Check for a 7101 PMU */
	pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x7101, NULL);
	if (pdev == NULL)
		return -ENODEV;

	if (pci_enable_device(pdev)) {
		pci_dev_put(pdev);
		return -EIO;
	}

	ali_pci = pdev;

	/*
	 * Initialize the timer bits
	 */
	pci_read_config_dword(pdev, 0xCC, &wdog);

	/* Timer bits */
	wdog &= ~0x3F;
	/* Issued events */
	wdog &= ~((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24));
	/* No monitor bits */
	wdog &= ~((1 << 16) | (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9));

	pci_write_config_dword(pdev, 0xCC, wdog);

	return 0;
}

/*
 * Kernel Interfaces
 */

static const struct file_operations ali_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= ali_write,
	.unlocked_ioctl	= ali_ioctl,
	.open		= ali_open,
	.release	= ali_release,
};

static struct miscdevice ali_miscdev = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &ali_fops,
};

static struct notifier_block ali_notifier = {
	.notifier_call = ali_notify_sys,
};

/*
 * watchdog_init - module initialiser
 *
 * Scan for a suitable watchdog and if so initialize it. Return an error
 * if we cannot, the error causes the module to unload.
 */
static int __init watchdog_init(void)
{
	int ret;

	/* Check whether or not the hardware watchdog is there */
	if (ali_find_watchdog() != 0)
		return -ENODEV;

	/* Check that the timeout value is within its range;
	   if not, reset to the default */
	if (timeout < 1 || timeout >= 18000) {
		timeout = WATCHDOG_TIMEOUT;
		printk(KERN_INFO PFX
		       "timeout value must be 0 < timeout < 18000, using %d\n",
		       timeout);
	}

	/* Calculate the watchdog's timeout */
	ali_settimer(timeout);

	ret = register_reboot_notifier(&ali_notifier);
	if (ret != 0) {
		printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n",
		       ret);
		goto out;
	}

	ret = misc_register(&ali_miscdev);
	if (ret != 0) {
		printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
		       WATCHDOG_MINOR, ret);
		goto unreg_reboot;
	}

	printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n",
	       timeout, nowayout);

out:
	return ret;

unreg_reboot:
	unregister_reboot_notifier(&ali_notifier);
	goto out;
}

/*
 * watchdog_exit - module de-initialiser
 *
 * Called while unloading a successfully installed watchdog module.
 */
static void __exit watchdog_exit(void)
{
	/* Stop the timer before we leave */
	ali_stop();

	/* Deregister */
	misc_deregister(&ali_miscdev);
	unregister_reboot_notifier(&ali_notifier);
	pci_dev_put(ali_pci);
}
static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	card_t *card;
	u32 __iomem *p;
	int i;
	u32 ramsize;
	u32 ramphys;		/* buffer memory base */
	u32 scaphys;		/* SCA memory base */
	u32 plxphys;		/* PLX registers memory base */

	i = pci_enable_device(pdev);
	if (i)
		return i;

	i = pci_request_regions(pdev, "PC300");
	if (i) {
		pci_disable_device(pdev);
		return i;
	}

	card = kzalloc(sizeof(card_t), GFP_KERNEL);
	if (card == NULL) {
		printk(KERN_ERR "pc300: unable to allocate memory\n");
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		return -ENOBUFS;
	}
	pci_set_drvdata(pdev, card);

	if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
	    pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
	    pci_resource_len(pdev, 3) < 16384) {
		printk(KERN_ERR "pc300: invalid card EEPROM parameters\n");
		pc300_pci_remove_one(pdev);
		return -EFAULT;
	}

	plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
	card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);

	scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
	card->scabase = ioremap(scaphys, PC300_SCA_SIZE);

	ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
	card->rambase = pci_ioremap_bar(pdev, 3);

	if (card->plxbase == NULL ||
	    card->scabase == NULL ||
	    card->rambase == NULL) {
		printk(KERN_ERR "pc300: ioremap() failed\n");
		pc300_pci_remove_one(pdev);
		return -ENOMEM;
	}

	/* PLX PCI 9050 workaround for local configuration register read bug */
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys);
	card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);

	if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
	    pdev->device == PCI_DEVICE_ID_PC300_TE_2)
		card->type = PC300_TE; /* not fully supported */
	else if (card->init_ctrl_value & PC300_CTYPE_MASK)
		card->type = PC300_X21;
	else
		card->type = PC300_RSV;

	if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
	    pdev->device == PCI_DEVICE_ID_PC300_TE_1)
		card->n_ports = 1;
	else
		card->n_ports = 2;

	for (i = 0; i < card->n_ports; i++)
		if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
			printk(KERN_ERR "pc300: unable to allocate memory\n");
			pc300_pci_remove_one(pdev);
			return -ENOMEM;
		}

	/* Reset PLX */
	p = &card->plxbase->init_ctrl;
	writel(card->init_ctrl_value | 0x40000000, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	writel(card->init_ctrl_value, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	/* Reload Config. Registers from EEPROM */
	writel(card->init_ctrl_value | 0x20000000, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	writel(card->init_ctrl_value, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	ramsize = sca_detect_ram(card, card->rambase,
				 pci_resource_len(pdev, 3));

	if (use_crystal_clock)
		card->init_ctrl_value &= ~PC300_CLKSEL_MASK;
	else
		card->init_ctrl_value |= PC300_CLKSEL_MASK;

	writel(card->init_ctrl_value, &card->plxbase->init_ctrl);

	/* number of TX + RX buffers for one port */
	i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU));
	card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
	card->rx_ring_buffers = i - card->tx_ring_buffers;

	card->buff_offset = card->n_ports * sizeof(pkt_desc) *
		(card->tx_ring_buffers + card->rx_ring_buffers);

	printk(KERN_INFO "pc300: PC300/%s, %u KB RAM at 0x%x, IRQ%u, "
	       "using %u TX + %u RX packets rings\n",
	       card->type == PC300_X21 ? "X21" :
	       card->type == PC300_TE ?
"TE" : "RSV", ramsize / 1024, ramphys, pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers); if (card->tx_ring_buffers < 1) { printk(KERN_ERR "pc300: RAM test failed\n"); pc300_pci_remove_one(pdev); return -EFAULT; } /* Enable interrupts on the PCI bridge, LINTi1 active low */ writew(0x0041, &card->plxbase->intr_ctrl_stat); /* Allocate IRQ */ if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) { printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n", pdev->irq); pc300_pci_remove_one(pdev); return -EBUSY; } card->irq = pdev->irq; sca_init(card, 0); // COTE not set - allows better TX DMA settings // sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card); sca_out(0x10, BTCR, card); for (i = 0; i < card->n_ports; i++) { port_t *port = &card->ports[i]; struct net_device *dev = port->netdev; hdlc_device *hdlc = dev_to_hdlc(dev); port->chan = i; spin_lock_init(&port->lock); dev->irq = card->irq; dev->mem_start = ramphys; dev->mem_end = ramphys + ramsize - 1; dev->tx_queue_len = 50; dev->netdev_ops = &pc300_ops; hdlc->attach = sca_attach; hdlc->xmit = sca_xmit; port->settings.clock_type = CLOCK_EXT; port->card = card; if (card->type == PC300_X21) port->iface = IF_IFACE_X21; else port->iface = IF_IFACE_V35; sca_init_port(port); if (register_hdlc_device(dev)) { printk(KERN_ERR "pc300: unable to register hdlc " "device\n"); port->card = NULL; pc300_pci_remove_one(pdev); return -ENOBUFS; } printk(KERN_INFO "%s: PC300 channel %d\n", dev->name, port->chan); } return 0; }
static void i5100_read_log(struct mem_ctl_info *mci, int chan, u32 ferr, u32 nerr) { struct i5100_priv *priv = mci->pvt_info; struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm; u32 dw; u32 dw2; unsigned syndrome = 0; unsigned ecc_loc = 0; unsigned merr; unsigned bank; unsigned rank; unsigned cas; unsigned ras; pci_read_config_dword(pdev, I5100_VALIDLOG, &dw); if (i5100_validlog_redmemvalid(dw)) { pci_read_config_dword(pdev, I5100_REDMEMA, &dw2); syndrome = dw2; pci_read_config_dword(pdev, I5100_REDMEMB, &dw2); ecc_loc = i5100_redmemb_ecc_locator(dw2); } if (i5100_validlog_recmemvalid(dw)) { const char *msg; pci_read_config_dword(pdev, I5100_RECMEMA, &dw2); merr = i5100_recmema_merr(dw2); bank = i5100_recmema_bank(dw2); rank = i5100_recmema_rank(dw2); pci_read_config_dword(pdev, I5100_RECMEMB, &dw2); cas = i5100_recmemb_cas(dw2); ras = i5100_recmemb_ras(dw2); /* FIXME: not really sure if this is what merr is... */ if (!merr) msg = i5100_err_msg(ferr); else msg = i5100_err_msg(nerr); i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg); } if (i5100_validlog_nrecmemvalid(dw)) { const char *msg; pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2); merr = i5100_nrecmema_merr(dw2); bank = i5100_nrecmema_bank(dw2); rank = i5100_nrecmema_rank(dw2); pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2); cas = i5100_nrecmemb_cas(dw2); ras = i5100_nrecmemb_ras(dw2); /* FIXME: not really sure if this is what merr is... */ if (!merr) msg = i5100_err_msg(ferr); else msg = i5100_err_msg(nerr); i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg); } pci_write_config_dword(pdev, I5100_VALIDLOG, dw); }
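/*
 * The i5100_recmema_...() and i5100_nrecmemb_...() accessors used above are
 * not shown in this collection; each presumably extracts a fixed-position
 * bitfield of the following shape. The shift/width values would come from
 * the chip documentation; none are guessed here.
 */
static inline u32 sketch_extract_field(u32 reg, unsigned int shift,
				       unsigned int width)
{
	return (reg >> shift) & ((1u << width) - 1);
}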
int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec)
{
	u32 val;
	__be32 *be32ptr;
	u8 timers;
	int i, rc;
	long recv_cap;
	char *recv_rate;

	/*
	 * Skip on function != 0, as the TL can only be defined on 0
	 */
	if (PCI_FUNC(dev->devfn) != 0)
		return 0;

	recv_rate = kzalloc(PNV_OCXL_TL_RATE_BUF_SIZE, GFP_KERNEL);
	if (!recv_rate)
		return -ENOMEM;
	/*
	 * The spec defines 64 templates for messages in the
	 * Transaction Layer (TL).
	 *
	 * The host and device each support a subset, so we need to
	 * configure the transmitters on each side to send only
	 * templates the receiver understands, at a rate the receiver
	 * can process. Per the spec, template 0 must be supported by
	 * everybody. That's the template which has been used by the
	 * host and device so far.
	 *
	 * The sending rate limit must be set before the template is
	 * enabled.
	 */

	/*
	 * Device -> host
	 */
	rc = pnv_ocxl_get_tl_cap(dev, &recv_cap, recv_rate,
				 PNV_OCXL_TL_RATE_BUF_SIZE);
	if (rc)
		goto out;

	for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) {
		be32ptr = (__be32 *) &recv_rate[i];
		pci_write_config_dword(dev,
				       tl_dvsec + OCXL_DVSEC_TL_SEND_RATE + i,
				       be32_to_cpu(*be32ptr));
	}
	val = recv_cap >> 32;
	pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP, val);
	val = recv_cap & GENMASK(31, 0);
	pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP + 4, val);

	/*
	 * Host -> device
	 */
	for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) {
		pci_read_config_dword(dev,
				      tl_dvsec + OCXL_DVSEC_TL_RECV_RATE + i,
				      &val);
		be32ptr = (__be32 *) &recv_rate[i];
		*be32ptr = cpu_to_be32(val);
	}
	pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP, &val);
	recv_cap = (long) val << 32;
	pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP + 4, &val);
	recv_cap |= val;

	rc = pnv_ocxl_set_tl_conf(dev, recv_cap, __pa(recv_rate),
				  PNV_OCXL_TL_RATE_BUF_SIZE);
	if (rc)
		goto out;

	/*
	 * Opencapi commands needing to be retried are classified per
	 * the TL in 2 groups: short and long commands.
	 *
	 * The short back off timer is not used for now. It will be
	 * for opencapi 4.0.
	 *
	 * The long back off timer is typically used when an AFU hits
	 * a page fault but the NPU is already processing one. So the
	 * AFU needs to wait before it can resubmit. Having a value
	 * too low doesn't break anything, but can generate extra
	 * traffic on the link.
	 * We set it to 1.6 us for now. It's shorter than, but in the
	 * same order of magnitude as the time spent to process a page
	 * fault.
	 */
	timers = 0x2 << 4; /* long timer = 1.6 us */
	pci_write_config_byte(dev, tl_dvsec + OCXL_DVSEC_TL_BACKOFF_TIMERS,
			      timers);

	rc = 0;
out:
	kfree(recv_rate);
	return rc;
}
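/*
 * A hedged sketch (not part of the driver) of the 64-bit capability
 * handling above: PCI config space is accessed 32 bits at a time, so
 * the TL capability mask is written high word first at the DVSEC
 * offset and low word at offset + 4, and reassembled the same way on
 * read. pos stands for tl_dvsec + OCXL_DVSEC_TL_RECV_CAP.
 */
static u64 tl_cap_read_sketch(struct pci_dev *dev, int pos)
{
	u32 val;
	u64 cap;

	pci_read_config_dword(dev, pos, &val);
	cap = (u64)val << 32;		/* high word */
	pci_read_config_dword(dev, pos + 4, &val);
	cap |= val;			/* low word */
	return cap;
}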
static void mid_get_fuse_settings(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	uint32_t fuse_value = 0;
	uint32_t fuse_value_tmp = 0;

#define FB_REG06		0xD0810600
#define FB_MIPI_DISABLE		(1 << 11)
#define FB_REG09		0xD0810900
#define FB_SKU_MASK		0x7000
#define FB_SKU_SHIFT		12
#define FB_SKU_100		0
#define FB_SKU_100L		1
#define FB_SKU_83		2

	if (pci_root == NULL) {
		WARN_ON(1);
		return;
	}

	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	/* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
	if (IS_MRST(dev))
		dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;

	DRM_INFO("internal display is %s\n",
		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");

	/* Prevent runtime suspend at start */
	if (dev_priv->iLVDS_enable) {
		dev_priv->is_lvds_on = true;
		dev_priv->is_mipi_on = false;
	} else {
		dev_priv->is_mipi_on = true;
		dev_priv->is_lvds_on = false;
	}

	dev_priv->video_device_fuse = fuse_value;

	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	dev_dbg(dev->dev, "SKU value is 0x%x.\n", fuse_value);
	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;

	dev_priv->fuse_reg_value = fuse_value;

	switch (fuse_value_tmp) {
	case FB_SKU_100:
		dev_priv->core_freq = 200;
		break;
	case FB_SKU_100L:
		dev_priv->core_freq = 100;
		break;
	case FB_SKU_83:
		dev_priv->core_freq = 166;
		break;
	default:
		dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
			 fuse_value_tmp);
		dev_priv->core_freq = 0;
	}
	dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);

	pci_dev_put(pci_root);
}
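/*
 * A minimal helper sketch (name hypothetical) for the index/data pair
 * used above: a fuse register is read by writing its address to config
 * offset 0xD0 of the root bridge and reading the value back from 0xD4,
 * exactly as mid_get_fuse_settings() does for FB_REG06 and FB_REG09.
 */
static u32 mid_read_fuse_sketch(struct pci_dev *pci_root, u32 reg)
{
	u32 val;

	pci_write_config_dword(pci_root, 0xD0, reg);	/* index */
	pci_read_config_dword(pci_root, 0xD4, &val);	/* data */
	return val;
}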
/*
 * Soft reset: bounce the chip through power state D3hot (3) and back
 * to D0 (0) via its PCI power management control/status register.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
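/*
 * A hedged sketch of the same reset idea using the standard PCI PM
 * capability registers instead of the chip-specific A_PCICFG_PM_CSR
 * alias: request D3hot, then return to D0. pm_cap is assumed to come
 * from pci_find_capability(pdev, PCI_CAP_ID_PM).
 */
static void pm_csr_reset_sketch(struct pci_dev *pdev, int pm_cap)
{
	u16 csr;

	pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &csr);
	pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL,
			      (csr & ~PCI_PM_CTRL_STATE_MASK) | PCI_D3hot);
	pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL,
			      csr & ~PCI_PM_CTRL_STATE_MASK);	/* D0 */
}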
int pciregions_read_proc(char *buf, char **start, off_t offset,
			 int len, int *eof, void *data)
{
	int i, pos = 0;
	int bus, devfn, is_multi = 0;
	unsigned char headertype, pribus, secbus;
	u16 vendorid, deviceid = 0;

	/* to print information about several buses, keep an array of them */
#define MAXNBUS 8
	int buses[MAXNBUS] = {0,};
	int lastbus = 0;	/* only one bus, by default, bus 0 */
	int busindex = 0;

	/* this macro helps keep the following lines short */
#define PRINTF(fmt, args...) sprintf(buf+len, fmt, ## args)
	len = 0;

	/* Loop through the devices (code not printed in the book) */

	if (!pci_present())
		return sprintf(buf, "PCI not available in this computer\n");

	bus = buses[busindex];	/* first bus (bus 0) */

	for (devfn = 0; pos < PAGE_SIZE; devfn++) {
		struct pci_dev *dev = NULL;

		/*
		 * A clean implementation would have a separate function to
		 * dump a single bus, but I preferred to keep it in one
		 * function to include part of it in the book (the printed
		 * code is automagically extracted from this file).
		 *
		 * Instead, I use a dirty trick to fold two loops in one.
		 */
		if (devfn > 0xff) {	/* end of this bus */
			if (busindex == lastbus)
				break;
			/* loop over to the next bus */
			bus = buses[++busindex];
			devfn = 0;
		}

		/*
		 * This code is derived from "drivers/pci/pci.c" in version
		 * 2.0, although it has been modified to work with the 2.4
		 * interface. This means that the GPL applies to this source
		 * file and credit is due to the original authors
		 * (Drew Eckhardt, Frederic Potter, David Mosberger-Tang)
		 */

		if (PCI_FUNC(devfn) && !is_multi)	/* not multi-function */
			continue;

		dev = pci_find_slot(bus, devfn);
		if (!dev) {
			if (!PCI_FUNC(devfn))
				is_multi = 0;	/* no first implies no other */
			continue;	/* no such device */
		}

		pci_read_config_byte(dev, PCI_HEADER_TYPE, &headertype);
		if (!PCI_FUNC(devfn))	/* first function */
			is_multi = headertype & 0x80;
		headertype &= 0x7f;	/* mask multi-function bit */

		/* FIXME: should get rid of the PAGE_SIZE limit */
		if (len > PAGE_SIZE / 2) {	/* a big margin, just to be sure */
			*eof = 1;
			return len;
		}
		vendorid = dev->vendor;
		deviceid = dev->device;

		len += PRINTF("Bus %i, device %2i, devfn %2i (id %04x-%04x,"
			      " headertype 0x%02x)\n", bus, devfn >> 3,
			      devfn & 7, vendorid, deviceid, headertype);

		if (headertype == PCI_HEADER_TYPE_BRIDGE) {
			/* This is a bridge, print what it does */
			pci_read_config_byte(dev, PCI_PRIMARY_BUS, &pribus);
			pci_read_config_byte(dev, PCI_SECONDARY_BUS, &secbus);
			len += PRINTF("\tbridge connecting PCI bus %i "
				      "to PCI bus %i\n", secbus, pribus);
			/* remember about this bus, to dump it later */
			if (lastbus <= MAXNBUS-1) {
				lastbus++;
				buses[lastbus] = secbus;
				len += PRINTF("\t(bus %i is dumped below)\n",
					      secbus);
			} else {
				len += PRINTF("\t(bus %i won't be dumped)\n",
					      secbus);
			}
			pci_release_device(dev);	/* 2.0 compatibility */
			continue;
		} else if (headertype == PCI_HEADER_TYPE_CARDBUS) {
			/* This is a CardBus bridge, print what it does */
			pci_read_config_byte(dev, PCI_CB_PRIMARY_BUS, &pribus);
			pci_read_config_byte(dev, PCI_CB_CARD_BUS, &secbus);
			len += PRINTF("\tbridge connecting CardBus %i "
				      "to PCI bus %i\n", secbus, pribus);
			pci_release_device(dev);	/* 2.0 compatibility */
			continue;
		} else if (headertype != PCI_HEADER_TYPE_NORMAL) {
			len += PRINTF("\tunknown header type, skipping\n");
			pci_release_device(dev);	/* 2.0 compatibility */
			continue;
		}

		/* Print the address regions of this device */
		for (i = 0; addresses[i]; i++) {
			u32 curr, mask, size;
			char *type;

			pci_read_config_dword(dev, addresses[i], &curr);
			cli();
			pci_write_config_dword(dev, addresses[i], ~0);
			pci_read_config_dword(dev, addresses[i], &mask);
			pci_write_config_dword(dev, addresses[i], curr);
			sti();

			if (!mask)
				continue;	/* there may be other regions */

			/*
			 * apply the I/O or memory mask to current position;
			 * note that I/O is limited to 0xffff, and 64-bit is
			 * not supported by this simple implementation
			 */
			if (curr & PCI_BASE_ADDRESS_SPACE_IO) {
				curr &= PCI_BASE_ADDRESS_IO_MASK;
			} else {
				curr &= PCI_BASE_ADDRESS_MEM_MASK;
			}

			len += PRINTF("\tregion %i: mask 0x%08lx, now at 0x%08lx\n",
				      i, (unsigned long)mask,
				      (unsigned long)curr);

			/* extract the type, and the programmable bits */
			if (mask & PCI_BASE_ADDRESS_SPACE_IO) {
				type = "I/O";
				mask &= PCI_BASE_ADDRESS_IO_MASK;
				size = (~mask + 1) & 0xffff;	/* Bleah */
			} else {
				type = "mem";
				mask &= PCI_BASE_ADDRESS_MEM_MASK;
				size = ~mask + 1;
			}

			len += PRINTF("\tregion %i: type %s, size %i (%i%s)\n",
				      i, type, size,
				      (size & 0xfffff) == 0 ? size >> 20 :
				      (size & 0x3ff) == 0 ? size >> 10 : size,
				      (size & 0xfffff) == 0 ? "MB" :
				      (size & 0x3ff) == 0 ? "KB" : "B");

			if (len > PAGE_SIZE / 2) {
				len += PRINTF("... more info skipped ...\n");
				*eof = 1;
				return len;
			}
		}
		pci_release_device(dev);	/* 2.0 compatibility */
	}	/* devfn */

	*eof = 1;
	return len;
}
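/*
 * A minimal sketch of the BAR sizing trick probed above: after writing
 * all ones to a base address register, only the programmable address
 * bits read back as 1, so the region size is ~mask + 1 (with I/O masks
 * further limited to 16 bits). For example, a memory mask of
 * 0xfffff000 decodes to a 4 KB region.
 */
static u32 bar_size_sketch(u32 mask)
{
	if (mask & PCI_BASE_ADDRESS_SPACE_IO)
		return (~(mask & PCI_BASE_ADDRESS_IO_MASK) + 1) & 0xffff;
	return ~(mask & PCI_BASE_ADDRESS_MEM_MASK) + 1;
}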
static void intel_tlbflush(struct agp_memory *mem)
{
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
}
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mem;
	struct ath_wiphy *aphy;
	struct ath_softc *sc;
	struct ieee80211_hw *hw;
	u8 csz;
	u16 subsysid;
	u32 val;
	int ret = 0;
	struct ath_hw *ah;
	char hw_name[64];

	if (pci_enable_device(pdev))
		return -EIO;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
		goto bad;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "ath9k: 32-bit DMA consistent "
		       "DMA enable failed\n");
		goto bad;
	}

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * Linux 2.4.18 (at least) writes the cache line size
		 * register as a 16-bit wide register which is wrong.
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = L1_CACHE_BYTES / sizeof(u32);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	pci_set_master(pdev);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	ret = pci_request_region(pdev, 0, "ath9k");
	if (ret) {
		dev_err(&pdev->dev, "PCI memory region reserve error\n");
		ret = -ENODEV;
		goto bad;
	}

	mem = pci_iomap(pdev, 0, 0);
	if (!mem) {
		printk(KERN_ERR "PCI memory map error\n");
		ret = -EIO;
		goto bad1;
	}

	hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
				sizeof(struct ath_softc), &ath9k_ops);
	if (!hw) {
		dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
		ret = -ENOMEM;
		goto bad2;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	aphy = hw->priv;
	sc = (struct ath_softc *) (aphy + 1);
	aphy->sc = sc;
	aphy->hw = hw;
	sc->pri_wiphy = aphy;
	sc->hw = hw;
	sc->dev = &pdev->dev;
	sc->mem = mem;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
	ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize device\n");
		goto bad3;
	}

	/* setup interrupt service routine */
	ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto bad4;
	}

	sc->irq = pdev->irq;

	ah = sc->sc_ah;
	ath9k_hw_name(ah, hw_name, sizeof(hw_name));
	printk(KERN_INFO "%s: %s mem=0x%lx, irq=%d\n",
	       wiphy_name(hw->wiphy), hw_name,
	       (unsigned long)mem, pdev->irq);

	return 0;
bad4:
	ath_detach(sc);
bad3:
	ieee80211_free_hw(hw);
bad2:
	pci_iounmap(pdev, mem);
bad1:
	pci_release_region(pdev, 0);
bad:
	pci_disable_device(pdev);
	return ret;
}
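/*
 * A hedged sketch of the RETRY_TIMEOUT quirk above: register 0x41 is
 * byte 1 of the config dword at 0x40, so masking the dword with
 * 0xffff00ff zeroes it. The equivalent single-byte access, assuming
 * the same undocumented register layout:
 */
static void disable_retry_timeout_sketch(struct pci_dev *pdev)
{
	u8 retry_timeout;

	pci_read_config_byte(pdev, 0x41, &retry_timeout);
	if (retry_timeout)
		pci_write_config_byte(pdev, 0x41, 0);
}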
/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}