static void ssb_pcie_mdio_write(struct ssb_pcicore *pc, u8 device,
				u8 address, u16 data)
{
	const u16 mdio_control = 0x128;
	const u16 mdio_data = 0x12C;
	u32 v;
	int i;

	v = 0x80; /* Enable Preamble Sequence */
	v |= 0x2; /* MDIO Clock Divisor */
	pcicore_write32(pc, mdio_control, v);

	v = (1 << 30); /* Start of Transaction */
	v |= (1 << 28); /* Write Transaction */
	v |= (1 << 17); /* Turnaround */
	v |= (u32)device << 22;
	v |= (u32)address << 18;
	v |= data;
	pcicore_write32(pc, mdio_data, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < 10; i++) {
		v = pcicore_read32(pc, mdio_control);
		if (v & 0x100 /* Trans complete */)
			break;
		msleep(1);
	}
	pcicore_write32(pc, mdio_control, 0);
}
static void ssb_pcie_mdio_write(struct ssb_pcicore *pc, u8 device,
				u8 address, u16 data)
{
	const u16 mdio_control = 0x128;
	const u16 mdio_data = 0x12C;
	u32 v;
	int i;

	v = 0x80; /* Enable Preamble Sequence */
	v |= 0x2; /* MDIO Clock Divisor */
	pcicore_write32(pc, mdio_control, v);

	v = (1 << 30); /* Start of Transaction */
	v |= (1 << 28); /* Write Transaction */
	v |= (1 << 17); /* Turnaround */
	v |= (u32)device << 22;
	v |= (u32)address << 18;
	v |= data;
	pcicore_write32(pc, mdio_data, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < 10; i++) {
		v = pcicore_read32(pc, mdio_control);
		if (v & 0x100 /* Trans complete */)
			break;
		msleep(1);
	}
	pcicore_write32(pc, mdio_control, 0);
}
static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
				   u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}
static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
{
	u32 val;

	if (WARN_ON(extpci_core))
		return;
	extpci_core = pc;

	ssb_dprintk(KERN_INFO PFX "PCIcore in host mode found\n");

	/* Reset devices on the external PCI bus */
	val = SSB_PCICORE_CTL_RST_OE;
	val |= SSB_PCICORE_CTL_CLK_OE;
	pcicore_write32(pc, SSB_PCICORE_CTL, val);
	val |= SSB_PCICORE_CTL_CLK; /* Clock on */
	pcicore_write32(pc, SSB_PCICORE_CTL, val);
	udelay(150); /* Assertion time demanded by the PCI standard */
	val |= SSB_PCICORE_CTL_RST; /* Deassert RST# */
	pcicore_write32(pc, SSB_PCICORE_CTL, val);
	val = SSB_PCICORE_ARBCTL_INTERN;
	pcicore_write32(pc, SSB_PCICORE_ARBCTL, val);
	udelay(1); /* Assertion time demanded by the PCI standard */

	/* TODO: cardbus mode */

	/* 64MB I/O window */
	pcicore_write32(pc, SSB_PCICORE_SBTOPCI0,
			SSB_PCICORE_SBTOPCI_IO);
	/* 64MB config space */
	pcicore_write32(pc, SSB_PCICORE_SBTOPCI1,
			SSB_PCICORE_SBTOPCI_CFG0);
	/* 1GB memory window */
	pcicore_write32(pc, SSB_PCICORE_SBTOPCI2,
			SSB_PCICORE_SBTOPCI_MEM | SSB_PCI_DMA);

	/* Enable PCI bridge BAR0 prefetch and burst */
	val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	ssb_extpci_write_config(pc, 0, 0, 0, PCI_COMMAND, &val, 2);
	/* Clear error conditions */
	val = 0;
	ssb_extpci_write_config(pc, 0, 0, 0, PCI_STATUS, &val, 2);

	/* Enable PCI interrupts */
	pcicore_write32(pc, SSB_PCICORE_IMASK,
			SSB_PCICORE_IMASK_INTA);

	/* Ok, ready to run, register it to the system.
	 * The following needs change, if we want to port hostmode
	 * to non-MIPS platform. */
	set_io_port_base((unsigned long)ioremap_nocache(SSB_PCI_MEM,
							0x04000000));

	/* Give some time to the PCI controller to configure itself with the new
	 * values. Not waiting at this point causes crashes of the machine. */
	mdelay(10);
	register_pci_controller(&ssb_pcicore_controller);
}
static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
{
	int max_retries = 10;
	u16 ret = 0;
	u32 v;
	int i;

	/* enable mdio access to SERDES */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_READ;
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
			udelay(10);
			ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
			break;
		}
		msleep(1);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
	return ret;
}
static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
{
	const u16 mdio_control = 0x128;
	const u16 mdio_data = 0x12C;
	int max_retries = 10;
	u16 ret = 0;
	u32 v;
	int i;

	v = 0x80; /* Enable Preamble Sequence */
	v |= 0x2; /* MDIO Clock Divisor */
	pcicore_write32(pc, mdio_control, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
	}

	v = (1 << 30); /* Start of Transaction */
	v |= (1 << 29); /* Read Transaction */
	v |= (1 << 17); /* Turnaround */
	if (pc->core->id.rev < 10)
		v |= (u32)device << 22;
	v |= (u32)address << 18;
	pcicore_write32(pc, mdio_data, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, mdio_control);
		if (v & 0x100 /* Trans complete */) {
			udelay(10);
			ret = pcicore_read32(pc, mdio_data);
			break;
		}
		msleep(1);
	}
	pcicore_write32(pc, mdio_control, 0);
	return ret;
}
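The magic numbers in the variant above line up with the symbolic constants used by the other bcma_pcie_mdio_* snippets in this section. The cross-reference below is a sketch inferred from that comparison; the names should correspond to the defines in include/linux/bcma/bcma_driver_pci.h, but verify against the header before relying on the exact values.

/* Inferred mapping of the raw MDIO offsets/bits to the symbolic names used above. */
#define BCMA_CORE_PCI_MDIO_CONTROL			0x0128		/* mdio_control */
#define  BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL		0x2		/* MDIO clock divisor */
#define  BCMA_CORE_PCI_MDIOCTL_PREAM_EN			0x80		/* Enable preamble sequence */
#define  BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE		0x100		/* Transaction complete */
#define BCMA_CORE_PCI_MDIO_DATA				0x012C		/* mdio_data */
#define  BCMA_CORE_PCI_MDIODATA_TA			0x00020000	/* Turnaround (1 << 17) */
#define  BCMA_CORE_PCI_MDIODATA_START			0x40000000	/* Start of transaction (1 << 30) */
#define  BCMA_CORE_PCI_MDIODATA_READ			0x20000000	/* Read transaction (1 << 29) */
#define  BCMA_CORE_PCI_MDIODATA_WRITE			0x10000000	/* Write transaction (1 << 28) */
#define  BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD		22		/* Device address shift (rev < 10) */
#define  BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD		18		/* Register address shift (rev < 10) */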
static u32 get_cfgspace_addr(struct ssb_pcicore *pc,
			     unsigned int bus, unsigned int dev,
			     unsigned int func, unsigned int off)
{
	u32 addr = 0;
	u32 tmp;

	/* We do only have one cardbus device behind the bridge. */
	if (pc->cardbusmode && (dev >= 1))
		goto out;

	if (bus == 0) {
		/* Type 0 transaction */
		if (unlikely(dev >= SSB_PCI_SLOT_MAX))
			goto out;
		/* Slide the window */
		tmp = SSB_PCICORE_SBTOPCI_CFG0;
		tmp |= ((1 << (dev + 16)) & SSB_PCICORE_SBTOPCI1_MASK);
		pcicore_write32(pc, SSB_PCICORE_SBTOPCI1, tmp);
		/* Calculate the address */
		addr = SSB_PCI_CFG;
		addr |= ((1 << (dev + 16)) & ~SSB_PCICORE_SBTOPCI1_MASK);
		addr |= (func << 8);
		addr |= (off & ~3);
	} else {
		/* Type 1 transaction */
		pcicore_write32(pc, SSB_PCICORE_SBTOPCI1,
				SSB_PCICORE_SBTOPCI_CFG1);
		/* Calculate the address */
		addr = SSB_PCI_CFG;
		addr |= (bus << 16);
		addr |= (dev << 11);
		addr |= (func << 8);
		addr |= (off & ~3);
	}
out:
	return addr;
}
static u32 get_cfgspace_addr(struct ssb_pcicore *pc,
			     unsigned int bus, unsigned int dev,
			     unsigned int func, unsigned int off)
{
	u32 addr = 0;
	u32 tmp;

	/* We do only have one cardbus device behind the bridge. */
	if (pc->cardbusmode && (dev >= 1))
		goto out;

	if (bus == 0) {
		/* Type 0 transaction */
		if (unlikely(dev >= SSB_PCI_SLOT_MAX))
			goto out;
		/* Slide the window */
		tmp = SSB_PCICORE_SBTOPCI_CFG0;
		tmp |= ((1 << (dev + 16)) & SSB_PCICORE_SBTOPCI1_MASK);
		pcicore_write32(pc, SSB_PCICORE_SBTOPCI1, tmp);
		/* Calculate the address */
		addr = SSB_PCI_CFG;
		addr |= ((1 << (dev + 16)) & ~SSB_PCICORE_SBTOPCI1_MASK);
		addr |= (func << 8);
		addr |= (off & ~3);
	} else {
		/* Type 1 transaction */
		pcicore_write32(pc, SSB_PCICORE_SBTOPCI1,
				SSB_PCICORE_SBTOPCI_CFG1);
		/* Calculate the address */
		addr = SSB_PCI_CFG;
		addr |= (bus << 16);
		addr |= (dev << 11);
		addr |= (func << 8);
		addr |= (off & ~3);
	}
out:
	return addr;
}
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
{
	const u16 mdio_control = 0x128;
	const u16 mdio_data = 0x12C;
	u32 v;
	int i;

	v = (1 << 30); /* Start of Transaction */
	v |= (1 << 28); /* Write Transaction */
	v |= (1 << 17); /* Turnaround */
	v |= (0x1F << 18);
	v |= (phy << 4);
	pcicore_write32(pc, mdio_data, v);

	udelay(10);
	for (i = 0; i < 200; i++) {
		v = pcicore_read32(pc, mdio_control);
		if (v & 0x100 /* Trans complete */)
			break;
		msleep(1);
	}
}
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
{
	u32 v;
	int i;

	v = BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
	v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= (phy << 4);
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	udelay(10);
	for (i = 0; i < 200; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		msleep(1);
	}
}
static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
				  unsigned int func, unsigned int off)
{
	u32 addr = 0;

	/* Issue config commands only when the data link is up (at least
	 * one external PCIe device is present). */
	if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG) &
			  BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
		goto out;

	/* Type 0 transaction */
	/* Slide the PCI window to the appropriate slot */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
	/* Calculate the address */
	addr = pc->host_controller->host_cfg_addr;
	addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
	addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
	addr |= (off & ~3);

out:
	return addr;
}
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}
static u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, 0x130, address);
	pcicore_read32(pc, 0x130);
	return pcicore_read32(pc, 0x134);
}
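The raw offsets 0x130 and 0x134 used here (and by the ssb_pcie_read, ssb_pcie_write and bcma_pcie_write helpers further below) are the same indirect address/data register pair that the symbolic variant above accesses as BCMA_CORE_PCI_PCIEIND_ADDR and BCMA_CORE_PCI_PCIEIND_DATA. A minimal sketch of that mapping, inferred from the comparison; verify against include/linux/bcma/bcma_driver_pci.h.

/* Indirect PCIe register access pair; values inferred from the snippets above. */
#define BCMA_CORE_PCI_PCIEIND_ADDR	0x0130	/* indirect register: address */
#define BCMA_CORE_PCI_PCIEIND_DATA	0x0134	/* indirect register: data */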
void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	struct bcma_drv_pci_host *pc_host;
	u32 tmp;
	u32 pci_membase_1G;
	unsigned long io_map_base;

	bcma_info(bus, "PCIEcore in host mode found\n");

	if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
		bcma_info(bus, "This PCIE core is disabled and not working\n");
		return;
	}

	pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
	if (!pc_host) {
		bcma_err(bus, "can not allocate memory");
		return;
	}

	pc->host_controller = pc_host;
	pc_host->pci_controller.io_resource = &pc_host->io_resource;
	pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
	pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
	pc_host->pdev = pc;

	pci_membase_1G = BCMA_SOC_PCI_DMA;
	pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;

	pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
	pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;

	pc_host->mem_resource.name = "BCMA PCIcore external memory";
	pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
	pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
	pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;

	pc_host->io_resource.name = "BCMA PCIcore external I/O";
	pc_host->io_resource.start = 0x100;
	pc_host->io_resource.end = 0x7FF;
	pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;

	/* Reset RC */
	usleep_range(3000, 5000);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
	msleep(50);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
			BCMA_CORE_PCI_CTL_RST_OE);

	/* 64 MB I/O access window. On 4716, use
	 * sbtopcie0 to access the device registers. We
	 * can't use address match 2 (1 GB window) region
	 * as mips can't generate 64-bit address on the
	 * backplane.
	 */
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
		pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
		pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
			BCMA_SOC_PCI_MEM_SZ - 1;
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
	} else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
		tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
		tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
		tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
		if (pc->core->core_unit == 0) {
			pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
				BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x100;
			pc_host->io_resource.end = 0x47F;
			pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI_MEM);
		} else if (pc->core->core_unit == 1) {
			pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
				BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x480;
			pc_host->io_resource.end = 0x7FF;
			pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
			pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI1_MEM);
		}
	} else
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_IO);

	/* 64 MB configuration access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);

	/* 1 GB memory access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
			BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);

	/* As per PCI Express Base Spec 1.1 we need to wait for
	 * at least 100 ms from the end of a reset (cold/warm/hot)
	 * before issuing configuration requests to PCI Express
	 * devices.
	 */
	msleep(100);

	bcma_core_pci_enable_crs(pc);

	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
		u16 val16;

		bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					&val16, sizeof(val16));
		val16 |= (2 << 5);	/* Max payload size of 512 */
		val16 |= (2 << 12);	/* MRRS 512 */
		bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					 &val16, sizeof(val16));
	}

	/* Enable PCI bridge BAR0 memory & master access */
	tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));

	/* Enable PCI interrupts */
	pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);

	/* Ok, ready to run, register it to the system.
	 * The following needs change, if we want to port hostmode
	 * to non-MIPS platform. */
	io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
						     resource_size(&pc_host->mem_resource));
	pc_host->pci_controller.io_map_base = io_map_base;
	set_io_port_base(pc_host->pci_controller.io_map_base);

	/* Give some time to the PCI controller to configure itself with the new
	 * values. Not waiting at this point causes crashes of the machine. */
	usleep_range(10000, 15000);
	register_pci_controller(&pc_host->pci_controller);
	return;
}
static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}
int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
				   struct ssb_device *dev)
{
	struct ssb_device *pdev = pc->dev;
	struct ssb_bus *bus;
	int err = 0;
	u32 tmp;

	might_sleep();

	if (!pdev)
		goto out;
	bus = pdev->bus;

	/* Enable interrupts for this device. */
	if (bus->host_pci &&
	    ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) {
		u32 coremask;

		/* Calculate the "coremask" for the device. */
		coremask = (1 << dev->core_index);

		err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp);
		if (err)
			goto out;
		tmp |= coremask << 8;
		err = pci_write_config_dword(bus->host_pci, SSB_PCI_IRQMASK, tmp);
		if (err)
			goto out;
	} else {
		u32 intvec;

		intvec = ssb_read32(pdev, SSB_INTVEC);
		if ((bus->chip_id & 0xFF00) == 0x4400) {
			/* Workaround: On the BCM44XX the BPFLAG routing
			 * bit is wrong. Use a hardcoded constant. */
			intvec |= 0x00000002;
		} else {
			tmp = ssb_read32(dev, SSB_TPSFLAG);
			tmp &= SSB_TPSFLAG_BPFLAG;
			intvec |= tmp;
		}
		ssb_write32(pdev, SSB_INTVEC, intvec);
	}

	/* Setup PCIcore operation. */
	if (pc->setup_done)
		goto out;
	if (pdev->id.coreid == SSB_DEV_PCI) {
		tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2);
		tmp |= SSB_PCICORE_SBTOPCI_PREF;
		tmp |= SSB_PCICORE_SBTOPCI_BURST;
		pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp);

		if (pdev->id.revision < 5) {
			tmp = ssb_read32(pdev, SSB_IMCFGLO);
			tmp &= ~SSB_IMCFGLO_SERTO;
			tmp |= 2;
			tmp &= ~SSB_IMCFGLO_REQTO;
			tmp |= 3 << SSB_IMCFGLO_REQTO_SHIFT;
			ssb_write32(pdev, SSB_IMCFGLO, tmp);
			ssb_commit_settings(bus);
		} else if (pdev->id.revision >= 11) {
			tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2);
			tmp |= SSB_PCICORE_SBTOPCI_MRM;
			pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp);
		}
	} else {
		WARN_ON(pdev->id.coreid != SSB_DEV_PCIE);
		//TODO: Better make defines for all these magic PCIE values.
		if ((pdev->id.revision == 0) || (pdev->id.revision == 1)) {
			/* TLP Workaround register. */
			tmp = ssb_pcie_read(pc, 0x4);
			tmp |= 0x8;
			ssb_pcie_write(pc, 0x4, tmp);
		}
		if (pdev->id.revision == 0) {
			const u8 serdes_rx_device = 0x1F;

			ssb_pcie_mdio_write(pc, serdes_rx_device,
					    2 /* Timer */, 0x8128);
			ssb_pcie_mdio_write(pc, serdes_rx_device,
					    6 /* CDR */, 0x0100);
			ssb_pcie_mdio_write(pc, serdes_rx_device,
					    7 /* CDR BW */, 0x1466);
		} else if (pdev->id.revision == 1) {
			/* DLLP Link Control register. */
			tmp = ssb_pcie_read(pc, 0x100);
			tmp |= 0x40;
			ssb_pcie_write(pc, 0x100, tmp);
		}
	}
	pc->setup_done = 1;
out:
	return err;
}
static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
				    unsigned int func, unsigned int off,
				    const void *buf, int len)
{
	int err = -EINVAL;
	u32 addr, val;
	void __iomem *mmio = 0;
	u16 chipid = pc->core->bus->chipinfo.id;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* we support only two functions on device 0 */
		if (func > 1)
			goto out;

		/* accesses to config registers with offsets >= 256
		 * requires indirect access. */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFC);
			val = bcma_pcie_read_config(pc, addr);
		} else {
			addr = BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xFC);
			val = pcicore_read32(pc, addr);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap_nocache(addr, sizeof(val));
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xFFFFFFFF;
			goto unmap;
		}
	}

	switch (len) {
	case 1:
		val &= ~(0xFF << (8 * (off & 3)));
		val |= *((const u8 *)buf) << (8 * (off & 3));
		break;
	case 2:
		val &= ~(0xFFFF << (8 * (off & 3)));
		val |= *((const u16 *)buf) << (8 * (off & 3));
		break;
	case 4:
		val = *((const u32 *)buf);
		break;
	}

	if (dev == 0) {
		/* accesses to config registers with offsets >= 256
		 * requires indirect access. */
		if (off >= PCI_CONFIG_SPACE_SIZE)
			bcma_pcie_write_config(pc, addr, val);
		else
			pcicore_write32(pc, addr, val);
	} else {
		writel(val, mmio);

		if (chipid == BCMA_CHIP_ID_BCM4716 ||
		    chipid == BCMA_CHIP_ID_BCM4748)
			readl(mmio);
	}

	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}
static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
{
	u32 val;

	if (WARN_ON(extpci_core))
		return;
	extpci_core = pc;

	ssb_dprintk(KERN_INFO PFX "PCIcore in host mode found\n");

	/* Reset devices on the external PCI bus */
	val = SSB_PCICORE_CTL_RST_OE;
	val |= SSB_PCICORE_CTL_CLK_OE;
	pcicore_write32(pc, SSB_PCICORE_CTL, val);
	val |= SSB_PCICORE_CTL_CLK; /* Clock on */
	pcicore_write32(pc, SSB_PCICORE_CTL, val);
	udelay(150); /* Assertion time demanded by the PCI standard */
	val |= SSB_PCICORE_CTL_RST; /* Deassert RST# */
	pcicore_write32(pc, SSB_PCICORE_CTL, val);
	val = SSB_PCICORE_ARBCTL_INTERN;
	pcicore_write32(pc, SSB_PCICORE_ARBCTL, val);
	udelay(1); /* Assertion time demanded by the PCI standard */

	if (pc->dev->bus->has_cardbus_slot) {
		ssb_dprintk(KERN_INFO PFX "CardBus slot detected\n");
		pc->cardbusmode = 1;
		/* GPIO 1 resets the bridge */
		ssb_gpio_out(pc->dev->bus, 1, 1);
		ssb_gpio_outen(pc->dev->bus, 1, 1);
		pcicore_write16(pc, SSB_PCICORE_SPROM(0),
				pcicore_read16(pc, SSB_PCICORE_SPROM(0))
				| 0x0400);
	}

	/* 64MB I/O window */
	pcicore_write32(pc, SSB_PCICORE_SBTOPCI0,
			SSB_PCICORE_SBTOPCI_IO);
	/* 64MB config space */
	pcicore_write32(pc, SSB_PCICORE_SBTOPCI1,
			SSB_PCICORE_SBTOPCI_CFG0);
	/* 1GB memory window */
	pcicore_write32(pc, SSB_PCICORE_SBTOPCI2,
			SSB_PCICORE_SBTOPCI_MEM | SSB_PCI_DMA);

	/* Enable PCI bridge BAR0 prefetch and burst */
	val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	ssb_extpci_write_config(pc, 0, 0, 0, PCI_COMMAND, &val, 2);
	/* Clear error conditions */
	val = 0;
	ssb_extpci_write_config(pc, 0, 0, 0, PCI_STATUS, &val, 2);

	/* Enable PCI interrupts */
	pcicore_write32(pc, SSB_PCICORE_IMASK,
			SSB_PCICORE_IMASK_INTA);

	/* Ok, ready to run, register it to the system.
	 * The following needs change, if we want to port hostmode
	 * to non-MIPS platform. */
	ssb_pcicore_controller.io_map_base =
		(unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000);
	set_io_port_base(ssb_pcicore_controller.io_map_base);

	/* Give some time to the PCI controller to configure itself with the new
	 * values. Not waiting at this point causes crashes of the machine. */
	mdelay(10);
	register_pci_controller(&ssb_pcicore_controller);
}
static void ssb_pcie_write(struct ssb_pcicore *pc, u32 address, u32 data)
{
	pcicore_write32(pc, 0x130, address);
	pcicore_write32(pc, 0x134, data);
}
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
{
	pcicore_write32(pc, 0x130, address);
	return pcicore_read32(pc, 0x134);
}
static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
	pcicore_write32(pc, 0x130, address);
	pcicore_read32(pc, 0x130);
	pcicore_write32(pc, 0x134, data);
}
int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
				   struct ssb_device *dev)
{
	struct ssb_device *pdev = pc->dev;
	struct ssb_bus *bus;
	int err = 0;
	u32 tmp;

	if (dev->bus->bustype != SSB_BUSTYPE_PCI) {
		/* Not hosted on a PCI bus; IRQs are not routed
		 * through the PCI core. */
		goto out;
	}

	if (!pdev)
		goto out;
	bus = pdev->bus;

	might_sleep_if(pdev->id.coreid != SSB_DEV_PCI);

	/* Enable interrupts for this device. */
	if (bus->host_pci &&
	    ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) {
		u32 coremask;

		/* Calculate the "coremask" for the device. */
		coremask = (1 << dev->core_index);

		err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp);
		if (err)
			goto out;
		tmp |= coremask << 8;
		err = pci_write_config_dword(bus->host_pci, SSB_PCI_IRQMASK, tmp);
		if (err)
			goto out;
	} else {
		u32 intvec;

		intvec = ssb_read32(pdev, SSB_INTVEC);
		tmp = ssb_read32(dev, SSB_TPSFLAG);
		tmp &= SSB_TPSFLAG_BPFLAG;
		intvec |= (1 << tmp);
		ssb_write32(pdev, SSB_INTVEC, intvec);
	}

	/* Setup PCIcore operation. */
	if (pc->setup_done)
		goto out;
	if (pdev->id.coreid == SSB_DEV_PCI) {
		tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2);
		tmp |= SSB_PCICORE_SBTOPCI_PREF;
		tmp |= SSB_PCICORE_SBTOPCI_BURST;
		pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp);

		if (pdev->id.revision < 5) {
			tmp = ssb_read32(pdev, SSB_IMCFGLO);
			tmp &= ~SSB_IMCFGLO_SERTO;
			tmp |= 2;
			tmp &= ~SSB_IMCFGLO_REQTO;
			tmp |= 3 << SSB_IMCFGLO_REQTO_SHIFT;
			ssb_write32(pdev, SSB_IMCFGLO, tmp);
			ssb_commit_settings(bus);
		} else if (pdev->id.revision >= 11) {
			tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2);
			tmp |= SSB_PCICORE_SBTOPCI_MRM;
			pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp);
		}
	} else {
		WARN_ON(pdev->id.coreid != SSB_DEV_PCIE);
		if ((pdev->id.revision == 0) || (pdev->id.revision == 1)) {
			/* TLP Workaround register. */
			tmp = ssb_pcie_read(pc, 0x4);
			tmp |= 0x8;
			ssb_pcie_write(pc, 0x4, tmp);
		}
		if (pdev->id.revision == 0) {
			const u8 serdes_rx_device = 0x1F;

			ssb_pcie_mdio_write(pc, serdes_rx_device,
					    2 /* Timer */, 0x8128);
			ssb_pcie_mdio_write(pc, serdes_rx_device,
					    6 /* CDR */, 0x0100);
			ssb_pcie_mdio_write(pc, serdes_rx_device,
					    7 /* CDR BW */, 0x1466);
		} else if (pdev->id.revision == 1) {
			/* DLLP Link Control register. */
			tmp = ssb_pcie_read(pc, 0x100);
			tmp |= 0x40;
			ssb_pcie_write(pc, 0x100, tmp);
		}
	}
	pc->setup_done = 1;
out:
	return err;
}