static inline void setup_transfer(struct floppy_state *fs)
{
	int n;
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_cmd *cp = fs->dma_cmd;
	struct dbdma_regs __iomem *dr = fs->dma;

	if (fd_req->current_nr_sectors <= 0) {
		printk(KERN_ERR "swim3: transfer 0 sectors?\n");
		return;
	}
	if (rq_data_dir(fd_req) == WRITE)
		n = 1;
	else {
		n = fs->secpertrack - fs->req_sector + 1;
		if (n > fd_req->current_nr_sectors)
			n = fd_req->current_nr_sectors;
	}
	fs->scount = n;
	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
	out_8(&sw->sector, fs->req_sector);
	out_8(&sw->nsect, n);
	out_8(&sw->gap3, 0);
	out_le32(&dr->cmdptr, virt_to_bus(cp));
	if (rq_data_dir(fd_req) == WRITE) {
		/* Set up 3 dma commands: write preamble, data, postamble */
		init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
		++cp;
		init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
		++cp;
		init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
	} else {
		init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
	}
	++cp;
	out_le16(&cp->command, DBDMA_STOP);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	in_8(&sw->error);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	if (rq_data_dir(fd_req) == WRITE)
		out_8(&sw->control_bis, WRITE_SECTORS);
	in_8(&sw->intr);
	out_le32(&dr->control, (RUN << 16) | RUN);
	/* enable intr when transfer complete */
	out_8(&sw->intr_enable, TRANSFER_DONE);
	out_8(&sw->control_bis, DO_ACTION);
	set_timeout(fs, 2*HZ, xfer_timeout);	/* enable timeout */
}
/*
 * setup Gbit PHYs
 */
int last_stage_init(void)
{
	ihs_fpga_t *fpga = (ihs_fpga_t *) CONFIG_SYS_FPGA_BASE(0);
	unsigned int k;

	miiphy_register(CONFIG_SYS_GBIT_MII_BUSNAME,
			bb_miiphy_read, bb_miiphy_write);

	for (k = 0; k < 32; ++k)
		configure_gbit_phy(k);

	/* take fpga serdes blocks out of reset */
	out_le16(&fpga->quad_serdes_reset, 0);

	return 0;
}
static int direct_pci_write_config_word(
	unsigned char bus,
	unsigned char slot,
	unsigned char function,
	unsigned char offset,
	uint16_t val
)
{
	if (offset&1)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	if (bus != 0 || (1<<slot & 0xff8007fe))
		return PCIBIOS_DEVICE_NOT_FOUND;

	out_le16((volatile unsigned short *)
		 (pci.pci_config_data + ((1<<slot)&~1) +
		  (function<<8) + offset),
		 val);
	return PCIBIOS_SUCCESSFUL;
}
static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
				     int offset, int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct ppc4xx_pciex_port *port =
		&ppc4xx_pciex_ports[hose->indirect_type];
	void __iomem *addr;
	u32 gpl_cfg;

	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);

	/*
	 * Reading from configuration space of non-existing device can
	 * generate transaction errors. For the read duration we suppress
	 * assertion of machine check exceptions to avoid those.
	 */
	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);

	pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
		 bus->number, hose->first_busno, hose->last_busno,
		 devfn, offset, len, addr + offset, val);

	switch (len) {
	case 1:
		out_8((u8 *)(addr + offset), val);
		break;
	case 2:
		out_le16((u16 *)(addr + offset), val);
		break;
	default:
		out_le32((u32 *)(addr + offset), val);
		break;
	}

	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);

	return PCIBIOS_SUCCESSFUL;
}
int fpga_set_reg(u32 fpga, u16 *reg, off_t regoff, u16 data)
{
	int res;

	switch (fpga) {
	case 0:
		out_le16(reg, data);
		break;
	default:
		res = mclink_send(fpga - 1, regoff, data);
		if (res < 0) {
			printf("mclink_send reg %02lx data %04x returned %d\n",
			       regoff, data, res);
			return res;
		}
		break;
	}

	return 0;
}
static int mpc83xx_indirect_write_config(struct pci_bus *bus,
					 unsigned int devfn, int offset,
					 int len, u32 val)
{
	struct pci_controller *hose = bus->sysdata;
	volatile unsigned char *cfg_data;
	u8 cfg_type = 0;
	u8 bus_num;

	if (ppc_md.pci_exclude_device)
		if (ppc_md.pci_exclude_device(bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == hose->first_busno)
		bus_num = 0;
	else
		bus_num = bus->number;

	PCI_CFG_OUT(hose->cfg_addr,
		    (0x80000000 | (bus_num << 16) | (devfn << 8) |
		     ((offset & 0xfc) | cfg_type)));

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	cfg_data = hose->cfg_data + (offset & 3);
	switch (len) {
	case 1:
		out_8((u8 *)cfg_data, val);
		break;
	case 2:
		out_le16((u16 *)cfg_data, val);
		break;
	default:
		out_le32((u32 *)cfg_data, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
static int mv_ata_exec_ata_cmd_nondma(int port,
				      struct sata_fis_h2d *cfis, u8 *buffer,
				      u32 len, u32 iswrite)
{
	struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
	int i;
	u16 *tp;

	debug("%s\n", __func__);

	out_le32(priv->regbase + PIO_SECTOR_COUNT, cfis->sector_count);
	out_le32(priv->regbase + PIO_LBA_HI, cfis->lba_high);
	out_le32(priv->regbase + PIO_LBA_MID, cfis->lba_mid);
	out_le32(priv->regbase + PIO_LBA_LOW, cfis->lba_low);
	out_le32(priv->regbase + PIO_ERR_FEATURES, cfis->features);
	out_le32(priv->regbase + PIO_DEVICE, cfis->device);
	out_le32(priv->regbase + PIO_CMD_STATUS, cfis->command);

	if (ata_wait_register((u32 *)(priv->regbase + PIO_CMD_STATUS),
			      ATA_BUSY, 0x0, 10000)) {
		debug("Failed to wait for completion\n");
		return -1;
	}

	if (len > 0) {
		tp = (u16 *)buffer;
		for (i = 0; i < len / 2; i++) {
			if (iswrite)
				out_le16(priv->regbase + PIO_DATA, *tp++);
			else
				*tp++ = in_le16(priv->regbase + PIO_DATA);
		}
	}

	return len;
}
int tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfunc,
			       int offset, int len, u32 val)
{
	volatile unsigned char *cfg_addr;
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (ppc_md.pci_exclude_device)
		if (ppc_md.pci_exclude_device(hose, bus->number, devfunc))
			return PCIBIOS_DEVICE_NOT_FOUND;

	cfg_addr = (unsigned char *)(tsi_mk_config_addr(bus->number,
							devfunc, offset) |
				     (offset & 0x03));

#ifdef DEBUG
	printk("PCI CFG write : ");
	printk("%d:0x%x:0x%x ", bus->number, devfunc, offset);
	printk("%d ADDR=0x%08x ", len, (uint) cfg_addr);
	printk("data = 0x%08x\n", val);
#endif

	switch (len) {
	case 1:
		out_8((u8 *) cfg_addr, val);
		break;
	case 2:
		out_le16((u16 *) cfg_addr, val);
		break;
	default:
		out_le32((u32 *) cfg_addr, val);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
static int pcie_write_config(struct pci_bus *bus, unsigned int devfn,
			     int offset, int len, u32 val)
{
	struct pci_controller *hose = bus->sysdata;

	if (PCI_SLOT(devfn) != 1)
		return PCIBIOS_DEVICE_NOT_FOUND;

	offset += devfn << 12;

	switch (len) {
	case 1:
		out_8(hose->cfg_data + offset, val);
		break;
	case 2:
		out_le16(hose->cfg_data + offset, val);
		break;
	default:
		out_le32(hose->cfg_data + offset, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
int fpga_set_reg(u32 fpga, u16 *reg, off_t regoff, u16 data)
{
	out_le16(reg, data);

	return 0;
}
static int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	volatile void __iomem *cfg_data;
	u8 cfg_type = 0;
	u32 bus_no, reg;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
		if (bus->number != hose->first_busno)
			return PCIBIOS_DEVICE_NOT_FOUND;
		if (devfn != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (ppc_md.pci_exclude_device)
		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE)
		if (bus->number != hose->first_busno)
			cfg_type = 1;

	bus_no = (bus->number == hose->first_busno) ?
			hose->self_busno : bus->number;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG)
		reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
	else
		reg = offset & 0xfc;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_BIG_ENDIAN)
		out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));
	else
		out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));

	/* suppress setting of PCI_PRIMARY_BUS */
	if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
		if ((offset == PCI_PRIMARY_BUS) &&
		    (bus->number == hose->first_busno))
			val &= 0xffffff00;

	/* Workaround for PCI_28 Errata in 440EPx/GRx */
	if ((hose->indirect_type & PPC_INDIRECT_TYPE_BROKEN_MRM) &&
	    offset == PCI_CACHE_LINE_SIZE) {
		val = 0;
	}

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	cfg_data = hose->cfg_data + (offset & 3);
	switch (len) {
	case 1:
		out_8(cfg_data, val);
		break;
	case 2:
		out_le16(cfg_data, val);
		break;
	default:
		out_le32(cfg_data, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
/*
 * provide access to fpga gpios (for I2C bitbang)
 */
void fpga_gpio_set(int pin)
{
	out_le16((void *)(CONFIG_SYS_FPGA0_BASE + 0x18), pin);
}
/*
 * pmac_ide_build_dmatable builds the DBDMA command list
 * for a transfer and sets the DBDMA channel to point to it.
 */
static int pmac_ide_build_dmatable(ide_drive_t *drive, int ix, int wr)
{
	struct dbdma_cmd *table, *tstart;
	int count = 0;
	struct request *rq = HWGROUP(drive)->rq;
	struct buffer_head *bh = rq->bh;
	unsigned int size, addr;
	volatile struct dbdma_regs *dma = pmac_ide[ix].dma_regs;

	table = tstart = (struct dbdma_cmd *) DBDMA_ALIGN(pmac_ide[ix].dma_table);
	out_le32(&dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
	while (in_le32(&dma->status) & RUN)
		udelay(1);

	do {
		/*
		 * Determine addr and size of next buffer area. We assume that
		 * individual virtual buffers are always composed linearly in
		 * physical memory. For example, we assume that any 8kB buffer
		 * is always composed of two adjacent physical 4kB pages rather
		 * than two possibly non-adjacent physical 4kB pages.
		 */
		if (bh == NULL) {  /* paging requests have (rq->bh == NULL) */
			addr = virt_to_bus(rq->buffer);
			size = rq->nr_sectors << 9;
		} else {
			/* group sequential buffers into one large buffer */
			addr = virt_to_bus(bh->b_data);
			size = bh->b_size;
			while ((bh = bh->b_reqnext) != NULL) {
				if ((addr + size) != virt_to_bus(bh->b_data))
					break;
				size += bh->b_size;
			}
		}

		/*
		 * Fill in the next DBDMA command block.
		 * Note that one DBDMA command can transfer
		 * at most 65535 bytes.
		 */
		while (size) {
			unsigned int tc = (size < 0xfe00)? size: 0xfe00;

			if (++count >= MAX_DCMDS) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				return 0; /* revert to PIO for this request */
			}
			st_le16(&table->command, wr? OUTPUT_MORE: INPUT_MORE);
			st_le16(&table->req_count, tc);
			st_le32(&table->phy_addr, addr);
			table->cmd_dep = 0;
			table->xfer_status = 0;
			table->res_count = 0;
			addr += tc;
			size -= tc;
			++table;
		}
	} while (bh != NULL);

	/* convert the last command to an input/output last command */
	if (count)
		st_le16(&table[-1].command, wr? OUTPUT_LAST: INPUT_LAST);
	else
		printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);

	/* add the stop command to the end of the list */
	memset(table, 0, sizeof(struct dbdma_cmd));
	out_le16(&table->command, DBDMA_STOP);

	out_le32(&dma->cmdptr, virt_to_bus(tstart));
	return 1;
}
void ppc440spe_setup_pcie(struct pci_controller *hose, int port)
{
	void __iomem *mbase;

	/*
	 * Map 16MB, which is enough for 4 bits of bus #
	 */
	hose->cfg_data = ioremap64(0xc40000000ull + port * 0x40000000,
				   1 << 24);
	hose->ops = &pcie_pci_ops;

	/*
	 * Set bus numbers on our root port
	 */
	mbase = ioremap64(0xc50000000ull + port * 0x40000000, 4096);
	out_8(mbase + PCI_PRIMARY_BUS, 0);
	out_8(mbase + PCI_SECONDARY_BUS, 0);

	/*
	 * Set up outbound translation to hose->mem_space from PLB
	 * addresses at an offset of 0xd_0000_0000. We set the low
	 * bits of the mask to 11 to turn off splitting into 8
	 * subregions and to enable the outbound translation.
	 */
	out_le32(mbase + PECFG_POM0LAH, 0);
	out_le32(mbase + PECFG_POM0LAL, hose->mem_space.start);

	switch (port) {
	case 0:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE0), 0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE0), hose->mem_space.start);
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE0), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE0),
		      ~(hose->mem_space.end - hose->mem_space.start) | 3);
		break;
	case 1:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE1), 0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE1), hose->mem_space.start);
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE1), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE1),
		      ~(hose->mem_space.end - hose->mem_space.start) | 3);
		break;
	case 2:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE2), 0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE2), hose->mem_space.start);
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE2), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE2),
		      ~(hose->mem_space.end - hose->mem_space.start) | 3);
		break;
	}

	/* Set up 16GB inbound memory window at 0 */
	out_le32(mbase + PCI_BASE_ADDRESS_0, 0);
	out_le32(mbase + PCI_BASE_ADDRESS_1, 0);
	out_le32(mbase + PECFG_BAR0HMPA, 0x7fffffc);
	out_le32(mbase + PECFG_BAR0LMPA, 0);
	out_le32(mbase + PECFG_PIM0LAL, 0);
	out_le32(mbase + PECFG_PIM0LAH, 0);
	out_le32(mbase + PECFG_PIMEN, 0x1);

	/* Enable I/O, Mem, and Busmaster cycles */
	out_le16(mbase + PCI_COMMAND,
		 in_le16(mbase + PCI_COMMAND) |
		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	iounmap(mbase);
}
void fpga_gpio_clear(int pin)
{
	out_le16((void *)(CONFIG_SYS_FPGA0_BASE + 0x16), pin);
}
int last_stage_init(void)
{
	unsigned int k;
	unsigned int fpga;
	struct ihs_fpga *fpga0 = (struct ihs_fpga *) CONFIG_SYS_FPGA_BASE(0);
	struct ihs_fpga *fpga1 = (struct ihs_fpga *) CONFIG_SYS_FPGA_BASE(1);
	int failed = 0;
	char str_phys[] = "Setup PHYs -";
	char str_serdes[] = "Start SERDES blocks";
	char str_channels[] = "Start FPGA channels";
	char str_locks[] = "Verify SERDES locks";
	char str_hicb[] = "Verify HICB status";
	char str_status[] = "Verify PHY status -";
	char slash[] = "\\|/-\\|/-";

	print_fpga_info(0);
	print_fpga_info(1);

	/* setup Gbit PHYs */
	puts("TRANS: ");
	puts(str_phys);

	miiphy_register(CONFIG_SYS_GBIT_MII_BUSNAME,
			bb_miiphy_read, bb_miiphy_write);

	for (k = 0; k < 32; ++k) {
		configure_gbit_phy(CONFIG_SYS_GBIT_MII_BUSNAME, k);
		putc('\b');
		putc(slash[k % 8]);
	}

	miiphy_register(CONFIG_SYS_GBIT_MII1_BUSNAME,
			bb_miiphy_read, bb_miiphy_write);

	for (k = 0; k < 32; ++k) {
		configure_gbit_phy(CONFIG_SYS_GBIT_MII1_BUSNAME, k);
		putc('\b');
		putc(slash[k % 8]);
	}

	blank_string(strlen(str_phys));

	/* take fpga serdes blocks out of reset */
	puts(str_serdes);
	udelay(500000);
	out_le16(&fpga0->quad_serdes_reset, 0);
	out_le16(&fpga1->quad_serdes_reset, 0);
	blank_string(strlen(str_serdes));

	/* take channels out of reset */
	puts(str_channels);
	udelay(500000);
	for (fpga = 0; fpga < 2; ++fpga) {
		u16 *ch0_config_int = &(fpga ? fpga1 : fpga0)->ch0_config_int;

		for (k = 0; k < 32; ++k)
			out_le16(ch0_config_int + 4 * k, 0);
	}
	blank_string(strlen(str_channels));

	/* verify channels serdes lock */
	puts(str_locks);
	udelay(500000);
	for (fpga = 0; fpga < 2; ++fpga) {
		u16 *ch0_status_int = &(fpga ? fpga1 : fpga0)->ch0_status_int;

		for (k = 0; k < 32; ++k) {
			u16 status = in_le16(ch0_status_int + 4*k);

			if (!(status & (1 << 4))) {
				failed = 1;
				printf("fpga %d channel %d: no serdes lock\n",
				       fpga, k);
			}
			/* reset events */
			out_le16(ch0_status_int + 4*k, status);
		}
	}
	blank_string(strlen(str_locks));

	/* verify hicb_status */
	puts(str_hicb);
	for (fpga = 0; fpga < 2; ++fpga) {
		u16 *ch0_hicb_status_int =
			&(fpga ? fpga1 : fpga0)->ch0_hicb_status_int;

		for (k = 0; k < 32; ++k) {
			u16 status = in_le16(ch0_hicb_status_int + 4*k);

			if (status)
				printf("fpga %d hicb %d: hicb status %04x\n",
				       fpga, k, status);
			/* reset events */
			out_le16(ch0_hicb_status_int + 4*k, status);
		}
	}
	blank_string(strlen(str_hicb));

	/* verify phy status */
	puts(str_status);
	for (k = 0; k < 32; ++k) {
		if (verify_gbit_phy(CONFIG_SYS_GBIT_MII_BUSNAME, k)) {
			printf("verify baseboard phy %d failed\n", k);
			failed = 1;
		}
		putc('\b');
		putc(slash[k % 8]);
	}
	for (k = 0; k < 32; ++k) {
		if (verify_gbit_phy(CONFIG_SYS_GBIT_MII1_BUSNAME, k)) {
			printf("verify extensionboard phy %d failed\n", k);
			failed = 1;
		}
		putc('\b');
		putc(slash[k % 8]);
	}
	blank_string(strlen(str_status));

	printf("Starting 64 channels %s\n", failed ? "failed" : "ok");

	return 0;
}
static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
{
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	const int *bus_range;
	int primary = 0, busses;
	void __iomem *mbase = NULL, *cfg_data = NULL;
	const u32 *pval;
	u32 val;

	/* Check if primary bridge */
	if (of_get_property(port->node, "primary", NULL))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(port->node, "bus-range", NULL);

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(port->node);
	if (!hose)
		goto fail;

	/* We stick the port number in "indirect_type" so the config space
	 * ops can retrieve the port data structure easily
	 */
	hose->indirect_type = port->index;

	/* Get bus range */
	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Because of how big mapping the config space is (1M per bus), we
	 * limit how many busses we support. In the long run, we could replace
	 * that with something akin to kmap_atomic instead. We set aside 1 bus
	 * for the host itself too.
	 */
	busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
	if (busses > MAX_PCIE_BUS_MAPPED) {
		busses = MAX_PCIE_BUS_MAPPED;
		hose->last_busno = hose->first_busno + busses;
	}

	if (!port->endpoint) {
		/* Only map the external config space in cfg_data for
		 * PCIe root-complexes. External space is 1M per bus
		 */
		cfg_data = ioremap(port->cfg_space.start +
				   (hose->first_busno + 1) * 0x100000,
				   busses * 0x100000);
		if (cfg_data == NULL) {
			printk(KERN_ERR "%s: Can't map external config space !",
			       port->node->full_name);
			goto fail;
		}
		hose->cfg_data = cfg_data;
	}

	/* Always map the host config space in cfg_addr.
	 * Internal space is 4K
	 */
	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
	if (mbase == NULL) {
		printk(KERN_ERR "%s: Can't map internal config space !",
		       port->node->full_name);
		goto fail;
	}
	hose->cfg_addr = mbase;

	pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
		 hose->first_busno, hose->last_busno);
	pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
		 hose->cfg_addr, hose->cfg_data);

	/* Setup config space */
	hose->ops = &ppc4xx_pciex_pci_ops;
	port->hose = hose;
	mbase = (void __iomem *)hose->cfg_addr;

	if (!port->endpoint) {
		/*
		 * Set bus numbers on our root port
		 */
		out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
		out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
		out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
	}

	/*
	 * OMRs are already reset, also disable PIMs
	 */
	out_le32(mbase + PECFG_PIMEN, 0);

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, port->node, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pciex_POMs(port, hose, mbase);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);

	/* The root complex doesn't show up if we don't set some vendor
	 * and device IDs into it. The defaults below are the same bogus
	 * ones that the initial code in arch/ppc had. This can be
	 * overwritten by setting the "vendor-id/device-id" properties
	 * in the pciex node.
	 */

	/* Get the (optional) vendor-/device-id from the device-tree */
	pval = of_get_property(port->node, "vendor-id", NULL);
	if (pval) {
		val = *pval;
	} else {
		if (!port->endpoint)
			val = 0xaaa0 + port->index;
		else
			val = 0xeee0 + port->index;
	}
	out_le16(mbase + 0x200, val);

	pval = of_get_property(port->node, "device-id", NULL);
	if (pval) {
		val = *pval;
	} else {
		if (!port->endpoint)
			val = 0xbed0 + port->index;
		else
			val = 0xfed0 + port->index;
	}
	out_le16(mbase + 0x202, val);

	if (!port->endpoint) {
		/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
		out_le32(mbase + 0x208, 0x06040001);

		printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
		       port->index);
	} else {
		/* Set Class Code to Processor/PPC */
		out_le32(mbase + 0x208, 0x0b200001);

		printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
		       port->index);
	}

	return;
 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (cfg_data)
		iounmap(cfg_data);
	if (mbase)
		iounmap(mbase);
}
void ppc440spe_setup_pcie_rootpoint(struct pci_controller *hose, int port)
{
	volatile void *mbase = NULL;
	volatile void *rmbase = NULL;

	pci_set_ops(hose,
		    pcie_read_config_byte,
		    pcie_read_config_word,
		    pcie_read_config_dword,
		    pcie_write_config_byte,
		    pcie_write_config_word,
		    pcie_write_config_dword);

	switch (port) {
	case 0:
		mbase = (u32 *)CFG_PCIE0_XCFGBASE;
		rmbase = (u32 *)CFG_PCIE0_CFGBASE;
		hose->cfg_data = (u8 *)CFG_PCIE0_CFGBASE;
		break;
	case 1:
		mbase = (u32 *)CFG_PCIE1_XCFGBASE;
		rmbase = (u32 *)CFG_PCIE1_CFGBASE;
		hose->cfg_data = (u8 *)CFG_PCIE1_CFGBASE;
		break;
	case 2:
		mbase = (u32 *)CFG_PCIE2_XCFGBASE;
		rmbase = (u32 *)CFG_PCIE2_CFGBASE;
		hose->cfg_data = (u8 *)CFG_PCIE2_CFGBASE;
		break;
	}

	/*
	 * Set bus numbers on our root port
	 */
	out_8((u8 *)mbase + PCI_PRIMARY_BUS, 0);
	out_8((u8 *)mbase + PCI_SECONDARY_BUS, 1);
	out_8((u8 *)mbase + PCI_SUBORDINATE_BUS, 1);

	/*
	 * Set up outbound translation to hose->mem_space from PLB
	 * addresses at an offset of 0xd_0000_0000. We set the low
	 * bits of the mask to 11 to turn off splitting into 8
	 * subregions and to enable the outbound translation.
	 */
	out_le32(mbase + PECFG_POM0LAH, 0x00000000);
	out_le32(mbase + PECFG_POM0LAL, 0x00000000);

	switch (port) {
	case 0:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE0), 0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE0),
		      CFG_PCIE_MEMBASE + port * CFG_PCIE_MEMSIZE);
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE0), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE0),
		      ~(CFG_PCIE_MEMSIZE - 1) | 3);
		break;
	case 1:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE1), 0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE1),
		      (CFG_PCIE_MEMBASE + port * CFG_PCIE_MEMSIZE));
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE1), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE1),
		      ~(CFG_PCIE_MEMSIZE - 1) | 3);
		break;
	case 2:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE2), 0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE2),
		      (CFG_PCIE_MEMBASE + port * CFG_PCIE_MEMSIZE));
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE2), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE2),
		      ~(CFG_PCIE_MEMSIZE - 1) | 3);
		break;
	}

	/* Set up 16GB inbound memory window at 0 */
	out_le32(mbase + PCI_BASE_ADDRESS_0, 0);
	out_le32(mbase + PCI_BASE_ADDRESS_1, 0);
	out_le32(mbase + PECFG_BAR0HMPA, 0x7fffffc);
	out_le32(mbase + PECFG_BAR0LMPA, 0);

	out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
	out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
	out_le32(mbase + PECFG_PIM0LAL, 0);
	out_le32(mbase + PECFG_PIM0LAH, 0);
	out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
	out_le32(mbase + PECFG_PIM1LAH, 0x00000004);
	out_le32(mbase + PECFG_PIMEN, 0x1);

	/* Enable I/O, Mem, and Busmaster cycles */
	out_le16((u16 *)(mbase + PCI_COMMAND),
		 in_le16((u16 *)(mbase + PCI_COMMAND)) |
		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	printf("PCIE:%d successfully set as rootpoint\n", port);
}
int ppc440spe_setup_pcie_endpoint(struct pci_controller *hose, int port)
{
	volatile void *mbase = NULL;
	int attempts = 0;

	pci_set_ops(hose,
		    pcie_read_config_byte,
		    pcie_read_config_word,
		    pcie_read_config_dword,
		    pcie_write_config_byte,
		    pcie_write_config_word,
		    pcie_write_config_dword);

	switch (port) {
	case 0:
		mbase = (u32 *)CFG_PCIE0_XCFGBASE;
		hose->cfg_data = (u8 *)CFG_PCIE0_CFGBASE;
		break;
	case 1:
		mbase = (u32 *)CFG_PCIE1_XCFGBASE;
		hose->cfg_data = (u8 *)CFG_PCIE1_CFGBASE;
		break;
	case 2:
		mbase = (u32 *)CFG_PCIE2_XCFGBASE;
		hose->cfg_data = (u8 *)CFG_PCIE2_CFGBASE;
		break;
	}

	/*
	 * Set up outbound translation to hose->mem_space from PLB
	 * addresses at an offset of 0xd_0000_0000. We set the low
	 * bits of the mask to 11 to turn off splitting into 8
	 * subregions and to enable the outbound translation.
	 */
	out_le32(mbase + PECFG_POM0LAH, 0x00001ff8);
	out_le32(mbase + PECFG_POM0LAL, 0x00001000);

	switch (port) {
	case 0:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE0), 0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE0),
		      CFG_PCIE_MEMBASE + port * CFG_PCIE_MEMSIZE);
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE0), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE0),
		      ~(CFG_PCIE_MEMSIZE - 1) | 3);
		break;
	case 1:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE1), 0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE1),
		      (CFG_PCIE_MEMBASE + port * CFG_PCIE_MEMSIZE));
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE1), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE1),
		      ~(CFG_PCIE_MEMSIZE - 1) | 3);
		break;
	case 2:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE2), 0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE2),
		      (CFG_PCIE_MEMBASE + port * CFG_PCIE_MEMSIZE));
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE2), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE2),
		      ~(CFG_PCIE_MEMSIZE - 1) | 3);
		break;
	}

	/* Set up 16GB inbound memory window at 0 */
	out_le32(mbase + PCI_BASE_ADDRESS_0, 0);
	out_le32(mbase + PCI_BASE_ADDRESS_1, 0);
	out_le32(mbase + PECFG_BAR0HMPA, 0x7fffffc);
	out_le32(mbase + PECFG_BAR0LMPA, 0);
	out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
	out_le32(mbase + PECFG_PIM0LAH, 0x00000004);	/* pointing to SRAM */
	out_le32(mbase + PECFG_PIMEN, 0x1);

	/* Enable I/O, Mem, and Busmaster cycles */
	out_le16((u16 *)(mbase + PCI_COMMAND),
		 in_le16((u16 *)(mbase + PCI_COMMAND)) |
		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	out_le16(mbase + 0x200, 0xcaad);	/* Setting vendor ID */
	out_le16(mbase + 0x202, 0xfeed);	/* Setting device ID */

	attempts = 10;
	switch (port) {
	case 0:
		while (!(SDR_READ(PESDR0_RCSSTS) & (1 << 8))) {
			if (!(attempts--)) {
				printf("PCIE0: BMEN is not active\n");
				return -1;
			}
			mdelay(1000);
		}
		break;
	case 1:
		while (!(SDR_READ(PESDR1_RCSSTS) & (1 << 8))) {
			if (!(attempts--)) {
				printf("PCIE1: BMEN is not active\n");
				return -1;
			}
			mdelay(1000);
		}
		break;
	case 2:
		while (!(SDR_READ(PESDR2_RCSSTS) & (1 << 8))) {
			if (!(attempts--)) {
				printf("PCIE2: BMEN is not active\n");
				return -1;
			}
			mdelay(1000);
		}
		break;
	}

	printf("PCIE:%d successfully set as endpoint\n", port);

	return 0;
}
static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
					       struct pci_controller *hose,
					       void __iomem *mbase,
					       struct resource *res)
{
	resource_size_t size = res->end - res->start + 1;
	u64 sa;

	if (port->endpoint) {
		resource_size_t ep_addr = 0;
		resource_size_t ep_size = 32 << 20;

		/* Currently we map a fixed 32MByte window to PLB address
		 * 0 (SDRAM). This should probably be configurable via a
		 * dts property.
		 */

		/* Calculate window size */
		sa = (0xffffffffffffffffull << ilog2(ep_size));

		/* Setup BAR0 */
		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
			 PCI_BASE_ADDRESS_MEM_TYPE_64);

		/* Disable BAR1 & BAR2 */
		out_le32(mbase + PECFG_BAR1MPA, 0);
		out_le32(mbase + PECFG_BAR2HMPA, 0);
		out_le32(mbase + PECFG_BAR2LMPA, 0);

		out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
		out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));

		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
	} else {
		/* Calculate window size */
		sa = (0xffffffffffffffffull << ilog2(size));
		if (res->flags & IORESOURCE_PREFETCH)
			sa |= 0x8;

		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));

		/* The setup of the split looks weird to me ... let's see
		 * if it works
		 */
		out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
		out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
		out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
		out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
		out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
		out_le32(mbase + PECFG_PIM01SAL, 0x00000000);

		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
	}

	/* Enable inbound mapping */
	out_le32(mbase + PECFG_PIMEN, 0x1);

	/* Enable I/O, Mem, and Busmaster cycles */
	out_le16(mbase + PCI_COMMAND,
		 in_le16(mbase + PCI_COMMAND) |
		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
}
int board_early_init_r(void)
{
	unsigned k;
	unsigned ctr;

	for (k = 0; k < CONFIG_SYS_FPGA_COUNT; ++k)
		gd->arch.fpga_state[k] = 0;

	/*
	 * reset FPGA
	 */
	gd405ex_init();

	gd405ex_set_fpga_reset(1);

	gd405ex_setup_hw();

	for (k = 0; k < CONFIG_SYS_FPGA_COUNT; ++k) {
		ctr = 0;
		while (!gd405ex_get_fpga_done(k)) {
			udelay(100000);
			if (ctr++ > 5) {
				gd->arch.fpga_state[k] |=
					FPGA_STATE_DONE_FAILED;
				break;
			}
		}
	}

	udelay(10);

	gd405ex_set_fpga_reset(0);

	for (k = 0; k < CONFIG_SYS_FPGA_COUNT; ++k) {
		struct ihs_fpga *fpga =
			(struct ihs_fpga *)CONFIG_SYS_FPGA_BASE(k);
#ifdef CONFIG_SYS_FPGA_NO_RFL_HI
		u16 *reflection_target = &fpga->reflection_low;
#else
		u16 *reflection_target = &fpga->reflection_high;
#endif
		/*
		 * wait for fpga out of reset
		 */
		ctr = 0;
		while (1) {
			out_le16(&fpga->reflection_low,
				 REFLECTION_TESTPATTERN);

			if (in_le16(reflection_target) ==
			    REFLECTION_TESTPATTERN_INV)
				break;

			udelay(100000);
			if (ctr++ > 5) {
				gd->arch.fpga_state[k] |=
					FPGA_STATE_REFLECTION_FAILED;
				break;
			}
		}
	}

	return 0;
}
static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
{
	out_le16(ace->baseaddr + reg, val);
}
int board_early_init_f(void)
{
	/*
	 * Setup the interrupt controller polarities, triggers, etc.
	 */
	mtdcr(UIC0SR, 0xffffffff);	/* clear all */
	mtdcr(UIC0ER, 0x00000000);	/* disable all */
	mtdcr(UIC0CR, 0x00000005);	/* ATI & UIC1 crit are critical */
	mtdcr(UIC0PR, 0xffffffff);	/* per ref-board manual */
	mtdcr(UIC0TR, 0x00000000);	/* per ref-board manual */
	mtdcr(UIC0VR, 0x00000000);	/* int31 highest, base=0x000 */
	mtdcr(UIC0SR, 0xffffffff);	/* clear all */

	mtdcr(UIC1SR, 0xffffffff);	/* clear all */
	mtdcr(UIC1ER, 0x00000000);	/* disable all */
	mtdcr(UIC1CR, 0x00000000);	/* all non-critical */
	mtdcr(UIC1PR, 0xffffffff);	/* per ref-board manual */
	mtdcr(UIC1TR, 0x00000000);	/* per ref-board manual */
	mtdcr(UIC1VR, 0x00000000);	/* int31 highest, base=0x000 */
	mtdcr(UIC1SR, 0xffffffff);	/* clear all */

	mtdcr(UIC2SR, 0xffffffff);	/* clear all */
	mtdcr(UIC2ER, 0x00000000);	/* disable all */
	mtdcr(UIC2CR, 0x00000000);	/* all non-critical */
	mtdcr(UIC2PR, 0xffffffff);	/* per ref-board manual */
	mtdcr(UIC2TR, 0x00000000);	/* per ref-board manual */
	mtdcr(UIC2VR, 0x00000000);	/* int31 highest, base=0x000 */
	mtdcr(UIC2SR, 0xffffffff);	/* clear all */

	mtdcr(UIC3SR, 0xffffffff);	/* clear all */
	mtdcr(UIC3ER, 0x00000000);	/* disable all */
	mtdcr(UIC3CR, 0x00000000);	/* all non-critical */
	mtdcr(UIC3PR, 0xffffffff);	/* per ref-board manual */
	mtdcr(UIC3TR, 0x00000000);	/* per ref-board manual */
	mtdcr(UIC3VR, 0x00000000);	/* int31 highest, base=0x000 */
	mtdcr(UIC3SR, 0xffffffff);	/* clear all */

	/*
	 * Configure PFC (Pin Function Control) registers
	 * enable GPIO 49-63
	 * UART0: 4 pins
	 */
	mtsdr(SDR0_PFC0, 0x00007fff);
	mtsdr(SDR0_PFC1, 0x00040000);

	/* Enable PCI host functionality in SDR0_PCI0 */
	mtsdr(SDR0_PCI0, 0xe0000000);

	mtsdr(SDR0_SRST1, 0);	/* Pull AHB out of reset default=1 */

	/* Setup PLB4-AHB bridge based on the system address map */
	mtdcr(AHB_TOP, 0x8000004B);
	mtdcr(AHB_BOT, 0x8000004B);

	/*
	 * Configure USB-STP pins as alternate and not GPIO
	 * It seems to be necessary to configure the STP pins as GPIO
	 * input at powerup (perhaps while USB reset is asserted). So
	 * we configure those pins to their "real" function now.
	 */
	gpio_config(16, GPIO_OUT, GPIO_ALT1, GPIO_OUT_1);
	gpio_config(19, GPIO_OUT, GPIO_ALT1, GPIO_OUT_1);

	/* Trigger board component reset */
	out_le16((void *)CONFIG_SYS_IO_BASE, 0xffff);
	out_le16((void *)CONFIG_SYS_IO_BASE + 0x100, 0xffff);
	udelay(50);
	out_le16((void *)CONFIG_SYS_IO_BASE, 0xffbf);
	out_le16((void *)CONFIG_SYS_IO_BASE + 0x100, 0xffbf);
	udelay(50);
	out_le16((void *)CONFIG_SYS_IO_BASE, 0xffff);
	out_le16((void *)CONFIG_SYS_IO_BASE + 0x100, 0xffff);

	return 0;
}