/*
 * Timer callback fired when a floppy sector transfer takes too long.
 * Stops the DBDMA engine, quiesces the SWIM3 controller, fails the
 * current request with -EIO and kicks off the next queued request.
 *
 * @data: struct floppy_state * smuggled through the timer's
 *        unsigned long argument (old-style timer API).
 */
static void xfer_timeout(unsigned long data)
{
	struct floppy_state *fs = (struct floppy_state *) data;
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_regs __iomem *dr = fs->dma;
	int n;

	fs->timeout_pending = 0;
	/* Clear RUN in the DBDMA control register (bits in the upper
	 * halfword select which bits of the lower halfword to write). */
	out_le32(&dr->control, RUN << 16);
	/* We must wait a bit for dbdma to stop */
	for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
		udelay(1);
	/* Mask controller interrupts and abort the in-flight action. */
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
	out_8(&sw->select, RELAX);
	printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
	       (rq_data_dir(fd_req)==WRITE? "writ": "read"),
	       (long)blk_rq_pos(fd_req));
	swim3_end_request_cur(-EIO);
	fs->state = idle;
	start_request(fs);
}
static void xfer_timeout(unsigned long data) { struct floppy_state *fs = (struct floppy_state *) data; struct swim3 __iomem *sw = fs->swim3; struct dbdma_regs __iomem *dr = fs->dma; struct dbdma_cmd *cp = fs->dma_cmd; unsigned long s; int n; fs->timeout_pending = 0; out_le32(&dr->control, RUN << 16); /* We must wait a bit for dbdma to stop */ for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++) udelay(1); out_8(&sw->intr_enable, 0); out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); out_8(&sw->select, RELAX); if (rq_data_dir(fd_req) == WRITE) ++cp; if (ld_le16(&cp->xfer_status) != 0) s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9); else
/*
 * Indirect PCI configuration read: write the encoded config address to
 * the controller's cfg_addr port, then read the datum back through the
 * cfg_data window.
 *
 * @bus:    PCI bus the target device sits on
 * @devfn:  encoded device/function number
 * @offset: config-space byte offset (caller guarantees alignment)
 * @len:    access width, 1/2/4 bytes (caller guarantees validity)
 * @val:    out: value read (zero-extended to 32 bits)
 *
 * Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_DEVICE_NOT_FOUND if the
 * platform excludes this device.
 */
static int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	struct pci_controller *hose = bus->sysdata;
	volatile void __iomem *cfg_data;
	u8 cfg_type = 0;

	/* Let the platform veto access to specific devices. */
	if (ppc_md.pci_exclude_device)
		if (ppc_md.pci_exclude_device(bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;

	/* Use type-1 config cycles for anything behind a bridge. */
	if (hose->set_cfg_type)
		if (bus->number != hose->first_busno)
			cfg_type = 1;

	PCI_CFG_OUT(hose->cfg_addr,
		    (0x80000000 | ((bus->number - hose->bus_offset) << 16)
		     | (devfn << 8) | ((offset & 0xfc) | cfg_type)));

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	cfg_data = hose->cfg_data + (offset & 3);
	switch (len) {
	case 1:
		*val = in_8(cfg_data);
		break;
	case 2:
		*val = in_le16(cfg_data);
		break;
	default:
		*val = in_le32(cfg_data);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
/*
 * Insert mem->page_count pages into the UniNorth GART starting at GATT
 * slot @pg_start.  Only type-0 memory is supported.
 *
 * Returns 0 on success, -EINVAL for an unsupported type or an
 * out-of-range window, -EBUSY if any target slot is already populated.
 */
static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	int idx;

	num_entries = A_SIZE_32(agp_bridge->current_size)->num_entries;

	/* We know nothing of memory types */
	if (type != 0 || mem->type != 0)
		return -EINVAL;

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	/* Refuse to overwrite GATT slots that are already in use. */
	for (idx = pg_start; idx < pg_start + mem->page_count; idx++) {
		if (!PGE_EMPTY(agp_bridge, agp_bridge->gatt_table[idx]))
			return -EBUSY;
	}

	for (idx = 0; idx < mem->page_count; idx++) {
		/* Little-endian GATT entry: page frame | valid bit. */
		agp_bridge->gatt_table[pg_start + idx] =
			cpu_to_le32((mem->memory[idx] & 0xfffff000) | 0x00000001UL);
		/* Flush the mapped page itself out of the CPU data cache. */
		flush_dcache_range((unsigned long)__va(mem->memory[idx]),
				   (unsigned long)__va(mem->memory[idx]) + 0x1000);
	}

	/* Read back one entry and barrier so the table updates are
	 * globally visible before flushing and invalidating the TLB. */
	(void)in_le32((volatile u32 *)&agp_bridge->gatt_table[pg_start]);
	mb();
	flush_dcache_range((unsigned long)&agp_bridge->gatt_table[pg_start],
			   (unsigned long)&agp_bridge->gatt_table[pg_start + mem->page_count]);

	uninorth_tlbflush(mem);
	return 0;
}
/*
 * Workaround for erratum A-009635: program EPU event processor 5 and
 * the clock-gating control so the event unit fires at the required
 * interval.  No-op if the platform interval frequency is unavailable.
 *
 * NOTE(review): reads use in_le32() while writes use writel(); both
 * should be little-endian accessors here -- confirm against the
 * platform I/O accessor definitions.
 */
void erratum_a009635(void)
{
	u32 val;
	unsigned long interval_mhz = get_internval_val_mhz();

	if (!interval_mhz)
		return;

	val = in_le32(DCSR_CGACRE5);
	writel(val | 0x00000200, DCSR_CGACRE5);

	/* NOTE(review): this read of EPU_EPCMPR5 is discarded -- the
	 * compare register is then overwritten with interval_mhz.
	 * Possibly an intentional read-before-write required by the
	 * erratum; verify against the erratum document. */
	val = in_le32(EPU_EPCMPR5);
	writel(interval_mhz, EPU_EPCMPR5);
	val = in_le32(EPU_EPCCR5);
	writel(val | 0x82820000, EPU_EPCCR5);
	val = in_le32(EPU_EPSMCR5);
	writel(val | 0x002f0000, EPU_EPSMCR5);
	val = in_le32(EPU_EPECR5);
	writel(val | 0x20000000, EPU_EPECR5);
	val = in_le32(EPU_EPGCR);
	writel(val | 0x80000000, EPU_EPGCR);
}
/*
 * Board reset for the C2K: after a long settle delay, reprogram two
 * MV64x60 multi-purpose pins as GPIO outputs and drive the GPP lines
 * that trigger the board reset, then spin forever waiting for it.
 */
static void c2k_reset(void)
{
	u32 temp;

	/* 5 second delay before the reset sequence begins. */
	udelay(5000000);

	if (bridge_base != 0) {
		/* Switch MPP pin (CNTL_0, nibble 3) to GPIO function. */
		temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0));
		temp &= 0xFFFF0FFF;
		out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0), temp);

		/* Make GPP bit 2 an active-high output. */
		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL));
		temp |= 0x00000004;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL));
		temp |= 0x00000004;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp);

		/* Same for the second reset pin (CNTL_2 / GPP bit 19). */
		temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2));
		temp &= 0xFFFF0FFF;
		out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL));
		temp |= 0x00080000;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL));
		temp |= 0x00080000;
		out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp);

		/* Assert both reset lines (GPP bits 2 and 19) at once. */
		out_le32((u32 *)(bridge_base + MV64x60_GPP_VALUE_SET), 0x00080004);
	}

	/* Wait here until the hardware reset actually takes effect. */
	for (;;);
}
/*
 * Fetch one 32-bit sample from the PA Semi hardware RNG.
 *
 * @rng:  hwrng instance; ->priv holds the ioremapped register base.
 * @data: out: the random word read from the value register.
 *
 * Always returns 4, the number of bytes produced.
 */
static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
{
	void __iomem *regs = (void __iomem *)rng->priv;

	*data = in_le32(regs + SDCRNG_VAL_REG);
	return sizeof(u32);
}
/*
 * Post-MotLoad PCI fixup for the mvme5500/mvme6100: rebase the
 * 'zero-based' PCI I/O windows so they reflect the CPU memory map,
 * then rewrite PCI interrupt-line registers via BSP_pciScan().
 */
void BSP_motload_pci_fixup(void)
{
  uint32_t b0,b1,r0,r1,lim,dis;

  /* MotLoad on the mvme5500 and mvme6100 configures the PCI
   * busses nicely, i.e., the values read from the memory address
   * space BARs by means of PCI config cycles directly reflect the
   * CPU memory map. Thus, the presence of two hoses is already hidden.
   *
   * Unfortunately, all PCI I/O addresses are 'zero-based' i.e.,
   * a hose-specific base address would have to be added to
   * the values read from config space.
   *
   * We fix this here so I/O BARs also reflect the CPU memory map.
   *
   * Furthermore, the mvme5500 uses
   *       f000.0000 .. f07f.ffff  for PCI-0 / hose0
   * and
   *       f080.0000 .. f0ff.0000  for PCI-1 / hose 0
   * whereas the mvme6100 does it the other way round...
   */
  b0 = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_Low_Decode) );
  b1 = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_Low_Decode) );
  r0 = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_Remap) );
  r1 = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_Remap) );

  switch ( BSP_getDiscoveryVersion(0) ) {
    case MV_64360:
      /* In case of the MV64360 the 'limit' is actually a 'size'!
       * Disable by setting special bits in the 'BAR disable reg'.
       */
      dis = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + MV_64360_BASE_ADDR_DISBL) );
      /* disable PCI0 I/O and PCI1 I/O */
      out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + MV_64360_BASE_ADDR_DISBL), dis | (1<<9) | (1<<14) );
      /* remap busses on hose 0; if the remap register was already set, assume
       * that someone else [such as the bootloader] already performed the fixup
       */
      if ( (b0 & 0xffff) && 0 == (r0 & 0xffff) ) {
        rtems_pci_io_remap( 0, BSP_pci_hose1_bus_base, (b0 & 0xffff)<<16 );
        out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_Remap), (b0 & 0xffff) );
      }
      /* remap busses on hose 1 */
      if ( (b1 & 0xffff) && 0 == (r1 & 0xffff) ) {
        rtems_pci_io_remap( BSP_pci_hose1_bus_base, pci_bus_count(), (b1 & 0xffff)<<16 );
        out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_Remap), (b1 & 0xffff) );
      }
      /* re-enable */
      out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + MV_64360_BASE_ADDR_DISBL), dis );
    break;

    case GT_64260_A:
    case GT_64260_B:
      if ( (b0 & 0xfff) && 0 == (r0 & 0xfff) ) { /* base are only 12 bits */
        /* switch window off by setting the limit < base */
        lim = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_High_Decode) );
        out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_High_Decode), 0 );
        /* remap busses on hose 0 */
        rtems_pci_io_remap( 0, BSP_pci_hose1_bus_base, (b0 & 0xfff)<<20 );
        /* BTW: it seems that writing the base register also copies the
         * value into the 'remap' register automatically (??)
         */
        out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_Remap), (b0 & 0xfff) );
        /* re-enable */
        out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_High_Decode), lim );
      }
      if ( (b1 & 0xfff) && 0 == (r1 & 0xfff) ) { /* base are only 12 bits */
        /* switch window off by setting the limit < base */
        lim = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_High_Decode) );
        out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_High_Decode), 0 );
        /* remap busses on hose 1 */
        rtems_pci_io_remap( BSP_pci_hose1_bus_base, pci_bus_count(), (b1 & 0xfff)<<20 );
        out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_Remap), (b1 & 0xfff) );
        /* re-enable */
        out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_High_Decode), lim );
      }
    break;

    default:
      BSP_panic("Unknown discovery version; switch in file: "__FILE__" not implemented (yet)");
    break; /* never get here */
  }

  /* Fixup the IRQ lines; the mvme6100 maps them nicely into our scheme, i.e., GPP
   * interrupts start at 64 upwards
   *
   * The mvme5500 is apparently initialized differently :-(. GPP interrupts start at 0
   * Since all PCI interrupts are wired to GPP we simply check for a value < 64 and
   * reprogram the interrupt line register.
   */
  BSP_pciScan(0, fixup_irq_line, 0);
}
/*
 * Set one palette entry for the Open Firmware framebuffer, dispatching
 * on the detected RAMDAC type, and mirror low entries into the
 * truecolor pseudo-palette.
 *
 * Fix vs. original: the 16bpp pseudo-palette entry hardcoded the
 * RGB555 layout (5-5-5).  For RGB565 visuals (green.length == 6) the
 * green field must be shifted by 11, not 10 -- otherwise colors 0-15
 * render wrong in 16bpp truecolor modes.
 *
 * Returns 0 on success, 1 for an invalid regno or missing cmap address.
 */
static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			  u_int transp, struct fb_info *info)
{
	struct offb_par *par = (struct offb_par *) info->par;

	if (!par->cmap_adr || regno > 255)
		return 1;

	/* Hardware palettes take 8-bit components. */
	red >>= 8;
	green >>= 8;
	blue >>= 8;

	switch (par->cmap_type) {
	case cmap_m64:
		writeb(regno, par->cmap_adr);
		writeb(red, par->cmap_data);
		writeb(green, par->cmap_data);
		writeb(blue, par->cmap_data);
		break;
	case cmap_M3A:
		/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) & ~0x20);
		/* fall through -- then program the palette like r128 */
	case cmap_r128:
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_M3B:
		/* Set PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) | 0x20);
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_radeon:
		/* Set palette index & data (could be smarter) */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_gxt2000:
		out_le32((unsigned __iomem *) par->cmap_adr + regno,
			 (red << 16 | green << 8 | blue));
		break;
	}

	/* Keep the truecolor pseudo-palette in sync for the console. */
	if (regno < 16)
		switch (info->var.bits_per_pixel) {
		case 16:
			/* Honor the actual pixel layout: RGB565 when the
			 * green field is 6 bits wide, RGB555 otherwise. */
			((u16 *) (info->pseudo_palette))[regno] =
				(info->var.green.length == 6) ?
				(regno << 11) | (regno << 5) | regno :
				(regno << 10) | (regno << 5) | regno;
			break;
		case 32:
		{
			int i = (regno << 8) | regno;
			((u32 *) (info->pseudo_palette))[regno] = (i << 16) | i;
			break;
		}
		}
	return 0;
}
/* pasemi_read_dma_reg - read a 32-bit little-endian DMA engine register
 * @reg: byte offset of the register within the mapped DMA register block
 *       (offset into PCI CFG space)
 *
 * Returns the register value.
 */
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
	unsigned int val;

	val = in_le32(dma_regs + reg);
	return val;
}
/*
 * Rewrite the PPC405 PCI bridge mapping registers left behind by the
 * IBM boot ROM so the PLB<->PCI windows match the expected layout
 * documented below, and zero the stale config BARs.
 *
 * Fixes vs. original:
 *  - the DEBUG dumps printed the label "pmm%dma" for all four window
 *    registers (ma/la/pcila/pciha) -- a copy-paste bug that made the
 *    debug output useless; each register now gets its own label;
 *  - the pcip pointer was printed with %x (wrong specifier for a
 *    pointer); it now uses %p.
 *
 * @hose: PCI controller being fixed up
 * @pcip: ioremapped PCIL0 bridge register block
 */
void __init bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
{
#ifdef CONFIG_PCI
	unsigned int bar_response, bar;
	/*
	 * Expected PCI mapping:
	 *
	 *  PLB addr             PCI memory addr
	 *  ---------------------       ---------------------
	 *  0000'0000 - 7fff'ffff <---  0000'0000 - 7fff'ffff
	 *  8000'0000 - Bfff'ffff --->  8000'0000 - Bfff'ffff
	 *
	 *  PLB addr             PCI io addr
	 *  ---------------------       ---------------------
	 *  e800'0000 - e800'ffff --->  0000'0000 - 0001'0000
	 *
	 * The following code is simplified by assuming that the bootrom
	 * has been well behaved in following this mapping.
	 */

#ifdef DEBUG
	int i;

	printk("ioremap PCLIO_BASE = %p\n", pcip);
	printk("PCI bridge regs before fixup \n");
	for (i = 0; i <= 3; i++) {
		printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma)));
		printk(" pmm%dla\t0x%x\n", i, in_le32(&(pcip->pmm[i].la)));
		printk(" pmm%dpcila\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila)));
		printk(" pmm%dpciha\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha)));
	}
	printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms)));
	printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la)));
	printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms)));
	printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la)));
#endif

	/* added for IBM boot rom version 1.15 bios bar changes -AK */

	/* Disable region first */
	out_le32((void *) &(pcip->pmm[0].ma), 0x00000000);
	/* PLB starting addr, PCI: 0x80000000 */
	out_le32((void *) &(pcip->pmm[0].la), 0x80000000);
	/* PCI start addr, 0x80000000 */
	out_le32((void *) &(pcip->pmm[0].pcila), PPC405_PCI_MEM_BASE);
	/* 512MB range of PLB to PCI */
	out_le32((void *) &(pcip->pmm[0].pciha), 0x00000000);
	/* Enable no pre-fetch, enable region */
	out_le32((void *) &(pcip->pmm[0].ma),
		 ((0xffffffff - (PPC405_PCI_UPPER_MEM - PPC405_PCI_MEM_BASE)) | 0x01));

	/* Disable region one */
	out_le32((void *) &(pcip->pmm[1].ma), 0x00000000);
	out_le32((void *) &(pcip->pmm[1].la), 0x00000000);
	out_le32((void *) &(pcip->pmm[1].pcila), 0x00000000);
	out_le32((void *) &(pcip->pmm[1].pciha), 0x00000000);
	out_le32((void *) &(pcip->pmm[1].ma), 0x00000000);
	out_le32((void *) &(pcip->ptm1ms), 0x00000001);

	/* Disable region two */
	out_le32((void *) &(pcip->pmm[2].ma), 0x00000000);
	out_le32((void *) &(pcip->pmm[2].la), 0x00000000);
	out_le32((void *) &(pcip->pmm[2].pcila), 0x00000000);
	out_le32((void *) &(pcip->pmm[2].pciha), 0x00000000);
	out_le32((void *) &(pcip->pmm[2].ma), 0x00000000);
	out_le32((void *) &(pcip->ptm2ms), 0x00000000);
	out_le32((void *) &(pcip->ptm2la), 0x00000000);

	/* Zero config bars */
	for (bar = PCI_BASE_ADDRESS_1; bar <= PCI_BASE_ADDRESS_2; bar += 4) {
		early_write_config_dword(hose, hose->first_busno,
					 PCI_FUNC(hose->first_busno), bar,
					 0x00000000);
		early_read_config_dword(hose, hose->first_busno,
					PCI_FUNC(hose->first_busno), bar,
					&bar_response);
		DBG("BUS %d, device %d, Function %d bar 0x%8.8x is 0x%8.8x\n",
		    hose->first_busno, PCI_SLOT(hose->first_busno),
		    PCI_FUNC(hose->first_busno), bar, bar_response);
	}
	/* end work around */

#ifdef DEBUG
	printk("PCI bridge regs after fixup \n");
	for (i = 0; i <= 3; i++) {
		printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma)));
		printk(" pmm%dla\t0x%x\n", i, in_le32(&(pcip->pmm[i].la)));
		printk(" pmm%dpcila\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila)));
		printk(" pmm%dpciha\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha)));
	}
	printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms)));
	printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la)));
	printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms)));
	printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la)));
#endif
#endif
}
/*
 * Create the appropriate control structures to manage
 * a new EHCI host controller.
 *
 * Excerpts from linux ehci fsl driver.
 *
 * @index: controller index (unused here; single controller supported)
 * @hccr:  out: capability register block
 * @hcor:  out: operational register block, located after the
 *         capability registers
 *
 * Returns 0 on success, -1 if no PHY type could be determined.
 */
int ehci_hcd_init(int index, struct ehci_hccr **hccr, struct ehci_hcor **hcor)
{
	struct usb_ehci *ehci;
	const char *phy_type = NULL;
	size_t len;
#ifdef CONFIG_SYS_FSL_USB_INTERNAL_UTMI_PHY
	char usb_phy[5];

	usb_phy[0] = '\0';
#endif

	ehci = (struct usb_ehci *)CONFIG_SYS_FSL_USB_ADDR;
	*hccr = (struct ehci_hccr *)((uint32_t)&ehci->caplength);
	/* Operational registers start HC_LENGTH bytes past the caps. */
	*hcor = (struct ehci_hcor *)((uint32_t) *hccr +
			HC_LENGTH(ehci_readl(&(*hccr)->cr_capbase)));

	/* Set to Host mode */
	setbits_le32(&ehci->usbmode, CM_HOST);

	/* Snoop the full 4GB address space (two 2GB windows). */
	out_be32(&ehci->snoop1, SNOOP_SIZE_2GB);
	out_be32(&ehci->snoop2, 0x80000000 | SNOOP_SIZE_2GB);

	/* Init phy: hwconfig takes precedence over the environment. */
	if (hwconfig_sub("usb1", "phy_type"))
		phy_type = hwconfig_subarg("usb1", "phy_type", &len);
	else
		phy_type = getenv("usb_phy_type");

	if (!phy_type) {
#ifdef CONFIG_SYS_FSL_USB_INTERNAL_UTMI_PHY
		/* if none specified assume internal UTMI */
		strcpy(usb_phy, "utmi");
		phy_type = usb_phy;
#else
		printf("WARNING: USB phy type not defined !!\n");
		return -1;
#endif
	}

	if (!strcmp(phy_type, "utmi")) {
#if defined(CONFIG_SYS_FSL_USB_INTERNAL_UTMI_PHY)
		setbits_be32(&ehci->control, PHY_CLK_SEL_UTMI);
		setbits_be32(&ehci->control, UTMI_PHY_EN);
		udelay(1000); /* delay required for PHY Clk to appear */
#endif
		out_le32(&(*hcor)->or_portsc[0], PORT_PTS_UTMI);
	} else {
#if defined(CONFIG_SYS_FSL_USB_INTERNAL_UTMI_PHY)
		clrbits_be32(&ehci->control, UTMI_PHY_EN);
		setbits_be32(&ehci->control, PHY_CLK_SEL_ULPI);
		udelay(1000); /* delay required for PHY Clk to appear */
#endif
		out_le32(&(*hcor)->or_portsc[0], PORT_PTS_ULPI);
	}

	/* Enable interface. */
	setbits_be32(&ehci->control, USB_EN);

	out_be32(&ehci->prictrl, 0x0000000c);
	out_be32(&ehci->age_cnt_limit, 0x00000040);
	out_be32(&ehci->sictrl, 0x00000001);

	/* NOTE(review): read-back of usbmode, result discarded --
	 * presumably to flush the posted writes above; confirm. */
	in_le32(&ehci->usbmode);

	return 0;
}
/*
 * Read a 32-bit QSPI register with the byte order selected by the
 * controller flags: big-endian register map when
 * QSPI_FLAG_REGMAP_ENDIAN_BIG is set, little-endian otherwise.
 */
static u32 qspi_read32(u32 flags, u32 *addr)
{
	if (flags & QSPI_FLAG_REGMAP_ENDIAN_BIG)
		return in_be32(addr);

	return in_le32(addr);
}
/*
 * Set one palette entry for the Open Firmware framebuffer.  Mirrors
 * low entries into the truecolor pseudo-palette (honoring 15 vs 16 bpp
 * green-field width), then programs the RAMDAC according to the
 * detected cmap type.
 *
 * Returns 0 on success, 1 for an out-of-range regno.
 */
static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			  u_int transp, struct fb_info *info)
{
	struct offb_par *par = (struct offb_par *) info->par;
	int i, depth;
	u32 *pal = info->pseudo_palette;

	depth = info->var.bits_per_pixel;
	/* Distinguish RGB555 (depth 15) from RGB565 by the green width. */
	if (depth == 16)
		depth = (info->var.green.length == 5) ? 15 : 16;

	if (regno > 255 ||
	    (depth == 16 && regno > 63) ||
	    (depth == 15 && regno > 31))
		return 1;

	if (regno < 16) {
		switch (depth) {
		case 15:
			pal[regno] = (regno << 10) | (regno << 5) | regno;
			break;
		case 16:
			pal[regno] = (regno << 11) | (regno << 5) | regno;
			break;
		case 24:
			pal[regno] = (regno << 16) | (regno << 8) | regno;
			break;
		case 32:
			i = (regno << 8) | regno;
			pal[regno] = (i << 16) | i;
			break;
		}
	}

	/* Hardware palettes take 8-bit components. */
	red >>= 8;
	green >>= 8;
	blue >>= 8;

	if (!par->cmap_adr)
		return 0;

	switch (par->cmap_type) {
	case cmap_m64:
		writeb(regno, par->cmap_adr);
		writeb(red, par->cmap_data);
		writeb(green, par->cmap_data);
		writeb(blue, par->cmap_data);
		break;
	case cmap_M3A:
		/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) & ~0x20);
		/* fall through -- then program the palette like r128 */
	case cmap_r128:
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_M3B:
		/* Set PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) | 0x20);
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_radeon:
		/* Set palette index & data (could be smarter) */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_gxt2000:
		out_le32(((unsigned __iomem *) par->cmap_adr) + regno,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_avivo:
		/* Write to both LUTs for now */
		writel(1, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT);
		writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX);
		writel(((red) << 22) | ((green) << 12) | ((blue) << 2),
		       par->cmap_adr + AVIVO_DC_LUT_30_COLOR);
		writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT);
		writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX);
		writel(((red) << 22) | ((green) << 12) | ((blue) << 2),
		       par->cmap_adr + AVIVO_DC_LUT_30_COLOR);
		break;
	}

	return 0;
}
/*
 * PCI probe for the PA Semi DMA crypto engine: allocate the softc and
 * session table, locate the I/O bridge, map the register windows,
 * bring up the FNU TX channels and register the supported algorithms
 * with the crypto framework.
 *
 * Returns 0 on success or a negative errno; on any failure all partial
 * state is torn down via pasemi_dma_remove().
 */
static int __devinit
pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct pasemi_softc *sc;
	int ret, i;

	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);

	pci_set_drvdata(pdev, sc);

	spin_lock_init(&sc->sc_chnlock);

	sc->sc_sessions = (struct pasemi_session **)
		kzalloc(PASEMI_INITIAL_SESSIONS *
			sizeof(struct pasemi_session *), GFP_ATOMIC);
	if (sc->sc_sessions == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
	sc->sc_lastchn = 0;
	/* This driver owns channels/irqs 6 and up. */
	sc->base_irq = pdev->irq + 6;
	sc->base_chan = 6;
	sc->sc_cid = -1;
	sc->dma_pdev = pdev;

	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!sc->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		ret = -ENODEV;
		goto out;
	}

	/* This is hardcoded and ugly, but we have some firmware versions
	 * who don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	sc->dma_regs =
		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
	sc->iob_regs =
		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
	if (!sc->dma_regs || !sc->iob_regs) {
		dev_err(&pdev->dev, "Can't map registers\n");
		ret = -ENODEV;
		goto out;
	}

	dma_status = __ioremap(0xfd800000, 0x1000, 0);
	if (!dma_status) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "Can't map dmastatus space\n");
		goto out;
	}

	sc->tx = (struct pasemi_fnu_txring *)
		kzalloc(sizeof(struct pasemi_fnu_txring) * 8, GFP_KERNEL);
	if (!sc->tx) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the h/w */
	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
		  PAS_DMA_COM_CFG_FWF));
	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
		sc->sc_num_channels++;
		ret = pasemi_dma_setup_tx_resources(sc, i);
		if (ret)
			goto out;
	}

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
					 CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
		ret = -ENXIO;
		goto out;
	}

	/* register algorithms with the framework */
	printk(DRV_NAME ":");

	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);

	return 0;

out:
	/* Unwind everything allocated so far (handles partial state). */
	pasemi_dma_remove(pdev);
	return ret;
}
/*
 * Hook up the PHY for a QSGMII DPMAC on the LS2080A QDS board: decode
 * which SerDes1 lane (QSGMII A-D) carries this dpmac, map the lane to
 * a riser-card slot, and for slot 1 connect/configure the PHY over the
 * slot's MDIO mux.  Finally program the QSGMII repeater.
 *
 * Only SerDes1 protocol 0x33 is handled.
 *
 * NOTE(review): serdes_get_first_lane() can plausibly return a
 * negative value for an absent lane; it is used unchecked as an index
 * into lane_to_slot_fsm1[] -- confirm it cannot fail for protocol 0x33.
 */
void ls2080a_handle_phy_interface_qsgmii(int dpmac_id)
{
	int lane = 0, slot;
	struct mii_dev *bus;
	struct ccsr_gur __iomem *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
	int serdes1_prtcl = (in_le32(&gur->rcwsr[28]) &
				FSL_CHASSIS3_RCWSR28_SRDS1_PRTCL_MASK)
		>> FSL_CHASSIS3_RCWSR28_SRDS1_PRTCL_SHIFT;

	switch (serdes1_prtcl) {
	case 0x33:
		/* Four dpmacs share each QSGMII lane (A..D). */
		switch (dpmac_id) {
		case 1:
		case 2:
		case 3:
		case 4:
			lane = serdes_get_first_lane(FSL_SRDS_1, QSGMII_A);
			break;
		case 5:
		case 6:
		case 7:
		case 8:
			lane = serdes_get_first_lane(FSL_SRDS_1, QSGMII_B);
			break;
		case 9:
		case 10:
		case 11:
		case 12:
			lane = serdes_get_first_lane(FSL_SRDS_1, QSGMII_C);
			break;
		case 13:
		case 14:
		case 15:
		case 16:
			lane = serdes_get_first_lane(FSL_SRDS_1, QSGMII_D);
			break;
		}

		/* Table is 0-based; slot numbers below are 1-based. */
		slot = lane_to_slot_fsm1[lane];

		switch (++slot) {
		case 1:
			/* Slot housing a QSGMII riser card? */
			wriop_set_phy_address(dpmac_id, dpmac_id - 1);
			dpmac_info[dpmac_id].board_mux = EMI1_SLOT1;
			bus = mii_dev_for_muxval(EMI1_SLOT1);
			wriop_set_mdio(dpmac_id, bus);
			dpmac_info[dpmac_id].phydev = phy_connect(
						dpmac_info[dpmac_id].bus,
						dpmac_info[dpmac_id].phy_addr,
						NULL,
						dpmac_info[dpmac_id].enet_if);
			phy_config(dpmac_info[dpmac_id].phydev);
			break;
		case 3:
			break;
		case 4:
			break;
		case 5:
			break;
		case 6:
			break;
		}
		break;
	default:
		printf("qds: WRIOP: Unsupported SerDes Protocol 0x%02x\n",
		       serdes1_prtcl);
		break;
	}

	qsgmii_configure_repeater(dpmac_id);
}
/*
 * Hook up the PHY for an SGMII DPMAC on the LS2080A QDS board.
 * DPMACs 1-9 hang off SerDes1, the rest off SerDes2.  The SerDes lane
 * is mapped to a riser-card slot and, where the slot has an MDIO
 * connection, the PHY is connected/configured over the slot's mux.
 * The PHY address table depends on whether the "xqsgmii" hwconfig
 * option is set.
 */
void ls2080a_handle_phy_interface_sgmii(int dpmac_id)
{
	int lane, slot;
	struct mii_dev *bus;
	struct ccsr_gur __iomem *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
	int serdes1_prtcl = (in_le32(&gur->rcwsr[28]) &
				FSL_CHASSIS3_RCWSR28_SRDS1_PRTCL_MASK)
		>> FSL_CHASSIS3_RCWSR28_SRDS1_PRTCL_SHIFT;
	int serdes2_prtcl = (in_le32(&gur->rcwsr[28]) &
				FSL_CHASSIS3_RCWSR28_SRDS2_PRTCL_MASK)
		>> FSL_CHASSIS3_RCWSR28_SRDS2_PRTCL_SHIFT;

	int *riser_phy_addr;
	char *env_hwconfig = getenv("hwconfig");

	/* Pick the riser-card PHY address table for this configuration. */
	if (hwconfig_f("xqsgmii", env_hwconfig))
		riser_phy_addr = &xqsgii_riser_phy_addr[0];
	else
		riser_phy_addr = &sgmii_riser_phy_addr[0];

	if (dpmac_id > WRIOP1_DPMAC9)
		goto serdes2;

	switch (serdes1_prtcl) {
	case 0x07:
		lane = serdes_get_first_lane(FSL_SRDS_1, SGMII1 + dpmac_id);
		/* Table is 0-based; slot numbers below are 1-based. */
		slot = lane_to_slot_fsm1[lane];

		switch (++slot) {
		case 1:
			/* Slot housing a SGMII riser card? */
			wriop_set_phy_address(dpmac_id,
					      riser_phy_addr[dpmac_id - 1]);
			dpmac_info[dpmac_id].board_mux = EMI1_SLOT1;
			bus = mii_dev_for_muxval(EMI1_SLOT1);
			wriop_set_mdio(dpmac_id, bus);
			dpmac_info[dpmac_id].phydev = phy_connect(
						dpmac_info[dpmac_id].bus,
						dpmac_info[dpmac_id].phy_addr,
						NULL,
						dpmac_info[dpmac_id].enet_if);
			phy_config(dpmac_info[dpmac_id].phydev);
			break;
		case 2:
			/* Slot housing a SGMII riser card?
			 */
			wriop_set_phy_address(dpmac_id,
					      riser_phy_addr[dpmac_id - 1]);
			dpmac_info[dpmac_id].board_mux = EMI1_SLOT2;
			bus = mii_dev_for_muxval(EMI1_SLOT2);
			wriop_set_mdio(dpmac_id, bus);
			dpmac_info[dpmac_id].phydev = phy_connect(
						dpmac_info[dpmac_id].bus,
						dpmac_info[dpmac_id].phy_addr,
						NULL,
						dpmac_info[dpmac_id].enet_if);
			phy_config(dpmac_info[dpmac_id].phydev);
			break;
		case 3:
			break;
		case 4:
			break;
		case 5:
			break;
		case 6:
			break;
		}
		break;
	default:
		printf("%s qds: WRIOP: Unsupported SerDes1 Protocol 0x%02x\n",
		       __func__ , serdes1_prtcl);
		break;
	}

serdes2:
	switch (serdes2_prtcl) {
	case 0x07:
	case 0x08:
	case 0x49:
		lane = serdes_get_first_lane(FSL_SRDS_2,
					     SGMII9 + (dpmac_id - 9));
		slot = lane_to_slot_fsm2[lane];

		switch (++slot) {
		case 1:
			break;
		case 3:
			break;
		case 4:
			/* Slot housing a SGMII riser card? */
			wriop_set_phy_address(dpmac_id,
					      riser_phy_addr[dpmac_id - 9]);
			dpmac_info[dpmac_id].board_mux = EMI1_SLOT4;
			bus = mii_dev_for_muxval(EMI1_SLOT4);
			wriop_set_mdio(dpmac_id, bus);
			dpmac_info[dpmac_id].phydev = phy_connect(
						dpmac_info[dpmac_id].bus,
						dpmac_info[dpmac_id].phy_addr,
						NULL,
						dpmac_info[dpmac_id].enet_if);
			phy_config(dpmac_info[dpmac_id].phydev);
			break;
		case 5:
			break;
		case 6:
			/* Slot housing a SGMII riser card? */
			wriop_set_phy_address(dpmac_id,
					      riser_phy_addr[dpmac_id - 13]);
			dpmac_info[dpmac_id].board_mux = EMI1_SLOT6;
			bus = mii_dev_for_muxval(EMI1_SLOT6);
			wriop_set_mdio(dpmac_id, bus);
			break;
		}
		break;
	default:
		printf("%s qds: WRIOP: Unsupported SerDes2 Protocol 0x%02x\n",
		       __func__, serdes2_prtcl);
		break;
	}
}
/*
 * Boot-wrapper fixup for the PQ2 PCI bridge: read the /pci node's
 * "ranges" property, sanity-check the memory/mmio/io windows, and
 * program the bridge's outbound/inbound windows to match.  Falls back
 * to whatever the firmware set up if the node looks unsupported.
 */
static void fixup_pci(void)
{
	struct pci_range *mem = NULL, *mmio = NULL,
	                 *io = NULL, *mem_base = NULL;
	u32 *pci_regs[3];
	u8 *soc_regs;
	int i, len;
	void *node, *parent_node;
	u32 naddr, nsize, mem_pow2, mem_mask;

	node = finddevice("/pci");
	if (!node || !dt_is_compatible(node, "fsl,pq2-pci"))
		return;

	/* The node carries three register resources (outbound window
	 * regs, PCI mode/base regs, PCI mask regs). */
	for (i = 0; i < 3; i++)
		if (!dt_xlate_reg(node, i, (unsigned long *)&pci_regs[i], NULL))
			goto err;

	soc_regs = (u8 *)fsl_get_immr();
	if (!soc_regs)
		goto unhandled;

	dt_get_reg_format(node, &naddr, &nsize);
	if (naddr != 3 || nsize != 2)
		goto err;

	parent_node = get_parent(node);
	if (!parent_node)
		goto err;

	dt_get_reg_format(parent_node, &naddr, &nsize);
	if (naddr != 1 || nsize != 1)
		goto unhandled;

	/* Classify the ranges by the PCI address-space flag bits. */
	len = getprop(node, "ranges", pci_ranges_buf,
	              sizeof(pci_ranges_buf));

	for (i = 0; i < len / sizeof(struct pci_range); i++) {
		u32 flags = pci_ranges_buf[i].flags & 0x43000000;

		if (flags == 0x42000000)
			mem = &pci_ranges_buf[i];
		else if (flags == 0x02000000)
			mmio = &pci_ranges_buf[i];
		else if (flags == 0x01000000)
			io = &pci_ranges_buf[i];
	}

	if (!mem || !mmio || !io)
		goto unhandled;
	/* The windows must be equal-sized powers of two and adjacent. */
	if (mem->size[1] != mmio->size[1])
		goto unhandled;
	if (mem->size[1] & (mem->size[1] - 1))
		goto unhandled;
	if (io->size[1] & (io->size[1] - 1))
		goto unhandled;

	if (mem->phys_addr + mem->size[1] == mmio->phys_addr)
		mem_base = mem;
	else if (mmio->phys_addr + mmio->size[1] == mem->phys_addr)
		mem_base = mmio;
	else
		goto unhandled;

	/* Local access windows: one covering mem+mmio, one for I/O. */
	out_be32(&pci_regs[1][0], mem_base->phys_addr | 1);
	out_be32(&pci_regs[2][0], ~(mem->size[1] + mmio->size[1] - 1));

	out_be32(&pci_regs[1][1], io->phys_addr | 1);
	out_be32(&pci_regs[2][1], ~(io->size[1] - 1));

	/* Outbound translation windows (little-endian bridge regs). */
	out_le32(&pci_regs[0][0], mem->pci_addr[1] >> 12);
	out_le32(&pci_regs[0][2], mem->phys_addr >> 12);
	out_le32(&pci_regs[0][4], (~(mem->size[1] - 1) >> 12) | 0xa0000000);

	out_le32(&pci_regs[0][6], mmio->pci_addr[1] >> 12);
	out_le32(&pci_regs[0][8], mmio->phys_addr >> 12);
	out_le32(&pci_regs[0][10], (~(mmio->size[1] - 1) >> 12) | 0x80000000);

	out_le32(&pci_regs[0][12], io->pci_addr[1] >> 12);
	out_le32(&pci_regs[0][14], io->phys_addr >> 12);
	out_le32(&pci_regs[0][16], (~(io->size[1] - 1) >> 12) | 0xc0000000);

	/* Inbound window: map all of system RAM (rounded up to a
	 * power of two) at PCI address zero. */
	out_le32(&pci_regs[0][58], 0);
	out_le32(&pci_regs[0][60], 0);

	mem_pow2 = 1 << (__ilog2_u32(bd.bi_memsize - 1) + 1);
	mem_mask = ~(mem_pow2 - 1) >> 12;
	out_le32(&pci_regs[0][62], 0xa0000000 | mem_mask);

	/* If the bus is still held in reset, release it -- presumably
	 * the delays cover power-valid-to-RST# and RST#-to-first-config
	 * timing; confirm against the PQ2 reference manual. */
	if (!(in_le32(&pci_regs[0][32]) & 1)) {
		udelay(100000);
		out_le32(&pci_regs[0][32], 1);
		udelay(1020000);
	}

	/* Config cycle to dev 0 reg 0x04 (PCI_COMMAND): set the MEMORY
	 * and MASTER enable bits. */
	out_le32(&pci_regs[0][64], 0x80000004);
	out_le32(&pci_regs[0][65], in_le32(&pci_regs[0][65]) | 6);

	/* SIU/arbiter setup in the SoC register space -- exact meaning
	 * of these magic values not derivable from here; see the PQ2
	 * reference manual. */
	out_8(&soc_regs[0x10028], 3);
	out_be32((u32 *)&soc_regs[0x1002c], 0x01236745);

	return;

err:
	printf("Bad PCI node -- using existing firmware setup.\r\n");
	return;

unhandled:
	printf("Unsupported PCI node -- using existing firmware setup.\r\n");
}
/*
 * Load the POST (power-on self test) word from its reserved location
 * near the top of RAM: POST_WORD_OFF bytes into the boot-counter area
 * that sits BOOTCOUNT_ADDR below the end of memory.  Read as a
 * little-endian 32-bit value.
 */
ulong post_word_load(void)
{
	volatile void* addr = (void *) (gd->ram_size - BOOTCOUNT_ADDR + POST_WORD_OFF);

	return in_le32(addr);
}
/*
 * Read one 32-bit word from the PPC4xx TRNG data register.
 *
 * @rng:  hwrng instance; ->priv holds the ioremapped register base.
 * @data: out: the random word.
 *
 * Always returns 4, the number of bytes produced.
 */
static int ppc4xx_rng_data_read(struct hwrng *rng, u32 *data)
{
	*data = in_le32((void __iomem *)rng->priv + PPC4XX_TRNG_DATA);
	return 4;
}
/*
 * pmac_ide_build_dmatable builds the DBDMA command list
 * for a transfer and sets the DBDMA channel to point to it.
 *
 * @drive: drive performing the transfer
 * @ix:    index into pmac_ide[] for this interface
 * @wr:    non-zero for a write (OUTPUT commands), zero for a read
 *
 * Returns 1 on success, 0 if the command table overflowed (caller
 * should fall back to PIO for this request).
 */
static int
pmac_ide_build_dmatable(ide_drive_t *drive, int ix, int wr)
{
	struct dbdma_cmd *table, *tstart;
	int count = 0;
	struct request *rq = HWGROUP(drive)->rq;
	struct buffer_head *bh = rq->bh;
	unsigned int size, addr;
	volatile struct dbdma_regs *dma = pmac_ide[ix].dma_regs;

	table = tstart = (struct dbdma_cmd *) DBDMA_ALIGN(pmac_ide[ix].dma_table);

	/* Stop the channel and wait for it to go quiescent before we
	 * rewrite its command list. */
	out_le32(&dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
	while (in_le32(&dma->status) & RUN)
		udelay(1);

	do {
		/*
		 * Determine addr and size of next buffer area.  We assume that
		 * individual virtual buffers are always composed linearly in
		 * physical memory.  For example, we assume that any 8kB buffer
		 * is always composed of two adjacent physical 4kB pages rather
		 * than two possibly non-adjacent physical 4kB pages.
		 */
		if (bh == NULL) {  /* paging requests have (rq->bh == NULL) */
			addr = virt_to_bus(rq->buffer);
			size = rq->nr_sectors << 9;
		} else {
			/* group sequential buffers into one large buffer */
			addr = virt_to_bus(bh->b_data);
			size = bh->b_size;
			while ((bh = bh->b_reqnext) != NULL) {
				if ((addr + size) != virt_to_bus(bh->b_data))
					break;
				size += bh->b_size;
			}
		}

		/*
		 * Fill in the next DBDMA command block.
		 * Note that one DBDMA command can transfer
		 * at most 65535 bytes.
		 */
		while (size) {
			/* 0xfe00 keeps each chunk a multiple of 512 bytes. */
			unsigned int tc = (size < 0xfe00)? size: 0xfe00;

			if (++count >= MAX_DCMDS) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				return 0; /* revert to PIO for this request */
			}
			st_le16(&table->command, wr? OUTPUT_MORE: INPUT_MORE);
			st_le16(&table->req_count, tc);
			st_le32(&table->phy_addr, addr);
			table->cmd_dep = 0;
			table->xfer_status = 0;
			table->res_count = 0;
			addr += tc;
			size -= tc;
			++table;
		}
	} while (bh != NULL);

	/* convert the last command to an input/output last command */
	if (count)
		st_le16(&table[-1].command, wr? OUTPUT_LAST: INPUT_LAST);
	else
		printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);

	/* add the stop command to the end of the list */
	memset(table, 0, sizeof(struct dbdma_cmd));
	out_le16(&table->command, DBDMA_STOP);

	/* Point the (stopped) channel at the new command list. */
	out_le32(&dma->cmdptr, virt_to_bus(tstart));
	return 1;
}
/* * Initialize the dpmac_info array. * */ static void initialize_dpmac_to_slot(void) { struct ccsr_gur __iomem *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR; int serdes1_prtcl = (in_le32(&gur->rcwsr[28]) & FSL_CHASSIS3_RCWSR28_SRDS1_PRTCL_MASK) >> FSL_CHASSIS3_RCWSR28_SRDS1_PRTCL_SHIFT; int serdes2_prtcl = (in_le32(&gur->rcwsr[28]) & FSL_CHASSIS3_RCWSR28_SRDS2_PRTCL_MASK) >> FSL_CHASSIS3_RCWSR28_SRDS2_PRTCL_SHIFT; char *env_hwconfig; env_hwconfig = getenv("hwconfig"); switch (serdes1_prtcl) { case 0x07: case 0x09: case 0x33: printf("qds: WRIOP: Supported SerDes1 Protocol 0x%02x\n", serdes1_prtcl); lane_to_slot_fsm1[0] = EMI1_SLOT1; lane_to_slot_fsm1[1] = EMI1_SLOT1; lane_to_slot_fsm1[2] = EMI1_SLOT1; lane_to_slot_fsm1[3] = EMI1_SLOT1; if (hwconfig_f("xqsgmii", env_hwconfig)) { lane_to_slot_fsm1[4] = EMI1_SLOT1; lane_to_slot_fsm1[5] = EMI1_SLOT1; lane_to_slot_fsm1[6] = EMI1_SLOT1; lane_to_slot_fsm1[7] = EMI1_SLOT1; } else { lane_to_slot_fsm1[4] = EMI1_SLOT2; lane_to_slot_fsm1[5] = EMI1_SLOT2; lane_to_slot_fsm1[6] = EMI1_SLOT2; lane_to_slot_fsm1[7] = EMI1_SLOT2; } break; case 0x2A: printf("qds: WRIOP: Supported SerDes1 Protocol 0x%02x\n", serdes1_prtcl); break; default: printf("%s qds: WRIOP: Unsupported SerDes1 Protocol 0x%02x\n", __func__, serdes1_prtcl); break; } switch (serdes2_prtcl) { case 0x07: case 0x08: case 0x09: case 0x49: printf("qds: WRIOP: Supported SerDes2 Protocol 0x%02x\n", serdes2_prtcl); lane_to_slot_fsm2[0] = EMI1_SLOT4; lane_to_slot_fsm2[1] = EMI1_SLOT4; lane_to_slot_fsm2[2] = EMI1_SLOT4; lane_to_slot_fsm2[3] = EMI1_SLOT4; if (hwconfig_f("xqsgmii", env_hwconfig)) { lane_to_slot_fsm2[4] = EMI1_SLOT4; lane_to_slot_fsm2[5] = EMI1_SLOT4; lane_to_slot_fsm2[6] = EMI1_SLOT4; lane_to_slot_fsm2[7] = EMI1_SLOT4; } else { /* No MDIO physical connection */ lane_to_slot_fsm2[4] = EMI1_SLOT6; lane_to_slot_fsm2[5] = EMI1_SLOT6; lane_to_slot_fsm2[6] = EMI1_SLOT6; lane_to_slot_fsm2[7] = EMI1_SLOT6; } break; default: printf(" %s qds: WRIOP: Unsupported SerDes2 Protocol 
0x%02x\n", __func__ , serdes2_prtcl); break; } }
/*
 * pmac_ide_dmaproc - dispatch entry point for IDE DMA operations on the
 * PowerMac DBDMA engine.
 *
 * @func: the DMA sub-operation to perform
 * @drive: the IDE drive the operation applies to
 *
 * The meaning of the return value depends on @func; see each case.
 *
 * BUGFIX: the fifo-drain loop in ide_dma_test_irq incremented the local
 * counter `i` without ever initializing it (undefined behavior); `i` is
 * now initialized to 0 at declaration.
 */
int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
{
	int ix, dstat, i = 0;
	volatile struct dbdma_regs *dma;

	/* Can we stuff a pointer to our intf structure in config_data
	 * or select_data in hwif ?
	 */
	ix = pmac_ide_find(drive);
	if (ix < 0)
		return 0;
	dma = pmac_ide[ix].dma_regs;

	switch (func) {
	case ide_dma_off:
		printk(KERN_INFO "%s: DMA disabled\n", drive->name);
		/* fall through */
	case ide_dma_off_quietly:
		drive->using_dma = 0;
		break;
	case ide_dma_on:
	case ide_dma_check:
		pmac_ide_check_dma(drive);
		break;
	case ide_dma_read:
	case ide_dma_write:
		if (!pmac_ide_build_dmatable(drive, ix, func == ide_dma_write))
			return 1;
		drive->waiting_for_dma = 1;
		if (drive->media != ide_disk)
			return 0;
		ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, NULL);
		OUT_BYTE(func == ide_dma_write ? WIN_WRITEDMA : WIN_READDMA,
			 IDE_COMMAND_REG);
		/* fall through: kick the DBDMA engine */
	case ide_dma_begin:
		out_le32(&dma->control, (RUN << 16) | RUN);
		break;
	case ide_dma_end:
		drive->waiting_for_dma = 0;
		dstat = in_le32(&dma->status);
		out_le32(&dma->control, ((RUN|WAKE|DEAD) << 16));
		/* verify good dma status */
		return (dstat & (RUN|DEAD|ACTIVE)) != RUN;
	case ide_dma_test_irq:
		if ((in_le32(&dma->status) & (RUN|ACTIVE)) == RUN)
			return 1;
		/* That's a bit ugly and dangerous, but works in our case
		 * to workaround a problem with the channel status staying
		 * active if the drive returns an error
		 */
		if (IDE_CONTROL_REG) {
			byte stat;
			stat = GET_ALTSTAT();
			if (stat & ERR_STAT)
				return 1;
		}
		/* In some edge cases, some datas may still be in the dbdma
		 * engine fifo, we wait a bit for dbdma to complete
		 */
		while ((in_le32(&dma->status) & (RUN|ACTIVE)) != RUN) {
			if (++i > 100)
				return 0;
			udelay(1);
		}
		return 1;

		/* Let's implement those just in case someone wants them */
	case ide_dma_bad_drive:
	case ide_dma_good_drive:
		return check_drive_lists(drive, (func == ide_dma_good_drive));
	case ide_dma_verbose:
		return report_drive_dmaing(drive);
	case ide_dma_retune:
	case ide_dma_lostirq:
	case ide_dma_timeout:
		printk(KERN_WARNING "ide_pmac_dmaproc: chipset supported %s func only: %d\n",
		       ide_dmafunc_verbose(func), func);
		return 1;
	default:
		printk(KERN_WARNING "ide_pmac_dmaproc: unsupported %s func: %d\n",
		       ide_dmafunc_verbose(func), func);
		return 1;
	}
	return 0;
}
/**
 * pasemi_read_mac_reg - fetch a 32-bit little-endian MAC register value
 * @intf: index of the MAC interface whose register block to use
 * @reg: byte offset of the register within that block (PCI CFG space)
 *
 * Returns the register contents in native byte order.
 */
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
	return in_le32(mac_regs[intf] + reg);
}
/*
 * Return the current value of the Embedded Utilities Memory Block Base
 * Address Register (EUMBBAR), read from the processor configuration
 * register via Processor Address Map B (CHRP): write the config address,
 * then read back through the config data port.
 */
unsigned int get_eumbbar(void)
{
	volatile unsigned *cfg_addr = (volatile unsigned *)0xfec00000;
	volatile unsigned *cfg_data = (volatile unsigned *)0xfee00000;

	/* Select EUMBBAR (config offset 0x78) on the host bridge. */
	out_le32(cfg_addr, 0x80000078);
	return in_le32(cfg_data);
}
/*
 * board_eth_init - board-level Ethernet bring-up.
 *
 * Decodes the SerDes protocols, registers the two real WRIOP MDIO buses
 * plus the QDS muxing front-ends, configures each DPMAC's PHY interface,
 * and finally initializes CPU and PCI Ethernet.
 *
 * Returns the pci_eth_init() result.  NOTE(review): the cpu_eth_init()
 * status is overwritten by pci_eth_init() below — preserved as-is, but
 * worth confirming this is intentional.
 *
 * BUGFIX: removed dead code (`if (i == 16) i = NUM_WRIOP_PORTS;` placed
 * after a `break;`, hence unreachable) and added NULL checks on the two
 * malloc() results that were previously dereferenced unchecked.
 */
int board_eth_init(bd_t *bis)
{
	int error;
#ifdef CONFIG_FSL_MC_ENET
	struct ccsr_gur __iomem *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
	int serdes1_prtcl = (in_le32(&gur->rcwsr[28]) &
			     FSL_CHASSIS3_RCWSR28_SRDS1_PRTCL_MASK) >>
			    FSL_CHASSIS3_RCWSR28_SRDS1_PRTCL_SHIFT;
	int serdes2_prtcl = (in_le32(&gur->rcwsr[28]) &
			     FSL_CHASSIS3_RCWSR28_SRDS2_PRTCL_MASK) >>
			    FSL_CHASSIS3_RCWSR28_SRDS2_PRTCL_SHIFT;
	struct memac_mdio_info *memac_mdio0_info;
	struct memac_mdio_info *memac_mdio1_info;
	unsigned int i;
	char *env_hwconfig;

	env_hwconfig = getenv("hwconfig");

	initialize_dpmac_to_slot();

	memac_mdio0_info = malloc(sizeof(struct memac_mdio_info));
	if (!memac_mdio0_info)
		return -1;
	memac_mdio0_info->regs =
		(struct memac_mdio_controller *)CONFIG_SYS_FSL_WRIOP1_MDIO1;
	memac_mdio0_info->name = DEFAULT_WRIOP_MDIO1_NAME;

	/* Register the real MDIO1 bus */
	fm_memac_mdio_init(bis, memac_mdio0_info);

	memac_mdio1_info = malloc(sizeof(struct memac_mdio_info));
	if (!memac_mdio1_info)
		return -1;
	memac_mdio1_info->regs =
		(struct memac_mdio_controller *)CONFIG_SYS_FSL_WRIOP1_MDIO2;
	memac_mdio1_info->name = DEFAULT_WRIOP_MDIO2_NAME;

	/* Register the real MDIO2 bus */
	fm_memac_mdio_init(bis, memac_mdio1_info);

	/* Register the muxing front-ends to the MDIO buses */
	ls2080a_qds_mdio_init(DEFAULT_WRIOP_MDIO1_NAME, EMI1_SLOT1);
	ls2080a_qds_mdio_init(DEFAULT_WRIOP_MDIO1_NAME, EMI1_SLOT2);
	ls2080a_qds_mdio_init(DEFAULT_WRIOP_MDIO1_NAME, EMI1_SLOT3);
	ls2080a_qds_mdio_init(DEFAULT_WRIOP_MDIO1_NAME, EMI1_SLOT4);
	ls2080a_qds_mdio_init(DEFAULT_WRIOP_MDIO1_NAME, EMI1_SLOT5);
	ls2080a_qds_mdio_init(DEFAULT_WRIOP_MDIO1_NAME, EMI1_SLOT6);
	ls2080a_qds_mdio_init(DEFAULT_WRIOP_MDIO2_NAME, EMI2);

	/* Hook up each DPMAC according to its PHY interface mode. */
	for (i = WRIOP1_DPMAC1; i < NUM_WRIOP_PORTS; i++) {
		switch (wriop_get_enet_if(i)) {
		case PHY_INTERFACE_MODE_QSGMII:
			ls2080a_handle_phy_interface_qsgmii(i);
			break;
		case PHY_INTERFACE_MODE_SGMII:
			ls2080a_handle_phy_interface_sgmii(i);
			break;
		case PHY_INTERFACE_MODE_XGMII:
			ls2080a_handle_phy_interface_xsgmii(i);
			break;
		default:
			break;
		}
	}

	error = cpu_eth_init(bis);

	/* The SGMII repeaters only need configuring in XQSGMII setups. */
	if (hwconfig_f("xqsgmii", env_hwconfig)) {
		if (serdes1_prtcl == 0x7)
			sgmii_configure_repeater(1);
		if (serdes2_prtcl == 0x7 || serdes2_prtcl == 0x8 ||
		    serdes2_prtcl == 0x49)
			sgmii_configure_repeater(2);
	}
#endif
	error = pci_eth_init(bis);
	return error;
}
/*
 * Poll the MPSC interrupt cause register and report (1/0) whether the
 * receive-character-complete bit is set, i.e. input is pending.
 */
static u8 mpsc_tstc(void)
{
	unsigned int cause;

	cause = in_le32((u32 *)(mpscintr_base + MPSC_INTR_CAUSE));
	return (cause & MPSC_INTR_CAUSE_RCC) ? 1 : 0;
}
/*
 * Sample the bit-banged MDIO data line for @bus.
 * Returns 1 if the GPIO pin for this bus reads high, 0 otherwise.
 */
static inline int mdio_read(struct mii_bus *bus)
{
	unsigned int pins = in_le32(gpio_regs + 0x40);

	return (pins & (1 << MDIO_PIN(bus))) ? 1 : 0;
}
/*
 * Discover and set up the PPC4xx PCI host bridge: allocate a hose,
 * derive the usable PCI memory window from the hardware PMM registers
 * (IBM parts) or use fixed bounds (Xilinx OCP), fill in the hose's
 * I/O/memory ranges and resources, scan the bus, and install the
 * platform PCI callbacks in ppc_md.
 */
void ppc4xx_find_bridges(void)
{
	struct pci_controller *hose_a;
	struct pcil0_regs *pcip;
	unsigned int new_pmm_max;	/* upper bound of PCI memory window */
	unsigned int new_pmm_min;	/* lower bound of PCI memory window */

	isa_io_base = 0;
	isa_mem_base = 0;
	pci_dram_offset = 0;

#if (PSR_PCI_ARBIT_EN > 1)
	/* Check if running in slave mode */
	if ((mfdcr(DCRN_CHPSR) & PSR_PCI_ARBIT_EN) == 0) {
		printk("Running as PCI slave, kernel PCI disabled !\n");
		return;
	}
#endif
	/* Setup PCI32 hose */
	hose_a = pcibios_alloc_controller();
	if (!hose_a)
		return;
	setup_indirect_pci(hose_a, PPC405_PCI_CONFIG_ADDR, PPC405_PCI_CONFIG_DATA);

#ifdef CONFIG_XILINX_OCP
	/* Eliminate "unused variable" warning for pcip. Optimizer removes. */
	pcip = NULL;
	new_pmm_min = PPC405_PCI_LOWER_MEM;
	new_pmm_max = PPC405_PCI_UPPER_MEM;
#else /* Must be IBM */
	/*
	 * Walk the three PCI Memory Map (PMM) window registers the
	 * firmware may have programmed, disable windows that map below
	 * the physical PCI memory base, and widen new_pmm_min/max to
	 * cover every remaining enabled window.
	 *
	 * NOTE(review): if ioremap() fails, or if no enabled window
	 * survives the checks, new_pmm_max (and possibly new_pmm_min)
	 * is used uninitialized further down — confirm firmware always
	 * programs at least one valid PMM window here.
	 */
	pcip = ioremap(PPC4xx_PCI_LCFG_PADDR, PAGE_SIZE);
	if (pcip != NULL) {
		unsigned int tmp_addr;
		unsigned int tmp_size;
		unsigned int reg_index;

#if defined(CONFIG_BIOS_FIXUP)
		bios_fixup(hose_a, pcip);
#endif
		new_pmm_min = 0xffffffff;
		for (reg_index = 0; reg_index < 3; reg_index++) {
			tmp_size = in_le32((void *) &(pcip->pmm[reg_index].ma));	// *_PMM0MA
			/* Bit 0 of the MA register = window enabled */
			if (tmp_size & 0x1) {
				tmp_addr = in_le32((void *) &(pcip->pmm[reg_index].pcila));	// *_PMM0PCILA
				if (tmp_addr < PPC405_PCI_PHY_MEM_BASE) {
					printk(KERN_DEBUG "Disabling mapping to PCI mem addr 0x%8.8x\n", tmp_addr);
					/* Clear the enable bit to disable this window */
					out_le32((void *) &(pcip->pmm[reg_index].ma), tmp_size & ~1);	// *_PMMOMA
				} else {
					tmp_addr = in_le32((void *) &(pcip->pmm[reg_index].la));	// *_PMMOLA
					if (tmp_addr < new_pmm_min)
						new_pmm_min = tmp_addr;
					/* End of window = start + (size mask inverted) */
					tmp_addr = tmp_addr + (0xffffffff - (tmp_size & 0xffffc000));
					if (tmp_addr > PPC405_PCI_UPPER_MEM) {
						new_pmm_max = tmp_addr;	// PPC405_PCI_UPPER_MEM
					} else {
						new_pmm_max = PPC405_PCI_UPPER_MEM;
					}
				}
			}
		}	// for
		iounmap(pcip);
	}
#endif

	hose_a->first_busno = 0;
	hose_a->last_busno = 0xff;
	hose_a->pci_mem_offset = 0;

	/* Setup bridge memory/IO ranges & resources
	 * TODO: Handle firmwares setting up a legacy ISA mem base
	 */
	hose_a->io_space.start = PPC405_PCI_LOWER_IO;
	hose_a->io_space.end = PPC405_PCI_UPPER_IO;
	hose_a->mem_space.start = new_pmm_min;
	hose_a->mem_space.end = new_pmm_max;
	hose_a->io_base_phys = PPC405_PCI_PHY_IO_BASE;
	hose_a->io_base_virt = ioremap(hose_a->io_base_phys, 0x10000);
	hose_a->io_resource.start = 0;
	hose_a->io_resource.end = PPC405_PCI_UPPER_IO - PPC405_PCI_LOWER_IO;
	hose_a->io_resource.flags = IORESOURCE_IO;
	hose_a->io_resource.name = "PCI I/O";
	hose_a->mem_resources[0].start = new_pmm_min;
	hose_a->mem_resources[0].end = new_pmm_max;
	hose_a->mem_resources[0].flags = IORESOURCE_MEM;
	hose_a->mem_resources[0].name = "PCI Memory";
	isa_io_base = (int) hose_a->io_base_virt;
	isa_mem_base = 0;	/* ISA not implemented */
	ISA_DMA_THRESHOLD = 0x00ffffff;	/* ??? ISA not implemented */

	/* Scan busses & initial setup by pci_auto */
	hose_a->last_busno = pciauto_bus_scan(hose_a, hose_a->first_busno);

	/* Setup ppc_md */
	ppc_md.pcibios_fixup = NULL;
	ppc_md.pci_exclude_device = ppc4xx_exclude_device;
	ppc_md.pcibios_fixup_resources = ppc405_pcibios_fixup_resources;
	ppc_md.pci_swizzle = common_swizzle;
	ppc_md.pci_map_irq = ppc405_map_irq;
}
/**
 * pasemi_read_iob_reg - fetch a 32-bit little-endian IOB register value
 * @reg: byte offset of the register within the IOB block (PCI CFG space)
 *
 * Returns the register contents in native byte order.
 */
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
	return in_le32(iob_regs + reg);
}