void set_auxio(unsigned char bits_on, unsigned char bits_off) { unsigned char regval; unsigned long flags; spin_lock_irqsave(&auxio_lock, flags); switch(sparc_cpu_model) { case sun4c: regval = sbus_readb(auxio_register); sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN, auxio_register); break; case sun4m: if(!auxio_register) break; regval = sbus_readb(auxio_register); sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN4M, auxio_register); break; case sun4d: break; default: panic("Can't set AUXIO register on this machine."); } spin_unlock_irqrestore(&auxio_lock, flags); }
/** * bw2_blank - Optional function. Blanks the display. * @blank_mode: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer */ static int bw2_blank(int blank, struct fb_info *info) { struct bw2_par *par = (struct bw2_par *) info->par; struct bw2_regs __iomem *regs = par->regs; unsigned long flags; u8 val; spin_lock_irqsave(&par->lock, flags); switch (blank) { case FB_BLANK_UNBLANK: /* Unblanking */ val = sbus_readb(®s->control); val |= BWTWO_CTL_ENABLE_VIDEO; sbus_writeb(val, ®s->control); par->flags &= ~BW2_FLAG_BLANKED; break; case FB_BLANK_NORMAL: /* Normal blanking */ case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ case FB_BLANK_POWERDOWN: /* Poweroff */ val = sbus_readb(®s->control); val &= ~BWTWO_CTL_ENABLE_VIDEO; sbus_writeb(val, ®s->control); par->flags |= BW2_FLAG_BLANKED; break; } spin_unlock_irqrestore(&par->lock, flags); return 0; }
static void set_pins(unsigned short pins, unsigned minor) { void __iomem *base = base_addrs[minor]; unsigned char bits_tcr = 0, bits_or = 0; if (instances[minor].direction & 0x20) bits_tcr |= P_TCR_DIR; if ( pins & BPP_PP_nStrobe) bits_tcr |= P_TCR_DS; if ( pins & BPP_PP_nAutoFd) bits_or |= P_OR_AFXN; if (! (pins & BPP_PP_nInit)) bits_or |= P_OR_INIT; if (! (pins & BPP_PP_nSelectIn)) bits_or |= P_OR_SLCT_IN; sbus_writeb(bits_or, base + BPP_OR); sbus_writeb(bits_tcr, base + BPP_TCR); }
/*
 * parport_sunbpp_write_data - write one data byte to the port's data
 * register.  Fix: "®s" mojibake restored to "&regs".
 */
static void parport_sunbpp_write_data(struct parport *p, unsigned char d)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;

	sbus_writeb(d, &regs->p_dr);
	dprintk((KERN_DEBUG "wrote 0x%x\n", d));
}
/*
 * sun4c_init_IRQ - locate the sun4c interrupt-enable register via the
 * PROM device tree and install the sun4c IRQ entry points.
 *
 * Any probe failure is fatal: prom_printf() + prom_halt(), so this
 * never returns unsuccessfully.
 */
void __init sun4c_init_IRQ(void)
{
	struct device_node *dp;
	const u32 *addr;

	dp = of_find_node_by_name(NULL, "interrupt-enable");
	if (!dp) {
		prom_printf("sun4c_init_IRQ: Unable to find interrupt-enable\n");
		prom_halt();
	}

	addr = of_get_property(dp, "address", NULL);
	of_node_put(dp);
	if (!addr) {
		prom_printf("sun4c_init_IRQ: No address property\n");
		prom_halt();
	}

	/* NOTE(review): addr[0] is presumably a PROM-mapped virtual
	 * address of the enable register — confirm against the OBP
	 * "address" property layout. */
	interrupt_enable = (void __iomem *) (unsigned long) addr[0];

	BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);

	sparc_irq_config.init_timers = sun4c_init_timers;
	sparc_irq_config.build_device_irq = sun4c_build_device_irq;

#ifdef CONFIG_SMP
	/* sun4c is uniprocessor: SMP cross-call hooks become no-ops. */
	BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(set_irq_udt, sun4c_nop, BTFIXUPCALL_NOP);
#endif
	sbus_writeb(SUN4C_INT_ENABLE, interrupt_enable);
	/* Cannot enable interrupts until OBP ticker is disabled. */
}
/*
 * sun4c_enable_irq - set the enable bit for one of the four maskable
 * sun4c interrupt levels (1, 8, 10, 14).  Other levels are ignored.
 * Runs with local interrupts disabled around the register RMW.
 */
static void sun4c_enable_irq(unsigned int irq_nr)
{
	unsigned long flags;
	unsigned char mask, bit;

	local_irq_save(flags);
	irq_nr &= (NR_IRQS - 1);
	mask = sbus_readb(interrupt_enable);
	switch (irq_nr) {
	case 1:
		bit = SUN4C_INT_E1;
		break;
	case 8:
		bit = SUN4C_INT_E8;
		break;
	case 10:
		bit = SUN4C_INT_E10;
		break;
	case 14:
		bit = SUN4C_INT_E14;
		break;
	default:
		/* Not a maskable level; leave the register untouched. */
		local_irq_restore(flags);
		return;
	}
	sbus_writeb(mask | bit, interrupt_enable);
	local_irq_restore(flags);
}
static unsigned char parport_sunbpp_frob_control(struct parport *p, unsigned char mask, unsigned char val) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; unsigned char value_tcr = sbus_readb(®s->p_tcr); unsigned char value_or = sbus_readb(®s->p_or); dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n", value_tcr, value_or)); if (mask & PARPORT_CONTROL_STROBE) { if (val & PARPORT_CONTROL_STROBE) { value_tcr &= ~P_TCR_DS; } else { value_tcr |= P_TCR_DS; } } if (mask & PARPORT_CONTROL_AUTOFD) { if (val & PARPORT_CONTROL_AUTOFD) { value_or &= ~P_OR_AFXN; } else { value_or |= P_OR_AFXN; } } if (mask & PARPORT_CONTROL_INIT) { if (val & PARPORT_CONTROL_INIT) { value_or &= ~P_OR_INIT; } else { value_or |= P_OR_INIT; } } if (mask & PARPORT_CONTROL_SELECT) { if (val & PARPORT_CONTROL_SELECT) { value_or |= P_OR_SLCT_IN; } else { value_or &= ~P_OR_SLCT_IN; } } sbus_writeb(value_or, ®s->p_or); sbus_writeb(value_tcr, ®s->p_tcr); dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n", value_tcr, value_or)); return parport_sunbpp_read_control(p); }
/*
 * __cg14_reset - clear the pixel-mode field in the cg14 master control
 * register (back to the default 8-bit mode).  Caller holds par->lock.
 * Fix: "®s" mojibake restored to "&regs".
 */
static void __cg14_reset(struct cg14_par *par)
{
	struct cg14_regs __iomem *regs = par->regs;
	u8 val;

	val = sbus_readb(&regs->mcr);
	val &= ~(CG14_MCR_PIXMODE_MASK);
	sbus_writeb(val, &regs->mcr);
}
static void control_pc_to_sunbpp(struct parport *p, unsigned char status) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; unsigned char value_tcr = sbus_readb(®s->p_tcr); unsigned char value_or = sbus_readb(®s->p_or); if (status & PARPORT_CONTROL_STROBE) value_tcr |= P_TCR_DS; if (status & PARPORT_CONTROL_AUTOFD) value_or |= P_OR_AFXN; if (status & PARPORT_CONTROL_INIT) value_or |= P_OR_INIT; if (status & PARPORT_CONTROL_SELECT) value_or |= P_OR_SLCT_IN; sbus_writeb(value_or, ®s->p_or); sbus_writeb(value_tcr, ®s->p_tcr); }
/*
 * parport_sunbpp_data_reverse - switch the data lines to input
 * (peripheral -> host) by setting the direction bit in TCR.
 * Fix: "®s" mojibake restored to "&regs".
 */
static void parport_sunbpp_data_reverse(struct parport *p)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
	u8 val = sbus_readb(&regs->p_tcr);

	dprintk((KERN_DEBUG "reverse\n"));
	val |= P_TCR_DIR;
	sbus_writeb(val, &regs->p_tcr);
}
/*
 * parport_sunbpp_data_forward - switch the data lines to output
 * (host -> peripheral) by clearing the direction bit in TCR.
 * Fix: "®s" mojibake restored to "&regs".
 */
static void parport_sunbpp_data_forward(struct parport *p)
{
	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
	unsigned char value_tcr = sbus_readb(&regs->p_tcr);

	dprintk((KERN_DEBUG "forward\n"));
	value_tcr &= ~P_TCR_DIR;
	sbus_writeb(value_tcr, &regs->p_tcr);
}
/* Re-enable video output on a cg3, serialized by the fb lock. */
static void cg3_unblank(struct fb_info_sbusfb *fb)
{
	unsigned long flags;
	u8 ctl;

	spin_lock_irqsave(&fb->lock, flags);
	ctl = sbus_readb(&fb->s.cg3.regs->control);
	sbus_writeb(ctl | CG3_CR_ENABLE_VIDEO, &fb->s.cg3.regs->control);
	spin_unlock_irqrestore(&fb->lock, flags);
}
/* Disable video output on a bwtwo, serialized by the fb lock. */
static void bw2_blank(struct fb_info_sbusfb *fb)
{
	unsigned long flags;
	u8 ctl;

	spin_lock_irqsave(&fb->lock, flags);
	ctl = sbus_readb(&fb->s.bw2.regs->control);
	sbus_writeb(ctl & ~BWTWO_CTL_ENABLE_VIDEO, &fb->s.bw2.regs->control);
	spin_unlock_irqrestore(&fb->lock, flags);
}
/*
 * sun4c_unmask_irq - OR this irq's enable bit(s), stashed in
 * chip_data, into the interrupt-enable register.  A zero mask means
 * the line is not maskable here, so do nothing.
 */
static void sun4c_unmask_irq(struct irq_data *data)
{
	unsigned long bits = (unsigned long)data->chip_data;
	unsigned long flags;

	if (!bits)
		return;

	local_irq_save(flags);
	sbus_writeb(sbus_readb(interrupt_enable) | bits, interrupt_enable);
	local_irq_restore(flags);
}
/*
 * bw2_do_default_mode - pick a register initialization table based on
 * the monitor ID/resolution bits in the bwtwo status register, and
 * write it out (table is pairs of <register offset, value>, zero
 * offset terminates).
 *
 * For the 1600x1280 ECL monitor the var/linebytes geometry is also
 * fixed up.  Returns 0 on success (including "no monitor connected"),
 * -EINVAL for an unrecognized status byte.
 */
static int __devinit bw2_do_default_mode(struct bw2_par *par, struct fb_info *info,
					 int *linebytes)
{
	u8 status, mon;
	u8 *p;

	status = sbus_readb(&par->regs->status);
	mon = status & BWTWO_SR_RES_MASK;
	switch (status & BWTWO_SR_ID_MASK) {
	case BWTWO_SR_ID_MONO_ECL:
		if (mon == BWTWO_SR_1600_1280) {
			p = bw2regs_1600;
			info->var.xres = info->var.xres_virtual = 1600;
			info->var.yres = info->var.yres_virtual = 1280;
			*linebytes = 1600 / 8;
		} else
			p = bw2regs_ecl;
		break;

	case BWTWO_SR_ID_MONO:
		p = bw2regs_analog;
		break;

	case BWTWO_SR_ID_MSYNC:
		if (mon == BWTWO_SR_1152_900_76_A ||
		    mon == BWTWO_SR_1152_900_76_B)
			p = bw2regs_76hz;
		else
			p = bw2regs_66hz;
		break;

	case BWTWO_SR_ID_NOCONN:
		/* No monitor attached; nothing to program. */
		return 0;

	default:
		printk(KERN_ERR "bw2: can't handle SR %02x\n", status);
		return -EINVAL;
	}

	/* Walk the <offset, value> pairs until the zero-offset sentinel. */
	for ( ; *p; p += 2) {
		u8 __iomem *regp = &((u8 __iomem *)par->regs)[p[0]];
		sbus_writeb(p[1], regp);
	}
	return 0;
}
/*
 * __auxio_sbus_set - read-modify-write the SBUS AUXIO register: apply
 * bits_on/bits_off and always clear the AUX1 mask bits.  No-op when
 * the register was never probed.
 */
static void __auxio_sbus_set(u8 bits_on, u8 bits_off)
{
	unsigned long flags;
	u8 val;

	if (!auxio_register)
		return;

	spin_lock_irqsave(&auxio_lock, flags);
	val = sbus_readb(auxio_register);
	val = (val | bits_on) & ~bits_off & ~AUXIO_AUX1_MASK;
	sbus_writeb(val, auxio_register);
	spin_unlock_irqrestore(&auxio_lock, flags);
}
static void qe_set_multicast(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct netdev_hw_addr *ha; u8 new_mconfig = qep->mconfig; char *addrs; int i; u32 crc; /* Lock out others. */ netif_stop_queue(dev); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, qep->mregs + MREGS_IACONFIG); while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) sbus_writeb(0xff, qep->mregs + MREGS_FILTER); sbus_writeb(0, qep->mregs + MREGS_IACONFIG); } else if (dev->flags & IFF_PROMISC) { new_mconfig |= MREGS_MCONFIG_PROMISC; } else { u16 hash_table[4]; u8 *hbytes = (unsigned char *) &hash_table[0]; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, dev) { addrs = ha->addr; if (!(*addrs & 1)) continue; crc = ether_crc_le(6, addrs); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } /* Program the qe with the new filter value. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, qep->mregs + MREGS_IACONFIG); while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) { u8 tmp = *hbytes++; sbus_writeb(tmp, qep->mregs + MREGS_FILTER); } sbus_writeb(0, qep->mregs + MREGS_IACONFIG); }
/*
 * cg3_loadcmap - push a slice of the software colormap into the
 * Brooktree DAC behind the cg3.
 *
 * NOTE(review): D4M3/D4M4 appear to convert between 4-entry and packed
 * 3-byte-per-word indexing of the colormap, and the DAC address
 * register presumably auto-increments so only one address write is
 * needed before the burst of 32-bit writes — confirm against the
 * Bt458 datasheet / macro definitions.  @p is unused here.
 */
static void cg3_loadcmap (struct fb_info_sbusfb *fb, struct display *p,
			  int index, int count)
{
	struct bt_regs *bt = &fb->s.cg3.regs->cmap;
	unsigned long flags;
	u32 *i;
	volatile u8 *regp;
	int steps;

	spin_lock_irqsave(&fb->lock, flags);
	/* Word pointer into the packed colormap at the first affected word. */
	i = (((u32 *)fb->color_map) + D4M3(index));
	/* Number of 32-bit words covering [index, index+count). */
	steps = D4M3(index+count-1) - D4M3(index)+3;
	regp = (volatile u8 *)&bt->addr;
	sbus_writeb(D4M4(index), regp);
	while (steps--) {
		u32 val = *i++;
		sbus_writel(val, &bt->color_map);
	}
	spin_unlock_irqrestore(&fb->lock, flags);
}
static inline int qe_stop(struct sunqe *qep) { void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; int tries; /* Reset the MACE, then the QEC channel. */ sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG); tries = MACE_RESET_RETRIES; while (--tries) { u8 tmp = sbus_readb(mregs + MREGS_BCONFIG); if (tmp & MREGS_BCONFIG_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n"); return -1; } sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL); tries = QE_RESET_RETRIES; while (--tries) { u32 tmp = sbus_readl(cregs + CREG_CTRL); if (tmp & CREG_CTRL_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n"); return -1; } return 0; }
/*
 * __auxio_rmw - read-modify-write the AUXIO register through either
 * the EBUS (32-bit readl/writel) or SBUS (8-bit) accessors.  On SBUS
 * the AUX1 mask bits are additionally cleared.  No-op if the register
 * was never probed.
 */
static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus)
{
	unsigned long flags;
	u8 val;

	if (!auxio_register)
		return;

	spin_lock_irqsave(&auxio_lock, flags);

	val = ebus ? (u8) readl(auxio_register)
		   : sbus_readb(auxio_register);
	val |= bits_on;
	val &= ~bits_off;
	if (!ebus)
		val &= ~AUXIO_AUX1_MASK;

	if (ebus)
		writel((u32) val, auxio_register);
	else
		sbus_writeb(val, auxio_register);

	spin_unlock_irqrestore(&auxio_lock, flags);
}
/*
 * init_one_port - map one SBUS bidirectional parallel port, register
 * it with the parport core, hook its interrupt, force the data lines
 * to forward direction, and add it to the driver's port list.
 *
 * Returns 1 on success.  Fix: "®s" mojibake restored to "&regs".
 * NOTE(review): the out0..out2 failure paths return err == 0; callers
 * that count successfully-added ports still behave, but a negative
 * errno might be clearer — confirm caller expectations before
 * changing.
 */
static int __init init_one_port(struct sbus_dev *sdev)
{
	struct parport *p;
	/* at least in theory there may be a "we don't dma" case */
	struct parport_operations *ops;
	void __iomem *base;
	int irq, dma, err = 0, size;
	struct bpp_regs __iomem *regs;
	unsigned char value_tcr;
	Node *node;

	dprintk((KERN_DEBUG "init_one_port(%p): ranges, alloc_io, ", sdev));
	node = kmalloc(sizeof(Node), GFP_KERNEL);
	if (!node)
		goto out0;

	irq = sdev->irqs[0];
	base = sbus_ioremap(&sdev->resource[0], 0,
			    sdev->reg_addrs[0].reg_size, "sunbpp");
	if (!base)
		goto out1;

	size = sdev->reg_addrs[0].reg_size;
	dma = PARPORT_DMA_NONE;

	dprintk(("alloc(ppops), "));
	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
	if (!ops)
		goto out2;

	memcpy(ops, &parport_sunbpp_ops, sizeof(struct parport_operations));

	dprintk(("register_port\n"));
	if (!(p = parport_register_port((unsigned long)base, irq, dma, ops)))
		goto out3;

	p->size = size;

	dprintk((KERN_DEBUG "init_one_port: request_irq(%08x:%p:%x:%s:%p) ",
		 p->irq, parport_sunbpp_interrupt, SA_SHIRQ, p->name, p));
	if ((err = request_irq(p->irq, parport_sunbpp_interrupt,
			       SA_SHIRQ, p->name, p)) != 0) {
		dprintk(("ERROR %d\n", err));
		goto out4;
	}
	dprintk(("OK\n"));
	parport_sunbpp_enable_irq(p);

	/* Default the data lines to forward (output) direction. */
	regs = (struct bpp_regs __iomem *)p->base;
	dprintk((KERN_DEBUG "forward\n"));
	value_tcr = sbus_readb(&regs->p_tcr);
	value_tcr &= ~P_TCR_DIR;
	sbus_writeb(value_tcr, &regs->p_tcr);

	printk(KERN_INFO "%s: sunbpp at 0x%lx\n", p->name, p->base);
	node->port = p;
	list_add(&node->list, &port_list);
	parport_announce_port(p);

	return 1;

out4:
	parport_put_port(p);
out3:
	kfree(ops);
out2:
	sbus_iounmap(base, size);
out1:
	kfree(node);
out0:
	return err;
}
/*
 * myri_load_lanai - download the LANAI firmware image, program the
 * station address and SBUS burst/interrupt configuration in shared
 * memory, release the chip from reset and handshake with it.
 *
 * Returns 0 on success or a negative errno / handshake status.
 * Fix: the error printks had been stubbed out into comments and bare
 * ';' statements, silently dropping all diagnostics — restored
 * (messages match the duplicate implementation elsewhere in this
 * file).
 */
static int __devinit myri_load_lanai(struct myri_eth *mp)
{
	const struct firmware *fw;
	struct net_device *dev = mp->dev;
	struct myri_shmem __iomem *shmem = mp->shmem;
	void __iomem *rptr;
	int i, lanai4_data_size;

	myri_disable_irq(mp->lregs, mp->cregs);
	myri_reset_on(mp->cregs);

	/* Clear all of LANAI RAM before the download. */
	rptr = mp->lanai;
	for (i = 0; i < mp->eeprom.ramsz; i++)
		sbus_writeb(0, rptr + i);

	if (mp->eeprom.cpuvers >= CPUVERS_3_0)
		sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL);

	i = request_firmware(&fw, FWNAME, &mp->myri_op->dev);
	if (i) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       FWNAME, i);
		return i;
	}
	if (fw->size < 2) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, FWNAME);
		release_firmware(fw);
		return -EINVAL;
	}
	/* First two image bytes encode the data segment size. */
	lanai4_data_size = fw->data[0] << 8 | fw->data[1];

	/* Load executable code. */
	for (i = 2; i < fw->size; i++)
		sbus_writeb(fw->data[i], rptr++);

	/* Load data segment. */
	for (i = 0; i < lanai4_data_size; i++)
		sbus_writeb(0, rptr++);

	/* Set device address. */
	sbus_writeb(0, &shmem->addr[0]);
	sbus_writeb(0, &shmem->addr[1]);
	for (i = 0; i < 6; i++)
		sbus_writeb(dev->dev_addr[i], &shmem->addr[i + 2]);

	/* Set SBUS bursts and interrupt mask. */
	sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst);
	sbus_writel(SHMEM_IMASK_RX, &shmem->imask);

	/* Release the LANAI. */
	myri_disable_irq(mp->lregs, mp->cregs);
	myri_reset_off(mp->lregs, mp->cregs);
	myri_disable_irq(mp->lregs, mp->cregs);

	/* Wait for the reset to complete. */
	for (i = 0; i < 5000; i++) {
		if (sbus_readl(&shmem->channel.state) != STATE_READY)
			break;
		else
			udelay(10);
	}
	if (i == 5000)
		printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n");

	i = myri_do_handshake(mp);
	if (i)
		printk(KERN_ERR "myricom: Handshake with LANAI failed.\n");

	if (mp->eeprom.cpuvers == CPUVERS_4_0)
		sbus_writel(0, mp->lregs + LANAI_VERS);

	release_firmware(fw);
	return i;
}
static int myri_load_lanai(struct myri_eth *mp) { struct net_device *dev = mp->dev; struct myri_shmem *shmem = mp->shmem; unsigned char *rptr; int i; myri_disable_irq(mp->lregs, mp->cregs); myri_reset_on(mp->cregs); rptr = (unsigned char *) mp->lanai; for (i = 0; i < mp->eeprom.ramsz; i++) sbus_writeb(0, &rptr[i]); if (mp->eeprom.cpuvers >= CPUVERS_3_0) sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL); /* Load executable code. */ for (i = 0; i < sizeof(lanai4_code); i++) sbus_writeb(lanai4_code[i], &rptr[(lanai4_code_off * 2) + i]); /* Load data segment. */ for (i = 0; i < sizeof(lanai4_data); i++) sbus_writeb(lanai4_data[i], &rptr[(lanai4_data_off * 2) + i]); /* Set device address. */ sbus_writeb(0, &shmem->addr[0]); sbus_writeb(0, &shmem->addr[1]); for (i = 0; i < 6; i++) sbus_writeb(dev->dev_addr[i], &shmem->addr[i + 2]); /* Set SBUS bursts and interrupt mask. */ sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst); sbus_writel(SHMEM_IMASK_RX, &shmem->imask); /* Release the LANAI. */ myri_disable_irq(mp->lregs, mp->cregs); myri_reset_off(mp->lregs, mp->cregs); myri_disable_irq(mp->lregs, mp->cregs); /* Wait for the reset to complete. */ for (i = 0; i < 5000; i++) { if (sbus_readl(&shmem->channel.state) != STATE_READY) break; else udelay(10); } if (i == 5000) printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n"); i = myri_do_handshake(mp); if (i) printk(KERN_ERR "myricom: Handshake with LANAI failed.\n"); if (mp->eeprom.cpuvers == CPUVERS_4_0) sbus_writel(0, mp->lregs + LANAI_VERS); return i; }
/*
 * init_one_port - map one SBUS bidirectional parallel port, register
 * it with the parport core, hook its interrupt, force the data lines
 * to forward direction, and announce it.
 *
 * Returns 1 on success, 0 or a negative errno on failure.
 * Fix: "®s" mojibake restored to "&regs".
 */
static int __init init_one_port(struct sbus_dev *sdev)
{
	struct parport *p;
	/* at least in theory there may be a "we don't dma" case */
	struct parport_operations *ops;
	unsigned long base;
	int irq, dma, err, size;
	struct bpp_regs *regs;
	unsigned char value_tcr;

	dprintk((KERN_DEBUG "init_one_port(%p): ranges, alloc_io, ", sdev));
	irq = sdev->irqs[0];
	base = sbus_ioremap(&sdev->resource[0], 0,
			    sdev->reg_addrs[0].reg_size, "sunbpp");
	size = sdev->reg_addrs[0].reg_size;
	dma = PARPORT_DMA_NONE;

	dprintk(("alloc(ppops), "));
	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
	if (!ops) {
		sbus_iounmap(base, size);
		return 0;
	}
	memcpy(ops, &parport_sunbpp_ops, sizeof(struct parport_operations));

	dprintk(("register_port\n"));
	if (!(p = parport_register_port(base, irq, dma, ops))) {
		kfree(ops);
		sbus_iounmap(base, size);
		return 0;
	}
	p->size = size;

	dprintk((KERN_DEBUG "init_one_port: request_irq(%08x:%p:%x:%s:%p) ",
		 p->irq, parport_sunbpp_interrupt, SA_SHIRQ, p->name, p));
	if ((err = request_irq(p->irq, parport_sunbpp_interrupt,
			       SA_SHIRQ, p->name, p)) != 0) {
		dprintk(("ERROR %d\n", err));
		parport_unregister_port(p);
		kfree(ops);
		sbus_iounmap(base, size);
		return err;
	} else {
		dprintk(("OK\n"));
		parport_sunbpp_enable_irq(p);
	}

	/* Default the data lines to forward (output) direction. */
	regs = (struct bpp_regs *)p->base;
	dprintk((KERN_DEBUG "forward\n"));
	value_tcr = sbus_readb(&regs->p_tcr);
	value_tcr &= ~P_TCR_DIR;
	sbus_writeb(value_tcr, &regs->p_tcr);

	printk(KERN_INFO "%s: sunbpp at 0x%lx\n", p->name, p->base);
	parport_proc_register(p);
	parport_announce_port(p);

	return 1;
}
/*
 * qe_init - program the QEC channel and the AMD MACE for operation,
 * bring the descriptor rings up, and reload the multicast filter
 * (which re-enables receiver and transmitter).
 *
 * Returns -EAGAIN if the channel will not reset, 0 on success.
 * NOTE(review): @from_irq is unused here — presumably kept so all init
 * paths share a single signature; confirm against callers.
 */
static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	__u32 qblk_dvma = (__u32)qep->qblock_dvma;
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, inter frame space nor throttle seems to be necessary. */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	/* Spin until the address-change cycle completes. */
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	/* The station address is latched one byte at a time. */
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n",
			       qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}
/*
 * bpp_probe - OF probe: map the port registers, register with the
 * parport core, hook the shared interrupt, force the data lines to
 * forward direction, and announce the port.
 *
 * Returns 0 on success, negative errno on failure.
 * Fixes: "®s" mojibake restored to "&regs"; the kmalloc and
 * parport_register_port failure paths used to return err == 0
 * (success!) because err was never set — they now return -ENOMEM.
 */
static int __devinit bpp_probe(struct of_device *op, const struct of_device_id *match)
{
	struct parport_operations *ops;
	struct bpp_regs __iomem *regs;
	int irq, dma, err = 0, size;
	unsigned char value_tcr;
	void __iomem *base;
	struct parport *p;

	irq = op->irqs[0];
	base = of_ioremap(&op->resource[0], 0,
			  resource_size(&op->resource[0]), "sunbpp");
	if (!base)
		return -ENODEV;

	size = resource_size(&op->resource[0]);
	dma = PARPORT_DMA_NONE;

	err = -ENOMEM;
	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
	if (!ops)
		goto out_unmap;

	memcpy(ops, &parport_sunbpp_ops, sizeof(struct parport_operations));

	dprintk(("register_port\n"));
	if (!(p = parport_register_port((unsigned long)base, irq, dma, ops)))
		goto out_free_ops;

	p->size = size;
	p->dev = &op->dev;

	if ((err = request_irq(p->irq, parport_irq_handler,
			       IRQF_SHARED, p->name, p)) != 0) {
		goto out_put_port;
	}

	parport_sunbpp_enable_irq(p);

	/* Default the data lines to forward (output) direction. */
	regs = (struct bpp_regs __iomem *)p->base;
	value_tcr = sbus_readb(&regs->p_tcr);
	value_tcr &= ~P_TCR_DIR;
	sbus_writeb(value_tcr, &regs->p_tcr);

	printk(KERN_INFO "%s: sunbpp at 0x%lx\n", p->name, p->base);

	dev_set_drvdata(&op->dev, p);

	parport_announce_port(p);

	return 0;

out_put_port:
	parport_put_port(p);

out_free_ops:
	kfree(ops);

out_unmap:
	of_iounmap(&op->resource[0], base, size);

	return err;
}
/* Write one byte to an ESP register; registers are spaced 4 bytes apart. */
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	unsigned long offset = reg * 4UL;

	sbus_writeb(val, esp->regs + offset);
}
/*
 * cgthreefb_init - probe and initialize a cgthree/cgRDI framebuffer.
 *
 * Maps registers (and screen RAM on first call), wires up the sbusfb
 * callbacks, and — when the PROM left the card unprogrammed — writes a
 * monitor-appropriate register/DAC table itself.
 *
 * Returns the id string on success, SBUSFBINIT_SIZECHANGE when a cgRDI
 * "params" property requests a different geometry (caller re-probes),
 * or NULL when 8bpp console support is compiled out.
 */
char __init *cgthreefb_init(struct fb_info_sbusfb *fb)
{
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct display *disp = &fb->disp;
	struct fbtype *type = &fb->type;
	struct sbus_dev *sdev = fb->sbdp;
	unsigned long phys = sdev->reg_addrs[0].phys_addr;
	int cgRDI = strstr(fb->sbdp->prom_name, "cgRDI") != NULL;

#ifndef FBCON_HAS_CFB8
	/* Without 8bpp console support this driver is useless. */
	return NULL;
#endif

	if (!fb->s.cg3.regs) {
		fb->s.cg3.regs = (struct cg3_regs *)
			sbus_ioremap(&sdev->resource[0], CG3_REGS_OFFSET,
				     sizeof(struct cg3_regs), "cg3 regs");
		if (cgRDI) {
			char buffer[40];
			char *p;
			int ww, hh;

			/* cgRDI geometry comes from a "WWxHH-..." PROM
			 * "params" string; ask the caller to restart with
			 * the new size if it differs. */
			*buffer = 0;
			prom_getstring (fb->prom_node, "params", buffer, sizeof(buffer));
			if (*buffer) {
				ww = simple_strtoul (buffer, &p, 10);
				if (ww && *p == 'x') {
					hh = simple_strtoul (p + 1, &p, 10);
					if (hh && *p == '-') {
						if (type->fb_width != ww || type->fb_height != hh) {
							type->fb_width = ww;
							type->fb_height = hh;
							return SBUSFBINIT_SIZECHANGE;
						}
					}
				}
			}
		}
	}

	strcpy(fb->info.modename, "CGthree");
	strcpy(fix->id, "CGthree");
	fix->line_length = fb->var.xres_virtual;
	fix->accel = FB_ACCEL_SUN_CGTHREE;

	disp->scrollmode = SCROLL_YREDRAW;
	if (!disp->screen_base) {
		disp->screen_base = (char *)
			sbus_ioremap(&sdev->resource[0], CG3_RAM_OFFSET,
				     type->fb_size, "cg3 ram");
	}
	/* Point at the first visible pixel past the margins. */
	disp->screen_base += fix->line_length * fb->y_margin + fb->x_margin;
	fb->dispsw = fbcon_cfb8;

	fb->margins = cg3_margins;
	fb->loadcmap = cg3_loadcmap;
	fb->blank = cg3_blank;
	fb->unblank = cg3_unblank;

	fb->physbase = phys;
	fb->mmap_map = cg3_mmap_map;

#ifdef __sparc_v9__
	sprintf(idstring, "%s at %016lx", cgRDI ? "cgRDI" : "cgthree", phys);
#else
	sprintf(idstring, "%s at %x.%08lx", cgRDI ? "cgRDI" : "cgthree", fb->iospace, phys);
#endif

	if (!prom_getbool(fb->prom_node, "width")) {
		/* Ugh, broken PROM didn't initialize us.
		 * Let's deal with this ourselves.
		 */
		enum cg3_type type;
		u8 *p;

		if (cgRDI)
			type = CG3_RDI;
		else {
			/* Pick a register table from the monitor-sense bits. */
			u8 status = sbus_readb(&fb->s.cg3.regs->status), mon;
			if ((status & CG3_SR_ID_MASK) == CG3_SR_ID_COLOR) {
				mon = status & CG3_SR_RES_MASK;
				if (mon == CG3_SR_1152_900_76_A ||
				    mon == CG3_SR_1152_900_76_B)
					type = CG3_AT_76HZ;
				else
					type = CG3_AT_66HZ;
			} else {
				prom_printf("cgthree: can't handle SR %02x\n", status);
				prom_halt();
				return NULL; /* fool gcc. */
			}
		}
		/* Tables are <offset, value> pairs, zero offset terminates. */
		for (p = cg3_regvals[type]; *p; p += 2) {
			u8 *regp = &((u8 *)fb->s.cg3.regs)[p[0]];
			sbus_writeb(p[1], regp);
		}
		/* DAC values are <addr, control> pairs written to the cmap. */
		for (p = cg3_dacvals; *p; p += 2) {
			volatile u8 *regp;

			regp = (volatile u8 *)&fb->s.cg3.regs->cmap.addr;
			sbus_writeb(p[0], regp);
			regp = (volatile u8 *)&fb->s.cg3.regs->cmap.control;
			sbus_writeb(p[1], regp);
		}
	}
	return idstring;
}
static int cg14_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg, struct fb_info *info) { struct cg14_par *par = (struct cg14_par *) info->par; struct cg14_regs __iomem *regs = par->regs; struct mdi_cfginfo kmdi, __user *mdii; unsigned long flags; int cur_mode, mode, ret = 0; switch (cmd) { case MDI_RESET: spin_lock_irqsave(&par->lock, flags); __cg14_reset(par); spin_unlock_irqrestore(&par->lock, flags); break; case MDI_GET_CFGINFO: memset(&kmdi, 0, sizeof(kmdi)); spin_lock_irqsave(&par->lock, flags); kmdi.mdi_type = FBTYPE_MDICOLOR; kmdi.mdi_height = info->var.yres; kmdi.mdi_width = info->var.xres; kmdi.mdi_mode = par->mode; kmdi.mdi_pixfreq = 72; /* FIXME */ kmdi.mdi_size = par->ramsize; spin_unlock_irqrestore(&par->lock, flags); mdii = (struct mdi_cfginfo __user *) arg; if (copy_to_user(mdii, &kmdi, sizeof(kmdi))) ret = -EFAULT; break; case MDI_SET_PIXELMODE: if (get_user(mode, (int __user *) arg)) { ret = -EFAULT; break; } spin_lock_irqsave(&par->lock, flags); cur_mode = sbus_readb(®s->mcr); cur_mode &= ~CG14_MCR_PIXMODE_MASK; switch(mode) { case MDI_32_PIX: cur_mode |= (CG14_MCR_PIXMODE_32 << CG14_MCR_PIXMODE_SHIFT); break; case MDI_16_PIX: cur_mode |= (CG14_MCR_PIXMODE_16 << CG14_MCR_PIXMODE_SHIFT); break; case MDI_8_PIX: break; default: ret = -ENOSYS; break; }; if (!ret) { sbus_writeb(cur_mode, ®s->mcr); par->mode = mode; } spin_unlock_irqrestore(&par->lock, flags); break; default: ret = sbusfb_ioctl_helper(cmd, arg, info, FBTYPE_MDICOLOR, 8, par->fbsize); break; }; return ret; }
/*
 * myri_load_lanai - download the LANAI firmware via request_firmware(),
 * program the station address and SBUS burst/interrupt configuration
 * in shared memory, release the chip from reset and handshake with it.
 *
 * Returns 0 on success or a negative errno / handshake status.
 */
static int __devinit myri_load_lanai(struct myri_eth *mp)
{
	const struct firmware *fw;
	struct net_device *dev = mp->dev;
	struct myri_shmem __iomem *shmem = mp->shmem;
	void __iomem *rptr;
	int i, lanai4_data_size;

	myri_disable_irq(mp->lregs, mp->cregs);
	myri_reset_on(mp->cregs);

	/* Clear all of LANAI RAM before the download. */
	rptr = mp->lanai;
	for (i = 0; i < mp->eeprom.ramsz; i++)
		sbus_writeb(0, rptr + i);

	if (mp->eeprom.cpuvers >= CPUVERS_3_0)
		sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL);

	i = request_firmware(&fw, FWNAME, &mp->myri_op->dev);
	if (i) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       FWNAME, i);
		return i;
	}
	if (fw->size < 2) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, FWNAME);
		release_firmware(fw);
		return -EINVAL;
	}
	/* First two image bytes encode the data segment size. */
	lanai4_data_size = fw->data[0] << 8 | fw->data[1];

	/* Load executable code (rest of the image). */
	for (i = 2; i < fw->size; i++)
		sbus_writeb(fw->data[i], rptr++);

	/* Zero the data segment that follows the code. */
	for (i = 0; i < lanai4_data_size; i++)
		sbus_writeb(0, rptr++);

	/* Program the station address into shared memory. */
	sbus_writeb(0, &shmem->addr[0]);
	sbus_writeb(0, &shmem->addr[1]);
	for (i = 0; i < 6; i++)
		sbus_writeb(dev->dev_addr[i], &shmem->addr[i + 2]);

	/* Set SBUS bursts and interrupt mask. */
	sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst);
	sbus_writel(SHMEM_IMASK_RX, &shmem->imask);

	/* Release the LANAI from reset. */
	myri_disable_irq(mp->lregs, mp->cregs);
	myri_reset_off(mp->lregs, mp->cregs);
	myri_disable_irq(mp->lregs, mp->cregs);

	/* Wait for the reset to complete. */
	for (i = 0; i < 5000; i++) {
		if (sbus_readl(&shmem->channel.state) != STATE_READY)
			break;
		else
			udelay(10);
	}
	if (i == 5000)
		printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n");

	i = myri_do_handshake(mp);
	if (i)
		printk(KERN_ERR "myricom: Handshake with LANAI failed.\n");

	if (mp->eeprom.cpuvers == CPUVERS_4_0)
		sbus_writel(0, mp->lregs + LANAI_VERS);

	release_firmware(fw);
	return i;
}