/*
 * Final PROM cleanup.  On IP28 the ARCS firmware accesses memory
 * uncached, so only now -- when ARCS is no longer needed -- is the
 * memory system switched from slow to normal mode and WR_COL reduced.
 */
void __init prom_free_prom_memory(void)
{
#ifdef CONFIG_SGI_IP28
	/*
	 * Fix: the original declared a plain `spinlock_t lock;` on the
	 * stack and took it uninitialized.  Use a properly initialized
	 * function-local static lock instead.
	 */
	static DEFINE_SPINLOCK(lock);
	u32 mconfig1;
	unsigned long flags;

	/*
	 * because ARCS accesses memory uncached we wait until ARCS
	 * isn't needed any longer, before we switch from slow to
	 * normal mode
	 */
	spin_lock_irqsave(&lock, flags);
	mconfig1 = sgimc->mconfig1;
	/* map ECC register */
	sgimc->mconfig1 = (mconfig1 & 0xffff0000) | 0x2060;
	iob();
	/* switch to normal mode */
	*(unsigned long *)PHYS_TO_XKSEG_UNCACHED(0x60000000) = 0;
	iob();
	/* reduce WR_COL */
	sgimc->cmacc = (sgimc->cmacc & ~0xf) | 4;
	iob();
	/* restore old config */
	sgimc->mconfig1 = mconfig1;
	iob();
	spin_unlock_irqrestore(&lock, flags);
#endif
}
/*
 * Perform a PCI Interrupt Acknowledge cycle and return the vector
 * (0-255) supplied by the interrupt controller, or -1 when the
 * system controller is not recognized.
 */
static inline int mips_pcibios_iack(void)
{
	int irq;

	switch (mips_revision_sconid) {
	case MIPS_REVISION_SCON_SOCIT:
	case MIPS_REVISION_SCON_ROCIT:
	case MIPS_REVISION_SCON_SOCITSC:
	case MIPS_REVISION_SCON_SOCITSCP:
		/* MSC01: the IACK vector is read from a controller register. */
		MSC_READ(MSC01_PCI_IACK, irq);
		irq &= 0xff;
		break;
	case MIPS_REVISION_SCON_GT64120:
		irq = GT_READ(GT_PCI0_IACK_OFS);
		irq &= 0xff;
		break;
	case MIPS_REVISION_SCON_BONITO:
		/*
		 * Writing 0x20000 to PCIMAP_CFG makes the following
		 * config-space read generate an IACK cycle; the throwaway
		 * read flushes the posted write first.
		 */
		BONITO_PCIMAP_CFG = 0x20000;
		(void) BONITO_PCIMAP_CFG;
		iob();
		irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
		iob();
		irq &= 0xff;
		BONITO_PCIMAP_CFG = 0;	/* back to normal config cycles */
		break;
	default:
		printk(KERN_WARNING "Unknown system controller.\n");
		return -1;
	}
	return irq;
}
static inline int mips_pcibios_iack(void) { int irq; u32 dummy; /* * Determine highest priority pending interrupt by performing * a PCI Interrupt Acknowledge cycle. */ switch(mips_revision_corid) { case MIPS_REVISION_CORID_CORE_MSC: case MIPS_REVISION_CORID_CORE_FPGA2: case MIPS_REVISION_CORID_CORE_FPGA3: case MIPS_REVISION_CORID_CORE_24K: case MIPS_REVISION_CORID_CORE_EMUL_MSC: MSC_READ(MSC01_PCI_IACK, irq); irq &= 0xff; break; case MIPS_REVISION_CORID_QED_RM5261: case MIPS_REVISION_CORID_CORE_LV: case MIPS_REVISION_CORID_CORE_FPGA: case MIPS_REVISION_CORID_CORE_FPGAR2: irq = GT_READ(GT_PCI0_IACK_OFS); irq &= 0xff; break; case MIPS_REVISION_CORID_BONITO64: case MIPS_REVISION_CORID_CORE_20K: case MIPS_REVISION_CORID_CORE_EMUL_BON: /* The following will generate a PCI IACK cycle on the * Bonito controller. It's a little bit kludgy, but it * was the easiest way to implement it in hardware at * the given time. */ BONITO_PCIMAP_CFG = 0x20000; /* Flush Bonito register block */ dummy = BONITO_PCIMAP_CFG; iob(); /* sync */ irq = *(volatile u32 *)(_pcictrl_bonito_pcicfg); iob(); /* sync */ irq &= 0xff; BONITO_PCIMAP_CFG = 0; break; default: printk("Unknown Core card, don't know the system controller.\n"); return -1; } return irq; }
/*
 * Acknowledge a KN02 I/O interrupt: mask the line under the CSR lock,
 * then flush the write buffer so the CSR update reaches the hardware.
 */
static void ack_kn02_irq(unsigned int irq)
{
	spin_lock(&kn02_lock);
	mask_kn02_irq(irq);
	spin_unlock(&kn02_lock);
	iob();		/* flush the masking write out to the CSR */
}
/*
 * Check whether the previous access on this EPCI bridge ended with a
 * received master abort.  If so, clear the latched status bits and
 * report PCIBIOS_DEVICE_NOT_FOUND; otherwise PCIBIOS_SUCCESSFUL.
 */
static int celleb_epci_check_abort(struct pci_controller *hose,
				   PCI_IO_ADDR addr)
{
	PCI_IO_ADDR epci_base, reg;
	u32 cmd_stat;

	iob();
	epci_base = celleb_epci_get_epci_base(hose);

	reg = epci_base + PCI_COMMAND;
	cmd_stat = in_be32(reg);

	if (!(cmd_stat & (PCI_STATUS_REC_MASTER_ABORT << 16)))
		return PCIBIOS_SUCCESSFUL;

	/* Write back the latched abort bit to clear it. */
	out_be32(reg,
		 (cmd_stat & 0xffff) | (PCI_STATUS_REC_MASTER_ABORT << 16));

	/* clear PCI Controller error, FRE, PMFE */
	out_be32(epci_base + SCC_EPCI_STATUS, SCC_EPCI_INT_PAI);

	reg = epci_base + SCC_EPCI_VCSR;
	out_be32(reg, (in_be32(reg) & 0xffff) | SCC_EPCI_VCSR_FRE);

	out_be32(epci_base + SCC_EPCI_VISTAT, SCC_EPCI_VISTAT_PMFE);

	return PCIBIOS_DEVICE_NOT_FOUND;
}
/*
 * One-time kgdb setup for a TXX9 SIO port: reset the UART, program
 * the frame format / input clock, and derive the baud-rate divisor.
 * Returns -1 when the port is not memory-mapped.
 */
static int txx9_sio_kgdb_init(void)
{
	struct uart_port *port = &uart_txx9_port[kgdb_txx9_ttyS];
	unsigned int quot, sibgr;

	/* kgdb over this driver only supports memory-mapped ports. */
	if (port->iotype != UPIO_MEM && port->iotype != UPIO_MEM32)
		return -1;

	/* Reset the UART. */
	sio_out(port, TXX9_SIFCR, TXX9_SIFCR_SWRST);
#ifdef CONFIG_CPU_TX49XX
	/*
	 * TX4925 BUG WORKAROUND.  Accessing SIOC register
	 * immediately after soft reset causes bus error.
	 */
	iob();
	udelay(1);
#endif
	/* Wait until reset is complete. */
	while (sio_in(port, TXX9_SIFCR) & TXX9_SIFCR_SWRST);

	/* Select the frame format and input clock. */
	sio_out(port, TXX9_SILCR,
		TXX9_SILCR_UMODE_8BIT | TXX9_SILCR_USBL_1BIT |
		((port->flags & UPF_MAGIC_MULTIPLIER) ?
		 TXX9_SILCR_SCS_SCLK_BG : TXX9_SILCR_SCS_IMCLK_BG));

	/* Select the input clock prescaler that fits the baud rate. */
	quot = (port->uartclk + 8 * kgdb_txx9_baud) / (16 * kgdb_txx9_baud);
	if (quot < (256 << 1))
		sibgr = (quot >> 1) | TXX9_SIBGR_BCLK_T0;
	else if (quot < ( 256 << 3))
	/* NOTE(review): function is truncated at this point in this chunk. */
static void __init tx4927_pci_setup(void) { int extarb = !(__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCIARB); struct pci_controller *c = &txx9_primary_pcic; register_pci_controller(c); if (__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66) txx9_pci_option = (txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) | TXX9_PCI_OPT_CLK_66; /* already configured */ /* Reset PCI Bus */ writeb(1, rbtx4927_pcireset_addr); /* Reset PCIC */ txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST); if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) == TXX9_PCI_OPT_CLK_66) tx4927_pciclk66_setup(); mdelay(10); /* clear PCIC reset */ txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST); writeb(0, rbtx4927_pcireset_addr); iob(); tx4927_report_pciclk(); tx4927_pcic_setup(tx4927_pcicptr, c, extarb); if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) == TXX9_PCI_OPT_CLK_AUTO && txx9_pci66_check(c, 0, 0)) { /* Reset PCI Bus */ writeb(1, rbtx4927_pcireset_addr); /* Reset PCIC */ txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST); tx4927_pciclk66_setup(); mdelay(10); /* clear PCIC reset */ txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST); writeb(0, rbtx4927_pcireset_addr); iob(); /* Reinitialize PCIC */ tx4927_report_pciclk(); tx4927_pcic_setup(tx4927_pcicptr, c, extarb); } tx4927_setup_pcierr_irq(); }
/*
 * Hard-reset the DZ chip: issue a master clear, wait for the chip to
 * drop the CLR bit, then re-enable the scanner.
 */
static void dz_reset(struct dz_port *dport)
{
	dz_out(dport, DZ_CSR, DZ_CLR);

	/* Spin until the master-clear bit deasserts. */
	for (;;) {
		if (!(dz_in(dport, DZ_CSR) & DZ_CLR))
			break;
	}
	iob();

	/* enable scanning */
	dz_out(dport, DZ_CSR, DZ_MSE);
}
/*
 * Update a remap register in the CPU block (via its uncached KSEG1
 * alias) only if the value actually changes, and return the previous
 * register contents.
 */
static RMuint32 set_remap(RMuint32 remap_reg, RMuint32 value)
{
	volatile RMuint32 *reg =
		(volatile RMuint32 *)KSEG1ADDR(REG_BASE_cpu_block + remap_reg);
	RMuint32 old = *reg;

	if (old != value) {
		*reg = value;
		iob();	/* flush the register write */
	}
	return old;
}
/*
 * Perform a PCI Interrupt Acknowledge cycle and return the vector
 * (0-255), or -1 when the system controller is unknown.
 */
static inline int mips_pcibios_iack(void)
{
	int irq;
	u32 dummy;

	/*
	 * Determine highest priority pending interrupt by performing
	 * a PCI Interrupt Acknowledge cycle.
	 */
	switch (mips_revision_sconid) {
	case MIPS_REVISION_SCON_SOCIT:
	case MIPS_REVISION_SCON_ROCIT:
	case MIPS_REVISION_SCON_SOCITSC:
	case MIPS_REVISION_SCON_SOCITSCP:
		MSC_READ(MSC01_PCI_IACK, irq);
		irq &= 0xff;
		break;
	case MIPS_REVISION_SCON_GT64120:
		irq = GT_READ(GT_PCI0_IACK_OFS);
		irq &= 0xff;
		break;
	case MIPS_REVISION_SCON_BONITO:
		/* The following will generate a PCI IACK cycle on the
		 * Bonito controller. It's a little bit kludgy, but it
		 * was the easiest way to implement it in hardware at
		 * the given time.
		 */
		BONITO_PCIMAP_CFG = 0x20000;
		/* Flush Bonito register block */
		dummy = BONITO_PCIMAP_CFG;
		iob();    /* sync */
		irq = readl((u32 *)_pcictrl_bonito_pcicfg);
		iob();    /* sync */
		irq &= 0xff;
		BONITO_PCIMAP_CFG = 0;	/* leave IACK mode */
		break;
	default:
		printk(KERN_WARNING "Unknown system controller.\n");
		return -1;
	}
	return irq;
}
/*
 * Read an internal PCI configuration register on the BCM63xx MPI:
 * latch the config address (with write-enable set) into PCICFGCTL,
 * flush, then return the PCICFGDATA contents.
 */
static u32 bcm63xx_int_cfg_readl(u32 reg)
{
	u32 ctl;

	ctl = (reg & MPI_PCICFGCTL_CFGADDR_MASK) | MPI_PCICFGCTL_WRITEEN_MASK;
	bcm_mpi_writel(ctl, MPI_PCICFGCTL_REG);
	iob();	/* make the address write visible before the data read */

	return bcm_mpi_readl(MPI_PCICFGDATA_REG);
}
/* Watchdog functions */

/*
 * Start the watchdog: enable this counter's interrupt line in the
 * Titan CPGIG1ER register under the register lock.
 */
static void wdt_gpi_start(void)
{
	u32 enable;

	lock_titan_regs();
	enable = titan_readl(CPGIG1ER) | (0x100 << wd_ctr);
	titan_writel(enable, CPGIG1ER);
	iob();	/* flush the enable write */
	unlock_titan_regs();
}
/*
 * Check the TX3927 PCIC status for a received master abort.  If one
 * is latched, clear it (in both the status and status-mask registers)
 * and report PCIBIOS_DEVICE_NOT_FOUND; otherwise PCIBIOS_SUCCESSFUL.
 */
static inline int check_abort(void)
{
	if (!(tx3927_pcicptr->pcistat & PCI_STATUS_REC_MASTER_ABORT))
		return PCIBIOS_SUCCESSFUL;

	tx3927_pcicptr->pcistat |= PCI_STATUS_REC_MASTER_ABORT;
	tx3927_pcicptr->pcistatim |= PCI_STATUS_REC_MASTER_ABORT;
	/* flush write buffer */
	iob();
	return PCIBIOS_DEVICE_NOT_FOUND;
}
/*
 * Stop the watchdog: clear this counter's 4-bit mode field in CPCCR
 * and disable its interrupt line in CPGIG1ER, both under the Titan
 * register lock.
 */
static void wdt_gpi_stop(void)
{
	u32 val;

	lock_titan_regs();

	/* Clear the counter's mode field. */
	val = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
	titan_writel(val, CPCCR);

	/* Disable its interrupt line. */
	val = titan_readl(CPGIG1ER) & ~(0x100 << wd_ctr);
	titan_writel(val, CPGIG1ER);
	iob();	/* flush both writes */

	unlock_titan_regs();
}
/*
 * Acknowledge a KN01 memory error: writing MEMERR to the CSR clears
 * the latched bus IRQ.  The cached CSR copy supplies the other bits
 * (parts of the CSR are write-only -- see dec_kn01_be_init()).
 */
static inline void dec_kn01_be_ack(void)
{
	volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
	unsigned long flags;

	raw_spin_lock_irqsave(&kn01_lock, flags);

	*csr = cached_kn01_csr | KN01_CSR_MEMERR;	/* Clear bus IRQ. */
	iob();		/* flush the CSR write */

	raw_spin_unlock_irqrestore(&kn01_lock, flags);
}
/*
 * -------------------------------------------------------------------
 * dz_console_putchar() -- transmit a character
 *
 * Polled transmission.  This is tricky.  We need to mask transmit
 * interrupts so that they do not interfere, enable the transmitter
 * for the line requested and then wait till the transmit scanner
 * requests data for this line.  But it may request data for another
 * line first, in which case we have to disable its transmitter and
 * repeat waiting till our line pops up.  Only then the character may
 * be transmitted.  Finally, the state of the transmitter mask is
 * restored.  Welcome to the world of PDP-11!
 * -------------------------------------------------------------------
 */
static void dz_console_putchar(struct uart_port *uport, int ch)
{
	struct dz_port *dport = (struct dz_port *)uport;
	unsigned long flags;
	unsigned short csr, tcr, trdy, mask;
	int loops = 10000;	/* bounded poll so we never hang the console */

	spin_lock_irqsave(&dport->port.lock, flags);
	csr = dz_in(dport, DZ_CSR);
	dz_out(dport, DZ_CSR, csr & ~DZ_TIE);	/* mask transmit interrupts */
	tcr = dz_in(dport, DZ_TCR);
	tcr |= 1 << dport->port.line;		/* enable our transmitter */
	mask = tcr;
	dz_out(dport, DZ_TCR, mask);
	iob();
	spin_unlock_irqrestore(&dport->port.lock, flags);

	while (loops--) {
		trdy = dz_in(dport, DZ_CSR);
		if (!(trdy & DZ_TRDY))
			continue;
		trdy = (trdy & DZ_TLINE) >> 8;	/* line the scanner offers */
		if (trdy == dport->port.line)
			break;
		/* Wrong line popped up: disable it and keep waiting. */
		mask &= ~(1 << trdy);
		dz_out(dport, DZ_TCR, mask);
		iob();
		udelay(2);
	}

	if (loops)				/* Cannot send otherwise. */
		dz_out(dport, DZ_TDR, ch);

	/* Restore the transmitter mask and interrupt state. */
	dz_out(dport, DZ_TCR, tcr);
	dz_out(dport, DZ_CSR, csr);
}
/*
 * Register KN02 I/O interrupt lines: mask everything via the CSR
 * first, then install the level-triggered handler for each line
 * starting at `base`.
 */
void __init init_kn02_irqs(int base)
{
	volatile u32 *csr =
		(volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
	int irq;

	/* Mask all I/O interrupts in the cached CSR and push it out. */
	cached_kn02_csr &= ~KN02_CSR_IOINTEN;
	*csr = cached_kn02_csr;
	iob();

	for (irq = base; irq < base + KN02_IRQ_LINES; irq++)
		irq_set_chip_and_handler(irq, &kn02_irq_type,
					 handle_level_irq);

	kn02_irq_base = base;
}
/* Interrupt handler */

/*
 * Watchdog expiry handler: if this counter really fired, acknowledge
 * it, request a reset (or power cycle) from the system controller and
 * spin until the reset takes effect.  Never returns IRQ_HANDLED.
 */
static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt)
{
	/*
	 * Fix: the original wrote `!unlikely(cond)`, which negates the
	 * hinted expression and thus marks the wrong path as unlikely.
	 * The spurious-interrupt early exit is the unlikely case.
	 */
	if (unlikely(!(__raw_readl(wd_regs + 0x0008) & 0x1)))
		return IRQ_NONE;

	/* Acknowledge the expiry. */
	__raw_writel(0x1, wd_regs + 0x0008);

	printk(KERN_CRIT "%s: watchdog expired - resetting system\n",
	       wdt_gpi_name);

	*(volatile char *) flagaddr |= 0x01;
	*(volatile char *) resetaddr = powercycle ? 0x01 : 0x2;
	iob();	/* make sure the reset request reaches the hardware */

	/* Wait for the reset; we are not coming back. */
	while (1)
		cpu_relax();
}
/*
 * Program a new watchdog timeout.  The counter's mode field is first
 * cleared (stopping it), the new compare value is loaded, and the
 * counter is restarted; wmb()/iob() enforce the write ordering the
 * hardware needs between the steps.
 */
static void wdt_gpi_set_timeout(unsigned int to)
{
	u32 reg;
	const u32 wdval = (to * CLOCK) & ~0x0000000f;	/* low 4 bits masked off */

	lock_titan_regs();
	/* Stop the counter: clear its 4-bit mode field. */
	reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
	titan_writel(reg, CPCCR);
	wmb();
	/* Load the new compare value. */
	__raw_writel(wdval, wd_regs + 0x0000);
	wmb();
	/* Restart: mode value 0x2 first, then 0x5. */
	titan_writel(reg | (0x2 << (wd_ctr * 4)), CPCCR);
	wmb();
	titan_writel(reg | (0x5 << (wd_ctr * 4)), CPCCR);
	iob();
	unlock_titan_regs();
}
/*
 * One-time bus-error setup for the KN01: prime the write-only CSR
 * cache from the hardware, enable parity error detection, and clear
 * anything latched by the firmware.
 */
void __init dec_kn01_be_init(void)
{
	volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
	unsigned long flags;

	raw_spin_lock_irqsave(&kn01_lock, flags);

	/* Preset write-only bits of the Control Register cache. */
	cached_kn01_csr = *csr;
	cached_kn01_csr &= KN01_CSR_STATUS | KN01_CSR_PARDIS | KN01_CSR_TXDIS;
	cached_kn01_csr |= KN01_CSR_LEDS;

	/* Enable parity error detection. */
	cached_kn01_csr &= ~KN01_CSR_PARDIS;
	*csr = cached_kn01_csr;
	iob();		/* flush the CSR write */

	raw_spin_unlock_irqrestore(&kn01_lock, flags);

	/* Clear any leftover errors from the firmware. */
	dec_kn01_be_ack();
}
/*
 * Register KN02 I/O interrupt lines (pre-genirq style): mask all I/O
 * interrupts via the CSR under the lock, then initialize the irq_desc
 * slot for each line starting at `base`.
 */
void __init init_kn02_irqs(int base)
{
	volatile u32 *csr = (volatile u32 *)KN02_CSR_BASE;
	unsigned long flags;
	int i;

	/* Mask interrupts. */
	spin_lock_irqsave(&kn02_lock, flags);
	/* NOTE(review): KN03_CSR_IOINTEN in a KN02 routine looks odd --
	 * confirm against the CSR definitions. */
	cached_kn02_csr &= ~KN03_CSR_IOINTEN;
	*csr = cached_kn02_csr;
	iob();
	spin_unlock_irqrestore(&kn02_lock, flags);

	for (i = base; i < base + KN02_IRQ_LINES; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;	/* Fix: pointer member, was 0 */
		irq_desc[i].depth = 1;
		irq_desc[i].handler = &kn02_irq_type;
	}

	kn02_irq_base = base;
}
/*
 * Store a value to a memory-mapped device register and flush the
 * write buffer so the store reaches the hardware before returning.
 */
static inline void writereg(volatile unsigned short *regptr, short value)
{
	*regptr = value;
	iob();
}
/*
 * Driver initialisation for the DECstation DZ11 serial lines: fill in
 * the tty driver structure and its callout twin, register both, set
 * up per-line state, optionally reset the chip, and hook the DZ11
 * interrupt.  Returns 0 (also on the "port unset" early-out).
 *
 * NOTE(review): this is pre-2.6 driver code (init_bh, cli/sti,
 * tty_register_devfs); documented as-is.
 */
int __init dz_init(void)
{
	int i;
	long flags;
	struct dz_serial *info;

	/* Setup base handler, and timer table. */
	init_bh(SERIAL_BH, do_serial_bh);

	show_serial_version();

	memset(&serial_driver, 0, sizeof(struct tty_driver));
	serial_driver.magic = TTY_DRIVER_MAGIC;
#if (LINUX_VERSION_CODE > 0x2032D && defined(CONFIG_DEVFS_FS))
	serial_driver.name = "ttyS";
#else
	serial_driver.name = "tts/%d";
#endif
	serial_driver.major = TTY_MAJOR;
	serial_driver.minor_start = 64;
	serial_driver.num = DZ_NB_PORT;
	serial_driver.type = TTY_DRIVER_TYPE_SERIAL;
	serial_driver.subtype = SERIAL_TYPE_NORMAL;
	serial_driver.init_termios = tty_std_termios;
	serial_driver.init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL |
					     CLOCAL;
	serial_driver.flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
	serial_driver.refcount = &serial_refcount;
	serial_driver.table = serial_table;
	serial_driver.termios = serial_termios;
	serial_driver.termios_locked = serial_termios_locked;

	/* tty operation callbacks. */
	serial_driver.open = dz_open;
	serial_driver.close = dz_close;
	serial_driver.write = dz_write;
	serial_driver.flush_chars = dz_flush_chars;
	serial_driver.write_room = dz_write_room;
	serial_driver.chars_in_buffer = dz_chars_in_buffer;
	serial_driver.flush_buffer = dz_flush_buffer;
	serial_driver.ioctl = dz_ioctl;
	serial_driver.throttle = dz_throttle;
	serial_driver.unthrottle = dz_unthrottle;
	serial_driver.send_xchar = dz_send_xchar;
	serial_driver.set_termios = dz_set_termios;
	serial_driver.stop = dz_stop;
	serial_driver.start = dz_start;
	serial_driver.hangup = dz_hangup;

	/*
	 * The callout device is just like normal device except for
	 * major number and the subtype code.
	 */
	callout_driver = serial_driver;
#if (LINUX_VERSION_CODE > 0x2032D && defined(CONFIG_DEVFS_FS))
	callout_driver.name = "cua";
#else
	callout_driver.name = "cua/%d";
#endif
	callout_driver.major = TTYAUX_MAJOR;
	callout_driver.subtype = SERIAL_TYPE_CALLOUT;

	if (tty_register_driver(&serial_driver))
		panic("Couldn't register serial driver");
	if (tty_register_driver(&callout_driver))
		panic("Couldn't register callout driver");

	save_flags(flags);
	cli();

	for (i = 0; i < DZ_NB_PORT; i++) {
		info = &multi[i];
		lines[i] = info;
		info->magic = SERIAL_MAGIC;

		/* The DZ11 lives at different addresses per machine. */
		if (mips_machtype == MACH_DS23100 ||
		    mips_machtype == MACH_DS5100)
			info->port = (unsigned long) KN01_DZ11_BASE;
		else
			info->port = (unsigned long) KN02_DZ11_BASE;

		info->line = i;
		info->tty = 0;
		info->close_delay = 50;
		info->closing_wait = 3000;
		info->x_char = 0;
		info->event = 0;
		info->count = 0;
		info->blocked_open = 0;
		info->tqueue.routine = do_softint;
		info->tqueue.data = info;
		info->tqueue_hangup.routine = do_serial_hangup;
		info->tqueue_hangup.data = info;
		info->callout_termios = callout_driver.init_termios;
		info->normal_termios = serial_driver.init_termios;
		init_waitqueue_head(&info->open_wait);
		init_waitqueue_head(&info->close_wait);

		/*
		 * If we are pointing to address zero then punt - not correctly
		 * set up in setup.c to handle this.
		 */
		if (!info->port)
			return 0;

		printk("ttyS%02d at 0x%08x (irq = %d)\n", info->line,
		       info->port, dec_interrupt[DEC_IRQ_DZ11]);

		tty_register_devfs(&serial_driver, 0,
				   serial_driver.minor_start + info->line);
		tty_register_devfs(&callout_driver, 0,
				   callout_driver.minor_start + info->line);
	}

	/* reset the chip */
#ifndef CONFIG_SERIAL_DEC_CONSOLE
	dz_out(info, DZ_CSR, DZ_CLR);
	while (dz_in(info, DZ_CSR) & DZ_CLR);
	iob();

	/* enable scanning */
	dz_out(info, DZ_CSR, DZ_MSE);
#endif

	/*
	 * order matters here... the trick is that flags is updated
	 * in request_irq - to immediately obliterate it is unwise.
	 */
	restore_flags(flags);

	if (request_irq(dec_interrupt[DEC_IRQ_DZ11], dz_interrupt,
			SA_INTERRUPT, "DZ", lines[0]))
		panic("Unable to register DZ interrupt");

	return 0;
}
/*
 * Unmask an Atlas hw0 interrupt line by writing its bit to the
 * set-enable register, then flush the write buffer.
 */
void enable_atlas_irq(unsigned int irq_nr)
{
	unsigned int bit = 1 << (irq_nr - ATLAS_INT_BASE);

	atlas_hw0_icregs->intseten = bit;
	iob();
}
/*
 * Acknowledge a KN02 interrupt (genirq chip callback): mask the line,
 * then flush the write buffer so the CSR update reaches the hardware.
 */
static void ack_kn02_irq(struct irq_data *d)
{
	mask_kn02_irq(d);
	iob();
}
/*
 * Acknowledge a bus error: any write to the error address register
 * clears the IRQ; iob() flushes the write buffer.
 */
static inline void dec_ecc_be_ack(void)
{
	*kn0x_erraddr = 0;		/* any write clears the IRQ */
	iob();
}
/*
 * Common bus-error backend for ECC-capable DECstations: decode the
 * error-address and check-syndrome registers, classify the event
 * (CPU/DMA, read/write, timeout/overrun/ECC), attempt a rewrite for
 * correctable single-bit errors, and decide how the MIPS bus-error
 * handler should proceed (MIPS_BE_FATAL / FIXUP / DISCARD).
 *
 * NOTE(review): as visible in this chunk the function falls off the
 * end without returning `action` (and `fmt` is computed but never
 * printed) -- this looks like a truncated tail; confirm against the
 * complete source.
 */
static int dec_ecc_be_backend(struct pt_regs *regs, int is_fixup, int invoker)
{
	static const char excstr[] = "exception";
	static const char intstr[] = "interrupt";
	static const char cpustr[] = "CPU";
	static const char dmastr[] = "DMA";
	static const char readstr[] = "read";
	static const char mreadstr[] = "memory read";
	static const char writestr[] = "write";
	static const char mwritstr[] = "partial memory write";
	static const char timestr[] = "timeout";
	static const char overstr[] = "overrun";
	static const char eccstr[] = "ECC error";

	const char *kind, *agent, *cycle, *event;
	const char *status = "", *xbit = "", *fmt = "";
	unsigned long address;
	u16 syn = 0, sngl;
	int i = 0;

	u32 erraddr = *kn0x_erraddr;
	u32 chksyn = *kn0x_chksyn;
	int action = MIPS_BE_FATAL;

	/* For non-ECC ack ASAP, so that any subsequent errors get caught. */
	if ((erraddr & (KN0X_EAR_VALID | KN0X_EAR_ECCERR)) == KN0X_EAR_VALID)
		dec_ecc_be_ack();

	kind = invoker ? intstr : excstr;

	if (!(erraddr & KN0X_EAR_VALID)) {
		/* No idea what happened. */
		printk(KERN_ALERT "Unidentified bus error %s\n", kind);
		return action;
	}

	agent = (erraddr & KN0X_EAR_CPU) ? cpustr : dmastr;

	if (erraddr & KN0X_EAR_ECCERR) {
		/* An ECC error on a CPU or DMA transaction. */
		cycle = (erraddr & KN0X_EAR_WRITE) ? mwritstr : mreadstr;
		event = eccstr;
	} else {
		/* A CPU timeout or a DMA overrun. */
		cycle = (erraddr & KN0X_EAR_WRITE) ? writestr : readstr;
		event = (erraddr & KN0X_EAR_CPU) ? timestr : overstr;
	}

	address = erraddr & KN0X_EAR_ADDRESS;
	/* For ECC errors on reads adjust for MT pipelining. */
	if ((erraddr & (KN0X_EAR_WRITE | KN0X_EAR_ECCERR)) == KN0X_EAR_ECCERR)
		address = (address & ~0xfffLL) | ((address - 5) & 0xfffLL);
	address <<= 2;

	/* Only CPU errors are fixable. */
	if (erraddr & KN0X_EAR_CPU && is_fixup)
		action = MIPS_BE_FIXUP;

	if (erraddr & KN0X_EAR_ECCERR) {
		/* Syndrome lookup tables for single- and multi-bit errors. */
		static const u8 data_sbit[32] = {
			0x4f, 0x4a, 0x52, 0x54, 0x57, 0x58, 0x5b, 0x5d,
			0x23, 0x25, 0x26, 0x29, 0x2a, 0x2c, 0x31, 0x34,
			0x0e, 0x0b, 0x13, 0x15, 0x16, 0x19, 0x1a, 0x1c,
			0x62, 0x64, 0x67, 0x68, 0x6b, 0x6d, 0x70, 0x75,
		};
		static const u8 data_mbit[25] = {
			0x07, 0x0d, 0x1f, 0x2f, 0x32, 0x37, 0x38, 0x3b,
			0x3d, 0x3e, 0x43, 0x45, 0x46, 0x49, 0x4c, 0x51,
			0x5e, 0x61, 0x6e, 0x73, 0x76, 0x79, 0x7a, 0x7c,
			0x7f,
		};
		static const char sbestr[] = "corrected single";
		static const char dbestr[] = "uncorrectable double";
		static const char mbestr[] = "uncorrectable multiple";

		if (!(address & 0x4))
			syn = chksyn;		/* Low bank. */
		else
			syn = chksyn >> 16;	/* High bank. */

		if (!(syn & KN0X_ESR_VLDLO)) {
			/* Ack now, no rewrite will happen. */
			dec_ecc_be_ack();

			fmt = KERN_ALERT "%s" "invalid\n";
		} else {
			sngl = syn & KN0X_ESR_SNGLO;
			syn &= KN0X_ESR_SYNLO;

			/*
			 * Multibit errors may be tagged incorrectly;
			 * check the syndrome explicitly.
			 */
			for (i = 0; i < 25; i++)
				if (syn == data_mbit[i])
					break;

			if (i < 25) {
				status = mbestr;
			} else if (!sngl) {
				status = dbestr;
			} else {
				/* Single-bit: rewrite the word in place
				 * via its uncached alias to scrub it. */
				volatile u32 *ptr =
					(void *)CKSEG1ADDR(address);

				*ptr = *ptr;		/* Rewrite. */
				iob();

				status = sbestr;
				action = MIPS_BE_DISCARD;
			}

			/* Ack now, now we've rewritten (or not). */
			dec_ecc_be_ack();

			if (syn && syn == (syn & -syn)) {
				/* Power-of-two syndrome: check-bit error. */
				if (syn == 0x01) {
					fmt = KERN_ALERT "%s"
					      "%#04x -- %s bit error "
					      "at check bit C%s\n";
					xbit = "X";
				} else {
					fmt = KERN_ALERT "%s"
					      "%#04x -- %s bit error "
					      "at check bit C%s%u\n";
				}
				i = syn >> 2;
			} else {
				/* Otherwise look up the failing data bit. */
				for (i = 0; i < 32; i++)
					if (syn == data_sbit[i])
						break;
				if (i < 32)
					fmt = KERN_ALERT "%s"
					      "%#04x -- %s bit error "
					      "at data bit D%s%u\n";
				else
					fmt = KERN_ALERT "%s"
					      "%#04x -- %s bit error\n";
			}
		}
	}
}
/*
 * PCI setup for the RBTX4938 board: reset and configure the primary
 * TX4938 PCIC (re-running the reset with the 66 MHz clock setup when
 * clock selection is AUTO and the bus proves capable), then bring up
 * the secondary PCIC1 when the internal ethernets are selected.
 *
 * NOTE(review): the board reset line is driven 0-then-1 here, the
 * opposite order of the rbtx4927 code -- presumably active-low on
 * this board; confirm against the board documentation.
 */
static void __init rbtx4938_pci_setup(void)
{
#ifdef CONFIG_PCI
	int extarb = !(__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCIARB);
	struct pci_controller *c = &txx9_primary_pcic;

	register_pci_controller(c);
	if (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66)
		txx9_pci_option =
			(txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
			TXX9_PCI_OPT_CLK_66; /* already configured */

	/* Reset PCI Bus */
	writeb(0, rbtx4938_pcireset_addr);
	/* Reset PCIC */
	txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
	if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
	    TXX9_PCI_OPT_CLK_66)
		tx4938_pciclk66_setup();
	mdelay(10);
	/* clear PCIC reset */
	txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
	writeb(1, rbtx4938_pcireset_addr);
	iob();

	tx4938_report_pciclk();
	tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
	if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
	    TXX9_PCI_OPT_CLK_AUTO &&
	    txx9_pci66_check(c, 0, 0)) {
		/* 66 MHz is usable: redo the reset with PCI66 set up. */
		/* Reset PCI Bus */
		writeb(0, rbtx4938_pcireset_addr);
		/* Reset PCIC */
		txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
		tx4938_pciclk66_setup();
		mdelay(10);
		/* clear PCIC reset */
		txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
		writeb(1, rbtx4938_pcireset_addr);
		iob();
		/* Reinitialize PCIC */
		tx4938_report_pciclk();
		tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
	}

	if (__raw_readq(&tx4938_ccfgptr->pcfg) &
	    (TX4938_PCFG_ETH0_SEL | TX4938_PCFG_ETH1_SEL)) {
		/* Reset PCIC1 */
		txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
		/* PCI1DMD==0 => PCI1CLK==GBUSCLK/2 => PCI66 */
		if (!(__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI1DMD))
			tx4938_ccfg_set(TX4938_CCFG_PCI1_66);
		mdelay(10);
		/* clear PCIC1 reset */
		txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
		tx4938_report_pci1clk();

		/* mem:64K(max), io:64K(max) (enough for ETH0,ETH1) */
		c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000);
		register_pci_controller(c);
		tx4927_pcic_setup(tx4938_pcic1ptr, c, 0);
	}
	tx4938_setup_pcierr_irq();
#endif /* CONFIG_PCI */
}
/*
 * Mask an Atlas hw0 interrupt line by writing its bit to the
 * reset-enable register, then flush the write buffer.
 */
void disable_atlas_irq(unsigned int irq_nr)
{
	unsigned int bit = 1 << (irq_nr - ATLASINT_BASE);

	atlas_hw0_icregs->intrsten = bit;
	iob();
}
/*
 * Acknowledge a KN02 interrupt: mask the line and flush the write
 * buffer so the CSR update reaches the hardware before returning.
 */
static void ack_kn02_irq(unsigned int irq)
{
	mask_kn02_irq(irq);
	iob();
}