/*
 * ip22_gio_id() - read the 32-bit GIO product ID at @addr into @res.
 *
 * A plain 32-bit read succeeding is not sufficient proof of a device:
 * on a pipelined GIO bus the read may return the transfer size rather
 * than data, so the value is cross-checked against guarded 8-bit and
 * 16-bit reads of the same word.  Returns 1 and stores the ID on
 * success, 0 if nothing answers at @addr.
 *
 * Fix: tmp16 was declared u8, which truncated the 16-bit probe and
 * made the "tmp16 == (tmp32 & 0xffff)" consistency check fail for any
 * ID with a nonzero byte at addr + 2.
 */
static int ip22_gio_id(unsigned long addr, u32 *res)
{
	u8 tmp8;
	u16 tmp16;	/* must hold the full 16-bit access */
	u32 tmp32;
	u8 *ptr8;
	u16 *ptr16;
	u32 *ptr32;

	ptr32 = (void *)CKSEG1ADDR(addr);
	if (!get_dbe(tmp32, ptr32)) {
		/*
		 * We got no DBE, but this doesn't mean anything.
		 * If GIO is pipelined (which can't be disabled
		 * for GFX slot) we don't get a DBE, but we see
		 * the transfer size as data.  So we do an 8bit
		 * and a 16bit access and check whether the common
		 * data matches
		 */
		ptr8 = (void *)CKSEG1ADDR(addr + 3);
		get_dbe(tmp8, ptr8);
		ptr16 = (void *)CKSEG1ADDR(addr + 2);
		get_dbe(tmp16, ptr16);
		if (tmp8 == (tmp16 & 0xff) &&
		    tmp8 == (tmp32 & 0xff) &&
		    tmp16 == (tmp32 & 0xffff)) {
			*res = tmp32;
			return 1;
		}
	}
	return 0;	/* nothing here */
}
/*
 * dz_init_ports() - one-time initialisation of the DZ11 serial port table.
 *
 * Chooses the DZ11 register base for the machine type (KN01-style
 * DS23100/DS5100 boxes vs. the KN02 layout) mapped uncached via KSEG1,
 * then fills in every dz_ports entry with the shared membase, IRQ and
 * per-port line number.  A static flag makes repeated calls no-ops.
 */
static void __init dz_init_ports(void)
{
	static int first = 1;
	struct dz_port *dport;
	unsigned long base;
	int i;

	if (!first)
		return;
	first = 0;

	if (mips_machtype == MACH_DS23100 || mips_machtype == MACH_DS5100)
		base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_DZ11);
	else
		base = CKSEG1ADDR(KN02_SLOT_BASE + KN02_DZ11);

	for (i = 0, dport = dz_ports; i < DZ_NB_PORT; i++, dport++) {
		spin_lock_init(&dport->port.lock);
		dport->port.membase = (char *) base;	/* all ports share one register block */
		dport->port.iotype = UPIO_MEM;
		dport->port.irq = dec_interrupt[DEC_IRQ_DZ11];
		dport->port.line = i;
		dport->port.fifosize = 1;
		dport->port.ops = &dz_ops;
		dport->port.flags = UPF_BOOT_AUTOCONF;
	}
}
/*
 * ms02nv_init() - locate and set up all MS02-NV NVRAM modules.
 *
 * Machines fitted with 32MB memory banks (per the KN02 CSR or KN03
 * IOASIC MCR, read uncached via KSEG1) space their option slots twice
 * as far apart, hence the address stride shift.  Returns 0 if at
 * least one module was initialised, -ENODEV otherwise (including on
 * unsupported machine types).
 *
 * Fix: dropped the unreachable "break" that followed "return -ENODEV"
 * in the default case.
 */
static int __init ms02nv_init(void)
{
	volatile u32 *csr;
	uint stride = 0;
	int count = 0;
	int i;

	switch (mips_machtype) {
	case MACH_DS5000_200:
		csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
		if (*csr & KN02_CSR_BNK32M)
			stride = 2;
		break;
	case MACH_DS5000_2X0:
	case MACH_DS5900:
		csr = (volatile u32 *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_MCR);
		if (*csr & KN03_MCR_BNK32M)
			stride = 2;
		break;
	default:
		return -ENODEV;
	}

	for (i = 0; i < (sizeof(ms02nv_addrs) / sizeof(*ms02nv_addrs)); i++)
		if (!ms02nv_init_one(ms02nv_addrs[i] << stride))
			count++;

	return (count > 0) ? 0 : -ENODEV;
}
/*
 * is_sram_addr() - test whether @p falls inside the on-chip SRAM
 * window.  Both the candidate pointer and DSPRAM_BASE are normalised
 * through KSEG1 and compared on their 16KB-aligned prefix.
 */
int is_sram_addr(void *p)
{
	const unsigned long window_mask = 0xffffc000;

	return (CKSEG1ADDR(p) & window_mask) ==
	       (CKSEG1ADDR(DSPRAM_BASE) & window_mask);
}
/*
 * board_early_init_f() - early Malta board setup.
 *
 * Points the I/O port base at the PCI I/O window of whichever system
 * controller is fitted (GT64120 or MSC01), then brings up the
 * FDC37M817 super I/O controller.  Returns 0 on success, -1 for an
 * unrecognised system controller.
 */
int board_early_init_f(void)
{
	int syscon = malta_sys_con();
	ulong io_base;

	/* choose correct PCI I/O base */
	if (syscon == SYSCON_GT64120)
		io_base = CKSEG1ADDR(MALTA_GT_PCIIO_BASE);
	else if (syscon == SYSCON_MSC01)
		io_base = CKSEG1ADDR(MALTA_MSC01_PCIIO_BASE);
	else
		return -1;	/* unknown system controller */

	set_io_port_base(io_base);

	/* setup FDC37M817 super I/O controller */
	malta_superio_init();

	return 0;
}
/*
 * ms02nv_probe_one() - probe for an MS02-NV module at @addr.
 *
 * The firmware writes MS02NV_ID at MS02NV_MAGIC and also a diagnostic
 * status at MS02NV_DIAG.  The magic is read through KSEG1 with a
 * bus-error guard (get_dbe) so an empty slot fails cleanly; on a
 * match the module size is decoded from the diagnostic word, capped
 * at MS02NV_CSR.  Returns the size in bytes, or 0 if no module
 * responds.
 */
static inline uint ms02nv_probe_one(ulong addr)
{
	ms02nv_uint *ms02nv_diagp;
	ms02nv_uint *ms02nv_magicp;
	uint ms02nv_diag;
	uint ms02nv_magic;
	size_t size;
	int err;

	ms02nv_diagp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_DIAG));
	ms02nv_magicp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_MAGIC));
	err = get_dbe(ms02nv_magic, ms02nv_magicp);
	if (err)
		return 0;	/* bus error: nothing in this slot */
	if (ms02nv_magic != MS02NV_ID)
		return 0;

	/* Magic matched, so the unguarded diag read is now safe. */
	ms02nv_diag = *ms02nv_diagp;
	size = (ms02nv_diag & MS02NV_DIAG_SIZE_MASK) << MS02NV_DIAG_SIZE_SHIFT;
	if (size > MS02NV_CSR)
		size = MS02NV_CSR;

	return size;
}
/*
 * prom_init_kn03() - early setup for KN03-class DECstations: record
 * the option-slot geometry, flag the TURBOchannel bus, and map the
 * I/O ASIC control block and TOY/RTC registers uncached via KSEG1.
 */
static inline void prom_init_kn03(void)
{
	dec_kn_slot_base = KN03_SLOT_BASE;
	dec_kn_slot_size = IOASIC_SLOT_SIZE;

	dec_tc_bus = 1;		/* this machine has TURBOchannel */

	ioasic_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_IOCTL);
	dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_TOY);
}
/*
 * rtl_cipher_crypt() - run one cipher request through the Realtek
 * hardware IPsec engine.
 *
 * @bEncrypt selects direction; for decryption, mode bit 0x20
 * apparently selects the prepared AES decryption key instead of the
 * raw key -- NOTE(review): confirm the 0x20 semantics against the
 * engine documentation.  src/key/iv are written back from the D-cache
 * before the DMA engine reads them, and all buffers are handed to the
 * engine through their uncached KSEG1 aliases; dst is invalidated
 * afterwards so the CPU sees the engine's output.
 *
 * Only whole cipher blocks are processed (nbytes rounded down to
 * bsize).  The return value is the number of *unprocessed* tail
 * bytes, which is what blkcipher_walk expects as the residue -- and
 * it is returned even when the engine reports an error.
 */
int rtl_cipher_crypt(struct crypto_cipher *cipher, u8 bEncrypt,
	struct rtl_cipher_ctx *ctx, u8 *src, unsigned int nbytes,
	u8 *iv, u8 *dst)
{
	unsigned int bsize = crypto_cipher_blocksize(cipher);
	u8 *key = bEncrypt ? ctx->key :
		ctx->mode & 0x20 ? ctx->aes_dekey : ctx->key;
	rtl_ipsecScatter_t scatter[1];
	u32 flag_encrypt = bEncrypt ? 4 : 0;
	int err;

#ifdef CONFIG_RTK_VOIP_DBG
	printk("%s: src=%p, len=%d, blk=%d, key=%p, iv=%p, dst=%p\n",
	       __FUNCTION__, src, nbytes, bsize, key, iv, dst);
	rtl_crypto_hexdump((void *) src, nbytes);
	rtl_crypto_hexdump((void *) key, ctx->key_length);
	rtl_crypto_hexdump((void *) iv, bsize);
#endif

	/* Flush CPU writes to memory before the engine DMAs them in. */
	dma_cache_wback((u32) src, nbytes);
	dma_cache_wback((u32) key, ctx->key_length);
	dma_cache_wback((u32) iv, bsize);

	scatter[0].len = (nbytes / bsize) * bsize;	/* whole blocks only */
	scatter[0].ptr = (void *) CKSEG1ADDR(src);

	/* int32 rtl_ipsecEngine(uint32 modeCrypto, uint32 modeAuth,
		uint32 cntScatter, rtl_ipsecScatter_t *scatter,
		void *pCryptResult, uint32 lenCryptoKey, void* pCryptoKey,
		uint32 lenAuthKey, void* pAuthKey, void* pIv, void* pPad,
		void* pDigest, uint32 a2eo, uint32 enl) */
	err = rtl_ipsecEngine(ctx->mode | flag_encrypt,
		-1, 1, scatter,
		(void *) CKSEG1ADDR(dst),
		ctx->key_length, (void *) CKSEG1ADDR(key),
		0, NULL,
		(void *) CKSEG1ADDR(iv),
		NULL, NULL, 0, scatter[0].len);

	if (unlikely(err))
		printk("%s: rtl_ipsecEngine failed\n", __FUNCTION__);

	/* Discard stale cache lines so the CPU reads the engine output. */
	dma_cache_inv((u32) dst, nbytes);

#ifdef CONFIG_RTK_VOIP_DBG
	printk("result:\n");
	rtl_crypto_hexdump(dst, nbytes);
#endif

	/* Residue (bytes not handled), even on error -- for blkcipher_walk. */
	return nbytes - scatter[0].len;
}
/*
 * pci_init_board() - bring up PCI on the MIPS Malta board.
 *
 * Programs whichever system controller is fitted (GT64120 or MSC01)
 * with the board's PCI memory/I/O windows, then locates the PIIX4
 * southbridge and configures its interrupt routing: PIRQA/B -> IRQ 10,
 * PIRQC/D -> IRQ 11, with SERIRQ muxed out and enabled (Linux
 * currently depends on SERIRQ being active).  Panics if the PIIX4 is
 * not found, as the board is unusable without it.
 */
void pci_init_board(void)
{
	pci_dev_t bdf;
	u32 val32;
	u8 val8;

	switch (malta_sys_con()) {
	case SYSCON_GT64120:
		set_io_port_base(CKSEG1ADDR(MALTA_GT_PCIIO_BASE));

		gt64120_pci_init((void *)CKSEG1ADDR(MALTA_GT_BASE),
				 0x00000000, 0x00000000, CONFIG_SYS_MEM_SIZE,
				 0x10000000, 0x10000000, 128 * 1024 * 1024,
				 0x00000000, 0x00000000, 0x20000);
		break;

	default:
	case SYSCON_MSC01:
		set_io_port_base(CKSEG1ADDR(MALTA_MSC01_PCIIO_BASE));

		msc01_pci_init((void *)CKSEG1ADDR(MALTA_MSC01_PCI_BASE),
			       0x00000000, 0x00000000, CONFIG_SYS_MEM_SIZE,
			       MALTA_MSC01_PCIMEM_MAP,
			       CKSEG1ADDR(MALTA_MSC01_PCIMEM_BASE),
			       MALTA_MSC01_PCIMEM_SIZE, MALTA_MSC01_PCIIO_MAP,
			       0x00000000, MALTA_MSC01_PCIIO_SIZE);
		break;
	}

	bdf = pci_find_device(PCI_VENDOR_ID_INTEL,
			      PCI_DEVICE_ID_INTEL_82371AB_0, 0);
	if (bdf == -1)
		panic("Failed to find PIIX4 PCI bridge\n");

	/* setup PCI interrupt routing */
	pci_write_config_byte(bdf, PCI_CFG_PIIX4_PIRQRCA, 10);
	pci_write_config_byte(bdf, PCI_CFG_PIIX4_PIRQRCB, 10);
	pci_write_config_byte(bdf, PCI_CFG_PIIX4_PIRQRCC, 11);
	pci_write_config_byte(bdf, PCI_CFG_PIIX4_PIRQRCD, 11);

	/* mux SERIRQ onto SERIRQ pin */
	pci_read_config_dword(bdf, PCI_CFG_PIIX4_GENCFG, &val32);
	val32 |= PCI_CFG_PIIX4_GENCFG_SERIRQ;
	pci_write_config_dword(bdf, PCI_CFG_PIIX4_GENCFG, val32);

	/* enable SERIRQ - Linux currently depends upon this */
	pci_read_config_byte(bdf, PCI_CFG_PIIX4_SERIRQC, &val8);
	val8 |= PCI_CFG_PIIX4_SERIRQC_EN | PCI_CFG_PIIX4_SERIRQC_CONT;
	pci_write_config_byte(bdf, PCI_CFG_PIIX4_SERIRQC, val8);
}
/*
 * dec_kn01_be_interrupt() - bus-error interrupt handler for KN01
 * machines.
 *
 * Reads the KN01 CSR (uncached via KSEG1) and bails with IRQ_NONE if
 * no memory error is latched -- the interrupt must then have come
 * from video.  Otherwise the backend classifies the error; a
 * MIPS_BE_DISCARD verdict ends handling, anything else is fatal.
 */
irqreturn_t dec_kn01_be_interrupt(int irq, void *dev_id)
{
	volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
	struct pt_regs *regs = get_irq_regs();
	int action;

	if (!(*csr & KN01_CSR_MEMERR))
		return IRQ_NONE;	/* Must have been video. */

	action = dec_kn01_be_backend(regs, 0, 1);
	if (action == MIPS_BE_DISCARD)
		return IRQ_HANDLED;

	/*
	 * FIXME: Find the affected processes and kill them, otherwise
	 * we must die.
	 *
	 * The interrupt is asynchronously delivered thus EPC and RA
	 * may be irrelevant, but are printed for a reference.
	 */
	printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n",
	       regs->cp0_epc, regs->regs[31]);
	die("Unrecoverable bus error", regs);
}
/*
 * prom_init_kn230() - early setup for KN230-class machines: uses the
 * KN01 slot layout and maps the RTC uncached via KSEG1.  Note that
 * unlike the KN02/KN03 variants, dec_tc_bus is not set here.
 */
static inline void prom_init_kn230(void)
{
	dec_kn_slot_base = KN01_SLOT_BASE;
	dec_kn_slot_size = KN01_SLOT_SIZE;

	dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN01_RTC);
}
/*
 * _machine_restart() - reboot the Malta board by writing the GORESET
 * magic to the soft-reset register (accessed uncached via KSEG1).
 */
void _machine_restart(void)
{
	__raw_writel(GORESET, (void __iomem *)CKSEG1ADDR(MALTA_RESET_BASE));
}
/*
 * cps_smp_setup() - probe the core/VPE topology of a MIPS Coherent
 * Processing System and prepare SMP bring-up.
 *
 * Counts the VPEs in every core (capped by NR_CPUS), marks each as
 * possible/present with an identity logical mapping, sets a coherent
 * default cache attribute (CWB), initialises core 0 -- which we are
 * running on -- and joins it to the coherence domain.  On CM3+ the
 * boot exception vector is pointed at the CPS entry code.
 */
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		/* BEV base must be an uncached (KSEG1) address */
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
/*
 * __ioremap() - map a physical range for I/O access.
 *
 * Uncached requests entirely within the low 512MB are satisfied
 * directly from KSEG1 with no page tables (unless
 * CONFIG_BRCM_UPPER_768MB).  Otherwise a VM_IOREMAP area is allocated
 * and mapped with the requested cache flags.  Remapping RAM already
 * in use is refused unless every affected page is marked reserved.
 * Returns the virtual address (offset within the page preserved) or
 * NULL on failure.
 */
void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

#if !defined(CONFIG_BRCM_UPPER_768MB)
	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);
#endif

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr);
		    page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);	/* tear down the partial mapping */
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
/*
 * prom_init_kn02() - early setup for KN02-class machines: record the
 * option-slot geometry, flag the TURBOchannel bus and map the RTC
 * uncached via KSEG1.
 */
static inline void prom_init_kn02(void)
{
	dec_kn_slot_base = KN02_SLOT_BASE;
	dec_kn_slot_size = KN02_SLOT_SIZE;

	dec_tc_bus = 1;		/* this machine has TURBOchannel */

	dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN02_RTC);
}
/*
 * mask_kn02_irq() - irq_chip mask hook: clear this line's enable bit
 * in the software copy of the KN02 CSR and write the copy back to the
 * (uncached, KSEG1-mapped) register.  The "+ 16" offsets the Linux
 * irq number into the CSR's interrupt-enable bit field -- presumably
 * KN02_CSR_IOINTEN (see init_kn02_irqs); confirm against the KN02
 * register layout.
 */
static void mask_kn02_irq(struct irq_data *d)
{
	volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
						       KN02_CSR);

	cached_kn02_csr &= ~(1 << (d->irq - kn02_irq_base + 16));
	*csr = cached_kn02_csr;
}
/*
 * mask_kn02_irq() - mask one KN02 interrupt line: clear its enable
 * bit in the cached CSR value and write the result to the uncached
 * (KSEG1-mapped) CSR register.  The "+ 16" offsets the Linux irq
 * number into the CSR's interrupt-enable bit field.
 */
static inline void mask_kn02_irq(unsigned int irq)
{
	volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
						       KN02_CSR);

	cached_kn02_csr &= ~(1 << (irq - kn02_irq_base + 16));
	*csr = cached_kn02_csr;
}
/*
 * prom_get_hwconf() - fetch the firmware hardware-configuration
 * pointer.  0xffffffff from the PROM means "not available"; any other
 * value is returned as an uncached KSEG1 pointer.
 */
void *prom_get_hwconf(void)
{
	u32 hwconf = _prom_get_hwconf();

	return (hwconf == 0xffffffff) ? NULL : (void *)CKSEG1ADDR(hwconf);
}
/*
 * spram_config() - detect and configure on-chip scratchpad RAM
 * (ISPRAM / DSPRAM) on MIPS MT-family cores.
 *
 * Config0 bits 24/23 advertise instruction/data scratchpad presence.
 * On TC3262-family SoCs the SPRAM is additionally enabled through
 * chip-specific registers (VPint), with per-chip quirks; otherwise
 * the Malta fixed addresses are probed.
 *
 * Fix: "struct cpuinfo_mips *c = ¤t_cpu_data;" was mojibake --
 * "&curren" had been swallowed as the HTML entity "&curren;".
 * Restored the intended "&current_cpu_data".
 */
void __cpuinit spram_config(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config0;

	switch (c->cputype) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		config0 = read_c0_config();
		/* FIXME: addresses are Malta specific */
#ifdef CONFIG_MIPS_TC3262
#ifdef CONFIG_TC3162_IMEM
		if (config0 & (1<<24)) {
			probe_spram("ISPRAM", CPHYSADDR(&__imem),
				    &ispram_load_tag, &ispram_store_tag);
			ispram_fill();
			if (!isRT63165 && !isRT63365 && !isMT751020)
				VPint(CR_DMC_ISPCFGR) =
					(CPHYSADDR(&__imem) & 0xfffff000) |
					(1<<8) | (0x7);
		}
#endif
#ifdef CONFIG_TC3162_DMEM
		if (isRT63165 || isRT63365) {
			/* These chips expose the SRAM directly; carve an
			   uncached allocation window out of it. */
			VPint(CR_SRAM) =
				(CPHYSADDR(DSPRAM_BASE) & 0xffffc000) | (1<<0);
			printk(KERN_INFO "Enable SRAM=0x%08lx\n",
			       VPint(CR_SRAM));
			sram_allocp = (char *) CKSEG1ADDR(DSPRAM_BASE);
			sram_size = sram_free = 0x8000;
		} else {
			if (!isTC3182 && !isRT65168) {
				if (config0 & (1<<23)) {
					if (isMT751020) {
						probe_spram("DSPRAM",
							    CPHYSADDR(DSPRAM_BASE),
							    &dspram_load_tag,
							    &dspram_store_tag);
						dspram_p = (char *)(DSPRAM_BASE);
					} else {
						probe_spram("DSPRAM",
							    CPHYSADDR(DSPRAM_BASE),
							    &dspram_load_tag,
							    &dspram_store_tag);
						VPint(CR_DMC_DSPCFGR) =
							(CPHYSADDR(DSPRAM_BASE) & 0xfffff000) |
							(1<<8) | (0x7);
					}
				}
			}
		}
#endif
#else
		if (config0 & (1<<24)) {
			probe_spram("ISPRAM", 0x1c000000,
				    &ispram_load_tag, &ispram_store_tag);
		}
		if (config0 & (1<<23))
			probe_spram("DSPRAM", 0x1c100000,
				    &dspram_load_tag, &dspram_store_tag);
#endif
	}
}
/*
 * init_one() - register the PMAG-AA framebuffer in TURBOchannel slot
 * @slot: map its registers and on-board VRAM uncached via KSEG1, set
 * up the Bt455/Bt431 RAM DACs, clear the screen and register the
 * frame buffer device.  Returns 0 on success, -EINVAL if
 * registration fails.
 */
static int __init init_one(int slot)
{
	unsigned long base_addr = CKSEG1ADDR(get_tc_base_addr(slot));
	struct aafb_info *ip = &my_fb_info[slot];

	memset(ip, 0, sizeof(struct aafb_info));

	/*
	 * Framebuffer display memory base address and friends.
	 */
	ip->bt455 = (struct bt455_regs *) (base_addr + PMAG_AA_BT455_OFFSET);
	ip->bt431 = (struct bt431_regs *) (base_addr + PMAG_AA_BT431_OFFSET);
	ip->fb_start = base_addr + PMAG_AA_ONBOARD_FBMEM_OFFSET;
	ip->fb_size = 2048 * 1024; /* fb_fix_screeninfo.smem_length
				      seems to be physical */
	ip->fb_line_length = 2048;

	/*
	 * Let there be consoles..
	 */
	strcpy(ip->info.modename, "PMAG-AA");
	ip->info.node = -1;
	ip->info.flags = FBINFO_FLAG_DEFAULT;
	ip->info.fbops = &aafb_ops;
	ip->info.disp = &ip->disp;
	ip->info.changevar = NULL;
	ip->info.switch_con = &aafb_switch;
	ip->info.updatevar = &aafb_update_var;
	ip->info.blank = &aafb_blank;

	aafb_set_disp(&ip->disp, currcon, ip);

	/*
	 * Configure the RAM DACs.
	 */
	bt455_erase_cursor(ip->bt455);

	/* Init colormap. */
	bt455_write_cmap_entry(ip->bt455, 0, 0x00, 0x00, 0x00);
	bt455_write_cmap_entry(ip->bt455, 1, 0x0f, 0x0f, 0x0f);

	/* Init hardware cursor. */
	bt431_init_cursor(ip->bt431);
	aafb_cursor_init(ip);

	/* Clear the screen. */
	memset ((void *)ip->fb_start, 0, ip->fb_size);

	if (register_framebuffer(&ip->info) < 0)
		return -EINVAL;

	printk(KERN_INFO "fb%d: %s frame buffer in TC slot %d\n",
	       GET_FB_IDX(ip->info.node), ip->info.modename, slot);

	return 0;
}
/*
 * i2c_au1550_probe() - bind one Au1550 PSC I2C controller.
 *
 * Claims the controller's MMIO region, records its uncached KSEG1
 * register base, initialises the hardware and registers a numbered
 * I2C adapter.  On any failure, resources acquired so far are
 * released in reverse order via the goto-cleanup chain.
 */
static int __devinit i2c_au1550_probe(struct platform_device *pdev)
{
	struct i2c_au1550_data *priv;
	struct resource *r;
	int ret;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		ret = -ENODEV;
		goto out;
	}

	priv = kzalloc(sizeof(struct i2c_au1550_data), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out;
	}

	priv->ioarea = request_mem_region(r->start, resource_size(r),
					  pdev->name);
	if (!priv->ioarea) {
		ret = -EBUSY;
		goto out_mem;
	}

	/* Uncached KSEG1 view of the PSC registers. */
	priv->psc_base = CKSEG1ADDR(r->start);
	priv->xfer_timeout = 200;
	priv->ack_timeout = 200;

	priv->adap.nr = pdev->id;
	priv->adap.algo = &au1550_algo;
	priv->adap.algo_data = priv;
	priv->adap.dev.parent = &pdev->dev;
	strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));

	/* Now, set up the PSC for SMBus PIO mode. */
	i2c_au1550_setup(priv);

	ret = i2c_add_numbered_adapter(&priv->adap);
	if (ret == 0) {
		platform_set_drvdata(pdev, priv);
		return 0;
	}

	/* Adapter registration failed: undo hardware setup and region. */
	i2c_au1550_disable(priv);

	release_resource(priv->ioarea);
	kfree(priv->ioarea);
out_mem:
	kfree(priv);
out:
	return ret;
}
/*
 * seeq_init_ring() - (re)initialise the SEEQ tx/rx descriptor rings.
 *
 * Packet buffers are allocated lazily (only for descriptors that do
 * not yet have one) and each descriptor records both an uncached
 * KSEG1 virtual address for CPU access and the physical address for
 * the DMA engine.  The final rx descriptor is tagged end-of-ring.
 * Returns 0 on success, -ENOMEM if any buffer allocation fails
 * (already-allocated buffers are kept for the next attempt).
 */
static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (!sp->tx_desc[i].tdma.pbuf) {
			unsigned long buffer;

			buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
			if (!buffer)
				return -ENOMEM;
			sp->tx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
			sp->tx_desc[i].tdma.pbuf = CPHYSADDR(buffer);
		}
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].rdma.pbuf) {
			unsigned long buffer;

			buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
			if (!buffer)
				return -ENOMEM;
			sp->rx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
			sp->rx_desc[i].rdma.pbuf = CPHYSADDR(buffer);
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
	}
	/* Mark the last rx descriptor so DMA stops there. */
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	return 0;
}
/*
 * dec_kn01_be_ack() - acknowledge a KN01 bus error: write the cached
 * CSR value with KN01_CSR_MEMERR set to clear the latched error.
 * The CSR is shared, hence the kn01_lock; iob() pushes the write out
 * to the bus before the lock is dropped.
 */
static inline void dec_kn01_be_ack(void)
{
	volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
	unsigned long flags;

	raw_spin_lock_irqsave(&kn01_lock, flags);

	*csr = cached_kn01_csr | KN01_CSR_MEMERR;	/* Clear bus IRQ. */
	iob();

	raw_spin_unlock_irqrestore(&kn01_lock, flags);
}
/*
 * ip22_is_gr2() - detect a GR2 graphics board at @addr by reading the
 * HQ2 identification word through a bus-error guard.  A successful
 * 32-bit read (HQ2 only allows 32bit accesses) returning 0xdeadbeef
 * identifies the board.  Returns 1 if found, 0 otherwise.
 */
static int ip22_is_gr2(unsigned long addr)
{
	u32 *id_reg = (void *)CKSEG1ADDR(addr + HQ2_MYSTERY_OFFS);
	u32 id;

	return !get_dbe(id, id_reg) && id == 0xdeadbeef;
}
/*
 * plat_mem_setup() - Cobalt platform setup: install restart/halt/
 * power-off hooks, point the I/O port base at the GT64111 window
 * (uncached via KSEG1), reserve legacy I/O ranges, read the board ID
 * out of the VIA bridge's PCI config space, and on boards newer than
 * the RaQ1 register the on-board 8250 UART.
 */
void __init plat_mem_setup(void)
{
	static struct uart_port uart;
	unsigned int devfn = PCI_DEVFN(COBALT_PCICONF_VIA, 0);
	int i;

	_machine_restart = cobalt_machine_restart;
	_machine_halt = cobalt_machine_halt;
	pm_power_off = cobalt_machine_power_off;

	set_io_port_base(CKSEG1ADDR(GT64111_IO_BASE));

	/* I/O port resource must include UART and LCD/buttons */
	ioport_resource.end = 0x0fffffff;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < COBALT_IO_RESOURCES; i++)
		request_resource(&ioport_resource, cobalt_io_resources + i);

	/* Read the cobalt id register out of the PCI config space */
	PCI_CFG_SET(devfn, (VIA_COBALT_BRD_ID_REG & ~0x3));
	cobalt_board_id = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
	cobalt_board_id >>= ((VIA_COBALT_BRD_ID_REG & 3) * 8);
	cobalt_board_id = VIA_COBALT_BRD_REG_to_ID(cobalt_board_id);

	printk("Cobalt board ID: %d\n", cobalt_board_id);

#ifdef CONFIG_PCI
	register_pci_controller(&cobalt_pci_controller);
#endif

#ifdef CONFIG_SERIAL_8250
	if (cobalt_board_id > COBALT_BRD_ID_RAQ1) {
#ifdef CONFIG_EARLY_PRINTK
		cobalt_early_console();
#endif

		uart.line = 0;
		uart.type = PORT_UNKNOWN;
		uart.uartclk = 18432000;
		uart.irq = COBALT_SERIAL_IRQ;
		uart.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
		uart.iobase = 0xc800000;
		uart.iotype = UPIO_PORT;

		early_serial_setup(&uart);
	}
#endif
}
/*
 * init_kn02_irqs() - set up the KN02 CSR interrupt lines starting at
 * Linux irq number @base.  All I/O interrupts are first masked in the
 * (uncached, KSEG1-mapped) CSR, then each line gets the kn02 irq chip
 * with level-triggered handling.
 */
void __init init_kn02_irqs(int base)
{
	volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
						       KN02_CSR);
	int i;

	/* Mask interrupts. */
	cached_kn02_csr &= ~KN02_CSR_IOINTEN;
	*csr = cached_kn02_csr;
	iob();		/* make sure the mask reaches the CSR first */

	for (i = base; i < base + KN02_IRQ_LINES; i++)
		irq_set_chip_and_handler(i, &kn02_irq_type, handle_level_irq);

	kn02_irq_base = base;
}
/*
 * plat_mem_setup() - Cobalt board setup: install restart/halt hooks
 * (power-off falls back to halt), point the I/O port base at the PCI
 * I/O window (uncached via KSEG1), and reserve the legacy I/O ranges
 * already claimed by the VIA SuperI/O chip.
 */
void __init plat_mem_setup(void)
{
	int i;

	_machine_restart = cobalt_machine_restart;
	_machine_halt = cobalt_machine_halt;
	pm_power_off = cobalt_machine_halt;	/* no dedicated power-off */

	set_io_port_base(CKSEG1ADDR(GT_DEF_PCI0_IO_BASE));

	/* I/O port resource must include LCD/buttons */
	ioport_resource.end = 0x0fffffff;

	/* These resources have been reserved by VIA SuperI/O chip. */
	for (i = 0; i < ARRAY_SIZE(cobalt_reserved_resources); i++)
		request_resource(&ioport_resource,
				 cobalt_reserved_resources + i);
}
/*
 * init_one() - register the PMAG-AA framebuffer in TURBOchannel slot
 * @slot: map its registers and on-board VRAM uncached via KSEG1,
 * initialise the Bt455 colormap and Bt431 hardware cursor, clear the
 * screen and register the frame buffer device.  Returns 0 on
 * success, -EINVAL if registration fails.
 */
static int __init init_one(int slot)
{
	unsigned long base_addr = CKSEG1ADDR(get_tc_base_addr(slot));
	struct aafb_info *ip = &my_fb_info[slot];

	memset(ip, 0, sizeof(struct aafb_info));

	/* Register blocks and display memory within the slot. */
	ip->bt455 = (struct bt455_regs *) (base_addr + PMAG_AA_BT455_OFFSET);
	ip->bt431 = (struct bt431_regs *) (base_addr + PMAG_AA_BT431_OFFSET);
	ip->fb_start = base_addr + PMAG_AA_ONBOARD_FBMEM_OFFSET;
	ip->fb_size = 2048 * 1024;
	ip->fb_line_length = 2048;

	/* Describe the device to the fbdev layer. */
	strcpy(ip->info.modename, "PMAG-AA");
	ip->info.node = -1;
	ip->info.flags = FBINFO_FLAG_DEFAULT;
	ip->info.fbops = &aafb_ops;
	ip->info.disp = &ip->disp;
	ip->info.changevar = NULL;
	ip->info.switch_con = &aafb_switch;
	ip->info.updatevar = &aafb_update_var;
	ip->info.blank = &aafb_blank;

	aafb_set_disp(&ip->disp, currcon, ip);

	/* Configure the RAM DACs: colormap, then hardware cursor. */
	bt455_erase_cursor(ip->bt455);
	bt455_write_cmap_entry(ip->bt455, 0, 0x00, 0x00, 0x00);
	bt455_write_cmap_entry(ip->bt455, 1, 0x0f, 0x0f, 0x0f);
	bt431_init_cursor(ip->bt431);
	aafb_cursor_init(ip);

	/* Clear the screen. */
	memset ((void *)ip->fb_start, 0, ip->fb_size);

	if (register_framebuffer(&ip->info) < 0)
		return -EINVAL;

	printk(KERN_INFO "fb%d: %s frame buffer in TC slot %d\n",
	       GET_FB_IDX(ip->info.node), ip->info.modename, slot);

	return 0;
}
/*
 * plat_mem_setup() - Cobalt board setup: install restart/halt hooks
 * (power-off falls back to halt), point the I/O port base at the PCI
 * I/O window (uncached via KSEG1), and reserve the I/O ranges held by
 * on-board devices.
 */
void __init plat_mem_setup(void)
{
	unsigned int idx;

	_machine_restart = cobalt_machine_restart;
	_machine_halt = cobalt_machine_halt;
	pm_power_off = cobalt_machine_halt;

	set_io_port_base(CKSEG1ADDR(GT_DEF_PCI0_IO_BASE));

	ioport_resource.end = 0x01ffffff;

	for (idx = 0; idx < ARRAY_SIZE(cobalt_reserved_resources); idx++)
		request_resource(&ioport_resource,
				 &cobalt_reserved_resources[idx]);
}
/*
 * malta_lcd_puts() - show @str on the Malta 8-character ASCII LCD.
 *
 * Writes up to 8 characters to the consecutive ASCIIPOS registers
 * (accessed uncached via KSEG1) and pads the remainder of the display
 * with spaces.
 *
 * Fix: strlen(@str) was re-evaluated in the loop condition on every
 * iteration; it is now computed once up front.
 */
static void malta_lcd_puts(const char *str)
{
	void *reg = (void *)CKSEG1ADDR(MALTA_ASCIIPOS0);
	int len = min((int)strlen(str), 8);
	int i;

	/* print up to 8 characters of the string */
	for (i = 0; i < len; i++) {
		__raw_writel(str[i], reg);
		reg += MALTA_ASCIIPOS1 - MALTA_ASCIIPOS0;
	}

	/* fill the rest of the display with spaces */
	for (; i < 8; i++) {
		__raw_writel(' ', reg);
		reg += MALTA_ASCIIPOS1 - MALTA_ASCIIPOS0;
	}
}