static __init void prom_meminit(void)
{
    u64 addr, size, type; /* regardless of 64BIT_PHYS_ADDR */
    int mem_flags = 0;
    unsigned int idx;
    int rd_flag;
#ifdef CONFIG_BLK_DEV_INITRD
    unsigned long initrd_pstart;
    unsigned long initrd_pend;

    initrd_pstart = CPHYSADDR(initrd_start);
    initrd_pend = CPHYSADDR(initrd_end);
    if (initrd_start &&
        ((initrd_pstart > MAX_RAM_SIZE) ||
         (initrd_pend > MAX_RAM_SIZE))) {
        panic("initrd out of addressable memory");
    }
#endif /* INITRD */

    for (idx = 0; cfe_enummem(idx, mem_flags, &addr, &size, &type) !=
         CFE_ERR_NOMORE; idx++) {
        rd_flag = 0;
        if (type == CFE_MI_AVAILABLE) {
            /*
             * See if this block contains (any portion of) the
             * ramdisk
             */
#ifdef CONFIG_BLK_DEV_INITRD
            if (initrd_start) {
                if ((initrd_pstart > addr) &&
                    (initrd_pstart < (addr + size))) {
                    add_memory_region(addr,
                                      initrd_pstart - addr,
                                      BOOT_MEM_RAM);
                    rd_flag = 1;
                }
                if ((initrd_pend > addr) &&
                    (initrd_pend < (addr + size))) {
                    add_memory_region(initrd_pend,
                                      (addr + size) - initrd_pend,
                                      BOOT_MEM_RAM);
                    rd_flag = 1;
                }
            }
#endif
            if (!rd_flag) {
                if (addr > MAX_RAM_SIZE)
                    continue;
                if (addr+size > MAX_RAM_SIZE)
                    size = MAX_RAM_SIZE - (addr+size) + 1;
                /*
                 * memcpy/__copy_user prefetch, which
                 * will cause a bus error for
                 * KSEG/KUSEG addrs not backed by RAM.
                 * Hence, reserve some padding for the
                 * prefetch distance.
                 */
                if (size > 512)
                    size -= 512;
                add_memory_region(addr, size, BOOT_MEM_RAM);
            }
            board_mem_region_addrs[board_mem_region_count] = addr;
            board_mem_region_sizes[board_mem_region_count] = size;
            board_mem_region_count++;
            if (board_mem_region_count == SIBYTE_MAX_MEM_REGIONS) {
                /*
                 * Too many regions. Need to configure more
                 */
                while (1)
                    ;
            }
        }
    }
#ifdef CONFIG_BLK_DEV_INITRD
    if (initrd_start) {
        add_memory_region(initrd_pstart, initrd_pend - initrd_pstart,
                          BOOT_MEM_RESERVED);
    }
#endif
}
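Every snippet in this collection leans on CPHYSADDR() to turn a KSEG0/KSEG1 virtual address into a physical one. For reference, a minimal sketch of the classic 32-bit MIPS definition (the in-tree macro in asm/addrspace.h uses a cast helper, but the mask is the part that matters):

/*
 * Sketch of the conventional 32-bit CPHYSADDR(): KSEG0 (0x80000000) and
 * KSEG1 (0xa0000000) both map the low 512MB of physical memory, so
 * clearing the top three address bits recovers the physical address.
 */
#define CPHYSADDR(a)    ((unsigned long)(a) & 0x1fffffff)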
static inline void bootmem_init(void)
{
    unsigned long start_pfn;
    unsigned long reserved_end = (unsigned long)&_end;
#ifndef CONFIG_SGI_IP27
    unsigned long first_usable_pfn;
    unsigned long bootmap_size;
    int i;
#endif
#ifdef CONFIG_BLK_DEV_INITRD
    int initrd_reserve_bootmem = 0;

    /* Board specific code should have set up initrd_start and initrd_end */
    ROOT_DEV = Root_RAM0;
    if (parse_rd_cmdline(&initrd_start, &initrd_end)) {
        reserved_end = max(reserved_end, initrd_end);
        initrd_reserve_bootmem = 1;
    } else {
        unsigned long tmp;
        u32 *initrd_header;

        tmp = ((reserved_end + PAGE_SIZE-1) & PAGE_MASK) - sizeof(u32) * 2;
        if (tmp < reserved_end)
            tmp += PAGE_SIZE;
        initrd_header = (u32 *)tmp;
        if (initrd_header[0] == 0x494E5244) {
            initrd_start = (unsigned long)&initrd_header[2];
            initrd_end = initrd_start + initrd_header[1];
            reserved_end = max(reserved_end, initrd_end);
            initrd_reserve_bootmem = 1;
        }
    }
#endif /* CONFIG_BLK_DEV_INITRD */

    /*
     * Partially used pages are not usable - thus
     * we are rounding upwards.
     */
    start_pfn = PFN_UP(CPHYSADDR(reserved_end));

#ifndef CONFIG_SGI_IP27
    /* Find the highest page frame number we have available. */
    max_pfn = 0;
    first_usable_pfn = -1UL;
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end;

        if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
            continue;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                       + boot_mem_map.map[i].size);

        if (start >= end)
            continue;
        if (end > max_pfn)
            max_pfn = end;
        if (start < first_usable_pfn) {
            if (start > start_pfn) {
                first_usable_pfn = start;
            } else if (end > start_pfn) {
                first_usable_pfn = start_pfn;
            }
        }
    }

    /*
     * Determine low and high memory ranges
     */
    max_low_pfn = max_pfn;
    if (max_low_pfn > MAXMEM_PFN) {
        max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n",
               MAXMEM >> 20);
        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#endif
    }
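A note on the magic checked above (and again in the next variant of bootmem_init()): 0x494E5244 is the ASCII string "INRD", marking a two-word initrd header appended after the kernel image:

/* 0x49 'I', 0x4E 'N', 0x52 'R', 0x44 'D' -- the header is two u32s:
 * word 0 holds the magic, word 1 the initrd size in bytes, and the
 * initrd image itself starts at &initrd_header[2]. */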
static inline void bootmem_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
    unsigned long tmp;
    unsigned long *initrd_header;
#endif
    unsigned long bootmap_size;
    unsigned long start_pfn, max_pfn;
    int i;

#ifdef CONFIG_BLK_DEV_INITRD
    tmp = (((unsigned long)&_end + PAGE_SIZE-1) & PAGE_MASK) - 8;
    if (tmp < (unsigned long)&_end)
        tmp += PAGE_SIZE;
    initrd_header = (unsigned long *)tmp;
    if (initrd_header[0] == 0x494E5244) {
        initrd_start = (unsigned long)&initrd_header[2];
        initrd_end = initrd_start + initrd_header[1];
    }
    start_pfn = PFN_UP(CPHYSADDR((&_end)+(initrd_end - initrd_start) + PAGE_SIZE));
#else
    /*
     * Partially used pages are not usable - thus
     * we are rounding upwards.
     */
    start_pfn = PFN_UP(CPHYSADDR(&_end));
#endif /* CONFIG_BLK_DEV_INITRD */

#ifndef CONFIG_SGI_IP27
    /* Find the highest page frame number we have available. */
    max_pfn = 0;
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end;

        if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
            continue;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                       + boot_mem_map.map[i].size);

        if (start >= end)
            continue;
        if (end > max_pfn)
            max_pfn = end;
    }

    /* Initialize the boot-time allocator. */
    bootmap_size = init_bootmem(start_pfn, max_pfn);

    /*
     * Register fully available low RAM pages with the bootmem allocator.
     */
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long curr_pfn, last_pfn, size;

        /*
         * Reserve usable memory.
         */
        if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
            continue;

        /*
         * We are rounding up the start address of usable memory:
         */
        curr_pfn = PFN_UP(boot_mem_map.map[i].addr);
        if (curr_pfn >= max_pfn)
            continue;
        if (curr_pfn < start_pfn)
            curr_pfn = start_pfn;

        /*
         * ... and at the end of the usable range downwards:
         */
        last_pfn = PFN_DOWN(boot_mem_map.map[i].addr
                            + boot_mem_map.map[i].size);
        if (last_pfn > max_pfn)
            last_pfn = max_pfn;

        /*
         * ... finally, did all the rounding and playing
         * around just make the area go away?
         */
        if (last_pfn <= curr_pfn)
            continue;

        size = last_pfn - curr_pfn;
        free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
    }

    /* Reserve the bootmap memory. */
    reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size);
#endif

#ifdef CONFIG_BLK_DEV_INITRD
    /* Board specific code should have set up initrd_start and initrd_end */
    ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
    if (&__rd_start != &__rd_end) {
        initrd_start = (unsigned long)&__rd_start;
        initrd_end = (unsigned long)&__rd_end;
    }
    initrd_below_start_ok = 1;
    if (initrd_start) {
        unsigned long initrd_size =
            ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start);
        printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
               (void *)initrd_start, initrd_size);

        /* FIXME: is this right? */
#ifndef CONFIG_SGI_IP27
        if (CPHYSADDR(initrd_end) > PFN_PHYS(max_pfn)) {
            printk("initrd extends beyond end of memory "
                   "(0x%p > 0x%p)\ndisabling initrd\n",
                   (void *)CPHYSADDR(initrd_end),
                   (void *)PFN_PHYS(max_pfn));
            initrd_start = 0;
        }
#endif /* !CONFIG_SGI_IP27 */
    }
#endif
}
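The rounding in both versions of bootmem_init() relies on the standard page-frame helpers; a sketch of their conventional definitions (the in-tree versions live in include/linux/pfn.h), assuming PAGE_SIZE and PAGE_SHIFT are defined:

/* Round an address up or down to a page frame number, and back. */
#define PFN_UP(x)      (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)    ((x) << PAGE_SHIFT)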
static void __init rbtx4938_mem_setup(void)
{
    unsigned long long pcfg;

    if (txx9_master_clock == 0)
        txx9_master_clock = 25000000; /* 25MHz */

    tx4938_setup();

#ifdef CONFIG_PCI
    txx9_alloc_pci_controller(&txx9_primary_pcic, 0, 0, 0, 0);
    txx9_board_pcibios_setup = tx4927_pcibios_setup;
#else
    set_io_port_base(RBTX4938_ETHER_BASE);
#endif

    tx4938_sio_init(7372800, 0);

#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_PIO58_61
    pr_info("PIOSEL: disabling both ATA and NAND selection\n");
    txx9_clear64(&tx4938_ccfgptr->pcfg,
                 TX4938_PCFG_NDF_SEL | TX4938_PCFG_ATA_SEL);
#endif

#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_NAND
    pr_info("PIOSEL: enabling NAND selection\n");
    txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
    txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
#endif

#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_ATA
    pr_info("PIOSEL: enabling ATA selection\n");
    txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
    txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
#endif

#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP
    pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);
    pr_info("PIOSEL: NAND %s, ATA %s\n",
            (pcfg & TX4938_PCFG_NDF_SEL) ? "enabled" : "disabled",
            (pcfg & TX4938_PCFG_ATA_SEL) ? "enabled" : "disabled");
#endif

    rbtx4938_spi_setup();
    pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg); /* updated */
    /* fixup piosel */
    if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
        TX4938_PCFG_ATA_SEL)
        writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x04,
               rbtx4938_piosel_addr);
    else if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
             TX4938_PCFG_NDF_SEL)
        writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x08,
               rbtx4938_piosel_addr);
    else
        writeb(readb(rbtx4938_piosel_addr) & ~(0x08 | 0x04),
               rbtx4938_piosel_addr);

    rbtx4938_fpga_resource.name = "FPGA Registers";
    rbtx4938_fpga_resource.start = CPHYSADDR(RBTX4938_FPGA_REG_ADDR);
    rbtx4938_fpga_resource.end = CPHYSADDR(RBTX4938_FPGA_REG_ADDR) + 0xffff;
    rbtx4938_fpga_resource.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
    if (request_resource(&txx9_ce_res[2], &rbtx4938_fpga_resource))
        printk(KERN_ERR "request resource for fpga failed\n");

    _machine_restart = rbtx4938_machine_restart;

    writeb(0xff, rbtx4938_led_addr);
    printk(KERN_INFO "RBTX4938 --- FPGA(Rev %02x) DIPSW:%02x,%02x\n",
           readb(rbtx4938_fpga_rev_addr),
           readb(rbtx4938_dipsw_addr), readb(rbtx4938_bdipsw_addr));
}
    .num_resources = ARRAY_SIZE(au1200_mmc1_resources),
    .resource = au1200_mmc1_resources,
};
#endif /* #ifndef CONFIG_MIPS_DB1200 */
#endif /* #ifdef CONFIG_SOC_AU1200 */

static struct platform_device au1x00_pcmcia_device = {
    .name = "au1x00-pcmcia",
    .id = 0,
};

/* All Alchemy demoboards with I2C have this #define in their headers */
#ifdef SMBUS_PSC_BASE
static struct resource pbdb_smbus_resources[] = {
    {
        .start = CPHYSADDR(SMBUS_PSC_BASE),
        .end = CPHYSADDR(SMBUS_PSC_BASE + 0xfffff),
        .flags = IORESOURCE_MEM,
    },
};

static struct platform_device pbdb_smbus_device = {
    .name = "au1xpsc_smbus",
    .id = 0, /* bus number */
    .num_resources = ARRAY_SIZE(pbdb_smbus_resources),
    .resource = pbdb_smbus_resources,
};
#endif

static struct platform_device *au1xxx_platform_devices[] __initdata = {
    &au1xx0_uart_device,
static inline void rc32434_chain_rx(struct rc32434_local *lp,
                                    volatile DMAD_t rd)
{
    rc32434_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}
static int rc32434_rx(struct net_device *dev, int limit)
{
    struct rc32434_local *lp = netdev_priv(dev);
    volatile DMAD_t rd = &lp->rd_ring[lp->rx_next_done];
    struct sk_buff *skb, *skb_new;
    u8 *pkt_buf;
    u32 devcs, pkt_len, dmas, rx_free_desc;
    u32 pktuncrc_len;
    int count;

    dma_cache_inv((u32)rd, sizeof(*rd));
    for (count = 0; count < limit; count++) {
        /* init the var. used for the later operations within the while loop */
        skb_new = NULL;
        devcs = rd->devcs;
        pkt_len = RCVPKT_LENGTH(devcs);
        skb = lp->rx_skb[lp->rx_next_done];

        if ((devcs & ETHRX_ld_m) != ETHRX_ld_m) {
            /* check that this is a whole packet */
            /* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
            lp->stats.rx_errors++;
            lp->stats.rx_dropped++;
        } else if (devcs & ETHRX_rok_m) {
            /* must be the (first and) last descriptor then */
            pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
            pktuncrc_len = pkt_len - 4;
            /* invalidate the cache */
            dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);

            /* Malloc up new buffer. */
            skb_new = netdev_alloc_skb(dev, RC32434_RBSIZE + 2);

            if (skb_new != NULL) {
                /* Make room */
                skb_put(skb, pktuncrc_len);

                skb->protocol = eth_type_trans(skb, dev);

                /* pass the packet to upper layers */
                netif_receive_skb(skb);

                dev->last_rx = jiffies;
                lp->stats.rx_packets++;
                lp->stats.rx_bytes += pktuncrc_len;

                if (IS_RCV_MP(devcs))
                    lp->stats.multicast++;

                /* 16 bit align */
                skb_reserve(skb_new, 2);

                skb_new->dev = dev;
                lp->rx_skb[lp->rx_next_done] = skb_new;
            } else {
                ERR("no memory, dropping rx packet.\n");
                lp->stats.rx_errors++;
                lp->stats.rx_dropped++;
            }
        } else {
            /* This should only happen if we enable accepting broken packets */
            lp->stats.rx_errors++;
            lp->stats.rx_dropped++;

            /* add statistics counters */
            if (IS_RCV_CRC_ERR(devcs)) {
                DBG(2, "RX CRC error\n");
                lp->stats.rx_crc_errors++;
            } else if (IS_RCV_LOR_ERR(devcs)) {
                DBG(2, "RX LOR error\n");
                lp->stats.rx_length_errors++;
            } else if (IS_RCV_LE_ERR(devcs)) {
                DBG(2, "RX LE error\n");
                lp->stats.rx_length_errors++;
            } else if (IS_RCV_OVR_ERR(devcs)) {
                lp->stats.rx_over_errors++;
            } else if (IS_RCV_CV_ERR(devcs)) {
                /* code violation */
                DBG(2, "RX CV error\n");
                lp->stats.rx_frame_errors++;
            } else if (IS_RCV_CES_ERR(devcs)) {
                DBG(2, "RX Preamble error\n");
            }
        }

        rd->devcs = 0;

        /* restore descriptor's curr_addr */
        if (skb_new)
            rd->ca = CPHYSADDR(skb_new->data);
        else
            rd->ca = CPHYSADDR(skb->data);

        rd->control = DMA_COUNT(RC32434_RBSIZE) | DMAD_cod_m | DMAD_iod_m;
        lp->rd_ring[(lp->rx_next_done - 1) & RC32434_RDS_MASK].control &=
            ~DMAD_cod_m;

        lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
        dma_cache_wback((u32)rd, sizeof(*rd));
        rd = &lp->rd_ring[lp->rx_next_done];
        __raw_writel(~DMAS_d_m, &lp->rx_dma_regs->dmas);
    }

    dmas = __raw_readl(&lp->rx_dma_regs->dmas);
    if (dmas & DMAS_h_m) {
        /* Mask off halt and error bits */
        __raw_writel(~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
        lp->dma_halt_cnt++;
#endif
        rd->devcs = 0;
        skb = lp->rx_skb[lp->rx_next_done];
        rd->ca = CPHYSADDR(skb->data);
        dma_cache_wback((u32)rd, sizeof(*rd));
        rc32434_chain_rx(lp, rd);
    }

    return count;
}
struct prom_pmemblock * __init prom_getmdesc(void)
{
    char *memsize_str;
    unsigned int memsize;

    memsize_str = prom_getenv("memsize");
    if (!memsize_str) {
#ifdef CONFIG_MIPS_AVALANCHE_SOC
        prom_printf("memsize not set in boot prom, set to default (64Mb)\n");
        memsize = 0x04000000;
#else
        prom_printf("memsize not set in boot prom, set to default (32Mb)\n");
        memsize = 0x02000000;
#endif
    } else {
#ifdef DEBUG
        prom_printf("prom_memsize = %s\n", memsize_str);
#endif
        memsize = simple_strtol(memsize_str, NULL, 0);
    }

    memset(mdesc, 0, sizeof(mdesc));

#if defined(CONFIG_MIPS_AVALANCHE_SOC)
#define PREV_MDESC(x) ((x) - 1)
    {
        struct prom_pmemblock *p_mdesc = &mdesc[0];

        p_mdesc->type = yamon_dontuse;
        p_mdesc->base = 0x00000000;
        p_mdesc->size = AVALANCHE_SDRAM_BASE;
        p_mdesc++;

        p_mdesc->type = yamon_prom;
        p_mdesc->base = PREV_MDESC(p_mdesc)->base + PREV_MDESC(p_mdesc)->size;
        p_mdesc->size = PAGE_SIZE;
        p_mdesc++;

        p_mdesc->type = yamon_prom;
        p_mdesc->base = PREV_MDESC(p_mdesc)->base + PREV_MDESC(p_mdesc)->size;
        p_mdesc->size = (CONFIG_MIPS_AVALANCHE_LOAD_ADDRESS -
                         KSEG0ADDR(AVALANCHE_SDRAM_BASE)) - PAGE_SIZE;
        p_mdesc++;

        p_mdesc->type = yamon_dontuse;
        p_mdesc->base = PREV_MDESC(p_mdesc)->base + PREV_MDESC(p_mdesc)->size;
        p_mdesc->size = CPHYSADDR(PFN_ALIGN(&_end)) - p_mdesc->base;
        p_mdesc++;

        p_mdesc->type = yamon_free;
        p_mdesc->base = PREV_MDESC(p_mdesc)->base + PREV_MDESC(p_mdesc)->size;
        p_mdesc->size = memsize -
            (CPHYSADDR(PFN_ALIGN(&_end)) - AVALANCHE_SDRAM_BASE);
    }
#else
    mdesc[0].type = yamon_dontuse;
    mdesc[0].base = 0x00000000;
    mdesc[0].size = 0x00001000;

    mdesc[1].type = yamon_prom;
    mdesc[1].base = 0x00001000;
    mdesc[1].size = 0x000ef000;

#ifdef CONFIG_MIPS_MALTA
    /*
     * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
     * south bridge, PCI access is always forwarded to the ISA bus, and
     * BIOSCS# is always generated.
     * This means that this area can't be used as DMA memory for PCI
     * devices.
     */
    mdesc[2].type = yamon_dontuse;
    mdesc[2].base = 0x000f0000;
    mdesc[2].size = 0x00010000;
#else
    mdesc[2].type = yamon_prom;
    mdesc[2].base = 0x000f0000;
    mdesc[2].size = 0x00010000;
#endif

    mdesc[3].type = yamon_dontuse;
    mdesc[3].base = 0x00100000;
    mdesc[3].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[3].base;

    mdesc[4].type = yamon_free;
    mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
    mdesc[4].size = memsize - mdesc[4].base;
#endif /* CONFIG_MIPS_AVALANCHE_SOC */

    return &mdesc[0];
}
static int dec_kn01_be_backend(struct pt_regs *regs, int is_fixup, int invoker)
{
    volatile u32 *kn01_erraddr = (void *)CKSEG1ADDR(KN01_SLOT_BASE +
                                                    KN01_ERRADDR);

    static const char excstr[] = "exception";
    static const char intstr[] = "interrupt";
    static const char cpustr[] = "CPU";
    static const char mreadstr[] = "memory read";
    static const char readstr[] = "read";
    static const char writestr[] = "write";
    static const char timestr[] = "timeout";
    static const char paritystr[] = "parity error";

    int data = regs->cp0_cause & 4;
    unsigned int __user *pc = (unsigned int __user *)regs->cp0_epc +
                              ((regs->cp0_cause & CAUSEF_BD) != 0);
    union mips_instruction insn;
    unsigned long entrylo, offset;
    long asid, entryhi, vaddr;

    const char *kind, *agent, *cycle, *event;
    unsigned long address;

    u32 erraddr = *kn01_erraddr;
    int action = MIPS_BE_FATAL;

    /* Ack ASAP, so that any subsequent errors get caught. */
    dec_kn01_be_ack();

    kind = invoker ? intstr : excstr;

    agent = cpustr;

    if (invoker)
        address = erraddr;
    else {
        /* Bloody hardware doesn't record the address for reads... */
        if (data) {
            /* This never faults. */
            __get_user(insn.word, pc);
            vaddr = regs->regs[insn.i_format.rs] +
                    insn.i_format.simmediate;
        } else
            vaddr = (long)pc;
        if (KSEGX(vaddr) == CKSEG0 || KSEGX(vaddr) == CKSEG1)
            address = CPHYSADDR(vaddr);
        else {
            /* Peek at what physical address the CPU used. */
            asid = read_c0_entryhi();
            entryhi = asid & (PAGE_SIZE - 1);
            entryhi |= vaddr & ~(PAGE_SIZE - 1);
            write_c0_entryhi(entryhi);
            BARRIER;
            tlb_probe();
            /* No need to check for presence. */
            tlb_read();
            entrylo = read_c0_entrylo0();
            write_c0_entryhi(asid);
            offset = vaddr & (PAGE_SIZE - 1);
            address = (entrylo & ~(PAGE_SIZE - 1)) | offset;
        }
    }

    /* Treat low 256MB as memory, high -- as I/O. */
    if (address < 0x10000000) {
        cycle = mreadstr;
        event = paritystr;
    } else {
        cycle = invoker ? writestr : readstr;
        event = timestr;
    }

    if (is_fixup)
        action = MIPS_BE_FIXUP;

    if (action != MIPS_BE_FIXUP)
        printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
               kind, agent, cycle, event, address);

    return action;
}
fw_memblock_t * __init fw_getmdesc(int eva)
{
    char *memsize_str, *ememsize_str = NULL, *ptr;
    unsigned long memsize = 0, ememsize = 0;
    static char cmdline[COMMAND_LINE_SIZE] __initdata;
    int tmp;

    /* otherwise look in the environment */
    memsize_str = fw_getenv("memsize");
    if (memsize_str) {
        tmp = kstrtoul(memsize_str, 0, &memsize);
        if (tmp)
            pr_warn("Failed to read the 'memsize' env variable.\n");
    }

    if (eva) {
        /* Look for ememsize for EVA */
        ememsize_str = fw_getenv("ememsize");
        if (ememsize_str) {
            tmp = kstrtoul(ememsize_str, 0, &ememsize);
            if (tmp)
                pr_warn("Failed to read the 'ememsize' env variable.\n");
        }
    }

    if (!memsize && !ememsize) {
        pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
        physical_memsize = 0x02000000;
    } else {
        if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
            pr_warn("Unsupported memsize value (0x%lx) detected! "
                    "Using 0x10000000 (256M) instead\n",
                    memsize);
            memsize = 256 << 20;
        }
        /* If ememsize is set, then set physical_memsize to that */
        physical_memsize = ememsize ? : memsize;
    }

#ifdef CONFIG_CPU_BIG_ENDIAN
    /* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
       word of physical memory */
    physical_memsize -= PAGE_SIZE;
#endif

    /* Check the command line for a memsize directive that overrides
       the physical/default amount */
    strcpy(cmdline, arcs_cmdline);
    ptr = strstr(cmdline, "memsize=");
    if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
        ptr = strstr(ptr, " memsize=");
    /* And now look for ememsize */
    if (eva) {
        ptr = strstr(cmdline, "ememsize=");
        if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
            ptr = strstr(ptr, " ememsize=");
    }

    if (ptr)
        /* Skip the "memsize=" (8 chars) or "ememsize=" (9 chars) prefix */
        memsize = memparse(ptr + 8 + (eva ? 1 : 0), &ptr);
    else
        memsize = physical_memsize;

    /* Last 64K for HIGHMEM arithmetics */
    if (memsize > 0x7fff0000)
        memsize = 0x7fff0000;

    memset(mdesc, 0, sizeof(mdesc));

    mdesc[0].type = fw_dontuse;
    mdesc[0].base = PHYS_OFFSET;
    mdesc[0].size = 0x00001000;

    mdesc[1].type = fw_code;
    mdesc[1].base = mdesc[0].base + 0x00001000UL;
    mdesc[1].size = 0x000ef000;

    /*
     * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
     * south bridge, PCI access is always forwarded to the ISA bus, and
     * BIOSCS# is always generated.
     * This means that this area can't be used as DMA memory for PCI
     * devices.
     */
    mdesc[2].type = fw_dontuse;
    mdesc[2].base = mdesc[0].base + 0x000f0000UL;
    mdesc[2].size = 0x00010000;

    mdesc[3].type = fw_dontuse;
    mdesc[3].base = mdesc[0].base + 0x00100000UL;
    mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
        0x00100000UL;

    mdesc[4].type = fw_free;
    mdesc[4].base = mdesc[0].base + CPHYSADDR(PFN_ALIGN(&_end));
    mdesc[4].size = memsize - CPHYSADDR(mdesc[4].base);

    return &mdesc[0];
}
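The `ptr + 8 + (eva ? 1 : 0)` arithmetic above skips the "memsize="/"ememsize=" prefix before handing the value to memparse(), which accepts an optional binary-size suffix. A small illustration of memparse() semantics (the helper lives in lib/cmdline.c; the argument string here is hypothetical):

/* memparse() parses a number with an optional K/M/G suffix and, when
 * retptr is non-NULL, advances it past the consumed characters. */
char *after;
unsigned long long bytes = memparse("64M earlyprintk", &after);
/* bytes == 64 << 20; after now points at " earlyprintk" */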
void __init mips_pcibios_init(void)
{
    struct pci_controller *controller;
    resource_size_t start, end, map, start1, end1, map1, map2, map3, mask;

    switch (mips_revision_sconid) {
    case MIPS_REVISION_SCON_GT64120:
        /*
         * Due to a bug in the Galileo system controller, we need
         * to setup the PCI BAR for the Galileo internal registers.
         * This should be done in the bios/bootprom and will be
         * fixed in a later revision of YAMON (the MIPS boards
         * boot prom).
         */
        GT_WRITE(GT_PCI0_CFGADDR_OFS,
                 (0 << GT_PCI0_CFGADDR_BUSNUM_SHF) |    /* Local bus */
                 (0 << GT_PCI0_CFGADDR_DEVNUM_SHF) |    /* GT64120 dev */
                 (0 << GT_PCI0_CFGADDR_FUNCTNUM_SHF) |  /* Function 0 */
                 ((0x20/4) << GT_PCI0_CFGADDR_REGNUM_SHF) | /* BAR 4 */
                 GT_PCI0_CFGADDR_CONFIGEN_BIT);

        /* Perform the write */
        GT_WRITE(GT_PCI0_CFGDATA_OFS, CPHYSADDR(MIPS_GT_BASE));

        /* Set up resource ranges from the controller's registers. */
        start = GT_READ(GT_PCI0M0LD_OFS);
        end = GT_READ(GT_PCI0M0HD_OFS);
        map = GT_READ(GT_PCI0M0REMAP_OFS);
        end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);
        start1 = GT_READ(GT_PCI0M1LD_OFS);
        end1 = GT_READ(GT_PCI0M1HD_OFS);
        map1 = GT_READ(GT_PCI0M1REMAP_OFS);
        end1 = (end1 & GT_PCI_HD_MSK) | (start1 & ~GT_PCI_HD_MSK);
        /* Cannot support multiple windows, use the wider. */
        if (end1 - start1 > end - start) {
            start = start1;
            end = end1;
            map = map1;
        }
        mask = ~(start ^ end);
        /* We don't support remapping with a discontiguous mask. */
        BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
               mask != ~((mask & -mask) - 1));
        gt64120_mem_resource.start = start;
        gt64120_mem_resource.end = end;
        gt64120_controller.mem_offset = (start & mask) - (map & mask);
        /* Addresses are 36-bit, so do shifts in the destinations. */
        gt64120_mem_resource.start <<= GT_PCI_DCRM_SHF;
        gt64120_mem_resource.end <<= GT_PCI_DCRM_SHF;
        gt64120_mem_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1;
        gt64120_controller.mem_offset <<= GT_PCI_DCRM_SHF;

        start = GT_READ(GT_PCI0IOLD_OFS);
        end = GT_READ(GT_PCI0IOHD_OFS);
        map = GT_READ(GT_PCI0IOREMAP_OFS);
        end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);
        mask = ~(start ^ end);
        /* We don't support remapping with a discontiguous mask. */
        BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
               mask != ~((mask & -mask) - 1));
        gt64120_io_resource.start = map & mask;
        gt64120_io_resource.end = (map & mask) | ~mask;
        gt64120_controller.io_offset = 0;
        /* Addresses are 36-bit, so do shifts in the destinations. */
        gt64120_io_resource.start <<= GT_PCI_DCRM_SHF;
        gt64120_io_resource.end <<= GT_PCI_DCRM_SHF;
        gt64120_io_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1;

        controller = &gt64120_controller;
        break;

    case MIPS_REVISION_SCON_BONITO:
        /* Set up resource ranges from the controller's registers. */
        map = BONITO_PCIMAP;
        map1 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO0) >>
               BONITO_PCIMAP_PCIMAP_LO0_SHIFT;
        map2 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO1) >>
               BONITO_PCIMAP_PCIMAP_LO1_SHIFT;
        map3 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO2) >>
               BONITO_PCIMAP_PCIMAP_LO2_SHIFT;
        /* Combine as many adjacent windows as possible. */
        map = map1;
        start = BONITO_PCILO0_BASE;
        end = 1;
        if (map3 == map2 + 1) {
            map = map2;
            start = BONITO_PCILO1_BASE;
            end++;
        }
        if (map2 == map1 + 1) {
            map = map1;
            start = BONITO_PCILO0_BASE;
            end++;
        }
        bonito64_mem_resource.start = start;
        bonito64_mem_resource.end = start + BONITO_PCIMAP_WINBASE(end) - 1;
        bonito64_controller.mem_offset = start - BONITO_PCIMAP_WINBASE(map);

        controller = &bonito64_controller;
        break;

    case MIPS_REVISION_SCON_SOCIT:
    case MIPS_REVISION_SCON_ROCIT:
    case MIPS_REVISION_SCON_SOCITSC:
    case MIPS_REVISION_SCON_SOCITSCP:
        /* Set up resource ranges from the controller's registers. */
        MSC_READ(MSC01_PCI_SC2PMBASL, start);
        MSC_READ(MSC01_PCI_SC2PMMSKL, mask);
        MSC_READ(MSC01_PCI_SC2PMMAPL, map);
        msc_mem_resource.start = start & mask;
        msc_mem_resource.end = (start & mask) | ~mask;
        msc_controller.mem_offset = (start & mask) - (map & mask);

        MSC_READ(MSC01_PCI_SC2PIOBASL, start);
        MSC_READ(MSC01_PCI_SC2PIOMSKL, mask);
        MSC_READ(MSC01_PCI_SC2PIOMAPL, map);
        msc_io_resource.start = map & mask;
        msc_io_resource.end = (map & mask) | ~mask;
        msc_controller.io_offset = 0;
        ioport_resource.end = ~mask;

        /* If ranges overlap I/O takes precedence. */
        start = start & mask;
        end = start | ~mask;
        if ((start >= msc_mem_resource.start &&
             start <= msc_mem_resource.end) ||
            (end >= msc_mem_resource.start &&
             end <= msc_mem_resource.end)) {
            /* Use the larger space. */
            start = max(start, msc_mem_resource.start);
            end = min(end, msc_mem_resource.end);
            if (start - msc_mem_resource.start >=
                msc_mem_resource.end - end)
                msc_mem_resource.end = start - 1;
            else
                msc_mem_resource.start = end + 1;
        }

        controller = &msc_controller;
        break;

    default:
        return;
    }

    /* FIXME */
    if (controller->io_resource->start < 0x00001000UL)
        controller->io_resource->start = 0x00001000UL;

    iomem_resource.end &= 0xfffffffffULL;  /* 64 GB */
    ioport_resource.end = controller->io_resource->end;

    register_pci_controller(controller);
}
static int __init init_msp_flash(void)
{
    int i, j, ret = -ENOMEM;
    int offset, coff;
    char *env;
    int pcnt;
    char flash_name[] = "flash0";
    char part_name[] = "flash0_0";
    unsigned addr, size;

    /* If ELB is disabled by "ful-mux" mode, we can't get at flash */
    if ((*DEV_ID_REG & DEV_ID_SINGLE_PC) &&
        (*ELB_1PC_EN_REG & SINGLE_PCCARD)) {
        printk(KERN_NOTICE "Single PC Card mode: no flash access\n");
        return -ENXIO;
    }

    /* examine the prom environment for flash devices */
    for (fcnt = 0; (env = prom_getenv(flash_name)); fcnt++)
        flash_name[5] = '0' + fcnt + 1;

    if (fcnt < 1)
        return -ENXIO;

    printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);

    msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
    if (!msp_flash)
        return -ENOMEM;

    msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
    if (!msp_parts)
        goto free_msp_flash;

    msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
    if (!msp_maps)
        goto free_msp_parts;

    /* loop over the flash devices, initializing each */
    for (i = 0; i < fcnt; i++) {
        /* examine the prom environment for flash partitions */
        part_name[5] = '0' + i;
        part_name[7] = '0';
        for (pcnt = 0; (env = prom_getenv(part_name)); pcnt++)
            part_name[7] = '0' + pcnt + 1;

        if (pcnt == 0) {
            printk(KERN_NOTICE "Skipping flash device %d "
                   "(no partitions defined)\n", i);
            continue;
        }

        msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition),
                               GFP_KERNEL);
        if (!msp_parts[i])
            goto cleanup_loop;

        /* now initialize the devices proper */
        flash_name[5] = '0' + i;
        env = prom_getenv(flash_name);

        if (sscanf(env, "%x:%x", &addr, &size) < 2) {
            ret = -ENXIO;
            kfree(msp_parts[i]);
            goto cleanup_loop;
        }
        addr = CPHYSADDR(addr);

        printk(KERN_NOTICE
               "MSP flash device \"%s\": 0x%08x at 0x%08x\n",
               flash_name, size, addr);
        /* This must match the actual size of the flash chip */
        msp_maps[i].size = size;
        msp_maps[i].phys = addr;

        /*
         * Platforms have a specific limit of the size of memory
         * which may be mapped for flash:
         */
        if (size > CONFIG_MSP_FLASH_MAP_LIMIT)
            size = CONFIG_MSP_FLASH_MAP_LIMIT;

        msp_maps[i].virt = ioremap(addr, size);
        if (msp_maps[i].virt == NULL) {
            ret = -ENXIO;
            kfree(msp_parts[i]);
            goto cleanup_loop;
        }

        msp_maps[i].bankwidth = 1;
        msp_maps[i].name = kmalloc(7, GFP_KERNEL);
        if (!msp_maps[i].name) {
            iounmap(msp_maps[i].virt);
            kfree(msp_parts[i]);
            goto cleanup_loop;
        }

        msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);

        for (j = 0; j < pcnt; j++) {
            part_name[5] = '0' + i;
            part_name[7] = '0' + j;

            env = prom_getenv(part_name);

            if (sscanf(env, "%x:%x:%n", &offset, &size, &coff) < 2) {
                ret = -ENXIO;
                kfree(msp_maps[i].name);
                iounmap(msp_maps[i].virt);
                kfree(msp_parts[i]);
                goto cleanup_loop;
            }

            msp_parts[i][j].size = size;
            msp_parts[i][j].offset = offset;
            msp_parts[i][j].name = env + coff;
        }

        /* now probe and add the device */
        simple_map_init(&msp_maps[i]);
        msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
        if (msp_flash[i]) {
            msp_flash[i]->owner = THIS_MODULE;
            add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt);
        } else {
            printk(KERN_ERR "map probe failed for flash\n");
            ret = -ENXIO;
            kfree(msp_maps[i].name);
            iounmap(msp_maps[i].virt);
            kfree(msp_parts[i]);
            goto cleanup_loop;
        }
    }

    return 0;

cleanup_loop:
    while (i--) {
        del_mtd_partitions(msp_flash[i]);
        map_destroy(msp_flash[i]);
        kfree(msp_maps[i].name);
        iounmap(msp_maps[i].virt);
        kfree(msp_parts[i]);
    }
    kfree(msp_maps);
free_msp_parts:
    kfree(msp_parts);
free_msp_flash:
    kfree(msp_flash);
    return ret;
}
};
#endif
#endif

#ifdef CONFIG_MFD_SM501
#include <linux/sm501.h>
#include <linux/sm501-regs.h>

#define CONFIG_NXC2600_SM501_ADDR (0x18000000)
#define CONFIG_NXC2600_SM501_MEMSIZE (0x4000000)
#define CONFIG_NXC2600_SM501_IRQ_PIN (89)
#define CONFIG_NXC2600_SM501_IRQ \
    (NXC2600_GPIO_NIRQ(CONFIG_NXC2600_SM501_IRQ_PIN))
#define NXC2600_SM501_MMIO_ADDR_OFFSET (0x4000000-0x200000)

static struct resource nxc2600_sm501_resource[] = {
    [0] = {
        .start = CPHYSADDR(CONFIG_NXC2600_SM501_ADDR),
        .end = CPHYSADDR(CONFIG_NXC2600_SM501_ADDR +
                         CONFIG_NXC2600_SM501_MEMSIZE) - 1,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = CPHYSADDR(CONFIG_NXC2600_SM501_ADDR +
                           NXC2600_SM501_MMIO_ADDR_OFFSET),
        .end = CPHYSADDR(CONFIG_NXC2600_SM501_ADDR +
                         NXC2600_SM501_MMIO_ADDR_OFFSET + 0x200000) - 1,
        .flags = IORESOURCE_MEM,
    },
    [2] = {
        .start = CONFIG_NXC2600_SM501_IRQ,
        .end = CONFIG_NXC2600_SM501_IRQ,
        .flags = IORESOURCE_IRQ,
    },
};
struct prom_pmemblock * __init prom_getmdesc(void)
{
    unsigned int memsize;
    //char cmdline[CL_SIZE], *ptr;
    char *ptr;
    static char cmdline[COMMAND_LINE_SIZE] __initdata;

    mem_resource_start = CPHYSADDR(PFN_ALIGN(&_text));
    mem_resource_end = CPHYSADDR(PFN_ALIGN(&_end));

#if 0
    /* otherwise look in the environment */
    memsize_str = prom_getenv("memsize");
    if (!memsize_str) {
        prom_printf("memsize not set in boot prom, set to default (64Mb)\n");
        physical_memsize = 0x04000000;
    } else {
#ifdef DEBUG
        prom_printf("prom_memsize = %s\n", memsize_str);
#endif
        physical_memsize = simple_strtol(memsize_str, NULL, 0);
    }

#ifdef CONFIG_CPU_BIG_ENDIAN
    /* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
       word of physical memory */
    physical_memsize -= PAGE_SIZE;
#endif
#endif
    physical_memsize = 0x04000000;

    /* Check the command line for a memsize directive that overrides
       the physical/default amount */
    strcpy(cmdline, arcs_cmdline);
    ptr = strstr(cmdline, "memsize=");
    if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
        ptr = strstr(ptr, " memsize=");

    if (ptr)
        memsize = memparse(ptr + 8, &ptr);
    else
        memsize = physical_memsize;

    memset(mdesc, 0, sizeof(mdesc));

    mdesc[0].type = yamon_dontuse;
    mdesc[0].base = 0x00000000;
    mdesc[0].size = 0x00001000;

    mdesc[1].type = yamon_prom;
    mdesc[1].base = 0x00001000;
    mdesc[1].size = 0x000ef000;

    mdesc[2].type = yamon_prom;
    mdesc[2].base = 0x000f0000;
    mdesc[2].size = 0x00010000;

    mdesc[3].type = yamon_dontuse;
    mdesc[3].base = 0x00100000;
    mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
        mdesc[3].base;

#if 0
    mdesc[4].type = yamon_free;
    mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
    mdesc[4].size = memsize - mdesc[4].base;
#else
    // Patch for Samsung
    mdesc[4].type = yamon_free;
    mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
    mdesc[4].size = 0x02000000 - mdesc[4].base;

    mdesc[5].type = yamon_dontuse;
    mdesc[5].base = 0x02000000;
    mdesc[5].size = 0x05000000 - mdesc[5].base;

#if 0
    mdesc[6].type = yamon_free;
    mdesc[6].base = 0x05000000;
    mdesc[6].size = memsize - mdesc[6].base;

    mdesc[6].type = yamon_free;
    mdesc[6].base = 0x30000000;
    mdesc[6].size = 0x10000000;
#endif
#endif

    return &mdesc[0];
}
static struct prom_pmemblock * __init prom_getmdesc(void)
{
    char *memsize_str;
    unsigned int memsize;
    char *ptr;
    static char cmdline[COMMAND_LINE_SIZE] __initdata;

    /* otherwise look in the environment */
    memsize_str = prom_getenv("memsize");
    if (!memsize_str) {
        printk(KERN_WARNING
               "memsize not set in boot prom, set to default (32Mb)\n");
        physical_memsize = 0x02000000;
    } else {
#ifdef DEBUG
        pr_debug("prom_memsize = %s\n", memsize_str);
#endif
        physical_memsize = simple_strtol(memsize_str, NULL, 0);
    }

#ifdef CONFIG_CPU_BIG_ENDIAN
    /* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
       word of physical memory */
    physical_memsize -= PAGE_SIZE;
#endif

    /* Check the command line for a memsize directive that overrides
       the physical/default amount */
    strcpy(cmdline, arcs_cmdline);
    ptr = strstr(cmdline, "memsize=");
    if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
        ptr = strstr(ptr, " memsize=");

    if (ptr)
        memsize = memparse(ptr + 8, &ptr);
    else
        memsize = physical_memsize;

    memset(mdesc, 0, sizeof(mdesc));

    mdesc[0].type = yamon_dontuse;
    mdesc[0].base = 0x00000000;
    mdesc[0].size = 0x00001000;

    mdesc[1].type = yamon_prom;
    mdesc[1].base = 0x00001000;
    mdesc[1].size = 0x000ef000;

    /*
     * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
     * south bridge, PCI access is always forwarded to the ISA bus, and
     * BIOSCS# is always generated.
     * This means that this area can't be used as DMA memory for PCI
     * devices.
     */
    mdesc[2].type = yamon_dontuse;
    mdesc[2].base = 0x000f0000;
    mdesc[2].size = 0x00010000;

    mdesc[3].type = yamon_dontuse;
    mdesc[3].base = 0x00100000;
    mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
        mdesc[3].base;

    mdesc[4].type = yamon_free;
    mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
    mdesc[4].size = memsize - mdesc[4].base;

    return &mdesc[0];
}
static struct resource au1xpsc_psc1_res[] = {
    [0] = {
        .start = CPHYSADDR(PSC1_BASE_ADDR),
        .end = CPHYSADDR(PSC1_BASE_ADDR) + 0x000fffff,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
#ifdef CONFIG_SOC_AU1200
        .start = AU1200_PSC1_INT,
        .end = AU1200_PSC1_INT,
#elif defined(CONFIG_SOC_AU1550)
        .start = AU1550_PSC1_INT,
        .end = AU1550_PSC1_INT,
#endif
        .flags = IORESOURCE_IRQ,
    },
    [2] = {
        .start = DSCR_CMD0_PSC1_TX,
        .end = DSCR_CMD0_PSC1_TX,
        .flags = IORESOURCE_DMA,
    },
    [3] = {
        .start = DSCR_CMD0_PSC1_RX,
        .end = DSCR_CMD0_PSC1_RX,
        .flags = IORESOURCE_DMA,
    },
};
static inline void rc32434_start_tx(struct rc32434_local *lp,
                                    volatile DMAD_t td)
{
    rc32434_start_dma(lp->tx_dma_regs, CPHYSADDR(td));
}
/*
 * Probe for the NAND device.
 */
static int xway_nand_probe(struct platform_device *pdev)
{
    struct xway_nand_data *data;
    struct mtd_info *mtd;
    struct resource *res;
    int err;
    u32 cs;
    u32 cs_flag = 0;

    /* Allocate memory for the device structure (and zero it) */
    data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
                        GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    data->nandaddr = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(data->nandaddr))
        return PTR_ERR(data->nandaddr);

    nand_set_flash_node(&data->chip, pdev->dev.of_node);
    mtd = nand_to_mtd(&data->chip);
    mtd->dev.parent = &pdev->dev;

    data->chip.cmd_ctrl = xway_cmd_ctrl;
    data->chip.dev_ready = xway_dev_ready;
    data->chip.select_chip = xway_select_chip;
    data->chip.write_buf = xway_write_buf;
    data->chip.read_buf = xway_read_buf;
    data->chip.read_byte = xway_read_byte;
    data->chip.chip_delay = 30;

    data->chip.ecc.mode = NAND_ECC_SOFT;
    data->chip.ecc.algo = NAND_ECC_HAMMING;

    platform_set_drvdata(pdev, data);
    nand_set_controller_data(&data->chip, data);

    /* load our CS from the DT. Either we find a valid 1 or default to 0 */
    err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
    if (!err && cs == 1)
        cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;

    /* setup the EBU to run in NAND mode on our base addr */
    ltq_ebu_w32(CPHYSADDR(data->nandaddr)
                | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);

    ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
                | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
                | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);

    ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
                | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
                | cs_flag, EBU_NAND_CON);

    /* Scan to find existence of the device */
    err = nand_scan(mtd, 1);
    if (err)
        return err;

    err = mtd_device_register(mtd, NULL, 0);
    if (err)
        nand_release(mtd);

    return err;
}
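One subtlety worth noting (an observation, not from the source): data->nandaddr comes from devm_ioremap_resource(), and passing it to CPHYSADDR() only works because a MIPS ioremap() of an uncached region in the low 512MB returns a KSEG1 alias rather than a vmalloc mapping. A sketch of the round trip under that assumption, with a hypothetical base address:

/* Assumption: the EBU window lives in the low 512MB, so ioremap()
 * hands back a KSEG1 (uncached) alias and CPHYSADDR() inverts it. */
unsigned long phys = 0x14000000;                  /* hypothetical base */
void __iomem *virt = (void __iomem *)KSEG1ADDR(phys);
/* CPHYSADDR(virt) == phys again */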
/* transmit packet */
static int rc32434_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
    unsigned long flags;
    u32 length;
    DMAD_t td;

    spin_lock_irqsave(&lp->lock, flags);

    td = &lp->td_ring[lp->tx_chain_tail];

    /* stop queue when full, drop pkts if queue already full */
    if (lp->tx_count >= (RC32434_NUM_TDS - 2)) {
        lp->tx_full = 1;

        if (lp->tx_count == (RC32434_NUM_TDS - 2)) {
            netif_stop_queue(dev);
        } else {
            lp->stats.tx_dropped++;
            dev_kfree_skb_any(skb);
            spin_unlock_irqrestore(&lp->lock, flags);
            return 1;
        }
    }

    lp->tx_count++;

    lp->tx_skb[lp->tx_chain_tail] = skb;

    length = skb->len;
    dma_cache_wback((u32)skb->data, skb->len);

    /* Setup the transmit descriptor. */
    dma_cache_inv((u32)td, sizeof(*td));
    td->ca = CPHYSADDR(skb->data);

    if (__raw_readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
        if (lp->tx_chain_status == empty) {
            /* Update tail */
            td->control = DMA_COUNT(length) | DMAD_cof_m | DMAD_iof_m;
            /* Move tail */
            lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;
            /* Write to NDPTR */
            __raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
                         &(lp->tx_dma_regs->dmandptr));
            /* Move head to tail */
            lp->tx_chain_head = lp->tx_chain_tail;
        } else {
            /* Update tail */
            td->control = DMA_COUNT(length) | DMAD_cof_m | DMAD_iof_m;
            /* Link to prev */
            lp->td_ring[(lp->tx_chain_tail - 1) & RC32434_TDS_MASK].control &=
                ~(DMAD_cof_m);
            /* Link to prev */
            lp->td_ring[(lp->tx_chain_tail - 1) & RC32434_TDS_MASK].link =
                CPHYSADDR(td);
            /* Move tail */
            lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;
            /* Write to NDPTR */
            __raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
                         &(lp->tx_dma_regs->dmandptr));
            /* Move head to tail */
            lp->tx_chain_head = lp->tx_chain_tail;
            lp->tx_chain_status = empty;
        }
    } else {
        if (lp->tx_chain_status == empty) {
            /* Update tail */
            td->control = DMA_COUNT(length) | DMAD_cof_m | DMAD_iof_m;
            /* Move tail */
            lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;
            lp->tx_chain_status = filled;
        } else {
            /* Update tail */
            td->control = DMA_COUNT(length) | DMAD_cof_m | DMAD_iof_m;
            /* Link to prev */
            lp->td_ring[(lp->tx_chain_tail - 1) & RC32434_TDS_MASK].control &=
                ~(DMAD_cof_m);
            /* Link to prev */
            lp->td_ring[(lp->tx_chain_tail - 1) & RC32434_TDS_MASK].link =
                CPHYSADDR(td);
            /* Move tail */
            lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;
        }
    }

    dma_cache_wback((u32)td, sizeof(*td));

    dev->trans_start = jiffies;

    spin_unlock_irqrestore(&lp->lock, flags);

    return 0;
}
#ifdef CONFIG_ANDROID_PMEM
#include <linux/android_pmem.h>
#endif
#include <asm/jzsoc.h>
#include <linux/usb/musb.h>

extern void __init board_msc_init(void);

/* OHCI (USB full speed host controller) */
static struct resource jz_usb_ohci_resources[] = {
    [0] = {
        .start = CPHYSADDR(UHC_BASE), // phys addr for ioremap
        .end = CPHYSADDR(UHC_BASE) + 0x10000 - 1,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = IRQ_UHC,
        .end = IRQ_UHC,
        .flags = IORESOURCE_IRQ,
    },
};

/* The dmamask must be set for OHCI to work */
static u64 ohci_dmamask = ~(u32)0;

static struct platform_device jz_usb_ohci_device = {
    .name = "jz-ohci",
/*
 * Initialize the RC32434 ethernet controller.
 */
static int rc32434_init(struct net_device *dev)
{
    struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
    int i, j;

    /* Disable DMA */
    rc32434_abort_tx(dev);
    rc32434_abort_rx(dev);

    /* reset ethernet logic */
    __raw_writel(0, &lp->eth_regs->ethintfc);
    while ((__raw_readl(&lp->eth_regs->ethintfc) & ETHINTFC_rip_m))
        dev->trans_start = jiffies;

    /* Enable Ethernet Interface */
    __raw_writel(ETHINTFC_en_m, &lp->eth_regs->ethintfc);

    tasklet_disable(lp->tx_tasklet);

    /* Initialize the transmit Descriptors */
    for (i = 0; i < RC32434_NUM_TDS; i++) {
        lp->td_ring[i].control = DMAD_iof_m;
        lp->td_ring[i].devcs = ETHTX_fd_m | ETHTX_ld_m;
        lp->td_ring[i].ca = 0;
        lp->td_ring[i].link = 0;
        if (lp->tx_skb[i] != NULL) {
            dev_kfree_skb_any(lp->tx_skb[i]);
            lp->tx_skb[i] = NULL;
        }
    }
    lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
        lp->tx_full = lp->tx_count = 0;
    lp->tx_chain_status = empty;

    /*
     * Initialize the receive descriptors so that they
     * become a circular linked list, ie. let the last
     * descriptor point to the first again.
     */
    for (i = 0; i < RC32434_NUM_RDS; i++) {
        struct sk_buff *skb = lp->rx_skb[i];

        if (lp->rx_skb[i] == NULL) {
            skb = dev_alloc_skb(RC32434_RBSIZE + 2);
            if (skb == NULL) {
                ERR("No memory in the system\n");
                for (j = 0; j < RC32434_NUM_RDS; j++)
                    if (lp->rx_skb[j] != NULL)
                        dev_kfree_skb_any(lp->rx_skb[j]);
                return 1;
            } else {
                skb->dev = dev;
                skb_reserve(skb, 2);
                lp->rx_skb[i] = skb;
                lp->rd_ring[i].ca = CPHYSADDR(skb->data);
            }
        }
        lp->rd_ring[i].control = DMAD_iod_m | DMA_COUNT(RC32434_RBSIZE);
        lp->rd_ring[i].devcs = 0;
        lp->rd_ring[i].ca = CPHYSADDR(skb->data);
        lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
    }
    /* loop back */
    lp->rd_ring[RC32434_NUM_RDS-1].link = CPHYSADDR(&lp->rd_ring[0]);
    lp->rx_next_done = 0;

    lp->rd_ring[RC32434_NUM_RDS-1].control |= DMAD_cod_m;
    lp->rx_chain_head = 0;
    lp->rx_chain_tail = 0;
    lp->rx_chain_status = empty;

    __raw_writel(0, &lp->rx_dma_regs->dmas);
    /* Start Rx DMA */
    rc32434_start_rx(lp, &lp->rd_ring[0]);

    /* Enable F E bit in Tx DMA */
    __raw_writel(__raw_readl(&lp->tx_dma_regs->dmasm) &
                 ~(DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm);
    /* Enable D H E bit in Rx DMA */
    __raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) &
                 ~(DMASM_d_m | DMASM_h_m | DMASM_e_m),
                 &lp->rx_dma_regs->dmasm);

    /* Accept only packets destined for this Ethernet device address */
    __raw_writel(ETHARC_ab_m, &lp->eth_regs->etharc);

    /* Set all Ether station address registers to their initial values */
    __raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
    __raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
    __raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
    __raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
    __raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
    __raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
    __raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
    __raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);

    /* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
    __raw_writel(ETHMAC2_pe_m | ETHMAC2_cen_m | ETHMAC2_fd_m,
                 &lp->eth_regs->ethmac2);
    //ETHMAC2_flc_m ETHMAC2_fd_m lp->duplex_mode

    /* Back to back inter-packet-gap */
    __raw_writel(0x15, &lp->eth_regs->ethipgt);
    /* Non - Back to back inter-packet-gap */
    __raw_writel(0x12, &lp->eth_regs->ethipgr);

    /* Management Clock Prescaler Divisor */
    /* Clock independent setting */
    __raw_writel(((idt_cpu_freq)/MII_CLOCK+1) & ~1,
                 &lp->eth_regs->ethmcp);

    /* don't transmit until fifo contains 48b */
    __raw_writel(48, &lp->eth_regs->ethfifott);

    __raw_writel(ETHMAC1_re_m, &lp->eth_regs->ethmac1);

    napi_enable(&lp->napi);
    tasklet_enable(lp->tx_tasklet);

    netif_start_queue(dev);

    return 0;
}
static void korina_chain_rx(struct korina_private *lp,
                            struct dma_desc *rd)
{
    korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}
static struct net_device *au1000_probe(int port_num)
{
    static unsigned version_printed = 0;
    struct au1000_private *aup = NULL;
    struct net_device *dev = NULL;
    db_dest_t *pDB, *pDBfree;
    char *pmac, *argptr;
    char ethaddr[6];
    int irq, i, err;
    u32 base, macen;

    if (port_num >= NUM_ETH_INTERFACES)
        return NULL;

    base = CPHYSADDR(iflist[port_num].base_addr);
    macen = CPHYSADDR(iflist[port_num].macen_addr);
    irq = iflist[port_num].irq;

    if (!request_mem_region(base, MAC_IOSIZE, "Au1x00 ENET") ||
        !request_mem_region(macen, 4, "Au1x00 ENET"))
        return NULL;

    if (version_printed++ == 0)
        printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);

    dev = alloc_etherdev(sizeof(struct au1000_private));
    if (!dev) {
        printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
        return NULL;
    }

    if ((err = register_netdev(dev)) != 0) {
        printk(KERN_ERR "%s: Cannot register net device, error %d\n",
               DRV_NAME, err);
        free_netdev(dev);
        return NULL;
    }

    printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
           dev->name, base, irq);

    aup = dev->priv;

    /* Allocate the data buffers */
    /* Snooping works fine with eth on all au1xxx */
    aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
                                            (NUM_TX_BUFFS + NUM_RX_BUFFS),
                                            &aup->dma_addr, 0);
    if (!aup->vaddr) {
        free_netdev(dev);
        release_mem_region(base, MAC_IOSIZE);
        release_mem_region(macen, 4);
        return NULL;
    }

    /* aup->mac is the base address of the MAC's registers */
    aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;

    /* Setup some variables for quick register address access */
    aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
    aup->mac_id = port_num;
    au_macs[port_num] = aup;

    if (port_num == 0) {
        /* Check the environment variables first */
        if (get_ethernet_addr(ethaddr) == 0)
            memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
        else {
            /* Check command line */
            argptr = prom_getcmdline();
            if ((pmac = strstr(argptr, "ethaddr=")) == NULL)
                printk(KERN_INFO "%s: No MAC address found\n",
                       dev->name);
            /* Use the hard coded MAC addresses */
            else {
                str2eaddr(ethaddr, pmac + strlen("ethaddr="));
                memcpy(au1000_mac_addr, ethaddr,
                       sizeof(au1000_mac_addr));
            }
        }

        setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
    } else if (port_num == 1)
        setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);

    /*
     * Assign to the Ethernet ports two consecutive MAC addresses
     * to match those that are printed on their stickers
     */
    memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
    dev->dev_addr[5] += port_num;

    *aup->enable = 0;
    aup->mac_enabled = 0;

    aup->mii_bus.priv = dev;
    aup->mii_bus.read = mdiobus_read;
    aup->mii_bus.write = mdiobus_write;
    aup->mii_bus.reset = mdiobus_reset;
    aup->mii_bus.name = "au1000_eth_mii";
    aup->mii_bus.id = aup->mac_id;
    aup->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
    for (i = 0; i < PHY_MAX_ADDR; ++i)
        aup->mii_bus.irq[i] = PHY_POLL;

    /* if known, set corresponding PHY IRQs */
#if defined(AU1XXX_PHY_STATIC_CONFIG)
# if defined(AU1XXX_PHY0_IRQ)
    if (AU1XXX_PHY0_BUSID == aup->mii_bus.id)
        aup->mii_bus.irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
# endif
# if defined(AU1XXX_PHY1_IRQ)
    if (AU1XXX_PHY1_BUSID == aup->mii_bus.id)
        aup->mii_bus.irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
# endif
#endif
    mdiobus_register(&aup->mii_bus);

    if (mii_probe(dev) != 0) {
        goto err_out;
    }

    pDBfree = NULL;
    /* setup the data buffer descriptors and attach a buffer to each one */
    pDB = aup->db;
    for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
        pDB->pnext = pDBfree;
        pDBfree = pDB;
        pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
        pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
        pDB++;
    }
    aup->pDBfree = pDBfree;

    for (i = 0; i < NUM_RX_DMA; i++) {
        pDB = GetFreeDB(aup);
        if (!pDB) {
            goto err_out;
        }
        aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
        aup->rx_db_inuse[i] = pDB;
    }
    for (i = 0; i < NUM_TX_DMA; i++) {
        pDB = GetFreeDB(aup);
        if (!pDB) {
            goto err_out;
        }
        aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
        aup->tx_dma_ring[i]->len = 0;
        aup->tx_db_inuse[i] = pDB;
    }

    spin_lock_init(&aup->lock);
    dev->base_addr = base;
    dev->irq = irq;
    dev->open = au1000_open;
    dev->hard_start_xmit = au1000_tx;
    dev->stop = au1000_close;
    dev->get_stats = au1000_get_stats;
    dev->set_multicast_list = &set_rx_mode;
    dev->do_ioctl = &au1000_ioctl;
    SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
    dev->tx_timeout = au1000_tx_timeout;
    dev->watchdog_timeo = ETH_TX_TIMEOUT;

    /*
     * The boot code uses the ethernet controller, so reset it to start
     * fresh.  au1000_init() expects that the device is in reset state.
     */
    reset_mac(dev);

    return dev;

err_out:
    /* here we should have a valid dev plus aup-> register addresses
     * so we can reset the mac properly. */
    reset_mac(dev);
    for (i = 0; i < NUM_RX_DMA; i++) {
        if (aup->rx_db_inuse[i])
            ReleaseDB(aup, aup->rx_db_inuse[i]);
    }
    for (i = 0; i < NUM_TX_DMA; i++) {
        if (aup->tx_db_inuse[i])
            ReleaseDB(aup, aup->tx_db_inuse[i]);
    }
    dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
                         (void *)aup->vaddr, aup->dma_addr);
    unregister_netdev(dev);
    free_netdev(dev);
    release_mem_region(base, MAC_IOSIZE);
    release_mem_region(macen, 4);
    return NULL;
}
/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct korina_private *lp = netdev_priv(dev);
    unsigned long flags;
    u32 length;
    u32 chain_index;
    struct dma_desc *td;

    spin_lock_irqsave(&lp->lock, flags);

    td = &lp->td_ring[lp->tx_chain_tail];

    /* stop queue when full, drop pkts if queue already full */
    if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
        lp->tx_full = 1;

        if (lp->tx_count == (KORINA_NUM_TDS - 2))
            netif_stop_queue(dev);
        else {
            dev->stats.tx_dropped++;
            dev_kfree_skb_any(skb);
            spin_unlock_irqrestore(&lp->lock, flags);
            return NETDEV_TX_BUSY;
        }
    }

    lp->tx_count++;

    lp->tx_skb[lp->tx_chain_tail] = skb;

    length = skb->len;
    dma_cache_wback((u32)skb->data, skb->len);

    /* Setup the transmit descriptor. */
    dma_cache_inv((u32)td, sizeof(*td));
    td->ca = CPHYSADDR(skb->data);
    chain_index = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;

    if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
        if (lp->tx_chain_status == desc_empty) {
            /* Update tail */
            td->control = DMA_COUNT(length) |
                DMA_DESC_COF | DMA_DESC_IOF;
            /* Move tail */
            lp->tx_chain_tail = chain_index;
            /* Write to NDPTR */
            writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
                   &lp->tx_dma_regs->dmandptr);
            /* Move head to tail */
            lp->tx_chain_head = lp->tx_chain_tail;
        } else {
            /* Update tail */
            td->control = DMA_COUNT(length) |
                DMA_DESC_COF | DMA_DESC_IOF;
            /* Link to prev */
            lp->td_ring[chain_index].control &= ~DMA_DESC_COF;
            /* Link to prev */
            lp->td_ring[chain_index].link = CPHYSADDR(td);
            /* Move tail */
            lp->tx_chain_tail = chain_index;
            /* Write to NDPTR */
            writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
                   &(lp->tx_dma_regs->dmandptr));
            /* Move head to tail */
            lp->tx_chain_head = lp->tx_chain_tail;
            lp->tx_chain_status = desc_empty;
        }
    } else {
        if (lp->tx_chain_status == desc_empty) {
            /* Update tail */
            td->control = DMA_COUNT(length) |
                DMA_DESC_COF | DMA_DESC_IOF;
            /* Move tail */
            lp->tx_chain_tail = chain_index;
            lp->tx_chain_status = desc_filled;
            netif_stop_queue(dev);
        } else {
            /* Update tail */
            td->control = DMA_COUNT(length) |
                DMA_DESC_COF | DMA_DESC_IOF;
            lp->td_ring[chain_index].control &= ~DMA_DESC_COF;
            lp->td_ring[chain_index].link = CPHYSADDR(td);
            lp->tx_chain_tail = chain_index;
        }
    }
    dma_cache_wback((u32)td, sizeof(*td));

    dev->trans_start = jiffies;

    spin_unlock_irqrestore(&lp->lock, flags);

    return NETDEV_TX_OK;
}
static int __init init_msp_flash(void)
{
    int i, j, ret = -ENOMEM;
    int offset, coff;
    char *env;
    int pcnt;
    char flash_name[] = "flash0";
    char part_name[] = "flash0_0";
    unsigned addr, size;

    if ((*DEV_ID_REG & DEV_ID_SINGLE_PC) &&
        (*ELB_1PC_EN_REG & SINGLE_PCCARD)) {
        printk(KERN_NOTICE "Single PC Card mode: no flash access\n");
        return -ENXIO;
    }

    for (fcnt = 0; (env = prom_getenv(flash_name)); fcnt++)
        flash_name[5] = '0' + fcnt + 1;

    if (fcnt < 1)
        return -ENXIO;

    printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);

    msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
    if (!msp_flash)
        return -ENOMEM;

    msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
    if (!msp_parts)
        goto free_msp_flash;

    msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
    if (!msp_maps)
        goto free_msp_parts;

    for (i = 0; i < fcnt; i++) {
        part_name[5] = '0' + i;
        part_name[7] = '0';
        for (pcnt = 0; (env = prom_getenv(part_name)); pcnt++)
            part_name[7] = '0' + pcnt + 1;

        if (pcnt == 0) {
            printk(KERN_NOTICE "Skipping flash device %d "
                   "(no partitions defined)\n", i);
            continue;
        }

        msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition),
                               GFP_KERNEL);
        if (!msp_parts[i])
            goto cleanup_loop;

        flash_name[5] = '0' + i;
        env = prom_getenv(flash_name);

        if (sscanf(env, "%x:%x", &addr, &size) < 2) {
            ret = -ENXIO;
            kfree(msp_parts[i]);
            goto cleanup_loop;
        }
        addr = CPHYSADDR(addr);

        printk(KERN_NOTICE
               "MSP flash device \"%s\": 0x%08x at 0x%08x\n",
               flash_name, size, addr);

        msp_maps[i].size = size;
        msp_maps[i].phys = addr;

        if (size > CONFIG_MSP_FLASH_MAP_LIMIT)
            size = CONFIG_MSP_FLASH_MAP_LIMIT;

        msp_maps[i].virt = ioremap(addr, size);
        if (msp_maps[i].virt == NULL) {
            ret = -ENXIO;
            kfree(msp_parts[i]);
            goto cleanup_loop;
        }

        msp_maps[i].bankwidth = 1;
        msp_maps[i].name = kmalloc(7, GFP_KERNEL);
        if (!msp_maps[i].name) {
            iounmap(msp_maps[i].virt);
            kfree(msp_parts[i]);
            goto cleanup_loop;
        }

        msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);

        for (j = 0; j < pcnt; j++) {
            part_name[5] = '0' + i;
            part_name[7] = '0' + j;

            env = prom_getenv(part_name);

            if (sscanf(env, "%x:%x:%n", &offset, &size, &coff) < 2) {
                ret = -ENXIO;
                kfree(msp_maps[i].name);
                iounmap(msp_maps[i].virt);
                kfree(msp_parts[i]);
                goto cleanup_loop;
            }

            msp_parts[i][j].size = size;
            msp_parts[i][j].offset = offset;
            msp_parts[i][j].name = env + coff;
        }

        simple_map_init(&msp_maps[i]);
        msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
        if (msp_flash[i]) {
            msp_flash[i]->owner = THIS_MODULE;
            add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt);
        } else {
            printk(KERN_ERR "map probe failed for flash\n");
            ret = -ENXIO;
            kfree(msp_maps[i].name);
            iounmap(msp_maps[i].virt);
            kfree(msp_parts[i]);
            goto cleanup_loop;
        }
    }

    return 0;

cleanup_loop:
    while (i--) {
        del_mtd_partitions(msp_flash[i]);
        map_destroy(msp_flash[i]);
        kfree(msp_maps[i].name);
        iounmap(msp_maps[i].virt);
        kfree(msp_parts[i]);
    }
    kfree(msp_maps);
free_msp_parts:
    kfree(msp_parts);
free_msp_flash:
    kfree(msp_flash);
    return ret;
}
static int korina_rx(struct net_device *dev, int limit)
{
    struct korina_private *lp = netdev_priv(dev);
    struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
    struct sk_buff *skb, *skb_new;
    u8 *pkt_buf;
    u32 devcs, pkt_len, dmas, rx_free_desc;
    int count;

    dma_cache_inv((u32)rd, sizeof(*rd));

    for (count = 0; count < limit; count++) {
        devcs = rd->devcs;

        /* Update statistics counters */
        if (devcs & ETH_RX_CRC)
            dev->stats.rx_crc_errors++;
        if (devcs & ETH_RX_LOR)
            dev->stats.rx_length_errors++;
        if (devcs & ETH_RX_LE)
            dev->stats.rx_length_errors++;
        if (devcs & ETH_RX_OVR)
            dev->stats.rx_over_errors++;
        if (devcs & ETH_RX_CV)
            dev->stats.rx_frame_errors++;
        if (devcs & ETH_RX_CES)
            dev->stats.rx_length_errors++;
        if (devcs & ETH_RX_MP)
            dev->stats.multicast++;

        if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
            /* check that this is a whole packet
             * WARNING: DMA_FD bit incorrectly set
             * in Rc32434 (errata ref #077) */
            dev->stats.rx_errors++;
            dev->stats.rx_dropped++;
        }

        while ((rx_free_desc = KORINA_RBSIZE -
                (u32)DMA_COUNT(rd->control)) != 0) {
            /* init the var. used for the later
             * operations within the while loop */
            skb_new = NULL;
            pkt_len = RCVPKT_LENGTH(devcs);
            skb = lp->rx_skb[lp->rx_next_done];

            if ((devcs & ETH_RX_ROK)) {
                /* must be the (first and) last
                 * descriptor then */
                pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

                /* invalidate the cache */
                dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

                /* Malloc up new buffer. */
                skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
                if (!skb_new)
                    break;
                /* Do not count the CRC */
                skb_put(skb, pkt_len - 4);
                skb->protocol = eth_type_trans(skb, dev);

                /* Pass the packet to upper layers */
                netif_receive_skb(skb);
                dev->last_rx = jiffies;
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += pkt_len;

                /* Update the mcast stats */
                if (devcs & ETH_RX_MP)
                    dev->stats.multicast++;

                lp->rx_skb[lp->rx_next_done] = skb_new;
            }

            rd->devcs = 0;

            /* Restore descriptor's curr_addr */
            if (skb_new)
                rd->ca = CPHYSADDR(skb_new->data);
            else
                rd->ca = CPHYSADDR(skb->data);

            rd->control = DMA_COUNT(KORINA_RBSIZE) |
                DMA_DESC_COD | DMA_DESC_IOD;
            lp->rd_ring[(lp->rx_next_done - 1) &
                KORINA_RDS_MASK].control &= ~DMA_DESC_COD;

            lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
            dma_cache_wback((u32)rd, sizeof(*rd));
            rd = &lp->rd_ring[lp->rx_next_done];
            writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
        }
    }

    dmas = readl(&lp->rx_dma_regs->dmas);

    if (dmas & DMA_STAT_HALT) {
        writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
               &lp->rx_dma_regs->dmas);

        lp->dma_halt_cnt++;
        rd->devcs = 0;
        skb = lp->rx_skb[lp->rx_next_done];
        rd->ca = CPHYSADDR(skb->data);
        dma_cache_wback((u32)rd, sizeof(*rd));
        korina_chain_rx(lp, rd);
    }

    return count;
}
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
    sp->SCp.have_data_in = vdma_alloc(CPHYSADDR(sp->SCp.buffer),
                                      sp->SCp.this_residual);
    sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in);
}
/*
 * Erase flash sectors, returns:
 * ERR_OK - OK
 * ERR_INVAL - invalid sector arguments
 * ERR_TIMOUT - write timeout
 * ERR_NOT_ERASED - Flash not erased
 * ERR_UNKNOWN_FLASH_VENDOR - incorrect flash
 */
int flash_erase(flash_info_t *info, int s_first, int s_last)
{
    ulong sect_start, sect_end, flags;
    int prot, sect;
    int rc;

    if ((info->flash_id & FLASH_VENDMASK) != FLASH_MAN_MCHP) {
        printf("Can't erase unknown flash type %08lx - aborted\n",
               info->flash_id);
        return ERR_UNKNOWN_FLASH_VENDOR;
    }

    if ((s_first < 0) || (s_first > s_last)) {
        printf("- no sectors to erase\n");
        return ERR_INVAL;
    }

    prot = 0;
    for (sect = s_first; sect <= s_last; ++sect) {
        if (info->protect[sect])
            prot++;
    }

    if (prot)
        printf("- Warning: %d protected sectors will not be erased!\n",
               prot);
    else
        printf("\n");

    /* erase on unprotected sectors */
    for (sect = s_first; sect <= s_last; sect++) {
        if (info->protect[sect])
            continue;

        /* disable interrupts */
        flags = disable_interrupts();

        /* write destination page address (physical) */
        sect_start = CPHYSADDR(info->start[sect]);
        writel(sect_start, &nvm_regs_p->addr.raw);

        /* page erase */
        flash_initiate_operation(NVMOP_PAGE_ERASE);

        /* wait */
        rc = flash_wait_till_busy(__func__,
                                  CONFIG_SYS_FLASH_ERASE_TOUT);

        /* re-enable interrupts if necessary */
        if (flags)
            enable_interrupts();

        if (rc != ERR_OK)
            return rc;

        rc = flash_complete_operation();
        if (rc != ERR_OK)
            return rc;

        /*
         * flash content is updated but cache might contain stale
         * data, so invalidate dcache.
         */
        sect_end = info->start[sect] + info->size / info->sector_count;
        invalidate_dcache_range(info->start[sect], sect_end);
    }

    printf(" done\n");
    return ERR_OK;
}