void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* if any page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0 ||
	    (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG))) &
		 M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
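/*
 * A minimal, self-contained sketch of what the KSEGX()/CPHYSADDR() tests used
 * throughout these functions rely on.  The macro values below are reproduced
 * here only for illustration (they follow the standard 32-bit definitions in
 * <asm/addrspace.h>); the ILL_* names are local to this example, and the
 * 64-bit CKSEG0/CKSEG1 forms used in copy_page() above differ from these.
 */
#include <stdio.h>
#include <stdint.h>

#define ILL_KUSEG 0x00000000u			/* user, TLB-mapped      */
#define ILL_KSEG0 0x80000000u			/* unmapped, cached      */
#define ILL_KSEG1 0xa0000000u			/* unmapped, uncached    */
#define ILL_KSEG2 0xc0000000u			/* kernel, TLB-mapped    */

#define ILL_KSEGX(a)     ((uint32_t)(a) & 0xe0000000u)	/* segment of an address  */
#define ILL_CPHYSADDR(a) ((uint32_t)(a) & 0x1fffffffu)	/* strip the segment bits */

int main(void)
{
	uint32_t addrs[] = { 0x80123000u, 0xa0123000u, 0xc0045000u, 0x00400000u };

	for (unsigned i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		uint32_t a = addrs[i];

		/* 0x80123000 and 0xa0123000 resolve to the same physical 0x00123000 */
		printf("%08x: segment %08x, CPHYSADDR %08x\n",
		       a, ILL_KSEGX(a), ILL_CPHYSADDR(a));
	}
	return 0;
}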
/*******************************************************************
 * vflash_read_buf:
 *   create a request message and send it via do_rpc_io
 *   do_rpc_io waits for response or timeout
 ******************************************************************/
static int vflash_read_buf(int partition, int offset, u_char *buffer,
			   int numbytes)
{
	ItcRpcMsg req;
	u_char *vmallocated_buf = NULL;
	int ret, is_buf_vmallocated;
#if defined(CONFIG_MTD_BRCMNAND)
	uint8 *ecc_buf;
	uint8 *data_buf;
#endif

	/* VMallocated (mmu translated) memory can't be used by the eCos CPU */
	is_buf_vmallocated = KSEGX(buffer) == KSEG2;
	if (is_buf_vmallocated) {
		vmallocated_buf = buffer;
		buffer = kmalloc(numbytes, GFP_KERNEL);
		if (!buffer)
			return -EINVAL;
	}

	// Construct a request message
	memset((void *)&req, 0, sizeof(req));
	req.dev_func = DEV_FUNC(REMOTE_FLASH_DEVICE_ID, REMOTE_READ,
				partition, numbytes);
	req.xid = read_c0_count();
	req.u0 = (uint32)buffer;
	req.u1 = offset;

#if !defined(CONFIG_MTD_BRCMNAND)
	bcm_cache_inv((uint32)buffer, (uint32)numbytes);
#else
	data_buf = (uint8 *)(((BufArray *)buffer)->data_buf);
	if (data_buf)
		bcm_cache_inv((uint32)data_buf, (uint32)numbytes);

	ecc_buf = (uint8 *)(((BufArray *)buffer)->ecc_stat_buf);
	if (ecc_buf)
		bcm_cache_inv((uint32)ecc_buf, (uint32)ecc_stat_buf_len);

	bcm_cache_wback_inv((uint32)buffer, (uint32)sizeof(BufArray));
#endif

#if DEBUG_DQM_IO
	printk("%s partition %d offset %08x buffer %p size %d\n",
	       __func__, partition, offset, buffer, numbytes);
#endif

	ret = do_rpc_io(&req);

	if (is_buf_vmallocated) {
		memcpy(vmallocated_buf, buffer, numbytes);
		kfree(buffer);
	}

	return ret;
}
static void *pcu_dma_tasklet_read_get_data(uint32_t virtual_addr_buffer,
					   uint32_t pcu_dma_len)
{
	__sync();

	if (KSEGX(virtual_addr_buffer) != KSEG0) {
		/* The DMA landed in pcu_dma_buf; copy it to the caller's buffer */
		memcpy((void *)virtual_addr_buffer, pcu_dma_buf, pcu_dma_len);
	}
	/* else the data is already in virtual_addr_buffer */

	return (void *)virtual_addr_buffer;
}
static int pcu_dma_tasklet_read(uint32_t virtual_addr_buffer,
				L_OFF_T external_physical_device_address,
				uint32_t pcu_dma_len)
{
	uint32_t phys_mem;
	int ret = 0;
	unsigned long flags;

	if (KSEGX(virtual_addr_buffer) == KSEG0) {
		/* Unmapped, cached buffer: DMA straight into it */
		dma_cache_inv(virtual_addr_buffer, pcu_dma_len);
		phys_mem = virt_to_phys((void *)virtual_addr_buffer);
	} else {
		/* Mapped buffer: DMA into the bounce buffer instead */
		dma_cache_inv((unsigned long)pcu_dma_buf, pcu_dma_len);
		phys_mem = virt_to_phys((void *)pcu_dma_buf);
	}

	spin_lock_irqsave(&gPcuDmaIsrData.lock, flags);

	gPcuDmaIsrData.flashAddr = __ll_low(external_physical_device_address);
	gPcuDmaIsrData.dramAddr = phys_mem;

	/*
	 * Enable L2 Interrupt
	 */
	gPcuDmaIsrData.cmd = PCU_DMA_READ;
	gPcuDmaIsrData.opComplete = 0;
	gPcuDmaIsrData.status = 0;

	gPcuDmaIsrData.mask = PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK;
	gPcuDmaIsrData.expect = PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK;
	gPcuDmaIsrData.error = 0;	/* no DMA error is reported; check NAND controller status while processing */
	gPcuDmaIsrData.intr = PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK;	/* write back 1 to clear */

	spin_unlock_irqrestore(&gPcuDmaIsrData.lock, flags);

	PCU_DMA_CLRI();
	ISR_enable_irq();

	/* Issue command and return: interrupts will be handled from the tasklet */
	pcu_dma_issue_command(phys_mem, external_physical_device_address,
			      PCU_DMA_READ, pcu_dma_len);

	return ret;
}
static int pcu_dma_tasklet_write(uint32_t virtual_addr_buffer,
				 L_OFF_T external_physical_device_address,
				 uint32_t pcu_dma_len)
{
	uint32_t phys_mem;
	int ret = 0;
	unsigned long flags;

	if (KSEGX(virtual_addr_buffer) == KSEG0) {
		/* Unmapped, cached buffer: DMA straight out of it */
		phys_mem = virt_to_phys((void *)virtual_addr_buffer);
		dma_cache_wback(virtual_addr_buffer, pcu_dma_len);
	} else {
		/* Mapped buffer: stage the data through the bounce buffer */
		phys_mem = virt_to_phys((void *)pcu_dma_buf);
		memcpy(pcu_dma_buf, (void *)virtual_addr_buffer, pcu_dma_len);
		dma_cache_wback((unsigned long)pcu_dma_buf, pcu_dma_len);
	}

	spin_lock_irqsave(&gPcuDmaIsrData.lock, flags);

	gPcuDmaIsrData.flashAddr = __ll_low(external_physical_device_address);
	gPcuDmaIsrData.dramAddr = phys_mem;

	gPcuDmaIsrData.cmd = PCU_DMA_WRITE;
	gPcuDmaIsrData.opComplete = 0;
	gPcuDmaIsrData.status = 0;

	/* On write we wait for both DMA done|error and Flash Status */
	gPcuDmaIsrData.mask = PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK;
	gPcuDmaIsrData.expect = PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK;
	gPcuDmaIsrData.error = 0;	/* no error indication */
	gPcuDmaIsrData.intr = PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK;	/* write back 1 to clear */

	spin_unlock_irqrestore(&gPcuDmaIsrData.lock, flags);

	/*
	 * Enable L2 Interrupt
	 */
	PCU_DMA_CLRI();
	ISR_enable_irq();

	pcu_dma_issue_command(phys_mem, external_physical_device_address,
			      PCU_DMA_WRITE, pcu_dma_len);

	/* Do not wait for completion here; that is done in brcmnand35xxx_base.c */
	return ret;
}
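/*
 * The two tasklet paths above do the cache maintenance by hand:
 * dma_cache_inv() before a device-to-memory transfer and dma_cache_wback()
 * before a memory-to-device transfer.  Below is a minimal sketch of the same
 * pattern expressed with the portable streaming-DMA API rather than the
 * driver's raw cache calls; "dev" and issue_dma_read() are placeholders for
 * illustration, not part of the driver above.
 */
#include <linux/dma-mapping.h>

extern void issue_dma_read(dma_addr_t handle, size_t len);	/* hypothetical: start the transfer */

static int example_dma_read(struct device *dev, void *buf, size_t len)
{
	/* Make the buffer safe for the device to write: on MIPS this
	 * invalidates (or writes back and invalidates) the cached lines. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	issue_dma_read(handle, len);
	/* ... wait for completion ... */

	/* Make the device's writes visible to the CPU again */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}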
/*******************************************************************
 * vflash_write_buf:
 *   create a request message and send it via do_rpc_io
 *   do_rpc_io waits for response or timeout
 ******************************************************************/
static int vflash_write_buf(int partition, int offset, u_char *buffer,
			    int numbytes)
{
	ItcRpcMsg req;
	u_char *vmallocated_buf = NULL;
	int ret, is_buf_vmallocated;

	/* VMallocated (mmu translated) memory can't be used by the eCos CPU */
	is_buf_vmallocated = KSEGX(buffer) == KSEG2;
	if (is_buf_vmallocated) {
		vmallocated_buf = buffer;
		buffer = kmalloc(numbytes, GFP_KERNEL);
		if (!buffer)
			return -EINVAL;
		memcpy(buffer, vmallocated_buf, numbytes);
	}

	// Construct a request message
	memset((void *)&req, 0, sizeof(req));
	req.dev_func = DEV_FUNC(REMOTE_FLASH_DEVICE_ID, REMOTE_WRITE,
				partition, numbytes);
	req.xid = read_c0_count();
	req.u0 = (uint32)buffer;
	req.u1 = offset;

	bcm_cache_wback_inv((uint32)buffer, (uint32)numbytes);

#if DEBUG_DQM_IO
	printk("%s partition %d offset %08x buffer %p size %d\n",
	       __func__, partition, offset, buffer, numbytes);
#endif

	ret = do_rpc_io(&req);

	if (is_buf_vmallocated)
		kfree(buffer);

	return ret;
}
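/*
 * Both vflash_read_buf() and vflash_write_buf() use the same bounce-buffer
 * pattern: a caller buffer that lives in KSEG2 (vmalloc space, reached
 * through the TLB) is replaced by a kmalloc'd buffer, which on 32-bit MIPS
 * typically comes from lowmem and is addressable through KSEG0 without a
 * TLB entry.  A minimal, driver-independent sketch of that pattern follows;
 * do_transfer() and bounce_if_mapped() are hypothetical names, not part of
 * the driver above.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/addrspace.h>

extern int do_transfer(void *buf, size_t len);	/* hypothetical consumer that needs an unmapped buffer */

static int bounce_if_mapped(void *caller_buf, size_t len, int is_write)
{
	void *buf = caller_buf;
	int bounced = KSEGX(caller_buf) == KSEG2;
	int ret;

	if (bounced) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (is_write)			/* outgoing data: copy in first */
			memcpy(buf, caller_buf, len);
	}

	ret = do_transfer(buf, len);

	if (bounced) {
		if (!is_write && ret == 0)	/* incoming data: copy back out */
			memcpy(caller_buf, buf, len);
		kfree(buf);
	}
	return ret;
}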
u32 zephyr_to_viper_addr(u32 zephyr_addr)
{
	int i;
	u32 ubus, memc, size;
	u32 zephyr_phys = CPHYSADDR(zephyr_addr);
	u32 kseg = KSEGX(zephyr_addr);
	addr_trans *att = &addr_trans_table[0];

	for (i = 0; i < NUM_ATWS; i++) {
		memc = att->memc_base;
		ubus = att->ubus_base;
		size = att->size;

		/* Rebase within this window, keeping the original KSEG bits */
		if ((zephyr_phys >= memc) && (zephyr_phys < (memc + size)))
			return kseg | (ubus + (zephyr_phys - memc));

		att++;
	}

	printk("%s: Unable to translate %08x phys %08x\n",
	       __func__, zephyr_addr, zephyr_phys);

	return zephyr_addr;
}
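/*
 * A minimal, self-contained sketch of the windowed translation that
 * zephyr_to_viper_addr() performs: if the physical part of the address falls
 * inside a window, it is rebased from the memc side onto the ubus side and
 * the original KSEG bits are put back on top.  The EX_* names and the window
 * values below are made-up example data, not the real addr_trans_table
 * contents.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_KSEGX(a)     ((uint32_t)(a) & 0xe0000000u)
#define EX_CPHYSADDR(a) ((uint32_t)(a) & 0x1fffffffu)

struct ex_window { uint32_t memc_base, ubus_base, size; };

static const struct ex_window ex_table[] = {
	{ 0x00000000u, 0x10000000u, 0x08000000u },	/* example window 0 */
	{ 0x08000000u, 0x18000000u, 0x04000000u },	/* example window 1 */
};

static uint32_t ex_translate(uint32_t addr)
{
	uint32_t phys = EX_CPHYSADDR(addr);
	uint32_t kseg = EX_KSEGX(addr);

	for (unsigned i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++) {
		const struct ex_window *w = &ex_table[i];

		if (phys >= w->memc_base && phys < w->memc_base + w->size)
			return kseg | (w->ubus_base + (phys - w->memc_base));
	}
	return addr;	/* no window matched: hand the address back unchanged */
}

int main(void)
{
	/* 0x80123000 -> phys 0x00123000, window 0 -> 0x10123000, KSEG0 bits -> 0x90123000 */
	printf("%08x -> %08x\n", 0x80123000u, ex_translate(0x80123000u));
	return 0;
}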
int mips32_pracc_write_mem(struct mips_ejtag *ejtag_info, uint32_t addr,
			   int size, int count, void *buf)
{
	int retval;

	switch (size) {
	case 1:
		retval = mips32_pracc_write_mem8(ejtag_info, addr, count, (uint8_t *)buf);
		break;
	case 2:
		retval = mips32_pracc_write_mem16(ejtag_info, addr, count, (uint16_t *)buf);
		break;
	case 4:
		if (count == 1)
			retval = mips32_pracc_write_u32(ejtag_info, addr, (uint32_t *)buf);
		else
			retval = mips32_pracc_write_mem32(ejtag_info, addr, count, (uint32_t *)buf);
		break;
	default:
		retval = ERROR_FAIL;
	}

	/**
	 * If we are in a cacheable region and the cache is activated,
	 * we must clean D$ and invalidate I$ after the write, so that the
	 * changes do not live only in D$ but are replicated to I$ as well
	 * (we may have written instructions).
	 */
	uint32_t conf = 0;
	int cached = 0;

	mips32_cp0_read(ejtag_info, &conf, 16, 0);

	switch (KSEGX(addr)) {
	case KUSEG:
		cached = (conf & MIPS32_CONFIG0_KU_MASK) >> MIPS32_CONFIG0_KU_SHIFT;
		break;
	case KSEG0:
		cached = (conf & MIPS32_CONFIG0_K0_MASK) >> MIPS32_CONFIG0_K0_SHIFT;
		break;
	case KSEG1:
		/* uncacheable segment - nothing to do */
		break;
	case KSEG2:
	case KSEG3:
		cached = (conf & MIPS32_CONFIG0_K23_MASK) >> MIPS32_CONFIG0_K23_SHIFT;
		break;
	default:
		/* what ? */
		break;
	}

	/**
	 * Check the cacheability bits (coherency algorithm):
	 * is the region cacheable or uncached?
	 * If it is cacheable we have to synchronize the caches.
	 */
	if (cached == 0x3) {	/* CCA 3: cacheable, noncoherent, write-back */
		uint32_t start_addr, end_addr;
		uint32_t rel;

		start_addr = addr;
		end_addr = addr + count * size;

		/** select the cache synchronisation mechanism based on the Architecture Release */
		rel = (conf & MIPS32_CONFIG0_AR_MASK) >> MIPS32_CONFIG0_AR_SHIFT;
		switch (rel) {
		case MIPS32_ARCH_REL1:
			/* MIPS32/64 Release 1 - we must use the cache instruction */
			mips32_pracc_clean_invalidate_cache(ejtag_info, start_addr, end_addr);
			break;
		case MIPS32_ARCH_REL2:
			/* MIPS32/64 Release 2 - we can use the synci instruction */
			mips32_pracc_sync_cache(ejtag_info, start_addr, end_addr);
			break;
		default:
			/* what ? */
			break;
		}
	}

	return retval;
}
static int dec_kn01_be_backend(struct pt_regs *regs, int is_fixup, int invoker)
{
	volatile u32 *kn01_erraddr = (void *)CKSEG1ADDR(KN01_SLOT_BASE +
							KN01_ERRADDR);

	static const char excstr[] = "exception";
	static const char intstr[] = "interrupt";
	static const char cpustr[] = "CPU";
	static const char mreadstr[] = "memory read";
	static const char readstr[] = "read";
	static const char writestr[] = "write";
	static const char timestr[] = "timeout";
	static const char paritystr[] = "parity error";

	int data = regs->cp0_cause & 4;
	unsigned int __user *pc = (unsigned int __user *)regs->cp0_epc +
				  ((regs->cp0_cause & CAUSEF_BD) != 0);
	union mips_instruction insn;
	unsigned long entrylo, offset;
	long asid, entryhi, vaddr;

	const char *kind, *agent, *cycle, *event;
	unsigned long address;

	u32 erraddr = *kn01_erraddr;

	int action = MIPS_BE_FATAL;

	/* Ack ASAP, so that any subsequent errors get caught. */
	dec_kn01_be_ack();

	kind = invoker ? intstr : excstr;

	agent = cpustr;

	if (invoker)
		address = erraddr;
	else {
		/* Bloody hardware doesn't record the address for reads... */
		if (data) {
			/* This never faults. */
			__get_user(insn.word, pc);
			vaddr = regs->regs[insn.i_format.rs] +
				insn.i_format.simmediate;
		} else
			vaddr = (long)pc;

		if (KSEGX(vaddr) == CKSEG0 || KSEGX(vaddr) == CKSEG1)
			address = CPHYSADDR(vaddr);
		else {
			/* Peek at what physical address the CPU used. */
			asid = read_c0_entryhi();
			entryhi = asid & (PAGE_SIZE - 1);
			entryhi |= vaddr & ~(PAGE_SIZE - 1);
			write_c0_entryhi(entryhi);
			BARRIER;
			tlb_probe();
			/* No need to check for presence. */
			tlb_read();
			entrylo = read_c0_entrylo0();
			write_c0_entryhi(asid);

			offset = vaddr & (PAGE_SIZE - 1);
			address = (entrylo & ~(PAGE_SIZE - 1)) | offset;
		}
	}

	/* Treat low 256MB as memory, high -- as I/O. */
	if (address < 0x10000000) {
		cycle = mreadstr;
		event = paritystr;
	} else {
		cycle = invoker ? writestr : readstr;
		event = timestr;
	}

	if (is_fixup)
		action = MIPS_BE_FIXUP;

	if (action != MIPS_BE_FIXUP)
		printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
		       kind, agent, cycle, event, address);

	return action;
}