void memcpy(void *target, const void *source, size_t len)
{
    int ch = DMA_CHANNEL;
    unsigned char *dp;
    const unsigned char *sp;
    size_t i;

    /* Small copies are cheaper without the DMA setup overhead. */
    if (len < 4) {
        _memcpy(target, source, len);
        return;
    }

    /* Write back & invalidate cached (KSEG0) buffers before DMA. */
    if (((unsigned int)source < 0xa0000000) && len)
        dma_cache_wback_inv((unsigned long)source, len);
    if (((unsigned int)target < 0xa0000000) && len)
        dma_cache_wback_inv((unsigned long)target, len);

    /* No-descriptor, auto-request transfer of len/4 words; both the
     * source and destination addresses must increment (SAI | DAI). */
    REG_DMAC_DSAR(ch) = PHYSADDR((unsigned long)source);
    REG_DMAC_DTAR(ch) = PHYSADDR((unsigned long)target);
    REG_DMAC_DTCR(ch) = len / 4;
    REG_DMAC_DRSR(ch) = DMAC_DRSR_RS_AUTO;
    REG_DMAC_DCMD(ch) = DMAC_DCMD_SAI | DMAC_DCMD_DAI |
                        DMAC_DCMD_SWDH_32 | DMAC_DCMD_DWDH_32 |
                        DMAC_DCMD_DS_32BIT;
    REG_DMAC_DCCSR(ch) = DMAC_DCCSR_EN | DMAC_DCCSR_NDES;

    /* Busy-wait until the transfer count drains to zero. */
    while (REG_DMAC_DTCR(ch))
        ;

    /* Copy the unaligned tail (up to 3 bytes) by hand. */
    if (len % 4) {
        dp = (unsigned char *)target + (len & ~(4 - 1));
        sp = (const unsigned char *)source + (len & ~(4 - 1));
        for (i = 0; i < (len % 4); i++)
            *dp++ = *sp++;
    }
}

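Every snippet in this listing funnels a kernel virtual address through PHYSADDR() before handing it to hardware. A minimal sketch of the common 32-bit MIPS definition, assuming KSEG0/KSEG1 semantics (the real macro lives in the platform's <asm/addrspace.h>, so treat this as illustrative):

/*
 * KSEG0 (0x80000000..., cached) and KSEG1 (0xa0000000..., uncached)
 * both window the low 512MB of physical memory, so clearing the top
 * three address bits recovers the physical address from either one.
 */
#ifndef PHYSADDR
#define PHYSADDR(a)    ((unsigned long)(a) & 0x1fffffff)
#endif

This also explains the recurring `(unsigned int)ptr < 0xa0000000` tests in these DMA routines: addresses below KSEG1 are cached mappings and need an explicit writeback/invalidate before a device reads or writes the underlying memory.
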
void memset16(void *target, unsigned short c, size_t len)
{
    int ch = DMA_CHANNEL;
    unsigned short d;
    unsigned short *dp;
    size_t i;

    /* len is in bytes (DTCR below counts 32-byte units); short fills
     * are cheaper without the DMA setup overhead. */
    if (len < 32)
        _memset16(target, c, len);
    else {
        if (((unsigned int)target < 0xa0000000) && len)
            dma_cache_wback_inv((unsigned long)target, len);

        /* The channel re-reads the fill value from &d on every beat;
         * there is no SAI flag, so the source address stays fixed. */
        d = c;
        REG_DMAC_DSAR(ch) = PHYSADDR((unsigned long)&d);
        REG_DMAC_DTAR(ch) = PHYSADDR((unsigned long)target);
        REG_DMAC_DTCR(ch) = len / 32;
        REG_DMAC_DRSR(ch) = DMAC_DRSR_RS_AUTO;
        REG_DMAC_DCMD(ch) = DMAC_DCMD_DAI | DMAC_DCMD_SWDH_16 |
                            DMAC_DCMD_DWDH_16 | DMAC_DCMD_DS_32BYTE;
        REG_DMAC_DCCSR(ch) = DMAC_DCCSR_EN | DMAC_DCCSR_NDES;

        /* Busy-wait until the transfer count drains to zero. */
        while (REG_DMAC_DTCR(ch))
            ;

        /* Fill the tail past the last whole 32-byte burst by hand
         * (a trailing odd byte cannot be expressed in halfwords). */
        if (len % 32) {
            dp = (unsigned short *)((unsigned int)target +
                                    (len & ~(32 - 1)));
            for (i = 0; i < (len % 32) / 2; i++)
                *dp++ = c;
        }
    }
}

void cfe_arena_init(void)
{
    uint64_t memlo, memhi;

    /*
     * This macro expands via cpu_config.h to an appropriate function
     * name for creating an empty arena appropriately for our CPU
     */
    CPUCFG_ARENAINIT();

    /*
     * Round the area used by the firmware to a page boundary and
     * mark it in use
     */
    memhi = PHYSADDR((mem_topofmem + 4095) & ~4095);
    memlo = PHYSADDR(mem_bottomofmem) & ~4095;

    ARENA_RANGE(memlo, memhi - 1, MEMTYPE_DRAM_USEDBYFIRMWARE);

    /*
     * Create the initial page table
     */
    cfe_bootarea_init();
}

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
                     volatile struct sgiseeq_regs *sregs)
{
    volatile struct hpc3_ethregs *hregs = sp->hregs;
    int err;

    reset_hpc3_and_seeq(hregs, sregs);
    err = seeq_init_ring(dev);
    if (err)
        return err;

    /* Setup to field the proper interrupt types. */
    if (sp->is_edlc) {
        sregs->tstat = TSTAT_INIT_EDLC;
        sregs->rw.wregs.control = sp->control;
        sregs->rw.wregs.frame_gap = 0;
    } else {
        sregs->tstat = TSTAT_INIT_SEEQ;
    }

    hregs->rx_dconfig |= RDMACFG_INIT;
    hregs->rx_ndptr = PHYSADDR(&sp->srings.rx_desc[0]);
    hregs->tx_ndptr = PHYSADDR(&sp->srings.tx_desc[0]);

    seeq_go(sp, hregs, sregs);
    return 0;
}

int _ra_nor_dma_pull(char *dst, char *src, int len)
{
    int ret = 0;

    // FIXME: take care of alignment issues
    while (len > 0) {
        // limitation is the DMA buffer: at most (64KB - 4) per transfer
        int size = (len > ((1 << 16) - 4)) ? ((1 << 16) - 4) : len;

        // set GDMA
        _set_gdma_ch(PHYSADDR(dst), PHYSADDR(src), size, BURST_SIZE_32B,
                     SW_MODE, DMA_REQMEM, DMA_REQMEM, TRN_INC, TRN_INC);

        // start and wait dma done
        if (_nand_dma_sync()) {
            printk("%s: gdma: fail, dst:%p, len:%x\n", __func__, dst, len);
            ret = -1;
        }

        // disable dma
        _release_dma_buf();

        len -= size;
        dst += size;
        src += size;
    }

    return ret;
}

int do_bootm_linux(int flag, int argc, char *argv[], bootm_headers_t *images)
{
    void (*theKernel)(int magic, void *tagtable);
    struct tag *params, *params_start;
    char *commandline = getenv("bootargs");

    theKernel = (void *)images->ep;

    show_boot_progress(15);

    params = params_start = (struct tag *)gd->bd->bi_boot_params;
    params = setup_start_tag(params);
    params = setup_memory_tags(params);
    if (images->rd_start) {
        params = setup_ramdisk_tag(params,
                                   PHYSADDR(images->rd_start),
                                   PHYSADDR(images->rd_end));
    }
    params = setup_commandline_tag(params, commandline);
    params = setup_clock_tags(params);
    params = setup_ethernet_tags(params);
    setup_end_tag(params);

    printf("\nStarting kernel at %p (params at %p)...\n\n",
           theKernel, params_start);

    prepare_to_boot();

    theKernel(ATAG_MAGIC, params_start);
    /* does not return */

    return 1;
}

void dma_copy_nowait(void *tar, void *src, int size)
{
    int timeout = 0x1000000;

    /* Wait for any previous transfer on the channel to terminate. */
    while ((!(INREG32(A_DMA_DCS(DMA_CPY_CHANNEL)) & DCS_TT)) && (timeout--))
        ;

    CLRREG32(A_DMA_DCS(DMA_CPY_CHANNEL), DCS_CTE);

    OUTREG32(A_DMA_DSA(DMA_CPY_CHANNEL), PHYSADDR((unsigned long)src));
    OUTREG32(A_DMA_DTA(DMA_CPY_CHANNEL), PHYSADDR((unsigned long)tar));
    OUTREG32(A_DMA_DTC(DMA_CPY_CHANNEL), size / 32);
    OUTREG32(A_DMA_DRT(DMA_CPY_CHANNEL), DRT_AUTO);
    OUTREG32(A_DMA_DCM(DMA_CPY_CHANNEL),
             (DCM_SAI | DCM_DAI | DCM_SP_32BIT | DCM_DP_32BIT |
              DCM_TSZ_32BYTE));

    /* Clear the transfer-terminated flag, then kick the channel off in
     * no-descriptor mode without waiting for completion. */
    CLRREG32(A_DMA_DCS(DMA_CPY_CHANNEL), DCS_TT);
    SETREG32(A_DMA_DCS(DMA_CPY_CHANNEL), DCS_CTE | DCS_NDES);
}

void dma_start(int ch, unsigned int srcAddr, unsigned int dstAddr,
               unsigned int count)
{
    /* set_dma_addr */
    REG_DMAC_DSAR(ch) = PHYSADDR(srcAddr);
    REG_DMAC_DDAR(ch) = PHYSADDR(dstAddr);

    /* set_dma_count */
    REG_DMAC_DTCR(ch) = count / dma_unit_size[ch];

    /* enable_dma */
    REG_DMAC_DCCSR(ch) |= DMAC_DCCSR_NDES;  /* No-descriptor transfer */
    __dmac_enable_channel(ch);
    if (dma_irq[ch])
        __dmac_channel_enable_irq(ch);
}

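A minimal sketch of how dma_start() might be invoked for a memory-to-memory transfer, assuming the channel's request source and command registers were already programmed when the channel was claimed and that dma_unit_size[ch] holds the transfer unit in bytes; the names example_copy, src_buf and dst_buf are hypothetical, not from the driver:

static void example_copy(int ch, void *dst_buf, void *src_buf,
                         unsigned int len)
{
    /* Cached (KSEG0) buffers must be written back/invalidated before
     * the DMA engine touches the underlying memory. */
    dma_cache_wback_inv((unsigned long)src_buf, len);
    dma_cache_wback_inv((unsigned long)dst_buf, len);

    /* dma_start() converts both addresses with PHYSADDR() and kicks
     * off a no-descriptor transfer of len bytes. */
    dma_start(ch, (unsigned int)src_buf, (unsigned int)dst_buf, len);
}
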
static int seeq_init_ring(struct net_device *dev)
{
    struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
    volatile struct sgiseeq_init_block *ib = &sp->srings;
    int i;

    netif_stop_queue(dev);
    sp->rx_new = sp->tx_new = 0;
    sp->rx_old = sp->tx_old = 0;

    seeq_load_eaddr(dev, sp->sregs);

    /* XXX for now just accept packets directly to us
     * XXX and ether-broadcast. Will do multicast and
     * XXX promiscuous mode later. -davem
     */
    sp->mode = SEEQ_RCMD_RBCAST;

    /* Setup tx ring. */
    for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
        if (!ib->tx_desc[i].tdma.pbuf) {
            unsigned long buffer;

            buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
            if (!buffer)
                return -ENOMEM;
            ib->tx_desc[i].buf_vaddr = KSEG1ADDR(buffer);
            ib->tx_desc[i].tdma.pbuf = PHYSADDR(buffer);
            /* flush_cache_all(); */
        }
        ib->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
    }

    /* And now the rx ring. */
    for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
        if (!ib->rx_desc[i].rdma.pbuf) {
            unsigned long buffer;

            buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
            if (!buffer)
                return -ENOMEM;
            ib->rx_desc[i].buf_vaddr = KSEG1ADDR(buffer);
            ib->rx_desc[i].rdma.pbuf = PHYSADDR(buffer);
            /* flush_cache_all(); */
        }
        ib->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
    }

    /* Mark the last rx descriptor as end-of-ring. */
    ib->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
    return 0;
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
                    struct page *page)
{
    __set_bit(PG_mapped, &page->flags);
    if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
        copy_page(to, from);
    else {
        unsigned long phys_addr = PHYSADDR(to);
        unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
        pgd_t *pgd = pgd_offset_k(p3_addr);
        pud_t *pud = pud_offset(pgd, p3_addr);
        pmd_t *pmd = pmd_offset(pud, p3_addr);
        pte_t *pte = pte_offset_kernel(pmd, p3_addr);
        pte_t entry;
        unsigned long flags;

        entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
        mutex_lock(&p3map_mutex[(address & CACHE_ALIAS) >> 12]);
        set_pte(pte, entry);
        local_irq_save(flags);
        __flush_tlb_page(get_asid(), p3_addr);
        local_irq_restore(flags);
        update_mmu_cache(NULL, p3_addr, entry);
        __copy_user_page((void *)p3_addr, from, to);
        pte_clear(&init_mm, p3_addr, pte);
        mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS) >> 12]);
    }
}

unsigned long tx_ioinl(unsigned int *addr)
{
    unsigned long val;
    __u32 ioaddr;
    unsigned long flags;

    save_and_cli(flags);

    ioaddr = PHYSADDR(addr);
    /* Fall back to the standard accessor for non-PCI I/O space. */
    if (ioaddr < pci_io_resource.start || ioaddr > pci_io_resource.end) {
        restore_flags(flags);
        return std_inl(addr);
    }

    *(volatile u32 *)(ulong)&tx3927_pcicptr->ipciaddr =
        (unsigned long)ioaddr;
    *(volatile u32 *)(ulong)&tx3927_pcicptr->ipcibe =
        (PCI_IPCIBE_ICMD_IOREAD << PCI_IPCIBE_ICMD_SHIFT) |
        PCI_IPCIBE_IBE_LONG;
    while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC))
        udelay(2);
    val = le32_to_cpu(*(volatile u32 *)(ulong)&tx3927_pcicptr->ipcidata);
    /* clear by setting */
    tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;

    restore_flags(flags);
    return val;
}

// this is data moving from memory to nand.
int _ra_nand_dma_push(unsigned long src, int len)
{
    int ret = 0;

#if !defined (__UBOOT__)
    // uboot sets kseg0 as noncached, so only the kernel build needs
    // an explicit writeback here
    dma_cache_wback(src, len);
#else
    flush_cache(src, len);
#endif

    // set GDMA
    _set_gdma_ch(NFC_DATA, PHYSADDR((void *)src), len, BURST_SIZE_4B,
                 HW_MODE, DMA_REQMEM, DMA_NAND_REQ, TRN_INC, TRN_FIX);

    // start and wait dma done
    if (_nand_dma_sync()) {
        printk("%s: gdma: fail, src:%lx, len:%x\n", __func__, src, len);
        ret = -1;
    }

    // disable dma
    _release_dma_buf();

    return ret;
}

// this is "data moving" from nand to memory. int _ra_nand_dma_pull(unsigned long dst, int len) { int ret =0; #if !defined (__UBOOT__) dma_cache_inv(dst, len); #endif #if defined (__UBOOT__) flush_cache(dst, len); #endif // set GDMA _set_gdma_ch(PHYSADDR(dst), NFC_DATA, len, BURST_SIZE_4B, HW_MODE, DMA_NAND_REQ, DMA_REQMEM, TRN_FIX, TRN_INC); // start and wait dma done if (_nand_dma_sync()) { printk("%s: gdma: fail, dst:%lx, len:%x \n", __func__, dst, len); ret = -1; } // disable dma _release_dma_buf(); return ret; }
int _ra_nand_prepare_dma_pull(unsigned long dst, int len)
{
    _set_gdma_ch(PHYSADDR(dst), NFC_DATA, len, BURST_SIZE_4B,
                 HW_MODE, DMA_NAND_REQ, DMA_REQMEM, TRN_FIX, TRN_INC);
    return 0;
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
                    struct page *page)
{
    __set_bit(PG_mapped, &page->flags);
    if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
        copy_page(to, from);
    else {
        pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
                                   _PAGE_RW | _PAGE_CACHABLE |
                                   _PAGE_DIRTY | _PAGE_ACCESSED |
                                   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
        unsigned long phys_addr = PHYSADDR(to);
        unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
        pgd_t *dir = pgd_offset_k(p3_addr);
        pmd_t *pmd = pmd_offset(dir, p3_addr);
        pte_t *pte = pte_offset_kernel(pmd, p3_addr);
        pte_t entry;
        unsigned long flags;

        entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
        down(&p3map_sem[(address & CACHE_ALIAS) >> 12]);
        set_pte(pte, entry);
        local_irq_save(flags);
        __flush_tlb_page(get_asid(), p3_addr);
        local_irq_restore(flags);
        update_mmu_cache(NULL, p3_addr, entry);
        __copy_user_page((void *)p3_addr, from, to);
        pte_clear(&init_mm, p3_addr, pte);
        up(&p3map_sem[(address & CACHE_ALIAS) >> 12]);
    }
}

static void lb_memcpy(void *target, void *source, unsigned int len)
{
    int ch = DMA_CHANNEL;

    /* Write back & invalidate cached (KSEG0) buffers before DMA. */
    if (((unsigned int)source < 0xa0000000) && len)
        dma_cache_wback_inv((unsigned long)source, len);
    if (((unsigned int)target < 0xa0000000) && len)
        dma_cache_wback_inv((unsigned long)target, len);

    /* No-descriptor, auto-request copy in 32-byte bursts; len is
     * assumed to be a multiple of 32. */
    REG_DMAC_DSAR(ch) = PHYSADDR((unsigned long)source);
    REG_DMAC_DTAR(ch) = PHYSADDR((unsigned long)target);
    REG_DMAC_DTCR(ch) = len / 32;
    REG_DMAC_DRSR(ch) = DMAC_DRSR_RS_AUTO;
    REG_DMAC_DCMD(ch) = DMAC_DCMD_SAI | DMAC_DCMD_DAI |
                        DMAC_DCMD_SWDH_32 | DMAC_DCMD_DWDH_32 |
                        DMAC_DCMD_DS_32BYTE;
    REG_DMAC_DCCSR(ch) = DMAC_DCCSR_EN | DMAC_DCCSR_NDES;

    /* Busy-wait until the transfer count drains to zero. */
    while (REG_DMAC_DTCR(ch))
        ;
}

int do_bootm_linux(int flag, int argc, char * const argv[],
                   bootm_headers_t *images)
{
    void (*theKernel)(int magic, void *tagtable);
    struct tag *params, *params_start;
    char *commandline = getenv("bootargs");

    /*
     * allow the PREP bootm subcommand, it is required for bootm to work
     *
     * TODO: Andreas Bießmann <*****@*****.**> refactor the
     * do_bootm_linux() for avr32
     */
    if (flag & BOOTM_STATE_OS_PREP)
        return 0;

    if ((flag != 0) && (flag != BOOTM_STATE_OS_GO))
        return 1;

    theKernel = (void *)images->ep;

    bootstage_mark(BOOTSTAGE_ID_RUN_OS);

    params = params_start = (struct tag *)gd->bd->bi_boot_params;
    params = setup_start_tag(params);
    params = setup_memory_tags(params);
    if (images->rd_start) {
        params = setup_ramdisk_tag(params,
                                   PHYSADDR(images->rd_start),
                                   PHYSADDR(images->rd_end));
    }
    params = setup_commandline_tag(params, commandline);
    params = setup_clock_tags(params);
    params = setup_ethernet_tags(params);
    params = setup_boardinfo_tag(params);
    setup_end_tag(params);

    printf("\nStarting kernel at %p (params at %p)...\n\n",
           theKernel, params_start);

    prepare_to_boot();

    theKernel(ATAG_MAGIC, params_start);
    /* does not return */

    return 1;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
                                    volatile struct hpc3_ethregs *hregs,
                                    volatile struct sgiseeq_regs *sregs)
{
    if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
        hregs->rx_ndptr = PHYSADDR(&sp->srings.rx_desc[sp->rx_new]);
        seeq_go(sp, hregs, sregs);
    }
}

static void board_console_add(void *regs, uint irq, uint baud_base,
                              uint reg_shift)
{
    physaddr_t base;

    /* The CFE NS16550 driver expects a physical address */
    base = PHYSADDR((physaddr_t) regs);
    cfe_add_device(&ns16550_uart, base, baud_base, &reg_shift);
}

static inline void init_hpc_chain(uchar *buf)
{
    struct hpc_chunk *hcp = (struct hpc_chunk *) buf;
    unsigned long start, end;

    start = (unsigned long) buf;
    end = start + PAGE_SIZE;
    while (start < end) {
        hcp->desc.pnext = PHYSADDR((hcp + 1));
        hcp->desc.cntinfo = HPCDMA_EOX;
        hcp++;
        start += sizeof(struct hpc_chunk);
    }
    /* Link the last chunk back to the first, closing the ring. */
    hcp--;
    hcp->desc.pnext = PHYSADDR(buf);

    /* Force flush to memory */
    dma_cache_wback_inv((unsigned long) buf, PAGE_SIZE);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh7705_flush_dcache_page(void *arg)
{
    struct page *page = arg;
    struct address_space *mapping = page_mapping(page);

    if (mapping && !mapping_mapped(mapping))
        set_bit(PG_dcache_dirty, &page->flags);
    else
        __flush_dcache_page(PHYSADDR(page_address(page)));
}

/* RTE function has a return value of 0. */
void SifInitCmd(void)
{
    u32 status;
    int i;

    core_save_disable(&status);
    if (initialized) {
        core_restore(status);
        return;
    }
    initialized = 1;

    _sif_cmd_data.pktbuf = KSEG1ADDR((u32 *)pktbuf);
    _sif_cmd_data.cmdbuf = KSEG1ADDR((u32 *)cmdbuf);
    _sif_cmd_data.iopbuf = 0;
    _sif_cmd_data.sys_cmd_handlers = sifCmdSysBuffer;
    _sif_cmd_data.nr_sys_handlers = 32;
    _sif_cmd_data.usr_cmd_handlers = NULL;
    _sif_cmd_data.nr_usr_handlers = 0;
    _sif_cmd_data.sregs = sregs;

    for (i = 0; i < CMD_HANDLER_MAX; i++) {
        sifCmdSysBuffer[i].handler = NULL;
        sifCmdSysBuffer[i].harg = NULL;
    }
    for (i = 0; i < 32; i++)
        sregs[i] = 0;

    sifCmdSysBuffer[0].handler = change_addr;
    sifCmdSysBuffer[0].harg = &_sif_cmd_data;
    sifCmdSysBuffer[1].handler = set_sreg;
    sifCmdSysBuffer[1].harg = &_sif_cmd_data;

    core_restore(status);

    /* No check here if IOP is already initialized. Assumption is that
     * it is already initialized. */

    /* give it our new receive address. */
    _sif_cmd_data.iopbuf = (void *) sbios_iopaddr;
    /* XXX: inserted for test. */
    ((ca_pkt_t *)(sifCmdInitPkt))->buf =
        (void *) PHYSADDR((u32 *)_sif_cmd_data.pktbuf);
    SifSendCmd(SIF_CMD_CHANGE_SADDR, sifCmdInitPkt, sizeof(ca_pkt_t),
               NULL, NULL, 0);

#if 0
    /* RTE does the following: */
    SifSetReg(SIF_CMD_CHANGE_SADDR, sbios_iopaddr);
    SifSetReg(0x80000001, sbios_iopaddr);
#else
    /* XXX: PS2SDK code looks better: */
    SifSetReg(SIF_CMD_CHANGE_SADDR, (uint32_t) _sif_cmd_data.iopbuf);
    SifSetReg(0x80000001, (uint32_t) &_sif_cmd_data);
#endif
}

void dma_nand_set_wait(void *tar, unsigned char src, unsigned int size)
{
    unsigned int setdata[16];
    unsigned int *ptemp;

    /* Build the 32-bit fill pattern in an uncached, 32-byte-aligned
     * slot inside setdata so the DMA engine reads it directly. */
    ptemp = (unsigned int *)UNCACHE(((unsigned int)(&setdata) + 31) & ~31);
    *ptemp = (unsigned int)((src << 24) | (src << 16) | (src << 8) | src);

    if (((unsigned int)tar < 0xa0000000) && size)
        dma_cache_wback_inv((unsigned long)tar, size);

    CLRREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL), DCS_CTE);

    OUTREG32(A_DMA_DSA(DMA_NAND_COPY_CHANNEL), PHYSADDR((unsigned long)ptemp));
    OUTREG32(A_DMA_DTA(DMA_NAND_COPY_CHANNEL), PHYSADDR((unsigned long)tar));
    OUTREG32(A_DMA_DTC(DMA_NAND_COPY_CHANNEL), size / 32);
    OUTREG32(A_DMA_DRT(DMA_NAND_COPY_CHANNEL), DRT_AUTO);
    /* No DCM_SAI: the source (fill pattern) address stays fixed. */
    OUTREG32(A_DMA_DCM(DMA_NAND_COPY_CHANNEL),
             (DCM_DAI | DCM_SP_32BIT | DCM_DP_32BIT | DCM_TSZ_32BYTE));

    CLRREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL), DCS_TT);
    SETREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL), DCS_CTE | DCS_NDES);

    /* Wait for the transfer-terminated flag. */
    while (!(INREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL)) & DCS_TT))
        ;
}

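The `((unsigned int)(&setdata) + 31) & ~31` expression above is the usual round-up-to-a-power-of-two-boundary idiom, used here so the fill pattern lands on a 32-byte boundary inside the 16-word scratch array. A self-contained illustration (align_up is a hypothetical helper, not part of the driver):

static inline unsigned long align_up(unsigned long x, unsigned long a)
{
    /* Requires a to be a power of two: adding a - 1 and masking off
     * the low bits rounds x up to the next multiple of a. */
    return (x + (a - 1)) & ~(a - 1);
}

/* align_up(0x1004, 32) == 0x1020; align_up(0x1020, 32) == 0x1020 */
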
void tc_writel(unsigned long data, volatile __u32 *addr)
{
    addr = (volatile __u32 *)PHYSADDR(addr);
    *(volatile u32 *)(ulong)&tx3927_pcicptr->ipcidata =
        cpu_to_le32(data);
    *(volatile u32 *)(ulong)&tx3927_pcicptr->ipciaddr =
        (unsigned long)addr;
    *(volatile u32 *)(ulong)&tx3927_pcicptr->ipcibe =
        (PCI_IPCIBE_ICMD_MEMWRITE << PCI_IPCIBE_ICMD_SHIFT) |
        PCI_IPCIBE_IBE_LONG;
    while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC))
        ;
    /* clear by setting */
    tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;
}

static unsigned long jazz_fd_dma_mem_alloc(unsigned long size)
{
    unsigned long mem;

    mem = __get_dma_pages(GFP_KERNEL, get_order(size));
    if (!mem)
        return 0;
    vdma_alloc(PHYSADDR(mem), size);    /* XXX error checking */

    return mem;
}

static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
    int sz = sp->SCp.buffers_residual;
    struct mmu_sglist *sg = (struct mmu_sglist *) sp->SCp.buffer;

    while (sz >= 0) {
        sg[sz].dvma_addr = vdma_alloc(PHYSADDR(sg[sz].addr), sg[sz].len);
        sz--;
    }
    sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dvma_address);
}

void __init prom_init(unsigned int mem_upper)
{
    mem_upper = PHYSADDR(mem_upper);

    mips_machgroup = MACH_GROUP_UNKNOWN;
    mips_machtype = MACH_UNKNOWN;
    arcs_cmdline[0] = 0;

    vac_memory_upper = mem_upper;

    add_memory_region(0, mem_upper, BOOT_MEM_RAM);
}

void dma_nand_copy_wait(void *tar, void *src, int size)
{
    int timeout = 0x1000000;

    /* Write back & invalidate cached (KSEG0) buffers before DMA. */
    if (((unsigned int)src < 0xa0000000) && size)
        dma_cache_wback_inv((unsigned long)src, size);
    if (((unsigned int)tar < 0xa0000000) && size)
        dma_cache_wback_inv((unsigned long)tar, size);

    CLRREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL), DCS_CTE);

    OUTREG32(A_DMA_DSA(DMA_NAND_COPY_CHANNEL), PHYSADDR((unsigned long)src));
    OUTREG32(A_DMA_DTA(DMA_NAND_COPY_CHANNEL), PHYSADDR((unsigned long)tar));
    OUTREG32(A_DMA_DTC(DMA_NAND_COPY_CHANNEL), size / 32);
    OUTREG32(A_DMA_DRT(DMA_NAND_COPY_CHANNEL), DRT_AUTO);
    OUTREG32(A_DMA_DCM(DMA_NAND_COPY_CHANNEL),
             (DCM_SAI | DCM_DAI | DCM_SP_32BIT | DCM_DP_32BIT |
              DCM_TSZ_32BYTE));

    CLRREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL), DCS_TT);
    SETREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL), DCS_CTE | DCS_NDES);

    /* Wait for the transfer-terminated flag, with a bounded spin. */
    while ((!(INREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL)) & DCS_TT)) &&
           (timeout--))
        ;
}

void yuv_copy_nowait(unsigned int *tar, unsigned int *src, int *t_stride,
                     int *s_stride, int *size, int *line)
{
    int i;

    /* Wait until the channel has finished its current descriptor. */
    while (!(INREG32(A_DMA_DCS(DMA_STRIDE_CPY_CHANNEL)) & DCS_CT))
        ; //error

    /* Build one stride-copy descriptor per Y/U/V plane; each entry
     * chains to the next through the descriptor-address field. */
    for (i = 0; i < 3; i++) {
        g_desc[8 * i + 1] = PHYSADDR(src[i]);
        g_desc[8 * i + 2] = PHYSADDR(tar[i]);
        g_desc[8 * i + 3] = (((unsigned int)&g_desc[8 * (i + 1)] & 0xff0) << 20) |
                            ((*(line + i)) << 16) | (*(size + i));
        g_desc[8 * i + 4] = ((*(t_stride + i)) << 16) | (*(s_stride + i));
    }

    OUTREG32(A_DMA_DDA(DMA_STRIDE_CPY_CHANNEL), PHYSADDR(g_desc));
    OUTREG32(A_DMA_DRT(DMA_STRIDE_CPY_CHANNEL), DRT_AUTO);
    CLRREG32(A_DMA_DCS(DMA_STRIDE_CPY_CHANNEL), (DCS_TT | DCS_CT));
    OUTREG32(A_DMA_DDRS(DMA_STRIDE_CPY_CHANNEL / 6),
             (1 << (DMA_STRIDE_CPY_CHANNEL % 6))); //add
    SETREG32(A_DMA_DCS(DMA_STRIDE_CPY_CHANNEL), DCS_CTE);
}

/* pci io access */
unsigned char tx_ioinb(unsigned char *addr)
{
    unsigned long val;
    __u32 ioaddr;
    int offset;
    int byte;
    unsigned long flags;

    save_and_cli(flags);

    ioaddr = PHYSADDR(addr);
    /* Fall back to the standard accessor for non-PCI I/O space. */
    if (ioaddr < pci_io_resource.start || ioaddr > pci_io_resource.end) {
        restore_flags(flags);
        return std_inb(addr);
    }

    /* Pick the byte-enable pattern for the addressed lane; the enables
     * are active low, so exactly one bit is cleared in each value. */
    offset = ioaddr & 0x3;
#ifdef __BIG_ENDIAN
    if (offset == 0)
        byte = 0x7;
    else if (offset == 1)
        byte = 0xb;
    else if (offset == 2)
        byte = 0xd;
    else /* offset == 3 */
        byte = 0xe;
#else
    if (offset == 0)
        byte = 0xe;
    else if (offset == 1)
        byte = 0xd;
    else if (offset == 2)
        byte = 0xb;
    else /* offset == 3 */
        byte = 0x7;
#endif

    *(volatile u32 *)(ulong)&tx3927_pcicptr->ipciaddr =
        (unsigned long)ioaddr;
    *(volatile u32 *)(ulong)&tx3927_pcicptr->ipcibe =
        (PCI_IPCIBE_ICMD_IOREAD << PCI_IPCIBE_ICMD_SHIFT) | byte;
    while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC))
        udelay(2);
    val = (le32_to_cpu((ulong)tx3927_pcicptr->ipcidata)) >> (offset * 8);
    val &= 0xff;
    /* clear by setting */
    tx3927_pcicptr->istat |= PCI_ISTAT_IDICC;

    restore_flags(flags);
    return val;
}