void memcpy(void *target, const void *source, size_t len)
{
	int ch = DMA_CHANNEL;
	unsigned char *dp;
	const unsigned char *sp;
	size_t i;

	/* Small copies are cheaper done by the CPU. */
	if (len < 4) {
		_memcpy(target, source, len);
		return;
	}

	/* Write back and invalidate cached buffers before the DMA engine
	 * touches memory behind the CPU's back. Addresses at or above
	 * 0xa0000000 (KSEG1) are uncached and need no maintenance. */
	if (((unsigned int)source < 0xa0000000) && len)
		dma_cache_wback_inv((unsigned long)source, len);
	if (((unsigned int)target < 0xa0000000) && len)
		dma_cache_wback_inv((unsigned long)target, len);

	REG_DMAC_DSAR(ch) = PHYSADDR((unsigned long)source);
	REG_DMAC_DTAR(ch) = PHYSADDR((unsigned long)target);
	REG_DMAC_DTCR(ch) = len / 4;
	REG_DMAC_DRSR(ch) = DMAC_DRSR_RS_AUTO;
	/* The source must auto-increment as well (cf. lb_memcpy below). */
	REG_DMAC_DCMD(ch) = DMAC_DCMD_SAI | DMAC_DCMD_DAI | DMAC_DCMD_SWDH_32 |
			    DMAC_DCMD_DWDH_32 | DMAC_DCMD_DS_32BIT;
	REG_DMAC_DCCSR(ch) = DMAC_DCCSR_EN | DMAC_DCCSR_NDES;

	/* Busy-wait until the transfer count reaches zero. */
	while (REG_DMAC_DTCR(ch))
		;

	/* Copy the trailing bytes the 32-bit DMA transfers missed; both
	 * pointers start at the first byte past the DMA'd region. */
	if (len % 4) {
		dp = (unsigned char *)target + (len & ~(size_t)3);
		sp = (const unsigned char *)source + (len & ~(size_t)3);
		for (i = 0; i < (len % 4); i++)
			*dp++ = *sp++;
	}
}
/* Put a source buffer into the DMA ring.
 * This updates the source pointer and byte count. Normally used
 * for memory-to-fifo transfers.
 */
u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	/* I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/* We should not have multiple callers for a particular channel,
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/* If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count. */
	dp->dscr_source0 = virt_to_phys(buf);
	dp->dscr_cmd1 = nbytes;

	/* Check flags. */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/*
	 * There is an erratum on the Au1200/Au1550 parts that could result
	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
	 * the dcache eviction buffer. NONCOHERENT_IO is on by default for
	 * these parts. If it is fixed in the future, these dma_cache_inv
	 * calls will just be nothing more than empty macros. See io.h.
	 */
	dma_cache_wback_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	au_sync();
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			if (!hw_coherentio)
				ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
#ifdef CONFIG_BRCM_CONSISTENT_DMA
			if (brcm_map_coherent(*dma_handle, ret,
					      PFN_ALIGN(size), &ret, gfp)) {
				/* free_pages() takes an order, not a size */
				free_pages((unsigned long)ret,
					   get_order(size));
				ret = NULL;
			}
#else
			ret = UNCAC_ADDR(ret);
#endif
		}
	}

	return ret;
}
void dma_cache_sync(void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long)vaddr, size);
}
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 gfp);
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);
	if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
	    !plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}
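/* A hedged sketch of the matching free path for the CMA-aware allocator
 * above: undo the uncached remap with CAC_ADDR, then give the pages back
 * to CMA or the page allocator. This mirrors the classic MIPS pattern but
 * is written here for illustration; a real implementation would also undo
 * the platform mapping (e.g. via plat_unmap_dma_mem(), whose exact
 * signature is not shown above).
 */
static void example_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
	    !plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);	/* back to the cached alias */

	page = virt_to_page((void *) addr);

	/* Try CMA first; fall back to the normal page allocator. */
	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}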
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
#ifdef CONFIG_MSTAR_CHIP
	extern int hw_coherentio;
#endif
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
#ifdef CONFIG_MSTAR_CHIP
			if (!hw_coherentio)
#endif
				ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}
static int new_usb_control_msg2(struct usb_ctrlrequest *dr, struct urb *urb,
	struct usb_device *dev, unsigned int pipe, __u8 request,
	__u8 requesttype, __u16 value, __u16 index, void *data, __u16 size,
	int timeout)
{
	static volatile int done = 1;
	int status;

	/* Refuse to start a new transfer while the previous one is
	 * still in flight. */
	if (done == 0) {
		//printk("%s, %d, done=%d, busy!\n", __func__, __LINE__, done);
		return -EBUSY;
	}

	dr->bRequestType = requesttype;
	dr->bRequest = request;
	dr->wValue = cpu_to_le16p(&value);
	dr->wIndex = cpu_to_le16p(&index);
	dr->wLength = cpu_to_le16p(&size);

	done = 0;
	usb_fill_control_urb(urb, dev, pipe, (unsigned char *)dr, data, size,
			     new_usb_api_blocking_completion, (void *)&done);
	dma_cache_wback_inv((unsigned long)data, size);

	urb->actual_length = 0;
	urb->status = 0;
	status = usb_submit_urb(urb, GFP_ATOMIC);

	return status;
}
void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}
void memset16(void *target, unsigned short c, size_t len)
{
	int ch = DMA_CHANNEL;
	unsigned short d;
	unsigned short *dp;
	size_t i;

	if (len < 32) {
		_memset16(target, c, len);
	} else {
		if (((unsigned int)target < 0xa0000000) && len)
			dma_cache_wback_inv((unsigned long)target, len);

		d = c;
		/* The fill value lives on the (cached) stack; write it back
		 * so the DMA engine reads the current value from memory. */
		dma_cache_wback_inv((unsigned long)&d, sizeof(d));
		REG_DMAC_DSAR(ch) = PHYSADDR((unsigned long)&d);
		REG_DMAC_DTAR(ch) = PHYSADDR((unsigned long)target);
		REG_DMAC_DTCR(ch) = len / 32;
		REG_DMAC_DRSR(ch) = DMAC_DRSR_RS_AUTO;
		REG_DMAC_DCMD(ch) = DMAC_DCMD_DAI | DMAC_DCMD_SWDH_16 |
				    DMAC_DCMD_DWDH_16 | DMAC_DCMD_DS_32BYTE;
		REG_DMAC_DCCSR(ch) = DMAC_DCCSR_EN | DMAC_DCCSR_NDES;

		while (REG_DMAC_DTCR(ch))
			;

		/* Fill the trailing bytes the 32-byte DMA bursts missed;
		 * len counts bytes, dp steps in 16-bit units. */
		if (len % 32) {
			dp = (unsigned short *)((unsigned int)target +
						(len & ~(size_t)(32 - 1)));
			for (i = 0; i < (len % 32) / 2; i++)
				*dp++ = c;
		}
	}
}
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;
	default:
		BUG();
	}

	addr = virt_to_phys(ptr) & RAM_OFFSET_MASK;
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;

	return (dma_addr_t)addr;
}
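/* A minimal caller sketch for the dma_map_single() above, assuming a
 * hypothetical device whose registers are programmed with writel(); the
 * register offsets and the example_start_tx() name are illustrative, not
 * taken from the code above. The point is the ordering: the CPU fills the
 * buffer first, the map (which performs the cache writeback) happens next,
 * and only then is the bus address handed to the device.
 */
static int example_start_tx(struct device *dev, void __iomem *regs,
			    void *buf, size_t len)
{
	dma_addr_t bus;

	memset(buf, 0, len);			/* CPU fills the buffer */
	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	writel((u32)bus, regs + 0x10);		/* hypothetical address register */
	writel(len, regs + 0x14);		/* hypothetical length/start register */
	return 0;
}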
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		dma_cache_wback_inv((unsigned long)vaddr, size);
}
static void lb_memcpy(void *target, void *source, unsigned int len)
{
	int ch = DMA_CHANNEL;

	/* Note: len is assumed to be a multiple of 32 bytes; any remainder
	 * is silently dropped by the len / 32 burst count below. */
	if (((unsigned int)source < 0xa0000000) && len)
		dma_cache_wback_inv((unsigned long)source, len);
	if (((unsigned int)target < 0xa0000000) && len)
		dma_cache_wback_inv((unsigned long)target, len);

	REG_DMAC_DSAR(ch) = PHYSADDR((unsigned long)source);
	REG_DMAC_DTAR(ch) = PHYSADDR((unsigned long)target);
	REG_DMAC_DTCR(ch) = len / 32;
	REG_DMAC_DRSR(ch) = DMAC_DRSR_RS_AUTO;
	REG_DMAC_DCMD(ch) = DMAC_DCMD_SAI | DMAC_DCMD_DAI | DMAC_DCMD_SWDH_32 |
			    DMAC_DCMD_DWDH_32 | DMAC_DCMD_DS_32BYTE;
	REG_DMAC_DCCSR(ch) = DMAC_DCCSR_EN | DMAC_DCCSR_NDES;

	while (REG_DMAC_DTCR(ch))
		;
}
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
{
	dma_cache_wback_inv(
		(unsigned long)phys_to_virt(vdma_log2phys(vaddress)), length);
	vdma_disable((int)esp->dregs);
	vdma_set_mode((int)esp->dregs, DMA_MODE_WRITE);
	vdma_set_addr((int)esp->dregs, vaddress);
	vdma_set_count((int)esp->dregs, length);
	vdma_enable((int)esp->dregs);
}
static int ag71xx_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_desc *desc;
	unsigned long flags;
	int i;

	i = ring->curr % AG71XX_TX_RING_SIZE;
	desc = &ring->descs[i];

	spin_lock_irqsave(&ag->lock, flags);
	ar71xx_ddr_flush(pdata->flush_reg);
	spin_unlock_irqrestore(&ag->lock, flags);

	if (!ag71xx_desc_empty(desc))
		goto err_drop;

	if (skb->len <= 0) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_cache_wback_inv((unsigned long)skb->data, skb->len);

	ring->buf[i].skb = skb;

	/* setup descriptor fields */
	desc->data = virt_to_phys(skb->data);
	desc->ctrl = (skb->len & DESC_PKTLEN_M);

	/* flush descriptor */
	wmb();

	ring->curr++;
	if (ring->curr == (ring->dirty + AG71XX_TX_THRES_STOP)) {
		DBG("%s: tx queue full\n", ag->dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	dev->trans_start = jiffies;

	return 0;

err_drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}
/* Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count. Normally used
 * to place an empty buffer into the ring for fifo-to-memory transfers.
 */
u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	/* I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/* We should not have multiple callers for a particular channel,
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/* If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count. */

	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	dp->dscr_dest0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
#if 0
	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
	       dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
	       dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
	/*
	 * There is an erratum on the Au1200/Au1550 parts that could result in
	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
	 * cache eviction buffer. DMA_NONCOHERENT is on by default for these
	 * parts. If it is fixed in the future, these dma_cache_inv will just
	 * be nothing more than empty macros. See io.h.
	 */
	dma_cache_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb();			/* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
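/* A minimal caller sketch for the ring "put" above. Only
 * au1xxx_dbdma_put_dest() and DDMA_FLAGS_IE come from the function above;
 * the example_queue_rx() name and the back-off policy are illustrative
 * assumptions, not part of the dbdma code.
 */
static int example_queue_rx(u32 chanid, dma_addr_t buf, int nbytes)
{
	/* put_dest() returns 0 when the descriptor is still owned by the
	 * DMA engine (ring full); the caller decides how to back off. */
	if (au1xxx_dbdma_put_dest(chanid, buf, nbytes, DDMA_FLAGS_IE) == 0)
		return -EBUSY;

	return 0;
}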
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}
void dma_nand_copy_wait(void *tar, void *src, int size)
{
	int timeout = 0x1000000;

	/* size is assumed to be a multiple of the 32-byte burst size. */
	if (((unsigned int)src < 0xa0000000) && size)
		dma_cache_wback_inv((unsigned long)src, size);
	if (((unsigned int)tar < 0xa0000000) && size)
		dma_cache_wback_inv((unsigned long)tar, size);

	CLRREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL), DCS_CTE);
	OUTREG32(A_DMA_DSA(DMA_NAND_COPY_CHANNEL), PHYSADDR((unsigned long)src));
	OUTREG32(A_DMA_DTA(DMA_NAND_COPY_CHANNEL), PHYSADDR((unsigned long)tar));
	OUTREG32(A_DMA_DTC(DMA_NAND_COPY_CHANNEL), size / 32);
	OUTREG32(A_DMA_DRT(DMA_NAND_COPY_CHANNEL), DRT_AUTO);
	OUTREG32(A_DMA_DCM(DMA_NAND_COPY_CHANNEL),
		 (DCM_SAI | DCM_DAI | DCM_SP_32BIT | DCM_DP_32BIT |
		  DCM_TSZ_32BYTE));
	CLRREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL), DCS_TT);
	SETREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL), DCS_CTE | DCS_NDES);

	/* Busy-wait for the transfer-terminated bit, with a bail-out. */
	while ((!(INREG32(A_DMA_DCS(DMA_NAND_COPY_CHANNEL)) & DCS_TT)) &&
	       (timeout--))
		;
}
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
	gfp_t gfp, unsigned long attrs)
{
	void *ret;

	ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!ret)
		return NULL;

	if (!(attrs & DMA_ATTR_NON_CONSISTENT)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = (void *)UNCAC_ADDR(ret);
	}

	return ret;
}
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, int gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}
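/* A minimal sketch of the matching free path, assuming CAC_ADDR is the
 * inverse of UNCAC_ADDR and that dma_free_noncoherent() expects the cached
 * (KSEG0) address it originally returned. This mirrors the classic MIPS
 * implementation but is written here for illustration only.
 */
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = CAC_ADDR(addr);	/* undo the uncached remap from alloc */
	dma_free_noncoherent(dev, size, (void *) addr, dma_handle);
}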
static int __MDrv_VPool_Ioctl(struct inode *inode, struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	int err = 0;

	/*
	 * extract the type and number bitfields, and don't decode
	 * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
	 */
	if (VPOOL_IOC_MAGIC != _IOC_TYPE(cmd))
		return -ENOTTY;

	/*
	 * the direction is a bitmask, and VERIFY_WRITE catches R/W
	 * transfers. `Type' is user-oriented, while
	 * access_ok is kernel-oriented, so the concept of "read" and
	 * "write" is reversed
	 */
	if (_IOC_DIR(cmd) & _IOC_READ) {
		err = !access_ok(VERIFY_WRITE, (void __user *)arg,
				 _IOC_SIZE(cmd));
	} else if (_IOC_DIR(cmd) & _IOC_WRITE) {
		err = !access_ok(VERIFY_READ, (void __user *)arg,
				 _IOC_SIZE(cmd));
	}
	if (err)
		return -EFAULT;

	switch (cmd) {
	case VPOOL_IOC_FLUSH_INV_DCACHE:
	{
		DrvVPool_Info_t region;

		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(region)))
			return -EFAULT;
		dma_cache_wback_inv((unsigned long)region.pAddr,
				    region.u32Size);
	}
#if defined(CONFIG_MSTAR_TITANIA3) || defined(CONFIG_MSTAR_TITANIA10) || \
    defined(CONFIG_MSTAR_TITANIA4) || defined(CONFIG_MSTAR_URANUS4)
		Chip_Flush_Memory();
#endif
		break;

	default:
		printk("Unknown ioctl command %d\n", cmd);
		return -ENOTTY;
	}

	return 0;
}
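/* A hedged user-space sketch of driving the ioctl above. The device node
 * path, the DrvVPool_Info_t field layout (pAddr, u32Size, as used in the
 * handler), and the flush_region() name are assumptions for illustration;
 * the command macro comes from the driver header.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int flush_region(void *addr, unsigned int size)
{
	DrvVPool_Info_t region = { .pAddr = addr, .u32Size = size };
	int fd = open("/dev/vpool", O_RDWR);	/* hypothetical node */
	int rc;

	if (fd < 0)
		return -1;
	rc = ioctl(fd, VPOOL_IOC_FLUSH_INV_DCACHE, &region);
	close(fd);
	return rc;
}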
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);
	addr = __pa(addr) & RAM_OFFSET_MASK;
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;

	return (dma_addr_t)addr;
}
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		dma_address &= RAM_OFFSET_MASK;
		addr = dma_address + PAGE_OFFSET;
		if (dma_address >= 256 * 1024 * 1024)
			addr += CRIME_HI_MEM_BASE;
		dma_cache_wback_inv(addr, size);
	}
}
static int dma_setup(Scsi_Cmnd *cmd, int datainp)
{
	struct WD33C93_hostdata *hdata =
		(struct WD33C93_hostdata *)cmd->host->hostdata;
	struct hpc3_scsiregs *hregs = (struct hpc3_scsiregs *) cmd->host->base;
	struct hpc_chunk *hcp = (struct hpc_chunk *) hdata->dma_bounce_buffer;

#ifdef DEBUG_DMA
	printk("dma_setup: datainp<%d> hcp<%p> ", datainp, hcp);
#endif

	hdata->dma_dir = datainp;

	/*
	 * wd33c93 shouldn't pass us bogus dma_setups, but
	 * it does:-( The other wd33c93 drivers deal with
	 * it the same way (which isn't that obvious).
	 * IMHO a better fix would be, not to do these
	 * dma setups in the first place
	 */
	if (cmd->SCp.ptr == NULL)
		return 1;

	fill_hpc_entries(&hcp, cmd->SCp.ptr, cmd->SCp.this_residual);

	/* To make sure, if we trip an HPC bug, that we transfer
	 * every single byte, we tag on an extra zero length dma
	 * descriptor at the end of the chain.
	 */
	hcp->desc.pbuf = 0;
	hcp->desc.cntinfo = (HPCDMA_EOX);

#ifdef DEBUG_DMA
	printk(" HPCGO\n");
#endif

	/* Start up the HPC. */
	hregs->ndptr = PHYSADDR(hdata->dma_bounce_buffer);
	if (datainp) {
		dma_cache_inv((unsigned long) cmd->SCp.ptr,
			      cmd->SCp.this_residual);
		hregs->ctrl = (HPC3_SCTRL_ACTIVE);
	} else {
		dma_cache_wback_inv((unsigned long) cmd->SCp.ptr,
				    cmd->SCp.this_residual);
		hregs->ctrl = (HPC3_SCTRL_ACTIVE | HPC3_SCTRL_DIR);
	}

	return 0;
}
int NAND_LB_Read(unsigned int Sector, void *pBuffer)
{
	int x;
	unsigned char *ptr;

	//printf("LB_Read = %x %x\r\n",Sector,pBuffer);
	dma_cache_wback_inv((unsigned long)pBuffer, SECTOR_SIZE);
	ptr = (unsigned char *)CACHE_TO_UNCATCH(pBuffer);
	if (_NAND_LB_GetFromCache(Sector, ptr)) {
		x = _NAND_LB_Read(Sector, ptr);
		_NAND_LB_CopyToCache(Sector, ptr, 0);
		return x;
	}

	return 512;
}
/*
 * streaming DMA Mapping API...
 * CPU accesses page via normal paddr, thus needs to explicitly be made
 * consistent before each use
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}
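/* A minimal sketch of how a caller pairs the sync above with the two halves
 * of a streaming transfer; _dma_cache_sync() and the direction rules come
 * from the function above, while the example_* helper names are
 * illustrative. Rule of thumb: write back (DMA_TO_DEVICE) before the device
 * reads, invalidate (DMA_FROM_DEVICE) before the CPU reads what the device
 * wrote.
 */
static void example_sync_for_device(phys_addr_t paddr, size_t size)
{
	/* CPU is done writing the buffer; push dirty lines to memory. */
	_dma_cache_sync(paddr, size, DMA_TO_DEVICE);
}

static void example_sync_for_cpu(phys_addr_t paddr, size_t size)
{
	/* Device is done writing; drop any stale cached copies. */
	_dma_cache_sync(paddr, size, DMA_FROM_DEVICE);
}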
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
	dma_addr_t *dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC;

	if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		dma_cache_wback_inv((unsigned long) ret, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
}