/*
 * Map one page for streaming DMA.  On devices without hardware cache
 * coherency the covered cache lines are written back and invalidated
 * first so the device observes up-to-date memory.
 */
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long vaddr;

		vaddr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(vaddr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}
/*
 * Tear down a single-page streaming mapping.  For non-coherent devices,
 * any direction other than DMA_TO_DEVICE flushes the cache lines over
 * the buffer before the CPU may read data the device wrote.
 */
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	unsigned long vaddr;

	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		vaddr = dma_addr_to_virt(dma_address);
		dma_cache_wback_inv(vaddr, size);
	}

	plat_unmap_dma_mem(dma_address);
}
/*
 * Sync a scatterlist so the device sees current memory contents.
 *
 * The coherence test depends only on the device, not on the element,
 * so it is hoisted out of the loop instead of being re-evaluated for
 * every scatterlist entry as the previous code did.
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
	int nelems, enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Hardware-coherent devices need no cache maintenance at all. */
	if (plat_device_is_coherent(dev))
		return;

	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg_page(sg)),
			   sg->length, direction);
}
/*
 * Release a coherent allocation.  Tries the per-device coherent pool
 * first; otherwise drops the platform mapping, converts the uncached
 * alias back to its cached address when needed, and frees the pages.
 */
static void mips_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, order);
}
/*
 * Free memory obtained from the coherent allocator, handing it back to
 * CMA when it came from there, or to the page allocator otherwise.
 */
static void mips_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/*
	 * Consistent allocations on non-coherent devices were handed out
	 * through the uncached alias; translate back before freeing.
	 */
	if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
/*
 * Unmap a scatterlist.  Non-coherent devices get a cache flush for any
 * direction other than DMA_TO_DEVICE before each entry is unmapped.
 */
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			unsigned long vaddr = (unsigned long) sg_virt(sg);

			if (vaddr)
				__dma_sync(vaddr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length,
				   direction);
	}
}
/*
 * Map a scatterlist for streaming DMA.  Each entry is cache-synced on
 * non-coherent devices, then translated to a bus address.
 * Returns the number of entries mapped (always nents here).
 */
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		unsigned long vaddr = (unsigned long) sg_virt(sg);

		if (!plat_device_is_coherent(dev) && vaddr)
			__dma_sync(vaddr, sg->length, direction);

		sg->dma_address = plat_map_dma_mem(dev, (void *)vaddr,
						   sg->length);
	}

	return nents;
}
/*
 * Unmap a scatterlist (legacy sg->page API).  Highmem pages that have
 * no kernel mapping (page_address() == NULL) are skipped for the cache
 * flush; each entry's platform mapping is dropped regardless.
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long vaddr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			vaddr = (unsigned long) page_address(sg->page);
			if (vaddr)
				__dma_sync(vaddr + sg->offset, sg->length,
					   direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}
/*
 * Map a scatterlist (legacy sg->page API).  Non-coherent devices get a
 * cache sync per entry before the bus address is recorded.
 * Returns the number of entries mapped (always nents here).
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long vaddr;

		vaddr = (unsigned long) page_address(sg->page);
		if (!plat_device_is_coherent(dev) && vaddr)
			__dma_sync(vaddr + sg->offset, sg->length, direction);

		sg->dma_address = plat_map_dma_mem(dev,
				(void *)(vaddr + sg->offset), sg->length);
	}

	return nents;
}
/* * The affected CPUs below in 'cpu_needs_post_dma_flush()' can * speculatively fill random cachelines with stale data at any time, * requiring an extra flush post-DMA. * * Warning on the terminology - Linux calls an uncached area coherent; * MIPS terminology calls memory areas with hardware maintained coherency * coherent. * * Note that the R14000 and R16000 should also be checked for in this * condition. However this function is only called on non-I/O-coherent * systems and only the R10000 and R12000 are used in such systems, the * SGI IP28 Indigo² rsp. SGI IP32 aka O2. */ static inline bool cpu_needs_post_dma_flush(struct device *dev) { if (plat_device_is_coherent(dev)) return false; switch (boot_cpu_type()) { case CPU_R10000: case CPU_R12000: case CPU_BMIPS5000: return true; default: /* * Presence of MAARs suggests that the CPU supports * speculatively prefetching data, and therefore requires * the post-DMA flush/invalidate. */ return cpu_has_maar; } }
/*
 * Release a coherent allocation.  Broadcom platforms maintain their
 * own coherent mapping (brcm_unmap_coherent); everywhere else the
 * uncached alias is converted back to the cached address when needed.
 */
static void mips_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

#ifdef CONFIG_BRCM_CONSISTENT_DMA
	addr = (unsigned long)brcm_unmap_coherent(vaddr);
#else
	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);
#endif

	free_pages(addr, order);
}
/*
 * Allocate zeroed, DMA-coherent memory.  On non-coherent devices the
 * pages are flushed from the cache and the uncached alias is returned.
 * Returns NULL on allocation failure.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);

	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}
/*
 * Free a coherent allocation, preferring the per-device coherent pool,
 * then CMA, then the page allocator.  When the device is non-coherent
 * and there is no hardware I/O coherency, the uncached alias handed to
 * the caller is translated back to its cached address first.
 */
static void mips_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
/*
 * Free DMA-coherent memory.  On MStar chips with hardware I/O
 * coherency (hw_coherentio) the allocation was never remapped through
 * the uncached alias, so no address conversion is done there.
 */
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
#ifdef CONFIG_MSTAR_CHIP
	extern int hw_coherentio;
#endif
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/*
	 * Braces added around the outer condition: the previous unbraced
	 * "if" with a second "if" nested across an #ifdef was fragile —
	 * any statement later inserted between the two would silently
	 * change which condition guards the assignment.
	 */
	if (!plat_device_is_coherent(dev)) {
#ifdef CONFIG_MSTAR_CHIP
		if (!hw_coherentio)
#endif
			addr = CAC_ADDR(addr);
	}

	free_pages(addr, order);
}
/*
 * Allocate zeroed, DMA-coherent memory (pre-massage_gfp_flags era).
 * Zone specifiers from the caller are discarded; GFP_DMA is forced for
 * devices that cannot address all of 32-bit memory.  Returns NULL on
 * allocation failure.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* Ignore caller-supplied region specifiers. */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);

	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}
/*
 * Allocate zeroed, DMA-coherent memory, trying CMA before the plain
 * page allocator.  Returns NULL on failure.
 *
 * XXX: the coherent and non-coherent implementations still look like
 * they could be consolidated.
 */
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	void *ret;

	/* Non-consistent requests take the simpler cached path. */
	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);

	gfp = massage_gfp_flags(dev, gfp);

	/* CMA only when the context may block; otherwise fall through. */
	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
		page = alloc_pages(gfp, get_order(size));
	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);

	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		if (!hw_coherentio)
			ret = UNCAC_ADDR(ret);
	}

	return ret;
}
/*
 * True when the device is non-coherent AND the CPU is an R10000 or
 * R12000 — the CPUs whose speculative loads need special DMA handling.
 */
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	if (plat_device_is_coherent(dev))
		return 0;

	return current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000;
}
/*
 * dma_is_consistent - report whether DMA at this address is cache-coherent.
 *
 * The address argument is not consulted: on MIPS, consistency is a
 * property of the device/platform, so this defers to the platform hook.
 */
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}