Example #1
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
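	/*
	 * Coherent allocations on non-coherent MIPS hand out an uncached
	 * alias of the buffer; convert back to the cached kernel address
	 * before the pages are returned to the allocator.
	 */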
	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
	dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
Example #2
static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}
Example #3
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}
Example #4
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	/*
	 * The coherent buffer may have been handed out as an uncached alias;
	 * translate back to the cached address so virt_to_page() below
	 * resolves the real backing pages.
	 */
	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
Example #5
void ahb_free_consistent(void *hwdev, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
    unsigned long addr = (unsigned long) vaddr;

    addr = CAC_ADDR(addr);
    free_pages(addr, get_order(size));
}
Example #6
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages;
	struct snd_dma_buffer tmpb;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (! sgbuf)
		return NULL;
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	sgbuf->table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->table), GFP_KERNEL);
	if (! sgbuf->table)
		goto _failed;
	sgbuf->page_table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->page_table), GFP_KERNEL);
	if (! sgbuf->page_table)
		goto _failed;

	/* allocate each page */
	for (i = 0; i < pages; i++) {
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, device, PAGE_SIZE, &tmpb) < 0) {
			if (res_size == NULL)
				goto _failed;
			*res_size = size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		sgbuf->table[i].buf = tmpb.area;
		sgbuf->table[i].addr = tmpb.addr;
#if (defined(CONFIG_LS2E_DEV_BOARD) || defined(CONFIG_LS2F_DEV_BOARD)) && defined(CONFIG_DMA_NONCOHERENT)
		sgbuf->page_table[i] = virt_to_page(CAC_ADDR(tmpb.area));
#else
		sgbuf->page_table[i] = virt_to_page(tmpb.area);
#endif
		sgbuf->pages++;
	}

	sgbuf->size = size;

	/*
	 * On the non-coherent Loongson dev boards the buffer is mapped
	 * uncached so the CPU's view matches what the device sees over DMA.
	 */
#if (defined(CONFIG_LS2E_DEV_BOARD) || defined(CONFIG_LS2F_DEV_BOARD)) && defined(CONFIG_DMA_NONCOHERENT)
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP | VM_IO, pgprot_noncached(PAGE_KERNEL));
#else
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
#endif
	if (! dmab->area)
		goto _failed;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}
Example #7
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	plat_unmap_dma_mem(dev, dma_handle);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}
Example #8
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	/* buffers carved from a per-device coherent pool are released here */
	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}
Example #9
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	/* give CMA-backed pages back to the contiguous area, otherwise free them normally */
	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
Example #10
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/*
	 * Broadcom consistent-DMA kernels get the address to free back from
	 * brcm_unmap_coherent(); other platforms convert the uncached alias
	 * with CAC_ADDR() as usual.
	 */
#ifdef CONFIG_BRCM_CONSISTENT_DMA
	addr = (unsigned long)brcm_unmap_coherent(vaddr);
#else
	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);
#endif

	free_pages(addr, get_order(size));
}
Example #11
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
Example #12
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
#ifdef CONFIG_MSTAR_CHIP
	extern int hw_coherentio;
#endif
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/* skip the cached-alias conversion when the MStar chip has hardware I/O coherency */
	if (!plat_device_is_coherent(dev))
#ifdef CONFIG_MSTAR_CHIP
		if (!hw_coherentio)
#endif
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}
Example #13
void sh_sync_dma_for_device(void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	void *addr;

	/*
	 * In 29-bit mode the buffer may be a P2 (uncached) address; the
	 * cache operations below need its P1 (cached) alias.
	 */
	addr = __in_29bit_mode() ?
	       (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(addr, size);
		break;
	default:
		BUG();
	}
}
Example #14
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
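	/* translate the uncached alias back to its cached address to find the backing page */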
	unsigned long addr = CAC_ADDR((unsigned long)cpu_addr);
	return page_to_pfn(virt_to_page((void *)addr));
}
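All of the examples above rely on the same cached/uncached aliasing trick. As a rough sketch (assuming the classic 32-bit KSEG0/KSEG1 layout; the real macros live in arch/mips/include/asm/page.h and the base addresses differ on 64-bit and on other architectures), CAC_ADDR() and its counterpart UNCAC_ADDR() amount to:
/* rough sketch with typical 32-bit values; see arch/mips/include/asm/page.h */
#define UNCAC_BASE	0xa0000000UL	/* KSEG1: uncached window */
#define PAGE_OFFSET	0x80000000UL	/* KSEG0: cached window */

/* cached KSEG0 address -> uncached KSEG1 alias of the same physical memory */
#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + UNCAC_BASE)

/* uncached KSEG1 alias -> cached KSEG0 address (undoes UNCAC_ADDR) */
#define CAC_ADDR(addr)		((addr) - UNCAC_BASE + PAGE_OFFSET)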