Example #1
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	/*
	 * The following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page(), which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLB_PAGE.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
	else
		__dma_free_buffer(page, size);

	return addr;
}
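
The workaround above exists because __dma_alloc_buffer() allocates a high-order block and hands back the unused tail pages, which split_page() can only do for non-compound allocations. A minimal sketch of that allocate-then-split pattern, assuming a hypothetical helper name (alloc_exact_pages is illustrative, not a function from this tree):

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *alloc_exact_pages(size_t size, gfp_t gfp)
{
	unsigned int order;
	struct page *page, *p, *e;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/* split_page() below requires a non-compound allocation. */
	page = alloc_pages(gfp & ~__GFP_COMP, order);
	if (!page)
		return NULL;

	/* Break the high-order block into independent order-0 pages... */
	split_page(page, order);

	/* ...and give back the tail pages beyond the requested size. */
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	return page;
}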
Example #2
/*
 * See if a buffer address is in an 'unsafe' range.  If it is, allocate a
 * 'safe' buffer, copy the unsafe buffer into it, and substitute the safe
 * buffer for the unsafe one (basically, move the buffer from an unsafe
 * area to a safe one).
 */
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return ~0;

	if (ret == 0) {
		__dma_page_cpu_to_dev(page, offset, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
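
The ~0 returned above is the ARM DMA error cookie; a caller is expected to check the handle with dma_mapping_error() rather than compare against ~0 directly. A minimal, hypothetical caller sketch (example_map_for_device is an illustrative name, not from the source):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_for_device(struct device *dev, struct page *page,
				  unsigned long offset, size_t len,
				  dma_addr_t *out)
{
	dma_addr_t handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);

	/* Catches the ~0 error cookie (e.g. needs_bounce() failing above). */
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	*out = handle;
	return 0;
}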
Example #3
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
	else
		__dma_free_buffer(page, size);

	return addr;
}
Example #4
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!arch_is_coherent())
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
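
To make the ownership rule in the comment concrete, here is a minimal, hypothetical driver-side sketch (not from the source) of the map/unmap lifetime that this streaming API expects:

#include <linux/dma-mapping.h>

static void example_rx_one_buffer(struct device *dev, struct page *page,
				  size_t len)
{
	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, handle))
		return;

	/* The device owns the buffer: program it with 'handle' and wait. */

	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
	/* The CPU owns the buffer again and may read the received data. */
}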
Example #5
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
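
This variant differs from the previous arm_dma_map_page only in honoring DMA_ATTR_SKIP_CPU_SYNC, which lets a caller suppress the cache maintenance when it will synchronize the buffer itself. On this kernel generation (struct dma_attrs), a caller would typically set the attribute roughly like this; the function name is illustrative, not from the source:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_sg_no_sync(struct device *dev, struct scatterlist *sg,
				  int nents)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	/* Returns the number of mapped entries, 0 on failure. */
	return dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attrs);
}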
Example #6
static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
Example #7
static void __isp_stat_buf_sync_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf,
				      u32 buf_size, enum dma_data_direction dir,
				      void (*dma_sync)(struct device *,
					dma_addr_t, unsigned long, size_t,
					enum dma_data_direction))
{
	struct device *dev = stat->isp->dev;
	struct page *pg;
	dma_addr_t dma_addr;
	u32 offset;

	/* Initial magic words */
	pg = vmalloc_to_page(buf->virt_addr);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);

	/* Final magic words */
	pg = vmalloc_to_page(buf->virt_addr + buf_size);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
	dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
}
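
The dma_sync callback parameter matches the signature of dma_sync_single_range_for_cpu() and dma_sync_single_range_for_device(); in the omap3isp driver the helper is wrapped roughly as follows (a sketch that reuses the ispstat types from the example above):

static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
					    struct ispstat_buffer *buf,
					    u32 buf_size,
					    enum dma_data_direction dir)
{
	/* Make the CPU's view of the magic words coherent after device writes. */
	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
				  dma_sync_single_range_for_cpu);
}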
Example #8
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page = alloc_pages(pool->gfp_mask, pool->order);

	if (!page)
		return NULL;
	/*
	 * This is only being used to flush the page for DMA; this API is not
	 * really suitable for calling from a driver, but no better way to
	 * flush a page for DMA exists at this time.
	 */
	arm_dma_ops.sync_single_for_device(NULL,
					   pfn_to_dma(NULL, page_to_pfn(page)),
					   PAGE_SIZE << pool->order,
					   DMA_FROM_DEVICE);
	return page;
}
Example #9
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page = alloc_pages(pool->gfp_mask, pool->order);

	if (!page)
		return NULL;
	/*
	 * This is only being used to flush the page for DMA; this API is not
	 * really suitable for calling from a driver, but no better way to
	 * flush a page for DMA exists at this time.
	 */
#ifdef CONFIG_64BIT
	dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
		PAGE_SIZE << pool->order,
		DMA_BIDIRECTIONAL);
#else
	arm_dma_ops.sync_single_for_device(NULL,
		pfn_to_dma(NULL, page_to_pfn(page)),
		PAGE_SIZE << pool->order,
		DMA_BIDIRECTIONAL);
#endif

	return page;
}
Example #10
/* must be called with write lock held on data->sem */
static int bralloc_mem_allocate(struct file *file, bralloc_data_t *data, unsigned long size)
{
	int alloc_flags;
	int ret = 0;

	bralloc_debug("Enter ..\n");

	/* Find what type of allocation and mapping we need to do, as they all
	 * need to be consistent with the mmaps we will be creating for
	 * userspace
	 */

	if (file->f_flags & O_SYNC) {

		alloc_flags = BRALLOC_DMA_COHERENT_ALLOC;

		/* Create an uncached allocation. We still need to decide
		 * whether it should be write-buffered or not, but we'll add
		 * something for that later. For now, uncached allocations
		 * are coherent allocations.
		 */

		/* Allocate coherent (uncached, strongly ordered) contiguous dma memory here */
		data->kaddr = dma_alloc_coherent(&br.pdev->dev, size, &data->dma_handle, GFP_KERNEL);
		if (!data->kaddr) {
			bralloc_error("Failed to allocate contiguous DMA memory for (%d/%d), size(%lu)\n",
					current->pid, current->tgid, size);
			ret = -ENOMEM;
			goto done;
		}

		data->start_page = virt_to_page(data->kaddr);

	} else if (file->f_flags & O_DIRECT) {

		alloc_flags = BRALLOC_DMA_WRITECOMBINE_ALLOC;

		/* If the file is opened with O_DIRECT, we treat this as a
		 * write-combine allocation+mapping (using write buffers).
		 * Userspace *must* lock/unlock the region before using it.
		 * For us, "lock" means do nothing and "unlock" means drain
		 * the write buffer.
		 */

		/* Allocate write-combine (uncached, bufferable) contiguous DMA memory here */
		data->kaddr = dma_alloc_writecombine(&br.pdev->dev, size, &data->dma_handle, GFP_KERNEL);
		if (!data->kaddr) {
			bralloc_error("Failed to allocate contiguous DMA memory for (%d/%d), size(%lu)\n",
					current->pid, current->tgid, size);
			ret = -ENOMEM;
			goto done;
		}

		data->start_page = virt_to_page(data->kaddr);

	} else if (file->f_flags & FASYNC) {

		alloc_flags = BRALLOC_WRITETHROUGH_ALLOC;

		/* If the file is opened with O_ASYNC, we allocate+map using
		 * write-through cached mappings. This means that for all files
		 * opened with O_ASYNC, userspace must call the lock/unlock
		 * ioctls before using the memory region. For us, "lock" means
		 * "invalidate cachelines" and "unlock" means do nothing.
		 */

		data->start_page = dma_alloc_from_contiguous(&br.pdev->dev, (size >> PAGE_SHIFT),
								CONFIG_CMA_ALIGNMENT);
		if (!data->start_page) {
			bralloc_error("Failed to allocate contiguous DMA memory for (%d/%d), size(%lu)\n",
					current->pid, current->tgid, size);
			ret = -ENOMEM;
			goto done;
		}

		data->kaddr = page_address(data->start_page);

		/* We must clear the buffer and remap it with WT cacheability
		 * to make sure the userspace mappings and kernel mappings are
		 * identical in terms of memory attributes and cacheability;
		 * otherwise, unthinkable things can happen according to the
		 * ARM TRM.
		 */
		bralloc_mem_clear_buffer(data->kaddr, size);
		bralloc_mem_kernel_remap(data->start_page, size, pgprot_writethrough(pgprot_kernel));
		data->dma_handle = pfn_to_dma(&br.pdev->dev, page_to_pfn(data->start_page));

	} else {
Example #11
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}