/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init coherent_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	size_t size = coherent_pool_size;
	struct page *page;
	void *ptr;

	if (cma_available)
		ptr = __alloc_from_contiguous(NULL, size, prot, &page);
	else
		ptr = __alloc_remap_buffer(NULL, size, GFP_KERNEL | GFP_DMA,
			prot, &page, NULL);

	if (ptr) {
		coherent_head.vm_start = (unsigned long) ptr;
		coherent_head.vm_end = (unsigned long) ptr + size;
		coherent_head.vm_pages = page;
		printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
		       (unsigned)size / 1024);
		return 0;
	}
	printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)size / 1024);
	return -ENOMEM;
}
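The pool size used above is normally a boot-time tunable rather than a fixed constant. A minimal sketch of such a setup follows (the "coherent_pool" parameter name and the SZ_256K default are assumptions, not taken from the snippet):

#define DEFAULT_COHERENT_POOL_SIZE	SZ_256K

/* Referenced by coherent_init() above. */
static size_t coherent_pool_size = DEFAULT_COHERENT_POOL_SIZE;

/* Parse "coherent_pool=<size>" from the kernel command line. */
static int __init early_coherent_pool(char *p)
{
	coherent_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);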
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	else if (!coherent)
		return pgprot_dmacoherent(prot);
	return prot;
}
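For context, this helper is meant to be applied to a VMA's protection bits before remapping. The sketch below is a hypothetical caller (the function name and the assumption of a non-coherent device are mine, not from the snippet above):

static pgprot_t example_pick_mmap_prot(struct vm_area_struct *vma,
					bool want_write_combine)
{
	DEFINE_DMA_ATTRS(attrs);

	if (want_write_combine)
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

	/* false: the device is assumed non-coherent, so the default is an
	 * uncached, buffered (dmacoherent) mapping. */
	return __get_dma_pgprot(&attrs, vma->vm_page_prot, false);
}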
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret;

	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);

	/* Buffers from a per-device coherent area are mapped here. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
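A typical consumer of dma_mmap_coherent() is a character-device mmap handler. The sketch below uses a hypothetical mydrv driver whose probe routine already obtained buf and dma_handle from dma_alloc_coherent():

struct mydrv {
	struct device *dev;
	void *buf;		/* CPU address from dma_alloc_coherent() */
	dma_addr_t dma_handle;	/* matching bus address */
};

static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mydrv *drv = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Hand the coherent buffer to user space with matching attributes. */
	return dma_mmap_coherent(drv->dev, vma, drv->buf, drv->dma_handle, size);
}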
Example #4
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
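A driver-side usage sketch (hypothetical helper, one page, minimal error handling): the returned pointer is the CPU-visible mapping and *dma receives the bus address to program into the device.

static void *example_alloc_dma_page(struct device *dev, dma_addr_t *dma)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);

	if (!buf)
		return NULL;

	/* Writes are visible to the device without explicit cache maintenance. */
	memset(buf, 0, PAGE_SIZE);

	/* Release later with dma_free_coherent(dev, PAGE_SIZE, buf, *dma). */
	return buf;
}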
Example #5
static int omap_rproc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Uncached, buffered mapping so the CPU and remote processor agree. */
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	/* Keep the mapping out of swap and core dumps. */
	vma->vm_flags |= VM_RESERVED;

	/* vma->vm_pgoff carries the starting PFN supplied as the mmap offset. */
	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
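For reference, the userspace side of this handler looks roughly like the sketch below (the device node and physical address are made up). The byte offset passed to mmap() becomes vma->vm_pgoff in pages, which the handler above feeds to remap_pfn_range() as the starting PFN:

#include <fcntl.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/dev/omap-rproc0", O_RDWR);	/* hypothetical node */
	off_t phys = 0x48000000;			/* hypothetical, page aligned */
	size_t len = 1 << 20;
	void *p;

	if (fd < 0)
		return 1;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
	if (p == MAP_FAILED)
		return 1;

	munmap(p, len);
	return 0;
}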
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init coherent_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	size_t size = coherent_pool_size;
	struct page *page;
	void *ptr;

	if (!IS_ENABLED(CONFIG_CMA))
		return 0;

	ptr = __alloc_from_contiguous(NULL, size, prot, &page, false);
	if (ptr) {
		coherent_head.vm_start = (unsigned long) ptr;
		coherent_head.vm_end = (unsigned long) ptr + size;
		printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
		       (unsigned)size / 1024);
		return 0;
	}
	printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)size / 1024);
	return -ENOMEM;
}
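/*
 * dma_alloc_at_from_contiguous - allocate pages from the device's CMA area,
 * optionally starting at a fixed physical address.
 * @dev:     device whose CMA area is used (the default area if none is set)
 * @count:   number of contiguous pages to allocate
 * @align:   requested alignment as a power-of-two page order, capped at
 *           CONFIG_CMA_ALIGNMENT
 * @at_addr: physical address the range must start at, or 0 for any placement
 *
 * Returns the first struct page of the allocated range, or NULL on failure.
 */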
struct page *dma_alloc_at_from_contiguous(struct device *dev, int count,
				       unsigned int align, phys_addr_t at_addr)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;
	unsigned long start_pfn = __phys_to_pfn(at_addr);

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	if (start_pfn && start_pfn < cma->base_pfn)
		return NULL;
	start = start_pfn ? start_pfn - cma->base_pfn : start;

	mutex_lock(&cma_mutex);

	for (;;) {
		unsigned long timeout = jiffies + msecs_to_jiffies(8000);

		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count || (start && start != pageno))
			break;

		pfn = cma->base_pfn + pageno;
retry:
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (start && time_before(jiffies, timeout)) {
			cond_resched();
			invalidate_bh_lrus();
			goto retry;
		} else if (ret != -EBUSY || start) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	if (page) {
		__dma_remap(page, count << PAGE_SHIFT,
			pgprot_dmacoherent(PAGE_KERNEL));
		__dma_clear_buffer(page, count << PAGE_SHIFT);
	}
	return page;
}
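A caller-side sketch (hypothetical device, size and carveout address): try to place a 1 MiB buffer at a fixed address first, then fall back to letting CMA choose the location, which is what at_addr == 0 selects in the loop above.

static struct page *example_alloc_carveout(struct device *dev)
{
	size_t count = SZ_1M >> PAGE_SHIFT;
	struct page *page;

	/* 0x9c000000 is a made-up carveout base; align is a page order. */
	page = dma_alloc_at_from_contiguous(dev, count, get_order(SZ_1M),
					    0x9c000000);
	if (!page)
		page = dma_alloc_at_from_contiguous(dev, count,
						    get_order(SZ_1M), 0);
	return page;
}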