Example #1
u32 cma_alloc_phys(size_t size)
{
	unsigned long order;
	struct page *page;
	size_t count;

	/* Round the request up to whole pages, then derive both the
	 * allocation order and the exact page count from it. */
	size = PAGE_ALIGN(size);
	order = get_order(size);
	count = size >> PAGE_SHIFT;

	/* NULL device: allocate from the default (global) CMA area. */
	page = dma_alloc_from_contiguous(NULL, count, order);
	if (!page)
		return 0;

	__dma_clear_buffer(page, size);

	return page_to_phys(page);
}
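cma_alloc_phys() hands back a bare physical address (0 on failure) rather than a struct page, which suits devices that are programmed directly with a physical base. A minimal caller sketch under assumed names: dev_regs, dev_setup_dma() and DEV_DMA_BASE are hypothetical, not part of the example above.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>

#define DEV_DMA_BASE	0x10	/* assumed register offset, illustration only */

/* Hypothetical consumer: reserve 1 MiB of physically contiguous
 * memory and program a DMA engine with its base address. */
static int dev_setup_dma(void __iomem *dev_regs)
{
	u32 phys = cma_alloc_phys(SZ_1M);

	if (!phys)
		return -ENOMEM;

	writel(phys, dev_regs + DEV_DMA_BASE);
	return 0;
}

Note that the u32 return type silently truncates page_to_phys() on LPAE systems, where phys_addr_t is 64 bits, so this helper is only safe when the CMA area sits below 4 GiB.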
Example #2
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* Split the high-order block into independent order-0 pages
	 * and give back the tail pages we do not need. */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	/* Vendor accounting hook: record the retained DMA pages. */
	add_meminfo_total_pages(NR_DMA_PAGES, size >> PAGE_SHIFT);

	return page;
}
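Because the surplus tail pages are already returned inside the allocator, the matching free path only has to release size >> PAGE_SHIFT order-0 pages one by one. A sketch of that counterpart, modeled on the mainline ARM __dma_free_buffer() (shown for context; it is not part of this example):

static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	/* Every page is order-0 after split_page(), so a plain
	 * per-page free is sufficient. */
	while (page < e) {
		__free_page(page);
		page++;
	}
}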
Example #3
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
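The trim loop only does real work when size is not a power-of-two number of pages. A worked example of the arithmetic, assuming 4 KiB pages:

	size_t size  = 3 * PAGE_SIZE;           /* 12 KiB request               */
	unsigned long order = get_order(size);  /* 2: smallest block is 4 pages */
	size_t used  = size >> PAGE_SHIFT;      /* 3 pages are kept             */
	size_t spare = (1UL << order) - used;   /* 1 page is freed back         */

split_page() is what makes the early free legal: it gives each constituent page its own reference count, so the spare page can be returned with __free_page() while the rest stay allocated.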
Example #4
struct page *dma_alloc_at_from_contiguous(struct device *dev, int count,
				       unsigned int align, phys_addr_t at_addr)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;
	unsigned long start_pfn = __phys_to_pfn(at_addr);

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	/* A fixed target below the CMA area can never be satisfied.
	 * Otherwise translate at_addr into a page offset within the
	 * area; zero means "search from the start". */
	if (start_pfn && start_pfn < cma->base_pfn)
		return NULL;
	start = start_pfn ? start_pfn - cma->base_pfn : start;

	mutex_lock(&cma_mutex);

	for (;;) {
		unsigned long timeout = jiffies + msecs_to_jiffies(8000);

		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		/* Stop if the area is exhausted, or if a fixed-address
		 * request cannot be placed at exactly the asked offset.
		 * These checks key off start_pfn rather than start, since
		 * start is also advanced by the relocation retry below
		 * and must not turn a movable request into a fixed one. */
		if (pageno >= cma->count || (start_pfn && start != pageno))
			break;

		pfn = cma->base_pfn + pageno;
retry:
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (start_pfn && time_before(jiffies, timeout)) {
			/* Fixed-address request: keep retrying the same
			 * range for up to 8 s, dropping cached buffer
			 * heads that may still pin the target pages. */
			cond_resched();
			invalidate_bh_lrus();
			goto retry;
		} else if (ret != -EBUSY || start_pfn) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	if (page) {
		/* Remap the region's kernel mapping as DMA-coherent
		 * (uncached) before handing it out, then zero it. */
		__dma_remap(page, count << PAGE_SHIFT,
			pgprot_dmacoherent(PAGE_KERNEL));
		__dma_clear_buffer(page, count << PAGE_SHIFT);
	}
	return page;
}
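Passing at_addr == 0 makes start_pfn zero, so start stays 0 and the loop degenerates into an ordinary first-fit search over the whole CMA area; a non-zero at_addr pins the allocation to that exact page offset, retrying for up to 8 seconds while the target range is busy. A hedged caller sketch, where fw_alloc_carveout() and FW_CARVEOUT_BASE are assumed names, not defined in the example above:

#define FW_CARVEOUT_BASE	0x80000000UL	/* assumed fixed platform address */

/* Hypothetical use: place a 1 MiB buffer (256 pages with 4 KiB
 * pages) at a firmware-dictated physical address inside the
 * device's CMA area, requesting 2^8-page alignment. */
static struct page *fw_alloc_carveout(struct device *dev)
{
	return dma_alloc_at_from_contiguous(dev, 256, 8,
					    FW_CARVEOUT_BASE);
}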