Example #1
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
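The heart of this first variant is the search loop: bitmap_find_next_zero_area() does an aligned first-fit scan over the per-area bitmap, the winning run is reserved under cma->lock, and a failed migration (-EBUSY from alloc_contig_range()) makes the next scan resume just past the busy spot (start = pageno + mask + 1). Below is a minimal user-space model of that aligned first-fit search; find_zero_area() and the fixed-size used[] array are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

#define CMA_PAGES 64UL

static bool used[CMA_PAGES];	/* true = page already reserved */

/*
 * Aligned first-fit: find 'count' consecutive free pages whose start
 * index is a multiple of (mask + 1), scanning from 'start'.  Returns
 * CMA_PAGES when nothing fits, mirroring the "pageno >= cma->count"
 * failure test above.
 */
static unsigned long find_zero_area(unsigned long start, unsigned long count,
				    unsigned long mask)
{
	unsigned long i, j;

	for (i = (start + mask) & ~mask; i + count <= CMA_PAGES;
	     i = (i + mask + 1) & ~mask) {
		for (j = 0; j < count; j++)
			if (used[i + j])
				break;
		if (j == count)
			return i;
	}
	return CMA_PAGES;
}

int main(void)
{
	unsigned long mask = (1UL << 2) - 1;	/* order-2 alignment */
	unsigned long pageno;

	used[2] = used[3] = used[4] = used[5] = true;	/* pages 2..5 busy */

	pageno = find_zero_area(0, 4, mask);
	printf("free aligned run of 4 pages at index %lu\n", pageno);	/* 8 */

	/* Simulate an -EBUSY retry: resume just past the failed spot. */
	pageno = find_zero_area(pageno + mask + 1, 4, mask);
	printf("next candidate run starts at index %lu\n", pageno);	/* 12 */
	return 0;
}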
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align);
}
static struct page *__dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			printk(KERN_ERR "%s : cma->count is %lu, "
					"pageno is %lu\n", __func__,
					cma->count, pageno);
			ret = -ENOMEM;
			goto error;
		}

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			break;
		} else if (ret != -EBUSY && ret != -EAGAIN) {
			goto error;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);

	pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
	return pfn_to_page(pfn);
error:
	pr_err("%s(): returned error (%d)\n", __func__, ret);
	mutex_unlock(&cma_mutex);
	return NULL;
}
Example #4
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	/* HACK for MFC context buffer */
	if (!strncmp(dev_name(dev), "ion_video", strlen("ion_video")) && (count > 8))
		start = 16;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = cma->isolated ?
			0 : alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			cma->free_count -= count;
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn = 0, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p (%lx, %lx) is busy,"\
			" retrying\n", __func__, pfn_to_page(pfn),
			pfn, pfn + count);
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	if (!page)
		cma_bitmap_show(dev);

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p pfn(%lx)\n", __func__, page, pfn);
	return page;
}
Example #6
/**
 * dma_contiguous_isolate() - isolate contiguous memory from the page allocator
 * @dev: Pointer to device which owns the contiguous memory
 *
 * This function isolates contiguous memory from the page allocator. If some of
 * the contiguous memory is allocated, it is reclaimed.
 */
int dma_contiguous_isolate(struct device *dev)
{
	struct cma *cma = dev_get_cma_area(dev);
	int ret;
	int idx;

	if (!cma)
		return -ENODEV;

	if (cma->count == 0)
		return 0;

	mutex_lock(&cma_mutex);

	if (cma->isolated) {
		mutex_unlock(&cma_mutex);
		dev_err(dev, "Alread isolated!\n");
		return 0;
	}

	idx = find_first_zero_bit(cma->bitmap, cma->count);
	while (idx < cma->count) {
		int idx_set;

		idx_set = find_next_bit(cma->bitmap, cma->count, idx);
		do {
			ret = alloc_contig_range(cma->base_pfn + idx,
						cma->base_pfn + idx_set,
						MIGRATE_CMA);
		} while (ret == -EBUSY);

		if (ret < 0) {
			mutex_unlock(&cma_mutex);
			dma_contiguous_deisolate_until(dev, idx_set);
			dev_err(dev, "Failed to isolate %#lx@%#010llx (%d).\n",
				(idx_set - idx) * PAGE_SIZE,
				PFN_PHYS(cma->base_pfn + idx), ret);
			return ret;
		}

		idx = find_next_zero_bit(cma->bitmap, cma->count, idx_set);
	}

	cma->isolated = true;

	mutex_unlock(&cma_mutex);

	return 0;
}
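dma_contiguous_isolate() walks the bitmap as alternating runs: find_first_zero_bit()/find_next_zero_bit() locate the start of each free run, find_next_bit() locates its end, and only the free runs are handed to alloc_contig_range(). The short user-space sketch below models that run enumeration; next_zero()/next_set() are simplified stand-ins for the kernel bitmap helpers.

#include <stdbool.h>
#include <stdio.h>

#define NBITS 32UL

static bool bit[NBITS];		/* true = page already allocated */

/* Simplified stand-ins for find_next_zero_bit() and find_next_bit(). */
static unsigned long next_zero(unsigned long from)
{
	while (from < NBITS && bit[from])
		from++;
	return from;
}

static unsigned long next_set(unsigned long from)
{
	while (from < NBITS && !bit[from])
		from++;
	return from;
}

int main(void)
{
	unsigned long idx, idx_set;

	bit[5] = bit[6] = true;		/* one allocated run: pages 5..6 */
	bit[20] = true;			/* and a single allocated page */

	/* Enumerate the free runs, as the isolate loop above does. */
	idx = next_zero(0);
	while (idx < NBITS) {
		idx_set = next_set(idx);
		printf("free run: pages %lu..%lu (%lu pages)\n",
		       idx, idx_set - 1, idx_set - idx);
		idx = next_zero(idx_set);
	}
	return 0;
}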
Example #7
/**
 * dma_contiguous_info() - retrieving contiguous memory information
 * @dev:  Pointer to device to get the information.
 * @info: [OUT] pointer to a structure to store the information
 *
 * This function fills @info with the status of the contiguous memory area.
 * It returns -ENODEV if the given device does not have contiguous memory.
 */
int dma_contiguous_info(struct device *dev, struct cma_info *info)
{
	struct cma *cma = dev_get_cma_area(dev);

	if (!info)
		return 0;

	if (!cma)
		return -ENODEV;

	info->base = cma->base_pfn << PAGE_SHIFT;
	info->size = cma->count << PAGE_SHIFT;
	info->free = cma->free_count << PAGE_SHIFT;
	info->isolated = cma->isolated;

	return 0;
}
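struct cma_info itself is not part of this excerpt. A definition consistent with the assignments above (base, size and free counted in bytes, plus the isolation flag) could plausibly look like the following hypothetical sketch:

/* Hypothetical layout, inferred from the assignments in dma_contiguous_info(). */
struct cma_info {
	phys_addr_t	base;		/* cma->base_pfn << PAGE_SHIFT */
	size_t		size;		/* cma->count << PAGE_SHIFT */
	size_t		free;		/* cma->free_count << PAGE_SHIFT */
	bool		isolated;	/* mirrors cma->isolated */
};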
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	return page;
}
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flags);
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}
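dma_alloc_from_contiguous() and dma_release_from_contiguous() form the allocate/free pair that back-ends such as __dma_alloc_coherent() above build upon. A hypothetical driver-side pairing, written against the three-argument prototype used throughout these examples, might look like the sketch below (my_grab_buffer(), my_put_buffer() and the my_* variables are made-up names; this is a kernel-context fragment, not a standalone program):

#include <linux/dma-contiguous.h>
#include <linux/mm.h>
#include <linux/printk.h>

static struct page *my_pages;
static int my_nr_pages;

static int my_grab_buffer(struct device *dev, size_t size)
{
	my_nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* Alignment is given as an order; it is clamped to CONFIG_CMA_ALIGNMENT. */
	my_pages = dma_alloc_from_contiguous(dev, my_nr_pages, get_order(size));
	if (!my_pages)
		return -ENOMEM;
	return 0;
}

static void my_put_buffer(struct device *dev)
{
	if (my_pages && !dma_release_from_contiguous(dev, my_pages, my_nr_pages))
		pr_warn("pages did not come from the device's CMA area\n");
	my_pages = NULL;
}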
Example #11
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	unsigned long retry_timeout, retry_cnt;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	retry_timeout = jiffies + msecs_to_jiffies(500);
	retry_cnt = 0;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			if (start == 0 ||
			    (time_is_before_jiffies(retry_timeout) && retry_cnt != 0))
				break;
			cond_resched();
			retry_cnt++;
			start = 0;
			continue;
		}

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			adjust_managed_cma_page_count(page_zone(page), -count);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
struct page *dma_alloc_at_from_contiguous(struct device *dev, int count,
				       unsigned int align, phys_addr_t at_addr)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;
	unsigned long start_pfn = __phys_to_pfn(at_addr);

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	if (start_pfn && start_pfn < cma->base_pfn)
		return NULL;
	start = start_pfn ? start_pfn - cma->base_pfn : start;

	mutex_lock(&cma_mutex);

	for (;;) {
		unsigned long timeout = jiffies + msecs_to_jiffies(8000);

		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count || (start && start != pageno))
			break;

		pfn = cma->base_pfn + pageno;
retry:
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (start && time_before(jiffies, timeout)) {
			cond_resched();
			invalidate_bh_lrus();
			goto retry;
		} else if (ret != -EBUSY || start) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	if (page) {
		__dma_remap(page, count << PAGE_SHIFT,
			pgprot_dmacoherent(PAGE_KERNEL));
		__dma_clear_buffer(page, count << PAGE_SHIFT);
	}
	return page;
}
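The distinguishing step in dma_alloc_at_from_contiguous() is mapping the requested physical address onto a bitmap offset: start_pfn = __phys_to_pfn(at_addr), start = start_pfn - cma->base_pfn, after which the search must land exactly on that offset (the "start && start != pageno" test bails out otherwise). A tiny user-space illustration of that arithmetic, assuming 4 KiB pages and made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assume 4 KiB pages */

int main(void)
{
	unsigned long long at_addr = 0x40200000ULL;		/* requested address */
	unsigned long base_pfn = 0x40000000ULL >> PAGE_SHIFT;	/* CMA base pfn */
	unsigned long start_pfn = at_addr >> PAGE_SHIFT;	/* __phys_to_pfn() */
	unsigned long start = start_pfn - base_pfn;		/* bitmap offset */

	printf("bitmap search must start at page index %lu\n", start);	/* 512 */
	return 0;
}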
Example #13
/**
 * dma_contiguous_deisolate() - return contiguous memory to the page allocator
 * @dev: Pointer to device which owns the contiguous memory
 *
 * This function returns the contiguous memory that is not allocated by CMA
 * back to the page allocator so that the kernel can allocate from it again.
 */
void dma_contiguous_deisolate(struct device *dev)
{
	struct cma *cma = dev_get_cma_area(dev);
	dma_contiguous_deisolate_until(dev, cma->count);
}
static int cma_info_show(struct seq_file *s, void *unused)
{
	struct cma *cma = dev_get_cma_area(NULL);
	unsigned long start = 0, set = 0, end = 0, sum = 0;
	int nr_per_order[32];
	int i, total = 0, order, order_max = 0;
	struct page *pg;
	phys_addr_t fm = __pfn_to_phys(cma->base_pfn);
	phys_addr_t to = __pfn_to_phys(cma->base_pfn + cma->count - 1);

	seq_printf(s, "CMA Region: pfn(0x%lx:0x%lx) phy(%pa:%pa)\n",
		cma->base_pfn, cma->base_pfn + cma->count - 1, &fm, &to);

	seq_printf(s, "\n( Un-Set    )           [ Set       ]\n");
	while (1) {
		set = find_next_bit(cma->bitmap, cma->count, start);
		if (set >= cma->count)
			break;
		end = find_next_zero_bit(cma->bitmap, cma->count, set);

		if (set > 0)
			seq_printf(s, "(0x%5lx:0x%5lx) %5ld ",
				cma->base_pfn + start, cma->base_pfn + set - 1,
				set - start);
		else
			seq_printf(s, "%16.s", "");

		seq_printf(s, "\t[0x%5lx:0x%5lx] %5ld\n", cma->base_pfn + set,
			cma->base_pfn + end - 1, end - set);

		start = end;
		sum += (end - set);
	}

	if (start < cma->count)
		seq_printf(s, "(0x%5lx:0x%5lx) %5ld\n",
			cma->base_pfn + start, cma->base_pfn + cma->count - 1,
			cma->count - start);

	seq_printf(s, "Total: %16ld%24ld%12ld(pages)\n",
		cma->count - sum, sum, cma->count);

	for (i = 0; i < 32; i++)
		nr_per_order[i] = 0;
	pg = pfn_to_page(cma->base_pfn);
	start = -1;
	for (i = 0; i < cma->count; i++, pg++) {
		if (!test_bit(i, cma->bitmap) && !page_count(pg)) {
			if (start == -1)
				start = i;
			end = i;

			if (i < (cma->count - 1))
				continue;
		}
		if (start != -1) {
			total += (end - start + 1);
			order = fls(end - start + 1) - 1;

			nr_per_order[order]++;
			start = -1;
			if (order_max < order)
				order_max = order;
		}
	}

	seq_printf(s, "\nIdle pages per order, total: %d\nOrder:", total);
	for (i = 0; i <= order_max; i++)
		seq_printf(s, "%6d ", i);

	seq_printf(s, "\nCount:");
	for (i = 0; i <= order_max; i++)
		seq_printf(s, "%6d ", nr_per_order[i]);
	seq_printf(s, "\n");

	return 0;
}
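The second half of cma_info_show() bins each maximal run of idle pages (clear in the bitmap and with a zero page_count()) under order = fls(run_length) - 1, i.e. the largest power-of-two order that still fits inside the run. A compact user-space check of that binning, with fls() modelled by hand:

#include <stdio.h>

/* Model of the kernel's fls(): 1-based index of the highest set bit. */
static int fls_model(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int runs[] = { 1, 3, 4, 7, 8, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(runs) / sizeof(runs[0]); i++)
		printf("idle run of %4u pages -> order %d\n",
		       runs[i], fls_model(runs[i]) - 1);	/* 0 1 2 2 3 9 */
	return 0;
}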
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}