static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
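
For context, here is a minimal sketch of how such a per-page helper could be driven by a caller that converts a whole page array and rolls back on failure; the wrapper name and the rollback loop are illustrative assumptions, not part of the listing above.

static int ttm_set_array_caching(struct page **pages, unsigned long num_pages,
				 enum ttm_caching_state c_old,
				 enum ttm_caching_state c_new)
{
	unsigned long i;
	int ret;

	for (i = 0; i < num_pages; ++i) {
		ret = ttm_tt_set_page_caching(pages[i], c_old, c_new);
		if (ret)
			goto out_err;
	}
	return 0;

out_err:
	/* Undo the pages already converted so the array stays consistent. */
	while (i--)
		ttm_tt_set_page_caching(pages[i], c_new, c_old);
	return ret;
}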
Example #2
0
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
Example #3
0
static void __dma_set_pages(struct page *page, unsigned int count,
				struct dma_attrs *attrs)
{
	int ret = 0;

	if (attrs == NULL)
		ret = set_pages_uc(page, count);
	else if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		ret = set_pages_wc(page, count);
	else if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		pr_warn("%s:DMA attrs %p not supported\n",
					__func__, attrs->flags);

	if (ret)
		pr_err("%s failed\n", __func__);
}
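
A hypothetical inverse helper (not shown in the listing) would restore write-back caching for mappings that were made uncached or write-combined before the pages are freed; a sketch, assuming the same struct dma_attrs conventions as above:

static void __dma_clear_pages(struct page *page, unsigned int count,
				struct dma_attrs *attrs)
{
	int ret = 0;

	/* Only pages that were set UC or WC above need to be restored. */
	if (attrs == NULL || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		ret = set_pages_wb(page, count);

	if (ret)
		pr_err("%s failed\n", __func__);
}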
/* Allocate pages which will be used only by ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo, int from_highmem,
				bool cached, struct hmm_pool *dypool,
				struct hmm_pool *repool)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
	struct page *pages;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->page_obj = atomisp_kernel_malloc(
				sizeof(struct hmm_page_object) * pgnr);
	if (unlikely(!bo->page_obj)) {
		dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
		return -ENOMEM;
	}

	i = 0;
	alloc_pgnr = 0;

	/*
	 * get physical pages from dynamic pages pool.
	 */
	if (dypool->pops && dypool->pops->pool_alloc_pages) {
		alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
							bo->page_obj, pgnr,
							cached);
		hmm_mem_stat.dyc_size -= alloc_pgnr;

		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	/*
	 * get physical pages from reserved pages pool for atomisp.
	 */
	if (repool->pops && repool->pops->pool_alloc_pages) {
		alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
							&bo->page_obj[i], pgnr,
							cached);
		hmm_mem_stat.res_cnt += alloc_pgnr;
		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * If we are short of memory, fall back to the minimum
		 * order every time.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. If it fails and we fall back to
		 * HMM_MIN_ORDER, or if the requested order is already the
		 * minimum, we allow alloc_pages() to sleep for robustness.
		 *
		 * REVISIT: why is __GFP_FS necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_WAIT | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * In the low-memory case, if a higher-order allocation
			 * fails, retry with the minimum order. If that fails
			 * too, there is no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				dev_err(atomisp_dev,
					"%s: cannot allocate pages\n",
					 __func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * If allocation fails twice in a row, assume we are
			 * short of memory.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					dev_err(atomisp_dev,
						"set page uncacheable failed.\n");

					__free_pages(pages, order);

					goto cleanup;
				}
			}

			for (j = 0; j < blk_pgnr; j++) {
				bo->page_obj[i].page = pages + j;
				bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
			}

			pgnr -= blk_pgnr;
			hmm_mem_stat.sys_size += blk_pgnr;

			/*
			 * if order is not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;
cleanup:
	alloc_pgnr = i;
	free_private_bo_pages(bo, dypool, repool, alloc_pgnr);

	atomisp_kernel_free(bo->page_obj);

	return -ENOMEM;
}
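
The allocator above relies on two small order helpers that are not shown in this listing; a plausible sketch of them, assuming the usual power-of-two block arithmetic (fls() comes from <linux/bitops.h>), looks like this:

/* order_to_nr(): number of pages in a block of the given order. */
static inline unsigned int order_to_nr(unsigned int order)
{
	return 1U << order;
}

/* nr_to_order_bottom(): largest order whose block still fits in nr pages,
 * i.e. floor(log2(nr)) for nr > 0. */
static inline unsigned int nr_to_order_bottom(unsigned int nr)
{
	return fls(nr) - 1;
}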
Example #5
0
/* Allocate pages which will be used only by ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo, int from_highmem,
				bool cached)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr;
	struct page *pages;
	struct page_block *pgblk;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->pages = atomisp_kernel_malloc(sizeof(struct page *) * pgnr);
	if (unlikely(!bo->pages)) {
		v4l2_err(&atomisp_dev, "out of memory for bo->pages\n");
		return -ENOMEM;
	}

	i = 0;
	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * If we are short of memory, fall back to the minimum
		 * order every time.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. If it fails and we fall back to
		 * HMM_MIN_ORDER, or if the requested order is already the
		 * minimum, we allow alloc_pages() to sleep for robustness.
		 *
		 * REVISIT: why is __GFP_FS necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_WAIT | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * In the low-memory case, if a higher-order allocation
			 * fails, retry with the minimum order. If that fails
			 * too, there is no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				v4l2_err(&atomisp_dev,
					 "%s: cannot allocate pages\n",
					 __func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * If allocation fails twice in a row, assume we are
			 * short of memory.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			pgblk = kzalloc(sizeof(*pgblk), GFP_KERNEL);
			if (unlikely(!pgblk)) {
				v4l2_err(&atomisp_dev,
						"out of memory for pgblk\n");
				goto out_of_mem;
			}

			INIT_LIST_HEAD(&pgblk->list);
			pgblk->pages = pages;
			pgblk->order = order;

			list_add_tail(&pgblk->list, &bo->pgblocks);

			for (j = 0; j < blk_pgnr; j++)
				bo->pages[i++] = pages + j;

			pgnr -= blk_pgnr;

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					v4l2_err(&atomisp_dev,
						 "set page uncacheable failed.\n");
					goto cleanup;
				}
			}
			/*
			 * if order is not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;
out_of_mem:
	__free_pages(pages, order);
cleanup:
	while (!list_empty(&bo->pgblocks)) {
		pgblk = list_first_entry(&bo->pgblocks,
					 struct page_block, list);

		list_del(&pgblk->list);

		ret = set_pages_wb(pgblk->pages, order_to_nr(pgblk->order));
		if (ret)
			v4l2_err(&atomisp_dev,
				 "failed to set pages back to WB\n");

		__free_pages(pgblk->pages, pgblk->order);
		kfree(pgblk);
	}
	atomisp_kernel_free(bo->pages);

	return -ENOMEM;
}
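
The bookkeeping type behind bo->pgblocks is not part of this listing; a minimal sketch of what it would need to carry, based only on the fields referenced above (struct list_head comes from <linux/list.h>), is:

struct page_block {
	struct page		*pages;		/* first page of the alloc_pages() block */
	unsigned int		order;		/* order the block was allocated with */
	struct list_head	list;		/* link in bo->pgblocks */
};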
Example #6
0
static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)
{
	int ret;
	unsigned int blk_pgnr;
	unsigned int pgnr = pool_size;
	unsigned int order = 0;
	unsigned int i = 0;
	int fail_number = 0;
	struct page *pages;
	int j;
	struct hmm_reserved_pool_info *repool_info;

	if (pool_size == 0)
		return 0;

	ret = hmm_reserved_pool_setup(&repool_info, pool_size);
	if (ret) {
		dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n");
		return ret;
	}

	pgnr = pool_size;

	i = 0;
	order = MAX_ORDER;

	while (pgnr) {
		blk_pgnr = 1U << order;
		while (blk_pgnr > pgnr) {
			order--;
			blk_pgnr >>= 1U;
		}
		BUG_ON(order > MAX_ORDER);

		pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
		if (unlikely(!pages)) {
			if (order == 0) {
				fail_number++;
				dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n",
						__func__, fail_number);
				/* if allocation fails five times in a row, give up */

				/* FIXME: is this mechanism OK? */
				if (fail_number == ALLOC_PAGE_FAIL_NUM)
					goto end;
			} else {
				order--;
			}
		} else {
			blk_pgnr = 1U << order;

			ret = set_pages_uc(pages, blk_pgnr);
			if (ret) {
				dev_err(atomisp_dev,
						"set pages uncached failed\n");
				__free_pages(pages, order);
				goto end;
			}

			for (j = 0; j < blk_pgnr; j++)
				repool_info->pages[i++] = pages + j;

			repool_info->index += blk_pgnr;
			repool_info->pgnr += blk_pgnr;

			pgnr -= blk_pgnr;

			fail_number = 0;
		}
	}

end:
	repool_info->initialized = true;

	*pool = repool_info;

	dev_info(atomisp_dev,
		 "hmm_reserved_pool initialized successfully with %d pages.\n",
		 repool_info->pgnr);
	return 0;
}
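
hmm_reserved_pool_setup() is referenced above but not shown; a minimal sketch of what it would have to do, assuming only the repool_info fields used in this listing, is:

static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
				   unsigned int pool_size)
{
	struct hmm_reserved_pool_info *pool_info;

	pool_info = kzalloc(sizeof(*pool_info), GFP_KERNEL);
	if (unlikely(!pool_info))
		return -ENOMEM;

	pool_info->pages = kcalloc(pool_size, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!pool_info->pages)) {
		kfree(pool_info);
		return -ENOMEM;
	}

	/* kzalloc() already left index, pgnr and initialized at zero/false. */
	*repool_info = pool_info;
	return 0;
}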
static int hmm_reserved_pool_init(void **priv_data, unsigned int pool_size)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr, i;
	int fail_number = 0;
	struct page *pages;
	int j;
	struct hmm_reserved_pool_info *repool_info;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;

	if (pool_size == 0)
		return -EINVAL;

	ret = hmm_reserved_pool_setup(&repool_info, pool_size);
	if (ret) {
		v4l2_err(&atomisp_dev,
			    "hmm_reserved_pool_setup failed.\n");
		return ret;
	}

	pgnr = pool_size;

	i = 0;
	order = 0;

	while (pgnr) {
		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			fail_number++;
			v4l2_err(&atomisp_dev,
				 "%s: cannot allocate pages, failure count is %d\n",
				 __func__, fail_number);
			/* if allocation fails five times in a row, give up */

			/* FIXME: is this mechanism OK? */
			if (fail_number == ALLOC_PAGE_FAIL_NUM)
				goto end;
		} else {
			blk_pgnr = 1U << order;

			/*
			 * set memory to uncacheable -- UC_MINUS
			 */
			ret = set_pages_uc(pages, blk_pgnr);
			if (ret) {
				v4l2_err(&atomisp_dev,
					 "set page uncacheable failed.\n");
				__free_pages(pages, order);
				goto end;
			}

			for (j = 0; j < blk_pgnr; j++)
				repool_info->pages[i++] = pages + j;

			repool_info->index += blk_pgnr;
			repool_info->pgnr += blk_pgnr;

			pgnr -= blk_pgnr;

			fail_number = 0;
		}
	}

end:
	repool_info->flag = HMM_RESERVED_POOL_INITED;

	*priv_data = repool_info;

	v4l2_info(&atomisp_dev,
		  "hmm_reserved_pool initialized successfully with %d pages.\n",
		  repool_info->pgnr);
	return 0;
}
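
For completeness, a hedged sketch of how a pool_alloc_pages callback for this reserved pool could hand the pre-allocated, already-uncached pages back to alloc_private_pages(); it follows the first variant's repool_info fields, and the callback name, the HMM_PAGE_TYPE_RESERVED page type and the lock-free index handling are assumptions, not taken from the listings above.

static int hmm_reserved_pool_alloc_pages(void *pool_info,
					 struct hmm_page_object *page_obj,
					 unsigned int pgnr, bool cached)
{
	struct hmm_reserved_pool_info *repool_info = pool_info;
	unsigned int i = 0;

	/* The pool only holds uncached pages, so 'cached' is ignored here. */
	if (!repool_info || !repool_info->initialized)
		return 0;

	/* Hand out pages from the top of the pre-populated array. */
	while (i < pgnr && repool_info->index > 0) {
		repool_info->index--;
		page_obj[i].page = repool_info->pages[repool_info->index];
		page_obj[i].type = HMM_PAGE_TYPE_RESERVED;
		i++;
	}

	return i;	/* number of pages actually provided by the pool */
}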