/*
 * Tear down the dynamic page pool referenced by *priv_data.
 *
 * Clears the pool's INITED flag, returns every cached page to the
 * system (restoring write-back caching first), destroys the slab
 * cache (when USE_KMEM_CACHE) and frees the pool descriptor itself.
 * On return *priv_data is set to NULL.  No-op if the pool pointer is
 * NULL or the pool was never initialized.
 */
static void hmm_dynamic_pool_exit(void **priv_data)
{
	struct hmm_dynamic_pool_info *dypool_info;
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;

	if (*priv_data != NULL)
		dypool_info = *priv_data;
	else
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (dypool_info->flag != HMM_DYNAMIC_POOL_INITED) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	/* Clear the init flag first so concurrent pool users back off. */
	dypool_info->flag &= ~HMM_DYNAMIC_POOL_INITED;

	while (!list_empty(&dypool_info->pages_list)) {
		hmm_page = list_entry(dypool_info->pages_list.next,
					struct hmm_page, list);

		list_del(&hmm_page->list);
		/*
		 * The entry is already unlinked, so it is safe to drop the
		 * lock for the (possibly sleeping) page release below.
		 */
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);

		/* can cause thread sleep, so cannot be put into spin_lock */
		ret = set_pages_wb(hmm_page->page, 1);
		if (ret)
			v4l2_err(&atomisp_dev,
				"set page to WB err...\n");
		__free_pages(hmm_page->page, 0);

#ifdef USE_KMEM_CACHE
		kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
#else
		atomisp_kernel_free(hmm_page);
#endif
		/* Re-take the lock before the next list_empty() check. */
		spin_lock_irqsave(&dypool_info->list_lock, flags);
	}

	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

#ifdef USE_KMEM_CACHE
	kmem_cache_destroy(dypool_info->pgptr_cache);
#endif

	atomisp_kernel_free(dypool_info);

	*priv_data = NULL;
}
/*
 * Tear down the dynamic page pool referenced by *pool.
 *
 * Marks the pool uninitialized, returns every cached page to the
 * system (write-back caching restored first), destroys the slab
 * cache (when USE_KMEM_CACHE) and frees the descriptor.  *pool is
 * set to NULL on return.  No-op for a NULL or uninitialized pool.
 */
static void hmm_dynamic_pool_exit(void **pool)
{
	struct hmm_dynamic_pool_info *dypool_info = *pool;
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;

	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (!dypool_info->initialized) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	/* Mark dead first so concurrent pool users stop touching us. */
	dypool_info->initialized = false;

	while (!list_empty(&dypool_info->pages_list)) {
		hmm_page = list_entry(dypool_info->pages_list.next,
					struct hmm_page, list);

		list_del(&hmm_page->list);
		/* Entry is unlinked; safe to drop the lock while freeing. */
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);

		/* can cause thread sleep, so cannot be put into spin_lock */
		ret = set_pages_wb(hmm_page->page, 1);
		if (ret)
			dev_err(atomisp_dev, "set page to WB err...\n");
		__free_pages(hmm_page->page, 0);

#ifdef USE_KMEM_CACHE
		kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
#else
		atomisp_kernel_free(hmm_page);
#endif
		/* Re-take the lock before the next list_empty() check. */
		spin_lock_irqsave(&dypool_info->list_lock, flags);
	}

	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

#ifdef USE_KMEM_CACHE
	kmem_cache_destroy(dypool_info->pgptr_cache);
#endif

	atomisp_kernel_free(dypool_info);

	*pool = NULL;
}
/*
 * Set up the dynamic page pool backing @pool.
 *
 * A @pool_size of 0 means "no dynamic pool" and succeeds without
 * creating anything.  Returns 0 on success, -ENOMEM when either the
 * descriptor or the slab cache cannot be allocated.
 */
static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
{
	struct hmm_dynamic_pool_info *info;

	/* Zero-sized pool: dynamic pooling is simply disabled. */
	if (!pool_size)
		return 0;

	info = atomisp_kernel_malloc(sizeof(struct hmm_dynamic_pool_info));
	if (unlikely(!info)) {
		dev_err(atomisp_dev, "out of memory for repool_info.\n");
		return -ENOMEM;
	}

#ifdef USE_KMEM_CACHE
	/* Slab cache for the per-page bookkeeping structs. */
	info->pgptr_cache = kmem_cache_create("pgptr_cache",
					      sizeof(struct hmm_page), 0,
					      SLAB_HWCACHE_ALIGN, NULL);
	if (!info->pgptr_cache) {
		atomisp_kernel_free(info);
		return -ENOMEM;
	}
#endif

	spin_lock_init(&info->list_lock);
	INIT_LIST_HEAD(&info->pages_list);
	info->pgnr = 0;
	info->pool_size = pool_size;
	info->initialized = true;

	*pool = info;

	return 0;
}
/*
 * Set up the dynamic page pool stored in *priv_data.
 *
 * Returns 0 on success, -EINVAL for a zero @pool_size, or -ENOMEM
 * when the descriptor or the slab cache cannot be allocated.
 */
static int hmm_dynamic_pool_init(void **priv_data, unsigned int pool_size)
{
	struct hmm_dynamic_pool_info *info;

	if (!pool_size)
		return -EINVAL;

	info = atomisp_kernel_malloc(sizeof(struct hmm_dynamic_pool_info));
	if (unlikely(!info)) {
		v4l2_err(&atomisp_dev,
			"out of memory for repool_info.\n");
		return -ENOMEM;
	}

#ifdef USE_KMEM_CACHE
	/* Slab cache for the per-page bookkeeping structs. */
	info->pgptr_cache = kmem_cache_create("pgptr_cache",
					      sizeof(struct hmm_page), 0,
					      SLAB_HWCACHE_ALIGN, NULL);
	if (!info->pgptr_cache) {
		atomisp_kernel_free(info);
		return -ENOMEM;
	}
#endif

	spin_lock_init(&info->list_lock);
	INIT_LIST_HEAD(&info->pages_list);
	info->flag = HMM_DYNAMIC_POOL_INITED;

	*priv_data = info;

	return 0;
}
/*
 * Allocate and initialize a reserved-pool descriptor able to hold
 * @pool_size page pointers, returning it through @repool_info.
 *
 * Returns 0 on success or -ENOMEM when either allocation fails; on
 * failure nothing is leaked and *repool_info is left untouched.
 */
static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
					unsigned int pool_size)
{
	struct hmm_reserved_pool_info *info;

	info = atomisp_kernel_malloc(sizeof(struct hmm_reserved_pool_info));
	if (unlikely(!info)) {
		v4l2_err(&atomisp_dev,
			"out of memory for repool_info.\n");
		return -ENOMEM;
	}

	/* Backing array of page pointers, sized for the whole pool. */
	info->pages = atomisp_kernel_malloc(sizeof(struct page *) * pool_size);
	if (unlikely(!info->pages)) {
		v4l2_err(&atomisp_dev,
			"out of memory for repool_info->pages.\n");
		atomisp_kernel_free(info);
		return -ENOMEM;
	}

	spin_lock_init(&info->list_lock);
	info->index = 0;
	info->pgnr = 0;
	info->flag = HMM_RESERVED_POOL_INITED;

	*repool_info = info;

	return 0;
}
/*
 * Release every page backing @bo.
 *
 * Pages are handed to free_private_bo_pages(), which may recycle them
 * into the dynamic (@dypool) or reserved (@repool) pools instead of
 * returning them to the system; the page-object array is then freed.
 */
static void free_private_pages(struct hmm_buffer_object *bo,
				struct hmm_pool *dypool,
				struct hmm_pool *repool)
{
	free_private_bo_pages(bo, dypool, repool, bo->pgnr);

	atomisp_kernel_free(bo->page_obj);
}
/*
 * Tear down the reserved page pool referenced by *priv_data.
 *
 * Snapshots the page count under the lock, resets the pool state and
 * clears its INITED flag, then returns all reserved pages to the
 * system with write-back caching restored.  *priv_data is set to NULL
 * on return.  No-op for a NULL or uninitialized pool.
 */
static void hmm_reserved_pool_exit(void **priv_data)
{
	struct hmm_reserved_pool_info *repool_info;
	unsigned long flags;
	int i, ret;
	unsigned int pgnr;

	if (*priv_data != NULL)
		repool_info = *priv_data;
	else
		return;

	spin_lock_irqsave(&repool_info->list_lock, flags);
	if (repool_info->flag != HMM_RESERVED_POOL_INITED) {
		spin_unlock_irqrestore(&repool_info->list_lock, flags);
		return;
	}
	/* Snapshot the count, then invalidate the pool under the lock. */
	pgnr = repool_info->pgnr;
	repool_info->index = 0;
	repool_info->pgnr = 0;
	repool_info->flag &= ~HMM_RESERVED_POOL_INITED;
	spin_unlock_irqrestore(&repool_info->list_lock, flags);

	/* set_pages_wb() may sleep, so the loop runs outside the lock. */
	for (i = 0; i < pgnr; i++) {
		ret = set_pages_wb(repool_info->pages[i], 1);
		if (ret)
			v4l2_err(&atomisp_dev,
				"set page to WB err...\n");
		__free_pages(repool_info->pages[i], 0);
	}

	atomisp_kernel_free(repool_info->pages);
	atomisp_kernel_free(repool_info);

	*priv_data = NULL;
}
/*
 * dynamic memory pool ops.
 */
/*
 * Pull up to @size pages out of the dynamic pool into @page_obj.
 *
 * Returns the number of pages delivered (0..@size); when the pool
 * runs dry before @size is reached, the caller must allocate the
 * remainder from another source.  Entries are tagged
 * HMM_PAGE_TYPE_DYNAMIC so they can be recycled on free.
 */
static unsigned int get_pages_from_dynamic_pool(void *priv_data,
					struct hmm_page_object *page_obj,
					unsigned int size)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	unsigned int i = 0;
	struct hmm_dynamic_pool_info *dypool_info;

	if (priv_data != NULL)
		dypool_info = priv_data;
	else
		return i;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (dypool_info->flag == HMM_DYNAMIC_POOL_INITED) {
		while (!list_empty(&dypool_info->pages_list)) {
			hmm_page = list_entry(dypool_info->pages_list.next,
						struct hmm_page, list);

			list_del(&hmm_page->list);
			/*
			 * Entry is unlinked; drop the lock while freeing
			 * the bookkeeping struct below.
			 */
			spin_unlock_irqrestore(&dypool_info->list_lock, flags);

			page_obj[i].page = hmm_page->page;
			page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
#ifdef USE_KMEM_CACHE
			kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
#else
			atomisp_kernel_free(hmm_page);
#endif

			/* Lock is not held at this point, so just return. */
			if (i == size)
				return i;

			spin_lock_irqsave(&dypool_info->list_lock, flags);
		}
	}
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	return i;
}
/*
 * dynamic memory pool ops.
 */
/*
 * dynamic memory pool ops.
 */
/*
 * Pull up to @size pages out of the dynamic pool into @page_obj.
 *
 * Returns the number of pages delivered (0..@size); the caller must
 * allocate any remainder elsewhere.  Entries are tagged
 * HMM_PAGE_TYPE_DYNAMIC so they can be recycled on free.
 *
 * NOTE(review): @cached is not used by this implementation --
 * presumably kept for pool-ops signature compatibility; confirm
 * against the ops table.
 */
static unsigned int get_pages_from_dynamic_pool(void *pool,
					struct hmm_page_object *page_obj,
					unsigned int size, bool cached)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	unsigned int i = 0;
	struct hmm_dynamic_pool_info *dypool_info = pool;

	if (!dypool_info)
		return 0;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (dypool_info->initialized) {
		while (!list_empty(&dypool_info->pages_list)) {
			hmm_page = list_entry(dypool_info->pages_list.next,
						struct hmm_page, list);

			list_del(&hmm_page->list);
			dypool_info->pgnr--;
			/*
			 * Entry is unlinked and accounted; safe to drop
			 * the lock while freeing the bookkeeping struct.
			 */
			spin_unlock_irqrestore(&dypool_info->list_lock, flags);

			page_obj[i].page = hmm_page->page;
			page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
#ifdef USE_KMEM_CACHE
			kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
#else
			atomisp_kernel_free(hmm_page);
#endif

			/* Lock is not held at this point, so just return. */
			if (i == size)
				return i;

			spin_lock_irqsave(&dypool_info->list_lock, flags);
		}
	}
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	return i;
}
/* Esempio n. 10 / 0 -- stray extraction artifact; commented out so the file stays valid C */
/*
 * Release every page block owned by @bo, restoring write-back
 * caching on each block before handing it back to the system, then
 * free the page-pointer array itself.
 */
static void free_private_pages(struct hmm_buffer_object *bo)
{
	struct page_block *blk;
	int ret;

	/* Drain the block list one entry at a time. */
	while (!list_empty(&bo->pgblocks)) {
		blk = list_first_entry(&bo->pgblocks, struct page_block, list);
		list_del(&blk->list);

		/* Pages may have been set uncacheable; switch back to WB. */
		ret = set_pages_wb(blk->pages, order_to_nr(blk->order));
		if (ret)
			v4l2_err(&atomisp_dev,
					"set page to WB err...\n");

		__free_pages(blk->pages, blk->order);
		kfree(blk);
	}

	atomisp_kernel_free(bo->pages);
}
/*
 * Allocate pages which will be used only by ISP.
 *
 * Fills bo->page_obj with bo->pgnr pages, sourced in priority order:
 * the dynamic pool, then the reserved pool, then alloc_pages().  When
 * !cached, freshly allocated system pages are switched to uncacheable
 * (UC_MINUS) before use.  Returns 0 on success or -ENOMEM; on failure
 * every page acquired so far is released via free_private_bo_pages().
 */
static int alloc_private_pages(struct hmm_buffer_object *bo, int from_highmem,
				bool cached, struct hmm_pool *dypool,
				struct hmm_pool *repool)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
	struct page *pages;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->page_obj = atomisp_kernel_malloc(
				sizeof(struct hmm_page_object) * pgnr);
	if (unlikely(!bo->page_obj)) {
		dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
		return -ENOMEM;
	}

	/* i counts pages acquired so far; pgnr counts those still needed. */
	i = 0;
	alloc_pgnr = 0;

	/*
	 * get physical pages from dynamic pages pool.
	 */
	if (dypool->pops && dypool->pops->pool_alloc_pages) {
		alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
							bo->page_obj, pgnr,
							cached);
		hmm_mem_stat.dyc_size -= alloc_pgnr;

		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	/*
	 * get physical pages from reserved pages pool for atomisp.
	 */
	if (repool->pops && repool->pops->pool_alloc_pages) {
		alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
							&bo->page_obj[i], pgnr,
							cached);
		hmm_mem_stat.res_cnt += alloc_pgnr;
		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	/* Allocate the remainder from the system, highest order first. */
	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * if be short of memory, we will set order to 0
		 * everytime.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. In case it fails and fallbacks
		 * to HMM_MIN_ORDER or in case the requested order is originally
		 * the minimum value, we can allow alloc_pages() to sleep for
		 * robustness purpose.
		 *
		 * REVISIT: why __GFP_FS is necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_WAIT | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * in low memory case, if allocation page fails,
			 * we turn to try if order=0 allocation could
			 * succeed. if order=0 fails too, that means there is
			 * no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				dev_err(atomisp_dev,
					"%s: cannot allocate pages\n",
					 __func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * if fail two times continuously, we think be short
			 * of memory now.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					dev_err(atomisp_dev,
						     "set page uncacheable"
							"failed.\n");

					__free_pages(pages, order);

					goto cleanup;
				}
			}

			for (j = 0; j < blk_pgnr; j++) {
				bo->page_obj[i].page = pages + j;
				bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
			}

			pgnr -= blk_pgnr;
			hmm_mem_stat.sys_size += blk_pgnr;

			/*
			 * if order is not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;
cleanup:
	/* Roll back: i pages were successfully acquired before failing. */
	alloc_pgnr = i;
	free_private_bo_pages(bo, dypool, repool, alloc_pgnr);

	atomisp_kernel_free(bo->page_obj);

	return -ENOMEM;
}
/* Esempio n. 12 / 0 -- stray extraction artifact; commented out so the file stays valid C */
/*
 * Allocate pages which will be used only by ISP.
 *
 * Fills bo->pages with bo->pgnr pages obtained from alloc_pages(),
 * recording each underlying allocation as a page_block on
 * bo->pgblocks so it can later be freed at its original order.  When
 * !cached the pages are switched to uncacheable (UC_MINUS).  Returns
 * 0 on success or -ENOMEM after rolling back every block allocated
 * so far.
 */
static int alloc_private_pages(struct hmm_buffer_object *bo, int from_highmem,
				bool cached)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr;
	struct page *pages;
	struct page_block *pgblk;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->pages = atomisp_kernel_malloc(sizeof(struct page *) * pgnr);
	if (unlikely(!bo->pages)) {
		v4l2_err(&atomisp_dev, "out of memory for bo->pages\n");
		return -ENOMEM;
	}

	/* i indexes bo->pages; pgnr counts pages still needed. */
	i = 0;
	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * if be short of memory, we will set order to 0
		 * everytime.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. In case it fails and fallbacks
		 * to HMM_MIN_ORDER or in case the requested order is originally
		 * the minimum value, we can allow alloc_pages() to sleep for
		 * robustness purpose.
		 *
		 * REVISIT: why __GFP_FS is necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_WAIT | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * in low memory case, if allocation page fails,
			 * we turn to try if order=0 allocation could
			 * succeed. if order=0 fails too, that means there is
			 * no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				v4l2_err(&atomisp_dev,
					 "%s: cannot allocate pages\n",
					 __func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * if fail two times continuously, we think be short
			 * of memory now.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			pgblk = kzalloc(sizeof(*pgblk), GFP_KERNEL);
			if (unlikely(!pgblk)) {
				v4l2_err(&atomisp_dev,
						"out of memory for pgblk\n");
				goto out_of_mem;
			}

			INIT_LIST_HEAD(&pgblk->list);
			pgblk->pages = pages;
			pgblk->order = order;

			/*
			 * Link the block in before set_pages_uc() so the
			 * cleanup path can find and free it on failure.
			 */
			list_add_tail(&pgblk->list, &bo->pgblocks);

			for (j = 0; j < blk_pgnr; j++)
				bo->pages[i++] = pages + j;

			pgnr -= blk_pgnr;

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					v4l2_err(&atomisp_dev,
						     "set page uncacheable"
							"failed.\n");
					goto cleanup;
				}
			}
			/*
			 * if order is not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;
out_of_mem:
	/* The pages from the failed iteration are not on the list yet. */
	__free_pages(pages, order);
cleanup:
	/* Free every block already linked onto bo->pgblocks. */
	while (!list_empty(&bo->pgblocks)) {
		pgblk = list_first_entry(&bo->pgblocks,
					 struct page_block, list);

		list_del(&pgblk->list);

		ret = set_pages_wb(pgblk->pages, order_to_nr(pgblk->order));
		if (ret)
			v4l2_err(&atomisp_dev,
					"set page to WB err...\n");

		__free_pages(pgblk->pages, pgblk->order);
		kfree(pgblk);
	}
	atomisp_kernel_free(bo->pages);

	return -ENOMEM;
}