static void free_pages_to_dynamic_pool(void *pool,
					struct hmm_page_object *page_obj)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;
	struct hmm_dynamic_pool_info *dypool_info = pool;

	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (!dypool_info->initialized) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
		return;

	if (dypool_info->pgnr >= dypool_info->pool_size) {
		/* free page directly back to system */
		ret = set_pages_wb(page_obj->page, 1);
		if (ret)
			dev_err(atomisp_dev, "set page to WB err ...\n");
		__free_pages(page_obj->page, 0);

		return;
	}
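	/*
	 * There is still room in the pool, so instead of freeing the page,
	 * wrap it in a tracking node and queue it for reuse.
	 */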
#ifdef USE_KMEM_CACHE
	hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
						GFP_KERNEL);
#else
	hmm_page = atomisp_kernel_malloc(sizeof(struct hmm_page));
#endif
	if (!hmm_page) {
		dev_err(atomisp_dev, "out of memory for hmm_page.\n");

		/* free page directly */
		ret = set_pages_wb(page_obj->page, 1);
		if (ret)
			dev_err(atomisp_dev, "set page to WB err ...\n");
		__free_pages(page_obj->page, 0);

		return;
	}

	hmm_page->page = page_obj->page;

	/*
	 * add to pages_list of pages_pool
	 */
	spin_lock_irqsave(&dypool_info->list_lock, flags);
	list_add_tail(&hmm_page->list, &dypool_info->pages_list);
	dypool_info->pgnr++;
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);
}
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
Example #3
static void __dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, struct dma_attrs *attrs,
			       bool is_coherent)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

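	/*
	 * Non-coherent buffers are presumably mapped with a non-write-back
	 * attribute by the allocation path; restore the default write-back
	 * caching before the pages are handed back.
	 */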
	if (!is_coherent)
		set_pages_wb(page, count);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}
static void hmm_dynamic_pool_exit(void **priv_data)
{
	struct hmm_dynamic_pool_info *dypool_info;
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;

	dypool_info = *priv_data;
	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (dypool_info->flag != HMM_DYNAMIC_POOL_INITED) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	dypool_info->flag &= ~HMM_DYNAMIC_POOL_INITED;
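	/*
	 * The flag is cleared under list_lock, so concurrent pool users now
	 * see the pool as torn down while the free list is drained below.
	 */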

	while (!list_empty(&dypool_info->pages_list)) {
		hmm_page = list_entry(dypool_info->pages_list.next,
					struct hmm_page, list);

		list_del(&hmm_page->list);
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);

		/* set_pages_wb() may sleep, so it cannot be called under the spinlock */
		ret = set_pages_wb(hmm_page->page, 1);
		if (ret)
			v4l2_err(&atomisp_dev,
				"set page to WB err...\n");
		__free_pages(hmm_page->page, 0);

#ifdef USE_KMEM_CACHE
		kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
#else
		atomisp_kernel_free(hmm_page);
#endif
		spin_lock_irqsave(&dypool_info->list_lock, flags);
	}

	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

#ifdef USE_KMEM_CACHE
	kmem_cache_destroy(dypool_info->pgptr_cache);
#endif

	atomisp_kernel_free(dypool_info);

	*priv_data = NULL;
}
static void free_private_bo_pages(struct hmm_buffer_object *bo,
				  struct hmm_pool *dypool,
				  struct hmm_pool *repool, int free_pgnr)
{
	int i, ret;

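	/*
	 * Return each page to the pool it was allocated from; pages that
	 * belong to no pool go straight back to the system.
	 */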
	for (i = 0; i < free_pgnr; i++) {
		switch (bo->page_obj[i].type) {
		case HMM_PAGE_TYPE_RESERVED:
			if (repool->pops
			    && repool->pops->pool_free_pages) {
				repool->pops->pool_free_pages(repool->pool_info,
							&bo->page_obj[i]);
				hmm_mem_stat.res_cnt--;
			}
			break;
		/*
		 * HMM_PAGE_TYPE_GENERAL indicates that the pages come from
		 * system memory, so when they are freed they should be put
		 * back into the dynamic pool.
		 */
		case HMM_PAGE_TYPE_DYNAMIC:
		case HMM_PAGE_TYPE_GENERAL:
			if (dypool->pops
			    && dypool->pops->pool_inited
			    && dypool->pops->pool_inited(dypool->pool_info)) {
				if (dypool->pops->pool_free_pages)
					dypool->pops->pool_free_pages(
							      dypool->pool_info,
							      &bo->page_obj[i]);
				break;
			}

			/*
			 * If the dynamic memory pool does not exist, the
			 * pages need to be freed back to the system directly.
			 */
		default:
			ret = set_pages_wb(bo->page_obj[i].page, 1);
			if (ret)
				dev_err(atomisp_dev,
						"set page to WB err ...\n");
			__free_pages(bo->page_obj[i].page, 0);
			hmm_mem_stat.sys_size--;
			break;
		}
	}

	return;
}
static void free_pages_to_dynamic_pool(void *priv_data,
					struct hmm_page_object *page_obj)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;
	struct hmm_dynamic_pool_info *dypool_info = priv_data;

	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (dypool_info->flag != HMM_DYNAMIC_POOL_INITED) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
		return;
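
	/*
	 * Wrap the page in a tracking node so it can be linked into the
	 * pool's free list for later reuse.
	 */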
#ifdef USE_KMEM_CACHE
	hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
						GFP_KERNEL);
#else
	hmm_page = atomisp_kernel_malloc(sizeof(struct hmm_page));
#endif
	if (!hmm_page) {
		v4l2_err(&atomisp_dev, "out of memory for hmm_page.\n");

		/* free page directly */
		ret = set_pages_wb(page_obj->page, 1);
		if (ret)
			v4l2_err(&atomisp_dev,
					"set page to WB err ...\n");
		__free_pages(page_obj->page, 0);

		return;
	}

	hmm_page->page = page_obj->page;

	/*
	 * add to pages_list of pages_pool
	 */
	spin_lock_irqsave(&dypool_info->list_lock, flags);
	list_add_tail(&hmm_page->list, &dypool_info->pages_list);
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);
}
Example #7
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

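	/*
	 * Map the requested caching state onto the corresponding
	 * set_pages_*() / set_memory_*() helper.
	 */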
	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
static void hmm_dynamic_pool_exit(void **pool)
{
	struct hmm_dynamic_pool_info *dypool_info = *pool;
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;

	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (!dypool_info->initialized) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	dypool_info->initialized = false;

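	/*
	 * Drain the free list. The lock is dropped while each page is
	 * switched back to write-back caching and freed, since
	 * set_pages_wb() may sleep, and re-acquired before the list is
	 * touched again.
	 */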
	while (!list_empty(&dypool_info->pages_list)) {
		hmm_page = list_entry(dypool_info->pages_list.next,
					struct hmm_page, list);

		list_del(&hmm_page->list);
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);

		/* set_pages_wb() may sleep, so it cannot be called under the spinlock */
		ret = set_pages_wb(hmm_page->page, 1);
		if (ret)
			dev_err(atomisp_dev, "set page to WB err...\n");
		__free_pages(hmm_page->page, 0);

#ifdef USE_KMEM_CACHE
		kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
#else
		atomisp_kernel_free(hmm_page);
#endif
		spin_lock_irqsave(&dypool_info->list_lock, flags);
	}

	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

#ifdef USE_KMEM_CACHE
	kmem_cache_destroy(dypool_info->pgptr_cache);
#endif

	atomisp_kernel_free(dypool_info);

	*pool = NULL;
}
Example #9
static void hmm_reserved_pool_exit(void **pool)
{
	unsigned long flags;
	int i, ret;
	unsigned int pgnr;
	struct hmm_reserved_pool_info *repool_info = *pool;

	if (!repool_info)
		return;

	spin_lock_irqsave(&repool_info->list_lock, flags);
	if (!repool_info->initialized) {
		spin_unlock_irqrestore(&repool_info->list_lock, flags);
		return;
	}
	pgnr = repool_info->pgnr;
	repool_info->index = 0;
	repool_info->pgnr = 0;
	repool_info->initialized = false;
	spin_unlock_irqrestore(&repool_info->list_lock, flags);

	for (i = 0; i < pgnr; i++) {
		ret = set_pages_wb(repool_info->pages[i], 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err...ret=%d\n", ret);
		/*
		 * W/A: set_pages_wb() occasionally returns -EFAULT,
		 * indicating that the page address is outside the valid
		 * direct-mapping range (0xffff880000000000 to
		 * 0xffffc7ffffffffff); __free_pages() would then panic.
		 * It is not known why the page address becomes invalid; it
		 * may be memory corruption under low memory. Only free the
		 * page when set_pages_wb() succeeded.
		 */
		if (!ret)
			__free_pages(repool_info->pages[i], 0);
	}

	kfree(repool_info->pages);
	kfree(repool_info);

	*pool = NULL;
}
Example #10
static void free_private_pages(struct hmm_buffer_object *bo)
{
	struct page_block *pgblk;
	int ret;

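	/*
	 * Walk the buffer object's page blocks, restoring write-back
	 * caching on each block before freeing it.
	 */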
	while (!list_empty(&bo->pgblocks)) {
		pgblk = list_first_entry(&bo->pgblocks,
					 struct page_block, list);

		list_del(&pgblk->list);

		ret = set_pages_wb(pgblk->pages, order_to_nr(pgblk->order));
		if (ret)
			v4l2_err(&atomisp_dev,
					"set page to WB err...\n");

		__free_pages(pgblk->pages, pgblk->order);
		kfree(pgblk);
	}

	atomisp_kernel_free(bo->pages);
}
static void hmm_reserved_pool_exit(void **priv_data)
{
	struct hmm_reserved_pool_info *repool_info;
	unsigned long flags;
	int i, ret;
	unsigned int pgnr;

	repool_info = *priv_data;
	if (!repool_info)
		return;

	spin_lock_irqsave(&repool_info->list_lock, flags);
	if (repool_info->flag != HMM_RESERVED_POOL_INITED) {
		spin_unlock_irqrestore(&repool_info->list_lock, flags);
		return;
	}
	pgnr = repool_info->pgnr;
	repool_info->index = 0;
	repool_info->pgnr = 0;
	repool_info->flag &= ~HMM_RESERVED_POOL_INITED;
	spin_unlock_irqrestore(&repool_info->list_lock, flags);

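	/*
	 * Restore the default write-back caching attribute on each page
	 * before handing it back to the system.
	 */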
	for (i = 0; i < pgnr; i++) {
		ret = set_pages_wb(repool_info->pages[i], 1);
		if (ret)
			v4l2_err(&atomisp_dev,
				"set page to WB err...\n");
		__free_pages(repool_info->pages[i], 0);
	}

	atomisp_kernel_free(repool_info->pages);
	atomisp_kernel_free(repool_info);

	*priv_data = NULL;
}
Example #12
/* Allocate pages that will be used only by the ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo, int from_highmem,
				bool cached)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr;
	struct page *pages;
	struct page_block *pgblk;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->pages = atomisp_kernel_malloc(sizeof(struct page *) * pgnr);
	if (unlikely(!bo->pages)) {
		v4l2_err(&atomisp_dev, "out of memory for bo->pages\n");
		return -ENOMEM;
	}

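	/*
	 * Allocate bo->pgnr pages in power-of-two blocks. If a block
	 * allocation fails, retry at HMM_MIN_ORDER; after repeated
	 * failures, treat the system as short of memory and stay at the
	 * minimum order.
	 */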
	i = 0;
	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * If we are short of memory, fall back to the minimum
		 * order every time.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we
		 * don't want alloc_pages() to sleep. If it fails and we fall
		 * back to HMM_MIN_ORDER, or if the requested order is the
		 * minimum to begin with, we allow alloc_pages() to sleep
		 * for robustness.
		 *
		 * REVISIT: why is __GFP_FS necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_WAIT | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * In a low-memory situation, if the allocation
			 * fails, retry at the minimum order. If that fails
			 * too, there is no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				v4l2_err(&atomisp_dev,
					 "%s: cannot allocate pages\n",
					 __func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * If allocation fails twice in a row, assume we are
			 * short of memory.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			pgblk = kzalloc(sizeof(*pgblk), GFP_KERNEL);
			if (unlikely(!pgblk)) {
				v4l2_err(&atomisp_dev,
						"out of memory for pgblk\n");
				goto out_of_mem;
			}

			INIT_LIST_HEAD(&pgblk->list);
			pgblk->pages = pages;
			pgblk->order = order;

			list_add_tail(&pgblk->list, &bo->pgblocks);

			for (j = 0; j < blk_pgnr; j++)
				bo->pages[i++] = pages + j;

			pgnr -= blk_pgnr;

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					v4l2_err(&atomisp_dev,
						 "set page uncacheable failed.\n");
					goto cleanup;
				}
			}
			/*
			 * if order is not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;
out_of_mem:
	__free_pages(pages, order);
cleanup:
	while (!list_empty(&bo->pgblocks)) {
		pgblk = list_first_entry(&bo->pgblocks,
					 struct page_block, list);

		list_del(&pgblk->list);

		ret = set_pages_wb(pgblk->pages, order_to_nr(pgblk->order));
		if (ret)
			v4l2_err(&atomisp_dev,
					"set page to WB err...\n");

		__free_pages(pgblk->pages, pgblk->order);
		kfree(pgblk);
	}
	atomisp_kernel_free(bo->pages);

	return -ENOMEM;
}