static void free_private_pages(struct hmm_buffer_object *bo)
{
	struct page_block *pgblk;
	int ret;

	while (!list_empty(&bo->pgblocks)) {
		pgblk = list_first_entry(&bo->pgblocks,
					 struct page_block, list);

		list_del(&pgblk->list);

		ret = set_pages_wb(pgblk->pages, order_to_nr(pgblk->order));
		if (ret)
			v4l2_err(&atomisp_dev,
				 "set page to WB err...\n");

		__free_pages(pgblk->pages, pgblk->order);
		kfree(pgblk);
	}

	atomisp_kernel_free(bo->pages);
}
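/*
 * For reference: free_private_pages() and the page-block based
 * alloc_private_pages() further below walk bo->pgblocks, a list of
 * higher-order allocations. The struct itself is not part of this
 * excerpt; a minimal sketch of what it is assumed to look like
 * (field names are an assumption inferred from the usage above):
 */
struct page_block {
	struct page		*pages;	/* first page of the 2^order block */
	unsigned int		order;	/* allocation order of this block */
	struct list_head	list;	/* linked into bo->pgblocks */
};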
/* Allocate pages which will be used only by ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo,
			       int from_highmem,
			       bool cached,
			       struct hmm_pool *dypool,
			       struct hmm_pool *repool)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
	struct page *pages;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->page_obj = atomisp_kernel_malloc(
				sizeof(struct hmm_page_object) * pgnr);
	if (unlikely(!bo->page_obj)) {
		dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
		return -ENOMEM;
	}

	i = 0;
	alloc_pgnr = 0;

	/*
	 * get physical pages from dynamic pages pool.
	 */
	if (dypool->pops && dypool->pops->pool_alloc_pages) {
		alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
							    bo->page_obj, pgnr,
							    cached);
		hmm_mem_stat.dyc_size -= alloc_pgnr;

		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	/*
	 * get physical pages from reserved pages pool for atomisp.
	 */
	if (repool->pops && repool->pops->pool_alloc_pages) {
		alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
							    &bo->page_obj[i],
							    pgnr, cached);
		hmm_mem_stat.res_cnt += alloc_pgnr;
		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * if we are short of memory, set order to the minimum
		 * every time.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. In case it fails and falls back
		 * to HMM_MIN_ORDER, or in case the requested order is
		 * originally the minimum value, we can allow alloc_pages() to
		 * sleep for robustness.
		 *
		 * REVISIT: why is __GFP_FS necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_WAIT | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * in the low memory case, if the allocation fails,
			 * retry with order=0. if order=0 fails too, there is
			 * no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				dev_err(atomisp_dev,
					"%s: cannot allocate pages\n",
					__func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * if it fails two times in a row, consider the
			 * system short of memory.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					dev_err(atomisp_dev,
						"set page uncacheable failed.\n");

					__free_pages(pages, order);

					goto cleanup;
				}
			}

			for (j = 0; j < blk_pgnr; j++) {
				bo->page_obj[i].page = pages + j;
				bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
			}

			pgnr -= blk_pgnr;
			hmm_mem_stat.sys_size += blk_pgnr;

			/*
			 * if order is not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;
cleanup:
	alloc_pgnr = i;
	free_private_bo_pages(bo, dypool, repool, alloc_pgnr);

	atomisp_kernel_free(bo->page_obj);

	return -ENOMEM;
}
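/*
 * The pool-based allocator above fills an array of page objects so the
 * free path can tell pool pages apart from pages obtained directly via
 * alloc_pages(). The type is not part of this excerpt; a rough sketch of
 * what it is assumed to contain (names inferred from the usage above):
 */
struct hmm_page_object {
	struct page		*page;	/* backing system page */
	enum hmm_page_type	type;	/* e.g. HMM_PAGE_TYPE_GENERAL, set
					 * when taken from alloc_pages() */
};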
/* Allocate pages which will be used only by ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo,
			       int from_highmem, bool cached)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr;
	struct page *pages;
	struct page_block *pgblk;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->pages = atomisp_kernel_malloc(sizeof(struct page *) * pgnr);
	if (unlikely(!bo->pages)) {
		v4l2_err(&atomisp_dev, "out of memory for bo->pages\n");
		return -ENOMEM;
	}

	i = 0;
	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * if we are short of memory, set order to the minimum
		 * every time.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. In case it fails and falls back
		 * to HMM_MIN_ORDER, or in case the requested order is
		 * originally the minimum value, we can allow alloc_pages() to
		 * sleep for robustness.
		 *
		 * REVISIT: why is __GFP_FS necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_WAIT | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * in the low memory case, if the allocation fails,
			 * retry with order=0. if order=0 fails too, there is
			 * no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				v4l2_err(&atomisp_dev,
					 "%s: cannot allocate pages\n",
					 __func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * if it fails two times in a row, consider the
			 * system short of memory.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			pgblk = kzalloc(sizeof(*pgblk), GFP_KERNEL);
			if (unlikely(!pgblk)) {
				v4l2_err(&atomisp_dev,
					 "out of memory for pgblk\n");
				goto out_of_mem;
			}

			INIT_LIST_HEAD(&pgblk->list);
			pgblk->pages = pages;
			pgblk->order = order;

			list_add_tail(&pgblk->list, &bo->pgblocks);

			for (j = 0; j < blk_pgnr; j++)
				bo->pages[i++] = pages + j;

			pgnr -= blk_pgnr;

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					v4l2_err(&atomisp_dev,
						 "set page uncacheable failed.\n");
					goto cleanup;
				}
			}

			/*
			 * if order is not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}
	return 0;

out_of_mem:
	__free_pages(pages, order);
cleanup:
	while (!list_empty(&bo->pgblocks)) {
		pgblk = list_first_entry(&bo->pgblocks,
					 struct page_block, list);

		list_del(&pgblk->list);

		ret = set_pages_wb(pgblk->pages, order_to_nr(pgblk->order));
		if (ret)
			v4l2_err(&atomisp_dev,
				 "set page to WB err...\n");

		__free_pages(pgblk->pages, pgblk->order);
		kfree(pgblk);
	}
	atomisp_kernel_free(bo->pages);

	return -ENOMEM;
}
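/*
 * Both allocators lean on two small order/page-count helpers that are not
 * shown above: order_to_nr() converts an allocation order into a page
 * count, and nr_to_order_bottom() picks the largest order that still fits
 * in the remaining page count, so buffers are built from the biggest
 * blocks the allocator can provide. A minimal sketch of how they are
 * assumed to be defined; treat the exact bodies as an assumption:
 */
static unsigned int order_to_nr(unsigned int order)
{
	return 1U << order;		/* 2^order pages per block */
}

static unsigned int nr_to_order_bottom(unsigned int nr)
{
	return fls(nr) - 1;		/* largest order with 2^order <= nr */
}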