static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
                                   unsigned int pool_size)
{
        struct hmm_reserved_pool_info *pool_info;

        pool_info = atomisp_kernel_malloc(
                        sizeof(struct hmm_reserved_pool_info));
        if (unlikely(!pool_info)) {
                v4l2_err(&atomisp_dev, "out of memory for repool_info.\n");
                return -ENOMEM;
        }

        pool_info->pages = atomisp_kernel_malloc(
                                sizeof(struct page *) * pool_size);
        if (unlikely(!pool_info->pages)) {
                v4l2_err(&atomisp_dev, "out of memory for repool_info->pages.\n");
                atomisp_kernel_free(pool_info);
                return -ENOMEM;
        }

        pool_info->index = 0;
        pool_info->pgnr = 0;
        spin_lock_init(&pool_info->list_lock);
        pool_info->flag = HMM_RESERVED_POOL_INITED;

        *repool_info = pool_info;

        return 0;
}
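/*
 * Sketch only, not part of the driver: hmm_reserved_pool_setup() above only
 * allocates the bookkeeping (the pages[] pointer array); the pages themselves
 * are expected to be provided by a later population step. The helper name and
 * the exact meaning of index/pgnr below are assumptions made for illustration.
 */
static int hmm_reserved_pool_fill_sketch(struct hmm_reserved_pool_info *pool_info,
                                         unsigned int pool_size)
{
        unsigned int i;
        struct page *page;

        for (i = 0; i < pool_size; i++) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        break;  /* keep whatever was obtained so far */
                /* the real driver may also change caching attributes here,
                 * as alloc_private_pages() below does with set_pages_uc() */
                pool_info->pages[i] = page;
        }

        pool_info->index = i;   /* assumed: next free slot */
        pool_info->pgnr = i;    /* assumed: pages currently held by the pool */

        return i ? 0 : -ENOMEM;
}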
/* Dynamic pool variant that caps the number of cached free pages at pool_size. */
static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
{
        struct hmm_dynamic_pool_info *dypool_info;

        if (pool_size == 0)
                return 0;

        dypool_info = atomisp_kernel_malloc(
                        sizeof(struct hmm_dynamic_pool_info));
        if (unlikely(!dypool_info)) {
                dev_err(atomisp_dev, "out of memory for dypool_info.\n");
                return -ENOMEM;
        }

#ifdef USE_KMEM_CACHE
        dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
                                                     sizeof(struct hmm_page), 0,
                                                     SLAB_HWCACHE_ALIGN, NULL);
        if (!dypool_info->pgptr_cache) {
                atomisp_kernel_free(dypool_info);
                return -ENOMEM;
        }
#endif

        INIT_LIST_HEAD(&dypool_info->pages_list);
        spin_lock_init(&dypool_info->list_lock);
        dypool_info->initialized = true;
        dypool_info->pool_size = pool_size;
        dypool_info->pgnr = 0;

        *pool = dypool_info;

        return 0;
}
/* Dynamic pool variant without a size cap; state is tracked via the flag field. */
static int hmm_dynamic_pool_init(void **priv_data, unsigned int pool_size)
{
        struct hmm_dynamic_pool_info *dypool_info;

        if (pool_size == 0)
                return -EINVAL;

        dypool_info = atomisp_kernel_malloc(
                        sizeof(struct hmm_dynamic_pool_info));
        if (unlikely(!dypool_info)) {
                v4l2_err(&atomisp_dev, "out of memory for dypool_info.\n");
                return -ENOMEM;
        }

#ifdef USE_KMEM_CACHE
        dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
                                                     sizeof(struct hmm_page), 0,
                                                     SLAB_HWCACHE_ALIGN, NULL);
        if (!dypool_info->pgptr_cache) {
                atomisp_kernel_free(dypool_info);
                return -ENOMEM;
        }
#endif

        INIT_LIST_HEAD(&dypool_info->pages_list);
        spin_lock_init(&dypool_info->list_lock);
        dypool_info->flag = HMM_DYNAMIC_POOL_INITED;

        *priv_data = dypool_info;

        return 0;
}
/* Return a freed ISP page to the dynamic pool, or back to the system once the pool is full. */
static void free_pages_to_dynamic_pool(void *pool,
                                       struct hmm_page_object *page_obj)
{
        struct hmm_page *hmm_page;
        unsigned long flags;
        int ret;
        struct hmm_dynamic_pool_info *dypool_info = pool;

        if (!dypool_info)
                return;

        spin_lock_irqsave(&dypool_info->list_lock, flags);
        if (!dypool_info->initialized) {
                spin_unlock_irqrestore(&dypool_info->list_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&dypool_info->list_lock, flags);

        if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
                return;

        if (dypool_info->pgnr >= dypool_info->pool_size) {
                /* free page directly back to system */
                ret = set_pages_wb(page_obj->page, 1);
                if (ret)
                        dev_err(atomisp_dev, "set page to WB err ...\n");
                __free_pages(page_obj->page, 0);
                return;
        }

#ifdef USE_KMEM_CACHE
        hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache, GFP_KERNEL);
#else
        hmm_page = atomisp_kernel_malloc(sizeof(struct hmm_page));
#endif
        if (!hmm_page) {
                dev_err(atomisp_dev, "out of memory for hmm_page.\n");
                /* free page directly */
                ret = set_pages_wb(page_obj->page, 1);
                if (ret)
                        dev_err(atomisp_dev, "set page to WB err ...\n");
                __free_pages(page_obj->page, 0);
                return;
        }

        hmm_page->page = page_obj->page;

        /*
         * add to pages_list of pages_pool
         */
        spin_lock_irqsave(&dypool_info->list_lock, flags);
        list_add_tail(&hmm_page->list, &dypool_info->pages_list);
        dypool_info->pgnr++;
        spin_unlock_irqrestore(&dypool_info->list_lock, flags);
}
/* Variant without a pool-size cap: every non-reserved page is kept in the pool. */
static void free_pages_to_dynamic_pool(void *priv_data,
                                       struct hmm_page_object *page_obj)
{
        struct hmm_page *hmm_page;
        unsigned long flags;
        int ret;
        struct hmm_dynamic_pool_info *dypool_info = priv_data;

        if (!dypool_info)
                return;

        spin_lock_irqsave(&dypool_info->list_lock, flags);
        if (dypool_info->flag != HMM_DYNAMIC_POOL_INITED) {
                spin_unlock_irqrestore(&dypool_info->list_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&dypool_info->list_lock, flags);

        if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
                return;

#ifdef USE_KMEM_CACHE
        hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache, GFP_KERNEL);
#else
        hmm_page = atomisp_kernel_malloc(sizeof(struct hmm_page));
#endif
        if (!hmm_page) {
                v4l2_err(&atomisp_dev, "out of memory for hmm_page.\n");
                /* free page directly */
                ret = set_pages_wb(page_obj->page, 1);
                if (ret)
                        v4l2_err(&atomisp_dev, "set page to WB err ...\n");
                __free_pages(page_obj->page, 0);
                return;
        }

        hmm_page->page = page_obj->page;

        /*
         * add to pages_list of pages_pool
         */
        spin_lock_irqsave(&dypool_info->list_lock, flags);
        list_add_tail(&hmm_page->list, &dypool_info->pages_list);
        spin_unlock_irqrestore(&dypool_info->list_lock, flags);
}
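/*
 * Sketch for orientation, assuming the pool-ops indirection dereferenced by
 * alloc_private_pages() below (dypool->pops->pool_alloc_pages() etc.): the
 * init/free routines above are reached through a per-pool ops table rather
 * than being called directly. The struct layout and symbol names here are
 * assumptions for illustration, not taken from this excerpt.
 */
struct hmm_pool_ops_sketch {
        int  (*pool_init)(void **pool, unsigned int pool_size);
        int  (*pool_alloc_pages)(void *pool, struct hmm_page_object *page_obj,
                                 unsigned int size, bool cached);
        void (*pool_free_pages)(void *pool, struct hmm_page_object *page_obj);
};

static const struct hmm_pool_ops_sketch dynamic_pops_sketch = {
        .pool_init       = hmm_dynamic_pool_init,
        .pool_free_pages = free_pages_to_dynamic_pool,
        /* .pool_alloc_pages would point at the dynamic pool's allocator,
         * which is not shown in this excerpt. */
};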
/* Allocate pages which will be used only by the ISP (pool-aware variant). */
static int alloc_private_pages(struct hmm_buffer_object *bo, int from_highmem,
                               bool cached, struct hmm_pool *dypool,
                               struct hmm_pool *repool)
{
        int ret;
        unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
        struct page *pages;
        gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
        int i, j;
        int failure_number = 0;
        bool reduce_order = false;
        bool lack_mem = true;

        if (from_highmem)
                gfp |= __GFP_HIGHMEM;

        pgnr = bo->pgnr;

        bo->page_obj = atomisp_kernel_malloc(
                                sizeof(struct hmm_page_object) * pgnr);
        if (unlikely(!bo->page_obj)) {
                dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
                return -ENOMEM;
        }

        i = 0;
        alloc_pgnr = 0;

        /*
         * get physical pages from dynamic pages pool.
         */
        if (dypool->pops && dypool->pops->pool_alloc_pages) {
                alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
                                                            bo->page_obj, pgnr,
                                                            cached);
                hmm_mem_stat.dyc_size -= alloc_pgnr;

                if (alloc_pgnr == pgnr)
                        return 0;
        }

        pgnr -= alloc_pgnr;
        i += alloc_pgnr;

        /*
         * get physical pages from reserved pages pool for atomisp.
         */
        if (repool->pops && repool->pops->pool_alloc_pages) {
                alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
                                                            &bo->page_obj[i],
                                                            pgnr, cached);
                hmm_mem_stat.res_cnt += alloc_pgnr;
                if (alloc_pgnr == pgnr)
                        return 0;
        }

        pgnr -= alloc_pgnr;
        i += alloc_pgnr;

        while (pgnr) {
                order = nr_to_order_bottom(pgnr);
                /*
                 * if we are short of memory, fall back to
                 * order 0 every time.
                 */
                if (lack_mem)
                        order = HMM_MIN_ORDER;
                else if (order > HMM_MAX_ORDER)
                        order = HMM_MAX_ORDER;
retry:
                /*
                 * When order > HMM_MIN_ORDER, for performance reasons we
                 * don't want alloc_pages() to sleep. In case it fails and
                 * falls back to HMM_MIN_ORDER, or in case the requested
                 * order is HMM_MIN_ORDER to begin with, we allow
                 * alloc_pages() to sleep for robustness.
                 *
                 * REVISIT: why is __GFP_FS necessary?
                 */
                if (order == HMM_MIN_ORDER) {
                        gfp &= ~GFP_NOWAIT;
                        gfp |= __GFP_WAIT | __GFP_FS;
                }

                pages = alloc_pages(gfp, order);
                if (unlikely(!pages)) {
                        /*
                         * in low memory case, if allocation fails, retry
                         * with order 0. if order 0 fails too, there is
                         * no memory left.
                         */
                        if (order == HMM_MIN_ORDER) {
                                dev_err(atomisp_dev,
                                        "%s: cannot allocate pages\n",
                                        __func__);
                                goto cleanup;
                        }
                        order = HMM_MIN_ORDER;
                        failure_number++;
                        reduce_order = true;
                        /*
                         * if allocation fails twice in a row, assume we
                         * are short of memory from now on.
                         */
                        if (failure_number == 2) {
                                lack_mem = true;
                                failure_number = 0;
                        }
                        goto retry;
                } else {
                        blk_pgnr = order_to_nr(order);

                        if (!cached) {
                                /*
                                 * set memory to uncacheable -- UC_MINUS
                                 */
                                ret = set_pages_uc(pages, blk_pgnr);
                                if (ret) {
                                        dev_err(atomisp_dev,
                                                "set page uncacheable failed.\n");

                                        __free_pages(pages, order);

                                        goto cleanup;
                                }
                        }

                        for (j = 0; j < blk_pgnr; j++) {
                                bo->page_obj[i].page = pages + j;
                                bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
                        }

                        pgnr -= blk_pgnr;
                        hmm_mem_stat.sys_size += blk_pgnr;

                        /*
                         * if order is not reduced this time, clear
                         * failure_number.
                         */
                        if (reduce_order)
                                reduce_order = false;
                        else
                                failure_number = 0;
                }
        }

        return 0;
cleanup:
        alloc_pgnr = i;
        free_private_bo_pages(bo, dypool, repool, alloc_pgnr);

        atomisp_kernel_free(bo->page_obj);

        return -ENOMEM;
}
/* Allocate pages which will be used only by the ISP (page_block list variant). */
static int alloc_private_pages(struct hmm_buffer_object *bo, int from_highmem,
                               bool cached)
{
        int ret;
        unsigned int pgnr, order, blk_pgnr;
        struct page *pages;
        struct page_block *pgblk;
        gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
        int i, j;
        int failure_number = 0;
        bool reduce_order = false;
        bool lack_mem = true;

        if (from_highmem)
                gfp |= __GFP_HIGHMEM;

        pgnr = bo->pgnr;

        bo->pages = atomisp_kernel_malloc(sizeof(struct page *) * pgnr);
        if (unlikely(!bo->pages)) {
                v4l2_err(&atomisp_dev, "out of memory for bo->pages\n");
                return -ENOMEM;
        }

        i = 0;
        while (pgnr) {
                order = nr_to_order_bottom(pgnr);
                /*
                 * if we are short of memory, fall back to
                 * order 0 every time.
                 */
                if (lack_mem)
                        order = HMM_MIN_ORDER;
                else if (order > HMM_MAX_ORDER)
                        order = HMM_MAX_ORDER;
retry:
                /*
                 * When order > HMM_MIN_ORDER, for performance reasons we
                 * don't want alloc_pages() to sleep. In case it fails and
                 * falls back to HMM_MIN_ORDER, or in case the requested
                 * order is HMM_MIN_ORDER to begin with, we allow
                 * alloc_pages() to sleep for robustness.
                 *
                 * REVISIT: why is __GFP_FS necessary?
                 */
                if (order == HMM_MIN_ORDER) {
                        gfp &= ~GFP_NOWAIT;
                        gfp |= __GFP_WAIT | __GFP_FS;
                }

                pages = alloc_pages(gfp, order);
                if (unlikely(!pages)) {
                        /*
                         * in low memory case, if allocation fails, retry
                         * with order 0. if order 0 fails too, there is
                         * no memory left.
                         */
                        if (order == HMM_MIN_ORDER) {
                                v4l2_err(&atomisp_dev,
                                         "%s: cannot allocate pages\n",
                                         __func__);
                                goto cleanup;
                        }
                        order = HMM_MIN_ORDER;
                        failure_number++;
                        reduce_order = true;
                        /*
                         * if allocation fails twice in a row, assume we
                         * are short of memory from now on.
                         */
                        if (failure_number == 2) {
                                lack_mem = true;
                                failure_number = 0;
                        }
                        goto retry;
                } else {
                        blk_pgnr = order_to_nr(order);

                        pgblk = kzalloc(sizeof(*pgblk), GFP_KERNEL);
                        if (unlikely(!pgblk)) {
                                v4l2_err(&atomisp_dev,
                                         "out of memory for pgblk\n");
                                goto out_of_mem;
                        }

                        INIT_LIST_HEAD(&pgblk->list);
                        pgblk->pages = pages;
                        pgblk->order = order;

                        list_add_tail(&pgblk->list, &bo->pgblocks);

                        for (j = 0; j < blk_pgnr; j++)
                                bo->pages[i++] = pages + j;

                        pgnr -= blk_pgnr;

                        if (!cached) {
                                /*
                                 * set memory to uncacheable -- UC_MINUS
                                 */
                                ret = set_pages_uc(pages, blk_pgnr);
                                if (ret) {
                                        v4l2_err(&atomisp_dev,
                                                 "set page uncacheable failed.\n");
                                        goto cleanup;
                                }
                        }

                        /*
                         * if order is not reduced this time, clear
                         * failure_number.
                         */
                        if (reduce_order)
                                reduce_order = false;
                        else
                                failure_number = 0;
                }
        }
        return 0;

out_of_mem:
        __free_pages(pages, order);
cleanup:
        while (!list_empty(&bo->pgblocks)) {
                pgblk = list_first_entry(&bo->pgblocks,
                                         struct page_block, list);

                list_del(&pgblk->list);

                ret = set_pages_wb(pgblk->pages, order_to_nr(pgblk->order));
                if (ret)
                        v4l2_err(&atomisp_dev, "set page to WB err...\n");

                __free_pages(pgblk->pages, pgblk->order);
                kfree(pgblk);
        }
        atomisp_kernel_free(bo->pages);

        return -ENOMEM;
}
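/*
 * Sketch only: free_private_bo_pages(), referenced in the cleanup path of the
 * pool-aware alloc_private_pages() above, is not part of this excerpt. Given
 * the data structures used here, it could plausibly walk the first free_pgnr
 * page_obj entries and hand each page back to the pool it came from. The
 * exact signature, ops-table field names and accounting below are assumptions.
 */
static void free_private_bo_pages_sketch(struct hmm_buffer_object *bo,
                                         struct hmm_pool *dypool,
                                         struct hmm_pool *repool,
                                         int free_pgnr)
{
        int i, ret;

        for (i = 0; i < free_pgnr; i++) {
                switch (bo->page_obj[i].type) {
                case HMM_PAGE_TYPE_RESERVED:
                        /* reserved pages go back to the reserved pool */
                        if (repool->pops && repool->pops->pool_free_pages)
                                repool->pops->pool_free_pages(repool->pool_info,
                                                              &bo->page_obj[i]);
                        break;
                default:
                        /* try the dynamic pool first; otherwise release to the
                         * system after restoring the write-back attribute */
                        if (dypool->pops && dypool->pops->pool_free_pages) {
                                dypool->pops->pool_free_pages(dypool->pool_info,
                                                              &bo->page_obj[i]);
                                break;
                        }
                        ret = set_pages_wb(bo->page_obj[i].page, 1);
                        if (ret)
                                dev_err(atomisp_dev, "set page to WB err ...\n");
                        __free_pages(bo->page_obj[i].page, 0);
                        break;
                }
        }
}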