static void free_buffer_page(struct ion_system_heap *heap, struct ion_buffer *buffer, struct page *page, unsigned int order) { bool cached = ion_buffer_cached(buffer); bool split_pages = ion_buffer_fault_user_mappings(buffer); int i; if ((buffer->flags & ION_FLAG_FREED_FROM_SHRINKER)) { ion_alloc_dec_usage(ION_TOTAL, 1 << order); if (split_pages) { for (i = 0; i < (1 << order); i++) __free_page(page + i); } else { __free_pages(page, order); } } else { struct ion_page_pool *pool; if (cached) pool = heap->cached_pools[order_to_index(order)]; else pool = heap->uncached_pools[order_to_index(order)]; ion_page_pool_free(pool, page); } }
/*
 * Allocate one page block of @order from the kgsl heap's pool that
 * serves that order.  Returns whatever the pool allocator returns
 * (the page, or NULL on failure — TODO confirm pool contract).
 */
static struct page *alloc_buffer_page(unsigned long order)
{
	struct kgsl_page_pool *pool = kgsl_heap.pools[order_to_index(order)];

	return kgsl_page_pool_alloc(pool);
}
/*
 * Allocate a page block of @order for @buffer from the appropriate
 * (cached or uncached) page pool.
 *
 * @from_pool is passed through to ion_page_pool_alloc(); presumably it
 * reports whether the pages came from the pool rather than a fresh
 * allocation — TODO confirm against the pool implementation.
 *
 * If the buffer can be faulted in by user mappings, the high-order
 * block is split into order-0 pages so they can be mapped and freed
 * individually.
 *
 * Returns the page on success or NULL on failure.
 */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order, bool *from_pool)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct page *page;
	struct ion_page_pool *pool;

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];

	page = ion_page_pool_alloc(pool, from_pool);
	if (!page)
		return NULL;	/* fix: was "return 0" — use NULL for pointers */

	if (split_pages)
		split_page(page, order);

	return page;
}
/*
 * Free one page block.  Cached buffers and shrinker-initiated frees go
 * straight back to the kernel; uncached normal frees are recycled
 * through the heap's page pool for that order.
 */
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool shrinker_free =
		buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE;

	if (ion_buffer_cached(buffer) || shrinker_free) {
		__free_pages(page, order);
		return;
	}

	ion_page_pool_free(heap->pools[order_to_index(order)], page);
}
/*
 * Allocate the largest page block from the configured "orders" table
 * that both fits within @size and does not exceed @max_order, trying
 * successively smaller orders on failure.
 *
 * Returns a kmalloc'd page_info describing the allocation (page, order,
 * from_pool hint), or NULL if the metadata allocation or every page
 * allocation attempt fails.  The caller owns the returned page_info.
 *
 * NOTE(review): from_pool is decided by peeking at the pool's counts
 * *before* alloc_buffer_page() runs, so it can be stale if another
 * thread drains or refills the pool in between — TODO confirm callers
 * only use it as a hint.  Also note it is never reset to false, so if
 * a peek sets it true and that allocation fails, the flag carries over
 * to the next (smaller) order.  For cached buffers the peek is skipped
 * and from_pool stays false.
 */
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;
	struct ion_page_pool *pool;
	bool from_pool = false;

	/* Metadata node describing this allocation; freed by the caller. */
	info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
	if (!info)
		return NULL;

	for (i = 0; i < num_orders; i++) {
		/* Skip orders larger than the remaining request ... */
		if (size < order_to_size(orders[i]))
			continue;
		/* ... and orders above the caller's cap. */
		if (max_order < orders[i])
			continue;

		/*
		 * Record whether the uncached pool currently has pages at
		 * this order (see NOTE above about the race).
		 */
		if (!ion_buffer_cached(buffer)) {
			pool = heap->pools[order_to_index(orders[i])];
			mutex_lock(&pool->mutex);
			if ((pool->high_count > 0) || (pool->low_count > 0))
				from_pool = true;
			mutex_unlock(&pool->mutex);
		}

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;	/* fall back to the next smaller order */

		info->page = page;
		info->order = orders[i];
		info->from_pool = from_pool;
		INIT_LIST_HEAD(&info->list);
		return info;
	}

	/* Nothing could be allocated at any order. */
	kfree(info);
	return NULL;
}
static struct page *alloc_buffer_page(struct ion_system_heap *heap, struct ion_buffer *buffer, unsigned long order) { bool cached = ion_buffer_cached(buffer); struct ion_page_pool *pool = heap->pools[order_to_index(order)]; struct page *page; if (!cached) { page = ion_page_pool_alloc(pool); } else { gfp_t gfp_flags = low_order_gfp_flags; if (order > 4) gfp_flags = high_order_gfp_flags; page = alloc_pages(gfp_flags, order); } if (!page) return NULL; return page; }
/* Return @page (a block of @order) to the kgsl pool serving that order. */
static void free_buffer_page(struct page *page, unsigned int order)
{
	kgsl_page_pool_free(kgsl_heap.pools[order_to_index(order)], page);
}