static struct page_info *alloc_largest_available(struct ion_system_heap *heap, struct ion_buffer *buffer, unsigned long size, unsigned int max_order) { struct page *page; struct page_info *info; int i; bool from_pool; for (i = 0; i < num_orders; i++) { if (size < order_to_size(orders[i])) continue; if (max_order < orders[i]) continue; page = alloc_buffer_page(heap, buffer, orders[i], &from_pool); if (!page) continue; info = kmalloc(sizeof(struct page_info), GFP_KERNEL); if (info) { info->page = page; info->order = orders[i]; info->from_pool = from_pool; } return info; } return NULL; }
/*
 * kgsl_heap_alloc() - grab the largest page block that fits @size.
 *
 * Tries each supported order from the global @orders table in turn,
 * ignoring orders whose block size exceeds the request, and hands back
 * the first successfully allocated block.
 *
 * @size: number of bytes still required
 *
 * Return: the allocated page block, or NULL if every eligible order
 * failed to allocate.
 */
struct page *kgsl_heap_alloc(unsigned long size)
{
	int idx;

	for (idx = 0; idx < num_orders; idx++) {
		struct page *block;

		/* This order's block would overshoot the request; skip it. */
		if (order_to_size(orders[idx]) > size)
			continue;

		block = alloc_buffer_page(orders[idx]);
		if (block)
			return block;
	}

	return NULL;
}
static struct page_info *alloc_largest_available(struct ion_system_heap *heap, struct ion_buffer *buffer, unsigned long size, unsigned int max_order) { struct page *page; struct page_info *info; int i; struct ion_page_pool *pool; bool from_pool = false; info = kmalloc(sizeof(struct page_info), GFP_KERNEL); if (!info) return NULL; for (i = 0; i < num_orders; i++) { if (size < order_to_size(orders[i])) continue; if (max_order < orders[i]) continue; if (!ion_buffer_cached(buffer)) { pool = heap->pools[order_to_index(orders[i])]; mutex_lock(&pool->mutex); if ((pool->high_count > 0) || (pool->low_count > 0)) from_pool = true; mutex_unlock(&pool->mutex); } page = alloc_buffer_page(heap, buffer, orders[i]); if (!page) continue; info->page = page; info->order = orders[i]; info->from_pool = from_pool; INIT_LIST_HEAD(&info->list); return info; } kfree(info); return NULL; }