void mali_mem_block_release(mali_mem_allocation *descriptor)
{
	block_allocator *info = descriptor->block_mem.mem.info;
	block_info *block, *next;
	block_allocator_allocation *allocation = &descriptor->block_mem.mem;

	MALI_DEBUG_ASSERT(MALI_MEM_BLOCK == descriptor->type);

	block = allocation->last_allocated;

	MALI_DEBUG_ASSERT_POINTER(block);

	/* unmap */
	mali_mem_mali_map_free(descriptor);

	mutex_lock(&info->mutex);

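	/* Walk the allocation's block chain and return every block to the pool. */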
	while (block) {
		MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block >= (info->all_blocks + info->num_blocks))));

		next = block->next;

		/* relink into free-list */
		block->next = info->first_free;
		info->first_free = block;

		/* advance the loop */
		block = next;

		++info->free_blocks;
	}

	mutex_unlock(&info->mutex);
}
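
/**
 * mali_mem_ump_unmap - unmap a UMP-backed allocation from the Mali address space
 *
 * Frees the GPU MMU mapping under the session memory lock and updates the
 * per-session, per-type memory accounting.
 */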
static void mali_mem_ump_unmap(mali_mem_allocation *alloc)
{
	struct mali_session_data *session;
	MALI_DEBUG_ASSERT_POINTER(alloc);
	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);
	mali_session_memory_lock(session);
	mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
			       alloc->flags);

	session->mali_mem_array[alloc->type] -= alloc->psize;
	mali_session_memory_unlock(session);
}
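
/**
 * mali_ump_unmap - legacy descriptor-based UMP unmap path
 *
 * Frees the Mali mapping for the descriptor and drops the UMP reference
 * taken when the memory was attached.
 */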
void mali_ump_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
{
	ump_dd_handle ump_mem = descriptor->ump_mem.handle;

	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);

	mali_mem_mali_map_free(descriptor);

	ump_dd_reference_release(ump_mem);
}
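
/**
 * mali_mem_external_release - release an external memory backend
 *
 * The underlying physical memory is not owned by the driver, so only the
 * Mali mapping and the session accounting are torn down here.
 */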
void mali_mem_external_release(mali_mem_backend *mem_backend)
{
	mali_mem_allocation *alloc;
	struct mali_session_data *session;
	MALI_DEBUG_ASSERT_POINTER(mem_backend);
	alloc = mem_backend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);
	MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == mem_backend->type);

	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);
	mali_session_memory_lock(session);
	mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
			       alloc->flags);
	session->mali_mem_array[mem_backend->type] -= mem_backend->size;
	mali_session_memory_unlock(session);
}
/**
 * _mali_ukk_mem_allocate - allocate Mali memory
 *
 * Checks that the requested GPU address range is free, creates the
 * allocation descriptor and memory backend, allocates physical pages for
 * the chosen backend type, and maps them on the GPU unless deferred or
 * unbound allocation was requested.
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

	/* Check whether the requested GPU address is already allocated. */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);

	if (unlikely(mali_vma_node)) {
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}
	/* Create the mali memory allocation descriptor. */

	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* Pick the backend type: MALI_MEM_SWAP for swappable allocations,
	 * MALI_MEM_OS when the allocation must support resizing,
	 * MALI_MEM_BLOCK when dedicated memory is available,
	 * otherwise MALI_MEM_OS.
	 */
	if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
		mali_allocation->type = MALI_MEM_SWAP;
	} else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
		mali_allocation->type = MALI_MEM_OS;
		mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
	} else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/* Add the allocation node to the RB tree used for address lookup. */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
	if (mali_allocation->backend_handle < 0) {
		ret = _MALI_OSK_ERR_NOMEM;
		MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
		goto failed_alloc_backend;
	}

	mem_backend->mali_allocation = mali_allocation;
	mem_backend->type = mali_allocation->type;

	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* Set the GPU MMU mapping properties. */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
	/* do prepare for MALI mapping */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);

		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			_mali_osk_mutex_signal(session->memory_lock);
			goto failed_prepare_map;
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	if (mali_allocation->psize == 0) {
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);
		goto done;
	}

	if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
		mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
		mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
		/* init for defer bind backend*/
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);

		goto done;
	}
	/* Allocate physical memory. */
	if (likely(mali_allocation->psize > 0)) {

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* Try to allocate from block memory first; fall back to OS memory on failure. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else if (MALI_MEM_SWAP == mem_backend->type) {
			retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
		} else {
			/* Unsupported backend type */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
			goto failed_alloc_pages;
		}
	}

	/* Map to the GPU side. */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */

		if (mem_backend->type == MALI_MEM_OS) {
			ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
						   mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);

		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_SWAP) {
			ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
						     mali_allocation->mali_mapping.properties);
		} else { /* unsupported type */
			MALI_DEBUG_ASSERT(0);
		}

		_mali_osk_mutex_signal(session->memory_lock);
	}
done:
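	/* Common exit: update per-session page accounting and the peak-usage watermark. */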
	if (MALI_MEM_OS == mem_backend->type) {
		atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
	} else if (MALI_MEM_BLOCK == mem_backend->type) {
		atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
	} else {
		MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
	}

	if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
		session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
	}
	return _MALI_OSK_ERR_OK;

failed_alloc_pages:
	mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
failed_prepare_map:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
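
/**
 * mali_mem_resize - grow or shrink a resizeable MALI_MEM_OS backend
 *
 * On grow: allocate new OS pages, move them into the backend, then extend
 * the CPU and Mali mappings. On shrink: move the tail pages out of the
 * backend, unmap them from both sides, and free them.
 */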
static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size)
{
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	mali_mem_os_mem tmp_os_mem;
	s32 change_page_count;

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT_POINTER(mem_backend);
	MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
	MALI_DEBUG_ASSERT(0 == physical_size %  MALI_MMU_PAGE_SIZE);

	mali_allocation = mem_backend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(mali_allocation);

	MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags);
	MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type);

	mutex_lock(&mem_backend->mutex);

	/* Do the resize */
	if (physical_size > mem_backend->size) {
		u32 add_size = physical_size - mem_backend->size;

		MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);

		/* Allocate new pages from os mem */
		retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size);

		if (retval) {
			if (-ENOMEM == retval) {
				ret = _MALI_OSK_ERR_NOMEM;
			} else {
				ret = _MALI_OSK_ERR_FAULT;
			}
			MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n"));
			goto failed_alloc_memory;
		}

		MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE);

		/* Resize the memory of the backend */
		ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);

		if (ret) {
			MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory	resizing failed !\n"));
			goto failed_resize_pages;
		}

		/* Resize the CPU mapping */
		if (NULL != mali_allocation->cpu_mapping.vma) {
			ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma,
								mali_allocation->cpu_mapping.vma->vm_start + mem_backend->size, add_size);
			if (unlikely(ret != _MALI_OSK_ERR_OK)) {
				MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed!\n"));
				goto failed_cpu_map;
			}
		}

		/* Resize mali mapping */
		_mali_osk_mutex_wait(session->memory_lock);
		ret = mali_mem_mali_map_resize(mali_allocation, physical_size);

		if (ret) {
			MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n"));
			goto failed_gpu_map;
		}

		ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start,
					   mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
		if (ret) {
			MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n"));
			goto failed_gpu_map;
		}

		_mali_osk_mutex_signal(session->memory_lock);
	} else {
		u32 dec_size, page_count;
		u32 vaddr = 0;
		INIT_LIST_HEAD(&tmp_os_mem.pages);
		tmp_os_mem.count = 0;

		dec_size = mem_backend->size - physical_size;
		MALI_DEBUG_ASSERT(0 == dec_size % MALI_MMU_PAGE_SIZE);

		page_count = dec_size / MALI_MMU_PAGE_SIZE;
		vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size;

		/* Resize the memory of the backend */
		ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count);

		if (ret) {
			MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n"));
			goto failed_resize_pages;
		}

		/* Resize mali map */
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags);
		_mali_osk_mutex_signal(session->memory_lock);

		/* Zap cpu mapping */
		if (0 != mali_allocation->cpu_mapping.addr) {
			MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma);
			zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size);
		}

		/* Free those extra pages */
		mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
	}

	/* Resize memory allocation and memory backend */
	change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE;
	mali_allocation->psize = physical_size;
	mem_backend->size = physical_size;
	mutex_unlock(&mem_backend->mutex);

	if (change_page_count > 0) {
		atomic_add(change_page_count, &session->mali_mem_allocated_pages);
		if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
			session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
		}

	} else {
		atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages);
	}

	return _MALI_OSK_ERR_OK;

failed_gpu_map:
	_mali_osk_mutex_signal(session->memory_lock);
failed_cpu_map:
	if (physical_size > mem_backend->size) {
		mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE,
					 (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE);
	} else {
		mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
	}
failed_resize_pages:
	if (0 != tmp_os_mem.count)
		mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
failed_alloc_memory:

	mutex_unlock(&mem_backend->mutex);
	return ret;
}
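
/**
 * mali_mem_block_alloc - legacy block-allocator allocation path
 *
 * Carves the requested size (rounded up to MALI_BLOCK_SIZE) out of the
 * global block pool, mapping each block to the GPU and to the caller's
 * vma as it goes. On failure every block claimed so far is relinked into
 * the free list.
 */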
mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
{
	_mali_osk_errcode_t err;
	mali_mem_allocation *descriptor;
	block_allocator *info;
	u32 left;
	block_info *last_allocated = NULL;
	block_allocator_allocation *ret_allocation;
	u32 offset = 0;

	size = ALIGN(size, MALI_BLOCK_SIZE);

	info = mali_mem_block_gobal_allocator;
	if (NULL == info) return NULL;

	left = size;
	MALI_DEBUG_ASSERT(0 != left);

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK);
	if (NULL == descriptor) {
		return NULL;
	}

	descriptor->mali_mapping.addr = mali_addr;
	descriptor->size = size;
	descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
	descriptor->cpu_mapping.ref = 1;

	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
	} else {
		/* Cached Mali memory mapping */
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
		vma->vm_flags |= VM_SHARED;
	}

	ret_allocation = &descriptor->block_mem.mem;

	ret_allocation->mapping_length = 0;

	_mali_osk_mutex_wait(session->memory_lock);
	mutex_lock(&info->mutex);

	if (left > (info->free_blocks * MALI_BLOCK_SIZE)) {
		MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left));
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}

	err = mali_mem_mali_map_prepare(descriptor);
	if (_MALI_OSK_ERR_OK != err) {
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}

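	/* Claim blocks from the free list one at a time, mapping each block to
	 * the GPU and to the caller's vma as it is taken. */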
	while ((left > 0) && (info->first_free)) {
		block_info *block;
		u32 phys_addr;
		u32 current_mapping_size;

		block = info->first_free;
		info->first_free = info->first_free->next;
		block->next = last_allocated;
		last_allocated = block;

		phys_addr = get_phys(info, block);

		if (MALI_BLOCK_SIZE < left) {
			current_mapping_size = MALI_BLOCK_SIZE;
		} else {
			current_mapping_size = left;
		}

		mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
		if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
			/* release all memory back to the pool */
			while (last_allocated) {
				/* This relinks every block we've just allocated back into the free-list */
				block = last_allocated->next;
				last_allocated->next = info->first_free;
				info->first_free = last_allocated;
				last_allocated = block;
			}

			mutex_unlock(&info->mutex);
			_mali_osk_mutex_signal(session->memory_lock);

			mali_mem_mali_map_free(descriptor);
			mali_mem_descriptor_destroy(descriptor);

			return NULL;
		}

		left -= current_mapping_size;
		offset += current_mapping_size;
		ret_allocation->mapping_length += current_mapping_size;

		--info->free_blocks;
	}

	mutex_unlock(&info->mutex);
	_mali_osk_mutex_signal(session->memory_lock);

	MALI_DEBUG_ASSERT(0 == left);

	/* Record all the information about this allocation */
	ret_allocation->last_allocated = last_allocated;
	ret_allocation->info = info;

	return descriptor;
}
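
/**
 * mali_mem_external_release - legacy descriptor-based external release
 *
 * Older variant of the backend-based function above; external memory only
 * needs its Mali mapping freed.
 */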
void mali_mem_external_release(mali_mem_allocation *descriptor)
{
	MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == descriptor->type);

	mali_mem_mali_map_free(descriptor);
}