/* Satisfy (part of) a physical allocation request from this allocator's pool of fixed-size physical blocks. */
static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
{
	block_allocator * info;
	u32 left;
	block_info * last_allocated = NULL;
	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
	block_allocator_allocation *ret_allocation;

	MALI_DEBUG_ASSERT_POINTER(ctx);
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT_POINTER(offset);
	MALI_DEBUG_ASSERT_POINTER(alloc_info);

	info = (block_allocator*)ctx;
	left = descriptor->size - *offset;
	MALI_DEBUG_ASSERT(0 != left);

	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;

	ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );

	if ( NULL == ret_allocation )
	{
		/* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
		_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
		return result;
	}

	ret_allocation->start_offset = *offset;
	ret_allocation->mapping_length = 0;

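	/* Grab blocks from the free list, chaining them onto last_allocated, until the request is satisfied or the pool runs dry */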
	while ((left > 0) && (info->first_free))
	{
		block_info * block;
		u32 phys_addr;
		u32 padding;
		u32 current_mapping_size;

		block = info->first_free;
		info->first_free = info->first_free->next;
		block->next = last_allocated;
		last_allocated = block;

		phys_addr = get_phys(info, block);

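		/* How far *offset is into the current block; the mapping starts at the same offset within the new physical block */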
		padding = *offset & (MALI_BLOCK_SIZE-1);

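		/* Map either the rest of this block or the rest of the request, whichever is smaller */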
		if (MALI_BLOCK_SIZE - padding < left)
		{
			current_mapping_size = MALI_BLOCK_SIZE - padding;
		}
		else
		{
			current_mapping_size = left;
		}

		if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
		{
			MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
			result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
			mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);

			/* release all memory back to the pool */
			while (last_allocated)
			{
				/* This relinks every block we've just allocated back into the free-list */
				block = last_allocated->next;
				last_allocated->next = info->first_free;
				info->first_free = last_allocated;
				last_allocated = block;
			}

			break;
		}

		*offset += current_mapping_size;
		left -= current_mapping_size;
		ret_allocation->mapping_length += current_mapping_size;
	}

	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);

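	/* At least one block was mapped: report partial or full completion and hand the block chain back via alloc_info */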
	if (last_allocated)
	{
		if (left) result = MALI_MEM_ALLOC_PARTIAL;
		else result = MALI_MEM_ALLOC_FINISHED;

		/* Record all the information about this allocation */
		ret_allocation->last_allocated = last_allocated;
		ret_allocation->engine = engine;
		ret_allocation->descriptor = descriptor;

		alloc_info->ctx = info;
		alloc_info->handle = ret_allocation;
		alloc_info->release = block_allocator_release;
	}
	else
	{
		/* Free the allocation information - nothing to be passed back */
		_mali_osk_free( ret_allocation );
	}

	return result;
}

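/* Satisfy (part of) a physical allocation request by mapping CPU pages obtained from the OS, one page at a time. */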
static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
{
	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
	u32 left;
	os_allocator * info;
	os_allocation * allocation;
	int pages_allocated = 0;
	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;

	MALI_DEBUG_ASSERT_POINTER(ctx);
	MALI_DEBUG_ASSERT_POINTER(engine);
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT_POINTER(offset);
	MALI_DEBUG_ASSERT_POINTER(alloc_info);

	info = (os_allocator*)ctx;
	left = descriptor->size - *offset;

	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;

	/** @note this code may not work on Linux, or may require a more complex Linux implementation */
	allocation = _mali_osk_malloc(sizeof(os_allocation));
	if (NULL != allocation)
	{
		/* MALI_SEC */
		//u32 os_mem_max_usage = info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE;
		allocation->offset_start = *offset;
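		/* Round the remaining size up to a whole number of CPU pages */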
		allocation->num_pages = ((left + _MALI_OSK_CPU_PAGE_SIZE - 1) & ~(_MALI_OSK_CPU_PAGE_SIZE - 1)) >> _MALI_OSK_CPU_PAGE_ORDER;
		MALI_DEBUG_PRINT(6, ("Allocating page array of size %d bytes\n", (int)(allocation->num_pages * sizeof(struct page*))));
		/* MALI_SEC */
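		/* Map the request one CPU page at a time; the magic physical address tells the mapping layer to obtain each page from the OS */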
		while (left > 0)
		{
			err = mali_allocation_engine_map_physical(engine, descriptor, *offset, MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, info->cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
			if ( _MALI_OSK_ERR_OK != err)
			{
				if ( _MALI_OSK_ERR_NOMEM == err)
				{
					/* 'Partial' allocation (or, out-of-memory on first page) */
					break;
				}

				MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));

				/* Fatal error, cleanup any previous pages allocated. */
				if ( pages_allocated > 0 )
				{
					mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*pages_allocated, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
					/* (*offset) doesn't need to be restored; it will not be used by the caller on failure */
				}

				pages_allocated = 0;

				result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
				break;
			}

			/* Loop iteration */
			if (left < _MALI_OSK_CPU_PAGE_SIZE) left = 0;
			else left -= _MALI_OSK_CPU_PAGE_SIZE;

			pages_allocated++;

			*offset += _MALI_OSK_CPU_PAGE_SIZE;
		}

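		/* A non-zero remainder means the request could not be fully mapped */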
		if (left) MALI_PRINT(("Out of memory. Mali memory allocated: %d kB  Configured maximum OS memory usage: %d kB\n",
				 (info->num_pages_allocated * _MALI_OSK_CPU_PAGE_SIZE)/1024, (info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE)/1024));

		/* Loop termination; decide on result */
		if (pages_allocated)
		{
			MALI_DEBUG_PRINT(6, ("Allocated %d pages\n", pages_allocated));
			if (left) result = MALI_MEM_ALLOC_PARTIAL;
			else result = MALI_MEM_ALLOC_FINISHED;

			/* Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
			 * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
			 * This is required for MALI to have the correct view of the memory.
			 */
			_mali_osk_cache_ensure_uncached_range_flushed( (void *)descriptor, allocation->offset_start, pages_allocated * _MALI_OSK_CPU_PAGE_SIZE );
			allocation->num_pages = pages_allocated;
			allocation->engine = engine;         /* Necessary to make the engine's unmap call */
			allocation->descriptor = descriptor; /* Necessary to make the engine's unmap call */
			info->num_pages_allocated += pages_allocated;

			MALI_DEBUG_PRINT(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));

			alloc_info->ctx = info;
			alloc_info->handle = allocation;
			alloc_info->release = os_allocator_release;
		}
		else
		{
			MALI_DEBUG_PRINT(6, ("Releasing pages array due to no pages allocated\n"));
			_mali_osk_free( allocation );
		}
	}