/**
 * _mali_ukk_mem_allocate() - allocate Mali memory
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_allocate, vaddr=0x%x, size=0x%x\n", args->gpu_vaddr, args->psize));

	/* Check whether this GPU address range is already allocated. */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);

	if (unlikely(mali_vma_node)) {
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}
	/* Create the mali memory allocation struct. */

	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* Select the backend type: MALI_MEM_SWAP for swappable allocations,
	 * MALI_MEM_OS when the allocation must support resizing,
	 * MALI_MEM_BLOCK when dedicated memory is available,
	 * otherwise plain MALI_MEM_OS.
	 */
	if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
		mali_allocation->type = MALI_MEM_SWAP;
	} else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
		mali_allocation->type = MALI_MEM_OS;
		mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
	} else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/* Add the allocation node to the RB tree used for address lookups. */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
	if (mali_allocation->backend_handle < 0) {
		ret = _MALI_OSK_ERR_NOMEM;
		MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
		goto failed_alloc_backend;
	}


	mem_backend->mali_allocation = mali_allocation;
	mem_backend->type = mali_allocation->type;

	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* Set the GPU MMU mapping property. */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
	/* do prepare for MALI mapping */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);

		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			_mali_osk_mutex_signal(session->memory_lock);
			goto failed_prepare_map;
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	if (mali_allocation->psize == 0) {
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);
		goto done;
	}

	if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
		mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
		mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
		/* Init for the defer-bind backend. */
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);

		goto done;
	}
	/* Allocate physical memory. */
	if (likely(mali_allocation->psize > 0)) {

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* Try to allocate from BLOCK memory first; fall back to OS memory on failure. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else if (MALI_MEM_SWAP == mem_backend->type) {
			retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
		} else {
			/* Unsupported backend type. */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, ("Failed to allocate enough pages!\n"));
			goto failed_alloc_pages;
		}
	}

	/* Map the allocation on the GPU side. */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */

		if (mem_backend->type == MALI_MEM_OS) {
			ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
						   mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);

		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_SWAP) {
			ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
						     mali_allocation->mali_mapping.properties);
		} else { /* unsupported type */
			MALI_DEBUG_ASSERT(0);
		}

		_mali_osk_mutex_signal(session->memory_lock);
	}
done:
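	/* Account the new pages against the session and update its high-water mark. */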
	if (MALI_MEM_OS == mem_backend->type) {
		atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
	} else if (MALI_MEM_BLOCK == mem_backend->type) {
		atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
	} else {
		MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
	}

	if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
		session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
	}
	return _MALI_OSK_ERR_OK;
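
/*
 * Error unwind: undo the setup in reverse order - GPU page-table
 * reservation, backend struct, VMA node, then the allocation struct.
 */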

failed_alloc_pages:
	mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
failed_prepare_map:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
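
A minimal caller sketch for reference. This is hypothetical: in the driver the ioctl layer fills _mali_uk_alloc_mem_s from user space, so example_alloc, the address, and the sizes below are illustrative assumptions, not driver code.

static _mali_osk_errcode_t example_alloc(struct mali_session_data *session)
{
	_mali_uk_alloc_mem_s args = { 0 };

	args.ctx = (uintptr_t)session;           /* owning session */
	args.gpu_vaddr = 0x10000000;             /* hypothetical free GPU virtual address */
	args.vsize = 16 * MALI_MMU_PAGE_SIZE;    /* virtual range reserved in the VMA tree */
	args.psize = args.vsize;                 /* back the whole range immediately */
	args.flags = 0;                          /* no swap, resize, or defer-bind behaviour */

	return _mali_ukk_mem_allocate(&args);
}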
Example #2
/**
 * _mali_ukk_mem_allocate() - allocate Mali memory
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_allocate, vaddr=0x%x, size=0x%x\n", args->gpu_vaddr, args->psize));

	/* Check whether this GPU address range is already allocated.
	 * Can we trust user mode?
	 */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
	if (unlikely(mali_vma_node)) {
		/* Not supported yet */
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}

	/* Create the mali memory allocation struct. */
	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* Check whether dedicated memory is available. */
	if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/* Add the allocation node to the RB tree used for address lookups. */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	/* Check whether a physical backend is needed at all. */
	if (mali_allocation->psize == 0)
		return _MALI_OSK_ERR_OK;

	/* Allocate the physical backend and its pages. */
	if (likely(mali_allocation->psize > 0)) {
		mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
		if (mali_allocation->backend_handle < 0) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
			goto failed_alloc_backend;
		}

		mem_backend->mali_allocation = mali_allocation;
		mem_backend->type = mali_allocation->type;

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* Try to allocate from BLOCK memory first; fall back to OS memory on failure. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else {
			/* Unsupported backend type. */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, ("Failed to allocate enough pages!\n"));
			goto failed_alloc_pages;
		}
	}

	/* Map the allocation on the GPU side. */
	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* Set the GPU MMU mapping property. */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);

	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */
		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			MALI_DEBUG_PRINT(1, ("Prepare map failed!\n"));
			goto failed_gpu_map;
		}
		/* Only OS and BLOCK memory types are supported here. */
		if (mem_backend->type == MALI_MEM_OS) {
			mali_mem_os_mali_map(mem_backend, args->gpu_vaddr,
					     mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else {
			/* Not supported yet */
			MALI_DEBUG_ASSERT(0);
		}
		session->mali_mem_array[mem_backend->type] += mem_backend->size;
		if (session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK] > session->max_mali_mem_allocated) {
			session->max_mali_mem_allocated = session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK];
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	return _MALI_OSK_ERR_OK;
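
/*
 * Error unwind: drop the session lock, free any pages already obtained,
 * destroy the backend, then remove the VMA node and allocation struct.
 */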

failed_gpu_map:
	_mali_osk_mutex_signal(session->memory_lock);
	if (mem_backend->type == MALI_MEM_OS) {
		mali_mem_os_free(&mem_backend->os_mem);
	} else {
		mali_mem_block_free(&mem_backend->block_mem);
	}
failed_alloc_pages:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
{
	_mali_osk_errcode_t err;
	mali_mem_allocation *descriptor;
	block_allocator *info;
	u32 left;
	block_info *last_allocated = NULL;
	block_allocator_allocation *ret_allocation;
	u32 offset = 0;

	size = ALIGN(size, MALI_BLOCK_SIZE);

	info = mali_mem_block_gobal_allocator;
	if (NULL == info) return NULL;

	left = size;
	MALI_DEBUG_ASSERT(0 != left);

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK);
	if (NULL == descriptor) {
		return NULL;
	}

	descriptor->mali_mapping.addr = mali_addr;
	descriptor->size = size;
	descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
	descriptor->cpu_mapping.ref = 1;

	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
	} else {
		/* Cached Mali memory mapping */
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
		vma->vm_flags |= VM_SHARED;
	}

	ret_allocation = &descriptor->block_mem.mem;

	ret_allocation->mapping_length = 0;

	_mali_osk_mutex_wait(session->memory_lock);
	mutex_lock(&info->mutex);

	if (left > (info->free_blocks * MALI_BLOCK_SIZE)) {
		MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left));
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}

	err = mali_mem_mali_map_prepare(descriptor);
	if (_MALI_OSK_ERR_OK != err) {
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}
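
	/*
	 * Carve the request into MALI_BLOCK_SIZE chunks: pop each block off
	 * the free list, map it on the Mali side, then mirror it into the
	 * CPU mapping. On CPU-map failure, every block taken so far is
	 * relinked into the free list.
	 */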

	while ((left > 0) && (info->first_free)) {
		block_info *block;
		u32 phys_addr;
		u32 current_mapping_size;

		block = info->first_free;
		info->first_free = info->first_free->next;
		block->next = last_allocated;
		last_allocated = block;

		phys_addr = get_phys(info, block);

		if (MALI_BLOCK_SIZE < left) {
			current_mapping_size = MALI_BLOCK_SIZE;
		} else {
			current_mapping_size = left;
		}

		mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
		if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
			/* release all memory back to the pool */
			while (last_allocated) {
				/* This relinks every block we've just allocated back into the free-list */
				block = last_allocated->next;
				last_allocated->next = info->first_free;
				info->first_free = last_allocated;
				last_allocated = block;
			}

			mutex_unlock(&info->mutex);
			_mali_osk_mutex_signal(session->memory_lock);

			mali_mem_mali_map_free(descriptor);
			mali_mem_descriptor_destroy(descriptor);

			return NULL;
		}

		left -= current_mapping_size;
		offset += current_mapping_size;
		ret_allocation->mapping_length += current_mapping_size;

		--info->free_blocks;
	}

	mutex_unlock(&info->mutex);
	_mali_osk_mutex_signal(session->memory_lock);

	MALI_DEBUG_ASSERT(0 == left);

	/* Record all the information about this allocation */
	ret_allocation->last_allocated = last_allocated;
	ret_allocation->info = info;

	return descriptor;
}
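
A minimal caller sketch for the block allocator. Also hypothetical: in the driver this path runs from mmap, which supplies the vm_area_struct, so example_block_map is an illustrative assumption.

static int example_block_map(struct mali_session_data *session,
			     struct vm_area_struct *vma, u32 mali_addr)
{
	mali_mem_allocation *descriptor;

	/* Map the entire CPU VMA range onto dedicated block memory. */
	descriptor = mali_mem_block_alloc(mali_addr,
					  vma->vm_end - vma->vm_start,
					  vma, session);

	return (NULL == descriptor) ? -ENOMEM : 0;
}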