Code example #1
/**
 * _mali_ukk_mem_allocate - allocate Mali memory
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

	/* Check whether the address range is already allocated.
	 * Can we trust user mode?
	 */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
	if (unlikely(mali_vma_node)) {
		/* Not supported yet */
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}

	/**
	 * Create the mali memory allocation.
	 */
	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* check whether we have dedicated memory */
	if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/**
	 * Add the allocation node to the RB tree for indexing.
	 */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	/* check whether we need to allocate a backend */
	if (mali_allocation->psize == 0)
		return _MALI_OSK_ERR_OK;

	/**
	 * Allocate the physical backend & pages.
	 */
	if (likely(mali_allocation->psize > 0)) {
		mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
		if (mali_allocation->backend_handle < 0) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
			goto failed_alloc_backend;
		}

		mem_backend->mali_allocation = mali_allocation;
		mem_backend->type = mali_allocation->type;

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* try to allocate from BLOCK memory first, then fall back to OS memory if that fails. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else {
			/* unsupported memory type */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
			goto failed_alloc_pages;
		}
	}

	/**
	 * Map to the GPU side.
	 */
	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* set GPU MMU property */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);

	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */
		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
			goto failed_gpu_map;
		}
		/* map according to the backend type */
		if (mem_backend->type == MALI_MEM_OS) {
			mali_mem_os_mali_map(mem_backend, args->gpu_vaddr,
					     mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else {
			/* Not supported yet */
			MALI_DEBUG_ASSERT(0);
		}
		session->mali_mem_array[mem_backend->type] += mem_backend->size;
		if (session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK] > session->max_mali_mem_allocated) {
			session->max_mali_mem_allocated = session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK];
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	return _MALI_OSK_ERR_OK;

failed_gpu_map:
	_mali_osk_mutex_signal(session->memory_lock);
	if (mem_backend->type == MALI_MEM_OS) {
		mali_mem_os_free(&mem_backend->os_mem);
	} else {
		mali_mem_block_free(&mem_backend->block_mem);
	}
failed_alloc_pages:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
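
For reference, the request structure consumed above carries a session handle plus a GPU virtual address, a virtual size, and a physical size: vsize is reserved in the session's VA tree, only psize bytes are backed with pages, and a zero psize makes example #1 return right after mali_vma_offset_add(). The standalone sketch below illustrates those fields with a hypothetical stand-in struct; it is an assumption for illustration, not the driver's actual _mali_uk_alloc_mem_s definition.

/* A standalone, hypothetical sketch: a stand-in request struct holding only
 * the fields the listing above reads. The real _mali_uk_alloc_mem_s layout
 * may differ. */
#include <stdint.h>
#include <stdio.h>

struct alloc_request {               /* stand-in for _mali_uk_alloc_mem_s */
	uintptr_t    ctx;            /* session pointer, cast to an integer by the caller */
	unsigned int gpu_vaddr;      /* GPU virtual address the allocation is mapped at */
	unsigned int vsize;          /* virtual size reserved in the session's VA tree */
	unsigned int psize;          /* physical size actually backed with pages */
	unsigned int flags;          /* e.g. _MALI_MEMORY_ALLOCATE_NO_BIND_GPU */
};

int main(void)
{
	struct alloc_request req = {
		.ctx       = 0,           /* filled in by the ioctl layer in the real driver */
		.gpu_vaddr = 0x10000000u, /* hypothetical GPU VA */
		.vsize     = 0x8000u,     /* reserve 32 KiB of GPU VA */
		.psize     = 0x4000u,     /* back only the first 16 KiB */
		.flags     = 0u,
	};

	/* psize == 0 would mean "reserve VA only": example #1 returns
	 * _MALI_OSK_ERR_OK right after mali_vma_offset_add() in that case. */
	printf("alloc request: va=0x%x vsize=0x%x psize=0x%x flags=0x%x\n",
	       req.gpu_vaddr, req.vsize, req.psize, req.flags);
	return 0;
}
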
Code example #2
/**
 * _mali_ukk_mem_bind - bind external memory to a new GPU address
 * It allocates a new mem allocation and binds the external memory to it.
 * Supported backend types are:
 * _MALI_MEMORY_BIND_BACKEND_UMP
 * _MALI_MEMORY_BIND_BACKEND_DMA_BUF
 * _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
 * CPU access is not supported yet.
 */
_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args)
{
	struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	mali_mem_allocation *mali_allocation = NULL;
	MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size =0x%x! \n", args->vaddr, args->size));

	/**
	 * Create the mali memory allocation.
	 */
	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->size;
	mali_allocation->vsize = args->size;
	mali_allocation->mali_mapping.addr = args->vaddr;

	/* add the allocation node to the RB tree for indexing */
	mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->size;
	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	/* allocate the backend */
	if (mali_allocation->psize > 0) {
		mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
		if (mali_allocation->backend_handle < 0) {
			goto Failed_alloc_backend;
		}

	} else {
		goto Failed_alloc_backend;
	}

	mem_backend->size = mali_allocation->psize;
	mem_backend->mali_allocation = mali_allocation;

	switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) {
	case  _MALI_MEMORY_BIND_BACKEND_UMP:
#if defined(CONFIG_MALI400_UMP)
		mali_allocation->type = MALI_MEM_UMP;
		mem_backend->type = MALI_MEM_UMP;
		ret = mali_memory_bind_ump_buf(mali_allocation, mem_backend,
					       args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags);
		if (_MALI_OSK_ERR_OK != ret) {
			MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n"));
			goto  Failed_bind_backend;
		}
#else
		MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
		goto Failed_bind_backend;
#endif
		break;
	case  _MALI_MEMORY_BIND_BACKEND_DMA_BUF:
#if defined(CONFIG_DMA_SHARED_BUFFER)
		mali_allocation->type = MALI_MEM_DMA_BUF;
		mem_backend->type = MALI_MEM_DMA_BUF;
		ret = mali_memory_bind_dma_buf(mali_allocation, mem_backend,
					       args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags);
		if (_MALI_OSK_ERR_OK != ret) {
			MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n"));
			goto Failed_bind_backend;
		}
#else
		MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
		goto Failed_bind_backend;
#endif
		break;
	case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
		/* not allowed */
		MALI_DEBUG_ASSERT(0);
		break;

	case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
		mali_allocation->type = MALI_MEM_EXTERNAL;
		mem_backend->type = MALI_MEM_EXTERNAL;
		ret = mali_memory_bind_ext_mem(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr,
					       args->mem_union.bind_ext_memory.flags);
		if (_MALI_OSK_ERR_OK != ret) {
			MALI_DEBUG_PRINT(1, ("Bind external buf failed\n"));
			goto Failed_bind_backend;
		}
		break;

	case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
		/* not allowed */
		MALI_DEBUG_ASSERT(0);
		break;

	default:
		MALI_DEBUG_ASSERT(0);
		break;
	}
	return _MALI_OSK_ERR_OK;


Failed_bind_backend:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);

Failed_alloc_backend:
	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n"));
	return ret;
}
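
Both functions unwind errors with the same goto ladder: resources are acquired in order (allocation struct, VA node, backend, bind/map), and each failure label releases, in reverse, everything acquired so far. The standalone sketch below reproduces that shape with hypothetical helpers (allocation_create(), backend_create(), bind_backend()); it illustrates the pattern only and is not driver code.

/* Standalone sketch of the unwind ladder used above; all helper names are
 * hypothetical. On success, ownership of the objects would pass to the
 * session, which is not modelled here. */
#include <stdio.h>
#include <stdlib.h>

struct allocation { int unused; };
struct backend    { int unused; };

static struct allocation *allocation_create(void) { return calloc(1, sizeof(struct allocation)); }
static void allocation_destroy(struct allocation *a) { free(a); }
static struct backend *backend_create(void) { return calloc(1, sizeof(struct backend)); }
static void backend_destroy(struct backend *b) { free(b); }
static int bind_backend(struct backend *b) { (void)b; return -1; /* simulate a bind failure */ }

static int bind_example(void)
{
	struct allocation *alloc = allocation_create();
	struct backend *backend = NULL;
	int ret = -1;

	if (alloc == NULL)
		return -1;

	backend = backend_create();
	if (backend == NULL)
		goto failed_alloc_backend;   /* nothing bound yet: free the allocation only */

	if (bind_backend(backend) != 0)
		goto failed_bind_backend;    /* backend exists: destroy it, then the allocation */

	return 0;                            /* success: objects now belong to the caller/session */

failed_bind_backend:
	backend_destroy(backend);            /* mirrors mali_mem_backend_struct_destory() */
failed_alloc_backend:
	allocation_destroy(alloc);           /* mirrors mali_mem_allocation_struct_destory() */
	return ret;
}

int main(void)
{
	printf("bind_example() returned %d\n", bind_example());
	return 0;
}
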
/**
 * _mali_ukk_mem_allocate - allocate Mali memory
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

	/* Check whether the address range is already allocated. */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);

	if (unlikely(mali_vma_node)) {
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}
	/**
	 * Create the mali memory allocation.
	 */

	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* Choose the allocation type:
	 * MALI_MEM_SWAP if the allocation is swappable,
	 * MALI_MEM_OS (with MALI_MEM_FLAG_CAN_RESIZE) if it must support resizing,
	 * MALI_MEM_BLOCK if dedicated memory is available,
	 * otherwise MALI_MEM_OS.
	 */
	if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
		mali_allocation->type = MALI_MEM_SWAP;
	} else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
		mali_allocation->type = MALI_MEM_OS;
		mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
	} else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/**
	 * Add the allocation node to the RB tree for indexing.
	 */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
	if (mali_allocation->backend_handle < 0) {
		ret = _MALI_OSK_ERR_NOMEM;
		MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
		goto failed_alloc_backend;
	}


	mem_backend->mali_allocation = mali_allocation;
	mem_backend->type = mali_allocation->type;

	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* set GPU MMU property */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
	/* prepare the Mali MMU mapping */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);

		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			_mali_osk_mutex_signal(session->memory_lock);
			goto failed_prepare_map;
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	if (mali_allocation->psize == 0) {
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);
		goto done;
	}

	if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
		mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
		mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
		/* initialize the defer-bind backend */
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);

		goto done;
	}
	/**
	 * Allocate the physical memory.
	 */
	if (likely(mali_allocation->psize > 0)) {

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* try to allocate from BLOCK memory first, then fall back to OS memory if that fails. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else if (MALI_MEM_SWAP == mem_backend->type) {
			retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
		} else {
			/* unsupported memory type */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
			goto failed_alloc_pages;
		}
	}

	/**
	 * Map to the GPU side.
	 */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */

		if (mem_backend->type == MALI_MEM_OS) {
			ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
						   mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);

		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_SWAP) {
			ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
						     mali_allocation->mali_mapping.properties);
		} else { /* unsupported type */
			MALI_DEBUG_ASSERT(0);
		}

		_mali_osk_mutex_signal(session->memory_lock);
	}
done:
	if (MALI_MEM_OS == mem_backend->type) {
		atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
	} else if (MALI_MEM_BLOCK == mem_backend->type) {
		atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
	} else {
		MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
	}

	if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
		session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
	}
	return _MALI_OSK_ERR_OK;

failed_alloc_pages:
	mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
failed_prepare_map:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
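
Compared with example #1, this version accounts memory in pages rather than bytes: the backend's page count is added with atomic_add() to session->mali_mem_allocated_pages and a byte high-water mark is kept in session->max_mali_mem_allocated_size. The sketch below is a minimal, single-threaded illustration of that pattern under the assumption of a 4 KiB MMU page; the names are illustrative, not the driver's.

/* Minimal sketch of the page-count accounting pattern; names are
 * illustrative, not the driver's. Assumes a 4 KiB MMU page, which is what
 * MALI_MMU_PAGE_SIZE conventionally denotes. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define MMU_PAGE_SIZE 4096u

struct session_accounting {
	atomic_uint allocated_pages;  /* like session->mali_mem_allocated_pages */
	size_t      max_allocated;    /* like session->max_mali_mem_allocated_size */
};

static void account_pages(struct session_accounting *s, unsigned int pages)
{
	/* atomic_fetch_add() returns the old value, so add 'pages' back to get
	 * the running total, as atomic_add() + atomic_read() do in the listing. */
	unsigned int total = atomic_fetch_add(&s->allocated_pages, pages) + pages;
	size_t bytes = (size_t)total * MMU_PAGE_SIZE;

	/* Single-threaded sketch; the listing performs this comparison after
	 * each allocation's backend pages are accounted. */
	if (bytes > s->max_allocated)
		s->max_allocated = bytes;
}

int main(void)
{
	struct session_accounting s = { 0 };

	account_pages(&s, 4);   /* e.g. os_mem.count after a 16 KiB allocation */
	account_pages(&s, 8);   /* a second, 32 KiB allocation */

	printf("pages=%u high-water=%zu bytes\n",
	       (unsigned int)atomic_load(&s.allocated_pages), s.max_allocated);
	return 0;
}
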