/** @note munmap is handled by the vma close handler */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mali_session_data *session;
	mali_mem_allocation *descriptor;
	u32 size = vma->vm_end - vma->vm_start;
	u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;

	session = (struct mali_session_data *)filp->private_data;
	if (NULL == session) {
		MALI_PRINT_ERROR(("mmap called without any session data available\n"));
		return -EFAULT;
	}

	MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
			     (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
			     (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));

	/* Set flags which indicate that the memory is IO memory: no paging is
	 * to be performed on it and it should not be included in crash dumps.
	 * Also mark the memory as reserved, meaning it is always present and
	 * can never be paged out (see also the previous entry).
	 */
	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_DONTCOPY;
	vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
	vma->vm_flags |= VM_RESERVED;
#else
	vma->vm_flags |= VM_DONTDUMP;
	vma->vm_flags |= VM_DONTEXPAND;
#endif

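	/* Use a write-combining (non-cacheable, bufferable) protection for the CPU mapping */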
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */

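	/* Try the dedicated block allocator first, then fall back to OS memory */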
	descriptor = mali_mem_block_alloc(mali_addr, size, vma, session);
	if (NULL == descriptor) {
		descriptor = mali_mem_os_alloc(mali_addr, size, vma, session);
		if (NULL == descriptor) {
			MALI_DEBUG_PRINT(3, ("MMAP failed\n"));
			return -ENOMEM;
		}
	}

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	vma->vm_private_data = (void *)descriptor;

	/* Put on descriptor map */
	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_os_release(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);
		return -EFAULT;
	}

	return 0;
}
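
To see how the pieces above connect: mali_mmap is installed as the .mmap hook of the driver's file_operations, and the @note at the top holds because the kernel invokes the .close callback of mali_kernel_vm_ops when the region is unmapped. A minimal sketch follows; the mali_vma_open/mali_vma_close callback names and bodies are hypothetical placeholders, not the driver's actual implementation.

static void mali_vma_open(struct vm_area_struct *vma)
{
	/* hypothetical: e.g. take a reference on the descriptor stored in
	 * vma->vm_private_data */
}

static void mali_vma_close(struct vm_area_struct *vma)
{
	/* munmap lands here: tear down the mali_mem_allocation that
	 * mali_mmap() stored in vma->vm_private_data */
}

static const struct vm_operations_struct mali_kernel_vm_ops = {
	.open  = mali_vma_open,
	.close = mali_vma_close,
};

static const struct file_operations mali_fops = {
	.owner = THIS_MODULE,
	.mmap  = mali_mmap,
	/* .open, .release, .unlocked_ioctl, ... omitted */
};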
Example #2
/**
 * _mali_ukk_mem_allocate - allocate mali memory
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

	/* Check whether the address is already allocated.
	 * Can we trust user mode?
	 */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
	if (unlikely(mali_vma_node)) {
		/* Not supported yet */
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}

	/* Create the mali memory allocation */
	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* Check whether dedicated memory is available */
	if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/* Add the allocation node to the RB tree for indexing */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	/* Check whether a physical backend is needed at all */
	if (mali_allocation->psize == 0)
		return _MALI_OSK_ERR_OK;

	/* Allocate the physical backend & pages */
	if (likely(mali_allocation->psize > 0)) {
		mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
		if (mali_allocation->backend_handle < 0) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
			goto failed_alloc_backend;
		}

		mem_backend->mali_allocation = mali_allocation;
		mem_backend->type = mali_allocation->type;

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* Try to allocate from BLOCK memory first, then fall back to OS memory on failure. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else {
			/* Unsupported memory type */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
			goto failed_alloc_pages;
		}
	}

	/* Map to the GPU side */
	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* Set the GPU MMU mapping properties */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);

	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */
		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
			goto failed_gpu_map;
		}
		/* Only OS and BLOCK memory types are supported here */
		if (mem_backend->type == MALI_MEM_OS) {
			mali_mem_os_mali_map(mem_backend, args->gpu_vaddr,
					     mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else {
			/* Not supported yet */
			MALI_DEBUG_ASSERT(0);
		}
		session->mali_mem_array[mem_backend->type] += mem_backend->size;
		if (session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK] > session->max_mali_mem_allocated) {
			session->max_mali_mem_allocated = session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK];
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	return _MALI_OSK_ERR_OK;

failed_gpu_map:
	_mali_osk_mutex_signal(session->memory_lock);
	if (mem_backend->type == MALI_MEM_OS) {
		mali_mem_os_free(&mem_backend->os_mem);
	} else {
		mali_mem_block_free(&mem_backend->block_mem);
	}
failed_alloc_pages:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
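
For context before the next listing: user space reaches _mali_ukk_mem_allocate through the driver's ioctl dispatcher. A hedged sketch of such a wrapper follows; the mem_alloc_wrapper name and the exact copy-in/copy-out semantics are assumptions, not code taken from the driver.

static int mem_alloc_wrapper(struct mali_session_data *session_data,
			     _mali_uk_alloc_mem_s __user *uargs)
{
	_mali_uk_alloc_mem_s kargs;

	if (0 != copy_from_user(&kargs, uargs, sizeof(kargs)))
		return -EFAULT;

	/* stamp the session into ctx, as _mali_ukk_mem_allocate expects */
	kargs.ctx = (uintptr_t)session_data;

	if (_MALI_OSK_ERR_OK != _mali_ukk_mem_allocate(&kargs))
		return -EFAULT; /* map the OSK error code to an errno as needed */

	/* copy any out-parameters back to user space here if required */
	return 0;
}

Example #3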
/**
 * _mali_ukk_mem_allocate - allocate mali memory
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

	/* Check whether the address is already allocated */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);

	if (unlikely(mali_vma_node)) {
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}
	/* Create the mali memory allocation */

	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* Choose the allocation type:
	 * MALI_MEM_SWAP if the allocation must be swappable,
	 * MALI_MEM_OS if it must support resizing,
	 * MALI_MEM_BLOCK if dedicated memory is available,
	 * otherwise MALI_MEM_OS.
	 */
	if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
		mali_allocation->type = MALI_MEM_SWAP;
	} else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
		mali_allocation->type = MALI_MEM_OS;
		mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
	} else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/* Add the allocation node to the RB tree for indexing */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
	if (mali_allocation->backend_handle < 0) {
		ret = _MALI_OSK_ERR_NOMEM;
		MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
		goto failed_alloc_backend;
	}


	mem_backend->mali_allocation = mali_allocation;
	mem_backend->type = mali_allocation->type;

	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* Set the GPU MMU mapping properties */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
	/* Prepare the MALI mapping */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);

		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			_mali_osk_mutex_signal(session->memory_lock);
			goto failed_prepare_map;
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	if (mali_allocation->psize == 0) {
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);
		goto done;
	}

	if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
		mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
		mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
		/* Initialize the backend for deferred binding */
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);

		goto done;
	}
	/* Allocate the physical memory */
	if (likely(mali_allocation->psize > 0)) {

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* Try to allocate from BLOCK memory first, then fall back to OS memory on failure. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else if (MALI_MEM_SWAP == mem_backend->type) {
			retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
		} else {
			/* Unsupported memory type */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
			goto failed_alloc_pages;
		}
	}

	/* Map to the GPU side */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */

		if (mem_backend->type == MALI_MEM_OS) {
			ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
						   mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);

		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_SWAP) {
			ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
						     mali_allocation->mali_mapping.properties);
		} else { /* Unsupported type */
			MALI_DEBUG_ASSERT(0);
		}

		_mali_osk_mutex_signal(session->memory_lock);
	}
done:
	if (MALI_MEM_OS == mem_backend->type) {
		atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
	} else if (MALI_MEM_BLOCK == mem_backend->type) {
		atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
	} else {
		MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
	}

	if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
		session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
	}
	return _MALI_OSK_ERR_OK;

failed_alloc_pages:
	mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
failed_prepare_map:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
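
The two _mali_ukk_mem_allocate listings are two revisions of the same entry point; the newer one adds swappable and deferred-bind allocations and switches the session accounting to page counts. A minimal kernel-side usage sketch follows, touching only the fields both versions actually read (ctx, gpu_vaddr, vsize, psize, flags); the example_alloc name, the 512-page size, and the zero flags are illustrative assumptions.

_mali_osk_errcode_t example_alloc(struct mali_session_data *session, u32 gpu_vaddr)
{
	_mali_uk_alloc_mem_s args = { 0 };

	args.ctx       = (uintptr_t)session;        /* session pointer, cast as both versions expect */
	args.gpu_vaddr = gpu_vaddr;                 /* must not collide with an existing vma node */
	args.vsize     = 512 * MALI_MMU_PAGE_SIZE;  /* virtual size reserved in the RB tree */
	args.psize     = 512 * MALI_MMU_PAGE_SIZE;  /* physical size to back (0 = virtual-only) */
	args.flags     = 0;                         /* or e.g. _MALI_MEMORY_ALLOCATE_SWAPPABLE */

	return _mali_ukk_mem_allocate(&args);
}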