/** @note munmap handling is done by the vma close handler */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mali_session_data *session;
	mali_mem_allocation *descriptor = NULL;
	u32 size = vma->vm_end - vma->vm_start;
	u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;

	session = (struct mali_session_data *)filp->private_data;
	if (NULL == session) {
		MALI_PRINT_ERROR(("mmap called without any session data available\n"));
		return -EFAULT;
	}

	MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
	                     (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
	                     (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));

	/* Set some bits which indicate that the memory is IO memory, meaning
	 * that no paging is to be performed and the memory should not be
	 * included in crash dumps, and that the memory is reserved, meaning
	 * that it is present and can never be paged out (see also the previous
	 * entry).
	 */
	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_DONTCOPY;
	vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
	vma->vm_flags |= VM_RESERVED;
#else
	vma->vm_flags |= VM_DONTDUMP;
	vma->vm_flags |= VM_DONTEXPAND;
#endif

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */

	/*
	 * On ALPS projects, especially in low-memory configurations, memory
	 * fragmentation makes it hard to find 256KB (2^6 * 4KB pages) of
	 * physically contiguous memory; even a 32KB contiguous allocation can
	 * fail, which may lead to an ANR or a kernel exception (KE). So avoid
	 * calling the 256KB block allocator directly here:
	 *
	 * descriptor = mali_mem_block_alloc(mali_addr, size, vma, session);
	 */
	if (NULL == descriptor) {
		descriptor = mali_mem_os_alloc(mali_addr, size, vma, session);
		if (NULL == descriptor) {
			MALI_DEBUG_PRINT(3, ("MMAP failed\n"));
			return -ENOMEM;
		}
	}

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	vma->vm_private_data = (void*)descriptor;

	/* Put on descriptor map */
	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_os_release(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);
		return -EFAULT;
	}

	return 0;
}
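In this handler the Mali virtual address is recovered from the mmap offset (vm_pgoff << PAGE_SHIFT), so user space picks the GPU address through the offset argument of mmap(). A minimal user-space sketch of how the handler is reached, assuming the device node is /dev/mali (the actual node name and protocol are set by the platform integration and the user-space driver):

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical illustration only: the mmap offset carries the Mali (GPU)
 * virtual address, page-aligned. */
int map_mali_memory(uint32_t mali_addr, size_t size)
{
	int fd = open("/dev/mali", O_RDWR);   /* device node name is an assumption */
	void *cpu_ptr;

	if (fd < 0)
		return -1;

	/* offset == mali_addr, so the driver sees vm_pgoff << PAGE_SHIFT == mali_addr */
	cpu_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mali_addr);
	if (cpu_ptr == MAP_FAILED) {
		close(fd);
		return -1;
	}

	/* ... use cpu_ptr; munmap() later triggers the driver's vma close handler ... */
	return 0;
}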
void mali_pm_exec_unlock(void)
{
	_mali_osk_mutex_signal(pm_lock_exec);
}
mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
{
	_mali_osk_errcode_t err;
	mali_mem_allocation *descriptor;
	block_allocator *info;
	u32 left;
	block_info *last_allocated = NULL;
	block_allocator_allocation *ret_allocation;
	u32 offset = 0;

	size = ALIGN(size, MALI_BLOCK_SIZE);

	info = mali_mem_block_gobal_allocator;
	if (NULL == info) return NULL;

	left = size;
	MALI_DEBUG_ASSERT(0 != left);

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK);
	if (NULL == descriptor) {
		return NULL;
	}

	descriptor->mali_mapping.addr = mali_addr;
	descriptor->size = size;
	descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
	descriptor->cpu_mapping.ref = 1;

	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
	} else {
		/* Cached Mali memory mapping */
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
		vma->vm_flags |= VM_SHARED;
	}

	ret_allocation = &descriptor->block_mem.mem;

	ret_allocation->mapping_length = 0;

	_mali_osk_mutex_wait(session->memory_lock);
	mutex_lock(&info->mutex);

	if (left > (info->free_blocks * MALI_BLOCK_SIZE)) {
		MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left));
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}

	err = mali_mem_mali_map_prepare(descriptor);
	if (_MALI_OSK_ERR_OK != err) {
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}

	while ((left > 0) && (info->first_free)) {
		block_info *block;
		u32 phys_addr;
		u32 current_mapping_size;

		block = info->first_free;
		info->first_free = info->first_free->next;
		block->next = last_allocated;
		last_allocated = block;

		phys_addr = get_phys(info, block);

		if (MALI_BLOCK_SIZE < left) {
			current_mapping_size = MALI_BLOCK_SIZE;
		} else {
			current_mapping_size = left;
		}

		mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
		if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
			/* release all memory back to the pool */
			while (last_allocated) {
				/* This relinks every block we've just allocated back into the free-list */
				block = last_allocated->next;
				last_allocated->next = info->first_free;
				info->first_free = last_allocated;
				last_allocated = block;
			}

			mutex_unlock(&info->mutex);
			_mali_osk_mutex_signal(session->memory_lock);

			mali_mem_mali_map_free(descriptor);
			mali_mem_descriptor_destroy(descriptor);

			return NULL;
		}

		left -= current_mapping_size;
		offset += current_mapping_size;
		ret_allocation->mapping_length += current_mapping_size;

		--info->free_blocks;
	}

	mutex_unlock(&info->mutex);
	_mali_osk_mutex_signal(session->memory_lock);

	MALI_DEBUG_ASSERT(0 == left);

	/* Record all the information about this allocation */
	ret_allocation->last_allocated = last_allocated;
	ret_allocation->info = info;

	return descriptor;
}
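The failure path above works because last_allocated is a LIFO chain of the blocks taken so far, so unwinding is just pushing each one back onto the allocator's free list. A detached sketch of that rollback, as a hypothetical helper mirroring the field names used above:

/* Illustration only: push every block grabbed so far back onto the free list. */
static void rollback_blocks(block_info **free_list, block_info *taken)
{
	while (taken) {
		block_info *next = taken->next;  /* remember the rest of the taken chain */
		taken->next = *free_list;        /* push this block onto the free list */
		*free_list = taken;
		taken = next;
	}
}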
/** @note munmap handling is done by the vma close handler */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mali_session_data *session;
	mali_mem_allocation *descriptor;
	u32 size = vma->vm_end - vma->vm_start;
	u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;

	session = (struct mali_session_data *)filp->private_data;
	if (NULL == session) {
		MALI_PRINT_ERROR(("mmap called without any session data available\n"));
		return -EFAULT;
	}

	MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
			     (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
			     (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));

	/* Set some bits which indicate that the memory is IO memory, meaning
	 * that no paging is to be performed and the memory should not be
	 * included in crash dumps, and that the memory is reserved, meaning
	 * that it is present and can never be paged out (see also the previous
	 * entry).
	 */
	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_DONTCOPY;
	vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
	vma->vm_flags |= VM_RESERVED;
#else
	vma->vm_flags |= VM_DONTDUMP;
	vma->vm_flags |= VM_DONTEXPAND;
#endif

	/* CTS 5.1_r0.5 security case:
	 * force the mapping to be unreadable (reads will fault) when the cookie
	 * is KBASE_REG_COOKIE_MTP or KBASE_REG_COOKIE_TB.
	 */
#define KBASE_REG_COOKIE_MTP 1
#define KBASE_REG_COOKIE_TB 2
	if ((vma->vm_pgoff == KBASE_REG_COOKIE_MTP) || (vma->vm_pgoff == KBASE_REG_COOKIE_TB)) {
		vma->vm_flags &= ~(VM_READ | VM_MAYREAD);
	}

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */

	descriptor = mali_mem_block_alloc(mali_addr, size, vma, session);
	if (NULL == descriptor) {
		descriptor = mali_mem_os_alloc(mali_addr, size, vma, session);
		if (NULL == descriptor) {
			MALI_DEBUG_PRINT(3, ("MMAP failed\n"));
			return -ENOMEM;
		}
	}

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	vma->vm_private_data = (void *)descriptor;

	/* Put on descriptor map */
	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
		_mali_osk_mutex_wait(session->memory_lock);
		if (MALI_MEM_OS == descriptor->type) {
			mali_mem_os_release(descriptor);
		} else if (MALI_MEM_BLOCK == descriptor->type) {
			mali_mem_block_release(descriptor);
		}
		_mali_osk_mutex_signal(session->memory_lock);
		return -EFAULT;
	}

	return 0;
}
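The direct vma->vm_flags |= ... assignments in the two handlers above only compile on older kernels; since Linux 6.3 vm_flags is sealed against direct writes and must be changed through the vm_flags_set()/vm_flags_clear() helpers. A sketch of how the flag-setting section would look if this driver were ported forward, assuming the same flag set:

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
	/* vm_flags can no longer be assigned directly; use the accessor. */
	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_PFNMAP | VM_DONTDUMP | VM_DONTEXPAND);
#else
	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_PFNMAP |
			 VM_DONTDUMP | VM_DONTEXPAND;
#endif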
_mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction)
{
	ump_session_data *session_data = NULL;
	ump_dd_mem *new_allocation = NULL;
	ump_session_memory_list_element *session_memory_element = NULL;
	int ret;

	DEBUG_ASSERT_POINTER(user_interaction);
	DEBUG_ASSERT_POINTER(user_interaction->ctx);

	session_data = (ump_session_data *) user_interaction->ctx;

	session_memory_element = _mali_osk_calloc(1, sizeof(ump_session_memory_list_element));
	if (NULL == session_memory_element) {
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}


	new_allocation = _mali_osk_calloc(1, sizeof(ump_dd_mem));
	if (NULL == new_allocation) {
		_mali_osk_free(session_memory_element);
		DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	/* Initialize the part of the new_allocation that we know so far */
	_mali_osk_atomic_init(&new_allocation->ref_count, 1);
	if (0 == (UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints))
		new_allocation->is_cached = 0;
	else
		new_allocation->is_cached = 1;

	/* Special-case a size of 0: emulate what malloc does in this case,
	 * which is to return a valid pointer that must be freed but cannot
	 * be dereferenced */
	if (0 == user_interaction->size) {
		/* Emulate by actually allocating the minimum block size */
		user_interaction->size = 1;
	}

	/* Page align the size */
	new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size);
	new_allocation->lock_usage = UMP_NOT_LOCKED;

	/* Now, ask the active memory backend to do the actual memory allocation */
	if (!device.backend->allocate(device.backend->ctx, new_allocation)) {
		DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n",
			    new_allocation->size_bytes,
			    (unsigned long)user_interaction->size));
		_mali_osk_free(new_allocation);
		_mali_osk_free(session_memory_element);
		return _MALI_OSK_ERR_INVALID_FUNC;
	}
	new_allocation->hw_device = _UMP_UK_USED_BY_CPU;
	new_allocation->ctx = device.backend->ctx;
	new_allocation->release_func = device.backend->release;

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element->mem = new_allocation;
	_mali_osk_mutex_wait(session_data->lock);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_mutex_signal(session_data->lock);

	/* Create a secure ID for this allocation */
	ret = ump_random_mapping_insert(device.secure_id_map, new_allocation);
	if (unlikely(ret)) {
		new_allocation->release_func(new_allocation->ctx, new_allocation);
		_mali_osk_free(session_memory_element);
		_mali_osk_free(new_allocation);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_INVALID_FUNC;
	}

	user_interaction->secure_id = new_allocation->secure_id;
	user_interaction->size = new_allocation->size_bytes;
	DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n",
		    new_allocation->secure_id,
		    new_allocation->size_bytes));

	return _MALI_OSK_ERR_OK;
}
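UMP_SIZE_ALIGN is what turns the requested size (bumped to 1 for the zero-size case) into a whole number of pages. Its definition is not part of this listing; a typical round-up macro of that shape, assuming UMP_PAGE_SIZE is a power of two, would be:

/* Assumed shape of the macro used above: round size up to the next
 * multiple of UMP_PAGE_SIZE (which must be a power of two). */
#define UMP_SIZE_ALIGN(x) (((x) + UMP_PAGE_SIZE - 1) & ~(UMP_PAGE_SIZE - 1))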
_mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args)
{
	ump_dd_handle ump_mem;
	struct mali_session_data *session;
	mali_mem_allocation *descriptor;
	int md, ret;

	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);

	session = (struct mali_session_data *)(uintptr_t)args->ctx;

	/* check arguments */
	/* NULL might be a valid Mali address */
	if (!args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	/* size must be a multiple of the system page size */
	if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	MALI_DEBUG_PRINT(3,
			 ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
			  args->secure_id, args->mali_address, args->size));

	ump_mem = ump_dd_handle_create_from_secure_id((int)args->secure_id);

	if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_UMP);
	if (NULL == descriptor) {
		ump_dd_reference_release(ump_mem);
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	descriptor->ump_mem.handle = ump_mem;
	descriptor->mali_mapping.addr = args->mali_address;
	descriptor->size = args->size;
	descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
	descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;

	if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
		/* Keep the DONT_CPU_MAP flag set above; only add the guard-page flag. */
		descriptor->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
	}

	_mali_osk_mutex_wait(session->memory_lock);

	ret = mali_ump_map(session, descriptor);
	if (0 != ret) {
		_mali_osk_mutex_signal(session->memory_lock);
		ump_dd_reference_release(ump_mem);
		mali_mem_descriptor_destroy(descriptor);
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	_mali_osk_mutex_signal(session->memory_lock);


	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
		ump_dd_reference_release(ump_mem);
		mali_mem_descriptor_destroy(descriptor);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	args->cookie = md;

	MALI_DEBUG_PRINT(5, ("Returning from UMP attach\n"));

	MALI_SUCCESS;
}
/**
 * _mali_ukk_mem_allocate() - allocate Mali memory
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

	/* Check whether the address is already allocated.
	 * Can we trust user mode?
	 */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
	if (unlikely(mali_vma_node)) {
		/* Not supported yet */
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}

	/* Create the Mali memory allocation */
	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* Check whether dedicated (block) memory is available */
	if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/* Add the allocation node to the RB tree for indexing */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	/* Check whether a physical backend needs to be allocated */
	if (mali_allocation->psize == 0)
		return _MALI_OSK_ERR_OK;

	/* Allocate the physical backend and pages */
	if (likely(mali_allocation->psize > 0)) {
		mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
		if (mali_allocation->backend_handle < 0) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
			goto failed_alloc_backend;
		}

		mem_backend->mali_allocation = mali_allocation;
		mem_backend->type = mali_allocation->type;

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* Try to allocate from BLOCK memory first, then fall back to OS memory if that fails. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else {
			/* Only MALI_MEM_OS and MALI_MEM_BLOCK are supported */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
			goto failed_alloc_pages;
		}
	}

	/* Map to the GPU side */
	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* Set the GPU MMU mapping property */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);

	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */
		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
			goto failed_gpu_map;
		}
		/* Map according to the backend type (OS or block memory) */
		if (mem_backend->type == MALI_MEM_OS) {
			mali_mem_os_mali_map(mem_backend, args->gpu_vaddr,
					     mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else {
			/* Not supported yet */
			MALI_DEBUG_ASSERT(0);
		}
		session->mali_mem_array[mem_backend->type] += mem_backend->size;
		if (session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK] > session->max_mali_mem_allocated) {
			session->max_mali_mem_allocated = session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK];
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	return _MALI_OSK_ERR_OK;

failed_gpu_map:
	_mali_osk_mutex_signal(session->memory_lock);
	if (mem_backend->type == MALI_MEM_OS) {
		mali_mem_os_free(&mem_backend->os_mem);
	} else {
		mali_mem_block_free(&mem_backend->block_mem);
	}
failed_alloc_pages:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
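For orientation, here is a hypothetical in-kernel caller of _mali_ukk_mem_allocate(); only the argument fields the function above actually reads (ctx, gpu_vaddr, psize, vsize, flags) are filled in, and the values are purely illustrative:

/* Hypothetical caller sketch; field usage mirrors _mali_ukk_mem_allocate() above. */
static _mali_osk_errcode_t example_allocate(struct mali_session_data *session)
{
	_mali_uk_alloc_mem_s args = { 0 };

	args.ctx = (uintptr_t)session;   /* the function casts this back to the session */
	args.gpu_vaddr = 0x10000000;     /* a free Mali virtual address (illustrative) */
	args.psize = 16 * PAGE_SIZE;     /* physical backing requested now */
	args.vsize = 16 * PAGE_SIZE;     /* virtual span reserved in the VA manager */
	args.flags = 0;                  /* no _MALI_MEMORY_ALLOCATE_NO_BIND_GPU: map to GPU */

	return _mali_ukk_mem_allocate(&args);
}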
_mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args)
{
	struct mali_session_data *session;
	mali_mem_allocation *descriptor;
	int md;
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);

	session = (struct mali_session_data *)args->ctx;
	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);

	/* check arguments */
	/* NULL might be a valid Mali address */
	if (!args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	/* size must be a multiple of the system page size */
	if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	MALI_DEBUG_PRINT(3,
	                 ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
	                  args->phys_addr,
	                  args->phys_addr + args->size - 1,
	                  args->mali_address)
	                );

	/* Validate the mali physical range */
	if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size)) {
		return _MALI_OSK_ERR_FAULT;
	}

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_EXTERNAL);
	if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);

	descriptor->mali_mapping.addr = args->mali_address;
	descriptor->size = args->size;

	if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
		descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
	}

	_mali_osk_mutex_wait(session->memory_lock);
	{
		u32 virt = descriptor->mali_mapping.addr;
		u32 phys = args->phys_addr;
		u32 size = args->size;

		err = mali_mem_mali_map_prepare(descriptor);
		if (_MALI_OSK_ERR_OK != err) {
			_mali_osk_mutex_signal(session->memory_lock);
			mali_mem_descriptor_destroy(descriptor);
			return _MALI_OSK_ERR_NOMEM;
		}

		mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);

		if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
			mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
		}
	}
	_mali_osk_mutex_signal(session->memory_lock);

	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_external_release(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	args->cookie = md;

	MALI_SUCCESS;
}
mali_bool mali_clk_set_rate(unsigned int clk, unsigned int mhz)
{
	unsigned long rate = 0;
	mali_bool bis_vpll = MALI_TRUE;

#ifndef CONFIG_VPLL_USE_FOR_TVENC
	bis_vpll = MALI_TRUE;
#endif

#ifndef CONFIG_MALI_DVFS
	clk = mali_gpu_clk;
#endif

	_mali_osk_mutex_wait(mali_dvfs_lock);

	if (mali_clk_get(bis_vpll) == MALI_FALSE) {
		printk(KERN_ERR "ERROR: [%s] %d\n", __func__, __LINE__);
		/* Drop the DVFS lock before bailing out. */
		_mali_osk_mutex_signal(mali_dvfs_lock);
		return MALI_FALSE;
	}

	rate = (unsigned long)clk * (unsigned long)mhz;
	MALI_DEBUG_PRINT(2, ("= clk_set_rate: %d, %d\n", clk, mhz));

	if (bis_vpll) {
		clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
		//clk_set_parent(vpll_src_clock, ext_xtal_clock);
		clk_set_parent(sclk_vpll_clock, fout_vpll_clock);

		clk_set_parent(mali_parent_clock, sclk_vpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	} else {
		clk_set_parent(mali_parent_clock, mpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	}

	if (clk_enable(mali_clock) < 0) {
		printk(KERN_ERR "ERROR: [%s] %d\n", __func__, __LINE__);
		/* Undo the clock get and drop the DVFS lock before bailing out. */
		mali_clk_put(MALI_FALSE);
		_mali_osk_mutex_signal(mali_dvfs_lock);
		return MALI_FALSE;
	}

	clk_set_rate(mali_clock, rate);
	rate = clk_get_rate(mali_clock);

	if (bis_vpll)
		mali_gpu_clk = (int)(rate / mhz);
	else
		mali_gpu_clk = (int)((rate + 500000) / mhz);

	GPU_MHZ = mhz;
	MALI_DEBUG_PRINT(2, ("= clk_get_rate: %d\n", mali_gpu_clk));

	mali_clk_put(MALI_FALSE);

	_mali_osk_mutex_signal(mali_dvfs_lock);

	return MALI_TRUE;
}
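For a concrete feel for the rounding at the end, assuming GPU_MHZ and the mhz argument are both 1000000 (Hz per MHz): requesting clk = 440 asks the clock framework for 440000000 Hz; if clk_get_rate() then reports 439600000 Hz, the non-VPLL branch computes (439600000 + 500000) / 1000000 = 440, i.e. mali_gpu_clk is rounded to the nearest MHz, whereas the VPLL branch's plain division would yield 439.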